python_code   string  (lengths 0 – 1.8M)
repo_name     string  (7 classes)
file_path     string  (lengths 5 – 99)
// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (c) 1998 - 2002 Frodo Looijaard <[email protected]>, Philip Edelbrock <[email protected]>, Kyösti Mälkki <[email protected]>, Mark D. Studebaker <[email protected]> Copyright (C) 2005 - 2008 Jean Delvare <[email protected]> */ /* Supports the following VIA south bridges: Chip name PCI ID REV I2C block VT82C596A 0x3050 no VT82C596B 0x3051 no VT82C686A 0x3057 0x30 no VT82C686B 0x3057 0x40 yes VT8231 0x8235 no? VT8233 0x3074 yes VT8233A 0x3147 yes? VT8235 0x3177 yes VT8237R 0x3227 yes VT8237A 0x3337 yes VT8237S 0x3372 yes VT8251 0x3287 yes CX700 0x8324 yes VX800/VX820 0x8353 yes VX855/VX875 0x8409 yes Note: we assume there can only be one device, with one SMBus interface. */ #include <linux/module.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/stddef.h> #include <linux/ioport.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/acpi.h> #include <linux/io.h> static struct pci_dev *vt596_pdev; #define SMBBA1 0x90 #define SMBBA2 0x80 #define SMBBA3 0xD0 /* SMBus address offsets */ static unsigned short vt596_smba; #define SMBHSTSTS (vt596_smba + 0) #define SMBHSTCNT (vt596_smba + 2) #define SMBHSTCMD (vt596_smba + 3) #define SMBHSTADD (vt596_smba + 4) #define SMBHSTDAT0 (vt596_smba + 5) #define SMBHSTDAT1 (vt596_smba + 6) #define SMBBLKDAT (vt596_smba + 7) /* PCI Address Constants */ /* SMBus data in configuration space can be found in two places, We try to select the better one */ static unsigned short SMBHSTCFG = 0xD2; /* Other settings */ #define MAX_TIMEOUT 500 /* VT82C596 constants */ #define VT596_QUICK 0x00 #define VT596_BYTE 0x04 #define VT596_BYTE_DATA 0x08 #define VT596_WORD_DATA 0x0C #define VT596_PROC_CALL 0x10 #define VT596_BLOCK_DATA 0x14 #define VT596_I2C_BLOCK_DATA 0x34 /* If force is set to anything different from 0, we forcibly enable the VT596. DANGEROUS! */ static bool force; module_param(force, bool, 0); MODULE_PARM_DESC(force, "Forcibly enable the SMBus. DANGEROUS!"); /* If force_addr is set to anything different from 0, we forcibly enable the VT596 at the given address. VERY DANGEROUS! */ static u16 force_addr; module_param_hw(force_addr, ushort, ioport, 0); MODULE_PARM_DESC(force_addr, "Forcibly enable the SMBus at the given address. " "EXTREMELY DANGEROUS!"); static struct pci_driver vt596_driver; static struct i2c_adapter vt596_adapter; #define FEATURE_I2CBLOCK (1<<0) static unsigned int vt596_features; #ifdef DEBUG static void vt596_dump_regs(const char *msg, u8 size) { dev_dbg(&vt596_adapter.dev, "%s: STS=%02x CNT=%02x CMD=%02x ADD=%02x " "DAT=%02x,%02x\n", msg, inb_p(SMBHSTSTS), inb_p(SMBHSTCNT), inb_p(SMBHSTCMD), inb_p(SMBHSTADD), inb_p(SMBHSTDAT0), inb_p(SMBHSTDAT1)); if (size == VT596_BLOCK_DATA || size == VT596_I2C_BLOCK_DATA) { int i; dev_dbg(&vt596_adapter.dev, "BLK="); for (i = 0; i < I2C_SMBUS_BLOCK_MAX / 2; i++) printk("%02x,", inb_p(SMBBLKDAT)); printk("\n"); dev_dbg(&vt596_adapter.dev, " "); for (; i < I2C_SMBUS_BLOCK_MAX - 1; i++) printk("%02x,", inb_p(SMBBLKDAT)); printk("%02x\n", inb_p(SMBBLKDAT)); } } #else static inline void vt596_dump_regs(const char *msg, u8 size) { } #endif /* Return -1 on error, 0 on success */ static int vt596_transaction(u8 size) { int temp; int result = 0; int timeout = 0; vt596_dump_regs("Transaction (pre)", size); /* Make sure the SMBus host is ready to start transmitting */ if ((temp = inb_p(SMBHSTSTS)) & 0x1F) { dev_dbg(&vt596_adapter.dev, "SMBus busy (0x%02x). 
" "Resetting...\n", temp); outb_p(temp, SMBHSTSTS); if ((temp = inb_p(SMBHSTSTS)) & 0x1F) { dev_err(&vt596_adapter.dev, "SMBus reset failed! " "(0x%02x)\n", temp); return -EBUSY; } } /* Start the transaction by setting bit 6 */ outb_p(0x40 | size, SMBHSTCNT); /* We will always wait for a fraction of a second */ do { msleep(1); temp = inb_p(SMBHSTSTS); } while ((temp & 0x01) && (++timeout < MAX_TIMEOUT)); /* If the SMBus is still busy, we give up */ if (timeout == MAX_TIMEOUT) { result = -ETIMEDOUT; dev_err(&vt596_adapter.dev, "SMBus timeout!\n"); } if (temp & 0x10) { result = -EIO; dev_err(&vt596_adapter.dev, "Transaction failed (0x%02x)\n", size); } if (temp & 0x08) { result = -EIO; dev_err(&vt596_adapter.dev, "SMBus collision!\n"); } if (temp & 0x04) { result = -ENXIO; dev_dbg(&vt596_adapter.dev, "No response\n"); } /* Resetting status register */ if (temp & 0x1F) outb_p(temp, SMBHSTSTS); vt596_dump_regs("Transaction (post)", size); return result; } /* Return negative errno on error, 0 on success */ static s32 vt596_access(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { int i; int status; switch (size) { case I2C_SMBUS_QUICK: size = VT596_QUICK; break; case I2C_SMBUS_BYTE: if (read_write == I2C_SMBUS_WRITE) outb_p(command, SMBHSTCMD); size = VT596_BYTE; break; case I2C_SMBUS_BYTE_DATA: outb_p(command, SMBHSTCMD); if (read_write == I2C_SMBUS_WRITE) outb_p(data->byte, SMBHSTDAT0); size = VT596_BYTE_DATA; break; case I2C_SMBUS_WORD_DATA: outb_p(command, SMBHSTCMD); if (read_write == I2C_SMBUS_WRITE) { outb_p(data->word & 0xff, SMBHSTDAT0); outb_p((data->word & 0xff00) >> 8, SMBHSTDAT1); } size = VT596_WORD_DATA; break; case I2C_SMBUS_PROC_CALL: outb_p(command, SMBHSTCMD); outb_p(data->word & 0xff, SMBHSTDAT0); outb_p((data->word & 0xff00) >> 8, SMBHSTDAT1); size = VT596_PROC_CALL; break; case I2C_SMBUS_I2C_BLOCK_DATA: if (!(vt596_features & FEATURE_I2CBLOCK)) goto exit_unsupported; if (read_write == I2C_SMBUS_READ) outb_p(data->block[0], SMBHSTDAT0); fallthrough; case I2C_SMBUS_BLOCK_DATA: outb_p(command, SMBHSTCMD); if (read_write == I2C_SMBUS_WRITE) { u8 len = data->block[0]; if (len > I2C_SMBUS_BLOCK_MAX) len = I2C_SMBUS_BLOCK_MAX; outb_p(len, SMBHSTDAT0); inb_p(SMBHSTCNT); /* Reset SMBBLKDAT */ for (i = 1; i <= len; i++) outb_p(data->block[i], SMBBLKDAT); } size = (size == I2C_SMBUS_I2C_BLOCK_DATA) ? 
VT596_I2C_BLOCK_DATA : VT596_BLOCK_DATA; break; default: goto exit_unsupported; } outb_p(((addr & 0x7f) << 1) | read_write, SMBHSTADD); status = vt596_transaction(size); if (status) return status; if (size == VT596_PROC_CALL) read_write = I2C_SMBUS_READ; if ((read_write == I2C_SMBUS_WRITE) || (size == VT596_QUICK)) return 0; switch (size) { case VT596_BYTE: case VT596_BYTE_DATA: data->byte = inb_p(SMBHSTDAT0); break; case VT596_WORD_DATA: case VT596_PROC_CALL: data->word = inb_p(SMBHSTDAT0) + (inb_p(SMBHSTDAT1) << 8); break; case VT596_I2C_BLOCK_DATA: case VT596_BLOCK_DATA: data->block[0] = inb_p(SMBHSTDAT0); if (data->block[0] > I2C_SMBUS_BLOCK_MAX) data->block[0] = I2C_SMBUS_BLOCK_MAX; inb_p(SMBHSTCNT); /* Reset SMBBLKDAT */ for (i = 1; i <= data->block[0]; i++) data->block[i] = inb_p(SMBBLKDAT); break; } return 0; exit_unsupported: dev_warn(&vt596_adapter.dev, "Unsupported transaction %d\n", size); return -EOPNOTSUPP; } static u32 vt596_func(struct i2c_adapter *adapter) { u32 func = I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_SMBUS_PROC_CALL | I2C_FUNC_SMBUS_BLOCK_DATA; if (vt596_features & FEATURE_I2CBLOCK) func |= I2C_FUNC_SMBUS_I2C_BLOCK; return func; } static const struct i2c_algorithm smbus_algorithm = { .smbus_xfer = vt596_access, .functionality = vt596_func, }; static struct i2c_adapter vt596_adapter = { .owner = THIS_MODULE, .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, .algo = &smbus_algorithm, }; static int vt596_probe(struct pci_dev *pdev, const struct pci_device_id *id) { unsigned char temp; int error; /* Determine the address of the SMBus areas */ if (force_addr) { vt596_smba = force_addr & 0xfff0; force = 0; goto found; } if ((pci_read_config_word(pdev, id->driver_data, &vt596_smba)) || !(vt596_smba & 0x0001)) { /* try 2nd address and config reg. for 596 */ if (id->device == PCI_DEVICE_ID_VIA_82C596_3 && !pci_read_config_word(pdev, SMBBA2, &vt596_smba) && (vt596_smba & 0x0001)) { SMBHSTCFG = 0x84; } else { /* no matches at all */ dev_err(&pdev->dev, "Cannot configure " "SMBus I/O Base address\n"); return -ENODEV; } } vt596_smba &= 0xfff0; if (vt596_smba == 0) { dev_err(&pdev->dev, "SMBus base address " "uninitialized - upgrade BIOS or use " "force_addr=0xaddr\n"); return -ENODEV; } found: error = acpi_check_region(vt596_smba, 8, vt596_driver.name); if (error) return -ENODEV; if (!request_region(vt596_smba, 8, vt596_driver.name)) { dev_err(&pdev->dev, "SMBus region 0x%x already in use!\n", vt596_smba); return -ENODEV; } pci_read_config_byte(pdev, SMBHSTCFG, &temp); /* If force_addr is set, we program the new address here. Just to make sure, we disable the VT596 first. */ if (force_addr) { pci_write_config_byte(pdev, SMBHSTCFG, temp & 0xfe); pci_write_config_word(pdev, id->driver_data, vt596_smba); pci_write_config_byte(pdev, SMBHSTCFG, temp | 0x01); dev_warn(&pdev->dev, "WARNING: SMBus interface set to new " "address 0x%04x!\n", vt596_smba); } else if (!(temp & 0x01)) { if (force) { /* NOTE: This assumes I/O space and other allocations * WERE done by the Bios! Don't complain if your * hardware does weird things after enabling this. * :') Check for Bios updates before resorting to * this. */ pci_write_config_byte(pdev, SMBHSTCFG, temp | 0x01); dev_info(&pdev->dev, "Enabling SMBus device\n"); } else { dev_err(&pdev->dev, "SMBUS: Error: Host SMBus " "controller not enabled! 
- upgrade BIOS or " "use force=1\n"); error = -ENODEV; goto release_region; } } dev_dbg(&pdev->dev, "VT596_smba = 0x%X\n", vt596_smba); switch (pdev->device) { case PCI_DEVICE_ID_VIA_CX700: case PCI_DEVICE_ID_VIA_VX800: case PCI_DEVICE_ID_VIA_VX855: case PCI_DEVICE_ID_VIA_VX900: case PCI_DEVICE_ID_VIA_8251: case PCI_DEVICE_ID_VIA_8237: case PCI_DEVICE_ID_VIA_8237A: case PCI_DEVICE_ID_VIA_8237S: case PCI_DEVICE_ID_VIA_8235: case PCI_DEVICE_ID_VIA_8233A: case PCI_DEVICE_ID_VIA_8233_0: vt596_features |= FEATURE_I2CBLOCK; break; case PCI_DEVICE_ID_VIA_82C686_4: /* The VT82C686B (rev 0x40) does support I2C block transactions, but the VT82C686A (rev 0x30) doesn't */ if (pdev->revision >= 0x40) vt596_features |= FEATURE_I2CBLOCK; break; } vt596_adapter.dev.parent = &pdev->dev; snprintf(vt596_adapter.name, sizeof(vt596_adapter.name), "SMBus Via Pro adapter at %04x", vt596_smba); vt596_pdev = pci_dev_get(pdev); error = i2c_add_adapter(&vt596_adapter); if (error) { pci_dev_put(vt596_pdev); vt596_pdev = NULL; goto release_region; } /* Always return failure here. This is to allow other drivers to bind * to this pci device. We don't really want to have control over the * pci device, we only wanted to read as few register values from it. */ return -ENODEV; release_region: release_region(vt596_smba, 8); return error; } static const struct pci_device_id vt596_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596_3), .driver_data = SMBBA1 }, { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596B_3), .driver_data = SMBBA1 }, { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4), .driver_data = SMBBA1 }, { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233_0), .driver_data = SMBBA3 }, { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233A), .driver_data = SMBBA3 }, { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235), .driver_data = SMBBA3 }, { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237), .driver_data = SMBBA3 }, { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237A), .driver_data = SMBBA3 }, { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237S), .driver_data = SMBBA3 }, { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231_4), .driver_data = SMBBA1 }, { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8251), .driver_data = SMBBA3 }, { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_CX700), .driver_data = SMBBA3 }, { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VX800), .driver_data = SMBBA3 }, { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VX855), .driver_data = SMBBA3 }, { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VX900), .driver_data = SMBBA3 }, { 0, } }; MODULE_DEVICE_TABLE(pci, vt596_ids); static struct pci_driver vt596_driver = { .name = "vt596_smbus", .id_table = vt596_ids, .probe = vt596_probe, }; static int __init i2c_vt596_init(void) { return pci_register_driver(&vt596_driver); } static void __exit i2c_vt596_exit(void) { pci_unregister_driver(&vt596_driver); if (vt596_pdev != NULL) { i2c_del_adapter(&vt596_adapter); release_region(vt596_smba, 8); pci_dev_put(vt596_pdev); vt596_pdev = NULL; } } MODULE_AUTHOR("Kyosti Malkki <[email protected]>"); MODULE_AUTHOR("Mark D. Studebaker <[email protected]>"); MODULE_AUTHOR("Jean Delvare <[email protected]>"); MODULE_DESCRIPTION("vt82c596 SMBus driver"); MODULE_LICENSE("GPL"); module_init(i2c_vt596_init); module_exit(i2c_vt596_exit);
linux-master
drivers/i2c/busses/i2c-viapro.c
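The vt596_access() routine above is only ever reached through the kernel's SMBus paths. A minimal user-space sketch of exercising it via /dev/i2c-N is shown below; the bus node (/dev/i2c-0), the 0x50 client address, and the register offset are illustrative assumptions, not anything mandated by the driver.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

int main(void)
{
	/* Assumed bus node and client address, e.g. an SPD EEPROM at 0x50. */
	int fd = open("/dev/i2c-0", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (ioctl(fd, I2C_SLAVE, 0x50) < 0) {
		perror("I2C_SLAVE");
		return 1;
	}

	union i2c_smbus_data data;
	struct i2c_smbus_ioctl_data args = {
		.read_write = I2C_SMBUS_READ,
		.command    = 0x00,              /* register offset, example value */
		.size       = I2C_SMBUS_BYTE_DATA,
		.data       = &data,
	};
	if (ioctl(fd, I2C_SMBUS, &args) < 0) {
		perror("I2C_SMBUS");
		return 1;
	}
	printf("byte at 0x00: 0x%02x\n", data.byte);
	close(fd);
	return 0;
}

The i2c-dev layer turns the I2C_SMBUS ioctl into a call to the adapter's smbus_xfer callback, which on this bus is vt596_access().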
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2004 Steven J. Hill * Copyright (C) 2001,2002,2003 Broadcom Corporation * Copyright (C) 1995-2000 Simon G. Vogl */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/io.h> #include <asm/sibyte/sb1250_regs.h> #include <asm/sibyte/sb1250_smbus.h> struct i2c_algo_sibyte_data { void *data; /* private data */ int bus; /* which bus */ void *reg_base; /* CSR base */ }; /* ----- global defines ----------------------------------------------- */ #define SMB_CSR(a,r) ((long)(a->reg_base + r)) static int smbus_xfer(struct i2c_adapter *i2c_adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data * data) { struct i2c_algo_sibyte_data *adap = i2c_adap->algo_data; int data_bytes = 0; int error; while (csr_in32(SMB_CSR(adap, R_SMB_STATUS)) & M_SMB_BUSY) ; switch (size) { case I2C_SMBUS_QUICK: csr_out32((V_SMB_ADDR(addr) | (read_write == I2C_SMBUS_READ ? M_SMB_QDATA : 0) | V_SMB_TT_QUICKCMD), SMB_CSR(adap, R_SMB_START)); break; case I2C_SMBUS_BYTE: if (read_write == I2C_SMBUS_READ) { csr_out32((V_SMB_ADDR(addr) | V_SMB_TT_RD1BYTE), SMB_CSR(adap, R_SMB_START)); data_bytes = 1; } else { csr_out32(V_SMB_CMD(command), SMB_CSR(adap, R_SMB_CMD)); csr_out32((V_SMB_ADDR(addr) | V_SMB_TT_WR1BYTE), SMB_CSR(adap, R_SMB_START)); } break; case I2C_SMBUS_BYTE_DATA: csr_out32(V_SMB_CMD(command), SMB_CSR(adap, R_SMB_CMD)); if (read_write == I2C_SMBUS_READ) { csr_out32((V_SMB_ADDR(addr) | V_SMB_TT_CMD_RD1BYTE), SMB_CSR(adap, R_SMB_START)); data_bytes = 1; } else { csr_out32(V_SMB_LB(data->byte), SMB_CSR(adap, R_SMB_DATA)); csr_out32((V_SMB_ADDR(addr) | V_SMB_TT_WR2BYTE), SMB_CSR(adap, R_SMB_START)); } break; case I2C_SMBUS_WORD_DATA: csr_out32(V_SMB_CMD(command), SMB_CSR(adap, R_SMB_CMD)); if (read_write == I2C_SMBUS_READ) { csr_out32((V_SMB_ADDR(addr) | V_SMB_TT_CMD_RD2BYTE), SMB_CSR(adap, R_SMB_START)); data_bytes = 2; } else { csr_out32(V_SMB_LB(data->word & 0xff), SMB_CSR(adap, R_SMB_DATA)); csr_out32(V_SMB_MB(data->word >> 8), SMB_CSR(adap, R_SMB_DATA)); csr_out32((V_SMB_ADDR(addr) | V_SMB_TT_WR2BYTE), SMB_CSR(adap, R_SMB_START)); } break; default: return -EOPNOTSUPP; } while (csr_in32(SMB_CSR(adap, R_SMB_STATUS)) & M_SMB_BUSY) ; error = csr_in32(SMB_CSR(adap, R_SMB_STATUS)); if (error & M_SMB_ERROR) { /* Clear error bit by writing a 1 */ csr_out32(M_SMB_ERROR, SMB_CSR(adap, R_SMB_STATUS)); return (error & M_SMB_ERROR_TYPE) ? -EIO : -ENXIO; } if (data_bytes == 1) data->byte = csr_in32(SMB_CSR(adap, R_SMB_DATA)) & 0xff; if (data_bytes == 2) data->word = csr_in32(SMB_CSR(adap, R_SMB_DATA)) & 0xffff; return 0; } static u32 bit_func(struct i2c_adapter *adap) { return (I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA); } /* -----exported algorithm data: ------------------------------------- */ static const struct i2c_algorithm i2c_sibyte_algo = { .smbus_xfer = smbus_xfer, .functionality = bit_func, }; /* * registering functions to load algorithms at runtime */ static int __init i2c_sibyte_add_bus(struct i2c_adapter *i2c_adap, int speed) { struct i2c_algo_sibyte_data *adap = i2c_adap->algo_data; /* Register new adapter to i2c module... */ i2c_adap->algo = &i2c_sibyte_algo; /* Set the requested frequency. 
*/ csr_out32(speed, SMB_CSR(adap,R_SMB_FREQ)); csr_out32(0, SMB_CSR(adap,R_SMB_CONTROL)); return i2c_add_numbered_adapter(i2c_adap); } static struct i2c_algo_sibyte_data sibyte_board_data[2] = { { NULL, 0, (void *) (CKSEG1+A_SMB_BASE(0)) }, { NULL, 1, (void *) (CKSEG1+A_SMB_BASE(1)) } }; static struct i2c_adapter sibyte_board_adapter[2] = { { .owner = THIS_MODULE, .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, .algo = NULL, .algo_data = &sibyte_board_data[0], .nr = 0, .name = "SiByte SMBus 0", }, { .owner = THIS_MODULE, .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, .algo = NULL, .algo_data = &sibyte_board_data[1], .nr = 1, .name = "SiByte SMBus 1", }, }; static int __init i2c_sibyte_init(void) { pr_info("i2c-sibyte: i2c SMBus adapter module for SiByte board\n"); if (i2c_sibyte_add_bus(&sibyte_board_adapter[0], K_SMB_FREQ_100KHZ) < 0) return -ENODEV; if (i2c_sibyte_add_bus(&sibyte_board_adapter[1], K_SMB_FREQ_400KHZ) < 0) { i2c_del_adapter(&sibyte_board_adapter[0]); return -ENODEV; } return 0; } static void __exit i2c_sibyte_exit(void) { i2c_del_adapter(&sibyte_board_adapter[0]); i2c_del_adapter(&sibyte_board_adapter[1]); } module_init(i2c_sibyte_init); module_exit(i2c_sibyte_exit); MODULE_AUTHOR("Kip Walker (Broadcom Corp.)"); MODULE_AUTHOR("Steven J. Hill <[email protected]>"); MODULE_DESCRIPTION("SMBus adapter routines for SiByte boards"); MODULE_LICENSE("GPL");
linux-master
drivers/i2c/busses/i2c-sibyte.c
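For reference, the error handling at the tail of smbus_xfer() can be restated as a small stand-alone function: a set error bit fails the transfer, and the error-type bit selects -EIO (bus error) over -ENXIO (no acknowledge). The bit positions below are placeholders for the sketch, not the real M_SMB_* masks from sb1250_smbus.h.

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

/* Placeholder status bits standing in for M_SMB_ERROR / M_SMB_ERROR_TYPE. */
#define FAKE_SMB_ERROR      (1u << 4)
#define FAKE_SMB_ERROR_TYPE (1u << 5)

static int decode_status(uint32_t status)
{
	if (status & FAKE_SMB_ERROR)
		return (status & FAKE_SMB_ERROR_TYPE) ? -EIO : -ENXIO;
	return 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       decode_status(0),
	       decode_status(FAKE_SMB_ERROR),
	       decode_status(FAKE_SMB_ERROR | FAKE_SMB_ERROR_TYPE));
	return 0;
}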
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * AMD MP2 platform driver * * Setup the I2C adapters enumerated in the ACPI namespace. * MP2 controllers have 2 separate busses, up to 2 I2C adapters may be listed. * * Authors: Nehal Bakulchandra Shah <[email protected]> * Elie Morisse <[email protected]> */ #include <linux/acpi.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/types.h> #include "i2c-amd-mp2.h" #define AMD_MP2_I2C_MAX_RW_LENGTH ((1 << 12) - 1) #define AMD_I2C_TIMEOUT (msecs_to_jiffies(250)) /** * struct amd_i2c_dev - MP2 bus/i2c adapter context * @common: shared context with the MP2 PCI driver * @pdev: platform driver node * @adap: i2c adapter * @cmd_complete: xfer completion object */ struct amd_i2c_dev { struct amd_i2c_common common; struct platform_device *pdev; struct i2c_adapter adap; struct completion cmd_complete; }; #define amd_i2c_dev_common(__common) \ container_of(__common, struct amd_i2c_dev, common) static int i2c_amd_dma_map(struct amd_i2c_common *i2c_common) { struct device *dev_pci = &i2c_common->mp2_dev->pci_dev->dev; struct amd_i2c_dev *i2c_dev = amd_i2c_dev_common(i2c_common); enum dma_data_direction dma_direction = i2c_common->msg->flags & I2C_M_RD ? DMA_FROM_DEVICE : DMA_TO_DEVICE; i2c_common->dma_buf = i2c_get_dma_safe_msg_buf(i2c_common->msg, 0); i2c_common->dma_addr = dma_map_single(dev_pci, i2c_common->dma_buf, i2c_common->msg->len, dma_direction); if (unlikely(dma_mapping_error(dev_pci, i2c_common->dma_addr))) { dev_err(&i2c_dev->pdev->dev, "Error while mapping dma buffer %p\n", i2c_common->dma_buf); return -EIO; } return 0; } static void i2c_amd_dma_unmap(struct amd_i2c_common *i2c_common) { struct device *dev_pci = &i2c_common->mp2_dev->pci_dev->dev; enum dma_data_direction dma_direction = i2c_common->msg->flags & I2C_M_RD ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE; dma_unmap_single(dev_pci, i2c_common->dma_addr, i2c_common->msg->len, dma_direction); i2c_put_dma_safe_msg_buf(i2c_common->dma_buf, i2c_common->msg, true); } static void i2c_amd_start_cmd(struct amd_i2c_dev *i2c_dev) { struct amd_i2c_common *i2c_common = &i2c_dev->common; reinit_completion(&i2c_dev->cmd_complete); i2c_common->cmd_success = false; } static void i2c_amd_cmd_completion(struct amd_i2c_common *i2c_common) { struct amd_i2c_dev *i2c_dev = amd_i2c_dev_common(i2c_common); union i2c_event *event = &i2c_common->eventval; if (event->r.status == i2c_readcomplete_event) dev_dbg(&i2c_dev->pdev->dev, "readdata:%*ph\n", event->r.length, i2c_common->msg->buf); complete(&i2c_dev->cmd_complete); } static int i2c_amd_check_cmd_completion(struct amd_i2c_dev *i2c_dev) { struct amd_i2c_common *i2c_common = &i2c_dev->common; unsigned long timeout; timeout = wait_for_completion_timeout(&i2c_dev->cmd_complete, i2c_dev->adap.timeout); if ((i2c_common->reqcmd == i2c_read || i2c_common->reqcmd == i2c_write) && i2c_common->msg->len > 32) i2c_amd_dma_unmap(i2c_common); if (timeout == 0) { amd_mp2_rw_timeout(i2c_common); return -ETIMEDOUT; } amd_mp2_process_event(i2c_common); if (!i2c_common->cmd_success) return -EIO; return 0; } static int i2c_amd_enable_set(struct amd_i2c_dev *i2c_dev, bool enable) { struct amd_i2c_common *i2c_common = &i2c_dev->common; i2c_amd_start_cmd(i2c_dev); amd_mp2_bus_enable_set(i2c_common, enable); return i2c_amd_check_cmd_completion(i2c_dev); } static int i2c_amd_xfer_msg(struct amd_i2c_dev *i2c_dev, struct i2c_msg *pmsg) { struct amd_i2c_common *i2c_common = &i2c_dev->common; i2c_amd_start_cmd(i2c_dev); i2c_common->msg = pmsg; if (pmsg->len > 32) if (i2c_amd_dma_map(i2c_common)) return -EIO; if (pmsg->flags & I2C_M_RD) amd_mp2_rw(i2c_common, i2c_read); else amd_mp2_rw(i2c_common, i2c_write); return i2c_amd_check_cmd_completion(i2c_dev); } static int i2c_amd_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct amd_i2c_dev *i2c_dev = i2c_get_adapdata(adap); int i; struct i2c_msg *pmsg; int err = 0; /* the adapter might have been deleted while waiting for the bus lock */ if (unlikely(!i2c_dev->common.mp2_dev)) return -EINVAL; amd_mp2_pm_runtime_get(i2c_dev->common.mp2_dev); for (i = 0; i < num; i++) { pmsg = &msgs[i]; err = i2c_amd_xfer_msg(i2c_dev, pmsg); if (err) break; } amd_mp2_pm_runtime_put(i2c_dev->common.mp2_dev); return err ? 
err : num; } static u32 i2c_amd_func(struct i2c_adapter *a) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static const struct i2c_algorithm i2c_amd_algorithm = { .master_xfer = i2c_amd_xfer, .functionality = i2c_amd_func, }; #ifdef CONFIG_PM static int i2c_amd_suspend(struct amd_i2c_common *i2c_common) { struct amd_i2c_dev *i2c_dev = amd_i2c_dev_common(i2c_common); i2c_amd_enable_set(i2c_dev, false); return 0; } static int i2c_amd_resume(struct amd_i2c_common *i2c_common) { struct amd_i2c_dev *i2c_dev = amd_i2c_dev_common(i2c_common); return i2c_amd_enable_set(i2c_dev, true); } #endif static const u32 supported_speeds[] = { I2C_MAX_HIGH_SPEED_MODE_FREQ, I2C_MAX_TURBO_MODE_FREQ, I2C_MAX_FAST_MODE_PLUS_FREQ, I2C_MAX_FAST_MODE_FREQ, I2C_MAX_STANDARD_MODE_FREQ, }; static enum speed_enum i2c_amd_get_bus_speed(struct platform_device *pdev) { u32 acpi_speed; int i; acpi_speed = i2c_acpi_find_bus_speed(&pdev->dev); /* round down to the lowest standard speed */ for (i = 0; i < ARRAY_SIZE(supported_speeds); i++) { if (acpi_speed >= supported_speeds[i]) break; } acpi_speed = i < ARRAY_SIZE(supported_speeds) ? supported_speeds[i] : 0; switch (acpi_speed) { case I2C_MAX_STANDARD_MODE_FREQ: return speed100k; case I2C_MAX_FAST_MODE_FREQ: return speed400k; case I2C_MAX_FAST_MODE_PLUS_FREQ: return speed1000k; case I2C_MAX_TURBO_MODE_FREQ: return speed1400k; case I2C_MAX_HIGH_SPEED_MODE_FREQ: return speed3400k; default: return speed400k; } } static const struct i2c_adapter_quirks amd_i2c_dev_quirks = { .max_read_len = AMD_MP2_I2C_MAX_RW_LENGTH, .max_write_len = AMD_MP2_I2C_MAX_RW_LENGTH, }; static int i2c_amd_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; int ret; struct amd_i2c_dev *i2c_dev; struct amd_mp2_dev *mp2_dev; u64 uid; ret = acpi_dev_uid_to_integer(ACPI_COMPANION(dev), &uid); if (ret) return dev_err_probe(dev, ret, "missing UID/bus id!\n"); if (uid >= 2) return dev_err_probe(dev, -EINVAL, "incorrect UID/bus id \"%llu\"!\n", uid); dev_dbg(dev, "bus id is %llu\n", uid); /* The ACPI namespace doesn't contain information about which MP2 PCI * device an AMDI0011 ACPI device is related to, so assume that there's * only one MP2 PCI device per system. 
*/ mp2_dev = amd_mp2_find_device(); if (!mp2_dev || !mp2_dev->probed) /* The MP2 PCI device should get probed later */ return -EPROBE_DEFER; i2c_dev = devm_kzalloc(&pdev->dev, sizeof(*i2c_dev), GFP_KERNEL); if (!i2c_dev) return -ENOMEM; i2c_dev->common.bus_id = uid; i2c_dev->common.mp2_dev = mp2_dev; i2c_dev->pdev = pdev; platform_set_drvdata(pdev, i2c_dev); i2c_dev->common.cmd_completion = &i2c_amd_cmd_completion; #ifdef CONFIG_PM i2c_dev->common.suspend = &i2c_amd_suspend; i2c_dev->common.resume = &i2c_amd_resume; #endif /* Register the adapter */ amd_mp2_pm_runtime_get(mp2_dev); i2c_dev->common.reqcmd = i2c_none; if (amd_mp2_register_cb(&i2c_dev->common)) return -EINVAL; device_link_add(&i2c_dev->pdev->dev, &mp2_dev->pci_dev->dev, DL_FLAG_AUTOREMOVE_CONSUMER); i2c_dev->common.i2c_speed = i2c_amd_get_bus_speed(pdev); /* Setup i2c adapter description */ i2c_dev->adap.owner = THIS_MODULE; i2c_dev->adap.algo = &i2c_amd_algorithm; i2c_dev->adap.quirks = &amd_i2c_dev_quirks; i2c_dev->adap.dev.parent = &pdev->dev; i2c_dev->adap.algo_data = i2c_dev; i2c_dev->adap.timeout = AMD_I2C_TIMEOUT; ACPI_COMPANION_SET(&i2c_dev->adap.dev, ACPI_COMPANION(&pdev->dev)); i2c_dev->adap.dev.of_node = pdev->dev.of_node; snprintf(i2c_dev->adap.name, sizeof(i2c_dev->adap.name), "AMD MP2 i2c bus %u", i2c_dev->common.bus_id); i2c_set_adapdata(&i2c_dev->adap, i2c_dev); init_completion(&i2c_dev->cmd_complete); /* Enable the bus */ if (i2c_amd_enable_set(i2c_dev, true)) dev_err(&pdev->dev, "initial bus enable failed\n"); /* Attach to the i2c layer */ ret = i2c_add_adapter(&i2c_dev->adap); amd_mp2_pm_runtime_put(mp2_dev); if (ret < 0) dev_err(&pdev->dev, "i2c add adapter failed = %d\n", ret); return ret; } static void i2c_amd_remove(struct platform_device *pdev) { struct amd_i2c_dev *i2c_dev = platform_get_drvdata(pdev); struct amd_i2c_common *i2c_common = &i2c_dev->common; i2c_lock_bus(&i2c_dev->adap, I2C_LOCK_ROOT_ADAPTER); i2c_amd_enable_set(i2c_dev, false); amd_mp2_unregister_cb(i2c_common); i2c_common->mp2_dev = NULL; i2c_unlock_bus(&i2c_dev->adap, I2C_LOCK_ROOT_ADAPTER); i2c_del_adapter(&i2c_dev->adap); } static const struct acpi_device_id i2c_amd_acpi_match[] = { { "AMDI0011" }, { }, }; MODULE_DEVICE_TABLE(acpi, i2c_amd_acpi_match); static struct platform_driver i2c_amd_plat_driver = { .probe = i2c_amd_probe, .remove_new = i2c_amd_remove, .driver = { .name = "i2c_amd_mp2", .acpi_match_table = ACPI_PTR(i2c_amd_acpi_match), }, }; module_platform_driver(i2c_amd_plat_driver); MODULE_DESCRIPTION("AMD(R) MP2 I2C Platform Driver"); MODULE_AUTHOR("Nehal Shah <[email protected]>"); MODULE_AUTHOR("Elie Morisse <[email protected]>"); MODULE_LICENSE("Dual BSD/GPL");
linux-master
drivers/i2c/busses/i2c-amd-mp2-plat.c
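The bus-speed selection in i2c_amd_get_bus_speed() simply rounds the ACPI-reported frequency down to the nearest entry of supported_speeds[]. A stand-alone sketch of that rounding, using the same frequency table, follows; the 950 kHz request in main() is just an example input.

#include <stdio.h>
#include <stdint.h>

/* Mirrors supported_speeds[]: 3.4 MHz, 1.4 MHz, 1 MHz, 400 kHz, 100 kHz. */
static const uint32_t supported[] = { 3400000, 1400000, 1000000, 400000, 100000 };

static uint32_t round_down_speed(uint32_t requested)
{
	for (unsigned int i = 0; i < sizeof(supported) / sizeof(supported[0]); i++)
		if (requested >= supported[i])
			return supported[i];
	return 0;	/* slower than 100 kHz: no match; the driver then defaults to 400 kHz */
}

int main(void)
{
	/* e.g. an ACPI-reported 950 kHz bus rounds down to 400 kHz */
	printf("%u\n", (unsigned int)round_down_speed(950000));
	return 0;
}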
// SPDX-License-Identifier: GPL-2.0+ // Expose an I2C passthrough to the ChromeOS EC. // // Copyright (C) 2013 Google, Inc. #include <linux/acpi.h> #include <linux/module.h> #include <linux/i2c.h> #include <linux/platform_data/cros_ec_commands.h> #include <linux/platform_data/cros_ec_proto.h> #include <linux/platform_device.h> #include <linux/slab.h> #define I2C_MAX_RETRIES 3 /** * struct ec_i2c_device - Driver data for I2C tunnel * * @dev: Device node * @adap: I2C adapter * @ec: Pointer to EC device * @remote_bus: The EC bus number we tunnel to on the other side. * @request_buf: Buffer for transmitting data; we expect most transfers to fit. * @response_buf: Buffer for receiving data; we expect most transfers to fit. */ struct ec_i2c_device { struct device *dev; struct i2c_adapter adap; struct cros_ec_device *ec; u16 remote_bus; u8 request_buf[256]; u8 response_buf[256]; }; /** * ec_i2c_count_message - Count bytes needed for ec_i2c_construct_message * * @i2c_msgs: The i2c messages to read * @num: The number of i2c messages. * * Returns the number of bytes the messages will take up. */ static int ec_i2c_count_message(const struct i2c_msg i2c_msgs[], int num) { int i; int size; size = sizeof(struct ec_params_i2c_passthru); size += num * sizeof(struct ec_params_i2c_passthru_msg); for (i = 0; i < num; i++) if (!(i2c_msgs[i].flags & I2C_M_RD)) size += i2c_msgs[i].len; return size; } /** * ec_i2c_construct_message - construct a message to go to the EC * * This function effectively stuffs the standard i2c_msg format of Linux into * a format that the EC understands. * * @buf: The buffer to fill. We assume that the buffer is big enough. * @i2c_msgs: The i2c messages to read. * @num: The number of i2c messages. * @bus_num: The remote bus number we want to talk to. * * Returns 0 or a negative error number. */ static int ec_i2c_construct_message(u8 *buf, const struct i2c_msg i2c_msgs[], int num, u16 bus_num) { struct ec_params_i2c_passthru *params; u8 *out_data; int i; out_data = buf + sizeof(struct ec_params_i2c_passthru) + num * sizeof(struct ec_params_i2c_passthru_msg); params = (struct ec_params_i2c_passthru *)buf; params->port = bus_num; params->num_msgs = num; for (i = 0; i < num; i++) { const struct i2c_msg *i2c_msg = &i2c_msgs[i]; struct ec_params_i2c_passthru_msg *msg = &params->msg[i]; msg->len = i2c_msg->len; msg->addr_flags = i2c_msg->addr; if (i2c_msg->flags & I2C_M_TEN) return -EINVAL; if (i2c_msg->flags & I2C_M_RD) { msg->addr_flags |= EC_I2C_FLAG_READ; } else { memcpy(out_data, i2c_msg->buf, msg->len); out_data += msg->len; } } return 0; } /** * ec_i2c_count_response - Count bytes needed for ec_i2c_parse_response * * @i2c_msgs: The i2c messages to fill up. * @num: The number of i2c messages expected. * * Returns the number of response bytes expeced. */ static int ec_i2c_count_response(struct i2c_msg i2c_msgs[], int num) { int size; int i; size = sizeof(struct ec_response_i2c_passthru); for (i = 0; i < num; i++) if (i2c_msgs[i].flags & I2C_M_RD) size += i2c_msgs[i].len; return size; } /** * ec_i2c_parse_response - Parse a response from the EC * * We'll take the EC's response and copy it back into msgs. * * @buf: The buffer to parse. * @i2c_msgs: The i2c messages to fill up. * @num: The number of i2c messages; will be modified to include the actual * number received. * * Returns 0 or a negative error number. 
*/ static int ec_i2c_parse_response(const u8 *buf, struct i2c_msg i2c_msgs[], int *num) { const struct ec_response_i2c_passthru *resp; const u8 *in_data; int i; in_data = buf + sizeof(struct ec_response_i2c_passthru); resp = (const struct ec_response_i2c_passthru *)buf; if (resp->i2c_status & EC_I2C_STATUS_TIMEOUT) return -ETIMEDOUT; else if (resp->i2c_status & EC_I2C_STATUS_NAK) return -ENXIO; else if (resp->i2c_status & EC_I2C_STATUS_ERROR) return -EIO; /* Other side could send us back fewer messages, but not more */ if (resp->num_msgs > *num) return -EPROTO; *num = resp->num_msgs; for (i = 0; i < *num; i++) { struct i2c_msg *i2c_msg = &i2c_msgs[i]; if (i2c_msgs[i].flags & I2C_M_RD) { memcpy(i2c_msg->buf, in_data, i2c_msg->len); in_data += i2c_msg->len; } } return 0; } static int ec_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg i2c_msgs[], int num) { struct ec_i2c_device *bus = adap->algo_data; struct device *dev = bus->dev; const u16 bus_num = bus->remote_bus; int request_len; int response_len; int alloc_size; int result; struct cros_ec_command *msg; request_len = ec_i2c_count_message(i2c_msgs, num); if (request_len < 0) { dev_warn(dev, "Error constructing message %d\n", request_len); return request_len; } response_len = ec_i2c_count_response(i2c_msgs, num); if (response_len < 0) { /* Unexpected; no errors should come when NULL response */ dev_warn(dev, "Error preparing response %d\n", response_len); return response_len; } alloc_size = max(request_len, response_len); msg = kmalloc(sizeof(*msg) + alloc_size, GFP_KERNEL); if (!msg) return -ENOMEM; result = ec_i2c_construct_message(msg->data, i2c_msgs, num, bus_num); if (result) { dev_err(dev, "Error constructing EC i2c message %d\n", result); goto exit; } msg->version = 0; msg->command = EC_CMD_I2C_PASSTHRU; msg->outsize = request_len; msg->insize = response_len; result = cros_ec_cmd_xfer_status(bus->ec, msg); if (result < 0) { dev_err(dev, "Error transferring EC i2c message %d\n", result); goto exit; } result = ec_i2c_parse_response(msg->data, i2c_msgs, &num); if (result < 0) goto exit; /* Indicate success by saying how many messages were sent */ result = num; exit: kfree(msg); return result; } static u32 ec_i2c_functionality(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static const struct i2c_algorithm ec_i2c_algorithm = { .master_xfer = ec_i2c_xfer, .functionality = ec_i2c_functionality, }; static int ec_i2c_probe(struct platform_device *pdev) { struct cros_ec_device *ec = dev_get_drvdata(pdev->dev.parent); struct device *dev = &pdev->dev; struct ec_i2c_device *bus = NULL; u32 remote_bus; int err; if (!ec->cmd_xfer) { dev_err(dev, "Missing sendrecv\n"); return -EINVAL; } bus = devm_kzalloc(dev, sizeof(*bus), GFP_KERNEL); if (bus == NULL) return -ENOMEM; err = device_property_read_u32(dev, "google,remote-bus", &remote_bus); if (err) { dev_err(dev, "Couldn't read remote-bus property\n"); return err; } bus->remote_bus = remote_bus; bus->ec = ec; bus->dev = dev; bus->adap.owner = THIS_MODULE; strscpy(bus->adap.name, "cros-ec-i2c-tunnel", sizeof(bus->adap.name)); bus->adap.algo = &ec_i2c_algorithm; bus->adap.algo_data = bus; bus->adap.dev.parent = &pdev->dev; bus->adap.dev.of_node = pdev->dev.of_node; bus->adap.retries = I2C_MAX_RETRIES; ACPI_COMPANION_SET(&bus->adap.dev, ACPI_COMPANION(&pdev->dev)); err = i2c_add_adapter(&bus->adap); if (err) return err; platform_set_drvdata(pdev, bus); return err; } static void ec_i2c_remove(struct platform_device *dev) { struct ec_i2c_device *bus = 
platform_get_drvdata(dev); i2c_del_adapter(&bus->adap); } static const struct of_device_id cros_ec_i2c_of_match[] __maybe_unused = { { .compatible = "google,cros-ec-i2c-tunnel" }, {}, }; MODULE_DEVICE_TABLE(of, cros_ec_i2c_of_match); static const struct acpi_device_id cros_ec_i2c_tunnel_acpi_id[] __maybe_unused = { { "GOOG0012", 0 }, { } }; MODULE_DEVICE_TABLE(acpi, cros_ec_i2c_tunnel_acpi_id); static struct platform_driver ec_i2c_tunnel_driver = { .probe = ec_i2c_probe, .remove_new = ec_i2c_remove, .driver = { .name = "cros-ec-i2c-tunnel", .acpi_match_table = ACPI_PTR(cros_ec_i2c_tunnel_acpi_id), .of_match_table = of_match_ptr(cros_ec_i2c_of_match), }, }; module_platform_driver(ec_i2c_tunnel_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("EC I2C tunnel driver"); MODULE_ALIAS("platform:cros-ec-i2c-tunnel");
linux-master
drivers/i2c/busses/i2c-cros-ec-tunnel.c
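The request sizing in ec_i2c_count_message() is a fixed passthru header, one descriptor per i2c_msg, plus the payload of every write message. The sketch below restates that arithmetic with the header and descriptor sizes passed in as parameters; the byte counts used in main() are hypothetical, since the real ec_params_i2c_passthru structure sizes are not reproduced here.

#include <stdio.h>
#include <stddef.h>

struct fake_msg {
	int is_read;	/* stands in for the I2C_M_RD flag */
	size_t len;
};

static size_t count_request(size_t hdr_sz, size_t desc_sz,
			    const struct fake_msg *msgs, int num)
{
	size_t size = hdr_sz + (size_t)num * desc_sz;

	for (int i = 0; i < num; i++)
		if (!msgs[i].is_read)	/* only writes carry outgoing payload */
			size += msgs[i].len;
	return size;
}

int main(void)
{
	/* A typical register read: a 1-byte write of the register, then a 2-byte read. */
	struct fake_msg msgs[] = { { 0, 1 }, { 1, 2 } };

	/* Hypothetical 4-byte header and 2-byte per-message descriptor. */
	printf("request bytes: %zu\n", count_request(4, 2, msgs, 2));
	return 0;
}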
// SPDX-License-Identifier: GPL-2.0-or-later /* * IBM OPAL I2C driver * Copyright (C) 2014 IBM */ #include <linux/device.h> #include <linux/i2c.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <asm/firmware.h> #include <asm/opal.h> static int i2c_opal_translate_error(int rc) { switch (rc) { case OPAL_NO_MEM: return -ENOMEM; case OPAL_PARAMETER: return -EINVAL; case OPAL_I2C_ARBT_LOST: return -EAGAIN; case OPAL_I2C_TIMEOUT: return -ETIMEDOUT; case OPAL_I2C_NACK_RCVD: return -ENXIO; case OPAL_I2C_STOP_ERR: return -EBUSY; default: return -EIO; } } static int i2c_opal_send_request(u32 bus_id, struct opal_i2c_request *req) { struct opal_msg msg; int token, rc; token = opal_async_get_token_interruptible(); if (token < 0) { if (token != -ERESTARTSYS) pr_err("Failed to get the async token\n"); return token; } rc = opal_i2c_request(token, bus_id, req); if (rc != OPAL_ASYNC_COMPLETION) { rc = i2c_opal_translate_error(rc); goto exit; } rc = opal_async_wait_response(token, &msg); if (rc) goto exit; rc = opal_get_async_rc(msg); if (rc != OPAL_SUCCESS) { rc = i2c_opal_translate_error(rc); goto exit; } exit: opal_async_release_token(token); return rc; } static int i2c_opal_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { unsigned long opal_id = (unsigned long)adap->algo_data; struct opal_i2c_request req; int rc, i; /* We only support fairly simple combinations here of one * or two messages */ memset(&req, 0, sizeof(req)); switch(num) { case 1: req.type = (msgs[0].flags & I2C_M_RD) ? OPAL_I2C_RAW_READ : OPAL_I2C_RAW_WRITE; req.addr = cpu_to_be16(msgs[0].addr); req.size = cpu_to_be32(msgs[0].len); req.buffer_ra = cpu_to_be64(__pa(msgs[0].buf)); break; case 2: req.type = (msgs[1].flags & I2C_M_RD) ? OPAL_I2C_SM_READ : OPAL_I2C_SM_WRITE; req.addr = cpu_to_be16(msgs[0].addr); req.subaddr_sz = msgs[0].len; for (i = 0; i < msgs[0].len; i++) req.subaddr = (req.subaddr << 8) | msgs[0].buf[i]; req.subaddr = cpu_to_be32(req.subaddr); req.size = cpu_to_be32(msgs[1].len); req.buffer_ra = cpu_to_be64(__pa(msgs[1].buf)); break; } rc = i2c_opal_send_request(opal_id, &req); if (rc) return rc; return num; } static int i2c_opal_smbus_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { unsigned long opal_id = (unsigned long)adap->algo_data; struct opal_i2c_request req; u8 local[2]; int rc; memset(&req, 0, sizeof(req)); req.addr = cpu_to_be16(addr); switch (size) { case I2C_SMBUS_BYTE: req.buffer_ra = cpu_to_be64(__pa(&data->byte)); req.size = cpu_to_be32(1); fallthrough; case I2C_SMBUS_QUICK: req.type = (read_write == I2C_SMBUS_READ) ? OPAL_I2C_RAW_READ : OPAL_I2C_RAW_WRITE; break; case I2C_SMBUS_BYTE_DATA: req.buffer_ra = cpu_to_be64(__pa(&data->byte)); req.size = cpu_to_be32(1); req.subaddr = cpu_to_be32(command); req.subaddr_sz = 1; req.type = (read_write == I2C_SMBUS_READ) ? OPAL_I2C_SM_READ : OPAL_I2C_SM_WRITE; break; case I2C_SMBUS_WORD_DATA: if (!read_write) { local[0] = data->word & 0xff; local[1] = (data->word >> 8) & 0xff; } req.buffer_ra = cpu_to_be64(__pa(local)); req.size = cpu_to_be32(2); req.subaddr = cpu_to_be32(command); req.subaddr_sz = 1; req.type = (read_write == I2C_SMBUS_READ) ? 
OPAL_I2C_SM_READ : OPAL_I2C_SM_WRITE; break; case I2C_SMBUS_I2C_BLOCK_DATA: req.buffer_ra = cpu_to_be64(__pa(&data->block[1])); req.size = cpu_to_be32(data->block[0]); req.subaddr = cpu_to_be32(command); req.subaddr_sz = 1; req.type = (read_write == I2C_SMBUS_READ) ? OPAL_I2C_SM_READ : OPAL_I2C_SM_WRITE; break; default: return -EINVAL; } rc = i2c_opal_send_request(opal_id, &req); if (!rc && read_write && size == I2C_SMBUS_WORD_DATA) { data->word = ((u16)local[1]) << 8; data->word |= local[0]; } return rc; } static u32 i2c_opal_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_I2C_BLOCK; } static const struct i2c_algorithm i2c_opal_algo = { .master_xfer = i2c_opal_master_xfer, .smbus_xfer = i2c_opal_smbus_xfer, .functionality = i2c_opal_func, }; /* * For two messages, we basically support simple smbus transactions of a * write-then-anything. */ static const struct i2c_adapter_quirks i2c_opal_quirks = { .flags = I2C_AQ_COMB | I2C_AQ_COMB_WRITE_FIRST | I2C_AQ_COMB_SAME_ADDR, .max_comb_1st_msg_len = 4, }; static int i2c_opal_probe(struct platform_device *pdev) { struct i2c_adapter *adapter; const char *pname; u32 opal_id; int rc; if (!pdev->dev.of_node) return -ENODEV; rc = of_property_read_u32(pdev->dev.of_node, "ibm,opal-id", &opal_id); if (rc) { dev_err(&pdev->dev, "Missing ibm,opal-id property !\n"); return -EIO; } adapter = devm_kzalloc(&pdev->dev, sizeof(*adapter), GFP_KERNEL); if (!adapter) return -ENOMEM; adapter->algo = &i2c_opal_algo; adapter->algo_data = (void *)(unsigned long)opal_id; adapter->quirks = &i2c_opal_quirks; adapter->dev.parent = &pdev->dev; adapter->dev.of_node = of_node_get(pdev->dev.of_node); pname = of_get_property(pdev->dev.of_node, "ibm,port-name", NULL); if (pname) strscpy(adapter->name, pname, sizeof(adapter->name)); else strscpy(adapter->name, "opal", sizeof(adapter->name)); platform_set_drvdata(pdev, adapter); rc = i2c_add_adapter(adapter); if (rc) dev_err(&pdev->dev, "Failed to register the i2c adapter\n"); return rc; } static void i2c_opal_remove(struct platform_device *pdev) { struct i2c_adapter *adapter = platform_get_drvdata(pdev); i2c_del_adapter(adapter); } static const struct of_device_id i2c_opal_of_match[] = { { .compatible = "ibm,opal-i2c", }, { } }; MODULE_DEVICE_TABLE(of, i2c_opal_of_match); static struct platform_driver i2c_opal_driver = { .probe = i2c_opal_probe, .remove_new = i2c_opal_remove, .driver = { .name = "i2c-opal", .of_match_table = i2c_opal_of_match, }, }; static int __init i2c_opal_init(void) { if (!firmware_has_feature(FW_FEATURE_OPAL)) return -ENODEV; return platform_driver_register(&i2c_opal_driver); } module_init(i2c_opal_init); static void __exit i2c_opal_exit(void) { return platform_driver_unregister(&i2c_opal_driver); } module_exit(i2c_opal_exit); MODULE_AUTHOR("Neelesh Gupta <[email protected]>"); MODULE_DESCRIPTION("IBM OPAL I2C driver"); MODULE_LICENSE("GPL");
linux-master
drivers/i2c/busses/i2c-opal.c
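For the write-then-read case, i2c_opal_master_xfer() folds the first message into the request's sub-address by shifting its bytes in MSB-first. A stand-alone version of that packing is sketched below; the 0x1234 register pointer is an arbitrary example.

#include <stdio.h>
#include <stdint.h>

static uint32_t pack_subaddr(const uint8_t *buf, int len)
{
	uint32_t subaddr = 0;

	/* Same loop as the driver: shift in each register byte, MSB first. */
	for (int i = 0; i < len; i++)
		subaddr = (subaddr << 8) | buf[i];
	return subaddr;		/* the driver then converts it with cpu_to_be32() */
}

int main(void)
{
	uint8_t reg[] = { 0x12, 0x34 };		/* 2-byte register pointer written first */

	printf("subaddr = 0x%04x, subaddr_sz = %zu\n",
	       (unsigned int)pack_subaddr(reg, 2), sizeof(reg));
	return 0;
}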
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) 2014 Linaro Ltd. * Copyright (c) 2014 HiSilicon Limited. * * Now only support 7 bit address. */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/io.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> /* Register Map */ #define HIX5I2C_CTRL 0x00 #define HIX5I2C_COM 0x04 #define HIX5I2C_ICR 0x08 #define HIX5I2C_SR 0x0c #define HIX5I2C_SCL_H 0x10 #define HIX5I2C_SCL_L 0x14 #define HIX5I2C_TXR 0x18 #define HIX5I2C_RXR 0x1c /* I2C_CTRL_REG */ #define I2C_ENABLE BIT(8) #define I2C_UNMASK_TOTAL BIT(7) #define I2C_UNMASK_START BIT(6) #define I2C_UNMASK_END BIT(5) #define I2C_UNMASK_SEND BIT(4) #define I2C_UNMASK_RECEIVE BIT(3) #define I2C_UNMASK_ACK BIT(2) #define I2C_UNMASK_ARBITRATE BIT(1) #define I2C_UNMASK_OVER BIT(0) #define I2C_UNMASK_ALL (I2C_UNMASK_ACK | I2C_UNMASK_OVER) /* I2C_COM_REG */ #define I2C_NO_ACK BIT(4) #define I2C_START BIT(3) #define I2C_READ BIT(2) #define I2C_WRITE BIT(1) #define I2C_STOP BIT(0) /* I2C_ICR_REG */ #define I2C_CLEAR_START BIT(6) #define I2C_CLEAR_END BIT(5) #define I2C_CLEAR_SEND BIT(4) #define I2C_CLEAR_RECEIVE BIT(3) #define I2C_CLEAR_ACK BIT(2) #define I2C_CLEAR_ARBITRATE BIT(1) #define I2C_CLEAR_OVER BIT(0) #define I2C_CLEAR_ALL (I2C_CLEAR_START | I2C_CLEAR_END | \ I2C_CLEAR_SEND | I2C_CLEAR_RECEIVE | \ I2C_CLEAR_ACK | I2C_CLEAR_ARBITRATE | \ I2C_CLEAR_OVER) /* I2C_SR_REG */ #define I2C_BUSY BIT(7) #define I2C_START_INTR BIT(6) #define I2C_END_INTR BIT(5) #define I2C_SEND_INTR BIT(4) #define I2C_RECEIVE_INTR BIT(3) #define I2C_ACK_INTR BIT(2) #define I2C_ARBITRATE_INTR BIT(1) #define I2C_OVER_INTR BIT(0) enum hix5hd2_i2c_state { HIX5I2C_STAT_RW_ERR = -1, HIX5I2C_STAT_INIT, HIX5I2C_STAT_RW, HIX5I2C_STAT_SND_STOP, HIX5I2C_STAT_RW_SUCCESS, }; struct hix5hd2_i2c_priv { struct i2c_adapter adap; struct i2c_msg *msg; struct completion msg_complete; unsigned int msg_idx; unsigned int msg_len; int stop; void __iomem *regs; struct clk *clk; struct device *dev; spinlock_t lock; /* IRQ synchronization */ int err; unsigned int freq; enum hix5hd2_i2c_state state; }; static u32 hix5hd2_i2c_clr_pend_irq(struct hix5hd2_i2c_priv *priv) { u32 val = readl_relaxed(priv->regs + HIX5I2C_SR); writel_relaxed(val, priv->regs + HIX5I2C_ICR); return val; } static void hix5hd2_i2c_clr_all_irq(struct hix5hd2_i2c_priv *priv) { writel_relaxed(I2C_CLEAR_ALL, priv->regs + HIX5I2C_ICR); } static void hix5hd2_i2c_disable_irq(struct hix5hd2_i2c_priv *priv) { writel_relaxed(0, priv->regs + HIX5I2C_CTRL); } static void hix5hd2_i2c_enable_irq(struct hix5hd2_i2c_priv *priv) { writel_relaxed(I2C_ENABLE | I2C_UNMASK_TOTAL | I2C_UNMASK_ALL, priv->regs + HIX5I2C_CTRL); } static void hix5hd2_i2c_drv_setrate(struct hix5hd2_i2c_priv *priv) { u32 rate, val; u32 scl, sysclock; /* close all i2c interrupt */ val = readl_relaxed(priv->regs + HIX5I2C_CTRL); writel_relaxed(val & (~I2C_UNMASK_TOTAL), priv->regs + HIX5I2C_CTRL); rate = priv->freq; sysclock = clk_get_rate(priv->clk); scl = (sysclock / (rate * 2)) / 2 - 1; writel_relaxed(scl, priv->regs + HIX5I2C_SCL_H); writel_relaxed(scl, priv->regs + HIX5I2C_SCL_L); /* restore original interrupt*/ writel_relaxed(val, priv->regs + HIX5I2C_CTRL); dev_dbg(priv->dev, "%s: sysclock=%d, rate=%d, scl=%d\n", __func__, sysclock, rate, scl); } static void hix5hd2_i2c_init(struct hix5hd2_i2c_priv *priv) { hix5hd2_i2c_disable_irq(priv); hix5hd2_i2c_drv_setrate(priv); 
hix5hd2_i2c_clr_all_irq(priv); hix5hd2_i2c_enable_irq(priv); } static void hix5hd2_i2c_reset(struct hix5hd2_i2c_priv *priv) { clk_disable_unprepare(priv->clk); msleep(20); clk_prepare_enable(priv->clk); hix5hd2_i2c_init(priv); } static int hix5hd2_i2c_wait_bus_idle(struct hix5hd2_i2c_priv *priv) { unsigned long stop_time; u32 int_status; /* wait for 100 milli seconds for the bus to be idle */ stop_time = jiffies + msecs_to_jiffies(100); do { int_status = hix5hd2_i2c_clr_pend_irq(priv); if (!(int_status & I2C_BUSY)) return 0; usleep_range(50, 200); } while (time_before(jiffies, stop_time)); return -EBUSY; } static void hix5hd2_rw_over(struct hix5hd2_i2c_priv *priv) { if (priv->state == HIX5I2C_STAT_SND_STOP) dev_dbg(priv->dev, "%s: rw and send stop over\n", __func__); else dev_dbg(priv->dev, "%s: have not data to send\n", __func__); priv->state = HIX5I2C_STAT_RW_SUCCESS; priv->err = 0; } static void hix5hd2_rw_handle_stop(struct hix5hd2_i2c_priv *priv) { if (priv->stop) { priv->state = HIX5I2C_STAT_SND_STOP; writel_relaxed(I2C_STOP, priv->regs + HIX5I2C_COM); } else { hix5hd2_rw_over(priv); } } static void hix5hd2_read_handle(struct hix5hd2_i2c_priv *priv) { if (priv->msg_len == 1) { /* the last byte don't need send ACK */ writel_relaxed(I2C_READ | I2C_NO_ACK, priv->regs + HIX5I2C_COM); } else if (priv->msg_len > 1) { /* if i2c master receive data will send ACK */ writel_relaxed(I2C_READ, priv->regs + HIX5I2C_COM); } else { hix5hd2_rw_handle_stop(priv); } } static void hix5hd2_write_handle(struct hix5hd2_i2c_priv *priv) { u8 data; if (priv->msg_len > 0) { data = priv->msg->buf[priv->msg_idx++]; writel_relaxed(data, priv->regs + HIX5I2C_TXR); writel_relaxed(I2C_WRITE, priv->regs + HIX5I2C_COM); } else { hix5hd2_rw_handle_stop(priv); } } static int hix5hd2_rw_preprocess(struct hix5hd2_i2c_priv *priv) { u8 data; if (priv->state == HIX5I2C_STAT_INIT) { priv->state = HIX5I2C_STAT_RW; } else if (priv->state == HIX5I2C_STAT_RW) { if (priv->msg->flags & I2C_M_RD) { data = readl_relaxed(priv->regs + HIX5I2C_RXR); priv->msg->buf[priv->msg_idx++] = data; } priv->msg_len--; } else { dev_dbg(priv->dev, "%s: error: priv->state = %d, msg_len = %d\n", __func__, priv->state, priv->msg_len); return -EAGAIN; } return 0; } static irqreturn_t hix5hd2_i2c_irq(int irqno, void *dev_id) { struct hix5hd2_i2c_priv *priv = dev_id; u32 int_status; int ret; spin_lock(&priv->lock); int_status = hix5hd2_i2c_clr_pend_irq(priv); /* handle error */ if (int_status & I2C_ARBITRATE_INTR) { /* bus error */ dev_dbg(priv->dev, "ARB bus loss\n"); priv->err = -EAGAIN; priv->state = HIX5I2C_STAT_RW_ERR; goto stop; } else if (int_status & I2C_ACK_INTR) { /* ack error */ dev_dbg(priv->dev, "No ACK from device\n"); priv->err = -ENXIO; priv->state = HIX5I2C_STAT_RW_ERR; goto stop; } if (int_status & I2C_OVER_INTR) { if (priv->msg_len > 0) { ret = hix5hd2_rw_preprocess(priv); if (ret) { priv->err = ret; priv->state = HIX5I2C_STAT_RW_ERR; goto stop; } if (priv->msg->flags & I2C_M_RD) hix5hd2_read_handle(priv); else hix5hd2_write_handle(priv); } else { hix5hd2_rw_over(priv); } } stop: if ((priv->state == HIX5I2C_STAT_RW_SUCCESS && priv->msg->len == priv->msg_idx) || (priv->state == HIX5I2C_STAT_RW_ERR)) { hix5hd2_i2c_disable_irq(priv); hix5hd2_i2c_clr_pend_irq(priv); complete(&priv->msg_complete); } spin_unlock(&priv->lock); return IRQ_HANDLED; } static void hix5hd2_i2c_message_start(struct hix5hd2_i2c_priv *priv, int stop) { unsigned long flags; spin_lock_irqsave(&priv->lock, flags); hix5hd2_i2c_clr_all_irq(priv); 
hix5hd2_i2c_enable_irq(priv); writel_relaxed(i2c_8bit_addr_from_msg(priv->msg), priv->regs + HIX5I2C_TXR); writel_relaxed(I2C_WRITE | I2C_START, priv->regs + HIX5I2C_COM); spin_unlock_irqrestore(&priv->lock, flags); } static int hix5hd2_i2c_xfer_msg(struct hix5hd2_i2c_priv *priv, struct i2c_msg *msgs, int stop) { unsigned long timeout; int ret; priv->msg = msgs; priv->msg_idx = 0; priv->msg_len = priv->msg->len; priv->stop = stop; priv->err = 0; priv->state = HIX5I2C_STAT_INIT; reinit_completion(&priv->msg_complete); hix5hd2_i2c_message_start(priv, stop); timeout = wait_for_completion_timeout(&priv->msg_complete, priv->adap.timeout); if (timeout == 0) { priv->state = HIX5I2C_STAT_RW_ERR; priv->err = -ETIMEDOUT; dev_warn(priv->dev, "%s timeout=%d\n", msgs->flags & I2C_M_RD ? "rx" : "tx", priv->adap.timeout); } ret = priv->state; /* * If this is the last message to be transfered (stop == 1) * Then check if the bus can be brought back to idle. */ if (priv->state == HIX5I2C_STAT_RW_SUCCESS && stop) ret = hix5hd2_i2c_wait_bus_idle(priv); if (ret < 0) hix5hd2_i2c_reset(priv); return priv->err; } static int hix5hd2_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct hix5hd2_i2c_priv *priv = i2c_get_adapdata(adap); int i, ret, stop; pm_runtime_get_sync(priv->dev); for (i = 0; i < num; i++, msgs++) { if ((i == num - 1) || (msgs->flags & I2C_M_STOP)) stop = 1; else stop = 0; ret = hix5hd2_i2c_xfer_msg(priv, msgs, stop); if (ret < 0) goto out; } ret = num; out: pm_runtime_mark_last_busy(priv->dev); pm_runtime_put_autosuspend(priv->dev); return ret; } static u32 hix5hd2_i2c_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK); } static const struct i2c_algorithm hix5hd2_i2c_algorithm = { .master_xfer = hix5hd2_i2c_xfer, .functionality = hix5hd2_i2c_func, }; static int hix5hd2_i2c_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct hix5hd2_i2c_priv *priv; unsigned int freq; int irq, ret; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; if (of_property_read_u32(np, "clock-frequency", &freq)) { /* use 100k as default value */ priv->freq = I2C_MAX_STANDARD_MODE_FREQ; } else { if (freq > I2C_MAX_FAST_MODE_FREQ) { priv->freq = I2C_MAX_FAST_MODE_FREQ; dev_warn(priv->dev, "use max freq %d instead\n", I2C_MAX_FAST_MODE_FREQ); } else { priv->freq = freq; } } priv->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->regs)) return PTR_ERR(priv->regs); irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; priv->clk = devm_clk_get_enabled(&pdev->dev, NULL); if (IS_ERR(priv->clk)) { dev_err(&pdev->dev, "cannot enable clock\n"); return PTR_ERR(priv->clk); } strscpy(priv->adap.name, "hix5hd2-i2c", sizeof(priv->adap.name)); priv->dev = &pdev->dev; priv->adap.owner = THIS_MODULE; priv->adap.algo = &hix5hd2_i2c_algorithm; priv->adap.retries = 3; priv->adap.dev.of_node = np; priv->adap.algo_data = priv; priv->adap.dev.parent = &pdev->dev; i2c_set_adapdata(&priv->adap, priv); platform_set_drvdata(pdev, priv); spin_lock_init(&priv->lock); init_completion(&priv->msg_complete); hix5hd2_i2c_init(priv); ret = devm_request_irq(&pdev->dev, irq, hix5hd2_i2c_irq, IRQF_NO_SUSPEND, dev_name(&pdev->dev), priv); if (ret != 0) { dev_err(&pdev->dev, "cannot request HS-I2C IRQ %d\n", irq); return ret; } pm_runtime_set_autosuspend_delay(priv->dev, MSEC_PER_SEC); pm_runtime_use_autosuspend(priv->dev); pm_runtime_set_active(priv->dev); pm_runtime_enable(priv->dev); ret = 
i2c_add_adapter(&priv->adap); if (ret < 0) goto err_runtime; return ret; err_runtime: pm_runtime_disable(priv->dev); pm_runtime_set_suspended(priv->dev); return ret; } static void hix5hd2_i2c_remove(struct platform_device *pdev) { struct hix5hd2_i2c_priv *priv = platform_get_drvdata(pdev); i2c_del_adapter(&priv->adap); pm_runtime_disable(priv->dev); pm_runtime_set_suspended(priv->dev); } static int hix5hd2_i2c_runtime_suspend(struct device *dev) { struct hix5hd2_i2c_priv *priv = dev_get_drvdata(dev); clk_disable_unprepare(priv->clk); return 0; } static int hix5hd2_i2c_runtime_resume(struct device *dev) { struct hix5hd2_i2c_priv *priv = dev_get_drvdata(dev); clk_prepare_enable(priv->clk); hix5hd2_i2c_init(priv); return 0; } static const struct dev_pm_ops hix5hd2_i2c_pm_ops = { RUNTIME_PM_OPS(hix5hd2_i2c_runtime_suspend, hix5hd2_i2c_runtime_resume, NULL) }; static const struct of_device_id hix5hd2_i2c_match[] = { { .compatible = "hisilicon,hix5hd2-i2c" }, {}, }; MODULE_DEVICE_TABLE(of, hix5hd2_i2c_match); static struct platform_driver hix5hd2_i2c_driver = { .probe = hix5hd2_i2c_probe, .remove_new = hix5hd2_i2c_remove, .driver = { .name = "hix5hd2-i2c", .pm = pm_ptr(&hix5hd2_i2c_pm_ops), .of_match_table = hix5hd2_i2c_match, }, }; module_platform_driver(hix5hd2_i2c_driver); MODULE_DESCRIPTION("Hix5hd2 I2C Bus driver"); MODULE_AUTHOR("Wei Yan <[email protected]>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:hix5hd2-i2c");
linux-master
drivers/i2c/busses/i2c-hix5hd2.c
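hix5hd2_i2c_drv_setrate() derives a single divider from the peripheral clock and programs it into both HIX5I2C_SCL_H and HIX5I2C_SCL_L. The stand-alone computation is shown below; the 150 MHz clock figure is an assumed example, not a value taken from hardware documentation.

#include <stdio.h>
#include <stdint.h>

/* Same expression the driver uses: scl = (sysclock / (rate * 2)) / 2 - 1 */
static uint32_t hix5hd2_scl_div(uint32_t sysclock, uint32_t bus_rate)
{
	return (sysclock / (bus_rate * 2)) / 2 - 1;
}

int main(void)
{
	/* e.g. a 150 MHz peripheral clock and a 400 kHz fast-mode bus */
	printf("scl = %u\n", (unsigned int)hix5hd2_scl_div(150000000, 400000));
	return 0;
}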
// SPDX-License-Identifier: GPL-2.0 /* * Driver for the Renesas R-Car I2C unit * * Copyright (C) 2014-19 Wolfram Sang <[email protected]> * Copyright (C) 2011-2019 Renesas Electronics Corporation * * Copyright (C) 2012-14 Renesas Solutions Corp. * Kuninori Morimoto <[email protected]> * * This file is based on the drivers/i2c/busses/i2c-sh7760.c * (c) 2005-2008 MSC Vertriebsges.m.b.H, Manuel Lauss <[email protected]> */ #include <linux/bitops.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/i2c.h> #include <linux/i2c-smbus.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/reset.h> #include <linux/slab.h> /* register offsets */ #define ICSCR 0x00 /* slave ctrl */ #define ICMCR 0x04 /* master ctrl */ #define ICSSR 0x08 /* slave status */ #define ICMSR 0x0C /* master status */ #define ICSIER 0x10 /* slave irq enable */ #define ICMIER 0x14 /* master irq enable */ #define ICCCR 0x18 /* clock dividers */ #define ICSAR 0x1C /* slave address */ #define ICMAR 0x20 /* master address */ #define ICRXTX 0x24 /* data port */ #define ICFBSCR 0x38 /* first bit setup cycle (Gen3) */ #define ICDMAER 0x3c /* DMA enable (Gen3) */ /* ICSCR */ #define SDBS BIT(3) /* slave data buffer select */ #define SIE BIT(2) /* slave interface enable */ #define GCAE BIT(1) /* general call address enable */ #define FNA BIT(0) /* forced non acknowledgment */ /* ICMCR */ #define MDBS BIT(7) /* non-fifo mode switch */ #define FSCL BIT(6) /* override SCL pin */ #define FSDA BIT(5) /* override SDA pin */ #define OBPC BIT(4) /* override pins */ #define MIE BIT(3) /* master if enable */ #define TSBE BIT(2) #define FSB BIT(1) /* force stop bit */ #define ESG BIT(0) /* enable start bit gen */ /* ICSSR (also for ICSIER) */ #define GCAR BIT(6) /* general call received */ #define STM BIT(5) /* slave transmit mode */ #define SSR BIT(4) /* stop received */ #define SDE BIT(3) /* slave data empty */ #define SDT BIT(2) /* slave data transmitted */ #define SDR BIT(1) /* slave data received */ #define SAR BIT(0) /* slave addr received */ /* ICMSR (also for ICMIE) */ #define MNR BIT(6) /* nack received */ #define MAL BIT(5) /* arbitration lost */ #define MST BIT(4) /* sent a stop */ #define MDE BIT(3) #define MDT BIT(2) #define MDR BIT(1) #define MAT BIT(0) /* slave addr xfer done */ /* ICDMAER */ #define RSDMAE BIT(3) /* DMA Slave Received Enable */ #define TSDMAE BIT(2) /* DMA Slave Transmitted Enable */ #define RMDMAE BIT(1) /* DMA Master Received Enable */ #define TMDMAE BIT(0) /* DMA Master Transmitted Enable */ /* ICFBSCR */ #define TCYC17 0x0f /* 17*Tcyc delay 1st bit between SDA and SCL */ #define RCAR_MIN_DMA_LEN 8 #define RCAR_BUS_PHASE_START (MDBS | MIE | ESG) #define RCAR_BUS_PHASE_DATA (MDBS | MIE) #define RCAR_BUS_PHASE_STOP (MDBS | MIE | FSB) #define RCAR_IRQ_SEND (MNR | MAL | MST | MAT | MDE) #define RCAR_IRQ_RECV (MNR | MAL | MST | MAT | MDR) #define RCAR_IRQ_STOP (MST) #define ID_LAST_MSG BIT(0) #define ID_REP_AFTER_RD BIT(1) #define ID_DONE BIT(2) #define ID_ARBLOST BIT(3) #define ID_NACK BIT(4) #define ID_EPROTO BIT(5) /* persistent flags */ #define ID_P_NOT_ATOMIC BIT(28) #define ID_P_HOST_NOTIFY BIT(29) #define ID_P_NO_RXDMA BIT(30) /* HW forbids RXDMA sometimes */ #define ID_P_PM_BLOCKED BIT(31) #define ID_P_MASK GENMASK(31, 28) enum 
rcar_i2c_type { I2C_RCAR_GEN1, I2C_RCAR_GEN2, I2C_RCAR_GEN3, }; struct rcar_i2c_priv { u32 flags; void __iomem *io; struct i2c_adapter adap; struct i2c_msg *msg; int msgs_left; struct clk *clk; wait_queue_head_t wait; int pos; u32 icccr; u8 recovery_icmcr; /* protected by adapter lock */ enum rcar_i2c_type devtype; struct i2c_client *slave; struct resource *res; struct dma_chan *dma_tx; struct dma_chan *dma_rx; struct scatterlist sg; enum dma_data_direction dma_direction; struct reset_control *rstc; int irq; struct i2c_client *host_notify_client; }; #define rcar_i2c_priv_to_dev(p) ((p)->adap.dev.parent) #define rcar_i2c_is_recv(p) ((p)->msg->flags & I2C_M_RD) static void rcar_i2c_write(struct rcar_i2c_priv *priv, int reg, u32 val) { writel(val, priv->io + reg); } static u32 rcar_i2c_read(struct rcar_i2c_priv *priv, int reg) { return readl(priv->io + reg); } static void rcar_i2c_clear_irq(struct rcar_i2c_priv *priv, u32 val) { writel(~val & 0x7f, priv->io + ICMSR); } static int rcar_i2c_get_scl(struct i2c_adapter *adap) { struct rcar_i2c_priv *priv = i2c_get_adapdata(adap); return !!(rcar_i2c_read(priv, ICMCR) & FSCL); }; static void rcar_i2c_set_scl(struct i2c_adapter *adap, int val) { struct rcar_i2c_priv *priv = i2c_get_adapdata(adap); if (val) priv->recovery_icmcr |= FSCL; else priv->recovery_icmcr &= ~FSCL; rcar_i2c_write(priv, ICMCR, priv->recovery_icmcr); }; static void rcar_i2c_set_sda(struct i2c_adapter *adap, int val) { struct rcar_i2c_priv *priv = i2c_get_adapdata(adap); if (val) priv->recovery_icmcr |= FSDA; else priv->recovery_icmcr &= ~FSDA; rcar_i2c_write(priv, ICMCR, priv->recovery_icmcr); }; static int rcar_i2c_get_bus_free(struct i2c_adapter *adap) { struct rcar_i2c_priv *priv = i2c_get_adapdata(adap); return !(rcar_i2c_read(priv, ICMCR) & FSDA); }; static struct i2c_bus_recovery_info rcar_i2c_bri = { .get_scl = rcar_i2c_get_scl, .set_scl = rcar_i2c_set_scl, .set_sda = rcar_i2c_set_sda, .get_bus_free = rcar_i2c_get_bus_free, .recover_bus = i2c_generic_scl_recovery, }; static void rcar_i2c_init(struct rcar_i2c_priv *priv) { /* reset master mode */ rcar_i2c_write(priv, ICMIER, 0); rcar_i2c_write(priv, ICMCR, MDBS); rcar_i2c_write(priv, ICMSR, 0); /* start clock */ rcar_i2c_write(priv, ICCCR, priv->icccr); if (priv->devtype == I2C_RCAR_GEN3) rcar_i2c_write(priv, ICFBSCR, TCYC17); } static int rcar_i2c_bus_barrier(struct rcar_i2c_priv *priv) { int ret; u32 val; ret = readl_poll_timeout(priv->io + ICMCR, val, !(val & FSDA), 10, priv->adap.timeout); if (ret) { /* Waiting did not help, try to recover */ priv->recovery_icmcr = MDBS | OBPC | FSDA | FSCL; ret = i2c_recover_bus(&priv->adap); } return ret; } static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv) { u32 scgd, cdf, round, ick, sum, scl, cdf_width; unsigned long rate; struct device *dev = rcar_i2c_priv_to_dev(priv); struct i2c_timings t = { .bus_freq_hz = I2C_MAX_STANDARD_MODE_FREQ, .scl_fall_ns = 35, .scl_rise_ns = 200, .scl_int_delay_ns = 50, }; /* Fall back to previously used values if not supplied */ i2c_parse_fw_timings(dev, &t, false); switch (priv->devtype) { case I2C_RCAR_GEN1: cdf_width = 2; break; case I2C_RCAR_GEN2: case I2C_RCAR_GEN3: cdf_width = 3; break; default: dev_err(dev, "device type error\n"); return -EIO; } /* * calculate SCL clock * see * ICCCR * * ick = clkp / (1 + CDF) * SCL = ick / (20 + SCGD * 8 + F[(ticf + tr + intd) * ick]) * * ick : I2C internal clock < 20 MHz * ticf : I2C SCL falling time * tr : I2C SCL rising time * intd : LSI internal delay * clkp : peripheral_clk * F[] : integer 
up-valuation */ rate = clk_get_rate(priv->clk); cdf = rate / 20000000; if (cdf >= 1U << cdf_width) { dev_err(dev, "Input clock %lu too high\n", rate); return -EIO; } ick = rate / (cdf + 1); /* * it is impossible to calculate large scale * number on u32. separate it * * F[(ticf + tr + intd) * ick] with sum = (ticf + tr + intd) * = F[sum * ick / 1000000000] * = F[(ick / 1000000) * sum / 1000] */ sum = t.scl_fall_ns + t.scl_rise_ns + t.scl_int_delay_ns; round = (ick + 500000) / 1000000 * sum; round = (round + 500) / 1000; /* * SCL = ick / (20 + SCGD * 8 + F[(ticf + tr + intd) * ick]) * * Calculation result (= SCL) should be less than * bus_speed for hardware safety * * We could use something along the lines of * div = ick / (bus_speed + 1) + 1; * scgd = (div - 20 - round + 7) / 8; * scl = ick / (20 + (scgd * 8) + round); * (not fully verified) but that would get pretty involved */ for (scgd = 0; scgd < 0x40; scgd++) { scl = ick / (20 + (scgd * 8) + round); if (scl <= t.bus_freq_hz) goto scgd_find; } dev_err(dev, "it is impossible to calculate best SCL\n"); return -EIO; scgd_find: dev_dbg(dev, "clk %d/%d(%lu), round %u, CDF:0x%x, SCGD: 0x%x\n", scl, t.bus_freq_hz, rate, round, cdf, scgd); /* keep icccr value */ priv->icccr = scgd << cdf_width | cdf; return 0; } /* * We don't have a test case but the HW engineers say that the write order of * ICMSR and ICMCR depends on whether we issue START or REP_START. So, ICMSR * handling is outside of this function. First messages clear ICMSR before this * function, interrupt handlers clear the relevant bits after this function. */ static void rcar_i2c_prepare_msg(struct rcar_i2c_priv *priv) { int read = !!rcar_i2c_is_recv(priv); bool rep_start = !(priv->flags & ID_REP_AFTER_RD); priv->pos = 0; priv->flags &= ID_P_MASK; if (priv->msgs_left == 1) priv->flags |= ID_LAST_MSG; rcar_i2c_write(priv, ICMAR, i2c_8bit_addr_from_msg(priv->msg)); if (priv->flags & ID_P_NOT_ATOMIC) rcar_i2c_write(priv, ICMIER, read ? RCAR_IRQ_RECV : RCAR_IRQ_SEND); if (rep_start) rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_START); } static void rcar_i2c_first_msg(struct rcar_i2c_priv *priv, struct i2c_msg *msgs, int num) { priv->msg = msgs; priv->msgs_left = num; rcar_i2c_write(priv, ICMSR, 0); /* must be before preparing msg */ rcar_i2c_prepare_msg(priv); } static void rcar_i2c_next_msg(struct rcar_i2c_priv *priv) { priv->msg++; priv->msgs_left--; rcar_i2c_prepare_msg(priv); /* ICMSR handling must come afterwards in the irq handler */ } static void rcar_i2c_cleanup_dma(struct rcar_i2c_priv *priv, bool terminate) { struct dma_chan *chan = priv->dma_direction == DMA_FROM_DEVICE ? priv->dma_rx : priv->dma_tx; /* only allowed from thread context! */ if (terminate) dmaengine_terminate_sync(chan); dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg), sg_dma_len(&priv->sg), priv->dma_direction); /* Gen3 can only do one RXDMA per transfer and we just completed it */ if (priv->devtype == I2C_RCAR_GEN3 && priv->dma_direction == DMA_FROM_DEVICE) priv->flags |= ID_P_NO_RXDMA; priv->dma_direction = DMA_NONE; /* Disable DMA Master Received/Transmitted, must be last! */ rcar_i2c_write(priv, ICDMAER, 0); } static void rcar_i2c_dma_callback(void *data) { struct rcar_i2c_priv *priv = data; priv->pos += sg_dma_len(&priv->sg); rcar_i2c_cleanup_dma(priv, false); } static bool rcar_i2c_dma(struct rcar_i2c_priv *priv) { struct device *dev = rcar_i2c_priv_to_dev(priv); struct i2c_msg *msg = priv->msg; bool read = msg->flags & I2C_M_RD; enum dma_data_direction dir = read ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE; struct dma_chan *chan = read ? priv->dma_rx : priv->dma_tx; struct dma_async_tx_descriptor *txdesc; dma_addr_t dma_addr; dma_cookie_t cookie; unsigned char *buf; int len; /* Do various checks to see if DMA is feasible at all */ if (!(priv->flags & ID_P_NOT_ATOMIC) || IS_ERR(chan) || msg->len < RCAR_MIN_DMA_LEN || !(msg->flags & I2C_M_DMA_SAFE) || (read && priv->flags & ID_P_NO_RXDMA)) return false; if (read) { /* * The last two bytes needs to be fetched using PIO in * order for the STOP phase to work. */ buf = priv->msg->buf; len = priv->msg->len - 2; } else { /* * First byte in message was sent using PIO. */ buf = priv->msg->buf + 1; len = priv->msg->len - 1; } dma_addr = dma_map_single(chan->device->dev, buf, len, dir); if (dma_mapping_error(chan->device->dev, dma_addr)) { dev_dbg(dev, "dma map failed, using PIO\n"); return false; } sg_dma_len(&priv->sg) = len; sg_dma_address(&priv->sg) = dma_addr; priv->dma_direction = dir; txdesc = dmaengine_prep_slave_sg(chan, &priv->sg, 1, read ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!txdesc) { dev_dbg(dev, "dma prep slave sg failed, using PIO\n"); rcar_i2c_cleanup_dma(priv, false); return false; } txdesc->callback = rcar_i2c_dma_callback; txdesc->callback_param = priv; cookie = dmaengine_submit(txdesc); if (dma_submit_error(cookie)) { dev_dbg(dev, "submitting dma failed, using PIO\n"); rcar_i2c_cleanup_dma(priv, false); return false; } /* Enable DMA Master Received/Transmitted */ if (read) rcar_i2c_write(priv, ICDMAER, RMDMAE); else rcar_i2c_write(priv, ICDMAER, TMDMAE); dma_async_issue_pending(chan); return true; } static void rcar_i2c_irq_send(struct rcar_i2c_priv *priv, u32 msr) { struct i2c_msg *msg = priv->msg; u32 irqs_to_clear = MDE; /* FIXME: sometimes, unknown interrupt happened. Do nothing */ if (!(msr & MDE)) return; if (msr & MAT) irqs_to_clear |= MAT; /* Check if DMA can be enabled and take over */ if (priv->pos == 1 && rcar_i2c_dma(priv)) return; if (priv->pos < msg->len) { /* * Prepare next data to ICRXTX register. * This data will go to _SHIFT_ register. * * * * [ICRXTX] -> [SHIFT] -> [I2C bus] */ rcar_i2c_write(priv, ICRXTX, msg->buf[priv->pos]); priv->pos++; } else { /* * The last data was pushed to ICRXTX on _PREV_ empty irq. * It is on _SHIFT_ register, and will sent to I2C bus. * * * * [ICRXTX] -> [SHIFT] -> [I2C bus] */ if (priv->flags & ID_LAST_MSG) /* * If current msg is the _LAST_ msg, * prepare stop condition here. * ID_DONE will be set on STOP irq. */ rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_STOP); else rcar_i2c_next_msg(priv); } rcar_i2c_clear_irq(priv, irqs_to_clear); } static void rcar_i2c_irq_recv(struct rcar_i2c_priv *priv, u32 msr) { struct i2c_msg *msg = priv->msg; bool recv_len_init = priv->pos == 0 && msg->flags & I2C_M_RECV_LEN; u32 irqs_to_clear = MDR; /* FIXME: sometimes, unknown interrupt happened. Do nothing */ if (!(msr & MDR)) return; if (msr & MAT) { irqs_to_clear |= MAT; /* * Address transfer phase finished, but no data at this point. * Try to use DMA to receive data. */ rcar_i2c_dma(priv); } else if (priv->pos < msg->len) { /* get received data */ u8 data = rcar_i2c_read(priv, ICRXTX); msg->buf[priv->pos] = data; if (recv_len_init) { if (data == 0 || data > I2C_SMBUS_BLOCK_MAX) { priv->flags |= ID_DONE | ID_EPROTO; return; } msg->len += msg->buf[0]; /* Enough data for DMA? 
*/ if (rcar_i2c_dma(priv)) return; /* new length after RECV_LEN now properly initialized */ recv_len_init = false; } priv->pos++; } /* * If next received data is the _LAST_ and we are not waiting for a new * length because of RECV_LEN, then go to a new phase. */ if (priv->pos + 1 == msg->len && !recv_len_init) { if (priv->flags & ID_LAST_MSG) { rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_STOP); } else { rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_START); priv->flags |= ID_REP_AFTER_RD; } } if (priv->pos == msg->len && !(priv->flags & ID_LAST_MSG)) rcar_i2c_next_msg(priv); rcar_i2c_clear_irq(priv, irqs_to_clear); } static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv) { u32 ssr_raw, ssr_filtered; u8 value; ssr_raw = rcar_i2c_read(priv, ICSSR) & 0xff; ssr_filtered = ssr_raw & rcar_i2c_read(priv, ICSIER); if (!ssr_filtered) return false; /* address detected */ if (ssr_filtered & SAR) { /* read or write request */ if (ssr_raw & STM) { i2c_slave_event(priv->slave, I2C_SLAVE_READ_REQUESTED, &value); rcar_i2c_write(priv, ICRXTX, value); rcar_i2c_write(priv, ICSIER, SDE | SSR | SAR); } else { i2c_slave_event(priv->slave, I2C_SLAVE_WRITE_REQUESTED, &value); rcar_i2c_read(priv, ICRXTX); /* dummy read */ rcar_i2c_write(priv, ICSIER, SDR | SSR | SAR); } /* Clear SSR, too, because of old STOPs to other clients than us */ rcar_i2c_write(priv, ICSSR, ~(SAR | SSR) & 0xff); } /* master sent stop */ if (ssr_filtered & SSR) { i2c_slave_event(priv->slave, I2C_SLAVE_STOP, &value); rcar_i2c_write(priv, ICSCR, SIE | SDBS); /* clear our NACK */ rcar_i2c_write(priv, ICSIER, SAR); rcar_i2c_write(priv, ICSSR, ~SSR & 0xff); } /* master wants to write to us */ if (ssr_filtered & SDR) { int ret; value = rcar_i2c_read(priv, ICRXTX); ret = i2c_slave_event(priv->slave, I2C_SLAVE_WRITE_RECEIVED, &value); /* Send NACK in case of error */ rcar_i2c_write(priv, ICSCR, SIE | SDBS | (ret < 0 ? FNA : 0)); rcar_i2c_write(priv, ICSSR, ~SDR & 0xff); } /* master wants to read from us */ if (ssr_filtered & SDE) { i2c_slave_event(priv->slave, I2C_SLAVE_READ_PROCESSED, &value); rcar_i2c_write(priv, ICRXTX, value); rcar_i2c_write(priv, ICSSR, ~SDE & 0xff); } return true; } /* * This driver has a lock-free design because there are IP cores (at least * R-Car Gen2) which have an inherent race condition in their hardware design. * There, we need to switch to RCAR_BUS_PHASE_DATA as soon as possible after * the interrupt was generated, otherwise an unwanted repeated message gets * generated. It turned out that taking a spinlock at the beginning of the ISR * was already causing repeated messages. Thus, this driver was converted to * the now lockless behaviour. Please keep this in mind when hacking the driver. * R-Car Gen3 seems to have this fixed but earlier versions than R-Car Gen2 are * likely affected. Therefore, we have different interrupt handler entries. 
*/ static irqreturn_t rcar_i2c_irq(int irq, struct rcar_i2c_priv *priv, u32 msr) { if (!msr) { if (rcar_i2c_slave_irq(priv)) return IRQ_HANDLED; return IRQ_NONE; } /* Arbitration lost */ if (msr & MAL) { priv->flags |= ID_DONE | ID_ARBLOST; goto out; } /* Nack */ if (msr & MNR) { /* HW automatically sends STOP after received NACK */ if (priv->flags & ID_P_NOT_ATOMIC) rcar_i2c_write(priv, ICMIER, RCAR_IRQ_STOP); priv->flags |= ID_NACK; goto out; } /* Stop */ if (msr & MST) { priv->msgs_left--; /* The last message also made it */ priv->flags |= ID_DONE; goto out; } if (rcar_i2c_is_recv(priv)) rcar_i2c_irq_recv(priv, msr); else rcar_i2c_irq_send(priv, msr); out: if (priv->flags & ID_DONE) { rcar_i2c_write(priv, ICMIER, 0); rcar_i2c_write(priv, ICMSR, 0); if (priv->flags & ID_P_NOT_ATOMIC) wake_up(&priv->wait); } return IRQ_HANDLED; } static irqreturn_t rcar_i2c_gen2_irq(int irq, void *ptr) { struct rcar_i2c_priv *priv = ptr; u32 msr; /* Clear START or STOP immediately, except for REPSTART after read */ if (likely(!(priv->flags & ID_REP_AFTER_RD))) rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_DATA); /* Only handle interrupts that are currently enabled */ msr = rcar_i2c_read(priv, ICMSR); if (priv->flags & ID_P_NOT_ATOMIC) msr &= rcar_i2c_read(priv, ICMIER); return rcar_i2c_irq(irq, priv, msr); } static irqreturn_t rcar_i2c_gen3_irq(int irq, void *ptr) { struct rcar_i2c_priv *priv = ptr; u32 msr; /* Only handle interrupts that are currently enabled */ msr = rcar_i2c_read(priv, ICMSR); if (priv->flags & ID_P_NOT_ATOMIC) msr &= rcar_i2c_read(priv, ICMIER); /* * Clear START or STOP immediately, except for REPSTART after read or * if a spurious interrupt was detected. */ if (likely(!(priv->flags & ID_REP_AFTER_RD) && msr)) rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_DATA); return rcar_i2c_irq(irq, priv, msr); } static struct dma_chan *rcar_i2c_request_dma_chan(struct device *dev, enum dma_transfer_direction dir, dma_addr_t port_addr) { struct dma_chan *chan; struct dma_slave_config cfg; char *chan_name = dir == DMA_MEM_TO_DEV ? "tx" : "rx"; int ret; chan = dma_request_chan(dev, chan_name); if (IS_ERR(chan)) { dev_dbg(dev, "request_channel failed for %s (%ld)\n", chan_name, PTR_ERR(chan)); return chan; } memset(&cfg, 0, sizeof(cfg)); cfg.direction = dir; if (dir == DMA_MEM_TO_DEV) { cfg.dst_addr = port_addr; cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; } else { cfg.src_addr = port_addr; cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; } ret = dmaengine_slave_config(chan, &cfg); if (ret) { dev_dbg(dev, "slave_config failed for %s (%d)\n", chan_name, ret); dma_release_channel(chan); return ERR_PTR(ret); } dev_dbg(dev, "got DMA channel for %s\n", chan_name); return chan; } static void rcar_i2c_request_dma(struct rcar_i2c_priv *priv, struct i2c_msg *msg) { struct device *dev = rcar_i2c_priv_to_dev(priv); bool read; struct dma_chan *chan; enum dma_transfer_direction dir; read = msg->flags & I2C_M_RD; chan = read ? priv->dma_rx : priv->dma_tx; if (PTR_ERR(chan) != -EPROBE_DEFER) return; dir = read ? 
DMA_DEV_TO_MEM : DMA_MEM_TO_DEV; chan = rcar_i2c_request_dma_chan(dev, dir, priv->res->start + ICRXTX); if (read) priv->dma_rx = chan; else priv->dma_tx = chan; } static void rcar_i2c_release_dma(struct rcar_i2c_priv *priv) { if (!IS_ERR(priv->dma_tx)) { dma_release_channel(priv->dma_tx); priv->dma_tx = ERR_PTR(-EPROBE_DEFER); } if (!IS_ERR(priv->dma_rx)) { dma_release_channel(priv->dma_rx); priv->dma_rx = ERR_PTR(-EPROBE_DEFER); } } /* I2C is a special case, we need to poll the status of a reset */ static int rcar_i2c_do_reset(struct rcar_i2c_priv *priv) { int ret; ret = reset_control_reset(priv->rstc); if (ret) return ret; return read_poll_timeout_atomic(reset_control_status, ret, ret == 0, 1, 100, false, priv->rstc); } static int rcar_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct rcar_i2c_priv *priv = i2c_get_adapdata(adap); struct device *dev = rcar_i2c_priv_to_dev(priv); int i, ret; long time_left; priv->flags |= ID_P_NOT_ATOMIC; pm_runtime_get_sync(dev); /* Check bus state before init otherwise bus busy info will be lost */ ret = rcar_i2c_bus_barrier(priv); if (ret < 0) goto out; /* Gen3 needs a reset before allowing RXDMA once */ if (priv->devtype == I2C_RCAR_GEN3) { priv->flags |= ID_P_NO_RXDMA; if (!IS_ERR(priv->rstc)) { ret = rcar_i2c_do_reset(priv); if (ret == 0) priv->flags &= ~ID_P_NO_RXDMA; } } rcar_i2c_init(priv); for (i = 0; i < num; i++) rcar_i2c_request_dma(priv, msgs + i); rcar_i2c_first_msg(priv, msgs, num); time_left = wait_event_timeout(priv->wait, priv->flags & ID_DONE, num * adap->timeout); /* cleanup DMA if it couldn't complete properly due to an error */ if (priv->dma_direction != DMA_NONE) rcar_i2c_cleanup_dma(priv, true); if (!time_left) { rcar_i2c_init(priv); ret = -ETIMEDOUT; } else if (priv->flags & ID_NACK) { ret = -ENXIO; } else if (priv->flags & ID_ARBLOST) { ret = -EAGAIN; } else if (priv->flags & ID_EPROTO) { ret = -EPROTO; } else { ret = num - priv->msgs_left; /* The number of transfer */ } out: pm_runtime_put(dev); if (ret < 0 && ret != -ENXIO) dev_err(dev, "error %d : %x\n", ret, priv->flags); return ret; } static int rcar_i2c_master_xfer_atomic(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct rcar_i2c_priv *priv = i2c_get_adapdata(adap); struct device *dev = rcar_i2c_priv_to_dev(priv); unsigned long j; bool time_left; int ret; priv->flags &= ~ID_P_NOT_ATOMIC; pm_runtime_get_sync(dev); /* Check bus state before init otherwise bus busy info will be lost */ ret = rcar_i2c_bus_barrier(priv); if (ret < 0) goto out; rcar_i2c_init(priv); rcar_i2c_first_msg(priv, msgs, num); j = jiffies + num * adap->timeout; do { u32 msr = rcar_i2c_read(priv, ICMSR); msr &= (rcar_i2c_is_recv(priv) ? 
RCAR_IRQ_RECV : RCAR_IRQ_SEND) | RCAR_IRQ_STOP; if (msr) { if (priv->devtype < I2C_RCAR_GEN3) rcar_i2c_gen2_irq(0, priv); else rcar_i2c_gen3_irq(0, priv); } time_left = time_before_eq(jiffies, j); } while (!(priv->flags & ID_DONE) && time_left); if (!time_left) { rcar_i2c_init(priv); ret = -ETIMEDOUT; } else if (priv->flags & ID_NACK) { ret = -ENXIO; } else if (priv->flags & ID_ARBLOST) { ret = -EAGAIN; } else if (priv->flags & ID_EPROTO) { ret = -EPROTO; } else { ret = num - priv->msgs_left; /* The number of transfer */ } out: pm_runtime_put(dev); if (ret < 0 && ret != -ENXIO) dev_err(dev, "error %d : %x\n", ret, priv->flags); return ret; } static int rcar_reg_slave(struct i2c_client *slave) { struct rcar_i2c_priv *priv = i2c_get_adapdata(slave->adapter); if (priv->slave) return -EBUSY; if (slave->flags & I2C_CLIENT_TEN) return -EAFNOSUPPORT; /* Keep device active for slave address detection logic */ pm_runtime_get_sync(rcar_i2c_priv_to_dev(priv)); priv->slave = slave; rcar_i2c_write(priv, ICSAR, slave->addr); rcar_i2c_write(priv, ICSSR, 0); rcar_i2c_write(priv, ICSIER, SAR); rcar_i2c_write(priv, ICSCR, SIE | SDBS); return 0; } static int rcar_unreg_slave(struct i2c_client *slave) { struct rcar_i2c_priv *priv = i2c_get_adapdata(slave->adapter); WARN_ON(!priv->slave); /* ensure no irq is running before clearing ptr */ disable_irq(priv->irq); rcar_i2c_write(priv, ICSIER, 0); rcar_i2c_write(priv, ICSSR, 0); enable_irq(priv->irq); rcar_i2c_write(priv, ICSCR, SDBS); rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */ priv->slave = NULL; pm_runtime_put(rcar_i2c_priv_to_dev(priv)); return 0; } static u32 rcar_i2c_func(struct i2c_adapter *adap) { struct rcar_i2c_priv *priv = i2c_get_adapdata(adap); /* * This HW can't do: * I2C_SMBUS_QUICK (setting FSB during START didn't work) * I2C_M_NOSTART (automatically sends address after START) * I2C_M_IGNORE_NAK (automatically sends STOP after NAK) */ u32 func = I2C_FUNC_I2C | I2C_FUNC_SLAVE | (I2C_FUNC_SMBUS_EMUL_ALL & ~I2C_FUNC_SMBUS_QUICK); if (priv->flags & ID_P_HOST_NOTIFY) func |= I2C_FUNC_SMBUS_HOST_NOTIFY; return func; } static const struct i2c_algorithm rcar_i2c_algo = { .master_xfer = rcar_i2c_master_xfer, .master_xfer_atomic = rcar_i2c_master_xfer_atomic, .functionality = rcar_i2c_func, .reg_slave = rcar_reg_slave, .unreg_slave = rcar_unreg_slave, }; static const struct i2c_adapter_quirks rcar_i2c_quirks = { .flags = I2C_AQ_NO_ZERO_LEN, }; static const struct of_device_id rcar_i2c_dt_ids[] = { { .compatible = "renesas,i2c-r8a7778", .data = (void *)I2C_RCAR_GEN1 }, { .compatible = "renesas,i2c-r8a7779", .data = (void *)I2C_RCAR_GEN1 }, { .compatible = "renesas,i2c-r8a7790", .data = (void *)I2C_RCAR_GEN2 }, { .compatible = "renesas,i2c-r8a7791", .data = (void *)I2C_RCAR_GEN2 }, { .compatible = "renesas,i2c-r8a7792", .data = (void *)I2C_RCAR_GEN2 }, { .compatible = "renesas,i2c-r8a7793", .data = (void *)I2C_RCAR_GEN2 }, { .compatible = "renesas,i2c-r8a7794", .data = (void *)I2C_RCAR_GEN2 }, { .compatible = "renesas,i2c-r8a7795", .data = (void *)I2C_RCAR_GEN3 }, { .compatible = "renesas,i2c-r8a7796", .data = (void *)I2C_RCAR_GEN3 }, { .compatible = "renesas,rcar-gen1-i2c", .data = (void *)I2C_RCAR_GEN1 }, { .compatible = "renesas,rcar-gen2-i2c", .data = (void *)I2C_RCAR_GEN2 }, { .compatible = "renesas,rcar-gen3-i2c", .data = (void *)I2C_RCAR_GEN3 }, { .compatible = "renesas,rcar-gen4-i2c", .data = (void *)I2C_RCAR_GEN3 }, {}, }; MODULE_DEVICE_TABLE(of, rcar_i2c_dt_ids); static int rcar_i2c_probe(struct platform_device *pdev) { 
struct rcar_i2c_priv *priv; struct i2c_adapter *adap; struct device *dev = &pdev->dev; unsigned long irqflags = 0; irqreturn_t (*irqhandler)(int irq, void *ptr) = rcar_i2c_gen3_irq; int ret; /* Otherwise logic will break because some bytes must always use PIO */ BUILD_BUG_ON_MSG(RCAR_MIN_DMA_LEN < 3, "Invalid min DMA length"); priv = devm_kzalloc(dev, sizeof(struct rcar_i2c_priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->clk = devm_clk_get(dev, NULL); if (IS_ERR(priv->clk)) { dev_err(dev, "cannot get clock\n"); return PTR_ERR(priv->clk); } priv->io = devm_platform_get_and_ioremap_resource(pdev, 0, &priv->res); if (IS_ERR(priv->io)) return PTR_ERR(priv->io); priv->devtype = (enum rcar_i2c_type)of_device_get_match_data(dev); init_waitqueue_head(&priv->wait); adap = &priv->adap; adap->nr = pdev->id; adap->algo = &rcar_i2c_algo; adap->class = I2C_CLASS_DEPRECATED; adap->retries = 3; adap->dev.parent = dev; adap->dev.of_node = dev->of_node; adap->bus_recovery_info = &rcar_i2c_bri; adap->quirks = &rcar_i2c_quirks; i2c_set_adapdata(adap, priv); strscpy(adap->name, pdev->name, sizeof(adap->name)); /* Init DMA */ sg_init_table(&priv->sg, 1); priv->dma_direction = DMA_NONE; priv->dma_rx = priv->dma_tx = ERR_PTR(-EPROBE_DEFER); /* Activate device for clock calculation */ pm_runtime_enable(dev); pm_runtime_get_sync(dev); ret = rcar_i2c_clock_calculate(priv); if (ret < 0) { pm_runtime_put(dev); goto out_pm_disable; } rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */ if (priv->devtype < I2C_RCAR_GEN3) { irqflags |= IRQF_NO_THREAD; irqhandler = rcar_i2c_gen2_irq; } if (priv->devtype == I2C_RCAR_GEN3) { priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL); if (!IS_ERR(priv->rstc)) { ret = reset_control_status(priv->rstc); if (ret < 0) priv->rstc = ERR_PTR(-ENOTSUPP); } } /* Stay always active when multi-master to keep arbitration working */ if (of_property_read_bool(dev->of_node, "multi-master")) priv->flags |= ID_P_PM_BLOCKED; else pm_runtime_put(dev); if (of_property_read_bool(dev->of_node, "smbus")) priv->flags |= ID_P_HOST_NOTIFY; ret = platform_get_irq(pdev, 0); if (ret < 0) goto out_pm_put; priv->irq = ret; ret = devm_request_irq(dev, priv->irq, irqhandler, irqflags, dev_name(dev), priv); if (ret < 0) { dev_err(dev, "cannot get irq %d\n", priv->irq); goto out_pm_put; } platform_set_drvdata(pdev, priv); ret = i2c_add_numbered_adapter(adap); if (ret < 0) goto out_pm_put; if (priv->flags & ID_P_HOST_NOTIFY) { priv->host_notify_client = i2c_new_slave_host_notify_device(adap); if (IS_ERR(priv->host_notify_client)) { ret = PTR_ERR(priv->host_notify_client); goto out_del_device; } } dev_info(dev, "probed\n"); return 0; out_del_device: i2c_del_adapter(&priv->adap); out_pm_put: if (priv->flags & ID_P_PM_BLOCKED) pm_runtime_put(dev); out_pm_disable: pm_runtime_disable(dev); return ret; } static void rcar_i2c_remove(struct platform_device *pdev) { struct rcar_i2c_priv *priv = platform_get_drvdata(pdev); struct device *dev = &pdev->dev; if (priv->host_notify_client) i2c_free_slave_host_notify_device(priv->host_notify_client); i2c_del_adapter(&priv->adap); rcar_i2c_release_dma(priv); if (priv->flags & ID_P_PM_BLOCKED) pm_runtime_put(dev); pm_runtime_disable(dev); } static int rcar_i2c_suspend(struct device *dev) { struct rcar_i2c_priv *priv = dev_get_drvdata(dev); i2c_mark_adapter_suspended(&priv->adap); return 0; } static int rcar_i2c_resume(struct device *dev) { struct rcar_i2c_priv *priv = dev_get_drvdata(dev); i2c_mark_adapter_resumed(&priv->adap); return 0; } static 
const struct dev_pm_ops rcar_i2c_pm_ops = { NOIRQ_SYSTEM_SLEEP_PM_OPS(rcar_i2c_suspend, rcar_i2c_resume) }; static struct platform_driver rcar_i2c_driver = { .driver = { .name = "i2c-rcar", .of_match_table = rcar_i2c_dt_ids, .pm = pm_sleep_ptr(&rcar_i2c_pm_ops), }, .probe = rcar_i2c_probe, .remove_new = rcar_i2c_remove, }; module_platform_driver(rcar_i2c_driver); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Renesas R-Car I2C bus driver"); MODULE_AUTHOR("Kuninori Morimoto <[email protected]>");
linux-master
drivers/i2c/busses/i2c-rcar.c
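The comment block inside rcar_i2c_clock_calculate() above gives the SCL formula but no worked numbers. The standalone sketch below (not driver code) runs the same arithmetic for an assumed 133 MHz peripheral clock, the driver's default timings (35 ns fall, 200 ns rise, 50 ns internal delay), a 100 kHz bus and the Gen2/Gen3 3-bit CDF field; the clock rate and target speed are illustrative assumptions only.

/*
 * Standalone sketch of the ICCCR divider search performed by
 * rcar_i2c_clock_calculate(); clkp and the bus speed are assumed values.
 */
#include <stdio.h>

int main(void)
{
        unsigned long rate = 133000000;         /* assumed clkp in Hz */
        unsigned int cdf_width = 3;             /* Gen2/Gen3 field width */
        unsigned int bus_freq_hz = 100000;      /* requested SCL */
        unsigned int sum = 35 + 200 + 50;       /* ticf + tr + intd in ns */
        unsigned int cdf, ick, round, scgd, scl = 0;

        /* ick = clkp / (1 + CDF), keeping ick below 20 MHz */
        cdf = rate / 20000000;
        if (cdf >= 1U << cdf_width)
                return 1;
        ick = rate / (cdf + 1);

        /* F[(ticf + tr + intd) * ick], split in two steps to stay in u32 */
        round = (ick + 500000) / 1000000 * sum;
        round = (round + 500) / 1000;

        /* smallest SCGD whose SCL does not exceed the requested bus speed */
        for (scgd = 0; scgd < 0x40; scgd++) {
                scl = ick / (20 + scgd * 8 + round);
                if (scl <= bus_freq_hz)
                        break;
        }

        printf("CDF=%u SCGD=0x%02x -> SCL about %u Hz (ICCCR=0x%02x)\n",
               cdf, scgd, scl, scgd << cdf_width | cdf);
        return 0;
}

With these inputs the search settles on CDF=6 and SCGD=0x15, giving roughly 98 kHz, which stays below the requested 100 kHz as the driver requires.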
// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (c) 2002,2003 Alexander Malysh <[email protected]> */ /* Status: beta Supports: SIS 630 SIS 730 SIS 964 Notable differences between chips: +------------------------+--------------------+-------------------+ | | SIS630/730 | SIS964 | +------------------------+--------------------+-------------------+ | Clock | 14kHz/56kHz | 55.56kHz/27.78kHz | | SMBus registers offset | 0x80 | 0xE0 | | SMB_CNT | Bit 1 = Slave Busy | Bit 1 = Bus probe | | (not used yet) | Bit 3 is reserved | Bit 3 = Last byte | | SMB_PCOUNT | Offset + 0x06 | Offset + 0x14 | | SMB_COUNT | 4:0 bits | 5:0 bits | +------------------------+--------------------+-------------------+ (Other differences don't affect the functions provided by the driver) Note: we assume there can only be one device, with one SMBus interface. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/ioport.h> #include <linux/i2c.h> #include <linux/acpi.h> #include <linux/io.h> /* SIS964 id is defined here as we are the only file using it */ #define PCI_DEVICE_ID_SI_964 0x0964 /* SIS630/730/964 SMBus registers */ #define SMB_STS 0x00 /* status */ #define SMB_CNT 0x02 /* control */ #define SMBHOST_CNT 0x03 /* host control */ #define SMB_ADDR 0x04 /* address */ #define SMB_CMD 0x05 /* command */ #define SMB_COUNT 0x07 /* byte count */ #define SMB_BYTE 0x08 /* ~0x8F data byte field */ /* SMB_STS register */ #define BYTE_DONE_STS 0x10 /* Byte Done Status / Block Array */ #define SMBCOL_STS 0x04 /* Collision */ #define SMBERR_STS 0x02 /* Device error */ /* SMB_CNT register */ #define MSTO_EN 0x40 /* Host Master Timeout Enable */ #define SMBCLK_SEL 0x20 /* Host master clock selection */ #define SMB_PROBE 0x02 /* Bus Probe/Slave busy */ #define SMB_HOSTBUSY 0x01 /* Host Busy */ /* SMBHOST_CNT register */ #define SMB_KILL 0x20 /* Kill */ #define SMB_START 0x10 /* Start */ /* register count for request_region * As we don't use SMB_PCOUNT, 20 is ok for SiS630 and SiS964 */ #define SIS630_SMB_IOREGION 20 /* PCI address constants */ /* acpi base address register */ #define SIS630_ACPI_BASE_REG 0x74 /* bios control register */ #define SIS630_BIOS_CTL_REG 0x40 /* Other settings */ #define MAX_TIMEOUT 500 /* SIS630 constants */ #define SIS630_QUICK 0x00 #define SIS630_BYTE 0x01 #define SIS630_BYTE_DATA 0x02 #define SIS630_WORD_DATA 0x03 #define SIS630_PCALL 0x04 #define SIS630_BLOCK_DATA 0x05 static struct pci_driver sis630_driver; /* insmod parameters */ static bool high_clock; static bool force; module_param(high_clock, bool, 0); MODULE_PARM_DESC(high_clock, "Set Host Master Clock to 56KHz (default 14KHz) (SIS630/730 only)."); module_param(force, bool, 0); MODULE_PARM_DESC(force, "Forcibly enable the SIS630. DANGEROUS!"); /* SMBus base address */ static unsigned short smbus_base; /* supported chips */ static int supported[] = { PCI_DEVICE_ID_SI_630, PCI_DEVICE_ID_SI_730, PCI_DEVICE_ID_SI_760, 0 /* terminates the list */ }; static inline u8 sis630_read(u8 reg) { return inb(smbus_base + reg); } static inline void sis630_write(u8 reg, u8 data) { outb(data, smbus_base + reg); } static int sis630_transaction_start(struct i2c_adapter *adap, int size, u8 *oldclock) { int temp; /* Make sure the SMBus host is ready to start transmitting. */ temp = sis630_read(SMB_CNT); if ((temp & (SMB_PROBE | SMB_HOSTBUSY)) != 0x00) { dev_dbg(&adap->dev, "SMBus busy (%02x). 
Resetting...\n", temp); /* kill smbus transaction */ sis630_write(SMBHOST_CNT, SMB_KILL); temp = sis630_read(SMB_CNT); if (temp & (SMB_PROBE | SMB_HOSTBUSY)) { dev_dbg(&adap->dev, "Failed! (%02x)\n", temp); return -EBUSY; } else { dev_dbg(&adap->dev, "Successful!\n"); } } /* save old clock, so we can prevent machine for hung */ *oldclock = sis630_read(SMB_CNT); dev_dbg(&adap->dev, "saved clock 0x%02x\n", *oldclock); /* disable timeout interrupt, * set Host Master Clock to 56KHz if requested */ if (high_clock) sis630_write(SMB_CNT, SMBCLK_SEL); else sis630_write(SMB_CNT, (*oldclock & ~MSTO_EN)); /* clear all sticky bits */ temp = sis630_read(SMB_STS); sis630_write(SMB_STS, temp & 0x1e); /* start the transaction by setting bit 4 and size */ sis630_write(SMBHOST_CNT, SMB_START | (size & 0x07)); return 0; } static int sis630_transaction_wait(struct i2c_adapter *adap, int size) { int temp, result = 0, timeout = 0; /* We will always wait for a fraction of a second! */ do { msleep(1); temp = sis630_read(SMB_STS); /* check if block transmitted */ if (size == SIS630_BLOCK_DATA && (temp & BYTE_DONE_STS)) break; } while (!(temp & 0x0e) && (timeout++ < MAX_TIMEOUT)); /* If the SMBus is still busy, we give up */ if (timeout > MAX_TIMEOUT) { dev_dbg(&adap->dev, "SMBus Timeout!\n"); result = -ETIMEDOUT; } if (temp & SMBERR_STS) { dev_dbg(&adap->dev, "Error: Failed bus transaction\n"); result = -ENXIO; } if (temp & SMBCOL_STS) { dev_err(&adap->dev, "Bus collision!\n"); result = -EAGAIN; } return result; } static void sis630_transaction_end(struct i2c_adapter *adap, u8 oldclock) { /* clear all status "sticky" bits */ sis630_write(SMB_STS, 0xFF); dev_dbg(&adap->dev, "SMB_CNT before clock restore 0x%02x\n", sis630_read(SMB_CNT)); /* * restore old Host Master Clock if high_clock is set * and oldclock was not 56KHz */ if (high_clock && !(oldclock & SMBCLK_SEL)) sis630_write(SMB_CNT, sis630_read(SMB_CNT) & ~SMBCLK_SEL); dev_dbg(&adap->dev, "SMB_CNT after clock restore 0x%02x\n", sis630_read(SMB_CNT)); } static int sis630_transaction(struct i2c_adapter *adap, int size) { int result = 0; u8 oldclock = 0; result = sis630_transaction_start(adap, size, &oldclock); if (!result) { result = sis630_transaction_wait(adap, size); sis630_transaction_end(adap, oldclock); } return result; } static int sis630_block_data(struct i2c_adapter *adap, union i2c_smbus_data *data, int read_write) { int i, len = 0, rc = 0; u8 oldclock = 0; if (read_write == I2C_SMBUS_WRITE) { len = data->block[0]; if (len < 0) len = 0; else if (len > 32) len = 32; sis630_write(SMB_COUNT, len); for (i = 1; i <= len; i++) { dev_dbg(&adap->dev, "set data 0x%02x\n", data->block[i]); /* set data */ sis630_write(SMB_BYTE + (i - 1) % 8, data->block[i]); if (i == 8 || (len < 8 && i == len)) { dev_dbg(&adap->dev, "start trans len=%d i=%d\n", len, i); /* first transaction */ rc = sis630_transaction_start(adap, SIS630_BLOCK_DATA, &oldclock); if (rc) return rc; } else if ((i - 1) % 8 == 7 || i == len) { dev_dbg(&adap->dev, "trans_wait len=%d i=%d\n", len, i); if (i > 8) { dev_dbg(&adap->dev, "clear smbary_sts" " len=%d i=%d\n", len, i); /* If this is not first transaction, we must clear sticky bit. 
clear SMBARY_STS */ sis630_write(SMB_STS, BYTE_DONE_STS); } rc = sis630_transaction_wait(adap, SIS630_BLOCK_DATA); if (rc) { dev_dbg(&adap->dev, "trans_wait failed\n"); break; } } } } else { /* read request */ data->block[0] = len = 0; rc = sis630_transaction_start(adap, SIS630_BLOCK_DATA, &oldclock); if (rc) return rc; do { rc = sis630_transaction_wait(adap, SIS630_BLOCK_DATA); if (rc) { dev_dbg(&adap->dev, "trans_wait failed\n"); break; } /* if this first transaction then read byte count */ if (len == 0) data->block[0] = sis630_read(SMB_COUNT); /* just to be sure */ if (data->block[0] > 32) data->block[0] = 32; dev_dbg(&adap->dev, "block data read len=0x%x\n", data->block[0]); for (i = 0; i < 8 && len < data->block[0]; i++, len++) { dev_dbg(&adap->dev, "read i=%d len=%d\n", i, len); data->block[len + 1] = sis630_read(SMB_BYTE + i); } dev_dbg(&adap->dev, "clear smbary_sts len=%d i=%d\n", len, i); /* clear SMBARY_STS */ sis630_write(SMB_STS, BYTE_DONE_STS); } while (len < data->block[0]); } sis630_transaction_end(adap, oldclock); return rc; } /* Return negative errno on error. */ static s32 sis630_access(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { int status; switch (size) { case I2C_SMBUS_QUICK: sis630_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01)); size = SIS630_QUICK; break; case I2C_SMBUS_BYTE: sis630_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01)); if (read_write == I2C_SMBUS_WRITE) sis630_write(SMB_CMD, command); size = SIS630_BYTE; break; case I2C_SMBUS_BYTE_DATA: sis630_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01)); sis630_write(SMB_CMD, command); if (read_write == I2C_SMBUS_WRITE) sis630_write(SMB_BYTE, data->byte); size = SIS630_BYTE_DATA; break; case I2C_SMBUS_PROC_CALL: case I2C_SMBUS_WORD_DATA: sis630_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01)); sis630_write(SMB_CMD, command); if (read_write == I2C_SMBUS_WRITE) { sis630_write(SMB_BYTE, data->word & 0xff); sis630_write(SMB_BYTE + 1, (data->word & 0xff00) >> 8); } size = (size == I2C_SMBUS_PROC_CALL ? 
SIS630_PCALL : SIS630_WORD_DATA); break; case I2C_SMBUS_BLOCK_DATA: sis630_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01)); sis630_write(SMB_CMD, command); size = SIS630_BLOCK_DATA; return sis630_block_data(adap, data, read_write); default: dev_warn(&adap->dev, "Unsupported transaction %d\n", size); return -EOPNOTSUPP; } status = sis630_transaction(adap, size); if (status) return status; if ((size != SIS630_PCALL) && ((read_write == I2C_SMBUS_WRITE) || (size == SIS630_QUICK))) { return 0; } switch (size) { case SIS630_BYTE: case SIS630_BYTE_DATA: data->byte = sis630_read(SMB_BYTE); break; case SIS630_PCALL: case SIS630_WORD_DATA: data->word = sis630_read(SMB_BYTE) + (sis630_read(SMB_BYTE + 1) << 8); break; } return 0; } static u32 sis630_func(struct i2c_adapter *adapter) { return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_PROC_CALL | I2C_FUNC_SMBUS_BLOCK_DATA; } static int sis630_setup(struct pci_dev *sis630_dev) { unsigned char b; struct pci_dev *dummy = NULL; int retval, i; /* acpi base address */ unsigned short acpi_base; /* check for supported SiS devices */ for (i = 0; supported[i] > 0; i++) { dummy = pci_get_device(PCI_VENDOR_ID_SI, supported[i], dummy); if (dummy) break; /* found */ } if (dummy) { pci_dev_put(dummy); } else if (force) { dev_err(&sis630_dev->dev, "WARNING: Can't detect SIS630 compatible device, but " "loading because of force option enabled\n"); } else { return -ENODEV; } /* Enable ACPI first , so we can accsess reg 74-75 in acpi io space and read acpi base addr */ if (pci_read_config_byte(sis630_dev, SIS630_BIOS_CTL_REG, &b)) { dev_err(&sis630_dev->dev, "Error: Can't read bios ctl reg\n"); retval = -ENODEV; goto exit; } /* if ACPI already enabled , do nothing */ if (!(b & 0x80) && pci_write_config_byte(sis630_dev, SIS630_BIOS_CTL_REG, b | 0x80)) { dev_err(&sis630_dev->dev, "Error: Can't enable ACPI\n"); retval = -ENODEV; goto exit; } /* Determine the ACPI base address */ if (pci_read_config_word(sis630_dev, SIS630_ACPI_BASE_REG, &acpi_base)) { dev_err(&sis630_dev->dev, "Error: Can't determine ACPI base address\n"); retval = -ENODEV; goto exit; } dev_dbg(&sis630_dev->dev, "ACPI base at 0x%04hx\n", acpi_base); if (supported[i] == PCI_DEVICE_ID_SI_760) smbus_base = acpi_base + 0xE0; else smbus_base = acpi_base + 0x80; dev_dbg(&sis630_dev->dev, "SMBus base at 0x%04hx\n", smbus_base); retval = acpi_check_region(smbus_base + SMB_STS, SIS630_SMB_IOREGION, sis630_driver.name); if (retval) goto exit; /* Everything is happy, let's grab the memory and set things up. 
*/ if (!request_region(smbus_base + SMB_STS, SIS630_SMB_IOREGION, sis630_driver.name)) { dev_err(&sis630_dev->dev, "I/O Region 0x%04x-0x%04x for SMBus already in use.\n", smbus_base + SMB_STS, smbus_base + SMB_STS + SIS630_SMB_IOREGION - 1); retval = -EBUSY; goto exit; } retval = 0; exit: if (retval) smbus_base = 0; return retval; } static const struct i2c_algorithm smbus_algorithm = { .smbus_xfer = sis630_access, .functionality = sis630_func, }; static struct i2c_adapter sis630_adapter = { .owner = THIS_MODULE, .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, .algo = &smbus_algorithm, .retries = 3 }; static const struct pci_device_id sis630_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) }, { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC) }, { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_964) }, { 0, } }; MODULE_DEVICE_TABLE(pci, sis630_ids); static int sis630_probe(struct pci_dev *dev, const struct pci_device_id *id) { if (sis630_setup(dev)) { dev_err(&dev->dev, "SIS630 compatible bus not detected, " "module not inserted.\n"); return -ENODEV; } /* set up the sysfs linkage to our parent device */ sis630_adapter.dev.parent = &dev->dev; snprintf(sis630_adapter.name, sizeof(sis630_adapter.name), "SMBus SIS630 adapter at %04x", smbus_base + SMB_STS); return i2c_add_adapter(&sis630_adapter); } static void sis630_remove(struct pci_dev *dev) { if (smbus_base) { i2c_del_adapter(&sis630_adapter); release_region(smbus_base + SMB_STS, SIS630_SMB_IOREGION); smbus_base = 0; } } static struct pci_driver sis630_driver = { .name = "sis630_smbus", .id_table = sis630_ids, .probe = sis630_probe, .remove = sis630_remove, }; module_pci_driver(sis630_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Alexander Malysh <[email protected]>"); MODULE_DESCRIPTION("SIS630 SMBus driver");
linux-master
drivers/i2c/busses/i2c-sis630.c
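sis630_access() maps I2C_SMBUS_BYTE_DATA onto the SMB_ADDR/SMB_CMD/SMB_BYTE registers, and the adapter registers with the HWMON and SPD classes, so a typical consumer is a byte-wise EEPROM or sensor read. The userspace sketch below shows such a transaction issued through i2c-dev; the bus number (/dev/i2c-0), the 0x50 device address and register 0x00 are assumptions for illustration, not anything mandated by the driver.

/* Userspace sketch: SMBus byte-data read via i2c-dev (assumed bus/address). */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

int main(void)
{
        union i2c_smbus_data data;
        struct i2c_smbus_ioctl_data args = {
                .read_write = I2C_SMBUS_READ,
                .command = 0x00,                /* register offset, assumed */
                .size = I2C_SMBUS_BYTE_DATA,
                .data = &data,
        };
        int fd = open("/dev/i2c-0", O_RDWR);    /* bus number is an assumption */

        if (fd < 0 || ioctl(fd, I2C_SLAVE, 0x50) < 0)   /* 0x50: e.g. SPD EEPROM */
                return 1;
        if (ioctl(fd, I2C_SMBUS, &args) < 0)
                return 1;
        printf("byte at 0x00: 0x%02x\n", data.byte);
        close(fd);
        return 0;
}

The same ioctl path reaches any SMBus adapter; for this driver it ends up in sis630_access() with size I2C_SMBUS_BYTE_DATA.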
// SPDX-License-Identifier: GPL-2.0-or-later /* * i2c-nforce2-s4985.c - i2c-nforce2 extras for the Tyan S4985 motherboard * * Copyright (C) 2008 Jean Delvare <[email protected]> */ /* * We select the channels by sending commands to the Philips * PCA9556 chip at I2C address 0x18. The main adapter is used for * the non-multiplexed part of the bus, and 4 virtual adapters * are defined for the multiplexed addresses: 0x50-0x53 (memory * module EEPROM) located on channels 1-4. We define one virtual * adapter per CPU, which corresponds to one multiplexed channel: * CPU0: virtual adapter 1, channel 1 * CPU1: virtual adapter 2, channel 2 * CPU2: virtual adapter 3, channel 3 * CPU3: virtual adapter 4, channel 4 */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/mutex.h> extern struct i2c_adapter *nforce2_smbus; static struct i2c_adapter *s4985_adapter; static struct i2c_algorithm *s4985_algo; /* Wrapper access functions for multiplexed SMBus */ static DEFINE_MUTEX(nforce2_lock); static s32 nforce2_access_virt0(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { int error; /* We exclude the multiplexed addresses */ if ((addr & 0xfc) == 0x50 || (addr & 0xfc) == 0x30 || addr == 0x18) return -ENXIO; mutex_lock(&nforce2_lock); error = nforce2_smbus->algo->smbus_xfer(adap, addr, flags, read_write, command, size, data); mutex_unlock(&nforce2_lock); return error; } /* We remember the last used channels combination so as to only switch channels when it is really needed. This greatly reduces the SMBus overhead, but also assumes that nobody will be writing to the PCA9556 in our back. */ static u8 last_channels; static inline s32 nforce2_access_channel(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data, u8 channels) { int error; /* We exclude the non-multiplexed addresses */ if ((addr & 0xfc) != 0x50 && (addr & 0xfc) != 0x30) return -ENXIO; mutex_lock(&nforce2_lock); if (last_channels != channels) { union i2c_smbus_data mplxdata; mplxdata.byte = channels; error = nforce2_smbus->algo->smbus_xfer(adap, 0x18, 0, I2C_SMBUS_WRITE, 0x01, I2C_SMBUS_BYTE_DATA, &mplxdata); if (error) goto UNLOCK; last_channels = channels; } error = nforce2_smbus->algo->smbus_xfer(adap, addr, flags, read_write, command, size, data); UNLOCK: mutex_unlock(&nforce2_lock); return error; } static s32 nforce2_access_virt1(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { /* CPU0: channel 1 enabled */ return nforce2_access_channel(adap, addr, flags, read_write, command, size, data, 0x02); } static s32 nforce2_access_virt2(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { /* CPU1: channel 2 enabled */ return nforce2_access_channel(adap, addr, flags, read_write, command, size, data, 0x04); } static s32 nforce2_access_virt3(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { /* CPU2: channel 3 enabled */ return nforce2_access_channel(adap, addr, flags, read_write, command, size, data, 0x08); } static s32 nforce2_access_virt4(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { /* CPU3: channel 4 enabled */ return 
nforce2_access_channel(adap, addr, flags, read_write, command, size, data, 0x10); } static int __init nforce2_s4985_init(void) { int i, error; union i2c_smbus_data ioconfig; if (!nforce2_smbus) return -ENODEV; /* Configure the PCA9556 multiplexer */ ioconfig.byte = 0x00; /* All I/O to output mode */ error = i2c_smbus_xfer(nforce2_smbus, 0x18, 0, I2C_SMBUS_WRITE, 0x03, I2C_SMBUS_BYTE_DATA, &ioconfig); if (error) { dev_err(&nforce2_smbus->dev, "PCA9556 configuration failed\n"); error = -EIO; goto ERROR0; } /* Unregister physical bus */ i2c_del_adapter(nforce2_smbus); printk(KERN_INFO "Enabling SMBus multiplexing for Tyan S4985\n"); /* Define the 5 virtual adapters and algorithms structures */ s4985_adapter = kcalloc(5, sizeof(struct i2c_adapter), GFP_KERNEL); if (!s4985_adapter) { error = -ENOMEM; goto ERROR1; } s4985_algo = kcalloc(5, sizeof(struct i2c_algorithm), GFP_KERNEL); if (!s4985_algo) { error = -ENOMEM; goto ERROR2; } /* Fill in the new structures */ s4985_algo[0] = *(nforce2_smbus->algo); s4985_algo[0].smbus_xfer = nforce2_access_virt0; s4985_adapter[0] = *nforce2_smbus; s4985_adapter[0].algo = s4985_algo; s4985_adapter[0].dev.parent = nforce2_smbus->dev.parent; for (i = 1; i < 5; i++) { s4985_algo[i] = *(nforce2_smbus->algo); s4985_adapter[i] = *nforce2_smbus; snprintf(s4985_adapter[i].name, sizeof(s4985_adapter[i].name), "SMBus nForce2 adapter (CPU%d)", i - 1); s4985_adapter[i].algo = s4985_algo + i; s4985_adapter[i].dev.parent = nforce2_smbus->dev.parent; } s4985_algo[1].smbus_xfer = nforce2_access_virt1; s4985_algo[2].smbus_xfer = nforce2_access_virt2; s4985_algo[3].smbus_xfer = nforce2_access_virt3; s4985_algo[4].smbus_xfer = nforce2_access_virt4; /* Register virtual adapters */ for (i = 0; i < 5; i++) { error = i2c_add_adapter(s4985_adapter + i); if (error) { printk(KERN_ERR "i2c-nforce2-s4985: " "Virtual adapter %d registration " "failed, module not inserted\n", i); for (i--; i >= 0; i--) i2c_del_adapter(s4985_adapter + i); goto ERROR3; } } return 0; ERROR3: kfree(s4985_algo); s4985_algo = NULL; ERROR2: kfree(s4985_adapter); s4985_adapter = NULL; ERROR1: /* Restore physical bus */ i2c_add_adapter(nforce2_smbus); ERROR0: return error; } static void __exit nforce2_s4985_exit(void) { if (s4985_adapter) { int i; for (i = 0; i < 5; i++) i2c_del_adapter(s4985_adapter+i); kfree(s4985_adapter); s4985_adapter = NULL; } kfree(s4985_algo); s4985_algo = NULL; /* Restore physical bus */ if (i2c_add_adapter(nforce2_smbus)) printk(KERN_ERR "i2c-nforce2-s4985: " "Physical bus restoration failed\n"); } MODULE_AUTHOR("Jean Delvare <[email protected]>"); MODULE_DESCRIPTION("S4985 SMBus multiplexing"); MODULE_LICENSE("GPL"); module_init(nforce2_s4985_init); module_exit(nforce2_s4985_exit);
linux-master
drivers/i2c/busses/i2c-nforce2-s4985.c
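nforce2_access_channel() switches memory channels by writing a one-bit mask to the PCA9556 output register (command 0x01) at address 0x18, after the init code has set the configuration register (0x03) to all-outputs. The sketch below replays that single channel-select write from userspace purely to illustrate the transaction; doing this while the driver is loaded would bypass its last_channels cache, and the bus number is an assumption.

/*
 * Illustration only: the raw SMBus write the wrapper issues to the PCA9556
 * when switching channels. The driver performs this internally and caches
 * last_channels, so issuing it behind the driver's back is not advisable.
 */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

static int pca9556_select(int fd, unsigned char channel_mask)
{
        union i2c_smbus_data data = { .byte = channel_mask };
        struct i2c_smbus_ioctl_data args = {
                .read_write = I2C_SMBUS_WRITE,
                .command = 0x01,        /* PCA9556 output port register */
                .size = I2C_SMBUS_BYTE_DATA,
                .data = &data,
        };

        if (ioctl(fd, I2C_SLAVE, 0x18) < 0)     /* multiplexer address */
                return -1;
        return ioctl(fd, I2C_SMBUS, &args);
}

int main(void)
{
        int fd = open("/dev/i2c-0", O_RDWR);    /* bus number is an assumption */

        if (fd < 0)
                return 1;
        /* CPU0 memory channel: mask 0x02, matching nforce2_access_virt1() */
        if (pca9556_select(fd, 0x02) < 0)
                return 1;
        close(fd);
        return 0;
}

Masks 0x02/0x04/0x08/0x10 correspond to nforce2_access_virt1() through nforce2_access_virt4(), i.e. CPU0 through CPU3.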
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/i2c.h> #include <linux/fs.h> #include <linux/io.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/jiffies.h> #include <linux/pci.h> #include <linux/mutex.h> #include <linux/ktime.h> #include <linux/slab.h> #define PCH_EVENT_SET 0 /* I2C Interrupt Event Set Status */ #define PCH_EVENT_NONE 1 /* I2C Interrupt Event Clear Status */ #define PCH_MAX_CLK 100000 /* Maximum Clock speed in MHz */ #define PCH_BUFFER_MODE_ENABLE 0x0002 /* flag for Buffer mode enable */ #define PCH_EEPROM_SW_RST_MODE_ENABLE 0x0008 /* EEPROM SW RST enable flag */ #define PCH_I2CSADR 0x00 /* I2C slave address register */ #define PCH_I2CCTL 0x04 /* I2C control register */ #define PCH_I2CSR 0x08 /* I2C status register */ #define PCH_I2CDR 0x0C /* I2C data register */ #define PCH_I2CMON 0x10 /* I2C bus monitor register */ #define PCH_I2CBC 0x14 /* I2C bus transfer rate setup counter */ #define PCH_I2CMOD 0x18 /* I2C mode register */ #define PCH_I2CBUFSLV 0x1C /* I2C buffer mode slave address register */ #define PCH_I2CBUFSUB 0x20 /* I2C buffer mode subaddress register */ #define PCH_I2CBUFFOR 0x24 /* I2C buffer mode format register */ #define PCH_I2CBUFCTL 0x28 /* I2C buffer mode control register */ #define PCH_I2CBUFMSK 0x2C /* I2C buffer mode interrupt mask register */ #define PCH_I2CBUFSTA 0x30 /* I2C buffer mode status register */ #define PCH_I2CBUFLEV 0x34 /* I2C buffer mode level register */ #define PCH_I2CESRFOR 0x38 /* EEPROM software reset mode format register */ #define PCH_I2CESRCTL 0x3C /* EEPROM software reset mode ctrl register */ #define PCH_I2CESRMSK 0x40 /* EEPROM software reset mode */ #define PCH_I2CESRSTA 0x44 /* EEPROM software reset mode status register */ #define PCH_I2CTMR 0x48 /* I2C timer register */ #define PCH_I2CSRST 0xFC /* I2C reset register */ #define PCH_I2CNF 0xF8 /* I2C noise filter register */ #define BUS_IDLE_TIMEOUT 20 #define PCH_I2CCTL_I2CMEN 0x0080 #define TEN_BIT_ADDR_DEFAULT 0xF000 #define TEN_BIT_ADDR_MASK 0xF0 #define PCH_START 0x0020 #define PCH_RESTART 0x0004 #define PCH_ESR_START 0x0001 #define PCH_BUFF_START 0x1 #define PCH_REPSTART 0x0004 #define PCH_ACK 0x0008 #define PCH_GETACK 0x0001 #define CLR_REG 0x0 #define I2C_RD 0x1 #define I2CMCF_BIT 0x0080 #define I2CMIF_BIT 0x0002 #define I2CMAL_BIT 0x0010 #define I2CBMFI_BIT 0x0001 #define I2CBMAL_BIT 0x0002 #define I2CBMNA_BIT 0x0004 #define I2CBMTO_BIT 0x0008 #define I2CBMIS_BIT 0x0010 #define I2CESRFI_BIT 0X0001 #define I2CESRTO_BIT 0x0002 #define I2CESRFIIE_BIT 0x1 #define I2CESRTOIE_BIT 0x2 #define I2CBMDZ_BIT 0x0040 #define I2CBMAG_BIT 0x0020 #define I2CMBB_BIT 0x0020 #define BUFFER_MODE_MASK (I2CBMFI_BIT | I2CBMAL_BIT | I2CBMNA_BIT | \ I2CBMTO_BIT | I2CBMIS_BIT) #define I2C_ADDR_MSK 0xFF #define I2C_MSB_2B_MSK 0x300 #define FAST_MODE_CLK 400 #define FAST_MODE_EN 0x0001 #define SUB_ADDR_LEN_MAX 4 #define BUF_LEN_MAX 32 #define PCH_BUFFER_MODE 0x1 #define EEPROM_SW_RST_MODE 0x0002 #define NORMAL_INTR_ENBL 0x0300 #define EEPROM_RST_INTR_ENBL (I2CESRFIIE_BIT | I2CESRTOIE_BIT) #define EEPROM_RST_INTR_DISBL 0x0 #define BUFFER_MODE_INTR_ENBL 0x001F #define BUFFER_MODE_INTR_DISBL 0x0 #define NORMAL_MODE 0x0 #define BUFFER_MODE 0x1 #define EEPROM_SR_MODE 0x2 #define I2C_TX_MODE 0x0010 #define PCH_BUF_TX 0xFFF7 #define PCH_BUF_RD 0x0008 #define I2C_ERROR_MASK (I2CESRTO_EVENT | I2CBMIS_EVENT | 
I2CBMTO_EVENT | \ I2CBMNA_EVENT | I2CBMAL_EVENT | I2CMAL_EVENT) #define I2CMAL_EVENT 0x0001 #define I2CMCF_EVENT 0x0002 #define I2CBMFI_EVENT 0x0004 #define I2CBMAL_EVENT 0x0008 #define I2CBMNA_EVENT 0x0010 #define I2CBMTO_EVENT 0x0020 #define I2CBMIS_EVENT 0x0040 #define I2CESRFI_EVENT 0x0080 #define I2CESRTO_EVENT 0x0100 #define PCI_DEVICE_ID_PCH_I2C 0x8817 #define pch_dbg(adap, fmt, arg...) \ dev_dbg(adap->pch_adapter.dev.parent, "%s :" fmt, __func__, ##arg) #define pch_err(adap, fmt, arg...) \ dev_err(adap->pch_adapter.dev.parent, "%s :" fmt, __func__, ##arg) #define pch_pci_err(pdev, fmt, arg...) \ dev_err(&pdev->dev, "%s :" fmt, __func__, ##arg) #define pch_pci_dbg(pdev, fmt, arg...) \ dev_dbg(&pdev->dev, "%s :" fmt, __func__, ##arg) /* Set the number of I2C instance max Intel EG20T PCH : 1ch LAPIS Semiconductor ML7213 IOH : 2ch LAPIS Semiconductor ML7831 IOH : 1ch */ #define PCH_I2C_MAX_DEV 2 /** * struct i2c_algo_pch_data - for I2C driver functionalities * @pch_adapter: stores the reference to i2c_adapter structure * @p_adapter_info: stores the reference to adapter_info structure * @pch_base_address: specifies the remapped base address * @pch_buff_mode_en: specifies if buffer mode is enabled * @pch_event_flag: specifies occurrence of interrupt events * @pch_i2c_xfer_in_progress: specifies whether the transfer is completed */ struct i2c_algo_pch_data { struct i2c_adapter pch_adapter; struct adapter_info *p_adapter_info; void __iomem *pch_base_address; int pch_buff_mode_en; u32 pch_event_flag; bool pch_i2c_xfer_in_progress; }; /** * struct adapter_info - This structure holds the adapter information for the * PCH i2c controller * @pch_data: stores a list of i2c_algo_pch_data * @pch_i2c_suspended: specifies whether the system is suspended or not * perhaps with more lines and words. * @ch_num: specifies the number of i2c instance * * pch_data has as many elements as maximum I2C channels */ struct adapter_info { struct i2c_algo_pch_data pch_data[PCH_I2C_MAX_DEV]; bool pch_i2c_suspended; int ch_num; }; static int pch_i2c_speed = 100; /* I2C bus speed in Kbps */ static int pch_clk = 50000; /* specifies I2C clock speed in KHz */ static wait_queue_head_t pch_event; static DEFINE_MUTEX(pch_mutex); /* Definition for ML7213 by LAPIS Semiconductor */ #define PCI_DEVICE_ID_ML7213_I2C 0x802D #define PCI_DEVICE_ID_ML7223_I2C 0x8010 #define PCI_DEVICE_ID_ML7831_I2C 0x8817 static const struct pci_device_id pch_pcidev_id[] = { { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_I2C), 1, }, { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_I2C), 2, }, { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_I2C), 1, }, { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_I2C), 1, }, {0,} }; MODULE_DEVICE_TABLE(pci, pch_pcidev_id); static irqreturn_t pch_i2c_handler(int irq, void *pData); static inline void pch_setbit(void __iomem *addr, u32 offset, u32 bitmask) { u32 val; val = ioread32(addr + offset); val |= bitmask; iowrite32(val, addr + offset); } static inline void pch_clrbit(void __iomem *addr, u32 offset, u32 bitmask) { u32 val; val = ioread32(addr + offset); val &= (~bitmask); iowrite32(val, addr + offset); } /** * pch_i2c_init() - hardware initialization of I2C module * @adap: Pointer to struct i2c_algo_pch_data. 
*/ static void pch_i2c_init(struct i2c_algo_pch_data *adap) { void __iomem *p = adap->pch_base_address; u32 pch_i2cbc; u32 pch_i2ctmr; u32 reg_value; /* reset I2C controller */ iowrite32(0x01, p + PCH_I2CSRST); msleep(20); iowrite32(0x0, p + PCH_I2CSRST); /* Initialize I2C registers */ iowrite32(0x21, p + PCH_I2CNF); pch_setbit(adap->pch_base_address, PCH_I2CCTL, PCH_I2CCTL_I2CMEN); if (pch_i2c_speed != 400) pch_i2c_speed = 100; reg_value = PCH_I2CCTL_I2CMEN; if (pch_i2c_speed == FAST_MODE_CLK) { reg_value |= FAST_MODE_EN; pch_dbg(adap, "Fast mode enabled\n"); } if (pch_clk > PCH_MAX_CLK) pch_clk = 62500; pch_i2cbc = (pch_clk + (pch_i2c_speed * 4)) / (pch_i2c_speed * 8); /* Set transfer speed in I2CBC */ iowrite32(pch_i2cbc, p + PCH_I2CBC); pch_i2ctmr = (pch_clk) / 8; iowrite32(pch_i2ctmr, p + PCH_I2CTMR); reg_value |= NORMAL_INTR_ENBL; /* Enable interrupts in normal mode */ iowrite32(reg_value, p + PCH_I2CCTL); pch_dbg(adap, "I2CCTL=%x pch_i2cbc=%x pch_i2ctmr=%x Enable interrupts\n", ioread32(p + PCH_I2CCTL), pch_i2cbc, pch_i2ctmr); init_waitqueue_head(&pch_event); } /** * pch_i2c_wait_for_bus_idle() - check the status of bus. * @adap: Pointer to struct i2c_algo_pch_data. * @timeout: waiting time counter (ms). */ static s32 pch_i2c_wait_for_bus_idle(struct i2c_algo_pch_data *adap, s32 timeout) { void __iomem *p = adap->pch_base_address; int schedule = 0; unsigned long end = jiffies + msecs_to_jiffies(timeout); while (ioread32(p + PCH_I2CSR) & I2CMBB_BIT) { if (time_after(jiffies, end)) { pch_dbg(adap, "I2CSR = %x\n", ioread32(p + PCH_I2CSR)); pch_err(adap, "%s: Timeout Error.return%d\n", __func__, -ETIME); pch_i2c_init(adap); return -ETIME; } if (!schedule) /* Retry after some usecs */ udelay(5); else /* Wait a bit more without consuming CPU */ usleep_range(20, 1000); schedule = 1; } return 0; } /** * pch_i2c_start() - Generate I2C start condition in normal mode. * @adap: Pointer to struct i2c_algo_pch_data. * * Generate I2C start condition in normal mode by setting I2CCTL.I2CMSTA to 1. */ static void pch_i2c_start(struct i2c_algo_pch_data *adap) { void __iomem *p = adap->pch_base_address; pch_dbg(adap, "I2CCTL = %x\n", ioread32(p + PCH_I2CCTL)); pch_setbit(adap->pch_base_address, PCH_I2CCTL, PCH_START); } /** * pch_i2c_stop() - generate stop condition in normal mode. * @adap: Pointer to struct i2c_algo_pch_data. */ static void pch_i2c_stop(struct i2c_algo_pch_data *adap) { void __iomem *p = adap->pch_base_address; pch_dbg(adap, "I2CCTL = %x\n", ioread32(p + PCH_I2CCTL)); /* clear the start bit */ pch_clrbit(adap->pch_base_address, PCH_I2CCTL, PCH_START); } static int pch_i2c_wait_for_check_xfer(struct i2c_algo_pch_data *adap) { long ret; void __iomem *p = adap->pch_base_address; ret = wait_event_timeout(pch_event, (adap->pch_event_flag != 0), msecs_to_jiffies(1000)); if (!ret) { pch_err(adap, "%s:wait-event timeout\n", __func__); adap->pch_event_flag = 0; pch_i2c_stop(adap); pch_i2c_init(adap); return -ETIMEDOUT; } if (adap->pch_event_flag & I2C_ERROR_MASK) { pch_err(adap, "Lost Arbitration\n"); adap->pch_event_flag = 0; pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMAL_BIT); pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMIF_BIT); pch_i2c_init(adap); return -EAGAIN; } adap->pch_event_flag = 0; if (ioread32(p + PCH_I2CSR) & PCH_GETACK) { pch_dbg(adap, "Receive NACK for slave address setting\n"); return -ENXIO; } return 0; } /** * pch_i2c_repstart() - generate repeated start condition in normal mode * @adap: Pointer to struct i2c_algo_pch_data. 
*/ static void pch_i2c_repstart(struct i2c_algo_pch_data *adap) { void __iomem *p = adap->pch_base_address; pch_dbg(adap, "I2CCTL = %x\n", ioread32(p + PCH_I2CCTL)); pch_setbit(adap->pch_base_address, PCH_I2CCTL, PCH_REPSTART); } /** * pch_i2c_writebytes() - write data to I2C bus in normal mode * @i2c_adap: Pointer to the struct i2c_adapter. * @msgs: Pointer to the i2c message structure. * @last: specifies whether last message or not. * In the case of compound mode it will be 1 for last message, * otherwise 0. * @first: specifies whether first message or not. * 1 for first message otherwise 0. */ static s32 pch_i2c_writebytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, u32 last, u32 first) { struct i2c_algo_pch_data *adap = i2c_adap->algo_data; u8 *buf; u32 length; u32 addr; u32 addr_2_msb; u32 addr_8_lsb; s32 wrcount; s32 rtn; void __iomem *p = adap->pch_base_address; length = msgs->len; buf = msgs->buf; addr = msgs->addr; /* enable master tx */ pch_setbit(adap->pch_base_address, PCH_I2CCTL, I2C_TX_MODE); pch_dbg(adap, "I2CCTL = %x msgs->len = %d\n", ioread32(p + PCH_I2CCTL), length); if (first) { if (pch_i2c_wait_for_bus_idle(adap, BUS_IDLE_TIMEOUT) == -ETIME) return -ETIME; } if (msgs->flags & I2C_M_TEN) { addr_2_msb = ((addr & I2C_MSB_2B_MSK) >> 7) & 0x06; iowrite32(addr_2_msb | TEN_BIT_ADDR_MASK, p + PCH_I2CDR); if (first) pch_i2c_start(adap); rtn = pch_i2c_wait_for_check_xfer(adap); if (rtn) return rtn; addr_8_lsb = (addr & I2C_ADDR_MSK); iowrite32(addr_8_lsb, p + PCH_I2CDR); } else { /* set 7 bit slave address and R/W bit as 0 */ iowrite32(i2c_8bit_addr_from_msg(msgs), p + PCH_I2CDR); if (first) pch_i2c_start(adap); } rtn = pch_i2c_wait_for_check_xfer(adap); if (rtn) return rtn; for (wrcount = 0; wrcount < length; ++wrcount) { /* write buffer value to I2C data register */ iowrite32(buf[wrcount], p + PCH_I2CDR); pch_dbg(adap, "writing %x to Data register\n", buf[wrcount]); rtn = pch_i2c_wait_for_check_xfer(adap); if (rtn) return rtn; pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMCF_BIT); pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMIF_BIT); } /* check if this is the last message */ if (last) pch_i2c_stop(adap); else pch_i2c_repstart(adap); pch_dbg(adap, "return=%d\n", wrcount); return wrcount; } /** * pch_i2c_sendack() - send ACK * @adap: Pointer to struct i2c_algo_pch_data. */ static void pch_i2c_sendack(struct i2c_algo_pch_data *adap) { void __iomem *p = adap->pch_base_address; pch_dbg(adap, "I2CCTL = %x\n", ioread32(p + PCH_I2CCTL)); pch_clrbit(adap->pch_base_address, PCH_I2CCTL, PCH_ACK); } /** * pch_i2c_sendnack() - send NACK * @adap: Pointer to struct i2c_algo_pch_data. */ static void pch_i2c_sendnack(struct i2c_algo_pch_data *adap) { void __iomem *p = adap->pch_base_address; pch_dbg(adap, "I2CCTL = %x\n", ioread32(p + PCH_I2CCTL)); pch_setbit(adap->pch_base_address, PCH_I2CCTL, PCH_ACK); } /** * pch_i2c_restart() - Generate I2C restart condition in normal mode. * @adap: Pointer to struct i2c_algo_pch_data. * * Generate I2C restart condition in normal mode by setting I2CCTL.I2CRSTA. */ static void pch_i2c_restart(struct i2c_algo_pch_data *adap) { void __iomem *p = adap->pch_base_address; pch_dbg(adap, "I2CCTL = %x\n", ioread32(p + PCH_I2CCTL)); pch_setbit(adap->pch_base_address, PCH_I2CCTL, PCH_RESTART); } /** * pch_i2c_readbytes() - read data from I2C bus in normal mode. * @i2c_adap: Pointer to the struct i2c_adapter. * @msgs: Pointer to i2c_msg structure. * @last: specifies whether last message or not. * @first: specifies whether first message or not. 
*/ static s32 pch_i2c_readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, u32 last, u32 first) { struct i2c_algo_pch_data *adap = i2c_adap->algo_data; u8 *buf; u32 count; u32 length; u32 addr; u32 addr_2_msb; u32 addr_8_lsb; void __iomem *p = adap->pch_base_address; s32 rtn; length = msgs->len; buf = msgs->buf; addr = msgs->addr; /* enable master reception */ pch_clrbit(adap->pch_base_address, PCH_I2CCTL, I2C_TX_MODE); if (first) { if (pch_i2c_wait_for_bus_idle(adap, BUS_IDLE_TIMEOUT) == -ETIME) return -ETIME; } if (msgs->flags & I2C_M_TEN) { addr_2_msb = ((addr & I2C_MSB_2B_MSK) >> 7); iowrite32(addr_2_msb | TEN_BIT_ADDR_MASK, p + PCH_I2CDR); if (first) pch_i2c_start(adap); rtn = pch_i2c_wait_for_check_xfer(adap); if (rtn) return rtn; addr_8_lsb = (addr & I2C_ADDR_MSK); iowrite32(addr_8_lsb, p + PCH_I2CDR); pch_i2c_restart(adap); rtn = pch_i2c_wait_for_check_xfer(adap); if (rtn) return rtn; addr_2_msb |= I2C_RD; iowrite32(addr_2_msb | TEN_BIT_ADDR_MASK, p + PCH_I2CDR); } else { /* 7 address bits + R/W bit */ iowrite32(i2c_8bit_addr_from_msg(msgs), p + PCH_I2CDR); } /* check if it is the first message */ if (first) pch_i2c_start(adap); rtn = pch_i2c_wait_for_check_xfer(adap); if (rtn) return rtn; if (length == 0) { pch_i2c_stop(adap); ioread32(p + PCH_I2CDR); /* Dummy read needs */ count = length; } else { int read_index; int loop; pch_i2c_sendack(adap); /* Dummy read */ for (loop = 1, read_index = 0; loop < length; loop++) { buf[read_index] = ioread32(p + PCH_I2CDR); if (loop != 1) read_index++; rtn = pch_i2c_wait_for_check_xfer(adap); if (rtn) return rtn; } /* end for */ pch_i2c_sendnack(adap); buf[read_index] = ioread32(p + PCH_I2CDR); /* Read final - 1 */ if (length != 1) read_index++; rtn = pch_i2c_wait_for_check_xfer(adap); if (rtn) return rtn; if (last) pch_i2c_stop(adap); else pch_i2c_repstart(adap); buf[read_index++] = ioread32(p + PCH_I2CDR); /* Read Final */ count = read_index; } return count; } /** * pch_i2c_cb() - Interrupt handler Call back function * @adap: Pointer to struct i2c_algo_pch_data. */ static void pch_i2c_cb(struct i2c_algo_pch_data *adap) { u32 sts; void __iomem *p = adap->pch_base_address; sts = ioread32(p + PCH_I2CSR); sts &= (I2CMAL_BIT | I2CMCF_BIT | I2CMIF_BIT); if (sts & I2CMAL_BIT) adap->pch_event_flag |= I2CMAL_EVENT; if (sts & I2CMCF_BIT) adap->pch_event_flag |= I2CMCF_EVENT; /* clear the applicable bits */ pch_clrbit(adap->pch_base_address, PCH_I2CSR, sts); pch_dbg(adap, "PCH_I2CSR = %x\n", ioread32(p + PCH_I2CSR)); wake_up(&pch_event); } /** * pch_i2c_handler() - interrupt handler for the PCH I2C controller * @irq: irq number. * @pData: cookie passed back to the handler function. */ static irqreturn_t pch_i2c_handler(int irq, void *pData) { u32 reg_val; int flag; int i; struct adapter_info *adap_info = pData; void __iomem *p; u32 mode; for (i = 0, flag = 0; i < adap_info->ch_num; i++) { p = adap_info->pch_data[i].pch_base_address; mode = ioread32(p + PCH_I2CMOD); mode &= BUFFER_MODE | EEPROM_SR_MODE; if (mode != NORMAL_MODE) { pch_err(adap_info->pch_data, "I2C-%d mode(%d) is not supported\n", mode, i); continue; } reg_val = ioread32(p + PCH_I2CSR); if (reg_val & (I2CMAL_BIT | I2CMCF_BIT | I2CMIF_BIT)) { pch_i2c_cb(&adap_info->pch_data[i]); flag = 1; } } return flag ? IRQ_HANDLED : IRQ_NONE; } /** * pch_i2c_xfer() - Reading adnd writing data through I2C bus * @i2c_adap: Pointer to the struct i2c_adapter. * @msgs: Pointer to i2c_msg structure. * @num: number of messages. 
*/ static s32 pch_i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, s32 num) { struct i2c_msg *pmsg; u32 i = 0; u32 status; s32 ret; struct i2c_algo_pch_data *adap = i2c_adap->algo_data; ret = mutex_lock_interruptible(&pch_mutex); if (ret) return ret; if (adap->p_adapter_info->pch_i2c_suspended) { mutex_unlock(&pch_mutex); return -EBUSY; } pch_dbg(adap, "adap->p_adapter_info->pch_i2c_suspended is %d\n", adap->p_adapter_info->pch_i2c_suspended); /* transfer not completed */ adap->pch_i2c_xfer_in_progress = true; for (i = 0; i < num && ret >= 0; i++) { pmsg = &msgs[i]; pmsg->flags |= adap->pch_buff_mode_en; status = pmsg->flags; pch_dbg(adap, "After invoking I2C_MODE_SEL :flag= 0x%x\n", status); if ((status & (I2C_M_RD)) != false) { ret = pch_i2c_readbytes(i2c_adap, pmsg, (i + 1 == num), (i == 0)); } else { ret = pch_i2c_writebytes(i2c_adap, pmsg, (i + 1 == num), (i == 0)); } } adap->pch_i2c_xfer_in_progress = false; /* transfer completed */ mutex_unlock(&pch_mutex); return (ret < 0) ? ret : num; } /** * pch_i2c_func() - return the functionality of the I2C driver * @adap: Pointer to struct i2c_algo_pch_data. */ static u32 pch_i2c_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR; } static const struct i2c_algorithm pch_algorithm = { .master_xfer = pch_i2c_xfer, .functionality = pch_i2c_func }; /** * pch_i2c_disbl_int() - Disable PCH I2C interrupts * @adap: Pointer to struct i2c_algo_pch_data. */ static void pch_i2c_disbl_int(struct i2c_algo_pch_data *adap) { void __iomem *p = adap->pch_base_address; pch_clrbit(adap->pch_base_address, PCH_I2CCTL, NORMAL_INTR_ENBL); iowrite32(EEPROM_RST_INTR_DISBL, p + PCH_I2CESRMSK); iowrite32(BUFFER_MODE_INTR_DISBL, p + PCH_I2CBUFMSK); } static int pch_i2c_probe(struct pci_dev *pdev, const struct pci_device_id *id) { void __iomem *base_addr; int ret; int i, j; struct adapter_info *adap_info; struct i2c_adapter *pch_adap; pch_pci_dbg(pdev, "Entered.\n"); adap_info = kzalloc((sizeof(struct adapter_info)), GFP_KERNEL); if (adap_info == NULL) return -ENOMEM; ret = pci_enable_device(pdev); if (ret) { pch_pci_err(pdev, "pci_enable_device FAILED\n"); goto err_pci_enable; } ret = pci_request_regions(pdev, KBUILD_MODNAME); if (ret) { pch_pci_err(pdev, "pci_request_regions FAILED\n"); goto err_pci_req; } base_addr = pci_iomap(pdev, 1, 0); if (base_addr == NULL) { pch_pci_err(pdev, "pci_iomap FAILED\n"); ret = -ENOMEM; goto err_pci_iomap; } /* Set the number of I2C channel instance */ adap_info->ch_num = id->driver_data; for (i = 0; i < adap_info->ch_num; i++) { pch_adap = &adap_info->pch_data[i].pch_adapter; adap_info->pch_i2c_suspended = false; adap_info->pch_data[i].p_adapter_info = adap_info; pch_adap->owner = THIS_MODULE; pch_adap->class = I2C_CLASS_HWMON; strscpy(pch_adap->name, KBUILD_MODNAME, sizeof(pch_adap->name)); pch_adap->algo = &pch_algorithm; pch_adap->algo_data = &adap_info->pch_data[i]; /* base_addr + offset; */ adap_info->pch_data[i].pch_base_address = base_addr + 0x100 * i; pch_adap->dev.of_node = pdev->dev.of_node; pch_adap->dev.parent = &pdev->dev; } ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED, KBUILD_MODNAME, adap_info); if (ret) { pch_pci_err(pdev, "request_irq FAILED\n"); goto err_request_irq; } for (i = 0; i < adap_info->ch_num; i++) { pch_adap = &adap_info->pch_data[i].pch_adapter; pch_i2c_init(&adap_info->pch_data[i]); pch_adap->nr = i; ret = i2c_add_numbered_adapter(pch_adap); if (ret) { pch_pci_err(pdev, "i2c_add_adapter[ch:%d] FAILED\n", i); goto err_add_adapter; } } 
pci_set_drvdata(pdev, adap_info); pch_pci_dbg(pdev, "returns %d.\n", ret); return 0; err_add_adapter: for (j = 0; j < i; j++) i2c_del_adapter(&adap_info->pch_data[j].pch_adapter); free_irq(pdev->irq, adap_info); err_request_irq: pci_iounmap(pdev, base_addr); err_pci_iomap: pci_release_regions(pdev); err_pci_req: pci_disable_device(pdev); err_pci_enable: kfree(adap_info); return ret; } static void pch_i2c_remove(struct pci_dev *pdev) { int i; struct adapter_info *adap_info = pci_get_drvdata(pdev); free_irq(pdev->irq, adap_info); for (i = 0; i < adap_info->ch_num; i++) { pch_i2c_disbl_int(&adap_info->pch_data[i]); i2c_del_adapter(&adap_info->pch_data[i].pch_adapter); } if (adap_info->pch_data[0].pch_base_address) pci_iounmap(pdev, adap_info->pch_data[0].pch_base_address); for (i = 0; i < adap_info->ch_num; i++) adap_info->pch_data[i].pch_base_address = NULL; pci_release_regions(pdev); pci_disable_device(pdev); kfree(adap_info); } static int __maybe_unused pch_i2c_suspend(struct device *dev) { int i; struct pci_dev *pdev = to_pci_dev(dev); struct adapter_info *adap_info = pci_get_drvdata(pdev); void __iomem *p = adap_info->pch_data[0].pch_base_address; adap_info->pch_i2c_suspended = true; for (i = 0; i < adap_info->ch_num; i++) { while ((adap_info->pch_data[i].pch_i2c_xfer_in_progress)) { /* Wait until all channel transfers are completed */ msleep(20); } } /* Disable the i2c interrupts */ for (i = 0; i < adap_info->ch_num; i++) pch_i2c_disbl_int(&adap_info->pch_data[i]); pch_pci_dbg(pdev, "I2CSR = %x I2CBUFSTA = %x I2CESRSTA = %x " "invoked function pch_i2c_disbl_int successfully\n", ioread32(p + PCH_I2CSR), ioread32(p + PCH_I2CBUFSTA), ioread32(p + PCH_I2CESRSTA)); return 0; } static int __maybe_unused pch_i2c_resume(struct device *dev) { int i; struct adapter_info *adap_info = dev_get_drvdata(dev); for (i = 0; i < adap_info->ch_num; i++) pch_i2c_init(&adap_info->pch_data[i]); adap_info->pch_i2c_suspended = false; return 0; } static SIMPLE_DEV_PM_OPS(pch_i2c_pm_ops, pch_i2c_suspend, pch_i2c_resume); static struct pci_driver pch_pcidriver = { .name = KBUILD_MODNAME, .id_table = pch_pcidev_id, .probe = pch_i2c_probe, .remove = pch_i2c_remove, .driver.pm = &pch_i2c_pm_ops, }; module_pci_driver(pch_pcidriver); MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semico ML7213/ML7223/ML7831 IOH I2C"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Tomoya MORINAGA. <[email protected]>"); module_param(pch_i2c_speed, int, (S_IRUSR | S_IWUSR)); module_param(pch_clk, int, (S_IRUSR | S_IWUSR));
linux-master
drivers/i2c/busses/i2c-eg20t.c
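/*
 * Editorial sketch (not part of i2c-eg20t.c): the driver's pch_i2c_xfer()
 * walks an i2c_msg array and marks the first and last message so a START
 * is issued only once and the transaction ends with a STOP (or repeated
 * START).  From user space the same combined write-then-read can be
 * submitted through the i2c-dev I2C_RDWR ioctl.  The device path, slave
 * address and register number below are hypothetical examples.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

int main(void)
{
	int fd = open("/dev/i2c-0", O_RDWR);	/* hypothetical adapter */
	uint8_t reg = 0x10;			/* hypothetical register index */
	uint8_t val;
	struct i2c_msg msgs[2];
	struct i2c_rdwr_ioctl_data xfer;

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* msg 0: write the register index (the driver sees first == 1) */
	msgs[0].addr = 0x50;			/* hypothetical 7-bit address */
	msgs[0].flags = 0;
	msgs[0].len = 1;
	msgs[0].buf = &reg;

	/* msg 1: read one byte back (last == 1, so the driver sends STOP) */
	msgs[1].addr = 0x50;
	msgs[1].flags = I2C_M_RD;
	msgs[1].len = 1;
	msgs[1].buf = &val;

	xfer.msgs = msgs;
	xfer.nmsgs = 2;

	if (ioctl(fd, I2C_RDWR, &xfer) < 0) {
		perror("I2C_RDWR");
		close(fd);
		return 1;
	}

	printf("reg 0x%02x = 0x%02x\n", reg, val);
	close(fd);
	return 0;
}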
/* * Provides I2C support for Philips PNX010x/PNX4008 boards. * * Authors: Dennis Kovalev <[email protected]> * Vitaly Wool <[email protected]> * * 2004-2006 (c) MontaVista Software, Inc. This file is licensed under * the terms of the GNU General Public License version 2. This program * is licensed "as is" without any warranty of any kind, whether express * or implied. */ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/timer.h> #include <linux/completion.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/slab.h> #include <linux/of.h> #define I2C_PNX_TIMEOUT_DEFAULT 10 /* msec */ #define I2C_PNX_SPEED_KHZ_DEFAULT 100 #define I2C_PNX_REGION_SIZE 0x100 struct i2c_pnx_mif { int ret; /* Return value */ int mode; /* Interface mode */ struct completion complete; /* I/O completion */ struct timer_list timer; /* Timeout */ u8 * buf; /* Data buffer */ int len; /* Length of data buffer */ int order; /* RX Bytes to order via TX */ }; struct i2c_pnx_algo_data { void __iomem *ioaddr; struct i2c_pnx_mif mif; int last; struct clk *clk; struct i2c_adapter adapter; int irq; u32 timeout; }; enum { mstatus_tdi = 0x00000001, mstatus_afi = 0x00000002, mstatus_nai = 0x00000004, mstatus_drmi = 0x00000008, mstatus_active = 0x00000020, mstatus_scl = 0x00000040, mstatus_sda = 0x00000080, mstatus_rff = 0x00000100, mstatus_rfe = 0x00000200, mstatus_tff = 0x00000400, mstatus_tfe = 0x00000800, }; enum { mcntrl_tdie = 0x00000001, mcntrl_afie = 0x00000002, mcntrl_naie = 0x00000004, mcntrl_drmie = 0x00000008, mcntrl_drsie = 0x00000010, mcntrl_rffie = 0x00000020, mcntrl_daie = 0x00000040, mcntrl_tffie = 0x00000080, mcntrl_reset = 0x00000100, mcntrl_cdbmode = 0x00000400, }; enum { rw_bit = 1 << 0, start_bit = 1 << 8, stop_bit = 1 << 9, }; #define I2C_REG_RX(a) ((a)->ioaddr) /* Rx FIFO reg (RO) */ #define I2C_REG_TX(a) ((a)->ioaddr) /* Tx FIFO reg (WO) */ #define I2C_REG_STS(a) ((a)->ioaddr + 0x04) /* Status reg (RO) */ #define I2C_REG_CTL(a) ((a)->ioaddr + 0x08) /* Ctl reg */ #define I2C_REG_CKL(a) ((a)->ioaddr + 0x0c) /* Clock divider low */ #define I2C_REG_CKH(a) ((a)->ioaddr + 0x10) /* Clock divider high */ #define I2C_REG_ADR(a) ((a)->ioaddr + 0x14) /* I2C address */ #define I2C_REG_RFL(a) ((a)->ioaddr + 0x18) /* Rx FIFO level (RO) */ #define I2C_REG_TFL(a) ((a)->ioaddr + 0x1c) /* Tx FIFO level (RO) */ #define I2C_REG_RXB(a) ((a)->ioaddr + 0x20) /* Num of bytes Rx-ed (RO) */ #define I2C_REG_TXB(a) ((a)->ioaddr + 0x24) /* Num of bytes Tx-ed (RO) */ #define I2C_REG_TXS(a) ((a)->ioaddr + 0x28) /* Tx slave FIFO (RO) */ #define I2C_REG_STFL(a) ((a)->ioaddr + 0x2c) /* Tx slave FIFO level (RO) */ static inline int wait_timeout(struct i2c_pnx_algo_data *data) { long timeout = data->timeout; while (timeout > 0 && (ioread32(I2C_REG_STS(data)) & mstatus_active)) { mdelay(1); timeout--; } return (timeout <= 0); } static inline int wait_reset(struct i2c_pnx_algo_data *data) { long timeout = data->timeout; while (timeout > 0 && (ioread32(I2C_REG_CTL(data)) & mcntrl_reset)) { mdelay(1); timeout--; } return (timeout <= 0); } static inline void i2c_pnx_arm_timer(struct i2c_pnx_algo_data *alg_data) { struct timer_list *timer = &alg_data->mif.timer; unsigned long expires = msecs_to_jiffies(alg_data->timeout); if (expires <= 1) expires = 2; del_timer_sync(timer); dev_dbg(&alg_data->adapter.dev, "Timer armed at %lu plus %lu jiffies.\n", jiffies, expires); timer->expires = 
jiffies + expires; add_timer(timer); } /** * i2c_pnx_start - start a device * @slave_addr: slave address * @alg_data: pointer to local driver data structure * * Generate a START signal in the desired mode. */ static int i2c_pnx_start(unsigned char slave_addr, struct i2c_pnx_algo_data *alg_data) { dev_dbg(&alg_data->adapter.dev, "%s(): addr 0x%x mode %d\n", __func__, slave_addr, alg_data->mif.mode); /* Check for 7 bit slave addresses only */ if (slave_addr & ~0x7f) { dev_err(&alg_data->adapter.dev, "%s: Invalid slave address %x. Only 7-bit addresses are supported\n", alg_data->adapter.name, slave_addr); return -EINVAL; } /* First, make sure bus is idle */ if (wait_timeout(alg_data)) { /* Somebody else is monopolizing the bus */ dev_err(&alg_data->adapter.dev, "%s: Bus busy. Slave addr = %02x, cntrl = %x, stat = %x\n", alg_data->adapter.name, slave_addr, ioread32(I2C_REG_CTL(alg_data)), ioread32(I2C_REG_STS(alg_data))); return -EBUSY; } else if (ioread32(I2C_REG_STS(alg_data)) & mstatus_afi) { /* Sorry, we lost the bus */ dev_err(&alg_data->adapter.dev, "%s: Arbitration failure. Slave addr = %02x\n", alg_data->adapter.name, slave_addr); return -EIO; } /* * OK, I2C is enabled and we have the bus. * Clear the current TDI and AFI status flags. */ iowrite32(ioread32(I2C_REG_STS(alg_data)) | mstatus_tdi | mstatus_afi, I2C_REG_STS(alg_data)); dev_dbg(&alg_data->adapter.dev, "%s(): sending %#x\n", __func__, (slave_addr << 1) | start_bit | alg_data->mif.mode); /* Write the slave address, START bit and R/W bit */ iowrite32((slave_addr << 1) | start_bit | alg_data->mif.mode, I2C_REG_TX(alg_data)); dev_dbg(&alg_data->adapter.dev, "%s(): exit\n", __func__); return 0; } /** * i2c_pnx_stop - stop a device * @alg_data: pointer to local driver data structure * * Generate a STOP signal to terminate the master transaction. */ static void i2c_pnx_stop(struct i2c_pnx_algo_data *alg_data) { /* Only 1 msec max timeout due to interrupt context */ long timeout = 1000; dev_dbg(&alg_data->adapter.dev, "%s(): entering: stat = %04x.\n", __func__, ioread32(I2C_REG_STS(alg_data))); /* Write a STOP bit to TX FIFO */ iowrite32(0xff | stop_bit, I2C_REG_TX(alg_data)); /* Wait until the STOP is seen. */ while (timeout > 0 && (ioread32(I2C_REG_STS(alg_data)) & mstatus_active)) { /* may be called from interrupt context */ udelay(1); timeout--; } dev_dbg(&alg_data->adapter.dev, "%s(): exiting: stat = %04x.\n", __func__, ioread32(I2C_REG_STS(alg_data))); } /** * i2c_pnx_master_xmit - transmit data to slave * @alg_data: pointer to local driver data structure * * Sends one byte of data to the slave */ static int i2c_pnx_master_xmit(struct i2c_pnx_algo_data *alg_data) { u32 val; dev_dbg(&alg_data->adapter.dev, "%s(): entering: stat = %04x.\n", __func__, ioread32(I2C_REG_STS(alg_data))); if (alg_data->mif.len > 0) { /* We still have something to talk about... */ val = *alg_data->mif.buf++; if (alg_data->mif.len == 1) val |= stop_bit; alg_data->mif.len--; iowrite32(val, I2C_REG_TX(alg_data)); dev_dbg(&alg_data->adapter.dev, "%s(): xmit %#x [%d]\n", __func__, val, alg_data->mif.len + 1); if (alg_data->mif.len == 0) { if (alg_data->last) { /* Wait until the STOP is seen. 
*/ if (wait_timeout(alg_data)) dev_err(&alg_data->adapter.dev, "The bus is still active after timeout\n"); } /* Disable master interrupts */ iowrite32(ioread32(I2C_REG_CTL(alg_data)) & ~(mcntrl_afie | mcntrl_naie | mcntrl_drmie), I2C_REG_CTL(alg_data)); del_timer_sync(&alg_data->mif.timer); dev_dbg(&alg_data->adapter.dev, "%s(): Waking up xfer routine.\n", __func__); complete(&alg_data->mif.complete); } } else if (alg_data->mif.len == 0) { /* zero-sized transfer */ i2c_pnx_stop(alg_data); /* Disable master interrupts. */ iowrite32(ioread32(I2C_REG_CTL(alg_data)) & ~(mcntrl_afie | mcntrl_naie | mcntrl_drmie), I2C_REG_CTL(alg_data)); /* Stop timer. */ del_timer_sync(&alg_data->mif.timer); dev_dbg(&alg_data->adapter.dev, "%s(): Waking up xfer routine after zero-xfer.\n", __func__); complete(&alg_data->mif.complete); } dev_dbg(&alg_data->adapter.dev, "%s(): exiting: stat = %04x.\n", __func__, ioread32(I2C_REG_STS(alg_data))); return 0; } /** * i2c_pnx_master_rcv - receive data from slave * @alg_data: pointer to local driver data structure * * Reads one byte data from the slave */ static int i2c_pnx_master_rcv(struct i2c_pnx_algo_data *alg_data) { unsigned int val = 0; u32 ctl = 0; dev_dbg(&alg_data->adapter.dev, "%s(): entering: stat = %04x.\n", __func__, ioread32(I2C_REG_STS(alg_data))); /* Check, whether there is already data, * or we didn't 'ask' for it yet. */ if (ioread32(I2C_REG_STS(alg_data)) & mstatus_rfe) { /* 'Asking' is done asynchronously, e.g. dummy TX of several * bytes is done before the first actual RX arrives in FIFO. * Therefore, ordered bytes (via TX) are counted separately. */ if (alg_data->mif.order) { dev_dbg(&alg_data->adapter.dev, "%s(): Write dummy data to fill Rx-fifo...\n", __func__); if (alg_data->mif.order == 1) { /* Last byte, do not acknowledge next rcv. */ val |= stop_bit; /* * Enable interrupt RFDAIE (data in Rx fifo), * and disable DRMIE (need data for Tx) */ ctl = ioread32(I2C_REG_CTL(alg_data)); ctl |= mcntrl_rffie | mcntrl_daie; ctl &= ~mcntrl_drmie; iowrite32(ctl, I2C_REG_CTL(alg_data)); } /* * Now we'll 'ask' for data: * For each byte we want to receive, we must * write a (dummy) byte to the Tx-FIFO. */ iowrite32(val, I2C_REG_TX(alg_data)); alg_data->mif.order--; } return 0; } /* Handle data. */ if (alg_data->mif.len > 0) { val = ioread32(I2C_REG_RX(alg_data)); *alg_data->mif.buf++ = (u8) (val & 0xff); dev_dbg(&alg_data->adapter.dev, "%s(): rcv 0x%x [%d]\n", __func__, val, alg_data->mif.len); alg_data->mif.len--; if (alg_data->mif.len == 0) { if (alg_data->last) /* Wait until the STOP is seen. */ if (wait_timeout(alg_data)) dev_err(&alg_data->adapter.dev, "The bus is still active after timeout\n"); /* Disable master interrupts */ ctl = ioread32(I2C_REG_CTL(alg_data)); ctl &= ~(mcntrl_afie | mcntrl_naie | mcntrl_rffie | mcntrl_drmie | mcntrl_daie); iowrite32(ctl, I2C_REG_CTL(alg_data)); /* Kill timer. 
*/ del_timer_sync(&alg_data->mif.timer); complete(&alg_data->mif.complete); } } dev_dbg(&alg_data->adapter.dev, "%s(): exiting: stat = %04x.\n", __func__, ioread32(I2C_REG_STS(alg_data))); return 0; } static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id) { struct i2c_pnx_algo_data *alg_data = dev_id; u32 stat, ctl; dev_dbg(&alg_data->adapter.dev, "%s(): mstat = %x mctrl = %x, mode = %d\n", __func__, ioread32(I2C_REG_STS(alg_data)), ioread32(I2C_REG_CTL(alg_data)), alg_data->mif.mode); stat = ioread32(I2C_REG_STS(alg_data)); /* let's see what kind of event this is */ if (stat & mstatus_afi) { /* We lost arbitration in the midst of a transfer */ alg_data->mif.ret = -EIO; /* Disable master interrupts. */ ctl = ioread32(I2C_REG_CTL(alg_data)); ctl &= ~(mcntrl_afie | mcntrl_naie | mcntrl_rffie | mcntrl_drmie); iowrite32(ctl, I2C_REG_CTL(alg_data)); /* Stop timer, to prevent timeout. */ del_timer_sync(&alg_data->mif.timer); complete(&alg_data->mif.complete); } else if (stat & mstatus_nai) { /* Slave did not acknowledge, generate a STOP */ dev_dbg(&alg_data->adapter.dev, "%s(): Slave did not acknowledge, generating a STOP.\n", __func__); i2c_pnx_stop(alg_data); /* Disable master interrupts. */ ctl = ioread32(I2C_REG_CTL(alg_data)); ctl &= ~(mcntrl_afie | mcntrl_naie | mcntrl_rffie | mcntrl_drmie); iowrite32(ctl, I2C_REG_CTL(alg_data)); /* Our return value. */ alg_data->mif.ret = -EIO; /* Stop timer, to prevent timeout. */ del_timer_sync(&alg_data->mif.timer); complete(&alg_data->mif.complete); } else { /* * Two options: * - Master Tx needs data. * - There is data in the Rx-fifo * The latter is only the case if we have requested for data, * via a dummy write. (See 'i2c_pnx_master_rcv'.) * We therefore check, as a sanity check, whether that interrupt * has been enabled. */ if ((stat & mstatus_drmi) || !(stat & mstatus_rfe)) { if (alg_data->mif.mode == I2C_SMBUS_WRITE) { i2c_pnx_master_xmit(alg_data); } else if (alg_data->mif.mode == I2C_SMBUS_READ) { i2c_pnx_master_rcv(alg_data); } } } /* Clear TDI and AFI bits */ stat = ioread32(I2C_REG_STS(alg_data)); iowrite32(stat | mstatus_tdi | mstatus_afi, I2C_REG_STS(alg_data)); dev_dbg(&alg_data->adapter.dev, "%s(): exiting, stat = %x ctrl = %x.\n", __func__, ioread32(I2C_REG_STS(alg_data)), ioread32(I2C_REG_CTL(alg_data))); return IRQ_HANDLED; } static void i2c_pnx_timeout(struct timer_list *t) { struct i2c_pnx_algo_data *alg_data = from_timer(alg_data, t, mif.timer); u32 ctl; dev_err(&alg_data->adapter.dev, "Master timed out. stat = %04x, cntrl = %04x. Resetting master...\n", ioread32(I2C_REG_STS(alg_data)), ioread32(I2C_REG_CTL(alg_data))); /* Reset master and disable interrupts */ ctl = ioread32(I2C_REG_CTL(alg_data)); ctl &= ~(mcntrl_afie | mcntrl_naie | mcntrl_rffie | mcntrl_drmie); iowrite32(ctl, I2C_REG_CTL(alg_data)); ctl |= mcntrl_reset; iowrite32(ctl, I2C_REG_CTL(alg_data)); wait_reset(alg_data); alg_data->mif.ret = -EIO; complete(&alg_data->mif.complete); } static inline void bus_reset_if_active(struct i2c_pnx_algo_data *alg_data) { u32 stat; if ((stat = ioread32(I2C_REG_STS(alg_data))) & mstatus_active) { dev_err(&alg_data->adapter.dev, "%s: Bus is still active after xfer. Reset it...\n", alg_data->adapter.name); iowrite32(ioread32(I2C_REG_CTL(alg_data)) | mcntrl_reset, I2C_REG_CTL(alg_data)); wait_reset(alg_data); } else if (!(stat & mstatus_rfe) || !(stat & mstatus_tfe)) { /* If there is data in the fifo's after transfer, * flush fifo's by reset. 
*/ iowrite32(ioread32(I2C_REG_CTL(alg_data)) | mcntrl_reset, I2C_REG_CTL(alg_data)); wait_reset(alg_data); } else if (stat & mstatus_nai) { iowrite32(ioread32(I2C_REG_CTL(alg_data)) | mcntrl_reset, I2C_REG_CTL(alg_data)); wait_reset(alg_data); } } /** * i2c_pnx_xfer - generic transfer entry point * @adap: pointer to I2C adapter structure * @msgs: array of messages * @num: number of messages * * Initiates the transfer */ static int i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct i2c_msg *pmsg; int rc = 0, completed = 0, i; struct i2c_pnx_algo_data *alg_data = adap->algo_data; u32 stat; dev_dbg(&alg_data->adapter.dev, "%s(): entering: %d messages, stat = %04x.\n", __func__, num, ioread32(I2C_REG_STS(alg_data))); bus_reset_if_active(alg_data); /* Process transactions in a loop. */ for (i = 0; rc >= 0 && i < num; i++) { u8 addr; pmsg = &msgs[i]; addr = pmsg->addr; if (pmsg->flags & I2C_M_TEN) { dev_err(&alg_data->adapter.dev, "%s: 10 bits addr not supported!\n", alg_data->adapter.name); rc = -EINVAL; break; } alg_data->mif.buf = pmsg->buf; alg_data->mif.len = pmsg->len; alg_data->mif.order = pmsg->len; alg_data->mif.mode = (pmsg->flags & I2C_M_RD) ? I2C_SMBUS_READ : I2C_SMBUS_WRITE; alg_data->mif.ret = 0; alg_data->last = (i == num - 1); dev_dbg(&alg_data->adapter.dev, "%s(): mode %d, %d bytes\n", __func__, alg_data->mif.mode, alg_data->mif.len); i2c_pnx_arm_timer(alg_data); /* initialize the completion var */ init_completion(&alg_data->mif.complete); /* Enable master interrupt */ iowrite32(ioread32(I2C_REG_CTL(alg_data)) | mcntrl_afie | mcntrl_naie | mcntrl_drmie, I2C_REG_CTL(alg_data)); /* Put start-code and slave-address on the bus. */ rc = i2c_pnx_start(addr, alg_data); if (rc < 0) break; /* Wait for completion */ wait_for_completion(&alg_data->mif.complete); if (!(rc = alg_data->mif.ret)) completed++; dev_dbg(&alg_data->adapter.dev, "%s(): Complete, return code = %d.\n", __func__, rc); /* Clear TDI and AFI bits in case they are set. */ if ((stat = ioread32(I2C_REG_STS(alg_data))) & mstatus_tdi) { dev_dbg(&alg_data->adapter.dev, "%s: TDI still set... clearing now.\n", alg_data->adapter.name); iowrite32(stat, I2C_REG_STS(alg_data)); } if ((stat = ioread32(I2C_REG_STS(alg_data))) & mstatus_afi) { dev_dbg(&alg_data->adapter.dev, "%s: AFI still set... clearing now.\n", alg_data->adapter.name); iowrite32(stat, I2C_REG_STS(alg_data)); } } bus_reset_if_active(alg_data); /* Cleanup to be sure... */ alg_data->mif.buf = NULL; alg_data->mif.len = 0; alg_data->mif.order = 0; dev_dbg(&alg_data->adapter.dev, "%s(): exiting, stat = %x\n", __func__, ioread32(I2C_REG_STS(alg_data))); if (completed != num) return ((rc < 0) ? 
rc : -EREMOTEIO); return num; } static u32 i2c_pnx_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static const struct i2c_algorithm pnx_algorithm = { .master_xfer = i2c_pnx_xfer, .functionality = i2c_pnx_func, }; static int i2c_pnx_controller_suspend(struct device *dev) { struct i2c_pnx_algo_data *alg_data = dev_get_drvdata(dev); clk_disable_unprepare(alg_data->clk); return 0; } static int i2c_pnx_controller_resume(struct device *dev) { struct i2c_pnx_algo_data *alg_data = dev_get_drvdata(dev); return clk_prepare_enable(alg_data->clk); } static DEFINE_SIMPLE_DEV_PM_OPS(i2c_pnx_pm, i2c_pnx_controller_suspend, i2c_pnx_controller_resume); static int i2c_pnx_probe(struct platform_device *pdev) { unsigned long tmp; int ret = 0; struct i2c_pnx_algo_data *alg_data; unsigned long freq; struct resource *res; u32 speed = I2C_PNX_SPEED_KHZ_DEFAULT * 1000; alg_data = devm_kzalloc(&pdev->dev, sizeof(*alg_data), GFP_KERNEL); if (!alg_data) return -ENOMEM; platform_set_drvdata(pdev, alg_data); alg_data->adapter.dev.parent = &pdev->dev; alg_data->adapter.algo = &pnx_algorithm; alg_data->adapter.algo_data = alg_data; alg_data->adapter.nr = pdev->id; alg_data->timeout = I2C_PNX_TIMEOUT_DEFAULT; #ifdef CONFIG_OF alg_data->adapter.dev.of_node = of_node_get(pdev->dev.of_node); if (pdev->dev.of_node) { of_property_read_u32(pdev->dev.of_node, "clock-frequency", &speed); /* * At this point, it is planned to add an OF timeout property. * As soon as there is a consensus about how to call and handle * this, sth. like the following can be put here: * * of_property_read_u32(pdev->dev.of_node, "timeout", * &alg_data->timeout); */ } #endif alg_data->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(alg_data->clk)) return PTR_ERR(alg_data->clk); timer_setup(&alg_data->mif.timer, i2c_pnx_timeout, 0); snprintf(alg_data->adapter.name, sizeof(alg_data->adapter.name), "%s", pdev->name); /* Register I/O resource */ alg_data->ioaddr = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(alg_data->ioaddr)) return PTR_ERR(alg_data->ioaddr); ret = clk_prepare_enable(alg_data->clk); if (ret) return ret; freq = clk_get_rate(alg_data->clk); /* * Clock Divisor High This value is the number of system clocks * the serial clock (SCL) will be high. * For example, if the system clock period is 50 ns and the maximum * desired serial period is 10000 ns (100 kHz), then CLKHI would be * set to 0.5*(f_sys/f_i2c)-2=0.5*(20e6/100e3)-2=98. The actual value * programmed into CLKHI will vary from this slightly due to * variations in the output pad's rise and fall times as well as * the deglitching filter length. 
*/ tmp = (freq / speed) / 2 - 2; if (tmp > 0x3FF) tmp = 0x3FF; iowrite32(tmp, I2C_REG_CKH(alg_data)); iowrite32(tmp, I2C_REG_CKL(alg_data)); iowrite32(mcntrl_reset, I2C_REG_CTL(alg_data)); if (wait_reset(alg_data)) { ret = -ENODEV; goto out_clock; } init_completion(&alg_data->mif.complete); alg_data->irq = platform_get_irq(pdev, 0); if (alg_data->irq < 0) { ret = alg_data->irq; goto out_clock; } ret = devm_request_irq(&pdev->dev, alg_data->irq, i2c_pnx_interrupt, 0, pdev->name, alg_data); if (ret) goto out_clock; /* Register this adapter with the I2C subsystem */ ret = i2c_add_numbered_adapter(&alg_data->adapter); if (ret < 0) goto out_clock; dev_dbg(&pdev->dev, "%s: Master at %pap, irq %d.\n", alg_data->adapter.name, &res->start, alg_data->irq); return 0; out_clock: clk_disable_unprepare(alg_data->clk); return ret; } static void i2c_pnx_remove(struct platform_device *pdev) { struct i2c_pnx_algo_data *alg_data = platform_get_drvdata(pdev); i2c_del_adapter(&alg_data->adapter); clk_disable_unprepare(alg_data->clk); } #ifdef CONFIG_OF static const struct of_device_id i2c_pnx_of_match[] = { { .compatible = "nxp,pnx-i2c" }, { }, }; MODULE_DEVICE_TABLE(of, i2c_pnx_of_match); #endif static struct platform_driver i2c_pnx_driver = { .driver = { .name = "pnx-i2c", .of_match_table = of_match_ptr(i2c_pnx_of_match), .pm = pm_sleep_ptr(&i2c_pnx_pm), }, .probe = i2c_pnx_probe, .remove_new = i2c_pnx_remove, }; static int __init i2c_adap_pnx_init(void) { return platform_driver_register(&i2c_pnx_driver); } static void __exit i2c_adap_pnx_exit(void) { platform_driver_unregister(&i2c_pnx_driver); } MODULE_AUTHOR("Vitaly Wool"); MODULE_AUTHOR("Dennis Kovalev <[email protected]>"); MODULE_DESCRIPTION("I2C driver for Philips IP3204-based I2C busses"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:pnx-i2c"); /* We need to make sure I2C is initialized before USB */ subsys_initcall(i2c_adap_pnx_init); module_exit(i2c_adap_pnx_exit);
linux-master
drivers/i2c/busses/i2c-pnx.c
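/*
 * Editorial sketch (not part of i2c-pnx.c): the probe routine programs the
 * SCL dividers with CLKHI = CLKLO = (f_sys / f_i2c) / 2 - 2, clamped to
 * the 10-bit register maximum of 0x3FF.  This stand-alone program only
 * reproduces that arithmetic for a couple of example clock rates; the
 * rates are illustrative, not taken from a real board.
 */
#include <stdio.h>

static unsigned long pnx_i2c_divider(unsigned long freq_hz, unsigned long speed_hz)
{
	/* assumes freq_hz / speed_hz >= 4, as on any sane clock setup */
	unsigned long tmp = (freq_hz / speed_hz) / 2 - 2;

	if (tmp > 0x3FF)		/* divider field is only 10 bits wide */
		tmp = 0x3FF;
	return tmp;
}

int main(void)
{
	/* 20 MHz system clock, 100 kHz bus: gives the 98 quoted in the comment */
	printf("20 MHz / 100 kHz -> CLKHI/CLKLO = %lu\n",
	       pnx_i2c_divider(20000000UL, 100000UL));
	/* 13 MHz system clock, 400 kHz fast mode (hypothetical) */
	printf("13 MHz / 400 kHz -> CLKHI/CLKLO = %lu\n",
	       pnx_i2c_divider(13000000UL, 400000UL));
	return 0;
}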
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /* * Mellanox i2c driver * * Copyright (C) 2016-2020 Mellanox Technologies */ #include <linux/delay.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_data/mlxreg.h> #include <linux/platform_device.h> #include <linux/regmap.h> /* General defines */ #define MLXPLAT_CPLD_LPC_I2C_BASE_ADDR 0x2000 #define MLXCPLD_I2C_DEVICE_NAME "i2c_mlxcpld" #define MLXCPLD_I2C_VALID_FLAG (I2C_M_RECV_LEN | I2C_M_RD) #define MLXCPLD_I2C_BUS_NUM 1 #define MLXCPLD_I2C_DATA_REG_SZ 36 #define MLXCPLD_I2C_DATA_SZ_BIT BIT(5) #define MLXCPLD_I2C_DATA_EXT2_SZ_BIT BIT(6) #define MLXCPLD_I2C_DATA_SZ_MASK GENMASK(6, 5) #define MLXCPLD_I2C_SMBUS_BLK_BIT BIT(7) #define MLXCPLD_I2C_MAX_ADDR_LEN 4 #define MLXCPLD_I2C_RETR_NUM 2 #define MLXCPLD_I2C_XFER_TO 500000 /* usec */ #define MLXCPLD_I2C_POLL_TIME 200 /* usec */ /* LPC I2C registers */ #define MLXCPLD_LPCI2C_CPBLTY_REG 0x0 #define MLXCPLD_LPCI2C_CTRL_REG 0x1 #define MLXCPLD_LPCI2C_HALF_CYC_REG 0x4 #define MLXCPLD_LPCI2C_I2C_HOLD_REG 0x5 #define MLXCPLD_LPCI2C_CMD_REG 0x6 #define MLXCPLD_LPCI2C_NUM_DAT_REG 0x7 #define MLXCPLD_LPCI2C_NUM_ADDR_REG 0x8 #define MLXCPLD_LPCI2C_STATUS_REG 0x9 #define MLXCPLD_LPCI2C_DATA_REG 0xa /* LPC I2C masks and parameters */ #define MLXCPLD_LPCI2C_RST_SEL_MASK 0x1 #define MLXCPLD_LPCI2C_TRANS_END 0x1 #define MLXCPLD_LPCI2C_STATUS_NACK 0x10 #define MLXCPLD_LPCI2C_NO_IND 0 #define MLXCPLD_LPCI2C_ACK_IND 1 #define MLXCPLD_LPCI2C_NACK_IND 2 #define MLXCPLD_I2C_FREQ_1000KHZ_SET 0x04 #define MLXCPLD_I2C_FREQ_400KHZ_SET 0x0e #define MLXCPLD_I2C_FREQ_100KHZ_SET 0x42 enum mlxcpld_i2c_frequency { MLXCPLD_I2C_FREQ_1000KHZ = 1, MLXCPLD_I2C_FREQ_400KHZ = 2, MLXCPLD_I2C_FREQ_100KHZ = 3, }; struct mlxcpld_i2c_curr_xfer { u8 cmd; u8 addr_width; u8 data_len; u8 msg_num; struct i2c_msg *msg; }; struct mlxcpld_i2c_priv { struct i2c_adapter adap; u32 base_addr; struct mutex lock; struct mlxcpld_i2c_curr_xfer xfer; struct device *dev; bool smbus_block; int polling_time; }; static void mlxcpld_i2c_lpc_write_buf(u8 *data, u8 len, u32 addr) { int i; for (i = 0; i < len - len % 4; i += 4) outl(*(u32 *)(data + i), addr + i); for (; i < len; ++i) outb(*(data + i), addr + i); } static void mlxcpld_i2c_lpc_read_buf(u8 *data, u8 len, u32 addr) { int i; for (i = 0; i < len - len % 4; i += 4) *(u32 *)(data + i) = inl(addr + i); for (; i < len; ++i) *(data + i) = inb(addr + i); } static void mlxcpld_i2c_read_comm(struct mlxcpld_i2c_priv *priv, u8 offs, u8 *data, u8 datalen) { u32 addr = priv->base_addr + offs; switch (datalen) { case 1: *(data) = inb(addr); break; case 2: *((u16 *)data) = inw(addr); break; case 3: *((u16 *)data) = inw(addr); *(data + 2) = inb(addr + 2); break; case 4: *((u32 *)data) = inl(addr); break; default: mlxcpld_i2c_lpc_read_buf(data, datalen, addr); break; } } static void mlxcpld_i2c_write_comm(struct mlxcpld_i2c_priv *priv, u8 offs, u8 *data, u8 datalen) { u32 addr = priv->base_addr + offs; switch (datalen) { case 1: outb(*(data), addr); break; case 2: outw(*((u16 *)data), addr); break; case 3: outw(*((u16 *)data), addr); outb(*(data + 2), addr + 2); break; case 4: outl(*((u32 *)data), addr); break; default: mlxcpld_i2c_lpc_write_buf(data, datalen, addr); break; } } /* * Check validity of received i2c messages parameters. * Returns 0 if OK, other - in case of invalid parameters. 
*/ static int mlxcpld_i2c_check_msg_params(struct mlxcpld_i2c_priv *priv, struct i2c_msg *msgs, int num) { int i; if (!num) { dev_err(priv->dev, "Incorrect 0 num of messages\n"); return -EINVAL; } if (unlikely(msgs[0].addr > 0x7f)) { dev_err(priv->dev, "Invalid address 0x%03x\n", msgs[0].addr); return -EINVAL; } for (i = 0; i < num; ++i) { if (unlikely(!msgs[i].buf)) { dev_err(priv->dev, "Invalid buf in msg[%d]\n", i); return -EINVAL; } if (unlikely(msgs[0].addr != msgs[i].addr)) { dev_err(priv->dev, "Invalid addr in msg[%d]\n", i); return -EINVAL; } } return 0; } /* * Check if transfer is completed and status of operation. * Returns 0 - transfer completed (both ACK or NACK), * negative - transfer isn't finished. */ static int mlxcpld_i2c_check_status(struct mlxcpld_i2c_priv *priv, int *status) { u8 val; mlxcpld_i2c_read_comm(priv, MLXCPLD_LPCI2C_STATUS_REG, &val, 1); if (val & MLXCPLD_LPCI2C_TRANS_END) { if (val & MLXCPLD_LPCI2C_STATUS_NACK) /* * The slave is unable to accept the data. No such * slave, command not understood, or unable to accept * any more data. */ *status = MLXCPLD_LPCI2C_NACK_IND; else *status = MLXCPLD_LPCI2C_ACK_IND; return 0; } *status = MLXCPLD_LPCI2C_NO_IND; return -EIO; } static void mlxcpld_i2c_set_transf_data(struct mlxcpld_i2c_priv *priv, struct i2c_msg *msgs, int num, u8 comm_len) { priv->xfer.msg = msgs; priv->xfer.msg_num = num; /* * All upper layers currently are never use transfer with more than * 2 messages. Actually, it's also not so relevant in Mellanox systems * because of HW limitation. Max size of transfer is not more than 32 * or 68 bytes in the current x86 LPCI2C bridge. */ priv->xfer.cmd = msgs[num - 1].flags & I2C_M_RD; if (priv->xfer.cmd == I2C_M_RD && comm_len != msgs[0].len) { priv->xfer.addr_width = msgs[0].len; priv->xfer.data_len = comm_len - priv->xfer.addr_width; } else { priv->xfer.addr_width = 0; priv->xfer.data_len = comm_len; } } /* Reset CPLD LPCI2C block */ static void mlxcpld_i2c_reset(struct mlxcpld_i2c_priv *priv) { u8 val; mutex_lock(&priv->lock); mlxcpld_i2c_read_comm(priv, MLXCPLD_LPCI2C_CTRL_REG, &val, 1); val &= ~MLXCPLD_LPCI2C_RST_SEL_MASK; mlxcpld_i2c_write_comm(priv, MLXCPLD_LPCI2C_CTRL_REG, &val, 1); mutex_unlock(&priv->lock); } /* Make sure the CPLD is ready to start transmitting. */ static int mlxcpld_i2c_check_busy(struct mlxcpld_i2c_priv *priv) { u8 val; mlxcpld_i2c_read_comm(priv, MLXCPLD_LPCI2C_STATUS_REG, &val, 1); if (val & MLXCPLD_LPCI2C_TRANS_END) return 0; return -EIO; } static int mlxcpld_i2c_wait_for_free(struct mlxcpld_i2c_priv *priv) { int timeout = 0; do { if (!mlxcpld_i2c_check_busy(priv)) break; usleep_range(priv->polling_time / 2, priv->polling_time); timeout += priv->polling_time; } while (timeout <= MLXCPLD_I2C_XFER_TO); if (timeout > MLXCPLD_I2C_XFER_TO) return -ETIMEDOUT; return 0; } /* * Wait for master transfer to complete. * It puts current process to sleep until we get interrupt or timeout expires. * Returns the number of transferred or read bytes or error (<0). 
*/ static int mlxcpld_i2c_wait_for_tc(struct mlxcpld_i2c_priv *priv) { int status, i, timeout = 0; u8 datalen, val; do { usleep_range(priv->polling_time / 2, priv->polling_time); if (!mlxcpld_i2c_check_status(priv, &status)) break; timeout += priv->polling_time; } while (status == 0 && timeout < MLXCPLD_I2C_XFER_TO); switch (status) { case MLXCPLD_LPCI2C_NO_IND: return -ETIMEDOUT; case MLXCPLD_LPCI2C_ACK_IND: if (priv->xfer.cmd != I2C_M_RD) return (priv->xfer.addr_width + priv->xfer.data_len); if (priv->xfer.msg_num == 1) i = 0; else i = 1; if (!priv->xfer.msg[i].buf) return -EINVAL; /* * Actual read data len will be always the same as * requested len. 0xff (line pull-up) will be returned * if slave has no data to return. Thus don't read * MLXCPLD_LPCI2C_NUM_DAT_REG reg from CPLD. Only in case of * SMBus block read transaction data len can be different, * check this case. */ mlxcpld_i2c_read_comm(priv, MLXCPLD_LPCI2C_NUM_ADDR_REG, &val, 1); if (priv->smbus_block && (val & MLXCPLD_I2C_SMBUS_BLK_BIT)) { mlxcpld_i2c_read_comm(priv, MLXCPLD_LPCI2C_NUM_DAT_REG, &datalen, 1); if (unlikely(datalen > I2C_SMBUS_BLOCK_MAX)) { dev_err(priv->dev, "Incorrect smbus block read message len\n"); return -EPROTO; } } else { datalen = priv->xfer.data_len; } mlxcpld_i2c_read_comm(priv, MLXCPLD_LPCI2C_DATA_REG, priv->xfer.msg[i].buf, datalen); return datalen; case MLXCPLD_LPCI2C_NACK_IND: return -ENXIO; default: return -EINVAL; } } static void mlxcpld_i2c_xfer_msg(struct mlxcpld_i2c_priv *priv) { int i, len = 0; u8 cmd, val; mlxcpld_i2c_write_comm(priv, MLXCPLD_LPCI2C_NUM_DAT_REG, &priv->xfer.data_len, 1); val = priv->xfer.addr_width; /* Notify HW about SMBus block read transaction */ if (priv->smbus_block && priv->xfer.msg_num >= 2 && priv->xfer.msg[1].len == 1 && (priv->xfer.msg[1].flags & I2C_M_RECV_LEN) && (priv->xfer.msg[1].flags & I2C_M_RD)) val |= MLXCPLD_I2C_SMBUS_BLK_BIT; mlxcpld_i2c_write_comm(priv, MLXCPLD_LPCI2C_NUM_ADDR_REG, &val, 1); for (i = 0; i < priv->xfer.msg_num; i++) { if ((priv->xfer.msg[i].flags & I2C_M_RD) != I2C_M_RD) { /* Don't write to CPLD buffer in read transaction */ mlxcpld_i2c_write_comm(priv, MLXCPLD_LPCI2C_DATA_REG + len, priv->xfer.msg[i].buf, priv->xfer.msg[i].len); len += priv->xfer.msg[i].len; } } /* * Set target slave address with command for master transfer. * It should be latest executed function before CPLD transaction. */ cmd = (priv->xfer.msg[0].addr << 1) | priv->xfer.cmd; mlxcpld_i2c_write_comm(priv, MLXCPLD_LPCI2C_CMD_REG, &cmd, 1); } /* * Generic lpc-i2c transfer. * Returns the number of processed messages or error (<0). */ static int mlxcpld_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct mlxcpld_i2c_priv *priv = i2c_get_adapdata(adap); u8 comm_len = 0; int i, err; err = mlxcpld_i2c_check_msg_params(priv, msgs, num); if (err) { dev_err(priv->dev, "Incorrect message\n"); return err; } for (i = 0; i < num; ++i) comm_len += msgs[i].len; /* Check bus state */ if (mlxcpld_i2c_wait_for_free(priv)) { dev_err(priv->dev, "LPCI2C bridge is busy\n"); /* * Usually it means something serious has happened. * We can not have unfinished previous transfer * so it doesn't make any sense to try to stop it. * Probably we were not able to recover from the * previous error. * The only reasonable thing - is soft reset. */ mlxcpld_i2c_reset(priv); if (mlxcpld_i2c_check_busy(priv)) { dev_err(priv->dev, "LPCI2C bridge is busy after reset\n"); return -EIO; } } mlxcpld_i2c_set_transf_data(priv, msgs, num, comm_len); mutex_lock(&priv->lock); /* Do real transfer. 
Can't fail */ mlxcpld_i2c_xfer_msg(priv); /* Wait for transaction complete */ err = mlxcpld_i2c_wait_for_tc(priv); mutex_unlock(&priv->lock); return err < 0 ? err : num; } static u32 mlxcpld_i2c_func(struct i2c_adapter *adap) { struct mlxcpld_i2c_priv *priv = i2c_get_adapdata(adap); if (priv->smbus_block) return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SMBUS_I2C_BLOCK | I2C_FUNC_SMBUS_BLOCK_DATA; else return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SMBUS_I2C_BLOCK; } static const struct i2c_algorithm mlxcpld_i2c_algo = { .master_xfer = mlxcpld_i2c_xfer, .functionality = mlxcpld_i2c_func }; static const struct i2c_adapter_quirks mlxcpld_i2c_quirks = { .flags = I2C_AQ_COMB_WRITE_THEN_READ, .max_read_len = MLXCPLD_I2C_DATA_REG_SZ - MLXCPLD_I2C_MAX_ADDR_LEN, .max_write_len = MLXCPLD_I2C_DATA_REG_SZ, .max_comb_1st_msg_len = 4, }; static const struct i2c_adapter_quirks mlxcpld_i2c_quirks_ext = { .flags = I2C_AQ_COMB_WRITE_THEN_READ, .max_read_len = MLXCPLD_I2C_DATA_REG_SZ * 2 - MLXCPLD_I2C_MAX_ADDR_LEN, .max_write_len = MLXCPLD_I2C_DATA_REG_SZ * 2, .max_comb_1st_msg_len = 4, }; static const struct i2c_adapter_quirks mlxcpld_i2c_quirks_ext2 = { .flags = I2C_AQ_COMB_WRITE_THEN_READ, .max_read_len = (MLXCPLD_I2C_DATA_REG_SZ - 4) * 4, .max_write_len = (MLXCPLD_I2C_DATA_REG_SZ - 4) * 4 + MLXCPLD_I2C_MAX_ADDR_LEN, .max_comb_1st_msg_len = 4, }; static struct i2c_adapter mlxcpld_i2c_adapter = { .owner = THIS_MODULE, .name = "i2c-mlxcpld", .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, .algo = &mlxcpld_i2c_algo, .quirks = &mlxcpld_i2c_quirks, .retries = MLXCPLD_I2C_RETR_NUM, .nr = MLXCPLD_I2C_BUS_NUM, }; static int mlxcpld_i2c_set_frequency(struct mlxcpld_i2c_priv *priv, struct mlxreg_core_hotplug_platform_data *pdata) { struct mlxreg_core_item *item = pdata->items; struct mlxreg_core_data *data; u32 regval; u8 freq; int err; if (!item) return 0; /* Read frequency setting. */ data = item->data; err = regmap_read(pdata->regmap, data->reg, &regval); if (err) return err; /* Set frequency only if it is not 100KHz, which is default. */ switch ((regval & data->mask) >> data->bit) { case MLXCPLD_I2C_FREQ_1000KHZ: freq = MLXCPLD_I2C_FREQ_1000KHZ_SET; priv->polling_time /= 4; break; case MLXCPLD_I2C_FREQ_400KHZ: freq = MLXCPLD_I2C_FREQ_400KHZ_SET; priv->polling_time /= 4; break; default: return 0; } mlxcpld_i2c_write_comm(priv, MLXCPLD_LPCI2C_HALF_CYC_REG, &freq, 1); return 0; } static int mlxcpld_i2c_probe(struct platform_device *pdev) { struct mlxreg_core_hotplug_platform_data *pdata; struct mlxcpld_i2c_priv *priv; int err; u8 val; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; mutex_init(&priv->lock); platform_set_drvdata(pdev, priv); priv->dev = &pdev->dev; priv->base_addr = MLXPLAT_CPLD_LPC_I2C_BASE_ADDR; priv->polling_time = MLXCPLD_I2C_POLL_TIME; /* Set I2C bus frequency if platform data provides this info. 
*/ pdata = dev_get_platdata(&pdev->dev); if (pdata) { err = mlxcpld_i2c_set_frequency(priv, pdata); if (err) goto mlxcpld_i2_probe_failed; } /* Register with i2c layer */ mlxcpld_i2c_adapter.timeout = usecs_to_jiffies(MLXCPLD_I2C_XFER_TO); /* Read capability register */ mlxcpld_i2c_read_comm(priv, MLXCPLD_LPCI2C_CPBLTY_REG, &val, 1); /* Check support for extended transaction length */ if ((val & MLXCPLD_I2C_DATA_SZ_MASK) == MLXCPLD_I2C_DATA_SZ_BIT) mlxcpld_i2c_adapter.quirks = &mlxcpld_i2c_quirks_ext; else if ((val & MLXCPLD_I2C_DATA_SZ_MASK) == MLXCPLD_I2C_DATA_EXT2_SZ_BIT) mlxcpld_i2c_adapter.quirks = &mlxcpld_i2c_quirks_ext2; /* Check support for smbus block transaction */ if (val & MLXCPLD_I2C_SMBUS_BLK_BIT) priv->smbus_block = true; if (pdev->id >= -1) mlxcpld_i2c_adapter.nr = pdev->id; priv->adap = mlxcpld_i2c_adapter; priv->adap.dev.parent = &pdev->dev; i2c_set_adapdata(&priv->adap, priv); err = i2c_add_numbered_adapter(&priv->adap); if (err) goto mlxcpld_i2_probe_failed; /* Notify caller when adapter is added. */ if (pdata && pdata->completion_notify) pdata->completion_notify(pdata->handle, mlxcpld_i2c_adapter.nr); return 0; mlxcpld_i2_probe_failed: mutex_destroy(&priv->lock); return err; } static void mlxcpld_i2c_remove(struct platform_device *pdev) { struct mlxcpld_i2c_priv *priv = platform_get_drvdata(pdev); i2c_del_adapter(&priv->adap); mutex_destroy(&priv->lock); } static struct platform_driver mlxcpld_i2c_driver = { .probe = mlxcpld_i2c_probe, .remove_new = mlxcpld_i2c_remove, .driver = { .name = MLXCPLD_I2C_DEVICE_NAME, }, }; module_platform_driver(mlxcpld_i2c_driver); MODULE_AUTHOR("Michael Shych <[email protected]>"); MODULE_DESCRIPTION("Mellanox I2C-CPLD controller driver"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_ALIAS("platform:i2c-mlxcpld");
linux-master
drivers/i2c/busses/i2c-mlxcpld.c
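/*
 * Editorial sketch (not part of i2c-mlxcpld.c): the CPLD bridge has no
 * completion interrupt, so the driver polls the status register and gives
 * up after MLXCPLD_I2C_XFER_TO microseconds.  The helper below mirrors
 * that accumulate-and-compare loop in plain user-space C;
 * controller_ready() is a hypothetical stand-in for reading the LPC
 * status register and testing the transfer-end bit.
 */
#include <stdbool.h>
#include <errno.h>
#include <unistd.h>

#define XFER_TIMEOUT_US	500000		/* same budget as MLXCPLD_I2C_XFER_TO */
#define POLL_TIME_US	200		/* same step as MLXCPLD_I2C_POLL_TIME */

static bool controller_ready(void)
{
	/* hypothetical: would read the status register and test TRANS_END */
	return true;
}

/* Returns 0 once the controller reports idle, -ETIMEDOUT otherwise. */
static int wait_for_free(void)
{
	int waited_us = 0;

	do {
		if (controller_ready())
			return 0;
		usleep(POLL_TIME_US);	/* the driver uses usleep_range() */
		waited_us += POLL_TIME_US;
	} while (waited_us <= XFER_TIMEOUT_US);

	return -ETIMEDOUT;
}

int main(void)
{
	return wait_for_free() ? 1 : 0;
}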
// SPDX-License-Identifier: GPL-2.0-only /* * CE4100 PCI-I2C glue code for PXA's driver * Author: Sebastian Andrzej Siewior <[email protected]> * * The CE4100's I2C device is more or less the same one as found on PXA. * It does not support slave mode, the register slightly moved. This PCI * device provides three bars, every contains a single I2C controller. */ #include <linux/init.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/platform_data/i2c-pxa.h> #include <linux/of.h> #include <linux/of_address.h> #define CE4100_PCI_I2C_DEVS 3 struct ce4100_devices { struct platform_device *pdev[CE4100_PCI_I2C_DEVS]; }; static struct platform_device *add_i2c_device(struct pci_dev *dev, int bar) { struct platform_device *pdev; struct i2c_pxa_platform_data pdata; struct resource res[2]; struct device_node *child; static int devnum; int ret; memset(&pdata, 0, sizeof(struct i2c_pxa_platform_data)); memset(&res, 0, sizeof(res)); res[0].flags = IORESOURCE_MEM; res[0].start = pci_resource_start(dev, bar); res[0].end = pci_resource_end(dev, bar); res[1].flags = IORESOURCE_IRQ; res[1].start = dev->irq; res[1].end = dev->irq; for_each_child_of_node(dev->dev.of_node, child) { const void *prop; struct resource r; int ret; ret = of_address_to_resource(child, 0, &r); if (ret < 0) continue; if (r.start != res[0].start) continue; if (r.end != res[0].end) continue; if (r.flags != res[0].flags) continue; prop = of_get_property(child, "fast-mode", NULL); if (prop) pdata.fast_mode = 1; break; } if (!child) { dev_err(&dev->dev, "failed to match a DT node for bar %d.\n", bar); ret = -EINVAL; goto out; } pdev = platform_device_alloc("ce4100-i2c", devnum); if (!pdev) { of_node_put(child); ret = -ENOMEM; goto out; } pdev->dev.parent = &dev->dev; pdev->dev.of_node = child; ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res)); if (ret) goto err; ret = platform_device_add_data(pdev, &pdata, sizeof(pdata)); if (ret) goto err; ret = platform_device_add(pdev); if (ret) goto err; devnum++; return pdev; err: platform_device_put(pdev); out: return ERR_PTR(ret); } static int ce4100_i2c_probe(struct pci_dev *dev, const struct pci_device_id *ent) { int ret; int i; struct ce4100_devices *sds; ret = pcim_enable_device(dev); if (ret) return ret; if (!dev->dev.of_node) { dev_err(&dev->dev, "Missing device tree node.\n"); return -EINVAL; } sds = kzalloc(sizeof(*sds), GFP_KERNEL); if (!sds) return -ENOMEM; for (i = 0; i < ARRAY_SIZE(sds->pdev); i++) { sds->pdev[i] = add_i2c_device(dev, i); if (IS_ERR(sds->pdev[i])) { ret = PTR_ERR(sds->pdev[i]); while (--i >= 0) platform_device_unregister(sds->pdev[i]); goto err_dev_add; } } pci_set_drvdata(dev, sds); return 0; err_dev_add: kfree(sds); return ret; } static const struct pci_device_id ce4100_i2c_devices[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e68)}, { }, }; static struct pci_driver ce4100_i2c_driver = { .driver = { .suppress_bind_attrs = true, }, .name = "ce4100_i2c", .id_table = ce4100_i2c_devices, .probe = ce4100_i2c_probe, }; builtin_pci_driver(ce4100_i2c_driver);
linux-master
drivers/i2c/busses/i2c-pxa-pci.c
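/*
 * Editorial sketch (not part of i2c-pxa-pci.c): the CE4100 glue turns each
 * PCI BAR into a platform device for the PXA I2C core by allocating the
 * device, attaching MEM/IRQ resources and platform data, and only then
 * adding it, with platform_device_put() unwinding any failure.  The
 * condensed helper below illustrates that alloc/add/put pattern with
 * hypothetical names ("example-i2c", struct example_pdata); it is a
 * generic illustration of the API, not the driver's exact code.
 */
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/errno.h>

struct example_pdata {
	int fast_mode;
};

static struct platform_device *example_register(struct device *parent,
						resource_size_t start,
						resource_size_t end,
						int irq, int id)
{
	struct example_pdata pdata = { .fast_mode = 0 };
	struct resource res[2] = {
		DEFINE_RES_MEM(start, end - start + 1),
		DEFINE_RES_IRQ(irq),
	};
	struct platform_device *pdev;
	int ret;

	pdev = platform_device_alloc("example-i2c", id);
	if (!pdev)
		return ERR_PTR(-ENOMEM);

	pdev->dev.parent = parent;

	ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res));
	if (ret)
		goto err;

	ret = platform_device_add_data(pdev, &pdata, sizeof(pdata));
	if (ret)
		goto err;

	ret = platform_device_add(pdev);
	if (ret)
		goto err;

	return pdev;

err:
	/* platform_device_put() also frees the copied resources and data */
	platform_device_put(pdev);
	return ERR_PTR(ret);
}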
// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (c) 1999-2002 Merlin Hughes <[email protected]> Shamelessly ripped from i2c-piix4.c: Copyright (c) 1998, 1999 Frodo Looijaard <[email protected]> and Philip Edelbrock <[email protected]> */ /* 2002-04-08: Added nForce support. (Csaba Halasz) 2002-10-03: Fixed nForce PnP I/O port. (Michael Steil) 2002-12-28: Rewritten into something that resembles a Linux driver (hch) 2003-11-29: Added back AMD8111 removed by the previous rewrite. (Philip Pokorny) */ /* Supports AMD756, AMD766, AMD768, AMD8111 and nVidia nForce Note: we assume there can only be one device, with one SMBus interface. */ #include <linux/module.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/stddef.h> #include <linux/ioport.h> #include <linux/i2c.h> #include <linux/acpi.h> #include <linux/io.h> /* AMD756 SMBus address offsets */ #define SMB_ADDR_OFFSET 0xE0 #define SMB_IOSIZE 16 #define SMB_GLOBAL_STATUS (0x0 + amd756_ioport) #define SMB_GLOBAL_ENABLE (0x2 + amd756_ioport) #define SMB_HOST_ADDRESS (0x4 + amd756_ioport) #define SMB_HOST_DATA (0x6 + amd756_ioport) #define SMB_HOST_COMMAND (0x8 + amd756_ioport) #define SMB_HOST_BLOCK_DATA (0x9 + amd756_ioport) #define SMB_HAS_DATA (0xA + amd756_ioport) #define SMB_HAS_DEVICE_ADDRESS (0xC + amd756_ioport) #define SMB_HAS_HOST_ADDRESS (0xE + amd756_ioport) #define SMB_SNOOP_ADDRESS (0xF + amd756_ioport) /* PCI Address Constants */ /* address of I/O space */ #define SMBBA 0x058 /* mh */ #define SMBBANFORCE 0x014 /* general configuration */ #define SMBGCFG 0x041 /* mh */ /* silicon revision code */ #define SMBREV 0x008 /* Other settings */ #define MAX_TIMEOUT 500 /* AMD756 constants */ #define AMD756_QUICK 0x00 #define AMD756_BYTE 0x01 #define AMD756_BYTE_DATA 0x02 #define AMD756_WORD_DATA 0x03 #define AMD756_PROCESS_CALL 0x04 #define AMD756_BLOCK_DATA 0x05 static struct pci_driver amd756_driver; static unsigned short amd756_ioport; /* SMBUS event = I/O 28-29 bit 11 see E0 for the status bits and enabled in E2 */ #define GS_ABRT_STS (1 << 0) #define GS_COL_STS (1 << 1) #define GS_PRERR_STS (1 << 2) #define GS_HST_STS (1 << 3) #define GS_HCYC_STS (1 << 4) #define GS_TO_STS (1 << 5) #define GS_SMB_STS (1 << 11) #define GS_CLEAR_STS (GS_ABRT_STS | GS_COL_STS | GS_PRERR_STS | \ GS_HCYC_STS | GS_TO_STS ) #define GE_CYC_TYPE_MASK (7) #define GE_HOST_STC (1 << 3) #define GE_ABORT (1 << 5) static int amd756_transaction(struct i2c_adapter *adap) { int temp; int result = 0; int timeout = 0; dev_dbg(&adap->dev, "Transaction (pre): GS=%04x, GE=%04x, ADD=%04x, " "DAT=%04x\n", inw_p(SMB_GLOBAL_STATUS), inw_p(SMB_GLOBAL_ENABLE), inw_p(SMB_HOST_ADDRESS), inb_p(SMB_HOST_DATA)); /* Make sure the SMBus host is ready to start transmitting */ if ((temp = inw_p(SMB_GLOBAL_STATUS)) & (GS_HST_STS | GS_SMB_STS)) { dev_dbg(&adap->dev, "SMBus busy (%04x). Waiting...\n", temp); do { msleep(1); temp = inw_p(SMB_GLOBAL_STATUS); } while ((temp & (GS_HST_STS | GS_SMB_STS)) && (timeout++ < MAX_TIMEOUT)); /* If the SMBus is still busy, we give up */ if (timeout > MAX_TIMEOUT) { dev_dbg(&adap->dev, "Busy wait timeout (%04x)\n", temp); goto abort; } timeout = 0; } /* start the transaction by setting the start bit */ outw_p(inw(SMB_GLOBAL_ENABLE) | GE_HOST_STC, SMB_GLOBAL_ENABLE); /* We will always wait for a fraction of a second! 
*/ do { msleep(1); temp = inw_p(SMB_GLOBAL_STATUS); } while ((temp & GS_HST_STS) && (timeout++ < MAX_TIMEOUT)); /* If the SMBus is still busy, we give up */ if (timeout > MAX_TIMEOUT) { dev_dbg(&adap->dev, "Completion timeout!\n"); goto abort; } if (temp & GS_PRERR_STS) { result = -ENXIO; dev_dbg(&adap->dev, "SMBus Protocol error (no response)!\n"); } if (temp & GS_COL_STS) { result = -EIO; dev_warn(&adap->dev, "SMBus collision!\n"); } if (temp & GS_TO_STS) { result = -ETIMEDOUT; dev_dbg(&adap->dev, "SMBus protocol timeout!\n"); } if (temp & GS_HCYC_STS) dev_dbg(&adap->dev, "SMBus protocol success!\n"); outw_p(GS_CLEAR_STS, SMB_GLOBAL_STATUS); #ifdef DEBUG if (((temp = inw_p(SMB_GLOBAL_STATUS)) & GS_CLEAR_STS) != 0x00) { dev_dbg(&adap->dev, "Failed reset at end of transaction (%04x)\n", temp); } #endif dev_dbg(&adap->dev, "Transaction (post): GS=%04x, GE=%04x, ADD=%04x, DAT=%04x\n", inw_p(SMB_GLOBAL_STATUS), inw_p(SMB_GLOBAL_ENABLE), inw_p(SMB_HOST_ADDRESS), inb_p(SMB_HOST_DATA)); return result; abort: dev_warn(&adap->dev, "Sending abort\n"); outw_p(inw(SMB_GLOBAL_ENABLE) | GE_ABORT, SMB_GLOBAL_ENABLE); msleep(100); outw_p(GS_CLEAR_STS, SMB_GLOBAL_STATUS); return -EIO; } /* Return negative errno on error. */ static s32 amd756_access(struct i2c_adapter * adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data * data) { int i, len; int status; switch (size) { case I2C_SMBUS_QUICK: outw_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMB_HOST_ADDRESS); size = AMD756_QUICK; break; case I2C_SMBUS_BYTE: outw_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMB_HOST_ADDRESS); if (read_write == I2C_SMBUS_WRITE) outb_p(command, SMB_HOST_DATA); size = AMD756_BYTE; break; case I2C_SMBUS_BYTE_DATA: outw_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMB_HOST_ADDRESS); outb_p(command, SMB_HOST_COMMAND); if (read_write == I2C_SMBUS_WRITE) outw_p(data->byte, SMB_HOST_DATA); size = AMD756_BYTE_DATA; break; case I2C_SMBUS_WORD_DATA: outw_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMB_HOST_ADDRESS); outb_p(command, SMB_HOST_COMMAND); if (read_write == I2C_SMBUS_WRITE) outw_p(data->word, SMB_HOST_DATA); /* TODO: endian???? */ size = AMD756_WORD_DATA; break; case I2C_SMBUS_BLOCK_DATA: outw_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMB_HOST_ADDRESS); outb_p(command, SMB_HOST_COMMAND); if (read_write == I2C_SMBUS_WRITE) { len = data->block[0]; if (len < 0) len = 0; if (len > 32) len = 32; outw_p(len, SMB_HOST_DATA); /* i = inw_p(SMBHSTCNT); Reset SMBBLKDAT */ for (i = 1; i <= len; i++) outb_p(data->block[i], SMB_HOST_BLOCK_DATA); } size = AMD756_BLOCK_DATA; break; default: dev_warn(&adap->dev, "Unsupported transaction %d\n", size); return -EOPNOTSUPP; } /* How about enabling interrupts... */ outw_p(size & GE_CYC_TYPE_MASK, SMB_GLOBAL_ENABLE); status = amd756_transaction(adap); if (status) return status; if ((read_write == I2C_SMBUS_WRITE) || (size == AMD756_QUICK)) return 0; switch (size) { case AMD756_BYTE: data->byte = inw_p(SMB_HOST_DATA); break; case AMD756_BYTE_DATA: data->byte = inw_p(SMB_HOST_DATA); break; case AMD756_WORD_DATA: data->word = inw_p(SMB_HOST_DATA); /* TODO: endian???? 
*/ break; case AMD756_BLOCK_DATA: data->block[0] = inw_p(SMB_HOST_DATA) & 0x3f; if(data->block[0] > 32) data->block[0] = 32; /* i = inw_p(SMBHSTCNT); Reset SMBBLKDAT */ for (i = 1; i <= data->block[0]; i++) data->block[i] = inb_p(SMB_HOST_BLOCK_DATA); break; } return 0; } static u32 amd756_func(struct i2c_adapter *adapter) { return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA; } static const struct i2c_algorithm smbus_algorithm = { .smbus_xfer = amd756_access, .functionality = amd756_func, }; struct i2c_adapter amd756_smbus = { .owner = THIS_MODULE, .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, .algo = &smbus_algorithm, }; enum chiptype { AMD756, AMD766, AMD768, NFORCE, AMD8111 }; static const char* chipname[] = { "AMD756", "AMD766", "AMD768", "nVidia nForce", "AMD8111", }; static const struct pci_device_id amd756_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_740B), .driver_data = AMD756 }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7413), .driver_data = AMD766 }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_OPUS_7443), .driver_data = AMD768 }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS), .driver_data = AMD8111 }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_SMBUS), .driver_data = NFORCE }, { 0, } }; MODULE_DEVICE_TABLE (pci, amd756_ids); static int amd756_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int nforce = (id->driver_data == NFORCE); int error; u8 temp; if (amd756_ioport) { dev_err(&pdev->dev, "Only one device supported " "(you have a strange motherboard, btw)\n"); return -ENODEV; } if (nforce) { if (PCI_FUNC(pdev->devfn) != 1) return -ENODEV; pci_read_config_word(pdev, SMBBANFORCE, &amd756_ioport); amd756_ioport &= 0xfffc; } else { /* amd */ if (PCI_FUNC(pdev->devfn) != 3) return -ENODEV; pci_read_config_byte(pdev, SMBGCFG, &temp); if ((temp & 128) == 0) { dev_err(&pdev->dev, "Error: SMBus controller I/O not enabled!\n"); return -ENODEV; } /* Determine the address of the SMBus areas */ /* Technically it is a dword but... */ pci_read_config_word(pdev, SMBBA, &amd756_ioport); amd756_ioport &= 0xff00; amd756_ioport += SMB_ADDR_OFFSET; } error = acpi_check_region(amd756_ioport, SMB_IOSIZE, amd756_driver.name); if (error) return -ENODEV; if (!request_region(amd756_ioport, SMB_IOSIZE, amd756_driver.name)) { dev_err(&pdev->dev, "SMB region 0x%x already in use!\n", amd756_ioport); return -ENODEV; } pci_read_config_byte(pdev, SMBREV, &temp); dev_dbg(&pdev->dev, "SMBREV = 0x%X\n", temp); dev_dbg(&pdev->dev, "AMD756_smba = 0x%X\n", amd756_ioport); /* set up the sysfs linkage to our parent device */ amd756_smbus.dev.parent = &pdev->dev; snprintf(amd756_smbus.name, sizeof(amd756_smbus.name), "SMBus %s adapter at %04x", chipname[id->driver_data], amd756_ioport); error = i2c_add_adapter(&amd756_smbus); if (error) goto out_err; return 0; out_err: release_region(amd756_ioport, SMB_IOSIZE); return error; } static void amd756_remove(struct pci_dev *dev) { i2c_del_adapter(&amd756_smbus); release_region(amd756_ioport, SMB_IOSIZE); } static struct pci_driver amd756_driver = { .name = "amd756_smbus", .id_table = amd756_ids, .probe = amd756_probe, .remove = amd756_remove, }; module_pci_driver(amd756_driver); MODULE_AUTHOR("Merlin Hughes <[email protected]>"); MODULE_DESCRIPTION("AMD756/766/768/8111 and nVidia nForce SMBus driver"); MODULE_LICENSE("GPL"); EXPORT_SYMBOL(amd756_smbus);
linux-master
drivers/i2c/busses/i2c-amd756.c
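/*
 * Editorial sketch (not part of i2c-amd756.c): amd756_access() implements
 * the SMBus quick/byte/word/block protocols that i2c-dev exposes through
 * the I2C_SMBUS ioctl.  The fragment below performs a byte-data read the
 * way a user-space tool would, exercising the AMD756_BYTE_DATA path in
 * the driver; adapter number, slave address and command code are
 * hypothetical.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

int main(void)
{
	union i2c_smbus_data data;
	struct i2c_smbus_ioctl_data args;
	int fd = open("/dev/i2c-0", O_RDWR);	/* hypothetical adapter */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Bind the file descriptor to a (hypothetical) 7-bit slave address. */
	if (ioctl(fd, I2C_SLAVE, 0x2d) < 0) {
		perror("I2C_SLAVE");
		close(fd);
		return 1;
	}

	args.read_write = I2C_SMBUS_READ;
	args.command = 0x00;			/* hypothetical register */
	args.size = I2C_SMBUS_BYTE_DATA;	/* maps to AMD756_BYTE_DATA */
	args.data = &data;

	if (ioctl(fd, I2C_SMBUS, &args) < 0) {
		perror("I2C_SMBUS");
		close(fd);
		return 1;
	}

	printf("byte data = 0x%02x\n", data.byte);
	close(fd);
	return 0;
}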
// SPDX-License-Identifier: GPL-2.0+ /* * Copyright (C) 2002 Motorola GSG-China * * Author: * Darius Augulis, Teltonika Inc. * * Desc.: * Implementation of I2C Adapter/Algorithm Driver * for I2C Bus integrated in Freescale i.MX/MXC processors * * Derived from Motorola GSG China I2C example driver * * Copyright (C) 2005 Torsten Koschorrek <koschorrek at synertronixx.de * Copyright (C) 2005 Matthias Blaschke <blaschke at synertronixx.de * Copyright (C) 2007 RightHand Technologies, Inc. * Copyright (C) 2008 Darius Augulis <darius.augulis at teltonika.lt> * * Copyright 2013 Freescale Semiconductor, Inc. * Copyright 2020 NXP * */ #include <linux/acpi.h> #include <linux/clk.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/dmaengine.h> #include <linux/dmapool.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/gpio/consumer.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/hrtimer.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_dma.h> #include <linux/pinctrl/consumer.h> #include <linux/platform_data/i2c-imx.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/sched.h> #include <linux/slab.h> /* This will be the driver name the kernel reports */ #define DRIVER_NAME "imx-i2c" #define I2C_IMX_CHECK_DELAY 30000 /* Time to check for bus idle, in NS */ /* * Enable DMA if transfer byte size is bigger than this threshold. * As the hardware request, it must bigger than 4 bytes.\ * I have set '16' here, maybe it's not the best but I think it's * the appropriate. */ #define DMA_THRESHOLD 16 #define DMA_TIMEOUT 1000 /* IMX I2C registers: * the I2C register offset is different between SoCs, * to provide support for all these chips, split the * register offset into a fixed base address and a * variable shift value, then the full register offset * will be calculated by * reg_off = ( reg_base_addr << reg_shift) */ #define IMX_I2C_IADR 0x00 /* i2c slave address */ #define IMX_I2C_IFDR 0x01 /* i2c frequency divider */ #define IMX_I2C_I2CR 0x02 /* i2c control */ #define IMX_I2C_I2SR 0x03 /* i2c status */ #define IMX_I2C_I2DR 0x04 /* i2c transfer data */ /* * All of the layerscape series SoCs support IBIC register. */ #define IMX_I2C_IBIC 0x05 /* i2c bus interrupt config */ #define IMX_I2C_REGSHIFT 2 #define VF610_I2C_REGSHIFT 0 /* Bits of IMX I2C registers */ #define I2SR_RXAK 0x01 #define I2SR_IIF 0x02 #define I2SR_SRW 0x04 #define I2SR_IAL 0x10 #define I2SR_IBB 0x20 #define I2SR_IAAS 0x40 #define I2SR_ICF 0x80 #define I2CR_DMAEN 0x02 #define I2CR_RSTA 0x04 #define I2CR_TXAK 0x08 #define I2CR_MTX 0x10 #define I2CR_MSTA 0x20 #define I2CR_IIEN 0x40 #define I2CR_IEN 0x80 #define IBIC_BIIE 0x80 /* Bus idle interrupt enable */ /* register bits different operating codes definition: * 1) I2SR: Interrupt flags clear operation differ between SoCs: * - write zero to clear(w0c) INT flag on i.MX, * - but write one to clear(w1c) INT flag on Vybrid. * 2) I2CR: I2C module enable operation also differ between SoCs: * - set I2CR_IEN bit enable the module on i.MX, * - but clear I2CR_IEN bit enable the module on Vybrid. 
*/ #define I2SR_CLR_OPCODE_W0C 0x0 #define I2SR_CLR_OPCODE_W1C (I2SR_IAL | I2SR_IIF) #define I2CR_IEN_OPCODE_0 0x0 #define I2CR_IEN_OPCODE_1 I2CR_IEN #define I2C_PM_TIMEOUT 10 /* ms */ /* * sorted list of clock divider, register value pairs * taken from table 26-5, p.26-9, Freescale i.MX * Integrated Portable System Processor Reference Manual * Document Number: MC9328MXLRM, Rev. 5.1, 06/2007 * * Duplicated divider values removed from list */ struct imx_i2c_clk_pair { u16 div; u16 val; }; static struct imx_i2c_clk_pair imx_i2c_clk_div[] = { { 22, 0x20 }, { 24, 0x21 }, { 26, 0x22 }, { 28, 0x23 }, { 30, 0x00 }, { 32, 0x24 }, { 36, 0x25 }, { 40, 0x26 }, { 42, 0x03 }, { 44, 0x27 }, { 48, 0x28 }, { 52, 0x05 }, { 56, 0x29 }, { 60, 0x06 }, { 64, 0x2A }, { 72, 0x2B }, { 80, 0x2C }, { 88, 0x09 }, { 96, 0x2D }, { 104, 0x0A }, { 112, 0x2E }, { 128, 0x2F }, { 144, 0x0C }, { 160, 0x30 }, { 192, 0x31 }, { 224, 0x32 }, { 240, 0x0F }, { 256, 0x33 }, { 288, 0x10 }, { 320, 0x34 }, { 384, 0x35 }, { 448, 0x36 }, { 480, 0x13 }, { 512, 0x37 }, { 576, 0x14 }, { 640, 0x38 }, { 768, 0x39 }, { 896, 0x3A }, { 960, 0x17 }, { 1024, 0x3B }, { 1152, 0x18 }, { 1280, 0x3C }, { 1536, 0x3D }, { 1792, 0x3E }, { 1920, 0x1B }, { 2048, 0x3F }, { 2304, 0x1C }, { 2560, 0x1D }, { 3072, 0x1E }, { 3840, 0x1F } }; /* Vybrid VF610 clock divider, register value pairs */ static struct imx_i2c_clk_pair vf610_i2c_clk_div[] = { { 20, 0x00 }, { 22, 0x01 }, { 24, 0x02 }, { 26, 0x03 }, { 28, 0x04 }, { 30, 0x05 }, { 32, 0x09 }, { 34, 0x06 }, { 36, 0x0A }, { 40, 0x07 }, { 44, 0x0C }, { 48, 0x0D }, { 52, 0x43 }, { 56, 0x0E }, { 60, 0x45 }, { 64, 0x12 }, { 68, 0x0F }, { 72, 0x13 }, { 80, 0x14 }, { 88, 0x15 }, { 96, 0x19 }, { 104, 0x16 }, { 112, 0x1A }, { 128, 0x17 }, { 136, 0x4F }, { 144, 0x1C }, { 160, 0x1D }, { 176, 0x55 }, { 192, 0x1E }, { 208, 0x56 }, { 224, 0x22 }, { 228, 0x24 }, { 240, 0x1F }, { 256, 0x23 }, { 288, 0x5C }, { 320, 0x25 }, { 384, 0x26 }, { 448, 0x2A }, { 480, 0x27 }, { 512, 0x2B }, { 576, 0x2C }, { 640, 0x2D }, { 768, 0x31 }, { 896, 0x32 }, { 960, 0x2F }, { 1024, 0x33 }, { 1152, 0x34 }, { 1280, 0x35 }, { 1536, 0x36 }, { 1792, 0x3A }, { 1920, 0x37 }, { 2048, 0x3B }, { 2304, 0x3C }, { 2560, 0x3D }, { 3072, 0x3E }, { 3584, 0x7A }, { 3840, 0x3F }, { 4096, 0x7B }, { 5120, 0x7D }, { 6144, 0x7E }, }; enum imx_i2c_type { IMX1_I2C, IMX21_I2C, VF610_I2C, }; struct imx_i2c_hwdata { enum imx_i2c_type devtype; unsigned int regshift; struct imx_i2c_clk_pair *clk_div; unsigned int ndivs; unsigned int i2sr_clr_opcode; unsigned int i2cr_ien_opcode; /* * Errata ERR007805 or e7805: * I2C: When the I2C clock speed is configured for 400 kHz, * the SCL low period violates the I2C spec of 1.3 uS min. 
*/ bool has_err007805; }; struct imx_i2c_dma { struct dma_chan *chan_tx; struct dma_chan *chan_rx; struct dma_chan *chan_using; struct completion cmd_complete; dma_addr_t dma_buf; unsigned int dma_len; enum dma_transfer_direction dma_transfer_dir; enum dma_data_direction dma_data_dir; }; struct imx_i2c_struct { struct i2c_adapter adapter; struct clk *clk; struct notifier_block clk_change_nb; void __iomem *base; wait_queue_head_t queue; unsigned long i2csr; unsigned int disable_delay; int stopped; unsigned int ifdr; /* IMX_I2C_IFDR */ unsigned int cur_clk; unsigned int bitrate; const struct imx_i2c_hwdata *hwdata; struct i2c_bus_recovery_info rinfo; struct pinctrl *pinctrl; struct pinctrl_state *pinctrl_pins_default; struct pinctrl_state *pinctrl_pins_gpio; struct imx_i2c_dma *dma; struct i2c_client *slave; enum i2c_slave_event last_slave_event; /* For checking slave events. */ spinlock_t slave_lock; struct hrtimer slave_timer; }; static const struct imx_i2c_hwdata imx1_i2c_hwdata = { .devtype = IMX1_I2C, .regshift = IMX_I2C_REGSHIFT, .clk_div = imx_i2c_clk_div, .ndivs = ARRAY_SIZE(imx_i2c_clk_div), .i2sr_clr_opcode = I2SR_CLR_OPCODE_W0C, .i2cr_ien_opcode = I2CR_IEN_OPCODE_1, }; static const struct imx_i2c_hwdata imx21_i2c_hwdata = { .devtype = IMX21_I2C, .regshift = IMX_I2C_REGSHIFT, .clk_div = imx_i2c_clk_div, .ndivs = ARRAY_SIZE(imx_i2c_clk_div), .i2sr_clr_opcode = I2SR_CLR_OPCODE_W0C, .i2cr_ien_opcode = I2CR_IEN_OPCODE_1, }; static const struct imx_i2c_hwdata imx6_i2c_hwdata = { .devtype = IMX21_I2C, .regshift = IMX_I2C_REGSHIFT, .clk_div = imx_i2c_clk_div, .ndivs = ARRAY_SIZE(imx_i2c_clk_div), .i2sr_clr_opcode = I2SR_CLR_OPCODE_W0C, .i2cr_ien_opcode = I2CR_IEN_OPCODE_1, .has_err007805 = true, }; static struct imx_i2c_hwdata vf610_i2c_hwdata = { .devtype = VF610_I2C, .regshift = VF610_I2C_REGSHIFT, .clk_div = vf610_i2c_clk_div, .ndivs = ARRAY_SIZE(vf610_i2c_clk_div), .i2sr_clr_opcode = I2SR_CLR_OPCODE_W1C, .i2cr_ien_opcode = I2CR_IEN_OPCODE_0, }; static const struct platform_device_id imx_i2c_devtype[] = { { .name = "imx1-i2c", .driver_data = (kernel_ulong_t)&imx1_i2c_hwdata, }, { .name = "imx21-i2c", .driver_data = (kernel_ulong_t)&imx21_i2c_hwdata, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(platform, imx_i2c_devtype); static const struct of_device_id i2c_imx_dt_ids[] = { { .compatible = "fsl,imx1-i2c", .data = &imx1_i2c_hwdata, }, { .compatible = "fsl,imx21-i2c", .data = &imx21_i2c_hwdata, }, { .compatible = "fsl,imx6q-i2c", .data = &imx6_i2c_hwdata, }, { .compatible = "fsl,imx6sl-i2c", .data = &imx6_i2c_hwdata, }, { .compatible = "fsl,imx6sll-i2c", .data = &imx6_i2c_hwdata, }, { .compatible = "fsl,imx6sx-i2c", .data = &imx6_i2c_hwdata, }, { .compatible = "fsl,imx6ul-i2c", .data = &imx6_i2c_hwdata, }, { .compatible = "fsl,imx7s-i2c", .data = &imx6_i2c_hwdata, }, { .compatible = "fsl,imx8mm-i2c", .data = &imx6_i2c_hwdata, }, { .compatible = "fsl,imx8mn-i2c", .data = &imx6_i2c_hwdata, }, { .compatible = "fsl,imx8mp-i2c", .data = &imx6_i2c_hwdata, }, { .compatible = "fsl,imx8mq-i2c", .data = &imx6_i2c_hwdata, }, { .compatible = "fsl,vf610-i2c", .data = &vf610_i2c_hwdata, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, i2c_imx_dt_ids); static const struct acpi_device_id i2c_imx_acpi_ids[] = { {"NXP0001", .driver_data = (kernel_ulong_t)&vf610_i2c_hwdata}, { } }; MODULE_DEVICE_TABLE(acpi, i2c_imx_acpi_ids); static inline int is_imx1_i2c(struct imx_i2c_struct *i2c_imx) { return i2c_imx->hwdata->devtype == IMX1_I2C; } static inline int is_vf610_i2c(struct imx_i2c_struct *i2c_imx) { return 
i2c_imx->hwdata->devtype == VF610_I2C; } static inline void imx_i2c_write_reg(unsigned int val, struct imx_i2c_struct *i2c_imx, unsigned int reg) { writeb(val, i2c_imx->base + (reg << i2c_imx->hwdata->regshift)); } static inline unsigned char imx_i2c_read_reg(struct imx_i2c_struct *i2c_imx, unsigned int reg) { return readb(i2c_imx->base + (reg << i2c_imx->hwdata->regshift)); } static void i2c_imx_clear_irq(struct imx_i2c_struct *i2c_imx, unsigned int bits) { unsigned int temp; /* * i2sr_clr_opcode is the value to clear all interrupts. Here we want to * clear only <bits>, so we write ~i2sr_clr_opcode with just <bits> * toggled. This is required because i.MX needs W0C and Vybrid uses W1C. */ temp = ~i2c_imx->hwdata->i2sr_clr_opcode ^ bits; imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2SR); } /* Set up i2c controller register and i2c status register to default value. */ static void i2c_imx_reset_regs(struct imx_i2c_struct *i2c_imx) { imx_i2c_write_reg(i2c_imx->hwdata->i2cr_ien_opcode ^ I2CR_IEN, i2c_imx, IMX_I2C_I2CR); i2c_imx_clear_irq(i2c_imx, I2SR_IIF | I2SR_IAL); } /* Functions for DMA support */ static void i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx, dma_addr_t phy_addr) { struct imx_i2c_dma *dma; struct dma_slave_config dma_sconfig; struct device *dev = &i2c_imx->adapter.dev; int ret; dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL); if (!dma) return; dma->chan_tx = dma_request_chan(dev, "tx"); if (IS_ERR(dma->chan_tx)) { ret = PTR_ERR(dma->chan_tx); if (ret != -ENODEV && ret != -EPROBE_DEFER) dev_err(dev, "can't request DMA tx channel (%d)\n", ret); goto fail_al; } dma_sconfig.dst_addr = phy_addr + (IMX_I2C_I2DR << i2c_imx->hwdata->regshift); dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; dma_sconfig.dst_maxburst = 1; dma_sconfig.direction = DMA_MEM_TO_DEV; ret = dmaengine_slave_config(dma->chan_tx, &dma_sconfig); if (ret < 0) { dev_err(dev, "can't configure tx channel (%d)\n", ret); goto fail_tx; } dma->chan_rx = dma_request_chan(dev, "rx"); if (IS_ERR(dma->chan_rx)) { ret = PTR_ERR(dma->chan_rx); if (ret != -ENODEV && ret != -EPROBE_DEFER) dev_err(dev, "can't request DMA rx channel (%d)\n", ret); goto fail_tx; } dma_sconfig.src_addr = phy_addr + (IMX_I2C_I2DR << i2c_imx->hwdata->regshift); dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; dma_sconfig.src_maxburst = 1; dma_sconfig.direction = DMA_DEV_TO_MEM; ret = dmaengine_slave_config(dma->chan_rx, &dma_sconfig); if (ret < 0) { dev_err(dev, "can't configure rx channel (%d)\n", ret); goto fail_rx; } i2c_imx->dma = dma; init_completion(&dma->cmd_complete); dev_info(dev, "using %s (tx) and %s (rx) for DMA transfers\n", dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx)); return; fail_rx: dma_release_channel(dma->chan_rx); fail_tx: dma_release_channel(dma->chan_tx); fail_al: devm_kfree(dev, dma); } static void i2c_imx_dma_callback(void *arg) { struct imx_i2c_struct *i2c_imx = (struct imx_i2c_struct *)arg; struct imx_i2c_dma *dma = i2c_imx->dma; dma_unmap_single(dma->chan_using->device->dev, dma->dma_buf, dma->dma_len, dma->dma_data_dir); complete(&dma->cmd_complete); } static int i2c_imx_dma_xfer(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs) { struct imx_i2c_dma *dma = i2c_imx->dma; struct dma_async_tx_descriptor *txdesc; struct device *dev = &i2c_imx->adapter.dev; struct device *chan_dev = dma->chan_using->device->dev; dma->dma_buf = dma_map_single(chan_dev, msgs->buf, dma->dma_len, dma->dma_data_dir); if (dma_mapping_error(chan_dev, dma->dma_buf)) { dev_err(dev, "DMA mapping failed\n"); goto err_map; 
} txdesc = dmaengine_prep_slave_single(dma->chan_using, dma->dma_buf, dma->dma_len, dma->dma_transfer_dir, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!txdesc) { dev_err(dev, "Not able to get desc for DMA xfer\n"); goto err_desc; } reinit_completion(&dma->cmd_complete); txdesc->callback = i2c_imx_dma_callback; txdesc->callback_param = i2c_imx; if (dma_submit_error(dmaengine_submit(txdesc))) { dev_err(dev, "DMA submit failed\n"); goto err_submit; } dma_async_issue_pending(dma->chan_using); return 0; err_submit: dmaengine_terminate_sync(dma->chan_using); err_desc: dma_unmap_single(chan_dev, dma->dma_buf, dma->dma_len, dma->dma_data_dir); err_map: return -EINVAL; } static void i2c_imx_dma_free(struct imx_i2c_struct *i2c_imx) { struct imx_i2c_dma *dma = i2c_imx->dma; dma->dma_buf = 0; dma->dma_len = 0; dma_release_channel(dma->chan_tx); dma->chan_tx = NULL; dma_release_channel(dma->chan_rx); dma->chan_rx = NULL; dma->chan_using = NULL; } static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy, bool atomic) { unsigned long orig_jiffies = jiffies; unsigned int temp; while (1) { temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR); /* check for arbitration lost */ if (temp & I2SR_IAL) { i2c_imx_clear_irq(i2c_imx, I2SR_IAL); return -EAGAIN; } if (for_busy && (temp & I2SR_IBB)) { i2c_imx->stopped = 0; break; } if (!for_busy && !(temp & I2SR_IBB)) { i2c_imx->stopped = 1; break; } if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) { dev_dbg(&i2c_imx->adapter.dev, "<%s> I2C bus is busy\n", __func__); return -ETIMEDOUT; } if (atomic) udelay(100); else schedule(); } return 0; } static int i2c_imx_trx_complete(struct imx_i2c_struct *i2c_imx, bool atomic) { if (atomic) { void __iomem *addr = i2c_imx->base + (IMX_I2C_I2SR << i2c_imx->hwdata->regshift); unsigned int regval; /* * The formula for the poll timeout is documented in the RM * Rev.5 on page 1878: * T_min = 10/F_scl * Set the value hard as it is done for the non-atomic use-case. * Use 10 kHz for the calculation since this is the minimum * allowed SMBus frequency. Also add an offset of 100us since it * turned out that the I2SR_IIF bit isn't set correctly within * the minimum timeout in polling mode. 
*/ readb_poll_timeout_atomic(addr, regval, regval & I2SR_IIF, 5, 1000 + 100); i2c_imx->i2csr = regval; i2c_imx_clear_irq(i2c_imx, I2SR_IIF | I2SR_IAL); } else { wait_event_timeout(i2c_imx->queue, i2c_imx->i2csr & I2SR_IIF, HZ / 10); } if (unlikely(!(i2c_imx->i2csr & I2SR_IIF))) { dev_dbg(&i2c_imx->adapter.dev, "<%s> Timeout\n", __func__); return -ETIMEDOUT; } /* check for arbitration lost */ if (i2c_imx->i2csr & I2SR_IAL) { dev_dbg(&i2c_imx->adapter.dev, "<%s> Arbitration lost\n", __func__); i2c_imx_clear_irq(i2c_imx, I2SR_IAL); i2c_imx->i2csr = 0; return -EAGAIN; } dev_dbg(&i2c_imx->adapter.dev, "<%s> TRX complete\n", __func__); i2c_imx->i2csr = 0; return 0; } static int i2c_imx_acked(struct imx_i2c_struct *i2c_imx) { if (imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR) & I2SR_RXAK) { dev_dbg(&i2c_imx->adapter.dev, "<%s> No ACK\n", __func__); return -ENXIO; /* No ACK */ } dev_dbg(&i2c_imx->adapter.dev, "<%s> ACK received\n", __func__); return 0; } static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx, unsigned int i2c_clk_rate) { struct imx_i2c_clk_pair *i2c_clk_div = i2c_imx->hwdata->clk_div; unsigned int div; int i; if (i2c_imx->hwdata->has_err007805 && i2c_imx->bitrate > 384000) { dev_dbg(&i2c_imx->adapter.dev, "SoC errata ERR007805 or e7805 applies, bus frequency limited from %d Hz to 384000 Hz.\n", i2c_imx->bitrate); i2c_imx->bitrate = 384000; } /* Divider value calculation */ if (i2c_imx->cur_clk == i2c_clk_rate) return; i2c_imx->cur_clk = i2c_clk_rate; div = DIV_ROUND_UP(i2c_clk_rate, i2c_imx->bitrate); if (div < i2c_clk_div[0].div) i = 0; else if (div > i2c_clk_div[i2c_imx->hwdata->ndivs - 1].div) i = i2c_imx->hwdata->ndivs - 1; else for (i = 0; i2c_clk_div[i].div < div; i++) ; /* Store divider value */ i2c_imx->ifdr = i2c_clk_div[i].val; /* * There dummy delay is calculated. * It should be about one I2C clock period long. * This delay is used in I2C bus disable function * to fix chip hardware bug. 
*/ i2c_imx->disable_delay = DIV_ROUND_UP(500000U * i2c_clk_div[i].div, i2c_clk_rate / 2); #ifdef CONFIG_I2C_DEBUG_BUS dev_dbg(&i2c_imx->adapter.dev, "I2C_CLK=%d, REQ DIV=%d\n", i2c_clk_rate, div); dev_dbg(&i2c_imx->adapter.dev, "IFDR[IC]=0x%x, REAL DIV=%d\n", i2c_clk_div[i].val, i2c_clk_div[i].div); #endif } static int i2c_imx_clk_notifier_call(struct notifier_block *nb, unsigned long action, void *data) { struct clk_notifier_data *ndata = data; struct imx_i2c_struct *i2c_imx = container_of(nb, struct imx_i2c_struct, clk_change_nb); if (action & POST_RATE_CHANGE) i2c_imx_set_clk(i2c_imx, ndata->new_rate); return NOTIFY_OK; } static int i2c_imx_start(struct imx_i2c_struct *i2c_imx, bool atomic) { unsigned int temp = 0; int result; imx_i2c_write_reg(i2c_imx->ifdr, i2c_imx, IMX_I2C_IFDR); /* Enable I2C controller */ imx_i2c_write_reg(i2c_imx->hwdata->i2sr_clr_opcode, i2c_imx, IMX_I2C_I2SR); imx_i2c_write_reg(i2c_imx->hwdata->i2cr_ien_opcode, i2c_imx, IMX_I2C_I2CR); /* Wait controller to be stable */ if (atomic) udelay(50); else usleep_range(50, 150); /* Start I2C transaction */ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); temp |= I2CR_MSTA; imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); result = i2c_imx_bus_busy(i2c_imx, 1, atomic); if (result) return result; temp |= I2CR_IIEN | I2CR_MTX | I2CR_TXAK; if (atomic) temp &= ~I2CR_IIEN; /* Disable interrupt */ temp &= ~I2CR_DMAEN; imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); return result; } static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx, bool atomic) { unsigned int temp = 0; if (!i2c_imx->stopped) { /* Stop I2C transaction */ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); if (!(temp & I2CR_MSTA)) i2c_imx->stopped = 1; temp &= ~(I2CR_MSTA | I2CR_MTX); if (i2c_imx->dma) temp &= ~I2CR_DMAEN; imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); } if (is_imx1_i2c(i2c_imx)) { /* * This delay caused by an i.MXL hardware bug. * If no (or too short) delay, no "STOP" bit will be generated. */ udelay(i2c_imx->disable_delay); } if (!i2c_imx->stopped) i2c_imx_bus_busy(i2c_imx, 0, atomic); /* Disable I2C controller */ temp = i2c_imx->hwdata->i2cr_ien_opcode ^ I2CR_IEN, imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); } /* * Enable bus idle interrupts * Note: IBIC register will be cleared after disabled i2c module. * All of layerscape series SoCs support IBIC register. */ static void i2c_imx_enable_bus_idle(struct imx_i2c_struct *i2c_imx) { if (is_vf610_i2c(i2c_imx)) { unsigned int temp; temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_IBIC); temp |= IBIC_BIIE; imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_IBIC); } } static void i2c_imx_slave_event(struct imx_i2c_struct *i2c_imx, enum i2c_slave_event event, u8 *val) { i2c_slave_event(i2c_imx->slave, event, val); i2c_imx->last_slave_event = event; } static void i2c_imx_slave_finish_op(struct imx_i2c_struct *i2c_imx) { u8 val = 0; while (i2c_imx->last_slave_event != I2C_SLAVE_STOP) { switch (i2c_imx->last_slave_event) { case I2C_SLAVE_READ_REQUESTED: i2c_imx_slave_event(i2c_imx, I2C_SLAVE_READ_PROCESSED, &val); break; case I2C_SLAVE_WRITE_REQUESTED: case I2C_SLAVE_READ_PROCESSED: case I2C_SLAVE_WRITE_RECEIVED: i2c_imx_slave_event(i2c_imx, I2C_SLAVE_STOP, &val); break; case I2C_SLAVE_STOP: break; } } } /* Returns true if the timer should be restarted, false if not. 
*/ static irqreturn_t i2c_imx_slave_handle(struct imx_i2c_struct *i2c_imx, unsigned int status, unsigned int ctl) { u8 value = 0; if (status & I2SR_IAL) { /* Arbitration lost */ i2c_imx_clear_irq(i2c_imx, I2SR_IAL); if (!(status & I2SR_IAAS)) return IRQ_HANDLED; } if (!(status & I2SR_IBB)) { /* No master on the bus, that could mean a stop condition. */ i2c_imx_slave_finish_op(i2c_imx); return IRQ_HANDLED; } if (!(status & I2SR_ICF)) /* Data transfer still in progress, ignore this. */ goto out; if (status & I2SR_IAAS) { /* Addressed as a slave */ i2c_imx_slave_finish_op(i2c_imx); if (status & I2SR_SRW) { /* Master wants to read from us*/ dev_dbg(&i2c_imx->adapter.dev, "read requested"); i2c_imx_slave_event(i2c_imx, I2C_SLAVE_READ_REQUESTED, &value); /* Slave transmit */ ctl |= I2CR_MTX; imx_i2c_write_reg(ctl, i2c_imx, IMX_I2C_I2CR); /* Send data */ imx_i2c_write_reg(value, i2c_imx, IMX_I2C_I2DR); } else { /* Master wants to write to us */ dev_dbg(&i2c_imx->adapter.dev, "write requested"); i2c_imx_slave_event(i2c_imx, I2C_SLAVE_WRITE_REQUESTED, &value); /* Slave receive */ ctl &= ~I2CR_MTX; imx_i2c_write_reg(ctl, i2c_imx, IMX_I2C_I2CR); /* Dummy read */ imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); } } else if (!(ctl & I2CR_MTX)) { /* Receive mode */ value = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); i2c_imx_slave_event(i2c_imx, I2C_SLAVE_WRITE_RECEIVED, &value); } else if (!(status & I2SR_RXAK)) { /* Transmit mode received ACK */ ctl |= I2CR_MTX; imx_i2c_write_reg(ctl, i2c_imx, IMX_I2C_I2CR); i2c_imx_slave_event(i2c_imx, I2C_SLAVE_READ_PROCESSED, &value); imx_i2c_write_reg(value, i2c_imx, IMX_I2C_I2DR); } else { /* Transmit mode received NAK, operation is done */ ctl &= ~I2CR_MTX; imx_i2c_write_reg(ctl, i2c_imx, IMX_I2C_I2CR); imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); i2c_imx_slave_finish_op(i2c_imx); return IRQ_HANDLED; } out: /* * No need to check the return value here. If it returns 0 or * 1, then everything is fine. If it returns -1, then the * timer is running in the handler. This will still work, * though it may be redone (or already have been done) by the * timer function. */ hrtimer_try_to_cancel(&i2c_imx->slave_timer); hrtimer_forward_now(&i2c_imx->slave_timer, I2C_IMX_CHECK_DELAY); hrtimer_restart(&i2c_imx->slave_timer); return IRQ_HANDLED; } static enum hrtimer_restart i2c_imx_slave_timeout(struct hrtimer *t) { struct imx_i2c_struct *i2c_imx = container_of(t, struct imx_i2c_struct, slave_timer); unsigned int ctl, status; unsigned long flags; spin_lock_irqsave(&i2c_imx->slave_lock, flags); status = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR); ctl = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); i2c_imx_slave_handle(i2c_imx, status, ctl); spin_unlock_irqrestore(&i2c_imx->slave_lock, flags); return HRTIMER_NORESTART; } static void i2c_imx_slave_init(struct imx_i2c_struct *i2c_imx) { int temp; /* Set slave addr. 
*/ imx_i2c_write_reg((i2c_imx->slave->addr << 1), i2c_imx, IMX_I2C_IADR); i2c_imx_reset_regs(i2c_imx); /* Enable module */ temp = i2c_imx->hwdata->i2cr_ien_opcode; imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); /* Enable interrupt from i2c module */ temp |= I2CR_IIEN; imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); i2c_imx_enable_bus_idle(i2c_imx); } static int i2c_imx_reg_slave(struct i2c_client *client) { struct imx_i2c_struct *i2c_imx = i2c_get_adapdata(client->adapter); int ret; if (i2c_imx->slave) return -EBUSY; i2c_imx->slave = client; i2c_imx->last_slave_event = I2C_SLAVE_STOP; /* Resume */ ret = pm_runtime_resume_and_get(i2c_imx->adapter.dev.parent); if (ret < 0) { dev_err(&i2c_imx->adapter.dev, "failed to resume i2c controller"); return ret; } i2c_imx_slave_init(i2c_imx); return 0; } static int i2c_imx_unreg_slave(struct i2c_client *client) { struct imx_i2c_struct *i2c_imx = i2c_get_adapdata(client->adapter); int ret; if (!i2c_imx->slave) return -EINVAL; /* Reset slave address. */ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IADR); i2c_imx_reset_regs(i2c_imx); i2c_imx->slave = NULL; /* Suspend */ ret = pm_runtime_put_sync(i2c_imx->adapter.dev.parent); if (ret < 0) dev_err(&i2c_imx->adapter.dev, "failed to suspend i2c controller"); return ret; } static irqreturn_t i2c_imx_master_isr(struct imx_i2c_struct *i2c_imx, unsigned int status) { /* save status register */ i2c_imx->i2csr = status; wake_up(&i2c_imx->queue); return IRQ_HANDLED; } static irqreturn_t i2c_imx_isr(int irq, void *dev_id) { struct imx_i2c_struct *i2c_imx = dev_id; unsigned int ctl, status; unsigned long flags; spin_lock_irqsave(&i2c_imx->slave_lock, flags); status = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR); ctl = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); if (status & I2SR_IIF) { i2c_imx_clear_irq(i2c_imx, I2SR_IIF); if (i2c_imx->slave) { if (!(ctl & I2CR_MSTA)) { irqreturn_t ret; ret = i2c_imx_slave_handle(i2c_imx, status, ctl); spin_unlock_irqrestore(&i2c_imx->slave_lock, flags); return ret; } i2c_imx_slave_finish_op(i2c_imx); } spin_unlock_irqrestore(&i2c_imx->slave_lock, flags); return i2c_imx_master_isr(i2c_imx, status); } spin_unlock_irqrestore(&i2c_imx->slave_lock, flags); return IRQ_NONE; } static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs) { int result; unsigned long time_left; unsigned int temp = 0; unsigned long orig_jiffies = jiffies; struct imx_i2c_dma *dma = i2c_imx->dma; struct device *dev = &i2c_imx->adapter.dev; dma->chan_using = dma->chan_tx; dma->dma_transfer_dir = DMA_MEM_TO_DEV; dma->dma_data_dir = DMA_TO_DEVICE; dma->dma_len = msgs->len - 1; result = i2c_imx_dma_xfer(i2c_imx, msgs); if (result) return result; temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); temp |= I2CR_DMAEN; imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); /* * Write slave address. * The first byte must be transmitted by the CPU. */ imx_i2c_write_reg(i2c_8bit_addr_from_msg(msgs), i2c_imx, IMX_I2C_I2DR); time_left = wait_for_completion_timeout( &i2c_imx->dma->cmd_complete, msecs_to_jiffies(DMA_TIMEOUT)); if (time_left == 0) { dmaengine_terminate_sync(dma->chan_using); return -ETIMEDOUT; } /* Waiting for transfer complete. 
*/ while (1) { temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR); if (temp & I2SR_ICF) break; if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(DMA_TIMEOUT))) { dev_dbg(dev, "<%s> Timeout\n", __func__); return -ETIMEDOUT; } schedule(); } temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); temp &= ~I2CR_DMAEN; imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); /* The last data byte must be transferred by the CPU. */ imx_i2c_write_reg(msgs->buf[msgs->len-1], i2c_imx, IMX_I2C_I2DR); result = i2c_imx_trx_complete(i2c_imx, false); if (result) return result; return i2c_imx_acked(i2c_imx); } static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bool is_lastmsg) { int result; unsigned long time_left; unsigned int temp; unsigned long orig_jiffies = jiffies; struct imx_i2c_dma *dma = i2c_imx->dma; struct device *dev = &i2c_imx->adapter.dev; dma->chan_using = dma->chan_rx; dma->dma_transfer_dir = DMA_DEV_TO_MEM; dma->dma_data_dir = DMA_FROM_DEVICE; /* The last two data bytes must be transferred by the CPU. */ dma->dma_len = msgs->len - 2; result = i2c_imx_dma_xfer(i2c_imx, msgs); if (result) return result; time_left = wait_for_completion_timeout( &i2c_imx->dma->cmd_complete, msecs_to_jiffies(DMA_TIMEOUT)); if (time_left == 0) { dmaengine_terminate_sync(dma->chan_using); return -ETIMEDOUT; } /* waiting for transfer complete. */ while (1) { temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR); if (temp & I2SR_ICF) break; if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(DMA_TIMEOUT))) { dev_dbg(dev, "<%s> Timeout\n", __func__); return -ETIMEDOUT; } schedule(); } temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); temp &= ~I2CR_DMAEN; imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); /* read n-1 byte data */ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); temp |= I2CR_TXAK; imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); msgs->buf[msgs->len-2] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); /* read n byte data */ result = i2c_imx_trx_complete(i2c_imx, false); if (result) return result; if (is_lastmsg) { /* * It must generate STOP before read I2DR to prevent * controller from generating another clock cycle */ dev_dbg(dev, "<%s> clear MSTA\n", __func__); temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); if (!(temp & I2CR_MSTA)) i2c_imx->stopped = 1; temp &= ~(I2CR_MSTA | I2CR_MTX); imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); if (!i2c_imx->stopped) i2c_imx_bus_busy(i2c_imx, 0, false); } else { /* * For i2c master receiver repeat restart operation like: * read -> repeat MSTA -> read/write * The controller must set MTX before read the last byte in * the first read operation, otherwise the first read cost * one extra clock cycle. 
*/ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); temp |= I2CR_MTX; imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); } msgs->buf[msgs->len-1] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); return 0; } static int i2c_imx_write(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bool atomic) { int i, result; dev_dbg(&i2c_imx->adapter.dev, "<%s> write slave address: addr=0x%x\n", __func__, i2c_8bit_addr_from_msg(msgs)); /* write slave address */ imx_i2c_write_reg(i2c_8bit_addr_from_msg(msgs), i2c_imx, IMX_I2C_I2DR); result = i2c_imx_trx_complete(i2c_imx, atomic); if (result) return result; result = i2c_imx_acked(i2c_imx); if (result) return result; dev_dbg(&i2c_imx->adapter.dev, "<%s> write data\n", __func__); /* write data */ for (i = 0; i < msgs->len; i++) { dev_dbg(&i2c_imx->adapter.dev, "<%s> write byte: B%d=0x%X\n", __func__, i, msgs->buf[i]); imx_i2c_write_reg(msgs->buf[i], i2c_imx, IMX_I2C_I2DR); result = i2c_imx_trx_complete(i2c_imx, atomic); if (result) return result; result = i2c_imx_acked(i2c_imx); if (result) return result; } return 0; } static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bool is_lastmsg, bool atomic) { int i, result; unsigned int temp; int block_data = msgs->flags & I2C_M_RECV_LEN; int use_dma = i2c_imx->dma && msgs->flags & I2C_M_DMA_SAFE && msgs->len >= DMA_THRESHOLD && !block_data; dev_dbg(&i2c_imx->adapter.dev, "<%s> write slave address: addr=0x%x\n", __func__, i2c_8bit_addr_from_msg(msgs)); /* write slave address */ imx_i2c_write_reg(i2c_8bit_addr_from_msg(msgs), i2c_imx, IMX_I2C_I2DR); result = i2c_imx_trx_complete(i2c_imx, atomic); if (result) return result; result = i2c_imx_acked(i2c_imx); if (result) return result; dev_dbg(&i2c_imx->adapter.dev, "<%s> setup bus\n", __func__); /* setup bus to read data */ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); temp &= ~I2CR_MTX; /* * Reset the I2CR_TXAK flag initially for SMBus block read since the * length is unknown */ if ((msgs->len - 1) || block_data) temp &= ~I2CR_TXAK; if (use_dma) temp |= I2CR_DMAEN; imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); /* dummy read */ dev_dbg(&i2c_imx->adapter.dev, "<%s> read data\n", __func__); if (use_dma) return i2c_imx_dma_read(i2c_imx, msgs, is_lastmsg); /* read data */ for (i = 0; i < msgs->len; i++) { u8 len = 0; result = i2c_imx_trx_complete(i2c_imx, atomic); if (result) return result; /* * First byte is the length of remaining packet * in the SMBus block data read. Add it to * msgs->len. */ if ((!i) && block_data) { len = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); if ((len == 0) || (len > I2C_SMBUS_BLOCK_MAX)) return -EPROTO; dev_dbg(&i2c_imx->adapter.dev, "<%s> read length: 0x%X\n", __func__, len); msgs->len += len; } if (i == (msgs->len - 1)) { if (is_lastmsg) { /* * It must generate STOP before read I2DR to prevent * controller from generating another clock cycle */ dev_dbg(&i2c_imx->adapter.dev, "<%s> clear MSTA\n", __func__); temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); if (!(temp & I2CR_MSTA)) i2c_imx->stopped = 1; temp &= ~(I2CR_MSTA | I2CR_MTX); imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); if (!i2c_imx->stopped) i2c_imx_bus_busy(i2c_imx, 0, atomic); } else { /* * For i2c master receiver repeat restart operation like: * read -> repeat MSTA -> read/write * The controller must set MTX before read the last byte in * the first read operation, otherwise the first read cost * one extra clock cycle. 
*/ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); temp |= I2CR_MTX; imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); } } else if (i == (msgs->len - 2)) { dev_dbg(&i2c_imx->adapter.dev, "<%s> set TXAK\n", __func__); temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); temp |= I2CR_TXAK; imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); } if ((!i) && block_data) msgs->buf[0] = len; else msgs->buf[i] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); dev_dbg(&i2c_imx->adapter.dev, "<%s> read byte: B%d=0x%X\n", __func__, i, msgs->buf[i]); } return 0; } static int i2c_imx_xfer_common(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num, bool atomic) { unsigned int i, temp; int result; bool is_lastmsg = false; struct imx_i2c_struct *i2c_imx = i2c_get_adapdata(adapter); /* Start I2C transfer */ result = i2c_imx_start(i2c_imx, atomic); if (result) { /* * Bus recovery uses gpiod_get_value_cansleep() which is not * allowed within atomic context. */ if (!atomic && i2c_imx->adapter.bus_recovery_info) { i2c_recover_bus(&i2c_imx->adapter); result = i2c_imx_start(i2c_imx, atomic); } } if (result) goto fail0; /* read/write data */ for (i = 0; i < num; i++) { if (i == num - 1) is_lastmsg = true; if (i) { dev_dbg(&i2c_imx->adapter.dev, "<%s> repeated start\n", __func__); temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); temp |= I2CR_RSTA; imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); result = i2c_imx_bus_busy(i2c_imx, 1, atomic); if (result) goto fail0; } dev_dbg(&i2c_imx->adapter.dev, "<%s> transfer message: %d\n", __func__, i); /* write/read data */ #ifdef CONFIG_I2C_DEBUG_BUS temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); dev_dbg(&i2c_imx->adapter.dev, "<%s> CONTROL: IEN=%d, IIEN=%d, MSTA=%d, MTX=%d, TXAK=%d, RSTA=%d\n", __func__, (temp & I2CR_IEN ? 1 : 0), (temp & I2CR_IIEN ? 1 : 0), (temp & I2CR_MSTA ? 1 : 0), (temp & I2CR_MTX ? 1 : 0), (temp & I2CR_TXAK ? 1 : 0), (temp & I2CR_RSTA ? 1 : 0)); temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR); dev_dbg(&i2c_imx->adapter.dev, "<%s> STATUS: ICF=%d, IAAS=%d, IBB=%d, IAL=%d, SRW=%d, IIF=%d, RXAK=%d\n", __func__, (temp & I2SR_ICF ? 1 : 0), (temp & I2SR_IAAS ? 1 : 0), (temp & I2SR_IBB ? 1 : 0), (temp & I2SR_IAL ? 1 : 0), (temp & I2SR_SRW ? 1 : 0), (temp & I2SR_IIF ? 1 : 0), (temp & I2SR_RXAK ? 1 : 0)); #endif if (msgs[i].flags & I2C_M_RD) { result = i2c_imx_read(i2c_imx, &msgs[i], is_lastmsg, atomic); } else { if (!atomic && i2c_imx->dma && msgs[i].len >= DMA_THRESHOLD && msgs[i].flags & I2C_M_DMA_SAFE) result = i2c_imx_dma_write(i2c_imx, &msgs[i]); else result = i2c_imx_write(i2c_imx, &msgs[i], atomic); } if (result) goto fail0; } fail0: /* Stop I2C transfer */ i2c_imx_stop(i2c_imx, atomic); dev_dbg(&i2c_imx->adapter.dev, "<%s> exit with: %s: %d\n", __func__, (result < 0) ? "error" : "success msg", (result < 0) ? result : num); /* After data is transferred, switch to slave mode(as a receiver) */ if (i2c_imx->slave) i2c_imx_slave_init(i2c_imx); return (result < 0) ? 
result : num; } static int i2c_imx_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) { struct imx_i2c_struct *i2c_imx = i2c_get_adapdata(adapter); int result; result = pm_runtime_resume_and_get(i2c_imx->adapter.dev.parent); if (result < 0) return result; result = i2c_imx_xfer_common(adapter, msgs, num, false); pm_runtime_mark_last_busy(i2c_imx->adapter.dev.parent); pm_runtime_put_autosuspend(i2c_imx->adapter.dev.parent); return result; } static int i2c_imx_xfer_atomic(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) { struct imx_i2c_struct *i2c_imx = i2c_get_adapdata(adapter); int result; result = clk_enable(i2c_imx->clk); if (result) return result; result = i2c_imx_xfer_common(adapter, msgs, num, true); clk_disable(i2c_imx->clk); return result; } static void i2c_imx_prepare_recovery(struct i2c_adapter *adap) { struct imx_i2c_struct *i2c_imx; i2c_imx = container_of(adap, struct imx_i2c_struct, adapter); pinctrl_select_state(i2c_imx->pinctrl, i2c_imx->pinctrl_pins_gpio); } static void i2c_imx_unprepare_recovery(struct i2c_adapter *adap) { struct imx_i2c_struct *i2c_imx; i2c_imx = container_of(adap, struct imx_i2c_struct, adapter); pinctrl_select_state(i2c_imx->pinctrl, i2c_imx->pinctrl_pins_default); } /* * We switch SCL and SDA to their GPIO function and do some bitbanging * for bus recovery. These alternative pinmux settings can be * described in the device tree by a separate pinctrl state "gpio". If * this is missing this is not a big problem, the only implication is * that we can't do bus recovery. */ static int i2c_imx_init_recovery_info(struct imx_i2c_struct *i2c_imx, struct platform_device *pdev) { struct i2c_bus_recovery_info *rinfo = &i2c_imx->rinfo; i2c_imx->pinctrl = devm_pinctrl_get(&pdev->dev); if (!i2c_imx->pinctrl) { dev_info(&pdev->dev, "pinctrl unavailable, bus recovery not supported\n"); return 0; } if (IS_ERR(i2c_imx->pinctrl)) { dev_info(&pdev->dev, "can't get pinctrl, bus recovery not supported\n"); return PTR_ERR(i2c_imx->pinctrl); } i2c_imx->pinctrl_pins_default = pinctrl_lookup_state(i2c_imx->pinctrl, PINCTRL_STATE_DEFAULT); i2c_imx->pinctrl_pins_gpio = pinctrl_lookup_state(i2c_imx->pinctrl, "gpio"); rinfo->sda_gpiod = devm_gpiod_get(&pdev->dev, "sda", GPIOD_IN); rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl", GPIOD_OUT_HIGH_OPEN_DRAIN); if (PTR_ERR(rinfo->sda_gpiod) == -EPROBE_DEFER || PTR_ERR(rinfo->scl_gpiod) == -EPROBE_DEFER) { return -EPROBE_DEFER; } else if (IS_ERR(rinfo->sda_gpiod) || IS_ERR(rinfo->scl_gpiod) || IS_ERR(i2c_imx->pinctrl_pins_default) || IS_ERR(i2c_imx->pinctrl_pins_gpio)) { dev_dbg(&pdev->dev, "recovery information incomplete\n"); return 0; } dev_dbg(&pdev->dev, "using scl%s for recovery\n", rinfo->sda_gpiod ? 
",sda" : ""); rinfo->prepare_recovery = i2c_imx_prepare_recovery; rinfo->unprepare_recovery = i2c_imx_unprepare_recovery; rinfo->recover_bus = i2c_generic_scl_recovery; i2c_imx->adapter.bus_recovery_info = rinfo; return 0; } static u32 i2c_imx_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SMBUS_READ_BLOCK_DATA; } static const struct i2c_algorithm i2c_imx_algo = { .master_xfer = i2c_imx_xfer, .master_xfer_atomic = i2c_imx_xfer_atomic, .functionality = i2c_imx_func, .reg_slave = i2c_imx_reg_slave, .unreg_slave = i2c_imx_unreg_slave, }; static int i2c_imx_probe(struct platform_device *pdev) { struct imx_i2c_struct *i2c_imx; struct resource *res; struct imxi2c_platform_data *pdata = dev_get_platdata(&pdev->dev); void __iomem *base; int irq, ret; dma_addr_t phy_addr; const struct imx_i2c_hwdata *match; irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(base)) return PTR_ERR(base); phy_addr = (dma_addr_t)res->start; i2c_imx = devm_kzalloc(&pdev->dev, sizeof(*i2c_imx), GFP_KERNEL); if (!i2c_imx) return -ENOMEM; spin_lock_init(&i2c_imx->slave_lock); hrtimer_init(&i2c_imx->slave_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); i2c_imx->slave_timer.function = i2c_imx_slave_timeout; match = device_get_match_data(&pdev->dev); if (match) i2c_imx->hwdata = match; else i2c_imx->hwdata = (struct imx_i2c_hwdata *) platform_get_device_id(pdev)->driver_data; /* Setup i2c_imx driver structure */ strscpy(i2c_imx->adapter.name, pdev->name, sizeof(i2c_imx->adapter.name)); i2c_imx->adapter.owner = THIS_MODULE; i2c_imx->adapter.algo = &i2c_imx_algo; i2c_imx->adapter.dev.parent = &pdev->dev; i2c_imx->adapter.nr = pdev->id; i2c_imx->adapter.dev.of_node = pdev->dev.of_node; i2c_imx->base = base; ACPI_COMPANION_SET(&i2c_imx->adapter.dev, ACPI_COMPANION(&pdev->dev)); /* Get I2C clock */ i2c_imx->clk = devm_clk_get_enabled(&pdev->dev, NULL); if (IS_ERR(i2c_imx->clk)) return dev_err_probe(&pdev->dev, PTR_ERR(i2c_imx->clk), "can't get I2C clock\n"); /* Init queue */ init_waitqueue_head(&i2c_imx->queue); /* Set up adapter data */ i2c_set_adapdata(&i2c_imx->adapter, i2c_imx); /* Set up platform driver data */ platform_set_drvdata(pdev, i2c_imx); pm_runtime_set_autosuspend_delay(&pdev->dev, I2C_PM_TIMEOUT); pm_runtime_use_autosuspend(&pdev->dev); pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); ret = pm_runtime_get_sync(&pdev->dev); if (ret < 0) goto rpm_disable; /* Request IRQ */ ret = request_irq(irq, i2c_imx_isr, IRQF_SHARED, pdev->name, i2c_imx); if (ret) { dev_err(&pdev->dev, "can't claim irq %d\n", irq); goto rpm_disable; } /* Set up clock divider */ i2c_imx->bitrate = I2C_MAX_STANDARD_MODE_FREQ; ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency", &i2c_imx->bitrate); if (ret < 0 && pdata && pdata->bitrate) i2c_imx->bitrate = pdata->bitrate; i2c_imx->clk_change_nb.notifier_call = i2c_imx_clk_notifier_call; clk_notifier_register(i2c_imx->clk, &i2c_imx->clk_change_nb); i2c_imx_set_clk(i2c_imx, clk_get_rate(i2c_imx->clk)); i2c_imx_reset_regs(i2c_imx); /* Init optional bus recovery function */ ret = i2c_imx_init_recovery_info(i2c_imx, pdev); /* Give it another chance if pinctrl used is not ready yet */ if (ret == -EPROBE_DEFER) goto clk_notifier_unregister; /* Add I2C adapter */ ret = i2c_add_numbered_adapter(&i2c_imx->adapter); if (ret < 0) goto clk_notifier_unregister; pm_runtime_mark_last_busy(&pdev->dev); pm_runtime_put_autosuspend(&pdev->dev); dev_dbg(&i2c_imx->adapter.dev, 
"claimed irq %d\n", irq); dev_dbg(&i2c_imx->adapter.dev, "device resources: %pR\n", res); dev_dbg(&i2c_imx->adapter.dev, "adapter name: \"%s\"\n", i2c_imx->adapter.name); dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n"); /* Init DMA config if supported */ i2c_imx_dma_request(i2c_imx, phy_addr); return 0; /* Return OK */ clk_notifier_unregister: clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb); free_irq(irq, i2c_imx); rpm_disable: pm_runtime_put_noidle(&pdev->dev); pm_runtime_disable(&pdev->dev); pm_runtime_set_suspended(&pdev->dev); pm_runtime_dont_use_autosuspend(&pdev->dev); return ret; } static void i2c_imx_remove(struct platform_device *pdev) { struct imx_i2c_struct *i2c_imx = platform_get_drvdata(pdev); int irq, ret; ret = pm_runtime_get_sync(&pdev->dev); hrtimer_cancel(&i2c_imx->slave_timer); /* remove adapter */ dev_dbg(&i2c_imx->adapter.dev, "adapter removed\n"); i2c_del_adapter(&i2c_imx->adapter); if (i2c_imx->dma) i2c_imx_dma_free(i2c_imx); if (ret >= 0) { /* setup chip registers to defaults */ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IADR); imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IFDR); imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2CR); imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2SR); } clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb); irq = platform_get_irq(pdev, 0); if (irq >= 0) free_irq(irq, i2c_imx); pm_runtime_put_noidle(&pdev->dev); pm_runtime_disable(&pdev->dev); } static int __maybe_unused i2c_imx_runtime_suspend(struct device *dev) { struct imx_i2c_struct *i2c_imx = dev_get_drvdata(dev); clk_disable(i2c_imx->clk); return 0; } static int __maybe_unused i2c_imx_runtime_resume(struct device *dev) { struct imx_i2c_struct *i2c_imx = dev_get_drvdata(dev); int ret; ret = clk_enable(i2c_imx->clk); if (ret) dev_err(dev, "can't enable I2C clock, ret=%d\n", ret); return ret; } static const struct dev_pm_ops i2c_imx_pm_ops = { SET_RUNTIME_PM_OPS(i2c_imx_runtime_suspend, i2c_imx_runtime_resume, NULL) }; static struct platform_driver i2c_imx_driver = { .probe = i2c_imx_probe, .remove_new = i2c_imx_remove, .driver = { .name = DRIVER_NAME, .pm = &i2c_imx_pm_ops, .of_match_table = i2c_imx_dt_ids, .acpi_match_table = i2c_imx_acpi_ids, }, .id_table = imx_i2c_devtype, }; static int __init i2c_adap_imx_init(void) { return platform_driver_register(&i2c_imx_driver); } subsys_initcall(i2c_adap_imx_init); static void __exit i2c_adap_imx_exit(void) { platform_driver_unregister(&i2c_imx_driver); } module_exit(i2c_adap_imx_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Darius Augulis"); MODULE_DESCRIPTION("I2C adapter driver for IMX I2C bus"); MODULE_ALIAS("platform:" DRIVER_NAME);
linux-master
drivers/i2c/busses/i2c-imx.c
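Two details of the i.MX driver above are easy to miss in the flattened source: i2c_imx_set_clk() picks the smallest table divider that is at least ceil(clk_rate / bitrate), clamped to the table ends, and i2c_imx_clear_irq() builds a single write value that clears only the requested I2SR bits on both write-zero-to-clear (i.MX) and write-one-to-clear (Vybrid) parts. The standalone sketch below reproduces both calculations with a trimmed divider table; it is an illustration of the logic, not the driver itself.

#include <stdint.h>
#include <stdio.h>

struct clk_pair { uint16_t div; uint16_t val; };

/* A few (divider, IFDR) pairs from the i.MX table above; the driver's
 * table is much longer. */
static const struct clk_pair imx_div[] = {
	{ 22, 0x20 }, { 30, 0x00 }, { 64, 0x2A }, { 128, 0x2F },
	{ 512, 0x37 }, { 1024, 0x3B }, { 3840, 0x1F },
};
#define NDIVS (sizeof(imx_div) / sizeof(imx_div[0]))

/* i2c_imx_set_clk(): smallest table divider >= ceil(clk_rate / bitrate),
 * clamped to the first/last entry when out of range. */
static uint16_t pick_ifdr(unsigned int clk_rate, unsigned int bitrate)
{
	unsigned int div = (clk_rate + bitrate - 1) / bitrate;
	size_t i;

	if (div < imx_div[0].div)
		i = 0;
	else if (div > imx_div[NDIVS - 1].div)
		i = NDIVS - 1;
	else
		for (i = 0; imx_div[i].div < div; i++)
			;
	return imx_div[i].val;
}

/* i2c_imx_clear_irq(): value written to I2SR so that only <bits> are
 * cleared.  With a W0C opcode of 0x00 the selected bits come out 0;
 * with the W1C opcode (IAL | IIF) they come out 1. */
static uint8_t clear_irq_value(uint8_t clr_opcode, uint8_t bits)
{
	return (uint8_t)(~clr_opcode ^ bits);
}

int main(void)
{
	/* 66 MHz module clock at 100 kHz -> divider 660 -> table entry 1024. */
	printf("IFDR = 0x%02x\n", pick_ifdr(66000000, 100000));

	/* Clear only IIF (0x02): i.MX is W0C (opcode 0x00), Vybrid is W1C (0x12). */
	printf("i.MX   writes 0x%02x\n", clear_irq_value(0x00, 0x02));
	printf("Vybrid writes 0x%02x\n", clear_irq_value(0x12, 0x02));
	return 0;
}

The example prints IFDR 0x3b, 0xfd for i.MX and 0xef for Vybrid; the clock notifier registered in probe simply re-runs the same divider pick whenever the parent clock rate changes.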
// SPDX-License-Identifier: GPL-2.0-or-later /* * i2c-au1550.c: SMBus (i2c) adapter for Alchemy PSC interface * Copyright (C) 2004 Embedded Edge, LLC <[email protected]> * * 2.6 port by Matt Porter <[email protected]> * * The documentation describes this as an SMBus controller, but it doesn't * understand any of the SMBus protocol in hardware. It's really an I2C * controller that could emulate most of the SMBus in software. * * This is just a skeleton adapter to use with the Au1550 PSC * algorithm. It was developed for the Pb1550, but will work with * any Au1550 board that has a similar PSC configuration. */ #include <linux/delay.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/errno.h> #include <linux/i2c.h> #include <linux/slab.h> #include <asm/mach-au1x00/au1000.h> #include <asm/mach-au1x00/au1xxx_psc.h> #define PSC_SEL 0x00 #define PSC_CTRL 0x04 #define PSC_SMBCFG 0x08 #define PSC_SMBMSK 0x0C #define PSC_SMBPCR 0x10 #define PSC_SMBSTAT 0x14 #define PSC_SMBEVNT 0x18 #define PSC_SMBTXRX 0x1C #define PSC_SMBTMR 0x20 struct i2c_au1550_data { void __iomem *psc_base; int xfer_timeout; struct i2c_adapter adap; }; static inline void WR(struct i2c_au1550_data *a, int r, unsigned long v) { __raw_writel(v, a->psc_base + r); wmb(); } static inline unsigned long RD(struct i2c_au1550_data *a, int r) { return __raw_readl(a->psc_base + r); } static int wait_xfer_done(struct i2c_au1550_data *adap) { int i; /* Wait for Tx Buffer Empty */ for (i = 0; i < adap->xfer_timeout; i++) { if (RD(adap, PSC_SMBSTAT) & PSC_SMBSTAT_TE) return 0; udelay(1); } return -ETIMEDOUT; } static int wait_ack(struct i2c_au1550_data *adap) { unsigned long stat; if (wait_xfer_done(adap)) return -ETIMEDOUT; stat = RD(adap, PSC_SMBEVNT); if ((stat & (PSC_SMBEVNT_DN | PSC_SMBEVNT_AN | PSC_SMBEVNT_AL)) != 0) return -ETIMEDOUT; return 0; } static int wait_master_done(struct i2c_au1550_data *adap) { int i; /* Wait for Master Done. */ for (i = 0; i < 2 * adap->xfer_timeout; i++) { if ((RD(adap, PSC_SMBEVNT) & PSC_SMBEVNT_MD) != 0) return 0; udelay(1); } return -ETIMEDOUT; } static int do_address(struct i2c_au1550_data *adap, unsigned int addr, int rd, int q) { unsigned long stat; /* Reset the FIFOs, clear events. */ stat = RD(adap, PSC_SMBSTAT); WR(adap, PSC_SMBEVNT, PSC_SMBEVNT_ALLCLR); if (!(stat & PSC_SMBSTAT_TE) || !(stat & PSC_SMBSTAT_RE)) { WR(adap, PSC_SMBPCR, PSC_SMBPCR_DC); while ((RD(adap, PSC_SMBPCR) & PSC_SMBPCR_DC) != 0) cpu_relax(); udelay(50); } /* Write out the i2c chip address and specify operation */ addr <<= 1; if (rd) addr |= 1; /* zero-byte xfers stop immediately */ if (q) addr |= PSC_SMBTXRX_STP; /* Put byte into fifo, start up master. */ WR(adap, PSC_SMBTXRX, addr); WR(adap, PSC_SMBPCR, PSC_SMBPCR_MS); if (wait_ack(adap)) return -EIO; return (q) ? wait_master_done(adap) : 0; } static int wait_for_rx_byte(struct i2c_au1550_data *adap, unsigned char *out) { int j; if (wait_xfer_done(adap)) return -EIO; j = adap->xfer_timeout * 100; do { j--; if (j <= 0) return -EIO; if ((RD(adap, PSC_SMBSTAT) & PSC_SMBSTAT_RE) == 0) j = 0; else udelay(1); } while (j > 0); *out = RD(adap, PSC_SMBTXRX); return 0; } static int i2c_read(struct i2c_au1550_data *adap, unsigned char *buf, unsigned int len) { int i; if (len == 0) return 0; /* A read is performed by stuffing the transmit fifo with * zero bytes for timing, waiting for bytes to appear in the * receive fifo, then reading the bytes. 
*/ i = 0; while (i < (len - 1)) { WR(adap, PSC_SMBTXRX, 0); if (wait_for_rx_byte(adap, &buf[i])) return -EIO; i++; } /* The last byte has to indicate transfer done. */ WR(adap, PSC_SMBTXRX, PSC_SMBTXRX_STP); if (wait_master_done(adap)) return -EIO; buf[i] = (unsigned char)(RD(adap, PSC_SMBTXRX) & 0xff); return 0; } static int i2c_write(struct i2c_au1550_data *adap, unsigned char *buf, unsigned int len) { int i; unsigned long data; if (len == 0) return 0; i = 0; while (i < (len-1)) { data = buf[i]; WR(adap, PSC_SMBTXRX, data); if (wait_ack(adap)) return -EIO; i++; } /* The last byte has to indicate transfer done. */ data = buf[i]; data |= PSC_SMBTXRX_STP; WR(adap, PSC_SMBTXRX, data); if (wait_master_done(adap)) return -EIO; return 0; } static int au1550_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, int num) { struct i2c_au1550_data *adap = i2c_adap->algo_data; struct i2c_msg *p; int i, err = 0; WR(adap, PSC_CTRL, PSC_CTRL_ENABLE); for (i = 0; !err && i < num; i++) { p = &msgs[i]; err = do_address(adap, p->addr, p->flags & I2C_M_RD, (p->len == 0)); if (err || !p->len) continue; if (p->flags & I2C_M_RD) err = i2c_read(adap, p->buf, p->len); else err = i2c_write(adap, p->buf, p->len); } /* Return the number of messages processed, or the error code. */ if (err == 0) err = num; WR(adap, PSC_CTRL, PSC_CTRL_SUSPEND); return err; } static u32 au1550_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static const struct i2c_algorithm au1550_algo = { .master_xfer = au1550_xfer, .functionality = au1550_func, }; static void i2c_au1550_setup(struct i2c_au1550_data *priv) { unsigned long cfg; WR(priv, PSC_CTRL, PSC_CTRL_DISABLE); WR(priv, PSC_SEL, PSC_SEL_PS_SMBUSMODE); WR(priv, PSC_SMBCFG, 0); WR(priv, PSC_CTRL, PSC_CTRL_ENABLE); while ((RD(priv, PSC_SMBSTAT) & PSC_SMBSTAT_SR) == 0) cpu_relax(); cfg = PSC_SMBCFG_RT_FIFO8 | PSC_SMBCFG_TT_FIFO8 | PSC_SMBCFG_DD_DISABLE; WR(priv, PSC_SMBCFG, cfg); /* Divide by 8 to get a 6.25 MHz clock. The later protocol * timings are based on this clock. */ cfg |= PSC_SMBCFG_SET_DIV(PSC_SMBCFG_DIV8); WR(priv, PSC_SMBCFG, cfg); WR(priv, PSC_SMBMSK, PSC_SMBMSK_ALLMASK); /* Set the protocol timer values. See Table 71 in the * Au1550 Data Book for standard timing values. */ WR(priv, PSC_SMBTMR, PSC_SMBTMR_SET_TH(0) | PSC_SMBTMR_SET_PS(20) | \ PSC_SMBTMR_SET_PU(20) | PSC_SMBTMR_SET_SH(20) | \ PSC_SMBTMR_SET_SU(20) | PSC_SMBTMR_SET_CL(20) | \ PSC_SMBTMR_SET_CH(20)); cfg |= PSC_SMBCFG_DE_ENABLE; WR(priv, PSC_SMBCFG, cfg); while ((RD(priv, PSC_SMBSTAT) & PSC_SMBSTAT_SR) == 0) cpu_relax(); WR(priv, PSC_CTRL, PSC_CTRL_SUSPEND); } static void i2c_au1550_disable(struct i2c_au1550_data *priv) { WR(priv, PSC_SMBCFG, 0); WR(priv, PSC_CTRL, PSC_CTRL_DISABLE); } /* * registering functions to load algorithms at runtime * Prior to calling us, the 50MHz clock frequency and routing * must have been set up for the PSC indicated by the adapter. */ static int i2c_au1550_probe(struct platform_device *pdev) { struct i2c_au1550_data *priv; int ret; priv = devm_kzalloc(&pdev->dev, sizeof(struct i2c_au1550_data), GFP_KERNEL); if (!priv) return -ENOMEM; priv->psc_base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); if (IS_ERR(priv->psc_base)) return PTR_ERR(priv->psc_base); priv->xfer_timeout = 200; priv->adap.nr = pdev->id; priv->adap.algo = &au1550_algo; priv->adap.algo_data = priv; priv->adap.dev.parent = &pdev->dev; strscpy(priv->adap.name, "Au1xxx PSC I2C", sizeof(priv->adap.name)); /* Now, set up the PSC for SMBus PIO mode. 
*/ i2c_au1550_setup(priv); ret = i2c_add_numbered_adapter(&priv->adap); if (ret) { i2c_au1550_disable(priv); return ret; } platform_set_drvdata(pdev, priv); return 0; } static void i2c_au1550_remove(struct platform_device *pdev) { struct i2c_au1550_data *priv = platform_get_drvdata(pdev); i2c_del_adapter(&priv->adap); i2c_au1550_disable(priv); } static int i2c_au1550_suspend(struct device *dev) { struct i2c_au1550_data *priv = dev_get_drvdata(dev); i2c_au1550_disable(priv); return 0; } static int i2c_au1550_resume(struct device *dev) { struct i2c_au1550_data *priv = dev_get_drvdata(dev); i2c_au1550_setup(priv); return 0; } static DEFINE_SIMPLE_DEV_PM_OPS(i2c_au1550_pmops, i2c_au1550_suspend, i2c_au1550_resume); static struct platform_driver au1xpsc_smbus_driver = { .driver = { .name = "au1xpsc_smbus", .pm = pm_sleep_ptr(&i2c_au1550_pmops), }, .probe = i2c_au1550_probe, .remove_new = i2c_au1550_remove, }; module_platform_driver(au1xpsc_smbus_driver); MODULE_AUTHOR("Dan Malek, Embedded Edge, LLC."); MODULE_DESCRIPTION("SMBus adapter Alchemy pb1550"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:au1xpsc_smbus");
linux-master
drivers/i2c/busses/i2c-au1550.c
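In the Au1550 PSC driver above, every transfer is driven by words written to the PSC_SMBTXRX FIFO: the first word carries the 7-bit address with the R/W bit, and the STP flag marks the end of the transfer (immediately for zero-length "quick" transfers, otherwise on the last data byte). A minimal sketch of that word composition follows; the STP bit position is a placeholder, since the real PSC_SMBTXRX_STP constant comes from <asm/mach-au1x00/au1xxx_psc.h> and is not shown in this excerpt.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define FAKE_SMBTXRX_STP (1u << 20)	/* placeholder for PSC_SMBTXRX_STP */

/* First FIFO word of a transfer, as built by do_address() above. */
static uint32_t address_word(uint8_t addr7, bool read, bool zero_len)
{
	uint32_t w = (uint32_t)addr7 << 1;

	if (read)
		w |= 1;			/* R/W bit */
	if (zero_len)
		w |= FAKE_SMBTXRX_STP;	/* quick transfers stop right away */
	return w;
}

/* Data word; the last byte of a message carries the STP flag, which is
 * what makes the hardware generate the stop condition. */
static uint32_t data_word(uint8_t byte, bool last)
{
	return last ? ((uint32_t)byte | FAKE_SMBTXRX_STP) : byte;
}

int main(void)
{
	printf("addr word (write to 0x50): 0x%08x\n",
	       address_word(0x50, false, false));
	printf("last data word (0xa5):     0x%08x\n",
	       data_word(0xa5, true));
	return 0;
}

Reads follow the same pattern, except the driver stuffs dummy zero words into the TX FIFO to clock data in and writes a bare STP word to terminate, as i2c_read() above shows.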
// SPDX-License-Identifier: GPL-2.0-only /* * Driver for the TAOS evaluation modules * These devices include an I2C master which can be controlled over the * serial port. * * Copyright (C) 2007 Jean Delvare <[email protected]> */ #include <linux/delay.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/input.h> #include <linux/serio.h> #include <linux/init.h> #include <linux/i2c.h> #define TAOS_BUFFER_SIZE 63 #define TAOS_STATE_INIT 0 #define TAOS_STATE_IDLE 1 #define TAOS_STATE_EOFF 2 #define TAOS_STATE_RECV 3 #define TAOS_CMD_RESET 0x12 #define TAOS_CMD_ECHO_ON '+' #define TAOS_CMD_ECHO_OFF '-' static DECLARE_WAIT_QUEUE_HEAD(wq); struct taos_data { struct i2c_adapter adapter; struct i2c_client *client; int state; u8 addr; /* last used address */ unsigned char buffer[TAOS_BUFFER_SIZE]; unsigned int pos; /* position inside the buffer */ }; /* TAOS TSL2550 EVM */ static const struct i2c_board_info tsl2550_info = { I2C_BOARD_INFO("tsl2550", 0x39), }; /* Instantiate i2c devices based on the adapter name */ static struct i2c_client *taos_instantiate_device(struct i2c_adapter *adapter) { if (!strncmp(adapter->name, "TAOS TSL2550 EVM", 16)) { dev_info(&adapter->dev, "Instantiating device %s at 0x%02x\n", tsl2550_info.type, tsl2550_info.addr); return i2c_new_client_device(adapter, &tsl2550_info); } return ERR_PTR(-ENODEV); } static int taos_smbus_xfer(struct i2c_adapter *adapter, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { struct serio *serio = adapter->algo_data; struct taos_data *taos = serio_get_drvdata(serio); char *p; /* Encode our transaction. "@" is for the device address, "$" for the SMBus command and "#" for the data. */ p = taos->buffer; /* The device remembers the last used address, no need to send it again if it's the same */ if (addr != taos->addr) p += sprintf(p, "@%02X", addr); switch (size) { case I2C_SMBUS_BYTE: if (read_write == I2C_SMBUS_WRITE) sprintf(p, "$#%02X", command); else sprintf(p, "$"); break; case I2C_SMBUS_BYTE_DATA: if (read_write == I2C_SMBUS_WRITE) sprintf(p, "$%02X#%02X", command, data->byte); else sprintf(p, "$%02X", command); break; default: dev_warn(&adapter->dev, "Unsupported transaction %d\n", size); return -EOPNOTSUPP; } /* Send the transaction to the TAOS EVM */ dev_dbg(&adapter->dev, "Command buffer: %s\n", taos->buffer); for (p = taos->buffer; *p; p++) serio_write(serio, *p); taos->addr = addr; /* Start the transaction and read the answer */ taos->pos = 0; taos->state = TAOS_STATE_RECV; serio_write(serio, read_write == I2C_SMBUS_WRITE ? '>' : '<'); wait_event_interruptible_timeout(wq, taos->state == TAOS_STATE_IDLE, msecs_to_jiffies(150)); if (taos->state != TAOS_STATE_IDLE || taos->pos != 5) { dev_err(&adapter->dev, "Transaction timeout (pos=%d)\n", taos->pos); return -EIO; } dev_dbg(&adapter->dev, "Answer buffer: %s\n", taos->buffer); /* Interpret the returned string */ p = taos->buffer + 1; p[3] = '\0'; if (!strcmp(p, "NAK")) return -ENODEV; if (read_write == I2C_SMBUS_WRITE) { if (!strcmp(p, "ACK")) return 0; } else { if (p[0] == 'x') { /* * Voluntarily dropping error code of kstrtou8 since all * error code that it could return are invalid according * to Documentation/i2c/fault-codes.rst. 
*/ if (kstrtou8(p + 1, 16, &data->byte)) return -EPROTO; return 0; } } return -EIO; } static u32 taos_smbus_func(struct i2c_adapter *adapter) { return I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA; } static const struct i2c_algorithm taos_algorithm = { .smbus_xfer = taos_smbus_xfer, .functionality = taos_smbus_func, }; static irqreturn_t taos_interrupt(struct serio *serio, unsigned char data, unsigned int flags) { struct taos_data *taos = serio_get_drvdata(serio); switch (taos->state) { case TAOS_STATE_INIT: taos->buffer[taos->pos++] = data; if (data == ':' || taos->pos == TAOS_BUFFER_SIZE - 1) { taos->buffer[taos->pos] = '\0'; taos->state = TAOS_STATE_IDLE; wake_up_interruptible(&wq); } break; case TAOS_STATE_EOFF: taos->state = TAOS_STATE_IDLE; wake_up_interruptible(&wq); break; case TAOS_STATE_RECV: taos->buffer[taos->pos++] = data; if (data == ']') { taos->buffer[taos->pos] = '\0'; taos->state = TAOS_STATE_IDLE; wake_up_interruptible(&wq); } break; } return IRQ_HANDLED; } /* Extract the adapter name from the buffer received after reset. The buffer is modified and a pointer inside the buffer is returned. */ static char *taos_adapter_name(char *buffer) { char *start, *end; start = strstr(buffer, "TAOS "); if (!start) return NULL; end = strchr(start, '\r'); if (!end) return NULL; *end = '\0'; return start; } static int taos_connect(struct serio *serio, struct serio_driver *drv) { struct taos_data *taos; struct i2c_adapter *adapter; char *name; int err; taos = kzalloc(sizeof(struct taos_data), GFP_KERNEL); if (!taos) { err = -ENOMEM; goto exit; } taos->state = TAOS_STATE_INIT; serio_set_drvdata(serio, taos); err = serio_open(serio, drv); if (err) goto exit_kfree; adapter = &taos->adapter; adapter->owner = THIS_MODULE; adapter->algo = &taos_algorithm; adapter->algo_data = serio; adapter->dev.parent = &serio->dev; /* Reset the TAOS evaluation module to identify it */ serio_write(serio, TAOS_CMD_RESET); wait_event_interruptible_timeout(wq, taos->state == TAOS_STATE_IDLE, msecs_to_jiffies(2000)); if (taos->state != TAOS_STATE_IDLE) { err = -ENODEV; dev_err(&serio->dev, "TAOS EVM reset failed (state=%d, " "pos=%d)\n", taos->state, taos->pos); goto exit_close; } name = taos_adapter_name(taos->buffer); if (!name) { err = -ENODEV; dev_err(&serio->dev, "TAOS EVM identification failed\n"); goto exit_close; } strscpy(adapter->name, name, sizeof(adapter->name)); /* Turn echo off for better performance */ taos->state = TAOS_STATE_EOFF; serio_write(serio, TAOS_CMD_ECHO_OFF); wait_event_interruptible_timeout(wq, taos->state == TAOS_STATE_IDLE, msecs_to_jiffies(250)); if (taos->state != TAOS_STATE_IDLE) { err = -ENODEV; dev_err(&serio->dev, "TAOS EVM echo off failed " "(state=%d)\n", taos->state); goto exit_close; } err = i2c_add_adapter(adapter); if (err) goto exit_close; dev_info(&serio->dev, "Connected to TAOS EVM\n"); taos->client = taos_instantiate_device(adapter); return 0; exit_close: serio_close(serio); exit_kfree: kfree(taos); exit: return err; } static void taos_disconnect(struct serio *serio) { struct taos_data *taos = serio_get_drvdata(serio); i2c_unregister_device(taos->client); i2c_del_adapter(&taos->adapter); serio_close(serio); kfree(taos); dev_info(&serio->dev, "Disconnected from TAOS EVM\n"); } static const struct serio_device_id taos_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_TAOSEVM, .id = SERIO_ANY, .extra = SERIO_ANY, }, { 0 } }; MODULE_DEVICE_TABLE(serio, taos_serio_ids); static struct serio_driver taos_drv = { .driver = { .name = "taos-evm", }, .description = "TAOS 
evaluation module driver", .id_table = taos_serio_ids, .connect = taos_connect, .disconnect = taos_disconnect, .interrupt = taos_interrupt, }; module_serio_driver(taos_drv); MODULE_AUTHOR("Jean Delvare <[email protected]>"); MODULE_DESCRIPTION("TAOS evaluation module driver"); MODULE_LICENSE("GPL");
linux-master
drivers/i2c/busses/i2c-taos-evm.c
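The TAOS EVM driver above speaks a small ASCII protocol over the serial port: "@" introduces the device address (only resent when it changes, since the EVM remembers the last one), "$" the SMBus command, "#" the data byte, and a trailing '>' or '<' starts a write or read. The sketch below rebuilds that encoding for a byte-data transaction as a standalone program, purely to illustrate the format used by taos_smbus_xfer().

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static void encode_byte_data(char *buf, size_t len, uint8_t last_addr,
			     uint8_t addr, uint8_t command, int write,
			     uint8_t value)
{
	char *p = buf;

	/* The EVM remembers the last address, so resend it only on change. */
	if (addr != last_addr)
		p += snprintf(p, len - (size_t)(p - buf), "@%02X", addr);
	if (write)
		snprintf(p, len - (size_t)(p - buf), "$%02X#%02X", command, value);
	else
		snprintf(p, len - (size_t)(p - buf), "$%02X", command);
}

int main(void)
{
	char buf[64];

	/* Write 0xAB to register 0x07 of the TSL2550 at 0x39. */
	encode_byte_data(buf, sizeof(buf), 0x00, 0x39, 0x07, 1, 0xAB);
	printf("write cmd: %s>\n", buf);	/* '>' starts a write transaction */

	/* Read register 0x07 back; same address, so no "@xx" prefix. */
	encode_byte_data(buf, sizeof(buf), 0x39, 0x39, 0x07, 0, 0);
	printf("read cmd:  %s<\n", buf);	/* '<' starts a read transaction */
	return 0;
}

The answer the EVM returns is parsed in the same ASCII form: "NAK" maps to -ENODEV, "ACK" to success for writes, and "x.." hex payloads are converted with kstrtou8() for reads.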
// SPDX-License-Identifier: GPL-2.0-only // Copyright (C) 2014 Broadcom Corporation #include <linux/delay.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/slab.h> #define IDM_CTRL_DIRECT_OFFSET 0x00 #define CFG_OFFSET 0x00 #define CFG_RESET_SHIFT 31 #define CFG_EN_SHIFT 30 #define CFG_SLAVE_ADDR_0_SHIFT 28 #define CFG_M_RETRY_CNT_SHIFT 16 #define CFG_M_RETRY_CNT_MASK 0x0f #define TIM_CFG_OFFSET 0x04 #define TIM_CFG_MODE_400_SHIFT 31 #define TIM_RAND_SLAVE_STRETCH_SHIFT 24 #define TIM_RAND_SLAVE_STRETCH_MASK 0x7f #define TIM_PERIODIC_SLAVE_STRETCH_SHIFT 16 #define TIM_PERIODIC_SLAVE_STRETCH_MASK 0x7f #define S_CFG_SMBUS_ADDR_OFFSET 0x08 #define S_CFG_EN_NIC_SMB_ADDR3_SHIFT 31 #define S_CFG_NIC_SMB_ADDR3_SHIFT 24 #define S_CFG_NIC_SMB_ADDR3_MASK 0x7f #define S_CFG_EN_NIC_SMB_ADDR2_SHIFT 23 #define S_CFG_NIC_SMB_ADDR2_SHIFT 16 #define S_CFG_NIC_SMB_ADDR2_MASK 0x7f #define S_CFG_EN_NIC_SMB_ADDR1_SHIFT 15 #define S_CFG_NIC_SMB_ADDR1_SHIFT 8 #define S_CFG_NIC_SMB_ADDR1_MASK 0x7f #define S_CFG_EN_NIC_SMB_ADDR0_SHIFT 7 #define S_CFG_NIC_SMB_ADDR0_SHIFT 0 #define S_CFG_NIC_SMB_ADDR0_MASK 0x7f #define M_FIFO_CTRL_OFFSET 0x0c #define M_FIFO_RX_FLUSH_SHIFT 31 #define M_FIFO_TX_FLUSH_SHIFT 30 #define M_FIFO_RX_CNT_SHIFT 16 #define M_FIFO_RX_CNT_MASK 0x7f #define M_FIFO_RX_THLD_SHIFT 8 #define M_FIFO_RX_THLD_MASK 0x3f #define S_FIFO_CTRL_OFFSET 0x10 #define S_FIFO_RX_FLUSH_SHIFT 31 #define S_FIFO_TX_FLUSH_SHIFT 30 #define S_FIFO_RX_CNT_SHIFT 16 #define S_FIFO_RX_CNT_MASK 0x7f #define S_FIFO_RX_THLD_SHIFT 8 #define S_FIFO_RX_THLD_MASK 0x3f #define M_CMD_OFFSET 0x30 #define M_CMD_START_BUSY_SHIFT 31 #define M_CMD_STATUS_SHIFT 25 #define M_CMD_STATUS_MASK 0x07 #define M_CMD_STATUS_SUCCESS 0x0 #define M_CMD_STATUS_LOST_ARB 0x1 #define M_CMD_STATUS_NACK_ADDR 0x2 #define M_CMD_STATUS_NACK_DATA 0x3 #define M_CMD_STATUS_TIMEOUT 0x4 #define M_CMD_STATUS_FIFO_UNDERRUN 0x5 #define M_CMD_STATUS_RX_FIFO_FULL 0x6 #define M_CMD_PROTOCOL_SHIFT 9 #define M_CMD_PROTOCOL_MASK 0xf #define M_CMD_PROTOCOL_QUICK 0x0 #define M_CMD_PROTOCOL_BLK_WR 0x7 #define M_CMD_PROTOCOL_BLK_RD 0x8 #define M_CMD_PROTOCOL_PROCESS 0xa #define M_CMD_PEC_SHIFT 8 #define M_CMD_RD_CNT_SHIFT 0 #define M_CMD_RD_CNT_MASK 0xff #define S_CMD_OFFSET 0x34 #define S_CMD_START_BUSY_SHIFT 31 #define S_CMD_STATUS_SHIFT 23 #define S_CMD_STATUS_MASK 0x07 #define S_CMD_STATUS_SUCCESS 0x0 #define S_CMD_STATUS_TIMEOUT 0x5 #define S_CMD_STATUS_MASTER_ABORT 0x7 #define IE_OFFSET 0x38 #define IE_M_RX_FIFO_FULL_SHIFT 31 #define IE_M_RX_THLD_SHIFT 30 #define IE_M_START_BUSY_SHIFT 28 #define IE_M_TX_UNDERRUN_SHIFT 27 #define IE_S_RX_FIFO_FULL_SHIFT 26 #define IE_S_RX_THLD_SHIFT 25 #define IE_S_RX_EVENT_SHIFT 24 #define IE_S_START_BUSY_SHIFT 23 #define IE_S_TX_UNDERRUN_SHIFT 22 #define IE_S_RD_EVENT_SHIFT 21 #define IS_OFFSET 0x3c #define IS_M_RX_FIFO_FULL_SHIFT 31 #define IS_M_RX_THLD_SHIFT 30 #define IS_M_START_BUSY_SHIFT 28 #define IS_M_TX_UNDERRUN_SHIFT 27 #define IS_S_RX_FIFO_FULL_SHIFT 26 #define IS_S_RX_THLD_SHIFT 25 #define IS_S_RX_EVENT_SHIFT 24 #define IS_S_START_BUSY_SHIFT 23 #define IS_S_TX_UNDERRUN_SHIFT 22 #define IS_S_RD_EVENT_SHIFT 21 #define M_TX_OFFSET 0x40 #define M_TX_WR_STATUS_SHIFT 31 #define M_TX_DATA_SHIFT 0 #define M_TX_DATA_MASK 0xff #define M_RX_OFFSET 0x44 #define M_RX_STATUS_SHIFT 30 #define M_RX_STATUS_MASK 0x03 #define M_RX_PEC_ERR_SHIFT 29 #define M_RX_DATA_SHIFT 0 #define M_RX_DATA_MASK 
0xff #define S_TX_OFFSET 0x48 #define S_TX_WR_STATUS_SHIFT 31 #define S_TX_DATA_SHIFT 0 #define S_TX_DATA_MASK 0xff #define S_RX_OFFSET 0x4c #define S_RX_STATUS_SHIFT 30 #define S_RX_STATUS_MASK 0x03 #define S_RX_PEC_ERR_SHIFT 29 #define S_RX_DATA_SHIFT 0 #define S_RX_DATA_MASK 0xff #define I2C_TIMEOUT_MSEC 50000 #define M_TX_RX_FIFO_SIZE 64 #define M_RX_FIFO_MAX_THLD_VALUE (M_TX_RX_FIFO_SIZE - 1) #define M_RX_MAX_READ_LEN 255 #define M_RX_FIFO_THLD_VALUE 50 #define IE_M_ALL_INTERRUPT_SHIFT 27 #define IE_M_ALL_INTERRUPT_MASK 0x1e #define SLAVE_READ_WRITE_BIT_MASK 0x1 #define SLAVE_READ_WRITE_BIT_SHIFT 0x1 #define SLAVE_MAX_SIZE_TRANSACTION 64 #define SLAVE_CLOCK_STRETCH_TIME 25 #define IE_S_ALL_INTERRUPT_SHIFT 21 #define IE_S_ALL_INTERRUPT_MASK 0x3f /* * It takes ~18us to reading 10bytes of data, hence to keep tasklet * running for less time, max slave read per tasklet is set to 10 bytes. */ #define MAX_SLAVE_RX_PER_INT 10 enum i2c_slave_read_status { I2C_SLAVE_RX_FIFO_EMPTY = 0, I2C_SLAVE_RX_START, I2C_SLAVE_RX_DATA, I2C_SLAVE_RX_END, }; enum bus_speed_index { I2C_SPD_100K = 0, I2C_SPD_400K, }; enum bcm_iproc_i2c_type { IPROC_I2C, IPROC_I2C_NIC }; struct bcm_iproc_i2c_dev { struct device *device; enum bcm_iproc_i2c_type type; int irq; void __iomem *base; void __iomem *idm_base; u32 ape_addr_mask; /* lock for indirect access through IDM */ spinlock_t idm_lock; struct i2c_adapter adapter; unsigned int bus_speed; struct completion done; int xfer_is_done; struct i2c_msg *msg; struct i2c_client *slave; /* bytes that have been transferred */ unsigned int tx_bytes; /* bytes that have been read */ unsigned int rx_bytes; unsigned int thld_bytes; bool slave_rx_only; bool rx_start_rcvd; bool slave_read_complete; u32 tx_underrun; u32 slave_int_mask; struct tasklet_struct slave_rx_tasklet; }; /* tasklet to process slave rx data */ static void slave_rx_tasklet_fn(unsigned long); /* * Can be expanded in the future if more interrupt status bits are utilized */ #define ISR_MASK (BIT(IS_M_START_BUSY_SHIFT) | BIT(IS_M_TX_UNDERRUN_SHIFT)\ | BIT(IS_M_RX_THLD_SHIFT)) #define ISR_MASK_SLAVE (BIT(IS_S_START_BUSY_SHIFT)\ | BIT(IS_S_RX_EVENT_SHIFT) | BIT(IS_S_RD_EVENT_SHIFT)\ | BIT(IS_S_TX_UNDERRUN_SHIFT) | BIT(IS_S_RX_FIFO_FULL_SHIFT)\ | BIT(IS_S_RX_THLD_SHIFT)) static int bcm_iproc_i2c_reg_slave(struct i2c_client *slave); static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave); static void bcm_iproc_i2c_enable_disable(struct bcm_iproc_i2c_dev *iproc_i2c, bool enable); static inline u32 iproc_i2c_rd_reg(struct bcm_iproc_i2c_dev *iproc_i2c, u32 offset) { u32 val; unsigned long flags; if (iproc_i2c->idm_base) { spin_lock_irqsave(&iproc_i2c->idm_lock, flags); writel(iproc_i2c->ape_addr_mask, iproc_i2c->idm_base + IDM_CTRL_DIRECT_OFFSET); val = readl(iproc_i2c->base + offset); spin_unlock_irqrestore(&iproc_i2c->idm_lock, flags); } else { val = readl(iproc_i2c->base + offset); } return val; } static inline void iproc_i2c_wr_reg(struct bcm_iproc_i2c_dev *iproc_i2c, u32 offset, u32 val) { unsigned long flags; if (iproc_i2c->idm_base) { spin_lock_irqsave(&iproc_i2c->idm_lock, flags); writel(iproc_i2c->ape_addr_mask, iproc_i2c->idm_base + IDM_CTRL_DIRECT_OFFSET); writel(val, iproc_i2c->base + offset); spin_unlock_irqrestore(&iproc_i2c->idm_lock, flags); } else { writel(val, iproc_i2c->base + offset); } } static void bcm_iproc_i2c_slave_init( struct bcm_iproc_i2c_dev *iproc_i2c, bool need_reset) { u32 val; iproc_i2c->tx_underrun = 0; if (need_reset) { /* put controller in reset */ val = iproc_i2c_rd_reg(iproc_i2c, 
CFG_OFFSET); val |= BIT(CFG_RESET_SHIFT); iproc_i2c_wr_reg(iproc_i2c, CFG_OFFSET, val); /* wait 100 usec per spec */ udelay(100); /* bring controller out of reset */ val &= ~(BIT(CFG_RESET_SHIFT)); iproc_i2c_wr_reg(iproc_i2c, CFG_OFFSET, val); } /* flush TX/RX FIFOs */ val = (BIT(S_FIFO_RX_FLUSH_SHIFT) | BIT(S_FIFO_TX_FLUSH_SHIFT)); iproc_i2c_wr_reg(iproc_i2c, S_FIFO_CTRL_OFFSET, val); /* Maximum slave stretch time */ val = iproc_i2c_rd_reg(iproc_i2c, TIM_CFG_OFFSET); val &= ~(TIM_RAND_SLAVE_STRETCH_MASK << TIM_RAND_SLAVE_STRETCH_SHIFT); val |= (SLAVE_CLOCK_STRETCH_TIME << TIM_RAND_SLAVE_STRETCH_SHIFT); iproc_i2c_wr_reg(iproc_i2c, TIM_CFG_OFFSET, val); /* Configure the slave address */ val = iproc_i2c_rd_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET); val |= BIT(S_CFG_EN_NIC_SMB_ADDR3_SHIFT); val &= ~(S_CFG_NIC_SMB_ADDR3_MASK << S_CFG_NIC_SMB_ADDR3_SHIFT); val |= (iproc_i2c->slave->addr << S_CFG_NIC_SMB_ADDR3_SHIFT); iproc_i2c_wr_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET, val); /* clear all pending slave interrupts */ iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, ISR_MASK_SLAVE); /* Enable interrupt register to indicate a valid byte in receive fifo */ val = BIT(IE_S_RX_EVENT_SHIFT); /* Enable interrupt register to indicate Slave Rx FIFO Full */ val |= BIT(IE_S_RX_FIFO_FULL_SHIFT); /* Enable interrupt register to indicate a Master read transaction */ val |= BIT(IE_S_RD_EVENT_SHIFT); /* Enable interrupt register for the Slave BUSY command */ val |= BIT(IE_S_START_BUSY_SHIFT); iproc_i2c->slave_int_mask = val; iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val); } static void bcm_iproc_i2c_check_slave_status( struct bcm_iproc_i2c_dev *iproc_i2c) { u32 val; val = iproc_i2c_rd_reg(iproc_i2c, S_CMD_OFFSET); /* status is valid only when START_BUSY is cleared after it was set */ if (val & BIT(S_CMD_START_BUSY_SHIFT)) return; val = (val >> S_CMD_STATUS_SHIFT) & S_CMD_STATUS_MASK; if (val == S_CMD_STATUS_TIMEOUT || val == S_CMD_STATUS_MASTER_ABORT) { dev_err(iproc_i2c->device, (val == S_CMD_STATUS_TIMEOUT) ? 
"slave random stretch time timeout\n" : "Master aborted read transaction\n"); /* re-initialize i2c for recovery */ bcm_iproc_i2c_enable_disable(iproc_i2c, false); bcm_iproc_i2c_slave_init(iproc_i2c, true); bcm_iproc_i2c_enable_disable(iproc_i2c, true); } } static void bcm_iproc_i2c_slave_read(struct bcm_iproc_i2c_dev *iproc_i2c) { u8 rx_data, rx_status; u32 rx_bytes = 0; u32 val; while (rx_bytes < MAX_SLAVE_RX_PER_INT) { val = iproc_i2c_rd_reg(iproc_i2c, S_RX_OFFSET); rx_status = (val >> S_RX_STATUS_SHIFT) & S_RX_STATUS_MASK; rx_data = ((val >> S_RX_DATA_SHIFT) & S_RX_DATA_MASK); if (rx_status == I2C_SLAVE_RX_START) { /* Start of SMBUS Master write */ i2c_slave_event(iproc_i2c->slave, I2C_SLAVE_WRITE_REQUESTED, &rx_data); iproc_i2c->rx_start_rcvd = true; iproc_i2c->slave_read_complete = false; } else if (rx_status == I2C_SLAVE_RX_DATA && iproc_i2c->rx_start_rcvd) { /* Middle of SMBUS Master write */ i2c_slave_event(iproc_i2c->slave, I2C_SLAVE_WRITE_RECEIVED, &rx_data); } else if (rx_status == I2C_SLAVE_RX_END && iproc_i2c->rx_start_rcvd) { /* End of SMBUS Master write */ if (iproc_i2c->slave_rx_only) i2c_slave_event(iproc_i2c->slave, I2C_SLAVE_WRITE_RECEIVED, &rx_data); i2c_slave_event(iproc_i2c->slave, I2C_SLAVE_STOP, &rx_data); } else if (rx_status == I2C_SLAVE_RX_FIFO_EMPTY) { iproc_i2c->rx_start_rcvd = false; iproc_i2c->slave_read_complete = true; break; } rx_bytes++; } } static void slave_rx_tasklet_fn(unsigned long data) { struct bcm_iproc_i2c_dev *iproc_i2c = (struct bcm_iproc_i2c_dev *)data; u32 int_clr; bcm_iproc_i2c_slave_read(iproc_i2c); /* clear pending IS_S_RX_EVENT_SHIFT interrupt */ int_clr = BIT(IS_S_RX_EVENT_SHIFT); if (!iproc_i2c->slave_rx_only && iproc_i2c->slave_read_complete) { /* * In case of single byte master-read request, * IS_S_TX_UNDERRUN_SHIFT event is generated before * IS_S_START_BUSY_SHIFT event. Hence start slave data send * from first IS_S_TX_UNDERRUN_SHIFT event. * * This means don't send any data from slave when * IS_S_RD_EVENT_SHIFT event is generated else it will increment * eeprom or other backend slave driver read pointer twice. */ iproc_i2c->tx_underrun = 0; iproc_i2c->slave_int_mask |= BIT(IE_S_TX_UNDERRUN_SHIFT); /* clear IS_S_RD_EVENT_SHIFT interrupt */ int_clr |= BIT(IS_S_RD_EVENT_SHIFT); } /* clear slave interrupt */ iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, int_clr); /* enable slave interrupts */ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, iproc_i2c->slave_int_mask); } static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c, u32 status) { u32 val; u8 value; /* * Slave events in case of master-write, master-write-read and, * master-read * * Master-write : only IS_S_RX_EVENT_SHIFT event * Master-write-read: both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT * events * Master-read : both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT * events or only IS_S_RD_EVENT_SHIFT * * iproc has a slave rx fifo size of 64 bytes. Rx fifo full interrupt * (IS_S_RX_FIFO_FULL_SHIFT) will be generated when RX fifo becomes * full. This can happen if Master issues write requests of more than * 64 bytes. 
*/ if (status & BIT(IS_S_RX_EVENT_SHIFT) || status & BIT(IS_S_RD_EVENT_SHIFT) || status & BIT(IS_S_RX_FIFO_FULL_SHIFT)) { /* disable slave interrupts */ val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET); val &= ~iproc_i2c->slave_int_mask; iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val); if (status & BIT(IS_S_RD_EVENT_SHIFT)) /* Master-write-read request */ iproc_i2c->slave_rx_only = false; else /* Master-write request only */ iproc_i2c->slave_rx_only = true; /* schedule tasklet to read data later */ tasklet_schedule(&iproc_i2c->slave_rx_tasklet); /* * clear only IS_S_RX_EVENT_SHIFT and * IS_S_RX_FIFO_FULL_SHIFT interrupt. */ val = BIT(IS_S_RX_EVENT_SHIFT); if (status & BIT(IS_S_RX_FIFO_FULL_SHIFT)) val |= BIT(IS_S_RX_FIFO_FULL_SHIFT); iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, val); } if (status & BIT(IS_S_TX_UNDERRUN_SHIFT)) { iproc_i2c->tx_underrun++; if (iproc_i2c->tx_underrun == 1) /* Start of SMBUS for Master Read */ i2c_slave_event(iproc_i2c->slave, I2C_SLAVE_READ_REQUESTED, &value); else /* Master read other than start */ i2c_slave_event(iproc_i2c->slave, I2C_SLAVE_READ_PROCESSED, &value); iproc_i2c_wr_reg(iproc_i2c, S_TX_OFFSET, value); /* start transfer */ val = BIT(S_CMD_START_BUSY_SHIFT); iproc_i2c_wr_reg(iproc_i2c, S_CMD_OFFSET, val); /* clear interrupt */ iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, BIT(IS_S_TX_UNDERRUN_SHIFT)); } /* Stop received from master in case of master read transaction */ if (status & BIT(IS_S_START_BUSY_SHIFT)) { /* * Disable interrupt for TX FIFO becomes empty and * less than PKT_LENGTH bytes were output on the SMBUS */ iproc_i2c->slave_int_mask &= ~BIT(IE_S_TX_UNDERRUN_SHIFT); iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, iproc_i2c->slave_int_mask); /* End of SMBUS for Master Read */ val = BIT(S_TX_WR_STATUS_SHIFT); iproc_i2c_wr_reg(iproc_i2c, S_TX_OFFSET, val); val = BIT(S_CMD_START_BUSY_SHIFT); iproc_i2c_wr_reg(iproc_i2c, S_CMD_OFFSET, val); /* flush TX FIFOs */ val = iproc_i2c_rd_reg(iproc_i2c, S_FIFO_CTRL_OFFSET); val |= (BIT(S_FIFO_TX_FLUSH_SHIFT)); iproc_i2c_wr_reg(iproc_i2c, S_FIFO_CTRL_OFFSET, val); i2c_slave_event(iproc_i2c->slave, I2C_SLAVE_STOP, &value); /* clear interrupt */ iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, BIT(IS_S_START_BUSY_SHIFT)); } /* check slave transmit status only if slave is transmitting */ if (!iproc_i2c->slave_rx_only) bcm_iproc_i2c_check_slave_status(iproc_i2c); return true; } static void bcm_iproc_i2c_read_valid_bytes(struct bcm_iproc_i2c_dev *iproc_i2c) { struct i2c_msg *msg = iproc_i2c->msg; uint32_t val; /* Read valid data from RX FIFO */ while (iproc_i2c->rx_bytes < msg->len) { val = iproc_i2c_rd_reg(iproc_i2c, M_RX_OFFSET); /* rx fifo empty */ if (!((val >> M_RX_STATUS_SHIFT) & M_RX_STATUS_MASK)) break; msg->buf[iproc_i2c->rx_bytes] = (val >> M_RX_DATA_SHIFT) & M_RX_DATA_MASK; iproc_i2c->rx_bytes++; } } static void bcm_iproc_i2c_send(struct bcm_iproc_i2c_dev *iproc_i2c) { struct i2c_msg *msg = iproc_i2c->msg; unsigned int tx_bytes = msg->len - iproc_i2c->tx_bytes; unsigned int i; u32 val; /* can only fill up to the FIFO size */ tx_bytes = min_t(unsigned int, tx_bytes, M_TX_RX_FIFO_SIZE); for (i = 0; i < tx_bytes; i++) { /* start from where we left over */ unsigned int idx = iproc_i2c->tx_bytes + i; val = msg->buf[idx]; /* mark the last byte */ if (idx == msg->len - 1) { val |= BIT(M_TX_WR_STATUS_SHIFT); if (iproc_i2c->irq) { u32 tmp; /* * Since this is the last byte, we should now * disable TX FIFO underrun interrupt */ tmp = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET); tmp &= ~BIT(IE_M_TX_UNDERRUN_SHIFT); iproc_i2c_wr_reg(iproc_i2c, 
IE_OFFSET, tmp); } } /* load data into TX FIFO */ iproc_i2c_wr_reg(iproc_i2c, M_TX_OFFSET, val); } /* update number of transferred bytes */ iproc_i2c->tx_bytes += tx_bytes; } static void bcm_iproc_i2c_read(struct bcm_iproc_i2c_dev *iproc_i2c) { struct i2c_msg *msg = iproc_i2c->msg; u32 bytes_left, val; bcm_iproc_i2c_read_valid_bytes(iproc_i2c); bytes_left = msg->len - iproc_i2c->rx_bytes; if (bytes_left == 0) { if (iproc_i2c->irq) { /* finished reading all data, disable rx thld event */ val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET); val &= ~BIT(IS_M_RX_THLD_SHIFT); iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val); } } else if (bytes_left < iproc_i2c->thld_bytes) { /* set bytes left as threshold */ val = iproc_i2c_rd_reg(iproc_i2c, M_FIFO_CTRL_OFFSET); val &= ~(M_FIFO_RX_THLD_MASK << M_FIFO_RX_THLD_SHIFT); val |= (bytes_left << M_FIFO_RX_THLD_SHIFT); iproc_i2c_wr_reg(iproc_i2c, M_FIFO_CTRL_OFFSET, val); iproc_i2c->thld_bytes = bytes_left; } /* * bytes_left >= iproc_i2c->thld_bytes, * hence no need to change the THRESHOLD SET. * It will remain as iproc_i2c->thld_bytes itself */ } static void bcm_iproc_i2c_process_m_event(struct bcm_iproc_i2c_dev *iproc_i2c, u32 status) { /* TX FIFO is empty and we have more data to send */ if (status & BIT(IS_M_TX_UNDERRUN_SHIFT)) bcm_iproc_i2c_send(iproc_i2c); /* RX FIFO threshold is reached and data needs to be read out */ if (status & BIT(IS_M_RX_THLD_SHIFT)) bcm_iproc_i2c_read(iproc_i2c); /* transfer is done */ if (status & BIT(IS_M_START_BUSY_SHIFT)) { iproc_i2c->xfer_is_done = 1; if (iproc_i2c->irq) complete(&iproc_i2c->done); } } static irqreturn_t bcm_iproc_i2c_isr(int irq, void *data) { struct bcm_iproc_i2c_dev *iproc_i2c = data; u32 slave_status; u32 status; bool ret; status = iproc_i2c_rd_reg(iproc_i2c, IS_OFFSET); /* process only slave interrupt which are enabled */ slave_status = status & iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET) & ISR_MASK_SLAVE; if (slave_status) { ret = bcm_iproc_i2c_slave_isr(iproc_i2c, slave_status); if (ret) return IRQ_HANDLED; else return IRQ_NONE; } status &= ISR_MASK; if (!status) return IRQ_NONE; /* process all master based events */ bcm_iproc_i2c_process_m_event(iproc_i2c, status); iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, status); return IRQ_HANDLED; } static int bcm_iproc_i2c_init(struct bcm_iproc_i2c_dev *iproc_i2c) { u32 val; /* put controller in reset */ val = iproc_i2c_rd_reg(iproc_i2c, CFG_OFFSET); val |= BIT(CFG_RESET_SHIFT); val &= ~(BIT(CFG_EN_SHIFT)); iproc_i2c_wr_reg(iproc_i2c, CFG_OFFSET, val); /* wait 100 usec per spec */ udelay(100); /* bring controller out of reset */ val &= ~(BIT(CFG_RESET_SHIFT)); iproc_i2c_wr_reg(iproc_i2c, CFG_OFFSET, val); /* flush TX/RX FIFOs and set RX FIFO threshold to zero */ val = (BIT(M_FIFO_RX_FLUSH_SHIFT) | BIT(M_FIFO_TX_FLUSH_SHIFT)); iproc_i2c_wr_reg(iproc_i2c, M_FIFO_CTRL_OFFSET, val); /* disable all interrupts */ val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET); val &= ~(IE_M_ALL_INTERRUPT_MASK << IE_M_ALL_INTERRUPT_SHIFT); iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val); /* clear all pending interrupts */ iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, 0xffffffff); return 0; } static void bcm_iproc_i2c_enable_disable(struct bcm_iproc_i2c_dev *iproc_i2c, bool enable) { u32 val; val = iproc_i2c_rd_reg(iproc_i2c, CFG_OFFSET); if (enable) val |= BIT(CFG_EN_SHIFT); else val &= ~BIT(CFG_EN_SHIFT); iproc_i2c_wr_reg(iproc_i2c, CFG_OFFSET, val); } static int bcm_iproc_i2c_check_status(struct bcm_iproc_i2c_dev *iproc_i2c, struct i2c_msg *msg) { u32 val; val = iproc_i2c_rd_reg(iproc_i2c, M_CMD_OFFSET); val 
= (val >> M_CMD_STATUS_SHIFT) & M_CMD_STATUS_MASK; switch (val) { case M_CMD_STATUS_SUCCESS: return 0; case M_CMD_STATUS_LOST_ARB: dev_dbg(iproc_i2c->device, "lost bus arbitration\n"); return -EAGAIN; case M_CMD_STATUS_NACK_ADDR: dev_dbg(iproc_i2c->device, "NAK addr:0x%02x\n", msg->addr); return -ENXIO; case M_CMD_STATUS_NACK_DATA: dev_dbg(iproc_i2c->device, "NAK data\n"); return -ENXIO; case M_CMD_STATUS_TIMEOUT: dev_dbg(iproc_i2c->device, "bus timeout\n"); return -ETIMEDOUT; case M_CMD_STATUS_FIFO_UNDERRUN: dev_dbg(iproc_i2c->device, "FIFO under-run\n"); return -ENXIO; case M_CMD_STATUS_RX_FIFO_FULL: dev_dbg(iproc_i2c->device, "RX FIFO full\n"); return -ETIMEDOUT; default: dev_dbg(iproc_i2c->device, "unknown error code=%d\n", val); /* re-initialize i2c for recovery */ bcm_iproc_i2c_enable_disable(iproc_i2c, false); bcm_iproc_i2c_init(iproc_i2c); bcm_iproc_i2c_enable_disable(iproc_i2c, true); return -EIO; } } static int bcm_iproc_i2c_xfer_wait(struct bcm_iproc_i2c_dev *iproc_i2c, struct i2c_msg *msg, u32 cmd) { unsigned long time_left = msecs_to_jiffies(I2C_TIMEOUT_MSEC); u32 val, status; int ret; iproc_i2c_wr_reg(iproc_i2c, M_CMD_OFFSET, cmd); if (iproc_i2c->irq) { time_left = wait_for_completion_timeout(&iproc_i2c->done, time_left); /* disable all interrupts */ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, 0); /* read it back to flush the write */ iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET); /* make sure the interrupt handler isn't running */ synchronize_irq(iproc_i2c->irq); } else { /* polling mode */ unsigned long timeout = jiffies + time_left; do { status = iproc_i2c_rd_reg(iproc_i2c, IS_OFFSET) & ISR_MASK; bcm_iproc_i2c_process_m_event(iproc_i2c, status); iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, status); if (time_after(jiffies, timeout)) { time_left = 0; break; } cpu_relax(); cond_resched(); } while (!iproc_i2c->xfer_is_done); } if (!time_left && !iproc_i2c->xfer_is_done) { dev_err(iproc_i2c->device, "transaction timed out\n"); /* flush both TX/RX FIFOs */ val = BIT(M_FIFO_RX_FLUSH_SHIFT) | BIT(M_FIFO_TX_FLUSH_SHIFT); iproc_i2c_wr_reg(iproc_i2c, M_FIFO_CTRL_OFFSET, val); return -ETIMEDOUT; } ret = bcm_iproc_i2c_check_status(iproc_i2c, msg); if (ret) { /* flush both TX/RX FIFOs */ val = BIT(M_FIFO_RX_FLUSH_SHIFT) | BIT(M_FIFO_TX_FLUSH_SHIFT); iproc_i2c_wr_reg(iproc_i2c, M_FIFO_CTRL_OFFSET, val); return ret; } return 0; } /* * If 'process_call' is true, then this is a multi-msg transfer that requires * a repeated start between the messages. * More specifically, it must be a write (reg) followed by a read (data). * The i2c quirks are set to enforce this rule. */ static int bcm_iproc_i2c_xfer_internal(struct bcm_iproc_i2c_dev *iproc_i2c, struct i2c_msg *msgs, bool process_call) { int i; u8 addr; u32 val, tmp, val_intr_en; unsigned int tx_bytes; struct i2c_msg *msg = &msgs[0]; /* check if bus is busy */ if (!!(iproc_i2c_rd_reg(iproc_i2c, M_CMD_OFFSET) & BIT(M_CMD_START_BUSY_SHIFT))) { dev_warn(iproc_i2c->device, "bus is busy\n"); return -EBUSY; } iproc_i2c->msg = msg; /* format and load slave address into the TX FIFO */ addr = i2c_8bit_addr_from_msg(msg); iproc_i2c_wr_reg(iproc_i2c, M_TX_OFFSET, addr); /* * For a write transaction, load data into the TX FIFO. 
Only allow * loading up to TX FIFO size - 1 bytes of data since the first byte * has been used up by the slave address */ tx_bytes = min_t(unsigned int, msg->len, M_TX_RX_FIFO_SIZE - 1); if (!(msg->flags & I2C_M_RD)) { for (i = 0; i < tx_bytes; i++) { val = msg->buf[i]; /* mark the last byte */ if (!process_call && (i == msg->len - 1)) val |= BIT(M_TX_WR_STATUS_SHIFT); iproc_i2c_wr_reg(iproc_i2c, M_TX_OFFSET, val); } iproc_i2c->tx_bytes = tx_bytes; } /* Process the read message if this is process call */ if (process_call) { msg++; iproc_i2c->msg = msg; /* point to second msg */ /* * The last byte to be sent out should be a slave * address with read operation */ addr = i2c_8bit_addr_from_msg(msg); /* mark it the last byte out */ val = addr | BIT(M_TX_WR_STATUS_SHIFT); iproc_i2c_wr_reg(iproc_i2c, M_TX_OFFSET, val); } /* mark as incomplete before starting the transaction */ if (iproc_i2c->irq) reinit_completion(&iproc_i2c->done); iproc_i2c->xfer_is_done = 0; /* * Enable the "start busy" interrupt, which will be triggered after the * transaction is done, i.e., the internal start_busy bit, transitions * from 1 to 0. */ val_intr_en = BIT(IE_M_START_BUSY_SHIFT); /* * If TX data size is larger than the TX FIFO, need to enable TX * underrun interrupt, which will be triggerred when the TX FIFO is * empty. When that happens we can then pump more data into the FIFO */ if (!process_call && !(msg->flags & I2C_M_RD) && msg->len > iproc_i2c->tx_bytes) val_intr_en |= BIT(IE_M_TX_UNDERRUN_SHIFT); /* * Now we can activate the transfer. For a read operation, specify the * number of bytes to read */ val = BIT(M_CMD_START_BUSY_SHIFT); if (msg->len == 0) { /* SMBUS QUICK Command (Read/Write) */ val |= (M_CMD_PROTOCOL_QUICK << M_CMD_PROTOCOL_SHIFT); } else if (msg->flags & I2C_M_RD) { u32 protocol; iproc_i2c->rx_bytes = 0; if (msg->len > M_RX_FIFO_MAX_THLD_VALUE) iproc_i2c->thld_bytes = M_RX_FIFO_THLD_VALUE; else iproc_i2c->thld_bytes = msg->len; /* set threshold value */ tmp = iproc_i2c_rd_reg(iproc_i2c, M_FIFO_CTRL_OFFSET); tmp &= ~(M_FIFO_RX_THLD_MASK << M_FIFO_RX_THLD_SHIFT); tmp |= iproc_i2c->thld_bytes << M_FIFO_RX_THLD_SHIFT; iproc_i2c_wr_reg(iproc_i2c, M_FIFO_CTRL_OFFSET, tmp); /* enable the RX threshold interrupt */ val_intr_en |= BIT(IE_M_RX_THLD_SHIFT); protocol = process_call ? 
M_CMD_PROTOCOL_PROCESS : M_CMD_PROTOCOL_BLK_RD; val |= (protocol << M_CMD_PROTOCOL_SHIFT) | (msg->len << M_CMD_RD_CNT_SHIFT); } else { val |= (M_CMD_PROTOCOL_BLK_WR << M_CMD_PROTOCOL_SHIFT); } if (iproc_i2c->irq) iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val_intr_en); return bcm_iproc_i2c_xfer_wait(iproc_i2c, msg, val); } static int bcm_iproc_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg msgs[], int num) { struct bcm_iproc_i2c_dev *iproc_i2c = i2c_get_adapdata(adapter); bool process_call = false; int ret; if (num == 2) { /* Repeated start, use process call */ process_call = true; if (msgs[1].flags & I2C_M_NOSTART) { dev_err(iproc_i2c->device, "Invalid repeated start\n"); return -EOPNOTSUPP; } } ret = bcm_iproc_i2c_xfer_internal(iproc_i2c, msgs, process_call); if (ret) { dev_dbg(iproc_i2c->device, "xfer failed\n"); return ret; } return num; } static uint32_t bcm_iproc_i2c_functionality(struct i2c_adapter *adap) { u32 val; val = I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; if (adap->algo->reg_slave) val |= I2C_FUNC_SLAVE; return val; } static struct i2c_algorithm bcm_iproc_algo = { .master_xfer = bcm_iproc_i2c_xfer, .functionality = bcm_iproc_i2c_functionality, .reg_slave = bcm_iproc_i2c_reg_slave, .unreg_slave = bcm_iproc_i2c_unreg_slave, }; static const struct i2c_adapter_quirks bcm_iproc_i2c_quirks = { .flags = I2C_AQ_COMB_WRITE_THEN_READ, .max_comb_1st_msg_len = M_TX_RX_FIFO_SIZE, .max_read_len = M_RX_MAX_READ_LEN, }; static int bcm_iproc_i2c_cfg_speed(struct bcm_iproc_i2c_dev *iproc_i2c) { unsigned int bus_speed; u32 val; int ret = of_property_read_u32(iproc_i2c->device->of_node, "clock-frequency", &bus_speed); if (ret < 0) { dev_info(iproc_i2c->device, "unable to interpret clock-frequency DT property\n"); bus_speed = I2C_MAX_STANDARD_MODE_FREQ; } if (bus_speed < I2C_MAX_STANDARD_MODE_FREQ) { dev_err(iproc_i2c->device, "%d Hz bus speed not supported\n", bus_speed); dev_err(iproc_i2c->device, "valid speeds are 100khz and 400khz\n"); return -EINVAL; } else if (bus_speed < I2C_MAX_FAST_MODE_FREQ) { bus_speed = I2C_MAX_STANDARD_MODE_FREQ; } else { bus_speed = I2C_MAX_FAST_MODE_FREQ; } iproc_i2c->bus_speed = bus_speed; val = iproc_i2c_rd_reg(iproc_i2c, TIM_CFG_OFFSET); val &= ~BIT(TIM_CFG_MODE_400_SHIFT); val |= (bus_speed == I2C_MAX_FAST_MODE_FREQ) << TIM_CFG_MODE_400_SHIFT; iproc_i2c_wr_reg(iproc_i2c, TIM_CFG_OFFSET, val); dev_info(iproc_i2c->device, "bus set to %u Hz\n", bus_speed); return 0; } static int bcm_iproc_i2c_probe(struct platform_device *pdev) { int irq, ret = 0; struct bcm_iproc_i2c_dev *iproc_i2c; struct i2c_adapter *adap; iproc_i2c = devm_kzalloc(&pdev->dev, sizeof(*iproc_i2c), GFP_KERNEL); if (!iproc_i2c) return -ENOMEM; platform_set_drvdata(pdev, iproc_i2c); iproc_i2c->device = &pdev->dev; iproc_i2c->type = (enum bcm_iproc_i2c_type)of_device_get_match_data(&pdev->dev); init_completion(&iproc_i2c->done); iproc_i2c->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(iproc_i2c->base)) return PTR_ERR(iproc_i2c->base); if (iproc_i2c->type == IPROC_I2C_NIC) { iproc_i2c->idm_base = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(iproc_i2c->idm_base)) return PTR_ERR(iproc_i2c->idm_base); ret = of_property_read_u32(iproc_i2c->device->of_node, "brcm,ape-hsls-addr-mask", &iproc_i2c->ape_addr_mask); if (ret < 0) { dev_err(iproc_i2c->device, "'brcm,ape-hsls-addr-mask' missing\n"); return -EINVAL; } spin_lock_init(&iproc_i2c->idm_lock); /* no slave support */ bcm_iproc_algo.reg_slave = NULL; bcm_iproc_algo.unreg_slave = NULL; } ret = bcm_iproc_i2c_init(iproc_i2c); if (ret) return 
ret; ret = bcm_iproc_i2c_cfg_speed(iproc_i2c); if (ret) return ret; irq = platform_get_irq(pdev, 0); if (irq > 0) { ret = devm_request_irq(iproc_i2c->device, irq, bcm_iproc_i2c_isr, 0, pdev->name, iproc_i2c); if (ret < 0) { dev_err(iproc_i2c->device, "unable to request irq %i\n", irq); return ret; } iproc_i2c->irq = irq; } else { dev_warn(iproc_i2c->device, "no irq resource, falling back to poll mode\n"); } bcm_iproc_i2c_enable_disable(iproc_i2c, true); adap = &iproc_i2c->adapter; i2c_set_adapdata(adap, iproc_i2c); snprintf(adap->name, sizeof(adap->name), "Broadcom iProc (%s)", of_node_full_name(iproc_i2c->device->of_node)); adap->algo = &bcm_iproc_algo; adap->quirks = &bcm_iproc_i2c_quirks; adap->dev.parent = &pdev->dev; adap->dev.of_node = pdev->dev.of_node; return i2c_add_adapter(adap); } static void bcm_iproc_i2c_remove(struct platform_device *pdev) { struct bcm_iproc_i2c_dev *iproc_i2c = platform_get_drvdata(pdev); if (iproc_i2c->irq) { /* * Make sure there's no pending interrupt when we remove the * adapter */ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, 0); iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET); synchronize_irq(iproc_i2c->irq); } i2c_del_adapter(&iproc_i2c->adapter); bcm_iproc_i2c_enable_disable(iproc_i2c, false); } static int bcm_iproc_i2c_suspend(struct device *dev) { struct bcm_iproc_i2c_dev *iproc_i2c = dev_get_drvdata(dev); if (iproc_i2c->irq) { /* * Make sure there's no pending interrupt when we go into * suspend */ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, 0); iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET); synchronize_irq(iproc_i2c->irq); } /* now disable the controller */ bcm_iproc_i2c_enable_disable(iproc_i2c, false); return 0; } static int bcm_iproc_i2c_resume(struct device *dev) { struct bcm_iproc_i2c_dev *iproc_i2c = dev_get_drvdata(dev); int ret; u32 val; /* * Power domain could have been shut off completely in system deep * sleep, so re-initialize the block here */ ret = bcm_iproc_i2c_init(iproc_i2c); if (ret) return ret; /* configure to the desired bus speed */ val = iproc_i2c_rd_reg(iproc_i2c, TIM_CFG_OFFSET); val &= ~BIT(TIM_CFG_MODE_400_SHIFT); val |= (iproc_i2c->bus_speed == I2C_MAX_FAST_MODE_FREQ) << TIM_CFG_MODE_400_SHIFT; iproc_i2c_wr_reg(iproc_i2c, TIM_CFG_OFFSET, val); bcm_iproc_i2c_enable_disable(iproc_i2c, true); return 0; } static const struct dev_pm_ops bcm_iproc_i2c_pm_ops = { .suspend_late = &bcm_iproc_i2c_suspend, .resume_early = &bcm_iproc_i2c_resume }; static int bcm_iproc_i2c_reg_slave(struct i2c_client *slave) { struct bcm_iproc_i2c_dev *iproc_i2c = i2c_get_adapdata(slave->adapter); if (iproc_i2c->slave) return -EBUSY; if (slave->flags & I2C_CLIENT_TEN) return -EAFNOSUPPORT; iproc_i2c->slave = slave; tasklet_init(&iproc_i2c->slave_rx_tasklet, slave_rx_tasklet_fn, (unsigned long)iproc_i2c); bcm_iproc_i2c_slave_init(iproc_i2c, false); return 0; } static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave) { u32 tmp; struct bcm_iproc_i2c_dev *iproc_i2c = i2c_get_adapdata(slave->adapter); if (!iproc_i2c->slave) return -EINVAL; disable_irq(iproc_i2c->irq); tasklet_kill(&iproc_i2c->slave_rx_tasklet); /* disable all slave interrupts */ tmp = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET); tmp &= ~(IE_S_ALL_INTERRUPT_MASK << IE_S_ALL_INTERRUPT_SHIFT); iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, tmp); /* Erase the slave address programmed */ tmp = iproc_i2c_rd_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET); tmp &= ~BIT(S_CFG_EN_NIC_SMB_ADDR3_SHIFT); iproc_i2c_wr_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET, tmp); /* flush TX/RX FIFOs */ tmp = (BIT(S_FIFO_RX_FLUSH_SHIFT) | 
BIT(S_FIFO_TX_FLUSH_SHIFT)); iproc_i2c_wr_reg(iproc_i2c, S_FIFO_CTRL_OFFSET, tmp); /* clear all pending slave interrupts */ iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, ISR_MASK_SLAVE); iproc_i2c->slave = NULL; enable_irq(iproc_i2c->irq); return 0; } static const struct of_device_id bcm_iproc_i2c_of_match[] = { { .compatible = "brcm,iproc-i2c", .data = (int *)IPROC_I2C, }, { .compatible = "brcm,iproc-nic-i2c", .data = (int *)IPROC_I2C_NIC, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, bcm_iproc_i2c_of_match); static struct platform_driver bcm_iproc_i2c_driver = { .driver = { .name = "bcm-iproc-i2c", .of_match_table = bcm_iproc_i2c_of_match, .pm = pm_sleep_ptr(&bcm_iproc_i2c_pm_ops), }, .probe = bcm_iproc_i2c_probe, .remove_new = bcm_iproc_i2c_remove, }; module_platform_driver(bcm_iproc_i2c_driver); MODULE_AUTHOR("Ray Jui <[email protected]>"); MODULE_DESCRIPTION("Broadcom iProc I2C Driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/i2c/busses/i2c-bcm-iproc.c
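A minimal userspace sketch of the combined write-then-read transfer that the adapter quirks in the driver above enforce (I2C_AQ_COMB_WRITE_THEN_READ), issued through the generic i2c-dev I2C_RDWR ioctl; the driver services such a two-message pair through its internal process-call path with a repeated start. The bus node (/dev/i2c-1), device address (0x50) and register offset (0x10) are assumptions for illustration only, not values taken from the driver.

/*
 * Illustrative sketch: write a register offset, then read back with a
 * repeated start via i2c-dev. Bus, address and register are assumed values.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

int main(void)
{
	unsigned char reg = 0x10;              /* assumed register offset */
	unsigned char data[4];
	struct i2c_msg msgs[2] = {
		/* first message: one-byte write selecting the register */
		{ .addr = 0x50, .flags = 0,        .len = 1,            .buf = &reg },
		/* second message: read with repeated start */
		{ .addr = 0x50, .flags = I2C_M_RD, .len = sizeof(data), .buf = data },
	};
	struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 2 };
	int fd = open("/dev/i2c-1", O_RDWR);   /* assumed bus */

	if (fd < 0 || ioctl(fd, I2C_RDWR, &xfer) < 0) {
		perror("i2c write-then-read");
		return 1;
	}
	printf("read: %02x %02x %02x %02x\n",
	       data[0], data[1], data[2], data[3]);
	close(fd);
	return 0;
}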
/* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * Copyright(c) 2012 Intel Corporation. All rights reserved. * * GPL LICENSE SUMMARY * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * The full GNU General Public License is included in this distribution * in the file called LICENSE.GPL. * * BSD LICENSE * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * Supports the SMBus Message Transport (SMT) in the Intel Atom Processor * S12xx Product Family. 
* * Features supported by this driver: * Hardware PEC yes * Block buffer yes * Block process call transaction yes * Slave mode no */ #include <linux/module.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/stddef.h> #include <linux/completion.h> #include <linux/dma-mapping.h> #include <linux/i2c.h> #include <linux/acpi.h> #include <linux/interrupt.h> #include <linux/io-64-nonatomic-lo-hi.h> /* PCI Address Constants */ #define SMBBAR 0 /* PCI DIDs for the Intel SMBus Message Transport (SMT) Devices */ #define PCI_DEVICE_ID_INTEL_S1200_SMT0 0x0c59 #define PCI_DEVICE_ID_INTEL_S1200_SMT1 0x0c5a #define PCI_DEVICE_ID_INTEL_CDF_SMT 0x18ac #define PCI_DEVICE_ID_INTEL_DNV_SMT 0x19ac #define PCI_DEVICE_ID_INTEL_EBG_SMT 0x1bff #define PCI_DEVICE_ID_INTEL_AVOTON_SMT 0x1f15 #define ISMT_DESC_ENTRIES 2 /* number of descriptor entries */ #define ISMT_MAX_RETRIES 3 /* number of SMBus retries to attempt */ #define ISMT_LOG_ENTRIES 3 /* number of interrupt cause log entries */ /* Hardware Descriptor Constants - Control Field */ #define ISMT_DESC_CWRL 0x01 /* Command/Write Length */ #define ISMT_DESC_BLK 0X04 /* Perform Block Transaction */ #define ISMT_DESC_FAIR 0x08 /* Set fairness flag upon successful arbit. */ #define ISMT_DESC_PEC 0x10 /* Packet Error Code */ #define ISMT_DESC_I2C 0x20 /* I2C Enable */ #define ISMT_DESC_INT 0x40 /* Interrupt */ #define ISMT_DESC_SOE 0x80 /* Stop On Error */ /* Hardware Descriptor Constants - Status Field */ #define ISMT_DESC_SCS 0x01 /* Success */ #define ISMT_DESC_DLTO 0x04 /* Data Low Time Out */ #define ISMT_DESC_NAK 0x08 /* NAK Received */ #define ISMT_DESC_CRC 0x10 /* CRC Error */ #define ISMT_DESC_CLTO 0x20 /* Clock Low Time Out */ #define ISMT_DESC_COL 0x40 /* Collisions */ #define ISMT_DESC_LPR 0x80 /* Large Packet Received */ /* Macros */ #define ISMT_DESC_ADDR_RW(addr, rw) (((addr) << 1) | (rw)) /* iSMT General Register address offsets (SMBBAR + <addr>) */ #define ISMT_GR_GCTRL 0x000 /* General Control */ #define ISMT_GR_SMTICL 0x008 /* SMT Interrupt Cause Location */ #define ISMT_GR_ERRINTMSK 0x010 /* Error Interrupt Mask */ #define ISMT_GR_ERRAERMSK 0x014 /* Error AER Mask */ #define ISMT_GR_ERRSTS 0x018 /* Error Status */ #define ISMT_GR_ERRINFO 0x01c /* Error Information */ /* iSMT Master Registers */ #define ISMT_MSTR_MDBA 0x100 /* Master Descriptor Base Address */ #define ISMT_MSTR_MCTRL 0x108 /* Master Control */ #define ISMT_MSTR_MSTS 0x10c /* Master Status */ #define ISMT_MSTR_MDS 0x110 /* Master Descriptor Size */ #define ISMT_MSTR_RPOLICY 0x114 /* Retry Policy */ /* iSMT Miscellaneous Registers */ #define ISMT_SPGT 0x300 /* SMBus PHY Global Timing */ /* General Control Register (GCTRL) bit definitions */ #define ISMT_GCTRL_TRST 0x04 /* Target Reset */ #define ISMT_GCTRL_KILL 0x08 /* Kill */ #define ISMT_GCTRL_SRST 0x40 /* Soft Reset */ /* Master Control Register (MCTRL) bit definitions */ #define ISMT_MCTRL_SS 0x01 /* Start/Stop */ #define ISMT_MCTRL_MEIE 0x10 /* Master Error Interrupt Enable */ #define ISMT_MCTRL_FMHP 0x00ff0000 /* Firmware Master Head Ptr (FMHP) */ /* Master Status Register (MSTS) bit definitions */ #define ISMT_MSTS_HMTP 0xff0000 /* HW Master Tail Pointer (HMTP) */ #define ISMT_MSTS_MIS 0x20 /* Master Interrupt Status (MIS) */ #define ISMT_MSTS_MEIS 0x10 /* Master Error Int Status (MEIS) */ #define ISMT_MSTS_IP 0x01 /* In Progress */ /* Master Descriptor Size (MDS) bit definitions */ #define ISMT_MDS_MASK 0xff /* Master Descriptor Size mask (MDS) */ /* SMBus PHY Global Timing Register (SPGT) bit definitions */ 
#define ISMT_SPGT_SPD_MASK 0xc0000000 /* SMBus Speed mask */ #define ISMT_SPGT_SPD_80K 0x00 /* 80 kHz */ #define ISMT_SPGT_SPD_100K (0x1 << 30) /* 100 kHz */ #define ISMT_SPGT_SPD_400K (0x2U << 30) /* 400 kHz */ #define ISMT_SPGT_SPD_1M (0x3U << 30) /* 1 MHz */ /* MSI Control Register (MSICTL) bit definitions */ #define ISMT_MSICTL_MSIE 0x01 /* MSI Enable */ /* iSMT Hardware Descriptor */ struct ismt_desc { u8 tgtaddr_rw; /* target address & r/w bit */ u8 wr_len_cmd; /* write length in bytes or a command */ u8 rd_len; /* read length */ u8 control; /* control bits */ u8 status; /* status bits */ u8 retry; /* collision retry and retry count */ u8 rxbytes; /* received bytes */ u8 txbytes; /* transmitted bytes */ u32 dptr_low; /* lower 32 bit of the data pointer */ u32 dptr_high; /* upper 32 bit of the data pointer */ } __packed; struct ismt_priv { struct i2c_adapter adapter; void __iomem *smba; /* PCI BAR */ struct pci_dev *pci_dev; struct ismt_desc *hw; /* descriptor virt base addr */ dma_addr_t io_rng_dma; /* descriptor HW base addr */ u8 head; /* ring buffer head pointer */ struct completion cmp; /* interrupt completion */ u8 buffer[I2C_SMBUS_BLOCK_MAX + 16]; /* temp R/W data buffer */ dma_addr_t log_dma; u32 *log; }; static const struct pci_device_id ismt_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT0) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT1) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CDF_SMT) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMT) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EBG_SMT) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AVOTON_SMT) }, { 0, } }; MODULE_DEVICE_TABLE(pci, ismt_ids); /* Bus speed control bits for slow debuggers - refer to the docs for usage */ static unsigned int bus_speed; module_param(bus_speed, uint, S_IRUGO); MODULE_PARM_DESC(bus_speed, "Bus Speed in kHz (0 = BIOS default)"); /** * __ismt_desc_dump() - dump the contents of a specific descriptor * @dev: the iSMT device * @desc: the iSMT hardware descriptor */ static void __ismt_desc_dump(struct device *dev, const struct ismt_desc *desc) { dev_dbg(dev, "Descriptor struct: %p\n", desc); dev_dbg(dev, "\ttgtaddr_rw=0x%02X\n", desc->tgtaddr_rw); dev_dbg(dev, "\twr_len_cmd=0x%02X\n", desc->wr_len_cmd); dev_dbg(dev, "\trd_len= 0x%02X\n", desc->rd_len); dev_dbg(dev, "\tcontrol= 0x%02X\n", desc->control); dev_dbg(dev, "\tstatus= 0x%02X\n", desc->status); dev_dbg(dev, "\tretry= 0x%02X\n", desc->retry); dev_dbg(dev, "\trxbytes= 0x%02X\n", desc->rxbytes); dev_dbg(dev, "\ttxbytes= 0x%02X\n", desc->txbytes); dev_dbg(dev, "\tdptr_low= 0x%08X\n", desc->dptr_low); dev_dbg(dev, "\tdptr_high= 0x%08X\n", desc->dptr_high); } /** * ismt_desc_dump() - dump the contents of a descriptor for debug purposes * @priv: iSMT private data */ static void ismt_desc_dump(struct ismt_priv *priv) { struct device *dev = &priv->pci_dev->dev; struct ismt_desc *desc = &priv->hw[priv->head]; dev_dbg(dev, "Dump of the descriptor struct: 0x%X\n", priv->head); __ismt_desc_dump(dev, desc); } /** * ismt_gen_reg_dump() - dump the iSMT General Registers * @priv: iSMT private data */ static void ismt_gen_reg_dump(struct ismt_priv *priv) { struct device *dev = &priv->pci_dev->dev; dev_dbg(dev, "Dump of the iSMT General Registers\n"); dev_dbg(dev, " GCTRL.... : (0x%p)=0x%X\n", priv->smba + ISMT_GR_GCTRL, readl(priv->smba + ISMT_GR_GCTRL)); dev_dbg(dev, " SMTICL... 
: (0x%p)=0x%016llX\n", priv->smba + ISMT_GR_SMTICL, (long long unsigned int)readq(priv->smba + ISMT_GR_SMTICL)); dev_dbg(dev, " ERRINTMSK : (0x%p)=0x%X\n", priv->smba + ISMT_GR_ERRINTMSK, readl(priv->smba + ISMT_GR_ERRINTMSK)); dev_dbg(dev, " ERRAERMSK : (0x%p)=0x%X\n", priv->smba + ISMT_GR_ERRAERMSK, readl(priv->smba + ISMT_GR_ERRAERMSK)); dev_dbg(dev, " ERRSTS... : (0x%p)=0x%X\n", priv->smba + ISMT_GR_ERRSTS, readl(priv->smba + ISMT_GR_ERRSTS)); dev_dbg(dev, " ERRINFO.. : (0x%p)=0x%X\n", priv->smba + ISMT_GR_ERRINFO, readl(priv->smba + ISMT_GR_ERRINFO)); } /** * ismt_mstr_reg_dump() - dump the iSMT Master Registers * @priv: iSMT private data */ static void ismt_mstr_reg_dump(struct ismt_priv *priv) { struct device *dev = &priv->pci_dev->dev; dev_dbg(dev, "Dump of the iSMT Master Registers\n"); dev_dbg(dev, " MDBA..... : (0x%p)=0x%016llX\n", priv->smba + ISMT_MSTR_MDBA, (long long unsigned int)readq(priv->smba + ISMT_MSTR_MDBA)); dev_dbg(dev, " MCTRL.... : (0x%p)=0x%X\n", priv->smba + ISMT_MSTR_MCTRL, readl(priv->smba + ISMT_MSTR_MCTRL)); dev_dbg(dev, " MSTS..... : (0x%p)=0x%X\n", priv->smba + ISMT_MSTR_MSTS, readl(priv->smba + ISMT_MSTR_MSTS)); dev_dbg(dev, " MDS...... : (0x%p)=0x%X\n", priv->smba + ISMT_MSTR_MDS, readl(priv->smba + ISMT_MSTR_MDS)); dev_dbg(dev, " RPOLICY.. : (0x%p)=0x%X\n", priv->smba + ISMT_MSTR_RPOLICY, readl(priv->smba + ISMT_MSTR_RPOLICY)); dev_dbg(dev, " SPGT..... : (0x%p)=0x%X\n", priv->smba + ISMT_SPGT, readl(priv->smba + ISMT_SPGT)); } /** * ismt_submit_desc() - add a descriptor to the ring * @priv: iSMT private data */ static void ismt_submit_desc(struct ismt_priv *priv) { uint fmhp; uint val; ismt_desc_dump(priv); ismt_gen_reg_dump(priv); ismt_mstr_reg_dump(priv); /* Set the FMHP (Firmware Master Head Pointer)*/ fmhp = ((priv->head + 1) % ISMT_DESC_ENTRIES) << 16; val = readl(priv->smba + ISMT_MSTR_MCTRL); writel((val & ~ISMT_MCTRL_FMHP) | fmhp, priv->smba + ISMT_MSTR_MCTRL); /* Set the start bit */ val = readl(priv->smba + ISMT_MSTR_MCTRL); writel(val | ISMT_MCTRL_SS, priv->smba + ISMT_MSTR_MCTRL); } /** * ismt_process_desc() - handle the completion of the descriptor * @desc: the iSMT hardware descriptor * @data: data buffer from the upper layer * @priv: ismt_priv struct holding our dma buffer * @size: SMBus transaction type * @read_write: flag to indicate if this is a read or write */ static int ismt_process_desc(const struct ismt_desc *desc, union i2c_smbus_data *data, struct ismt_priv *priv, int size, char read_write) { u8 *dma_buffer = PTR_ALIGN(&priv->buffer[0], 16); dev_dbg(&priv->pci_dev->dev, "Processing completed descriptor\n"); __ismt_desc_dump(&priv->pci_dev->dev, desc); ismt_gen_reg_dump(priv); ismt_mstr_reg_dump(priv); if (desc->status & ISMT_DESC_SCS) { if (read_write == I2C_SMBUS_WRITE && size != I2C_SMBUS_PROC_CALL && size != I2C_SMBUS_BLOCK_PROC_CALL) return 0; switch (size) { case I2C_SMBUS_BYTE: case I2C_SMBUS_BYTE_DATA: data->byte = dma_buffer[0]; break; case I2C_SMBUS_WORD_DATA: case I2C_SMBUS_PROC_CALL: data->word = dma_buffer[0] | (dma_buffer[1] << 8); break; case I2C_SMBUS_BLOCK_DATA: case I2C_SMBUS_BLOCK_PROC_CALL: if (desc->rxbytes != dma_buffer[0] + 1) return -EMSGSIZE; memcpy(data->block, dma_buffer, desc->rxbytes); break; case I2C_SMBUS_I2C_BLOCK_DATA: memcpy(&data->block[1], dma_buffer, desc->rxbytes); data->block[0] = desc->rxbytes; break; } return 0; } if (likely(desc->status & ISMT_DESC_NAK)) return -ENXIO; if (desc->status & ISMT_DESC_CRC) return -EBADMSG; if (desc->status & ISMT_DESC_COL) return -EAGAIN; if (desc->status & 
ISMT_DESC_LPR) return -EPROTO; if (desc->status & (ISMT_DESC_DLTO | ISMT_DESC_CLTO)) return -ETIMEDOUT; return -EIO; } /** * ismt_access() - process an SMBus command * @adap: the i2c host adapter * @addr: address of the i2c/SMBus target * @flags: command options * @read_write: read from or write to device * @command: the i2c/SMBus command to issue * @size: SMBus transaction type * @data: read/write data buffer */ static int ismt_access(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { int ret; unsigned long time_left; dma_addr_t dma_addr = 0; /* address of the data buffer */ u8 dma_size = 0; enum dma_data_direction dma_direction = 0; struct ismt_desc *desc; struct ismt_priv *priv = i2c_get_adapdata(adap); struct device *dev = &priv->pci_dev->dev; u8 *dma_buffer = PTR_ALIGN(&priv->buffer[0], 16); desc = &priv->hw[priv->head]; /* Initialize the DMA buffer */ memset(priv->buffer, 0, sizeof(priv->buffer)); /* Initialize the descriptor */ memset(desc, 0, sizeof(struct ismt_desc)); desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, read_write); /* Always clear the log entries */ memset(priv->log, 0, ISMT_LOG_ENTRIES * sizeof(u32)); /* Initialize common control bits */ if (likely(pci_dev_msi_enabled(priv->pci_dev))) desc->control = ISMT_DESC_INT | ISMT_DESC_FAIR; else desc->control = ISMT_DESC_FAIR; if ((flags & I2C_CLIENT_PEC) && (size != I2C_SMBUS_QUICK) && (size != I2C_SMBUS_I2C_BLOCK_DATA)) desc->control |= ISMT_DESC_PEC; switch (size) { case I2C_SMBUS_QUICK: dev_dbg(dev, "I2C_SMBUS_QUICK\n"); break; case I2C_SMBUS_BYTE: if (read_write == I2C_SMBUS_WRITE) { /* * Send Byte * The command field contains the write data */ dev_dbg(dev, "I2C_SMBUS_BYTE: WRITE\n"); desc->control |= ISMT_DESC_CWRL; desc->wr_len_cmd = command; } else { /* Receive Byte */ dev_dbg(dev, "I2C_SMBUS_BYTE: READ\n"); dma_size = 1; dma_direction = DMA_FROM_DEVICE; desc->rd_len = 1; } break; case I2C_SMBUS_BYTE_DATA: if (read_write == I2C_SMBUS_WRITE) { /* * Write Byte * Command plus 1 data byte */ dev_dbg(dev, "I2C_SMBUS_BYTE_DATA: WRITE\n"); desc->wr_len_cmd = 2; dma_size = 2; dma_direction = DMA_TO_DEVICE; dma_buffer[0] = command; dma_buffer[1] = data->byte; } else { /* Read Byte */ dev_dbg(dev, "I2C_SMBUS_BYTE_DATA: READ\n"); desc->control |= ISMT_DESC_CWRL; desc->wr_len_cmd = command; desc->rd_len = 1; dma_size = 1; dma_direction = DMA_FROM_DEVICE; } break; case I2C_SMBUS_WORD_DATA: if (read_write == I2C_SMBUS_WRITE) { /* Write Word */ dev_dbg(dev, "I2C_SMBUS_WORD_DATA: WRITE\n"); desc->wr_len_cmd = 3; dma_size = 3; dma_direction = DMA_TO_DEVICE; dma_buffer[0] = command; dma_buffer[1] = data->word & 0xff; dma_buffer[2] = data->word >> 8; } else { /* Read Word */ dev_dbg(dev, "I2C_SMBUS_WORD_DATA: READ\n"); desc->wr_len_cmd = command; desc->control |= ISMT_DESC_CWRL; desc->rd_len = 2; dma_size = 2; dma_direction = DMA_FROM_DEVICE; } break; case I2C_SMBUS_PROC_CALL: dev_dbg(dev, "I2C_SMBUS_PROC_CALL\n"); desc->wr_len_cmd = 3; desc->rd_len = 2; dma_size = 3; dma_direction = DMA_BIDIRECTIONAL; dma_buffer[0] = command; dma_buffer[1] = data->word & 0xff; dma_buffer[2] = data->word >> 8; break; case I2C_SMBUS_BLOCK_DATA: if (read_write == I2C_SMBUS_WRITE) { /* Block Write */ dev_dbg(dev, "I2C_SMBUS_BLOCK_DATA: WRITE\n"); if (data->block[0] < 1 || data->block[0] > I2C_SMBUS_BLOCK_MAX) return -EINVAL; dma_size = data->block[0] + 1; dma_direction = DMA_TO_DEVICE; desc->wr_len_cmd = dma_size; desc->control |= ISMT_DESC_BLK; dma_buffer[0] = command; 
memcpy(&dma_buffer[1], &data->block[1], dma_size - 1); } else { /* Block Read */ dev_dbg(dev, "I2C_SMBUS_BLOCK_DATA: READ\n"); dma_size = I2C_SMBUS_BLOCK_MAX; dma_direction = DMA_FROM_DEVICE; desc->rd_len = dma_size; desc->wr_len_cmd = command; desc->control |= (ISMT_DESC_BLK | ISMT_DESC_CWRL); } break; case I2C_SMBUS_BLOCK_PROC_CALL: dev_dbg(dev, "I2C_SMBUS_BLOCK_PROC_CALL\n"); if (data->block[0] > I2C_SMBUS_BLOCK_MAX) return -EINVAL; dma_size = I2C_SMBUS_BLOCK_MAX; desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, 1); desc->wr_len_cmd = data->block[0] + 1; desc->rd_len = dma_size; desc->control |= ISMT_DESC_BLK; dma_direction = DMA_BIDIRECTIONAL; dma_buffer[0] = command; memcpy(&dma_buffer[1], &data->block[1], data->block[0]); break; case I2C_SMBUS_I2C_BLOCK_DATA: /* Make sure the length is valid */ if (data->block[0] < 1) data->block[0] = 1; if (data->block[0] > I2C_SMBUS_BLOCK_MAX) data->block[0] = I2C_SMBUS_BLOCK_MAX; if (read_write == I2C_SMBUS_WRITE) { /* i2c Block Write */ dev_dbg(dev, "I2C_SMBUS_I2C_BLOCK_DATA: WRITE\n"); dma_size = data->block[0] + 1; dma_direction = DMA_TO_DEVICE; desc->wr_len_cmd = dma_size; desc->control |= ISMT_DESC_I2C; dma_buffer[0] = command; memcpy(&dma_buffer[1], &data->block[1], dma_size - 1); } else { /* i2c Block Read */ dev_dbg(dev, "I2C_SMBUS_I2C_BLOCK_DATA: READ\n"); dma_size = data->block[0]; dma_direction = DMA_FROM_DEVICE; desc->rd_len = dma_size; desc->wr_len_cmd = command; desc->control |= (ISMT_DESC_I2C | ISMT_DESC_CWRL); /* * Per the "Table 15-15. I2C Commands", * in the External Design Specification (EDS), * (Document Number: 508084, Revision: 2.0), * the _rw bit must be 0 */ desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, 0); } break; default: dev_err(dev, "Unsupported transaction %d\n", size); return -EOPNOTSUPP; } /* map the data buffer */ if (dma_size != 0) { dev_dbg(dev, " dev=%p\n", dev); dev_dbg(dev, " data=%p\n", data); dev_dbg(dev, " dma_buffer=%p\n", dma_buffer); dev_dbg(dev, " dma_size=%d\n", dma_size); dev_dbg(dev, " dma_direction=%d\n", dma_direction); dma_addr = dma_map_single(dev, dma_buffer, dma_size, dma_direction); if (dma_mapping_error(dev, dma_addr)) { dev_err(dev, "Error in mapping dma buffer %p\n", dma_buffer); return -EIO; } dev_dbg(dev, " dma_addr = %pad\n", &dma_addr); desc->dptr_low = lower_32_bits(dma_addr); desc->dptr_high = upper_32_bits(dma_addr); } reinit_completion(&priv->cmp); /* Add the descriptor */ ismt_submit_desc(priv); /* Now we wait for interrupt completion, 1s */ time_left = wait_for_completion_timeout(&priv->cmp, HZ*1); /* unmap the data buffer */ if (dma_size != 0) dma_unmap_single(dev, dma_addr, dma_size, dma_direction); if (unlikely(!time_left)) { dev_err(dev, "completion wait timed out\n"); ret = -ETIMEDOUT; goto out; } /* do any post processing of the descriptor here */ ret = ismt_process_desc(desc, data, priv, size, read_write); out: /* Update the ring pointer */ priv->head++; priv->head %= ISMT_DESC_ENTRIES; return ret; } /** * ismt_func() - report which i2c commands are supported by this adapter * @adap: the i2c host adapter */ static u32 ismt_func(struct i2c_adapter *adap) { return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_PROC_CALL | I2C_FUNC_SMBUS_BLOCK_PROC_CALL | I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_SMBUS_I2C_BLOCK | I2C_FUNC_SMBUS_PEC; } static const struct i2c_algorithm smbus_algorithm = { .smbus_xfer = ismt_access, .functionality = ismt_func, }; /** * ismt_handle_isr() - interrupt handler bottom half * @priv: iSMT private 
data */ static irqreturn_t ismt_handle_isr(struct ismt_priv *priv) { complete(&priv->cmp); return IRQ_HANDLED; } /** * ismt_do_interrupt() - IRQ interrupt handler * @vec: interrupt vector * @data: iSMT private data */ static irqreturn_t ismt_do_interrupt(int vec, void *data) { u32 val; struct ismt_priv *priv = data; /* * check to see it's our interrupt, return IRQ_NONE if not ours * since we are sharing interrupt */ val = readl(priv->smba + ISMT_MSTR_MSTS); if (!(val & (ISMT_MSTS_MIS | ISMT_MSTS_MEIS))) return IRQ_NONE; else writel(val | ISMT_MSTS_MIS | ISMT_MSTS_MEIS, priv->smba + ISMT_MSTR_MSTS); return ismt_handle_isr(priv); } /** * ismt_do_msi_interrupt() - MSI interrupt handler * @vec: interrupt vector * @data: iSMT private data */ static irqreturn_t ismt_do_msi_interrupt(int vec, void *data) { return ismt_handle_isr(data); } /** * ismt_hw_init() - initialize the iSMT hardware * @priv: iSMT private data */ static void ismt_hw_init(struct ismt_priv *priv) { u32 val; struct device *dev = &priv->pci_dev->dev; /* initialize the Master Descriptor Base Address (MDBA) */ writeq(priv->io_rng_dma, priv->smba + ISMT_MSTR_MDBA); writeq(priv->log_dma, priv->smba + ISMT_GR_SMTICL); /* initialize the Master Control Register (MCTRL) */ writel(ISMT_MCTRL_MEIE, priv->smba + ISMT_MSTR_MCTRL); /* initialize the Master Status Register (MSTS) */ writel(0, priv->smba + ISMT_MSTR_MSTS); /* initialize the Master Descriptor Size (MDS) */ val = readl(priv->smba + ISMT_MSTR_MDS); writel((val & ~ISMT_MDS_MASK) | (ISMT_DESC_ENTRIES - 1), priv->smba + ISMT_MSTR_MDS); /* * Set the SMBus speed (could use this for slow HW debuggers) */ val = readl(priv->smba + ISMT_SPGT); switch (bus_speed) { case 0: break; case 80: dev_dbg(dev, "Setting SMBus clock to 80 kHz\n"); writel(((val & ~ISMT_SPGT_SPD_MASK) | ISMT_SPGT_SPD_80K), priv->smba + ISMT_SPGT); break; case 100: dev_dbg(dev, "Setting SMBus clock to 100 kHz\n"); writel(((val & ~ISMT_SPGT_SPD_MASK) | ISMT_SPGT_SPD_100K), priv->smba + ISMT_SPGT); break; case 400: dev_dbg(dev, "Setting SMBus clock to 400 kHz\n"); writel(((val & ~ISMT_SPGT_SPD_MASK) | ISMT_SPGT_SPD_400K), priv->smba + ISMT_SPGT); break; case 1000: dev_dbg(dev, "Setting SMBus clock to 1000 kHz\n"); writel(((val & ~ISMT_SPGT_SPD_MASK) | ISMT_SPGT_SPD_1M), priv->smba + ISMT_SPGT); break; default: dev_warn(dev, "Invalid SMBus clock speed, only 0, 80, 100, 400, and 1000 are valid\n"); break; } val = readl(priv->smba + ISMT_SPGT); switch (val & ISMT_SPGT_SPD_MASK) { case ISMT_SPGT_SPD_80K: bus_speed = 80; break; case ISMT_SPGT_SPD_100K: bus_speed = 100; break; case ISMT_SPGT_SPD_400K: bus_speed = 400; break; case ISMT_SPGT_SPD_1M: bus_speed = 1000; break; } dev_dbg(dev, "SMBus clock is running at %d kHz\n", bus_speed); } /** * ismt_dev_init() - initialize the iSMT data structures * @priv: iSMT private data */ static int ismt_dev_init(struct ismt_priv *priv) { /* allocate memory for the descriptor */ priv->hw = dmam_alloc_coherent(&priv->pci_dev->dev, (ISMT_DESC_ENTRIES * sizeof(struct ismt_desc)), &priv->io_rng_dma, GFP_KERNEL); if (!priv->hw) return -ENOMEM; priv->head = 0; init_completion(&priv->cmp); priv->log = dmam_alloc_coherent(&priv->pci_dev->dev, ISMT_LOG_ENTRIES * sizeof(u32), &priv->log_dma, GFP_KERNEL); if (!priv->log) return -ENOMEM; return 0; } /** * ismt_int_init() - initialize interrupts * @priv: iSMT private data */ static int ismt_int_init(struct ismt_priv *priv) { int err; /* Try using MSI interrupts */ err = pci_enable_msi(priv->pci_dev); if (err) goto intx; err = 
devm_request_irq(&priv->pci_dev->dev, priv->pci_dev->irq, ismt_do_msi_interrupt, 0, "ismt-msi", priv); if (err) { pci_disable_msi(priv->pci_dev); goto intx; } return 0; /* Try using legacy interrupts */ intx: dev_warn(&priv->pci_dev->dev, "Unable to use MSI interrupts, falling back to legacy\n"); err = devm_request_irq(&priv->pci_dev->dev, priv->pci_dev->irq, ismt_do_interrupt, IRQF_SHARED, "ismt-intx", priv); if (err) { dev_err(&priv->pci_dev->dev, "no usable interrupts\n"); return err; } return 0; } static struct pci_driver ismt_driver; /** * ismt_probe() - probe for iSMT devices * @pdev: PCI-Express device * @id: PCI-Express device ID */ static int ismt_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int err; struct ismt_priv *priv; unsigned long start, len; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; pci_set_drvdata(pdev, priv); i2c_set_adapdata(&priv->adapter, priv); priv->adapter.owner = THIS_MODULE; priv->adapter.class = I2C_CLASS_HWMON; priv->adapter.algo = &smbus_algorithm; priv->adapter.dev.parent = &pdev->dev; ACPI_COMPANION_SET(&priv->adapter.dev, ACPI_COMPANION(&pdev->dev)); priv->adapter.retries = ISMT_MAX_RETRIES; priv->pci_dev = pdev; err = pcim_enable_device(pdev); if (err) { dev_err(&pdev->dev, "Failed to enable SMBus PCI device (%d)\n", err); return err; } /* enable bus mastering */ pci_set_master(pdev); /* Determine the address of the SMBus area */ start = pci_resource_start(pdev, SMBBAR); len = pci_resource_len(pdev, SMBBAR); if (!start || !len) { dev_err(&pdev->dev, "SMBus base address uninitialized, upgrade BIOS\n"); return -ENODEV; } snprintf(priv->adapter.name, sizeof(priv->adapter.name), "SMBus iSMT adapter at %lx", start); dev_dbg(&priv->pci_dev->dev, " start=0x%lX\n", start); dev_dbg(&priv->pci_dev->dev, " len=0x%lX\n", len); err = acpi_check_resource_conflict(&pdev->resource[SMBBAR]); if (err) { dev_err(&pdev->dev, "ACPI resource conflict!\n"); return err; } err = pci_request_region(pdev, SMBBAR, ismt_driver.name); if (err) { dev_err(&pdev->dev, "Failed to request SMBus region 0x%lx-0x%lx\n", start, start + len); return err; } priv->smba = pcim_iomap(pdev, SMBBAR, len); if (!priv->smba) { dev_err(&pdev->dev, "Unable to ioremap SMBus BAR\n"); return -ENODEV; } err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (err) { dev_err(&pdev->dev, "dma_set_mask fail\n"); return -ENODEV; } err = ismt_dev_init(priv); if (err) return err; ismt_hw_init(priv); err = ismt_int_init(priv); if (err) return err; err = i2c_add_adapter(&priv->adapter); if (err) return -ENODEV; return 0; } /** * ismt_remove() - release driver resources * @pdev: PCI-Express device */ static void ismt_remove(struct pci_dev *pdev) { struct ismt_priv *priv = pci_get_drvdata(pdev); i2c_del_adapter(&priv->adapter); } static struct pci_driver ismt_driver = { .name = "ismt_smbus", .id_table = ismt_ids, .probe = ismt_probe, .remove = ismt_remove, }; module_pci_driver(ismt_driver); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Bill E. Brown <[email protected]>"); MODULE_DESCRIPTION("Intel SMBus Message Transport (iSMT) driver");
linux-master
drivers/i2c/busses/i2c-ismt.c
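The iSMT driver above advertises I2C_FUNC_SMBUS_BLOCK_DATA and services it through its descriptor ring and DMA buffer, so a kernel client can use the generic SMBus helpers against it unchanged. Below is a minimal, hypothetical client sketch: the device name "example-smbus-dev" and command byte 0x05 are assumptions for illustration, and the single-argument probe() matches the recent kernels this tree targets.

/*
 * Illustrative sketch of a hypothetical SMBus client; device name and
 * command byte are assumed values, any adapter exposing
 * I2C_FUNC_SMBUS_BLOCK_DATA would work.
 */
#include <linux/i2c.h>
#include <linux/module.h>

static int example_probe(struct i2c_client *client)
{
	u8 buf[I2C_SMBUS_BLOCK_MAX];
	s32 len;

	/* 0x05 is an assumed command/register byte */
	len = i2c_smbus_read_block_data(client, 0x05, buf);
	if (len < 0)
		return len;

	dev_info(&client->dev, "read %d byte block\n", len);
	return 0;
}

static const struct i2c_device_id example_id[] = {
	{ "example-smbus-dev", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, example_id);

static struct i2c_driver example_driver = {
	.driver = { .name = "example-smbus-dev" },
	.probe = example_probe,
	.id_table = example_id,
};
module_i2c_driver(example_driver);

MODULE_DESCRIPTION("Example SMBus block-read client (illustrative sketch)");
MODULE_LICENSE("GPL");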
// SPDX-License-Identifier: GPL-2.0 /* * BCM2835 master mode driver */ #include <linux/clk.h> #include <linux/clkdev.h> #include <linux/clk-provider.h> #include <linux/completion.h> #include <linux/err.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/slab.h> #define BCM2835_I2C_C 0x0 #define BCM2835_I2C_S 0x4 #define BCM2835_I2C_DLEN 0x8 #define BCM2835_I2C_A 0xc #define BCM2835_I2C_FIFO 0x10 #define BCM2835_I2C_DIV 0x14 #define BCM2835_I2C_DEL 0x18 /* * 16-bit field for the number of SCL cycles to wait after rising SCL * before deciding the slave is not responding. 0 disables the * timeout detection. */ #define BCM2835_I2C_CLKT 0x1c #define BCM2835_I2C_C_READ BIT(0) #define BCM2835_I2C_C_CLEAR BIT(4) /* bits 4 and 5 both clear */ #define BCM2835_I2C_C_ST BIT(7) #define BCM2835_I2C_C_INTD BIT(8) #define BCM2835_I2C_C_INTT BIT(9) #define BCM2835_I2C_C_INTR BIT(10) #define BCM2835_I2C_C_I2CEN BIT(15) #define BCM2835_I2C_S_TA BIT(0) #define BCM2835_I2C_S_DONE BIT(1) #define BCM2835_I2C_S_TXW BIT(2) #define BCM2835_I2C_S_RXR BIT(3) #define BCM2835_I2C_S_TXD BIT(4) #define BCM2835_I2C_S_RXD BIT(5) #define BCM2835_I2C_S_TXE BIT(6) #define BCM2835_I2C_S_RXF BIT(7) #define BCM2835_I2C_S_ERR BIT(8) #define BCM2835_I2C_S_CLKT BIT(9) #define BCM2835_I2C_S_LEN BIT(10) /* Fake bit for SW error reporting */ #define BCM2835_I2C_FEDL_SHIFT 16 #define BCM2835_I2C_REDL_SHIFT 0 #define BCM2835_I2C_CDIV_MIN 0x0002 #define BCM2835_I2C_CDIV_MAX 0xFFFE struct bcm2835_i2c_dev { struct device *dev; void __iomem *regs; int irq; struct i2c_adapter adapter; struct completion completion; struct i2c_msg *curr_msg; struct clk *bus_clk; int num_msgs; u32 msg_err; u8 *msg_buf; size_t msg_buf_remaining; }; static inline void bcm2835_i2c_writel(struct bcm2835_i2c_dev *i2c_dev, u32 reg, u32 val) { writel(val, i2c_dev->regs + reg); } static inline u32 bcm2835_i2c_readl(struct bcm2835_i2c_dev *i2c_dev, u32 reg) { return readl(i2c_dev->regs + reg); } #define to_clk_bcm2835_i2c(_hw) container_of(_hw, struct clk_bcm2835_i2c, hw) struct clk_bcm2835_i2c { struct clk_hw hw; struct bcm2835_i2c_dev *i2c_dev; }; static int clk_bcm2835_i2c_calc_divider(unsigned long rate, unsigned long parent_rate) { u32 divider = DIV_ROUND_UP(parent_rate, rate); /* * Per the datasheet, the register is always interpreted as an even * number, by rounding down. In other words, the LSB is ignored. So, * if the LSB is set, increment the divider to avoid any issue. */ if (divider & 1) divider++; if ((divider < BCM2835_I2C_CDIV_MIN) || (divider > BCM2835_I2C_CDIV_MAX)) return -EINVAL; return divider; } static int clk_bcm2835_i2c_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { struct clk_bcm2835_i2c *div = to_clk_bcm2835_i2c(hw); u32 redl, fedl; u32 divider = clk_bcm2835_i2c_calc_divider(rate, parent_rate); if (divider == -EINVAL) return -EINVAL; bcm2835_i2c_writel(div->i2c_dev, BCM2835_I2C_DIV, divider); /* * Number of core clocks to wait after falling edge before * outputting the next data bit. Note that both FEDL and REDL * can't be greater than CDIV/2. */ fedl = max(divider / 16, 1u); /* * Number of core clocks to wait after rising edge before * sampling the next incoming data bit. 
*/ redl = max(divider / 4, 1u); bcm2835_i2c_writel(div->i2c_dev, BCM2835_I2C_DEL, (fedl << BCM2835_I2C_FEDL_SHIFT) | (redl << BCM2835_I2C_REDL_SHIFT)); return 0; } static long clk_bcm2835_i2c_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate) { u32 divider = clk_bcm2835_i2c_calc_divider(rate, *parent_rate); return DIV_ROUND_UP(*parent_rate, divider); } static unsigned long clk_bcm2835_i2c_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct clk_bcm2835_i2c *div = to_clk_bcm2835_i2c(hw); u32 divider = bcm2835_i2c_readl(div->i2c_dev, BCM2835_I2C_DIV); return DIV_ROUND_UP(parent_rate, divider); } static const struct clk_ops clk_bcm2835_i2c_ops = { .set_rate = clk_bcm2835_i2c_set_rate, .round_rate = clk_bcm2835_i2c_round_rate, .recalc_rate = clk_bcm2835_i2c_recalc_rate, }; static struct clk *bcm2835_i2c_register_div(struct device *dev, struct clk *mclk, struct bcm2835_i2c_dev *i2c_dev) { struct clk_init_data init; struct clk_bcm2835_i2c *priv; char name[32]; const char *mclk_name; snprintf(name, sizeof(name), "%s_div", dev_name(dev)); mclk_name = __clk_get_name(mclk); init.ops = &clk_bcm2835_i2c_ops; init.name = name; init.parent_names = (const char* []) { mclk_name }; init.num_parents = 1; init.flags = 0; priv = devm_kzalloc(dev, sizeof(struct clk_bcm2835_i2c), GFP_KERNEL); if (priv == NULL) return ERR_PTR(-ENOMEM); priv->hw.init = &init; priv->i2c_dev = i2c_dev; clk_hw_register_clkdev(&priv->hw, "div", dev_name(dev)); return devm_clk_register(dev, &priv->hw); } static void bcm2835_fill_txfifo(struct bcm2835_i2c_dev *i2c_dev) { u32 val; while (i2c_dev->msg_buf_remaining) { val = bcm2835_i2c_readl(i2c_dev, BCM2835_I2C_S); if (!(val & BCM2835_I2C_S_TXD)) break; bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_FIFO, *i2c_dev->msg_buf); i2c_dev->msg_buf++; i2c_dev->msg_buf_remaining--; } } static void bcm2835_drain_rxfifo(struct bcm2835_i2c_dev *i2c_dev) { u32 val; while (i2c_dev->msg_buf_remaining) { val = bcm2835_i2c_readl(i2c_dev, BCM2835_I2C_S); if (!(val & BCM2835_I2C_S_RXD)) break; *i2c_dev->msg_buf = bcm2835_i2c_readl(i2c_dev, BCM2835_I2C_FIFO); i2c_dev->msg_buf++; i2c_dev->msg_buf_remaining--; } } /* * Repeated Start Condition (Sr) * The BCM2835 ARM Peripherals datasheet mentions a way to trigger a Sr when it * talks about reading from a slave with 10 bit address. This is achieved by * issuing a write, poll the I2CS.TA flag and wait for it to be set, and then * issue a read. * A comment in https://github.com/raspberrypi/linux/issues/254 shows how the * firmware actually does it using polling and says that it's a workaround for * a problem in the state machine. * It turns out that it is possible to use the TXW interrupt to know when the * transfer is active, provided the FIFO has not been prefilled. 
*/ static void bcm2835_i2c_start_transfer(struct bcm2835_i2c_dev *i2c_dev) { u32 c = BCM2835_I2C_C_ST | BCM2835_I2C_C_I2CEN; struct i2c_msg *msg = i2c_dev->curr_msg; bool last_msg = (i2c_dev->num_msgs == 1); if (!i2c_dev->num_msgs) return; i2c_dev->num_msgs--; i2c_dev->msg_buf = msg->buf; i2c_dev->msg_buf_remaining = msg->len; if (msg->flags & I2C_M_RD) c |= BCM2835_I2C_C_READ | BCM2835_I2C_C_INTR; else c |= BCM2835_I2C_C_INTT; if (last_msg) c |= BCM2835_I2C_C_INTD; bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_A, msg->addr); bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_DLEN, msg->len); bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, c); } static void bcm2835_i2c_finish_transfer(struct bcm2835_i2c_dev *i2c_dev) { i2c_dev->curr_msg = NULL; i2c_dev->num_msgs = 0; i2c_dev->msg_buf = NULL; i2c_dev->msg_buf_remaining = 0; } /* * Note about I2C_C_CLEAR on error: * The I2C_C_CLEAR on errors will take some time to resolve -- if you were in * non-idle state and I2C_C_READ, it sets an abort_rx flag and runs through * the state machine to send a NACK and a STOP. Since we're setting CLEAR * without I2CEN, that NACK will be hanging around queued up for next time * we start the engine. */ static irqreturn_t bcm2835_i2c_isr(int this_irq, void *data) { struct bcm2835_i2c_dev *i2c_dev = data; u32 val, err; val = bcm2835_i2c_readl(i2c_dev, BCM2835_I2C_S); err = val & (BCM2835_I2C_S_CLKT | BCM2835_I2C_S_ERR); if (err) { i2c_dev->msg_err = err; goto complete; } if (val & BCM2835_I2C_S_DONE) { if (!i2c_dev->curr_msg) { dev_err(i2c_dev->dev, "Got unexpected interrupt (from firmware?)\n"); } else if (i2c_dev->curr_msg->flags & I2C_M_RD) { bcm2835_drain_rxfifo(i2c_dev); val = bcm2835_i2c_readl(i2c_dev, BCM2835_I2C_S); } if ((val & BCM2835_I2C_S_RXD) || i2c_dev->msg_buf_remaining) i2c_dev->msg_err = BCM2835_I2C_S_LEN; else i2c_dev->msg_err = 0; goto complete; } if (val & BCM2835_I2C_S_TXW) { if (!i2c_dev->msg_buf_remaining) { i2c_dev->msg_err = val | BCM2835_I2C_S_LEN; goto complete; } bcm2835_fill_txfifo(i2c_dev); if (i2c_dev->num_msgs && !i2c_dev->msg_buf_remaining) { i2c_dev->curr_msg++; bcm2835_i2c_start_transfer(i2c_dev); } return IRQ_HANDLED; } if (val & BCM2835_I2C_S_RXR) { if (!i2c_dev->msg_buf_remaining) { i2c_dev->msg_err = val | BCM2835_I2C_S_LEN; goto complete; } bcm2835_drain_rxfifo(i2c_dev); return IRQ_HANDLED; } return IRQ_NONE; complete: bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, BCM2835_I2C_C_CLEAR); bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_S, BCM2835_I2C_S_CLKT | BCM2835_I2C_S_ERR | BCM2835_I2C_S_DONE); complete(&i2c_dev->completion); return IRQ_HANDLED; } static int bcm2835_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) { struct bcm2835_i2c_dev *i2c_dev = i2c_get_adapdata(adap); unsigned long time_left; int i; for (i = 0; i < (num - 1); i++) if (msgs[i].flags & I2C_M_RD) { dev_warn_once(i2c_dev->dev, "only one read message supported, has to be last\n"); return -EOPNOTSUPP; } i2c_dev->curr_msg = msgs; i2c_dev->num_msgs = num; reinit_completion(&i2c_dev->completion); bcm2835_i2c_start_transfer(i2c_dev); time_left = wait_for_completion_timeout(&i2c_dev->completion, adap->timeout); bcm2835_i2c_finish_transfer(i2c_dev); if (!time_left) { bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, BCM2835_I2C_C_CLEAR); dev_err(i2c_dev->dev, "i2c transfer timed out\n"); return -ETIMEDOUT; } if (!i2c_dev->msg_err) return num; dev_dbg(i2c_dev->dev, "i2c transfer failed: %x\n", i2c_dev->msg_err); if (i2c_dev->msg_err & BCM2835_I2C_S_ERR) return -EREMOTEIO; return -EIO; } static u32 bcm2835_i2c_func(struct i2c_adapter 
*adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static const struct i2c_algorithm bcm2835_i2c_algo = { .master_xfer = bcm2835_i2c_xfer, .functionality = bcm2835_i2c_func, }; /* * The BCM2835 was reported to have problems with clock stretching: * https://www.advamation.com/knowhow/raspberrypi/rpi-i2c-bug.html * https://www.raspberrypi.org/forums/viewtopic.php?p=146272 */ static const struct i2c_adapter_quirks bcm2835_i2c_quirks = { .flags = I2C_AQ_NO_CLK_STRETCH, }; static int bcm2835_i2c_probe(struct platform_device *pdev) { struct bcm2835_i2c_dev *i2c_dev; int ret; struct i2c_adapter *adap; struct clk *mclk; u32 bus_clk_rate; i2c_dev = devm_kzalloc(&pdev->dev, sizeof(*i2c_dev), GFP_KERNEL); if (!i2c_dev) return -ENOMEM; platform_set_drvdata(pdev, i2c_dev); i2c_dev->dev = &pdev->dev; init_completion(&i2c_dev->completion); i2c_dev->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); if (IS_ERR(i2c_dev->regs)) return PTR_ERR(i2c_dev->regs); mclk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(mclk)) return dev_err_probe(&pdev->dev, PTR_ERR(mclk), "Could not get clock\n"); i2c_dev->bus_clk = bcm2835_i2c_register_div(&pdev->dev, mclk, i2c_dev); if (IS_ERR(i2c_dev->bus_clk)) return dev_err_probe(&pdev->dev, PTR_ERR(i2c_dev->bus_clk), "Could not register clock\n"); ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency", &bus_clk_rate); if (ret < 0) { dev_warn(&pdev->dev, "Could not read clock-frequency property\n"); bus_clk_rate = I2C_MAX_STANDARD_MODE_FREQ; } ret = clk_set_rate_exclusive(i2c_dev->bus_clk, bus_clk_rate); if (ret < 0) return dev_err_probe(&pdev->dev, ret, "Could not set clock frequency\n"); ret = clk_prepare_enable(i2c_dev->bus_clk); if (ret) { dev_err(&pdev->dev, "Couldn't prepare clock"); goto err_put_exclusive_rate; } i2c_dev->irq = platform_get_irq(pdev, 0); if (i2c_dev->irq < 0) { ret = i2c_dev->irq; goto err_disable_unprepare_clk; } ret = request_irq(i2c_dev->irq, bcm2835_i2c_isr, IRQF_SHARED, dev_name(&pdev->dev), i2c_dev); if (ret) { dev_err(&pdev->dev, "Could not request IRQ\n"); goto err_disable_unprepare_clk; } adap = &i2c_dev->adapter; i2c_set_adapdata(adap, i2c_dev); adap->owner = THIS_MODULE; adap->class = I2C_CLASS_DEPRECATED; snprintf(adap->name, sizeof(adap->name), "bcm2835 (%s)", of_node_full_name(pdev->dev.of_node)); adap->algo = &bcm2835_i2c_algo; adap->dev.parent = &pdev->dev; adap->dev.of_node = pdev->dev.of_node; adap->quirks = of_device_get_match_data(&pdev->dev); /* * Disable the hardware clock stretching timeout. SMBUS * specifies a limit for how long the device can stretch the * clock, but core I2C doesn't. 
*/ bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_CLKT, 0); bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, 0); ret = i2c_add_adapter(adap); if (ret) goto err_free_irq; return 0; err_free_irq: free_irq(i2c_dev->irq, i2c_dev); err_disable_unprepare_clk: clk_disable_unprepare(i2c_dev->bus_clk); err_put_exclusive_rate: clk_rate_exclusive_put(i2c_dev->bus_clk); return ret; } static void bcm2835_i2c_remove(struct platform_device *pdev) { struct bcm2835_i2c_dev *i2c_dev = platform_get_drvdata(pdev); clk_rate_exclusive_put(i2c_dev->bus_clk); clk_disable_unprepare(i2c_dev->bus_clk); free_irq(i2c_dev->irq, i2c_dev); i2c_del_adapter(&i2c_dev->adapter); } static const struct of_device_id bcm2835_i2c_of_match[] = { { .compatible = "brcm,bcm2711-i2c" }, { .compatible = "brcm,bcm2835-i2c", .data = &bcm2835_i2c_quirks }, {}, }; MODULE_DEVICE_TABLE(of, bcm2835_i2c_of_match); static struct platform_driver bcm2835_i2c_driver = { .probe = bcm2835_i2c_probe, .remove_new = bcm2835_i2c_remove, .driver = { .name = "i2c-bcm2835", .of_match_table = bcm2835_i2c_of_match, }, }; module_platform_driver(bcm2835_i2c_driver); MODULE_AUTHOR("Stephen Warren <[email protected]>"); MODULE_DESCRIPTION("BCM2835 I2C bus adapter"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:i2c-bcm2835");
linux-master
drivers/i2c/busses/i2c-bcm2835.c
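The divider comment in clk_bcm2835_i2c_calc_divider() above (the hardware drops the LSB of CDIV, so an odd result is bumped to the next even value) is easy to check in isolation. The standalone sketch below repeats the same arithmetic, including the FEDL/REDL derivation from clk_bcm2835_i2c_set_rate(); the 150 MHz core clock and 100 kHz bus rate are example inputs, not values read from hardware.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define CDIV_MIN	0x0002
#define CDIV_MAX	0xFFFE

/* Same rounding rule as the driver: odd dividers round up to even. */
static int calc_divider(unsigned long rate, unsigned long parent_rate)
{
	unsigned long divider = DIV_ROUND_UP(parent_rate, rate);

	if (divider & 1)
		divider++;
	if (divider < CDIV_MIN || divider > CDIV_MAX)
		return -1;
	return (int)divider;
}

int main(void)
{
	unsigned long parent = 150000000, rate = 100000;   /* example inputs */
	int divider = calc_divider(rate, parent);

	if (divider < 0) {
		printf("requested rate out of range\n");
		return 1;
	}

	/* Falling/rising edge delays, as derived in clk_bcm2835_i2c_set_rate() */
	unsigned int fedl = divider / 16 ? divider / 16 : 1;
	unsigned int redl = divider / 4 ? divider / 4 : 1;

	printf("CDIV=%d -> SCL %lu Hz, FEDL=%u, REDL=%u\n",
	       divider, parent / divider, fedl, redl);
	return 0;
}

Both delays stay well below CDIV/2 for these inputs, which matches the constraint noted in the driver comment.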
/* * (C) Copyright 2009-2010 * Nokia Siemens Networks, [email protected] * * Portions Copyright (C) 2010 - 2016 Cavium, Inc. * * This file contains the shared part of the driver for the i2c adapter in * Cavium Networks' OCTEON processors and ThunderX SOCs. * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/delay.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> #include "i2c-octeon-core.h" /* interrupt service routine */ irqreturn_t octeon_i2c_isr(int irq, void *dev_id) { struct octeon_i2c *i2c = dev_id; i2c->int_disable(i2c); wake_up(&i2c->queue); return IRQ_HANDLED; } static bool octeon_i2c_test_iflg(struct octeon_i2c *i2c) { return (octeon_i2c_ctl_read(i2c) & TWSI_CTL_IFLG); } /** * octeon_i2c_wait - wait for the IFLG to be set * @i2c: The struct octeon_i2c * * Returns 0 on success, otherwise a negative errno. */ static int octeon_i2c_wait(struct octeon_i2c *i2c) { long time_left; /* * Some chip revisions don't assert the irq in the interrupt * controller. So we must poll for the IFLG change. */ if (i2c->broken_irq_mode) { u64 end = get_jiffies_64() + i2c->adap.timeout; while (!octeon_i2c_test_iflg(i2c) && time_before64(get_jiffies_64(), end)) usleep_range(I2C_OCTEON_EVENT_WAIT / 2, I2C_OCTEON_EVENT_WAIT); return octeon_i2c_test_iflg(i2c) ? 0 : -ETIMEDOUT; } i2c->int_enable(i2c); time_left = wait_event_timeout(i2c->queue, octeon_i2c_test_iflg(i2c), i2c->adap.timeout); i2c->int_disable(i2c); if (i2c->broken_irq_check && !time_left && octeon_i2c_test_iflg(i2c)) { dev_err(i2c->dev, "broken irq connection detected, switching to polling mode.\n"); i2c->broken_irq_mode = true; return 0; } if (!time_left) return -ETIMEDOUT; return 0; } static bool octeon_i2c_hlc_test_valid(struct octeon_i2c *i2c) { return (__raw_readq(i2c->twsi_base + SW_TWSI(i2c)) & SW_TWSI_V) == 0; } static void octeon_i2c_hlc_int_clear(struct octeon_i2c *i2c) { /* clear ST/TS events, listen for neither */ octeon_i2c_write_int(i2c, TWSI_INT_ST_INT | TWSI_INT_TS_INT); } /* * Cleanup low-level state & enable high-level controller. */ static void octeon_i2c_hlc_enable(struct octeon_i2c *i2c) { int try = 0; u64 val; if (i2c->hlc_enabled) return; i2c->hlc_enabled = true; while (1) { val = octeon_i2c_ctl_read(i2c); if (!(val & (TWSI_CTL_STA | TWSI_CTL_STP))) break; /* clear IFLG event */ if (val & TWSI_CTL_IFLG) octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB); if (try++ > 100) { pr_err("%s: giving up\n", __func__); break; } /* spin until any start/stop has finished */ udelay(10); } octeon_i2c_ctl_write(i2c, TWSI_CTL_CE | TWSI_CTL_AAK | TWSI_CTL_ENAB); } static void octeon_i2c_hlc_disable(struct octeon_i2c *i2c) { if (!i2c->hlc_enabled) return; i2c->hlc_enabled = false; octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB); } /** * octeon_i2c_hlc_wait - wait for an HLC operation to complete * @i2c: The struct octeon_i2c * * Returns 0 on success, otherwise -ETIMEDOUT. */ static int octeon_i2c_hlc_wait(struct octeon_i2c *i2c) { int time_left; /* * Some cn38xx boards don't assert the irq in the interrupt * controller. So we must poll for the valid bit change. */ if (i2c->broken_irq_mode) { u64 end = get_jiffies_64() + i2c->adap.timeout; while (!octeon_i2c_hlc_test_valid(i2c) && time_before64(get_jiffies_64(), end)) usleep_range(I2C_OCTEON_EVENT_WAIT / 2, I2C_OCTEON_EVENT_WAIT); return octeon_i2c_hlc_test_valid(i2c) ? 
0 : -ETIMEDOUT; } i2c->hlc_int_enable(i2c); time_left = wait_event_timeout(i2c->queue, octeon_i2c_hlc_test_valid(i2c), i2c->adap.timeout); i2c->hlc_int_disable(i2c); if (!time_left) octeon_i2c_hlc_int_clear(i2c); if (i2c->broken_irq_check && !time_left && octeon_i2c_hlc_test_valid(i2c)) { dev_err(i2c->dev, "broken irq connection detected, switching to polling mode.\n"); i2c->broken_irq_mode = true; return 0; } if (!time_left) return -ETIMEDOUT; return 0; } static int octeon_i2c_check_status(struct octeon_i2c *i2c, int final_read) { u8 stat; /* * This is ugly... in HLC mode the status is not in the status register * but in the lower 8 bits of SW_TWSI. */ if (i2c->hlc_enabled) stat = __raw_readq(i2c->twsi_base + SW_TWSI(i2c)); else stat = octeon_i2c_stat_read(i2c); switch (stat) { /* Everything is fine */ case STAT_IDLE: case STAT_AD2W_ACK: case STAT_RXADDR_ACK: case STAT_TXADDR_ACK: case STAT_TXDATA_ACK: return 0; /* ACK allowed on pre-terminal bytes only */ case STAT_RXDATA_ACK: if (!final_read) return 0; return -EIO; /* NAK allowed on terminal byte only */ case STAT_RXDATA_NAK: if (final_read) return 0; return -EIO; /* Arbitration lost */ case STAT_LOST_ARB_38: case STAT_LOST_ARB_68: case STAT_LOST_ARB_78: case STAT_LOST_ARB_B0: return -EAGAIN; /* Being addressed as slave, should back off & listen */ case STAT_SLAVE_60: case STAT_SLAVE_70: case STAT_GENDATA_ACK: case STAT_GENDATA_NAK: return -EOPNOTSUPP; /* Core busy as slave */ case STAT_SLAVE_80: case STAT_SLAVE_88: case STAT_SLAVE_A0: case STAT_SLAVE_A8: case STAT_SLAVE_LOST: case STAT_SLAVE_NAK: case STAT_SLAVE_ACK: return -EOPNOTSUPP; case STAT_TXDATA_NAK: case STAT_BUS_ERROR: return -EIO; case STAT_TXADDR_NAK: case STAT_RXADDR_NAK: case STAT_AD2W_NAK: return -ENXIO; default: dev_err(i2c->dev, "unhandled state: %d\n", stat); return -EIO; } } static int octeon_i2c_recovery(struct octeon_i2c *i2c) { int ret; ret = i2c_recover_bus(&i2c->adap); if (ret) /* recover failed, try hardware re-init */ ret = octeon_i2c_init_lowlevel(i2c); return ret; } /** * octeon_i2c_start - send START to the bus * @i2c: The struct octeon_i2c * * Returns 0 on success, otherwise a negative errno. */ static int octeon_i2c_start(struct octeon_i2c *i2c) { int ret; u8 stat; octeon_i2c_hlc_disable(i2c); octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB | TWSI_CTL_STA); ret = octeon_i2c_wait(i2c); if (ret) goto error; stat = octeon_i2c_stat_read(i2c); if (stat == STAT_START || stat == STAT_REP_START) /* START successful, bail out */ return 0; error: /* START failed, try to recover */ ret = octeon_i2c_recovery(i2c); return (ret) ? ret : -EAGAIN; } /* send STOP to the bus */ static void octeon_i2c_stop(struct octeon_i2c *i2c) { octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB | TWSI_CTL_STP); } /** * octeon_i2c_read - receive data from the bus via low-level controller * @i2c: The struct octeon_i2c * @target: Target address * @data: Pointer to the location to store the data * @rlength: Length of the data * @recv_len: flag for length byte * * The address is sent over the bus, then the data is read. * * Returns 0 on success, otherwise a negative errno. */ static int octeon_i2c_read(struct octeon_i2c *i2c, int target, u8 *data, u16 *rlength, bool recv_len) { int i, result, length = *rlength; bool final_read = false; octeon_i2c_data_write(i2c, (target << 1) | 1); octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB); result = octeon_i2c_wait(i2c); if (result) return result; /* address OK ? 
*/ result = octeon_i2c_check_status(i2c, false); if (result) return result; for (i = 0; i < length; i++) { /* * For the last byte to receive TWSI_CTL_AAK must not be set. * * A special case is I2C_M_RECV_LEN where we don't know the * additional length yet. If recv_len is set we assume we're * not reading the final byte and therefore need to set * TWSI_CTL_AAK. */ if ((i + 1 == length) && !(recv_len && i == 0)) final_read = true; /* clear iflg to allow next event */ if (final_read) octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB); else octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB | TWSI_CTL_AAK); result = octeon_i2c_wait(i2c); if (result) return result; data[i] = octeon_i2c_data_read(i2c, &result); if (result) return result; if (recv_len && i == 0) { if (data[i] > I2C_SMBUS_BLOCK_MAX) return -EPROTO; length += data[i]; } result = octeon_i2c_check_status(i2c, final_read); if (result) return result; } *rlength = length; return 0; } /** * octeon_i2c_write - send data to the bus via low-level controller * @i2c: The struct octeon_i2c * @target: Target address * @data: Pointer to the data to be sent * @length: Length of the data * * The address is sent over the bus, then the data. * * Returns 0 on success, otherwise a negative errno. */ static int octeon_i2c_write(struct octeon_i2c *i2c, int target, const u8 *data, int length) { int i, result; octeon_i2c_data_write(i2c, target << 1); octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB); result = octeon_i2c_wait(i2c); if (result) return result; for (i = 0; i < length; i++) { result = octeon_i2c_check_status(i2c, false); if (result) return result; octeon_i2c_data_write(i2c, data[i]); octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB); result = octeon_i2c_wait(i2c); if (result) return result; } return 0; } /* high-level-controller pure read of up to 8 bytes */ static int octeon_i2c_hlc_read(struct octeon_i2c *i2c, struct i2c_msg *msgs) { int i, j, ret = 0; u64 cmd; octeon_i2c_hlc_enable(i2c); octeon_i2c_hlc_int_clear(i2c); cmd = SW_TWSI_V | SW_TWSI_R | SW_TWSI_SOVR; /* SIZE */ cmd |= (u64)(msgs[0].len - 1) << SW_TWSI_SIZE_SHIFT; /* A */ cmd |= (u64)(msgs[0].addr & 0x7full) << SW_TWSI_ADDR_SHIFT; if (msgs[0].flags & I2C_M_TEN) cmd |= SW_TWSI_OP_10; else cmd |= SW_TWSI_OP_7; octeon_i2c_writeq_flush(cmd, i2c->twsi_base + SW_TWSI(i2c)); ret = octeon_i2c_hlc_wait(i2c); if (ret) goto err; cmd = __raw_readq(i2c->twsi_base + SW_TWSI(i2c)); if ((cmd & SW_TWSI_R) == 0) return octeon_i2c_check_status(i2c, false); for (i = 0, j = msgs[0].len - 1; i < msgs[0].len && i < 4; i++, j--) msgs[0].buf[j] = (cmd >> (8 * i)) & 0xff; if (msgs[0].len > 4) { cmd = __raw_readq(i2c->twsi_base + SW_TWSI_EXT(i2c)); for (i = 0; i < msgs[0].len - 4 && i < 4; i++, j--) msgs[0].buf[j] = (cmd >> (8 * i)) & 0xff; } err: return ret; } /* high-level-controller pure write of up to 8 bytes */ static int octeon_i2c_hlc_write(struct octeon_i2c *i2c, struct i2c_msg *msgs) { int i, j, ret = 0; u64 cmd; octeon_i2c_hlc_enable(i2c); octeon_i2c_hlc_int_clear(i2c); cmd = SW_TWSI_V | SW_TWSI_SOVR; /* SIZE */ cmd |= (u64)(msgs[0].len - 1) << SW_TWSI_SIZE_SHIFT; /* A */ cmd |= (u64)(msgs[0].addr & 0x7full) << SW_TWSI_ADDR_SHIFT; if (msgs[0].flags & I2C_M_TEN) cmd |= SW_TWSI_OP_10; else cmd |= SW_TWSI_OP_7; for (i = 0, j = msgs[0].len - 1; i < msgs[0].len && i < 4; i++, j--) cmd |= (u64)msgs[0].buf[j] << (8 * i); if (msgs[0].len > 4) { u64 ext = 0; for (i = 0; i < msgs[0].len - 4 && i < 4; i++, j--) ext |= (u64)msgs[0].buf[j] << (8 * i); octeon_i2c_writeq_flush(ext, i2c->twsi_base + SW_TWSI_EXT(i2c)); } octeon_i2c_writeq_flush(cmd, 
i2c->twsi_base + SW_TWSI(i2c)); ret = octeon_i2c_hlc_wait(i2c); if (ret) goto err; cmd = __raw_readq(i2c->twsi_base + SW_TWSI(i2c)); if ((cmd & SW_TWSI_R) == 0) return octeon_i2c_check_status(i2c, false); err: return ret; } /* high-level-controller composite write+read, msg0=addr, msg1=data */ static int octeon_i2c_hlc_comp_read(struct octeon_i2c *i2c, struct i2c_msg *msgs) { int i, j, ret = 0; u64 cmd; octeon_i2c_hlc_enable(i2c); cmd = SW_TWSI_V | SW_TWSI_R | SW_TWSI_SOVR; /* SIZE */ cmd |= (u64)(msgs[1].len - 1) << SW_TWSI_SIZE_SHIFT; /* A */ cmd |= (u64)(msgs[0].addr & 0x7full) << SW_TWSI_ADDR_SHIFT; if (msgs[0].flags & I2C_M_TEN) cmd |= SW_TWSI_OP_10_IA; else cmd |= SW_TWSI_OP_7_IA; if (msgs[0].len == 2) { u64 ext = 0; cmd |= SW_TWSI_EIA; ext = (u64)msgs[0].buf[0] << SW_TWSI_IA_SHIFT; cmd |= (u64)msgs[0].buf[1] << SW_TWSI_IA_SHIFT; octeon_i2c_writeq_flush(ext, i2c->twsi_base + SW_TWSI_EXT(i2c)); } else { cmd |= (u64)msgs[0].buf[0] << SW_TWSI_IA_SHIFT; } octeon_i2c_hlc_int_clear(i2c); octeon_i2c_writeq_flush(cmd, i2c->twsi_base + SW_TWSI(i2c)); ret = octeon_i2c_hlc_wait(i2c); if (ret) goto err; cmd = __raw_readq(i2c->twsi_base + SW_TWSI(i2c)); if ((cmd & SW_TWSI_R) == 0) return octeon_i2c_check_status(i2c, false); for (i = 0, j = msgs[1].len - 1; i < msgs[1].len && i < 4; i++, j--) msgs[1].buf[j] = (cmd >> (8 * i)) & 0xff; if (msgs[1].len > 4) { cmd = __raw_readq(i2c->twsi_base + SW_TWSI_EXT(i2c)); for (i = 0; i < msgs[1].len - 4 && i < 4; i++, j--) msgs[1].buf[j] = (cmd >> (8 * i)) & 0xff; } err: return ret; } /* high-level-controller composite write+write, m[0]len<=2, m[1]len<=8 */ static int octeon_i2c_hlc_comp_write(struct octeon_i2c *i2c, struct i2c_msg *msgs) { bool set_ext = false; int i, j, ret = 0; u64 cmd, ext = 0; octeon_i2c_hlc_enable(i2c); cmd = SW_TWSI_V | SW_TWSI_SOVR; /* SIZE */ cmd |= (u64)(msgs[1].len - 1) << SW_TWSI_SIZE_SHIFT; /* A */ cmd |= (u64)(msgs[0].addr & 0x7full) << SW_TWSI_ADDR_SHIFT; if (msgs[0].flags & I2C_M_TEN) cmd |= SW_TWSI_OP_10_IA; else cmd |= SW_TWSI_OP_7_IA; if (msgs[0].len == 2) { cmd |= SW_TWSI_EIA; ext |= (u64)msgs[0].buf[0] << SW_TWSI_IA_SHIFT; set_ext = true; cmd |= (u64)msgs[0].buf[1] << SW_TWSI_IA_SHIFT; } else { cmd |= (u64)msgs[0].buf[0] << SW_TWSI_IA_SHIFT; } for (i = 0, j = msgs[1].len - 1; i < msgs[1].len && i < 4; i++, j--) cmd |= (u64)msgs[1].buf[j] << (8 * i); if (msgs[1].len > 4) { for (i = 0; i < msgs[1].len - 4 && i < 4; i++, j--) ext |= (u64)msgs[1].buf[j] << (8 * i); set_ext = true; } if (set_ext) octeon_i2c_writeq_flush(ext, i2c->twsi_base + SW_TWSI_EXT(i2c)); octeon_i2c_hlc_int_clear(i2c); octeon_i2c_writeq_flush(cmd, i2c->twsi_base + SW_TWSI(i2c)); ret = octeon_i2c_hlc_wait(i2c); if (ret) goto err; cmd = __raw_readq(i2c->twsi_base + SW_TWSI(i2c)); if ((cmd & SW_TWSI_R) == 0) return octeon_i2c_check_status(i2c, false); err: return ret; } /** * octeon_i2c_xfer - The driver's master_xfer function * @adap: Pointer to the i2c_adapter structure * @msgs: Pointer to the messages to be processed * @num: Length of the MSGS array * * Returns the number of messages processed, or a negative errno on failure. 
*/ int octeon_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct octeon_i2c *i2c = i2c_get_adapdata(adap); int i, ret = 0; if (num == 1) { if (msgs[0].len > 0 && msgs[0].len <= 8) { if (msgs[0].flags & I2C_M_RD) ret = octeon_i2c_hlc_read(i2c, msgs); else ret = octeon_i2c_hlc_write(i2c, msgs); goto out; } } else if (num == 2) { if ((msgs[0].flags & I2C_M_RD) == 0 && (msgs[1].flags & I2C_M_RECV_LEN) == 0 && msgs[0].len > 0 && msgs[0].len <= 2 && msgs[1].len > 0 && msgs[1].len <= 8 && msgs[0].addr == msgs[1].addr) { if (msgs[1].flags & I2C_M_RD) ret = octeon_i2c_hlc_comp_read(i2c, msgs); else ret = octeon_i2c_hlc_comp_write(i2c, msgs); goto out; } } for (i = 0; ret == 0 && i < num; i++) { struct i2c_msg *pmsg = &msgs[i]; /* zero-length messages are not supported */ if (!pmsg->len) { ret = -EOPNOTSUPP; break; } ret = octeon_i2c_start(i2c); if (ret) return ret; if (pmsg->flags & I2C_M_RD) ret = octeon_i2c_read(i2c, pmsg->addr, pmsg->buf, &pmsg->len, pmsg->flags & I2C_M_RECV_LEN); else ret = octeon_i2c_write(i2c, pmsg->addr, pmsg->buf, pmsg->len); } octeon_i2c_stop(i2c); out: return (ret != 0) ? ret : num; } /* calculate and set clock divisors */ void octeon_i2c_set_clock(struct octeon_i2c *i2c) { int tclk, thp_base, inc, thp_idx, mdiv_idx, ndiv_idx, foscl, diff; int thp = 0x18, mdiv = 2, ndiv = 0, delta_hz = 1000000; for (ndiv_idx = 0; ndiv_idx < 8 && delta_hz != 0; ndiv_idx++) { /* * An mdiv value of less than 2 seems to not work well * with ds1337 RTCs, so we constrain it to larger values. */ for (mdiv_idx = 15; mdiv_idx >= 2 && delta_hz != 0; mdiv_idx--) { /* * For given ndiv and mdiv values check the * two closest thp values. */ tclk = i2c->twsi_freq * (mdiv_idx + 1) * 10; tclk *= (1 << ndiv_idx); thp_base = (i2c->sys_freq / (tclk * 2)) - 1; for (inc = 0; inc <= 1; inc++) { thp_idx = thp_base + inc; if (thp_idx < 5 || thp_idx > 0xff) continue; foscl = i2c->sys_freq / (2 * (thp_idx + 1)); foscl = foscl / (1 << ndiv_idx); foscl = foscl / (mdiv_idx + 1) / 10; diff = abs(foscl - i2c->twsi_freq); if (diff < delta_hz) { delta_hz = diff; thp = thp_idx; mdiv = mdiv_idx; ndiv = ndiv_idx; } } } } octeon_i2c_reg_write(i2c, SW_TWSI_OP_TWSI_CLK, thp); octeon_i2c_reg_write(i2c, SW_TWSI_EOP_TWSI_CLKCTL, (mdiv << 3) | ndiv); } int octeon_i2c_init_lowlevel(struct octeon_i2c *i2c) { u8 status = 0; int tries; /* reset controller */ octeon_i2c_reg_write(i2c, SW_TWSI_EOP_TWSI_RST, 0); for (tries = 10; tries && status != STAT_IDLE; tries--) { udelay(1); status = octeon_i2c_stat_read(i2c); if (status == STAT_IDLE) break; } if (status != STAT_IDLE) { dev_err(i2c->dev, "%s: TWSI_RST failed! (0x%x)\n", __func__, status); return -EIO; } /* toggle twice to force both teardowns */ octeon_i2c_hlc_enable(i2c); octeon_i2c_hlc_disable(i2c); return 0; } static int octeon_i2c_get_scl(struct i2c_adapter *adap) { struct octeon_i2c *i2c = i2c_get_adapdata(adap); u64 state; state = octeon_i2c_read_int(i2c); return state & TWSI_INT_SCL; } static void octeon_i2c_set_scl(struct i2c_adapter *adap, int val) { struct octeon_i2c *i2c = i2c_get_adapdata(adap); octeon_i2c_write_int(i2c, val ? 
0 : TWSI_INT_SCL_OVR); } static int octeon_i2c_get_sda(struct i2c_adapter *adap) { struct octeon_i2c *i2c = i2c_get_adapdata(adap); u64 state; state = octeon_i2c_read_int(i2c); return state & TWSI_INT_SDA; } static void octeon_i2c_prepare_recovery(struct i2c_adapter *adap) { struct octeon_i2c *i2c = i2c_get_adapdata(adap); octeon_i2c_hlc_disable(i2c); octeon_i2c_reg_write(i2c, SW_TWSI_EOP_TWSI_RST, 0); /* wait for software reset to settle */ udelay(5); /* * Bring control register to a good state regardless * of HLC state. */ octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB); octeon_i2c_write_int(i2c, 0); } static void octeon_i2c_unprepare_recovery(struct i2c_adapter *adap) { struct octeon_i2c *i2c = i2c_get_adapdata(adap); /* * Generate STOP to finish the unfinished transaction. * Can't generate STOP via the TWSI CTL register * since it could bring the TWSI controller into an inoperable state. */ octeon_i2c_write_int(i2c, TWSI_INT_SDA_OVR | TWSI_INT_SCL_OVR); udelay(5); octeon_i2c_write_int(i2c, TWSI_INT_SDA_OVR); udelay(5); octeon_i2c_write_int(i2c, 0); } struct i2c_bus_recovery_info octeon_i2c_recovery_info = { .recover_bus = i2c_generic_scl_recovery, .get_scl = octeon_i2c_get_scl, .set_scl = octeon_i2c_set_scl, .get_sda = octeon_i2c_get_sda, .prepare_recovery = octeon_i2c_prepare_recovery, .unprepare_recovery = octeon_i2c_unprepare_recovery, };
linux-master
drivers/i2c/busses/i2c-octeon-core.c
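octeon_i2c_set_clock() above picks THP, MDIV and NDIV by brute force, minimizing the error between the achievable and the requested TWSI frequency. The host-side sketch below mirrors the same exhaustive search (without the early exit on an exact match); the 800 MHz system clock and 100 kHz bus frequency are example inputs only, and the arithmetic is widened to long long purely as a precaution in the sketch.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	long long sys_freq = 800000000, twsi_freq = 100000;  /* example inputs */
	int best_thp = 0x18, best_mdiv = 2, best_ndiv = 0;
	long long delta = 1000000;

	for (int ndiv = 0; ndiv < 8; ndiv++) {
		for (int mdiv = 15; mdiv >= 2; mdiv--) {
			/* target clock for this mdiv/ndiv pair, then try the
			 * two closest THP candidates around it */
			long long tclk = twsi_freq * (mdiv + 1) * 10 * (1 << ndiv);
			long long thp_base = sys_freq / (tclk * 2) - 1;

			for (int inc = 0; inc <= 1; inc++) {
				long long thp = thp_base + inc;

				if (thp < 5 || thp > 0xff)
					continue;

				long long foscl = sys_freq / (2 * (thp + 1));
				foscl = foscl / (1 << ndiv) / (mdiv + 1) / 10;

				long long diff = llabs(foscl - twsi_freq);
				if (diff < delta) {
					delta = diff;
					best_thp = (int)thp;
					best_mdiv = mdiv;
					best_ndiv = ndiv;
				}
			}
		}
	}

	printf("THP=0x%x MDIV=%d NDIV=%d (error %lld Hz)\n",
	       best_thp, best_mdiv, best_ndiv, delta);
	return 0;
}

In the driver the winning values are then written to SW_TWSI_OP_TWSI_CLK and SW_TWSI_EOP_TWSI_CLKCTL, as shown at the end of octeon_i2c_set_clock().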
// SPDX-License-Identifier: GPL-2.0-or-later /* * TI OMAP I2C master mode driver * * Copyright (C) 2003 MontaVista Software, Inc. * Copyright (C) 2005 Nokia Corporation * Copyright (C) 2004 - 2007 Texas Instruments. * * Originally written by MontaVista Software, Inc. * Additional contributions by: * Tony Lindgren <[email protected]> * Imre Deak <[email protected]> * Juha Yrjölä <[email protected]> * Syed Khasim <[email protected]> * Nishant Menon <[email protected]> */ #include <linux/module.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/completion.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/slab.h> #include <linux/platform_data/i2c-omap.h> #include <linux/pm_runtime.h> #include <linux/pinctrl/consumer.h> /* I2C controller revisions */ #define OMAP_I2C_OMAP1_REV_2 0x20 /* I2C controller revisions present on specific hardware */ #define OMAP_I2C_REV_ON_2430 0x00000036 #define OMAP_I2C_REV_ON_3430_3530 0x0000003C #define OMAP_I2C_REV_ON_3630 0x00000040 #define OMAP_I2C_REV_ON_4430_PLUS 0x50400002 /* timeout waiting for the controller to respond */ #define OMAP_I2C_TIMEOUT (msecs_to_jiffies(1000)) /* timeout for pm runtime autosuspend */ #define OMAP_I2C_PM_TIMEOUT 1000 /* ms */ /* timeout for making decision on bus free status */ #define OMAP_I2C_BUS_FREE_TIMEOUT (msecs_to_jiffies(10)) /* For OMAP3 I2C_IV has changed to I2C_WE (wakeup enable) */ enum { OMAP_I2C_REV_REG = 0, OMAP_I2C_IE_REG, OMAP_I2C_STAT_REG, OMAP_I2C_IV_REG, OMAP_I2C_WE_REG, OMAP_I2C_SYSS_REG, OMAP_I2C_BUF_REG, OMAP_I2C_CNT_REG, OMAP_I2C_DATA_REG, OMAP_I2C_SYSC_REG, OMAP_I2C_CON_REG, OMAP_I2C_OA_REG, OMAP_I2C_SA_REG, OMAP_I2C_PSC_REG, OMAP_I2C_SCLL_REG, OMAP_I2C_SCLH_REG, OMAP_I2C_SYSTEST_REG, OMAP_I2C_BUFSTAT_REG, /* only on OMAP4430 */ OMAP_I2C_IP_V2_REVNB_LO, OMAP_I2C_IP_V2_REVNB_HI, OMAP_I2C_IP_V2_IRQSTATUS_RAW, OMAP_I2C_IP_V2_IRQENABLE_SET, OMAP_I2C_IP_V2_IRQENABLE_CLR, }; /* I2C Interrupt Enable Register (OMAP_I2C_IE): */ #define OMAP_I2C_IE_XDR (1 << 14) /* TX Buffer drain int enable */ #define OMAP_I2C_IE_RDR (1 << 13) /* RX Buffer drain int enable */ #define OMAP_I2C_IE_XRDY (1 << 4) /* TX data ready int enable */ #define OMAP_I2C_IE_RRDY (1 << 3) /* RX data ready int enable */ #define OMAP_I2C_IE_ARDY (1 << 2) /* Access ready int enable */ #define OMAP_I2C_IE_NACK (1 << 1) /* No ack interrupt enable */ #define OMAP_I2C_IE_AL (1 << 0) /* Arbitration lost int ena */ /* I2C Status Register (OMAP_I2C_STAT): */ #define OMAP_I2C_STAT_XDR (1 << 14) /* TX Buffer draining */ #define OMAP_I2C_STAT_RDR (1 << 13) /* RX Buffer draining */ #define OMAP_I2C_STAT_BB (1 << 12) /* Bus busy */ #define OMAP_I2C_STAT_ROVR (1 << 11) /* Receive overrun */ #define OMAP_I2C_STAT_XUDF (1 << 10) /* Transmit underflow */ #define OMAP_I2C_STAT_AAS (1 << 9) /* Address as slave */ #define OMAP_I2C_STAT_BF (1 << 8) /* Bus Free */ #define OMAP_I2C_STAT_XRDY (1 << 4) /* Transmit data ready */ #define OMAP_I2C_STAT_RRDY (1 << 3) /* Receive data ready */ #define OMAP_I2C_STAT_ARDY (1 << 2) /* Register access ready */ #define OMAP_I2C_STAT_NACK (1 << 1) /* No ack interrupt enable */ #define OMAP_I2C_STAT_AL (1 << 0) /* Arbitration lost int ena */ /* I2C WE wakeup enable register */ #define OMAP_I2C_WE_XDR_WE (1 << 14) /* TX drain wakup */ #define OMAP_I2C_WE_RDR_WE (1 << 13) /* RX drain wakeup */ #define OMAP_I2C_WE_AAS_WE (1 << 9) /* Address as slave wakeup*/ #define 
OMAP_I2C_WE_BF_WE (1 << 8) /* Bus free wakeup */ #define OMAP_I2C_WE_STC_WE (1 << 6) /* Start condition wakeup */ #define OMAP_I2C_WE_GC_WE (1 << 5) /* General call wakeup */ #define OMAP_I2C_WE_DRDY_WE (1 << 3) /* TX/RX data ready wakeup */ #define OMAP_I2C_WE_ARDY_WE (1 << 2) /* Reg access ready wakeup */ #define OMAP_I2C_WE_NACK_WE (1 << 1) /* No acknowledgment wakeup */ #define OMAP_I2C_WE_AL_WE (1 << 0) /* Arbitration lost wakeup */ #define OMAP_I2C_WE_ALL (OMAP_I2C_WE_XDR_WE | OMAP_I2C_WE_RDR_WE | \ OMAP_I2C_WE_AAS_WE | OMAP_I2C_WE_BF_WE | \ OMAP_I2C_WE_STC_WE | OMAP_I2C_WE_GC_WE | \ OMAP_I2C_WE_DRDY_WE | OMAP_I2C_WE_ARDY_WE | \ OMAP_I2C_WE_NACK_WE | OMAP_I2C_WE_AL_WE) /* I2C Buffer Configuration Register (OMAP_I2C_BUF): */ #define OMAP_I2C_BUF_RDMA_EN (1 << 15) /* RX DMA channel enable */ #define OMAP_I2C_BUF_RXFIF_CLR (1 << 14) /* RX FIFO Clear */ #define OMAP_I2C_BUF_XDMA_EN (1 << 7) /* TX DMA channel enable */ #define OMAP_I2C_BUF_TXFIF_CLR (1 << 6) /* TX FIFO Clear */ /* I2C Configuration Register (OMAP_I2C_CON): */ #define OMAP_I2C_CON_EN (1 << 15) /* I2C module enable */ #define OMAP_I2C_CON_BE (1 << 14) /* Big endian mode */ #define OMAP_I2C_CON_OPMODE_HS (1 << 12) /* High Speed support */ #define OMAP_I2C_CON_STB (1 << 11) /* Start byte mode (master) */ #define OMAP_I2C_CON_MST (1 << 10) /* Master/slave mode */ #define OMAP_I2C_CON_TRX (1 << 9) /* TX/RX mode (master only) */ #define OMAP_I2C_CON_XA (1 << 8) /* Expand address */ #define OMAP_I2C_CON_RM (1 << 2) /* Repeat mode (master only) */ #define OMAP_I2C_CON_STP (1 << 1) /* Stop cond (master only) */ #define OMAP_I2C_CON_STT (1 << 0) /* Start condition (master) */ /* I2C SCL time value when Master */ #define OMAP_I2C_SCLL_HSSCLL 8 #define OMAP_I2C_SCLH_HSSCLH 8 /* I2C System Test Register (OMAP_I2C_SYSTEST): */ #define OMAP_I2C_SYSTEST_ST_EN (1 << 15) /* System test enable */ #define OMAP_I2C_SYSTEST_FREE (1 << 14) /* Free running mode */ #define OMAP_I2C_SYSTEST_TMODE_MASK (3 << 12) /* Test mode select */ #define OMAP_I2C_SYSTEST_TMODE_SHIFT (12) /* Test mode select */ /* Functional mode */ #define OMAP_I2C_SYSTEST_SCL_I_FUNC (1 << 8) /* SCL line input value */ #define OMAP_I2C_SYSTEST_SCL_O_FUNC (1 << 7) /* SCL line output value */ #define OMAP_I2C_SYSTEST_SDA_I_FUNC (1 << 6) /* SDA line input value */ #define OMAP_I2C_SYSTEST_SDA_O_FUNC (1 << 5) /* SDA line output value */ /* SDA/SCL IO mode */ #define OMAP_I2C_SYSTEST_SCL_I (1 << 3) /* SCL line sense in */ #define OMAP_I2C_SYSTEST_SCL_O (1 << 2) /* SCL line drive out */ #define OMAP_I2C_SYSTEST_SDA_I (1 << 1) /* SDA line sense in */ #define OMAP_I2C_SYSTEST_SDA_O (1 << 0) /* SDA line drive out */ /* OCP_SYSSTATUS bit definitions */ #define SYSS_RESETDONE_MASK (1 << 0) /* OCP_SYSCONFIG bit definitions */ #define SYSC_CLOCKACTIVITY_MASK (0x3 << 8) #define SYSC_SIDLEMODE_MASK (0x3 << 3) #define SYSC_ENAWAKEUP_MASK (1 << 2) #define SYSC_SOFTRESET_MASK (1 << 1) #define SYSC_AUTOIDLE_MASK (1 << 0) #define SYSC_IDLEMODE_SMART 0x2 #define SYSC_CLOCKACTIVITY_FCLK 0x2 /* Errata definitions */ #define I2C_OMAP_ERRATA_I207 (1 << 0) #define I2C_OMAP_ERRATA_I462 (1 << 1) #define OMAP_I2C_IP_V2_INTERRUPTS_MASK 0x6FFF struct omap_i2c_dev { struct device *dev; void __iomem *base; /* virtual */ int irq; int reg_shift; /* bit shift for I2C register addresses */ struct completion cmd_complete; struct resource *ioarea; u32 latency; /* maximum mpu wkup latency */ void (*set_mpu_wkup_lat)(struct device *dev, long latency); u32 speed; /* Speed of bus in kHz */ u32 flags; u16 scheme; u16 
cmd_err; u8 *buf; u8 *regs; size_t buf_len; struct i2c_adapter adapter; u8 threshold; u8 fifo_size; /* use as flag and value * fifo_size==0 implies no fifo * if set, should be trsh+1 */ u32 rev; unsigned b_hw:1; /* bad h/w fixes */ unsigned bb_valid:1; /* true when BB-bit reflects * the I2C bus state */ unsigned receiver:1; /* true when we're in receiver mode */ u16 iestate; /* Saved interrupt register */ u16 pscstate; u16 scllstate; u16 sclhstate; u16 syscstate; u16 westate; u16 errata; }; static const u8 reg_map_ip_v1[] = { [OMAP_I2C_REV_REG] = 0x00, [OMAP_I2C_IE_REG] = 0x01, [OMAP_I2C_STAT_REG] = 0x02, [OMAP_I2C_IV_REG] = 0x03, [OMAP_I2C_WE_REG] = 0x03, [OMAP_I2C_SYSS_REG] = 0x04, [OMAP_I2C_BUF_REG] = 0x05, [OMAP_I2C_CNT_REG] = 0x06, [OMAP_I2C_DATA_REG] = 0x07, [OMAP_I2C_SYSC_REG] = 0x08, [OMAP_I2C_CON_REG] = 0x09, [OMAP_I2C_OA_REG] = 0x0a, [OMAP_I2C_SA_REG] = 0x0b, [OMAP_I2C_PSC_REG] = 0x0c, [OMAP_I2C_SCLL_REG] = 0x0d, [OMAP_I2C_SCLH_REG] = 0x0e, [OMAP_I2C_SYSTEST_REG] = 0x0f, [OMAP_I2C_BUFSTAT_REG] = 0x10, }; static const u8 reg_map_ip_v2[] = { [OMAP_I2C_REV_REG] = 0x04, [OMAP_I2C_IE_REG] = 0x2c, [OMAP_I2C_STAT_REG] = 0x28, [OMAP_I2C_IV_REG] = 0x34, [OMAP_I2C_WE_REG] = 0x34, [OMAP_I2C_SYSS_REG] = 0x90, [OMAP_I2C_BUF_REG] = 0x94, [OMAP_I2C_CNT_REG] = 0x98, [OMAP_I2C_DATA_REG] = 0x9c, [OMAP_I2C_SYSC_REG] = 0x10, [OMAP_I2C_CON_REG] = 0xa4, [OMAP_I2C_OA_REG] = 0xa8, [OMAP_I2C_SA_REG] = 0xac, [OMAP_I2C_PSC_REG] = 0xb0, [OMAP_I2C_SCLL_REG] = 0xb4, [OMAP_I2C_SCLH_REG] = 0xb8, [OMAP_I2C_SYSTEST_REG] = 0xbC, [OMAP_I2C_BUFSTAT_REG] = 0xc0, [OMAP_I2C_IP_V2_REVNB_LO] = 0x00, [OMAP_I2C_IP_V2_REVNB_HI] = 0x04, [OMAP_I2C_IP_V2_IRQSTATUS_RAW] = 0x24, [OMAP_I2C_IP_V2_IRQENABLE_SET] = 0x2c, [OMAP_I2C_IP_V2_IRQENABLE_CLR] = 0x30, }; static int omap_i2c_xfer_data(struct omap_i2c_dev *omap); static inline void omap_i2c_write_reg(struct omap_i2c_dev *omap, int reg, u16 val) { writew_relaxed(val, omap->base + (omap->regs[reg] << omap->reg_shift)); } static inline u16 omap_i2c_read_reg(struct omap_i2c_dev *omap, int reg) { return readw_relaxed(omap->base + (omap->regs[reg] << omap->reg_shift)); } static void __omap_i2c_init(struct omap_i2c_dev *omap) { omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, 0); /* Setup clock prescaler to obtain approx 12MHz I2C module clock: */ omap_i2c_write_reg(omap, OMAP_I2C_PSC_REG, omap->pscstate); /* SCL low and high time values */ omap_i2c_write_reg(omap, OMAP_I2C_SCLL_REG, omap->scllstate); omap_i2c_write_reg(omap, OMAP_I2C_SCLH_REG, omap->sclhstate); if (omap->rev >= OMAP_I2C_REV_ON_3430_3530) omap_i2c_write_reg(omap, OMAP_I2C_WE_REG, omap->westate); /* Take the I2C module out of reset: */ omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN); /* * NOTE: right after setting CON_EN, STAT_BB could be 0 while the * bus is busy. It will be changed to 1 on the next IP FCLK clock. * udelay(1) will be enough to fix that. */ /* * Don't write to this register if the IE state is 0 as it can * cause deadlock. */ if (omap->iestate) omap_i2c_write_reg(omap, OMAP_I2C_IE_REG, omap->iestate); } static int omap_i2c_reset(struct omap_i2c_dev *omap) { unsigned long timeout; u16 sysc; if (omap->rev >= OMAP_I2C_OMAP1_REV_2) { sysc = omap_i2c_read_reg(omap, OMAP_I2C_SYSC_REG); /* Disable I2C controller before soft reset */ omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, omap_i2c_read_reg(omap, OMAP_I2C_CON_REG) & ~(OMAP_I2C_CON_EN)); omap_i2c_write_reg(omap, OMAP_I2C_SYSC_REG, SYSC_SOFTRESET_MASK); /* For some reason we need to set the EN bit before the * reset done bit gets set. 
*/ timeout = jiffies + OMAP_I2C_TIMEOUT; omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN); while (!(omap_i2c_read_reg(omap, OMAP_I2C_SYSS_REG) & SYSS_RESETDONE_MASK)) { if (time_after(jiffies, timeout)) { dev_warn(omap->dev, "timeout waiting " "for controller reset\n"); return -ETIMEDOUT; } msleep(1); } /* SYSC register is cleared by the reset; rewrite it */ omap_i2c_write_reg(omap, OMAP_I2C_SYSC_REG, sysc); if (omap->rev > OMAP_I2C_REV_ON_3430_3530) { /* Schedule I2C-bus monitoring on the next transfer */ omap->bb_valid = 0; } } return 0; } static int omap_i2c_init(struct omap_i2c_dev *omap) { u16 psc = 0, scll = 0, sclh = 0; u16 fsscll = 0, fssclh = 0, hsscll = 0, hssclh = 0; unsigned long fclk_rate = 12000000; unsigned long internal_clk = 0; struct clk *fclk; int error; if (omap->rev >= OMAP_I2C_REV_ON_3430_3530) { /* * Enabling all wakup sources to stop I2C freezing on * WFI instruction. * REVISIT: Some wkup sources might not be needed. */ omap->westate = OMAP_I2C_WE_ALL; } if (omap->flags & OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK) { /* * The I2C functional clock is the armxor_ck, so there's * no need to get "armxor_ck" separately. Now, if OMAP2420 * always returns 12MHz for the functional clock, we can * do this bit unconditionally. */ fclk = clk_get(omap->dev, "fck"); if (IS_ERR(fclk)) { error = PTR_ERR(fclk); dev_err(omap->dev, "could not get fck: %i\n", error); return error; } fclk_rate = clk_get_rate(fclk); clk_put(fclk); /* TRM for 5912 says the I2C clock must be prescaled to be * between 7 - 12 MHz. The XOR input clock is typically * 12, 13 or 19.2 MHz. So we should have code that produces: * * XOR MHz Divider Prescaler * 12 1 0 * 13 2 1 * 19.2 2 1 */ if (fclk_rate > 12000000) psc = fclk_rate / 12000000; } if (!(omap->flags & OMAP_I2C_FLAG_SIMPLE_CLOCK)) { /* * HSI2C controller internal clk rate should be 19.2 Mhz for * HS and for all modes on 2430. On 34xx we can use lower rate * to get longer filter period for better noise suppression. * The filter is iclk (fclk for HS) period. */ if (omap->speed > 400 || omap->flags & OMAP_I2C_FLAG_FORCE_19200_INT_CLK) internal_clk = 19200; else if (omap->speed > 100) internal_clk = 9600; else internal_clk = 4000; fclk = clk_get(omap->dev, "fck"); if (IS_ERR(fclk)) { error = PTR_ERR(fclk); dev_err(omap->dev, "could not get fck: %i\n", error); return error; } fclk_rate = clk_get_rate(fclk) / 1000; clk_put(fclk); /* Compute prescaler divisor */ psc = fclk_rate / internal_clk; psc = psc - 1; /* If configured for High Speed */ if (omap->speed > 400) { unsigned long scl; /* For first phase of HS mode */ scl = internal_clk / 400; fsscll = scl - (scl / 3) - 7; fssclh = (scl / 3) - 5; /* For second phase of HS mode */ scl = fclk_rate / omap->speed; hsscll = scl - (scl / 3) - 7; hssclh = (scl / 3) - 5; } else if (omap->speed > 100) { unsigned long scl; /* Fast mode */ scl = internal_clk / omap->speed; fsscll = scl - (scl / 3) - 7; fssclh = (scl / 3) - 5; } else { /* Standard mode */ fsscll = internal_clk / (omap->speed * 2) - 7; fssclh = internal_clk / (omap->speed * 2) - 5; } scll = (hsscll << OMAP_I2C_SCLL_HSSCLL) | fsscll; sclh = (hssclh << OMAP_I2C_SCLH_HSSCLH) | fssclh; } else { /* Program desired operating rate */ fclk_rate /= (psc + 1) * 1000; if (psc > 2) psc = 2; scll = fclk_rate / (omap->speed * 2) - 7 + psc; sclh = fclk_rate / (omap->speed * 2) - 7 + psc; } omap->iestate = (OMAP_I2C_IE_XRDY | OMAP_I2C_IE_RRDY | OMAP_I2C_IE_ARDY | OMAP_I2C_IE_NACK | OMAP_I2C_IE_AL) | ((omap->fifo_size) ? 
(OMAP_I2C_IE_RDR | OMAP_I2C_IE_XDR) : 0); omap->pscstate = psc; omap->scllstate = scll; omap->sclhstate = sclh; if (omap->rev <= OMAP_I2C_REV_ON_3430_3530) { /* Not implemented */ omap->bb_valid = 1; } __omap_i2c_init(omap); return 0; } /* * Try bus recovery, but only if SDA is actually low. */ static int omap_i2c_recover_bus(struct omap_i2c_dev *omap) { u16 systest; systest = omap_i2c_read_reg(omap, OMAP_I2C_SYSTEST_REG); if ((systest & OMAP_I2C_SYSTEST_SCL_I_FUNC) && (systest & OMAP_I2C_SYSTEST_SDA_I_FUNC)) return 0; /* bus seems to already be fine */ if (!(systest & OMAP_I2C_SYSTEST_SCL_I_FUNC)) return -EBUSY; /* recovery would not fix SCL */ return i2c_recover_bus(&omap->adapter); } /* * Waiting on Bus Busy */ static int omap_i2c_wait_for_bb(struct omap_i2c_dev *omap) { unsigned long timeout; timeout = jiffies + OMAP_I2C_TIMEOUT; while (omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG) & OMAP_I2C_STAT_BB) { if (time_after(jiffies, timeout)) return omap_i2c_recover_bus(omap); msleep(1); } return 0; } /* * Wait while BB-bit doesn't reflect the I2C bus state * * In a multimaster environment, after IP software reset, BB-bit value doesn't * correspond to the current bus state. It may happen what BB-bit will be 0, * while the bus is busy due to another I2C master activity. * Here are BB-bit values after reset: * SDA SCL BB NOTES * 0 0 0 1, 2 * 1 0 0 1, 2 * 0 1 1 * 1 1 0 3 * Later, if IP detect SDA=0 and SCL=1 (ACK) or SDA 1->0 while SCL=1 (START) * combinations on the bus, it set BB-bit to 1. * If IP detect SDA 0->1 while SCL=1 (STOP) combination on the bus, * it set BB-bit to 0 and BF to 1. * BB and BF bits correctly tracks the bus state while IP is suspended * BB bit became valid on the next FCLK clock after CON_EN bit set * * NOTES: * 1. Any transfer started when BB=0 and bus is busy wouldn't be * completed by IP and results in controller timeout. * 2. Any transfer started when BB=0 and SCL=0 results in IP * starting to drive SDA low. In that case IP corrupt data * on the bus. * 3. Any transfer started in the middle of another master's transfer * results in unpredictable results and data corruption */ static int omap_i2c_wait_for_bb_valid(struct omap_i2c_dev *omap) { unsigned long bus_free_timeout = 0; unsigned long timeout; int bus_free = 0; u16 stat, systest; if (omap->bb_valid) return 0; timeout = jiffies + OMAP_I2C_TIMEOUT; while (1) { stat = omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG); /* * We will see BB or BF event in a case IP had detected any * activity on the I2C bus. Now IP correctly tracks the bus * state. BB-bit value is valid. */ if (stat & (OMAP_I2C_STAT_BB | OMAP_I2C_STAT_BF)) break; /* * Otherwise, we must look signals on the bus to make * the right decision. */ systest = omap_i2c_read_reg(omap, OMAP_I2C_SYSTEST_REG); if ((systest & OMAP_I2C_SYSTEST_SCL_I_FUNC) && (systest & OMAP_I2C_SYSTEST_SDA_I_FUNC)) { if (!bus_free) { bus_free_timeout = jiffies + OMAP_I2C_BUS_FREE_TIMEOUT; bus_free = 1; } /* * SDA and SCL lines was high for 10 ms without bus * activity detected. The bus is free. Consider * BB-bit value is valid. */ if (time_after(jiffies, bus_free_timeout)) break; } else { bus_free = 0; } if (time_after(jiffies, timeout)) { /* * SDA or SCL were low for the entire timeout without * any activity detected. Most likely, a slave is * locking up the bus with no master driving the clock. 
*/ dev_warn(omap->dev, "timeout waiting for bus ready\n"); return omap_i2c_recover_bus(omap); } msleep(1); } omap->bb_valid = 1; return 0; } static void omap_i2c_resize_fifo(struct omap_i2c_dev *omap, u8 size, bool is_rx) { u16 buf; if (omap->flags & OMAP_I2C_FLAG_NO_FIFO) return; /* * Set up notification threshold based on message size. We're doing * this to try and avoid draining feature as much as possible. Whenever * we have big messages to transfer (bigger than our total fifo size) * then we might use draining feature to transfer the remaining bytes. */ omap->threshold = clamp(size, (u8) 1, omap->fifo_size); buf = omap_i2c_read_reg(omap, OMAP_I2C_BUF_REG); if (is_rx) { /* Clear RX Threshold */ buf &= ~(0x3f << 8); buf |= ((omap->threshold - 1) << 8) | OMAP_I2C_BUF_RXFIF_CLR; } else { /* Clear TX Threshold */ buf &= ~0x3f; buf |= (omap->threshold - 1) | OMAP_I2C_BUF_TXFIF_CLR; } omap_i2c_write_reg(omap, OMAP_I2C_BUF_REG, buf); if (omap->rev < OMAP_I2C_REV_ON_3630) omap->b_hw = 1; /* Enable hardware fixes */ /* calculate wakeup latency constraint for MPU */ if (omap->set_mpu_wkup_lat != NULL) omap->latency = (1000000 * omap->threshold) / (1000 * omap->speed / 8); } static void omap_i2c_wait(struct omap_i2c_dev *omap) { u16 stat; u16 mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG); int count = 0; do { stat = omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG); count++; } while (!(stat & mask) && count < 5); } /* * Low level master read/write transaction. */ static int omap_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop, bool polling) { struct omap_i2c_dev *omap = i2c_get_adapdata(adap); unsigned long timeout; u16 w; int ret; dev_dbg(omap->dev, "addr: 0x%04x, len: %d, flags: 0x%x, stop: %d\n", msg->addr, msg->len, msg->flags, stop); omap->receiver = !!(msg->flags & I2C_M_RD); omap_i2c_resize_fifo(omap, msg->len, omap->receiver); omap_i2c_write_reg(omap, OMAP_I2C_SA_REG, msg->addr); /* REVISIT: Could the STB bit of I2C_CON be used with probing? */ omap->buf = msg->buf; omap->buf_len = msg->len; /* make sure writes to omap->buf_len are ordered */ barrier(); omap_i2c_write_reg(omap, OMAP_I2C_CNT_REG, omap->buf_len); /* Clear the FIFO Buffers */ w = omap_i2c_read_reg(omap, OMAP_I2C_BUF_REG); w |= OMAP_I2C_BUF_RXFIF_CLR | OMAP_I2C_BUF_TXFIF_CLR; omap_i2c_write_reg(omap, OMAP_I2C_BUF_REG, w); if (!polling) reinit_completion(&omap->cmd_complete); omap->cmd_err = 0; w = OMAP_I2C_CON_EN | OMAP_I2C_CON_MST | OMAP_I2C_CON_STT; /* High speed configuration */ if (omap->speed > 400) w |= OMAP_I2C_CON_OPMODE_HS; if (msg->flags & I2C_M_STOP) stop = 1; if (msg->flags & I2C_M_TEN) w |= OMAP_I2C_CON_XA; if (!(msg->flags & I2C_M_RD)) w |= OMAP_I2C_CON_TRX; if (!omap->b_hw && stop) w |= OMAP_I2C_CON_STP; /* * NOTE: STAT_BB bit could became 1 here if another master occupy * the bus. IP successfully complete transfer when the bus will be * free again (BB reset to 0). */ omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, w); /* * Don't write stt and stp together on some hardware. 
*/ if (omap->b_hw && stop) { unsigned long delay = jiffies + OMAP_I2C_TIMEOUT; u16 con = omap_i2c_read_reg(omap, OMAP_I2C_CON_REG); while (con & OMAP_I2C_CON_STT) { con = omap_i2c_read_reg(omap, OMAP_I2C_CON_REG); /* Let the user know if i2c is in a bad state */ if (time_after(jiffies, delay)) { dev_err(omap->dev, "controller timed out " "waiting for start condition to finish\n"); return -ETIMEDOUT; } cpu_relax(); } w |= OMAP_I2C_CON_STP; w &= ~OMAP_I2C_CON_STT; omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, w); } /* * REVISIT: We should abort the transfer on signals, but the bus goes * into arbitration and we're currently unable to recover from it. */ if (!polling) { timeout = wait_for_completion_timeout(&omap->cmd_complete, OMAP_I2C_TIMEOUT); } else { do { omap_i2c_wait(omap); ret = omap_i2c_xfer_data(omap); } while (ret == -EAGAIN); timeout = !ret; } if (timeout == 0) { dev_err(omap->dev, "controller timed out\n"); omap_i2c_reset(omap); __omap_i2c_init(omap); return -ETIMEDOUT; } if (likely(!omap->cmd_err)) return 0; /* We have an error */ if (omap->cmd_err & (OMAP_I2C_STAT_ROVR | OMAP_I2C_STAT_XUDF)) { omap_i2c_reset(omap); __omap_i2c_init(omap); return -EIO; } if (omap->cmd_err & OMAP_I2C_STAT_AL) return -EAGAIN; if (omap->cmd_err & OMAP_I2C_STAT_NACK) { if (msg->flags & I2C_M_IGNORE_NAK) return 0; w = omap_i2c_read_reg(omap, OMAP_I2C_CON_REG); w |= OMAP_I2C_CON_STP; omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, w); return -EREMOTEIO; } return -EIO; } /* * Prepare controller for a transaction and call omap_i2c_xfer_msg * to do the work during IRQ processing. */ static int omap_i2c_xfer_common(struct i2c_adapter *adap, struct i2c_msg msgs[], int num, bool polling) { struct omap_i2c_dev *omap = i2c_get_adapdata(adap); int i; int r; r = pm_runtime_get_sync(omap->dev); if (r < 0) goto out; r = omap_i2c_wait_for_bb_valid(omap); if (r < 0) goto out; r = omap_i2c_wait_for_bb(omap); if (r < 0) goto out; if (omap->set_mpu_wkup_lat != NULL) omap->set_mpu_wkup_lat(omap->dev, omap->latency); for (i = 0; i < num; i++) { r = omap_i2c_xfer_msg(adap, &msgs[i], (i == (num - 1)), polling); if (r != 0) break; } if (r == 0) r = num; omap_i2c_wait_for_bb(omap); if (omap->set_mpu_wkup_lat != NULL) omap->set_mpu_wkup_lat(omap->dev, -1); out: pm_runtime_mark_last_busy(omap->dev); pm_runtime_put_autosuspend(omap->dev); return r; } static int omap_i2c_xfer_irq(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) { return omap_i2c_xfer_common(adap, msgs, num, false); } static int omap_i2c_xfer_polling(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) { return omap_i2c_xfer_common(adap, msgs, num, true); } static u32 omap_i2c_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK) | I2C_FUNC_PROTOCOL_MANGLING; } static inline void omap_i2c_complete_cmd(struct omap_i2c_dev *omap, u16 err) { omap->cmd_err |= err; complete(&omap->cmd_complete); } static inline void omap_i2c_ack_stat(struct omap_i2c_dev *omap, u16 stat) { omap_i2c_write_reg(omap, OMAP_I2C_STAT_REG, stat); } static inline void i2c_omap_errata_i207(struct omap_i2c_dev *omap, u16 stat) { /* * I2C Errata(Errata Nos. OMAP2: 1.67, OMAP3: 1.8) * Not applicable for OMAP4. * Under certain rare conditions, RDR could be set again * when the bus is busy, then ignore the interrupt and * clear the interrupt. 
*/ if (stat & OMAP_I2C_STAT_RDR) { /* Step 1: If RDR is set, clear it */ omap_i2c_ack_stat(omap, OMAP_I2C_STAT_RDR); /* Step 2: */ if (!(omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG) & OMAP_I2C_STAT_BB)) { /* Step 3: */ if (omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG) & OMAP_I2C_STAT_RDR) { omap_i2c_ack_stat(omap, OMAP_I2C_STAT_RDR); dev_dbg(omap->dev, "RDR when bus is busy.\n"); } } } } /* rev1 devices are apparently only on some 15xx */ #ifdef CONFIG_ARCH_OMAP15XX static irqreturn_t omap_i2c_omap1_isr(int this_irq, void *dev_id) { struct omap_i2c_dev *omap = dev_id; u16 iv, w; if (pm_runtime_suspended(omap->dev)) return IRQ_NONE; iv = omap_i2c_read_reg(omap, OMAP_I2C_IV_REG); switch (iv) { case 0x00: /* None */ break; case 0x01: /* Arbitration lost */ dev_err(omap->dev, "Arbitration lost\n"); omap_i2c_complete_cmd(omap, OMAP_I2C_STAT_AL); break; case 0x02: /* No acknowledgement */ omap_i2c_complete_cmd(omap, OMAP_I2C_STAT_NACK); omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, OMAP_I2C_CON_STP); break; case 0x03: /* Register access ready */ omap_i2c_complete_cmd(omap, 0); break; case 0x04: /* Receive data ready */ if (omap->buf_len) { w = omap_i2c_read_reg(omap, OMAP_I2C_DATA_REG); *omap->buf++ = w; omap->buf_len--; if (omap->buf_len) { *omap->buf++ = w >> 8; omap->buf_len--; } } else dev_err(omap->dev, "RRDY IRQ while no data requested\n"); break; case 0x05: /* Transmit data ready */ if (omap->buf_len) { w = *omap->buf++; omap->buf_len--; if (omap->buf_len) { w |= *omap->buf++ << 8; omap->buf_len--; } omap_i2c_write_reg(omap, OMAP_I2C_DATA_REG, w); } else dev_err(omap->dev, "XRDY IRQ while no data to send\n"); break; default: return IRQ_NONE; } return IRQ_HANDLED; } #else #define omap_i2c_omap1_isr NULL #endif /* * OMAP3430 Errata i462: When an XRDY/XDR is hit, wait for XUDF before writing * data to DATA_REG. Otherwise some data bytes can be lost while transferring * them from the memory to the I2C interface. 
*/ static int errata_omap3_i462(struct omap_i2c_dev *omap) { unsigned long timeout = 10000; u16 stat; do { stat = omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG); if (stat & OMAP_I2C_STAT_XUDF) break; if (stat & (OMAP_I2C_STAT_NACK | OMAP_I2C_STAT_AL)) { omap_i2c_ack_stat(omap, (OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR)); if (stat & OMAP_I2C_STAT_NACK) { omap->cmd_err |= OMAP_I2C_STAT_NACK; omap_i2c_ack_stat(omap, OMAP_I2C_STAT_NACK); } if (stat & OMAP_I2C_STAT_AL) { dev_err(omap->dev, "Arbitration lost\n"); omap->cmd_err |= OMAP_I2C_STAT_AL; omap_i2c_ack_stat(omap, OMAP_I2C_STAT_AL); } return -EIO; } cpu_relax(); } while (--timeout); if (!timeout) { dev_err(omap->dev, "timeout waiting on XUDF bit\n"); return 0; } return 0; } static void omap_i2c_receive_data(struct omap_i2c_dev *omap, u8 num_bytes, bool is_rdr) { u16 w; while (num_bytes--) { w = omap_i2c_read_reg(omap, OMAP_I2C_DATA_REG); *omap->buf++ = w; omap->buf_len--; /* * Data reg in 2430, omap3 and * omap4 is 8 bit wide */ if (omap->flags & OMAP_I2C_FLAG_16BIT_DATA_REG) { *omap->buf++ = w >> 8; omap->buf_len--; } } } static int omap_i2c_transmit_data(struct omap_i2c_dev *omap, u8 num_bytes, bool is_xdr) { u16 w; while (num_bytes--) { w = *omap->buf++; omap->buf_len--; /* * Data reg in 2430, omap3 and * omap4 is 8 bit wide */ if (omap->flags & OMAP_I2C_FLAG_16BIT_DATA_REG) { w |= *omap->buf++ << 8; omap->buf_len--; } if (omap->errata & I2C_OMAP_ERRATA_I462) { int ret; ret = errata_omap3_i462(omap); if (ret < 0) return ret; } omap_i2c_write_reg(omap, OMAP_I2C_DATA_REG, w); } return 0; } static irqreturn_t omap_i2c_isr(int irq, void *dev_id) { struct omap_i2c_dev *omap = dev_id; irqreturn_t ret = IRQ_HANDLED; u16 mask; u16 stat; stat = omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG); mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG) & ~OMAP_I2C_STAT_NACK; if (stat & mask) ret = IRQ_WAKE_THREAD; return ret; } static int omap_i2c_xfer_data(struct omap_i2c_dev *omap) { u16 bits; u16 stat; int err = 0, count = 0; do { bits = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG); stat = omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG); stat &= bits; /* If we're in receiver mode, ignore XDR/XRDY */ if (omap->receiver) stat &= ~(OMAP_I2C_STAT_XDR | OMAP_I2C_STAT_XRDY); else stat &= ~(OMAP_I2C_STAT_RDR | OMAP_I2C_STAT_RRDY); if (!stat) { /* my work here is done */ err = -EAGAIN; break; } dev_dbg(omap->dev, "IRQ (ISR = 0x%04x)\n", stat); if (count++ == 100) { dev_warn(omap->dev, "Too much work in one IRQ\n"); break; } if (stat & OMAP_I2C_STAT_NACK) { err |= OMAP_I2C_STAT_NACK; omap_i2c_ack_stat(omap, OMAP_I2C_STAT_NACK); } if (stat & OMAP_I2C_STAT_AL) { dev_err(omap->dev, "Arbitration lost\n"); err |= OMAP_I2C_STAT_AL; omap_i2c_ack_stat(omap, OMAP_I2C_STAT_AL); } /* * ProDB0017052: Clear ARDY bit twice */ if (stat & OMAP_I2C_STAT_ARDY) omap_i2c_ack_stat(omap, OMAP_I2C_STAT_ARDY); if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK | OMAP_I2C_STAT_AL)) { omap_i2c_ack_stat(omap, (OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR | OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR | OMAP_I2C_STAT_ARDY)); break; } if (stat & OMAP_I2C_STAT_RDR) { u8 num_bytes = 1; if (omap->fifo_size) num_bytes = omap->buf_len; if (omap->errata & I2C_OMAP_ERRATA_I207) { i2c_omap_errata_i207(omap, stat); num_bytes = (omap_i2c_read_reg(omap, OMAP_I2C_BUFSTAT_REG) >> 8) & 0x3F; } omap_i2c_receive_data(omap, num_bytes, true); omap_i2c_ack_stat(omap, OMAP_I2C_STAT_RDR); continue; } if (stat & OMAP_I2C_STAT_RRDY) { u8 num_bytes = 1; if (omap->threshold) num_bytes = omap->threshold; omap_i2c_receive_data(omap, num_bytes, 
false); omap_i2c_ack_stat(omap, OMAP_I2C_STAT_RRDY); continue; } if (stat & OMAP_I2C_STAT_XDR) { u8 num_bytes = 1; int ret; if (omap->fifo_size) num_bytes = omap->buf_len; ret = omap_i2c_transmit_data(omap, num_bytes, true); if (ret < 0) break; omap_i2c_ack_stat(omap, OMAP_I2C_STAT_XDR); continue; } if (stat & OMAP_I2C_STAT_XRDY) { u8 num_bytes = 1; int ret; if (omap->threshold) num_bytes = omap->threshold; ret = omap_i2c_transmit_data(omap, num_bytes, false); if (ret < 0) break; omap_i2c_ack_stat(omap, OMAP_I2C_STAT_XRDY); continue; } if (stat & OMAP_I2C_STAT_ROVR) { dev_err(omap->dev, "Receive overrun\n"); err |= OMAP_I2C_STAT_ROVR; omap_i2c_ack_stat(omap, OMAP_I2C_STAT_ROVR); break; } if (stat & OMAP_I2C_STAT_XUDF) { dev_err(omap->dev, "Transmit underflow\n"); err |= OMAP_I2C_STAT_XUDF; omap_i2c_ack_stat(omap, OMAP_I2C_STAT_XUDF); break; } } while (stat); return err; } static irqreturn_t omap_i2c_isr_thread(int this_irq, void *dev_id) { int ret; struct omap_i2c_dev *omap = dev_id; ret = omap_i2c_xfer_data(omap); if (ret != -EAGAIN) omap_i2c_complete_cmd(omap, ret); return IRQ_HANDLED; } static const struct i2c_algorithm omap_i2c_algo = { .master_xfer = omap_i2c_xfer_irq, .master_xfer_atomic = omap_i2c_xfer_polling, .functionality = omap_i2c_func, }; static const struct i2c_adapter_quirks omap_i2c_quirks = { .flags = I2C_AQ_NO_ZERO_LEN, }; #ifdef CONFIG_OF static struct omap_i2c_bus_platform_data omap2420_pdata = { .rev = OMAP_I2C_IP_VERSION_1, .flags = OMAP_I2C_FLAG_NO_FIFO | OMAP_I2C_FLAG_SIMPLE_CLOCK | OMAP_I2C_FLAG_16BIT_DATA_REG | OMAP_I2C_FLAG_BUS_SHIFT_2, }; static struct omap_i2c_bus_platform_data omap2430_pdata = { .rev = OMAP_I2C_IP_VERSION_1, .flags = OMAP_I2C_FLAG_BUS_SHIFT_2 | OMAP_I2C_FLAG_FORCE_19200_INT_CLK, }; static struct omap_i2c_bus_platform_data omap3_pdata = { .rev = OMAP_I2C_IP_VERSION_1, .flags = OMAP_I2C_FLAG_BUS_SHIFT_2, }; static struct omap_i2c_bus_platform_data omap4_pdata = { .rev = OMAP_I2C_IP_VERSION_2, }; static const struct of_device_id omap_i2c_of_match[] = { { .compatible = "ti,omap4-i2c", .data = &omap4_pdata, }, { .compatible = "ti,omap3-i2c", .data = &omap3_pdata, }, { .compatible = "ti,omap2430-i2c", .data = &omap2430_pdata, }, { .compatible = "ti,omap2420-i2c", .data = &omap2420_pdata, }, { }, }; MODULE_DEVICE_TABLE(of, omap_i2c_of_match); #endif #define OMAP_I2C_SCHEME(rev) ((rev & 0xc000) >> 14) #define OMAP_I2C_REV_SCHEME_0_MAJOR(rev) (rev >> 4) #define OMAP_I2C_REV_SCHEME_0_MINOR(rev) (rev & 0xf) #define OMAP_I2C_REV_SCHEME_1_MAJOR(rev) ((rev & 0x0700) >> 7) #define OMAP_I2C_REV_SCHEME_1_MINOR(rev) (rev & 0x1f) #define OMAP_I2C_SCHEME_0 0 #define OMAP_I2C_SCHEME_1 1 static int omap_i2c_get_scl(struct i2c_adapter *adap) { struct omap_i2c_dev *dev = i2c_get_adapdata(adap); u32 reg; reg = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG); return reg & OMAP_I2C_SYSTEST_SCL_I_FUNC; } static int omap_i2c_get_sda(struct i2c_adapter *adap) { struct omap_i2c_dev *dev = i2c_get_adapdata(adap); u32 reg; reg = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG); return reg & OMAP_I2C_SYSTEST_SDA_I_FUNC; } static void omap_i2c_set_scl(struct i2c_adapter *adap, int val) { struct omap_i2c_dev *dev = i2c_get_adapdata(adap); u32 reg; reg = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG); if (val) reg |= OMAP_I2C_SYSTEST_SCL_O; else reg &= ~OMAP_I2C_SYSTEST_SCL_O; omap_i2c_write_reg(dev, OMAP_I2C_SYSTEST_REG, reg); } static void omap_i2c_prepare_recovery(struct i2c_adapter *adap) { struct omap_i2c_dev *dev = i2c_get_adapdata(adap); u32 reg; reg = omap_i2c_read_reg(dev, 
OMAP_I2C_SYSTEST_REG); /* enable test mode */ reg |= OMAP_I2C_SYSTEST_ST_EN; /* select SDA/SCL IO mode */ reg |= 3 << OMAP_I2C_SYSTEST_TMODE_SHIFT; /* set SCL to high-impedance state (reset value is 0) */ reg |= OMAP_I2C_SYSTEST_SCL_O; /* set SDA to high-impedance state (reset value is 0) */ reg |= OMAP_I2C_SYSTEST_SDA_O; omap_i2c_write_reg(dev, OMAP_I2C_SYSTEST_REG, reg); } static void omap_i2c_unprepare_recovery(struct i2c_adapter *adap) { struct omap_i2c_dev *dev = i2c_get_adapdata(adap); u32 reg; reg = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG); /* restore reset values */ reg &= ~OMAP_I2C_SYSTEST_ST_EN; reg &= ~OMAP_I2C_SYSTEST_TMODE_MASK; reg &= ~OMAP_I2C_SYSTEST_SCL_O; reg &= ~OMAP_I2C_SYSTEST_SDA_O; omap_i2c_write_reg(dev, OMAP_I2C_SYSTEST_REG, reg); } static struct i2c_bus_recovery_info omap_i2c_bus_recovery_info = { .get_scl = omap_i2c_get_scl, .get_sda = omap_i2c_get_sda, .set_scl = omap_i2c_set_scl, .prepare_recovery = omap_i2c_prepare_recovery, .unprepare_recovery = omap_i2c_unprepare_recovery, .recover_bus = i2c_generic_scl_recovery, }; static int omap_i2c_probe(struct platform_device *pdev) { struct omap_i2c_dev *omap; struct i2c_adapter *adap; const struct omap_i2c_bus_platform_data *pdata = dev_get_platdata(&pdev->dev); struct device_node *node = pdev->dev.of_node; const struct of_device_id *match; int irq; int r; u32 rev; u16 minor, major; irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; omap = devm_kzalloc(&pdev->dev, sizeof(struct omap_i2c_dev), GFP_KERNEL); if (!omap) return -ENOMEM; omap->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(omap->base)) return PTR_ERR(omap->base); match = of_match_device(of_match_ptr(omap_i2c_of_match), &pdev->dev); if (match) { u32 freq = I2C_MAX_STANDARD_MODE_FREQ; pdata = match->data; omap->flags = pdata->flags; of_property_read_u32(node, "clock-frequency", &freq); /* convert DT freq value in Hz into kHz for speed */ omap->speed = freq / 1000; } else if (pdata != NULL) { omap->speed = pdata->clkrate; omap->flags = pdata->flags; omap->set_mpu_wkup_lat = pdata->set_mpu_wkup_lat; } omap->dev = &pdev->dev; omap->irq = irq; platform_set_drvdata(pdev, omap); init_completion(&omap->cmd_complete); omap->reg_shift = (omap->flags >> OMAP_I2C_FLAG_BUS_SHIFT__SHIFT) & 3; pm_runtime_enable(omap->dev); pm_runtime_set_autosuspend_delay(omap->dev, OMAP_I2C_PM_TIMEOUT); pm_runtime_use_autosuspend(omap->dev); r = pm_runtime_resume_and_get(omap->dev); if (r < 0) goto err_disable_pm; /* * Read the Rev hi bit-[15:14] ie scheme this is 1 indicates ver2. * On omap1/3/2 Offset 4 is IE Reg the bit [15:14] is 0 at reset. * Also since the omap_i2c_read_reg uses reg_map_ip_* a * readw_relaxed is done. 
*/ rev = readw_relaxed(omap->base + 0x04); omap->scheme = OMAP_I2C_SCHEME(rev); switch (omap->scheme) { case OMAP_I2C_SCHEME_0: omap->regs = (u8 *)reg_map_ip_v1; omap->rev = omap_i2c_read_reg(omap, OMAP_I2C_REV_REG); minor = OMAP_I2C_REV_SCHEME_0_MAJOR(omap->rev); major = OMAP_I2C_REV_SCHEME_0_MAJOR(omap->rev); break; case OMAP_I2C_SCHEME_1: default: omap->regs = (u8 *)reg_map_ip_v2; rev = (rev << 16) | omap_i2c_read_reg(omap, OMAP_I2C_IP_V2_REVNB_LO); minor = OMAP_I2C_REV_SCHEME_1_MINOR(rev); major = OMAP_I2C_REV_SCHEME_1_MAJOR(rev); omap->rev = rev; } omap->errata = 0; if (omap->rev >= OMAP_I2C_REV_ON_2430 && omap->rev < OMAP_I2C_REV_ON_4430_PLUS) omap->errata |= I2C_OMAP_ERRATA_I207; if (omap->rev <= OMAP_I2C_REV_ON_3430_3530) omap->errata |= I2C_OMAP_ERRATA_I462; if (!(omap->flags & OMAP_I2C_FLAG_NO_FIFO)) { u16 s; /* Set up the fifo size - Get total size */ s = (omap_i2c_read_reg(omap, OMAP_I2C_BUFSTAT_REG) >> 14) & 0x3; omap->fifo_size = 0x8 << s; /* * Set up notification threshold as half the total available * size. This is to ensure that we can handle the status on int * call back latencies. */ omap->fifo_size = (omap->fifo_size / 2); if (omap->rev < OMAP_I2C_REV_ON_3630) omap->b_hw = 1; /* Enable hardware fixes */ /* calculate wakeup latency constraint for MPU */ if (omap->set_mpu_wkup_lat != NULL) omap->latency = (1000000 * omap->fifo_size) / (1000 * omap->speed / 8); } /* reset ASAP, clearing any IRQs */ omap_i2c_init(omap); if (omap->rev < OMAP_I2C_OMAP1_REV_2) r = devm_request_irq(&pdev->dev, omap->irq, omap_i2c_omap1_isr, IRQF_NO_SUSPEND, pdev->name, omap); else r = devm_request_threaded_irq(&pdev->dev, omap->irq, omap_i2c_isr, omap_i2c_isr_thread, IRQF_NO_SUSPEND | IRQF_ONESHOT, pdev->name, omap); if (r) { dev_err(omap->dev, "failure requesting irq %i\n", omap->irq); goto err_unuse_clocks; } adap = &omap->adapter; i2c_set_adapdata(adap, omap); adap->owner = THIS_MODULE; adap->class = I2C_CLASS_DEPRECATED; strscpy(adap->name, "OMAP I2C adapter", sizeof(adap->name)); adap->algo = &omap_i2c_algo; adap->quirks = &omap_i2c_quirks; adap->dev.parent = &pdev->dev; adap->dev.of_node = pdev->dev.of_node; adap->bus_recovery_info = &omap_i2c_bus_recovery_info; /* i2c device drivers may be active on return from add_adapter() */ adap->nr = pdev->id; r = i2c_add_numbered_adapter(adap); if (r) goto err_unuse_clocks; dev_info(omap->dev, "bus %d rev%d.%d at %d kHz\n", adap->nr, major, minor, omap->speed); pm_runtime_mark_last_busy(omap->dev); pm_runtime_put_autosuspend(omap->dev); return 0; err_unuse_clocks: omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, 0); pm_runtime_dont_use_autosuspend(omap->dev); pm_runtime_put_sync(omap->dev); err_disable_pm: pm_runtime_disable(&pdev->dev); return r; } static void omap_i2c_remove(struct platform_device *pdev) { struct omap_i2c_dev *omap = platform_get_drvdata(pdev); int ret; i2c_del_adapter(&omap->adapter); ret = pm_runtime_get_sync(&pdev->dev); if (ret < 0) dev_err(omap->dev, "Failed to resume hardware, skip disable\n"); else omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, 0); pm_runtime_dont_use_autosuspend(&pdev->dev); pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); } static int __maybe_unused omap_i2c_runtime_suspend(struct device *dev) { struct omap_i2c_dev *omap = dev_get_drvdata(dev); omap->iestate = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG); if (omap->scheme == OMAP_I2C_SCHEME_0) omap_i2c_write_reg(omap, OMAP_I2C_IE_REG, 0); else omap_i2c_write_reg(omap, OMAP_I2C_IP_V2_IRQENABLE_CLR, OMAP_I2C_IP_V2_INTERRUPTS_MASK); if (omap->rev < 
OMAP_I2C_OMAP1_REV_2) { omap_i2c_read_reg(omap, OMAP_I2C_IV_REG); /* Read clears */ } else { omap_i2c_write_reg(omap, OMAP_I2C_STAT_REG, omap->iestate); /* Flush posted write */ omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG); } pinctrl_pm_select_sleep_state(dev); return 0; } static int __maybe_unused omap_i2c_runtime_resume(struct device *dev) { struct omap_i2c_dev *omap = dev_get_drvdata(dev); pinctrl_pm_select_default_state(dev); if (!omap->regs) return 0; __omap_i2c_init(omap); return 0; } static const struct dev_pm_ops omap_i2c_pm_ops = { SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) SET_RUNTIME_PM_OPS(omap_i2c_runtime_suspend, omap_i2c_runtime_resume, NULL) }; static struct platform_driver omap_i2c_driver = { .probe = omap_i2c_probe, .remove_new = omap_i2c_remove, .driver = { .name = "omap_i2c", .pm = &omap_i2c_pm_ops, .of_match_table = of_match_ptr(omap_i2c_of_match), }, }; /* I2C may be needed to bring up other drivers */ static int __init omap_i2c_init_driver(void) { return platform_driver_register(&omap_i2c_driver); } subsys_initcall(omap_i2c_init_driver); static void __exit omap_i2c_exit_driver(void) { platform_driver_unregister(&omap_i2c_driver); } module_exit(omap_i2c_exit_driver); MODULE_AUTHOR("MontaVista Software, Inc. (and others)"); MODULE_DESCRIPTION("TI OMAP I2C bus adapter"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:omap_i2c");
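A quick worked example of the wake-up latency bound computed in the probe path above: the expression (1000000 * fifo_size) / (1000 * speed / 8) is roughly the number of microseconds needed to shift half the FIFO across the bus at the programmed speed (speed is held in kHz). The sketch below is a stand-alone user-space illustration with made-up values, not driver code.

/*
 * Stand-alone arithmetic sketch of the MPU wake-up latency bound from the
 * probe path above: roughly the time (in microseconds) to move half the
 * FIFO at the programmed bus speed. The values below are hypothetical.
 */
#include <stdio.h>

int main(void)
{
	unsigned long fifo_size = 8;	/* bytes: half of a 16-byte FIFO */
	unsigned long speed = 400;	/* bus speed in kHz */
	unsigned long latency = (1000000 * fifo_size) / (1000 * speed / 8);

	printf("allowed MPU wake-up latency: %lu us\n", latency);	/* 160 */
	return 0;
}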
linux-master
drivers/i2c/busses/i2c-omap.c
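The probe code in this driver selects between two revision-register layouts using the top two bits of the word at offset 0x04 (OMAP_I2C_SCHEME). The stand-alone sketch below re-states the decode macros so it compiles on its own and walks a hypothetical register value through them; the sample values are invented and the snippet only illustrates the bit layout, it is not part of the driver. Note that the scheme-0 branch of the driver derives both major and minor with the SCHEME_0_MAJOR macro; the sketch uses SCHEME_0_MINOR for the minor field, which is what the macro name suggests.

/*
 * Stand-alone sketch: how the OMAP revision scheme and major/minor fields
 * are carved out of raw register words. Macros copied from the driver
 * above; the register values are made up.
 */
#include <stdio.h>
#include <stdint.h>

#define OMAP_I2C_SCHEME(rev)			(((rev) & 0xc000) >> 14)
#define OMAP_I2C_REV_SCHEME_0_MAJOR(rev)	((rev) >> 4)
#define OMAP_I2C_REV_SCHEME_0_MINOR(rev)	((rev) & 0xf)
#define OMAP_I2C_REV_SCHEME_1_MAJOR(rev)	(((rev) & 0x0700) >> 7)
#define OMAP_I2C_REV_SCHEME_1_MINOR(rev)	((rev) & 0x1f)

int main(void)
{
	uint16_t rev_hi = 0x5041;	/* hypothetical word read at offset 0x04 */

	if (OMAP_I2C_SCHEME(rev_hi) == 1) {
		uint16_t revnb_lo = 0x000a;	/* hypothetical REVNB_LO */

		printf("scheme 1: v%d.%d\n",
		       OMAP_I2C_REV_SCHEME_1_MAJOR(revnb_lo),
		       OMAP_I2C_REV_SCHEME_1_MINOR(revnb_lo));	/* v0.10 */
	} else {
		printf("scheme 0: v%d.%d\n",
		       OMAP_I2C_REV_SCHEME_0_MAJOR(rev_hi),
		       OMAP_I2C_REV_SCHEME_0_MINOR(rev_hi));
	}
	return 0;
}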
// SPDX-License-Identifier: GPL-2.0-only /* * Driver for I2C adapter in Rockchip RK3xxx SoC * * Max Schwarz <[email protected]> * based on the patches by Rockchip Inc. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/iopoll.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/spinlock.h> #include <linux/clk.h> #include <linux/wait.h> #include <linux/mfd/syscon.h> #include <linux/regmap.h> #include <linux/math64.h> /* Register Map */ #define REG_CON 0x00 /* control register */ #define REG_CLKDIV 0x04 /* clock divisor register */ #define REG_MRXADDR 0x08 /* slave address for REGISTER_TX */ #define REG_MRXRADDR 0x0c /* slave register address for REGISTER_TX */ #define REG_MTXCNT 0x10 /* number of bytes to be transmitted */ #define REG_MRXCNT 0x14 /* number of bytes to be received */ #define REG_IEN 0x18 /* interrupt enable */ #define REG_IPD 0x1c /* interrupt pending */ #define REG_FCNT 0x20 /* finished count */ /* Data buffer offsets */ #define TXBUFFER_BASE 0x100 #define RXBUFFER_BASE 0x200 /* REG_CON bits */ #define REG_CON_EN BIT(0) enum { REG_CON_MOD_TX = 0, /* transmit data */ REG_CON_MOD_REGISTER_TX, /* select register and restart */ REG_CON_MOD_RX, /* receive data */ REG_CON_MOD_REGISTER_RX, /* broken: transmits read addr AND writes * register addr */ }; #define REG_CON_MOD(mod) ((mod) << 1) #define REG_CON_MOD_MASK (BIT(1) | BIT(2)) #define REG_CON_START BIT(3) #define REG_CON_STOP BIT(4) #define REG_CON_LASTACK BIT(5) /* 1: send NACK after last received byte */ #define REG_CON_ACTACK BIT(6) /* 1: stop if NACK is received */ #define REG_CON_TUNING_MASK GENMASK_ULL(15, 8) #define REG_CON_SDA_CFG(cfg) ((cfg) << 8) #define REG_CON_STA_CFG(cfg) ((cfg) << 12) #define REG_CON_STO_CFG(cfg) ((cfg) << 14) /* REG_MRXADDR bits */ #define REG_MRXADDR_VALID(x) BIT(24 + (x)) /* [x*8+7:x*8] of MRX[R]ADDR valid */ /* REG_IEN/REG_IPD bits */ #define REG_INT_BTF BIT(0) /* a byte was transmitted */ #define REG_INT_BRF BIT(1) /* a byte was received */ #define REG_INT_MBTF BIT(2) /* master data transmit finished */ #define REG_INT_MBRF BIT(3) /* master data receive finished */ #define REG_INT_START BIT(4) /* START condition generated */ #define REG_INT_STOP BIT(5) /* STOP condition generated */ #define REG_INT_NAKRCV BIT(6) /* NACK received */ #define REG_INT_ALL 0x7f /* Constants */ #define WAIT_TIMEOUT 1000 /* ms */ #define DEFAULT_SCL_RATE (100 * 1000) /* Hz */ /** * struct i2c_spec_values - I2C specification values for various modes * @min_hold_start_ns: min hold time (repeated) START condition * @min_low_ns: min LOW period of the SCL clock * @min_high_ns: min HIGH period of the SCL cloc * @min_setup_start_ns: min set-up time for a repeated START conditio * @max_data_hold_ns: max data hold time * @min_data_setup_ns: min data set-up time * @min_setup_stop_ns: min set-up time for STOP condition * @min_hold_buffer_ns: min bus free time between a STOP and * START condition */ struct i2c_spec_values { unsigned long min_hold_start_ns; unsigned long min_low_ns; unsigned long min_high_ns; unsigned long min_setup_start_ns; unsigned long max_data_hold_ns; unsigned long min_data_setup_ns; unsigned long min_setup_stop_ns; unsigned long min_hold_buffer_ns; }; static const struct i2c_spec_values standard_mode_spec = { .min_hold_start_ns = 4000, .min_low_ns = 4700, .min_high_ns = 4000, .min_setup_start_ns = 4700, 
.max_data_hold_ns = 3450, .min_data_setup_ns = 250, .min_setup_stop_ns = 4000, .min_hold_buffer_ns = 4700, }; static const struct i2c_spec_values fast_mode_spec = { .min_hold_start_ns = 600, .min_low_ns = 1300, .min_high_ns = 600, .min_setup_start_ns = 600, .max_data_hold_ns = 900, .min_data_setup_ns = 100, .min_setup_stop_ns = 600, .min_hold_buffer_ns = 1300, }; static const struct i2c_spec_values fast_mode_plus_spec = { .min_hold_start_ns = 260, .min_low_ns = 500, .min_high_ns = 260, .min_setup_start_ns = 260, .max_data_hold_ns = 400, .min_data_setup_ns = 50, .min_setup_stop_ns = 260, .min_hold_buffer_ns = 500, }; /** * struct rk3x_i2c_calced_timings - calculated V1 timings * @div_low: Divider output for low * @div_high: Divider output for high * @tuning: Used to adjust setup/hold data time, * setup/hold start time and setup stop time for * v1's calc_timings, the tuning should all be 0 * for old hardware anyone using v0's calc_timings. */ struct rk3x_i2c_calced_timings { unsigned long div_low; unsigned long div_high; unsigned int tuning; }; enum rk3x_i2c_state { STATE_IDLE, STATE_START, STATE_READ, STATE_WRITE, STATE_STOP }; /** * struct rk3x_i2c_soc_data - SOC-specific data * @grf_offset: offset inside the grf regmap for setting the i2c type * @calc_timings: Callback function for i2c timing information calculated */ struct rk3x_i2c_soc_data { int grf_offset; int (*calc_timings)(unsigned long, struct i2c_timings *, struct rk3x_i2c_calced_timings *); }; /** * struct rk3x_i2c - private data of the controller * @adap: corresponding I2C adapter * @dev: device for this controller * @soc_data: related soc data struct * @regs: virtual memory area * @clk: function clk for rk3399 or function & Bus clks for others * @pclk: Bus clk for rk3399 * @clk_rate_nb: i2c clk rate change notify * @t: I2C known timing information * @lock: spinlock for the i2c bus * @wait: the waitqueue to wait for i2c transfer * @busy: the condition for the event to wait for * @msg: current i2c message * @addr: addr of i2c slave device * @mode: mode of i2c transfer * @is_last_msg: flag determines whether it is the last msg in this transfer * @state: state of i2c transfer * @processed: byte length which has been send or received * @error: error code for i2c transfer */ struct rk3x_i2c { struct i2c_adapter adap; struct device *dev; const struct rk3x_i2c_soc_data *soc_data; /* Hardware resources */ void __iomem *regs; struct clk *clk; struct clk *pclk; struct notifier_block clk_rate_nb; /* Settings */ struct i2c_timings t; /* Synchronization & notification */ spinlock_t lock; wait_queue_head_t wait; bool busy; /* Current message */ struct i2c_msg *msg; u8 addr; unsigned int mode; bool is_last_msg; /* I2C state machine */ enum rk3x_i2c_state state; unsigned int processed; int error; }; static inline void i2c_writel(struct rk3x_i2c *i2c, u32 value, unsigned int offset) { writel(value, i2c->regs + offset); } static inline u32 i2c_readl(struct rk3x_i2c *i2c, unsigned int offset) { return readl(i2c->regs + offset); } /* Reset all interrupt pending bits */ static inline void rk3x_i2c_clean_ipd(struct rk3x_i2c *i2c) { i2c_writel(i2c, REG_INT_ALL, REG_IPD); } /** * rk3x_i2c_start - Generate a START condition, which triggers a REG_INT_START interrupt. 
* @i2c: target controller data */ static void rk3x_i2c_start(struct rk3x_i2c *i2c) { u32 val = i2c_readl(i2c, REG_CON) & REG_CON_TUNING_MASK; i2c_writel(i2c, REG_INT_START, REG_IEN); /* enable adapter with correct mode, send START condition */ val |= REG_CON_EN | REG_CON_MOD(i2c->mode) | REG_CON_START; /* if we want to react to NACK, set ACTACK bit */ if (!(i2c->msg->flags & I2C_M_IGNORE_NAK)) val |= REG_CON_ACTACK; i2c_writel(i2c, val, REG_CON); } /** * rk3x_i2c_stop - Generate a STOP condition, which triggers a REG_INT_STOP interrupt. * @i2c: target controller data * @error: Error code to return in rk3x_i2c_xfer */ static void rk3x_i2c_stop(struct rk3x_i2c *i2c, int error) { unsigned int ctrl; i2c->processed = 0; i2c->msg = NULL; i2c->error = error; if (i2c->is_last_msg) { /* Enable stop interrupt */ i2c_writel(i2c, REG_INT_STOP, REG_IEN); i2c->state = STATE_STOP; ctrl = i2c_readl(i2c, REG_CON); ctrl |= REG_CON_STOP; i2c_writel(i2c, ctrl, REG_CON); } else { /* Signal rk3x_i2c_xfer to start the next message. */ i2c->busy = false; i2c->state = STATE_IDLE; /* * The HW is actually not capable of REPEATED START. But we can * get the intended effect by resetting its internal state * and issuing an ordinary START. */ ctrl = i2c_readl(i2c, REG_CON) & REG_CON_TUNING_MASK; i2c_writel(i2c, ctrl, REG_CON); /* signal that we are finished with the current msg */ wake_up(&i2c->wait); } } /** * rk3x_i2c_prepare_read - Setup a read according to i2c->msg * @i2c: target controller data */ static void rk3x_i2c_prepare_read(struct rk3x_i2c *i2c) { unsigned int len = i2c->msg->len - i2c->processed; u32 con; con = i2c_readl(i2c, REG_CON); /* * The hw can read up to 32 bytes at a time. If we need more than one * chunk, send an ACK after the last byte of the current chunk. */ if (len > 32) { len = 32; con &= ~REG_CON_LASTACK; } else { con |= REG_CON_LASTACK; } /* make sure we are in plain RX mode if we read a second chunk */ if (i2c->processed != 0) { con &= ~REG_CON_MOD_MASK; con |= REG_CON_MOD(REG_CON_MOD_RX); } i2c_writel(i2c, con, REG_CON); i2c_writel(i2c, len, REG_MRXCNT); } /** * rk3x_i2c_fill_transmit_buf - Fill the transmit buffer with data from i2c->msg * @i2c: target controller data */ static void rk3x_i2c_fill_transmit_buf(struct rk3x_i2c *i2c) { unsigned int i, j; u32 cnt = 0; u32 val; u8 byte; for (i = 0; i < 8; ++i) { val = 0; for (j = 0; j < 4; ++j) { if ((i2c->processed == i2c->msg->len) && (cnt != 0)) break; if (i2c->processed == 0 && cnt == 0) byte = (i2c->addr & 0x7f) << 1; else byte = i2c->msg->buf[i2c->processed++]; val |= byte << (j * 8); cnt++; } i2c_writel(i2c, val, TXBUFFER_BASE + 4 * i); if (i2c->processed == i2c->msg->len) break; } i2c_writel(i2c, cnt, REG_MTXCNT); } /* IRQ handlers for individual states */ static void rk3x_i2c_handle_start(struct rk3x_i2c *i2c, unsigned int ipd) { if (!(ipd & REG_INT_START)) { rk3x_i2c_stop(i2c, -EIO); dev_warn(i2c->dev, "unexpected irq in START: 0x%x\n", ipd); rk3x_i2c_clean_ipd(i2c); return; } /* ack interrupt */ i2c_writel(i2c, REG_INT_START, REG_IPD); /* disable start bit */ i2c_writel(i2c, i2c_readl(i2c, REG_CON) & ~REG_CON_START, REG_CON); /* enable appropriate interrupts and transition */ if (i2c->mode == REG_CON_MOD_TX) { i2c_writel(i2c, REG_INT_MBTF | REG_INT_NAKRCV, REG_IEN); i2c->state = STATE_WRITE; rk3x_i2c_fill_transmit_buf(i2c); } else { /* in any other case, we are going to be reading. 
*/ i2c_writel(i2c, REG_INT_MBRF | REG_INT_NAKRCV, REG_IEN); i2c->state = STATE_READ; rk3x_i2c_prepare_read(i2c); } } static void rk3x_i2c_handle_write(struct rk3x_i2c *i2c, unsigned int ipd) { if (!(ipd & REG_INT_MBTF)) { rk3x_i2c_stop(i2c, -EIO); dev_err(i2c->dev, "unexpected irq in WRITE: 0x%x\n", ipd); rk3x_i2c_clean_ipd(i2c); return; } /* ack interrupt */ i2c_writel(i2c, REG_INT_MBTF, REG_IPD); /* are we finished? */ if (i2c->processed == i2c->msg->len) rk3x_i2c_stop(i2c, i2c->error); else rk3x_i2c_fill_transmit_buf(i2c); } static void rk3x_i2c_handle_read(struct rk3x_i2c *i2c, unsigned int ipd) { unsigned int i; unsigned int len = i2c->msg->len - i2c->processed; u32 val; u8 byte; /* we only care for MBRF here. */ if (!(ipd & REG_INT_MBRF)) return; /* ack interrupt (read also produces a spurious START flag, clear it too) */ i2c_writel(i2c, REG_INT_MBRF | REG_INT_START, REG_IPD); /* Can only handle a maximum of 32 bytes at a time */ if (len > 32) len = 32; /* read the data from receive buffer */ for (i = 0; i < len; ++i) { if (i % 4 == 0) val = i2c_readl(i2c, RXBUFFER_BASE + (i / 4) * 4); byte = (val >> ((i % 4) * 8)) & 0xff; i2c->msg->buf[i2c->processed++] = byte; } /* are we finished? */ if (i2c->processed == i2c->msg->len) rk3x_i2c_stop(i2c, i2c->error); else rk3x_i2c_prepare_read(i2c); } static void rk3x_i2c_handle_stop(struct rk3x_i2c *i2c, unsigned int ipd) { unsigned int con; if (!(ipd & REG_INT_STOP)) { rk3x_i2c_stop(i2c, -EIO); dev_err(i2c->dev, "unexpected irq in STOP: 0x%x\n", ipd); rk3x_i2c_clean_ipd(i2c); return; } /* ack interrupt */ i2c_writel(i2c, REG_INT_STOP, REG_IPD); /* disable STOP bit */ con = i2c_readl(i2c, REG_CON); con &= ~REG_CON_STOP; i2c_writel(i2c, con, REG_CON); i2c->busy = false; i2c->state = STATE_IDLE; /* signal rk3x_i2c_xfer that we are finished */ wake_up(&i2c->wait); } static irqreturn_t rk3x_i2c_irq(int irqno, void *dev_id) { struct rk3x_i2c *i2c = dev_id; unsigned int ipd; spin_lock(&i2c->lock); ipd = i2c_readl(i2c, REG_IPD); if (i2c->state == STATE_IDLE) { dev_warn(i2c->dev, "irq in STATE_IDLE, ipd = 0x%x\n", ipd); rk3x_i2c_clean_ipd(i2c); goto out; } dev_dbg(i2c->dev, "IRQ: state %d, ipd: %x\n", i2c->state, ipd); /* Clean interrupt bits we don't care about */ ipd &= ~(REG_INT_BRF | REG_INT_BTF); if (ipd & REG_INT_NAKRCV) { /* * We got a NACK in the last operation. Depending on whether * IGNORE_NAK is set, we have to stop the operation and report * an error. */ i2c_writel(i2c, REG_INT_NAKRCV, REG_IPD); ipd &= ~REG_INT_NAKRCV; if (!(i2c->msg->flags & I2C_M_IGNORE_NAK)) rk3x_i2c_stop(i2c, -ENXIO); } /* is there anything left to handle? */ if ((ipd & REG_INT_ALL) == 0) goto out; switch (i2c->state) { case STATE_START: rk3x_i2c_handle_start(i2c, ipd); break; case STATE_WRITE: rk3x_i2c_handle_write(i2c, ipd); break; case STATE_READ: rk3x_i2c_handle_read(i2c, ipd); break; case STATE_STOP: rk3x_i2c_handle_stop(i2c, ipd); break; case STATE_IDLE: break; } out: spin_unlock(&i2c->lock); return IRQ_HANDLED; } /** * rk3x_i2c_get_spec - Get timing values of I2C specification * @speed: Desired SCL frequency * * Return: Matched i2c_spec_values. 
*/ static const struct i2c_spec_values *rk3x_i2c_get_spec(unsigned int speed) { if (speed <= I2C_MAX_STANDARD_MODE_FREQ) return &standard_mode_spec; else if (speed <= I2C_MAX_FAST_MODE_FREQ) return &fast_mode_spec; else return &fast_mode_plus_spec; } /** * rk3x_i2c_v0_calc_timings - Calculate divider values for desired SCL frequency * @clk_rate: I2C input clock rate * @t: Known I2C timing information * @t_calc: Caculated rk3x private timings that would be written into regs * * Return: %0 on success, -%EINVAL if the goal SCL rate is too slow. In that case * a best-effort divider value is returned in divs. If the target rate is * too high, we silently use the highest possible rate. */ static int rk3x_i2c_v0_calc_timings(unsigned long clk_rate, struct i2c_timings *t, struct rk3x_i2c_calced_timings *t_calc) { unsigned long min_low_ns, min_high_ns; unsigned long max_low_ns, min_total_ns; unsigned long clk_rate_khz, scl_rate_khz; unsigned long min_low_div, min_high_div; unsigned long max_low_div; unsigned long min_div_for_hold, min_total_div; unsigned long extra_div, extra_low_div, ideal_low_div; unsigned long data_hold_buffer_ns = 50; const struct i2c_spec_values *spec; int ret = 0; /* Only support standard-mode and fast-mode */ if (WARN_ON(t->bus_freq_hz > I2C_MAX_FAST_MODE_FREQ)) t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ; /* prevent scl_rate_khz from becoming 0 */ if (WARN_ON(t->bus_freq_hz < 1000)) t->bus_freq_hz = 1000; /* * min_low_ns: The minimum number of ns we need to hold low to * meet I2C specification, should include fall time. * min_high_ns: The minimum number of ns we need to hold high to * meet I2C specification, should include rise time. * max_low_ns: The maximum number of ns we can hold low to meet * I2C specification. * * Note: max_low_ns should be (maximum data hold time * 2 - buffer) * This is because the i2c host on Rockchip holds the data line * for half the low time. */ spec = rk3x_i2c_get_spec(t->bus_freq_hz); min_high_ns = t->scl_rise_ns + spec->min_high_ns; /* * Timings for repeated start: * - controller appears to drop SDA at .875x (7/8) programmed clk high. * - controller appears to keep SCL high for 2x programmed clk high. * * We need to account for those rules in picking our "high" time so * we meet tSU;STA and tHD;STA times. */ min_high_ns = max(min_high_ns, DIV_ROUND_UP( (t->scl_rise_ns + spec->min_setup_start_ns) * 1000, 875)); min_high_ns = max(min_high_ns, DIV_ROUND_UP( (t->scl_rise_ns + spec->min_setup_start_ns + t->sda_fall_ns + spec->min_high_ns), 2)); min_low_ns = t->scl_fall_ns + spec->min_low_ns; max_low_ns = spec->max_data_hold_ns * 2 - data_hold_buffer_ns; min_total_ns = min_low_ns + min_high_ns; /* Adjust to avoid overflow */ clk_rate_khz = DIV_ROUND_UP(clk_rate, 1000); scl_rate_khz = t->bus_freq_hz / 1000; /* * We need the total div to be >= this number * so we don't clock too fast. */ min_total_div = DIV_ROUND_UP(clk_rate_khz, scl_rate_khz * 8); /* These are the min dividers needed for min hold times. */ min_low_div = DIV_ROUND_UP(clk_rate_khz * min_low_ns, 8 * 1000000); min_high_div = DIV_ROUND_UP(clk_rate_khz * min_high_ns, 8 * 1000000); min_div_for_hold = (min_low_div + min_high_div); /* * This is the maximum divider so we don't go over the maximum. * We don't round up here (we round down) since this is a maximum. 
*/ max_low_div = clk_rate_khz * max_low_ns / (8 * 1000000); if (min_low_div > max_low_div) { WARN_ONCE(true, "Conflicting, min_low_div %lu, max_low_div %lu\n", min_low_div, max_low_div); max_low_div = min_low_div; } if (min_div_for_hold > min_total_div) { /* * Time needed to meet hold requirements is important. * Just use that. */ t_calc->div_low = min_low_div; t_calc->div_high = min_high_div; } else { /* * We've got to distribute some time among the low and high * so we don't run too fast. */ extra_div = min_total_div - min_div_for_hold; /* * We'll try to split things up perfectly evenly, * biasing slightly towards having a higher div * for low (spend more time low). */ ideal_low_div = DIV_ROUND_UP(clk_rate_khz * min_low_ns, scl_rate_khz * 8 * min_total_ns); /* Don't allow it to go over the maximum */ if (ideal_low_div > max_low_div) ideal_low_div = max_low_div; /* * Handle when the ideal low div is going to take up * more than we have. */ if (ideal_low_div > min_low_div + extra_div) ideal_low_div = min_low_div + extra_div; /* Give low the "ideal" and give high whatever extra is left */ extra_low_div = ideal_low_div - min_low_div; t_calc->div_low = ideal_low_div; t_calc->div_high = min_high_div + (extra_div - extra_low_div); } /* * Adjust to the fact that the hardware has an implicit "+1". * NOTE: Above calculations always produce div_low > 0 and div_high > 0. */ t_calc->div_low--; t_calc->div_high--; /* Give the tuning value 0, that would not update con register */ t_calc->tuning = 0; /* Maximum divider supported by hw is 0xffff */ if (t_calc->div_low > 0xffff) { t_calc->div_low = 0xffff; ret = -EINVAL; } if (t_calc->div_high > 0xffff) { t_calc->div_high = 0xffff; ret = -EINVAL; } return ret; } /** * rk3x_i2c_v1_calc_timings - Calculate timing values for desired SCL frequency * @clk_rate: I2C input clock rate * @t: Known I2C timing information * @t_calc: Caculated rk3x private timings that would be written into regs * * Return: %0 on success, -%EINVAL if the goal SCL rate is too slow. In that case * a best-effort divider value is returned in divs. If the target rate is * too high, we silently use the highest possible rate. * The following formulas are v1's method to calculate timings. * * l = divl + 1; * h = divh + 1; * s = sda_update_config + 1; * u = start_setup_config + 1; * p = stop_setup_config + 1; * T = Tclk_i2c; * * tHigh = 8 * h * T; * tLow = 8 * l * T; * * tHD;sda = (l * s + 1) * T; * tSU;sda = [(8 - s) * l + 1] * T; * tI2C = 8 * (l + h) * T; * * tSU;sta = (8h * u + 1) * T; * tHD;sta = [8h * (u + 1) - 1] * T; * tSU;sto = (8h * p + 1) * T; */ static int rk3x_i2c_v1_calc_timings(unsigned long clk_rate, struct i2c_timings *t, struct rk3x_i2c_calced_timings *t_calc) { unsigned long min_low_ns, min_high_ns; unsigned long min_setup_start_ns, min_setup_data_ns; unsigned long min_setup_stop_ns, max_hold_data_ns; unsigned long clk_rate_khz, scl_rate_khz; unsigned long min_low_div, min_high_div; unsigned long min_div_for_hold, min_total_div; unsigned long extra_div, extra_low_div; unsigned long sda_update_cfg, stp_sta_cfg, stp_sto_cfg; const struct i2c_spec_values *spec; int ret = 0; /* Support standard-mode, fast-mode and fast-mode plus */ if (WARN_ON(t->bus_freq_hz > I2C_MAX_FAST_MODE_PLUS_FREQ)) t->bus_freq_hz = I2C_MAX_FAST_MODE_PLUS_FREQ; /* prevent scl_rate_khz from becoming 0 */ if (WARN_ON(t->bus_freq_hz < 1000)) t->bus_freq_hz = 1000; /* * min_low_ns: The minimum number of ns we need to hold low to * meet I2C specification, should include fall time. 
* min_high_ns: The minimum number of ns we need to hold high to * meet I2C specification, should include rise time. */ spec = rk3x_i2c_get_spec(t->bus_freq_hz); /* calculate min-divh and min-divl */ clk_rate_khz = DIV_ROUND_UP(clk_rate, 1000); scl_rate_khz = t->bus_freq_hz / 1000; min_total_div = DIV_ROUND_UP(clk_rate_khz, scl_rate_khz * 8); min_high_ns = t->scl_rise_ns + spec->min_high_ns; min_high_div = DIV_ROUND_UP(clk_rate_khz * min_high_ns, 8 * 1000000); min_low_ns = t->scl_fall_ns + spec->min_low_ns; min_low_div = DIV_ROUND_UP(clk_rate_khz * min_low_ns, 8 * 1000000); /* * Final divh and divl must be greater than 0, otherwise the * hardware would not output the i2c clk. */ min_high_div = (min_high_div < 1) ? 2 : min_high_div; min_low_div = (min_low_div < 1) ? 2 : min_low_div; /* These are the min dividers needed for min hold times. */ min_div_for_hold = (min_low_div + min_high_div); /* * This is the maximum divider so we don't go over the maximum. * We don't round up here (we round down) since this is a maximum. */ if (min_div_for_hold >= min_total_div) { /* * Time needed to meet hold requirements is important. * Just use that. */ t_calc->div_low = min_low_div; t_calc->div_high = min_high_div; } else { /* * We've got to distribute some time among the low and high * so we don't run too fast. * We'll try to split things up by the scale of min_low_div and * min_high_div, biasing slightly towards having a higher div * for low (spend more time low). */ extra_div = min_total_div - min_div_for_hold; extra_low_div = DIV_ROUND_UP(min_low_div * extra_div, min_div_for_hold); t_calc->div_low = min_low_div + extra_low_div; t_calc->div_high = min_high_div + (extra_div - extra_low_div); } /* * calculate sda data hold count by the rules, data_upd_st:3 * is a appropriate value to reduce calculated times. 
*/ for (sda_update_cfg = 3; sda_update_cfg > 0; sda_update_cfg--) { max_hold_data_ns = DIV_ROUND_UP((sda_update_cfg * (t_calc->div_low) + 1) * 1000000, clk_rate_khz); min_setup_data_ns = DIV_ROUND_UP(((8 - sda_update_cfg) * (t_calc->div_low) + 1) * 1000000, clk_rate_khz); if ((max_hold_data_ns < spec->max_data_hold_ns) && (min_setup_data_ns > spec->min_data_setup_ns)) break; } /* calculate setup start config */ min_setup_start_ns = t->scl_rise_ns + spec->min_setup_start_ns; stp_sta_cfg = DIV_ROUND_UP(clk_rate_khz * min_setup_start_ns - 1000000, 8 * 1000000 * (t_calc->div_high)); /* calculate setup stop config */ min_setup_stop_ns = t->scl_rise_ns + spec->min_setup_stop_ns; stp_sto_cfg = DIV_ROUND_UP(clk_rate_khz * min_setup_stop_ns - 1000000, 8 * 1000000 * (t_calc->div_high)); t_calc->tuning = REG_CON_SDA_CFG(--sda_update_cfg) | REG_CON_STA_CFG(--stp_sta_cfg) | REG_CON_STO_CFG(--stp_sto_cfg); t_calc->div_low--; t_calc->div_high--; /* Maximum divider supported by hw is 0xffff */ if (t_calc->div_low > 0xffff) { t_calc->div_low = 0xffff; ret = -EINVAL; } if (t_calc->div_high > 0xffff) { t_calc->div_high = 0xffff; ret = -EINVAL; } return ret; } static void rk3x_i2c_adapt_div(struct rk3x_i2c *i2c, unsigned long clk_rate) { struct i2c_timings *t = &i2c->t; struct rk3x_i2c_calced_timings calc; u64 t_low_ns, t_high_ns; unsigned long flags; u32 val; int ret; ret = i2c->soc_data->calc_timings(clk_rate, t, &calc); WARN_ONCE(ret != 0, "Could not reach SCL freq %u", t->bus_freq_hz); clk_enable(i2c->pclk); spin_lock_irqsave(&i2c->lock, flags); val = i2c_readl(i2c, REG_CON); val &= ~REG_CON_TUNING_MASK; val |= calc.tuning; i2c_writel(i2c, val, REG_CON); i2c_writel(i2c, (calc.div_high << 16) | (calc.div_low & 0xffff), REG_CLKDIV); spin_unlock_irqrestore(&i2c->lock, flags); clk_disable(i2c->pclk); t_low_ns = div_u64(((u64)calc.div_low + 1) * 8 * 1000000000, clk_rate); t_high_ns = div_u64(((u64)calc.div_high + 1) * 8 * 1000000000, clk_rate); dev_dbg(i2c->dev, "CLK %lukhz, Req %uns, Act low %lluns high %lluns\n", clk_rate / 1000, 1000000000 / t->bus_freq_hz, t_low_ns, t_high_ns); } /** * rk3x_i2c_clk_notifier_cb - Clock rate change callback * @nb: Pointer to notifier block * @event: Notification reason * @data: Pointer to notification data object * * The callback checks whether a valid bus frequency can be generated after the * change. If so, the change is acknowledged, otherwise the change is aborted. * New dividers are written to the HW in the pre- or post change notification * depending on the scaling direction. * * Code adapted from i2c-cadence.c. * * Return: NOTIFY_STOP if the rate change should be aborted, NOTIFY_OK * to acknowledge the change, NOTIFY_DONE if the notification is * considered irrelevant. */ static int rk3x_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long event, void *data) { struct clk_notifier_data *ndata = data; struct rk3x_i2c *i2c = container_of(nb, struct rk3x_i2c, clk_rate_nb); struct rk3x_i2c_calced_timings calc; switch (event) { case PRE_RATE_CHANGE: /* * Try the calculation (but don't store the result) ahead of * time to see if we need to block the clock change. Timings * shouldn't actually take effect until rk3x_i2c_adapt_div(). 
*/ if (i2c->soc_data->calc_timings(ndata->new_rate, &i2c->t, &calc) != 0) return NOTIFY_STOP; /* scale up */ if (ndata->new_rate > ndata->old_rate) rk3x_i2c_adapt_div(i2c, ndata->new_rate); return NOTIFY_OK; case POST_RATE_CHANGE: /* scale down */ if (ndata->new_rate < ndata->old_rate) rk3x_i2c_adapt_div(i2c, ndata->new_rate); return NOTIFY_OK; case ABORT_RATE_CHANGE: /* scale up */ if (ndata->new_rate > ndata->old_rate) rk3x_i2c_adapt_div(i2c, ndata->old_rate); return NOTIFY_OK; default: return NOTIFY_DONE; } } /** * rk3x_i2c_setup - Setup I2C registers for an I2C operation specified by msgs, num. * @i2c: target controller data * @msgs: I2C msgs to process * @num: Number of msgs * * Must be called with i2c->lock held. * * Return: Number of I2C msgs processed or negative in case of error */ static int rk3x_i2c_setup(struct rk3x_i2c *i2c, struct i2c_msg *msgs, int num) { u32 addr = (msgs[0].addr & 0x7f) << 1; int ret = 0; /* * The I2C adapter can issue a small (len < 4) write packet before * reading. This speeds up SMBus-style register reads. * The MRXADDR/MRXRADDR hold the slave address and the slave register * address in this case. */ if (num >= 2 && msgs[0].len < 4 && !(msgs[0].flags & I2C_M_RD) && (msgs[1].flags & I2C_M_RD)) { u32 reg_addr = 0; int i; dev_dbg(i2c->dev, "Combined write/read from addr 0x%x\n", addr >> 1); /* Fill MRXRADDR with the register address(es) */ for (i = 0; i < msgs[0].len; ++i) { reg_addr |= msgs[0].buf[i] << (i * 8); reg_addr |= REG_MRXADDR_VALID(i); } /* msgs[0] is handled by hw. */ i2c->msg = &msgs[1]; i2c->mode = REG_CON_MOD_REGISTER_TX; i2c_writel(i2c, addr | REG_MRXADDR_VALID(0), REG_MRXADDR); i2c_writel(i2c, reg_addr, REG_MRXRADDR); ret = 2; } else { /* * We'll have to do it the boring way and process the msgs * one-by-one. */ if (msgs[0].flags & I2C_M_RD) { addr |= 1; /* set read bit */ /* * We have to transmit the slave addr first. Use * MOD_REGISTER_TX for that purpose. */ i2c->mode = REG_CON_MOD_REGISTER_TX; i2c_writel(i2c, addr | REG_MRXADDR_VALID(0), REG_MRXADDR); i2c_writel(i2c, 0, REG_MRXRADDR); } else { i2c->mode = REG_CON_MOD_TX; } i2c->msg = &msgs[0]; ret = 1; } i2c->addr = msgs[0].addr; i2c->busy = true; i2c->state = STATE_START; i2c->processed = 0; i2c->error = 0; rk3x_i2c_clean_ipd(i2c); return ret; } static int rk3x_i2c_wait_xfer_poll(struct rk3x_i2c *i2c) { ktime_t timeout = ktime_add_ms(ktime_get(), WAIT_TIMEOUT); while (READ_ONCE(i2c->busy) && ktime_compare(ktime_get(), timeout) < 0) { udelay(5); rk3x_i2c_irq(0, i2c); } return !i2c->busy; } static int rk3x_i2c_xfer_common(struct i2c_adapter *adap, struct i2c_msg *msgs, int num, bool polling) { struct rk3x_i2c *i2c = (struct rk3x_i2c *)adap->algo_data; unsigned long timeout, flags; u32 val; int ret = 0; int i; spin_lock_irqsave(&i2c->lock, flags); clk_enable(i2c->clk); clk_enable(i2c->pclk); i2c->is_last_msg = false; /* * Process msgs. We can handle more than one message at once (see * rk3x_i2c_setup()). 
*/ for (i = 0; i < num; i += ret) { ret = rk3x_i2c_setup(i2c, msgs + i, num - i); if (ret < 0) { dev_err(i2c->dev, "rk3x_i2c_setup() failed\n"); break; } if (i + ret >= num) i2c->is_last_msg = true; spin_unlock_irqrestore(&i2c->lock, flags); rk3x_i2c_start(i2c); if (!polling) { timeout = wait_event_timeout(i2c->wait, !i2c->busy, msecs_to_jiffies(WAIT_TIMEOUT)); } else { timeout = rk3x_i2c_wait_xfer_poll(i2c); } spin_lock_irqsave(&i2c->lock, flags); if (timeout == 0) { dev_err(i2c->dev, "timeout, ipd: 0x%02x, state: %d\n", i2c_readl(i2c, REG_IPD), i2c->state); /* Force a STOP condition without interrupt */ i2c_writel(i2c, 0, REG_IEN); val = i2c_readl(i2c, REG_CON) & REG_CON_TUNING_MASK; val |= REG_CON_EN | REG_CON_STOP; i2c_writel(i2c, val, REG_CON); i2c->state = STATE_IDLE; ret = -ETIMEDOUT; break; } if (i2c->error) { ret = i2c->error; break; } } clk_disable(i2c->pclk); clk_disable(i2c->clk); spin_unlock_irqrestore(&i2c->lock, flags); return ret < 0 ? ret : num; } static int rk3x_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { return rk3x_i2c_xfer_common(adap, msgs, num, false); } static int rk3x_i2c_xfer_polling(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { return rk3x_i2c_xfer_common(adap, msgs, num, true); } static __maybe_unused int rk3x_i2c_resume(struct device *dev) { struct rk3x_i2c *i2c = dev_get_drvdata(dev); rk3x_i2c_adapt_div(i2c, clk_get_rate(i2c->clk)); return 0; } static u32 rk3x_i2c_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING; } static const struct i2c_algorithm rk3x_i2c_algorithm = { .master_xfer = rk3x_i2c_xfer, .master_xfer_atomic = rk3x_i2c_xfer_polling, .functionality = rk3x_i2c_func, }; static const struct rk3x_i2c_soc_data rv1108_soc_data = { .grf_offset = -1, .calc_timings = rk3x_i2c_v1_calc_timings, }; static const struct rk3x_i2c_soc_data rv1126_soc_data = { .grf_offset = 0x118, .calc_timings = rk3x_i2c_v1_calc_timings, }; static const struct rk3x_i2c_soc_data rk3066_soc_data = { .grf_offset = 0x154, .calc_timings = rk3x_i2c_v0_calc_timings, }; static const struct rk3x_i2c_soc_data rk3188_soc_data = { .grf_offset = 0x0a4, .calc_timings = rk3x_i2c_v0_calc_timings, }; static const struct rk3x_i2c_soc_data rk3228_soc_data = { .grf_offset = -1, .calc_timings = rk3x_i2c_v0_calc_timings, }; static const struct rk3x_i2c_soc_data rk3288_soc_data = { .grf_offset = -1, .calc_timings = rk3x_i2c_v0_calc_timings, }; static const struct rk3x_i2c_soc_data rk3399_soc_data = { .grf_offset = -1, .calc_timings = rk3x_i2c_v1_calc_timings, }; static const struct of_device_id rk3x_i2c_match[] = { { .compatible = "rockchip,rv1108-i2c", .data = &rv1108_soc_data }, { .compatible = "rockchip,rv1126-i2c", .data = &rv1126_soc_data }, { .compatible = "rockchip,rk3066-i2c", .data = &rk3066_soc_data }, { .compatible = "rockchip,rk3188-i2c", .data = &rk3188_soc_data }, { .compatible = "rockchip,rk3228-i2c", .data = &rk3228_soc_data }, { .compatible = "rockchip,rk3288-i2c", .data = &rk3288_soc_data }, { .compatible = "rockchip,rk3399-i2c", .data = &rk3399_soc_data }, {}, }; MODULE_DEVICE_TABLE(of, rk3x_i2c_match); static int rk3x_i2c_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; const struct of_device_id *match; struct rk3x_i2c *i2c; int ret = 0; int bus_nr; u32 value; int irq; unsigned long clk_rate; i2c = devm_kzalloc(&pdev->dev, sizeof(struct rk3x_i2c), GFP_KERNEL); if (!i2c) return -ENOMEM; match = of_match_node(rk3x_i2c_match, np); i2c->soc_data = match->data; /* 
use common interface to get I2C timing properties */ i2c_parse_fw_timings(&pdev->dev, &i2c->t, true); strscpy(i2c->adap.name, "rk3x-i2c", sizeof(i2c->adap.name)); i2c->adap.owner = THIS_MODULE; i2c->adap.algo = &rk3x_i2c_algorithm; i2c->adap.retries = 3; i2c->adap.dev.of_node = np; i2c->adap.algo_data = i2c; i2c->adap.dev.parent = &pdev->dev; i2c->dev = &pdev->dev; spin_lock_init(&i2c->lock); init_waitqueue_head(&i2c->wait); i2c->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(i2c->regs)) return PTR_ERR(i2c->regs); /* Try to set the I2C adapter number from dt */ bus_nr = of_alias_get_id(np, "i2c"); /* * Switch to new interface if the SoC also offers the old one. * The control bit is located in the GRF register space. */ if (i2c->soc_data->grf_offset >= 0) { struct regmap *grf; grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf"); if (IS_ERR(grf)) { dev_err(&pdev->dev, "rk3x-i2c needs 'rockchip,grf' property\n"); return PTR_ERR(grf); } if (bus_nr < 0) { dev_err(&pdev->dev, "rk3x-i2c needs i2cX alias"); return -EINVAL; } /* 27+i: write mask, 11+i: value */ value = BIT(27 + bus_nr) | BIT(11 + bus_nr); ret = regmap_write(grf, i2c->soc_data->grf_offset, value); if (ret != 0) { dev_err(i2c->dev, "Could not write to GRF: %d\n", ret); return ret; } } /* IRQ setup */ irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; ret = devm_request_irq(&pdev->dev, irq, rk3x_i2c_irq, 0, dev_name(&pdev->dev), i2c); if (ret < 0) { dev_err(&pdev->dev, "cannot request IRQ\n"); return ret; } platform_set_drvdata(pdev, i2c); if (i2c->soc_data->calc_timings == rk3x_i2c_v0_calc_timings) { /* Only one clock to use for bus clock and peripheral clock */ i2c->clk = devm_clk_get(&pdev->dev, NULL); i2c->pclk = i2c->clk; } else { i2c->clk = devm_clk_get(&pdev->dev, "i2c"); i2c->pclk = devm_clk_get(&pdev->dev, "pclk"); } if (IS_ERR(i2c->clk)) return dev_err_probe(&pdev->dev, PTR_ERR(i2c->clk), "Can't get bus clk\n"); if (IS_ERR(i2c->pclk)) return dev_err_probe(&pdev->dev, PTR_ERR(i2c->pclk), "Can't get periph clk\n"); ret = clk_prepare(i2c->clk); if (ret < 0) { dev_err(&pdev->dev, "Can't prepare bus clk: %d\n", ret); return ret; } ret = clk_prepare(i2c->pclk); if (ret < 0) { dev_err(&pdev->dev, "Can't prepare periph clock: %d\n", ret); goto err_clk; } i2c->clk_rate_nb.notifier_call = rk3x_i2c_clk_notifier_cb; ret = clk_notifier_register(i2c->clk, &i2c->clk_rate_nb); if (ret != 0) { dev_err(&pdev->dev, "Unable to register clock notifier\n"); goto err_pclk; } ret = clk_enable(i2c->clk); if (ret < 0) { dev_err(&pdev->dev, "Can't enable bus clk: %d\n", ret); goto err_clk_notifier; } clk_rate = clk_get_rate(i2c->clk); rk3x_i2c_adapt_div(i2c, clk_rate); clk_disable(i2c->clk); ret = i2c_add_adapter(&i2c->adap); if (ret < 0) goto err_clk_notifier; return 0; err_clk_notifier: clk_notifier_unregister(i2c->clk, &i2c->clk_rate_nb); err_pclk: clk_unprepare(i2c->pclk); err_clk: clk_unprepare(i2c->clk); return ret; } static void rk3x_i2c_remove(struct platform_device *pdev) { struct rk3x_i2c *i2c = platform_get_drvdata(pdev); i2c_del_adapter(&i2c->adap); clk_notifier_unregister(i2c->clk, &i2c->clk_rate_nb); clk_unprepare(i2c->pclk); clk_unprepare(i2c->clk); } static SIMPLE_DEV_PM_OPS(rk3x_i2c_pm_ops, NULL, rk3x_i2c_resume); static struct platform_driver rk3x_i2c_driver = { .probe = rk3x_i2c_probe, .remove_new = rk3x_i2c_remove, .driver = { .name = "rk3x-i2c", .of_match_table = rk3x_i2c_match, .pm = &rk3x_i2c_pm_ops, }, }; module_platform_driver(rk3x_i2c_driver); MODULE_DESCRIPTION("Rockchip RK3xxx I2C Bus driver"); 
MODULE_AUTHOR("Max Schwarz <[email protected]>"); MODULE_LICENSE("GPL v2");
linux-master
drivers/i2c/busses/i2c-rk3x.c
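The v0 timing code above works in units of one eighth of an SCL period: it first derives the minimum total divider from the input clock and the requested SCL rate, then the minimum low and high dividers from the spec hold times, and finally spreads any surplus between the two phases. The sketch below reproduces just that core arithmetic for an assumed 100 MHz input clock and a 100 kHz bus with zero rise/fall times; the repeated-start corrections, the maximum-low clamp and the final "-1" adjustment in rk3x_i2c_v0_calc_timings are deliberately left out.

/*
 * Stand-alone arithmetic sketch of the rk3x v0 divider calculation.
 * Assumptions: 100 MHz input clock, 100 kHz SCL, zero rise/fall times,
 * standard-mode spec constants from the table above.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long clk_rate = 100000000;	/* Hz */
	unsigned long scl_rate = 100000;	/* Hz */
	unsigned long min_low_ns = 4700;	/* standard mode, fall time ~0 */
	unsigned long min_high_ns = 4000;	/* standard mode, rise time ~0 */

	unsigned long clk_rate_khz = DIV_ROUND_UP(clk_rate, 1000);
	unsigned long scl_rate_khz = scl_rate / 1000;

	unsigned long min_total_div = DIV_ROUND_UP(clk_rate_khz, scl_rate_khz * 8);
	unsigned long min_low_div  = DIV_ROUND_UP(clk_rate_khz * min_low_ns, 8 * 1000000);
	unsigned long min_high_div = DIV_ROUND_UP(clk_rate_khz * min_high_ns, 8 * 1000000);

	printf("min_total_div=%lu min_low_div=%lu min_high_div=%lu\n",
	       min_total_div, min_low_div, min_high_div);
	/*
	 * With these numbers: min_total_div = 125, min_low_div = 59,
	 * min_high_div = 50, so 125 - 109 = 16 extra eighth-periods remain
	 * to be distributed between the low and high phases.
	 */
	return 0;
}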
// SPDX-License-Identifier: GPL-2.0-only /* * Driver for the Diolan DLN-2 USB-I2C adapter * * Copyright (c) 2014 Intel Corporation * * Derived from: * i2c-diolan-u2c.c * Copyright (c) 2010-2011 Ericsson AB */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/platform_device.h> #include <linux/mfd/dln2.h> #include <linux/acpi.h> #define DLN2_I2C_MODULE_ID 0x03 #define DLN2_I2C_CMD(cmd) DLN2_CMD(cmd, DLN2_I2C_MODULE_ID) /* I2C commands */ #define DLN2_I2C_GET_PORT_COUNT DLN2_I2C_CMD(0x00) #define DLN2_I2C_ENABLE DLN2_I2C_CMD(0x01) #define DLN2_I2C_DISABLE DLN2_I2C_CMD(0x02) #define DLN2_I2C_IS_ENABLED DLN2_I2C_CMD(0x03) #define DLN2_I2C_WRITE DLN2_I2C_CMD(0x06) #define DLN2_I2C_READ DLN2_I2C_CMD(0x07) #define DLN2_I2C_SCAN_DEVICES DLN2_I2C_CMD(0x08) #define DLN2_I2C_PULLUP_ENABLE DLN2_I2C_CMD(0x09) #define DLN2_I2C_PULLUP_DISABLE DLN2_I2C_CMD(0x0A) #define DLN2_I2C_PULLUP_IS_ENABLED DLN2_I2C_CMD(0x0B) #define DLN2_I2C_TRANSFER DLN2_I2C_CMD(0x0C) #define DLN2_I2C_SET_MAX_REPLY_COUNT DLN2_I2C_CMD(0x0D) #define DLN2_I2C_GET_MAX_REPLY_COUNT DLN2_I2C_CMD(0x0E) #define DLN2_I2C_MAX_XFER_SIZE 256 #define DLN2_I2C_BUF_SIZE (DLN2_I2C_MAX_XFER_SIZE + 16) struct dln2_i2c { struct platform_device *pdev; struct i2c_adapter adapter; u8 port; /* * Buffer to hold the packet for read or write transfers. One is enough * since we can't have multiple transfers in parallel on the i2c bus. */ void *buf; }; static int dln2_i2c_enable(struct dln2_i2c *dln2, bool enable) { u16 cmd; struct { u8 port; } tx; tx.port = dln2->port; if (enable) cmd = DLN2_I2C_ENABLE; else cmd = DLN2_I2C_DISABLE; return dln2_transfer_tx(dln2->pdev, cmd, &tx, sizeof(tx)); } static int dln2_i2c_write(struct dln2_i2c *dln2, u8 addr, u8 *data, u16 data_len) { int ret; struct { u8 port; u8 addr; u8 mem_addr_len; __le32 mem_addr; __le16 buf_len; u8 buf[DLN2_I2C_MAX_XFER_SIZE]; } __packed *tx = dln2->buf; unsigned len; BUILD_BUG_ON(sizeof(*tx) > DLN2_I2C_BUF_SIZE); tx->port = dln2->port; tx->addr = addr; tx->mem_addr_len = 0; tx->mem_addr = 0; tx->buf_len = cpu_to_le16(data_len); memcpy(tx->buf, data, data_len); len = sizeof(*tx) + data_len - DLN2_I2C_MAX_XFER_SIZE; ret = dln2_transfer_tx(dln2->pdev, DLN2_I2C_WRITE, tx, len); if (ret < 0) return ret; return data_len; } static int dln2_i2c_read(struct dln2_i2c *dln2, u16 addr, u8 *data, u16 data_len) { int ret; struct { u8 port; u8 addr; u8 mem_addr_len; __le32 mem_addr; __le16 buf_len; } __packed tx; struct { __le16 buf_len; u8 buf[DLN2_I2C_MAX_XFER_SIZE]; } __packed *rx = dln2->buf; unsigned rx_len = sizeof(*rx); BUILD_BUG_ON(sizeof(*rx) > DLN2_I2C_BUF_SIZE); tx.port = dln2->port; tx.addr = addr; tx.mem_addr_len = 0; tx.mem_addr = 0; tx.buf_len = cpu_to_le16(data_len); ret = dln2_transfer(dln2->pdev, DLN2_I2C_READ, &tx, sizeof(tx), rx, &rx_len); if (ret < 0) return ret; if (rx_len < sizeof(rx->buf_len) + data_len) return -EPROTO; if (le16_to_cpu(rx->buf_len) != data_len) return -EPROTO; memcpy(data, rx->buf, data_len); return data_len; } static int dln2_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) { struct dln2_i2c *dln2 = i2c_get_adapdata(adapter); struct i2c_msg *pmsg; int i; for (i = 0; i < num; i++) { int ret; pmsg = &msgs[i]; if (pmsg->flags & I2C_M_RD) { ret = dln2_i2c_read(dln2, pmsg->addr, pmsg->buf, pmsg->len); if (ret < 0) return ret; pmsg->len = ret; } else { ret = dln2_i2c_write(dln2, pmsg->addr, pmsg->buf, pmsg->len); if (ret != pmsg->len) return -EPROTO; } } return num; 
} static u32 dln2_i2c_func(struct i2c_adapter *a) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_PROC_CALL | I2C_FUNC_SMBUS_I2C_BLOCK; } static const struct i2c_algorithm dln2_i2c_usb_algorithm = { .master_xfer = dln2_i2c_xfer, .functionality = dln2_i2c_func, }; static const struct i2c_adapter_quirks dln2_i2c_quirks = { .max_read_len = DLN2_I2C_MAX_XFER_SIZE, .max_write_len = DLN2_I2C_MAX_XFER_SIZE, }; static int dln2_i2c_probe(struct platform_device *pdev) { int ret; struct dln2_i2c *dln2; struct device *dev = &pdev->dev; struct dln2_platform_data *pdata = dev_get_platdata(&pdev->dev); dln2 = devm_kzalloc(dev, sizeof(*dln2), GFP_KERNEL); if (!dln2) return -ENOMEM; dln2->buf = devm_kmalloc(dev, DLN2_I2C_BUF_SIZE, GFP_KERNEL); if (!dln2->buf) return -ENOMEM; dln2->pdev = pdev; dln2->port = pdata->port; /* setup i2c adapter description */ dln2->adapter.owner = THIS_MODULE; dln2->adapter.class = I2C_CLASS_HWMON; dln2->adapter.algo = &dln2_i2c_usb_algorithm; dln2->adapter.quirks = &dln2_i2c_quirks; dln2->adapter.dev.parent = dev; ACPI_COMPANION_SET(&dln2->adapter.dev, ACPI_COMPANION(&pdev->dev)); dln2->adapter.dev.of_node = dev->of_node; i2c_set_adapdata(&dln2->adapter, dln2); snprintf(dln2->adapter.name, sizeof(dln2->adapter.name), "%s-%s-%d", "dln2-i2c", dev_name(pdev->dev.parent), dln2->port); platform_set_drvdata(pdev, dln2); /* initialize the i2c interface */ ret = dln2_i2c_enable(dln2, true); if (ret < 0) return dev_err_probe(dev, ret, "failed to initialize adapter\n"); /* and finally attach to i2c layer */ ret = i2c_add_adapter(&dln2->adapter); if (ret < 0) goto out_disable; return 0; out_disable: dln2_i2c_enable(dln2, false); return ret; } static void dln2_i2c_remove(struct platform_device *pdev) { struct dln2_i2c *dln2 = platform_get_drvdata(pdev); i2c_del_adapter(&dln2->adapter); dln2_i2c_enable(dln2, false); } static struct platform_driver dln2_i2c_driver = { .driver.name = "dln2-i2c", .probe = dln2_i2c_probe, .remove_new = dln2_i2c_remove, }; module_platform_driver(dln2_i2c_driver); MODULE_AUTHOR("Laurentiu Palcu <[email protected]>"); MODULE_DESCRIPTION("Driver for the Diolan DLN2 I2C master interface"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:dln2-i2c");
linux-master
drivers/i2c/busses/i2c-dln2.c
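dln2_i2c_write() above builds a packed request whose payload length is computed as sizeof(*tx) + data_len - DLN2_I2C_MAX_XFER_SIZE, i.e. the fixed header plus only the bytes actually being written. The stand-alone sketch below mirrors that struct layout (as declared in the driver, not taken from any protocol document) to show the resulting sizes for a hypothetical 4-byte write.

/*
 * Stand-alone sketch of the DLN2_I2C_WRITE payload sizing used above: a
 * packed 9-byte header followed by only data_len bytes of the 256-byte
 * buffer. Mirrors the driver's struct layout for illustration only.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define DLN2_I2C_MAX_XFER_SIZE 256

struct dln2_i2c_write_tx {
	uint8_t  port;
	uint8_t  addr;
	uint8_t  mem_addr_len;
	uint32_t mem_addr;		/* little-endian on the wire */
	uint16_t buf_len;		/* little-endian on the wire */
	uint8_t  buf[DLN2_I2C_MAX_XFER_SIZE];
} __attribute__((packed));

int main(void)
{
	size_t data_len = 4;	/* hypothetical 4-byte register write */
	size_t len = sizeof(struct dln2_i2c_write_tx) + data_len
		     - DLN2_I2C_MAX_XFER_SIZE;

	printf("header %zu bytes, USB payload %zu bytes\n",
	       offsetof(struct dln2_i2c_write_tx, buf), len);	/* 9 and 13 */
	return 0;
}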
/* * CBUS I2C driver for Nokia Internet Tablets. * * Copyright (C) 2004-2010 Nokia Corporation * * Based on code written by Juha Yrjölä, David Weinehall, Mikko Ylinen and * Felipe Balbi. Converted to I2C driver by Aaro Koskinen. * * This file is subject to the terms and conditions of the GNU General * Public License. See the file "COPYING" in the main directory of this * archive for more details. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/io.h> #include <linux/i2c.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/gpio/consumer.h> #include <linux/interrupt.h> #include <linux/platform_device.h> /* * Bit counts are derived from Nokia implementation. These should be checked * if other CBUS implementations appear. */ #define CBUS_ADDR_BITS 3 #define CBUS_REG_BITS 5 struct cbus_host { spinlock_t lock; /* host lock */ struct device *dev; struct gpio_desc *clk; struct gpio_desc *dat; struct gpio_desc *sel; }; /** * cbus_send_bit - sends one bit over the bus * @host: the host we're using * @bit: one bit of information to send */ static void cbus_send_bit(struct cbus_host *host, unsigned bit) { gpiod_set_value(host->dat, bit ? 1 : 0); gpiod_set_value(host->clk, 1); gpiod_set_value(host->clk, 0); } /** * cbus_send_data - sends @len amount of data over the bus * @host: the host we're using * @data: the data to send * @len: size of the transfer */ static void cbus_send_data(struct cbus_host *host, unsigned data, unsigned len) { int i; for (i = len; i > 0; i--) cbus_send_bit(host, data & (1 << (i - 1))); } /** * cbus_receive_bit - receives one bit from the bus * @host: the host we're using */ static int cbus_receive_bit(struct cbus_host *host) { int ret; gpiod_set_value(host->clk, 1); ret = gpiod_get_value(host->dat); gpiod_set_value(host->clk, 0); return ret; } /** * cbus_receive_word - receives 16-bit word from the bus * @host: the host we're using */ static int cbus_receive_word(struct cbus_host *host) { int ret = 0; int i; for (i = 16; i > 0; i--) { int bit = cbus_receive_bit(host); if (bit < 0) return bit; if (bit) ret |= 1 << (i - 1); } return ret; } /** * cbus_transfer - transfers data over the bus * @host: the host we're using * @rw: read/write flag * @dev: device address * @reg: register address * @data: if @rw == I2C_SBUS_WRITE data to send otherwise 0 */ static int cbus_transfer(struct cbus_host *host, char rw, unsigned dev, unsigned reg, unsigned data) { unsigned long flags; int ret; /* We don't want interrupts disturbing our transfer */ spin_lock_irqsave(&host->lock, flags); /* Reset state and start of transfer, SEL stays down during transfer */ gpiod_set_value(host->sel, 0); /* Set the DAT pin to output */ gpiod_direction_output(host->dat, 1); /* Send the device address */ cbus_send_data(host, dev, CBUS_ADDR_BITS); /* Send the rw flag */ cbus_send_bit(host, rw == I2C_SMBUS_READ); /* Send the register address */ cbus_send_data(host, reg, CBUS_REG_BITS); if (rw == I2C_SMBUS_WRITE) { cbus_send_data(host, data, 16); ret = 0; } else { ret = gpiod_direction_input(host->dat); if (ret) { dev_dbg(host->dev, "failed setting direction\n"); goto out; } gpiod_set_value(host->clk, 1); ret = cbus_receive_word(host); if (ret < 0) { dev_dbg(host->dev, "failed receiving data\n"); goto out; } } /* 
Indicate end of transfer, SEL goes up until next transfer */ gpiod_set_value(host->sel, 1); gpiod_set_value(host->clk, 1); gpiod_set_value(host->clk, 0); out: spin_unlock_irqrestore(&host->lock, flags); return ret; } static int cbus_i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { struct cbus_host *chost = i2c_get_adapdata(adapter); int ret; if (size != I2C_SMBUS_WORD_DATA) return -EINVAL; ret = cbus_transfer(chost, read_write == I2C_SMBUS_READ, addr, command, data->word); if (ret < 0) return ret; if (read_write == I2C_SMBUS_READ) data->word = ret; return 0; } static u32 cbus_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_SMBUS_READ_WORD_DATA | I2C_FUNC_SMBUS_WRITE_WORD_DATA; } static const struct i2c_algorithm cbus_i2c_algo = { .smbus_xfer = cbus_i2c_smbus_xfer, .smbus_xfer_atomic = cbus_i2c_smbus_xfer, .functionality = cbus_i2c_func, }; static void cbus_i2c_remove(struct platform_device *pdev) { struct i2c_adapter *adapter = platform_get_drvdata(pdev); i2c_del_adapter(adapter); } static int cbus_i2c_probe(struct platform_device *pdev) { struct i2c_adapter *adapter; struct cbus_host *chost; adapter = devm_kzalloc(&pdev->dev, sizeof(struct i2c_adapter), GFP_KERNEL); if (!adapter) return -ENOMEM; chost = devm_kzalloc(&pdev->dev, sizeof(*chost), GFP_KERNEL); if (!chost) return -ENOMEM; if (gpiod_count(&pdev->dev, NULL) != 3) return -ENODEV; chost->clk = devm_gpiod_get_index(&pdev->dev, NULL, 0, GPIOD_OUT_LOW); if (IS_ERR(chost->clk)) return PTR_ERR(chost->clk); chost->dat = devm_gpiod_get_index(&pdev->dev, NULL, 1, GPIOD_IN); if (IS_ERR(chost->dat)) return PTR_ERR(chost->dat); chost->sel = devm_gpiod_get_index(&pdev->dev, NULL, 2, GPIOD_OUT_HIGH); if (IS_ERR(chost->sel)) return PTR_ERR(chost->sel); gpiod_set_consumer_name(chost->clk, "CBUS clk"); gpiod_set_consumer_name(chost->dat, "CBUS dat"); gpiod_set_consumer_name(chost->sel, "CBUS sel"); adapter->owner = THIS_MODULE; adapter->class = I2C_CLASS_HWMON; adapter->dev.parent = &pdev->dev; adapter->dev.of_node = pdev->dev.of_node; adapter->nr = pdev->id; adapter->timeout = HZ; adapter->algo = &cbus_i2c_algo; strscpy(adapter->name, "CBUS I2C adapter", sizeof(adapter->name)); spin_lock_init(&chost->lock); chost->dev = &pdev->dev; i2c_set_adapdata(adapter, chost); platform_set_drvdata(pdev, adapter); return i2c_add_numbered_adapter(adapter); } #if defined(CONFIG_OF) static const struct of_device_id i2c_cbus_dt_ids[] = { { .compatible = "i2c-cbus-gpio", }, { } }; MODULE_DEVICE_TABLE(of, i2c_cbus_dt_ids); #endif static struct platform_driver cbus_i2c_driver = { .probe = cbus_i2c_probe, .remove_new = cbus_i2c_remove, .driver = { .name = "i2c-cbus-gpio", .of_match_table = of_match_ptr(i2c_cbus_dt_ids), }, }; module_platform_driver(cbus_i2c_driver); MODULE_ALIAS("platform:i2c-cbus-gpio"); MODULE_DESCRIPTION("CBUS I2C driver"); MODULE_AUTHOR("Juha Yrjölä"); MODULE_AUTHOR("David Weinehall"); MODULE_AUTHOR("Mikko Ylinen"); MODULE_AUTHOR("Felipe Balbi"); MODULE_AUTHOR("Aaro Koskinen <[email protected]>"); MODULE_LICENSE("GPL");
linux-master
drivers/i2c/busses/i2c-cbus-gpio.c
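The adapter above only advertises SMBus word-data functionality, so any client reaches it through the 16-bit SMBus helpers. The sketch below is illustrative and not part of the driver: the function name and the register numbers (0x01, 0x02) are made up; it only shows the i2c_smbus_read_word_data()/i2c_smbus_write_word_data() calls that end up in cbus_i2c_smbus_xfer().

/*
 * Hypothetical client sketch: read and write one 16-bit CBUS register
 * through the word-data path implemented by cbus_i2c_smbus_xfer().
 * Register numbers are invented for illustration.
 */
#include <linux/i2c.h>

static int example_cbus_access(struct i2c_client *client)
{
	s32 val;
	int ret;

	val = i2c_smbus_read_word_data(client, 0x01);	/* 5-bit register space */
	if (val < 0)
		return val;

	ret = i2c_smbus_write_word_data(client, 0x02, (u16)val);
	if (ret < 0)
		return ret;

	return 0;
}

Any other SMBus transfer size is rejected with -EINVAL by the xfer handler above.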
// SPDX-License-Identifier: GPL-2.0 /* * I2C driver for the Renesas EMEV2 SoC * * Copyright (C) 2015 Wolfram Sang <[email protected]> * Copyright 2013 Codethink Ltd. * Copyright 2010-2015 Renesas Electronics Corporation */ #include <linux/clk.h> #include <linux/completion.h> #include <linux/device.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/sched.h> /* I2C Registers */ #define I2C_OFS_IICACT0 0x00 /* start */ #define I2C_OFS_IIC0 0x04 /* shift */ #define I2C_OFS_IICC0 0x08 /* control */ #define I2C_OFS_SVA0 0x0c /* slave address */ #define I2C_OFS_IICCL0 0x10 /* clock select */ #define I2C_OFS_IICX0 0x14 /* extension */ #define I2C_OFS_IICS0 0x18 /* status */ #define I2C_OFS_IICSE0 0x1c /* status For emulation */ #define I2C_OFS_IICF0 0x20 /* IIC flag */ /* I2C IICACT0 Masks */ #define I2C_BIT_IICE0 0x0001 /* I2C IICC0 Masks */ #define I2C_BIT_LREL0 0x0040 #define I2C_BIT_WREL0 0x0020 #define I2C_BIT_SPIE0 0x0010 #define I2C_BIT_WTIM0 0x0008 #define I2C_BIT_ACKE0 0x0004 #define I2C_BIT_STT0 0x0002 #define I2C_BIT_SPT0 0x0001 /* I2C IICCL0 Masks */ #define I2C_BIT_SMC0 0x0008 #define I2C_BIT_DFC0 0x0004 /* I2C IICSE0 Masks */ #define I2C_BIT_MSTS0 0x0080 #define I2C_BIT_ALD0 0x0040 #define I2C_BIT_EXC0 0x0020 #define I2C_BIT_COI0 0x0010 #define I2C_BIT_TRC0 0x0008 #define I2C_BIT_ACKD0 0x0004 #define I2C_BIT_STD0 0x0002 #define I2C_BIT_SPD0 0x0001 /* I2C IICF0 Masks */ #define I2C_BIT_STCF 0x0080 #define I2C_BIT_IICBSY 0x0040 #define I2C_BIT_STCEN 0x0002 #define I2C_BIT_IICRSV 0x0001 struct em_i2c_device { void __iomem *base; struct i2c_adapter adap; struct completion msg_done; struct clk *sclk; struct i2c_client *slave; int irq; }; static inline void em_clear_set_bit(struct em_i2c_device *priv, u8 clear, u8 set, u8 reg) { writeb((readb(priv->base + reg) & ~clear) | set, priv->base + reg); } static int em_i2c_wait_for_event(struct em_i2c_device *priv) { unsigned long time_left; int status; reinit_completion(&priv->msg_done); time_left = wait_for_completion_timeout(&priv->msg_done, priv->adap.timeout); if (!time_left) return -ETIMEDOUT; status = readb(priv->base + I2C_OFS_IICSE0); return status & I2C_BIT_ALD0 ? -EAGAIN : status; } static void em_i2c_stop(struct em_i2c_device *priv) { /* Send Stop condition */ em_clear_set_bit(priv, 0, I2C_BIT_SPT0 | I2C_BIT_SPIE0, I2C_OFS_IICC0); /* Wait for stop condition */ em_i2c_wait_for_event(priv); } static void em_i2c_reset(struct i2c_adapter *adap) { struct em_i2c_device *priv = i2c_get_adapdata(adap); int retr; /* If I2C active */ if (readb(priv->base + I2C_OFS_IICACT0) & I2C_BIT_IICE0) { /* Disable I2C operation */ writeb(0, priv->base + I2C_OFS_IICACT0); retr = 1000; while (readb(priv->base + I2C_OFS_IICACT0) == 1 && retr) retr--; WARN_ON(retr == 0); } /* Transfer mode set */ writeb(I2C_BIT_DFC0, priv->base + I2C_OFS_IICCL0); /* Can Issue start without detecting a stop, Reservation disabled. 
*/ writeb(I2C_BIT_STCEN | I2C_BIT_IICRSV, priv->base + I2C_OFS_IICF0); /* I2C enable, 9 bit interrupt mode */ writeb(I2C_BIT_WTIM0, priv->base + I2C_OFS_IICC0); /* Enable I2C operation */ writeb(I2C_BIT_IICE0, priv->base + I2C_OFS_IICACT0); retr = 1000; while (readb(priv->base + I2C_OFS_IICACT0) == 0 && retr) retr--; WARN_ON(retr == 0); } static int __em_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int stop) { struct em_i2c_device *priv = i2c_get_adapdata(adap); int count, status, read = !!(msg->flags & I2C_M_RD); /* Send start condition */ em_clear_set_bit(priv, 0, I2C_BIT_ACKE0 | I2C_BIT_WTIM0, I2C_OFS_IICC0); em_clear_set_bit(priv, 0, I2C_BIT_STT0, I2C_OFS_IICC0); /* Send slave address and R/W type */ writeb(i2c_8bit_addr_from_msg(msg), priv->base + I2C_OFS_IIC0); /* Wait for transaction */ status = em_i2c_wait_for_event(priv); if (status < 0) goto out_reset; /* Received NACK (result of setting slave address and R/W) */ if (!(status & I2C_BIT_ACKD0)) { em_i2c_stop(priv); goto out; } /* Extra setup for read transactions */ if (read) { /* 8 bit interrupt mode */ em_clear_set_bit(priv, I2C_BIT_WTIM0, I2C_BIT_ACKE0, I2C_OFS_IICC0); em_clear_set_bit(priv, I2C_BIT_WTIM0, I2C_BIT_WREL0, I2C_OFS_IICC0); /* Wait for transaction */ status = em_i2c_wait_for_event(priv); if (status < 0) goto out_reset; } /* Send / receive data */ for (count = 0; count < msg->len; count++) { if (read) { /* Read transaction */ msg->buf[count] = readb(priv->base + I2C_OFS_IIC0); em_clear_set_bit(priv, 0, I2C_BIT_WREL0, I2C_OFS_IICC0); } else { /* Write transaction */ /* Received NACK */ if (!(status & I2C_BIT_ACKD0)) { em_i2c_stop(priv); goto out; } /* Write data */ writeb(msg->buf[count], priv->base + I2C_OFS_IIC0); } /* Wait for R/W transaction */ status = em_i2c_wait_for_event(priv); if (status < 0) goto out_reset; } if (stop) em_i2c_stop(priv); return count; out_reset: em_i2c_reset(adap); out: return status < 0 ? status : -ENXIO; } static int em_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct em_i2c_device *priv = i2c_get_adapdata(adap); int ret, i; if (readb(priv->base + I2C_OFS_IICF0) & I2C_BIT_IICBSY) return -EAGAIN; for (i = 0; i < num; i++) { ret = __em_i2c_xfer(adap, &msgs[i], (i == (num - 1))); if (ret < 0) return ret; } /* I2C transfer completed */ return num; } static bool em_i2c_slave_irq(struct em_i2c_device *priv) { u8 status, value; enum i2c_slave_event event; int ret; if (!priv->slave) return false; status = readb(priv->base + I2C_OFS_IICSE0); /* Extension code, do not participate */ if (status & I2C_BIT_EXC0) { em_clear_set_bit(priv, 0, I2C_BIT_LREL0, I2C_OFS_IICC0); return true; } /* Stop detected, we don't know if it's for slave or master */ if (status & I2C_BIT_SPD0) { /* Notify slave device */ i2c_slave_event(priv->slave, I2C_SLAVE_STOP, &value); /* Pretend we did not handle the interrupt */ return false; } /* Only handle interrupts addressed to us */ if (!(status & I2C_BIT_COI0)) return false; /* Enable stop interrupts */ em_clear_set_bit(priv, 0, I2C_BIT_SPIE0, I2C_OFS_IICC0); /* Transmission or Reception */ if (status & I2C_BIT_TRC0) { if (status & I2C_BIT_ACKD0) { /* 9 bit interrupt mode */ em_clear_set_bit(priv, 0, I2C_BIT_WTIM0, I2C_OFS_IICC0); /* Send data */ event = status & I2C_BIT_STD0 ? 
I2C_SLAVE_READ_REQUESTED : I2C_SLAVE_READ_PROCESSED; i2c_slave_event(priv->slave, event, &value); writeb(value, priv->base + I2C_OFS_IIC0); } else { /* NACK, stop transmitting */ em_clear_set_bit(priv, 0, I2C_BIT_LREL0, I2C_OFS_IICC0); } } else { /* 8 bit interrupt mode */ em_clear_set_bit(priv, I2C_BIT_WTIM0, I2C_BIT_ACKE0, I2C_OFS_IICC0); em_clear_set_bit(priv, I2C_BIT_WTIM0, I2C_BIT_WREL0, I2C_OFS_IICC0); if (status & I2C_BIT_STD0) { i2c_slave_event(priv->slave, I2C_SLAVE_WRITE_REQUESTED, &value); } else { /* Recv data */ value = readb(priv->base + I2C_OFS_IIC0); ret = i2c_slave_event(priv->slave, I2C_SLAVE_WRITE_RECEIVED, &value); if (ret < 0) em_clear_set_bit(priv, I2C_BIT_ACKE0, 0, I2C_OFS_IICC0); } } return true; } static irqreturn_t em_i2c_irq_handler(int this_irq, void *dev_id) { struct em_i2c_device *priv = dev_id; if (em_i2c_slave_irq(priv)) return IRQ_HANDLED; complete(&priv->msg_done); return IRQ_HANDLED; } static u32 em_i2c_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SLAVE; } static int em_i2c_reg_slave(struct i2c_client *slave) { struct em_i2c_device *priv = i2c_get_adapdata(slave->adapter); if (priv->slave) return -EBUSY; if (slave->flags & I2C_CLIENT_TEN) return -EAFNOSUPPORT; priv->slave = slave; /* Set slave address */ writeb(slave->addr << 1, priv->base + I2C_OFS_SVA0); return 0; } static int em_i2c_unreg_slave(struct i2c_client *slave) { struct em_i2c_device *priv = i2c_get_adapdata(slave->adapter); WARN_ON(!priv->slave); writeb(0, priv->base + I2C_OFS_SVA0); /* * Wait for interrupt to finish. New slave irqs cannot happen because we * cleared the slave address and, thus, only extension codes will be * detected which do not use the slave ptr. */ synchronize_irq(priv->irq); priv->slave = NULL; return 0; } static const struct i2c_algorithm em_i2c_algo = { .master_xfer = em_i2c_xfer, .functionality = em_i2c_func, .reg_slave = em_i2c_reg_slave, .unreg_slave = em_i2c_unreg_slave, }; static int em_i2c_probe(struct platform_device *pdev) { struct em_i2c_device *priv; int ret; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); strscpy(priv->adap.name, "EMEV2 I2C", sizeof(priv->adap.name)); priv->sclk = devm_clk_get(&pdev->dev, "sclk"); if (IS_ERR(priv->sclk)) return PTR_ERR(priv->sclk); ret = clk_prepare_enable(priv->sclk); if (ret) return ret; priv->adap.timeout = msecs_to_jiffies(100); priv->adap.retries = 5; priv->adap.dev.parent = &pdev->dev; priv->adap.algo = &em_i2c_algo; priv->adap.owner = THIS_MODULE; priv->adap.dev.of_node = pdev->dev.of_node; init_completion(&priv->msg_done); platform_set_drvdata(pdev, priv); i2c_set_adapdata(&priv->adap, priv); em_i2c_reset(&priv->adap); ret = platform_get_irq(pdev, 0); if (ret < 0) goto err_clk; priv->irq = ret; ret = devm_request_irq(&pdev->dev, priv->irq, em_i2c_irq_handler, 0, "em_i2c", priv); if (ret) goto err_clk; ret = i2c_add_adapter(&priv->adap); if (ret) goto err_clk; dev_info(&pdev->dev, "Added i2c controller %d, irq %d\n", priv->adap.nr, priv->irq); return 0; err_clk: clk_disable_unprepare(priv->sclk); return ret; } static void em_i2c_remove(struct platform_device *dev) { struct em_i2c_device *priv = platform_get_drvdata(dev); i2c_del_adapter(&priv->adap); clk_disable_unprepare(priv->sclk); } static const struct of_device_id em_i2c_ids[] = { { .compatible = "renesas,iic-emev2", }, { } }; static struct platform_driver em_i2c_driver = { .probe 
= em_i2c_probe, .remove_new = em_i2c_remove, .driver = { .name = "em-i2c", .of_match_table = em_i2c_ids, } }; module_platform_driver(em_i2c_driver); MODULE_DESCRIPTION("EMEV2 I2C bus driver"); MODULE_AUTHOR("Ian Molton"); MODULE_AUTHOR("Wolfram Sang <[email protected]>"); MODULE_LICENSE("GPL v2"); MODULE_DEVICE_TABLE(of, em_i2c_ids);
linux-master
drivers/i2c/busses/i2c-emev2.c
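Besides master transfers, the EMEV2 controller supports I2C slave mode: em_i2c_slave_irq() translates controller events into i2c_slave_event() calls, which land in a backend callback registered via i2c_slave_register(). The callback below is a minimal, hypothetical backend (a single one-byte "register") and is not part of this driver; it is included only to show where those events go.

/*
 * Illustrative slave-backend callback (assumption, not from this file).
 * The events raised by em_i2c_slave_irq() arrive here once the backend
 * has been registered with i2c_slave_register().
 */
#include <linux/i2c.h>

static u8 example_reg;	/* single hypothetical register */

static int example_slave_cb(struct i2c_client *client,
			    enum i2c_slave_event event, u8 *val)
{
	switch (event) {
	case I2C_SLAVE_READ_REQUESTED:
	case I2C_SLAVE_READ_PROCESSED:
		*val = example_reg;	/* byte the remote master will read */
		break;
	case I2C_SLAVE_WRITE_RECEIVED:
		example_reg = *val;	/* byte the remote master wrote */
		break;
	case I2C_SLAVE_WRITE_REQUESTED:
	case I2C_SLAVE_STOP:
	default:
		break;
	}

	return 0;
}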
// SPDX-License-Identifier: GPL-2.0
/*
 * Intel BayTrail PMIC I2C bus semaphore implementation
 * Copyright (c) 2014, Intel Corporation.
 */
#include <linux/device.h>
#include <linux/acpi.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>

#include <asm/iosf_mbi.h>

#include "i2c-designware-core.h"

int i2c_dw_baytrail_probe_lock_support(struct dw_i2c_dev *dev)
{
	acpi_status status;
	unsigned long long shared_host = 0;
	acpi_handle handle;

	if (!dev)
		return -ENODEV;

	handle = ACPI_HANDLE(dev->dev);
	if (!handle)
		return -ENODEV;

	status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	if (!shared_host)
		return -ENODEV;

	if (!iosf_mbi_available())
		return -EPROBE_DEFER;

	dev_info(dev->dev, "I2C bus managed by PUNIT\n");
	dev->acquire_lock = iosf_mbi_block_punit_i2c_access;
	dev->release_lock = iosf_mbi_unblock_punit_i2c_access;
	dev->shared_with_punit = true;

	return 0;
}
linux-master
drivers/i2c/busses/i2c-designware-baytrail.c
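i2c_dw_baytrail_probe_lock_support() only installs the acquire_lock/release_lock hooks; the DesignWare core is expected to call them around each transfer. The sketch below is an assumption about that call pattern, not code from the core: it shows how a transfer path would bracket hardware access with the two hooks so that the PUNIT and the host never drive the bus at the same time.

/*
 * Sketch only (assumed call pattern): bracket a transfer with the
 * acquire_lock()/release_lock() hooks installed above.
 */
static int example_locked_xfer(struct dw_i2c_dev *dev)
{
	int ret = 0;

	if (dev->acquire_lock) {
		ret = dev->acquire_lock();
		if (ret)
			return ret;
	}

	/* ... program the controller and run the transfer here ... */

	if (dev->release_lock)
		dev->release_lock();

	return ret;
}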
// SPDX-License-Identifier: GPL-2.0-or-later /* * X-Gene SLIMpro I2C Driver * * Copyright (c) 2014, Applied Micro Circuits Corporation * Author: Feng Kan <[email protected]> * Author: Hieu Le <[email protected]> * * This driver provides support for X-Gene SLIMpro I2C device access * using the APM X-Gene SLIMpro mailbox driver. */ #include <acpi/pcc.h> #include <linux/acpi.h> #include <linux/dma-mapping.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/mailbox_client.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #define MAILBOX_OP_TIMEOUT 1000 /* Operation time out in ms */ #define MAILBOX_I2C_INDEX 0 #define SLIMPRO_IIC_BUS 1 /* Use I2C bus 1 only */ #define SMBUS_CMD_LEN 1 #define BYTE_DATA 1 #define WORD_DATA 2 #define BLOCK_DATA 3 #define SLIMPRO_IIC_I2C_PROTOCOL 0 #define SLIMPRO_IIC_SMB_PROTOCOL 1 #define SLIMPRO_IIC_READ 0 #define SLIMPRO_IIC_WRITE 1 #define IIC_SMB_WITHOUT_DATA_LEN 0 #define IIC_SMB_WITH_DATA_LEN 1 #define SLIMPRO_DEBUG_MSG 0 #define SLIMPRO_MSG_TYPE_SHIFT 28 #define SLIMPRO_DBG_SUBTYPE_I2C1READ 4 #define SLIMPRO_DBGMSG_TYPE_SHIFT 24 #define SLIMPRO_DBGMSG_TYPE_MASK 0x0F000000U #define SLIMPRO_IIC_DEV_SHIFT 23 #define SLIMPRO_IIC_DEV_MASK 0x00800000U #define SLIMPRO_IIC_DEVID_SHIFT 13 #define SLIMPRO_IIC_DEVID_MASK 0x007FE000U #define SLIMPRO_IIC_RW_SHIFT 12 #define SLIMPRO_IIC_RW_MASK 0x00001000U #define SLIMPRO_IIC_PROTO_SHIFT 11 #define SLIMPRO_IIC_PROTO_MASK 0x00000800U #define SLIMPRO_IIC_ADDRLEN_SHIFT 8 #define SLIMPRO_IIC_ADDRLEN_MASK 0x00000700U #define SLIMPRO_IIC_DATALEN_SHIFT 0 #define SLIMPRO_IIC_DATALEN_MASK 0x000000FFU /* * SLIMpro I2C message encode * * dev - Controller number (0-based) * chip - I2C chip address * op - SLIMPRO_IIC_READ or SLIMPRO_IIC_WRITE * proto - SLIMPRO_IIC_SMB_PROTOCOL or SLIMPRO_IIC_I2C_PROTOCOL * addrlen - Length of the address field * datalen - Length of the data field */ #define SLIMPRO_IIC_ENCODE_MSG(dev, chip, op, proto, addrlen, datalen) \ ((SLIMPRO_DEBUG_MSG << SLIMPRO_MSG_TYPE_SHIFT) | \ ((SLIMPRO_DBG_SUBTYPE_I2C1READ << SLIMPRO_DBGMSG_TYPE_SHIFT) & \ SLIMPRO_DBGMSG_TYPE_MASK) | \ ((dev << SLIMPRO_IIC_DEV_SHIFT) & SLIMPRO_IIC_DEV_MASK) | \ ((chip << SLIMPRO_IIC_DEVID_SHIFT) & SLIMPRO_IIC_DEVID_MASK) | \ ((op << SLIMPRO_IIC_RW_SHIFT) & SLIMPRO_IIC_RW_MASK) | \ ((proto << SLIMPRO_IIC_PROTO_SHIFT) & SLIMPRO_IIC_PROTO_MASK) | \ ((addrlen << SLIMPRO_IIC_ADDRLEN_SHIFT) & SLIMPRO_IIC_ADDRLEN_MASK) | \ ((datalen << SLIMPRO_IIC_DATALEN_SHIFT) & SLIMPRO_IIC_DATALEN_MASK)) #define SLIMPRO_MSG_TYPE(v) (((v) & 0xF0000000) >> 28) /* * Encode for upper address for block data */ #define SLIMPRO_IIC_ENCODE_FLAG_BUFADDR 0x80000000 #define SLIMPRO_IIC_ENCODE_FLAG_WITH_DATA_LEN(a) ((u32) (((a) << 30) \ & 0x40000000)) #define SLIMPRO_IIC_ENCODE_UPPER_BUFADDR(a) ((u32) (((a) >> 12) \ & 0x3FF00000)) #define SLIMPRO_IIC_ENCODE_ADDR(a) ((a) & 0x000FFFFF) #define SLIMPRO_IIC_MSG_DWORD_COUNT 3 /* PCC related defines */ #define PCC_SIGNATURE 0x50424300 #define PCC_STS_CMD_COMPLETE BIT(0) #define PCC_STS_SCI_DOORBELL BIT(1) #define PCC_STS_ERR BIT(2) #define PCC_STS_PLAT_NOTIFY BIT(3) #define PCC_CMD_GENERATE_DB_INT BIT(15) struct slimpro_i2c_dev { struct i2c_adapter adapter; struct device *dev; struct mbox_chan *mbox_chan; struct pcc_mbox_chan *pcc_chan; struct mbox_client mbox_client; int mbox_idx; struct completion rd_complete; u8 dma_buffer[I2C_SMBUS_BLOCK_MAX + 1]; /* dma_buffer[0] is used for length */ u32 *resp_msg; phys_addr_t comm_base_addr; void 
*pcc_comm_addr; }; #define to_slimpro_i2c_dev(cl) \ container_of(cl, struct slimpro_i2c_dev, mbox_client) enum slimpro_i2c_version { XGENE_SLIMPRO_I2C_V1 = 0, XGENE_SLIMPRO_I2C_V2 = 1, }; /* * This function tests and clears a bitmask then returns its old value */ static u16 xgene_word_tst_and_clr(u16 *addr, u16 mask) { u16 ret, val; val = le16_to_cpu(READ_ONCE(*addr)); ret = val & mask; val &= ~mask; WRITE_ONCE(*addr, cpu_to_le16(val)); return ret; } static void slimpro_i2c_rx_cb(struct mbox_client *cl, void *mssg) { struct slimpro_i2c_dev *ctx = to_slimpro_i2c_dev(cl); /* * Response message format: * mssg[0] is the return code of the operation * mssg[1] is the first data word * mssg[2] is NOT used */ if (ctx->resp_msg) *ctx->resp_msg = ((u32 *)mssg)[1]; if (ctx->mbox_client.tx_block) complete(&ctx->rd_complete); } static void slimpro_i2c_pcc_rx_cb(struct mbox_client *cl, void *msg) { struct slimpro_i2c_dev *ctx = to_slimpro_i2c_dev(cl); struct acpi_pcct_shared_memory *generic_comm_base = ctx->pcc_comm_addr; /* Check if platform sends interrupt */ if (!xgene_word_tst_and_clr(&generic_comm_base->status, PCC_STS_SCI_DOORBELL)) return; if (xgene_word_tst_and_clr(&generic_comm_base->status, PCC_STS_CMD_COMPLETE)) { msg = generic_comm_base + 1; /* Response message msg[1] contains the return value. */ if (ctx->resp_msg) *ctx->resp_msg = ((u32 *)msg)[1]; complete(&ctx->rd_complete); } } static void slimpro_i2c_pcc_tx_prepare(struct slimpro_i2c_dev *ctx, u32 *msg) { struct acpi_pcct_shared_memory *generic_comm_base = ctx->pcc_comm_addr; u32 *ptr = (void *)(generic_comm_base + 1); u16 status; int i; WRITE_ONCE(generic_comm_base->signature, cpu_to_le32(PCC_SIGNATURE | ctx->mbox_idx)); WRITE_ONCE(generic_comm_base->command, cpu_to_le16(SLIMPRO_MSG_TYPE(msg[0]) | PCC_CMD_GENERATE_DB_INT)); status = le16_to_cpu(READ_ONCE(generic_comm_base->status)); status &= ~PCC_STS_CMD_COMPLETE; WRITE_ONCE(generic_comm_base->status, cpu_to_le16(status)); /* Copy the message to the PCC comm space */ for (i = 0; i < SLIMPRO_IIC_MSG_DWORD_COUNT; i++) WRITE_ONCE(ptr[i], cpu_to_le32(msg[i])); } static int start_i2c_msg_xfer(struct slimpro_i2c_dev *ctx) { if (ctx->mbox_client.tx_block || !acpi_disabled) { if (!wait_for_completion_timeout(&ctx->rd_complete, msecs_to_jiffies(MAILBOX_OP_TIMEOUT))) return -ETIMEDOUT; } /* Check of invalid data or no device */ if (*ctx->resp_msg == 0xffffffff) return -ENODEV; return 0; } static int slimpro_i2c_send_msg(struct slimpro_i2c_dev *ctx, u32 *msg, u32 *data) { int rc; ctx->resp_msg = data; if (!acpi_disabled) { reinit_completion(&ctx->rd_complete); slimpro_i2c_pcc_tx_prepare(ctx, msg); } rc = mbox_send_message(ctx->mbox_chan, msg); if (rc < 0) goto err; rc = start_i2c_msg_xfer(ctx); err: if (!acpi_disabled) mbox_chan_txdone(ctx->mbox_chan, 0); ctx->resp_msg = NULL; return rc; } static int slimpro_i2c_rd(struct slimpro_i2c_dev *ctx, u32 chip, u32 addr, u32 addrlen, u32 protocol, u32 readlen, u32 *data) { u32 msg[3]; msg[0] = SLIMPRO_IIC_ENCODE_MSG(SLIMPRO_IIC_BUS, chip, SLIMPRO_IIC_READ, protocol, addrlen, readlen); msg[1] = SLIMPRO_IIC_ENCODE_ADDR(addr); msg[2] = 0; return slimpro_i2c_send_msg(ctx, msg, data); } static int slimpro_i2c_wr(struct slimpro_i2c_dev *ctx, u32 chip, u32 addr, u32 addrlen, u32 protocol, u32 writelen, u32 data) { u32 msg[3]; msg[0] = SLIMPRO_IIC_ENCODE_MSG(SLIMPRO_IIC_BUS, chip, SLIMPRO_IIC_WRITE, protocol, addrlen, writelen); msg[1] = SLIMPRO_IIC_ENCODE_ADDR(addr); msg[2] = data; return slimpro_i2c_send_msg(ctx, msg, msg); } static int 
slimpro_i2c_blkrd(struct slimpro_i2c_dev *ctx, u32 chip, u32 addr, u32 addrlen, u32 protocol, u32 readlen, u32 with_data_len, void *data) { dma_addr_t paddr; u32 msg[3]; int rc; paddr = dma_map_single(ctx->dev, ctx->dma_buffer, readlen, DMA_FROM_DEVICE); if (dma_mapping_error(ctx->dev, paddr)) { dev_err(&ctx->adapter.dev, "Error in mapping dma buffer %p\n", ctx->dma_buffer); return -ENOMEM; } msg[0] = SLIMPRO_IIC_ENCODE_MSG(SLIMPRO_IIC_BUS, chip, SLIMPRO_IIC_READ, protocol, addrlen, readlen); msg[1] = SLIMPRO_IIC_ENCODE_FLAG_BUFADDR | SLIMPRO_IIC_ENCODE_FLAG_WITH_DATA_LEN(with_data_len) | SLIMPRO_IIC_ENCODE_UPPER_BUFADDR(paddr) | SLIMPRO_IIC_ENCODE_ADDR(addr); msg[2] = (u32)paddr; rc = slimpro_i2c_send_msg(ctx, msg, msg); /* Copy to destination */ memcpy(data, ctx->dma_buffer, readlen); dma_unmap_single(ctx->dev, paddr, readlen, DMA_FROM_DEVICE); return rc; } static int slimpro_i2c_blkwr(struct slimpro_i2c_dev *ctx, u32 chip, u32 addr, u32 addrlen, u32 protocol, u32 writelen, void *data) { dma_addr_t paddr; u32 msg[3]; int rc; if (writelen > I2C_SMBUS_BLOCK_MAX) return -EINVAL; memcpy(ctx->dma_buffer, data, writelen); paddr = dma_map_single(ctx->dev, ctx->dma_buffer, writelen, DMA_TO_DEVICE); if (dma_mapping_error(ctx->dev, paddr)) { dev_err(&ctx->adapter.dev, "Error in mapping dma buffer %p\n", ctx->dma_buffer); return -ENOMEM; } msg[0] = SLIMPRO_IIC_ENCODE_MSG(SLIMPRO_IIC_BUS, chip, SLIMPRO_IIC_WRITE, protocol, addrlen, writelen); msg[1] = SLIMPRO_IIC_ENCODE_FLAG_BUFADDR | SLIMPRO_IIC_ENCODE_UPPER_BUFADDR(paddr) | SLIMPRO_IIC_ENCODE_ADDR(addr); msg[2] = (u32)paddr; if (ctx->mbox_client.tx_block) reinit_completion(&ctx->rd_complete); rc = slimpro_i2c_send_msg(ctx, msg, msg); dma_unmap_single(ctx->dev, paddr, writelen, DMA_TO_DEVICE); return rc; } static int xgene_slimpro_i2c_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { struct slimpro_i2c_dev *ctx = i2c_get_adapdata(adap); int ret = -EOPNOTSUPP; u32 val; switch (size) { case I2C_SMBUS_BYTE: if (read_write == I2C_SMBUS_READ) { ret = slimpro_i2c_rd(ctx, addr, 0, 0, SLIMPRO_IIC_SMB_PROTOCOL, BYTE_DATA, &val); data->byte = val; } else { ret = slimpro_i2c_wr(ctx, addr, command, SMBUS_CMD_LEN, SLIMPRO_IIC_SMB_PROTOCOL, 0, 0); } break; case I2C_SMBUS_BYTE_DATA: if (read_write == I2C_SMBUS_READ) { ret = slimpro_i2c_rd(ctx, addr, command, SMBUS_CMD_LEN, SLIMPRO_IIC_SMB_PROTOCOL, BYTE_DATA, &val); data->byte = val; } else { val = data->byte; ret = slimpro_i2c_wr(ctx, addr, command, SMBUS_CMD_LEN, SLIMPRO_IIC_SMB_PROTOCOL, BYTE_DATA, val); } break; case I2C_SMBUS_WORD_DATA: if (read_write == I2C_SMBUS_READ) { ret = slimpro_i2c_rd(ctx, addr, command, SMBUS_CMD_LEN, SLIMPRO_IIC_SMB_PROTOCOL, WORD_DATA, &val); data->word = val; } else { val = data->word; ret = slimpro_i2c_wr(ctx, addr, command, SMBUS_CMD_LEN, SLIMPRO_IIC_SMB_PROTOCOL, WORD_DATA, val); } break; case I2C_SMBUS_BLOCK_DATA: if (read_write == I2C_SMBUS_READ) { ret = slimpro_i2c_blkrd(ctx, addr, command, SMBUS_CMD_LEN, SLIMPRO_IIC_SMB_PROTOCOL, I2C_SMBUS_BLOCK_MAX + 1, IIC_SMB_WITH_DATA_LEN, &data->block[0]); } else { ret = slimpro_i2c_blkwr(ctx, addr, command, SMBUS_CMD_LEN, SLIMPRO_IIC_SMB_PROTOCOL, data->block[0] + 1, &data->block[0]); } break; case I2C_SMBUS_I2C_BLOCK_DATA: if (read_write == I2C_SMBUS_READ) { ret = slimpro_i2c_blkrd(ctx, addr, command, SMBUS_CMD_LEN, SLIMPRO_IIC_I2C_PROTOCOL, I2C_SMBUS_BLOCK_MAX, IIC_SMB_WITHOUT_DATA_LEN, &data->block[1]); } else { ret = slimpro_i2c_blkwr(ctx, addr, 
command, SMBUS_CMD_LEN, SLIMPRO_IIC_I2C_PROTOCOL, data->block[0], &data->block[1]); } break; default: break; } return ret; } /* * Return list of supported functionality. */ static u32 xgene_slimpro_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_SMBUS_I2C_BLOCK; } static const struct i2c_algorithm xgene_slimpro_i2c_algorithm = { .smbus_xfer = xgene_slimpro_i2c_xfer, .functionality = xgene_slimpro_i2c_func, }; static int xgene_slimpro_i2c_probe(struct platform_device *pdev) { struct slimpro_i2c_dev *ctx; struct i2c_adapter *adapter; struct mbox_client *cl; int rc; ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; ctx->dev = &pdev->dev; platform_set_drvdata(pdev, ctx); cl = &ctx->mbox_client; /* Request mailbox channel */ cl->dev = &pdev->dev; init_completion(&ctx->rd_complete); cl->tx_tout = MAILBOX_OP_TIMEOUT; cl->knows_txdone = false; if (acpi_disabled) { cl->tx_block = true; cl->rx_callback = slimpro_i2c_rx_cb; ctx->mbox_chan = mbox_request_channel(cl, MAILBOX_I2C_INDEX); if (IS_ERR(ctx->mbox_chan)) { dev_err(&pdev->dev, "i2c mailbox channel request failed\n"); return PTR_ERR(ctx->mbox_chan); } } else { struct pcc_mbox_chan *pcc_chan; const struct acpi_device_id *acpi_id; int version = XGENE_SLIMPRO_I2C_V1; acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev); if (!acpi_id) return -EINVAL; version = (int)acpi_id->driver_data; if (device_property_read_u32(&pdev->dev, "pcc-channel", &ctx->mbox_idx)) ctx->mbox_idx = MAILBOX_I2C_INDEX; cl->tx_block = false; cl->rx_callback = slimpro_i2c_pcc_rx_cb; pcc_chan = pcc_mbox_request_channel(cl, ctx->mbox_idx); if (IS_ERR(pcc_chan)) { dev_err(&pdev->dev, "PCC mailbox channel request failed\n"); return PTR_ERR(pcc_chan); } ctx->pcc_chan = pcc_chan; ctx->mbox_chan = pcc_chan->mchan; if (!ctx->mbox_chan->mbox->txdone_irq) { dev_err(&pdev->dev, "PCC IRQ not supported\n"); rc = -ENOENT; goto mbox_err; } /* * This is the shared communication region * for the OS and Platform to communicate over. 
*/ ctx->comm_base_addr = pcc_chan->shmem_base_addr; if (ctx->comm_base_addr) { if (version == XGENE_SLIMPRO_I2C_V2) ctx->pcc_comm_addr = memremap( ctx->comm_base_addr, pcc_chan->shmem_size, MEMREMAP_WT); else ctx->pcc_comm_addr = memremap( ctx->comm_base_addr, pcc_chan->shmem_size, MEMREMAP_WB); } else { dev_err(&pdev->dev, "Failed to get PCC comm region\n"); rc = -ENOENT; goto mbox_err; } if (!ctx->pcc_comm_addr) { dev_err(&pdev->dev, "Failed to ioremap PCC comm region\n"); rc = -ENOMEM; goto mbox_err; } } rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (rc) dev_warn(&pdev->dev, "Unable to set dma mask\n"); /* Setup I2C adapter */ adapter = &ctx->adapter; snprintf(adapter->name, sizeof(adapter->name), "MAILBOX I2C"); adapter->algo = &xgene_slimpro_i2c_algorithm; adapter->class = I2C_CLASS_HWMON; adapter->dev.parent = &pdev->dev; adapter->dev.of_node = pdev->dev.of_node; ACPI_COMPANION_SET(&adapter->dev, ACPI_COMPANION(&pdev->dev)); i2c_set_adapdata(adapter, ctx); rc = i2c_add_adapter(adapter); if (rc) goto mbox_err; dev_info(&pdev->dev, "Mailbox I2C Adapter registered\n"); return 0; mbox_err: if (acpi_disabled) mbox_free_channel(ctx->mbox_chan); else pcc_mbox_free_channel(ctx->pcc_chan); return rc; } static void xgene_slimpro_i2c_remove(struct platform_device *pdev) { struct slimpro_i2c_dev *ctx = platform_get_drvdata(pdev); i2c_del_adapter(&ctx->adapter); if (acpi_disabled) mbox_free_channel(ctx->mbox_chan); else pcc_mbox_free_channel(ctx->pcc_chan); } static const struct of_device_id xgene_slimpro_i2c_dt_ids[] = { {.compatible = "apm,xgene-slimpro-i2c" }, {}, }; MODULE_DEVICE_TABLE(of, xgene_slimpro_i2c_dt_ids); #ifdef CONFIG_ACPI static const struct acpi_device_id xgene_slimpro_i2c_acpi_ids[] = { {"APMC0D40", XGENE_SLIMPRO_I2C_V1}, {"APMC0D8B", XGENE_SLIMPRO_I2C_V2}, {} }; MODULE_DEVICE_TABLE(acpi, xgene_slimpro_i2c_acpi_ids); #endif static struct platform_driver xgene_slimpro_i2c_driver = { .probe = xgene_slimpro_i2c_probe, .remove_new = xgene_slimpro_i2c_remove, .driver = { .name = "xgene-slimpro-i2c", .of_match_table = of_match_ptr(xgene_slimpro_i2c_dt_ids), .acpi_match_table = ACPI_PTR(xgene_slimpro_i2c_acpi_ids) }, }; module_platform_driver(xgene_slimpro_i2c_driver); MODULE_DESCRIPTION("APM X-Gene SLIMpro I2C driver"); MODULE_AUTHOR("Feng Kan <[email protected]>"); MODULE_AUTHOR("Hieu Le <[email protected]>"); MODULE_LICENSE("GPL");
linux-master
drivers/i2c/busses/i2c-xgene-slimpro.c
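SLIMPRO_IIC_ENCODE_MSG() packs the bus number, chip address, direction, protocol, address length and data length into the first mailbox word. As a worked example (the chip address 0x50 is illustrative), the helper below builds the word that slimpro_i2c_rd() would send for a one-byte SMBus read; msg[1] then carries the command/register through SLIMPRO_IIC_ENCODE_ADDR().

/*
 * Worked example, assuming the SLIMPRO_* macros defined above: first
 * mailbox word for a one-byte SMBus read from a hypothetical chip 0x50.
 */
static u32 example_encode_byte_read(void)
{
	return SLIMPRO_IIC_ENCODE_MSG(SLIMPRO_IIC_BUS, 0x50,
				      SLIMPRO_IIC_READ,
				      SLIMPRO_IIC_SMB_PROTOCOL,
				      SMBUS_CMD_LEN, BYTE_DATA);
}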
// SPDX-License-Identifier: GPL-2.0-only /* ------------------------------------------------------------------------- */ /* i2c-iop3xx.c i2c driver algorithms for Intel XScale IOP3xx & IXP46x */ /* ------------------------------------------------------------------------- */ /* Copyright (C) 2003 Peter Milne, D-TACQ Solutions Ltd * <Peter dot Milne at D hyphen TACQ dot com> * * With acknowledgements to i2c-algo-ibm_ocp.c by * Ian DaSilva, MontaVista Software, Inc. [email protected] * * And i2c-algo-pcf.c, which was created by Simon G. Vogl and Hans Berglund: * * Copyright (C) 1995-1997 Simon G. Vogl, 1998-2000 Hans Berglund * * And which acknowledged Kyösti Mälkki <[email protected]>, * Frodo Looijaard <[email protected]>, Martin Bailey<[email protected]> * * Major cleanup by Deepak Saxena <[email protected]>, 01/2005: * * - Use driver model to pass per-chip info instead of hardcoding and #ifdefs * - Use ioremap/__raw_readl/__raw_writel instead of direct dereference * - Make it work with IXP46x chips * - Cleanup function names, coding style, etc * * - writing to slave address causes latchup on iop331. * fix: driver refuses to address self. */ #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/platform_device.h> #include <linux/i2c.h> #include <linux/io.h> #include <linux/gpio/consumer.h> #include "i2c-iop3xx.h" /* global unit counter */ static int i2c_id; static inline unsigned char iic_cook_addr(struct i2c_msg *msg) { unsigned char addr; addr = i2c_8bit_addr_from_msg(msg); return addr; } static void iop3xx_i2c_reset(struct i2c_algo_iop3xx_data *iop3xx_adap) { /* Follows devman 9.3 */ __raw_writel(IOP3XX_ICR_UNIT_RESET, iop3xx_adap->ioaddr + CR_OFFSET); __raw_writel(IOP3XX_ISR_CLEARBITS, iop3xx_adap->ioaddr + SR_OFFSET); __raw_writel(0, iop3xx_adap->ioaddr + CR_OFFSET); } static void iop3xx_i2c_enable(struct i2c_algo_iop3xx_data *iop3xx_adap) { u32 cr = IOP3XX_ICR_GCD | IOP3XX_ICR_SCLEN | IOP3XX_ICR_UE; /* * Every time unit enable is asserted, GPOD needs to be cleared * on IOP3XX to avoid data corruption on the bus. We use the * gpiod_set_raw_value() to make sure the 0 hits the hardware * GPOD register. These descriptors are only passed along to * the device if this is necessary. */ if (iop3xx_adap->gpio_scl) gpiod_set_raw_value(iop3xx_adap->gpio_scl, 0); if (iop3xx_adap->gpio_sda) gpiod_set_raw_value(iop3xx_adap->gpio_sda, 0); /* NB SR bits not same position as CR IE bits :-( */ iop3xx_adap->SR_enabled = IOP3XX_ISR_ALD | IOP3XX_ISR_BERRD | IOP3XX_ISR_RXFULL | IOP3XX_ISR_TXEMPTY; cr |= IOP3XX_ICR_ALD_IE | IOP3XX_ICR_BERR_IE | IOP3XX_ICR_RXFULL_IE | IOP3XX_ICR_TXEMPTY_IE; __raw_writel(cr, iop3xx_adap->ioaddr + CR_OFFSET); } static void iop3xx_i2c_transaction_cleanup(struct i2c_algo_iop3xx_data *iop3xx_adap) { unsigned long cr = __raw_readl(iop3xx_adap->ioaddr + CR_OFFSET); cr &= ~(IOP3XX_ICR_MSTART | IOP3XX_ICR_TBYTE | IOP3XX_ICR_MSTOP | IOP3XX_ICR_SCLEN); __raw_writel(cr, iop3xx_adap->ioaddr + CR_OFFSET); } /* * NB: the handler has to clear the source of the interrupt! 
* Then it passes the SR flags of interest to BH via adap data */ static irqreturn_t iop3xx_i2c_irq_handler(int this_irq, void *dev_id) { struct i2c_algo_iop3xx_data *iop3xx_adap = dev_id; u32 sr = __raw_readl(iop3xx_adap->ioaddr + SR_OFFSET); if ((sr &= iop3xx_adap->SR_enabled)) { __raw_writel(sr, iop3xx_adap->ioaddr + SR_OFFSET); iop3xx_adap->SR_received |= sr; wake_up_interruptible(&iop3xx_adap->waitq); } return IRQ_HANDLED; } /* check all error conditions, clear them , report most important */ static int iop3xx_i2c_error(u32 sr) { int rc = 0; if ((sr & IOP3XX_ISR_BERRD)) { if (!rc) rc = -I2C_ERR_BERR; } if ((sr & IOP3XX_ISR_ALD)) { if (!rc) rc = -I2C_ERR_ALD; } return rc; } static inline u32 iop3xx_i2c_get_srstat(struct i2c_algo_iop3xx_data *iop3xx_adap) { unsigned long flags; u32 sr; spin_lock_irqsave(&iop3xx_adap->lock, flags); sr = iop3xx_adap->SR_received; iop3xx_adap->SR_received = 0; spin_unlock_irqrestore(&iop3xx_adap->lock, flags); return sr; } /* * sleep until interrupted, then recover and analyse the SR * saved by handler */ typedef int (*compare_func)(unsigned test, unsigned mask); /* returns 1 on correct comparison */ static int iop3xx_i2c_wait_event(struct i2c_algo_iop3xx_data *iop3xx_adap, unsigned flags, unsigned *status, compare_func compare) { unsigned sr = 0; int interrupted; int done; int rc = 0; do { interrupted = wait_event_interruptible_timeout ( iop3xx_adap->waitq, (done = compare(sr = iop3xx_i2c_get_srstat(iop3xx_adap), flags)), 1 * HZ ); if ((rc = iop3xx_i2c_error(sr)) < 0) { *status = sr; return rc; } else if (!interrupted) { *status = sr; return -ETIMEDOUT; } } while (!done); *status = sr; return 0; } /* * Concrete compare_funcs */ static int all_bits_clear(unsigned test, unsigned mask) { return (test & mask) == 0; } static int any_bits_set(unsigned test, unsigned mask) { return (test & mask) != 0; } static int iop3xx_i2c_wait_tx_done(struct i2c_algo_iop3xx_data *iop3xx_adap, int *status) { return iop3xx_i2c_wait_event( iop3xx_adap, IOP3XX_ISR_TXEMPTY | IOP3XX_ISR_ALD | IOP3XX_ISR_BERRD, status, any_bits_set); } static int iop3xx_i2c_wait_rx_done(struct i2c_algo_iop3xx_data *iop3xx_adap, int *status) { return iop3xx_i2c_wait_event( iop3xx_adap, IOP3XX_ISR_RXFULL | IOP3XX_ISR_ALD | IOP3XX_ISR_BERRD, status, any_bits_set); } static int iop3xx_i2c_wait_idle(struct i2c_algo_iop3xx_data *iop3xx_adap, int *status) { return iop3xx_i2c_wait_event( iop3xx_adap, IOP3XX_ISR_UNITBUSY, status, all_bits_clear); } static int iop3xx_i2c_send_target_addr(struct i2c_algo_iop3xx_data *iop3xx_adap, struct i2c_msg *msg) { unsigned long cr = __raw_readl(iop3xx_adap->ioaddr + CR_OFFSET); int status; int rc; /* avoid writing to my slave address (hangs on 80331), * forbidden in Intel developer manual */ if (msg->addr == MYSAR) { return -EBUSY; } __raw_writel(iic_cook_addr(msg), iop3xx_adap->ioaddr + DBR_OFFSET); cr &= ~(IOP3XX_ICR_MSTOP | IOP3XX_ICR_NACK); cr |= IOP3XX_ICR_MSTART | IOP3XX_ICR_TBYTE; __raw_writel(cr, iop3xx_adap->ioaddr + CR_OFFSET); rc = iop3xx_i2c_wait_tx_done(iop3xx_adap, &status); return rc; } static int iop3xx_i2c_write_byte(struct i2c_algo_iop3xx_data *iop3xx_adap, char byte, int stop) { unsigned long cr = __raw_readl(iop3xx_adap->ioaddr + CR_OFFSET); int status; int rc = 0; __raw_writel(byte, iop3xx_adap->ioaddr + DBR_OFFSET); cr &= ~IOP3XX_ICR_MSTART; if (stop) { cr |= IOP3XX_ICR_MSTOP; } else { cr &= ~IOP3XX_ICR_MSTOP; } cr |= IOP3XX_ICR_TBYTE; __raw_writel(cr, iop3xx_adap->ioaddr + CR_OFFSET); rc = iop3xx_i2c_wait_tx_done(iop3xx_adap, &status); return rc; } 
static int iop3xx_i2c_read_byte(struct i2c_algo_iop3xx_data *iop3xx_adap, char *byte, int stop) { unsigned long cr = __raw_readl(iop3xx_adap->ioaddr + CR_OFFSET); int status; int rc = 0; cr &= ~IOP3XX_ICR_MSTART; if (stop) { cr |= IOP3XX_ICR_MSTOP | IOP3XX_ICR_NACK; } else { cr &= ~(IOP3XX_ICR_MSTOP | IOP3XX_ICR_NACK); } cr |= IOP3XX_ICR_TBYTE; __raw_writel(cr, iop3xx_adap->ioaddr + CR_OFFSET); rc = iop3xx_i2c_wait_rx_done(iop3xx_adap, &status); *byte = __raw_readl(iop3xx_adap->ioaddr + DBR_OFFSET); return rc; } static int iop3xx_i2c_writebytes(struct i2c_adapter *i2c_adap, const char *buf, int count) { struct i2c_algo_iop3xx_data *iop3xx_adap = i2c_adap->algo_data; int ii; int rc = 0; for (ii = 0; rc == 0 && ii != count; ++ii) rc = iop3xx_i2c_write_byte(iop3xx_adap, buf[ii], ii == count-1); return rc; } static int iop3xx_i2c_readbytes(struct i2c_adapter *i2c_adap, char *buf, int count) { struct i2c_algo_iop3xx_data *iop3xx_adap = i2c_adap->algo_data; int ii; int rc = 0; for (ii = 0; rc == 0 && ii != count; ++ii) rc = iop3xx_i2c_read_byte(iop3xx_adap, &buf[ii], ii == count-1); return rc; } /* * Description: This function implements combined transactions. Combined * transactions consist of combinations of reading and writing blocks of data. * FROM THE SAME ADDRESS * Each transfer (i.e. a read or a write) is separated by a repeated start * condition. */ static int iop3xx_i2c_handle_msg(struct i2c_adapter *i2c_adap, struct i2c_msg *pmsg) { struct i2c_algo_iop3xx_data *iop3xx_adap = i2c_adap->algo_data; int rc; rc = iop3xx_i2c_send_target_addr(iop3xx_adap, pmsg); if (rc < 0) { return rc; } if ((pmsg->flags&I2C_M_RD)) { return iop3xx_i2c_readbytes(i2c_adap, pmsg->buf, pmsg->len); } else { return iop3xx_i2c_writebytes(i2c_adap, pmsg->buf, pmsg->len); } } /* * master_xfer() - main read/write entry */ static int iop3xx_i2c_master_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, int num) { struct i2c_algo_iop3xx_data *iop3xx_adap = i2c_adap->algo_data; int im = 0; int ret = 0; int status; iop3xx_i2c_wait_idle(iop3xx_adap, &status); iop3xx_i2c_reset(iop3xx_adap); iop3xx_i2c_enable(iop3xx_adap); for (im = 0; ret == 0 && im != num; im++) { ret = iop3xx_i2c_handle_msg(i2c_adap, &msgs[im]); } iop3xx_i2c_transaction_cleanup(iop3xx_adap); if (ret) return ret; return im; } static u32 iop3xx_i2c_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static const struct i2c_algorithm iop3xx_i2c_algo = { .master_xfer = iop3xx_i2c_master_xfer, .functionality = iop3xx_i2c_func, }; static void iop3xx_i2c_remove(struct platform_device *pdev) { struct i2c_adapter *padapter = platform_get_drvdata(pdev); struct i2c_algo_iop3xx_data *adapter_data = (struct i2c_algo_iop3xx_data *)padapter->algo_data; struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); unsigned long cr = __raw_readl(adapter_data->ioaddr + CR_OFFSET); /* * Disable the actual HW unit */ cr &= ~(IOP3XX_ICR_ALD_IE | IOP3XX_ICR_BERR_IE | IOP3XX_ICR_RXFULL_IE | IOP3XX_ICR_TXEMPTY_IE); __raw_writel(cr, adapter_data->ioaddr + CR_OFFSET); iounmap(adapter_data->ioaddr); release_mem_region(res->start, IOP3XX_I2C_IO_SIZE); kfree(adapter_data); kfree(padapter); } static int iop3xx_i2c_probe(struct platform_device *pdev) { struct resource *res; int ret, irq; struct i2c_adapter *new_adapter; struct i2c_algo_iop3xx_data *adapter_data; new_adapter = kzalloc(sizeof(struct i2c_adapter), GFP_KERNEL); if (!new_adapter) { ret = -ENOMEM; goto out; } adapter_data = kzalloc(sizeof(struct i2c_algo_iop3xx_data), GFP_KERNEL); if 
(!adapter_data) { ret = -ENOMEM; goto free_adapter; } adapter_data->gpio_scl = devm_gpiod_get_optional(&pdev->dev, "scl", GPIOD_ASIS); if (IS_ERR(adapter_data->gpio_scl)) { ret = PTR_ERR(adapter_data->gpio_scl); goto free_both; } adapter_data->gpio_sda = devm_gpiod_get_optional(&pdev->dev, "sda", GPIOD_ASIS); if (IS_ERR(adapter_data->gpio_sda)) { ret = PTR_ERR(adapter_data->gpio_sda); goto free_both; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { ret = -ENODEV; goto free_both; } if (!request_mem_region(res->start, IOP3XX_I2C_IO_SIZE, pdev->name)) { ret = -EBUSY; goto free_both; } /* set the adapter enumeration # */ adapter_data->id = i2c_id++; adapter_data->ioaddr = ioremap(res->start, IOP3XX_I2C_IO_SIZE); if (!adapter_data->ioaddr) { ret = -ENOMEM; goto release_region; } irq = platform_get_irq(pdev, 0); if (irq < 0) { ret = irq; goto unmap; } ret = request_irq(irq, iop3xx_i2c_irq_handler, 0, pdev->name, adapter_data); if (ret) goto unmap; memcpy(new_adapter->name, pdev->name, strlen(pdev->name)); new_adapter->owner = THIS_MODULE; new_adapter->class = I2C_CLASS_HWMON | I2C_CLASS_SPD; new_adapter->dev.parent = &pdev->dev; new_adapter->dev.of_node = pdev->dev.of_node; new_adapter->nr = pdev->id; /* * Default values...should these come in from board code? */ new_adapter->timeout = HZ; new_adapter->algo = &iop3xx_i2c_algo; init_waitqueue_head(&adapter_data->waitq); spin_lock_init(&adapter_data->lock); iop3xx_i2c_reset(adapter_data); iop3xx_i2c_enable(adapter_data); platform_set_drvdata(pdev, new_adapter); new_adapter->algo_data = adapter_data; i2c_add_numbered_adapter(new_adapter); return 0; unmap: iounmap(adapter_data->ioaddr); release_region: release_mem_region(res->start, IOP3XX_I2C_IO_SIZE); free_both: kfree(adapter_data); free_adapter: kfree(new_adapter); out: return ret; } static const struct of_device_id i2c_iop3xx_match[] = { { .compatible = "intel,iop3xx-i2c", }, { .compatible = "intel,ixp4xx-i2c", }, {}, }; MODULE_DEVICE_TABLE(of, i2c_iop3xx_match); static struct platform_driver iop3xx_i2c_driver = { .probe = iop3xx_i2c_probe, .remove_new = iop3xx_i2c_remove, .driver = { .name = "IOP3xx-I2C", .of_match_table = i2c_iop3xx_match, }, }; module_platform_driver(iop3xx_i2c_driver); MODULE_AUTHOR("D-TACQ Solutions Ltd <www.d-tacq.com>"); MODULE_DESCRIPTION("IOP3xx iic algorithm and driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:IOP3xx-I2C");
linux-master
drivers/i2c/busses/i2c-iop3xx.c
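iop3xx_i2c_master_xfer() walks the message array and issues a new start (a repeated start after the first message) before each one, which is what makes combined write-then-read transactions work. A hypothetical caller of such an adapter (the register number and error handling are illustrative) expresses that as two i2c_msg entries passed to i2c_transfer():

/*
 * Illustrative caller sketch, not part of this driver: combined
 * write-then-read of one register via i2c_transfer().
 */
#include <linux/i2c.h>

static int example_read_reg(struct i2c_adapter *adap, u16 addr, u8 reg, u8 *val)
{
	struct i2c_msg msgs[2] = {
		{ .addr = addr, .flags = 0,        .len = 1, .buf = &reg },
		{ .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = val  },
	};
	int ret;

	ret = i2c_transfer(adap, msgs, 2);	/* repeated start between msgs */
	return ret == 2 ? 0 : (ret < 0 ? ret : -EIO);
}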
// SPDX-License-Identifier: GPL-2.0-only /* * i2c-stm32.c * * Copyright (C) M'boumba Cedric Madianga 2017 * Author: M'boumba Cedric Madianga <[email protected]> */ #include "i2c-stm32.h" /* Functions for DMA support */ struct stm32_i2c_dma *stm32_i2c_dma_request(struct device *dev, dma_addr_t phy_addr, u32 txdr_offset, u32 rxdr_offset) { struct stm32_i2c_dma *dma; struct dma_slave_config dma_sconfig; int ret; dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL); if (!dma) return ERR_PTR(-ENOMEM); /* Request and configure I2C TX dma channel */ dma->chan_tx = dma_request_chan(dev, "tx"); if (IS_ERR(dma->chan_tx)) { ret = PTR_ERR(dma->chan_tx); if (ret != -ENODEV) ret = dev_err_probe(dev, ret, "can't request DMA tx channel\n"); goto fail_al; } memset(&dma_sconfig, 0, sizeof(dma_sconfig)); dma_sconfig.dst_addr = phy_addr + txdr_offset; dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; dma_sconfig.dst_maxburst = 1; dma_sconfig.direction = DMA_MEM_TO_DEV; ret = dmaengine_slave_config(dma->chan_tx, &dma_sconfig); if (ret < 0) { dev_err(dev, "can't configure tx channel\n"); goto fail_tx; } /* Request and configure I2C RX dma channel */ dma->chan_rx = dma_request_chan(dev, "rx"); if (IS_ERR(dma->chan_rx)) { ret = PTR_ERR(dma->chan_rx); if (ret != -ENODEV) ret = dev_err_probe(dev, ret, "can't request DMA rx channel\n"); goto fail_tx; } memset(&dma_sconfig, 0, sizeof(dma_sconfig)); dma_sconfig.src_addr = phy_addr + rxdr_offset; dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; dma_sconfig.src_maxburst = 1; dma_sconfig.direction = DMA_DEV_TO_MEM; ret = dmaengine_slave_config(dma->chan_rx, &dma_sconfig); if (ret < 0) { dev_err(dev, "can't configure rx channel\n"); goto fail_rx; } init_completion(&dma->dma_complete); dev_info(dev, "using %s (tx) and %s (rx) for DMA transfers\n", dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx)); return dma; fail_rx: dma_release_channel(dma->chan_rx); fail_tx: dma_release_channel(dma->chan_tx); fail_al: devm_kfree(dev, dma); return ERR_PTR(ret); } void stm32_i2c_dma_free(struct stm32_i2c_dma *dma) { dma->dma_buf = 0; dma->dma_len = 0; dma_release_channel(dma->chan_tx); dma->chan_tx = NULL; dma_release_channel(dma->chan_rx); dma->chan_rx = NULL; dma->chan_using = NULL; } int stm32_i2c_prep_dma_xfer(struct device *dev, struct stm32_i2c_dma *dma, bool rd_wr, u32 len, u8 *buf, dma_async_tx_callback callback, void *dma_async_param) { struct dma_async_tx_descriptor *txdesc; struct device *chan_dev; int ret; if (rd_wr) { dma->chan_using = dma->chan_rx; dma->dma_transfer_dir = DMA_DEV_TO_MEM; dma->dma_data_dir = DMA_FROM_DEVICE; } else { dma->chan_using = dma->chan_tx; dma->dma_transfer_dir = DMA_MEM_TO_DEV; dma->dma_data_dir = DMA_TO_DEVICE; } dma->dma_len = len; chan_dev = dma->chan_using->device->dev; dma->dma_buf = dma_map_single(chan_dev, buf, dma->dma_len, dma->dma_data_dir); if (dma_mapping_error(chan_dev, dma->dma_buf)) { dev_err(dev, "DMA mapping failed\n"); return -EINVAL; } txdesc = dmaengine_prep_slave_single(dma->chan_using, dma->dma_buf, dma->dma_len, dma->dma_transfer_dir, DMA_PREP_INTERRUPT); if (!txdesc) { dev_err(dev, "Not able to get desc for DMA xfer\n"); ret = -EINVAL; goto err; } reinit_completion(&dma->dma_complete); txdesc->callback = callback; txdesc->callback_param = dma_async_param; ret = dma_submit_error(dmaengine_submit(txdesc)); if (ret < 0) { dev_err(dev, "DMA submit failed\n"); goto err; } dma_async_issue_pending(dma->chan_using); return 0; err: dma_unmap_single(chan_dev, dma->dma_buf, dma->dma_len, dma->dma_data_dir); return ret; 
}
linux-master
drivers/i2c/busses/i2c-stm32.c
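stm32_i2c_prep_dma_xfer() maps the buffer, sets the descriptor callback and issues the transfer, but leaves waiting for completion to the caller. The two helpers below are a sketch of that assumed division of labour (the names and the 100 ms timeout are invented; the stm32_i2c_dma layout is taken from i2c-stm32.h as used above): the DMA callback completes dma_complete, and the transfer path blocks on it before unmapping the buffer.

/*
 * Sketch only: typical callback/wait pair around stm32_i2c_prep_dma_xfer().
 */
#include <linux/completion.h>
#include <linux/jiffies.h>

static void example_dma_callback(void *arg)
{
	struct stm32_i2c_dma *dma = arg;

	complete(&dma->dma_complete);
}

static int example_wait_dma(struct stm32_i2c_dma *dma)
{
	if (!wait_for_completion_timeout(&dma->dma_complete,
					 msecs_to_jiffies(100)))
		return -ETIMEDOUT;

	return 0;
}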
// SPDX-License-Identifier: GPL-2.0 /* * i2c-ocores.c: I2C bus driver for OpenCores I2C controller * (https://opencores.org/project/i2c/overview) * * Peter Korsgaard <[email protected]> * * Support for the GRLIB port of the controller by * Andreas Larsson <[email protected]> */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/platform_device.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/wait.h> #include <linux/platform_data/i2c-ocores.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/log2.h> #include <linux/spinlock.h> #include <linux/jiffies.h> /* * 'process_lock' exists because ocores_process() and ocores_process_timeout() * can't run in parallel. */ struct ocores_i2c { void __iomem *base; int iobase; u32 reg_shift; u32 reg_io_width; unsigned long flags; wait_queue_head_t wait; struct i2c_adapter adap; struct i2c_msg *msg; int pos; int nmsgs; int state; /* see STATE_ */ spinlock_t process_lock; struct clk *clk; int ip_clock_khz; int bus_clock_khz; void (*setreg)(struct ocores_i2c *i2c, int reg, u8 value); u8 (*getreg)(struct ocores_i2c *i2c, int reg); }; /* registers */ #define OCI2C_PRELOW 0 #define OCI2C_PREHIGH 1 #define OCI2C_CONTROL 2 #define OCI2C_DATA 3 #define OCI2C_CMD 4 /* write only */ #define OCI2C_STATUS 4 /* read only, same address as OCI2C_CMD */ #define OCI2C_CTRL_IEN 0x40 #define OCI2C_CTRL_EN 0x80 #define OCI2C_CMD_START 0x91 #define OCI2C_CMD_STOP 0x41 #define OCI2C_CMD_READ 0x21 #define OCI2C_CMD_WRITE 0x11 #define OCI2C_CMD_READ_ACK 0x21 #define OCI2C_CMD_READ_NACK 0x29 #define OCI2C_CMD_IACK 0x01 #define OCI2C_STAT_IF 0x01 #define OCI2C_STAT_TIP 0x02 #define OCI2C_STAT_ARBLOST 0x20 #define OCI2C_STAT_BUSY 0x40 #define OCI2C_STAT_NACK 0x80 #define STATE_DONE 0 #define STATE_START 1 #define STATE_WRITE 2 #define STATE_READ 3 #define STATE_ERROR 4 #define TYPE_OCORES 0 #define TYPE_GRLIB 1 #define OCORES_FLAG_BROKEN_IRQ BIT(1) /* Broken IRQ for FU540-C000 SoC */ static void oc_setreg_8(struct ocores_i2c *i2c, int reg, u8 value) { iowrite8(value, i2c->base + (reg << i2c->reg_shift)); } static void oc_setreg_16(struct ocores_i2c *i2c, int reg, u8 value) { iowrite16(value, i2c->base + (reg << i2c->reg_shift)); } static void oc_setreg_32(struct ocores_i2c *i2c, int reg, u8 value) { iowrite32(value, i2c->base + (reg << i2c->reg_shift)); } static void oc_setreg_16be(struct ocores_i2c *i2c, int reg, u8 value) { iowrite16be(value, i2c->base + (reg << i2c->reg_shift)); } static void oc_setreg_32be(struct ocores_i2c *i2c, int reg, u8 value) { iowrite32be(value, i2c->base + (reg << i2c->reg_shift)); } static inline u8 oc_getreg_8(struct ocores_i2c *i2c, int reg) { return ioread8(i2c->base + (reg << i2c->reg_shift)); } static inline u8 oc_getreg_16(struct ocores_i2c *i2c, int reg) { return ioread16(i2c->base + (reg << i2c->reg_shift)); } static inline u8 oc_getreg_32(struct ocores_i2c *i2c, int reg) { return ioread32(i2c->base + (reg << i2c->reg_shift)); } static inline u8 oc_getreg_16be(struct ocores_i2c *i2c, int reg) { return ioread16be(i2c->base + (reg << i2c->reg_shift)); } static inline u8 oc_getreg_32be(struct ocores_i2c *i2c, int reg) { return ioread32be(i2c->base + (reg << i2c->reg_shift)); } static void oc_setreg_io_8(struct ocores_i2c *i2c, int reg, u8 value) { outb(value, i2c->iobase + reg); } static inline u8 oc_getreg_io_8(struct ocores_i2c *i2c, int reg) { return inb(i2c->iobase + reg); } static inline void 
oc_setreg(struct ocores_i2c *i2c, int reg, u8 value) { i2c->setreg(i2c, reg, value); } static inline u8 oc_getreg(struct ocores_i2c *i2c, int reg) { return i2c->getreg(i2c, reg); } static void ocores_process(struct ocores_i2c *i2c, u8 stat) { struct i2c_msg *msg = i2c->msg; unsigned long flags; /* * If we spin here is because we are in timeout, so we are going * to be in STATE_ERROR. See ocores_process_timeout() */ spin_lock_irqsave(&i2c->process_lock, flags); if ((i2c->state == STATE_DONE) || (i2c->state == STATE_ERROR)) { /* stop has been sent */ oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_IACK); wake_up(&i2c->wait); goto out; } /* error? */ if (stat & OCI2C_STAT_ARBLOST) { i2c->state = STATE_ERROR; oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_STOP); goto out; } if ((i2c->state == STATE_START) || (i2c->state == STATE_WRITE)) { i2c->state = (msg->flags & I2C_M_RD) ? STATE_READ : STATE_WRITE; if (stat & OCI2C_STAT_NACK) { i2c->state = STATE_ERROR; oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_STOP); goto out; } } else { msg->buf[i2c->pos++] = oc_getreg(i2c, OCI2C_DATA); } /* end of msg? */ if (i2c->pos == msg->len) { i2c->nmsgs--; i2c->msg++; i2c->pos = 0; msg = i2c->msg; if (i2c->nmsgs) { /* end? */ /* send start? */ if (!(msg->flags & I2C_M_NOSTART)) { u8 addr = i2c_8bit_addr_from_msg(msg); i2c->state = STATE_START; oc_setreg(i2c, OCI2C_DATA, addr); oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_START); goto out; } i2c->state = (msg->flags & I2C_M_RD) ? STATE_READ : STATE_WRITE; } else { i2c->state = STATE_DONE; oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_STOP); goto out; } } if (i2c->state == STATE_READ) { oc_setreg(i2c, OCI2C_CMD, i2c->pos == (msg->len-1) ? OCI2C_CMD_READ_NACK : OCI2C_CMD_READ_ACK); } else { oc_setreg(i2c, OCI2C_DATA, msg->buf[i2c->pos++]); oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_WRITE); } out: spin_unlock_irqrestore(&i2c->process_lock, flags); } static irqreturn_t ocores_isr(int irq, void *dev_id) { struct ocores_i2c *i2c = dev_id; u8 stat = oc_getreg(i2c, OCI2C_STATUS); if (i2c->flags & OCORES_FLAG_BROKEN_IRQ) { if ((stat & OCI2C_STAT_IF) && !(stat & OCI2C_STAT_BUSY)) return IRQ_NONE; } else if (!(stat & OCI2C_STAT_IF)) { return IRQ_NONE; } ocores_process(i2c, stat); return IRQ_HANDLED; } /** * ocores_process_timeout() - Process timeout event * @i2c: ocores I2C device instance */ static void ocores_process_timeout(struct ocores_i2c *i2c) { unsigned long flags; spin_lock_irqsave(&i2c->process_lock, flags); i2c->state = STATE_ERROR; oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_STOP); spin_unlock_irqrestore(&i2c->process_lock, flags); } /** * ocores_wait() - Wait until something change in a given register * @i2c: ocores I2C device instance * @reg: register to query * @mask: bitmask to apply on register value * @val: expected result * @timeout: timeout in jiffies * * Timeout is necessary to avoid to stay here forever when the chip * does not answer correctly. * * Return: 0 on success, -ETIMEDOUT on timeout */ static int ocores_wait(struct ocores_i2c *i2c, int reg, u8 mask, u8 val, const unsigned long timeout) { unsigned long j; j = jiffies + timeout; while (1) { u8 status = oc_getreg(i2c, reg); if ((status & mask) == val) break; if (time_after(jiffies, j)) return -ETIMEDOUT; } return 0; } /** * ocores_poll_wait() - Wait until is possible to process some data * @i2c: ocores I2C device instance * * Used when the device is in polling mode (interrupts disabled). 
* * Return: 0 on success, -ETIMEDOUT on timeout */ static int ocores_poll_wait(struct ocores_i2c *i2c) { u8 mask; int err; if (i2c->state == STATE_DONE || i2c->state == STATE_ERROR) { /* transfer is over */ mask = OCI2C_STAT_BUSY; } else { /* on going transfer */ mask = OCI2C_STAT_TIP; /* * We wait for the data to be transferred (8bit), * then we start polling on the ACK/NACK bit */ udelay((8 * 1000) / i2c->bus_clock_khz); } /* * once we are here we expect to get the expected result immediately * so if after 1ms we timeout then something is broken. */ err = ocores_wait(i2c, OCI2C_STATUS, mask, 0, msecs_to_jiffies(1)); if (err) dev_warn(i2c->adap.dev.parent, "%s: STATUS timeout, bit 0x%x did not clear in 1ms\n", __func__, mask); return err; } /** * ocores_process_polling() - It handles an IRQ-less transfer * @i2c: ocores I2C device instance * * Even if IRQ are disabled, the I2C OpenCore IP behavior is exactly the same * (only that IRQ are not produced). This means that we can re-use entirely * ocores_isr(), we just add our polling code around it. * * It can run in atomic context * * Return: 0 on success, -ETIMEDOUT on timeout */ static int ocores_process_polling(struct ocores_i2c *i2c) { irqreturn_t ret; int err = 0; while (1) { err = ocores_poll_wait(i2c); if (err) break; /* timeout */ ret = ocores_isr(-1, i2c); if (ret == IRQ_NONE) break; /* all messages have been transferred */ else { if (i2c->flags & OCORES_FLAG_BROKEN_IRQ) if (i2c->state == STATE_DONE) break; } } return err; } static int ocores_xfer_core(struct ocores_i2c *i2c, struct i2c_msg *msgs, int num, bool polling) { int ret = 0; u8 ctrl; ctrl = oc_getreg(i2c, OCI2C_CONTROL); if (polling) oc_setreg(i2c, OCI2C_CONTROL, ctrl & ~OCI2C_CTRL_IEN); else oc_setreg(i2c, OCI2C_CONTROL, ctrl | OCI2C_CTRL_IEN); i2c->msg = msgs; i2c->pos = 0; i2c->nmsgs = num; i2c->state = STATE_START; oc_setreg(i2c, OCI2C_DATA, i2c_8bit_addr_from_msg(i2c->msg)); oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_START); if (polling) { ret = ocores_process_polling(i2c); } else { if (wait_event_timeout(i2c->wait, (i2c->state == STATE_ERROR) || (i2c->state == STATE_DONE), HZ) == 0) ret = -ETIMEDOUT; } if (ret) { ocores_process_timeout(i2c); return ret; } return (i2c->state == STATE_DONE) ? 
num : -EIO; } static int ocores_xfer_polling(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { return ocores_xfer_core(i2c_get_adapdata(adap), msgs, num, true); } static int ocores_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { return ocores_xfer_core(i2c_get_adapdata(adap), msgs, num, false); } static int ocores_init(struct device *dev, struct ocores_i2c *i2c) { int prescale; int diff; u8 ctrl = oc_getreg(i2c, OCI2C_CONTROL); /* make sure the device is disabled */ ctrl &= ~(OCI2C_CTRL_EN | OCI2C_CTRL_IEN); oc_setreg(i2c, OCI2C_CONTROL, ctrl); prescale = (i2c->ip_clock_khz / (5 * i2c->bus_clock_khz)) - 1; prescale = clamp(prescale, 0, 0xffff); diff = i2c->ip_clock_khz / (5 * (prescale + 1)) - i2c->bus_clock_khz; if (abs(diff) > i2c->bus_clock_khz / 10) { dev_err(dev, "Unsupported clock settings: core: %d KHz, bus: %d KHz\n", i2c->ip_clock_khz, i2c->bus_clock_khz); return -EINVAL; } oc_setreg(i2c, OCI2C_PRELOW, prescale & 0xff); oc_setreg(i2c, OCI2C_PREHIGH, prescale >> 8); /* Init the device */ oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_IACK); oc_setreg(i2c, OCI2C_CONTROL, ctrl | OCI2C_CTRL_EN); return 0; } static u32 ocores_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static struct i2c_algorithm ocores_algorithm = { .master_xfer = ocores_xfer, .master_xfer_atomic = ocores_xfer_polling, .functionality = ocores_func, }; static const struct i2c_adapter ocores_adapter = { .owner = THIS_MODULE, .name = "i2c-ocores", .class = I2C_CLASS_DEPRECATED, .algo = &ocores_algorithm, }; static const struct of_device_id ocores_i2c_match[] = { { .compatible = "opencores,i2c-ocores", .data = (void *)TYPE_OCORES, }, { .compatible = "aeroflexgaisler,i2cmst", .data = (void *)TYPE_GRLIB, }, { .compatible = "sifive,fu540-c000-i2c", }, { .compatible = "sifive,i2c0", }, {}, }; MODULE_DEVICE_TABLE(of, ocores_i2c_match); #ifdef CONFIG_OF /* * Read and write functions for the GRLIB port of the controller. Registers are * 32-bit big endian and the PRELOW and PREHIGH registers are merged into one * register. The subsequent registers have their offsets decreased accordingly. 
*/ static u8 oc_getreg_grlib(struct ocores_i2c *i2c, int reg) { u32 rd; int rreg = reg; if (reg != OCI2C_PRELOW) rreg--; rd = ioread32be(i2c->base + (rreg << i2c->reg_shift)); if (reg == OCI2C_PREHIGH) return (u8)(rd >> 8); else return (u8)rd; } static void oc_setreg_grlib(struct ocores_i2c *i2c, int reg, u8 value) { u32 curr, wr; int rreg = reg; if (reg != OCI2C_PRELOW) rreg--; if (reg == OCI2C_PRELOW || reg == OCI2C_PREHIGH) { curr = ioread32be(i2c->base + (rreg << i2c->reg_shift)); if (reg == OCI2C_PRELOW) wr = (curr & 0xff00) | value; else wr = (((u32)value) << 8) | (curr & 0xff); } else { wr = value; } iowrite32be(wr, i2c->base + (rreg << i2c->reg_shift)); } static int ocores_i2c_of_probe(struct platform_device *pdev, struct ocores_i2c *i2c) { struct device_node *np = pdev->dev.of_node; const struct of_device_id *match; u32 val; u32 clock_frequency; bool clock_frequency_present; if (of_property_read_u32(np, "reg-shift", &i2c->reg_shift)) { /* no 'reg-shift', check for deprecated 'regstep' */ if (!of_property_read_u32(np, "regstep", &val)) { if (!is_power_of_2(val)) { dev_err(&pdev->dev, "invalid regstep %d\n", val); return -EINVAL; } i2c->reg_shift = ilog2(val); dev_warn(&pdev->dev, "regstep property deprecated, use reg-shift\n"); } } clock_frequency_present = !of_property_read_u32(np, "clock-frequency", &clock_frequency); i2c->bus_clock_khz = 100; i2c->clk = devm_clk_get_optional_enabled(&pdev->dev, NULL); if (IS_ERR(i2c->clk)) return dev_err_probe(&pdev->dev, PTR_ERR(i2c->clk), "devm_clk_get_optional_enabled failed\n"); i2c->ip_clock_khz = clk_get_rate(i2c->clk) / 1000; if (clock_frequency_present) i2c->bus_clock_khz = clock_frequency / 1000; if (i2c->ip_clock_khz == 0) { if (of_property_read_u32(np, "opencores,ip-clock-frequency", &val)) { if (!clock_frequency_present) { dev_err(&pdev->dev, "Missing required parameter 'opencores,ip-clock-frequency'\n"); return -ENODEV; } i2c->ip_clock_khz = clock_frequency / 1000; dev_warn(&pdev->dev, "Deprecated usage of the 'clock-frequency' property, please update to 'opencores,ip-clock-frequency'\n"); } else { i2c->ip_clock_khz = val / 1000; if (clock_frequency_present) i2c->bus_clock_khz = clock_frequency / 1000; } } of_property_read_u32(pdev->dev.of_node, "reg-io-width", &i2c->reg_io_width); match = of_match_node(ocores_i2c_match, pdev->dev.of_node); if (match && (long)match->data == TYPE_GRLIB) { dev_dbg(&pdev->dev, "GRLIB variant of i2c-ocores\n"); i2c->setreg = oc_setreg_grlib; i2c->getreg = oc_getreg_grlib; } return 0; } #else #define ocores_i2c_of_probe(pdev, i2c) -ENODEV #endif static int ocores_i2c_probe(struct platform_device *pdev) { struct ocores_i2c *i2c; struct ocores_i2c_platform_data *pdata; struct resource *res; int irq; int ret; int i; i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL); if (!i2c) return -ENOMEM; spin_lock_init(&i2c->process_lock); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res) { i2c->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(i2c->base)) return PTR_ERR(i2c->base); } else { res = platform_get_resource(pdev, IORESOURCE_IO, 0); if (!res) return -EINVAL; i2c->iobase = res->start; if (!devm_request_region(&pdev->dev, res->start, resource_size(res), pdev->name)) { dev_err(&pdev->dev, "Can't get I/O resource.\n"); return -EBUSY; } i2c->setreg = oc_setreg_io_8; i2c->getreg = oc_getreg_io_8; } pdata = dev_get_platdata(&pdev->dev); if (pdata) { i2c->reg_shift = pdata->reg_shift; i2c->reg_io_width = pdata->reg_io_width; i2c->ip_clock_khz = pdata->clock_khz; if (pdata->bus_khz) 
i2c->bus_clock_khz = pdata->bus_khz; else i2c->bus_clock_khz = 100; } else { ret = ocores_i2c_of_probe(pdev, i2c); if (ret) return ret; } if (i2c->reg_io_width == 0) i2c->reg_io_width = 1; /* Set to default value */ if (!i2c->setreg || !i2c->getreg) { bool be = pdata ? pdata->big_endian : of_device_is_big_endian(pdev->dev.of_node); switch (i2c->reg_io_width) { case 1: i2c->setreg = oc_setreg_8; i2c->getreg = oc_getreg_8; break; case 2: i2c->setreg = be ? oc_setreg_16be : oc_setreg_16; i2c->getreg = be ? oc_getreg_16be : oc_getreg_16; break; case 4: i2c->setreg = be ? oc_setreg_32be : oc_setreg_32; i2c->getreg = be ? oc_getreg_32be : oc_getreg_32; break; default: dev_err(&pdev->dev, "Unsupported I/O width (%d)\n", i2c->reg_io_width); return -EINVAL; } } init_waitqueue_head(&i2c->wait); irq = platform_get_irq_optional(pdev, 0); /* * Since the SoC does have an interrupt, its DT has an interrupt * property - But this should be bypassed as the IRQ logic in this * SoC is broken. */ if (of_device_is_compatible(pdev->dev.of_node, "sifive,fu540-c000-i2c")) { i2c->flags |= OCORES_FLAG_BROKEN_IRQ; irq = -ENXIO; } if (irq == -ENXIO) { ocores_algorithm.master_xfer = ocores_xfer_polling; } else { if (irq < 0) return irq; } if (ocores_algorithm.master_xfer != ocores_xfer_polling) { ret = devm_request_any_context_irq(&pdev->dev, irq, ocores_isr, 0, pdev->name, i2c); if (ret) { dev_err(&pdev->dev, "Cannot claim IRQ\n"); return ret; } } ret = ocores_init(&pdev->dev, i2c); if (ret) return ret; /* hook up driver to tree */ platform_set_drvdata(pdev, i2c); i2c->adap = ocores_adapter; i2c_set_adapdata(&i2c->adap, i2c); i2c->adap.dev.parent = &pdev->dev; i2c->adap.dev.of_node = pdev->dev.of_node; /* add i2c adapter to i2c tree */ ret = i2c_add_adapter(&i2c->adap); if (ret) return ret; /* add in known devices to the bus */ if (pdata) { for (i = 0; i < pdata->num_devices; i++) i2c_new_client_device(&i2c->adap, pdata->devices + i); } return 0; } static void ocores_i2c_remove(struct platform_device *pdev) { struct ocores_i2c *i2c = platform_get_drvdata(pdev); u8 ctrl = oc_getreg(i2c, OCI2C_CONTROL); /* disable i2c logic */ ctrl &= ~(OCI2C_CTRL_EN | OCI2C_CTRL_IEN); oc_setreg(i2c, OCI2C_CONTROL, ctrl); /* remove adapter & data */ i2c_del_adapter(&i2c->adap); } static int ocores_i2c_suspend(struct device *dev) { struct ocores_i2c *i2c = dev_get_drvdata(dev); u8 ctrl = oc_getreg(i2c, OCI2C_CONTROL); /* make sure the device is disabled */ ctrl &= ~(OCI2C_CTRL_EN | OCI2C_CTRL_IEN); oc_setreg(i2c, OCI2C_CONTROL, ctrl); clk_disable_unprepare(i2c->clk); return 0; } static int ocores_i2c_resume(struct device *dev) { struct ocores_i2c *i2c = dev_get_drvdata(dev); unsigned long rate; int ret; ret = clk_prepare_enable(i2c->clk); if (ret) return dev_err_probe(dev, ret, "clk_prepare_enable failed\n"); rate = clk_get_rate(i2c->clk) / 1000; if (rate) i2c->ip_clock_khz = rate; return ocores_init(dev, i2c); } static DEFINE_SIMPLE_DEV_PM_OPS(ocores_i2c_pm, ocores_i2c_suspend, ocores_i2c_resume); static struct platform_driver ocores_i2c_driver = { .probe = ocores_i2c_probe, .remove_new = ocores_i2c_remove, .driver = { .name = "ocores-i2c", .of_match_table = ocores_i2c_match, .pm = pm_sleep_ptr(&ocores_i2c_pm), }, }; module_platform_driver(ocores_i2c_driver); MODULE_AUTHOR("Peter Korsgaard <[email protected]>"); MODULE_DESCRIPTION("OpenCores I2C bus driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:ocores-i2c");
linux-master
drivers/i2c/busses/i2c-ocores.c
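The clock setup in ocores_init() above boils down to a small piece of integer arithmetic: the core divides its input clock by 5 * (prescale + 1), and the driver rejects any setting that lands more than 10% away from the requested bus rate. The following stand-alone sketch mirrors that calculation in user space so it can be checked in isolation; the clock values and the helper name ocores_prescale() are illustrative assumptions, not part of the driver.

#include <stdio.h>
#include <stdlib.h>

/* Mirror of the ocores_init() prescale math: the IP clock is divided by
 * 5 * (prescale + 1) to form the SCL rate, and settings more than 10% off
 * the requested bus clock are rejected. All values are in kHz. */
static int ocores_prescale(int ip_clock_khz, int bus_clock_khz, int *prescale_out)
{
	int prescale = ip_clock_khz / (5 * bus_clock_khz) - 1;
	int diff;

	/* clamp(prescale, 0, 0xffff) as in the driver */
	if (prescale < 0)
		prescale = 0;
	else if (prescale > 0xffff)
		prescale = 0xffff;

	diff = ip_clock_khz / (5 * (prescale + 1)) - bus_clock_khz;
	if (abs(diff) > bus_clock_khz / 10)
		return -1;	/* the driver would return -EINVAL here */

	*prescale_out = prescale;
	return 0;
}

int main(void)
{
	int prescale;

	/* e.g. a 50 MHz IP clock and a 100 kHz bus (hypothetical numbers) */
	if (ocores_prescale(50000, 100, &prescale) == 0)
		printf("PRELOW=0x%02x PREHIGH=0x%02x\n",
		       prescale & 0xff, prescale >> 8);
	else
		printf("unsupported clock settings\n");
	return 0;
}

With these inputs the sketch prints PRELOW=0x63 PREHIGH=0x00, matching what the driver would write into OCI2C_PRELOW and OCI2C_PREHIGH.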
// SPDX-License-Identifier: GPL-2.0-only /* * Aspeed 24XX/25XX I2C Controller. * * Copyright (C) 2012-2017 ASPEED Technology Inc. * Copyright 2017 IBM Corporation * Copyright 2017 Google, Inc. */ #include <linux/clk.h> #include <linux/completion.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/reset.h> #include <linux/slab.h> /* I2C Register */ #define ASPEED_I2C_FUN_CTRL_REG 0x00 #define ASPEED_I2C_AC_TIMING_REG1 0x04 #define ASPEED_I2C_AC_TIMING_REG2 0x08 #define ASPEED_I2C_INTR_CTRL_REG 0x0c #define ASPEED_I2C_INTR_STS_REG 0x10 #define ASPEED_I2C_CMD_REG 0x14 #define ASPEED_I2C_DEV_ADDR_REG 0x18 #define ASPEED_I2C_BYTE_BUF_REG 0x20 /* Global Register Definition */ /* 0x00 : I2C Interrupt Status Register */ /* 0x08 : I2C Interrupt Target Assignment */ /* Device Register Definition */ /* 0x00 : I2CD Function Control Register */ #define ASPEED_I2CD_MULTI_MASTER_DIS BIT(15) #define ASPEED_I2CD_SDA_DRIVE_1T_EN BIT(8) #define ASPEED_I2CD_M_SDA_DRIVE_1T_EN BIT(7) #define ASPEED_I2CD_M_HIGH_SPEED_EN BIT(6) #define ASPEED_I2CD_SLAVE_EN BIT(1) #define ASPEED_I2CD_MASTER_EN BIT(0) /* 0x04 : I2CD Clock and AC Timing Control Register #1 */ #define ASPEED_I2CD_TIME_TBUF_MASK GENMASK(31, 28) #define ASPEED_I2CD_TIME_THDSTA_MASK GENMASK(27, 24) #define ASPEED_I2CD_TIME_TACST_MASK GENMASK(23, 20) #define ASPEED_I2CD_TIME_SCL_HIGH_SHIFT 16 #define ASPEED_I2CD_TIME_SCL_HIGH_MASK GENMASK(19, 16) #define ASPEED_I2CD_TIME_SCL_LOW_SHIFT 12 #define ASPEED_I2CD_TIME_SCL_LOW_MASK GENMASK(15, 12) #define ASPEED_I2CD_TIME_BASE_DIVISOR_MASK GENMASK(3, 0) #define ASPEED_I2CD_TIME_SCL_REG_MAX GENMASK(3, 0) /* 0x08 : I2CD Clock and AC Timing Control Register #2 */ #define ASPEED_NO_TIMEOUT_CTRL 0 /* 0x0c : I2CD Interrupt Control Register & * 0x10 : I2CD Interrupt Status Register * * These share bit definitions, so use the same values for the enable & * status bits. 
*/ #define ASPEED_I2CD_INTR_RECV_MASK 0xf000ffff #define ASPEED_I2CD_INTR_SDA_DL_TIMEOUT BIT(14) #define ASPEED_I2CD_INTR_BUS_RECOVER_DONE BIT(13) #define ASPEED_I2CD_INTR_SLAVE_MATCH BIT(7) #define ASPEED_I2CD_INTR_SCL_TIMEOUT BIT(6) #define ASPEED_I2CD_INTR_ABNORMAL BIT(5) #define ASPEED_I2CD_INTR_NORMAL_STOP BIT(4) #define ASPEED_I2CD_INTR_ARBIT_LOSS BIT(3) #define ASPEED_I2CD_INTR_RX_DONE BIT(2) #define ASPEED_I2CD_INTR_TX_NAK BIT(1) #define ASPEED_I2CD_INTR_TX_ACK BIT(0) #define ASPEED_I2CD_INTR_MASTER_ERRORS \ (ASPEED_I2CD_INTR_SDA_DL_TIMEOUT | \ ASPEED_I2CD_INTR_SCL_TIMEOUT | \ ASPEED_I2CD_INTR_ABNORMAL | \ ASPEED_I2CD_INTR_ARBIT_LOSS) #define ASPEED_I2CD_INTR_ALL \ (ASPEED_I2CD_INTR_SDA_DL_TIMEOUT | \ ASPEED_I2CD_INTR_BUS_RECOVER_DONE | \ ASPEED_I2CD_INTR_SCL_TIMEOUT | \ ASPEED_I2CD_INTR_ABNORMAL | \ ASPEED_I2CD_INTR_NORMAL_STOP | \ ASPEED_I2CD_INTR_ARBIT_LOSS | \ ASPEED_I2CD_INTR_RX_DONE | \ ASPEED_I2CD_INTR_TX_NAK | \ ASPEED_I2CD_INTR_TX_ACK) /* 0x14 : I2CD Command/Status Register */ #define ASPEED_I2CD_SCL_LINE_STS BIT(18) #define ASPEED_I2CD_SDA_LINE_STS BIT(17) #define ASPEED_I2CD_BUS_BUSY_STS BIT(16) #define ASPEED_I2CD_BUS_RECOVER_CMD BIT(11) /* Command Bit */ #define ASPEED_I2CD_M_STOP_CMD BIT(5) #define ASPEED_I2CD_M_S_RX_CMD_LAST BIT(4) #define ASPEED_I2CD_M_RX_CMD BIT(3) #define ASPEED_I2CD_S_TX_CMD BIT(2) #define ASPEED_I2CD_M_TX_CMD BIT(1) #define ASPEED_I2CD_M_START_CMD BIT(0) #define ASPEED_I2CD_MASTER_CMDS_MASK \ (ASPEED_I2CD_M_STOP_CMD | \ ASPEED_I2CD_M_S_RX_CMD_LAST | \ ASPEED_I2CD_M_RX_CMD | \ ASPEED_I2CD_M_TX_CMD | \ ASPEED_I2CD_M_START_CMD) /* 0x18 : I2CD Slave Device Address Register */ #define ASPEED_I2CD_DEV_ADDR_MASK GENMASK(6, 0) enum aspeed_i2c_master_state { ASPEED_I2C_MASTER_INACTIVE, ASPEED_I2C_MASTER_PENDING, ASPEED_I2C_MASTER_START, ASPEED_I2C_MASTER_TX_FIRST, ASPEED_I2C_MASTER_TX, ASPEED_I2C_MASTER_RX_FIRST, ASPEED_I2C_MASTER_RX, ASPEED_I2C_MASTER_STOP, }; enum aspeed_i2c_slave_state { ASPEED_I2C_SLAVE_INACTIVE, ASPEED_I2C_SLAVE_START, ASPEED_I2C_SLAVE_READ_REQUESTED, ASPEED_I2C_SLAVE_READ_PROCESSED, ASPEED_I2C_SLAVE_WRITE_REQUESTED, ASPEED_I2C_SLAVE_WRITE_RECEIVED, ASPEED_I2C_SLAVE_STOP, }; struct aspeed_i2c_bus { struct i2c_adapter adap; struct device *dev; void __iomem *base; struct reset_control *rst; /* Synchronizes I/O mem access to base. */ spinlock_t lock; struct completion cmd_complete; u32 (*get_clk_reg_val)(struct device *dev, u32 divisor); unsigned long parent_clk_frequency; u32 bus_frequency; /* Transaction state. */ enum aspeed_i2c_master_state master_state; struct i2c_msg *msgs; size_t buf_index; size_t msgs_index; size_t msgs_count; bool send_stop; int cmd_err; /* Protected only by i2c_lock_bus */ int master_xfer_result; /* Multi-master */ bool multi_master; #if IS_ENABLED(CONFIG_I2C_SLAVE) struct i2c_client *slave; enum aspeed_i2c_slave_state slave_state; #endif /* CONFIG_I2C_SLAVE */ }; static int aspeed_i2c_reset(struct aspeed_i2c_bus *bus); static int aspeed_i2c_recover_bus(struct aspeed_i2c_bus *bus) { unsigned long time_left, flags; int ret = 0; u32 command; spin_lock_irqsave(&bus->lock, flags); command = readl(bus->base + ASPEED_I2C_CMD_REG); if (command & ASPEED_I2CD_SDA_LINE_STS) { /* Bus is idle: no recovery needed. 
*/ if (command & ASPEED_I2CD_SCL_LINE_STS) goto out; dev_dbg(bus->dev, "SCL hung (state %x), attempting recovery\n", command); reinit_completion(&bus->cmd_complete); writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG); spin_unlock_irqrestore(&bus->lock, flags); time_left = wait_for_completion_timeout( &bus->cmd_complete, bus->adap.timeout); spin_lock_irqsave(&bus->lock, flags); if (time_left == 0) goto reset_out; else if (bus->cmd_err) goto reset_out; /* Recovery failed. */ else if (!(readl(bus->base + ASPEED_I2C_CMD_REG) & ASPEED_I2CD_SCL_LINE_STS)) goto reset_out; /* Bus error. */ } else { dev_dbg(bus->dev, "SDA hung (state %x), attempting recovery\n", command); reinit_completion(&bus->cmd_complete); /* Writes 1 to 8 SCL clock cycles until SDA is released. */ writel(ASPEED_I2CD_BUS_RECOVER_CMD, bus->base + ASPEED_I2C_CMD_REG); spin_unlock_irqrestore(&bus->lock, flags); time_left = wait_for_completion_timeout( &bus->cmd_complete, bus->adap.timeout); spin_lock_irqsave(&bus->lock, flags); if (time_left == 0) goto reset_out; else if (bus->cmd_err) goto reset_out; /* Recovery failed. */ else if (!(readl(bus->base + ASPEED_I2C_CMD_REG) & ASPEED_I2CD_SDA_LINE_STS)) goto reset_out; } out: spin_unlock_irqrestore(&bus->lock, flags); return ret; reset_out: spin_unlock_irqrestore(&bus->lock, flags); return aspeed_i2c_reset(bus); } #if IS_ENABLED(CONFIG_I2C_SLAVE) static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status) { u32 command, irq_handled = 0; struct i2c_client *slave = bus->slave; u8 value; int ret; if (!slave) return 0; command = readl(bus->base + ASPEED_I2C_CMD_REG); /* Slave was requested, restart state machine. */ if (irq_status & ASPEED_I2CD_INTR_SLAVE_MATCH) { irq_handled |= ASPEED_I2CD_INTR_SLAVE_MATCH; bus->slave_state = ASPEED_I2C_SLAVE_START; } /* Slave is not currently active, irq was for someone else. */ if (bus->slave_state == ASPEED_I2C_SLAVE_INACTIVE) return irq_handled; dev_dbg(bus->dev, "slave irq status 0x%08x, cmd 0x%08x\n", irq_status, command); /* Slave was sent something. */ if (irq_status & ASPEED_I2CD_INTR_RX_DONE) { value = readl(bus->base + ASPEED_I2C_BYTE_BUF_REG) >> 8; /* Handle address frame. */ if (bus->slave_state == ASPEED_I2C_SLAVE_START) { if (value & 0x1) bus->slave_state = ASPEED_I2C_SLAVE_READ_REQUESTED; else bus->slave_state = ASPEED_I2C_SLAVE_WRITE_REQUESTED; } irq_handled |= ASPEED_I2CD_INTR_RX_DONE; } /* Slave was asked to stop. 
*/ if (irq_status & ASPEED_I2CD_INTR_NORMAL_STOP) { irq_handled |= ASPEED_I2CD_INTR_NORMAL_STOP; bus->slave_state = ASPEED_I2C_SLAVE_STOP; } if (irq_status & ASPEED_I2CD_INTR_TX_NAK && bus->slave_state == ASPEED_I2C_SLAVE_READ_PROCESSED) { irq_handled |= ASPEED_I2CD_INTR_TX_NAK; bus->slave_state = ASPEED_I2C_SLAVE_STOP; } switch (bus->slave_state) { case ASPEED_I2C_SLAVE_READ_REQUESTED: if (unlikely(irq_status & ASPEED_I2CD_INTR_TX_ACK)) dev_err(bus->dev, "Unexpected ACK on read request.\n"); bus->slave_state = ASPEED_I2C_SLAVE_READ_PROCESSED; i2c_slave_event(slave, I2C_SLAVE_READ_REQUESTED, &value); writel(value, bus->base + ASPEED_I2C_BYTE_BUF_REG); writel(ASPEED_I2CD_S_TX_CMD, bus->base + ASPEED_I2C_CMD_REG); break; case ASPEED_I2C_SLAVE_READ_PROCESSED: if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_ACK))) { dev_err(bus->dev, "Expected ACK after processed read.\n"); break; } irq_handled |= ASPEED_I2CD_INTR_TX_ACK; i2c_slave_event(slave, I2C_SLAVE_READ_PROCESSED, &value); writel(value, bus->base + ASPEED_I2C_BYTE_BUF_REG); writel(ASPEED_I2CD_S_TX_CMD, bus->base + ASPEED_I2C_CMD_REG); break; case ASPEED_I2C_SLAVE_WRITE_REQUESTED: bus->slave_state = ASPEED_I2C_SLAVE_WRITE_RECEIVED; ret = i2c_slave_event(slave, I2C_SLAVE_WRITE_REQUESTED, &value); /* * Slave ACK's on this address phase already but as the backend driver * returns an errno, the bus driver should nack the next incoming byte. */ if (ret < 0) writel(ASPEED_I2CD_M_S_RX_CMD_LAST, bus->base + ASPEED_I2C_CMD_REG); break; case ASPEED_I2C_SLAVE_WRITE_RECEIVED: i2c_slave_event(slave, I2C_SLAVE_WRITE_RECEIVED, &value); break; case ASPEED_I2C_SLAVE_STOP: i2c_slave_event(slave, I2C_SLAVE_STOP, &value); bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE; break; case ASPEED_I2C_SLAVE_START: /* Slave was just started. Waiting for the next event. */; break; default: dev_err(bus->dev, "unknown slave_state: %d\n", bus->slave_state); bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE; break; } return irq_handled; } #endif /* CONFIG_I2C_SLAVE */ /* precondition: bus.lock has been acquired. */ static void aspeed_i2c_do_start(struct aspeed_i2c_bus *bus) { u32 command = ASPEED_I2CD_M_START_CMD | ASPEED_I2CD_M_TX_CMD; struct i2c_msg *msg = &bus->msgs[bus->msgs_index]; u8 slave_addr = i2c_8bit_addr_from_msg(msg); #if IS_ENABLED(CONFIG_I2C_SLAVE) /* * If it's requested in the middle of a slave session, set the master * state to 'pending' then H/W will continue handling this master * command when the bus comes back to the idle state. */ if (bus->slave_state != ASPEED_I2C_SLAVE_INACTIVE) { bus->master_state = ASPEED_I2C_MASTER_PENDING; return; } #endif /* CONFIG_I2C_SLAVE */ bus->master_state = ASPEED_I2C_MASTER_START; bus->buf_index = 0; if (msg->flags & I2C_M_RD) { command |= ASPEED_I2CD_M_RX_CMD; /* Need to let the hardware know to NACK after RX. */ if (msg->len == 1 && !(msg->flags & I2C_M_RECV_LEN)) command |= ASPEED_I2CD_M_S_RX_CMD_LAST; } writel(slave_addr, bus->base + ASPEED_I2C_BYTE_BUF_REG); writel(command, bus->base + ASPEED_I2C_CMD_REG); } /* precondition: bus.lock has been acquired. */ static void aspeed_i2c_do_stop(struct aspeed_i2c_bus *bus) { bus->master_state = ASPEED_I2C_MASTER_STOP; writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG); } /* precondition: bus.lock has been acquired. 
*/ static void aspeed_i2c_next_msg_or_stop(struct aspeed_i2c_bus *bus) { if (bus->msgs_index + 1 < bus->msgs_count) { bus->msgs_index++; aspeed_i2c_do_start(bus); } else { aspeed_i2c_do_stop(bus); } } static int aspeed_i2c_is_irq_error(u32 irq_status) { if (irq_status & ASPEED_I2CD_INTR_ARBIT_LOSS) return -EAGAIN; if (irq_status & (ASPEED_I2CD_INTR_SDA_DL_TIMEOUT | ASPEED_I2CD_INTR_SCL_TIMEOUT)) return -EBUSY; if (irq_status & (ASPEED_I2CD_INTR_ABNORMAL)) return -EPROTO; return 0; } static u32 aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus, u32 irq_status) { u32 irq_handled = 0, command = 0; struct i2c_msg *msg; u8 recv_byte; int ret; if (irq_status & ASPEED_I2CD_INTR_BUS_RECOVER_DONE) { bus->master_state = ASPEED_I2C_MASTER_INACTIVE; irq_handled |= ASPEED_I2CD_INTR_BUS_RECOVER_DONE; goto out_complete; } /* * We encountered an interrupt that reports an error: the hardware * should clear the command queue effectively taking us back to the * INACTIVE state. */ ret = aspeed_i2c_is_irq_error(irq_status); if (ret) { dev_dbg(bus->dev, "received error interrupt: 0x%08x\n", irq_status); irq_handled |= (irq_status & ASPEED_I2CD_INTR_MASTER_ERRORS); if (bus->master_state != ASPEED_I2C_MASTER_INACTIVE) { bus->cmd_err = ret; bus->master_state = ASPEED_I2C_MASTER_INACTIVE; goto out_complete; } } /* Master is not currently active, irq was for someone else. */ if (bus->master_state == ASPEED_I2C_MASTER_INACTIVE || bus->master_state == ASPEED_I2C_MASTER_PENDING) goto out_no_complete; /* We are in an invalid state; reset bus to a known state. */ if (!bus->msgs) { dev_err(bus->dev, "bus in unknown state. irq_status: 0x%x\n", irq_status); bus->cmd_err = -EIO; if (bus->master_state != ASPEED_I2C_MASTER_STOP && bus->master_state != ASPEED_I2C_MASTER_INACTIVE) aspeed_i2c_do_stop(bus); goto out_no_complete; } msg = &bus->msgs[bus->msgs_index]; /* * START is a special case because we still have to handle a subsequent * TX or RX immediately after we handle it, so we handle it here and * then update the state and handle the new state below. */ if (bus->master_state == ASPEED_I2C_MASTER_START) { #if IS_ENABLED(CONFIG_I2C_SLAVE) /* * If a peer master starts a xfer immediately after it queues a * master command, clear the queued master command and change * its state to 'pending'. To simplify handling of pending * cases, it uses S/W solution instead of H/W command queue * handling. 
*/ if (unlikely(irq_status & ASPEED_I2CD_INTR_SLAVE_MATCH)) { writel(readl(bus->base + ASPEED_I2C_CMD_REG) & ~ASPEED_I2CD_MASTER_CMDS_MASK, bus->base + ASPEED_I2C_CMD_REG); bus->master_state = ASPEED_I2C_MASTER_PENDING; dev_dbg(bus->dev, "master goes pending due to a slave start\n"); goto out_no_complete; } #endif /* CONFIG_I2C_SLAVE */ if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_ACK))) { if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_NAK))) { bus->cmd_err = -ENXIO; bus->master_state = ASPEED_I2C_MASTER_INACTIVE; goto out_complete; } pr_devel("no slave present at %02x\n", msg->addr); irq_handled |= ASPEED_I2CD_INTR_TX_NAK; bus->cmd_err = -ENXIO; aspeed_i2c_do_stop(bus); goto out_no_complete; } irq_handled |= ASPEED_I2CD_INTR_TX_ACK; if (msg->len == 0) { /* SMBUS_QUICK */ aspeed_i2c_do_stop(bus); goto out_no_complete; } if (msg->flags & I2C_M_RD) bus->master_state = ASPEED_I2C_MASTER_RX_FIRST; else bus->master_state = ASPEED_I2C_MASTER_TX_FIRST; } switch (bus->master_state) { case ASPEED_I2C_MASTER_TX: if (unlikely(irq_status & ASPEED_I2CD_INTR_TX_NAK)) { dev_dbg(bus->dev, "slave NACKed TX\n"); irq_handled |= ASPEED_I2CD_INTR_TX_NAK; goto error_and_stop; } else if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_ACK))) { dev_err(bus->dev, "slave failed to ACK TX\n"); goto error_and_stop; } irq_handled |= ASPEED_I2CD_INTR_TX_ACK; fallthrough; case ASPEED_I2C_MASTER_TX_FIRST: if (bus->buf_index < msg->len) { bus->master_state = ASPEED_I2C_MASTER_TX; writel(msg->buf[bus->buf_index++], bus->base + ASPEED_I2C_BYTE_BUF_REG); writel(ASPEED_I2CD_M_TX_CMD, bus->base + ASPEED_I2C_CMD_REG); } else { aspeed_i2c_next_msg_or_stop(bus); } goto out_no_complete; case ASPEED_I2C_MASTER_RX_FIRST: /* RX may not have completed yet (only address cycle) */ if (!(irq_status & ASPEED_I2CD_INTR_RX_DONE)) goto out_no_complete; fallthrough; case ASPEED_I2C_MASTER_RX: if (unlikely(!(irq_status & ASPEED_I2CD_INTR_RX_DONE))) { dev_err(bus->dev, "master failed to RX\n"); goto error_and_stop; } irq_handled |= ASPEED_I2CD_INTR_RX_DONE; recv_byte = readl(bus->base + ASPEED_I2C_BYTE_BUF_REG) >> 8; msg->buf[bus->buf_index++] = recv_byte; if (msg->flags & I2C_M_RECV_LEN) { if (unlikely(recv_byte > I2C_SMBUS_BLOCK_MAX)) { bus->cmd_err = -EPROTO; aspeed_i2c_do_stop(bus); goto out_no_complete; } msg->len = recv_byte + ((msg->flags & I2C_CLIENT_PEC) ? 2 : 1); msg->flags &= ~I2C_M_RECV_LEN; } if (bus->buf_index < msg->len) { bus->master_state = ASPEED_I2C_MASTER_RX; command = ASPEED_I2CD_M_RX_CMD; if (bus->buf_index + 1 == msg->len) command |= ASPEED_I2CD_M_S_RX_CMD_LAST; writel(command, bus->base + ASPEED_I2C_CMD_REG); } else { aspeed_i2c_next_msg_or_stop(bus); } goto out_no_complete; case ASPEED_I2C_MASTER_STOP: if (unlikely(!(irq_status & ASPEED_I2CD_INTR_NORMAL_STOP))) { dev_err(bus->dev, "master failed to STOP. irq_status:0x%x\n", irq_status); bus->cmd_err = -EIO; /* Do not STOP as we have already tried. */ } else { irq_handled |= ASPEED_I2CD_INTR_NORMAL_STOP; } bus->master_state = ASPEED_I2C_MASTER_INACTIVE; goto out_complete; case ASPEED_I2C_MASTER_INACTIVE: dev_err(bus->dev, "master received interrupt 0x%08x, but is inactive\n", irq_status); bus->cmd_err = -EIO; /* Do not STOP as we should be inactive. 
*/ goto out_complete; default: WARN(1, "unknown master state\n"); bus->master_state = ASPEED_I2C_MASTER_INACTIVE; bus->cmd_err = -EINVAL; goto out_complete; } error_and_stop: bus->cmd_err = -EIO; aspeed_i2c_do_stop(bus); goto out_no_complete; out_complete: bus->msgs = NULL; if (bus->cmd_err) bus->master_xfer_result = bus->cmd_err; else bus->master_xfer_result = bus->msgs_index + 1; complete(&bus->cmd_complete); out_no_complete: return irq_handled; } static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id) { struct aspeed_i2c_bus *bus = dev_id; u32 irq_received, irq_remaining, irq_handled; spin_lock(&bus->lock); irq_received = readl(bus->base + ASPEED_I2C_INTR_STS_REG); /* Ack all interrupts except for Rx done */ writel(irq_received & ~ASPEED_I2CD_INTR_RX_DONE, bus->base + ASPEED_I2C_INTR_STS_REG); readl(bus->base + ASPEED_I2C_INTR_STS_REG); irq_received &= ASPEED_I2CD_INTR_RECV_MASK; irq_remaining = irq_received; #if IS_ENABLED(CONFIG_I2C_SLAVE) /* * In most cases, interrupt bits will be set one by one, although * multiple interrupt bits could be set at the same time. It's also * possible that master interrupt bits could be set along with slave * interrupt bits. Each case needs to be handled using corresponding * handlers depending on the current state. */ if (bus->master_state != ASPEED_I2C_MASTER_INACTIVE && bus->master_state != ASPEED_I2C_MASTER_PENDING) { irq_handled = aspeed_i2c_master_irq(bus, irq_remaining); irq_remaining &= ~irq_handled; if (irq_remaining) irq_handled |= aspeed_i2c_slave_irq(bus, irq_remaining); } else { irq_handled = aspeed_i2c_slave_irq(bus, irq_remaining); irq_remaining &= ~irq_handled; if (irq_remaining) irq_handled |= aspeed_i2c_master_irq(bus, irq_remaining); } /* * Start a pending master command at here if a slave operation is * completed. */ if (bus->master_state == ASPEED_I2C_MASTER_PENDING && bus->slave_state == ASPEED_I2C_SLAVE_INACTIVE) aspeed_i2c_do_start(bus); #else irq_handled = aspeed_i2c_master_irq(bus, irq_remaining); #endif /* CONFIG_I2C_SLAVE */ irq_remaining &= ~irq_handled; if (irq_remaining) dev_err(bus->dev, "irq handled != irq. expected 0x%08x, but was 0x%08x\n", irq_received, irq_handled); /* Ack Rx done */ if (irq_received & ASPEED_I2CD_INTR_RX_DONE) { writel(ASPEED_I2CD_INTR_RX_DONE, bus->base + ASPEED_I2C_INTR_STS_REG); readl(bus->base + ASPEED_I2C_INTR_STS_REG); } spin_unlock(&bus->lock); return irq_remaining ? IRQ_NONE : IRQ_HANDLED; } static int aspeed_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct aspeed_i2c_bus *bus = i2c_get_adapdata(adap); unsigned long time_left, flags; spin_lock_irqsave(&bus->lock, flags); bus->cmd_err = 0; /* If bus is busy in a single master environment, attempt recovery. */ if (!bus->multi_master && (readl(bus->base + ASPEED_I2C_CMD_REG) & ASPEED_I2CD_BUS_BUSY_STS)) { int ret; spin_unlock_irqrestore(&bus->lock, flags); ret = aspeed_i2c_recover_bus(bus); if (ret) return ret; spin_lock_irqsave(&bus->lock, flags); } bus->cmd_err = 0; bus->msgs = msgs; bus->msgs_index = 0; bus->msgs_count = num; reinit_completion(&bus->cmd_complete); aspeed_i2c_do_start(bus); spin_unlock_irqrestore(&bus->lock, flags); time_left = wait_for_completion_timeout(&bus->cmd_complete, bus->adap.timeout); if (time_left == 0) { /* * In a multi-master setup, if a timeout occurs, attempt * recovery. But if the bus is idle, we still need to reset the * i2c controller to clear the remaining interrupts. 
*/ if (bus->multi_master && (readl(bus->base + ASPEED_I2C_CMD_REG) & ASPEED_I2CD_BUS_BUSY_STS)) aspeed_i2c_recover_bus(bus); else aspeed_i2c_reset(bus); /* * If timed out and the state is still pending, drop the pending * master command. */ spin_lock_irqsave(&bus->lock, flags); if (bus->master_state == ASPEED_I2C_MASTER_PENDING) bus->master_state = ASPEED_I2C_MASTER_INACTIVE; spin_unlock_irqrestore(&bus->lock, flags); return -ETIMEDOUT; } return bus->master_xfer_result; } static u32 aspeed_i2c_functionality(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SMBUS_BLOCK_DATA; } #if IS_ENABLED(CONFIG_I2C_SLAVE) /* precondition: bus.lock has been acquired. */ static void __aspeed_i2c_reg_slave(struct aspeed_i2c_bus *bus, u16 slave_addr) { u32 addr_reg_val, func_ctrl_reg_val; /* * Set slave addr. Reserved bits can all safely be written with zeros * on all of ast2[456]00, so zero everything else to ensure we only * enable a single slave address (ast2500 has two, ast2600 has three, * the enable bits for which are also in this register) so that we don't * end up with additional phantom devices responding on the bus. */ addr_reg_val = slave_addr & ASPEED_I2CD_DEV_ADDR_MASK; writel(addr_reg_val, bus->base + ASPEED_I2C_DEV_ADDR_REG); /* Turn on slave mode. */ func_ctrl_reg_val = readl(bus->base + ASPEED_I2C_FUN_CTRL_REG); func_ctrl_reg_val |= ASPEED_I2CD_SLAVE_EN; writel(func_ctrl_reg_val, bus->base + ASPEED_I2C_FUN_CTRL_REG); } static int aspeed_i2c_reg_slave(struct i2c_client *client) { struct aspeed_i2c_bus *bus = i2c_get_adapdata(client->adapter); unsigned long flags; spin_lock_irqsave(&bus->lock, flags); if (bus->slave) { spin_unlock_irqrestore(&bus->lock, flags); return -EINVAL; } __aspeed_i2c_reg_slave(bus, client->addr); bus->slave = client; bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE; spin_unlock_irqrestore(&bus->lock, flags); return 0; } static int aspeed_i2c_unreg_slave(struct i2c_client *client) { struct aspeed_i2c_bus *bus = i2c_get_adapdata(client->adapter); u32 func_ctrl_reg_val; unsigned long flags; spin_lock_irqsave(&bus->lock, flags); if (!bus->slave) { spin_unlock_irqrestore(&bus->lock, flags); return -EINVAL; } /* Turn off slave mode. */ func_ctrl_reg_val = readl(bus->base + ASPEED_I2C_FUN_CTRL_REG); func_ctrl_reg_val &= ~ASPEED_I2CD_SLAVE_EN; writel(func_ctrl_reg_val, bus->base + ASPEED_I2C_FUN_CTRL_REG); bus->slave = NULL; spin_unlock_irqrestore(&bus->lock, flags); return 0; } #endif /* CONFIG_I2C_SLAVE */ static const struct i2c_algorithm aspeed_i2c_algo = { .master_xfer = aspeed_i2c_master_xfer, .functionality = aspeed_i2c_functionality, #if IS_ENABLED(CONFIG_I2C_SLAVE) .reg_slave = aspeed_i2c_reg_slave, .unreg_slave = aspeed_i2c_unreg_slave, #endif /* CONFIG_I2C_SLAVE */ }; static u32 aspeed_i2c_get_clk_reg_val(struct device *dev, u32 clk_high_low_mask, u32 divisor) { u32 base_clk_divisor, clk_high_low_max, clk_high, clk_low, tmp; /* * SCL_high and SCL_low represent a value 1 greater than what is stored * since a zero divider is meaningless. Thus, the max value each can * store is every bit set + 1. Since SCL_high and SCL_low are added * together (see below), the max value of both is the max value of one * them times two. 
*/ clk_high_low_max = (clk_high_low_mask + 1) * 2; /* * The actual clock frequency of SCL is: * SCL_freq = APB_freq / (base_freq * (SCL_high + SCL_low)) * = APB_freq / divisor * where base_freq is a programmable clock divider; its value is * base_freq = 1 << base_clk_divisor * SCL_high is the number of base_freq clock cycles that SCL stays high * and SCL_low is the number of base_freq clock cycles that SCL stays * low for a period of SCL. * The actual register has a minimum SCL_high and SCL_low minimum of 1; * thus, they start counting at zero. So * SCL_high = clk_high + 1 * SCL_low = clk_low + 1 * Thus, * SCL_freq = APB_freq / * ((1 << base_clk_divisor) * (clk_high + 1 + clk_low + 1)) * The documentation recommends clk_high >= clk_high_max / 2 and * clk_low >= clk_low_max / 2 - 1 when possible; this last constraint * gives us the following solution: */ base_clk_divisor = divisor > clk_high_low_max ? ilog2((divisor - 1) / clk_high_low_max) + 1 : 0; if (base_clk_divisor > ASPEED_I2CD_TIME_BASE_DIVISOR_MASK) { base_clk_divisor = ASPEED_I2CD_TIME_BASE_DIVISOR_MASK; clk_low = clk_high_low_mask; clk_high = clk_high_low_mask; dev_err(dev, "clamping clock divider: divider requested, %u, is greater than largest possible divider, %u.\n", divisor, (1 << base_clk_divisor) * clk_high_low_max); } else { tmp = (divisor + (1 << base_clk_divisor) - 1) >> base_clk_divisor; clk_low = tmp / 2; clk_high = tmp - clk_low; if (clk_high) clk_high--; if (clk_low) clk_low--; } return ((clk_high << ASPEED_I2CD_TIME_SCL_HIGH_SHIFT) & ASPEED_I2CD_TIME_SCL_HIGH_MASK) | ((clk_low << ASPEED_I2CD_TIME_SCL_LOW_SHIFT) & ASPEED_I2CD_TIME_SCL_LOW_MASK) | (base_clk_divisor & ASPEED_I2CD_TIME_BASE_DIVISOR_MASK); } static u32 aspeed_i2c_24xx_get_clk_reg_val(struct device *dev, u32 divisor) { /* * clk_high and clk_low are each 3 bits wide, so each can hold a max * value of 8 giving a clk_high_low_max of 16. */ return aspeed_i2c_get_clk_reg_val(dev, GENMASK(2, 0), divisor); } static u32 aspeed_i2c_25xx_get_clk_reg_val(struct device *dev, u32 divisor) { /* * clk_high and clk_low are each 4 bits wide, so each can hold a max * value of 16 giving a clk_high_low_max of 32. */ return aspeed_i2c_get_clk_reg_val(dev, GENMASK(3, 0), divisor); } /* precondition: bus.lock has been acquired. */ static int aspeed_i2c_init_clk(struct aspeed_i2c_bus *bus) { u32 divisor, clk_reg_val; divisor = DIV_ROUND_UP(bus->parent_clk_frequency, bus->bus_frequency); clk_reg_val = readl(bus->base + ASPEED_I2C_AC_TIMING_REG1); clk_reg_val &= (ASPEED_I2CD_TIME_TBUF_MASK | ASPEED_I2CD_TIME_THDSTA_MASK | ASPEED_I2CD_TIME_TACST_MASK); clk_reg_val |= bus->get_clk_reg_val(bus->dev, divisor); writel(clk_reg_val, bus->base + ASPEED_I2C_AC_TIMING_REG1); writel(ASPEED_NO_TIMEOUT_CTRL, bus->base + ASPEED_I2C_AC_TIMING_REG2); return 0; } /* precondition: bus.lock has been acquired. */ static int aspeed_i2c_init(struct aspeed_i2c_bus *bus, struct platform_device *pdev) { u32 fun_ctrl_reg = ASPEED_I2CD_MASTER_EN; int ret; /* Disable everything. */ writel(0, bus->base + ASPEED_I2C_FUN_CTRL_REG); ret = aspeed_i2c_init_clk(bus); if (ret < 0) return ret; if (of_property_read_bool(pdev->dev.of_node, "multi-master")) bus->multi_master = true; else fun_ctrl_reg |= ASPEED_I2CD_MULTI_MASTER_DIS; /* Enable Master Mode */ writel(readl(bus->base + ASPEED_I2C_FUN_CTRL_REG) | fun_ctrl_reg, bus->base + ASPEED_I2C_FUN_CTRL_REG); #if IS_ENABLED(CONFIG_I2C_SLAVE) /* If slave has already been registered, re-enable it. 
*/ if (bus->slave) __aspeed_i2c_reg_slave(bus, bus->slave->addr); #endif /* CONFIG_I2C_SLAVE */ /* Set interrupt generation of I2C controller */ writel(ASPEED_I2CD_INTR_ALL, bus->base + ASPEED_I2C_INTR_CTRL_REG); return 0; } static int aspeed_i2c_reset(struct aspeed_i2c_bus *bus) { struct platform_device *pdev = to_platform_device(bus->dev); unsigned long flags; int ret; spin_lock_irqsave(&bus->lock, flags); /* Disable and ack all interrupts. */ writel(0, bus->base + ASPEED_I2C_INTR_CTRL_REG); writel(0xffffffff, bus->base + ASPEED_I2C_INTR_STS_REG); ret = aspeed_i2c_init(bus, pdev); spin_unlock_irqrestore(&bus->lock, flags); return ret; } static const struct of_device_id aspeed_i2c_bus_of_table[] = { { .compatible = "aspeed,ast2400-i2c-bus", .data = aspeed_i2c_24xx_get_clk_reg_val, }, { .compatible = "aspeed,ast2500-i2c-bus", .data = aspeed_i2c_25xx_get_clk_reg_val, }, { .compatible = "aspeed,ast2600-i2c-bus", .data = aspeed_i2c_25xx_get_clk_reg_val, }, { }, }; MODULE_DEVICE_TABLE(of, aspeed_i2c_bus_of_table); static int aspeed_i2c_probe_bus(struct platform_device *pdev) { const struct of_device_id *match; struct aspeed_i2c_bus *bus; struct clk *parent_clk; int irq, ret; bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL); if (!bus) return -ENOMEM; bus->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); if (IS_ERR(bus->base)) return PTR_ERR(bus->base); parent_clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(parent_clk)) return PTR_ERR(parent_clk); bus->parent_clk_frequency = clk_get_rate(parent_clk); /* We just need the clock rate, we don't actually use the clk object. */ devm_clk_put(&pdev->dev, parent_clk); bus->rst = devm_reset_control_get_shared(&pdev->dev, NULL); if (IS_ERR(bus->rst)) { dev_err(&pdev->dev, "missing or invalid reset controller device tree entry\n"); return PTR_ERR(bus->rst); } reset_control_deassert(bus->rst); ret = of_property_read_u32(pdev->dev.of_node, "bus-frequency", &bus->bus_frequency); if (ret < 0) { dev_err(&pdev->dev, "Could not read bus-frequency property\n"); bus->bus_frequency = I2C_MAX_STANDARD_MODE_FREQ; } match = of_match_node(aspeed_i2c_bus_of_table, pdev->dev.of_node); if (!match) bus->get_clk_reg_val = aspeed_i2c_24xx_get_clk_reg_val; else bus->get_clk_reg_val = (u32 (*)(struct device *, u32)) match->data; /* Initialize the I2C adapter */ spin_lock_init(&bus->lock); init_completion(&bus->cmd_complete); bus->adap.owner = THIS_MODULE; bus->adap.retries = 0; bus->adap.algo = &aspeed_i2c_algo; bus->adap.dev.parent = &pdev->dev; bus->adap.dev.of_node = pdev->dev.of_node; strscpy(bus->adap.name, pdev->name, sizeof(bus->adap.name)); i2c_set_adapdata(&bus->adap, bus); bus->dev = &pdev->dev; /* Clean up any left over interrupt state. */ writel(0, bus->base + ASPEED_I2C_INTR_CTRL_REG); writel(0xffffffff, bus->base + ASPEED_I2C_INTR_STS_REG); /* * bus.lock does not need to be held because the interrupt handler has * not been enabled yet. */ ret = aspeed_i2c_init(bus, pdev); if (ret < 0) return ret; irq = irq_of_parse_and_map(pdev->dev.of_node, 0); ret = devm_request_irq(&pdev->dev, irq, aspeed_i2c_bus_irq, 0, dev_name(&pdev->dev), bus); if (ret < 0) return ret; ret = i2c_add_adapter(&bus->adap); if (ret < 0) return ret; platform_set_drvdata(pdev, bus); dev_info(bus->dev, "i2c bus %d registered, irq %d\n", bus->adap.nr, irq); return 0; } static void aspeed_i2c_remove_bus(struct platform_device *pdev) { struct aspeed_i2c_bus *bus = platform_get_drvdata(pdev); unsigned long flags; spin_lock_irqsave(&bus->lock, flags); /* Disable everything. 
*/ writel(0, bus->base + ASPEED_I2C_FUN_CTRL_REG); writel(0, bus->base + ASPEED_I2C_INTR_CTRL_REG); spin_unlock_irqrestore(&bus->lock, flags); reset_control_assert(bus->rst); i2c_del_adapter(&bus->adap); } static struct platform_driver aspeed_i2c_bus_driver = { .probe = aspeed_i2c_probe_bus, .remove_new = aspeed_i2c_remove_bus, .driver = { .name = "aspeed-i2c-bus", .of_match_table = aspeed_i2c_bus_of_table, }, }; module_platform_driver(aspeed_i2c_bus_driver); MODULE_AUTHOR("Brendan Higgins <[email protected]>"); MODULE_DESCRIPTION("Aspeed I2C Bus Driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/i2c/busses/i2c-aspeed.c
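The long comment in aspeed_i2c_get_clk_reg_val() describes how the requested divisor is split into a base clock divisor plus SCL high/low counts. The sketch below is a minimal user-space rendering of that split under the assumption of a 25xx-style 4-bit clk_high/clk_low field; it picks the base divisor with a simple loop rather than ilog2() and omits the driver's clamping path for divisors that overflow the 4-bit base field. Function and variable names here are illustrative.

#include <stdio.h>

/* Split a requested divisor the way aspeed_i2c_get_clk_reg_val() does:
 * choose the smallest base clock divisor such that clk_high + clk_low can
 * cover the divisor, then split the remainder between SCL high and low.
 * mask models ASPEED_I2CD_TIME_SCL_*_MASK width (0xf for 25xx parts). */
static void aspeed_split_divisor(unsigned int divisor, unsigned int mask,
				 unsigned int *base, unsigned int *high,
				 unsigned int *low)
{
	unsigned int clk_high_low_max = (mask + 1) * 2;
	unsigned int base_clk_divisor = 0, tmp;

	while (divisor > (clk_high_low_max << base_clk_divisor))
		base_clk_divisor++;

	tmp = (divisor + (1u << base_clk_divisor) - 1) >> base_clk_divisor;
	*low = tmp / 2;
	*high = tmp - *low;
	if (*high)
		(*high)--;
	if (*low)
		(*low)--;
	*base = base_clk_divisor;
}

int main(void)
{
	unsigned int base, high, low;

	/* e.g. a 24 MHz APB clock and a 100 kHz bus gives divisor 240 */
	aspeed_split_divisor(240, 0xf, &base, &high, &low);
	printf("base_clk_divisor=%u clk_high=%u clk_low=%u\n", base, high, low);
	return 0;
}

For divisor 240 this yields base_clk_divisor=3 and clk_high=clk_low=14, the same values the driver would pack into ASPEED_I2C_AC_TIMING_REG1.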
// SPDX-License-Identifier: GPL-2.0-only /* * SMBus driver for ACPI SMBus CMI * * Copyright (C) 2009 Crane Cai <[email protected]> */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/kernel.h> #include <linux/stddef.h> #include <linux/i2c.h> #include <linux/acpi.h> struct smbus_methods_t { char *mt_info; char *mt_sbr; char *mt_sbw; }; struct acpi_smbus_cmi { acpi_handle handle; struct i2c_adapter adapter; u8 cap_info:1; u8 cap_read:1; u8 cap_write:1; const struct smbus_methods_t *methods; }; static const struct smbus_methods_t smbus_methods = { .mt_info = "_SBI", .mt_sbr = "_SBR", .mt_sbw = "_SBW", }; /* Some IBM BIOSes omit the leading underscore */ static const struct smbus_methods_t ibm_smbus_methods = { .mt_info = "SBI_", .mt_sbr = "SBR_", .mt_sbw = "SBW_", }; static const struct acpi_device_id acpi_smbus_cmi_ids[] = { {"SMBUS01", (kernel_ulong_t)&smbus_methods}, {ACPI_SMBUS_IBM_HID, (kernel_ulong_t)&ibm_smbus_methods}, {ACPI_SMBUS_MS_HID, (kernel_ulong_t)&smbus_methods}, {"", 0} }; MODULE_DEVICE_TABLE(acpi, acpi_smbus_cmi_ids); #define ACPI_SMBUS_STATUS_OK 0x00 #define ACPI_SMBUS_STATUS_FAIL 0x07 #define ACPI_SMBUS_STATUS_DNAK 0x10 #define ACPI_SMBUS_STATUS_DERR 0x11 #define ACPI_SMBUS_STATUS_CMD_DENY 0x12 #define ACPI_SMBUS_STATUS_UNKNOWN 0x13 #define ACPI_SMBUS_STATUS_ACC_DENY 0x17 #define ACPI_SMBUS_STATUS_TIMEOUT 0x18 #define ACPI_SMBUS_STATUS_NOTSUP 0x19 #define ACPI_SMBUS_STATUS_BUSY 0x1a #define ACPI_SMBUS_STATUS_PEC 0x1f #define ACPI_SMBUS_PRTCL_WRITE 0x00 #define ACPI_SMBUS_PRTCL_READ 0x01 #define ACPI_SMBUS_PRTCL_QUICK 0x02 #define ACPI_SMBUS_PRTCL_BYTE 0x04 #define ACPI_SMBUS_PRTCL_BYTE_DATA 0x06 #define ACPI_SMBUS_PRTCL_WORD_DATA 0x08 #define ACPI_SMBUS_PRTCL_BLOCK_DATA 0x0a static int acpi_smbus_cmi_access(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { int result = 0; struct acpi_smbus_cmi *smbus_cmi = adap->algo_data; unsigned char protocol; acpi_status status = 0; struct acpi_object_list input; union acpi_object mt_params[5]; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; union acpi_object *pkg; char *method; int len = 0; dev_dbg(&adap->dev, "access size: %d %s\n", size, (read_write) ? 
"READ" : "WRITE"); switch (size) { case I2C_SMBUS_QUICK: protocol = ACPI_SMBUS_PRTCL_QUICK; command = 0; if (read_write == I2C_SMBUS_WRITE) { mt_params[3].type = ACPI_TYPE_INTEGER; mt_params[3].integer.value = 0; mt_params[4].type = ACPI_TYPE_INTEGER; mt_params[4].integer.value = 0; } break; case I2C_SMBUS_BYTE: protocol = ACPI_SMBUS_PRTCL_BYTE; if (read_write == I2C_SMBUS_WRITE) { mt_params[3].type = ACPI_TYPE_INTEGER; mt_params[3].integer.value = 0; mt_params[4].type = ACPI_TYPE_INTEGER; mt_params[4].integer.value = 0; } else { command = 0; } break; case I2C_SMBUS_BYTE_DATA: protocol = ACPI_SMBUS_PRTCL_BYTE_DATA; if (read_write == I2C_SMBUS_WRITE) { mt_params[3].type = ACPI_TYPE_INTEGER; mt_params[3].integer.value = 1; mt_params[4].type = ACPI_TYPE_INTEGER; mt_params[4].integer.value = data->byte; } break; case I2C_SMBUS_WORD_DATA: protocol = ACPI_SMBUS_PRTCL_WORD_DATA; if (read_write == I2C_SMBUS_WRITE) { mt_params[3].type = ACPI_TYPE_INTEGER; mt_params[3].integer.value = 2; mt_params[4].type = ACPI_TYPE_INTEGER; mt_params[4].integer.value = data->word; } break; case I2C_SMBUS_BLOCK_DATA: protocol = ACPI_SMBUS_PRTCL_BLOCK_DATA; if (read_write == I2C_SMBUS_WRITE) { len = data->block[0]; if (len == 0 || len > I2C_SMBUS_BLOCK_MAX) return -EINVAL; mt_params[3].type = ACPI_TYPE_INTEGER; mt_params[3].integer.value = len; mt_params[4].type = ACPI_TYPE_BUFFER; mt_params[4].buffer.length = len; mt_params[4].buffer.pointer = data->block + 1; } break; default: dev_warn(&adap->dev, "Unsupported transaction %d\n", size); return -EOPNOTSUPP; } if (read_write == I2C_SMBUS_READ) { protocol |= ACPI_SMBUS_PRTCL_READ; method = smbus_cmi->methods->mt_sbr; input.count = 3; } else { protocol |= ACPI_SMBUS_PRTCL_WRITE; method = smbus_cmi->methods->mt_sbw; input.count = 5; } input.pointer = mt_params; mt_params[0].type = ACPI_TYPE_INTEGER; mt_params[0].integer.value = protocol; mt_params[1].type = ACPI_TYPE_INTEGER; mt_params[1].integer.value = addr; mt_params[2].type = ACPI_TYPE_INTEGER; mt_params[2].integer.value = command; status = acpi_evaluate_object(smbus_cmi->handle, method, &input, &buffer); if (ACPI_FAILURE(status)) { acpi_handle_err(smbus_cmi->handle, "Failed to evaluate %s: %i\n", method, status); return -EIO; } pkg = buffer.pointer; if (pkg && pkg->type == ACPI_TYPE_PACKAGE) obj = pkg->package.elements; else { acpi_handle_err(smbus_cmi->handle, "Invalid argument type\n"); result = -EIO; goto out; } if (obj == NULL || obj->type != ACPI_TYPE_INTEGER) { acpi_handle_err(smbus_cmi->handle, "Invalid argument type\n"); result = -EIO; goto out; } result = obj->integer.value; acpi_handle_debug(smbus_cmi->handle, "%s return status: %i\n", method, result); switch (result) { case ACPI_SMBUS_STATUS_OK: result = 0; break; case ACPI_SMBUS_STATUS_BUSY: result = -EBUSY; goto out; case ACPI_SMBUS_STATUS_TIMEOUT: result = -ETIMEDOUT; goto out; case ACPI_SMBUS_STATUS_DNAK: result = -ENXIO; goto out; default: result = -EIO; goto out; } if (read_write == I2C_SMBUS_WRITE || size == I2C_SMBUS_QUICK) goto out; obj = pkg->package.elements + 1; if (obj->type != ACPI_TYPE_INTEGER) { acpi_handle_err(smbus_cmi->handle, "Invalid argument type\n"); result = -EIO; goto out; } len = obj->integer.value; obj = pkg->package.elements + 2; switch (size) { case I2C_SMBUS_BYTE: case I2C_SMBUS_BYTE_DATA: case I2C_SMBUS_WORD_DATA: if (obj->type != ACPI_TYPE_INTEGER) { acpi_handle_err(smbus_cmi->handle, "Invalid argument type\n"); result = -EIO; goto out; } if (len == 2) data->word = obj->integer.value; else data->byte = obj->integer.value; 
break; case I2C_SMBUS_BLOCK_DATA: if (obj->type != ACPI_TYPE_BUFFER) { acpi_handle_err(smbus_cmi->handle, "Invalid argument type\n"); result = -EIO; goto out; } if (len == 0 || len > I2C_SMBUS_BLOCK_MAX) return -EPROTO; data->block[0] = len; memcpy(data->block + 1, obj->buffer.pointer, len); break; } out: kfree(buffer.pointer); dev_dbg(&adap->dev, "Transaction status: %i\n", result); return result; } static u32 acpi_smbus_cmi_func(struct i2c_adapter *adapter) { struct acpi_smbus_cmi *smbus_cmi = adapter->algo_data; u32 ret; ret = smbus_cmi->cap_read | smbus_cmi->cap_write ? I2C_FUNC_SMBUS_QUICK : 0; ret |= smbus_cmi->cap_read ? (I2C_FUNC_SMBUS_READ_BYTE | I2C_FUNC_SMBUS_READ_BYTE_DATA | I2C_FUNC_SMBUS_READ_WORD_DATA | I2C_FUNC_SMBUS_READ_BLOCK_DATA) : 0; ret |= smbus_cmi->cap_write ? (I2C_FUNC_SMBUS_WRITE_BYTE | I2C_FUNC_SMBUS_WRITE_BYTE_DATA | I2C_FUNC_SMBUS_WRITE_WORD_DATA | I2C_FUNC_SMBUS_WRITE_BLOCK_DATA) : 0; return ret; } static const struct i2c_algorithm acpi_smbus_cmi_algorithm = { .smbus_xfer = acpi_smbus_cmi_access, .functionality = acpi_smbus_cmi_func, }; static int acpi_smbus_cmi_add_cap(struct acpi_smbus_cmi *smbus_cmi, const char *name) { struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; struct acpi_handle *handle = smbus_cmi->handle; union acpi_object *obj; acpi_status status; if (!strcmp(name, smbus_cmi->methods->mt_info)) { status = acpi_evaluate_object(smbus_cmi->handle, smbus_cmi->methods->mt_info, NULL, &buffer); if (ACPI_FAILURE(status)) { acpi_handle_err(handle, "Failed to evaluate %s: %i\n", smbus_cmi->methods->mt_info, status); return -EIO; } obj = buffer.pointer; if (obj && obj->type == ACPI_TYPE_PACKAGE) obj = obj->package.elements; else { acpi_handle_err(handle, "Invalid argument type\n"); kfree(buffer.pointer); return -EIO; } if (obj->type != ACPI_TYPE_INTEGER) { acpi_handle_err(handle, "Invalid argument type\n"); kfree(buffer.pointer); return -EIO; } else acpi_handle_debug(handle, "SMBus CMI Version %x\n", (int)obj->integer.value); kfree(buffer.pointer); smbus_cmi->cap_info = 1; } else if (!strcmp(name, smbus_cmi->methods->mt_sbr)) smbus_cmi->cap_read = 1; else if (!strcmp(name, smbus_cmi->methods->mt_sbw)) smbus_cmi->cap_write = 1; else acpi_handle_debug(handle, "Unsupported CMI method: %s\n", name); return 0; } static acpi_status acpi_smbus_cmi_query_methods(acpi_handle handle, u32 level, void *context, void **return_value) { char node_name[5]; struct acpi_buffer buffer = { sizeof(node_name), node_name }; struct acpi_smbus_cmi *smbus_cmi = context; acpi_status status; status = acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer); if (ACPI_SUCCESS(status)) acpi_smbus_cmi_add_cap(smbus_cmi, node_name); return AE_OK; } static int smbus_cmi_probe(struct platform_device *device) { struct device *dev = &device->dev; struct acpi_smbus_cmi *smbus_cmi; int ret; smbus_cmi = kzalloc(sizeof(struct acpi_smbus_cmi), GFP_KERNEL); if (!smbus_cmi) return -ENOMEM; smbus_cmi->handle = ACPI_HANDLE(dev); smbus_cmi->methods = device_get_match_data(dev); platform_set_drvdata(device, smbus_cmi); smbus_cmi->cap_info = 0; smbus_cmi->cap_read = 0; smbus_cmi->cap_write = 0; acpi_walk_namespace(ACPI_TYPE_METHOD, smbus_cmi->handle, 1, acpi_smbus_cmi_query_methods, NULL, smbus_cmi, NULL); if (smbus_cmi->cap_info == 0) { ret = -ENODEV; goto err; } snprintf(smbus_cmi->adapter.name, sizeof(smbus_cmi->adapter.name), "SMBus CMI adapter %s", dev_name(dev)); smbus_cmi->adapter.owner = THIS_MODULE; smbus_cmi->adapter.algo = &acpi_smbus_cmi_algorithm; smbus_cmi->adapter.algo_data = smbus_cmi; 
smbus_cmi->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD; smbus_cmi->adapter.dev.parent = &device->dev; ret = i2c_add_adapter(&smbus_cmi->adapter); if (ret) { dev_err(&device->dev, "Couldn't register adapter!\n"); goto err; } return 0; err: kfree(smbus_cmi); return ret; } static void smbus_cmi_remove(struct platform_device *device) { struct acpi_smbus_cmi *smbus_cmi = platform_get_drvdata(device); i2c_del_adapter(&smbus_cmi->adapter); kfree(smbus_cmi); } static struct platform_driver smbus_cmi_driver = { .probe = smbus_cmi_probe, .remove_new = smbus_cmi_remove, .driver = { .name = "smbus_cmi", .acpi_match_table = acpi_smbus_cmi_ids, }, }; module_platform_driver(smbus_cmi_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Crane Cai <[email protected]>"); MODULE_DESCRIPTION("ACPI SMBus CMI driver");
linux-master
drivers/i2c/busses/i2c-scmi.c
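Adapters registered by drivers like the SMBus CMI one above are normally exercised from user space through the standard i2c-dev interface. The sketch below shows one plausible way to issue an SMBus byte-data read over /dev/i2c-N; the bus number 0 and slave address 0x50 are hypothetical placeholders and must match the actual system.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

int main(void)
{
	union i2c_smbus_data data;
	struct i2c_smbus_ioctl_data args;
	int fd;

	/* Bus number and slave address are assumptions for illustration. */
	fd = open("/dev/i2c-0", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, I2C_SLAVE, 0x50) < 0) {
		perror("I2C_SLAVE");
		close(fd);
		return 1;
	}

	/* Read one byte from register 0x00 using the SMBus byte-data protocol;
	 * the kernel routes this to the adapter's smbus_xfer callback. */
	args.read_write = I2C_SMBUS_READ;
	args.command = 0x00;
	args.size = I2C_SMBUS_BYTE_DATA;
	args.data = &data;
	if (ioctl(fd, I2C_SMBUS, &args) < 0) {
		perror("I2C_SMBUS");
		close(fd);
		return 1;
	}

	printf("byte at reg 0x00: 0x%02x\n", data.byte);
	close(fd);
	return 0;
}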
// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (c) 2003 Mark M. Hoffman <[email protected]> */ /* This module must be considered BETA unless and until the chipset manufacturer releases a datasheet. The register definitions are based on the SiS630. This module relies on quirk_sis_96x_smbus (drivers/pci/quirks.c) for just about every machine for which users have reported. If this module isn't detecting your 96x south bridge, have a look there. We assume there can only be one SiS96x with one SMBus interface. */ #include <linux/module.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/stddef.h> #include <linux/ioport.h> #include <linux/i2c.h> #include <linux/acpi.h> #include <linux/io.h> /* base address register in PCI config space */ #define SIS96x_BAR 0x04 /* SiS96x SMBus registers */ #define SMB_STS 0x00 #define SMB_EN 0x01 #define SMB_CNT 0x02 #define SMB_HOST_CNT 0x03 #define SMB_ADDR 0x04 #define SMB_CMD 0x05 #define SMB_PCOUNT 0x06 #define SMB_COUNT 0x07 #define SMB_BYTE 0x08 #define SMB_DEV_ADDR 0x10 #define SMB_DB0 0x11 #define SMB_DB1 0x12 #define SMB_SAA 0x13 /* register count for request_region */ #define SMB_IOSIZE 0x20 /* Other settings */ #define MAX_TIMEOUT 500 /* SiS96x SMBus constants */ #define SIS96x_QUICK 0x00 #define SIS96x_BYTE 0x01 #define SIS96x_BYTE_DATA 0x02 #define SIS96x_WORD_DATA 0x03 #define SIS96x_PROC_CALL 0x04 #define SIS96x_BLOCK_DATA 0x05 static struct pci_driver sis96x_driver; static struct i2c_adapter sis96x_adapter; static u16 sis96x_smbus_base; static inline u8 sis96x_read(u8 reg) { return inb(sis96x_smbus_base + reg) ; } static inline void sis96x_write(u8 reg, u8 data) { outb(data, sis96x_smbus_base + reg) ; } /* Execute a SMBus transaction. int size is from SIS96x_QUICK to SIS96x_BLOCK_DATA */ static int sis96x_transaction(int size) { int temp; int result = 0; int timeout = 0; dev_dbg(&sis96x_adapter.dev, "SMBus transaction %d\n", size); /* Make sure the SMBus host is ready to start transmitting */ if (((temp = sis96x_read(SMB_CNT)) & 0x03) != 0x00) { dev_dbg(&sis96x_adapter.dev, "SMBus busy (0x%02x). " "Resetting...\n", temp); /* kill the transaction */ sis96x_write(SMB_HOST_CNT, 0x20); /* check it again */ if (((temp = sis96x_read(SMB_CNT)) & 0x03) != 0x00) { dev_dbg(&sis96x_adapter.dev, "Failed (0x%02x)\n", temp); return -EBUSY; } else { dev_dbg(&sis96x_adapter.dev, "Successful\n"); } } /* Turn off timeout interrupts, set fast host clock */ sis96x_write(SMB_CNT, 0x20); /* clear all (sticky) status flags */ temp = sis96x_read(SMB_STS); sis96x_write(SMB_STS, temp & 0x1e); /* start the transaction by setting bit 4 and size bits */ sis96x_write(SMB_HOST_CNT, 0x10 | (size & 0x07)); /* We will always wait for a fraction of a second! */ do { msleep(1); temp = sis96x_read(SMB_STS); } while (!(temp & 0x0e) && (timeout++ < MAX_TIMEOUT)); /* If the SMBus is still busy, we give up */ if (timeout > MAX_TIMEOUT) { dev_dbg(&sis96x_adapter.dev, "SMBus Timeout! (0x%02x)\n", temp); result = -ETIMEDOUT; } /* device error - probably missing ACK */ if (temp & 0x02) { dev_dbg(&sis96x_adapter.dev, "Failed bus transaction!\n"); result = -ENXIO; } /* bus collision */ if (temp & 0x04) { dev_dbg(&sis96x_adapter.dev, "Bus collision!\n"); result = -EIO; } /* Finish up by resetting the bus */ sis96x_write(SMB_STS, temp); if ((temp = sis96x_read(SMB_STS))) { dev_dbg(&sis96x_adapter.dev, "Failed reset at " "end of transaction! (0x%02x)\n", temp); } return result; } /* Return negative errno on error. 
*/ static s32 sis96x_access(struct i2c_adapter * adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data * data) { int status; switch (size) { case I2C_SMBUS_QUICK: sis96x_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01)); size = SIS96x_QUICK; break; case I2C_SMBUS_BYTE: sis96x_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01)); if (read_write == I2C_SMBUS_WRITE) sis96x_write(SMB_CMD, command); size = SIS96x_BYTE; break; case I2C_SMBUS_BYTE_DATA: sis96x_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01)); sis96x_write(SMB_CMD, command); if (read_write == I2C_SMBUS_WRITE) sis96x_write(SMB_BYTE, data->byte); size = SIS96x_BYTE_DATA; break; case I2C_SMBUS_PROC_CALL: case I2C_SMBUS_WORD_DATA: sis96x_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01)); sis96x_write(SMB_CMD, command); if (read_write == I2C_SMBUS_WRITE) { sis96x_write(SMB_BYTE, data->word & 0xff); sis96x_write(SMB_BYTE + 1, (data->word & 0xff00) >> 8); } size = (size == I2C_SMBUS_PROC_CALL ? SIS96x_PROC_CALL : SIS96x_WORD_DATA); break; default: dev_warn(&adap->dev, "Unsupported transaction %d\n", size); return -EOPNOTSUPP; } status = sis96x_transaction(size); if (status) return status; if ((size != SIS96x_PROC_CALL) && ((read_write == I2C_SMBUS_WRITE) || (size == SIS96x_QUICK))) return 0; switch (size) { case SIS96x_BYTE: case SIS96x_BYTE_DATA: data->byte = sis96x_read(SMB_BYTE); break; case SIS96x_WORD_DATA: case SIS96x_PROC_CALL: data->word = sis96x_read(SMB_BYTE) + (sis96x_read(SMB_BYTE + 1) << 8); break; } return 0; } static u32 sis96x_func(struct i2c_adapter *adapter) { return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_PROC_CALL; } static const struct i2c_algorithm smbus_algorithm = { .smbus_xfer = sis96x_access, .functionality = sis96x_func, }; static struct i2c_adapter sis96x_adapter = { .owner = THIS_MODULE, .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, .algo = &smbus_algorithm, }; static const struct pci_device_id sis96x_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_SMBUS) }, { 0, } }; MODULE_DEVICE_TABLE (pci, sis96x_ids); static int sis96x_probe(struct pci_dev *dev, const struct pci_device_id *id) { u16 ww = 0; int retval; if (sis96x_smbus_base) { dev_err(&dev->dev, "Only one device supported.\n"); return -EBUSY; } pci_read_config_word(dev, PCI_CLASS_DEVICE, &ww); if (PCI_CLASS_SERIAL_SMBUS != ww) { dev_err(&dev->dev, "Unsupported device class 0x%04x!\n", ww); return -ENODEV; } sis96x_smbus_base = pci_resource_start(dev, SIS96x_BAR); if (!sis96x_smbus_base) { dev_err(&dev->dev, "SiS96x SMBus base address " "not initialized!\n"); return -EINVAL; } dev_info(&dev->dev, "SiS96x SMBus base address: 0x%04x\n", sis96x_smbus_base); retval = acpi_check_resource_conflict(&dev->resource[SIS96x_BAR]); if (retval) return -ENODEV; /* Everything is happy, let's grab the memory and set things up. 
*/ if (!request_region(sis96x_smbus_base, SMB_IOSIZE, sis96x_driver.name)) { dev_err(&dev->dev, "SMBus registers 0x%04x-0x%04x " "already in use!\n", sis96x_smbus_base, sis96x_smbus_base + SMB_IOSIZE - 1); sis96x_smbus_base = 0; return -EINVAL; } /* set up the sysfs linkage to our parent device */ sis96x_adapter.dev.parent = &dev->dev; snprintf(sis96x_adapter.name, sizeof(sis96x_adapter.name), "SiS96x SMBus adapter at 0x%04x", sis96x_smbus_base); if ((retval = i2c_add_adapter(&sis96x_adapter))) { dev_err(&dev->dev, "Couldn't register adapter!\n"); release_region(sis96x_smbus_base, SMB_IOSIZE); sis96x_smbus_base = 0; } return retval; } static void sis96x_remove(struct pci_dev *dev) { if (sis96x_smbus_base) { i2c_del_adapter(&sis96x_adapter); release_region(sis96x_smbus_base, SMB_IOSIZE); sis96x_smbus_base = 0; } } static struct pci_driver sis96x_driver = { .name = "sis96x_smbus", .id_table = sis96x_ids, .probe = sis96x_probe, .remove = sis96x_remove, }; module_pci_driver(sis96x_driver); MODULE_AUTHOR("Mark M. Hoffman <[email protected]>"); MODULE_DESCRIPTION("SiS96x SMBus driver"); MODULE_LICENSE("GPL");
linux-master
drivers/i2c/busses/i2c-sis96x.c
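The tail end of sis96x_transaction() maps the SMB_STS bits onto errno values. The fragment below is a simplified stand-alone mirror of that decoding: it models the timeout case with a flag instead of the driver's polling loop and ignores the register write-back, so it is a sketch of the decision logic only.

#include <stdio.h>
#include <errno.h>

/* Decode the SiS96x SMB_STS error bits the way sis96x_transaction() does:
 * bit 1 is a device error (typically a missing ACK), bit 2 a bus collision.
 * timed_out stands in for the MAX_TIMEOUT polling loop expiring. */
static int sis96x_decode_status(unsigned char sts, int timed_out)
{
	if (timed_out)
		return -ETIMEDOUT;
	if (sts & 0x02)
		return -ENXIO;	/* device error, probably missing ACK */
	if (sts & 0x04)
		return -EIO;	/* bus collision */
	return 0;
}

int main(void)
{
	printf("done=%d nack=%d collision=%d timeout=%d\n",
	       sis96x_decode_status(0x08, 0),
	       sis96x_decode_status(0x02, 0),
	       sis96x_decode_status(0x04, 0),
	       sis96x_decode_status(0x00, 1));
	return 0;
}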
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2011 NXP Semiconductors * * Code portions referenced from the i2x-pxa and i2c-pnx drivers * * Make SMBus byte and word transactions work on LPC178x/7x * Copyright (c) 2012 * Alexander Potashev, Emcraft Systems, [email protected] * Anton Protopopov, Emcraft Systems, [email protected] * * Copyright (C) 2015 Joachim Eastwood <[email protected]> */ #include <linux/clk.h> #include <linux/errno.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/sched.h> #include <linux/time.h> /* LPC24xx register offsets and bits */ #define LPC24XX_I2CONSET 0x00 #define LPC24XX_I2STAT 0x04 #define LPC24XX_I2DAT 0x08 #define LPC24XX_I2ADDR 0x0c #define LPC24XX_I2SCLH 0x10 #define LPC24XX_I2SCLL 0x14 #define LPC24XX_I2CONCLR 0x18 #define LPC24XX_AA BIT(2) #define LPC24XX_SI BIT(3) #define LPC24XX_STO BIT(4) #define LPC24XX_STA BIT(5) #define LPC24XX_I2EN BIT(6) #define LPC24XX_STO_AA (LPC24XX_STO | LPC24XX_AA) #define LPC24XX_CLEAR_ALL (LPC24XX_AA | LPC24XX_SI | LPC24XX_STO | \ LPC24XX_STA | LPC24XX_I2EN) /* I2C SCL clock has different duty cycle depending on mode */ #define I2C_STD_MODE_DUTY 46 #define I2C_FAST_MODE_DUTY 36 #define I2C_FAST_MODE_PLUS_DUTY 38 /* * 26 possible I2C status codes, but codes applicable only * to master are listed here and used in this driver */ enum { M_BUS_ERROR = 0x00, M_START = 0x08, M_REPSTART = 0x10, MX_ADDR_W_ACK = 0x18, MX_ADDR_W_NACK = 0x20, MX_DATA_W_ACK = 0x28, MX_DATA_W_NACK = 0x30, M_DATA_ARB_LOST = 0x38, MR_ADDR_R_ACK = 0x40, MR_ADDR_R_NACK = 0x48, MR_DATA_R_ACK = 0x50, MR_DATA_R_NACK = 0x58, M_I2C_IDLE = 0xf8, }; struct lpc2k_i2c { void __iomem *base; struct clk *clk; int irq; wait_queue_head_t wait; struct i2c_adapter adap; struct i2c_msg *msg; int msg_idx; int msg_status; int is_last; }; static void i2c_lpc2k_reset(struct lpc2k_i2c *i2c) { /* Will force clear all statuses */ writel(LPC24XX_CLEAR_ALL, i2c->base + LPC24XX_I2CONCLR); writel(0, i2c->base + LPC24XX_I2ADDR); writel(LPC24XX_I2EN, i2c->base + LPC24XX_I2CONSET); } static int i2c_lpc2k_clear_arb(struct lpc2k_i2c *i2c) { unsigned long timeout = jiffies + msecs_to_jiffies(1000); /* * If the transfer needs to abort for some reason, we'll try to * force a stop condition to clear any pending bus conditions */ writel(LPC24XX_STO, i2c->base + LPC24XX_I2CONSET); /* Wait for status change */ while (readl(i2c->base + LPC24XX_I2STAT) != M_I2C_IDLE) { if (time_after(jiffies, timeout)) { /* Bus was not idle, try to reset adapter */ i2c_lpc2k_reset(i2c); return -EBUSY; } cpu_relax(); } return 0; } static void i2c_lpc2k_pump_msg(struct lpc2k_i2c *i2c) { unsigned char data; u32 status; /* * I2C in the LPC2xxx series is basically a state machine. * Just run through the steps based on the current status. */ status = readl(i2c->base + LPC24XX_I2STAT); switch (status) { case M_START: case M_REPSTART: /* Start bit was just sent out, send out addr and dir */ data = i2c_8bit_addr_from_msg(i2c->msg); writel(data, i2c->base + LPC24XX_I2DAT); writel(LPC24XX_STA, i2c->base + LPC24XX_I2CONCLR); break; case MX_ADDR_W_ACK: case MX_DATA_W_ACK: /* * Address or data was sent out with an ACK. 
If there is more * data to send, send it now */ if (i2c->msg_idx < i2c->msg->len) { writel(i2c->msg->buf[i2c->msg_idx], i2c->base + LPC24XX_I2DAT); } else if (i2c->is_last) { /* Last message, send stop */ writel(LPC24XX_STO_AA, i2c->base + LPC24XX_I2CONSET); writel(LPC24XX_SI, i2c->base + LPC24XX_I2CONCLR); i2c->msg_status = 0; disable_irq_nosync(i2c->irq); } else { i2c->msg_status = 0; disable_irq_nosync(i2c->irq); } i2c->msg_idx++; break; case MR_ADDR_R_ACK: /* Receive first byte from slave */ if (i2c->msg->len == 1) { /* Last byte, return NACK */ writel(LPC24XX_AA, i2c->base + LPC24XX_I2CONCLR); } else { /* Not last byte, return ACK */ writel(LPC24XX_AA, i2c->base + LPC24XX_I2CONSET); } writel(LPC24XX_STA, i2c->base + LPC24XX_I2CONCLR); break; case MR_DATA_R_NACK: /* * The I2C shows NACK status on reads, so we need to accept * the NACK as an ACK here. This should be ok, as the real * BACK would of been caught on the address write. */ case MR_DATA_R_ACK: /* Data was received */ if (i2c->msg_idx < i2c->msg->len) { i2c->msg->buf[i2c->msg_idx] = readl(i2c->base + LPC24XX_I2DAT); } /* If transfer is done, send STOP */ if (i2c->msg_idx >= i2c->msg->len - 1 && i2c->is_last) { writel(LPC24XX_STO_AA, i2c->base + LPC24XX_I2CONSET); writel(LPC24XX_SI, i2c->base + LPC24XX_I2CONCLR); i2c->msg_status = 0; } /* Message is done */ if (i2c->msg_idx >= i2c->msg->len - 1) { i2c->msg_status = 0; disable_irq_nosync(i2c->irq); } /* * One pre-last data input, send NACK to tell the slave that * this is going to be the last data byte to be transferred. */ if (i2c->msg_idx >= i2c->msg->len - 2) { /* One byte left to receive - NACK */ writel(LPC24XX_AA, i2c->base + LPC24XX_I2CONCLR); } else { /* More than one byte left to receive - ACK */ writel(LPC24XX_AA, i2c->base + LPC24XX_I2CONSET); } writel(LPC24XX_STA, i2c->base + LPC24XX_I2CONCLR); i2c->msg_idx++; break; case MX_ADDR_W_NACK: case MX_DATA_W_NACK: case MR_ADDR_R_NACK: /* NACK processing is done */ writel(LPC24XX_STO_AA, i2c->base + LPC24XX_I2CONSET); i2c->msg_status = -ENXIO; disable_irq_nosync(i2c->irq); break; case M_DATA_ARB_LOST: /* Arbitration lost */ i2c->msg_status = -EAGAIN; /* Release the I2C bus */ writel(LPC24XX_STA | LPC24XX_STO, i2c->base + LPC24XX_I2CONCLR); disable_irq_nosync(i2c->irq); break; default: /* Unexpected statuses */ i2c->msg_status = -EIO; disable_irq_nosync(i2c->irq); break; } /* Exit on failure or all bytes transferred */ if (i2c->msg_status != -EBUSY) wake_up(&i2c->wait); /* * If `msg_status` is zero, then `lpc2k_process_msg()` * is responsible for clearing the SI flag. */ if (i2c->msg_status != 0) writel(LPC24XX_SI, i2c->base + LPC24XX_I2CONCLR); } static int lpc2k_process_msg(struct lpc2k_i2c *i2c, int msgidx) { /* A new transfer is kicked off by initiating a start condition */ if (!msgidx) { writel(LPC24XX_STA, i2c->base + LPC24XX_I2CONSET); } else { /* * A multi-message I2C transfer continues where the * previous I2C transfer left off and uses the * current condition of the I2C adapter. 
*/ if (unlikely(i2c->msg->flags & I2C_M_NOSTART)) { WARN_ON(i2c->msg->len == 0); if (!(i2c->msg->flags & I2C_M_RD)) { /* Start transmit of data */ writel(i2c->msg->buf[0], i2c->base + LPC24XX_I2DAT); i2c->msg_idx++; } } else { /* Start or repeated start */ writel(LPC24XX_STA, i2c->base + LPC24XX_I2CONSET); } writel(LPC24XX_SI, i2c->base + LPC24XX_I2CONCLR); } enable_irq(i2c->irq); /* Wait for transfer completion */ if (wait_event_timeout(i2c->wait, i2c->msg_status != -EBUSY, msecs_to_jiffies(1000)) == 0) { disable_irq_nosync(i2c->irq); return -ETIMEDOUT; } return i2c->msg_status; } static int i2c_lpc2k_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int msg_num) { struct lpc2k_i2c *i2c = i2c_get_adapdata(adap); int ret, i; u32 stat; /* Check for bus idle condition */ stat = readl(i2c->base + LPC24XX_I2STAT); if (stat != M_I2C_IDLE) { /* Something is holding the bus, try to clear it */ return i2c_lpc2k_clear_arb(i2c); } /* Process a single message at a time */ for (i = 0; i < msg_num; i++) { /* Save message pointer and current message data index */ i2c->msg = &msgs[i]; i2c->msg_idx = 0; i2c->msg_status = -EBUSY; i2c->is_last = (i == (msg_num - 1)); ret = lpc2k_process_msg(i2c, i); if (ret) return ret; } return msg_num; } static irqreturn_t i2c_lpc2k_handler(int irq, void *dev_id) { struct lpc2k_i2c *i2c = dev_id; if (readl(i2c->base + LPC24XX_I2CONSET) & LPC24XX_SI) { i2c_lpc2k_pump_msg(i2c); return IRQ_HANDLED; } return IRQ_NONE; } static u32 i2c_lpc2k_functionality(struct i2c_adapter *adap) { /* Only emulated SMBus for now */ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static const struct i2c_algorithm i2c_lpc2k_algorithm = { .master_xfer = i2c_lpc2k_xfer, .functionality = i2c_lpc2k_functionality, }; static int i2c_lpc2k_probe(struct platform_device *pdev) { struct lpc2k_i2c *i2c; u32 bus_clk_rate; u32 scl_high; u32 clkrate; int ret; i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL); if (!i2c) return -ENOMEM; i2c->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(i2c->base)) return PTR_ERR(i2c->base); i2c->irq = platform_get_irq(pdev, 0); if (i2c->irq < 0) return i2c->irq; init_waitqueue_head(&i2c->wait); i2c->clk = devm_clk_get_enabled(&pdev->dev, NULL); if (IS_ERR(i2c->clk)) { dev_err(&pdev->dev, "failed to enable clock.\n"); return PTR_ERR(i2c->clk); } ret = devm_request_irq(&pdev->dev, i2c->irq, i2c_lpc2k_handler, 0, dev_name(&pdev->dev), i2c); if (ret < 0) { dev_err(&pdev->dev, "can't request interrupt.\n"); return ret; } disable_irq_nosync(i2c->irq); /* Place controller is a known state */ i2c_lpc2k_reset(i2c); ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency", &bus_clk_rate); if (ret) bus_clk_rate = I2C_MAX_STANDARD_MODE_FREQ; clkrate = clk_get_rate(i2c->clk); if (clkrate == 0) { dev_err(&pdev->dev, "can't get I2C base clock\n"); return -EINVAL; } /* Setup I2C dividers to generate clock with proper duty cycle */ clkrate = clkrate / bus_clk_rate; if (bus_clk_rate <= I2C_MAX_STANDARD_MODE_FREQ) scl_high = (clkrate * I2C_STD_MODE_DUTY) / 100; else if (bus_clk_rate <= I2C_MAX_FAST_MODE_FREQ) scl_high = (clkrate * I2C_FAST_MODE_DUTY) / 100; else scl_high = (clkrate * I2C_FAST_MODE_PLUS_DUTY) / 100; writel(scl_high, i2c->base + LPC24XX_I2SCLH); writel(clkrate - scl_high, i2c->base + LPC24XX_I2SCLL); platform_set_drvdata(pdev, i2c); i2c_set_adapdata(&i2c->adap, i2c); i2c->adap.owner = THIS_MODULE; strscpy(i2c->adap.name, "LPC2K I2C adapter", sizeof(i2c->adap.name)); i2c->adap.algo = &i2c_lpc2k_algorithm; i2c->adap.dev.parent = &pdev->dev; 
i2c->adap.dev.of_node = pdev->dev.of_node; ret = i2c_add_adapter(&i2c->adap); if (ret < 0) return ret; dev_info(&pdev->dev, "LPC2K I2C adapter\n"); return 0; } static void i2c_lpc2k_remove(struct platform_device *dev) { struct lpc2k_i2c *i2c = platform_get_drvdata(dev); i2c_del_adapter(&i2c->adap); } static int i2c_lpc2k_suspend(struct device *dev) { struct lpc2k_i2c *i2c = dev_get_drvdata(dev); clk_disable(i2c->clk); return 0; } static int i2c_lpc2k_resume(struct device *dev) { struct lpc2k_i2c *i2c = dev_get_drvdata(dev); clk_enable(i2c->clk); i2c_lpc2k_reset(i2c); return 0; } static const struct dev_pm_ops i2c_lpc2k_dev_pm_ops = { .suspend_noirq = i2c_lpc2k_suspend, .resume_noirq = i2c_lpc2k_resume, }; static const struct of_device_id lpc2k_i2c_match[] = { { .compatible = "nxp,lpc1788-i2c" }, {}, }; MODULE_DEVICE_TABLE(of, lpc2k_i2c_match); static struct platform_driver i2c_lpc2k_driver = { .probe = i2c_lpc2k_probe, .remove_new = i2c_lpc2k_remove, .driver = { .name = "lpc2k-i2c", .pm = pm_sleep_ptr(&i2c_lpc2k_dev_pm_ops), .of_match_table = lpc2k_i2c_match, }, }; module_platform_driver(i2c_lpc2k_driver); MODULE_AUTHOR("Kevin Wells <[email protected]>"); MODULE_DESCRIPTION("I2C driver for LPC2xxx devices"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:lpc2k-i2c");
linux-master
drivers/i2c/busses/i2c-lpc2k.c
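The lpc2k driver above handles one i2c_msg at a time: lpc2k_process_msg() issues a start (or repeated start) for each message, and the state machine in i2c_lpc2k_pump_msg() only generates a stop once is_last is set. Below is a minimal user-space sketch showing how a register read reaches i2c_lpc2k_xfer() as a two-message transfer through the standard I2C_RDWR ioctl; the bus node /dev/i2c-0, slave address 0x50 and register offset 0x10 are placeholders, not values taken from the driver.

/* Sketch: combined write+read over /dev/i2c-N using I2C_RDWR. */
#include <fcntl.h>
#include <stdio.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

int main(void)
{
	uint8_t reg = 0x10;			/* placeholder register */
	uint8_t val;
	struct i2c_msg msgs[2] = {
		{ .addr = 0x50, .flags = 0,        .len = 1, .buf = &reg },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = &val },
	};
	struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 2 };
	int fd = open("/dev/i2c-0", O_RDWR);	/* placeholder bus node */

	if (fd < 0)
		return 1;
	/* The adapter sees two messages: a repeated start between them and
	 * a stop only after the second one (is_last in the driver). */
	if (ioctl(fd, I2C_RDWR, &xfer) < 0) {
		perror("I2C_RDWR");
		close(fd);
		return 1;
	}
	printf("reg 0x%02x = 0x%02x\n", reg, val);
	close(fd);
	return 0;
}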
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright Intel Corporation (C) 2017. * * Based on the i2c-axxia.c driver. */ #include <linux/clk.h> #include <linux/clkdev.h> #include <linux/err.h> #include <linux/i2c.h> #include <linux/iopoll.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/platform_device.h> #define ALTR_I2C_TFR_CMD 0x00 /* Transfer Command register */ #define ALTR_I2C_TFR_CMD_STA BIT(9) /* send START before byte */ #define ALTR_I2C_TFR_CMD_STO BIT(8) /* send STOP after byte */ #define ALTR_I2C_TFR_CMD_RW_D BIT(0) /* Direction of transfer */ #define ALTR_I2C_RX_DATA 0x04 /* RX data FIFO register */ #define ALTR_I2C_CTRL 0x08 /* Control register */ #define ALTR_I2C_CTRL_RXT_SHFT 4 /* RX FIFO Threshold */ #define ALTR_I2C_CTRL_TCT_SHFT 2 /* TFER CMD FIFO Threshold */ #define ALTR_I2C_CTRL_BSPEED BIT(1) /* Bus Speed (1=Fast) */ #define ALTR_I2C_CTRL_EN BIT(0) /* Enable Core (1=Enable) */ #define ALTR_I2C_ISER 0x0C /* Interrupt Status Enable register */ #define ALTR_I2C_ISER_RXOF_EN BIT(4) /* Enable RX OVERFLOW IRQ */ #define ALTR_I2C_ISER_ARB_EN BIT(3) /* Enable ARB LOST IRQ */ #define ALTR_I2C_ISER_NACK_EN BIT(2) /* Enable NACK DET IRQ */ #define ALTR_I2C_ISER_RXRDY_EN BIT(1) /* Enable RX Ready IRQ */ #define ALTR_I2C_ISER_TXRDY_EN BIT(0) /* Enable TX Ready IRQ */ #define ALTR_I2C_ISR 0x10 /* Interrupt Status register */ #define ALTR_I2C_ISR_RXOF BIT(4) /* RX OVERFLOW IRQ */ #define ALTR_I2C_ISR_ARB BIT(3) /* ARB LOST IRQ */ #define ALTR_I2C_ISR_NACK BIT(2) /* NACK DET IRQ */ #define ALTR_I2C_ISR_RXRDY BIT(1) /* RX Ready IRQ */ #define ALTR_I2C_ISR_TXRDY BIT(0) /* TX Ready IRQ */ #define ALTR_I2C_STATUS 0x14 /* Status register */ #define ALTR_I2C_STAT_CORE BIT(0) /* Core Status (0=idle) */ #define ALTR_I2C_TC_FIFO_LVL 0x18 /* Transfer FIFO LVL register */ #define ALTR_I2C_RX_FIFO_LVL 0x1C /* Receive FIFO LVL register */ #define ALTR_I2C_SCL_LOW 0x20 /* SCL low count register */ #define ALTR_I2C_SCL_HIGH 0x24 /* SCL high count register */ #define ALTR_I2C_SDA_HOLD 0x28 /* SDA hold count register */ #define ALTR_I2C_ALL_IRQ (ALTR_I2C_ISR_RXOF | ALTR_I2C_ISR_ARB | \ ALTR_I2C_ISR_NACK | ALTR_I2C_ISR_RXRDY | \ ALTR_I2C_ISR_TXRDY) #define ALTR_I2C_THRESHOLD 0 /* IRQ Threshold at 1 element */ #define ALTR_I2C_DFLT_FIFO_SZ 4 #define ALTR_I2C_TIMEOUT 100000 /* 100ms */ #define ALTR_I2C_XFER_TIMEOUT (msecs_to_jiffies(250)) /** * struct altr_i2c_dev - I2C device context * @base: pointer to register struct * @msg: pointer to current message * @msg_len: number of bytes transferred in msg * @msg_err: error code for completed message * @msg_complete: xfer completion object * @dev: device reference * @adapter: core i2c abstraction * @i2c_clk: clock reference for i2c input clock * @bus_clk_rate: current i2c bus clock rate * @buf: ptr to msg buffer for easier use. * @fifo_size: size of the FIFO passed in. * @isr_mask: cached copy of local ISR enables. * @isr_status: cached copy of local ISR status. * @isr_mutex: mutex for IRQ thread. 
*/ struct altr_i2c_dev { void __iomem *base; struct i2c_msg *msg; size_t msg_len; int msg_err; struct completion msg_complete; struct device *dev; struct i2c_adapter adapter; struct clk *i2c_clk; u32 bus_clk_rate; u8 *buf; u32 fifo_size; u32 isr_mask; u32 isr_status; struct mutex isr_mutex; }; static void altr_i2c_int_enable(struct altr_i2c_dev *idev, u32 mask, bool enable) { u32 int_en; int_en = readl(idev->base + ALTR_I2C_ISER); if (enable) idev->isr_mask = int_en | mask; else idev->isr_mask = int_en & ~mask; writel(idev->isr_mask, idev->base + ALTR_I2C_ISER); } static void altr_i2c_int_clear(struct altr_i2c_dev *idev, u32 mask) { u32 int_en = readl(idev->base + ALTR_I2C_ISR); writel(int_en | mask, idev->base + ALTR_I2C_ISR); } static void altr_i2c_core_disable(struct altr_i2c_dev *idev) { u32 tmp = readl(idev->base + ALTR_I2C_CTRL); writel(tmp & ~ALTR_I2C_CTRL_EN, idev->base + ALTR_I2C_CTRL); } static void altr_i2c_core_enable(struct altr_i2c_dev *idev) { u32 tmp = readl(idev->base + ALTR_I2C_CTRL); writel(tmp | ALTR_I2C_CTRL_EN, idev->base + ALTR_I2C_CTRL); } static void altr_i2c_reset(struct altr_i2c_dev *idev) { altr_i2c_core_disable(idev); altr_i2c_core_enable(idev); } static inline void altr_i2c_stop(struct altr_i2c_dev *idev) { writel(ALTR_I2C_TFR_CMD_STO, idev->base + ALTR_I2C_TFR_CMD); } static void altr_i2c_init(struct altr_i2c_dev *idev) { u32 divisor = clk_get_rate(idev->i2c_clk) / idev->bus_clk_rate; u32 clk_mhz = clk_get_rate(idev->i2c_clk) / 1000000; u32 tmp = (ALTR_I2C_THRESHOLD << ALTR_I2C_CTRL_RXT_SHFT) | (ALTR_I2C_THRESHOLD << ALTR_I2C_CTRL_TCT_SHFT); u32 t_high, t_low; if (idev->bus_clk_rate <= I2C_MAX_STANDARD_MODE_FREQ) { tmp &= ~ALTR_I2C_CTRL_BSPEED; /* Standard mode SCL 50/50 */ t_high = divisor * 1 / 2; t_low = divisor * 1 / 2; } else { tmp |= ALTR_I2C_CTRL_BSPEED; /* Fast mode SCL 33/66 */ t_high = divisor * 1 / 3; t_low = divisor * 2 / 3; } writel(tmp, idev->base + ALTR_I2C_CTRL); dev_dbg(idev->dev, "rate=%uHz per_clk=%uMHz -> ratio=1:%u\n", idev->bus_clk_rate, clk_mhz, divisor); /* Reset controller */ altr_i2c_reset(idev); /* SCL High Time */ writel(t_high, idev->base + ALTR_I2C_SCL_HIGH); /* SCL Low Time */ writel(t_low, idev->base + ALTR_I2C_SCL_LOW); /* SDA Hold Time, 300ns */ writel(3 * clk_mhz / 10, idev->base + ALTR_I2C_SDA_HOLD); /* Mask all master interrupt bits */ altr_i2c_int_enable(idev, ALTR_I2C_ALL_IRQ, false); } /* * altr_i2c_transfer - On the last byte to be transmitted, send * a Stop bit on the last byte. */ static void altr_i2c_transfer(struct altr_i2c_dev *idev, u32 data) { /* On the last byte to be transmitted, send STOP */ if (idev->msg_len == 1) data |= ALTR_I2C_TFR_CMD_STO; if (idev->msg_len > 0) writel(data, idev->base + ALTR_I2C_TFR_CMD); } /* * altr_i2c_empty_rx_fifo - Fetch data from RX FIFO until end of * transfer. Send a Stop bit on the last byte. */ static void altr_i2c_empty_rx_fifo(struct altr_i2c_dev *idev) { size_t rx_fifo_avail = readl(idev->base + ALTR_I2C_RX_FIFO_LVL); int bytes_to_transfer = min(rx_fifo_avail, idev->msg_len); while (bytes_to_transfer-- > 0) { *idev->buf++ = readl(idev->base + ALTR_I2C_RX_DATA); idev->msg_len--; altr_i2c_transfer(idev, 0); } } /* * altr_i2c_fill_tx_fifo - Fill TX FIFO from current message buffer. 
*/ static int altr_i2c_fill_tx_fifo(struct altr_i2c_dev *idev) { size_t tx_fifo_avail = idev->fifo_size - readl(idev->base + ALTR_I2C_TC_FIFO_LVL); int bytes_to_transfer = min(tx_fifo_avail, idev->msg_len); int ret = idev->msg_len - bytes_to_transfer; while (bytes_to_transfer-- > 0) { altr_i2c_transfer(idev, *idev->buf++); idev->msg_len--; } return ret; } static irqreturn_t altr_i2c_isr_quick(int irq, void *_dev) { struct altr_i2c_dev *idev = _dev; irqreturn_t ret = IRQ_HANDLED; /* Read IRQ status but only interested in Enabled IRQs. */ idev->isr_status = readl(idev->base + ALTR_I2C_ISR) & idev->isr_mask; if (idev->isr_status) ret = IRQ_WAKE_THREAD; return ret; } static irqreturn_t altr_i2c_isr(int irq, void *_dev) { int ret; bool read, finish = false; struct altr_i2c_dev *idev = _dev; u32 status = idev->isr_status; mutex_lock(&idev->isr_mutex); if (!idev->msg) { dev_warn(idev->dev, "unexpected interrupt\n"); altr_i2c_int_clear(idev, ALTR_I2C_ALL_IRQ); goto out; } read = (idev->msg->flags & I2C_M_RD) != 0; /* handle Lost Arbitration */ if (unlikely(status & ALTR_I2C_ISR_ARB)) { altr_i2c_int_clear(idev, ALTR_I2C_ISR_ARB); idev->msg_err = -EAGAIN; finish = true; } else if (unlikely(status & ALTR_I2C_ISR_NACK)) { dev_dbg(idev->dev, "Could not get ACK\n"); idev->msg_err = -ENXIO; altr_i2c_int_clear(idev, ALTR_I2C_ISR_NACK); altr_i2c_stop(idev); finish = true; } else if (read && unlikely(status & ALTR_I2C_ISR_RXOF)) { /* handle RX FIFO Overflow */ altr_i2c_empty_rx_fifo(idev); altr_i2c_int_clear(idev, ALTR_I2C_ISR_RXRDY); altr_i2c_stop(idev); dev_err(idev->dev, "RX FIFO Overflow\n"); finish = true; } else if (read && (status & ALTR_I2C_ISR_RXRDY)) { /* RX FIFO needs service? */ altr_i2c_empty_rx_fifo(idev); altr_i2c_int_clear(idev, ALTR_I2C_ISR_RXRDY); if (!idev->msg_len) finish = true; } else if (!read && (status & ALTR_I2C_ISR_TXRDY)) { /* TX FIFO needs service? 
*/ altr_i2c_int_clear(idev, ALTR_I2C_ISR_TXRDY); if (idev->msg_len > 0) altr_i2c_fill_tx_fifo(idev); else finish = true; } else { dev_warn(idev->dev, "Unexpected interrupt: 0x%x\n", status); altr_i2c_int_clear(idev, ALTR_I2C_ALL_IRQ); } if (finish) { /* Wait for the Core to finish */ ret = readl_poll_timeout_atomic(idev->base + ALTR_I2C_STATUS, status, !(status & ALTR_I2C_STAT_CORE), 1, ALTR_I2C_TIMEOUT); if (ret) dev_err(idev->dev, "message timeout\n"); altr_i2c_int_enable(idev, ALTR_I2C_ALL_IRQ, false); altr_i2c_int_clear(idev, ALTR_I2C_ALL_IRQ); complete(&idev->msg_complete); dev_dbg(idev->dev, "Message Complete\n"); } out: mutex_unlock(&idev->isr_mutex); return IRQ_HANDLED; } static int altr_i2c_xfer_msg(struct altr_i2c_dev *idev, struct i2c_msg *msg) { u32 imask = ALTR_I2C_ISR_RXOF | ALTR_I2C_ISR_ARB | ALTR_I2C_ISR_NACK; unsigned long time_left; u32 value; u8 addr = i2c_8bit_addr_from_msg(msg); mutex_lock(&idev->isr_mutex); idev->msg = msg; idev->msg_len = msg->len; idev->buf = msg->buf; idev->msg_err = 0; reinit_completion(&idev->msg_complete); altr_i2c_core_enable(idev); /* Make sure RX FIFO is empty */ do { readl(idev->base + ALTR_I2C_RX_DATA); } while (readl(idev->base + ALTR_I2C_RX_FIFO_LVL)); writel(ALTR_I2C_TFR_CMD_STA | addr, idev->base + ALTR_I2C_TFR_CMD); if ((msg->flags & I2C_M_RD) != 0) { imask |= ALTR_I2C_ISER_RXOF_EN | ALTR_I2C_ISER_RXRDY_EN; altr_i2c_int_enable(idev, imask, true); /* write the first byte to start the RX */ altr_i2c_transfer(idev, 0); } else { imask |= ALTR_I2C_ISR_TXRDY; altr_i2c_int_enable(idev, imask, true); altr_i2c_fill_tx_fifo(idev); } mutex_unlock(&idev->isr_mutex); time_left = wait_for_completion_timeout(&idev->msg_complete, ALTR_I2C_XFER_TIMEOUT); mutex_lock(&idev->isr_mutex); altr_i2c_int_enable(idev, imask, false); value = readl(idev->base + ALTR_I2C_STATUS) & ALTR_I2C_STAT_CORE; if (value) dev_err(idev->dev, "Core Status not IDLE...\n"); if (time_left == 0) { idev->msg_err = -ETIMEDOUT; dev_dbg(idev->dev, "Transaction timed out.\n"); } altr_i2c_core_disable(idev); mutex_unlock(&idev->isr_mutex); return idev->msg_err; } static int altr_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct altr_i2c_dev *idev = i2c_get_adapdata(adap); int i, ret; for (i = 0; i < num; i++) { ret = altr_i2c_xfer_msg(idev, msgs++); if (ret) return ret; } return num; } static u32 altr_i2c_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static const struct i2c_algorithm altr_i2c_algo = { .master_xfer = altr_i2c_xfer, .functionality = altr_i2c_func, }; static int altr_i2c_probe(struct platform_device *pdev) { struct altr_i2c_dev *idev = NULL; int irq, ret; idev = devm_kzalloc(&pdev->dev, sizeof(*idev), GFP_KERNEL); if (!idev) return -ENOMEM; idev->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(idev->base)) return PTR_ERR(idev->base); irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; idev->i2c_clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(idev->i2c_clk)) { dev_err(&pdev->dev, "missing clock\n"); return PTR_ERR(idev->i2c_clk); } idev->dev = &pdev->dev; init_completion(&idev->msg_complete); mutex_init(&idev->isr_mutex); ret = device_property_read_u32(idev->dev, "fifo-size", &idev->fifo_size); if (ret) { dev_err(&pdev->dev, "FIFO size set to default of %d\n", ALTR_I2C_DFLT_FIFO_SZ); idev->fifo_size = ALTR_I2C_DFLT_FIFO_SZ; } ret = device_property_read_u32(idev->dev, "clock-frequency", &idev->bus_clk_rate); if (ret) { dev_err(&pdev->dev, "Default to 100kHz\n"); idev->bus_clk_rate = 
I2C_MAX_STANDARD_MODE_FREQ; /* default clock rate */ } if (idev->bus_clk_rate > I2C_MAX_FAST_MODE_FREQ) { dev_err(&pdev->dev, "invalid clock-frequency %d\n", idev->bus_clk_rate); return -EINVAL; } ret = devm_request_threaded_irq(&pdev->dev, irq, altr_i2c_isr_quick, altr_i2c_isr, IRQF_ONESHOT, pdev->name, idev); if (ret) { dev_err(&pdev->dev, "failed to claim IRQ %d\n", irq); return ret; } ret = clk_prepare_enable(idev->i2c_clk); if (ret) { dev_err(&pdev->dev, "failed to enable clock\n"); return ret; } mutex_lock(&idev->isr_mutex); altr_i2c_init(idev); mutex_unlock(&idev->isr_mutex); i2c_set_adapdata(&idev->adapter, idev); strscpy(idev->adapter.name, pdev->name, sizeof(idev->adapter.name)); idev->adapter.owner = THIS_MODULE; idev->adapter.algo = &altr_i2c_algo; idev->adapter.dev.parent = &pdev->dev; idev->adapter.dev.of_node = pdev->dev.of_node; platform_set_drvdata(pdev, idev); ret = i2c_add_adapter(&idev->adapter); if (ret) { clk_disable_unprepare(idev->i2c_clk); return ret; } dev_info(&pdev->dev, "Altera SoftIP I2C Probe Complete\n"); return 0; } static void altr_i2c_remove(struct platform_device *pdev) { struct altr_i2c_dev *idev = platform_get_drvdata(pdev); clk_disable_unprepare(idev->i2c_clk); i2c_del_adapter(&idev->adapter); } /* Match table for of_platform binding */ static const struct of_device_id altr_i2c_of_match[] = { { .compatible = "altr,softip-i2c-v1.0" }, {}, }; MODULE_DEVICE_TABLE(of, altr_i2c_of_match); static struct platform_driver altr_i2c_driver = { .probe = altr_i2c_probe, .remove_new = altr_i2c_remove, .driver = { .name = "altera-i2c", .of_match_table = altr_i2c_of_match, }, }; module_platform_driver(altr_i2c_driver); MODULE_DESCRIPTION("Altera Soft IP I2C bus driver"); MODULE_AUTHOR("Thor Thayer <[email protected]>"); MODULE_LICENSE("GPL v2");
linux-master
drivers/i2c/busses/i2c-altera.c
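A worked example of the clock setup in altr_i2c_init() above: the divisor is the SCL period expressed in input-clock cycles, split 50/50 in standard mode and 33/66 in fast mode, and the SDA hold register is loaded with roughly 300 ns worth of cycles. The 100 MHz input clock and 400 kHz bus rate below are assumed figures for illustration only.

/* Standalone sketch of the altr_i2c_init() timing arithmetic. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t clk_hz = 100000000;		/* assumed input clock */
	uint32_t bus_hz = 400000;		/* assumed fast-mode bus rate */
	uint32_t divisor = clk_hz / bus_hz;	/* SCL period in input clocks */
	uint32_t clk_mhz = clk_hz / 1000000;
	uint32_t t_high, t_low;

	if (bus_hz <= 100000) {
		/* Standard mode SCL 50/50 */
		t_high = divisor / 2;
		t_low = divisor / 2;
	} else {
		/* Fast mode SCL 33/66 */
		t_high = divisor / 3;
		t_low = divisor * 2 / 3;
	}

	/* For 100 MHz / 400 kHz: HIGH=83, LOW=166, SDA_HOLD=30 (~300 ns) */
	printf("SCL_HIGH=%u SCL_LOW=%u SDA_HOLD=%u\n",
	       t_high, t_low, 3 * clk_mhz / 10);
	return 0;
}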
// SPDX-License-Identifier: GPL-2.0 /* * drivers/i2c/busses/i2c-mt7621.c * * Copyright (C) 2013 Steven Liu <[email protected]> * Copyright (C) 2016 Michael Lee <[email protected]> * Copyright (C) 2018 Jan Breuer <[email protected]> * * Improve driver for i2cdetect from i2c-tools to detect i2c devices on the bus. * (C) 2014 Sittisak <[email protected]> */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/reset.h> #define REG_SM0CFG2_REG 0x28 #define REG_SM0CTL0_REG 0x40 #define REG_SM0CTL1_REG 0x44 #define REG_SM0D0_REG 0x50 #define REG_SM0D1_REG 0x54 #define REG_PINTEN_REG 0x5c #define REG_PINTST_REG 0x60 #define REG_PINTCL_REG 0x64 /* REG_SM0CFG2_REG */ #define SM0CFG2_IS_AUTOMODE BIT(0) /* REG_SM0CTL0_REG */ #define SM0CTL0_ODRAIN BIT(31) #define SM0CTL0_CLK_DIV_MASK (0x7ff << 16) #define SM0CTL0_CLK_DIV_MAX 0x7ff #define SM0CTL0_CS_STATUS BIT(4) #define SM0CTL0_SCL_STATE BIT(3) #define SM0CTL0_SDA_STATE BIT(2) #define SM0CTL0_EN BIT(1) #define SM0CTL0_SCL_STRETCH BIT(0) /* REG_SM0CTL1_REG */ #define SM0CTL1_ACK_MASK (0xff << 16) #define SM0CTL1_PGLEN_MASK (0x7 << 8) #define SM0CTL1_PGLEN(x) ((((x) - 1) << 8) & SM0CTL1_PGLEN_MASK) #define SM0CTL1_READ (5 << 4) #define SM0CTL1_READ_LAST (4 << 4) #define SM0CTL1_STOP (3 << 4) #define SM0CTL1_WRITE (2 << 4) #define SM0CTL1_START (1 << 4) #define SM0CTL1_MODE_MASK (0x7 << 4) #define SM0CTL1_TRI BIT(0) /* timeout waiting for I2C devices to respond */ #define TIMEOUT_MS 1000 struct mtk_i2c { void __iomem *base; struct device *dev; struct i2c_adapter adap; u32 bus_freq; u32 clk_div; u32 flags; struct clk *clk; }; static int mtk_i2c_wait_idle(struct mtk_i2c *i2c) { int ret; u32 val; ret = readl_relaxed_poll_timeout(i2c->base + REG_SM0CTL1_REG, val, !(val & SM0CTL1_TRI), 10, TIMEOUT_MS * 1000); if (ret) dev_dbg(i2c->dev, "idle err(%d)\n", ret); return ret; } static void mtk_i2c_reset(struct mtk_i2c *i2c) { int ret; ret = device_reset(i2c->adap.dev.parent); if (ret) dev_err(i2c->dev, "I2C reset failed!\n"); /* * Don't set SM0CTL0_ODRAIN as its bit meaning is inverted. To * configure open-drain mode, this bit needs to be cleared. */ iowrite32(((i2c->clk_div << 16) & SM0CTL0_CLK_DIV_MASK) | SM0CTL0_EN | SM0CTL0_SCL_STRETCH, i2c->base + REG_SM0CTL0_REG); iowrite32(0, i2c->base + REG_SM0CFG2_REG); } static void mtk_i2c_dump_reg(struct mtk_i2c *i2c) { dev_dbg(i2c->dev, "SM0CFG2 %08x, SM0CTL0 %08x, SM0CTL1 %08x, SM0D0 %08x, SM0D1 %08x\n", ioread32(i2c->base + REG_SM0CFG2_REG), ioread32(i2c->base + REG_SM0CTL0_REG), ioread32(i2c->base + REG_SM0CTL1_REG), ioread32(i2c->base + REG_SM0D0_REG), ioread32(i2c->base + REG_SM0D1_REG)); } static int mtk_i2c_check_ack(struct mtk_i2c *i2c, u32 expected) { u32 ack = readl_relaxed(i2c->base + REG_SM0CTL1_REG); u32 ack_expected = (expected << 16) & SM0CTL1_ACK_MASK; return ((ack & ack_expected) == ack_expected) ? 
0 : -ENXIO; } static int mtk_i2c_master_start(struct mtk_i2c *i2c) { iowrite32(SM0CTL1_START | SM0CTL1_TRI, i2c->base + REG_SM0CTL1_REG); return mtk_i2c_wait_idle(i2c); } static int mtk_i2c_master_stop(struct mtk_i2c *i2c) { iowrite32(SM0CTL1_STOP | SM0CTL1_TRI, i2c->base + REG_SM0CTL1_REG); return mtk_i2c_wait_idle(i2c); } static int mtk_i2c_master_cmd(struct mtk_i2c *i2c, u32 cmd, int page_len) { iowrite32(cmd | SM0CTL1_TRI | SM0CTL1_PGLEN(page_len), i2c->base + REG_SM0CTL1_REG); return mtk_i2c_wait_idle(i2c); } static int mtk_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct mtk_i2c *i2c; struct i2c_msg *pmsg; u16 addr; int i, j, ret, len, page_len; u32 cmd; u32 data[2]; i2c = i2c_get_adapdata(adap); for (i = 0; i < num; i++) { pmsg = &msgs[i]; /* wait hardware idle */ ret = mtk_i2c_wait_idle(i2c); if (ret) goto err_timeout; /* start sequence */ ret = mtk_i2c_master_start(i2c); if (ret) goto err_timeout; /* write address */ if (pmsg->flags & I2C_M_TEN) { /* 10 bits address */ addr = 0xf0 | ((pmsg->addr >> 7) & 0x06); addr |= (pmsg->addr & 0xff) << 8; if (pmsg->flags & I2C_M_RD) addr |= 1; iowrite32(addr, i2c->base + REG_SM0D0_REG); ret = mtk_i2c_master_cmd(i2c, SM0CTL1_WRITE, 2); if (ret) goto err_timeout; } else { /* 7 bits address */ addr = i2c_8bit_addr_from_msg(pmsg); iowrite32(addr, i2c->base + REG_SM0D0_REG); ret = mtk_i2c_master_cmd(i2c, SM0CTL1_WRITE, 1); if (ret) goto err_timeout; } /* check address ACK */ if (!(pmsg->flags & I2C_M_IGNORE_NAK)) { ret = mtk_i2c_check_ack(i2c, BIT(0)); if (ret) goto err_ack; } /* transfer data */ for (len = pmsg->len, j = 0; len > 0; len -= 8, j += 8) { page_len = (len >= 8) ? 8 : len; if (pmsg->flags & I2C_M_RD) { cmd = (len > 8) ? SM0CTL1_READ : SM0CTL1_READ_LAST; } else { memcpy(data, &pmsg->buf[j], page_len); iowrite32(data[0], i2c->base + REG_SM0D0_REG); iowrite32(data[1], i2c->base + REG_SM0D1_REG); cmd = SM0CTL1_WRITE; } ret = mtk_i2c_master_cmd(i2c, cmd, page_len); if (ret) goto err_timeout; if (pmsg->flags & I2C_M_RD) { data[0] = ioread32(i2c->base + REG_SM0D0_REG); data[1] = ioread32(i2c->base + REG_SM0D1_REG); memcpy(&pmsg->buf[j], data, page_len); } else { if (!(pmsg->flags & I2C_M_IGNORE_NAK)) { ret = mtk_i2c_check_ack(i2c, (1 << page_len) - 1); if (ret) goto err_ack; } } } } ret = mtk_i2c_master_stop(i2c); if (ret) goto err_timeout; /* the return value is number of executed messages */ return i; err_ack: ret = mtk_i2c_master_stop(i2c); if (ret) goto err_timeout; return -ENXIO; err_timeout: mtk_i2c_dump_reg(i2c); mtk_i2c_reset(i2c); return ret; } static u32 mtk_i2c_func(struct i2c_adapter *a) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING; } static const struct i2c_algorithm mtk_i2c_algo = { .master_xfer = mtk_i2c_master_xfer, .functionality = mtk_i2c_func, }; static const struct of_device_id i2c_mtk_dt_ids[] = { { .compatible = "mediatek,mt7621-i2c" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, i2c_mtk_dt_ids); static void mtk_i2c_init(struct mtk_i2c *i2c) { i2c->clk_div = clk_get_rate(i2c->clk) / i2c->bus_freq - 1; if (i2c->clk_div < 99) i2c->clk_div = 99; if (i2c->clk_div > SM0CTL0_CLK_DIV_MAX) i2c->clk_div = SM0CTL0_CLK_DIV_MAX; mtk_i2c_reset(i2c); } static int mtk_i2c_probe(struct platform_device *pdev) { struct mtk_i2c *i2c; struct i2c_adapter *adap; int ret; i2c = devm_kzalloc(&pdev->dev, sizeof(struct mtk_i2c), GFP_KERNEL); if (!i2c) return -ENOMEM; i2c->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); if (IS_ERR(i2c->base)) return PTR_ERR(i2c->base); 
i2c->clk = devm_clk_get_enabled(&pdev->dev, NULL); if (IS_ERR(i2c->clk)) { dev_err(&pdev->dev, "Failed to enable clock\n"); return PTR_ERR(i2c->clk); } i2c->dev = &pdev->dev; if (of_property_read_u32(pdev->dev.of_node, "clock-frequency", &i2c->bus_freq)) i2c->bus_freq = I2C_MAX_STANDARD_MODE_FREQ; if (i2c->bus_freq == 0) { dev_warn(i2c->dev, "clock-frequency 0 not supported\n"); return -EINVAL; } adap = &i2c->adap; adap->owner = THIS_MODULE; adap->algo = &mtk_i2c_algo; adap->retries = 3; adap->dev.parent = &pdev->dev; i2c_set_adapdata(adap, i2c); adap->dev.of_node = pdev->dev.of_node; strscpy(adap->name, dev_name(&pdev->dev), sizeof(adap->name)); platform_set_drvdata(pdev, i2c); mtk_i2c_init(i2c); ret = i2c_add_adapter(adap); if (ret < 0) return ret; dev_info(&pdev->dev, "clock %u kHz\n", i2c->bus_freq / 1000); return 0; } static void mtk_i2c_remove(struct platform_device *pdev) { struct mtk_i2c *i2c = platform_get_drvdata(pdev); i2c_del_adapter(&i2c->adap); } static struct platform_driver mtk_i2c_driver = { .probe = mtk_i2c_probe, .remove_new = mtk_i2c_remove, .driver = { .name = "i2c-mt7621", .of_match_table = i2c_mtk_dt_ids, }, }; module_platform_driver(mtk_i2c_driver); MODULE_AUTHOR("Steven Liu"); MODULE_DESCRIPTION("MT7621 I2C host driver"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:MT7621-I2C");
linux-master
drivers/i2c/busses/i2c-mt7621.c
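A standalone sketch of the 10-bit address encoding performed in mtk_i2c_master_xfer() before the two-byte address write; the sample address 0x2a5 is arbitrary and only used to show which bytes end up on the wire.

/* Mirror of the MT7621 10-bit address packing. */
#include <stdio.h>
#include <stdint.h>

static uint16_t mt7621_encode_10bit(uint16_t addr, int read)
{
	uint16_t word;

	word = 0xf0 | ((addr >> 7) & 0x06);	/* 1111 0 A9 A8 R/W */
	word |= (addr & 0xff) << 8;		/* A7..A0 sent as second byte */
	if (read)
		word |= 1;
	return word;
}

int main(void)
{
	uint16_t word = mt7621_encode_10bit(0x2a5, 0);

	/* SM0D0 is shifted out low byte first, so page length 2 sends
	 * 0xf4 then 0xa5 for a write to 10-bit address 0x2a5. */
	printf("byte0=0x%02x byte1=0x%02x\n", word & 0xff, word >> 8);
	return 0;
}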
// SPDX-License-Identifier: GPL-2.0-or-later /* * Synopsys DesignWare I2C adapter driver (master only). * * Based on the TI DAVINCI I2C adapter driver. * * Copyright (C) 2006 Texas Instruments. * Copyright (C) 2007 MontaVista Software Inc. * Copyright (C) 2009 Provigent Ltd. * Copyright (C) 2011, 2015, 2016 Intel Corporation. */ #include <linux/acpi.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/pm_runtime.h> #include <linux/power_supply.h> #include <linux/sched.h> #include <linux/slab.h> #include "i2c-designware-core.h" #include "i2c-ccgx-ucsi.h" #define DRIVER_NAME "i2c-designware-pci" enum dw_pci_ctl_id_t { medfield, merrifield, baytrail, cherrytrail, haswell, elkhartlake, navi_amd, }; /* * This is a legacy structure to describe the hardware counters * to configure signal timings on the bus. For Device Tree platforms * one should use the respective properties and for ACPI there is * a set of ACPI methods that provide these counters. No new * platform should use this structure. */ struct dw_scl_sda_cfg { u16 ss_hcnt; u16 fs_hcnt; u16 ss_lcnt; u16 fs_lcnt; u32 sda_hold; }; struct dw_pci_controller { u32 bus_num; u32 flags; struct dw_scl_sda_cfg *scl_sda_cfg; int (*setup)(struct pci_dev *pdev, struct dw_pci_controller *c); u32 (*get_clk_rate_khz)(struct dw_i2c_dev *dev); }; /* Merrifield HCNT/LCNT/SDA hold time */ static struct dw_scl_sda_cfg mrfld_config = { .ss_hcnt = 0x2f8, .fs_hcnt = 0x87, .ss_lcnt = 0x37b, .fs_lcnt = 0x10a, }; /* BayTrail HCNT/LCNT/SDA hold time */ static struct dw_scl_sda_cfg byt_config = { .ss_hcnt = 0x200, .fs_hcnt = 0x55, .ss_lcnt = 0x200, .fs_lcnt = 0x99, .sda_hold = 0x6, }; /* Haswell HCNT/LCNT/SDA hold time */ static struct dw_scl_sda_cfg hsw_config = { .ss_hcnt = 0x01b0, .fs_hcnt = 0x48, .ss_lcnt = 0x01fb, .fs_lcnt = 0xa0, .sda_hold = 0x9, }; /* NAVI-AMD HCNT/LCNT/SDA hold time */ static struct dw_scl_sda_cfg navi_amd_config = { .ss_hcnt = 0x1ae, .ss_lcnt = 0x23a, .sda_hold = 0x9, }; static u32 mfld_get_clk_rate_khz(struct dw_i2c_dev *dev) { return 25000; } static int mfld_setup(struct pci_dev *pdev, struct dw_pci_controller *c) { struct dw_i2c_dev *dev = dev_get_drvdata(&pdev->dev); switch (pdev->device) { case 0x0817: dev->timings.bus_freq_hz = I2C_MAX_STANDARD_MODE_FREQ; fallthrough; case 0x0818: case 0x0819: c->bus_num = pdev->device - 0x817 + 3; return 0; case 0x082C: case 0x082D: case 0x082E: c->bus_num = pdev->device - 0x82C + 0; return 0; } return -ENODEV; } static int mrfld_setup(struct pci_dev *pdev, struct dw_pci_controller *c) { /* * On Intel Merrifield the user visible i2c buses are enumerated * [1..7]. So, we add 1 to shift the default range. Besides that the * first PCI slot provides 4 functions, that's why we have to add 0 to * the first slot and 4 to the next one. 
*/ switch (PCI_SLOT(pdev->devfn)) { case 8: c->bus_num = PCI_FUNC(pdev->devfn) + 0 + 1; return 0; case 9: c->bus_num = PCI_FUNC(pdev->devfn) + 4 + 1; return 0; } return -ENODEV; } static u32 ehl_get_clk_rate_khz(struct dw_i2c_dev *dev) { return 100000; } static u32 navi_amd_get_clk_rate_khz(struct dw_i2c_dev *dev) { return 100000; } static int navi_amd_setup(struct pci_dev *pdev, struct dw_pci_controller *c) { struct dw_i2c_dev *dev = dev_get_drvdata(&pdev->dev); dev->flags |= MODEL_AMD_NAVI_GPU; dev->timings.bus_freq_hz = I2C_MAX_STANDARD_MODE_FREQ; return 0; } static struct dw_pci_controller dw_pci_controllers[] = { [medfield] = { .bus_num = -1, .setup = mfld_setup, .get_clk_rate_khz = mfld_get_clk_rate_khz, }, [merrifield] = { .bus_num = -1, .scl_sda_cfg = &mrfld_config, .setup = mrfld_setup, }, [baytrail] = { .bus_num = -1, .scl_sda_cfg = &byt_config, }, [haswell] = { .bus_num = -1, .scl_sda_cfg = &hsw_config, }, [cherrytrail] = { .bus_num = -1, .scl_sda_cfg = &byt_config, }, [elkhartlake] = { .bus_num = -1, .get_clk_rate_khz = ehl_get_clk_rate_khz, }, [navi_amd] = { .bus_num = -1, .scl_sda_cfg = &navi_amd_config, .setup = navi_amd_setup, .get_clk_rate_khz = navi_amd_get_clk_rate_khz, }, }; static int __maybe_unused i2c_dw_pci_runtime_suspend(struct device *dev) { struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); i_dev->disable(i_dev); return 0; } static int __maybe_unused i2c_dw_pci_suspend(struct device *dev) { struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); i2c_mark_adapter_suspended(&i_dev->adapter); return i2c_dw_pci_runtime_suspend(dev); } static int __maybe_unused i2c_dw_pci_runtime_resume(struct device *dev) { struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); return i_dev->init(i_dev); } static int __maybe_unused i2c_dw_pci_resume(struct device *dev) { struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); int ret; ret = i2c_dw_pci_runtime_resume(dev); i2c_mark_adapter_resumed(&i_dev->adapter); return ret; } static const struct dev_pm_ops i2c_dw_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(i2c_dw_pci_suspend, i2c_dw_pci_resume) SET_RUNTIME_PM_OPS(i2c_dw_pci_runtime_suspend, i2c_dw_pci_runtime_resume, NULL) }; static const struct property_entry dgpu_properties[] = { /* USB-C doesn't power the system */ PROPERTY_ENTRY_U8("scope", POWER_SUPPLY_SCOPE_DEVICE), {} }; static const struct software_node dgpu_node = { .properties = dgpu_properties, }; static int i2c_dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct dw_i2c_dev *dev; struct i2c_adapter *adap; int r; struct dw_pci_controller *controller; struct dw_scl_sda_cfg *cfg; struct i2c_timings *t; if (id->driver_data >= ARRAY_SIZE(dw_pci_controllers)) return dev_err_probe(&pdev->dev, -EINVAL, "Invalid driver data %ld\n", id->driver_data); controller = &dw_pci_controllers[id->driver_data]; r = pcim_enable_device(pdev); if (r) return dev_err_probe(&pdev->dev, r, "Failed to enable I2C PCI device\n"); pci_set_master(pdev); r = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev)); if (r) return dev_err_probe(&pdev->dev, r, "I/O memory remapping failed\n"); dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; r = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); if (r < 0) return r; dev->get_clk_rate_khz = controller->get_clk_rate_khz; dev->base = pcim_iomap_table(pdev)[0]; dev->dev = &pdev->dev; dev->irq = pci_irq_vector(pdev, 0); dev->flags |= controller->flags; t = &dev->timings; i2c_parse_fw_timings(&pdev->dev, t, false); pci_set_drvdata(pdev, dev); if (controller->setup) { r = 
controller->setup(pdev, controller); if (r) { pci_free_irq_vectors(pdev); return r; } } i2c_dw_adjust_bus_speed(dev); if (has_acpi_companion(&pdev->dev)) i2c_dw_acpi_configure(&pdev->dev); r = i2c_dw_validate_speed(dev); if (r) { pci_free_irq_vectors(pdev); return r; } i2c_dw_configure(dev); if (controller->scl_sda_cfg) { cfg = controller->scl_sda_cfg; dev->ss_hcnt = cfg->ss_hcnt; dev->fs_hcnt = cfg->fs_hcnt; dev->ss_lcnt = cfg->ss_lcnt; dev->fs_lcnt = cfg->fs_lcnt; dev->sda_hold_time = cfg->sda_hold; } adap = &dev->adapter; adap->owner = THIS_MODULE; adap->class = 0; ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev)); adap->nr = controller->bus_num; r = i2c_dw_probe(dev); if (r) { pci_free_irq_vectors(pdev); return r; } if ((dev->flags & MODEL_MASK) == MODEL_AMD_NAVI_GPU) { dev->slave = i2c_new_ccgx_ucsi(&dev->adapter, dev->irq, &dgpu_node); if (IS_ERR(dev->slave)) return dev_err_probe(dev->dev, PTR_ERR(dev->slave), "register UCSI failed\n"); } pm_runtime_set_autosuspend_delay(&pdev->dev, 1000); pm_runtime_use_autosuspend(&pdev->dev); pm_runtime_put_autosuspend(&pdev->dev); pm_runtime_allow(&pdev->dev); return 0; } static void i2c_dw_pci_remove(struct pci_dev *pdev) { struct dw_i2c_dev *dev = pci_get_drvdata(pdev); dev->disable(dev); pm_runtime_forbid(&pdev->dev); pm_runtime_get_noresume(&pdev->dev); i2c_del_adapter(&dev->adapter); devm_free_irq(&pdev->dev, dev->irq, dev); pci_free_irq_vectors(pdev); } static const struct pci_device_id i2_designware_pci_ids[] = { /* Medfield */ { PCI_VDEVICE(INTEL, 0x0817), medfield }, { PCI_VDEVICE(INTEL, 0x0818), medfield }, { PCI_VDEVICE(INTEL, 0x0819), medfield }, { PCI_VDEVICE(INTEL, 0x082C), medfield }, { PCI_VDEVICE(INTEL, 0x082D), medfield }, { PCI_VDEVICE(INTEL, 0x082E), medfield }, /* Merrifield */ { PCI_VDEVICE(INTEL, 0x1195), merrifield }, { PCI_VDEVICE(INTEL, 0x1196), merrifield }, /* Baytrail */ { PCI_VDEVICE(INTEL, 0x0F41), baytrail }, { PCI_VDEVICE(INTEL, 0x0F42), baytrail }, { PCI_VDEVICE(INTEL, 0x0F43), baytrail }, { PCI_VDEVICE(INTEL, 0x0F44), baytrail }, { PCI_VDEVICE(INTEL, 0x0F45), baytrail }, { PCI_VDEVICE(INTEL, 0x0F46), baytrail }, { PCI_VDEVICE(INTEL, 0x0F47), baytrail }, /* Haswell */ { PCI_VDEVICE(INTEL, 0x9c61), haswell }, { PCI_VDEVICE(INTEL, 0x9c62), haswell }, /* Braswell / Cherrytrail */ { PCI_VDEVICE(INTEL, 0x22C1), cherrytrail }, { PCI_VDEVICE(INTEL, 0x22C2), cherrytrail }, { PCI_VDEVICE(INTEL, 0x22C3), cherrytrail }, { PCI_VDEVICE(INTEL, 0x22C4), cherrytrail }, { PCI_VDEVICE(INTEL, 0x22C5), cherrytrail }, { PCI_VDEVICE(INTEL, 0x22C6), cherrytrail }, { PCI_VDEVICE(INTEL, 0x22C7), cherrytrail }, /* Elkhart Lake (PSE I2C) */ { PCI_VDEVICE(INTEL, 0x4bb9), elkhartlake }, { PCI_VDEVICE(INTEL, 0x4bba), elkhartlake }, { PCI_VDEVICE(INTEL, 0x4bbb), elkhartlake }, { PCI_VDEVICE(INTEL, 0x4bbc), elkhartlake }, { PCI_VDEVICE(INTEL, 0x4bbd), elkhartlake }, { PCI_VDEVICE(INTEL, 0x4bbe), elkhartlake }, { PCI_VDEVICE(INTEL, 0x4bbf), elkhartlake }, { PCI_VDEVICE(INTEL, 0x4bc0), elkhartlake }, /* AMD NAVI */ { PCI_VDEVICE(ATI, 0x7314), navi_amd }, { PCI_VDEVICE(ATI, 0x73a4), navi_amd }, { PCI_VDEVICE(ATI, 0x73e4), navi_amd }, { PCI_VDEVICE(ATI, 0x73c4), navi_amd }, { PCI_VDEVICE(ATI, 0x7444), navi_amd }, { PCI_VDEVICE(ATI, 0x7464), navi_amd }, { 0,} }; MODULE_DEVICE_TABLE(pci, i2_designware_pci_ids); static struct pci_driver dw_i2c_driver = { .name = DRIVER_NAME, .id_table = i2_designware_pci_ids, .probe = i2c_dw_pci_probe, .remove = i2c_dw_pci_remove, .driver = { .pm = &i2c_dw_pm_ops, }, }; module_pci_driver(dw_i2c_driver); /* 
Work with hotplug and coldplug */ MODULE_ALIAS("i2c_designware-pci"); MODULE_AUTHOR("Baruch Siach <[email protected]>"); MODULE_DESCRIPTION("Synopsys DesignWare PCI I2C bus adapter"); MODULE_LICENSE("GPL");
linux-master
drivers/i2c/busses/i2c-designware-pcidrv.c
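A worked example of the fixed bus numbering computed in mfld_setup() for the Medfield PCI IDs in the table above; it mirrors the arithmetic in the driver and assumes nothing beyond it.

/* Medfield PCI device ID to fixed adapter number, per mfld_setup(). */
#include <stdio.h>
#include <stdint.h>

static int mfld_bus_num(uint16_t device)
{
	if (device >= 0x0817 && device <= 0x0819)
		return device - 0x0817 + 3;	/* i2c-3 .. i2c-5 */
	if (device >= 0x082c && device <= 0x082e)
		return device - 0x082c + 0;	/* i2c-0 .. i2c-2 */
	return -1;				/* driver returns -ENODEV */
}

int main(void)
{
	printf("0x0817 -> i2c-%d\n", mfld_bus_num(0x0817));
	printf("0x082e -> i2c-%d\n", mfld_bus_num(0x082e));
	return 0;
}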
/* * Cavium ThunderX i2c driver. * * Copyright (C) 2015,2016 Cavium Inc. * Authors: Fred Martin <[email protected]> * Jan Glauber <[email protected]> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/acpi.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/i2c-smbus.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of_irq.h> #include <linux/pci.h> #include "i2c-octeon-core.h" #define DRV_NAME "i2c-thunderx" #define PCI_DEVICE_ID_THUNDER_TWSI 0xa012 #define SYS_FREQ_DEFAULT 700000000 #define TWSI_INT_ENA_W1C 0x1028 #define TWSI_INT_ENA_W1S 0x1030 /* * Enable the CORE interrupt. * The interrupt will be asserted when there is non-STAT_IDLE state in the * SW_TWSI_EOP_TWSI_STAT register. */ static void thunder_i2c_int_enable(struct octeon_i2c *i2c) { octeon_i2c_writeq_flush(TWSI_INT_CORE_INT, i2c->twsi_base + TWSI_INT_ENA_W1S); } /* * Disable the CORE interrupt. */ static void thunder_i2c_int_disable(struct octeon_i2c *i2c) { octeon_i2c_writeq_flush(TWSI_INT_CORE_INT, i2c->twsi_base + TWSI_INT_ENA_W1C); } static void thunder_i2c_hlc_int_enable(struct octeon_i2c *i2c) { octeon_i2c_writeq_flush(TWSI_INT_ST_INT | TWSI_INT_TS_INT, i2c->twsi_base + TWSI_INT_ENA_W1S); } static void thunder_i2c_hlc_int_disable(struct octeon_i2c *i2c) { octeon_i2c_writeq_flush(TWSI_INT_ST_INT | TWSI_INT_TS_INT, i2c->twsi_base + TWSI_INT_ENA_W1C); } static u32 thunderx_i2c_functionality(struct i2c_adapter *adap) { return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK) | I2C_FUNC_SMBUS_READ_BLOCK_DATA | I2C_SMBUS_BLOCK_PROC_CALL; } static const struct i2c_algorithm thunderx_i2c_algo = { .master_xfer = octeon_i2c_xfer, .functionality = thunderx_i2c_functionality, }; static const struct i2c_adapter thunderx_i2c_ops = { .owner = THIS_MODULE, .name = "ThunderX adapter", .algo = &thunderx_i2c_algo, }; static void thunder_i2c_clock_enable(struct device *dev, struct octeon_i2c *i2c) { int ret; if (acpi_disabled) { /* DT */ i2c->clk = clk_get(dev, NULL); if (IS_ERR(i2c->clk)) { i2c->clk = NULL; goto skip; } ret = clk_prepare_enable(i2c->clk); if (ret) goto skip; i2c->sys_freq = clk_get_rate(i2c->clk); } else { /* ACPI */ device_property_read_u32(dev, "sclk", &i2c->sys_freq); } skip: if (!i2c->sys_freq) i2c->sys_freq = SYS_FREQ_DEFAULT; } static void thunder_i2c_clock_disable(struct device *dev, struct clk *clk) { if (!clk) return; clk_disable_unprepare(clk); clk_put(clk); } static int thunder_i2c_smbus_setup_of(struct octeon_i2c *i2c, struct device_node *node) { struct i2c_client *ara; if (!node) return -EINVAL; i2c->alert_data.irq = irq_of_parse_and_map(node, 0); if (!i2c->alert_data.irq) return -EINVAL; ara = i2c_new_smbus_alert_device(&i2c->adap, &i2c->alert_data); if (IS_ERR(ara)) return PTR_ERR(ara); i2c->ara = ara; return 0; } static int thunder_i2c_smbus_setup(struct octeon_i2c *i2c, struct device_node *node) { /* TODO: ACPI support */ if (!acpi_disabled) return -EOPNOTSUPP; return thunder_i2c_smbus_setup_of(i2c, node); } static void thunder_i2c_smbus_remove(struct octeon_i2c *i2c) { i2c_unregister_device(i2c->ara); } static int thunder_i2c_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent) { struct device *dev = &pdev->dev; struct octeon_i2c *i2c; int ret; i2c = devm_kzalloc(dev, sizeof(*i2c), GFP_KERNEL); if (!i2c) return -ENOMEM; i2c->roff.sw_twsi = 
0x1000; i2c->roff.twsi_int = 0x1010; i2c->roff.sw_twsi_ext = 0x1018; i2c->dev = dev; pci_set_drvdata(pdev, i2c); ret = pcim_enable_device(pdev); if (ret) return ret; ret = pci_request_regions(pdev, DRV_NAME); if (ret) return ret; i2c->twsi_base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0)); if (!i2c->twsi_base) return -EINVAL; thunder_i2c_clock_enable(dev, i2c); ret = device_property_read_u32(dev, "clock-frequency", &i2c->twsi_freq); if (ret) i2c->twsi_freq = I2C_MAX_STANDARD_MODE_FREQ; init_waitqueue_head(&i2c->queue); i2c->int_enable = thunder_i2c_int_enable; i2c->int_disable = thunder_i2c_int_disable; i2c->hlc_int_enable = thunder_i2c_hlc_int_enable; i2c->hlc_int_disable = thunder_i2c_hlc_int_disable; ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX); if (ret < 0) goto error; ret = devm_request_irq(dev, pci_irq_vector(pdev, 0), octeon_i2c_isr, 0, DRV_NAME, i2c); if (ret) goto error; ret = octeon_i2c_init_lowlevel(i2c); if (ret) goto error; octeon_i2c_set_clock(i2c); i2c->adap = thunderx_i2c_ops; i2c->adap.retries = 5; i2c->adap.class = I2C_CLASS_HWMON; i2c->adap.bus_recovery_info = &octeon_i2c_recovery_info; i2c->adap.dev.parent = dev; i2c->adap.dev.of_node = pdev->dev.of_node; i2c->adap.dev.fwnode = dev->fwnode; snprintf(i2c->adap.name, sizeof(i2c->adap.name), "Cavium ThunderX i2c adapter at %s", dev_name(dev)); i2c_set_adapdata(&i2c->adap, i2c); ret = i2c_add_adapter(&i2c->adap); if (ret) goto error; dev_info(i2c->dev, "Probed. Set system clock to %u\n", i2c->sys_freq); ret = thunder_i2c_smbus_setup(i2c, pdev->dev.of_node); if (ret) dev_info(dev, "SMBUS alert not active on this bus\n"); return 0; error: thunder_i2c_clock_disable(dev, i2c->clk); return ret; } static void thunder_i2c_remove_pci(struct pci_dev *pdev) { struct octeon_i2c *i2c = pci_get_drvdata(pdev); thunder_i2c_smbus_remove(i2c); thunder_i2c_clock_disable(&pdev->dev, i2c->clk); i2c_del_adapter(&i2c->adap); } static const struct pci_device_id thunder_i2c_pci_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_TWSI) }, { 0, } }; MODULE_DEVICE_TABLE(pci, thunder_i2c_pci_id_table); static struct pci_driver thunder_i2c_pci_driver = { .name = DRV_NAME, .id_table = thunder_i2c_pci_id_table, .probe = thunder_i2c_probe_pci, .remove = thunder_i2c_remove_pci, }; module_pci_driver(thunder_i2c_pci_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Fred Martin <[email protected]>"); MODULE_DESCRIPTION("I2C-Bus adapter for Cavium ThunderX SOC");
linux-master
drivers/i2c/busses/i2c-thunderx-pcidrv.c
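thunderx_i2c_functionality() masks I2C_FUNC_SMBUS_QUICK out of the emulated SMBus set, so quick-command probing is not advertised by this adapter. The sketch below, with /dev/i2c-1 as a placeholder bus node, checks the exported functionality from user space before attempting such probes.

/* Query adapter functionality via the I2C_FUNCS ioctl. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

int main(void)
{
	unsigned long funcs = 0;
	int fd = open("/dev/i2c-1", O_RDWR);	/* placeholder bus node */

	if (fd < 0 || ioctl(fd, I2C_FUNCS, &funcs) < 0)
		return 1;

	/* Expect "SMBus quick: no" on the ThunderX TWSI adapter. */
	printf("I2C: %s, SMBus quick: %s\n",
	       (funcs & I2C_FUNC_I2C) ? "yes" : "no",
	       (funcs & I2C_FUNC_SMBUS_QUICK) ? "yes" : "no");
	close(fd);
	return 0;
}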
// SPDX-License-Identifier: GPL-2.0 /* * drivers/i2c/busses/i2c-tegra.c * * Copyright (C) 2010 Google, Inc. * Author: Colin Cross <[email protected]> */ #include <linux/acpi.h> #include <linux/bitfield.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/ktime.h> #include <linux/module.h> #include <linux/of.h> #include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/reset.h> #define BYTES_PER_FIFO_WORD 4 #define I2C_CNFG 0x000 #define I2C_CNFG_DEBOUNCE_CNT GENMASK(14, 12) #define I2C_CNFG_PACKET_MODE_EN BIT(10) #define I2C_CNFG_NEW_MASTER_FSM BIT(11) #define I2C_CNFG_MULTI_MASTER_MODE BIT(17) #define I2C_STATUS 0x01c #define I2C_SL_CNFG 0x020 #define I2C_SL_CNFG_NACK BIT(1) #define I2C_SL_CNFG_NEWSL BIT(2) #define I2C_SL_ADDR1 0x02c #define I2C_SL_ADDR2 0x030 #define I2C_TLOW_SEXT 0x034 #define I2C_TX_FIFO 0x050 #define I2C_RX_FIFO 0x054 #define I2C_PACKET_TRANSFER_STATUS 0x058 #define I2C_FIFO_CONTROL 0x05c #define I2C_FIFO_CONTROL_TX_FLUSH BIT(1) #define I2C_FIFO_CONTROL_RX_FLUSH BIT(0) #define I2C_FIFO_CONTROL_TX_TRIG(x) (((x) - 1) << 5) #define I2C_FIFO_CONTROL_RX_TRIG(x) (((x) - 1) << 2) #define I2C_FIFO_STATUS 0x060 #define I2C_FIFO_STATUS_TX GENMASK(7, 4) #define I2C_FIFO_STATUS_RX GENMASK(3, 0) #define I2C_INT_MASK 0x064 #define I2C_INT_STATUS 0x068 #define I2C_INT_BUS_CLR_DONE BIT(11) #define I2C_INT_PACKET_XFER_COMPLETE BIT(7) #define I2C_INT_NO_ACK BIT(3) #define I2C_INT_ARBITRATION_LOST BIT(2) #define I2C_INT_TX_FIFO_DATA_REQ BIT(1) #define I2C_INT_RX_FIFO_DATA_REQ BIT(0) #define I2C_CLK_DIVISOR 0x06c #define I2C_CLK_DIVISOR_STD_FAST_MODE GENMASK(31, 16) #define I2C_CLK_DIVISOR_HSMODE GENMASK(15, 0) #define DVC_CTRL_REG1 0x000 #define DVC_CTRL_REG1_INTR_EN BIT(10) #define DVC_CTRL_REG3 0x008 #define DVC_CTRL_REG3_SW_PROG BIT(26) #define DVC_CTRL_REG3_I2C_DONE_INTR_EN BIT(30) #define DVC_STATUS 0x00c #define DVC_STATUS_I2C_DONE_INTR BIT(30) #define I2C_ERR_NONE 0x00 #define I2C_ERR_NO_ACK BIT(0) #define I2C_ERR_ARBITRATION_LOST BIT(1) #define I2C_ERR_UNKNOWN_INTERRUPT BIT(2) #define I2C_ERR_RX_BUFFER_OVERFLOW BIT(3) #define PACKET_HEADER0_HEADER_SIZE GENMASK(29, 28) #define PACKET_HEADER0_PACKET_ID GENMASK(23, 16) #define PACKET_HEADER0_CONT_ID GENMASK(15, 12) #define PACKET_HEADER0_PROTOCOL GENMASK(7, 4) #define PACKET_HEADER0_PROTOCOL_I2C 1 #define I2C_HEADER_CONT_ON_NAK BIT(21) #define I2C_HEADER_READ BIT(19) #define I2C_HEADER_10BIT_ADDR BIT(18) #define I2C_HEADER_IE_ENABLE BIT(17) #define I2C_HEADER_REPEAT_START BIT(16) #define I2C_HEADER_CONTINUE_XFER BIT(15) #define I2C_HEADER_SLAVE_ADDR_SHIFT 1 #define I2C_BUS_CLEAR_CNFG 0x084 #define I2C_BC_SCLK_THRESHOLD GENMASK(23, 16) #define I2C_BC_STOP_COND BIT(2) #define I2C_BC_TERMINATE BIT(1) #define I2C_BC_ENABLE BIT(0) #define I2C_BUS_CLEAR_STATUS 0x088 #define I2C_BC_STATUS BIT(0) #define I2C_CONFIG_LOAD 0x08c #define I2C_MSTR_CONFIG_LOAD BIT(0) #define I2C_CLKEN_OVERRIDE 0x090 #define I2C_MST_CORE_CLKEN_OVR BIT(0) #define I2C_INTERFACE_TIMING_0 0x094 #define I2C_INTERFACE_TIMING_THIGH GENMASK(13, 8) #define I2C_INTERFACE_TIMING_TLOW GENMASK(5, 0) #define I2C_INTERFACE_TIMING_1 0x098 #define I2C_INTERFACE_TIMING_TBUF GENMASK(29, 24) #define I2C_INTERFACE_TIMING_TSU_STO GENMASK(21, 16) #define 
I2C_INTERFACE_TIMING_THD_STA GENMASK(13, 8) #define I2C_INTERFACE_TIMING_TSU_STA GENMASK(5, 0) #define I2C_HS_INTERFACE_TIMING_0 0x09c #define I2C_HS_INTERFACE_TIMING_THIGH GENMASK(13, 8) #define I2C_HS_INTERFACE_TIMING_TLOW GENMASK(5, 0) #define I2C_HS_INTERFACE_TIMING_1 0x0a0 #define I2C_HS_INTERFACE_TIMING_TSU_STO GENMASK(21, 16) #define I2C_HS_INTERFACE_TIMING_THD_STA GENMASK(13, 8) #define I2C_HS_INTERFACE_TIMING_TSU_STA GENMASK(5, 0) #define I2C_MST_FIFO_CONTROL 0x0b4 #define I2C_MST_FIFO_CONTROL_RX_FLUSH BIT(0) #define I2C_MST_FIFO_CONTROL_TX_FLUSH BIT(1) #define I2C_MST_FIFO_CONTROL_RX_TRIG(x) (((x) - 1) << 4) #define I2C_MST_FIFO_CONTROL_TX_TRIG(x) (((x) - 1) << 16) #define I2C_MST_FIFO_STATUS 0x0b8 #define I2C_MST_FIFO_STATUS_TX GENMASK(23, 16) #define I2C_MST_FIFO_STATUS_RX GENMASK(7, 0) /* configuration load timeout in microseconds */ #define I2C_CONFIG_LOAD_TIMEOUT 1000000 /* packet header size in bytes */ #define I2C_PACKET_HEADER_SIZE 12 /* * I2C Controller will use PIO mode for transfers up to 32 bytes in order to * avoid DMA overhead, otherwise external APB DMA controller will be used. * Note that the actual MAX PIO length is 20 bytes because 32 bytes include * I2C_PACKET_HEADER_SIZE. */ #define I2C_PIO_MODE_PREFERRED_LEN 32 /* * msg_end_type: The bus control which needs to be sent at end of transfer. * @MSG_END_STOP: Send stop pulse. * @MSG_END_REPEAT_START: Send repeat-start. * @MSG_END_CONTINUE: Don't send stop or repeat-start. */ enum msg_end_type { MSG_END_STOP, MSG_END_REPEAT_START, MSG_END_CONTINUE, }; /** * struct tegra_i2c_hw_feature : per hardware generation features * @has_continue_xfer_support: continue-transfer supported * @has_per_pkt_xfer_complete_irq: Has enable/disable capability for transfer * completion interrupt on per packet basis. * @has_config_load_reg: Has the config load register to load the new * configuration. * @clk_divisor_hs_mode: Clock divisor in HS mode. * @clk_divisor_std_mode: Clock divisor in standard mode. It is * applicable if there is no fast clock source i.e. single clock * source. * @clk_divisor_fast_mode: Clock divisor in fast mode. It is * applicable if there is no fast clock source i.e. single clock * source. * @clk_divisor_fast_plus_mode: Clock divisor in fast mode plus. It is * applicable if there is no fast clock source (i.e. single * clock source). * @has_multi_master_mode: The I2C controller supports running in single-master * or multi-master mode. * @has_slcg_override_reg: The I2C controller supports a register that * overrides the second level clock gating. * @has_mst_fifo: The I2C controller contains the new MST FIFO interface that * provides additional features and allows for longer messages to * be transferred in one go. * @quirks: I2C adapter quirks for limiting write/read transfer size and not * allowing 0 length transfers. * @supports_bus_clear: Bus Clear support to recover from bus hang during * SDA stuck low from device for some unknown reasons. * @has_apb_dma: Support of APBDMA on corresponding Tegra chip. * @tlow_std_mode: Low period of the clock in standard mode. * @thigh_std_mode: High period of the clock in standard mode. * @tlow_fast_fastplus_mode: Low period of the clock in fast/fast-plus modes. * @thigh_fast_fastplus_mode: High period of the clock in fast/fast-plus modes. * @setup_hold_time_std_mode: Setup and hold time for start and stop conditions * in standard mode. * @setup_hold_time_fast_fast_plus_mode: Setup and hold time for start and stop * conditions in fast/fast-plus modes. 
* @setup_hold_time_hs_mode: Setup and hold time for start and stop conditions * in HS mode. * @has_interface_timing_reg: Has interface timing register to program the tuned * timing settings. */ struct tegra_i2c_hw_feature { bool has_continue_xfer_support; bool has_per_pkt_xfer_complete_irq; bool has_config_load_reg; u32 clk_divisor_hs_mode; u32 clk_divisor_std_mode; u32 clk_divisor_fast_mode; u32 clk_divisor_fast_plus_mode; bool has_multi_master_mode; bool has_slcg_override_reg; bool has_mst_fifo; const struct i2c_adapter_quirks *quirks; bool supports_bus_clear; bool has_apb_dma; u32 tlow_std_mode; u32 thigh_std_mode; u32 tlow_fast_fastplus_mode; u32 thigh_fast_fastplus_mode; u32 setup_hold_time_std_mode; u32 setup_hold_time_fast_fast_plus_mode; u32 setup_hold_time_hs_mode; bool has_interface_timing_reg; }; /** * struct tegra_i2c_dev - per device I2C context * @dev: device reference for power management * @hw: Tegra I2C HW feature * @adapter: core I2C layer adapter information * @div_clk: clock reference for div clock of I2C controller * @clocks: array of I2C controller clocks * @nclocks: number of clocks in the array * @rst: reset control for the I2C controller * @base: ioremapped registers cookie * @base_phys: physical base address of the I2C controller * @cont_id: I2C controller ID, used for packet header * @irq: IRQ number of transfer complete interrupt * @is_dvc: identifies the DVC I2C controller, has a different register layout * @is_vi: identifies the VI I2C controller, has a different register layout * @msg_complete: transfer completion notifier * @msg_buf_remaining: size of unsent data in the message buffer * @msg_len: length of message in current transfer * @msg_err: error code for completed message * @msg_buf: pointer to current message data * @msg_read: indicates that the transfer is a read access * @timings: i2c timings information like bus frequency * @multimaster_mode: indicates that I2C controller is in multi-master mode * @dma_chan: DMA channel * @dma_phys: handle to DMA resources * @dma_buf: pointer to allocated DMA buffer * @dma_buf_size: DMA buffer size * @dma_mode: indicates active DMA transfer * @dma_complete: DMA completion notifier * @atomic_mode: indicates active atomic transfer */ struct tegra_i2c_dev { struct device *dev; struct i2c_adapter adapter; const struct tegra_i2c_hw_feature *hw; struct reset_control *rst; unsigned int cont_id; unsigned int irq; phys_addr_t base_phys; void __iomem *base; struct clk_bulk_data clocks[2]; unsigned int nclocks; struct clk *div_clk; struct i2c_timings timings; struct completion msg_complete; size_t msg_buf_remaining; unsigned int msg_len; int msg_err; u8 *msg_buf; struct completion dma_complete; struct dma_chan *dma_chan; unsigned int dma_buf_size; struct device *dma_dev; dma_addr_t dma_phys; void *dma_buf; bool multimaster_mode; bool atomic_mode; bool dma_mode; bool msg_read; bool is_dvc; bool is_vi; }; #define IS_DVC(dev) (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC) && (dev)->is_dvc) #define IS_VI(dev) (IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) && (dev)->is_vi) static void dvc_writel(struct tegra_i2c_dev *i2c_dev, u32 val, unsigned int reg) { writel_relaxed(val, i2c_dev->base + reg); } static u32 dvc_readl(struct tegra_i2c_dev *i2c_dev, unsigned int reg) { return readl_relaxed(i2c_dev->base + reg); } /* * If necessary, i2c_writel() and i2c_readl() will offset the register * in order to talk to the I2C block inside the DVC block. 
*/ static u32 tegra_i2c_reg_addr(struct tegra_i2c_dev *i2c_dev, unsigned int reg) { if (IS_DVC(i2c_dev)) reg += (reg >= I2C_TX_FIFO) ? 0x10 : 0x40; else if (IS_VI(i2c_dev)) reg = 0xc00 + (reg << 2); return reg; } static void i2c_writel(struct tegra_i2c_dev *i2c_dev, u32 val, unsigned int reg) { writel_relaxed(val, i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg)); /* read back register to make sure that register writes completed */ if (reg != I2C_TX_FIFO) readl_relaxed(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg)); else if (IS_VI(i2c_dev)) readl_relaxed(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, I2C_INT_STATUS)); } static u32 i2c_readl(struct tegra_i2c_dev *i2c_dev, unsigned int reg) { return readl_relaxed(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg)); } static void i2c_writesl(struct tegra_i2c_dev *i2c_dev, void *data, unsigned int reg, unsigned int len) { writesl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg), data, len); } static void i2c_writesl_vi(struct tegra_i2c_dev *i2c_dev, void *data, unsigned int reg, unsigned int len) { u32 *data32 = data; /* * VI I2C controller has known hardware bug where writes get stuck * when immediate multiple writes happen to TX_FIFO register. * Recommended software work around is to read I2C register after * each write to TX_FIFO register to flush out the data. */ while (len--) i2c_writel(i2c_dev, *data32++, reg); } static void i2c_readsl(struct tegra_i2c_dev *i2c_dev, void *data, unsigned int reg, unsigned int len) { readsl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg), data, len); } static void tegra_i2c_mask_irq(struct tegra_i2c_dev *i2c_dev, u32 mask) { u32 int_mask; int_mask = i2c_readl(i2c_dev, I2C_INT_MASK) & ~mask; i2c_writel(i2c_dev, int_mask, I2C_INT_MASK); } static void tegra_i2c_unmask_irq(struct tegra_i2c_dev *i2c_dev, u32 mask) { u32 int_mask; int_mask = i2c_readl(i2c_dev, I2C_INT_MASK) | mask; i2c_writel(i2c_dev, int_mask, I2C_INT_MASK); } static void tegra_i2c_dma_complete(void *args) { struct tegra_i2c_dev *i2c_dev = args; complete(&i2c_dev->dma_complete); } static int tegra_i2c_dma_submit(struct tegra_i2c_dev *i2c_dev, size_t len) { struct dma_async_tx_descriptor *dma_desc; enum dma_transfer_direction dir; dev_dbg(i2c_dev->dev, "starting DMA for length: %zu\n", len); reinit_completion(&i2c_dev->dma_complete); dir = i2c_dev->msg_read ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV; dma_desc = dmaengine_prep_slave_single(i2c_dev->dma_chan, i2c_dev->dma_phys, len, dir, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!dma_desc) { dev_err(i2c_dev->dev, "failed to get %s DMA descriptor\n", i2c_dev->msg_read ? 
"RX" : "TX"); return -EINVAL; } dma_desc->callback = tegra_i2c_dma_complete; dma_desc->callback_param = i2c_dev; dmaengine_submit(dma_desc); dma_async_issue_pending(i2c_dev->dma_chan); return 0; } static void tegra_i2c_release_dma(struct tegra_i2c_dev *i2c_dev) { if (i2c_dev->dma_buf) { dma_free_coherent(i2c_dev->dma_dev, i2c_dev->dma_buf_size, i2c_dev->dma_buf, i2c_dev->dma_phys); i2c_dev->dma_buf = NULL; } if (i2c_dev->dma_chan) { dma_release_channel(i2c_dev->dma_chan); i2c_dev->dma_chan = NULL; } } static int tegra_i2c_init_dma(struct tegra_i2c_dev *i2c_dev) { dma_addr_t dma_phys; u32 *dma_buf; int err; if (IS_VI(i2c_dev)) return 0; if (i2c_dev->hw->has_apb_dma) { if (!IS_ENABLED(CONFIG_TEGRA20_APB_DMA)) { dev_dbg(i2c_dev->dev, "APB DMA support not enabled\n"); return 0; } } else if (!IS_ENABLED(CONFIG_TEGRA186_GPC_DMA)) { dev_dbg(i2c_dev->dev, "GPC DMA support not enabled\n"); return 0; } /* * The same channel will be used for both RX and TX. * Keeping the name as "tx" for backward compatibility * with existing devicetrees. */ i2c_dev->dma_chan = dma_request_chan(i2c_dev->dev, "tx"); if (IS_ERR(i2c_dev->dma_chan)) { err = PTR_ERR(i2c_dev->dma_chan); i2c_dev->dma_chan = NULL; goto err_out; } i2c_dev->dma_dev = i2c_dev->dma_chan->device->dev; i2c_dev->dma_buf_size = i2c_dev->hw->quirks->max_write_len + I2C_PACKET_HEADER_SIZE; dma_buf = dma_alloc_coherent(i2c_dev->dma_dev, i2c_dev->dma_buf_size, &dma_phys, GFP_KERNEL | __GFP_NOWARN); if (!dma_buf) { dev_err(i2c_dev->dev, "failed to allocate DMA buffer\n"); err = -ENOMEM; goto err_out; } i2c_dev->dma_buf = dma_buf; i2c_dev->dma_phys = dma_phys; return 0; err_out: tegra_i2c_release_dma(i2c_dev); if (err != -EPROBE_DEFER) { dev_err(i2c_dev->dev, "cannot use DMA: %d\n", err); dev_err(i2c_dev->dev, "falling back to PIO\n"); return 0; } return err; } /* * One of the Tegra I2C blocks is inside the DVC (Digital Voltage Controller) * block. This block is identical to the rest of the I2C blocks, except that * it only supports master mode, it has registers moved around, and it needs * some extra init to get it into I2C mode. The register moves are handled * by i2c_readl() and i2c_writel(). 
*/ static void tegra_dvc_init(struct tegra_i2c_dev *i2c_dev) { u32 val; val = dvc_readl(i2c_dev, DVC_CTRL_REG3); val |= DVC_CTRL_REG3_SW_PROG; val |= DVC_CTRL_REG3_I2C_DONE_INTR_EN; dvc_writel(i2c_dev, val, DVC_CTRL_REG3); val = dvc_readl(i2c_dev, DVC_CTRL_REG1); val |= DVC_CTRL_REG1_INTR_EN; dvc_writel(i2c_dev, val, DVC_CTRL_REG1); } static void tegra_i2c_vi_init(struct tegra_i2c_dev *i2c_dev) { u32 value; value = FIELD_PREP(I2C_INTERFACE_TIMING_THIGH, 2) | FIELD_PREP(I2C_INTERFACE_TIMING_TLOW, 4); i2c_writel(i2c_dev, value, I2C_INTERFACE_TIMING_0); value = FIELD_PREP(I2C_INTERFACE_TIMING_TBUF, 4) | FIELD_PREP(I2C_INTERFACE_TIMING_TSU_STO, 7) | FIELD_PREP(I2C_INTERFACE_TIMING_THD_STA, 4) | FIELD_PREP(I2C_INTERFACE_TIMING_TSU_STA, 4); i2c_writel(i2c_dev, value, I2C_INTERFACE_TIMING_1); value = FIELD_PREP(I2C_HS_INTERFACE_TIMING_THIGH, 3) | FIELD_PREP(I2C_HS_INTERFACE_TIMING_TLOW, 8); i2c_writel(i2c_dev, value, I2C_HS_INTERFACE_TIMING_0); value = FIELD_PREP(I2C_HS_INTERFACE_TIMING_TSU_STO, 11) | FIELD_PREP(I2C_HS_INTERFACE_TIMING_THD_STA, 11) | FIELD_PREP(I2C_HS_INTERFACE_TIMING_TSU_STA, 11); i2c_writel(i2c_dev, value, I2C_HS_INTERFACE_TIMING_1); value = FIELD_PREP(I2C_BC_SCLK_THRESHOLD, 9) | I2C_BC_STOP_COND; i2c_writel(i2c_dev, value, I2C_BUS_CLEAR_CNFG); i2c_writel(i2c_dev, 0x0, I2C_TLOW_SEXT); } static int tegra_i2c_poll_register(struct tegra_i2c_dev *i2c_dev, u32 reg, u32 mask, u32 delay_us, u32 timeout_us) { void __iomem *addr = i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg); u32 val; if (!i2c_dev->atomic_mode) return readl_relaxed_poll_timeout(addr, val, !(val & mask), delay_us, timeout_us); return readl_relaxed_poll_timeout_atomic(addr, val, !(val & mask), delay_us, timeout_us); } static int tegra_i2c_flush_fifos(struct tegra_i2c_dev *i2c_dev) { u32 mask, val, offset; int err; if (i2c_dev->hw->has_mst_fifo) { mask = I2C_MST_FIFO_CONTROL_TX_FLUSH | I2C_MST_FIFO_CONTROL_RX_FLUSH; offset = I2C_MST_FIFO_CONTROL; } else { mask = I2C_FIFO_CONTROL_TX_FLUSH | I2C_FIFO_CONTROL_RX_FLUSH; offset = I2C_FIFO_CONTROL; } val = i2c_readl(i2c_dev, offset); val |= mask; i2c_writel(i2c_dev, val, offset); err = tegra_i2c_poll_register(i2c_dev, offset, mask, 1000, 1000000); if (err) { dev_err(i2c_dev->dev, "failed to flush FIFO\n"); return err; } return 0; } static int tegra_i2c_wait_for_config_load(struct tegra_i2c_dev *i2c_dev) { int err; if (!i2c_dev->hw->has_config_load_reg) return 0; i2c_writel(i2c_dev, I2C_MSTR_CONFIG_LOAD, I2C_CONFIG_LOAD); err = tegra_i2c_poll_register(i2c_dev, I2C_CONFIG_LOAD, 0xffffffff, 1000, I2C_CONFIG_LOAD_TIMEOUT); if (err) { dev_err(i2c_dev->dev, "failed to load config\n"); return err; } return 0; } static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev) { u32 val, clk_divisor, clk_multiplier, tsu_thd, tlow, thigh, non_hs_mode; acpi_handle handle = ACPI_HANDLE(i2c_dev->dev); struct i2c_timings *t = &i2c_dev->timings; int err; /* * The reset shouldn't ever fail in practice. The failure will be a * sign of a severe problem that needs to be resolved. Still we don't * want to fail the initialization completely because this may break * kernel boot up since voltage regulators use I2C. Hence, we will * emit a noisy warning on error, which won't stay unnoticed and * won't hose machine entirely. 
*/ if (handle) err = acpi_evaluate_object(handle, "_RST", NULL, NULL); else err = reset_control_reset(i2c_dev->rst); WARN_ON_ONCE(err); if (IS_DVC(i2c_dev)) tegra_dvc_init(i2c_dev); val = I2C_CNFG_NEW_MASTER_FSM | I2C_CNFG_PACKET_MODE_EN | FIELD_PREP(I2C_CNFG_DEBOUNCE_CNT, 2); if (i2c_dev->hw->has_multi_master_mode) val |= I2C_CNFG_MULTI_MASTER_MODE; i2c_writel(i2c_dev, val, I2C_CNFG); i2c_writel(i2c_dev, 0, I2C_INT_MASK); if (IS_VI(i2c_dev)) tegra_i2c_vi_init(i2c_dev); switch (t->bus_freq_hz) { case I2C_MAX_STANDARD_MODE_FREQ + 1 ... I2C_MAX_FAST_MODE_PLUS_FREQ: default: tlow = i2c_dev->hw->tlow_fast_fastplus_mode; thigh = i2c_dev->hw->thigh_fast_fastplus_mode; tsu_thd = i2c_dev->hw->setup_hold_time_fast_fast_plus_mode; if (t->bus_freq_hz > I2C_MAX_FAST_MODE_FREQ) non_hs_mode = i2c_dev->hw->clk_divisor_fast_plus_mode; else non_hs_mode = i2c_dev->hw->clk_divisor_fast_mode; break; case 0 ... I2C_MAX_STANDARD_MODE_FREQ: tlow = i2c_dev->hw->tlow_std_mode; thigh = i2c_dev->hw->thigh_std_mode; tsu_thd = i2c_dev->hw->setup_hold_time_std_mode; non_hs_mode = i2c_dev->hw->clk_divisor_std_mode; break; } /* make sure clock divisor programmed correctly */ clk_divisor = FIELD_PREP(I2C_CLK_DIVISOR_HSMODE, i2c_dev->hw->clk_divisor_hs_mode) | FIELD_PREP(I2C_CLK_DIVISOR_STD_FAST_MODE, non_hs_mode); i2c_writel(i2c_dev, clk_divisor, I2C_CLK_DIVISOR); if (i2c_dev->hw->has_interface_timing_reg) { val = FIELD_PREP(I2C_INTERFACE_TIMING_THIGH, thigh) | FIELD_PREP(I2C_INTERFACE_TIMING_TLOW, tlow); i2c_writel(i2c_dev, val, I2C_INTERFACE_TIMING_0); } /* * Configure setup and hold times only when tsu_thd is non-zero. * Otherwise, preserve the chip default values. */ if (i2c_dev->hw->has_interface_timing_reg && tsu_thd) i2c_writel(i2c_dev, tsu_thd, I2C_INTERFACE_TIMING_1); clk_multiplier = (tlow + thigh + 2) * (non_hs_mode + 1); err = clk_set_rate(i2c_dev->div_clk, t->bus_freq_hz * clk_multiplier); if (err) { dev_err(i2c_dev->dev, "failed to set div-clk rate: %d\n", err); return err; } if (!IS_DVC(i2c_dev) && !IS_VI(i2c_dev)) { u32 sl_cfg = i2c_readl(i2c_dev, I2C_SL_CNFG); sl_cfg |= I2C_SL_CNFG_NACK | I2C_SL_CNFG_NEWSL; i2c_writel(i2c_dev, sl_cfg, I2C_SL_CNFG); i2c_writel(i2c_dev, 0xfc, I2C_SL_ADDR1); i2c_writel(i2c_dev, 0x00, I2C_SL_ADDR2); } err = tegra_i2c_flush_fifos(i2c_dev); if (err) return err; if (i2c_dev->multimaster_mode && i2c_dev->hw->has_slcg_override_reg) i2c_writel(i2c_dev, I2C_MST_CORE_CLKEN_OVR, I2C_CLKEN_OVERRIDE); err = tegra_i2c_wait_for_config_load(i2c_dev); if (err) return err; return 0; } static int tegra_i2c_disable_packet_mode(struct tegra_i2c_dev *i2c_dev) { u32 cnfg; /* * NACK interrupt is generated before the I2C controller generates * the STOP condition on the bus. So, wait for 2 clock periods * before disabling the controller so that the STOP condition has * been delivered properly. */ udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->timings.bus_freq_hz)); cnfg = i2c_readl(i2c_dev, I2C_CNFG); if (cnfg & I2C_CNFG_PACKET_MODE_EN) i2c_writel(i2c_dev, cnfg & ~I2C_CNFG_PACKET_MODE_EN, I2C_CNFG); return tegra_i2c_wait_for_config_load(i2c_dev); } static int tegra_i2c_empty_rx_fifo(struct tegra_i2c_dev *i2c_dev) { size_t buf_remaining = i2c_dev->msg_buf_remaining; unsigned int words_to_transfer, rx_fifo_avail; u8 *buf = i2c_dev->msg_buf; u32 val; /* * Catch overflow due to message fully sent before the check for * RX FIFO availability. 
*/ if (WARN_ON_ONCE(!(i2c_dev->msg_buf_remaining))) return -EINVAL; if (i2c_dev->hw->has_mst_fifo) { val = i2c_readl(i2c_dev, I2C_MST_FIFO_STATUS); rx_fifo_avail = FIELD_GET(I2C_MST_FIFO_STATUS_RX, val); } else { val = i2c_readl(i2c_dev, I2C_FIFO_STATUS); rx_fifo_avail = FIELD_GET(I2C_FIFO_STATUS_RX, val); } /* round down to exclude partial word at the end of buffer */ words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD; if (words_to_transfer > rx_fifo_avail) words_to_transfer = rx_fifo_avail; i2c_readsl(i2c_dev, buf, I2C_RX_FIFO, words_to_transfer); buf += words_to_transfer * BYTES_PER_FIFO_WORD; buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD; rx_fifo_avail -= words_to_transfer; /* * If there is a partial word at the end of buffer, handle it * manually to prevent overwriting past the end of buffer. */ if (rx_fifo_avail > 0 && buf_remaining > 0) { /* * buf_remaining > 3 check not needed as rx_fifo_avail == 0 * when (words_to_transfer was > rx_fifo_avail) earlier * in this function. */ val = i2c_readl(i2c_dev, I2C_RX_FIFO); val = cpu_to_le32(val); memcpy(buf, &val, buf_remaining); buf_remaining = 0; rx_fifo_avail--; } /* RX FIFO must be drained, otherwise it's an Overflow case. */ if (WARN_ON_ONCE(rx_fifo_avail)) return -EINVAL; i2c_dev->msg_buf_remaining = buf_remaining; i2c_dev->msg_buf = buf; return 0; } static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev) { size_t buf_remaining = i2c_dev->msg_buf_remaining; unsigned int words_to_transfer, tx_fifo_avail; u8 *buf = i2c_dev->msg_buf; u32 val; if (i2c_dev->hw->has_mst_fifo) { val = i2c_readl(i2c_dev, I2C_MST_FIFO_STATUS); tx_fifo_avail = FIELD_GET(I2C_MST_FIFO_STATUS_TX, val); } else { val = i2c_readl(i2c_dev, I2C_FIFO_STATUS); tx_fifo_avail = FIELD_GET(I2C_FIFO_STATUS_TX, val); } /* round down to exclude partial word at the end of buffer */ words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD; /* * This hunk pushes 4 bytes at a time into the TX FIFO. * * It's very common to have < 4 bytes, hence there is no word * to push if we have less than 4 bytes to transfer. */ if (words_to_transfer) { if (words_to_transfer > tx_fifo_avail) words_to_transfer = tx_fifo_avail; /* * Update state before writing to FIFO. Note that this may * cause us to finish writing all bytes (AKA buf_remaining * goes to 0), hence we have a potential for an interrupt * (PACKET_XFER_COMPLETE is not maskable), but GIC interrupt * is disabled at this point. */ buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD; tx_fifo_avail -= words_to_transfer; i2c_dev->msg_buf_remaining = buf_remaining; i2c_dev->msg_buf = buf + words_to_transfer * BYTES_PER_FIFO_WORD; if (IS_VI(i2c_dev)) i2c_writesl_vi(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer); else i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer); buf += words_to_transfer * BYTES_PER_FIFO_WORD; } /* * If there is a partial word at the end of buffer, handle it manually * to prevent reading past the end of buffer, which could cross a page * boundary and fault. */ if (tx_fifo_avail > 0 && buf_remaining > 0) { /* * buf_remaining > 3 check not needed as tx_fifo_avail == 0 * when (words_to_transfer was > tx_fifo_avail) earlier * in this function for non-zero words_to_transfer. 
*/ memcpy(&val, buf, buf_remaining); val = le32_to_cpu(val); i2c_dev->msg_buf_remaining = 0; i2c_dev->msg_buf = NULL; i2c_writel(i2c_dev, val, I2C_TX_FIFO); } return 0; } static irqreturn_t tegra_i2c_isr(int irq, void *dev_id) { const u32 status_err = I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST; struct tegra_i2c_dev *i2c_dev = dev_id; u32 status; status = i2c_readl(i2c_dev, I2C_INT_STATUS); if (status == 0) { dev_warn(i2c_dev->dev, "IRQ status 0 %08x %08x %08x\n", i2c_readl(i2c_dev, I2C_PACKET_TRANSFER_STATUS), i2c_readl(i2c_dev, I2C_STATUS), i2c_readl(i2c_dev, I2C_CNFG)); i2c_dev->msg_err |= I2C_ERR_UNKNOWN_INTERRUPT; goto err; } if (status & status_err) { tegra_i2c_disable_packet_mode(i2c_dev); if (status & I2C_INT_NO_ACK) i2c_dev->msg_err |= I2C_ERR_NO_ACK; if (status & I2C_INT_ARBITRATION_LOST) i2c_dev->msg_err |= I2C_ERR_ARBITRATION_LOST; goto err; } /* * I2C transfer is terminated during the bus clear, so skip * processing the other interrupts. */ if (i2c_dev->hw->supports_bus_clear && (status & I2C_INT_BUS_CLR_DONE)) goto err; if (!i2c_dev->dma_mode) { if (i2c_dev->msg_read && (status & I2C_INT_RX_FIFO_DATA_REQ)) { if (tegra_i2c_empty_rx_fifo(i2c_dev)) { /* * Overflow error condition: message fully sent, * with no XFER_COMPLETE interrupt but hardware * asks to transfer more. */ i2c_dev->msg_err |= I2C_ERR_RX_BUFFER_OVERFLOW; goto err; } } if (!i2c_dev->msg_read && (status & I2C_INT_TX_FIFO_DATA_REQ)) { if (i2c_dev->msg_buf_remaining) tegra_i2c_fill_tx_fifo(i2c_dev); else tegra_i2c_mask_irq(i2c_dev, I2C_INT_TX_FIFO_DATA_REQ); } } i2c_writel(i2c_dev, status, I2C_INT_STATUS); if (IS_DVC(i2c_dev)) dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS); /* * During message read XFER_COMPLETE interrupt is triggered prior to * DMA completion and during message write XFER_COMPLETE interrupt is * triggered after DMA completion. * * PACKETS_XFER_COMPLETE indicates completion of all bytes of transfer, * so forcing msg_buf_remaining to 0 in DMA mode. */ if (status & I2C_INT_PACKET_XFER_COMPLETE) { if (i2c_dev->dma_mode) i2c_dev->msg_buf_remaining = 0; /* * Underflow error condition: XFER_COMPLETE before message * fully sent. 
*/ if (WARN_ON_ONCE(i2c_dev->msg_buf_remaining)) { i2c_dev->msg_err |= I2C_ERR_UNKNOWN_INTERRUPT; goto err; } complete(&i2c_dev->msg_complete); } goto done; err: /* mask all interrupts on error */ tegra_i2c_mask_irq(i2c_dev, I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST | I2C_INT_PACKET_XFER_COMPLETE | I2C_INT_TX_FIFO_DATA_REQ | I2C_INT_RX_FIFO_DATA_REQ); if (i2c_dev->hw->supports_bus_clear) tegra_i2c_mask_irq(i2c_dev, I2C_INT_BUS_CLR_DONE); i2c_writel(i2c_dev, status, I2C_INT_STATUS); if (IS_DVC(i2c_dev)) dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS); if (i2c_dev->dma_mode) { dmaengine_terminate_async(i2c_dev->dma_chan); complete(&i2c_dev->dma_complete); } complete(&i2c_dev->msg_complete); done: return IRQ_HANDLED; } static void tegra_i2c_config_fifo_trig(struct tegra_i2c_dev *i2c_dev, size_t len) { struct dma_slave_config slv_config = {0}; u32 val, reg, dma_burst, reg_offset; int err; if (i2c_dev->hw->has_mst_fifo) reg = I2C_MST_FIFO_CONTROL; else reg = I2C_FIFO_CONTROL; if (i2c_dev->dma_mode) { if (len & 0xF) dma_burst = 1; else if (len & 0x10) dma_burst = 4; else dma_burst = 8; if (i2c_dev->msg_read) { reg_offset = tegra_i2c_reg_addr(i2c_dev, I2C_RX_FIFO); slv_config.src_addr = i2c_dev->base_phys + reg_offset; slv_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; slv_config.src_maxburst = dma_burst; if (i2c_dev->hw->has_mst_fifo) val = I2C_MST_FIFO_CONTROL_RX_TRIG(dma_burst); else val = I2C_FIFO_CONTROL_RX_TRIG(dma_burst); } else { reg_offset = tegra_i2c_reg_addr(i2c_dev, I2C_TX_FIFO); slv_config.dst_addr = i2c_dev->base_phys + reg_offset; slv_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; slv_config.dst_maxburst = dma_burst; if (i2c_dev->hw->has_mst_fifo) val = I2C_MST_FIFO_CONTROL_TX_TRIG(dma_burst); else val = I2C_FIFO_CONTROL_TX_TRIG(dma_burst); } slv_config.device_fc = true; err = dmaengine_slave_config(i2c_dev->dma_chan, &slv_config); if (err) { dev_err(i2c_dev->dev, "DMA config failed: %d\n", err); dev_err(i2c_dev->dev, "falling back to PIO\n"); tegra_i2c_release_dma(i2c_dev); i2c_dev->dma_mode = false; } else { goto out; } } if (i2c_dev->hw->has_mst_fifo) val = I2C_MST_FIFO_CONTROL_TX_TRIG(8) | I2C_MST_FIFO_CONTROL_RX_TRIG(1); else val = I2C_FIFO_CONTROL_TX_TRIG(8) | I2C_FIFO_CONTROL_RX_TRIG(1); out: i2c_writel(i2c_dev, val, reg); } static unsigned long tegra_i2c_poll_completion(struct tegra_i2c_dev *i2c_dev, struct completion *complete, unsigned int timeout_ms) { ktime_t ktime = ktime_get(); ktime_t ktimeout = ktime_add_ms(ktime, timeout_ms); do { u32 status = i2c_readl(i2c_dev, I2C_INT_STATUS); if (status) tegra_i2c_isr(i2c_dev->irq, i2c_dev); if (completion_done(complete)) { s64 delta = ktime_ms_delta(ktimeout, ktime); return msecs_to_jiffies(delta) ?: 1; } ktime = ktime_get(); } while (ktime_before(ktime, ktimeout)); return 0; } static unsigned long tegra_i2c_wait_completion(struct tegra_i2c_dev *i2c_dev, struct completion *complete, unsigned int timeout_ms) { unsigned long ret; if (i2c_dev->atomic_mode) { ret = tegra_i2c_poll_completion(i2c_dev, complete, timeout_ms); } else { enable_irq(i2c_dev->irq); ret = wait_for_completion_timeout(complete, msecs_to_jiffies(timeout_ms)); disable_irq(i2c_dev->irq); /* * Under some rare circumstances (like running KASAN + * NFS root) CPU, which handles interrupt, may stuck in * uninterruptible state for a significant time. In this * case we will get timeout if I2C transfer is running on * a sibling CPU, despite of IRQ being raised. 
* * In order to handle this rare condition, the IRQ status * needs to be checked after timeout. */ if (ret == 0) ret = tegra_i2c_poll_completion(i2c_dev, complete, 0); } return ret; } static int tegra_i2c_issue_bus_clear(struct i2c_adapter *adap) { struct tegra_i2c_dev *i2c_dev = i2c_get_adapdata(adap); u32 val, time_left; int err; reinit_completion(&i2c_dev->msg_complete); val = FIELD_PREP(I2C_BC_SCLK_THRESHOLD, 9) | I2C_BC_STOP_COND | I2C_BC_TERMINATE; i2c_writel(i2c_dev, val, I2C_BUS_CLEAR_CNFG); err = tegra_i2c_wait_for_config_load(i2c_dev); if (err) return err; val |= I2C_BC_ENABLE; i2c_writel(i2c_dev, val, I2C_BUS_CLEAR_CNFG); tegra_i2c_unmask_irq(i2c_dev, I2C_INT_BUS_CLR_DONE); time_left = tegra_i2c_wait_completion(i2c_dev, &i2c_dev->msg_complete, 50); tegra_i2c_mask_irq(i2c_dev, I2C_INT_BUS_CLR_DONE); if (time_left == 0) { dev_err(i2c_dev->dev, "failed to clear bus\n"); return -ETIMEDOUT; } val = i2c_readl(i2c_dev, I2C_BUS_CLEAR_STATUS); if (!(val & I2C_BC_STATUS)) { dev_err(i2c_dev->dev, "un-recovered arbitration lost\n"); return -EIO; } return -EAGAIN; } static void tegra_i2c_push_packet_header(struct tegra_i2c_dev *i2c_dev, struct i2c_msg *msg, enum msg_end_type end_state) { u32 *dma_buf = i2c_dev->dma_buf; u32 packet_header; packet_header = FIELD_PREP(PACKET_HEADER0_HEADER_SIZE, 0) | FIELD_PREP(PACKET_HEADER0_PROTOCOL, PACKET_HEADER0_PROTOCOL_I2C) | FIELD_PREP(PACKET_HEADER0_CONT_ID, i2c_dev->cont_id) | FIELD_PREP(PACKET_HEADER0_PACKET_ID, 1); if (i2c_dev->dma_mode && !i2c_dev->msg_read) *dma_buf++ = packet_header; else i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO); packet_header = i2c_dev->msg_len - 1; if (i2c_dev->dma_mode && !i2c_dev->msg_read) *dma_buf++ = packet_header; else i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO); packet_header = I2C_HEADER_IE_ENABLE; if (end_state == MSG_END_CONTINUE) packet_header |= I2C_HEADER_CONTINUE_XFER; else if (end_state == MSG_END_REPEAT_START) packet_header |= I2C_HEADER_REPEAT_START; if (msg->flags & I2C_M_TEN) { packet_header |= msg->addr; packet_header |= I2C_HEADER_10BIT_ADDR; } else { packet_header |= msg->addr << I2C_HEADER_SLAVE_ADDR_SHIFT; } if (msg->flags & I2C_M_IGNORE_NAK) packet_header |= I2C_HEADER_CONT_ON_NAK; if (msg->flags & I2C_M_RD) packet_header |= I2C_HEADER_READ; if (i2c_dev->dma_mode && !i2c_dev->msg_read) *dma_buf++ = packet_header; else i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO); } static int tegra_i2c_error_recover(struct tegra_i2c_dev *i2c_dev, struct i2c_msg *msg) { if (i2c_dev->msg_err == I2C_ERR_NONE) return 0; tegra_i2c_init(i2c_dev); /* start recovery upon arbitration loss in single master mode */ if (i2c_dev->msg_err == I2C_ERR_ARBITRATION_LOST) { if (!i2c_dev->multimaster_mode) return i2c_recover_bus(&i2c_dev->adapter); return -EAGAIN; } if (i2c_dev->msg_err == I2C_ERR_NO_ACK) { if (msg->flags & I2C_M_IGNORE_NAK) return 0; return -EREMOTEIO; } return -EIO; } static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, struct i2c_msg *msg, enum msg_end_type end_state) { unsigned long time_left, xfer_time = 100; size_t xfer_size; u32 int_mask; int err; err = tegra_i2c_flush_fifos(i2c_dev); if (err) return err; i2c_dev->msg_buf = msg->buf; i2c_dev->msg_len = msg->len; i2c_dev->msg_err = I2C_ERR_NONE; i2c_dev->msg_read = !!(msg->flags & I2C_M_RD); reinit_completion(&i2c_dev->msg_complete); /* * For SMBUS block read command, read only 1 byte in the first transfer. * Adjust that 1 byte for the next transfer in the msg buffer and msg * length. 
*/ if (msg->flags & I2C_M_RECV_LEN) { if (end_state == MSG_END_CONTINUE) { i2c_dev->msg_len = 1; } else { i2c_dev->msg_buf += 1; i2c_dev->msg_len -= 1; } } i2c_dev->msg_buf_remaining = i2c_dev->msg_len; if (i2c_dev->msg_read) xfer_size = i2c_dev->msg_len; else xfer_size = i2c_dev->msg_len + I2C_PACKET_HEADER_SIZE; xfer_size = ALIGN(xfer_size, BYTES_PER_FIFO_WORD); i2c_dev->dma_mode = xfer_size > I2C_PIO_MODE_PREFERRED_LEN && i2c_dev->dma_buf && !i2c_dev->atomic_mode; tegra_i2c_config_fifo_trig(i2c_dev, xfer_size); /* * Transfer time in mSec = Total bits / transfer rate * Total bits = 9 bits per byte (including ACK bit) + Start & stop bits */ xfer_time += DIV_ROUND_CLOSEST(((xfer_size * 9) + 2) * MSEC_PER_SEC, i2c_dev->timings.bus_freq_hz); int_mask = I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST; tegra_i2c_unmask_irq(i2c_dev, int_mask); if (i2c_dev->dma_mode) { if (i2c_dev->msg_read) { dma_sync_single_for_device(i2c_dev->dma_dev, i2c_dev->dma_phys, xfer_size, DMA_FROM_DEVICE); err = tegra_i2c_dma_submit(i2c_dev, xfer_size); if (err) return err; } else { dma_sync_single_for_cpu(i2c_dev->dma_dev, i2c_dev->dma_phys, xfer_size, DMA_TO_DEVICE); } } tegra_i2c_push_packet_header(i2c_dev, msg, end_state); if (!i2c_dev->msg_read) { if (i2c_dev->dma_mode) { memcpy(i2c_dev->dma_buf + I2C_PACKET_HEADER_SIZE, msg->buf, i2c_dev->msg_len); dma_sync_single_for_device(i2c_dev->dma_dev, i2c_dev->dma_phys, xfer_size, DMA_TO_DEVICE); err = tegra_i2c_dma_submit(i2c_dev, xfer_size); if (err) return err; } else { tegra_i2c_fill_tx_fifo(i2c_dev); } } if (i2c_dev->hw->has_per_pkt_xfer_complete_irq) int_mask |= I2C_INT_PACKET_XFER_COMPLETE; if (!i2c_dev->dma_mode) { if (msg->flags & I2C_M_RD) int_mask |= I2C_INT_RX_FIFO_DATA_REQ; else if (i2c_dev->msg_buf_remaining) int_mask |= I2C_INT_TX_FIFO_DATA_REQ; } tegra_i2c_unmask_irq(i2c_dev, int_mask); dev_dbg(i2c_dev->dev, "unmasked IRQ: %02x\n", i2c_readl(i2c_dev, I2C_INT_MASK)); if (i2c_dev->dma_mode) { time_left = tegra_i2c_wait_completion(i2c_dev, &i2c_dev->dma_complete, xfer_time); /* * Synchronize DMA first, since dmaengine_terminate_sync() * performs synchronization after the transfer's termination * and we want to get a completion if transfer succeeded. 
*/ dmaengine_synchronize(i2c_dev->dma_chan); dmaengine_terminate_sync(i2c_dev->dma_chan); if (!time_left && !completion_done(&i2c_dev->dma_complete)) { dev_err(i2c_dev->dev, "DMA transfer timed out\n"); tegra_i2c_init(i2c_dev); return -ETIMEDOUT; } if (i2c_dev->msg_read && i2c_dev->msg_err == I2C_ERR_NONE) { dma_sync_single_for_cpu(i2c_dev->dma_dev, i2c_dev->dma_phys, xfer_size, DMA_FROM_DEVICE); memcpy(i2c_dev->msg_buf, i2c_dev->dma_buf, i2c_dev->msg_len); } } time_left = tegra_i2c_wait_completion(i2c_dev, &i2c_dev->msg_complete, xfer_time); tegra_i2c_mask_irq(i2c_dev, int_mask); if (time_left == 0) { dev_err(i2c_dev->dev, "I2C transfer timed out\n"); tegra_i2c_init(i2c_dev); return -ETIMEDOUT; } dev_dbg(i2c_dev->dev, "transfer complete: %lu %d %d\n", time_left, completion_done(&i2c_dev->msg_complete), i2c_dev->msg_err); i2c_dev->dma_mode = false; err = tegra_i2c_error_recover(i2c_dev, msg); if (err) return err; return 0; } static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) { struct tegra_i2c_dev *i2c_dev = i2c_get_adapdata(adap); int i, ret; ret = pm_runtime_get_sync(i2c_dev->dev); if (ret < 0) { dev_err(i2c_dev->dev, "runtime resume failed %d\n", ret); pm_runtime_put_noidle(i2c_dev->dev); return ret; } for (i = 0; i < num; i++) { enum msg_end_type end_type = MSG_END_STOP; if (i < (num - 1)) { /* check whether follow up message is coming */ if (msgs[i + 1].flags & I2C_M_NOSTART) end_type = MSG_END_CONTINUE; else end_type = MSG_END_REPEAT_START; } /* If M_RECV_LEN use ContinueXfer to read the first byte */ if (msgs[i].flags & I2C_M_RECV_LEN) { ret = tegra_i2c_xfer_msg(i2c_dev, &msgs[i], MSG_END_CONTINUE); if (ret) break; /* Set the msg length from first byte */ msgs[i].len += msgs[i].buf[0]; dev_dbg(i2c_dev->dev, "reading %d bytes\n", msgs[i].len); } ret = tegra_i2c_xfer_msg(i2c_dev, &msgs[i], end_type); if (ret) break; } pm_runtime_put(i2c_dev->dev); return ret ?: i; } static int tegra_i2c_xfer_atomic(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) { struct tegra_i2c_dev *i2c_dev = i2c_get_adapdata(adap); int ret; i2c_dev->atomic_mode = true; ret = tegra_i2c_xfer(adap, msgs, num); i2c_dev->atomic_mode = false; return ret; } static u32 tegra_i2c_func(struct i2c_adapter *adap) { struct tegra_i2c_dev *i2c_dev = i2c_get_adapdata(adap); u32 ret = I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK) | I2C_FUNC_10BIT_ADDR | I2C_FUNC_PROTOCOL_MANGLING; if (i2c_dev->hw->has_continue_xfer_support) ret |= I2C_FUNC_NOSTART | I2C_FUNC_SMBUS_READ_BLOCK_DATA; return ret; } static const struct i2c_algorithm tegra_i2c_algo = { .master_xfer = tegra_i2c_xfer, .master_xfer_atomic = tegra_i2c_xfer_atomic, .functionality = tegra_i2c_func, }; /* payload size is only 12 bit */ static const struct i2c_adapter_quirks tegra_i2c_quirks = { .flags = I2C_AQ_NO_ZERO_LEN, .max_read_len = SZ_4K, .max_write_len = SZ_4K - I2C_PACKET_HEADER_SIZE, }; static const struct i2c_adapter_quirks tegra194_i2c_quirks = { .flags = I2C_AQ_NO_ZERO_LEN, .max_write_len = SZ_64K - I2C_PACKET_HEADER_SIZE, }; static struct i2c_bus_recovery_info tegra_i2c_recovery_info = { .recover_bus = tegra_i2c_issue_bus_clear, }; static const struct tegra_i2c_hw_feature tegra20_i2c_hw = { .has_continue_xfer_support = false, .has_per_pkt_xfer_complete_irq = false, .clk_divisor_hs_mode = 3, .clk_divisor_std_mode = 0, .clk_divisor_fast_mode = 0, .clk_divisor_fast_plus_mode = 0, .has_config_load_reg = false, .has_multi_master_mode = false, .has_slcg_override_reg = false, .has_mst_fifo = false, .quirks = 
&tegra_i2c_quirks, .supports_bus_clear = false, .has_apb_dma = true, .tlow_std_mode = 0x4, .thigh_std_mode = 0x2, .tlow_fast_fastplus_mode = 0x4, .thigh_fast_fastplus_mode = 0x2, .setup_hold_time_std_mode = 0x0, .setup_hold_time_fast_fast_plus_mode = 0x0, .setup_hold_time_hs_mode = 0x0, .has_interface_timing_reg = false, }; static const struct tegra_i2c_hw_feature tegra30_i2c_hw = { .has_continue_xfer_support = true, .has_per_pkt_xfer_complete_irq = false, .clk_divisor_hs_mode = 3, .clk_divisor_std_mode = 0, .clk_divisor_fast_mode = 0, .clk_divisor_fast_plus_mode = 0, .has_config_load_reg = false, .has_multi_master_mode = false, .has_slcg_override_reg = false, .has_mst_fifo = false, .quirks = &tegra_i2c_quirks, .supports_bus_clear = false, .has_apb_dma = true, .tlow_std_mode = 0x4, .thigh_std_mode = 0x2, .tlow_fast_fastplus_mode = 0x4, .thigh_fast_fastplus_mode = 0x2, .setup_hold_time_std_mode = 0x0, .setup_hold_time_fast_fast_plus_mode = 0x0, .setup_hold_time_hs_mode = 0x0, .has_interface_timing_reg = false, }; static const struct tegra_i2c_hw_feature tegra114_i2c_hw = { .has_continue_xfer_support = true, .has_per_pkt_xfer_complete_irq = true, .clk_divisor_hs_mode = 1, .clk_divisor_std_mode = 0x19, .clk_divisor_fast_mode = 0x19, .clk_divisor_fast_plus_mode = 0x10, .has_config_load_reg = false, .has_multi_master_mode = false, .has_slcg_override_reg = false, .has_mst_fifo = false, .quirks = &tegra_i2c_quirks, .supports_bus_clear = true, .has_apb_dma = true, .tlow_std_mode = 0x4, .thigh_std_mode = 0x2, .tlow_fast_fastplus_mode = 0x4, .thigh_fast_fastplus_mode = 0x2, .setup_hold_time_std_mode = 0x0, .setup_hold_time_fast_fast_plus_mode = 0x0, .setup_hold_time_hs_mode = 0x0, .has_interface_timing_reg = false, }; static const struct tegra_i2c_hw_feature tegra124_i2c_hw = { .has_continue_xfer_support = true, .has_per_pkt_xfer_complete_irq = true, .clk_divisor_hs_mode = 1, .clk_divisor_std_mode = 0x19, .clk_divisor_fast_mode = 0x19, .clk_divisor_fast_plus_mode = 0x10, .has_config_load_reg = true, .has_multi_master_mode = false, .has_slcg_override_reg = true, .has_mst_fifo = false, .quirks = &tegra_i2c_quirks, .supports_bus_clear = true, .has_apb_dma = true, .tlow_std_mode = 0x4, .thigh_std_mode = 0x2, .tlow_fast_fastplus_mode = 0x4, .thigh_fast_fastplus_mode = 0x2, .setup_hold_time_std_mode = 0x0, .setup_hold_time_fast_fast_plus_mode = 0x0, .setup_hold_time_hs_mode = 0x0, .has_interface_timing_reg = true, }; static const struct tegra_i2c_hw_feature tegra210_i2c_hw = { .has_continue_xfer_support = true, .has_per_pkt_xfer_complete_irq = true, .clk_divisor_hs_mode = 1, .clk_divisor_std_mode = 0x19, .clk_divisor_fast_mode = 0x19, .clk_divisor_fast_plus_mode = 0x10, .has_config_load_reg = true, .has_multi_master_mode = false, .has_slcg_override_reg = true, .has_mst_fifo = false, .quirks = &tegra_i2c_quirks, .supports_bus_clear = true, .has_apb_dma = true, .tlow_std_mode = 0x4, .thigh_std_mode = 0x2, .tlow_fast_fastplus_mode = 0x4, .thigh_fast_fastplus_mode = 0x2, .setup_hold_time_std_mode = 0, .setup_hold_time_fast_fast_plus_mode = 0, .setup_hold_time_hs_mode = 0, .has_interface_timing_reg = true, }; static const struct tegra_i2c_hw_feature tegra186_i2c_hw = { .has_continue_xfer_support = true, .has_per_pkt_xfer_complete_irq = true, .clk_divisor_hs_mode = 1, .clk_divisor_std_mode = 0x16, .clk_divisor_fast_mode = 0x19, .clk_divisor_fast_plus_mode = 0x10, .has_config_load_reg = true, .has_multi_master_mode = false, .has_slcg_override_reg = true, .has_mst_fifo = false, .quirks = &tegra_i2c_quirks, 
.supports_bus_clear = true, .has_apb_dma = false, .tlow_std_mode = 0x4, .thigh_std_mode = 0x3, .tlow_fast_fastplus_mode = 0x4, .thigh_fast_fastplus_mode = 0x2, .setup_hold_time_std_mode = 0, .setup_hold_time_fast_fast_plus_mode = 0, .setup_hold_time_hs_mode = 0, .has_interface_timing_reg = true, }; static const struct tegra_i2c_hw_feature tegra194_i2c_hw = { .has_continue_xfer_support = true, .has_per_pkt_xfer_complete_irq = true, .clk_divisor_hs_mode = 1, .clk_divisor_std_mode = 0x4f, .clk_divisor_fast_mode = 0x3c, .clk_divisor_fast_plus_mode = 0x16, .has_config_load_reg = true, .has_multi_master_mode = true, .has_slcg_override_reg = true, .has_mst_fifo = true, .quirks = &tegra194_i2c_quirks, .supports_bus_clear = true, .has_apb_dma = false, .tlow_std_mode = 0x8, .thigh_std_mode = 0x7, .tlow_fast_fastplus_mode = 0x2, .thigh_fast_fastplus_mode = 0x2, .setup_hold_time_std_mode = 0x08080808, .setup_hold_time_fast_fast_plus_mode = 0x02020202, .setup_hold_time_hs_mode = 0x090909, .has_interface_timing_reg = true, }; static const struct of_device_id tegra_i2c_of_match[] = { { .compatible = "nvidia,tegra194-i2c", .data = &tegra194_i2c_hw, }, { .compatible = "nvidia,tegra186-i2c", .data = &tegra186_i2c_hw, }, #if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) { .compatible = "nvidia,tegra210-i2c-vi", .data = &tegra210_i2c_hw, }, #endif { .compatible = "nvidia,tegra210-i2c", .data = &tegra210_i2c_hw, }, { .compatible = "nvidia,tegra124-i2c", .data = &tegra124_i2c_hw, }, { .compatible = "nvidia,tegra114-i2c", .data = &tegra114_i2c_hw, }, { .compatible = "nvidia,tegra30-i2c", .data = &tegra30_i2c_hw, }, { .compatible = "nvidia,tegra20-i2c", .data = &tegra20_i2c_hw, }, #if IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC) { .compatible = "nvidia,tegra20-i2c-dvc", .data = &tegra20_i2c_hw, }, #endif {}, }; MODULE_DEVICE_TABLE(of, tegra_i2c_of_match); static void tegra_i2c_parse_dt(struct tegra_i2c_dev *i2c_dev) { struct device_node *np = i2c_dev->dev->of_node; bool multi_mode; i2c_parse_fw_timings(i2c_dev->dev, &i2c_dev->timings, true); multi_mode = device_property_read_bool(i2c_dev->dev, "multi-master"); i2c_dev->multimaster_mode = multi_mode; if (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC) && of_device_is_compatible(np, "nvidia,tegra20-i2c-dvc")) i2c_dev->is_dvc = true; if (IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) && of_device_is_compatible(np, "nvidia,tegra210-i2c-vi")) i2c_dev->is_vi = true; } static int tegra_i2c_init_reset(struct tegra_i2c_dev *i2c_dev) { if (ACPI_HANDLE(i2c_dev->dev)) return 0; i2c_dev->rst = devm_reset_control_get_exclusive(i2c_dev->dev, "i2c"); if (IS_ERR(i2c_dev->rst)) return dev_err_probe(i2c_dev->dev, PTR_ERR(i2c_dev->rst), "failed to get reset control\n"); return 0; } static int tegra_i2c_init_clocks(struct tegra_i2c_dev *i2c_dev) { int err; if (ACPI_HANDLE(i2c_dev->dev)) return 0; i2c_dev->clocks[i2c_dev->nclocks++].id = "div-clk"; if (i2c_dev->hw == &tegra20_i2c_hw || i2c_dev->hw == &tegra30_i2c_hw) i2c_dev->clocks[i2c_dev->nclocks++].id = "fast-clk"; if (IS_VI(i2c_dev)) i2c_dev->clocks[i2c_dev->nclocks++].id = "slow"; err = devm_clk_bulk_get(i2c_dev->dev, i2c_dev->nclocks, i2c_dev->clocks); if (err) return err; err = clk_bulk_prepare(i2c_dev->nclocks, i2c_dev->clocks); if (err) return err; i2c_dev->div_clk = i2c_dev->clocks[0].clk; if (!i2c_dev->multimaster_mode) return 0; err = clk_enable(i2c_dev->div_clk); if (err) { dev_err(i2c_dev->dev, "failed to enable div-clk: %d\n", err); goto unprepare_clocks; } return 0; unprepare_clocks: clk_bulk_unprepare(i2c_dev->nclocks, i2c_dev->clocks); return err; } 
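/*
 * Illustrative sketch, not part of the upstream driver: a standalone
 * helper that mirrors the div-clk rate arithmetic in tegra_i2c_init()
 * above.  The function name is hypothetical; the constants used in the
 * worked example are taken from the tegra210_i2c_hw table (tlow = 0x4,
 * thigh = 0x2, clk_divisor_std_mode = 0x19).
 */
static inline unsigned long
tegra_i2c_example_div_clk_rate(unsigned int bus_freq_hz, unsigned int tlow,
			       unsigned int thigh, unsigned int non_hs_mode)
{
	/*
	 * Each SCL period costs (tlow + thigh + 2) divider-output ticks,
	 * and I2C_CLK_DIVISOR further divides the parent clock by
	 * (non_hs_mode + 1), so div-clk has to run this much faster than
	 * the requested bus frequency.
	 */
	unsigned int clk_multiplier = (tlow + thigh + 2) * (non_hs_mode + 1);

	return (unsigned long)bus_freq_hz * clk_multiplier;
}

/*
 * Worked example: standard mode (100 kHz) on Tegra210 gives
 * clk_multiplier = (0x4 + 0x2 + 2) * (0x19 + 1) = 8 * 26 = 208, so the
 * driver requests clk_set_rate(div_clk, 100000 * 208) = 20.8 MHz.
 */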
static void tegra_i2c_release_clocks(struct tegra_i2c_dev *i2c_dev) { if (i2c_dev->multimaster_mode) clk_disable(i2c_dev->div_clk); clk_bulk_unprepare(i2c_dev->nclocks, i2c_dev->clocks); } static int tegra_i2c_init_hardware(struct tegra_i2c_dev *i2c_dev) { int ret; ret = pm_runtime_get_sync(i2c_dev->dev); if (ret < 0) dev_err(i2c_dev->dev, "runtime resume failed: %d\n", ret); else ret = tegra_i2c_init(i2c_dev); pm_runtime_put_sync(i2c_dev->dev); return ret; } static int tegra_i2c_probe(struct platform_device *pdev) { struct tegra_i2c_dev *i2c_dev; struct resource *res; int err; i2c_dev = devm_kzalloc(&pdev->dev, sizeof(*i2c_dev), GFP_KERNEL); if (!i2c_dev) return -ENOMEM; platform_set_drvdata(pdev, i2c_dev); init_completion(&i2c_dev->msg_complete); init_completion(&i2c_dev->dma_complete); i2c_dev->hw = device_get_match_data(&pdev->dev); i2c_dev->cont_id = pdev->id; i2c_dev->dev = &pdev->dev; i2c_dev->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(i2c_dev->base)) return PTR_ERR(i2c_dev->base); i2c_dev->base_phys = res->start; err = platform_get_irq(pdev, 0); if (err < 0) return err; i2c_dev->irq = err; /* interrupt will be enabled during of transfer time */ irq_set_status_flags(i2c_dev->irq, IRQ_NOAUTOEN); err = devm_request_threaded_irq(i2c_dev->dev, i2c_dev->irq, NULL, tegra_i2c_isr, IRQF_NO_SUSPEND | IRQF_ONESHOT, dev_name(i2c_dev->dev), i2c_dev); if (err) return err; tegra_i2c_parse_dt(i2c_dev); err = tegra_i2c_init_reset(i2c_dev); if (err) return err; err = tegra_i2c_init_clocks(i2c_dev); if (err) return err; err = tegra_i2c_init_dma(i2c_dev); if (err) goto release_clocks; /* * VI I2C is in VE power domain which is not always ON and not * IRQ-safe. Thus, IRQ-safe device shouldn't be attached to a * non IRQ-safe domain because this prevents powering off the power * domain. * * VI I2C device shouldn't be marked as IRQ-safe because VI I2C won't * be used for atomic transfers. 
*/ if (!IS_VI(i2c_dev)) pm_runtime_irq_safe(i2c_dev->dev); pm_runtime_enable(i2c_dev->dev); err = tegra_i2c_init_hardware(i2c_dev); if (err) goto release_rpm; i2c_set_adapdata(&i2c_dev->adapter, i2c_dev); i2c_dev->adapter.dev.of_node = i2c_dev->dev->of_node; i2c_dev->adapter.dev.parent = i2c_dev->dev; i2c_dev->adapter.retries = 1; i2c_dev->adapter.timeout = 6 * HZ; i2c_dev->adapter.quirks = i2c_dev->hw->quirks; i2c_dev->adapter.owner = THIS_MODULE; i2c_dev->adapter.class = I2C_CLASS_DEPRECATED; i2c_dev->adapter.algo = &tegra_i2c_algo; i2c_dev->adapter.nr = pdev->id; ACPI_COMPANION_SET(&i2c_dev->adapter.dev, ACPI_COMPANION(&pdev->dev)); if (i2c_dev->hw->supports_bus_clear) i2c_dev->adapter.bus_recovery_info = &tegra_i2c_recovery_info; strscpy(i2c_dev->adapter.name, dev_name(i2c_dev->dev), sizeof(i2c_dev->adapter.name)); err = i2c_add_numbered_adapter(&i2c_dev->adapter); if (err) goto release_rpm; return 0; release_rpm: pm_runtime_disable(i2c_dev->dev); tegra_i2c_release_dma(i2c_dev); release_clocks: tegra_i2c_release_clocks(i2c_dev); return err; } static void tegra_i2c_remove(struct platform_device *pdev) { struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev); i2c_del_adapter(&i2c_dev->adapter); pm_runtime_force_suspend(i2c_dev->dev); tegra_i2c_release_dma(i2c_dev); tegra_i2c_release_clocks(i2c_dev); } static int __maybe_unused tegra_i2c_runtime_resume(struct device *dev) { struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev); int err; err = pinctrl_pm_select_default_state(dev); if (err) return err; err = clk_bulk_enable(i2c_dev->nclocks, i2c_dev->clocks); if (err) return err; /* * VI I2C device is attached to VE power domain which goes through * power ON/OFF during runtime PM resume/suspend, meaning that * controller needs to be re-initialized after power ON. */ if (IS_VI(i2c_dev)) { err = tegra_i2c_init(i2c_dev); if (err) goto disable_clocks; } return 0; disable_clocks: clk_bulk_disable(i2c_dev->nclocks, i2c_dev->clocks); return err; } static int __maybe_unused tegra_i2c_runtime_suspend(struct device *dev) { struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev); clk_bulk_disable(i2c_dev->nclocks, i2c_dev->clocks); return pinctrl_pm_select_idle_state(dev); } static int __maybe_unused tegra_i2c_suspend(struct device *dev) { struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev); int err; i2c_mark_adapter_suspended(&i2c_dev->adapter); if (!pm_runtime_status_suspended(dev)) { err = tegra_i2c_runtime_suspend(dev); if (err) return err; } return 0; } static int __maybe_unused tegra_i2c_resume(struct device *dev) { struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev); int err; /* * We need to ensure that clocks are enabled so that registers can be * restored in tegra_i2c_init(). */ err = tegra_i2c_runtime_resume(dev); if (err) return err; err = tegra_i2c_init(i2c_dev); if (err) return err; /* * In case we are runtime suspended, disable clocks again so that we * don't unbalance the clock reference counts during the next runtime * resume transition. 
*/ if (pm_runtime_status_suspended(dev)) { err = tegra_i2c_runtime_suspend(dev); if (err) return err; } i2c_mark_adapter_resumed(&i2c_dev->adapter); return 0; } static const struct dev_pm_ops tegra_i2c_pm = { SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_i2c_suspend, tegra_i2c_resume) SET_RUNTIME_PM_OPS(tegra_i2c_runtime_suspend, tegra_i2c_runtime_resume, NULL) }; static const struct acpi_device_id tegra_i2c_acpi_match[] = { {.id = "NVDA0101", .driver_data = (kernel_ulong_t)&tegra210_i2c_hw}, {.id = "NVDA0201", .driver_data = (kernel_ulong_t)&tegra186_i2c_hw}, {.id = "NVDA0301", .driver_data = (kernel_ulong_t)&tegra194_i2c_hw}, { } }; MODULE_DEVICE_TABLE(acpi, tegra_i2c_acpi_match); static struct platform_driver tegra_i2c_driver = { .probe = tegra_i2c_probe, .remove_new = tegra_i2c_remove, .driver = { .name = "tegra-i2c", .of_match_table = tegra_i2c_of_match, .acpi_match_table = tegra_i2c_acpi_match, .pm = &tegra_i2c_pm, }, }; module_platform_driver(tegra_i2c_driver); MODULE_DESCRIPTION("NVIDIA Tegra I2C Bus Controller driver"); MODULE_AUTHOR("Colin Cross"); MODULE_LICENSE("GPL v2");
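/*
 * Illustrative sketch, not part of the upstream driver: the per-message
 * timeout estimate used by tegra_i2c_xfer_msg() above, pulled out into a
 * standalone helper so the arithmetic is easy to check.  The function
 * name is hypothetical.
 */
static inline unsigned int
tegra_i2c_example_xfer_time_ms(unsigned int xfer_size, unsigned int bus_freq_hz)
{
	/*
	 * 9 bits per byte (8 data bits plus ACK) plus start/stop bits,
	 * converted to milliseconds with round-to-nearest (the same
	 * result as DIV_ROUND_CLOSEST), on top of a fixed 100 ms margin.
	 */
	unsigned int bits = xfer_size * 9 + 2;

	return 100 + (bits * 1000 + bus_freq_hz / 2) / bus_freq_hz;
}

/*
 * Worked example: a 64-byte write also carries the 12-byte packet header
 * (the three words pushed by tegra_i2c_push_packet_header()), so
 * xfer_size = 76 and, at 400 kHz, the timeout becomes
 * 100 + round((76 * 9 + 2) * 1000 / 400000) = 100 + 2 = 102 ms.
 */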
linux-master
drivers/i2c/busses/i2c-tegra.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2015 Masahiro Yamada <[email protected]> */ #include <linux/clk.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> #include <linux/platform_device.h> #define UNIPHIER_I2C_DTRM 0x00 /* TX register */ #define UNIPHIER_I2C_DTRM_IRQEN BIT(11) /* enable interrupt */ #define UNIPHIER_I2C_DTRM_STA BIT(10) /* start condition */ #define UNIPHIER_I2C_DTRM_STO BIT(9) /* stop condition */ #define UNIPHIER_I2C_DTRM_NACK BIT(8) /* do not return ACK */ #define UNIPHIER_I2C_DTRM_RD BIT(0) /* read transaction */ #define UNIPHIER_I2C_DREC 0x04 /* RX register */ #define UNIPHIER_I2C_DREC_MST BIT(14) /* 1 = master, 0 = slave */ #define UNIPHIER_I2C_DREC_TX BIT(13) /* 1 = transmit, 0 = receive */ #define UNIPHIER_I2C_DREC_STS BIT(12) /* stop condition detected */ #define UNIPHIER_I2C_DREC_LRB BIT(11) /* no ACK */ #define UNIPHIER_I2C_DREC_LAB BIT(9) /* arbitration lost */ #define UNIPHIER_I2C_DREC_BBN BIT(8) /* bus not busy */ #define UNIPHIER_I2C_MYAD 0x08 /* slave address */ #define UNIPHIER_I2C_CLK 0x0c /* clock frequency control */ #define UNIPHIER_I2C_BRST 0x10 /* bus reset */ #define UNIPHIER_I2C_BRST_FOEN BIT(1) /* normal operation */ #define UNIPHIER_I2C_BRST_RSCL BIT(0) /* release SCL */ #define UNIPHIER_I2C_HOLD 0x14 /* hold time control */ #define UNIPHIER_I2C_BSTS 0x18 /* bus status monitor */ #define UNIPHIER_I2C_BSTS_SDA BIT(1) /* readback of SDA line */ #define UNIPHIER_I2C_BSTS_SCL BIT(0) /* readback of SCL line */ #define UNIPHIER_I2C_NOISE 0x1c /* noise filter control */ #define UNIPHIER_I2C_SETUP 0x20 /* setup time control */ struct uniphier_i2c_priv { struct completion comp; struct i2c_adapter adap; void __iomem *membase; struct clk *clk; unsigned int busy_cnt; unsigned int clk_cycle; }; static irqreturn_t uniphier_i2c_interrupt(int irq, void *dev_id) { struct uniphier_i2c_priv *priv = dev_id; /* * This hardware uses edge triggered interrupt. Do not touch the * hardware registers in this handler to make sure to catch the next * interrupt edge. Just send a complete signal and return. 
*/ complete(&priv->comp); return IRQ_HANDLED; } static int uniphier_i2c_xfer_byte(struct i2c_adapter *adap, u32 txdata, u32 *rxdatap) { struct uniphier_i2c_priv *priv = i2c_get_adapdata(adap); unsigned long time_left; u32 rxdata; reinit_completion(&priv->comp); txdata |= UNIPHIER_I2C_DTRM_IRQEN; writel(txdata, priv->membase + UNIPHIER_I2C_DTRM); time_left = wait_for_completion_timeout(&priv->comp, adap->timeout); if (unlikely(!time_left)) { dev_err(&adap->dev, "transaction timeout\n"); return -ETIMEDOUT; } rxdata = readl(priv->membase + UNIPHIER_I2C_DREC); if (rxdatap) *rxdatap = rxdata; return 0; } static int uniphier_i2c_send_byte(struct i2c_adapter *adap, u32 txdata) { u32 rxdata; int ret; ret = uniphier_i2c_xfer_byte(adap, txdata, &rxdata); if (ret) return ret; if (unlikely(rxdata & UNIPHIER_I2C_DREC_LAB)) return -EAGAIN; if (unlikely(rxdata & UNIPHIER_I2C_DREC_LRB)) return -ENXIO; return 0; } static int uniphier_i2c_tx(struct i2c_adapter *adap, u16 addr, u16 len, const u8 *buf) { int ret; ret = uniphier_i2c_send_byte(adap, addr << 1 | UNIPHIER_I2C_DTRM_STA | UNIPHIER_I2C_DTRM_NACK); if (ret) return ret; while (len--) { ret = uniphier_i2c_send_byte(adap, UNIPHIER_I2C_DTRM_NACK | *buf++); if (ret) return ret; } return 0; } static int uniphier_i2c_rx(struct i2c_adapter *adap, u16 addr, u16 len, u8 *buf) { int ret; ret = uniphier_i2c_send_byte(adap, addr << 1 | UNIPHIER_I2C_DTRM_STA | UNIPHIER_I2C_DTRM_NACK | UNIPHIER_I2C_DTRM_RD); if (ret) return ret; while (len--) { u32 rxdata; ret = uniphier_i2c_xfer_byte(adap, len ? 0 : UNIPHIER_I2C_DTRM_NACK, &rxdata); if (ret) return ret; *buf++ = rxdata; } return 0; } static int uniphier_i2c_stop(struct i2c_adapter *adap) { return uniphier_i2c_send_byte(adap, UNIPHIER_I2C_DTRM_STO | UNIPHIER_I2C_DTRM_NACK); } static int uniphier_i2c_master_xfer_one(struct i2c_adapter *adap, struct i2c_msg *msg, bool stop) { bool is_read = msg->flags & I2C_M_RD; bool recovery = false; int ret; if (is_read) ret = uniphier_i2c_rx(adap, msg->addr, msg->len, msg->buf); else ret = uniphier_i2c_tx(adap, msg->addr, msg->len, msg->buf); if (ret == -EAGAIN) /* could not acquire bus. bail out without STOP */ return ret; if (ret == -ETIMEDOUT) { /* This error is fatal. Needs recovery. */ stop = false; recovery = true; } if (stop) { int ret2 = uniphier_i2c_stop(adap); if (ret2) { /* Failed to issue STOP. The bus needs recovery. */ recovery = true; ret = ret ?: ret2; } } if (recovery) i2c_recover_bus(adap); return ret; } static int uniphier_i2c_check_bus_busy(struct i2c_adapter *adap) { struct uniphier_i2c_priv *priv = i2c_get_adapdata(adap); if (!(readl(priv->membase + UNIPHIER_I2C_DREC) & UNIPHIER_I2C_DREC_BBN)) { if (priv->busy_cnt++ > 3) { /* * If bus busy continues too long, it is probably * in a wrong state. Try bus recovery. */ i2c_recover_bus(adap); priv->busy_cnt = 0; } return -EAGAIN; } priv->busy_cnt = 0; return 0; } static int uniphier_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct i2c_msg *msg, *emsg = msgs + num; int ret; ret = uniphier_i2c_check_bus_busy(adap); if (ret) return ret; for (msg = msgs; msg < emsg; msg++) { /* Emit STOP if it is the last message or I2C_M_STOP is set. 
*/ bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP); ret = uniphier_i2c_master_xfer_one(adap, msg, stop); if (ret) return ret; } return num; } static u32 uniphier_i2c_functionality(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static const struct i2c_algorithm uniphier_i2c_algo = { .master_xfer = uniphier_i2c_master_xfer, .functionality = uniphier_i2c_functionality, }; static void uniphier_i2c_reset(struct uniphier_i2c_priv *priv, bool reset_on) { u32 val = UNIPHIER_I2C_BRST_RSCL; val |= reset_on ? 0 : UNIPHIER_I2C_BRST_FOEN; writel(val, priv->membase + UNIPHIER_I2C_BRST); } static int uniphier_i2c_get_scl(struct i2c_adapter *adap) { struct uniphier_i2c_priv *priv = i2c_get_adapdata(adap); return !!(readl(priv->membase + UNIPHIER_I2C_BSTS) & UNIPHIER_I2C_BSTS_SCL); } static void uniphier_i2c_set_scl(struct i2c_adapter *adap, int val) { struct uniphier_i2c_priv *priv = i2c_get_adapdata(adap); writel(val ? UNIPHIER_I2C_BRST_RSCL : 0, priv->membase + UNIPHIER_I2C_BRST); } static int uniphier_i2c_get_sda(struct i2c_adapter *adap) { struct uniphier_i2c_priv *priv = i2c_get_adapdata(adap); return !!(readl(priv->membase + UNIPHIER_I2C_BSTS) & UNIPHIER_I2C_BSTS_SDA); } static void uniphier_i2c_unprepare_recovery(struct i2c_adapter *adap) { uniphier_i2c_reset(i2c_get_adapdata(adap), false); } static struct i2c_bus_recovery_info uniphier_i2c_bus_recovery_info = { .recover_bus = i2c_generic_scl_recovery, .get_scl = uniphier_i2c_get_scl, .set_scl = uniphier_i2c_set_scl, .get_sda = uniphier_i2c_get_sda, .unprepare_recovery = uniphier_i2c_unprepare_recovery, }; static void uniphier_i2c_hw_init(struct uniphier_i2c_priv *priv) { unsigned int cyc = priv->clk_cycle; uniphier_i2c_reset(priv, true); /* * Bit30-16: clock cycles of tLOW. * Standard-mode: tLOW = 4.7 us, tHIGH = 4.0 us * Fast-mode: tLOW = 1.3 us, tHIGH = 0.6 us * "tLow/tHIGH = 5/4" meets both. 
*/ writel((cyc * 5 / 9 << 16) | cyc, priv->membase + UNIPHIER_I2C_CLK); uniphier_i2c_reset(priv, false); } static int uniphier_i2c_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct uniphier_i2c_priv *priv; u32 bus_speed; unsigned long clk_rate; int irq, ret; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->membase = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->membase)) return PTR_ERR(priv->membase); irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; if (of_property_read_u32(dev->of_node, "clock-frequency", &bus_speed)) bus_speed = I2C_MAX_STANDARD_MODE_FREQ; if (!bus_speed || bus_speed > I2C_MAX_FAST_MODE_FREQ) { dev_err(dev, "invalid clock-frequency %d\n", bus_speed); return -EINVAL; } priv->clk = devm_clk_get_enabled(dev, NULL); if (IS_ERR(priv->clk)) { dev_err(dev, "failed to enable clock\n"); return PTR_ERR(priv->clk); } clk_rate = clk_get_rate(priv->clk); if (!clk_rate) { dev_err(dev, "input clock rate should not be zero\n"); return -EINVAL; } priv->clk_cycle = clk_rate / bus_speed; init_completion(&priv->comp); priv->adap.owner = THIS_MODULE; priv->adap.algo = &uniphier_i2c_algo; priv->adap.dev.parent = dev; priv->adap.dev.of_node = dev->of_node; strscpy(priv->adap.name, "UniPhier I2C", sizeof(priv->adap.name)); priv->adap.bus_recovery_info = &uniphier_i2c_bus_recovery_info; i2c_set_adapdata(&priv->adap, priv); platform_set_drvdata(pdev, priv); uniphier_i2c_hw_init(priv); ret = devm_request_irq(dev, irq, uniphier_i2c_interrupt, 0, pdev->name, priv); if (ret) { dev_err(dev, "failed to request irq %d\n", irq); return ret; } return i2c_add_adapter(&priv->adap); } static void uniphier_i2c_remove(struct platform_device *pdev) { struct uniphier_i2c_priv *priv = platform_get_drvdata(pdev); i2c_del_adapter(&priv->adap); } static int __maybe_unused uniphier_i2c_suspend(struct device *dev) { struct uniphier_i2c_priv *priv = dev_get_drvdata(dev); clk_disable_unprepare(priv->clk); return 0; } static int __maybe_unused uniphier_i2c_resume(struct device *dev) { struct uniphier_i2c_priv *priv = dev_get_drvdata(dev); int ret; ret = clk_prepare_enable(priv->clk); if (ret) return ret; uniphier_i2c_hw_init(priv); return 0; } static const struct dev_pm_ops uniphier_i2c_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(uniphier_i2c_suspend, uniphier_i2c_resume) }; static const struct of_device_id uniphier_i2c_match[] = { { .compatible = "socionext,uniphier-i2c" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, uniphier_i2c_match); static struct platform_driver uniphier_i2c_drv = { .probe = uniphier_i2c_probe, .remove_new = uniphier_i2c_remove, .driver = { .name = "uniphier-i2c", .of_match_table = uniphier_i2c_match, .pm = &uniphier_i2c_pm_ops, }, }; module_platform_driver(uniphier_i2c_drv); MODULE_AUTHOR("Masahiro Yamada <[email protected]>"); MODULE_DESCRIPTION("UniPhier I2C bus driver"); MODULE_LICENSE("GPL");
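/*
 * Illustrative sketch, not part of the upstream driver: the value that
 * uniphier_i2c_hw_init() above programs into UNIPHIER_I2C_CLK, expressed
 * as a standalone helper.  The function name and the clock rates in the
 * worked example are hypothetical.
 */
static inline u32 uniphier_i2c_example_clk_reg(unsigned long clk_rate,
					       unsigned int bus_speed)
{
	/*
	 * One SCL period in input-clock cycles, as computed at probe
	 * time: priv->clk_cycle = clk_rate / bus_speed.
	 */
	unsigned int cyc = clk_rate / bus_speed;

	/*
	 * tLOW takes 5/9 of the period (tLOW/tHIGH = 5/4) and goes into
	 * bits 30-16; the full period goes into the low bits.
	 */
	return (cyc * 5 / 9) << 16 | cyc;
}

/*
 * Worked example: a 50 MHz input clock and a 100 kHz bus give cyc = 500,
 * a tLOW field of 500 * 5 / 9 = 277, and a register value of
 * (277 << 16) | 500 = 0x011501f4.
 */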
linux-master
drivers/i2c/busses/i2c-uniphier.c
// SPDX-License-Identifier: GPL-2.0+ /* * This is i.MX low power i2c controller driver. * * Copyright 2016 Freescale Semiconductor, Inc. */ #include <linux/clk.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/sched.h> #include <linux/slab.h> #define DRIVER_NAME "imx-lpi2c" #define LPI2C_PARAM 0x04 /* i2c RX/TX FIFO size */ #define LPI2C_MCR 0x10 /* i2c contrl register */ #define LPI2C_MSR 0x14 /* i2c status register */ #define LPI2C_MIER 0x18 /* i2c interrupt enable */ #define LPI2C_MCFGR0 0x20 /* i2c master configuration */ #define LPI2C_MCFGR1 0x24 /* i2c master configuration */ #define LPI2C_MCFGR2 0x28 /* i2c master configuration */ #define LPI2C_MCFGR3 0x2C /* i2c master configuration */ #define LPI2C_MCCR0 0x48 /* i2c master clk configuration */ #define LPI2C_MCCR1 0x50 /* i2c master clk configuration */ #define LPI2C_MFCR 0x58 /* i2c master FIFO control */ #define LPI2C_MFSR 0x5C /* i2c master FIFO status */ #define LPI2C_MTDR 0x60 /* i2c master TX data register */ #define LPI2C_MRDR 0x70 /* i2c master RX data register */ /* i2c command */ #define TRAN_DATA 0X00 #define RECV_DATA 0X01 #define GEN_STOP 0X02 #define RECV_DISCARD 0X03 #define GEN_START 0X04 #define START_NACK 0X05 #define START_HIGH 0X06 #define START_HIGH_NACK 0X07 #define MCR_MEN BIT(0) #define MCR_RST BIT(1) #define MCR_DOZEN BIT(2) #define MCR_DBGEN BIT(3) #define MCR_RTF BIT(8) #define MCR_RRF BIT(9) #define MSR_TDF BIT(0) #define MSR_RDF BIT(1) #define MSR_SDF BIT(9) #define MSR_NDF BIT(10) #define MSR_ALF BIT(11) #define MSR_MBF BIT(24) #define MSR_BBF BIT(25) #define MIER_TDIE BIT(0) #define MIER_RDIE BIT(1) #define MIER_SDIE BIT(9) #define MIER_NDIE BIT(10) #define MCFGR1_AUTOSTOP BIT(8) #define MCFGR1_IGNACK BIT(9) #define MRDR_RXEMPTY BIT(14) #define I2C_CLK_RATIO 2 #define CHUNK_DATA 256 #define I2C_PM_TIMEOUT 10 /* ms */ enum lpi2c_imx_mode { STANDARD, /* 100+Kbps */ FAST, /* 400+Kbps */ FAST_PLUS, /* 1.0+Mbps */ HS, /* 3.4+Mbps */ ULTRA_FAST, /* 5.0+Mbps */ }; enum lpi2c_imx_pincfg { TWO_PIN_OD, TWO_PIN_OO, TWO_PIN_PP, FOUR_PIN_PP, }; struct lpi2c_imx_struct { struct i2c_adapter adapter; int num_clks; struct clk_bulk_data *clks; void __iomem *base; __u8 *rx_buf; __u8 *tx_buf; struct completion complete; unsigned int msglen; unsigned int delivered; unsigned int block_data; unsigned int bitrate; unsigned int txfifosize; unsigned int rxfifosize; enum lpi2c_imx_mode mode; }; static void lpi2c_imx_intctrl(struct lpi2c_imx_struct *lpi2c_imx, unsigned int enable) { writel(enable, lpi2c_imx->base + LPI2C_MIER); } static int lpi2c_imx_bus_busy(struct lpi2c_imx_struct *lpi2c_imx) { unsigned long orig_jiffies = jiffies; unsigned int temp; while (1) { temp = readl(lpi2c_imx->base + LPI2C_MSR); /* check for arbitration lost, clear if set */ if (temp & MSR_ALF) { writel(temp, lpi2c_imx->base + LPI2C_MSR); return -EAGAIN; } if (temp & (MSR_BBF | MSR_MBF)) break; if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) { dev_dbg(&lpi2c_imx->adapter.dev, "bus not work\n"); return -ETIMEDOUT; } schedule(); } return 0; } static void lpi2c_imx_set_mode(struct lpi2c_imx_struct *lpi2c_imx) { unsigned int bitrate = lpi2c_imx->bitrate; enum lpi2c_imx_mode mode; if (bitrate < 
I2C_MAX_FAST_MODE_FREQ) mode = STANDARD; else if (bitrate < I2C_MAX_FAST_MODE_PLUS_FREQ) mode = FAST; else if (bitrate < I2C_MAX_HIGH_SPEED_MODE_FREQ) mode = FAST_PLUS; else if (bitrate < I2C_MAX_ULTRA_FAST_MODE_FREQ) mode = HS; else mode = ULTRA_FAST; lpi2c_imx->mode = mode; } static int lpi2c_imx_start(struct lpi2c_imx_struct *lpi2c_imx, struct i2c_msg *msgs) { unsigned int temp; temp = readl(lpi2c_imx->base + LPI2C_MCR); temp |= MCR_RRF | MCR_RTF; writel(temp, lpi2c_imx->base + LPI2C_MCR); writel(0x7f00, lpi2c_imx->base + LPI2C_MSR); temp = i2c_8bit_addr_from_msg(msgs) | (GEN_START << 8); writel(temp, lpi2c_imx->base + LPI2C_MTDR); return lpi2c_imx_bus_busy(lpi2c_imx); } static void lpi2c_imx_stop(struct lpi2c_imx_struct *lpi2c_imx) { unsigned long orig_jiffies = jiffies; unsigned int temp; writel(GEN_STOP << 8, lpi2c_imx->base + LPI2C_MTDR); do { temp = readl(lpi2c_imx->base + LPI2C_MSR); if (temp & MSR_SDF) break; if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) { dev_dbg(&lpi2c_imx->adapter.dev, "stop timeout\n"); break; } schedule(); } while (1); } /* CLKLO = I2C_CLK_RATIO * CLKHI, SETHOLD = CLKHI, DATAVD = CLKHI/2 */ static int lpi2c_imx_config(struct lpi2c_imx_struct *lpi2c_imx) { u8 prescale, filt, sethold, datavd; unsigned int clk_rate, clk_cycle, clkhi, clklo; enum lpi2c_imx_pincfg pincfg; unsigned int temp; lpi2c_imx_set_mode(lpi2c_imx); clk_rate = clk_get_rate(lpi2c_imx->clks[0].clk); if (!clk_rate) return -EINVAL; if (lpi2c_imx->mode == HS || lpi2c_imx->mode == ULTRA_FAST) filt = 0; else filt = 2; for (prescale = 0; prescale <= 7; prescale++) { clk_cycle = clk_rate / ((1 << prescale) * lpi2c_imx->bitrate) - 3 - (filt >> 1); clkhi = DIV_ROUND_UP(clk_cycle, I2C_CLK_RATIO + 1); clklo = clk_cycle - clkhi; if (clklo < 64) break; } if (prescale > 7) return -EINVAL; /* set MCFGR1: PINCFG, PRESCALE, IGNACK */ if (lpi2c_imx->mode == ULTRA_FAST) pincfg = TWO_PIN_OO; else pincfg = TWO_PIN_OD; temp = prescale | pincfg << 24; if (lpi2c_imx->mode == ULTRA_FAST) temp |= MCFGR1_IGNACK; writel(temp, lpi2c_imx->base + LPI2C_MCFGR1); /* set MCFGR2: FILTSDA, FILTSCL */ temp = (filt << 16) | (filt << 24); writel(temp, lpi2c_imx->base + LPI2C_MCFGR2); /* set MCCR: DATAVD, SETHOLD, CLKHI, CLKLO */ sethold = clkhi; datavd = clkhi >> 1; temp = datavd << 24 | sethold << 16 | clkhi << 8 | clklo; if (lpi2c_imx->mode == HS) writel(temp, lpi2c_imx->base + LPI2C_MCCR1); else writel(temp, lpi2c_imx->base + LPI2C_MCCR0); return 0; } static int lpi2c_imx_master_enable(struct lpi2c_imx_struct *lpi2c_imx) { unsigned int temp; int ret; ret = pm_runtime_resume_and_get(lpi2c_imx->adapter.dev.parent); if (ret < 0) return ret; temp = MCR_RST; writel(temp, lpi2c_imx->base + LPI2C_MCR); writel(0, lpi2c_imx->base + LPI2C_MCR); ret = lpi2c_imx_config(lpi2c_imx); if (ret) goto rpm_put; temp = readl(lpi2c_imx->base + LPI2C_MCR); temp |= MCR_MEN; writel(temp, lpi2c_imx->base + LPI2C_MCR); return 0; rpm_put: pm_runtime_mark_last_busy(lpi2c_imx->adapter.dev.parent); pm_runtime_put_autosuspend(lpi2c_imx->adapter.dev.parent); return ret; } static int lpi2c_imx_master_disable(struct lpi2c_imx_struct *lpi2c_imx) { u32 temp; temp = readl(lpi2c_imx->base + LPI2C_MCR); temp &= ~MCR_MEN; writel(temp, lpi2c_imx->base + LPI2C_MCR); pm_runtime_mark_last_busy(lpi2c_imx->adapter.dev.parent); pm_runtime_put_autosuspend(lpi2c_imx->adapter.dev.parent); return 0; } static int lpi2c_imx_msg_complete(struct lpi2c_imx_struct *lpi2c_imx) { unsigned long timeout; timeout = wait_for_completion_timeout(&lpi2c_imx->complete, HZ); 
return timeout ? 0 : -ETIMEDOUT; } static int lpi2c_imx_txfifo_empty(struct lpi2c_imx_struct *lpi2c_imx) { unsigned long orig_jiffies = jiffies; u32 txcnt; do { txcnt = readl(lpi2c_imx->base + LPI2C_MFSR) & 0xff; if (readl(lpi2c_imx->base + LPI2C_MSR) & MSR_NDF) { dev_dbg(&lpi2c_imx->adapter.dev, "NDF detected\n"); return -EIO; } if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) { dev_dbg(&lpi2c_imx->adapter.dev, "txfifo empty timeout\n"); return -ETIMEDOUT; } schedule(); } while (txcnt); return 0; } static void lpi2c_imx_set_tx_watermark(struct lpi2c_imx_struct *lpi2c_imx) { writel(lpi2c_imx->txfifosize >> 1, lpi2c_imx->base + LPI2C_MFCR); } static void lpi2c_imx_set_rx_watermark(struct lpi2c_imx_struct *lpi2c_imx) { unsigned int temp, remaining; remaining = lpi2c_imx->msglen - lpi2c_imx->delivered; if (remaining > (lpi2c_imx->rxfifosize >> 1)) temp = lpi2c_imx->rxfifosize >> 1; else temp = 0; writel(temp << 16, lpi2c_imx->base + LPI2C_MFCR); } static void lpi2c_imx_write_txfifo(struct lpi2c_imx_struct *lpi2c_imx) { unsigned int data, txcnt; txcnt = readl(lpi2c_imx->base + LPI2C_MFSR) & 0xff; while (txcnt < lpi2c_imx->txfifosize) { if (lpi2c_imx->delivered == lpi2c_imx->msglen) break; data = lpi2c_imx->tx_buf[lpi2c_imx->delivered++]; writel(data, lpi2c_imx->base + LPI2C_MTDR); txcnt++; } if (lpi2c_imx->delivered < lpi2c_imx->msglen) lpi2c_imx_intctrl(lpi2c_imx, MIER_TDIE | MIER_NDIE); else complete(&lpi2c_imx->complete); } static void lpi2c_imx_read_rxfifo(struct lpi2c_imx_struct *lpi2c_imx) { unsigned int blocklen, remaining; unsigned int temp, data; do { data = readl(lpi2c_imx->base + LPI2C_MRDR); if (data & MRDR_RXEMPTY) break; lpi2c_imx->rx_buf[lpi2c_imx->delivered++] = data & 0xff; } while (1); /* * First byte is the length of remaining packet in the SMBus block * data read. Add it to msgs->len. */ if (lpi2c_imx->block_data) { blocklen = lpi2c_imx->rx_buf[0]; lpi2c_imx->msglen += blocklen; } remaining = lpi2c_imx->msglen - lpi2c_imx->delivered; if (!remaining) { complete(&lpi2c_imx->complete); return; } /* not finished, still waiting for rx data */ lpi2c_imx_set_rx_watermark(lpi2c_imx); /* multiple receive commands */ if (lpi2c_imx->block_data) { lpi2c_imx->block_data = 0; temp = remaining; temp |= (RECV_DATA << 8); writel(temp, lpi2c_imx->base + LPI2C_MTDR); } else if (!(lpi2c_imx->delivered & 0xff)) { temp = (remaining > CHUNK_DATA ? CHUNK_DATA : remaining) - 1; temp |= (RECV_DATA << 8); writel(temp, lpi2c_imx->base + LPI2C_MTDR); } lpi2c_imx_intctrl(lpi2c_imx, MIER_RDIE); } static void lpi2c_imx_write(struct lpi2c_imx_struct *lpi2c_imx, struct i2c_msg *msgs) { lpi2c_imx->tx_buf = msgs->buf; lpi2c_imx_set_tx_watermark(lpi2c_imx); lpi2c_imx_write_txfifo(lpi2c_imx); } static void lpi2c_imx_read(struct lpi2c_imx_struct *lpi2c_imx, struct i2c_msg *msgs) { unsigned int temp; lpi2c_imx->rx_buf = msgs->buf; lpi2c_imx->block_data = msgs->flags & I2C_M_RECV_LEN; lpi2c_imx_set_rx_watermark(lpi2c_imx); temp = msgs->len > CHUNK_DATA ? 
CHUNK_DATA - 1 : msgs->len - 1; temp |= (RECV_DATA << 8); writel(temp, lpi2c_imx->base + LPI2C_MTDR); lpi2c_imx_intctrl(lpi2c_imx, MIER_RDIE | MIER_NDIE); } static int lpi2c_imx_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) { struct lpi2c_imx_struct *lpi2c_imx = i2c_get_adapdata(adapter); unsigned int temp; int i, result; result = lpi2c_imx_master_enable(lpi2c_imx); if (result) return result; for (i = 0; i < num; i++) { result = lpi2c_imx_start(lpi2c_imx, &msgs[i]); if (result) goto disable; /* quick smbus */ if (num == 1 && msgs[0].len == 0) goto stop; lpi2c_imx->rx_buf = NULL; lpi2c_imx->tx_buf = NULL; lpi2c_imx->delivered = 0; lpi2c_imx->msglen = msgs[i].len; init_completion(&lpi2c_imx->complete); if (msgs[i].flags & I2C_M_RD) lpi2c_imx_read(lpi2c_imx, &msgs[i]); else lpi2c_imx_write(lpi2c_imx, &msgs[i]); result = lpi2c_imx_msg_complete(lpi2c_imx); if (result) goto stop; if (!(msgs[i].flags & I2C_M_RD)) { result = lpi2c_imx_txfifo_empty(lpi2c_imx); if (result) goto stop; } } stop: lpi2c_imx_stop(lpi2c_imx); temp = readl(lpi2c_imx->base + LPI2C_MSR); if ((temp & MSR_NDF) && !result) result = -EIO; disable: lpi2c_imx_master_disable(lpi2c_imx); dev_dbg(&lpi2c_imx->adapter.dev, "<%s> exit with: %s: %d\n", __func__, (result < 0) ? "error" : "success msg", (result < 0) ? result : num); return (result < 0) ? result : num; } static irqreturn_t lpi2c_imx_isr(int irq, void *dev_id) { struct lpi2c_imx_struct *lpi2c_imx = dev_id; unsigned int enabled; unsigned int temp; enabled = readl(lpi2c_imx->base + LPI2C_MIER); lpi2c_imx_intctrl(lpi2c_imx, 0); temp = readl(lpi2c_imx->base + LPI2C_MSR); temp &= enabled; if (temp & MSR_NDF) complete(&lpi2c_imx->complete); else if (temp & MSR_RDF) lpi2c_imx_read_rxfifo(lpi2c_imx); else if (temp & MSR_TDF) lpi2c_imx_write_txfifo(lpi2c_imx); return IRQ_HANDLED; } static u32 lpi2c_imx_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SMBUS_READ_BLOCK_DATA; } static const struct i2c_algorithm lpi2c_imx_algo = { .master_xfer = lpi2c_imx_xfer, .functionality = lpi2c_imx_func, }; static const struct of_device_id lpi2c_imx_of_match[] = { { .compatible = "fsl,imx7ulp-lpi2c" }, { }, }; MODULE_DEVICE_TABLE(of, lpi2c_imx_of_match); static int lpi2c_imx_probe(struct platform_device *pdev) { struct lpi2c_imx_struct *lpi2c_imx; unsigned int temp; int irq, ret; lpi2c_imx = devm_kzalloc(&pdev->dev, sizeof(*lpi2c_imx), GFP_KERNEL); if (!lpi2c_imx) return -ENOMEM; lpi2c_imx->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(lpi2c_imx->base)) return PTR_ERR(lpi2c_imx->base); irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; lpi2c_imx->adapter.owner = THIS_MODULE; lpi2c_imx->adapter.algo = &lpi2c_imx_algo; lpi2c_imx->adapter.dev.parent = &pdev->dev; lpi2c_imx->adapter.dev.of_node = pdev->dev.of_node; strscpy(lpi2c_imx->adapter.name, pdev->name, sizeof(lpi2c_imx->adapter.name)); ret = devm_clk_bulk_get_all(&pdev->dev, &lpi2c_imx->clks); if (ret < 0) return dev_err_probe(&pdev->dev, ret, "can't get I2C peripheral clock\n"); lpi2c_imx->num_clks = ret; ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency", &lpi2c_imx->bitrate); if (ret) lpi2c_imx->bitrate = I2C_MAX_STANDARD_MODE_FREQ; ret = devm_request_irq(&pdev->dev, irq, lpi2c_imx_isr, 0, pdev->name, lpi2c_imx); if (ret) return dev_err_probe(&pdev->dev, ret, "can't claim irq %d\n", irq); i2c_set_adapdata(&lpi2c_imx->adapter, lpi2c_imx); platform_set_drvdata(pdev, lpi2c_imx); ret = clk_bulk_prepare_enable(lpi2c_imx->num_clks, lpi2c_imx->clks); if (ret) 
return ret; pm_runtime_set_autosuspend_delay(&pdev->dev, I2C_PM_TIMEOUT); pm_runtime_use_autosuspend(&pdev->dev); pm_runtime_get_noresume(&pdev->dev); pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); temp = readl(lpi2c_imx->base + LPI2C_PARAM); lpi2c_imx->txfifosize = 1 << (temp & 0x0f); lpi2c_imx->rxfifosize = 1 << ((temp >> 8) & 0x0f); ret = i2c_add_adapter(&lpi2c_imx->adapter); if (ret) goto rpm_disable; pm_runtime_mark_last_busy(&pdev->dev); pm_runtime_put_autosuspend(&pdev->dev); dev_info(&lpi2c_imx->adapter.dev, "LPI2C adapter registered\n"); return 0; rpm_disable: pm_runtime_put(&pdev->dev); pm_runtime_disable(&pdev->dev); pm_runtime_dont_use_autosuspend(&pdev->dev); return ret; } static void lpi2c_imx_remove(struct platform_device *pdev) { struct lpi2c_imx_struct *lpi2c_imx = platform_get_drvdata(pdev); i2c_del_adapter(&lpi2c_imx->adapter); pm_runtime_disable(&pdev->dev); pm_runtime_dont_use_autosuspend(&pdev->dev); } static int __maybe_unused lpi2c_runtime_suspend(struct device *dev) { struct lpi2c_imx_struct *lpi2c_imx = dev_get_drvdata(dev); clk_bulk_disable(lpi2c_imx->num_clks, lpi2c_imx->clks); pinctrl_pm_select_sleep_state(dev); return 0; } static int __maybe_unused lpi2c_runtime_resume(struct device *dev) { struct lpi2c_imx_struct *lpi2c_imx = dev_get_drvdata(dev); int ret; pinctrl_pm_select_default_state(dev); ret = clk_bulk_enable(lpi2c_imx->num_clks, lpi2c_imx->clks); if (ret) { dev_err(dev, "failed to enable I2C clock, ret=%d\n", ret); return ret; } return 0; } static const struct dev_pm_ops lpi2c_pm_ops = { SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) SET_RUNTIME_PM_OPS(lpi2c_runtime_suspend, lpi2c_runtime_resume, NULL) }; static struct platform_driver lpi2c_imx_driver = { .probe = lpi2c_imx_probe, .remove_new = lpi2c_imx_remove, .driver = { .name = DRIVER_NAME, .of_match_table = lpi2c_imx_of_match, .pm = &lpi2c_pm_ops, }, }; module_platform_driver(lpi2c_imx_driver); MODULE_AUTHOR("Gao Pan <[email protected]>"); MODULE_DESCRIPTION("I2C adapter driver for LPI2C bus"); MODULE_LICENSE("GPL");
linux-master
drivers/i2c/busses/i2c-imx-lpi2c.c
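For context on how an adapter registered by a bus driver like the one above is exercised from userspace, here is a minimal sketch using the standard Linux i2c-dev interface: it issues a write followed by a repeated-START read in a single I2C_RDWR ioctl, which is exactly the kind of two-message transfer the driver's master_xfer path handles. The bus node /dev/i2c-0, slave address 0x50 and register 0x00 are placeholders for illustration, not values taken from the driver above.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

int main(void)
{
	int fd = open("/dev/i2c-0", O_RDWR);	/* hypothetical bus number */
	uint8_t reg = 0x00, val;
	struct i2c_msg msgs[2];
	struct i2c_rdwr_ioctl_data xfer;

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* First message: write the register index to the device. */
	msgs[0].addr = 0x50;
	msgs[0].flags = 0;
	msgs[0].len = 1;
	msgs[0].buf = &reg;

	/* Second message: repeated START, then read one byte back. */
	msgs[1].addr = 0x50;
	msgs[1].flags = I2C_M_RD;
	msgs[1].len = 1;
	msgs[1].buf = &val;

	xfer.msgs = msgs;
	xfer.nmsgs = 2;

	if (ioctl(fd, I2C_RDWR, &xfer) < 0) {
		perror("I2C_RDWR");
		close(fd);
		return 1;
	}

	printf("reg 0x%02x = 0x%02x\n", reg, val);
	close(fd);
	return 0;
}

The kernel core serializes this against other users of the adapter, so the driver's master_xfer sees the two messages back to back with a repeated START between them.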
// SPDX-License-Identifier: GPL-2.0-or-later /* i2c Support for Via Technologies 82C586B South Bridge Copyright (c) 1998, 1999 Kyösti Mälkki <[email protected]> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/ioport.h> #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> #include <linux/io.h> /* Power management registers */ #define PM_CFG_REVID 0x08 /* silicon revision code */ #define PM_CFG_IOBASE0 0x20 #define PM_CFG_IOBASE1 0x48 #define I2C_DIR (pm_io_base+0x40) #define I2C_OUT (pm_io_base+0x42) #define I2C_IN (pm_io_base+0x44) #define I2C_SCL 0x02 /* clock bit in DIR/OUT/IN register */ #define I2C_SDA 0x04 /* io-region reservation */ #define IOSPACE 0x06 static struct pci_driver vt586b_driver; static u16 pm_io_base; /* It does not appear from the datasheet that the GPIO pins are open drain. So a we set a low value by setting the direction to output and a high value by setting the direction to input and relying on the required I2C pullup. The data value is initialized to 0 in via_init() and never changed. */ static void bit_via_setscl(void *data, int state) { outb(state ? inb(I2C_DIR) & ~I2C_SCL : inb(I2C_DIR) | I2C_SCL, I2C_DIR); } static void bit_via_setsda(void *data, int state) { outb(state ? inb(I2C_DIR) & ~I2C_SDA : inb(I2C_DIR) | I2C_SDA, I2C_DIR); } static int bit_via_getscl(void *data) { return (0 != (inb(I2C_IN) & I2C_SCL)); } static int bit_via_getsda(void *data) { return (0 != (inb(I2C_IN) & I2C_SDA)); } static struct i2c_algo_bit_data bit_data = { .setsda = bit_via_setsda, .setscl = bit_via_setscl, .getsda = bit_via_getsda, .getscl = bit_via_getscl, .udelay = 5, .timeout = HZ }; static struct i2c_adapter vt586b_adapter = { .owner = THIS_MODULE, .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, .name = "VIA i2c", .algo_data = &bit_data, }; static const struct pci_device_id vt586b_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3) }, { 0, } }; MODULE_DEVICE_TABLE (pci, vt586b_ids); static int vt586b_probe(struct pci_dev *dev, const struct pci_device_id *id) { u16 base; u8 rev; int res; if (pm_io_base) { dev_err(&dev->dev, "i2c-via: Will only support one host\n"); return -ENODEV; } pci_read_config_byte(dev, PM_CFG_REVID, &rev); switch (rev) { case 0x00: base = PM_CFG_IOBASE0; break; case 0x01: case 0x10: base = PM_CFG_IOBASE1; break; default: base = PM_CFG_IOBASE1; /* later revision */ } pci_read_config_word(dev, base, &pm_io_base); pm_io_base &= (0xff << 8); if (!request_region(I2C_DIR, IOSPACE, vt586b_driver.name)) { dev_err(&dev->dev, "IO 0x%x-0x%x already in use\n", I2C_DIR, I2C_DIR + IOSPACE); return -ENODEV; } outb(inb(I2C_DIR) & ~(I2C_SDA | I2C_SCL), I2C_DIR); outb(inb(I2C_OUT) & ~(I2C_SDA | I2C_SCL), I2C_OUT); /* set up the sysfs linkage to our parent device */ vt586b_adapter.dev.parent = &dev->dev; res = i2c_bit_add_bus(&vt586b_adapter); if ( res < 0 ) { release_region(I2C_DIR, IOSPACE); pm_io_base = 0; return res; } return 0; } static void vt586b_remove(struct pci_dev *dev) { i2c_del_adapter(&vt586b_adapter); release_region(I2C_DIR, IOSPACE); pm_io_base = 0; } static struct pci_driver vt586b_driver = { .name = "vt586b_smbus", .id_table = vt586b_ids, .probe = vt586b_probe, .remove = vt586b_remove, }; module_pci_driver(vt586b_driver); MODULE_AUTHOR("Kyösti Mälkki <[email protected]>"); MODULE_DESCRIPTION("i2c for Via vt82c586b southbridge"); MODULE_LICENSE("GPL");
linux-master
drivers/i2c/busses/i2c-via.c
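The driver above emulates open-drain signalling by toggling a GPIO direction register: driving a line low means switching it to output (with the data latch held at 0), and releasing it high means switching it back to input so the external pull-up wins. Below is a small standalone sketch of that idea using a made-up register image; it is not the real VIA power-management I/O space, just a toy model of the same trick.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical register image: a direction register plus an input latch.
 * Setting a DIR bit drives the line low; clearing it releases the line
 * so the pull-up can take it high.
 */
struct fake_gpio {
	uint8_t dir;	/* 1 = output (driven low), 0 = input (released) */
	uint8_t in;	/* sampled line state */
};

#define SCL_BIT 0x02
#define SDA_BIT 0x04

static void set_line(struct fake_gpio *g, uint8_t bit, int high)
{
	if (high)
		g->dir &= ~bit;	/* release: line floats up to the pull-up */
	else
		g->dir |= bit;	/* drive low */
	/* In this toy model the sampled value simply mirrors the drive. */
	g->in = (uint8_t)(~g->dir & (SCL_BIT | SDA_BIT));
}

static int get_line(const struct fake_gpio *g, uint8_t bit)
{
	return !!(g->in & bit);
}

int main(void)
{
	struct fake_gpio gpio = { .dir = 0, .in = SCL_BIT | SDA_BIT };

	/* I2C START: SDA falls while SCL stays high. */
	set_line(&gpio, SDA_BIT, 0);
	printf("START: SCL=%d SDA=%d\n",
	       get_line(&gpio, SCL_BIT), get_line(&gpio, SDA_BIT));

	/* I2C STOP: SDA rises again while SCL is high. */
	set_line(&gpio, SDA_BIT, 1);
	printf("STOP:  SCL=%d SDA=%d\n",
	       get_line(&gpio, SCL_BIT), get_line(&gpio, SDA_BIT));
	return 0;
}

In the real driver the i2c-algo-bit layer supplies the bus protocol and timing; the driver only provides the four setscl/setsda/getscl/getsda callbacks shown above.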
// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (c) 1998, 1999 Frodo Looijaard <[email protected]> and Philip Edelbrock <[email protected]> */ /* Note: we assume there can only be one SIS5595 with one SMBus interface */ /* Note: all have mfr. ID 0x1039. SUPPORTED PCI ID 5595 0008 Note: these chips contain a 0008 device which is incompatible with the 5595. We recognize these by the presence of the listed "blacklist" PCI ID and refuse to load. NOT SUPPORTED PCI ID BLACKLIST PCI ID 540 0008 0540 550 0008 0550 5513 0008 5511 5581 0008 5597 5582 0008 5597 5597 0008 5597 5598 0008 5597/5598 630 0008 0630 645 0008 0645 646 0008 0646 648 0008 0648 650 0008 0650 651 0008 0651 730 0008 0730 735 0008 0735 745 0008 0745 746 0008 0746 */ /* TO DO: * Add Block Transfers (ugly, but supported by the adapter) * Add adapter resets */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/acpi.h> #include <linux/io.h> static int blacklist[] = { PCI_DEVICE_ID_SI_540, PCI_DEVICE_ID_SI_550, PCI_DEVICE_ID_SI_630, PCI_DEVICE_ID_SI_645, PCI_DEVICE_ID_SI_646, PCI_DEVICE_ID_SI_648, PCI_DEVICE_ID_SI_650, PCI_DEVICE_ID_SI_651, PCI_DEVICE_ID_SI_730, PCI_DEVICE_ID_SI_735, PCI_DEVICE_ID_SI_745, PCI_DEVICE_ID_SI_746, PCI_DEVICE_ID_SI_5511, /* 5513 chip has the 0008 device but that ID shows up in other chips so we use the 5511 ID for recognition */ PCI_DEVICE_ID_SI_5597, PCI_DEVICE_ID_SI_5598, 0, /* terminates the list */ }; /* Length of ISA address segment */ #define SIS5595_EXTENT 8 /* SIS5595 SMBus registers */ #define SMB_STS_LO 0x00 #define SMB_STS_HI 0x01 #define SMB_CTL_LO 0x02 #define SMB_CTL_HI 0x03 #define SMB_ADDR 0x04 #define SMB_CMD 0x05 #define SMB_PCNT 0x06 #define SMB_CNT 0x07 #define SMB_BYTE 0x08 #define SMB_DEV 0x10 #define SMB_DB0 0x11 #define SMB_DB1 0x12 #define SMB_HAA 0x13 /* PCI Address Constants */ #define SMB_INDEX 0x38 #define SMB_DAT 0x39 #define SIS5595_ENABLE_REG 0x40 #define ACPI_BASE 0x90 /* Other settings */ #define MAX_TIMEOUT 500 /* SIS5595 constants */ #define SIS5595_QUICK 0x00 #define SIS5595_BYTE 0x02 #define SIS5595_BYTE_DATA 0x04 #define SIS5595_WORD_DATA 0x06 #define SIS5595_PROC_CALL 0x08 #define SIS5595_BLOCK_DATA 0x0A /* insmod parameters */ /* If force_addr is set to anything different from 0, we forcibly enable the device at the given address. 
*/ static u16 force_addr; module_param_hw(force_addr, ushort, ioport, 0); MODULE_PARM_DESC(force_addr, "Initialize the base address of the i2c controller"); static struct pci_driver sis5595_driver; static unsigned short sis5595_base; static struct pci_dev *sis5595_pdev; static u8 sis5595_read(u8 reg) { outb(reg, sis5595_base + SMB_INDEX); return inb(sis5595_base + SMB_DAT); } static void sis5595_write(u8 reg, u8 data) { outb(reg, sis5595_base + SMB_INDEX); outb(data, sis5595_base + SMB_DAT); } static int sis5595_setup(struct pci_dev *SIS5595_dev) { u16 a; u8 val; int *i; int retval; /* Look for imposters */ for (i = blacklist; *i != 0; i++) { struct pci_dev *dev; dev = pci_get_device(PCI_VENDOR_ID_SI, *i, NULL); if (dev) { dev_err(&SIS5595_dev->dev, "Looked for SIS5595 but found unsupported device %.4x\n", *i); pci_dev_put(dev); return -ENODEV; } } /* Determine the address of the SMBus areas */ pci_read_config_word(SIS5595_dev, ACPI_BASE, &sis5595_base); if (sis5595_base == 0 && force_addr == 0) { dev_err(&SIS5595_dev->dev, "ACPI base address uninitialized - upgrade BIOS or use force_addr=0xaddr\n"); return -ENODEV; } if (force_addr) sis5595_base = force_addr & ~(SIS5595_EXTENT - 1); dev_dbg(&SIS5595_dev->dev, "ACPI Base address: %04x\n", sis5595_base); /* NB: We grab just the two SMBus registers here, but this may still * interfere with ACPI :-( */ retval = acpi_check_region(sis5595_base + SMB_INDEX, 2, sis5595_driver.name); if (retval) return retval; if (!request_region(sis5595_base + SMB_INDEX, 2, sis5595_driver.name)) { dev_err(&SIS5595_dev->dev, "SMBus registers 0x%04x-0x%04x already in use!\n", sis5595_base + SMB_INDEX, sis5595_base + SMB_INDEX + 1); return -ENODEV; } if (force_addr) { dev_info(&SIS5595_dev->dev, "forcing ISA address 0x%04X\n", sis5595_base); retval = pci_write_config_word(SIS5595_dev, ACPI_BASE, sis5595_base); if (retval != PCIBIOS_SUCCESSFUL) goto error; retval = pci_read_config_word(SIS5595_dev, ACPI_BASE, &a); if (retval != PCIBIOS_SUCCESSFUL) goto error; if ((a & ~(SIS5595_EXTENT - 1)) != sis5595_base) { /* doesn't work for some chips! */ dev_err(&SIS5595_dev->dev, "force address failed - not supported?\n"); goto error; } } retval = pci_read_config_byte(SIS5595_dev, SIS5595_ENABLE_REG, &val); if (retval != PCIBIOS_SUCCESSFUL) goto error; if ((val & 0x80) == 0) { dev_info(&SIS5595_dev->dev, "enabling ACPI\n"); retval = pci_write_config_byte(SIS5595_dev, SIS5595_ENABLE_REG, val | 0x80); if (retval != PCIBIOS_SUCCESSFUL) goto error; retval = pci_read_config_byte(SIS5595_dev, SIS5595_ENABLE_REG, &val); if (retval != PCIBIOS_SUCCESSFUL) goto error; if ((val & 0x80) == 0) { /* doesn't work for some chips? */ dev_err(&SIS5595_dev->dev, "ACPI enable failed - not supported?\n"); goto error; } } /* Everything is happy */ return 0; error: release_region(sis5595_base + SMB_INDEX, 2); return -ENODEV; } static int sis5595_transaction(struct i2c_adapter *adap) { int temp; int result = 0; int timeout = 0; /* Make sure the SMBus host is ready to start transmitting */ temp = sis5595_read(SMB_STS_LO) + (sis5595_read(SMB_STS_HI) << 8); if (temp != 0x00) { dev_dbg(&adap->dev, "SMBus busy (%04x). Resetting...\n", temp); sis5595_write(SMB_STS_LO, temp & 0xff); sis5595_write(SMB_STS_HI, temp >> 8); if ((temp = sis5595_read(SMB_STS_LO) + (sis5595_read(SMB_STS_HI) << 8)) != 0x00) { dev_dbg(&adap->dev, "Failed! 
(%02x)\n", temp); return -EBUSY; } else { dev_dbg(&adap->dev, "Successful!\n"); } } /* start the transaction by setting bit 4 */ sis5595_write(SMB_CTL_LO, sis5595_read(SMB_CTL_LO) | 0x10); /* We will always wait for a fraction of a second! */ do { msleep(1); temp = sis5595_read(SMB_STS_LO); } while (!(temp & 0x40) && (timeout++ < MAX_TIMEOUT)); /* If the SMBus is still busy, we give up */ if (timeout > MAX_TIMEOUT) { dev_dbg(&adap->dev, "SMBus Timeout!\n"); result = -ETIMEDOUT; } if (temp & 0x10) { dev_dbg(&adap->dev, "Error: Failed bus transaction\n"); result = -ENXIO; } if (temp & 0x20) { dev_err(&adap->dev, "Bus collision! SMBus may be locked until " "next hard reset (or not...)\n"); /* Clock stops and slave is stuck in mid-transmission */ result = -EIO; } temp = sis5595_read(SMB_STS_LO) + (sis5595_read(SMB_STS_HI) << 8); if (temp != 0x00) { sis5595_write(SMB_STS_LO, temp & 0xff); sis5595_write(SMB_STS_HI, temp >> 8); } temp = sis5595_read(SMB_STS_LO) + (sis5595_read(SMB_STS_HI) << 8); if (temp != 0x00) dev_dbg(&adap->dev, "Failed reset at end of transaction (%02x)\n", temp); return result; } /* Return negative errno on error. */ static s32 sis5595_access(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { int status; switch (size) { case I2C_SMBUS_QUICK: sis5595_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01)); size = SIS5595_QUICK; break; case I2C_SMBUS_BYTE: sis5595_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01)); if (read_write == I2C_SMBUS_WRITE) sis5595_write(SMB_CMD, command); size = SIS5595_BYTE; break; case I2C_SMBUS_BYTE_DATA: sis5595_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01)); sis5595_write(SMB_CMD, command); if (read_write == I2C_SMBUS_WRITE) sis5595_write(SMB_BYTE, data->byte); size = SIS5595_BYTE_DATA; break; case I2C_SMBUS_PROC_CALL: case I2C_SMBUS_WORD_DATA: sis5595_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01)); sis5595_write(SMB_CMD, command); if (read_write == I2C_SMBUS_WRITE) { sis5595_write(SMB_BYTE, data->word & 0xff); sis5595_write(SMB_BYTE + 1, (data->word & 0xff00) >> 8); } size = (size == I2C_SMBUS_PROC_CALL) ? 
SIS5595_PROC_CALL : SIS5595_WORD_DATA; break; default: dev_warn(&adap->dev, "Unsupported transaction %d\n", size); return -EOPNOTSUPP; } sis5595_write(SMB_CTL_LO, ((size & 0x0E))); status = sis5595_transaction(adap); if (status) return status; if ((size != SIS5595_PROC_CALL) && ((read_write == I2C_SMBUS_WRITE) || (size == SIS5595_QUICK))) return 0; switch (size) { case SIS5595_BYTE: case SIS5595_BYTE_DATA: data->byte = sis5595_read(SMB_BYTE); break; case SIS5595_WORD_DATA: case SIS5595_PROC_CALL: data->word = sis5595_read(SMB_BYTE) + (sis5595_read(SMB_BYTE + 1) << 8); break; } return 0; } static u32 sis5595_func(struct i2c_adapter *adapter) { return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_PROC_CALL; } static const struct i2c_algorithm smbus_algorithm = { .smbus_xfer = sis5595_access, .functionality = sis5595_func, }; static struct i2c_adapter sis5595_adapter = { .owner = THIS_MODULE, .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, .algo = &smbus_algorithm, }; static const struct pci_device_id sis5595_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) }, { 0, } }; MODULE_DEVICE_TABLE (pci, sis5595_ids); static int sis5595_probe(struct pci_dev *dev, const struct pci_device_id *id) { int err; if (sis5595_setup(dev)) { dev_err(&dev->dev, "SIS5595 not detected, module not inserted.\n"); return -ENODEV; } /* set up the sysfs linkage to our parent device */ sis5595_adapter.dev.parent = &dev->dev; snprintf(sis5595_adapter.name, sizeof(sis5595_adapter.name), "SMBus SIS5595 adapter at %04x", sis5595_base + SMB_INDEX); err = i2c_add_adapter(&sis5595_adapter); if (err) { release_region(sis5595_base + SMB_INDEX, 2); return err; } /* Always return failure here. This is to allow other drivers to bind * to this pci device. We don't really want to have control over the * pci device, we only wanted to read as few register values from it. */ sis5595_pdev = pci_dev_get(dev); return -ENODEV; } static struct pci_driver sis5595_driver = { .name = "sis5595_smbus", .id_table = sis5595_ids, .probe = sis5595_probe, }; static int __init i2c_sis5595_init(void) { return pci_register_driver(&sis5595_driver); } static void __exit i2c_sis5595_exit(void) { pci_unregister_driver(&sis5595_driver); if (sis5595_pdev) { i2c_del_adapter(&sis5595_adapter); release_region(sis5595_base + SMB_INDEX, 2); pci_dev_put(sis5595_pdev); sis5595_pdev = NULL; } } MODULE_AUTHOR("Frodo Looijaard <[email protected]>"); MODULE_DESCRIPTION("SIS5595 SMBus driver"); MODULE_LICENSE("GPL"); module_init(i2c_sis5595_init); module_exit(i2c_sis5595_exit);
linux-master
drivers/i2c/busses/i2c-sis5595.c
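sis5595_read() and sis5595_write() above use the classic index/data port pair: write the internal register number to one I/O port, then access its value through the next one. The sketch below shows the same pattern from userspace with glibc's <sys/io.h>. The port numbers are hypothetical (the real base is read from the chip's ACPI_BASE word in PCI configuration space), and the program needs root on an x86 machine to run at all.

#include <stdio.h>
#include <stdlib.h>
#include <sys/io.h>	/* ioperm(), inb(), outb() - x86/glibc only */

/* Hypothetical index/data port pair for illustration only. */
#define IDX_PORT 0x0810
#define DAT_PORT 0x0811

static unsigned char indexed_read(unsigned char reg)
{
	outb(reg, IDX_PORT);	/* select the internal register */
	return inb(DAT_PORT);	/* then read its value */
}

static void indexed_write(unsigned char reg, unsigned char val)
{
	outb(reg, IDX_PORT);
	outb(val, DAT_PORT);
}

int main(void)
{
	/* Grant access to the two ports; needs root / CAP_SYS_RAWIO. */
	if (ioperm(IDX_PORT, 2, 1) < 0) {
		perror("ioperm");
		return EXIT_FAILURE;
	}

	indexed_write(0x04, 0xA0);	/* e.g. load a slave address register */
	printf("reg 0x04 = 0x%02x\n", indexed_read(0x04));

	ioperm(IDX_PORT, 2, 0);
	return 0;
}

The pattern keeps the chip's external footprint to two I/O ports while exposing a larger internal register file, which is why the driver only requests a two-byte region.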
// SPDX-License-Identifier: GPL-2.0 /* * i2c slave support for Atmel's AT91 Two-Wire Interface (TWI) * * Copyright (C) 2017 Juergen Fitschen <[email protected]> */ #include <linux/err.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/pm_runtime.h> #include "i2c-at91.h" static irqreturn_t atmel_twi_interrupt_slave(int irq, void *dev_id) { struct at91_twi_dev *dev = dev_id; const unsigned status = at91_twi_read(dev, AT91_TWI_SR); const unsigned irqstatus = status & at91_twi_read(dev, AT91_TWI_IMR); u8 value; if (!irqstatus) return IRQ_NONE; /* slave address has been detected on I2C bus */ if (irqstatus & AT91_TWI_SVACC) { if (status & AT91_TWI_SVREAD) { i2c_slave_event(dev->slave, I2C_SLAVE_READ_REQUESTED, &value); writeb_relaxed(value, dev->base + AT91_TWI_THR); at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXRDY | AT91_TWI_EOSACC); } else { i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_REQUESTED, &value); at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_RXRDY | AT91_TWI_EOSACC); } at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_SVACC); } /* byte transmitted to remote master */ if (irqstatus & AT91_TWI_TXRDY) { i2c_slave_event(dev->slave, I2C_SLAVE_READ_PROCESSED, &value); writeb_relaxed(value, dev->base + AT91_TWI_THR); } /* byte received from remote master */ if (irqstatus & AT91_TWI_RXRDY) { value = readb_relaxed(dev->base + AT91_TWI_RHR); i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_RECEIVED, &value); } /* master sent stop */ if (irqstatus & AT91_TWI_EOSACC) { at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_TXRDY | AT91_TWI_RXRDY | AT91_TWI_EOSACC); at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_SVACC); i2c_slave_event(dev->slave, I2C_SLAVE_STOP, &value); } return IRQ_HANDLED; } static int at91_reg_slave(struct i2c_client *slave) { struct at91_twi_dev *dev = i2c_get_adapdata(slave->adapter); if (dev->slave) return -EBUSY; if (slave->flags & I2C_CLIENT_TEN) return -EAFNOSUPPORT; /* Make sure twi_clk doesn't get turned off! */ pm_runtime_get_sync(dev->dev); dev->slave = slave; dev->smr = AT91_TWI_SMR_SADR(slave->addr); at91_init_twi_bus(dev); at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_SVACC); dev_info(dev->dev, "entered slave mode (ADR=%d)\n", slave->addr); return 0; } static int at91_unreg_slave(struct i2c_client *slave) { struct at91_twi_dev *dev = i2c_get_adapdata(slave->adapter); WARN_ON(!dev->slave); dev_info(dev->dev, "leaving slave mode\n"); dev->slave = NULL; dev->smr = 0; at91_init_twi_bus(dev); pm_runtime_put(dev->dev); return 0; } static u32 at91_twi_func(struct i2c_adapter *adapter) { return I2C_FUNC_SLAVE | I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SMBUS_READ_BLOCK_DATA; } static const struct i2c_algorithm at91_twi_algorithm_slave = { .reg_slave = at91_reg_slave, .unreg_slave = at91_unreg_slave, .functionality = at91_twi_func, }; int at91_twi_probe_slave(struct platform_device *pdev, u32 phy_addr, struct at91_twi_dev *dev) { int rc; rc = devm_request_irq(&pdev->dev, dev->irq, atmel_twi_interrupt_slave, 0, dev_name(dev->dev), dev); if (rc) { dev_err(dev->dev, "Cannot get irq %d: %d\n", dev->irq, rc); return rc; } dev->adapter.algo = &at91_twi_algorithm_slave; return 0; } void at91_init_twi_bus_slave(struct at91_twi_dev *dev) { at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_MSDIS); if (dev->slave_detected && dev->smr) { at91_twi_write(dev, AT91_TWI_SMR, dev->smr); at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_SVEN); } }
linux-master
drivers/i2c/busses/i2c-at91-slave.c
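The AT91 code above is the controller half of the kernel's I2C slave framework: it turns hardware events into i2c_slave_event() calls, while a separate backend driver decides what data to serve. Below is a rough sketch of such a backend, modeled loosely on the in-tree i2c-slave-eeprom driver. The "tiny-slave-regs" name, the 16-byte register file and the pointer semantics are all invented for illustration and are not part of the driver above.

// SPDX-License-Identifier: GPL-2.0
/*
 * Illustrative I2C slave backend sketch: a 16-byte register file served
 * to a remote master.  Hypothetical names throughout.
 */
#include <linux/i2c.h>
#include <linux/module.h>

struct tiny_regs {
	u8 mem[16];
	u8 ptr;		/* current register pointer */
	bool first;	/* first byte of a write selects the pointer */
};

static int tiny_slave_cb(struct i2c_client *client,
			 enum i2c_slave_event event, u8 *val)
{
	struct tiny_regs *r = i2c_get_clientdata(client);

	switch (event) {
	case I2C_SLAVE_WRITE_REQUESTED:
		r->first = true;
		break;
	case I2C_SLAVE_WRITE_RECEIVED:
		if (r->first) {
			r->ptr = *val % sizeof(r->mem);
			r->first = false;
		} else {
			r->mem[r->ptr] = *val;
			r->ptr = (r->ptr + 1) % sizeof(r->mem);
		}
		break;
	case I2C_SLAVE_READ_PROCESSED:
		/* previous byte made it onto the wire, advance */
		r->ptr = (r->ptr + 1) % sizeof(r->mem);
		fallthrough;
	case I2C_SLAVE_READ_REQUESTED:
		*val = r->mem[r->ptr];
		break;
	case I2C_SLAVE_STOP:
		break;
	}
	return 0;
}

static int tiny_probe(struct i2c_client *client)
{
	struct tiny_regs *r;

	r = devm_kzalloc(&client->dev, sizeof(*r), GFP_KERNEL);
	if (!r)
		return -ENOMEM;
	i2c_set_clientdata(client, r);

	return i2c_slave_register(client, tiny_slave_cb);
}

static void tiny_remove(struct i2c_client *client)
{
	i2c_slave_unregister(client);
}

static const struct i2c_device_id tiny_id[] = {
	{ "tiny-slave-regs", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, tiny_id);

static struct i2c_driver tiny_driver = {
	.driver = { .name = "tiny-slave-regs" },
	.probe = tiny_probe,
	.remove = tiny_remove,
	.id_table = tiny_id,
};
module_i2c_driver(tiny_driver);

MODULE_LICENSE("GPL");

A real backend of this kind is typically instantiated from the device tree with an address carrying the I2C_OWN_SLAVE_ADDRESS flag, so the core marks the client as a slave and routes it to reg_slave()/unreg_slave() in the controller driver above.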
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2009 ST-Ericsson SA * Copyright (C) 2009 STMicroelectronics * * I2C master mode controller driver, used in Nomadik 8815 * and Ux500 platforms. * * Author: Srinidhi Kasagar <[email protected]> * Author: Sachin Verma <[email protected]> */ #include <linux/init.h> #include <linux/module.h> #include <linux/amba/bus.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/i2c.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/pm_runtime.h> #include <linux/of.h> #include <linux/pinctrl/consumer.h> #define DRIVER_NAME "nmk-i2c" /* I2C Controller register offsets */ #define I2C_CR (0x000) #define I2C_SCR (0x004) #define I2C_HSMCR (0x008) #define I2C_MCR (0x00C) #define I2C_TFR (0x010) #define I2C_SR (0x014) #define I2C_RFR (0x018) #define I2C_TFTR (0x01C) #define I2C_RFTR (0x020) #define I2C_DMAR (0x024) #define I2C_BRCR (0x028) #define I2C_IMSCR (0x02C) #define I2C_RISR (0x030) #define I2C_MISR (0x034) #define I2C_ICR (0x038) /* Control registers */ #define I2C_CR_PE (0x1 << 0) /* Peripheral Enable */ #define I2C_CR_OM (0x3 << 1) /* Operating mode */ #define I2C_CR_SAM (0x1 << 3) /* Slave addressing mode */ #define I2C_CR_SM (0x3 << 4) /* Speed mode */ #define I2C_CR_SGCM (0x1 << 6) /* Slave general call mode */ #define I2C_CR_FTX (0x1 << 7) /* Flush Transmit */ #define I2C_CR_FRX (0x1 << 8) /* Flush Receive */ #define I2C_CR_DMA_TX_EN (0x1 << 9) /* DMA Tx enable */ #define I2C_CR_DMA_RX_EN (0x1 << 10) /* DMA Rx Enable */ #define I2C_CR_DMA_SLE (0x1 << 11) /* DMA sync. logic enable */ #define I2C_CR_LM (0x1 << 12) /* Loopback mode */ #define I2C_CR_FON (0x3 << 13) /* Filtering on */ #define I2C_CR_FS (0x3 << 15) /* Force stop enable */ /* Master controller (MCR) register */ #define I2C_MCR_OP (0x1 << 0) /* Operation */ #define I2C_MCR_A7 (0x7f << 1) /* 7-bit address */ #define I2C_MCR_EA10 (0x7 << 8) /* 10-bit Extended address */ #define I2C_MCR_SB (0x1 << 11) /* Extended address */ #define I2C_MCR_AM (0x3 << 12) /* Address type */ #define I2C_MCR_STOP (0x1 << 14) /* Stop condition */ #define I2C_MCR_LENGTH (0x7ff << 15) /* Transaction length */ /* Status register (SR) */ #define I2C_SR_OP (0x3 << 0) /* Operation */ #define I2C_SR_STATUS (0x3 << 2) /* controller status */ #define I2C_SR_CAUSE (0x7 << 4) /* Abort cause */ #define I2C_SR_TYPE (0x3 << 7) /* Receive type */ #define I2C_SR_LENGTH (0x7ff << 9) /* Transfer length */ /* Interrupt mask set/clear (IMSCR) bits */ #define I2C_IT_TXFE (0x1 << 0) #define I2C_IT_TXFNE (0x1 << 1) #define I2C_IT_TXFF (0x1 << 2) #define I2C_IT_TXFOVR (0x1 << 3) #define I2C_IT_RXFE (0x1 << 4) #define I2C_IT_RXFNF (0x1 << 5) #define I2C_IT_RXFF (0x1 << 6) #define I2C_IT_RFSR (0x1 << 16) #define I2C_IT_RFSE (0x1 << 17) #define I2C_IT_WTSR (0x1 << 18) #define I2C_IT_MTD (0x1 << 19) #define I2C_IT_STD (0x1 << 20) #define I2C_IT_MAL (0x1 << 24) #define I2C_IT_BERR (0x1 << 25) #define I2C_IT_MTDWS (0x1 << 28) #define GEN_MASK(val, mask, sb) (((val) << (sb)) & (mask)) /* some bits in ICR are reserved */ #define I2C_CLEAR_ALL_INTS 0x131f007f /* first three msb bits are reserved */ #define IRQ_MASK(mask) (mask & 0x1fffffff) /* maximum threshold value */ #define MAX_I2C_FIFO_THRESHOLD 15 enum i2c_freq_mode { I2C_FREQ_MODE_STANDARD, /* up to 100 Kb/s */ I2C_FREQ_MODE_FAST, /* up to 400 Kb/s */ I2C_FREQ_MODE_HIGH_SPEED, /* up to 3.4 Mb/s */ I2C_FREQ_MODE_FAST_PLUS, /* up to 1 Mb/s */ }; /** * struct i2c_vendor_data - per-vendor variations * @has_mtdws: variant has the 
MTDWS bit * @fifodepth: variant FIFO depth */ struct i2c_vendor_data { bool has_mtdws; u32 fifodepth; }; enum i2c_status { I2C_NOP, I2C_ON_GOING, I2C_OK, I2C_ABORT }; /* operation */ enum i2c_operation { I2C_NO_OPERATION = 0xff, I2C_WRITE = 0x00, I2C_READ = 0x01 }; /** * struct i2c_nmk_client - client specific data * @slave_adr: 7-bit slave address * @count: no. bytes to be transferred * @buffer: client data buffer * @xfer_bytes: bytes transferred till now * @operation: current I2C operation */ struct i2c_nmk_client { unsigned short slave_adr; unsigned long count; unsigned char *buffer; unsigned long xfer_bytes; enum i2c_operation operation; }; /** * struct nmk_i2c_dev - private data structure of the controller. * @vendor: vendor data for this variant. * @adev: parent amba device. * @adap: corresponding I2C adapter. * @irq: interrupt line for the controller. * @virtbase: virtual io memory area. * @clk: hardware i2c block clock. * @cli: holder of client specific data. * @clk_freq: clock frequency for the operation mode * @tft: Tx FIFO Threshold in bytes * @rft: Rx FIFO Threshold in bytes * @timeout: Slave response timeout (ms) * @sm: speed mode * @stop: stop condition. * @xfer_complete: acknowledge completion for a I2C message. * @result: controller propogated result. */ struct nmk_i2c_dev { struct i2c_vendor_data *vendor; struct amba_device *adev; struct i2c_adapter adap; int irq; void __iomem *virtbase; struct clk *clk; struct i2c_nmk_client cli; u32 clk_freq; unsigned char tft; unsigned char rft; int timeout; enum i2c_freq_mode sm; int stop; struct completion xfer_complete; int result; }; /* controller's abort causes */ static const char *abort_causes[] = { "no ack received after address transmission", "no ack received during data phase", "ack received after xmission of master code", "master lost arbitration", "slave restarts", "slave reset", "overflow, maxsize is 2047 bytes", }; static inline void i2c_set_bit(void __iomem *reg, u32 mask) { writel(readl(reg) | mask, reg); } static inline void i2c_clr_bit(void __iomem *reg, u32 mask) { writel(readl(reg) & ~mask, reg); } /** * flush_i2c_fifo() - This function flushes the I2C FIFO * @dev: private data of I2C Driver * * This function flushes the I2C Tx and Rx FIFOs. It returns * 0 on successful flushing of FIFO */ static int flush_i2c_fifo(struct nmk_i2c_dev *dev) { #define LOOP_ATTEMPTS 10 int i; unsigned long timeout; /* * flush the transmit and receive FIFO. The flushing * operation takes several cycles before to be completed. * On the completion, the I2C internal logic clears these * bits, until then no one must access Tx, Rx FIFO and * should poll on these bits waiting for the completion. 
*/ writel((I2C_CR_FTX | I2C_CR_FRX), dev->virtbase + I2C_CR); for (i = 0; i < LOOP_ATTEMPTS; i++) { timeout = jiffies + dev->adap.timeout; while (!time_after(jiffies, timeout)) { if ((readl(dev->virtbase + I2C_CR) & (I2C_CR_FTX | I2C_CR_FRX)) == 0) return 0; } } dev_err(&dev->adev->dev, "flushing operation timed out giving up after %d attempts", LOOP_ATTEMPTS); return -ETIMEDOUT; } /** * disable_all_interrupts() - Disable all interrupts of this I2c Bus * @dev: private data of I2C Driver */ static void disable_all_interrupts(struct nmk_i2c_dev *dev) { u32 mask = IRQ_MASK(0); writel(mask, dev->virtbase + I2C_IMSCR); } /** * clear_all_interrupts() - Clear all interrupts of I2C Controller * @dev: private data of I2C Driver */ static void clear_all_interrupts(struct nmk_i2c_dev *dev) { u32 mask; mask = IRQ_MASK(I2C_CLEAR_ALL_INTS); writel(mask, dev->virtbase + I2C_ICR); } /** * init_hw() - initialize the I2C hardware * @dev: private data of I2C Driver */ static int init_hw(struct nmk_i2c_dev *dev) { int stat; stat = flush_i2c_fifo(dev); if (stat) goto exit; /* disable the controller */ i2c_clr_bit(dev->virtbase + I2C_CR, I2C_CR_PE); disable_all_interrupts(dev); clear_all_interrupts(dev); dev->cli.operation = I2C_NO_OPERATION; exit: return stat; } /* enable peripheral, master mode operation */ #define DEFAULT_I2C_REG_CR ((1 << 1) | I2C_CR_PE) /** * load_i2c_mcr_reg() - load the MCR register * @dev: private data of controller * @flags: message flags */ static u32 load_i2c_mcr_reg(struct nmk_i2c_dev *dev, u16 flags) { u32 mcr = 0; unsigned short slave_adr_3msb_bits; mcr |= GEN_MASK(dev->cli.slave_adr, I2C_MCR_A7, 1); if (unlikely(flags & I2C_M_TEN)) { /* 10-bit address transaction */ mcr |= GEN_MASK(2, I2C_MCR_AM, 12); /* * Get the top 3 bits. * EA10 represents extended address in MCR. This includes * the extension (MSB bits) of the 7 bit address loaded * in A7 */ slave_adr_3msb_bits = (dev->cli.slave_adr >> 7) & 0x7; mcr |= GEN_MASK(slave_adr_3msb_bits, I2C_MCR_EA10, 8); } else { /* 7-bit address transaction */ mcr |= GEN_MASK(1, I2C_MCR_AM, 12); } /* start byte procedure not applied */ mcr |= GEN_MASK(0, I2C_MCR_SB, 11); /* check the operation, master read/write? */ if (dev->cli.operation == I2C_WRITE) mcr |= GEN_MASK(I2C_WRITE, I2C_MCR_OP, 0); else mcr |= GEN_MASK(I2C_READ, I2C_MCR_OP, 0); /* stop or repeated start? */ if (dev->stop) mcr |= GEN_MASK(1, I2C_MCR_STOP, 14); else mcr &= ~(GEN_MASK(1, I2C_MCR_STOP, 14)); mcr |= GEN_MASK(dev->cli.count, I2C_MCR_LENGTH, 15); return mcr; } /** * setup_i2c_controller() - setup the controller * @dev: private data of controller */ static void setup_i2c_controller(struct nmk_i2c_dev *dev) { u32 brcr1, brcr2; u32 i2c_clk, div; u32 ns; u16 slsu; writel(0x0, dev->virtbase + I2C_CR); writel(0x0, dev->virtbase + I2C_HSMCR); writel(0x0, dev->virtbase + I2C_TFTR); writel(0x0, dev->virtbase + I2C_RFTR); writel(0x0, dev->virtbase + I2C_DMAR); i2c_clk = clk_get_rate(dev->clk); /* * set the slsu: * * slsu defines the data setup time after SCL clock * stretching in terms of i2c clk cycles + 1 (zero means * "wait one cycle"), the needed setup time for the three * modes are 250ns, 100ns, 10ns respectively. 
* * As the time for one cycle T in nanoseconds is * T = (1/f) * 1000000000 => * slsu = cycles / (1000000000 / f) + 1 */ ns = DIV_ROUND_UP_ULL(1000000000ULL, i2c_clk); switch (dev->sm) { case I2C_FREQ_MODE_FAST: case I2C_FREQ_MODE_FAST_PLUS: slsu = DIV_ROUND_UP(100, ns); /* Fast */ break; case I2C_FREQ_MODE_HIGH_SPEED: slsu = DIV_ROUND_UP(10, ns); /* High */ break; case I2C_FREQ_MODE_STANDARD: default: slsu = DIV_ROUND_UP(250, ns); /* Standard */ break; } slsu += 1; dev_dbg(&dev->adev->dev, "calculated SLSU = %04x\n", slsu); writel(slsu << 16, dev->virtbase + I2C_SCR); /* * The spec says, in case of std. mode the divider is * 2 whereas it is 3 for fast and fastplus mode of * operation. TODO - high speed support. */ div = (dev->clk_freq > I2C_MAX_STANDARD_MODE_FREQ) ? 3 : 2; /* * generate the mask for baud rate counters. The controller * has two baud rate counters. One is used for High speed * operation, and the other is for std, fast mode, fast mode * plus operation. Currently we do not supprt high speed mode * so set brcr1 to 0. */ brcr1 = 0 << 16; brcr2 = (i2c_clk/(dev->clk_freq * div)) & 0xffff; /* set the baud rate counter register */ writel((brcr1 | brcr2), dev->virtbase + I2C_BRCR); /* * set the speed mode. Currently we support * only standard and fast mode of operation * TODO - support for fast mode plus (up to 1Mb/s) * and high speed (up to 3.4 Mb/s) */ if (dev->sm > I2C_FREQ_MODE_FAST) { dev_err(&dev->adev->dev, "do not support this mode defaulting to std. mode\n"); brcr2 = i2c_clk / (I2C_MAX_STANDARD_MODE_FREQ * 2) & 0xffff; writel((brcr1 | brcr2), dev->virtbase + I2C_BRCR); writel(I2C_FREQ_MODE_STANDARD << 4, dev->virtbase + I2C_CR); } writel(dev->sm << 4, dev->virtbase + I2C_CR); /* set the Tx and Rx FIFO threshold */ writel(dev->tft, dev->virtbase + I2C_TFTR); writel(dev->rft, dev->virtbase + I2C_RFTR); } /** * read_i2c() - Read from I2C client device * @dev: private data of I2C Driver * @flags: message flags * * This function reads from i2c client device when controller is in * master mode. There is a completion timeout. If there is no transfer * before timeout error is returned. */ static int read_i2c(struct nmk_i2c_dev *dev, u16 flags) { int status = 0; u32 mcr, irq_mask; unsigned long timeout; mcr = load_i2c_mcr_reg(dev, flags); writel(mcr, dev->virtbase + I2C_MCR); /* load the current CR value */ writel(readl(dev->virtbase + I2C_CR) | DEFAULT_I2C_REG_CR, dev->virtbase + I2C_CR); /* enable the controller */ i2c_set_bit(dev->virtbase + I2C_CR, I2C_CR_PE); init_completion(&dev->xfer_complete); /* enable interrupts by setting the mask */ irq_mask = (I2C_IT_RXFNF | I2C_IT_RXFF | I2C_IT_MAL | I2C_IT_BERR); if (dev->stop || !dev->vendor->has_mtdws) irq_mask |= I2C_IT_MTD; else irq_mask |= I2C_IT_MTDWS; irq_mask = I2C_CLEAR_ALL_INTS & IRQ_MASK(irq_mask); writel(readl(dev->virtbase + I2C_IMSCR) | irq_mask, dev->virtbase + I2C_IMSCR); timeout = wait_for_completion_timeout( &dev->xfer_complete, dev->adap.timeout); if (timeout == 0) { /* Controller timed out */ dev_err(&dev->adev->dev, "read from slave 0x%x timed out\n", dev->cli.slave_adr); status = -ETIMEDOUT; } return status; } static void fill_tx_fifo(struct nmk_i2c_dev *dev, int no_bytes) { int count; for (count = (no_bytes - 2); (count > 0) && (dev->cli.count != 0); count--) { /* write to the Tx FIFO */ writeb(*dev->cli.buffer, dev->virtbase + I2C_TFR); dev->cli.buffer++; dev->cli.count--; dev->cli.xfer_bytes++; } } /** * write_i2c() - Write data to I2C client. 
* @dev: private data of I2C Driver * @flags: message flags * * This function writes data to I2C client */ static int write_i2c(struct nmk_i2c_dev *dev, u16 flags) { u32 status = 0; u32 mcr, irq_mask; unsigned long timeout; mcr = load_i2c_mcr_reg(dev, flags); writel(mcr, dev->virtbase + I2C_MCR); /* load the current CR value */ writel(readl(dev->virtbase + I2C_CR) | DEFAULT_I2C_REG_CR, dev->virtbase + I2C_CR); /* enable the controller */ i2c_set_bit(dev->virtbase + I2C_CR, I2C_CR_PE); init_completion(&dev->xfer_complete); /* enable interrupts by settings the masks */ irq_mask = (I2C_IT_TXFOVR | I2C_IT_MAL | I2C_IT_BERR); /* Fill the TX FIFO with transmit data */ fill_tx_fifo(dev, MAX_I2C_FIFO_THRESHOLD); if (dev->cli.count != 0) irq_mask |= I2C_IT_TXFNE; /* * check if we want to transfer a single or multiple bytes, if so * set the MTDWS bit (Master Transaction Done Without Stop) * to start repeated start operation */ if (dev->stop || !dev->vendor->has_mtdws) irq_mask |= I2C_IT_MTD; else irq_mask |= I2C_IT_MTDWS; irq_mask = I2C_CLEAR_ALL_INTS & IRQ_MASK(irq_mask); writel(readl(dev->virtbase + I2C_IMSCR) | irq_mask, dev->virtbase + I2C_IMSCR); timeout = wait_for_completion_timeout( &dev->xfer_complete, dev->adap.timeout); if (timeout == 0) { /* Controller timed out */ dev_err(&dev->adev->dev, "write to slave 0x%x timed out\n", dev->cli.slave_adr); status = -ETIMEDOUT; } return status; } /** * nmk_i2c_xfer_one() - transmit a single I2C message * @dev: device with a message encoded into it * @flags: message flags */ static int nmk_i2c_xfer_one(struct nmk_i2c_dev *dev, u16 flags) { int status; if (flags & I2C_M_RD) { /* read operation */ dev->cli.operation = I2C_READ; status = read_i2c(dev, flags); } else { /* write operation */ dev->cli.operation = I2C_WRITE; status = write_i2c(dev, flags); } if (status || (dev->result)) { u32 i2c_sr; u32 cause; i2c_sr = readl(dev->virtbase + I2C_SR); /* * Check if the controller I2C operation status * is set to ABORT(11b). */ if (((i2c_sr >> 2) & 0x3) == 0x3) { /* get the abort cause */ cause = (i2c_sr >> 4) & 0x7; dev_err(&dev->adev->dev, "%s\n", cause >= ARRAY_SIZE(abort_causes) ? "unknown reason" : abort_causes[cause]); } (void) init_hw(dev); status = status ? status : dev->result; } return status; } /** * nmk_i2c_xfer() - I2C transfer function used by kernel framework * @i2c_adap: Adapter pointer to the controller * @msgs: Pointer to data to be written. * @num_msgs: Number of messages to be executed * * This is the function called by the generic kernel i2c_transfer() * or i2c_smbus...() API calls. Note that this code is protected by the * semaphore set in the kernel i2c_transfer() function. * * NOTE: * READ TRANSFER : We impose a restriction of the first message to be the * index message for any read transaction. * - a no index is coded as '0', * - 2byte big endian index is coded as '3' * !!! msg[0].buf holds the actual index. * This is compatible with generic messages of smbus emulator * that send a one byte index. * eg. a I2C transation to read 2 bytes from index 0 * idx = 0; * msg[0].addr = client->addr; * msg[0].flags = 0x0; * msg[0].len = 1; * msg[0].buf = &idx; * * msg[1].addr = client->addr; * msg[1].flags = I2C_M_RD; * msg[1].len = 2; * msg[1].buf = rd_buff * i2c_transfer(adap, msg, 2); * * WRITE TRANSFER : The I2C standard interface interprets all data as payload. * If you want to emulate an SMBUS write transaction put the * index as first byte(or first and second) in the payload. * eg. 
a I2C transation to write 2 bytes from index 1 * wr_buff[0] = 0x1; * wr_buff[1] = 0x23; * wr_buff[2] = 0x46; * msg[0].flags = 0x0; * msg[0].len = 3; * msg[0].buf = wr_buff; * i2c_transfer(adap, msg, 1); * * To read or write a block of data (multiple bytes) using SMBUS emulation * please use the i2c_smbus_read_i2c_block_data() * or i2c_smbus_write_i2c_block_data() API */ static int nmk_i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msgs[], int num_msgs) { int status = 0; int i; struct nmk_i2c_dev *dev = i2c_get_adapdata(i2c_adap); int j; pm_runtime_get_sync(&dev->adev->dev); /* Attempt three times to send the message queue */ for (j = 0; j < 3; j++) { /* setup the i2c controller */ setup_i2c_controller(dev); for (i = 0; i < num_msgs; i++) { dev->cli.slave_adr = msgs[i].addr; dev->cli.buffer = msgs[i].buf; dev->cli.count = msgs[i].len; dev->stop = (i < (num_msgs - 1)) ? 0 : 1; dev->result = 0; status = nmk_i2c_xfer_one(dev, msgs[i].flags); if (status != 0) break; } if (status == 0) break; } pm_runtime_put_sync(&dev->adev->dev); /* return the no. messages processed */ if (status) return status; else return num_msgs; } /** * disable_interrupts() - disable the interrupts * @dev: private data of controller * @irq: interrupt number */ static int disable_interrupts(struct nmk_i2c_dev *dev, u32 irq) { irq = IRQ_MASK(irq); writel(readl(dev->virtbase + I2C_IMSCR) & ~(I2C_CLEAR_ALL_INTS & irq), dev->virtbase + I2C_IMSCR); return 0; } /** * i2c_irq_handler() - interrupt routine * @irq: interrupt number * @arg: data passed to the handler * * This is the interrupt handler for the i2c driver. Currently * it handles the major interrupts like Rx & Tx FIFO management * interrupts, master transaction interrupts, arbitration and * bus error interrupts. The rest of the interrupts are treated as * unhandled. */ static irqreturn_t i2c_irq_handler(int irq, void *arg) { struct nmk_i2c_dev *dev = arg; u32 tft, rft; u32 count; u32 misr, src; /* load Tx FIFO and Rx FIFO threshold values */ tft = readl(dev->virtbase + I2C_TFTR); rft = readl(dev->virtbase + I2C_RFTR); /* read interrupt status register */ misr = readl(dev->virtbase + I2C_MISR); src = __ffs(misr); switch ((1 << src)) { /* Transmit FIFO nearly empty interrupt */ case I2C_IT_TXFNE: { if (dev->cli.operation == I2C_READ) { /* * in read operation why do we care for writing? * so disable the Transmit FIFO interrupt */ disable_interrupts(dev, I2C_IT_TXFNE); } else { fill_tx_fifo(dev, (MAX_I2C_FIFO_THRESHOLD - tft)); /* * if done, close the transfer by disabling the * corresponding TXFNE interrupt */ if (dev->cli.count == 0) disable_interrupts(dev, I2C_IT_TXFNE); } } break; /* * Rx FIFO nearly full interrupt. 
* This is set when the numer of entries in Rx FIFO is * greater or equal than the threshold value programmed * in RFT */ case I2C_IT_RXFNF: for (count = rft; count > 0; count--) { /* Read the Rx FIFO */ *dev->cli.buffer = readb(dev->virtbase + I2C_RFR); dev->cli.buffer++; } dev->cli.count -= rft; dev->cli.xfer_bytes += rft; break; /* Rx FIFO full */ case I2C_IT_RXFF: for (count = MAX_I2C_FIFO_THRESHOLD; count > 0; count--) { *dev->cli.buffer = readb(dev->virtbase + I2C_RFR); dev->cli.buffer++; } dev->cli.count -= MAX_I2C_FIFO_THRESHOLD; dev->cli.xfer_bytes += MAX_I2C_FIFO_THRESHOLD; break; /* Master Transaction Done with/without stop */ case I2C_IT_MTD: case I2C_IT_MTDWS: if (dev->cli.operation == I2C_READ) { while (!(readl(dev->virtbase + I2C_RISR) & I2C_IT_RXFE)) { if (dev->cli.count == 0) break; *dev->cli.buffer = readb(dev->virtbase + I2C_RFR); dev->cli.buffer++; dev->cli.count--; dev->cli.xfer_bytes++; } } disable_all_interrupts(dev); clear_all_interrupts(dev); if (dev->cli.count) { dev->result = -EIO; dev_err(&dev->adev->dev, "%lu bytes still remain to be xfered\n", dev->cli.count); (void) init_hw(dev); } complete(&dev->xfer_complete); break; /* Master Arbitration lost interrupt */ case I2C_IT_MAL: dev->result = -EIO; (void) init_hw(dev); i2c_set_bit(dev->virtbase + I2C_ICR, I2C_IT_MAL); complete(&dev->xfer_complete); break; /* * Bus Error interrupt. * This happens when an unexpected start/stop condition occurs * during the transaction. */ case I2C_IT_BERR: dev->result = -EIO; /* get the status */ if (((readl(dev->virtbase + I2C_SR) >> 2) & 0x3) == I2C_ABORT) (void) init_hw(dev); i2c_set_bit(dev->virtbase + I2C_ICR, I2C_IT_BERR); complete(&dev->xfer_complete); break; /* * Tx FIFO overrun interrupt. * This is set when a write operation in Tx FIFO is performed and * the Tx FIFO is full. 
*/ case I2C_IT_TXFOVR: dev->result = -EIO; (void) init_hw(dev); dev_err(&dev->adev->dev, "Tx Fifo Over run\n"); complete(&dev->xfer_complete); break; /* unhandled interrupts by this driver - TODO*/ case I2C_IT_TXFE: case I2C_IT_TXFF: case I2C_IT_RXFE: case I2C_IT_RFSR: case I2C_IT_RFSE: case I2C_IT_WTSR: case I2C_IT_STD: dev_err(&dev->adev->dev, "unhandled Interrupt\n"); break; default: dev_err(&dev->adev->dev, "spurious Interrupt..\n"); break; } return IRQ_HANDLED; } static int nmk_i2c_suspend_late(struct device *dev) { int ret; ret = pm_runtime_force_suspend(dev); if (ret) return ret; pinctrl_pm_select_sleep_state(dev); return 0; } static int nmk_i2c_resume_early(struct device *dev) { return pm_runtime_force_resume(dev); } static int nmk_i2c_runtime_suspend(struct device *dev) { struct amba_device *adev = to_amba_device(dev); struct nmk_i2c_dev *nmk_i2c = amba_get_drvdata(adev); clk_disable_unprepare(nmk_i2c->clk); pinctrl_pm_select_idle_state(dev); return 0; } static int nmk_i2c_runtime_resume(struct device *dev) { struct amba_device *adev = to_amba_device(dev); struct nmk_i2c_dev *nmk_i2c = amba_get_drvdata(adev); int ret; ret = clk_prepare_enable(nmk_i2c->clk); if (ret) { dev_err(dev, "can't prepare_enable clock\n"); return ret; } pinctrl_pm_select_default_state(dev); ret = init_hw(nmk_i2c); if (ret) { clk_disable_unprepare(nmk_i2c->clk); pinctrl_pm_select_idle_state(dev); } return ret; } static const struct dev_pm_ops nmk_i2c_pm = { LATE_SYSTEM_SLEEP_PM_OPS(nmk_i2c_suspend_late, nmk_i2c_resume_early) RUNTIME_PM_OPS(nmk_i2c_runtime_suspend, nmk_i2c_runtime_resume, NULL) }; static unsigned int nmk_i2c_functionality(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR; } static const struct i2c_algorithm nmk_i2c_algo = { .master_xfer = nmk_i2c_xfer, .functionality = nmk_i2c_functionality }; static void nmk_i2c_of_probe(struct device_node *np, struct nmk_i2c_dev *nmk) { /* Default to 100 kHz if no frequency is given in the node */ if (of_property_read_u32(np, "clock-frequency", &nmk->clk_freq)) nmk->clk_freq = I2C_MAX_STANDARD_MODE_FREQ; /* This driver only supports 'standard' and 'fast' modes of operation. 
*/ if (nmk->clk_freq <= I2C_MAX_STANDARD_MODE_FREQ) nmk->sm = I2C_FREQ_MODE_STANDARD; else nmk->sm = I2C_FREQ_MODE_FAST; nmk->tft = 1; /* Tx FIFO threshold */ nmk->rft = 8; /* Rx FIFO threshold */ nmk->timeout = 200; /* Slave response timeout(ms) */ } static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id) { int ret = 0; struct device_node *np = adev->dev.of_node; struct nmk_i2c_dev *dev; struct i2c_adapter *adap; struct i2c_vendor_data *vendor = id->data; u32 max_fifo_threshold = (vendor->fifodepth / 2) - 1; dev = devm_kzalloc(&adev->dev, sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; dev->vendor = vendor; dev->adev = adev; nmk_i2c_of_probe(np, dev); if (dev->tft > max_fifo_threshold) { dev_warn(&adev->dev, "requested TX FIFO threshold %u, adjusted down to %u\n", dev->tft, max_fifo_threshold); dev->tft = max_fifo_threshold; } if (dev->rft > max_fifo_threshold) { dev_warn(&adev->dev, "requested RX FIFO threshold %u, adjusted down to %u\n", dev->rft, max_fifo_threshold); dev->rft = max_fifo_threshold; } amba_set_drvdata(adev, dev); dev->virtbase = devm_ioremap(&adev->dev, adev->res.start, resource_size(&adev->res)); if (!dev->virtbase) return -ENOMEM; dev->irq = adev->irq[0]; ret = devm_request_irq(&adev->dev, dev->irq, i2c_irq_handler, 0, DRIVER_NAME, dev); if (ret) return dev_err_probe(&adev->dev, ret, "cannot claim the irq %d\n", dev->irq); dev->clk = devm_clk_get_enabled(&adev->dev, NULL); if (IS_ERR(dev->clk)) return dev_err_probe(&adev->dev, PTR_ERR(dev->clk), "could enable i2c clock\n"); init_hw(dev); adap = &dev->adap; adap->dev.of_node = np; adap->dev.parent = &adev->dev; adap->owner = THIS_MODULE; adap->class = I2C_CLASS_DEPRECATED; adap->algo = &nmk_i2c_algo; adap->timeout = msecs_to_jiffies(dev->timeout); snprintf(adap->name, sizeof(adap->name), "Nomadik I2C at %pR", &adev->res); i2c_set_adapdata(adap, dev); dev_info(&adev->dev, "initialize %s on virtual base %p\n", adap->name, dev->virtbase); ret = i2c_add_adapter(adap); if (ret) return ret; pm_runtime_put(&adev->dev); return 0; } static void nmk_i2c_remove(struct amba_device *adev) { struct nmk_i2c_dev *dev = amba_get_drvdata(adev); i2c_del_adapter(&dev->adap); flush_i2c_fifo(dev); disable_all_interrupts(dev); clear_all_interrupts(dev); /* disable the controller */ i2c_clr_bit(dev->virtbase + I2C_CR, I2C_CR_PE); } static struct i2c_vendor_data vendor_stn8815 = { .has_mtdws = false, .fifodepth = 16, /* Guessed from TFTR/RFTR = 7 */ }; static struct i2c_vendor_data vendor_db8500 = { .has_mtdws = true, .fifodepth = 32, /* Guessed from TFTR/RFTR = 15 */ }; static const struct amba_id nmk_i2c_ids[] = { { .id = 0x00180024, .mask = 0x00ffffff, .data = &vendor_stn8815, }, { .id = 0x00380024, .mask = 0x00ffffff, .data = &vendor_db8500, }, {}, }; MODULE_DEVICE_TABLE(amba, nmk_i2c_ids); static struct amba_driver nmk_i2c_driver = { .drv = { .owner = THIS_MODULE, .name = DRIVER_NAME, .pm = pm_ptr(&nmk_i2c_pm), }, .id_table = nmk_i2c_ids, .probe = nmk_i2c_probe, .remove = nmk_i2c_remove, }; static int __init nmk_i2c_init(void) { return amba_driver_register(&nmk_i2c_driver); } static void __exit nmk_i2c_exit(void) { amba_driver_unregister(&nmk_i2c_driver); } subsys_initcall(nmk_i2c_init); module_exit(nmk_i2c_exit); MODULE_AUTHOR("Sachin Verma"); MODULE_AUTHOR("Srinidhi KASAGAR"); MODULE_DESCRIPTION("Nomadik/Ux500 I2C driver"); MODULE_LICENSE("GPL");
linux-master
drivers/i2c/busses/i2c-nomadik.c
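setup_i2c_controller() above derives two values from the input clock: SLSU, the data setup time after SCL stretching expressed in i2c_clk cycles plus one, and BRCR, the baud-rate counter i2c_clk / (bus_freq * div) with div = 2 for standard mode and 3 for fast and fast-plus mode. The standalone program below reproduces that arithmetic for an assumed 48 MHz input clock and a 400 kHz fast-mode target, purely to make the numbers concrete; the real driver gets the rate from clk_get_rate().

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	uint32_t i2c_clk = 48000000;	/* Hz, assumed input clock */
	uint32_t bus_freq = 400000;	/* fast-mode target */
	uint32_t setup_ns = 100;	/* required data setup time, fast mode */

	/* SLSU: setup time in i2c_clk cycles, plus one as the spec requires */
	uint32_t ns_per_cycle = DIV_ROUND_UP(1000000000u, i2c_clk);
	uint32_t slsu = DIV_ROUND_UP(setup_ns, ns_per_cycle) + 1;

	/* BRCR: divider is 3 above standard-mode speed, otherwise 2 */
	uint32_t div = (bus_freq > 100000) ? 3 : 2;
	uint32_t brcr2 = (i2c_clk / (bus_freq * div)) & 0xffff;

	printf("ns/cycle=%u slsu=%u brcr2=%u\n", ns_per_cycle, slsu, brcr2);
	return 0;
}

With these inputs the program prints ns/cycle=21, slsu=6 and brcr2=40, matching what the driver would program into I2C_SCR and I2C_BRCR for this configuration.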
// SPDX-License-Identifier: GPL-2.0+ /* * FSI-attached I2C master algorithm * * Copyright 2018 IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/bitfield.h> #include <linux/bitops.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/errno.h> #include <linux/fsi.h> #include <linux/i2c.h> #include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/of.h> #include <linux/slab.h> #define FSI_ENGID_I2C 0x7 #define I2C_DEFAULT_CLK_DIV 6 /* i2c registers */ #define I2C_FSI_FIFO 0x00 #define I2C_FSI_CMD 0x04 #define I2C_FSI_MODE 0x08 #define I2C_FSI_WATER_MARK 0x0C #define I2C_FSI_INT_MASK 0x10 #define I2C_FSI_INT_COND 0x14 #define I2C_FSI_OR_INT_MASK 0x14 #define I2C_FSI_INTS 0x18 #define I2C_FSI_AND_INT_MASK 0x18 #define I2C_FSI_STAT 0x1C #define I2C_FSI_RESET_I2C 0x1C #define I2C_FSI_ESTAT 0x20 #define I2C_FSI_RESET_ERR 0x20 #define I2C_FSI_RESID_LEN 0x24 #define I2C_FSI_SET_SCL 0x24 #define I2C_FSI_PORT_BUSY 0x28 #define I2C_FSI_RESET_SCL 0x2C #define I2C_FSI_SET_SDA 0x30 #define I2C_FSI_RESET_SDA 0x34 /* cmd register */ #define I2C_CMD_WITH_START BIT(31) #define I2C_CMD_WITH_ADDR BIT(30) #define I2C_CMD_RD_CONT BIT(29) #define I2C_CMD_WITH_STOP BIT(28) #define I2C_CMD_FORCELAUNCH BIT(27) #define I2C_CMD_ADDR GENMASK(23, 17) #define I2C_CMD_READ BIT(16) #define I2C_CMD_LEN GENMASK(15, 0) /* mode register */ #define I2C_MODE_CLKDIV GENMASK(31, 16) #define I2C_MODE_PORT GENMASK(15, 10) #define I2C_MODE_ENHANCED BIT(3) #define I2C_MODE_DIAG BIT(2) #define I2C_MODE_PACE_ALLOW BIT(1) #define I2C_MODE_WRAP BIT(0) /* watermark register */ #define I2C_WATERMARK_HI GENMASK(15, 12) #define I2C_WATERMARK_LO GENMASK(7, 4) #define I2C_FIFO_HI_LVL 4 #define I2C_FIFO_LO_LVL 4 /* interrupt register */ #define I2C_INT_INV_CMD BIT(15) #define I2C_INT_PARITY BIT(14) #define I2C_INT_BE_OVERRUN BIT(13) #define I2C_INT_BE_ACCESS BIT(12) #define I2C_INT_LOST_ARB BIT(11) #define I2C_INT_NACK BIT(10) #define I2C_INT_DAT_REQ BIT(9) #define I2C_INT_CMD_COMP BIT(8) #define I2C_INT_STOP_ERR BIT(7) #define I2C_INT_BUSY BIT(6) #define I2C_INT_IDLE BIT(5) /* status register */ #define I2C_STAT_INV_CMD BIT(31) #define I2C_STAT_PARITY BIT(30) #define I2C_STAT_BE_OVERRUN BIT(29) #define I2C_STAT_BE_ACCESS BIT(28) #define I2C_STAT_LOST_ARB BIT(27) #define I2C_STAT_NACK BIT(26) #define I2C_STAT_DAT_REQ BIT(25) #define I2C_STAT_CMD_COMP BIT(24) #define I2C_STAT_STOP_ERR BIT(23) #define I2C_STAT_MAX_PORT GENMASK(22, 16) #define I2C_STAT_ANY_INT BIT(15) #define I2C_STAT_SCL_IN BIT(11) #define I2C_STAT_SDA_IN BIT(10) #define I2C_STAT_PORT_BUSY BIT(9) #define I2C_STAT_SELF_BUSY BIT(8) #define I2C_STAT_FIFO_COUNT GENMASK(7, 0) #define I2C_STAT_ERR (I2C_STAT_INV_CMD | \ I2C_STAT_PARITY | \ I2C_STAT_BE_OVERRUN | \ I2C_STAT_BE_ACCESS | \ I2C_STAT_LOST_ARB | \ I2C_STAT_NACK | \ I2C_STAT_STOP_ERR) #define I2C_STAT_ANY_RESP (I2C_STAT_ERR | \ I2C_STAT_DAT_REQ | \ I2C_STAT_CMD_COMP) /* extended status register */ #define I2C_ESTAT_FIFO_SZ GENMASK(31, 24) #define I2C_ESTAT_SCL_IN_SY BIT(15) #define I2C_ESTAT_SDA_IN_SY BIT(14) #define I2C_ESTAT_S_SCL BIT(13) #define I2C_ESTAT_S_SDA BIT(12) #define I2C_ESTAT_M_SCL BIT(11) #define I2C_ESTAT_M_SDA BIT(10) #define I2C_ESTAT_HI_WATER BIT(9) #define I2C_ESTAT_LO_WATER 
BIT(8) #define I2C_ESTAT_PORT_BUSY BIT(7) #define I2C_ESTAT_SELF_BUSY BIT(6) #define I2C_ESTAT_VERSION GENMASK(4, 0) /* port busy register */ #define I2C_PORT_BUSY_RESET BIT(31) /* wait for command complete or data request */ #define I2C_CMD_SLEEP_MAX_US 500 #define I2C_CMD_SLEEP_MIN_US 50 /* wait after reset; choose time from legacy driver */ #define I2C_RESET_SLEEP_MAX_US 2000 #define I2C_RESET_SLEEP_MIN_US 1000 /* choose timeout length from legacy driver; it's well tested */ #define I2C_ABORT_TIMEOUT msecs_to_jiffies(100) struct fsi_i2c_master { struct fsi_device *fsi; u8 fifo_size; struct list_head ports; struct mutex lock; }; struct fsi_i2c_port { struct list_head list; struct i2c_adapter adapter; struct fsi_i2c_master *master; u16 port; u16 xfrd; }; static int fsi_i2c_read_reg(struct fsi_device *fsi, unsigned int reg, u32 *data) { int rc; __be32 data_be; rc = fsi_device_read(fsi, reg, &data_be, sizeof(data_be)); if (rc) return rc; *data = be32_to_cpu(data_be); return 0; } static int fsi_i2c_write_reg(struct fsi_device *fsi, unsigned int reg, u32 *data) { __be32 data_be = cpu_to_be32p(data); return fsi_device_write(fsi, reg, &data_be, sizeof(data_be)); } static int fsi_i2c_dev_init(struct fsi_i2c_master *i2c) { int rc; u32 mode = I2C_MODE_ENHANCED, extended_status, watermark; u32 interrupt = 0; /* since we use polling, disable interrupts */ rc = fsi_i2c_write_reg(i2c->fsi, I2C_FSI_INT_MASK, &interrupt); if (rc) return rc; mode |= FIELD_PREP(I2C_MODE_CLKDIV, I2C_DEFAULT_CLK_DIV); rc = fsi_i2c_write_reg(i2c->fsi, I2C_FSI_MODE, &mode); if (rc) return rc; rc = fsi_i2c_read_reg(i2c->fsi, I2C_FSI_ESTAT, &extended_status); if (rc) return rc; i2c->fifo_size = FIELD_GET(I2C_ESTAT_FIFO_SZ, extended_status); watermark = FIELD_PREP(I2C_WATERMARK_HI, i2c->fifo_size - I2C_FIFO_HI_LVL); watermark |= FIELD_PREP(I2C_WATERMARK_LO, I2C_FIFO_LO_LVL); return fsi_i2c_write_reg(i2c->fsi, I2C_FSI_WATER_MARK, &watermark); } static int fsi_i2c_set_port(struct fsi_i2c_port *port) { int rc; struct fsi_device *fsi = port->master->fsi; u32 mode, dummy = 0; rc = fsi_i2c_read_reg(fsi, I2C_FSI_MODE, &mode); if (rc) return rc; if (FIELD_GET(I2C_MODE_PORT, mode) == port->port) return 0; mode = (mode & ~I2C_MODE_PORT) | FIELD_PREP(I2C_MODE_PORT, port->port); rc = fsi_i2c_write_reg(fsi, I2C_FSI_MODE, &mode); if (rc) return rc; /* reset engine when port is changed */ return fsi_i2c_write_reg(fsi, I2C_FSI_RESET_ERR, &dummy); } static int fsi_i2c_start(struct fsi_i2c_port *port, struct i2c_msg *msg, bool stop) { struct fsi_i2c_master *i2c = port->master; u32 cmd = I2C_CMD_WITH_START | I2C_CMD_WITH_ADDR; port->xfrd = 0; if (msg->flags & I2C_M_RD) cmd |= I2C_CMD_READ; if (stop || msg->flags & I2C_M_STOP) cmd |= I2C_CMD_WITH_STOP; cmd |= FIELD_PREP(I2C_CMD_ADDR, msg->addr); cmd |= FIELD_PREP(I2C_CMD_LEN, msg->len); return fsi_i2c_write_reg(i2c->fsi, I2C_FSI_CMD, &cmd); } static int fsi_i2c_get_op_bytes(int op_bytes) { /* fsi is limited to max 4 byte aligned ops */ if (op_bytes > 4) return 4; else if (op_bytes == 3) return 2; return op_bytes; } static int fsi_i2c_write_fifo(struct fsi_i2c_port *port, struct i2c_msg *msg, u8 fifo_count) { int write; int rc; struct fsi_i2c_master *i2c = port->master; int bytes_to_write = i2c->fifo_size - fifo_count; int bytes_remaining = msg->len - port->xfrd; bytes_to_write = min(bytes_to_write, bytes_remaining); while (bytes_to_write) { write = fsi_i2c_get_op_bytes(bytes_to_write); rc = fsi_device_write(i2c->fsi, I2C_FSI_FIFO, &msg->buf[port->xfrd], write); if (rc) return rc; port->xfrd += 
write; bytes_to_write -= write; } return 0; } static int fsi_i2c_read_fifo(struct fsi_i2c_port *port, struct i2c_msg *msg, u8 fifo_count) { int read; int rc; struct fsi_i2c_master *i2c = port->master; int bytes_to_read; int xfr_remaining = msg->len - port->xfrd; u32 dummy; bytes_to_read = min_t(int, fifo_count, xfr_remaining); while (bytes_to_read) { read = fsi_i2c_get_op_bytes(bytes_to_read); if (xfr_remaining) { rc = fsi_device_read(i2c->fsi, I2C_FSI_FIFO, &msg->buf[port->xfrd], read); if (rc) return rc; port->xfrd += read; xfr_remaining -= read; } else { /* no more buffer but data in fifo, need to clear it */ rc = fsi_device_read(i2c->fsi, I2C_FSI_FIFO, &dummy, read); if (rc) return rc; } bytes_to_read -= read; } return 0; } static int fsi_i2c_get_scl(struct i2c_adapter *adap) { u32 stat = 0; struct fsi_i2c_port *port = adap->algo_data; struct fsi_i2c_master *i2c = port->master; fsi_i2c_read_reg(i2c->fsi, I2C_FSI_STAT, &stat); return !!(stat & I2C_STAT_SCL_IN); } static void fsi_i2c_set_scl(struct i2c_adapter *adap, int val) { u32 dummy = 0; struct fsi_i2c_port *port = adap->algo_data; struct fsi_i2c_master *i2c = port->master; if (val) fsi_i2c_write_reg(i2c->fsi, I2C_FSI_SET_SCL, &dummy); else fsi_i2c_write_reg(i2c->fsi, I2C_FSI_RESET_SCL, &dummy); } static int fsi_i2c_get_sda(struct i2c_adapter *adap) { u32 stat = 0; struct fsi_i2c_port *port = adap->algo_data; struct fsi_i2c_master *i2c = port->master; fsi_i2c_read_reg(i2c->fsi, I2C_FSI_STAT, &stat); return !!(stat & I2C_STAT_SDA_IN); } static void fsi_i2c_set_sda(struct i2c_adapter *adap, int val) { u32 dummy = 0; struct fsi_i2c_port *port = adap->algo_data; struct fsi_i2c_master *i2c = port->master; if (val) fsi_i2c_write_reg(i2c->fsi, I2C_FSI_SET_SDA, &dummy); else fsi_i2c_write_reg(i2c->fsi, I2C_FSI_RESET_SDA, &dummy); } static void fsi_i2c_prepare_recovery(struct i2c_adapter *adap) { int rc; u32 mode; struct fsi_i2c_port *port = adap->algo_data; struct fsi_i2c_master *i2c = port->master; rc = fsi_i2c_read_reg(i2c->fsi, I2C_FSI_MODE, &mode); if (rc) return; mode |= I2C_MODE_DIAG; fsi_i2c_write_reg(i2c->fsi, I2C_FSI_MODE, &mode); } static void fsi_i2c_unprepare_recovery(struct i2c_adapter *adap) { int rc; u32 mode; struct fsi_i2c_port *port = adap->algo_data; struct fsi_i2c_master *i2c = port->master; rc = fsi_i2c_read_reg(i2c->fsi, I2C_FSI_MODE, &mode); if (rc) return; mode &= ~I2C_MODE_DIAG; fsi_i2c_write_reg(i2c->fsi, I2C_FSI_MODE, &mode); } static int fsi_i2c_reset_bus(struct fsi_i2c_master *i2c, struct fsi_i2c_port *port) { int rc; u32 stat, dummy = 0; /* force bus reset, ignore errors */ i2c_recover_bus(&port->adapter); /* reset errors */ rc = fsi_i2c_write_reg(i2c->fsi, I2C_FSI_RESET_ERR, &dummy); if (rc) return rc; /* wait for command complete */ usleep_range(I2C_RESET_SLEEP_MIN_US, I2C_RESET_SLEEP_MAX_US); rc = fsi_i2c_read_reg(i2c->fsi, I2C_FSI_STAT, &stat); if (rc) return rc; if (stat & I2C_STAT_CMD_COMP) return 0; /* failed to get command complete; reset engine again */ rc = fsi_i2c_write_reg(i2c->fsi, I2C_FSI_RESET_I2C, &dummy); if (rc) return rc; /* re-init engine again */ return fsi_i2c_dev_init(i2c); } static int fsi_i2c_reset_engine(struct fsi_i2c_master *i2c, u16 port) { int rc; u32 mode, dummy = 0; /* reset engine */ rc = fsi_i2c_write_reg(i2c->fsi, I2C_FSI_RESET_I2C, &dummy); if (rc) return rc; /* re-init engine */ rc = fsi_i2c_dev_init(i2c); if (rc) return rc; rc = fsi_i2c_read_reg(i2c->fsi, I2C_FSI_MODE, &mode); if (rc) return rc; /* set port; default after reset is 0 */ if (port) { mode &= ~I2C_MODE_PORT; 
mode |= FIELD_PREP(I2C_MODE_PORT, port); rc = fsi_i2c_write_reg(i2c->fsi, I2C_FSI_MODE, &mode); if (rc) return rc; } /* reset busy register; hw workaround */ dummy = I2C_PORT_BUSY_RESET; rc = fsi_i2c_write_reg(i2c->fsi, I2C_FSI_PORT_BUSY, &dummy); if (rc) return rc; return 0; } static int fsi_i2c_abort(struct fsi_i2c_port *port, u32 status) { int rc; unsigned long start; u32 cmd = I2C_CMD_WITH_STOP; u32 stat; struct fsi_i2c_master *i2c = port->master; struct fsi_device *fsi = i2c->fsi; rc = fsi_i2c_reset_engine(i2c, port->port); if (rc) return rc; rc = fsi_i2c_read_reg(fsi, I2C_FSI_STAT, &stat); if (rc) return rc; /* if sda is low, perform full bus reset */ if (!(stat & I2C_STAT_SDA_IN)) { rc = fsi_i2c_reset_bus(i2c, port); if (rc) return rc; } /* skip final stop command for these errors */ if (status & (I2C_STAT_PARITY | I2C_STAT_LOST_ARB | I2C_STAT_STOP_ERR)) return 0; /* write stop command */ rc = fsi_i2c_write_reg(fsi, I2C_FSI_CMD, &cmd); if (rc) return rc; /* wait until we see command complete in the master */ start = jiffies; do { rc = fsi_i2c_read_reg(fsi, I2C_FSI_STAT, &status); if (rc) return rc; if (status & I2C_STAT_CMD_COMP) return 0; usleep_range(I2C_CMD_SLEEP_MIN_US, I2C_CMD_SLEEP_MAX_US); } while (time_after(start + I2C_ABORT_TIMEOUT, jiffies)); return -ETIMEDOUT; } static int fsi_i2c_handle_status(struct fsi_i2c_port *port, struct i2c_msg *msg, u32 status) { int rc; u8 fifo_count; if (status & I2C_STAT_ERR) { rc = fsi_i2c_abort(port, status); if (rc) return rc; if (status & I2C_STAT_INV_CMD) return -EINVAL; if (status & (I2C_STAT_PARITY | I2C_STAT_BE_OVERRUN | I2C_STAT_BE_ACCESS)) return -EPROTO; if (status & I2C_STAT_NACK) return -ENXIO; if (status & I2C_STAT_LOST_ARB) return -EAGAIN; if (status & I2C_STAT_STOP_ERR) return -EBADMSG; return -EIO; } if (status & I2C_STAT_DAT_REQ) { fifo_count = FIELD_GET(I2C_STAT_FIFO_COUNT, status); if (msg->flags & I2C_M_RD) return fsi_i2c_read_fifo(port, msg, fifo_count); return fsi_i2c_write_fifo(port, msg, fifo_count); } if (status & I2C_STAT_CMD_COMP) { if (port->xfrd < msg->len) return -ENODATA; return msg->len; } return 0; } static int fsi_i2c_wait(struct fsi_i2c_port *port, struct i2c_msg *msg, unsigned long timeout) { u32 status = 0; int rc; unsigned long start = jiffies; do { rc = fsi_i2c_read_reg(port->master->fsi, I2C_FSI_STAT, &status); if (rc) return rc; if (status & I2C_STAT_ANY_RESP) { rc = fsi_i2c_handle_status(port, msg, status); if (rc < 0) return rc; /* cmd complete and all data xfrd */ if (rc == msg->len) return 0; /* need to xfr more data, but maybe don't need wait */ continue; } usleep_range(I2C_CMD_SLEEP_MIN_US, I2C_CMD_SLEEP_MAX_US); } while (time_after(start + timeout, jiffies)); return -ETIMEDOUT; } static int fsi_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { int i, rc; unsigned long start_time; struct fsi_i2c_port *port = adap->algo_data; struct fsi_i2c_master *master = port->master; struct i2c_msg *msg; mutex_lock(&master->lock); rc = fsi_i2c_set_port(port); if (rc) goto unlock; for (i = 0; i < num; i++) { msg = msgs + i; start_time = jiffies; rc = fsi_i2c_start(port, msg, i == num - 1); if (rc) goto unlock; rc = fsi_i2c_wait(port, msg, adap->timeout - (jiffies - start_time)); if (rc) goto unlock; } unlock: mutex_unlock(&master->lock); return rc ? 
: num; } static u32 fsi_i2c_functionality(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_PROTOCOL_MANGLING | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SMBUS_BLOCK_DATA; } static struct i2c_bus_recovery_info fsi_i2c_bus_recovery_info = { .recover_bus = i2c_generic_scl_recovery, .get_scl = fsi_i2c_get_scl, .set_scl = fsi_i2c_set_scl, .get_sda = fsi_i2c_get_sda, .set_sda = fsi_i2c_set_sda, .prepare_recovery = fsi_i2c_prepare_recovery, .unprepare_recovery = fsi_i2c_unprepare_recovery, }; static const struct i2c_algorithm fsi_i2c_algorithm = { .master_xfer = fsi_i2c_xfer, .functionality = fsi_i2c_functionality, }; static struct device_node *fsi_i2c_find_port_of_node(struct device_node *fsi, int port) { struct device_node *np; u32 port_no; int rc; for_each_child_of_node(fsi, np) { rc = of_property_read_u32(np, "reg", &port_no); if (!rc && port_no == port) return np; } return NULL; } static int fsi_i2c_probe(struct device *dev) { struct fsi_i2c_master *i2c; struct fsi_i2c_port *port; struct device_node *np; u32 port_no, ports, stat; int rc; i2c = devm_kzalloc(dev, sizeof(*i2c), GFP_KERNEL); if (!i2c) return -ENOMEM; mutex_init(&i2c->lock); i2c->fsi = to_fsi_dev(dev); INIT_LIST_HEAD(&i2c->ports); rc = fsi_i2c_dev_init(i2c); if (rc) return rc; rc = fsi_i2c_read_reg(i2c->fsi, I2C_FSI_STAT, &stat); if (rc) return rc; ports = FIELD_GET(I2C_STAT_MAX_PORT, stat) + 1; dev_dbg(dev, "I2C master has %d ports\n", ports); for (port_no = 0; port_no < ports; port_no++) { np = fsi_i2c_find_port_of_node(dev->of_node, port_no); if (!of_device_is_available(np)) continue; port = kzalloc(sizeof(*port), GFP_KERNEL); if (!port) { of_node_put(np); break; } port->master = i2c; port->port = port_no; port->adapter.owner = THIS_MODULE; port->adapter.dev.of_node = np; port->adapter.dev.parent = dev; port->adapter.algo = &fsi_i2c_algorithm; port->adapter.bus_recovery_info = &fsi_i2c_bus_recovery_info; port->adapter.algo_data = port; snprintf(port->adapter.name, sizeof(port->adapter.name), "i2c_bus-%u", port_no); rc = i2c_add_adapter(&port->adapter); if (rc < 0) { dev_err(dev, "Failed to register adapter: %d\n", rc); kfree(port); continue; } list_add(&port->list, &i2c->ports); } dev_set_drvdata(dev, i2c); return 0; } static int fsi_i2c_remove(struct device *dev) { struct fsi_i2c_master *i2c = dev_get_drvdata(dev); struct fsi_i2c_port *port, *tmp; list_for_each_entry_safe(port, tmp, &i2c->ports, list) { list_del(&port->list); i2c_del_adapter(&port->adapter); kfree(port); } return 0; } static const struct fsi_device_id fsi_i2c_ids[] = { { FSI_ENGID_I2C, FSI_VERSION_ANY }, { } }; static struct fsi_driver fsi_i2c_driver = { .id_table = fsi_i2c_ids, .drv = { .name = "i2c-fsi", .bus = &fsi_bus_type, .probe = fsi_i2c_probe, .remove = fsi_i2c_remove, }, }; module_fsi_driver(fsi_i2c_driver); MODULE_AUTHOR("Eddie James <[email protected]>"); MODULE_DESCRIPTION("FSI attached I2C master"); MODULE_LICENSE("GPL");
linux-master
drivers/i2c/busses/i2c-fsi.c
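The fsi_i2c_start() routine in i2c-fsi.c above packs the whole transfer description into the single I2C_FSI_CMD register using the FIELD_PREP()/GENMASK() helpers from <linux/bitfield.h>. Below is a minimal sketch of that idiom, not part of the driver: the EX_-prefixed macros copy the cmd-register layout defined above, and the helper name plus the 0x50 address and 4-byte length are made up for illustration.

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Field layout copied from the cmd register definitions above. */
#define EX_I2C_CMD_WITH_START	BIT(31)
#define EX_I2C_CMD_WITH_ADDR	BIT(30)
#define EX_I2C_CMD_WITH_STOP	BIT(28)
#define EX_I2C_CMD_ADDR		GENMASK(23, 17)
#define EX_I2C_CMD_READ		BIT(16)
#define EX_I2C_CMD_LEN		GENMASK(15, 0)

/* Illustrative helper (not in the driver): cmd word for a simple read. */
static u32 example_build_read_cmd(u8 addr7, u16 len)
{
	u32 cmd = EX_I2C_CMD_WITH_START | EX_I2C_CMD_WITH_ADDR |
		  EX_I2C_CMD_WITH_STOP | EX_I2C_CMD_READ;

	cmd |= FIELD_PREP(EX_I2C_CMD_ADDR, addr7);	/* 7-bit address into bits 23:17 */
	cmd |= FIELD_PREP(EX_I2C_CMD_LEN, len);		/* transfer length into bits 15:0 */

	return cmd;
}

/* example_build_read_cmd(0x50, 4) evaluates to 0xd0a10004. */

Building the word through FIELD_PREP() against GENMASK() masks keeps the shift amounts out of the code, which is why the driver never hard-codes bit positions for the address or length fields.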
// SPDX-License-Identifier: GPL-2.0 /* * SuperH Mobile I2C Controller * * Copyright (C) 2014-19 Wolfram Sang <[email protected]> * Copyright (C) 2008 Magnus Damm * * Portions of the code based on out-of-tree driver i2c-sh7343.c * Copyright (c) 2006 Carlos Munoz <[email protected]> */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/slab.h> /* Transmit operation: */ /* */ /* 0 byte transmit */ /* BUS: S A8 ACK P(*) */ /* IRQ: DTE WAIT */ /* ICIC: */ /* ICCR: 0x94 0x90 */ /* ICDR: A8 */ /* */ /* 1 byte transmit */ /* BUS: S A8 ACK D8(1) ACK P(*) */ /* IRQ: DTE WAIT WAIT */ /* ICIC: -DTE */ /* ICCR: 0x94 0x90 */ /* ICDR: A8 D8(1) */ /* */ /* 2 byte transmit */ /* BUS: S A8 ACK D8(1) ACK D8(2) ACK P(*) */ /* IRQ: DTE WAIT WAIT WAIT */ /* ICIC: -DTE */ /* ICCR: 0x94 0x90 */ /* ICDR: A8 D8(1) D8(2) */ /* */ /* 3 bytes or more, +---------+ gets repeated */ /* */ /* */ /* Receive operation: */ /* */ /* 0 byte receive - not supported since slave may hold SDA low */ /* */ /* 1 byte receive [TX] | [RX] */ /* BUS: S A8 ACK | D8(1) ACK P(*) */ /* IRQ: DTE WAIT | WAIT DTE */ /* ICIC: -DTE | +DTE */ /* ICCR: 0x94 0x81 | 0xc0 */ /* ICDR: A8 | D8(1) */ /* */ /* 2 byte receive [TX]| [RX] */ /* BUS: S A8 ACK | D8(1) ACK D8(2) ACK P(*) */ /* IRQ: DTE WAIT | WAIT WAIT DTE */ /* ICIC: -DTE | +DTE */ /* ICCR: 0x94 0x81 | 0xc0 */ /* ICDR: A8 | D8(1) D8(2) */ /* */ /* 3 byte receive [TX] | [RX] (*) */ /* BUS: S A8 ACK | D8(1) ACK D8(2) ACK D8(3) ACK P */ /* IRQ: DTE WAIT | WAIT WAIT WAIT DTE */ /* ICIC: -DTE | +DTE */ /* ICCR: 0x94 0x81 | 0xc0 */ /* ICDR: A8 | D8(1) D8(2) D8(3) */ /* */ /* 4 bytes or more, this part is repeated +---------+ */ /* */ /* */ /* Interrupt order and BUSY flag */ /* ___ _ */ /* SDA ___\___XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXAAAAAAAAA___/ */ /* SCL \_/1\_/2\_/3\_/4\_/5\_/6\_/7\_/8\___/9\_____/ */ /* */ /* S D7 D6 D5 D4 D3 D2 D1 D0 P(*) */ /* ___ */ /* WAIT IRQ ________________________________/ \___________ */ /* TACK IRQ ____________________________________/ \_______ */ /* DTE IRQ __________________________________________/ \_ */ /* AL IRQ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /* _______________________________________________ */ /* BUSY __/ \_ */ /* */ /* (*) The STOP condition is only sent by the master at the end of the last */ /* I2C message or if the I2C_M_STOP flag is set. Similarly, the BUSY bit is */ /* only cleared after the STOP condition, so, between messages we have to */ /* poll for the DTE bit. 
*/ /* */ enum sh_mobile_i2c_op { OP_START = 0, OP_TX_FIRST, OP_TX, OP_TX_STOP, OP_TX_TO_RX, OP_RX, OP_RX_STOP, OP_RX_STOP_DATA, }; struct sh_mobile_i2c_data { struct device *dev; void __iomem *reg; struct i2c_adapter adap; unsigned long bus_speed; unsigned int clks_per_count; struct clk *clk; u_int8_t icic; u_int8_t flags; u_int16_t iccl; u_int16_t icch; spinlock_t lock; wait_queue_head_t wait; struct i2c_msg *msg; int pos; int sr; bool send_stop; bool stop_after_dma; bool atomic_xfer; struct resource *res; struct dma_chan *dma_tx; struct dma_chan *dma_rx; struct scatterlist sg; enum dma_data_direction dma_direction; u8 *dma_buf; }; struct sh_mobile_dt_config { int clks_per_count; int (*setup)(struct sh_mobile_i2c_data *pd); }; #define IIC_FLAG_HAS_ICIC67 (1 << 0) /* Register offsets */ #define ICDR 0x00 #define ICCR 0x04 #define ICSR 0x08 #define ICIC 0x0c #define ICCL 0x10 #define ICCH 0x14 #define ICSTART 0x70 /* Register bits */ #define ICCR_ICE 0x80 #define ICCR_RACK 0x40 #define ICCR_TRS 0x10 #define ICCR_BBSY 0x04 #define ICCR_SCP 0x01 #define ICSR_SCLM 0x80 #define ICSR_SDAM 0x40 #define SW_DONE 0x20 #define ICSR_BUSY 0x10 #define ICSR_AL 0x08 #define ICSR_TACK 0x04 #define ICSR_WAIT 0x02 #define ICSR_DTE 0x01 #define ICIC_ICCLB8 0x80 #define ICIC_ICCHB8 0x40 #define ICIC_TDMAE 0x20 #define ICIC_RDMAE 0x10 #define ICIC_ALE 0x08 #define ICIC_TACKE 0x04 #define ICIC_WAITE 0x02 #define ICIC_DTEE 0x01 #define ICSTART_ICSTART 0x10 static void iic_wr(struct sh_mobile_i2c_data *pd, int offs, unsigned char data) { if (offs == ICIC) data |= pd->icic; iowrite8(data, pd->reg + offs); } static unsigned char iic_rd(struct sh_mobile_i2c_data *pd, int offs) { return ioread8(pd->reg + offs); } static void iic_set_clr(struct sh_mobile_i2c_data *pd, int offs, unsigned char set, unsigned char clr) { iic_wr(pd, offs, (iic_rd(pd, offs) | set) & ~clr); } static u32 sh_mobile_i2c_iccl(unsigned long count_khz, u32 tLOW, u32 tf) { /* * Conditional expression: * ICCL >= COUNT_CLK * (tLOW + tf) * * SH-Mobile IIC hardware starts counting the LOW period of * the SCL signal (tLOW) as soon as it pulls the SCL line. * In order to meet the tLOW timing spec, we need to take into * account the fall time of SCL signal (tf). Default tf value * should be 0.3 us, for safety. */ return (((count_khz * (tLOW + tf)) + 5000) / 10000); } static u32 sh_mobile_i2c_icch(unsigned long count_khz, u32 tHIGH, u32 tf) { /* * Conditional expression: * ICCH >= COUNT_CLK * (tHIGH + tf) * * SH-Mobile IIC hardware is aware of SCL transition period 'tr', * and can ignore it. SH-Mobile IIC controller starts counting * the HIGH period of the SCL signal (tHIGH) after the SCL input * voltage increases at VIH. * * Afterward it turned out calculating ICCH using only tHIGH spec * will result in violation of the tHD;STA timing spec. We need * to take into account the fall time of SDA signal (tf) at START * condition, in order to meet both tHIGH and tHD;STA specs. */ return (((count_khz * (tHIGH + tf)) + 5000) / 10000); } static int sh_mobile_i2c_check_timing(struct sh_mobile_i2c_data *pd) { u16 max_val = pd->flags & IIC_FLAG_HAS_ICIC67 ? 
0x1ff : 0xff; if (pd->iccl > max_val || pd->icch > max_val) { dev_err(pd->dev, "timing values out of range: L/H=0x%x/0x%x\n", pd->iccl, pd->icch); return -EINVAL; } /* one more bit of ICCL in ICIC */ if (pd->iccl & 0x100) pd->icic |= ICIC_ICCLB8; else pd->icic &= ~ICIC_ICCLB8; /* one more bit of ICCH in ICIC */ if (pd->icch & 0x100) pd->icic |= ICIC_ICCHB8; else pd->icic &= ~ICIC_ICCHB8; dev_dbg(pd->dev, "timing values: L/H=0x%x/0x%x\n", pd->iccl, pd->icch); return 0; } static int sh_mobile_i2c_init(struct sh_mobile_i2c_data *pd) { unsigned long i2c_clk_khz; u32 tHIGH, tLOW, tf; i2c_clk_khz = clk_get_rate(pd->clk) / 1000 / pd->clks_per_count; if (pd->bus_speed == I2C_MAX_STANDARD_MODE_FREQ) { tLOW = 47; /* tLOW = 4.7 us */ tHIGH = 40; /* tHD;STA = tHIGH = 4.0 us */ tf = 3; /* tf = 0.3 us */ } else if (pd->bus_speed == I2C_MAX_FAST_MODE_FREQ) { tLOW = 13; /* tLOW = 1.3 us */ tHIGH = 6; /* tHD;STA = tHIGH = 0.6 us */ tf = 3; /* tf = 0.3 us */ } else { dev_err(pd->dev, "unrecognized bus speed %lu Hz\n", pd->bus_speed); return -EINVAL; } pd->iccl = sh_mobile_i2c_iccl(i2c_clk_khz, tLOW, tf); pd->icch = sh_mobile_i2c_icch(i2c_clk_khz, tHIGH, tf); return sh_mobile_i2c_check_timing(pd); } static int sh_mobile_i2c_v2_init(struct sh_mobile_i2c_data *pd) { unsigned long clks_per_cycle; /* L = 5, H = 4, L + H = 9 */ clks_per_cycle = clk_get_rate(pd->clk) / pd->bus_speed; pd->iccl = DIV_ROUND_UP(clks_per_cycle * 5 / 9 - 1, pd->clks_per_count); pd->icch = DIV_ROUND_UP(clks_per_cycle * 4 / 9 - 5, pd->clks_per_count); return sh_mobile_i2c_check_timing(pd); } static unsigned char i2c_op(struct sh_mobile_i2c_data *pd, enum sh_mobile_i2c_op op) { unsigned char ret = 0; unsigned long flags; dev_dbg(pd->dev, "op %d\n", op); spin_lock_irqsave(&pd->lock, flags); switch (op) { case OP_START: /* issue start and trigger DTE interrupt */ iic_wr(pd, ICCR, ICCR_ICE | ICCR_TRS | ICCR_BBSY); break; case OP_TX_FIRST: /* disable DTE interrupt and write client address */ iic_wr(pd, ICIC, ICIC_WAITE | ICIC_ALE | ICIC_TACKE); iic_wr(pd, ICDR, i2c_8bit_addr_from_msg(pd->msg)); break; case OP_TX: /* write data */ iic_wr(pd, ICDR, pd->msg->buf[pd->pos]); break; case OP_TX_STOP: /* issue a stop (or rep_start) */ iic_wr(pd, ICCR, pd->send_stop ? 
ICCR_ICE | ICCR_TRS : ICCR_ICE | ICCR_TRS | ICCR_BBSY); break; case OP_TX_TO_RX: /* select read mode */ iic_wr(pd, ICCR, ICCR_ICE | ICCR_SCP); break; case OP_RX: /* just read data */ ret = iic_rd(pd, ICDR); break; case OP_RX_STOP: /* enable DTE interrupt, issue stop */ if (!pd->atomic_xfer) iic_wr(pd, ICIC, ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE); iic_wr(pd, ICCR, ICCR_ICE | ICCR_RACK); break; case OP_RX_STOP_DATA: /* enable DTE interrupt, read data, issue stop */ if (!pd->atomic_xfer) iic_wr(pd, ICIC, ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE); ret = iic_rd(pd, ICDR); iic_wr(pd, ICCR, ICCR_ICE | ICCR_RACK); break; } spin_unlock_irqrestore(&pd->lock, flags); dev_dbg(pd->dev, "op %d, data out 0x%02x\n", op, ret); return ret; } static int sh_mobile_i2c_isr_tx(struct sh_mobile_i2c_data *pd) { if (pd->pos == pd->msg->len) { i2c_op(pd, OP_TX_STOP); return 1; } if (pd->pos == -1) i2c_op(pd, OP_TX_FIRST); else i2c_op(pd, OP_TX); pd->pos++; return 0; } static int sh_mobile_i2c_isr_rx(struct sh_mobile_i2c_data *pd) { int real_pos; /* switch from TX (address) to RX (data) adds two interrupts */ real_pos = pd->pos - 2; if (pd->pos == -1) { i2c_op(pd, OP_TX_FIRST); } else if (pd->pos == 0) { i2c_op(pd, OP_TX_TO_RX); } else if (pd->pos == pd->msg->len) { if (pd->stop_after_dma) { /* Simulate PIO end condition after DMA transfer */ i2c_op(pd, OP_RX_STOP); pd->pos++; goto done; } if (real_pos < 0) i2c_op(pd, OP_RX_STOP); else pd->msg->buf[real_pos] = i2c_op(pd, OP_RX_STOP_DATA); } else if (real_pos >= 0) { pd->msg->buf[real_pos] = i2c_op(pd, OP_RX); } done: pd->pos++; return pd->pos == (pd->msg->len + 2); } static irqreturn_t sh_mobile_i2c_isr(int irq, void *dev_id) { struct sh_mobile_i2c_data *pd = dev_id; unsigned char sr; int wakeup = 0; sr = iic_rd(pd, ICSR); pd->sr |= sr; /* remember state */ dev_dbg(pd->dev, "i2c_isr 0x%02x 0x%02x %s %d %d!\n", sr, pd->sr, (pd->msg->flags & I2C_M_RD) ? "read" : "write", pd->pos, pd->msg->len); /* Kick off TxDMA after preface was done */ if (pd->dma_direction == DMA_TO_DEVICE && pd->pos == 0) iic_set_clr(pd, ICIC, ICIC_TDMAE, 0); else if (sr & (ICSR_AL | ICSR_TACK)) /* don't interrupt transaction - continue to issue stop */ iic_wr(pd, ICSR, sr & ~(ICSR_AL | ICSR_TACK)); else if (pd->msg->flags & I2C_M_RD) wakeup = sh_mobile_i2c_isr_rx(pd); else wakeup = sh_mobile_i2c_isr_tx(pd); /* Kick off RxDMA after preface was done */ if (pd->dma_direction == DMA_FROM_DEVICE && pd->pos == 1) iic_set_clr(pd, ICIC, ICIC_RDMAE, 0); if (sr & ICSR_WAIT) /* TODO: add delay here to support slow acks */ iic_wr(pd, ICSR, sr & ~ICSR_WAIT); if (wakeup) { pd->sr |= SW_DONE; if (!pd->atomic_xfer) wake_up(&pd->wait); } /* defeat write posting to avoid spurious WAIT interrupts */ iic_rd(pd, ICSR); return IRQ_HANDLED; } static void sh_mobile_i2c_cleanup_dma(struct sh_mobile_i2c_data *pd, bool terminate) { struct dma_chan *chan = pd->dma_direction == DMA_FROM_DEVICE ? pd->dma_rx : pd->dma_tx; /* only allowed from thread context! 
*/ if (terminate) dmaengine_terminate_sync(chan); dma_unmap_single(chan->device->dev, sg_dma_address(&pd->sg), pd->msg->len, pd->dma_direction); pd->dma_direction = DMA_NONE; } static void sh_mobile_i2c_dma_callback(void *data) { struct sh_mobile_i2c_data *pd = data; sh_mobile_i2c_cleanup_dma(pd, false); pd->pos = pd->msg->len; pd->stop_after_dma = true; iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE); } static struct dma_chan *sh_mobile_i2c_request_dma_chan(struct device *dev, enum dma_transfer_direction dir, dma_addr_t port_addr) { struct dma_chan *chan; struct dma_slave_config cfg; char *chan_name = dir == DMA_MEM_TO_DEV ? "tx" : "rx"; int ret; chan = dma_request_chan(dev, chan_name); if (IS_ERR(chan)) { dev_dbg(dev, "request_channel failed for %s (%ld)\n", chan_name, PTR_ERR(chan)); return chan; } memset(&cfg, 0, sizeof(cfg)); cfg.direction = dir; if (dir == DMA_MEM_TO_DEV) { cfg.dst_addr = port_addr; cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; } else { cfg.src_addr = port_addr; cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; } ret = dmaengine_slave_config(chan, &cfg); if (ret) { dev_dbg(dev, "slave_config failed for %s (%d)\n", chan_name, ret); dma_release_channel(chan); return ERR_PTR(ret); } dev_dbg(dev, "got DMA channel for %s\n", chan_name); return chan; } static void sh_mobile_i2c_xfer_dma(struct sh_mobile_i2c_data *pd) { bool read = pd->msg->flags & I2C_M_RD; enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE; struct dma_chan *chan = read ? pd->dma_rx : pd->dma_tx; struct dma_async_tx_descriptor *txdesc; dma_addr_t dma_addr; dma_cookie_t cookie; if (PTR_ERR(chan) == -EPROBE_DEFER) { if (read) chan = pd->dma_rx = sh_mobile_i2c_request_dma_chan(pd->dev, DMA_DEV_TO_MEM, pd->res->start + ICDR); else chan = pd->dma_tx = sh_mobile_i2c_request_dma_chan(pd->dev, DMA_MEM_TO_DEV, pd->res->start + ICDR); } if (IS_ERR(chan)) return; dma_addr = dma_map_single(chan->device->dev, pd->dma_buf, pd->msg->len, dir); if (dma_mapping_error(chan->device->dev, dma_addr)) { dev_dbg(pd->dev, "dma map failed, using PIO\n"); return; } sg_dma_len(&pd->sg) = pd->msg->len; sg_dma_address(&pd->sg) = dma_addr; pd->dma_direction = dir; txdesc = dmaengine_prep_slave_sg(chan, &pd->sg, 1, read ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!txdesc) { dev_dbg(pd->dev, "dma prep slave sg failed, using PIO\n"); sh_mobile_i2c_cleanup_dma(pd, false); return; } txdesc->callback = sh_mobile_i2c_dma_callback; txdesc->callback_param = pd; cookie = dmaengine_submit(txdesc); if (dma_submit_error(cookie)) { dev_dbg(pd->dev, "submitting dma failed, using PIO\n"); sh_mobile_i2c_cleanup_dma(pd, false); return; } dma_async_issue_pending(chan); } static void start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg, bool do_init) { if (do_init) { /* Initialize channel registers */ iic_wr(pd, ICCR, ICCR_SCP); /* Enable channel and configure rx ack */ iic_wr(pd, ICCR, ICCR_ICE | ICCR_SCP); /* Set the clock */ iic_wr(pd, ICCL, pd->iccl & 0xff); iic_wr(pd, ICCH, pd->icch & 0xff); } pd->msg = usr_msg; pd->pos = -1; pd->sr = 0; if (pd->atomic_xfer) return; pd->dma_buf = i2c_get_dma_safe_msg_buf(pd->msg, 8); if (pd->dma_buf) sh_mobile_i2c_xfer_dma(pd); /* Enable all interrupts to begin with */ iic_wr(pd, ICIC, ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE); } static int poll_dte(struct sh_mobile_i2c_data *pd) { int i; for (i = 1000; i; i--) { u_int8_t val = iic_rd(pd, ICSR); if (val & ICSR_DTE) break; if (val & ICSR_TACK) return -ENXIO; udelay(10); } return i ? 
0 : -ETIMEDOUT; } static int poll_busy(struct sh_mobile_i2c_data *pd) { int i; for (i = 1000; i; i--) { u_int8_t val = iic_rd(pd, ICSR); dev_dbg(pd->dev, "val 0x%02x pd->sr 0x%02x\n", val, pd->sr); /* the interrupt handler may wake us up before the * transfer is finished, so poll the hardware * until we're done. */ if (!(val & ICSR_BUSY)) { /* handle missing acknowledge and arbitration lost */ val |= pd->sr; if (val & ICSR_TACK) return -ENXIO; if (val & ICSR_AL) return -EAGAIN; break; } udelay(10); } return i ? 0 : -ETIMEDOUT; } static int sh_mobile_xfer(struct sh_mobile_i2c_data *pd, struct i2c_msg *msgs, int num) { struct i2c_msg *msg; int err = 0; int i; long time_left; /* Wake up device and enable clock */ pm_runtime_get_sync(pd->dev); /* Process all messages */ for (i = 0; i < num; i++) { bool do_start = pd->send_stop || !i; msg = &msgs[i]; pd->send_stop = i == num - 1 || msg->flags & I2C_M_STOP; pd->stop_after_dma = false; start_ch(pd, msg, do_start); if (do_start) i2c_op(pd, OP_START); if (pd->atomic_xfer) { unsigned long j = jiffies + pd->adap.timeout; time_left = time_before_eq(jiffies, j); while (time_left && !(pd->sr & (ICSR_TACK | SW_DONE))) { unsigned char sr = iic_rd(pd, ICSR); if (sr & (ICSR_AL | ICSR_TACK | ICSR_WAIT | ICSR_DTE)) { sh_mobile_i2c_isr(0, pd); udelay(150); } else { cpu_relax(); } time_left = time_before_eq(jiffies, j); } } else { /* The interrupt handler takes care of the rest... */ time_left = wait_event_timeout(pd->wait, pd->sr & (ICSR_TACK | SW_DONE), pd->adap.timeout); /* 'stop_after_dma' tells if DMA xfer was complete */ i2c_put_dma_safe_msg_buf(pd->dma_buf, pd->msg, pd->stop_after_dma); } if (!time_left) { dev_err(pd->dev, "Transfer request timed out\n"); if (pd->dma_direction != DMA_NONE) sh_mobile_i2c_cleanup_dma(pd, true); err = -ETIMEDOUT; break; } if (pd->send_stop) err = poll_busy(pd); else err = poll_dte(pd); if (err < 0) break; } /* Disable channel */ iic_wr(pd, ICCR, ICCR_SCP); /* Disable clock and mark device as idle */ pm_runtime_put_sync(pd->dev); return err ?: num; } static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) { struct sh_mobile_i2c_data *pd = i2c_get_adapdata(adapter); pd->atomic_xfer = false; return sh_mobile_xfer(pd, msgs, num); } static int sh_mobile_i2c_xfer_atomic(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) { struct sh_mobile_i2c_data *pd = i2c_get_adapdata(adapter); pd->atomic_xfer = true; return sh_mobile_xfer(pd, msgs, num); } static u32 sh_mobile_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING; } static const struct i2c_algorithm sh_mobile_i2c_algorithm = { .functionality = sh_mobile_i2c_func, .master_xfer = sh_mobile_i2c_xfer, .master_xfer_atomic = sh_mobile_i2c_xfer_atomic, }; static const struct i2c_adapter_quirks sh_mobile_i2c_quirks = { .flags = I2C_AQ_NO_ZERO_LEN_READ, }; /* * r8a7740 has an errata regarding I2C I/O pad reset needing this workaround. 
*/ static int sh_mobile_i2c_r8a7740_workaround(struct sh_mobile_i2c_data *pd) { iic_set_clr(pd, ICCR, ICCR_ICE, 0); iic_rd(pd, ICCR); /* dummy read */ iic_set_clr(pd, ICSTART, ICSTART_ICSTART, 0); iic_rd(pd, ICSTART); /* dummy read */ udelay(10); iic_wr(pd, ICCR, ICCR_SCP); iic_wr(pd, ICSTART, 0); udelay(10); iic_wr(pd, ICCR, ICCR_TRS); udelay(10); iic_wr(pd, ICCR, 0); udelay(10); iic_wr(pd, ICCR, ICCR_TRS); udelay(10); return sh_mobile_i2c_init(pd); } static const struct sh_mobile_dt_config default_dt_config = { .clks_per_count = 1, .setup = sh_mobile_i2c_init, }; static const struct sh_mobile_dt_config fast_clock_dt_config = { .clks_per_count = 2, .setup = sh_mobile_i2c_init, }; static const struct sh_mobile_dt_config v2_freq_calc_dt_config = { .clks_per_count = 2, .setup = sh_mobile_i2c_v2_init, }; static const struct sh_mobile_dt_config r8a7740_dt_config = { .clks_per_count = 1, .setup = sh_mobile_i2c_r8a7740_workaround, }; static const struct of_device_id sh_mobile_i2c_dt_ids[] = { { .compatible = "renesas,iic-r8a73a4", .data = &fast_clock_dt_config }, { .compatible = "renesas,iic-r8a7740", .data = &r8a7740_dt_config }, { .compatible = "renesas,iic-r8a774c0", .data = &v2_freq_calc_dt_config }, { .compatible = "renesas,iic-r8a7790", .data = &v2_freq_calc_dt_config }, { .compatible = "renesas,iic-r8a7791", .data = &v2_freq_calc_dt_config }, { .compatible = "renesas,iic-r8a7792", .data = &v2_freq_calc_dt_config }, { .compatible = "renesas,iic-r8a7793", .data = &v2_freq_calc_dt_config }, { .compatible = "renesas,iic-r8a7794", .data = &v2_freq_calc_dt_config }, { .compatible = "renesas,iic-r8a7795", .data = &v2_freq_calc_dt_config }, { .compatible = "renesas,iic-r8a77990", .data = &v2_freq_calc_dt_config }, { .compatible = "renesas,iic-sh73a0", .data = &fast_clock_dt_config }, { .compatible = "renesas,rcar-gen2-iic", .data = &v2_freq_calc_dt_config }, { .compatible = "renesas,rcar-gen3-iic", .data = &v2_freq_calc_dt_config }, { .compatible = "renesas,rmobile-iic", .data = &default_dt_config }, {}, }; MODULE_DEVICE_TABLE(of, sh_mobile_i2c_dt_ids); static void sh_mobile_i2c_release_dma(struct sh_mobile_i2c_data *pd) { if (!IS_ERR(pd->dma_tx)) { dma_release_channel(pd->dma_tx); pd->dma_tx = ERR_PTR(-EPROBE_DEFER); } if (!IS_ERR(pd->dma_rx)) { dma_release_channel(pd->dma_rx); pd->dma_rx = ERR_PTR(-EPROBE_DEFER); } } static int sh_mobile_i2c_hook_irqs(struct platform_device *dev, struct sh_mobile_i2c_data *pd) { struct device_node *np = dev_of_node(&dev->dev); int k = 0, ret; if (np) { int irq; while ((irq = platform_get_irq_optional(dev, k)) != -ENXIO) { if (irq < 0) return irq; ret = devm_request_irq(&dev->dev, irq, sh_mobile_i2c_isr, 0, dev_name(&dev->dev), pd); if (ret) { dev_err(&dev->dev, "cannot request IRQ %d\n", irq); return ret; } k++; } } else { struct resource *res; resource_size_t n; while ((res = platform_get_resource(dev, IORESOURCE_IRQ, k))) { for (n = res->start; n <= res->end; n++) { ret = devm_request_irq(&dev->dev, n, sh_mobile_i2c_isr, 0, dev_name(&dev->dev), pd); if (ret) { dev_err(&dev->dev, "cannot request IRQ %pa\n", &n); return ret; } } k++; } } return k > 0 ? 
0 : -ENOENT; } static int sh_mobile_i2c_probe(struct platform_device *dev) { struct sh_mobile_i2c_data *pd; struct i2c_adapter *adap; const struct sh_mobile_dt_config *config; int ret; u32 bus_speed; pd = devm_kzalloc(&dev->dev, sizeof(struct sh_mobile_i2c_data), GFP_KERNEL); if (!pd) return -ENOMEM; pd->clk = devm_clk_get(&dev->dev, NULL); if (IS_ERR(pd->clk)) { dev_err(&dev->dev, "cannot get clock\n"); return PTR_ERR(pd->clk); } ret = sh_mobile_i2c_hook_irqs(dev, pd); if (ret) return ret; pd->dev = &dev->dev; platform_set_drvdata(dev, pd); pd->reg = devm_platform_get_and_ioremap_resource(dev, 0, &pd->res); if (IS_ERR(pd->reg)) return PTR_ERR(pd->reg); ret = of_property_read_u32(dev->dev.of_node, "clock-frequency", &bus_speed); pd->bus_speed = (ret || !bus_speed) ? I2C_MAX_STANDARD_MODE_FREQ : bus_speed; pd->clks_per_count = 1; /* Newer variants come with two new bits in ICIC */ if (resource_size(pd->res) > 0x17) pd->flags |= IIC_FLAG_HAS_ICIC67; pm_runtime_enable(&dev->dev); pm_runtime_get_sync(&dev->dev); config = of_device_get_match_data(&dev->dev); if (config) { pd->clks_per_count = config->clks_per_count; ret = config->setup(pd); } else { ret = sh_mobile_i2c_init(pd); } pm_runtime_put_sync(&dev->dev); if (ret) return ret; /* Init DMA */ sg_init_table(&pd->sg, 1); pd->dma_direction = DMA_NONE; pd->dma_rx = pd->dma_tx = ERR_PTR(-EPROBE_DEFER); /* setup the private data */ adap = &pd->adap; i2c_set_adapdata(adap, pd); adap->owner = THIS_MODULE; adap->algo = &sh_mobile_i2c_algorithm; adap->quirks = &sh_mobile_i2c_quirks; adap->dev.parent = &dev->dev; adap->retries = 5; adap->nr = dev->id; adap->dev.of_node = dev->dev.of_node; strscpy(adap->name, dev->name, sizeof(adap->name)); spin_lock_init(&pd->lock); init_waitqueue_head(&pd->wait); ret = i2c_add_numbered_adapter(adap); if (ret < 0) { sh_mobile_i2c_release_dma(pd); return ret; } dev_info(&dev->dev, "I2C adapter %d, bus speed %lu Hz\n", adap->nr, pd->bus_speed); return 0; } static void sh_mobile_i2c_remove(struct platform_device *dev) { struct sh_mobile_i2c_data *pd = platform_get_drvdata(dev); i2c_del_adapter(&pd->adap); sh_mobile_i2c_release_dma(pd); pm_runtime_disable(&dev->dev); } static int sh_mobile_i2c_suspend(struct device *dev) { struct sh_mobile_i2c_data *pd = dev_get_drvdata(dev); i2c_mark_adapter_suspended(&pd->adap); return 0; } static int sh_mobile_i2c_resume(struct device *dev) { struct sh_mobile_i2c_data *pd = dev_get_drvdata(dev); i2c_mark_adapter_resumed(&pd->adap); return 0; } static const struct dev_pm_ops sh_mobile_i2c_pm_ops = { NOIRQ_SYSTEM_SLEEP_PM_OPS(sh_mobile_i2c_suspend, sh_mobile_i2c_resume) }; static struct platform_driver sh_mobile_i2c_driver = { .driver = { .name = "i2c-sh_mobile", .of_match_table = sh_mobile_i2c_dt_ids, .pm = pm_sleep_ptr(&sh_mobile_i2c_pm_ops), }, .probe = sh_mobile_i2c_probe, .remove_new = sh_mobile_i2c_remove, }; static int __init sh_mobile_i2c_adap_init(void) { return platform_driver_register(&sh_mobile_i2c_driver); } subsys_initcall(sh_mobile_i2c_adap_init); static void __exit sh_mobile_i2c_adap_exit(void) { platform_driver_unregister(&sh_mobile_i2c_driver); } module_exit(sh_mobile_i2c_adap_exit); MODULE_DESCRIPTION("SuperH Mobile I2C Bus Controller driver"); MODULE_AUTHOR("Magnus Damm"); MODULE_AUTHOR("Wolfram Sang"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:i2c-sh_mobile");
linux-master
drivers/i2c/busses/i2c-sh_mobile.c
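The sh_mobile_i2c_iccl()/sh_mobile_i2c_icch() helpers in i2c-sh_mobile.c above express the SCL low/high periods in units of 0.1 µs and scale them by the peripheral clock in kHz, rounding to the nearest count. The sketch below works that arithmetic through once, assuming a hypothetical 104 MHz peripheral clock with clks_per_count = 2; in the driver the real rate comes from clk_get_rate() at probe time, and the helper names here are illustrative only.

#include <linux/types.h>

static unsigned long example_count_khz(void)
{
	/* hypothetical 104 MHz peripheral clock, clks_per_count = 2 */
	return 104000000UL / 1000 / 2;	/* 52000 kHz */
}

static u32 example_std_mode_iccl(void)
{
	/* tLOW = 4.7 us, tf = 0.3 us: (52000 * 50 + 5000) / 10000 = 260 */
	return (example_count_khz() * (47 + 3) + 5000) / 10000;
}

static u32 example_std_mode_icch(void)
{
	/* tHIGH = 4.0 us, tf = 0.3 us: (52000 * 43 + 5000) / 10000 = 224 */
	return (example_count_khz() * (40 + 3) + 5000) / 10000;
}

/*
 * 260 = 0x104 does not fit the 8-bit ICCL register, so on variants with
 * IIC_FLAG_HAS_ICIC67 the ninth bit lands in ICIC_ICCLB8, exactly the case
 * sh_mobile_i2c_check_timing() handles; 224 = 0xe0 fits in ICCH directly.
 */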
// SPDX-License-Identifier: GPL-2.0-only // Copyright (C) 2013 Broadcom Corporation #include <linux/device.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/slab.h> /* Hardware register offsets and field definitions */ #define CS_OFFSET 0x00000020 #define CS_ACK_SHIFT 3 #define CS_ACK_MASK 0x00000008 #define CS_ACK_CMD_GEN_START 0x00000000 #define CS_ACK_CMD_GEN_RESTART 0x00000001 #define CS_CMD_SHIFT 1 #define CS_CMD_CMD_NO_ACTION 0x00000000 #define CS_CMD_CMD_START_RESTART 0x00000001 #define CS_CMD_CMD_STOP 0x00000002 #define CS_EN_SHIFT 0 #define CS_EN_CMD_ENABLE_BSC 0x00000001 #define TIM_OFFSET 0x00000024 #define TIM_PRESCALE_SHIFT 6 #define TIM_P_SHIFT 3 #define TIM_NO_DIV_SHIFT 2 #define TIM_DIV_SHIFT 0 #define DAT_OFFSET 0x00000028 #define TOUT_OFFSET 0x0000002c #define TXFCR_OFFSET 0x0000003c #define TXFCR_FIFO_FLUSH_MASK 0x00000080 #define TXFCR_FIFO_EN_MASK 0x00000040 #define IER_OFFSET 0x00000044 #define IER_READ_COMPLETE_INT_MASK 0x00000010 #define IER_I2C_INT_EN_MASK 0x00000008 #define IER_FIFO_INT_EN_MASK 0x00000002 #define IER_NOACK_EN_MASK 0x00000001 #define ISR_OFFSET 0x00000048 #define ISR_RESERVED_MASK 0xffffff60 #define ISR_CMDBUSY_MASK 0x00000080 #define ISR_READ_COMPLETE_MASK 0x00000010 #define ISR_SES_DONE_MASK 0x00000008 #define ISR_ERR_MASK 0x00000004 #define ISR_TXFIFOEMPTY_MASK 0x00000002 #define ISR_NOACK_MASK 0x00000001 #define CLKEN_OFFSET 0x0000004C #define CLKEN_AUTOSENSE_OFF_MASK 0x00000080 #define CLKEN_M_SHIFT 4 #define CLKEN_N_SHIFT 1 #define CLKEN_CLKEN_MASK 0x00000001 #define FIFO_STATUS_OFFSET 0x00000054 #define FIFO_STATUS_RXFIFO_EMPTY_MASK 0x00000004 #define FIFO_STATUS_TXFIFO_EMPTY_MASK 0x00000010 #define HSTIM_OFFSET 0x00000058 #define HSTIM_HS_MODE_MASK 0x00008000 #define HSTIM_HS_HOLD_SHIFT 10 #define HSTIM_HS_HIGH_PHASE_SHIFT 5 #define HSTIM_HS_SETUP_SHIFT 0 #define PADCTL_OFFSET 0x0000005c #define PADCTL_PAD_OUT_EN_MASK 0x00000004 #define RXFCR_OFFSET 0x00000068 #define RXFCR_NACK_EN_SHIFT 7 #define RXFCR_READ_COUNT_SHIFT 0 #define RXFIFORDOUT_OFFSET 0x0000006c /* Locally used constants */ #define MAX_RX_FIFO_SIZE 64U /* bytes */ #define MAX_TX_FIFO_SIZE 64U /* bytes */ #define STD_EXT_CLK_FREQ 13000000UL #define HS_EXT_CLK_FREQ 104000000UL #define MASTERCODE 0x08 /* Mastercodes are 0000_1xxxb */ #define I2C_TIMEOUT 100 /* msecs */ /* Operations that can be commanded to the controller */ enum bcm_kona_cmd_t { BCM_CMD_NOACTION = 0, BCM_CMD_START, BCM_CMD_RESTART, BCM_CMD_STOP, }; enum bus_speed_index { BCM_SPD_100K = 0, BCM_SPD_400K, BCM_SPD_1MHZ, }; enum hs_bus_speed_index { BCM_SPD_3P4MHZ = 0, }; /* Internal divider settings for standard mode, fast mode and fast mode plus */ struct bus_speed_cfg { uint8_t time_m; /* Number of cycles for setup time */ uint8_t time_n; /* Number of cycles for hold time */ uint8_t prescale; /* Prescale divider */ uint8_t time_p; /* Timing coefficient */ uint8_t no_div; /* Disable clock divider */ uint8_t time_div; /* Post-prescale divider */ }; /* Internal divider settings for high-speed mode */ struct hs_bus_speed_cfg { uint8_t hs_hold; /* Number of clock cycles SCL stays low until the end of bit period */ uint8_t hs_high_phase; /* Number of clock cycles SCL stays high before it falls */ uint8_t hs_setup; /* Number of clock cycles SCL stays low before it rises */ uint8_t prescale; /* Prescale divider */ uint8_t time_p; /* Timing 
coefficient */ uint8_t no_div; /* Disable clock divider */ uint8_t time_div; /* Post-prescale divider */ }; static const struct bus_speed_cfg std_cfg_table[] = { [BCM_SPD_100K] = {0x01, 0x01, 0x03, 0x06, 0x00, 0x02}, [BCM_SPD_400K] = {0x05, 0x01, 0x03, 0x05, 0x01, 0x02}, [BCM_SPD_1MHZ] = {0x01, 0x01, 0x03, 0x01, 0x01, 0x03}, }; static const struct hs_bus_speed_cfg hs_cfg_table[] = { [BCM_SPD_3P4MHZ] = {0x01, 0x08, 0x14, 0x00, 0x06, 0x01, 0x00}, }; struct bcm_kona_i2c_dev { struct device *device; void __iomem *base; int irq; struct clk *external_clk; struct i2c_adapter adapter; struct completion done; const struct bus_speed_cfg *std_cfg; const struct hs_bus_speed_cfg *hs_cfg; }; static void bcm_kona_i2c_send_cmd_to_ctrl(struct bcm_kona_i2c_dev *dev, enum bcm_kona_cmd_t cmd) { dev_dbg(dev->device, "%s, %d\n", __func__, cmd); switch (cmd) { case BCM_CMD_NOACTION: writel((CS_CMD_CMD_NO_ACTION << CS_CMD_SHIFT) | (CS_EN_CMD_ENABLE_BSC << CS_EN_SHIFT), dev->base + CS_OFFSET); break; case BCM_CMD_START: writel((CS_ACK_CMD_GEN_START << CS_ACK_SHIFT) | (CS_CMD_CMD_START_RESTART << CS_CMD_SHIFT) | (CS_EN_CMD_ENABLE_BSC << CS_EN_SHIFT), dev->base + CS_OFFSET); break; case BCM_CMD_RESTART: writel((CS_ACK_CMD_GEN_RESTART << CS_ACK_SHIFT) | (CS_CMD_CMD_START_RESTART << CS_CMD_SHIFT) | (CS_EN_CMD_ENABLE_BSC << CS_EN_SHIFT), dev->base + CS_OFFSET); break; case BCM_CMD_STOP: writel((CS_CMD_CMD_STOP << CS_CMD_SHIFT) | (CS_EN_CMD_ENABLE_BSC << CS_EN_SHIFT), dev->base + CS_OFFSET); break; default: dev_err(dev->device, "Unknown command %d\n", cmd); } } static void bcm_kona_i2c_enable_clock(struct bcm_kona_i2c_dev *dev) { writel(readl(dev->base + CLKEN_OFFSET) | CLKEN_CLKEN_MASK, dev->base + CLKEN_OFFSET); } static void bcm_kona_i2c_disable_clock(struct bcm_kona_i2c_dev *dev) { writel(readl(dev->base + CLKEN_OFFSET) & ~CLKEN_CLKEN_MASK, dev->base + CLKEN_OFFSET); } static irqreturn_t bcm_kona_i2c_isr(int irq, void *devid) { struct bcm_kona_i2c_dev *dev = devid; uint32_t status = readl(dev->base + ISR_OFFSET); if ((status & ~ISR_RESERVED_MASK) == 0) return IRQ_NONE; /* Must flush the TX FIFO when NAK detected */ if (status & ISR_NOACK_MASK) writel(TXFCR_FIFO_FLUSH_MASK | TXFCR_FIFO_EN_MASK, dev->base + TXFCR_OFFSET); writel(status & ~ISR_RESERVED_MASK, dev->base + ISR_OFFSET); complete(&dev->done); return IRQ_HANDLED; } /* Wait for ISR_CMDBUSY_MASK to go low before writing to CS, DAT, or RCD */ static int bcm_kona_i2c_wait_if_busy(struct bcm_kona_i2c_dev *dev) { unsigned long timeout = jiffies + msecs_to_jiffies(I2C_TIMEOUT); while (readl(dev->base + ISR_OFFSET) & ISR_CMDBUSY_MASK) if (time_after(jiffies, timeout)) { dev_err(dev->device, "CMDBUSY timeout\n"); return -ETIMEDOUT; } return 0; } /* Send command to I2C bus */ static int bcm_kona_send_i2c_cmd(struct bcm_kona_i2c_dev *dev, enum bcm_kona_cmd_t cmd) { int rc; unsigned long time_left = msecs_to_jiffies(I2C_TIMEOUT); /* Make sure the hardware is ready */ rc = bcm_kona_i2c_wait_if_busy(dev); if (rc < 0) return rc; /* Unmask the session done interrupt */ writel(IER_I2C_INT_EN_MASK, dev->base + IER_OFFSET); /* Mark as incomplete before sending the command */ reinit_completion(&dev->done); /* Send the command */ bcm_kona_i2c_send_cmd_to_ctrl(dev, cmd); /* Wait for transaction to finish or timeout */ time_left = wait_for_completion_timeout(&dev->done, time_left); /* Mask all interrupts */ writel(0, dev->base + IER_OFFSET); if (!time_left) { dev_err(dev->device, "controller timed out\n"); rc = -ETIMEDOUT; } /* Clear command */ bcm_kona_i2c_send_cmd_to_ctrl(dev, 
BCM_CMD_NOACTION); return rc; } /* Read a single RX FIFO worth of data from the i2c bus */ static int bcm_kona_i2c_read_fifo_single(struct bcm_kona_i2c_dev *dev, uint8_t *buf, unsigned int len, unsigned int last_byte_nak) { unsigned long time_left = msecs_to_jiffies(I2C_TIMEOUT); /* Mark as incomplete before starting the RX FIFO */ reinit_completion(&dev->done); /* Unmask the read complete interrupt */ writel(IER_READ_COMPLETE_INT_MASK, dev->base + IER_OFFSET); /* Start the RX FIFO */ writel((last_byte_nak << RXFCR_NACK_EN_SHIFT) | (len << RXFCR_READ_COUNT_SHIFT), dev->base + RXFCR_OFFSET); /* Wait for FIFO read to complete */ time_left = wait_for_completion_timeout(&dev->done, time_left); /* Mask all interrupts */ writel(0, dev->base + IER_OFFSET); if (!time_left) { dev_err(dev->device, "RX FIFO time out\n"); return -EREMOTEIO; } /* Read data from FIFO */ for (; len > 0; len--, buf++) *buf = readl(dev->base + RXFIFORDOUT_OFFSET); return 0; } /* Read any amount of data using the RX FIFO from the i2c bus */ static int bcm_kona_i2c_read_fifo(struct bcm_kona_i2c_dev *dev, struct i2c_msg *msg) { unsigned int bytes_to_read = MAX_RX_FIFO_SIZE; unsigned int last_byte_nak = 0; unsigned int bytes_read = 0; int rc; uint8_t *tmp_buf = msg->buf; while (bytes_read < msg->len) { if (msg->len - bytes_read <= MAX_RX_FIFO_SIZE) { last_byte_nak = 1; /* NAK last byte of transfer */ bytes_to_read = msg->len - bytes_read; } rc = bcm_kona_i2c_read_fifo_single(dev, tmp_buf, bytes_to_read, last_byte_nak); if (rc < 0) return -EREMOTEIO; bytes_read += bytes_to_read; tmp_buf += bytes_to_read; } return 0; } /* Write a single byte of data to the i2c bus */ static int bcm_kona_i2c_write_byte(struct bcm_kona_i2c_dev *dev, uint8_t data, unsigned int nak_expected) { int rc; unsigned long time_left = msecs_to_jiffies(I2C_TIMEOUT); unsigned int nak_received; /* Make sure the hardware is ready */ rc = bcm_kona_i2c_wait_if_busy(dev); if (rc < 0) return rc; /* Clear pending session done interrupt */ writel(ISR_SES_DONE_MASK, dev->base + ISR_OFFSET); /* Unmask the session done interrupt */ writel(IER_I2C_INT_EN_MASK, dev->base + IER_OFFSET); /* Mark as incomplete before sending the data */ reinit_completion(&dev->done); /* Send one byte of data */ writel(data, dev->base + DAT_OFFSET); /* Wait for byte to be written */ time_left = wait_for_completion_timeout(&dev->done, time_left); /* Mask all interrupts */ writel(0, dev->base + IER_OFFSET); if (!time_left) { dev_dbg(dev->device, "controller timed out\n"); return -ETIMEDOUT; } nak_received = readl(dev->base + CS_OFFSET) & CS_ACK_MASK ? 
1 : 0; if (nak_received ^ nak_expected) { dev_dbg(dev->device, "unexpected NAK/ACK\n"); return -EREMOTEIO; } return 0; } /* Write a single TX FIFO worth of data to the i2c bus */ static int bcm_kona_i2c_write_fifo_single(struct bcm_kona_i2c_dev *dev, uint8_t *buf, unsigned int len) { int k; unsigned long time_left = msecs_to_jiffies(I2C_TIMEOUT); unsigned int fifo_status; /* Mark as incomplete before sending data to the TX FIFO */ reinit_completion(&dev->done); /* Unmask the fifo empty and nak interrupt */ writel(IER_FIFO_INT_EN_MASK | IER_NOACK_EN_MASK, dev->base + IER_OFFSET); /* Disable IRQ to load a FIFO worth of data without interruption */ disable_irq(dev->irq); /* Write data into FIFO */ for (k = 0; k < len; k++) writel(buf[k], (dev->base + DAT_OFFSET)); /* Enable IRQ now that data has been loaded */ enable_irq(dev->irq); /* Wait for FIFO to empty */ do { time_left = wait_for_completion_timeout(&dev->done, time_left); fifo_status = readl(dev->base + FIFO_STATUS_OFFSET); } while (time_left && !(fifo_status & FIFO_STATUS_TXFIFO_EMPTY_MASK)); /* Mask all interrupts */ writel(0, dev->base + IER_OFFSET); /* Check if there was a NAK */ if (readl(dev->base + CS_OFFSET) & CS_ACK_MASK) { dev_err(dev->device, "unexpected NAK\n"); return -EREMOTEIO; } /* Check if a timeout occurred */ if (!time_left) { dev_err(dev->device, "completion timed out\n"); return -EREMOTEIO; } return 0; } /* Write any amount of data using TX FIFO to the i2c bus */ static int bcm_kona_i2c_write_fifo(struct bcm_kona_i2c_dev *dev, struct i2c_msg *msg) { unsigned int bytes_to_write = MAX_TX_FIFO_SIZE; unsigned int bytes_written = 0; int rc; uint8_t *tmp_buf = msg->buf; while (bytes_written < msg->len) { if (msg->len - bytes_written <= MAX_TX_FIFO_SIZE) bytes_to_write = msg->len - bytes_written; rc = bcm_kona_i2c_write_fifo_single(dev, tmp_buf, bytes_to_write); if (rc < 0) return -EREMOTEIO; bytes_written += bytes_to_write; tmp_buf += bytes_to_write; } return 0; } /* Send i2c address */ static int bcm_kona_i2c_do_addr(struct bcm_kona_i2c_dev *dev, struct i2c_msg *msg) { unsigned char addr; if (msg->flags & I2C_M_TEN) { /* First byte is 11110XX0 where XX is upper 2 bits */ addr = 0xF0 | ((msg->addr & 0x300) >> 7); if (bcm_kona_i2c_write_byte(dev, addr, 0) < 0) return -EREMOTEIO; /* Second byte is the remaining 8 bits */ addr = msg->addr & 0xFF; if (bcm_kona_i2c_write_byte(dev, addr, 0) < 0) return -EREMOTEIO; if (msg->flags & I2C_M_RD) { /* For read, send restart command */ if (bcm_kona_send_i2c_cmd(dev, BCM_CMD_RESTART) < 0) return -EREMOTEIO; /* Then re-send the first byte with the read bit set */ addr = 0xF0 | ((msg->addr & 0x300) >> 7) | 0x01; if (bcm_kona_i2c_write_byte(dev, addr, 0) < 0) return -EREMOTEIO; } } else { addr = i2c_8bit_addr_from_msg(msg); if (bcm_kona_i2c_write_byte(dev, addr, 0) < 0) return -EREMOTEIO; } return 0; } static void bcm_kona_i2c_enable_autosense(struct bcm_kona_i2c_dev *dev) { writel(readl(dev->base + CLKEN_OFFSET) & ~CLKEN_AUTOSENSE_OFF_MASK, dev->base + CLKEN_OFFSET); } static void bcm_kona_i2c_config_timing(struct bcm_kona_i2c_dev *dev) { writel(readl(dev->base + HSTIM_OFFSET) & ~HSTIM_HS_MODE_MASK, dev->base + HSTIM_OFFSET); writel((dev->std_cfg->prescale << TIM_PRESCALE_SHIFT) | (dev->std_cfg->time_p << TIM_P_SHIFT) | (dev->std_cfg->no_div << TIM_NO_DIV_SHIFT) | (dev->std_cfg->time_div << TIM_DIV_SHIFT), dev->base + TIM_OFFSET); writel((dev->std_cfg->time_m << CLKEN_M_SHIFT) | (dev->std_cfg->time_n << CLKEN_N_SHIFT) | CLKEN_CLKEN_MASK, dev->base + CLKEN_OFFSET); } static void 
bcm_kona_i2c_config_timing_hs(struct bcm_kona_i2c_dev *dev) { writel((dev->hs_cfg->prescale << TIM_PRESCALE_SHIFT) | (dev->hs_cfg->time_p << TIM_P_SHIFT) | (dev->hs_cfg->no_div << TIM_NO_DIV_SHIFT) | (dev->hs_cfg->time_div << TIM_DIV_SHIFT), dev->base + TIM_OFFSET); writel((dev->hs_cfg->hs_hold << HSTIM_HS_HOLD_SHIFT) | (dev->hs_cfg->hs_high_phase << HSTIM_HS_HIGH_PHASE_SHIFT) | (dev->hs_cfg->hs_setup << HSTIM_HS_SETUP_SHIFT), dev->base + HSTIM_OFFSET); writel(readl(dev->base + HSTIM_OFFSET) | HSTIM_HS_MODE_MASK, dev->base + HSTIM_OFFSET); } static int bcm_kona_i2c_switch_to_hs(struct bcm_kona_i2c_dev *dev) { int rc; /* Send mastercode at standard speed */ rc = bcm_kona_i2c_write_byte(dev, MASTERCODE, 1); if (rc < 0) { pr_err("High speed handshake failed\n"); return rc; } /* Configure external clock to higher frequency */ rc = clk_set_rate(dev->external_clk, HS_EXT_CLK_FREQ); if (rc) { dev_err(dev->device, "%s: clk_set_rate returned %d\n", __func__, rc); return rc; } /* Reconfigure internal dividers */ bcm_kona_i2c_config_timing_hs(dev); /* Send a restart command */ rc = bcm_kona_send_i2c_cmd(dev, BCM_CMD_RESTART); if (rc < 0) dev_err(dev->device, "High speed restart command failed\n"); return rc; } static int bcm_kona_i2c_switch_to_std(struct bcm_kona_i2c_dev *dev) { int rc; /* Reconfigure internal dividers */ bcm_kona_i2c_config_timing(dev); /* Configure external clock to lower frequency */ rc = clk_set_rate(dev->external_clk, STD_EXT_CLK_FREQ); if (rc) { dev_err(dev->device, "%s: clk_set_rate returned %d\n", __func__, rc); } return rc; } /* Master transfer function */ static int bcm_kona_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg msgs[], int num) { struct bcm_kona_i2c_dev *dev = i2c_get_adapdata(adapter); struct i2c_msg *pmsg; int rc = 0; int i; rc = clk_prepare_enable(dev->external_clk); if (rc) { dev_err(dev->device, "%s: peri clock enable failed. 
err %d\n", __func__, rc); return rc; } /* Enable pad output */ writel(0, dev->base + PADCTL_OFFSET); /* Enable internal clocks */ bcm_kona_i2c_enable_clock(dev); /* Send start command */ rc = bcm_kona_send_i2c_cmd(dev, BCM_CMD_START); if (rc < 0) { dev_err(dev->device, "Start command failed rc = %d\n", rc); goto xfer_disable_pad; } /* Switch to high speed if applicable */ if (dev->hs_cfg) { rc = bcm_kona_i2c_switch_to_hs(dev); if (rc < 0) goto xfer_send_stop; } /* Loop through all messages */ for (i = 0; i < num; i++) { pmsg = &msgs[i]; /* Send restart for subsequent messages */ if ((i != 0) && ((pmsg->flags & I2C_M_NOSTART) == 0)) { rc = bcm_kona_send_i2c_cmd(dev, BCM_CMD_RESTART); if (rc < 0) { dev_err(dev->device, "restart cmd failed rc = %d\n", rc); goto xfer_send_stop; } } /* Send slave address */ if (!(pmsg->flags & I2C_M_NOSTART)) { rc = bcm_kona_i2c_do_addr(dev, pmsg); if (rc < 0) { dev_err(dev->device, "NAK from addr %2.2x msg#%d rc = %d\n", pmsg->addr, i, rc); goto xfer_send_stop; } } /* Perform data transfer */ if (pmsg->flags & I2C_M_RD) { rc = bcm_kona_i2c_read_fifo(dev, pmsg); if (rc < 0) { dev_err(dev->device, "read failure\n"); goto xfer_send_stop; } } else { rc = bcm_kona_i2c_write_fifo(dev, pmsg); if (rc < 0) { dev_err(dev->device, "write failure"); goto xfer_send_stop; } } } rc = num; xfer_send_stop: /* Send a STOP command */ bcm_kona_send_i2c_cmd(dev, BCM_CMD_STOP); /* Return from high speed if applicable */ if (dev->hs_cfg) { int hs_rc = bcm_kona_i2c_switch_to_std(dev); if (hs_rc) rc = hs_rc; } xfer_disable_pad: /* Disable pad output */ writel(PADCTL_PAD_OUT_EN_MASK, dev->base + PADCTL_OFFSET); /* Stop internal clock */ bcm_kona_i2c_disable_clock(dev); clk_disable_unprepare(dev->external_clk); return rc; } static uint32_t bcm_kona_i2c_functionality(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR | I2C_FUNC_NOSTART; } static const struct i2c_algorithm bcm_algo = { .master_xfer = bcm_kona_i2c_xfer, .functionality = bcm_kona_i2c_functionality, }; static int bcm_kona_i2c_assign_bus_speed(struct bcm_kona_i2c_dev *dev) { unsigned int bus_speed; int ret = of_property_read_u32(dev->device->of_node, "clock-frequency", &bus_speed); if (ret < 0) { dev_err(dev->device, "missing clock-frequency property\n"); return -ENODEV; } switch (bus_speed) { case I2C_MAX_STANDARD_MODE_FREQ: dev->std_cfg = &std_cfg_table[BCM_SPD_100K]; break; case I2C_MAX_FAST_MODE_FREQ: dev->std_cfg = &std_cfg_table[BCM_SPD_400K]; break; case I2C_MAX_FAST_MODE_PLUS_FREQ: dev->std_cfg = &std_cfg_table[BCM_SPD_1MHZ]; break; case I2C_MAX_HIGH_SPEED_MODE_FREQ: /* Send mastercode at 100k */ dev->std_cfg = &std_cfg_table[BCM_SPD_100K]; dev->hs_cfg = &hs_cfg_table[BCM_SPD_3P4MHZ]; break; default: pr_err("%d hz bus speed not supported\n", bus_speed); pr_err("Valid speeds are 100khz, 400khz, 1mhz, and 3.4mhz\n"); return -EINVAL; } return 0; } static int bcm_kona_i2c_probe(struct platform_device *pdev) { int rc = 0; struct bcm_kona_i2c_dev *dev; struct i2c_adapter *adap; /* Allocate memory for private data structure */ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; platform_set_drvdata(pdev, dev); dev->device = &pdev->dev; init_completion(&dev->done); /* Map hardware registers */ dev->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(dev->base)) return PTR_ERR(dev->base); /* Get and enable external clock */ dev->external_clk = devm_clk_get(dev->device, NULL); if (IS_ERR(dev->external_clk)) { dev_err(dev->device, "couldn't get 
clock\n"); return -ENODEV; } rc = clk_set_rate(dev->external_clk, STD_EXT_CLK_FREQ); if (rc) { dev_err(dev->device, "%s: clk_set_rate returned %d\n", __func__, rc); return rc; } rc = clk_prepare_enable(dev->external_clk); if (rc) { dev_err(dev->device, "couldn't enable clock\n"); return rc; } /* Parse bus speed */ rc = bcm_kona_i2c_assign_bus_speed(dev); if (rc) goto probe_disable_clk; /* Enable internal clocks */ bcm_kona_i2c_enable_clock(dev); /* Configure internal dividers */ bcm_kona_i2c_config_timing(dev); /* Disable timeout */ writel(0, dev->base + TOUT_OFFSET); /* Enable autosense */ bcm_kona_i2c_enable_autosense(dev); /* Enable TX FIFO */ writel(TXFCR_FIFO_FLUSH_MASK | TXFCR_FIFO_EN_MASK, dev->base + TXFCR_OFFSET); /* Mask all interrupts */ writel(0, dev->base + IER_OFFSET); /* Clear all pending interrupts */ writel(ISR_CMDBUSY_MASK | ISR_READ_COMPLETE_MASK | ISR_SES_DONE_MASK | ISR_ERR_MASK | ISR_TXFIFOEMPTY_MASK | ISR_NOACK_MASK, dev->base + ISR_OFFSET); /* Get the interrupt number */ dev->irq = platform_get_irq(pdev, 0); if (dev->irq < 0) { rc = dev->irq; goto probe_disable_clk; } /* register the ISR handler */ rc = devm_request_irq(&pdev->dev, dev->irq, bcm_kona_i2c_isr, IRQF_SHARED, pdev->name, dev); if (rc) { dev_err(dev->device, "failed to request irq %i\n", dev->irq); goto probe_disable_clk; } /* Enable the controller but leave it idle */ bcm_kona_i2c_send_cmd_to_ctrl(dev, BCM_CMD_NOACTION); /* Disable pad output */ writel(PADCTL_PAD_OUT_EN_MASK, dev->base + PADCTL_OFFSET); /* Disable internal clock */ bcm_kona_i2c_disable_clock(dev); /* Disable external clock */ clk_disable_unprepare(dev->external_clk); /* Add the i2c adapter */ adap = &dev->adapter; i2c_set_adapdata(adap, dev); adap->owner = THIS_MODULE; strscpy(adap->name, "Broadcom I2C adapter", sizeof(adap->name)); adap->algo = &bcm_algo; adap->dev.parent = &pdev->dev; adap->dev.of_node = pdev->dev.of_node; rc = i2c_add_adapter(adap); if (rc) return rc; dev_info(dev->device, "device registered successfully\n"); return 0; probe_disable_clk: bcm_kona_i2c_disable_clock(dev); clk_disable_unprepare(dev->external_clk); return rc; } static void bcm_kona_i2c_remove(struct platform_device *pdev) { struct bcm_kona_i2c_dev *dev = platform_get_drvdata(pdev); i2c_del_adapter(&dev->adapter); } static const struct of_device_id bcm_kona_i2c_of_match[] = { {.compatible = "brcm,kona-i2c",}, {}, }; MODULE_DEVICE_TABLE(of, bcm_kona_i2c_of_match); static struct platform_driver bcm_kona_i2c_driver = { .driver = { .name = "bcm-kona-i2c", .of_match_table = bcm_kona_i2c_of_match, }, .probe = bcm_kona_i2c_probe, .remove_new = bcm_kona_i2c_remove, }; module_platform_driver(bcm_kona_i2c_driver); MODULE_AUTHOR("Tim Kryger <[email protected]>"); MODULE_DESCRIPTION("Broadcom Kona I2C Driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/i2c/busses/i2c-bcm-kona.c
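bcm_kona_i2c_do_addr() in i2c-bcm-kona.c above hand-builds the two-byte 10-bit address sequence defined by the I2C specification (first byte 11110 A9 A8 R/W, second byte A7..A0). The sketch below restates that encoding with a hypothetical 10-bit address of 0x2a3 worked through in the comments; the helper names are illustrative and not part of the driver.

#include <linux/types.h>

/*
 * For addr10 = 0x2a3:
 *   first byte (write)        = 0xf0 | ((0x2a3 & 0x300) >> 7) = 0xf4
 *   second byte               = 0x2a3 & 0xff                  = 0xa3
 *   first byte re-sent (read) = 0xf4 | 0x01                   = 0xf5
 */
static u8 example_ten_bit_first_byte(u16 addr10, bool read)
{
	/* 11110 A9 A8 R/W, per the I2C specification */
	return 0xf0 | ((addr10 & 0x300) >> 7) | (read ? 0x01 : 0x00);
}

static u8 example_ten_bit_second_byte(u16 addr10)
{
	/* remaining low eight address bits A7..A0 */
	return addr10 & 0xff;
}

As in the driver, a read to a 10-bit target sends both bytes with the write bit first, then issues a repeated START and re-sends only the first byte with the read bit set.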
/* * Copyright (c) 2003-2015 Broadcom Corporation * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/acpi.h> #include <linux/clk.h> #include <linux/completion.h> #include <linux/i2c.h> #include <linux/i2c-smbus.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/delay.h> #define XLP9XX_I2C_DIV 0x0 #define XLP9XX_I2C_CTRL 0x1 #define XLP9XX_I2C_CMD 0x2 #define XLP9XX_I2C_STATUS 0x3 #define XLP9XX_I2C_MTXFIFO 0x4 #define XLP9XX_I2C_MRXFIFO 0x5 #define XLP9XX_I2C_MFIFOCTRL 0x6 #define XLP9XX_I2C_STXFIFO 0x7 #define XLP9XX_I2C_SRXFIFO 0x8 #define XLP9XX_I2C_SFIFOCTRL 0x9 #define XLP9XX_I2C_SLAVEADDR 0xA #define XLP9XX_I2C_OWNADDR 0xB #define XLP9XX_I2C_FIFOWCNT 0xC #define XLP9XX_I2C_INTEN 0xD #define XLP9XX_I2C_INTST 0xE #define XLP9XX_I2C_WAITCNT 0xF #define XLP9XX_I2C_TIMEOUT 0X10 #define XLP9XX_I2C_GENCALLADDR 0x11 #define XLP9XX_I2C_STATUS_BUSY BIT(0) #define XLP9XX_I2C_CMD_START BIT(7) #define XLP9XX_I2C_CMD_STOP BIT(6) #define XLP9XX_I2C_CMD_READ BIT(5) #define XLP9XX_I2C_CMD_WRITE BIT(4) #define XLP9XX_I2C_CMD_ACK BIT(3) #define XLP9XX_I2C_CTRL_MCTLEN_SHIFT 16 #define XLP9XX_I2C_CTRL_MCTLEN_MASK 0xffff0000 #define XLP9XX_I2C_CTRL_RST BIT(8) #define XLP9XX_I2C_CTRL_EN BIT(6) #define XLP9XX_I2C_CTRL_MASTER BIT(4) #define XLP9XX_I2C_CTRL_FIFORD BIT(1) #define XLP9XX_I2C_CTRL_ADDMODE BIT(0) #define XLP9XX_I2C_INTEN_NACKADDR BIT(25) #define XLP9XX_I2C_INTEN_SADDR BIT(13) #define XLP9XX_I2C_INTEN_DATADONE BIT(12) #define XLP9XX_I2C_INTEN_ARLOST BIT(11) #define XLP9XX_I2C_INTEN_MFIFOFULL BIT(4) #define XLP9XX_I2C_INTEN_MFIFOEMTY BIT(3) #define XLP9XX_I2C_INTEN_MFIFOHI BIT(2) #define XLP9XX_I2C_INTEN_BUSERR BIT(0) #define XLP9XX_I2C_MFIFOCTRL_HITH_SHIFT 8 #define XLP9XX_I2C_MFIFOCTRL_LOTH_SHIFT 0 #define XLP9XX_I2C_MFIFOCTRL_RST BIT(16) #define XLP9XX_I2C_SLAVEADDR_RW BIT(0) #define XLP9XX_I2C_SLAVEADDR_ADDR_SHIFT 1 #define XLP9XX_I2C_IP_CLK_FREQ 133000000UL #define XLP9XX_I2C_FIFO_SIZE 0x80U #define XLP9XX_I2C_TIMEOUT_MS 1000 #define XLP9XX_I2C_BUSY_TIMEOUT 50 #define XLP9XX_I2C_FIFO_WCNT_MASK 0xff #define XLP9XX_I2C_STATUS_ERRMASK (XLP9XX_I2C_INTEN_ARLOST | \ XLP9XX_I2C_INTEN_NACKADDR | XLP9XX_I2C_INTEN_BUSERR) struct xlp9xx_i2c_dev { struct device *dev; struct i2c_adapter adapter; struct completion msg_complete; struct i2c_smbus_alert_setup alert_data; struct i2c_client *ara; int irq; bool msg_read; bool len_recv; bool client_pec; u32 __iomem *base; u32 msg_buf_remaining; u32 msg_len; u32 ip_clk_hz; u32 clk_hz; u32 msg_err; u8 *msg_buf; }; static inline void xlp9xx_write_i2c_reg(struct xlp9xx_i2c_dev *priv, unsigned long reg, u32 val) { writel(val, priv->base + reg); } static inline u32 xlp9xx_read_i2c_reg(struct xlp9xx_i2c_dev *priv, unsigned long reg) { return readl(priv->base + reg); } static void xlp9xx_i2c_mask_irq(struct xlp9xx_i2c_dev *priv, u32 mask) { u32 inten; inten = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_INTEN) & ~mask; xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_INTEN, inten); } static void xlp9xx_i2c_unmask_irq(struct xlp9xx_i2c_dev *priv, u32 mask) { u32 inten; inten = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_INTEN) | mask; xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_INTEN, inten); } static void xlp9xx_i2c_update_rx_fifo_thres(struct xlp9xx_i2c_dev *priv) { u32 thres; if (priv->len_recv) /* interrupt after the 
first read to examine * the length byte before proceeding further */ thres = 1; else if (priv->msg_buf_remaining > XLP9XX_I2C_FIFO_SIZE) thres = XLP9XX_I2C_FIFO_SIZE; else thres = priv->msg_buf_remaining; xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_MFIFOCTRL, thres << XLP9XX_I2C_MFIFOCTRL_HITH_SHIFT); } static void xlp9xx_i2c_fill_tx_fifo(struct xlp9xx_i2c_dev *priv) { u32 len, i; u8 *buf = priv->msg_buf; len = min(priv->msg_buf_remaining, XLP9XX_I2C_FIFO_SIZE); for (i = 0; i < len; i++) xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_MTXFIFO, buf[i]); priv->msg_buf_remaining -= len; priv->msg_buf += len; } static void xlp9xx_i2c_update_rlen(struct xlp9xx_i2c_dev *priv) { u32 val, len; /* * Update receive length. Re-read len to get the latest value, * and then add 4 to have a minimum value that can be safely * written. This is to account for the byte read above, the * transfer in progress and any delays in the register I/O */ val = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_CTRL); len = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_FIFOWCNT) & XLP9XX_I2C_FIFO_WCNT_MASK; len = max_t(u32, priv->msg_len, len + 4); if (len >= I2C_SMBUS_BLOCK_MAX + 2) return; val = (val & ~XLP9XX_I2C_CTRL_MCTLEN_MASK) | (len << XLP9XX_I2C_CTRL_MCTLEN_SHIFT); xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_CTRL, val); } static void xlp9xx_i2c_drain_rx_fifo(struct xlp9xx_i2c_dev *priv) { u32 len, i; u8 rlen, *buf = priv->msg_buf; len = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_FIFOWCNT) & XLP9XX_I2C_FIFO_WCNT_MASK; if (!len) return; if (priv->len_recv) { /* read length byte */ rlen = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO); /* * We expect at least 2 interrupts for I2C_M_RECV_LEN * transactions. The length is updated during the first * interrupt, and the buffer contents are only copied * during subsequent interrupts. If in case the interrupts * get merged we would complete the transaction without * copying out the bytes from RX fifo. To avoid this now we * drain the fifo as and when data is available. * We drained the rlen byte already, decrement total length * by one. 
*/ len--; if (rlen > I2C_SMBUS_BLOCK_MAX || rlen == 0) { rlen = 0; /*abort transfer */ priv->msg_buf_remaining = 0; priv->msg_len = 0; xlp9xx_i2c_update_rlen(priv); return; } *buf++ = rlen; if (priv->client_pec) ++rlen; /* account for error check byte */ /* update remaining bytes and message length */ priv->msg_buf_remaining = rlen; priv->msg_len = rlen + 1; xlp9xx_i2c_update_rlen(priv); priv->len_recv = false; } len = min(priv->msg_buf_remaining, len); for (i = 0; i < len; i++, buf++) *buf = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO); priv->msg_buf_remaining -= len; priv->msg_buf = buf; if (priv->msg_buf_remaining) xlp9xx_i2c_update_rx_fifo_thres(priv); } static irqreturn_t xlp9xx_i2c_isr(int irq, void *dev_id) { struct xlp9xx_i2c_dev *priv = dev_id; u32 status; status = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_INTST); if (status == 0) return IRQ_NONE; xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_INTST, status); if (status & XLP9XX_I2C_STATUS_ERRMASK) { priv->msg_err = status; goto xfer_done; } /* SADDR ACK for SMBUS_QUICK */ if ((status & XLP9XX_I2C_INTEN_SADDR) && (priv->msg_len == 0)) goto xfer_done; if (!priv->msg_read) { if (status & XLP9XX_I2C_INTEN_MFIFOEMTY) { /* TX FIFO got empty, fill it up again */ if (priv->msg_buf_remaining) xlp9xx_i2c_fill_tx_fifo(priv); else xlp9xx_i2c_mask_irq(priv, XLP9XX_I2C_INTEN_MFIFOEMTY); } } else { if (status & (XLP9XX_I2C_INTEN_DATADONE | XLP9XX_I2C_INTEN_MFIFOHI)) { /* data is in FIFO, read it */ if (priv->msg_buf_remaining) xlp9xx_i2c_drain_rx_fifo(priv); } } /* Transfer complete */ if (status & XLP9XX_I2C_INTEN_DATADONE) goto xfer_done; return IRQ_HANDLED; xfer_done: xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_INTEN, 0); complete(&priv->msg_complete); return IRQ_HANDLED; } static int xlp9xx_i2c_check_bus_status(struct xlp9xx_i2c_dev *priv) { u32 status; u32 busy_timeout = XLP9XX_I2C_BUSY_TIMEOUT; while (busy_timeout) { status = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_STATUS); if ((status & XLP9XX_I2C_STATUS_BUSY) == 0) break; busy_timeout--; usleep_range(1000, 1100); } if (!busy_timeout) return -EIO; return 0; } static int xlp9xx_i2c_init(struct xlp9xx_i2c_dev *priv) { u32 prescale; /* * The controller uses 5 * SCL clock internally. * So prescale value should be divided by 5. */ prescale = DIV_ROUND_UP(priv->ip_clk_hz, priv->clk_hz); prescale = ((prescale - 8) / 5) - 1; xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_CTRL, XLP9XX_I2C_CTRL_RST); xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_CTRL, XLP9XX_I2C_CTRL_EN | XLP9XX_I2C_CTRL_MASTER); xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_DIV, prescale); xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_INTEN, 0); return 0; } static int xlp9xx_i2c_xfer_msg(struct xlp9xx_i2c_dev *priv, struct i2c_msg *msg, int last_msg) { unsigned long timeleft; u32 intr_mask, cmd, val, len; priv->msg_buf = msg->buf; priv->msg_buf_remaining = priv->msg_len = msg->len; priv->msg_err = 0; priv->msg_read = (msg->flags & I2C_M_RD); reinit_completion(&priv->msg_complete); /* Reset FIFO */ xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_MFIFOCTRL, XLP9XX_I2C_MFIFOCTRL_RST); /* set slave addr */ xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_SLAVEADDR, (msg->addr << XLP9XX_I2C_SLAVEADDR_ADDR_SHIFT) | (priv->msg_read ? 
XLP9XX_I2C_SLAVEADDR_RW : 0)); /* Build control word for transfer */ val = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_CTRL); if (!priv->msg_read) val &= ~XLP9XX_I2C_CTRL_FIFORD; else val |= XLP9XX_I2C_CTRL_FIFORD; /* read */ if (msg->flags & I2C_M_TEN) val |= XLP9XX_I2C_CTRL_ADDMODE; /* 10-bit address mode*/ else val &= ~XLP9XX_I2C_CTRL_ADDMODE; priv->len_recv = msg->flags & I2C_M_RECV_LEN; len = priv->len_recv ? I2C_SMBUS_BLOCK_MAX + 2 : msg->len; priv->client_pec = msg->flags & I2C_CLIENT_PEC; /* set FIFO threshold if reading */ if (priv->msg_read) xlp9xx_i2c_update_rx_fifo_thres(priv); /* set data length to be transferred */ val = (val & ~XLP9XX_I2C_CTRL_MCTLEN_MASK) | (len << XLP9XX_I2C_CTRL_MCTLEN_SHIFT); xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_CTRL, val); /* fill fifo during tx */ if (!priv->msg_read) xlp9xx_i2c_fill_tx_fifo(priv); /* set interrupt mask */ intr_mask = (XLP9XX_I2C_INTEN_ARLOST | XLP9XX_I2C_INTEN_BUSERR | XLP9XX_I2C_INTEN_NACKADDR | XLP9XX_I2C_INTEN_DATADONE); if (priv->msg_read) { intr_mask |= XLP9XX_I2C_INTEN_MFIFOHI; if (msg->len == 0) intr_mask |= XLP9XX_I2C_INTEN_SADDR; } else { if (msg->len == 0) intr_mask |= XLP9XX_I2C_INTEN_SADDR; else intr_mask |= XLP9XX_I2C_INTEN_MFIFOEMTY; } xlp9xx_i2c_unmask_irq(priv, intr_mask); /* set cmd reg */ cmd = XLP9XX_I2C_CMD_START; if (msg->len) cmd |= (priv->msg_read ? XLP9XX_I2C_CMD_READ : XLP9XX_I2C_CMD_WRITE); if (last_msg) cmd |= XLP9XX_I2C_CMD_STOP; xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_CMD, cmd); timeleft = msecs_to_jiffies(XLP9XX_I2C_TIMEOUT_MS); timeleft = wait_for_completion_timeout(&priv->msg_complete, timeleft); if (priv->msg_err & XLP9XX_I2C_INTEN_BUSERR) { dev_dbg(priv->dev, "transfer error %x!\n", priv->msg_err); xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_CMD, XLP9XX_I2C_CMD_STOP); return -EIO; } else if (priv->msg_err & XLP9XX_I2C_INTEN_NACKADDR) { return -ENXIO; } if (timeleft == 0) { dev_dbg(priv->dev, "i2c transfer timed out!\n"); xlp9xx_i2c_init(priv); return -ETIMEDOUT; } /* update msg->len with actual received length */ if (msg->flags & I2C_M_RECV_LEN) { if (!priv->msg_len) return -EPROTO; msg->len = priv->msg_len; } return 0; } static int xlp9xx_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { int i, ret; struct xlp9xx_i2c_dev *priv = i2c_get_adapdata(adap); ret = xlp9xx_i2c_check_bus_status(priv); if (ret) { xlp9xx_i2c_init(priv); ret = xlp9xx_i2c_check_bus_status(priv); if (ret) return ret; } for (i = 0; i < num; i++) { ret = xlp9xx_i2c_xfer_msg(priv, &msgs[i], i == num - 1); if (ret != 0) return ret; } return num; } static u32 xlp9xx_i2c_functionality(struct i2c_adapter *adapter) { return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SMBUS_READ_BLOCK_DATA | I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR; } static const struct i2c_algorithm xlp9xx_i2c_algo = { .master_xfer = xlp9xx_i2c_xfer, .functionality = xlp9xx_i2c_functionality, }; static int xlp9xx_i2c_get_frequency(struct platform_device *pdev, struct xlp9xx_i2c_dev *priv) { struct clk *clk; u32 freq; int err; clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(clk)) { priv->ip_clk_hz = XLP9XX_I2C_IP_CLK_FREQ; dev_dbg(&pdev->dev, "using default input frequency %u\n", priv->ip_clk_hz); } else { priv->ip_clk_hz = clk_get_rate(clk); } err = device_property_read_u32(&pdev->dev, "clock-frequency", &freq); if (err) { freq = I2C_MAX_STANDARD_MODE_FREQ; dev_dbg(&pdev->dev, "using default frequency %u\n", freq); } else if (freq == 0 || freq > I2C_MAX_FAST_MODE_FREQ) { dev_warn(&pdev->dev, "invalid frequency %u, using default\n", freq); freq = I2C_MAX_STANDARD_MODE_FREQ; } 
priv->clk_hz = freq; return 0; } static int xlp9xx_i2c_smbus_setup(struct xlp9xx_i2c_dev *priv, struct platform_device *pdev) { struct i2c_client *ara; if (!priv->alert_data.irq) return -EINVAL; ara = i2c_new_smbus_alert_device(&priv->adapter, &priv->alert_data); if (IS_ERR(ara)) return PTR_ERR(ara); priv->ara = ara; return 0; } static int xlp9xx_i2c_probe(struct platform_device *pdev) { struct xlp9xx_i2c_dev *priv; int err = 0; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); priv->irq = platform_get_irq(pdev, 0); if (priv->irq < 0) return priv->irq; /* SMBAlert irq */ priv->alert_data.irq = platform_get_irq(pdev, 1); if (priv->alert_data.irq <= 0) priv->alert_data.irq = 0; xlp9xx_i2c_get_frequency(pdev, priv); xlp9xx_i2c_init(priv); err = devm_request_irq(&pdev->dev, priv->irq, xlp9xx_i2c_isr, 0, pdev->name, priv); if (err) return dev_err_probe(&pdev->dev, err, "IRQ request failed!\n"); init_completion(&priv->msg_complete); priv->adapter.dev.parent = &pdev->dev; priv->adapter.algo = &xlp9xx_i2c_algo; priv->adapter.class = I2C_CLASS_HWMON; ACPI_COMPANION_SET(&priv->adapter.dev, ACPI_COMPANION(&pdev->dev)); priv->adapter.dev.of_node = pdev->dev.of_node; priv->dev = &pdev->dev; snprintf(priv->adapter.name, sizeof(priv->adapter.name), "xlp9xx-i2c"); i2c_set_adapdata(&priv->adapter, priv); err = i2c_add_adapter(&priv->adapter); if (err) return err; err = xlp9xx_i2c_smbus_setup(priv, pdev); if (err) dev_dbg(&pdev->dev, "No active SMBus alert %d\n", err); platform_set_drvdata(pdev, priv); dev_dbg(&pdev->dev, "I2C bus:%d added\n", priv->adapter.nr); return 0; } static void xlp9xx_i2c_remove(struct platform_device *pdev) { struct xlp9xx_i2c_dev *priv; priv = platform_get_drvdata(pdev); xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_INTEN, 0); synchronize_irq(priv->irq); i2c_del_adapter(&priv->adapter); xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_CTRL, 0); } #ifdef CONFIG_ACPI static const struct acpi_device_id xlp9xx_i2c_acpi_ids[] = { {"BRCM9007", 0}, {"CAV9007", 0}, {} }; MODULE_DEVICE_TABLE(acpi, xlp9xx_i2c_acpi_ids); #endif static struct platform_driver xlp9xx_i2c_driver = { .probe = xlp9xx_i2c_probe, .remove_new = xlp9xx_i2c_remove, .driver = { .name = "xlp9xx-i2c", .acpi_match_table = ACPI_PTR(xlp9xx_i2c_acpi_ids), }, }; module_platform_driver(xlp9xx_i2c_driver); MODULE_AUTHOR("Subhendu Sekhar Behera <[email protected]>"); MODULE_DESCRIPTION("XLP9XX/5XX I2C Bus Controller Driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/i2c/busses/i2c-xlp9xx.c
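One step in the i2c-xlp9xx.c receive path above is worth spelling out: for SMBus block reads (I2C_M_RECV_LEN) the controller is first programmed for the worst case, I2C_SMBUS_BLOCK_MAX + 2 bytes, and once the length byte arrives the remaining count is clamped to what the slave announced, plus one extra byte when PEC is in use. The stand-alone sketch below only mirrors that clamping arithmetic; the struct and helper name are invented for illustration and are not part of the driver.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define I2C_SMBUS_BLOCK_MAX	32

struct blk_read_state {			/* hypothetical mirror of the driver fields */
	uint32_t msg_buf_remaining;
	uint32_t msg_len;
	bool client_pec;
};

/* Returns 0 on success, -1 when the announced length is invalid and the
 * transfer has to be aborted (the driver zeroes its counters in that case). */
static int clamp_block_read(struct blk_read_state *s, uint8_t rlen)
{
	if (rlen == 0 || rlen > I2C_SMBUS_BLOCK_MAX) {
		s->msg_buf_remaining = 0;
		s->msg_len = 0;
		return -1;
	}
	if (s->client_pec)
		rlen++;				/* account for the PEC byte */
	s->msg_buf_remaining = rlen;		/* payload still to drain from the FIFO */
	s->msg_len = rlen + 1;			/* payload plus the length byte itself */
	return 0;
}

int main(void)
{
	struct blk_read_state s = { .client_pec = true };

	if (clamp_block_read(&s, 4) == 0)
		printf("remaining=%u total=%u\n", s.msg_buf_remaining, s.msg_len);
	return 0;
}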
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2021 The Asahi Linux Contributors * * PA Semi PWRficient SMBus host driver for Apple SoCs */ #include <linux/clk.h> #include <linux/i2c.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/types.h> #include "i2c-pasemi-core.h" struct pasemi_platform_i2c_data { struct pasemi_smbus smbus; struct clk *clk_ref; }; static int pasemi_platform_i2c_calc_clk_div(struct pasemi_platform_i2c_data *data, u32 frequency) { unsigned long clk_rate = clk_get_rate(data->clk_ref); if (!clk_rate) return -EINVAL; data->smbus.clk_div = DIV_ROUND_UP(clk_rate, 16 * frequency); if (data->smbus.clk_div < 4) return dev_err_probe(data->smbus.dev, -EINVAL, "Bus frequency %d is too fast.\n", frequency); if (data->smbus.clk_div > 0xff) return dev_err_probe(data->smbus.dev, -EINVAL, "Bus frequency %d is too slow.\n", frequency); return 0; } static int pasemi_platform_i2c_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct pasemi_platform_i2c_data *data; struct pasemi_smbus *smbus; u32 frequency; int error; int irq_num; data = devm_kzalloc(dev, sizeof(struct pasemi_platform_i2c_data), GFP_KERNEL); if (!data) return -ENOMEM; smbus = &data->smbus; smbus->dev = dev; smbus->ioaddr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(smbus->ioaddr)) return PTR_ERR(smbus->ioaddr); if (of_property_read_u32(dev->of_node, "clock-frequency", &frequency)) frequency = I2C_MAX_STANDARD_MODE_FREQ; data->clk_ref = devm_clk_get_enabled(dev, NULL); if (IS_ERR(data->clk_ref)) return PTR_ERR(data->clk_ref); error = pasemi_platform_i2c_calc_clk_div(data, frequency); if (error) return error; smbus->adapter.dev.of_node = pdev->dev.of_node; error = pasemi_i2c_common_probe(smbus); if (error) return error; irq_num = platform_get_irq(pdev, 0); error = devm_request_irq(smbus->dev, irq_num, pasemi_irq_handler, 0, "pasemi_apple_i2c", (void *)smbus); if (!error) smbus->use_irq = 1; platform_set_drvdata(pdev, data); return 0; } static void pasemi_platform_i2c_remove(struct platform_device *pdev) { } static const struct of_device_id pasemi_platform_i2c_of_match[] = { { .compatible = "apple,t8103-i2c" }, { .compatible = "apple,i2c" }, {}, }; MODULE_DEVICE_TABLE(of, pasemi_platform_i2c_of_match); static struct platform_driver pasemi_platform_i2c_driver = { .driver = { .name = "i2c-apple", .of_match_table = pasemi_platform_i2c_of_match, }, .probe = pasemi_platform_i2c_probe, .remove_new = pasemi_platform_i2c_remove, }; module_platform_driver(pasemi_platform_i2c_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Sven Peter <[email protected]>"); MODULE_DESCRIPTION("Apple/PASemi SMBus platform driver");
linux-master
drivers/i2c/busses/i2c-pasemi-platform.c
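The divider maths in i2c-pasemi-platform.c above is compact enough to check by hand: pasemi_platform_i2c_calc_clk_div() computes DIV_ROUND_UP(clk_rate, 16 * frequency) and rejects anything outside 4..0xff. The sketch below reproduces that bound check on the host; the function name and the 133 MHz reference clock are assumptions for illustration, not values taken from the hardware.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Mirrors the driver's check: the divider must be at least 4 and fit in 8 bits. */
static int calc_clk_div(unsigned long clk_rate, unsigned int frequency,
			unsigned int *clk_div)
{
	if (!clk_rate || !frequency)
		return -1;
	*clk_div = DIV_ROUND_UP(clk_rate, 16 * frequency);
	if (*clk_div < 4)
		return -1;	/* requested bus frequency too fast for this clock */
	if (*clk_div > 0xff)
		return -1;	/* requested bus frequency too slow for this clock */
	return 0;
}

int main(void)
{
	unsigned int div;

	/* assumed 133 MHz reference clock and a 100 kHz bus */
	if (calc_clk_div(133000000UL, 100000, &div) == 0)
		printf("clk_div = %u\n", div);	/* prints 84 */
	return 0;
}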
// SPDX-License-Identifier: GPL-2.0+ /* * Freescale MXS I2C bus driver * * Copyright (C) 2012-2013 Marek Vasut <[email protected]> * Copyright (C) 2011-2012 Wolfram Sang, Pengutronix e.K. * * based on a (non-working) driver which was: * * Copyright (C) 2009-2010 Freescale Semiconductor, Inc. All Rights Reserved. */ #include <linux/slab.h> #include <linux/device.h> #include <linux/module.h> #include <linux/i2c.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/completion.h> #include <linux/platform_device.h> #include <linux/jiffies.h> #include <linux/io.h> #include <linux/stmp_device.h> #include <linux/of.h> #include <linux/dma-mapping.h> #include <linux/dmaengine.h> #include <linux/dma/mxs-dma.h> #define DRIVER_NAME "mxs-i2c" #define MXS_I2C_CTRL0 (0x00) #define MXS_I2C_CTRL0_SET (0x04) #define MXS_I2C_CTRL0_CLR (0x08) #define MXS_I2C_CTRL0_SFTRST 0x80000000 #define MXS_I2C_CTRL0_RUN 0x20000000 #define MXS_I2C_CTRL0_SEND_NAK_ON_LAST 0x02000000 #define MXS_I2C_CTRL0_PIO_MODE 0x01000000 #define MXS_I2C_CTRL0_RETAIN_CLOCK 0x00200000 #define MXS_I2C_CTRL0_POST_SEND_STOP 0x00100000 #define MXS_I2C_CTRL0_PRE_SEND_START 0x00080000 #define MXS_I2C_CTRL0_MASTER_MODE 0x00020000 #define MXS_I2C_CTRL0_DIRECTION 0x00010000 #define MXS_I2C_CTRL0_XFER_COUNT(v) ((v) & 0x0000FFFF) #define MXS_I2C_TIMING0 (0x10) #define MXS_I2C_TIMING1 (0x20) #define MXS_I2C_TIMING2 (0x30) #define MXS_I2C_CTRL1 (0x40) #define MXS_I2C_CTRL1_SET (0x44) #define MXS_I2C_CTRL1_CLR (0x48) #define MXS_I2C_CTRL1_CLR_GOT_A_NAK 0x10000000 #define MXS_I2C_CTRL1_BUS_FREE_IRQ 0x80 #define MXS_I2C_CTRL1_DATA_ENGINE_CMPLT_IRQ 0x40 #define MXS_I2C_CTRL1_NO_SLAVE_ACK_IRQ 0x20 #define MXS_I2C_CTRL1_OVERSIZE_XFER_TERM_IRQ 0x10 #define MXS_I2C_CTRL1_EARLY_TERM_IRQ 0x08 #define MXS_I2C_CTRL1_MASTER_LOSS_IRQ 0x04 #define MXS_I2C_CTRL1_SLAVE_STOP_IRQ 0x02 #define MXS_I2C_CTRL1_SLAVE_IRQ 0x01 #define MXS_I2C_STAT (0x50) #define MXS_I2C_STAT_GOT_A_NAK 0x10000000 #define MXS_I2C_STAT_BUS_BUSY 0x00000800 #define MXS_I2C_STAT_CLK_GEN_BUSY 0x00000400 #define MXS_I2C_DATA(i2c) ((i2c->dev_type == MXS_I2C_V1) ? 0x60 : 0xa0) #define MXS_I2C_DEBUG0_CLR(i2c) ((i2c->dev_type == MXS_I2C_V1) ? 
0x78 : 0xb8) #define MXS_I2C_DEBUG0_DMAREQ 0x80000000 #define MXS_I2C_IRQ_MASK (MXS_I2C_CTRL1_DATA_ENGINE_CMPLT_IRQ | \ MXS_I2C_CTRL1_NO_SLAVE_ACK_IRQ | \ MXS_I2C_CTRL1_EARLY_TERM_IRQ | \ MXS_I2C_CTRL1_MASTER_LOSS_IRQ | \ MXS_I2C_CTRL1_SLAVE_STOP_IRQ | \ MXS_I2C_CTRL1_SLAVE_IRQ) #define MXS_CMD_I2C_SELECT (MXS_I2C_CTRL0_RETAIN_CLOCK | \ MXS_I2C_CTRL0_PRE_SEND_START | \ MXS_I2C_CTRL0_MASTER_MODE | \ MXS_I2C_CTRL0_DIRECTION | \ MXS_I2C_CTRL0_XFER_COUNT(1)) #define MXS_CMD_I2C_WRITE (MXS_I2C_CTRL0_PRE_SEND_START | \ MXS_I2C_CTRL0_MASTER_MODE | \ MXS_I2C_CTRL0_DIRECTION) #define MXS_CMD_I2C_READ (MXS_I2C_CTRL0_SEND_NAK_ON_LAST | \ MXS_I2C_CTRL0_MASTER_MODE) enum mxs_i2c_devtype { MXS_I2C_UNKNOWN = 0, MXS_I2C_V1, MXS_I2C_V2, }; /** * struct mxs_i2c_dev - per device, private MXS-I2C data * * @dev: driver model device node * @dev_type: distinguish i.MX23/i.MX28 features * @regs: IO registers pointer * @cmd_complete: completion object for transaction wait * @cmd_err: error code for last transaction * @adapter: i2c subsystem adapter node */ struct mxs_i2c_dev { struct device *dev; enum mxs_i2c_devtype dev_type; void __iomem *regs; struct completion cmd_complete; int cmd_err; struct i2c_adapter adapter; uint32_t timing0; uint32_t timing1; uint32_t timing2; /* DMA support components */ struct dma_chan *dmach; uint32_t pio_data[2]; uint32_t addr_data; struct scatterlist sg_io[2]; bool dma_read; }; static int mxs_i2c_reset(struct mxs_i2c_dev *i2c) { int ret = stmp_reset_block(i2c->regs); if (ret) return ret; /* * Configure timing for the I2C block. The I2C TIMING2 register has to * be programmed with this particular magic number. The rest is derived * from the XTAL speed and requested I2C speed. * * For details, see i.MX233 [25.4.2 - 25.4.4] and i.MX28 [27.5.2 - 27.5.4]. */ writel(i2c->timing0, i2c->regs + MXS_I2C_TIMING0); writel(i2c->timing1, i2c->regs + MXS_I2C_TIMING1); writel(i2c->timing2, i2c->regs + MXS_I2C_TIMING2); writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_SET); return 0; } static void mxs_i2c_dma_finish(struct mxs_i2c_dev *i2c) { if (i2c->dma_read) { dma_unmap_sg(i2c->dev, &i2c->sg_io[0], 1, DMA_TO_DEVICE); dma_unmap_sg(i2c->dev, &i2c->sg_io[1], 1, DMA_FROM_DEVICE); } else { dma_unmap_sg(i2c->dev, i2c->sg_io, 2, DMA_TO_DEVICE); } } static void mxs_i2c_dma_irq_callback(void *param) { struct mxs_i2c_dev *i2c = param; complete(&i2c->cmd_complete); mxs_i2c_dma_finish(i2c); } static int mxs_i2c_dma_setup_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, u8 *buf, uint32_t flags) { struct dma_async_tx_descriptor *desc; struct mxs_i2c_dev *i2c = i2c_get_adapdata(adap); i2c->addr_data = i2c_8bit_addr_from_msg(msg); if (msg->flags & I2C_M_RD) { i2c->dma_read = true; /* * SELECT command. */ /* Queue the PIO register write transfer. */ i2c->pio_data[0] = MXS_CMD_I2C_SELECT; desc = dmaengine_prep_slave_sg(i2c->dmach, (struct scatterlist *)&i2c->pio_data[0], 1, DMA_TRANS_NONE, 0); if (!desc) { dev_err(i2c->dev, "Failed to get PIO reg. write descriptor.\n"); goto select_init_pio_fail; } /* Queue the DMA data transfer. */ sg_init_one(&i2c->sg_io[0], &i2c->addr_data, 1); dma_map_sg(i2c->dev, &i2c->sg_io[0], 1, DMA_TO_DEVICE); desc = dmaengine_prep_slave_sg(i2c->dmach, &i2c->sg_io[0], 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | MXS_DMA_CTRL_WAIT4END); if (!desc) { dev_err(i2c->dev, "Failed to get DMA data write descriptor.\n"); goto select_init_dma_fail; } /* * READ command. */ /* Queue the PIO register write transfer. 
*/ i2c->pio_data[1] = flags | MXS_CMD_I2C_READ | MXS_I2C_CTRL0_XFER_COUNT(msg->len); desc = dmaengine_prep_slave_sg(i2c->dmach, (struct scatterlist *)&i2c->pio_data[1], 1, DMA_TRANS_NONE, DMA_PREP_INTERRUPT); if (!desc) { dev_err(i2c->dev, "Failed to get PIO reg. write descriptor.\n"); goto select_init_dma_fail; } /* Queue the DMA data transfer. */ sg_init_one(&i2c->sg_io[1], buf, msg->len); dma_map_sg(i2c->dev, &i2c->sg_io[1], 1, DMA_FROM_DEVICE); desc = dmaengine_prep_slave_sg(i2c->dmach, &i2c->sg_io[1], 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | MXS_DMA_CTRL_WAIT4END); if (!desc) { dev_err(i2c->dev, "Failed to get DMA data write descriptor.\n"); goto read_init_dma_fail; } } else { i2c->dma_read = false; /* * WRITE command. */ /* Queue the PIO register write transfer. */ i2c->pio_data[0] = flags | MXS_CMD_I2C_WRITE | MXS_I2C_CTRL0_XFER_COUNT(msg->len + 1); desc = dmaengine_prep_slave_sg(i2c->dmach, (struct scatterlist *)&i2c->pio_data[0], 1, DMA_TRANS_NONE, 0); if (!desc) { dev_err(i2c->dev, "Failed to get PIO reg. write descriptor.\n"); goto write_init_pio_fail; } /* Queue the DMA data transfer. */ sg_init_table(i2c->sg_io, 2); sg_set_buf(&i2c->sg_io[0], &i2c->addr_data, 1); sg_set_buf(&i2c->sg_io[1], buf, msg->len); dma_map_sg(i2c->dev, i2c->sg_io, 2, DMA_TO_DEVICE); desc = dmaengine_prep_slave_sg(i2c->dmach, i2c->sg_io, 2, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | MXS_DMA_CTRL_WAIT4END); if (!desc) { dev_err(i2c->dev, "Failed to get DMA data write descriptor.\n"); goto write_init_dma_fail; } } /* * The last descriptor must have this callback, * to finish the DMA transaction. */ desc->callback = mxs_i2c_dma_irq_callback; desc->callback_param = i2c; /* Start the transfer. */ dmaengine_submit(desc); dma_async_issue_pending(i2c->dmach); return 0; /* Read failpath. */ read_init_dma_fail: dma_unmap_sg(i2c->dev, &i2c->sg_io[1], 1, DMA_FROM_DEVICE); select_init_dma_fail: dma_unmap_sg(i2c->dev, &i2c->sg_io[0], 1, DMA_TO_DEVICE); select_init_pio_fail: dmaengine_terminate_sync(i2c->dmach); return -EINVAL; /* Write failpath. */ write_init_dma_fail: dma_unmap_sg(i2c->dev, i2c->sg_io, 2, DMA_TO_DEVICE); write_init_pio_fail: dmaengine_terminate_sync(i2c->dmach); return -EINVAL; } static int mxs_i2c_pio_wait_xfer_end(struct mxs_i2c_dev *i2c) { unsigned long timeout = jiffies + msecs_to_jiffies(1000); while (readl(i2c->regs + MXS_I2C_CTRL0) & MXS_I2C_CTRL0_RUN) { if (readl(i2c->regs + MXS_I2C_CTRL1) & MXS_I2C_CTRL1_NO_SLAVE_ACK_IRQ) return -ENXIO; if (time_after(jiffies, timeout)) return -ETIMEDOUT; cond_resched(); } return 0; } static int mxs_i2c_pio_check_error_state(struct mxs_i2c_dev *i2c) { u32 state; state = readl(i2c->regs + MXS_I2C_CTRL1_CLR) & MXS_I2C_IRQ_MASK; if (state & MXS_I2C_CTRL1_NO_SLAVE_ACK_IRQ) i2c->cmd_err = -ENXIO; else if (state & (MXS_I2C_CTRL1_EARLY_TERM_IRQ | MXS_I2C_CTRL1_MASTER_LOSS_IRQ | MXS_I2C_CTRL1_SLAVE_STOP_IRQ | MXS_I2C_CTRL1_SLAVE_IRQ)) i2c->cmd_err = -EIO; return i2c->cmd_err; } static void mxs_i2c_pio_trigger_cmd(struct mxs_i2c_dev *i2c, u32 cmd) { u32 reg; writel(cmd, i2c->regs + MXS_I2C_CTRL0); /* readback makes sure the write is latched into hardware */ reg = readl(i2c->regs + MXS_I2C_CTRL0); reg |= MXS_I2C_CTRL0_RUN; writel(reg, i2c->regs + MXS_I2C_CTRL0); } /* * Start WRITE transaction on the I2C bus. By studying i.MX23 datasheet, * CTRL0::PIO_MODE bit description clarifies the order in which the registers * must be written during PIO mode operation. First, the CTRL0 register has * to be programmed with all the necessary bits but the RUN bit. 
Then the * payload has to be written into the DATA register. Finally, the transmission * is executed by setting the RUN bit in CTRL0. */ static void mxs_i2c_pio_trigger_write_cmd(struct mxs_i2c_dev *i2c, u32 cmd, u32 data) { writel(cmd, i2c->regs + MXS_I2C_CTRL0); if (i2c->dev_type == MXS_I2C_V1) writel(MXS_I2C_CTRL0_PIO_MODE, i2c->regs + MXS_I2C_CTRL0_SET); writel(data, i2c->regs + MXS_I2C_DATA(i2c)); writel(MXS_I2C_CTRL0_RUN, i2c->regs + MXS_I2C_CTRL0_SET); } static int mxs_i2c_pio_setup_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, uint32_t flags) { struct mxs_i2c_dev *i2c = i2c_get_adapdata(adap); uint32_t addr_data = i2c_8bit_addr_from_msg(msg); uint32_t data = 0; int i, ret, xlen = 0, xmit = 0; uint32_t start; /* Mute IRQs coming from this block. */ writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_CLR); /* * MX23 idea: * - Enable CTRL0::PIO_MODE (1 << 24) * - Enable CTRL1::ACK_MODE (1 << 27) * * WARNING! The MX23 is broken in some way, even if it claims * to support PIO, when we try to transfer any amount of data * that is not aligned to 4 bytes, the DMA engine will have * bits in DEBUG1::DMA_BYTES_ENABLES still set even after the * transfer. This in turn will mess up the next transfer as * the block it emit one byte write onto the bus terminated * with a NAK+STOP. A possible workaround is to reset the IP * block after every PIO transmission, which might just work. * * NOTE: The CTRL0::PIO_MODE description is important, since * it outlines how the PIO mode is really supposed to work. */ if (msg->flags & I2C_M_RD) { /* * PIO READ transfer: * * This transfer MUST be limited to 4 bytes maximum. It is not * possible to transfer more than four bytes via PIO, since we * can not in any way make sure we can read the data from the * DATA register fast enough. Besides, the RX FIFO is only four * bytes deep, thus we can only really read up to four bytes at * time. Finally, there is no bit indicating us that new data * arrived at the FIFO and can thus be fetched from the DATA * register. */ BUG_ON(msg->len > 4); /* SELECT command. */ mxs_i2c_pio_trigger_write_cmd(i2c, MXS_CMD_I2C_SELECT, addr_data); ret = mxs_i2c_pio_wait_xfer_end(i2c); if (ret) { dev_dbg(i2c->dev, "PIO: Failed to send SELECT command!\n"); goto cleanup; } /* READ command. */ mxs_i2c_pio_trigger_cmd(i2c, MXS_CMD_I2C_READ | flags | MXS_I2C_CTRL0_XFER_COUNT(msg->len)); ret = mxs_i2c_pio_wait_xfer_end(i2c); if (ret) { dev_dbg(i2c->dev, "PIO: Failed to send READ command!\n"); goto cleanup; } data = readl(i2c->regs + MXS_I2C_DATA(i2c)); for (i = 0; i < msg->len; i++) { msg->buf[i] = data & 0xff; data >>= 8; } } else { /* * PIO WRITE transfer: * * The code below implements clock stretching to circumvent * the possibility of kernel not being able to supply data * fast enough. It is possible to transfer arbitrary amount * of data using PIO write. */ /* * The LSB of data buffer is the first byte blasted across * the bus. Higher order bytes follow. Thus the following * filling schematic. */ data = addr_data << 24; /* Start the transfer with START condition. */ start = MXS_I2C_CTRL0_PRE_SEND_START; /* If the transfer is long, use clock stretching. */ if (msg->len > 3) start |= MXS_I2C_CTRL0_RETAIN_CLOCK; for (i = 0; i < msg->len; i++) { data >>= 8; data |= (msg->buf[i] << 24); xmit = 0; /* This is the last transfer of the message. */ if (i + 1 == msg->len) { /* Add optional STOP flag. */ start |= flags; /* Remove RETAIN_CLOCK bit. 
*/ start &= ~MXS_I2C_CTRL0_RETAIN_CLOCK; xmit = 1; } /* Four bytes are ready in the "data" variable. */ if ((i & 3) == 2) xmit = 1; /* Nothing interesting happened, continue stuffing. */ if (!xmit) continue; /* * Compute the size of the transfer and shift the * data accordingly. * * i = (4k + 0) .... xlen = 2 * i = (4k + 1) .... xlen = 3 * i = (4k + 2) .... xlen = 4 * i = (4k + 3) .... xlen = 1 */ if ((i % 4) == 3) xlen = 1; else xlen = (i % 4) + 2; data >>= (4 - xlen) * 8; dev_dbg(i2c->dev, "PIO: len=%i pos=%i total=%i [W%s%s%s]\n", xlen, i, msg->len, start & MXS_I2C_CTRL0_PRE_SEND_START ? "S" : "", start & MXS_I2C_CTRL0_POST_SEND_STOP ? "E" : "", start & MXS_I2C_CTRL0_RETAIN_CLOCK ? "C" : ""); writel(MXS_I2C_DEBUG0_DMAREQ, i2c->regs + MXS_I2C_DEBUG0_CLR(i2c)); mxs_i2c_pio_trigger_write_cmd(i2c, start | MXS_I2C_CTRL0_MASTER_MODE | MXS_I2C_CTRL0_DIRECTION | MXS_I2C_CTRL0_XFER_COUNT(xlen), data); /* The START condition is sent only once. */ start &= ~MXS_I2C_CTRL0_PRE_SEND_START; /* Wait for the end of the transfer. */ ret = mxs_i2c_pio_wait_xfer_end(i2c); if (ret) { dev_dbg(i2c->dev, "PIO: Failed to finish WRITE cmd!\n"); break; } /* Check NAK here. */ ret = readl(i2c->regs + MXS_I2C_STAT) & MXS_I2C_STAT_GOT_A_NAK; if (ret) { ret = -ENXIO; goto cleanup; } } } /* make sure we capture any occurred error into cmd_err */ ret = mxs_i2c_pio_check_error_state(i2c); cleanup: /* Clear any dangling IRQs and re-enable interrupts. */ writel(MXS_I2C_IRQ_MASK, i2c->regs + MXS_I2C_CTRL1_CLR); writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_SET); /* Clear the PIO_MODE on i.MX23 */ if (i2c->dev_type == MXS_I2C_V1) writel(MXS_I2C_CTRL0_PIO_MODE, i2c->regs + MXS_I2C_CTRL0_CLR); return ret; } /* * Low level master read/write transaction. */ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop) { struct mxs_i2c_dev *i2c = i2c_get_adapdata(adap); int ret; int flags; u8 *dma_buf; int use_pio = 0; unsigned long time_left; flags = stop ? MXS_I2C_CTRL0_POST_SEND_STOP : 0; dev_dbg(i2c->dev, "addr: 0x%04x, len: %d, flags: 0x%x, stop: %d\n", msg->addr, msg->len, msg->flags, stop); /* * The MX28 I2C IP block can only do PIO READ for transfer of to up * 4 bytes of length. The write transfer is not limited as it can use * clock stretching to avoid FIFO underruns. */ if ((msg->flags & I2C_M_RD) && (msg->len <= 4)) use_pio = 1; if (!(msg->flags & I2C_M_RD) && (msg->len < 7)) use_pio = 1; i2c->cmd_err = 0; if (use_pio) { ret = mxs_i2c_pio_setup_xfer(adap, msg, flags); /* No need to reset the block if NAK was received. */ if (ret && (ret != -ENXIO)) mxs_i2c_reset(i2c); } else { dma_buf = i2c_get_dma_safe_msg_buf(msg, 1); if (!dma_buf) return -ENOMEM; reinit_completion(&i2c->cmd_complete); ret = mxs_i2c_dma_setup_xfer(adap, msg, dma_buf, flags); if (ret) { i2c_put_dma_safe_msg_buf(dma_buf, msg, false); return ret; } time_left = wait_for_completion_timeout(&i2c->cmd_complete, msecs_to_jiffies(1000)); i2c_put_dma_safe_msg_buf(dma_buf, msg, true); if (!time_left) goto timeout; ret = i2c->cmd_err; } if (ret == -ENXIO) { /* * If the transfer fails with a NAK from the slave the * controller halts until it gets told to return to idle state. */ writel(MXS_I2C_CTRL1_CLR_GOT_A_NAK, i2c->regs + MXS_I2C_CTRL1_SET); } /* * WARNING! * The i.MX23 is strange. After each and every operation, it's I2C IP * block must be reset, otherwise the IP block will misbehave. This can * be observed on the bus by the block sending out one single byte onto * the bus. 
In case such an error happens, bit 27 will be set in the * DEBUG0 register. This bit is not documented in the i.MX23 datasheet * and is marked as "TBD" instead. To reset this bit to a correct state, * reset the whole block. Since the block reset does not take long, do * reset the block after every transfer to play safe. */ if (i2c->dev_type == MXS_I2C_V1) mxs_i2c_reset(i2c); dev_dbg(i2c->dev, "Done with err=%d\n", ret); return ret; timeout: dev_dbg(i2c->dev, "Timeout!\n"); mxs_i2c_dma_finish(i2c); ret = mxs_i2c_reset(i2c); if (ret) return ret; return -ETIMEDOUT; } static int mxs_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) { int i; int err; for (i = 0; i < num; i++) { err = mxs_i2c_xfer_msg(adap, &msgs[i], i == (num - 1)); if (err) return err; } return num; } static u32 mxs_i2c_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static irqreturn_t mxs_i2c_isr(int this_irq, void *dev_id) { struct mxs_i2c_dev *i2c = dev_id; u32 stat = readl(i2c->regs + MXS_I2C_CTRL1) & MXS_I2C_IRQ_MASK; if (!stat) return IRQ_NONE; if (stat & MXS_I2C_CTRL1_NO_SLAVE_ACK_IRQ) i2c->cmd_err = -ENXIO; else if (stat & (MXS_I2C_CTRL1_EARLY_TERM_IRQ | MXS_I2C_CTRL1_MASTER_LOSS_IRQ | MXS_I2C_CTRL1_SLAVE_STOP_IRQ | MXS_I2C_CTRL1_SLAVE_IRQ)) /* MXS_I2C_CTRL1_OVERSIZE_XFER_TERM_IRQ is only for slaves */ i2c->cmd_err = -EIO; writel(stat, i2c->regs + MXS_I2C_CTRL1_CLR); return IRQ_HANDLED; } static const struct i2c_algorithm mxs_i2c_algo = { .master_xfer = mxs_i2c_xfer, .functionality = mxs_i2c_func, }; static const struct i2c_adapter_quirks mxs_i2c_quirks = { .flags = I2C_AQ_NO_ZERO_LEN, }; static void mxs_i2c_derive_timing(struct mxs_i2c_dev *i2c, uint32_t speed) { /* The I2C block clock runs at 24MHz */ const uint32_t clk = 24000000; uint32_t divider; uint16_t high_count, low_count, rcv_count, xmit_count; uint32_t bus_free, leadin; struct device *dev = i2c->dev; divider = DIV_ROUND_UP(clk, speed); if (divider < 25) { /* * limit the divider, so that min(low_count, high_count) * is >= 1 */ divider = 25; dev_warn(dev, "Speed too high (%u.%03u kHz), using %u.%03u kHz\n", speed / 1000, speed % 1000, clk / divider / 1000, clk / divider % 1000); } else if (divider > 1897) { /* * limit the divider, so that max(low_count, high_count) * cannot exceed 1023 */ divider = 1897; dev_warn(dev, "Speed too low (%u.%03u kHz), using %u.%03u kHz\n", speed / 1000, speed % 1000, clk / divider / 1000, clk / divider % 1000); } /* * The I2C spec specifies the following timing data: * standard mode fast mode Bitfield name * tLOW (SCL LOW period) 4700 ns 1300 ns * tHIGH (SCL HIGH period) 4000 ns 600 ns * tSU;DAT (data setup time) 250 ns 100 ns * tHD;STA (START hold time) 4000 ns 600 ns * tBUF (bus free time) 4700 ns 1300 ns * * The hardware (of the i.MX28 at least) seems to add 2 additional * clock cycles to the low_count and 7 cycles to the high_count. * This is compensated for by subtracting the respective constants * from the values written to the timing registers. 
*/ if (speed > I2C_MAX_STANDARD_MODE_FREQ) { /* fast mode */ low_count = DIV_ROUND_CLOSEST(divider * 13, (13 + 6)); high_count = DIV_ROUND_CLOSEST(divider * 6, (13 + 6)); leadin = DIV_ROUND_UP(600 * (clk / 1000000), 1000); bus_free = DIV_ROUND_UP(1300 * (clk / 1000000), 1000); } else { /* normal mode */ low_count = DIV_ROUND_CLOSEST(divider * 47, (47 + 40)); high_count = DIV_ROUND_CLOSEST(divider * 40, (47 + 40)); leadin = DIV_ROUND_UP(4700 * (clk / 1000000), 1000); bus_free = DIV_ROUND_UP(4700 * (clk / 1000000), 1000); } rcv_count = high_count * 3 / 8; xmit_count = low_count * 3 / 8; dev_dbg(dev, "speed=%u(actual %u) divider=%u low=%u high=%u xmit=%u rcv=%u leadin=%u bus_free=%u\n", speed, clk / divider, divider, low_count, high_count, xmit_count, rcv_count, leadin, bus_free); low_count -= 2; high_count -= 7; i2c->timing0 = (high_count << 16) | rcv_count; i2c->timing1 = (low_count << 16) | xmit_count; i2c->timing2 = (bus_free << 16 | leadin); } static int mxs_i2c_get_ofdata(struct mxs_i2c_dev *i2c) { uint32_t speed; struct device *dev = i2c->dev; struct device_node *node = dev->of_node; int ret; ret = of_property_read_u32(node, "clock-frequency", &speed); if (ret) { dev_warn(dev, "No I2C speed selected, using 100kHz\n"); speed = I2C_MAX_STANDARD_MODE_FREQ; } mxs_i2c_derive_timing(i2c, speed); return 0; } static const struct of_device_id mxs_i2c_dt_ids[] = { { .compatible = "fsl,imx23-i2c", .data = (void *)MXS_I2C_V1, }, { .compatible = "fsl,imx28-i2c", .data = (void *)MXS_I2C_V2, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, mxs_i2c_dt_ids); static int mxs_i2c_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct mxs_i2c_dev *i2c; struct i2c_adapter *adap; int err, irq; i2c = devm_kzalloc(dev, sizeof(*i2c), GFP_KERNEL); if (!i2c) return -ENOMEM; i2c->dev_type = (uintptr_t)of_device_get_match_data(&pdev->dev); i2c->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(i2c->regs)) return PTR_ERR(i2c->regs); irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; err = devm_request_irq(dev, irq, mxs_i2c_isr, 0, dev_name(dev), i2c); if (err) return err; i2c->dev = dev; init_completion(&i2c->cmd_complete); if (dev->of_node) { err = mxs_i2c_get_ofdata(i2c); if (err) return err; } /* Setup the DMA */ i2c->dmach = dma_request_chan(dev, "rx-tx"); if (IS_ERR(i2c->dmach)) { return dev_err_probe(dev, PTR_ERR(i2c->dmach), "Failed to request dma\n"); } platform_set_drvdata(pdev, i2c); /* Do reset to enforce correct startup after pinmuxing */ err = mxs_i2c_reset(i2c); if (err) return err; adap = &i2c->adapter; strscpy(adap->name, "MXS I2C adapter", sizeof(adap->name)); adap->owner = THIS_MODULE; adap->algo = &mxs_i2c_algo; adap->quirks = &mxs_i2c_quirks; adap->dev.parent = dev; adap->nr = pdev->id; adap->dev.of_node = pdev->dev.of_node; i2c_set_adapdata(adap, i2c); err = i2c_add_numbered_adapter(adap); if (err) { writel(MXS_I2C_CTRL0_SFTRST, i2c->regs + MXS_I2C_CTRL0_SET); return err; } return 0; } static void mxs_i2c_remove(struct platform_device *pdev) { struct mxs_i2c_dev *i2c = platform_get_drvdata(pdev); i2c_del_adapter(&i2c->adapter); if (i2c->dmach) dma_release_channel(i2c->dmach); writel(MXS_I2C_CTRL0_SFTRST, i2c->regs + MXS_I2C_CTRL0_SET); } static struct platform_driver mxs_i2c_driver = { .driver = { .name = DRIVER_NAME, .of_match_table = mxs_i2c_dt_ids, }, .probe = mxs_i2c_probe, .remove_new = mxs_i2c_remove, }; static int __init mxs_i2c_init(void) { return platform_driver_register(&mxs_i2c_driver); } subsys_initcall(mxs_i2c_init); static void __exit 
mxs_i2c_exit(void) { platform_driver_unregister(&mxs_i2c_driver); } module_exit(mxs_i2c_exit); MODULE_AUTHOR("Marek Vasut <[email protected]>"); MODULE_AUTHOR("Wolfram Sang <[email protected]>"); MODULE_DESCRIPTION("MXS I2C Bus Driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" DRIVER_NAME);
linux-master
drivers/i2c/busses/i2c-mxs.c
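A detail worth illustrating from the i2c-mxs.c PIO write path above: the address byte and the payload are packed LSB-first into 32-bit words and pushed out with a per-word XFER_COUNT that follows the schedule given in the comment (i = 4k -> 2 bytes, 4k+1 -> 3, 4k+2 -> 4, 4k+3 -> 1, the first word also carrying the address byte). The stand-alone simulation below, with made-up address and payload values, only reproduces that packing arithmetic so the schedule can be checked on a host machine; it is not part of the driver.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t addr_data = 0xa0;			/* assumed 8-bit address + R/W byte */
	uint8_t buf[] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };
	int len = (int)sizeof(buf);
	uint32_t data = (uint32_t)addr_data << 24;	/* the address goes out first */
	int i;

	for (i = 0; i < len; i++) {
		int xmit = 0, xlen;

		data >>= 8;
		data |= (uint32_t)buf[i] << 24;

		if (i + 1 == len)	/* last byte of the message */
			xmit = 1;
		if ((i & 3) == 2)	/* four bytes are ready in "data" */
			xmit = 1;
		if (!xmit)
			continue;

		/* i = 4k -> 2, 4k+1 -> 3, 4k+2 -> 4, 4k+3 -> 1 */
		xlen = ((i % 4) == 3) ? 1 : (i % 4) + 2;

		printf("word 0x%08x  XFER_COUNT=%d\n",
		       (unsigned int)(data >> ((4 - xlen) * 8)), xlen);
	}
	return 0;
}

For the six-byte example above this prints 0x332211a0 with count 4 (address plus three payload bytes) followed by 0x00665544 with count 3, i.e. seven bytes in total, matching the MXS_I2C_CTRL0_XFER_COUNT(msg->len + 1) programmed for the whole write.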
// SPDX-License-Identifier: GPL-2.0-or-later /* * Actions Semiconductor Owl SoC's I2C driver * * Copyright (c) 2014 Actions Semi Inc. * Author: David Liu <[email protected]> * * Copyright (c) 2018 Linaro Ltd. * Author: Manivannan Sadhasivam <[email protected]> */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> /* I2C registers */ #define OWL_I2C_REG_CTL 0x0000 #define OWL_I2C_REG_CLKDIV 0x0004 #define OWL_I2C_REG_STAT 0x0008 #define OWL_I2C_REG_ADDR 0x000C #define OWL_I2C_REG_TXDAT 0x0010 #define OWL_I2C_REG_RXDAT 0x0014 #define OWL_I2C_REG_CMD 0x0018 #define OWL_I2C_REG_FIFOCTL 0x001C #define OWL_I2C_REG_FIFOSTAT 0x0020 #define OWL_I2C_REG_DATCNT 0x0024 #define OWL_I2C_REG_RCNT 0x0028 /* I2Cx_CTL Bit Mask */ #define OWL_I2C_CTL_RB BIT(1) #define OWL_I2C_CTL_GBCC(x) (((x) & 0x3) << 2) #define OWL_I2C_CTL_GBCC_NONE OWL_I2C_CTL_GBCC(0) #define OWL_I2C_CTL_GBCC_START OWL_I2C_CTL_GBCC(1) #define OWL_I2C_CTL_GBCC_STOP OWL_I2C_CTL_GBCC(2) #define OWL_I2C_CTL_GBCC_RSTART OWL_I2C_CTL_GBCC(3) #define OWL_I2C_CTL_IRQE BIT(5) #define OWL_I2C_CTL_EN BIT(7) #define OWL_I2C_CTL_AE BIT(8) #define OWL_I2C_CTL_SHSM BIT(10) #define OWL_I2C_DIV_FACTOR(x) ((x) & 0xff) /* I2Cx_STAT Bit Mask */ #define OWL_I2C_STAT_RACK BIT(0) #define OWL_I2C_STAT_BEB BIT(1) #define OWL_I2C_STAT_IRQP BIT(2) #define OWL_I2C_STAT_LAB BIT(3) #define OWL_I2C_STAT_STPD BIT(4) #define OWL_I2C_STAT_STAD BIT(5) #define OWL_I2C_STAT_BBB BIT(6) #define OWL_I2C_STAT_TCB BIT(7) #define OWL_I2C_STAT_LBST BIT(8) #define OWL_I2C_STAT_SAMB BIT(9) #define OWL_I2C_STAT_SRGC BIT(10) /* I2Cx_CMD Bit Mask */ #define OWL_I2C_CMD_SBE BIT(0) #define OWL_I2C_CMD_RBE BIT(4) #define OWL_I2C_CMD_DE BIT(8) #define OWL_I2C_CMD_NS BIT(9) #define OWL_I2C_CMD_SE BIT(10) #define OWL_I2C_CMD_MSS BIT(11) #define OWL_I2C_CMD_WRS BIT(12) #define OWL_I2C_CMD_SECL BIT(15) #define OWL_I2C_CMD_AS(x) (((x) & 0x7) << 1) #define OWL_I2C_CMD_SAS(x) (((x) & 0x7) << 5) /* I2Cx_FIFOCTL Bit Mask */ #define OWL_I2C_FIFOCTL_NIB BIT(0) #define OWL_I2C_FIFOCTL_RFR BIT(1) #define OWL_I2C_FIFOCTL_TFR BIT(2) /* I2Cc_FIFOSTAT Bit Mask */ #define OWL_I2C_FIFOSTAT_CECB BIT(0) #define OWL_I2C_FIFOSTAT_RNB BIT(1) #define OWL_I2C_FIFOSTAT_RFE BIT(2) #define OWL_I2C_FIFOSTAT_TFF BIT(5) #define OWL_I2C_FIFOSTAT_TFD GENMASK(23, 16) #define OWL_I2C_FIFOSTAT_RFD GENMASK(15, 8) /* I2C bus timeout */ #define OWL_I2C_TIMEOUT_MS (4 * 1000) #define OWL_I2C_TIMEOUT msecs_to_jiffies(OWL_I2C_TIMEOUT_MS) #define OWL_I2C_MAX_RETRIES 50 struct owl_i2c_dev { struct i2c_adapter adap; struct i2c_msg *msg; struct completion msg_complete; struct clk *clk; spinlock_t lock; void __iomem *base; unsigned long clk_rate; u32 bus_freq; u32 msg_ptr; int err; }; static void owl_i2c_update_reg(void __iomem *reg, unsigned int val, bool state) { unsigned int regval; regval = readl(reg); if (state) regval |= val; else regval &= ~val; writel(regval, reg); } static void owl_i2c_reset(struct owl_i2c_dev *i2c_dev) { owl_i2c_update_reg(i2c_dev->base + OWL_I2C_REG_CTL, OWL_I2C_CTL_EN, false); mdelay(1); owl_i2c_update_reg(i2c_dev->base + OWL_I2C_REG_CTL, OWL_I2C_CTL_EN, true); /* Clear status registers */ writel(0, i2c_dev->base + OWL_I2C_REG_STAT); } static int owl_i2c_reset_fifo(struct owl_i2c_dev *i2c_dev) { unsigned int val, timeout = 0; /* Reset FIFO */ owl_i2c_update_reg(i2c_dev->base + OWL_I2C_REG_FIFOCTL, OWL_I2C_FIFOCTL_RFR | 
OWL_I2C_FIFOCTL_TFR, true); /* Wait 50ms for FIFO reset complete */ do { val = readl(i2c_dev->base + OWL_I2C_REG_FIFOCTL); if (!(val & (OWL_I2C_FIFOCTL_RFR | OWL_I2C_FIFOCTL_TFR))) break; usleep_range(500, 1000); } while (timeout++ < OWL_I2C_MAX_RETRIES); if (timeout > OWL_I2C_MAX_RETRIES) { dev_err(&i2c_dev->adap.dev, "FIFO reset timeout\n"); return -ETIMEDOUT; } return 0; } static void owl_i2c_set_freq(struct owl_i2c_dev *i2c_dev) { unsigned int val; val = DIV_ROUND_UP(i2c_dev->clk_rate, i2c_dev->bus_freq * 16); /* Set clock divider factor */ writel(OWL_I2C_DIV_FACTOR(val), i2c_dev->base + OWL_I2C_REG_CLKDIV); } static void owl_i2c_xfer_data(struct owl_i2c_dev *i2c_dev) { struct i2c_msg *msg = i2c_dev->msg; unsigned int stat, fifostat; i2c_dev->err = 0; /* Handle NACK from slave */ fifostat = readl(i2c_dev->base + OWL_I2C_REG_FIFOSTAT); if (fifostat & OWL_I2C_FIFOSTAT_RNB) { i2c_dev->err = -ENXIO; /* Clear NACK error bit by writing "1" */ owl_i2c_update_reg(i2c_dev->base + OWL_I2C_REG_FIFOSTAT, OWL_I2C_FIFOSTAT_RNB, true); return; } /* Handle bus error */ stat = readl(i2c_dev->base + OWL_I2C_REG_STAT); if (stat & OWL_I2C_STAT_BEB) { i2c_dev->err = -EIO; /* Clear BUS error bit by writing "1" */ owl_i2c_update_reg(i2c_dev->base + OWL_I2C_REG_STAT, OWL_I2C_STAT_BEB, true); return; } /* Handle FIFO read */ if (msg->flags & I2C_M_RD) { while ((readl(i2c_dev->base + OWL_I2C_REG_FIFOSTAT) & OWL_I2C_FIFOSTAT_RFE) && i2c_dev->msg_ptr < msg->len) { msg->buf[i2c_dev->msg_ptr++] = readl(i2c_dev->base + OWL_I2C_REG_RXDAT); } } else { /* Handle the remaining bytes which were not sent */ while (!(readl(i2c_dev->base + OWL_I2C_REG_FIFOSTAT) & OWL_I2C_FIFOSTAT_TFF) && i2c_dev->msg_ptr < msg->len) { writel(msg->buf[i2c_dev->msg_ptr++], i2c_dev->base + OWL_I2C_REG_TXDAT); } } } static irqreturn_t owl_i2c_interrupt(int irq, void *_dev) { struct owl_i2c_dev *i2c_dev = _dev; spin_lock(&i2c_dev->lock); owl_i2c_xfer_data(i2c_dev); /* Clear pending interrupts */ owl_i2c_update_reg(i2c_dev->base + OWL_I2C_REG_STAT, OWL_I2C_STAT_IRQP, true); complete_all(&i2c_dev->msg_complete); spin_unlock(&i2c_dev->lock); return IRQ_HANDLED; } static u32 owl_i2c_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static int owl_i2c_check_bus_busy(struct i2c_adapter *adap) { struct owl_i2c_dev *i2c_dev = i2c_get_adapdata(adap); unsigned long timeout; /* Check for Bus busy */ timeout = jiffies + OWL_I2C_TIMEOUT; while (readl(i2c_dev->base + OWL_I2C_REG_STAT) & OWL_I2C_STAT_BBB) { if (time_after(jiffies, timeout)) { dev_err(&adap->dev, "Bus busy timeout\n"); return -ETIMEDOUT; } } return 0; } static int owl_i2c_xfer_common(struct i2c_adapter *adap, struct i2c_msg *msgs, int num, bool atomic) { struct owl_i2c_dev *i2c_dev = i2c_get_adapdata(adap); struct i2c_msg *msg; unsigned long time_left, flags; unsigned int i2c_cmd, val; unsigned int addr; int ret, idx; spin_lock_irqsave(&i2c_dev->lock, flags); /* Reset I2C controller */ owl_i2c_reset(i2c_dev); /* Set bus frequency */ owl_i2c_set_freq(i2c_dev); /* * Spinlock should be released before calling reset FIFO and * bus busy check since those functions may sleep */ spin_unlock_irqrestore(&i2c_dev->lock, flags); /* Reset FIFO */ ret = owl_i2c_reset_fifo(i2c_dev); if (ret) goto unlocked_err_exit; /* Check for bus busy */ ret = owl_i2c_check_bus_busy(adap); if (ret) goto unlocked_err_exit; spin_lock_irqsave(&i2c_dev->lock, flags); /* Check for Arbitration lost */ val = readl(i2c_dev->base + OWL_I2C_REG_STAT); if (val & OWL_I2C_STAT_LAB) { val &= 
~OWL_I2C_STAT_LAB; writel(val, i2c_dev->base + OWL_I2C_REG_STAT); ret = -EAGAIN; goto err_exit; } if (!atomic) reinit_completion(&i2c_dev->msg_complete); /* Enable/disable I2C controller interrupt */ owl_i2c_update_reg(i2c_dev->base + OWL_I2C_REG_CTL, OWL_I2C_CTL_IRQE, !atomic); /* * Select: FIFO enable, Master mode, Stop enable, Data count enable, * Send start bit */ i2c_cmd = OWL_I2C_CMD_SECL | OWL_I2C_CMD_MSS | OWL_I2C_CMD_SE | OWL_I2C_CMD_NS | OWL_I2C_CMD_DE | OWL_I2C_CMD_SBE; /* Handle repeated start condition */ if (num > 1) { /* Set internal address length and enable repeated start */ i2c_cmd |= OWL_I2C_CMD_AS(msgs[0].len + 1) | OWL_I2C_CMD_SAS(1) | OWL_I2C_CMD_RBE; /* Write slave address */ addr = i2c_8bit_addr_from_msg(&msgs[0]); writel(addr, i2c_dev->base + OWL_I2C_REG_TXDAT); /* Write internal register address */ for (idx = 0; idx < msgs[0].len; idx++) writel(msgs[0].buf[idx], i2c_dev->base + OWL_I2C_REG_TXDAT); msg = &msgs[1]; } else { /* Set address length */ i2c_cmd |= OWL_I2C_CMD_AS(1); msg = &msgs[0]; } i2c_dev->msg = msg; i2c_dev->msg_ptr = 0; /* Set data count for the message */ writel(msg->len, i2c_dev->base + OWL_I2C_REG_DATCNT); addr = i2c_8bit_addr_from_msg(msg); writel(addr, i2c_dev->base + OWL_I2C_REG_TXDAT); if (!(msg->flags & I2C_M_RD)) { /* Write data to FIFO */ for (idx = 0; idx < msg->len; idx++) { /* Check for FIFO full */ if (readl(i2c_dev->base + OWL_I2C_REG_FIFOSTAT) & OWL_I2C_FIFOSTAT_TFF) break; writel(msg->buf[idx], i2c_dev->base + OWL_I2C_REG_TXDAT); } i2c_dev->msg_ptr = idx; } /* Ignore the NACK if needed */ if (msg->flags & I2C_M_IGNORE_NAK) owl_i2c_update_reg(i2c_dev->base + OWL_I2C_REG_FIFOCTL, OWL_I2C_FIFOCTL_NIB, true); else owl_i2c_update_reg(i2c_dev->base + OWL_I2C_REG_FIFOCTL, OWL_I2C_FIFOCTL_NIB, false); /* Start the transfer */ writel(i2c_cmd, i2c_dev->base + OWL_I2C_REG_CMD); spin_unlock_irqrestore(&i2c_dev->lock, flags); if (atomic) { /* Wait for Command Execute Completed or NACK Error bits */ ret = readl_poll_timeout_atomic(i2c_dev->base + OWL_I2C_REG_FIFOSTAT, val, val & (OWL_I2C_FIFOSTAT_CECB | OWL_I2C_FIFOSTAT_RNB), 10, OWL_I2C_TIMEOUT_MS * 1000); } else { time_left = wait_for_completion_timeout(&i2c_dev->msg_complete, adap->timeout); if (!time_left) ret = -ETIMEDOUT; } spin_lock_irqsave(&i2c_dev->lock, flags); if (ret) { dev_err(&adap->dev, "Transaction timed out\n"); /* Send stop condition and release the bus */ owl_i2c_update_reg(i2c_dev->base + OWL_I2C_REG_CTL, OWL_I2C_CTL_GBCC_STOP | OWL_I2C_CTL_RB, true); goto err_exit; } if (atomic) owl_i2c_xfer_data(i2c_dev); ret = i2c_dev->err < 0 ? 
i2c_dev->err : num; err_exit: spin_unlock_irqrestore(&i2c_dev->lock, flags); unlocked_err_exit: /* Disable I2C controller */ owl_i2c_update_reg(i2c_dev->base + OWL_I2C_REG_CTL, OWL_I2C_CTL_EN, false); return ret; } static int owl_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { return owl_i2c_xfer_common(adap, msgs, num, false); } static int owl_i2c_xfer_atomic(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { return owl_i2c_xfer_common(adap, msgs, num, true); } static const struct i2c_algorithm owl_i2c_algorithm = { .master_xfer = owl_i2c_xfer, .master_xfer_atomic = owl_i2c_xfer_atomic, .functionality = owl_i2c_func, }; static const struct i2c_adapter_quirks owl_i2c_quirks = { .flags = I2C_AQ_COMB | I2C_AQ_COMB_WRITE_FIRST, .max_read_len = 240, .max_write_len = 240, .max_comb_1st_msg_len = 6, .max_comb_2nd_msg_len = 240, }; static int owl_i2c_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct owl_i2c_dev *i2c_dev; int ret, irq; i2c_dev = devm_kzalloc(dev, sizeof(*i2c_dev), GFP_KERNEL); if (!i2c_dev) return -ENOMEM; i2c_dev->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(i2c_dev->base)) return PTR_ERR(i2c_dev->base); irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; if (of_property_read_u32(dev->of_node, "clock-frequency", &i2c_dev->bus_freq)) i2c_dev->bus_freq = I2C_MAX_STANDARD_MODE_FREQ; /* We support only frequencies of 100k and 400k for now */ if (i2c_dev->bus_freq != I2C_MAX_STANDARD_MODE_FREQ && i2c_dev->bus_freq != I2C_MAX_FAST_MODE_FREQ) { dev_err(dev, "invalid clock-frequency %d\n", i2c_dev->bus_freq); return -EINVAL; } i2c_dev->clk = devm_clk_get_enabled(dev, NULL); if (IS_ERR(i2c_dev->clk)) { dev_err(dev, "failed to enable clock\n"); return PTR_ERR(i2c_dev->clk); } i2c_dev->clk_rate = clk_get_rate(i2c_dev->clk); if (!i2c_dev->clk_rate) { dev_err(dev, "input clock rate should not be zero\n"); return -EINVAL; } init_completion(&i2c_dev->msg_complete); spin_lock_init(&i2c_dev->lock); i2c_dev->adap.owner = THIS_MODULE; i2c_dev->adap.algo = &owl_i2c_algorithm; i2c_dev->adap.timeout = OWL_I2C_TIMEOUT; i2c_dev->adap.quirks = &owl_i2c_quirks; i2c_dev->adap.dev.parent = dev; i2c_dev->adap.dev.of_node = dev->of_node; snprintf(i2c_dev->adap.name, sizeof(i2c_dev->adap.name), "%s", "OWL I2C adapter"); i2c_set_adapdata(&i2c_dev->adap, i2c_dev); platform_set_drvdata(pdev, i2c_dev); ret = devm_request_irq(dev, irq, owl_i2c_interrupt, 0, pdev->name, i2c_dev); if (ret) { dev_err(dev, "failed to request irq %d\n", irq); return ret; } return i2c_add_adapter(&i2c_dev->adap); } static const struct of_device_id owl_i2c_of_match[] = { { .compatible = "actions,s500-i2c" }, { .compatible = "actions,s700-i2c" }, { .compatible = "actions,s900-i2c" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, owl_i2c_of_match); static struct platform_driver owl_i2c_driver = { .probe = owl_i2c_probe, .driver = { .name = "owl-i2c", .of_match_table = owl_i2c_of_match, .probe_type = PROBE_PREFER_ASYNCHRONOUS, }, }; module_platform_driver(owl_i2c_driver); MODULE_AUTHOR("David Liu <[email protected]>"); MODULE_AUTHOR("Manivannan Sadhasivam <[email protected]>"); MODULE_DESCRIPTION("Actions Semiconductor Owl SoC's I2C driver"); MODULE_LICENSE("GPL");
linux-master
drivers/i2c/busses/i2c-owl.c
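The combined-transfer handling in owl_i2c_xfer_common() above explains the adapter quirks: when num > 1 the first (write) message is pushed into the FIFO as an "internal register address" before the repeated start, which is why the quirks require write-first combinations and cap the first message at 6 bytes and the second at 240. A hypothetical client-side sketch of a register read that fits those constraints might look as follows; the function name and register number are illustrative only and do not come from any in-tree client.

#include <linux/errno.h>
#include <linux/i2c.h>

/* Hypothetical helper: read one byte from an 8-bit register of a device
 * sitting behind the Owl adapter, as a write-then-read combined transfer. */
static int example_read_reg(struct i2c_client *client, u8 reg, u8 *val)
{
	struct i2c_msg msgs[2] = {
		{
			.addr  = client->addr,
			.flags = 0,		/* write: register address, 1 byte (quirk allows up to 6) */
			.len   = 1,
			.buf   = &reg,
		},
		{
			.addr  = client->addr,
			.flags = I2C_M_RD,	/* read: up to 240 bytes per the quirks */
			.len   = 1,
			.buf   = val,
		},
	};
	int ret;

	ret = i2c_transfer(client->adapter, msgs, 2);
	if (ret < 0)
		return ret;
	return ret == 2 ? 0 : -EIO;
}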
// SPDX-License-Identifier: GPL-2.0 /* * Microchip CoreI2C I2C controller driver * * Copyright (c) 2018-2022 Microchip Corporation. All rights reserved. * * Author: Daire McNamara <[email protected]> * Author: Conor Dooley <[email protected]> */ #include <linux/clk.h> #include <linux/clkdev.h> #include <linux/err.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #define CORE_I2C_CTRL (0x00) #define CTRL_CR0 BIT(0) #define CTRL_CR1 BIT(1) #define CTRL_AA BIT(2) #define CTRL_SI BIT(3) #define CTRL_STO BIT(4) #define CTRL_STA BIT(5) #define CTRL_ENS1 BIT(6) #define CTRL_CR2 BIT(7) #define STATUS_BUS_ERROR (0x00) #define STATUS_M_START_SENT (0x08) #define STATUS_M_REPEATED_START_SENT (0x10) #define STATUS_M_SLAW_ACK (0x18) #define STATUS_M_SLAW_NACK (0x20) #define STATUS_M_TX_DATA_ACK (0x28) #define STATUS_M_TX_DATA_NACK (0x30) #define STATUS_M_ARB_LOST (0x38) #define STATUS_M_SLAR_ACK (0x40) #define STATUS_M_SLAR_NACK (0x48) #define STATUS_M_RX_DATA_ACKED (0x50) #define STATUS_M_RX_DATA_NACKED (0x58) #define STATUS_S_SLAW_ACKED (0x60) #define STATUS_S_ARB_LOST_SLAW_ACKED (0x68) #define STATUS_S_GENERAL_CALL_ACKED (0x70) #define STATUS_S_ARB_LOST_GENERAL_CALL_ACKED (0x78) #define STATUS_S_RX_DATA_ACKED (0x80) #define STATUS_S_RX_DATA_NACKED (0x88) #define STATUS_S_GENERAL_CALL_RX_DATA_ACKED (0x90) #define STATUS_S_GENERAL_CALL_RX_DATA_NACKED (0x98) #define STATUS_S_RX_STOP (0xA0) #define STATUS_S_SLAR_ACKED (0xA8) #define STATUS_S_ARB_LOST_SLAR_ACKED (0xB0) #define STATUS_S_TX_DATA_ACK (0xB8) #define STATUS_S_TX_DATA_NACK (0xC0) #define STATUS_LAST_DATA_ACK (0xC8) #define STATUS_M_SMB_MASTER_RESET (0xD0) #define STATUS_S_SCL_LOW_TIMEOUT (0xD8) /* 25 ms */ #define STATUS_NO_STATE_INFO (0xF8) #define CORE_I2C_STATUS (0x04) #define CORE_I2C_DATA (0x08) #define WRITE_BIT (0x0) #define READ_BIT (0x1) #define SLAVE_ADDR_SHIFT (1) #define CORE_I2C_SLAVE0_ADDR (0x0c) #define GENERAL_CALL_BIT (0x0) #define CORE_I2C_SMBUS (0x10) #define SMBALERT_INT_ENB (0x0) #define SMBSUS_INT_ENB (0x1) #define SMBUS_ENB (0x2) #define SMBALERT_NI_STATUS (0x3) #define SMBALERT_NO_CTRL (0x4) #define SMBSUS_NI_STATUS (0x5) #define SMBSUS_NO_CTRL (0x6) #define SMBUS_RESET (0x7) #define CORE_I2C_FREQ (0x14) #define CORE_I2C_GLITCHREG (0x18) #define CORE_I2C_SLAVE1_ADDR (0x1c) #define PCLK_DIV_960 (CTRL_CR2) #define PCLK_DIV_256 (0) #define PCLK_DIV_224 (CTRL_CR0) #define PCLK_DIV_192 (CTRL_CR1) #define PCLK_DIV_160 (CTRL_CR0 | CTRL_CR1) #define PCLK_DIV_120 (CTRL_CR0 | CTRL_CR2) #define PCLK_DIV_60 (CTRL_CR1 | CTRL_CR2) #define BCLK_DIV_8 (CTRL_CR0 | CTRL_CR1 | CTRL_CR2) #define CLK_MASK (CTRL_CR0 | CTRL_CR1 | CTRL_CR2) /** * struct mchp_corei2c_dev - Microchip CoreI2C device private data * * @base: pointer to register struct * @dev: device reference * @i2c_clk: clock reference for i2c input clock * @buf: pointer to msg buffer for easier use * @msg_complete: xfer completion object * @adapter: core i2c abstraction * @msg_err: error code for completed message * @bus_clk_rate: current i2c bus clock rate * @isr_status: cached copy of local ISR status * @msg_len: number of bytes transferred in msg * @addr: address of the current slave */ struct mchp_corei2c_dev { void __iomem *base; struct device *dev; struct clk *i2c_clk; u8 *buf; struct completion msg_complete; struct i2c_adapter adapter; int msg_err; u32 bus_clk_rate; u32 isr_status; u16 msg_len; u8 addr; }; static void mchp_corei2c_core_disable(struct 
mchp_corei2c_dev *idev) { u8 ctrl = readb(idev->base + CORE_I2C_CTRL); ctrl &= ~CTRL_ENS1; writeb(ctrl, idev->base + CORE_I2C_CTRL); } static void mchp_corei2c_core_enable(struct mchp_corei2c_dev *idev) { u8 ctrl = readb(idev->base + CORE_I2C_CTRL); ctrl |= CTRL_ENS1; writeb(ctrl, idev->base + CORE_I2C_CTRL); } static void mchp_corei2c_reset(struct mchp_corei2c_dev *idev) { mchp_corei2c_core_disable(idev); mchp_corei2c_core_enable(idev); } static inline void mchp_corei2c_stop(struct mchp_corei2c_dev *idev) { u8 ctrl = readb(idev->base + CORE_I2C_CTRL); ctrl |= CTRL_STO; writeb(ctrl, idev->base + CORE_I2C_CTRL); } static inline int mchp_corei2c_set_divisor(u32 rate, struct mchp_corei2c_dev *idev) { u8 clkval, ctrl; if (rate >= 960) clkval = PCLK_DIV_960; else if (rate >= 256) clkval = PCLK_DIV_256; else if (rate >= 224) clkval = PCLK_DIV_224; else if (rate >= 192) clkval = PCLK_DIV_192; else if (rate >= 160) clkval = PCLK_DIV_160; else if (rate >= 120) clkval = PCLK_DIV_120; else if (rate >= 60) clkval = PCLK_DIV_60; else if (rate >= 8) clkval = BCLK_DIV_8; else return -EINVAL; ctrl = readb(idev->base + CORE_I2C_CTRL); ctrl &= ~CLK_MASK; ctrl |= clkval; writeb(ctrl, idev->base + CORE_I2C_CTRL); ctrl = readb(idev->base + CORE_I2C_CTRL); if ((ctrl & CLK_MASK) != clkval) return -EIO; return 0; } static int mchp_corei2c_init(struct mchp_corei2c_dev *idev) { u32 clk_rate = clk_get_rate(idev->i2c_clk); u32 divisor = clk_rate / idev->bus_clk_rate; int ret; ret = mchp_corei2c_set_divisor(divisor, idev); if (ret) return ret; mchp_corei2c_reset(idev); return 0; } static void mchp_corei2c_empty_rx(struct mchp_corei2c_dev *idev) { u8 ctrl; if (idev->msg_len > 0) { *idev->buf++ = readb(idev->base + CORE_I2C_DATA); idev->msg_len--; } if (idev->msg_len <= 1) { ctrl = readb(idev->base + CORE_I2C_CTRL); ctrl &= ~CTRL_AA; writeb(ctrl, idev->base + CORE_I2C_CTRL); } } static int mchp_corei2c_fill_tx(struct mchp_corei2c_dev *idev) { if (idev->msg_len > 0) writeb(*idev->buf++, idev->base + CORE_I2C_DATA); idev->msg_len--; return 0; } static irqreturn_t mchp_corei2c_handle_isr(struct mchp_corei2c_dev *idev) { u32 status = idev->isr_status; u8 ctrl; bool last_byte = false, finished = false; if (!idev->buf) return IRQ_NONE; switch (status) { case STATUS_M_START_SENT: case STATUS_M_REPEATED_START_SENT: ctrl = readb(idev->base + CORE_I2C_CTRL); ctrl &= ~CTRL_STA; writeb(idev->addr, idev->base + CORE_I2C_DATA); writeb(ctrl, idev->base + CORE_I2C_CTRL); if (idev->msg_len == 0) finished = true; break; case STATUS_M_ARB_LOST: idev->msg_err = -EAGAIN; finished = true; break; case STATUS_M_SLAW_ACK: case STATUS_M_TX_DATA_ACK: if (idev->msg_len > 0) mchp_corei2c_fill_tx(idev); else last_byte = true; break; case STATUS_M_TX_DATA_NACK: case STATUS_M_SLAR_NACK: case STATUS_M_SLAW_NACK: idev->msg_err = -ENXIO; last_byte = true; break; case STATUS_M_SLAR_ACK: ctrl = readb(idev->base + CORE_I2C_CTRL); if (idev->msg_len == 1u) { ctrl &= ~CTRL_AA; writeb(ctrl, idev->base + CORE_I2C_CTRL); } else { ctrl |= CTRL_AA; writeb(ctrl, idev->base + CORE_I2C_CTRL); } if (idev->msg_len < 1u) last_byte = true; break; case STATUS_M_RX_DATA_ACKED: mchp_corei2c_empty_rx(idev); break; case STATUS_M_RX_DATA_NACKED: mchp_corei2c_empty_rx(idev); if (idev->msg_len == 0) last_byte = true; break; default: break; } /* On the last byte to be transmitted, send STOP */ if (last_byte) mchp_corei2c_stop(idev); if (last_byte || finished) complete(&idev->msg_complete); return IRQ_HANDLED; } static irqreturn_t mchp_corei2c_isr(int irq, void *_dev) { struct 
mchp_corei2c_dev *idev = _dev; irqreturn_t ret = IRQ_NONE; u8 ctrl; ctrl = readb(idev->base + CORE_I2C_CTRL); if (ctrl & CTRL_SI) { idev->isr_status = readb(idev->base + CORE_I2C_STATUS); ret = mchp_corei2c_handle_isr(idev); } ctrl = readb(idev->base + CORE_I2C_CTRL); ctrl &= ~CTRL_SI; writeb(ctrl, idev->base + CORE_I2C_CTRL); return ret; } static int mchp_corei2c_xfer_msg(struct mchp_corei2c_dev *idev, struct i2c_msg *msg) { u8 ctrl; unsigned long time_left; idev->addr = i2c_8bit_addr_from_msg(msg); idev->msg_len = msg->len; idev->buf = msg->buf; idev->msg_err = 0; reinit_completion(&idev->msg_complete); mchp_corei2c_core_enable(idev); ctrl = readb(idev->base + CORE_I2C_CTRL); ctrl |= CTRL_STA; writeb(ctrl, idev->base + CORE_I2C_CTRL); time_left = wait_for_completion_timeout(&idev->msg_complete, idev->adapter.timeout); if (!time_left) return -ETIMEDOUT; return idev->msg_err; } static int mchp_corei2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct mchp_corei2c_dev *idev = i2c_get_adapdata(adap); int i, ret; for (i = 0; i < num; i++) { ret = mchp_corei2c_xfer_msg(idev, msgs++); if (ret) return ret; } return num; } static u32 mchp_corei2c_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static const struct i2c_algorithm mchp_corei2c_algo = { .master_xfer = mchp_corei2c_xfer, .functionality = mchp_corei2c_func, }; static int mchp_corei2c_probe(struct platform_device *pdev) { struct mchp_corei2c_dev *idev; struct resource *res; int irq, ret; idev = devm_kzalloc(&pdev->dev, sizeof(*idev), GFP_KERNEL); if (!idev) return -ENOMEM; idev->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(idev->base)) return PTR_ERR(idev->base); irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; idev->i2c_clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(idev->i2c_clk)) return dev_err_probe(&pdev->dev, PTR_ERR(idev->i2c_clk), "missing clock\n"); idev->dev = &pdev->dev; init_completion(&idev->msg_complete); ret = device_property_read_u32(idev->dev, "clock-frequency", &idev->bus_clk_rate); if (ret || !idev->bus_clk_rate) { dev_info(&pdev->dev, "default to 100kHz\n"); idev->bus_clk_rate = 100000; } if (idev->bus_clk_rate > 400000) return dev_err_probe(&pdev->dev, -EINVAL, "clock-frequency too high: %d\n", idev->bus_clk_rate); /* * This driver supports both the hard peripherals & soft FPGA cores. * The hard peripherals do not have shared IRQs, but we don't have * control over what way the interrupts are wired for the soft cores. 
*/ ret = devm_request_irq(&pdev->dev, irq, mchp_corei2c_isr, IRQF_SHARED, pdev->name, idev); if (ret) return dev_err_probe(&pdev->dev, ret, "failed to claim irq %d\n", irq); ret = clk_prepare_enable(idev->i2c_clk); if (ret) return dev_err_probe(&pdev->dev, ret, "failed to enable clock\n"); ret = mchp_corei2c_init(idev); if (ret) { clk_disable_unprepare(idev->i2c_clk); return dev_err_probe(&pdev->dev, ret, "failed to program clock divider\n"); } i2c_set_adapdata(&idev->adapter, idev); snprintf(idev->adapter.name, sizeof(idev->adapter.name), "Microchip I2C hw bus at %08lx", (unsigned long)res->start); idev->adapter.owner = THIS_MODULE; idev->adapter.algo = &mchp_corei2c_algo; idev->adapter.dev.parent = &pdev->dev; idev->adapter.dev.of_node = pdev->dev.of_node; idev->adapter.timeout = HZ; platform_set_drvdata(pdev, idev); ret = i2c_add_adapter(&idev->adapter); if (ret) { clk_disable_unprepare(idev->i2c_clk); return ret; } dev_info(&pdev->dev, "registered CoreI2C bus driver\n"); return 0; } static void mchp_corei2c_remove(struct platform_device *pdev) { struct mchp_corei2c_dev *idev = platform_get_drvdata(pdev); clk_disable_unprepare(idev->i2c_clk); i2c_del_adapter(&idev->adapter); } static const struct of_device_id mchp_corei2c_of_match[] = { { .compatible = "microchip,mpfs-i2c" }, { .compatible = "microchip,corei2c-rtl-v7" }, {}, }; MODULE_DEVICE_TABLE(of, mchp_corei2c_of_match); static struct platform_driver mchp_corei2c_driver = { .probe = mchp_corei2c_probe, .remove_new = mchp_corei2c_remove, .driver = { .name = "microchip-corei2c", .of_match_table = mchp_corei2c_of_match, }, }; module_platform_driver(mchp_corei2c_driver); MODULE_DESCRIPTION("Microchip CoreI2C bus driver"); MODULE_AUTHOR("Daire McNamara <[email protected]>"); MODULE_AUTHOR("Conor Dooley <[email protected]>"); MODULE_LICENSE("GPL");
linux-master
drivers/i2c/busses/i2c-microchip-corei2c.c
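The SCL rate programmed by this driver comes from dividing the input clock by the requested bus rate and mapping that ratio onto one of eight fixed dividers in mchp_corei2c_set_divisor(). A minimal standalone sketch of that mapping, using assumed example clock values that do not come from the driver:

/*
 * Standalone sketch (not part of the driver): mirrors the divider
 * selection in mchp_corei2c_set_divisor().  The driver divides the
 * input clock by the requested bus rate and picks the largest
 * supported divider that does not exceed that ratio.
 */
#include <stdio.h>

struct div_opt { unsigned int div; const char *name; };

static const struct div_opt opts[] = {
	{ 960, "PCLK_DIV_960" }, { 256, "PCLK_DIV_256" },
	{ 224, "PCLK_DIV_224" }, { 192, "PCLK_DIV_192" },
	{ 160, "PCLK_DIV_160" }, { 120, "PCLK_DIV_120" },
	{  60, "PCLK_DIV_60"  }, {   8, "BCLK_DIV_8"   },
};

int main(void)
{
	/* Assumed example values: 100 MHz pclk, 100 kHz bus (not taken from the driver). */
	unsigned long pclk = 100000000, bus = 100000;
	unsigned long ratio = pclk / bus;	/* 1000 */
	unsigned int i;

	for (i = 0; i < sizeof(opts) / sizeof(opts[0]); i++) {
		if (ratio >= opts[i].div) {
			printf("ratio %lu -> %s, actual SCL ~%lu Hz\n",
			       ratio, opts[i].name, pclk / opts[i].div);
			return 0;
		}
	}
	printf("ratio %lu too small, driver would return -EINVAL\n", ratio);
	return 1;
}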
// SPDX-License-Identifier: GPL-2.0-only /* * Driver for RobotFuzz OSIF * * Copyright (c) 2013 Andrew Lunn <[email protected]> * Copyright (c) 2007 Barry Carter <[email protected]> * * Based on the i2c-tiny-usb by * * Copyright (C) 2006 Til Harbaum ([email protected]) */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/i2c.h> #include <linux/slab.h> #include <linux/usb.h> #define OSIFI2C_READ 20 #define OSIFI2C_WRITE 21 #define OSIFI2C_STOP 22 #define OSIFI2C_STATUS 23 #define OSIFI2C_SET_BIT_RATE 24 #define STATUS_ADDRESS_ACK 0 #define STATUS_ADDRESS_NAK 2 struct osif_priv { struct usb_device *usb_dev; struct usb_interface *interface; struct i2c_adapter adapter; unsigned char status; }; static int osif_usb_read(struct i2c_adapter *adapter, int cmd, int value, int index, void *data, int len) { struct osif_priv *priv = adapter->algo_data; return usb_control_msg(priv->usb_dev, usb_rcvctrlpipe(priv->usb_dev, 0), cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_IN, value, index, data, len, 2000); } static int osif_usb_write(struct i2c_adapter *adapter, int cmd, int value, int index, void *data, int len) { struct osif_priv *priv = adapter->algo_data; return usb_control_msg(priv->usb_dev, usb_sndctrlpipe(priv->usb_dev, 0), cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE, value, index, data, len, 2000); } static int osif_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) { struct osif_priv *priv = adapter->algo_data; struct i2c_msg *pmsg; int ret; int i; for (i = 0; i < num; i++) { pmsg = &msgs[i]; if (pmsg->flags & I2C_M_RD) { ret = osif_usb_read(adapter, OSIFI2C_READ, pmsg->flags, pmsg->addr, pmsg->buf, pmsg->len); if (ret != pmsg->len) { dev_err(&adapter->dev, "failure reading data\n"); return -EREMOTEIO; } } else { ret = osif_usb_write(adapter, OSIFI2C_WRITE, pmsg->flags, pmsg->addr, pmsg->buf, pmsg->len); if (ret != pmsg->len) { dev_err(&adapter->dev, "failure writing data\n"); return -EREMOTEIO; } } ret = osif_usb_write(adapter, OSIFI2C_STOP, 0, 0, NULL, 0); if (ret) { dev_err(&adapter->dev, "failure sending STOP\n"); return -EREMOTEIO; } /* read status */ ret = osif_usb_read(adapter, OSIFI2C_STATUS, 0, 0, &priv->status, 1); if (ret != 1) { dev_err(&adapter->dev, "failure reading status\n"); return -EREMOTEIO; } if (priv->status != STATUS_ADDRESS_ACK) { dev_dbg(&adapter->dev, "status = %d\n", priv->status); return -EREMOTEIO; } } return i; } static u32 osif_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static const struct i2c_algorithm osif_algorithm = { .master_xfer = osif_xfer, .functionality = osif_func, }; #define USB_OSIF_VENDOR_ID 0x1964 #define USB_OSIF_PRODUCT_ID 0x0001 static const struct usb_device_id osif_table[] = { { USB_DEVICE(USB_OSIF_VENDOR_ID, USB_OSIF_PRODUCT_ID) }, { } }; MODULE_DEVICE_TABLE(usb, osif_table); static int osif_probe(struct usb_interface *interface, const struct usb_device_id *id) { int ret; struct osif_priv *priv; u16 version; priv = devm_kzalloc(&interface->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->usb_dev = usb_get_dev(interface_to_usbdev(interface)); priv->interface = interface; usb_set_intfdata(interface, priv); priv->adapter.owner = THIS_MODULE; priv->adapter.class = I2C_CLASS_HWMON; priv->adapter.algo = &osif_algorithm; priv->adapter.algo_data = priv; snprintf(priv->adapter.name, sizeof(priv->adapter.name), "OSIF at bus %03d device %03d", priv->usb_dev->bus->busnum, priv->usb_dev->devnum); /* * Set bus frequency. 
The frequency is: * 120,000,000 / ( 16 + 2 * div * 4^prescale). * Using dev = 52, prescale = 0 give 100KHz */ ret = osif_usb_write(&priv->adapter, OSIFI2C_SET_BIT_RATE, 52, 0, NULL, 0); if (ret) { dev_err(&interface->dev, "failure sending bit rate"); usb_put_dev(priv->usb_dev); return ret; } i2c_add_adapter(&(priv->adapter)); version = le16_to_cpu(priv->usb_dev->descriptor.bcdDevice); dev_info(&interface->dev, "version %x.%02x found at bus %03d address %03d", version >> 8, version & 0xff, priv->usb_dev->bus->busnum, priv->usb_dev->devnum); return 0; } static void osif_disconnect(struct usb_interface *interface) { struct osif_priv *priv = usb_get_intfdata(interface); i2c_del_adapter(&(priv->adapter)); usb_set_intfdata(interface, NULL); usb_put_dev(priv->usb_dev); } static struct usb_driver osif_driver = { .name = "RobotFuzz Open Source InterFace, OSIF", .probe = osif_probe, .disconnect = osif_disconnect, .id_table = osif_table, }; module_usb_driver(osif_driver); MODULE_AUTHOR("Andrew Lunn <[email protected]>"); MODULE_AUTHOR("Barry Carter <[email protected]>"); MODULE_DESCRIPTION("RobotFuzz OSIF driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/i2c/busses/i2c-robotfuzz-osif.c
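The transfer path above is a fixed sequence of USB vendor control requests: a READ or WRITE request carrying the i2c_msg flags in wValue and the 7-bit address in wIndex, followed by a STOP request and a one-byte STATUS readback. A host-side sketch of the same sequence using libusb-1.0 (the request numbers and direction bits come from the driver; device discovery, claiming and error handling are assumed and left out):

/*
 * Host-side sketch (not part of the kernel driver): the vendor
 * control-transfer sequence i2c-robotfuzz-osif.c issues, expressed
 * with libusb-1.0, reading one byte from a device at a 7-bit address.
 */
#include <libusb-1.0/libusb.h>

#define OSIFI2C_READ	20
#define OSIFI2C_STOP	22
#define OSIFI2C_STATUS	23

/* bmRequestType: vendor | interface, plus IN for reads */
#define REQ_OUT	(LIBUSB_REQUEST_TYPE_VENDOR | LIBUSB_RECIPIENT_INTERFACE)
#define REQ_IN	(REQ_OUT | LIBUSB_ENDPOINT_IN)

int osif_read_byte(libusb_device_handle *h, unsigned char addr, unsigned char *out)
{
	unsigned char status;

	/* wValue carries the i2c_msg flags (I2C_M_RD == 0x0001), wIndex the address */
	if (libusb_control_transfer(h, REQ_IN, OSIFI2C_READ, 0x0001, addr,
				    out, 1, 2000) != 1)
		return -1;
	/* STOP has no payload */
	if (libusb_control_transfer(h, REQ_OUT, OSIFI2C_STOP, 0, 0, NULL, 0, 2000) < 0)
		return -1;
	/* one status byte: 0 == STATUS_ADDRESS_ACK */
	if (libusb_control_transfer(h, REQ_IN, OSIFI2C_STATUS, 0, 0, &status, 1, 2000) != 1)
		return -1;
	return status == 0 ? 0 : -1;
}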
// SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. // Copyright (c) 2017-2022 Linaro Limited. #include <linux/clk.h> #include <linux/completion.h> #include <linux/i2c.h> #include <linux/io.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #define CCI_HW_VERSION 0x0 #define CCI_RESET_CMD 0x004 #define CCI_RESET_CMD_MASK 0x0f73f3f7 #define CCI_RESET_CMD_M0_MASK 0x000003f1 #define CCI_RESET_CMD_M1_MASK 0x0003f001 #define CCI_QUEUE_START 0x008 #define CCI_HALT_REQ 0x034 #define CCI_HALT_REQ_I2C_M0_Q0Q1 BIT(0) #define CCI_HALT_REQ_I2C_M1_Q0Q1 BIT(1) #define CCI_I2C_Mm_SCL_CTL(m) (0x100 + 0x100 * (m)) #define CCI_I2C_Mm_SDA_CTL_0(m) (0x104 + 0x100 * (m)) #define CCI_I2C_Mm_SDA_CTL_1(m) (0x108 + 0x100 * (m)) #define CCI_I2C_Mm_SDA_CTL_2(m) (0x10c + 0x100 * (m)) #define CCI_I2C_Mm_MISC_CTL(m) (0x110 + 0x100 * (m)) #define CCI_I2C_Mm_READ_DATA(m) (0x118 + 0x100 * (m)) #define CCI_I2C_Mm_READ_BUF_LEVEL(m) (0x11c + 0x100 * (m)) #define CCI_I2C_Mm_Qn_EXEC_WORD_CNT(m, n) (0x300 + 0x200 * (m) + 0x100 * (n)) #define CCI_I2C_Mm_Qn_CUR_WORD_CNT(m, n) (0x304 + 0x200 * (m) + 0x100 * (n)) #define CCI_I2C_Mm_Qn_CUR_CMD(m, n) (0x308 + 0x200 * (m) + 0x100 * (n)) #define CCI_I2C_Mm_Qn_REPORT_STATUS(m, n) (0x30c + 0x200 * (m) + 0x100 * (n)) #define CCI_I2C_Mm_Qn_LOAD_DATA(m, n) (0x310 + 0x200 * (m) + 0x100 * (n)) #define CCI_IRQ_GLOBAL_CLEAR_CMD 0xc00 #define CCI_IRQ_MASK_0 0xc04 #define CCI_IRQ_MASK_0_I2C_M0_RD_DONE BIT(0) #define CCI_IRQ_MASK_0_I2C_M0_Q0_REPORT BIT(4) #define CCI_IRQ_MASK_0_I2C_M0_Q1_REPORT BIT(8) #define CCI_IRQ_MASK_0_I2C_M1_RD_DONE BIT(12) #define CCI_IRQ_MASK_0_I2C_M1_Q0_REPORT BIT(16) #define CCI_IRQ_MASK_0_I2C_M1_Q1_REPORT BIT(20) #define CCI_IRQ_MASK_0_RST_DONE_ACK BIT(24) #define CCI_IRQ_MASK_0_I2C_M0_Q0Q1_HALT_ACK BIT(25) #define CCI_IRQ_MASK_0_I2C_M1_Q0Q1_HALT_ACK BIT(26) #define CCI_IRQ_MASK_0_I2C_M0_ERROR 0x18000ee6 #define CCI_IRQ_MASK_0_I2C_M1_ERROR 0x60ee6000 #define CCI_IRQ_CLEAR_0 0xc08 #define CCI_IRQ_STATUS_0 0xc0c #define CCI_IRQ_STATUS_0_I2C_M0_RD_DONE BIT(0) #define CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT BIT(4) #define CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT BIT(8) #define CCI_IRQ_STATUS_0_I2C_M1_RD_DONE BIT(12) #define CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT BIT(16) #define CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT BIT(20) #define CCI_IRQ_STATUS_0_RST_DONE_ACK BIT(24) #define CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK BIT(25) #define CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK BIT(26) #define CCI_IRQ_STATUS_0_I2C_M0_Q0_NACK_ERR BIT(27) #define CCI_IRQ_STATUS_0_I2C_M0_Q1_NACK_ERR BIT(28) #define CCI_IRQ_STATUS_0_I2C_M1_Q0_NACK_ERR BIT(29) #define CCI_IRQ_STATUS_0_I2C_M1_Q1_NACK_ERR BIT(30) #define CCI_IRQ_STATUS_0_I2C_M0_ERROR 0x18000ee6 #define CCI_IRQ_STATUS_0_I2C_M1_ERROR 0x60ee6000 #define CCI_TIMEOUT (msecs_to_jiffies(100)) #define NUM_MASTERS 2 #define NUM_QUEUES 2 /* Max number of resources + 1 for a NULL terminator */ #define CCI_RES_MAX 6 #define CCI_I2C_SET_PARAM 1 #define CCI_I2C_REPORT 8 #define CCI_I2C_WRITE 9 #define CCI_I2C_READ 10 #define CCI_I2C_REPORT_IRQ_EN BIT(8) enum { I2C_MODE_STANDARD, I2C_MODE_FAST, I2C_MODE_FAST_PLUS, }; enum cci_i2c_queue_t { QUEUE_0, QUEUE_1 }; struct hw_params { u16 thigh; /* HIGH period of the SCL clock in clock ticks */ u16 tlow; /* LOW period of the SCL clock */ u16 tsu_sto; /* set-up time for STOP condition */ u16 tsu_sta; /* set-up time for a repeated START condition */ u16 thd_dat; /* data hold time */ u16 
thd_sta; /* hold time (repeated) START condition */ u16 tbuf; /* bus free time between a STOP and START condition */ u8 scl_stretch_en; u16 trdhld; u16 tsp; /* pulse width of spikes suppressed by the input filter */ }; struct cci; struct cci_master { struct i2c_adapter adap; u16 master; u8 mode; int status; struct completion irq_complete; struct cci *cci; }; struct cci_data { unsigned int num_masters; struct i2c_adapter_quirks quirks; u16 queue_size[NUM_QUEUES]; unsigned long cci_clk_rate; struct hw_params params[3]; }; struct cci { struct device *dev; void __iomem *base; unsigned int irq; const struct cci_data *data; struct clk_bulk_data *clocks; int nclocks; struct cci_master master[NUM_MASTERS]; }; static irqreturn_t cci_isr(int irq, void *dev) { struct cci *cci = dev; u32 val, reset = 0; int ret = IRQ_NONE; val = readl(cci->base + CCI_IRQ_STATUS_0); writel(val, cci->base + CCI_IRQ_CLEAR_0); writel(0x1, cci->base + CCI_IRQ_GLOBAL_CLEAR_CMD); if (val & CCI_IRQ_STATUS_0_RST_DONE_ACK) { complete(&cci->master[0].irq_complete); if (cci->master[1].master) complete(&cci->master[1].irq_complete); ret = IRQ_HANDLED; } if (val & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE || val & CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT || val & CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT) { cci->master[0].status = 0; complete(&cci->master[0].irq_complete); ret = IRQ_HANDLED; } if (val & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE || val & CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT || val & CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT) { cci->master[1].status = 0; complete(&cci->master[1].irq_complete); ret = IRQ_HANDLED; } if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK)) { reset = CCI_RESET_CMD_M0_MASK; ret = IRQ_HANDLED; } if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK)) { reset = CCI_RESET_CMD_M1_MASK; ret = IRQ_HANDLED; } if (unlikely(reset)) writel(reset, cci->base + CCI_RESET_CMD); if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M0_ERROR)) { if (val & CCI_IRQ_STATUS_0_I2C_M0_Q0_NACK_ERR || val & CCI_IRQ_STATUS_0_I2C_M0_Q1_NACK_ERR) cci->master[0].status = -ENXIO; else cci->master[0].status = -EIO; writel(CCI_HALT_REQ_I2C_M0_Q0Q1, cci->base + CCI_HALT_REQ); ret = IRQ_HANDLED; } if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M1_ERROR)) { if (val & CCI_IRQ_STATUS_0_I2C_M1_Q0_NACK_ERR || val & CCI_IRQ_STATUS_0_I2C_M1_Q1_NACK_ERR) cci->master[1].status = -ENXIO; else cci->master[1].status = -EIO; writel(CCI_HALT_REQ_I2C_M1_Q0Q1, cci->base + CCI_HALT_REQ); ret = IRQ_HANDLED; } return ret; } static int cci_halt(struct cci *cci, u8 master_num) { struct cci_master *master; u32 val; if (master_num >= cci->data->num_masters) { dev_err(cci->dev, "Unsupported master idx (%u)\n", master_num); return -EINVAL; } val = BIT(master_num); master = &cci->master[master_num]; reinit_completion(&master->irq_complete); writel(val, cci->base + CCI_HALT_REQ); if (!wait_for_completion_timeout(&master->irq_complete, CCI_TIMEOUT)) { dev_err(cci->dev, "CCI halt timeout\n"); return -ETIMEDOUT; } return 0; } static int cci_reset(struct cci *cci) { /* * we reset the whole controller, here and for implicity use * master[0].xxx for waiting on it. 
*/ reinit_completion(&cci->master[0].irq_complete); writel(CCI_RESET_CMD_MASK, cci->base + CCI_RESET_CMD); if (!wait_for_completion_timeout(&cci->master[0].irq_complete, CCI_TIMEOUT)) { dev_err(cci->dev, "CCI reset timeout\n"); return -ETIMEDOUT; } return 0; } static int cci_init(struct cci *cci) { u32 val = CCI_IRQ_MASK_0_I2C_M0_RD_DONE | CCI_IRQ_MASK_0_I2C_M0_Q0_REPORT | CCI_IRQ_MASK_0_I2C_M0_Q1_REPORT | CCI_IRQ_MASK_0_I2C_M1_RD_DONE | CCI_IRQ_MASK_0_I2C_M1_Q0_REPORT | CCI_IRQ_MASK_0_I2C_M1_Q1_REPORT | CCI_IRQ_MASK_0_RST_DONE_ACK | CCI_IRQ_MASK_0_I2C_M0_Q0Q1_HALT_ACK | CCI_IRQ_MASK_0_I2C_M1_Q0Q1_HALT_ACK | CCI_IRQ_MASK_0_I2C_M0_ERROR | CCI_IRQ_MASK_0_I2C_M1_ERROR; int i; writel(val, cci->base + CCI_IRQ_MASK_0); for (i = 0; i < cci->data->num_masters; i++) { int mode = cci->master[i].mode; const struct hw_params *hw; if (!cci->master[i].cci) continue; hw = &cci->data->params[mode]; val = hw->thigh << 16 | hw->tlow; writel(val, cci->base + CCI_I2C_Mm_SCL_CTL(i)); val = hw->tsu_sto << 16 | hw->tsu_sta; writel(val, cci->base + CCI_I2C_Mm_SDA_CTL_0(i)); val = hw->thd_dat << 16 | hw->thd_sta; writel(val, cci->base + CCI_I2C_Mm_SDA_CTL_1(i)); val = hw->tbuf; writel(val, cci->base + CCI_I2C_Mm_SDA_CTL_2(i)); val = hw->scl_stretch_en << 8 | hw->trdhld << 4 | hw->tsp; writel(val, cci->base + CCI_I2C_Mm_MISC_CTL(i)); } return 0; } static int cci_run_queue(struct cci *cci, u8 master, u8 queue) { u32 val; val = readl(cci->base + CCI_I2C_Mm_Qn_CUR_WORD_CNT(master, queue)); writel(val, cci->base + CCI_I2C_Mm_Qn_EXEC_WORD_CNT(master, queue)); reinit_completion(&cci->master[master].irq_complete); val = BIT(master * 2 + queue); writel(val, cci->base + CCI_QUEUE_START); if (!wait_for_completion_timeout(&cci->master[master].irq_complete, CCI_TIMEOUT)) { dev_err(cci->dev, "master %d queue %d timeout\n", master, queue); cci_reset(cci); cci_init(cci); return -ETIMEDOUT; } return cci->master[master].status; } static int cci_validate_queue(struct cci *cci, u8 master, u8 queue) { u32 val; val = readl(cci->base + CCI_I2C_Mm_Qn_CUR_WORD_CNT(master, queue)); if (val == cci->data->queue_size[queue]) return -EINVAL; if (!val) return 0; val = CCI_I2C_REPORT | CCI_I2C_REPORT_IRQ_EN; writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue)); return cci_run_queue(cci, master, queue); } static int cci_i2c_read(struct cci *cci, u16 master, u16 addr, u8 *buf, u16 len) { u32 val, words_read, words_exp; u8 queue = QUEUE_1; int i, index = 0, ret; bool first = true; /* * Call validate queue to make sure queue is empty before starting. * This is to avoid overflow / underflow of queue. */ ret = cci_validate_queue(cci, master, queue); if (ret < 0) return ret; val = CCI_I2C_SET_PARAM | (addr & 0x7f) << 4; writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue)); val = CCI_I2C_READ | len << 4; writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue)); ret = cci_run_queue(cci, master, queue); if (ret < 0) return ret; words_read = readl(cci->base + CCI_I2C_Mm_READ_BUF_LEVEL(master)); words_exp = len / 4 + 1; if (words_read != words_exp) { dev_err(cci->dev, "words read = %d, words expected = %d\n", words_read, words_exp); return -EIO; } do { val = readl(cci->base + CCI_I2C_Mm_READ_DATA(master)); for (i = 0; i < 4 && index < len; i++) { if (first) { /* The LS byte of this register represents the * first byte read from the slave during a read * access. 
*/ first = false; continue; } buf[index++] = (val >> (i * 8)) & 0xff; } } while (--words_read); return 0; } static int cci_i2c_write(struct cci *cci, u16 master, u16 addr, u8 *buf, u16 len) { u8 queue = QUEUE_0; u8 load[12] = { 0 }; int i = 0, j, ret; u32 val; /* * Call validate queue to make sure queue is empty before starting. * This is to avoid overflow / underflow of queue. */ ret = cci_validate_queue(cci, master, queue); if (ret < 0) return ret; val = CCI_I2C_SET_PARAM | (addr & 0x7f) << 4; writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue)); load[i++] = CCI_I2C_WRITE | len << 4; for (j = 0; j < len; j++) load[i++] = buf[j]; for (j = 0; j < i; j += 4) { val = load[j]; val |= load[j + 1] << 8; val |= load[j + 2] << 16; val |= load[j + 3] << 24; writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue)); } val = CCI_I2C_REPORT | CCI_I2C_REPORT_IRQ_EN; writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue)); return cci_run_queue(cci, master, queue); } static int cci_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) { struct cci_master *cci_master = i2c_get_adapdata(adap); struct cci *cci = cci_master->cci; int i, ret; ret = pm_runtime_get_sync(cci->dev); if (ret < 0) goto err; for (i = 0; i < num; i++) { if (msgs[i].flags & I2C_M_RD) ret = cci_i2c_read(cci, cci_master->master, msgs[i].addr, msgs[i].buf, msgs[i].len); else ret = cci_i2c_write(cci, cci_master->master, msgs[i].addr, msgs[i].buf, msgs[i].len); if (ret < 0) break; } if (!ret) ret = num; err: pm_runtime_mark_last_busy(cci->dev); pm_runtime_put_autosuspend(cci->dev); return ret; } static u32 cci_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static const struct i2c_algorithm cci_algo = { .master_xfer = cci_xfer, .functionality = cci_func, }; static int cci_enable_clocks(struct cci *cci) { return clk_bulk_prepare_enable(cci->nclocks, cci->clocks); } static void cci_disable_clocks(struct cci *cci) { clk_bulk_disable_unprepare(cci->nclocks, cci->clocks); } static int __maybe_unused cci_suspend_runtime(struct device *dev) { struct cci *cci = dev_get_drvdata(dev); cci_disable_clocks(cci); return 0; } static int __maybe_unused cci_resume_runtime(struct device *dev) { struct cci *cci = dev_get_drvdata(dev); int ret; ret = cci_enable_clocks(cci); if (ret) return ret; cci_init(cci); return 0; } static int __maybe_unused cci_suspend(struct device *dev) { if (!pm_runtime_suspended(dev)) return cci_suspend_runtime(dev); return 0; } static int __maybe_unused cci_resume(struct device *dev) { cci_resume_runtime(dev); pm_runtime_mark_last_busy(dev); pm_request_autosuspend(dev); return 0; } static const struct dev_pm_ops qcom_cci_pm = { SET_SYSTEM_SLEEP_PM_OPS(cci_suspend, cci_resume) SET_RUNTIME_PM_OPS(cci_suspend_runtime, cci_resume_runtime, NULL) }; static int cci_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; unsigned long cci_clk_rate = 0; struct device_node *child; struct resource *r; struct cci *cci; int ret, i; u32 val; cci = devm_kzalloc(dev, sizeof(*cci), GFP_KERNEL); if (!cci) return -ENOMEM; cci->dev = dev; platform_set_drvdata(pdev, cci); cci->data = device_get_match_data(dev); if (!cci->data) return -ENOENT; for_each_available_child_of_node(dev->of_node, child) { struct cci_master *master; u32 idx; ret = of_property_read_u32(child, "reg", &idx); if (ret) { dev_err(dev, "%pOF invalid 'reg' property", child); continue; } if (idx >= cci->data->num_masters) { dev_err(dev, "%pOF invalid 'reg' value: %u (max is %u)", child, idx, 
cci->data->num_masters - 1); continue; } master = &cci->master[idx]; master->adap.quirks = &cci->data->quirks; master->adap.algo = &cci_algo; master->adap.dev.parent = dev; master->adap.dev.of_node = of_node_get(child); master->master = idx; master->cci = cci; i2c_set_adapdata(&master->adap, master); snprintf(master->adap.name, sizeof(master->adap.name), "Qualcomm-CCI"); master->mode = I2C_MODE_STANDARD; ret = of_property_read_u32(child, "clock-frequency", &val); if (!ret) { if (val == I2C_MAX_FAST_MODE_FREQ) master->mode = I2C_MODE_FAST; else if (val == I2C_MAX_FAST_MODE_PLUS_FREQ) master->mode = I2C_MODE_FAST_PLUS; } init_completion(&master->irq_complete); } /* Memory */ cci->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r); if (IS_ERR(cci->base)) return PTR_ERR(cci->base); /* Clocks */ ret = devm_clk_bulk_get_all(dev, &cci->clocks); if (ret < 0) return dev_err_probe(dev, ret, "failed to get clocks\n"); else if (!ret) return dev_err_probe(dev, -EINVAL, "not enough clocks in DT\n"); cci->nclocks = ret; /* Retrieve CCI clock rate */ for (i = 0; i < cci->nclocks; i++) { if (!strcmp(cci->clocks[i].id, "cci")) { cci_clk_rate = clk_get_rate(cci->clocks[i].clk); break; } } if (cci_clk_rate != cci->data->cci_clk_rate) { /* cci clock set by the bootloader or via assigned clock rate * in DT. */ dev_warn(dev, "Found %lu cci clk rate while %lu was expected\n", cci_clk_rate, cci->data->cci_clk_rate); } ret = cci_enable_clocks(cci); if (ret < 0) return ret; /* Interrupt */ ret = platform_get_irq(pdev, 0); if (ret < 0) goto disable_clocks; cci->irq = ret; ret = devm_request_irq(dev, cci->irq, cci_isr, 0, dev_name(dev), cci); if (ret < 0) { dev_err(dev, "request_irq failed, ret: %d\n", ret); goto disable_clocks; } val = readl(cci->base + CCI_HW_VERSION); dev_dbg(dev, "CCI HW version = 0x%08x", val); ret = cci_reset(cci); if (ret < 0) goto error; ret = cci_init(cci); if (ret < 0) goto error; pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC); pm_runtime_use_autosuspend(dev); pm_runtime_set_active(dev); pm_runtime_enable(dev); for (i = 0; i < cci->data->num_masters; i++) { if (!cci->master[i].cci) continue; ret = i2c_add_adapter(&cci->master[i].adap); if (ret < 0) { of_node_put(cci->master[i].adap.dev.of_node); goto error_i2c; } } return 0; error_i2c: pm_runtime_disable(dev); pm_runtime_dont_use_autosuspend(dev); for (--i ; i >= 0; i--) { if (cci->master[i].cci) { i2c_del_adapter(&cci->master[i].adap); of_node_put(cci->master[i].adap.dev.of_node); } } error: disable_irq(cci->irq); disable_clocks: cci_disable_clocks(cci); return ret; } static void cci_remove(struct platform_device *pdev) { struct cci *cci = platform_get_drvdata(pdev); int i; for (i = 0; i < cci->data->num_masters; i++) { if (cci->master[i].cci) { i2c_del_adapter(&cci->master[i].adap); of_node_put(cci->master[i].adap.dev.of_node); } cci_halt(cci, i); } disable_irq(cci->irq); pm_runtime_disable(&pdev->dev); pm_runtime_set_suspended(&pdev->dev); } static const struct cci_data cci_v1_data = { .num_masters = 1, .queue_size = { 64, 16 }, .quirks = { .max_write_len = 10, .max_read_len = 12, }, .cci_clk_rate = 19200000, .params[I2C_MODE_STANDARD] = { .thigh = 78, .tlow = 114, .tsu_sto = 28, .tsu_sta = 28, .thd_dat = 10, .thd_sta = 77, .tbuf = 118, .scl_stretch_en = 0, .trdhld = 6, .tsp = 1 }, .params[I2C_MODE_FAST] = { .thigh = 20, .tlow = 28, .tsu_sto = 21, .tsu_sta = 21, .thd_dat = 13, .thd_sta = 18, .tbuf = 32, .scl_stretch_en = 0, .trdhld = 6, .tsp = 3 }, }; static const struct cci_data cci_v1_5_data = { .num_masters = 2, 
.queue_size = { 64, 16 }, .quirks = { .max_write_len = 10, .max_read_len = 12, }, .cci_clk_rate = 19200000, .params[I2C_MODE_STANDARD] = { .thigh = 78, .tlow = 114, .tsu_sto = 28, .tsu_sta = 28, .thd_dat = 10, .thd_sta = 77, .tbuf = 118, .scl_stretch_en = 0, .trdhld = 6, .tsp = 1 }, .params[I2C_MODE_FAST] = { .thigh = 20, .tlow = 28, .tsu_sto = 21, .tsu_sta = 21, .thd_dat = 13, .thd_sta = 18, .tbuf = 32, .scl_stretch_en = 0, .trdhld = 6, .tsp = 3 }, }; static const struct cci_data cci_v2_data = { .num_masters = 2, .queue_size = { 64, 16 }, .quirks = { .max_write_len = 11, .max_read_len = 12, }, .cci_clk_rate = 37500000, .params[I2C_MODE_STANDARD] = { .thigh = 201, .tlow = 174, .tsu_sto = 204, .tsu_sta = 231, .thd_dat = 22, .thd_sta = 162, .tbuf = 227, .scl_stretch_en = 0, .trdhld = 6, .tsp = 3 }, .params[I2C_MODE_FAST] = { .thigh = 38, .tlow = 56, .tsu_sto = 40, .tsu_sta = 40, .thd_dat = 22, .thd_sta = 35, .tbuf = 62, .scl_stretch_en = 0, .trdhld = 6, .tsp = 3 }, .params[I2C_MODE_FAST_PLUS] = { .thigh = 16, .tlow = 22, .tsu_sto = 17, .tsu_sta = 18, .thd_dat = 16, .thd_sta = 15, .tbuf = 24, .scl_stretch_en = 0, .trdhld = 3, .tsp = 3 }, }; static const struct of_device_id cci_dt_match[] = { { .compatible = "qcom,msm8226-cci", .data = &cci_v1_data}, { .compatible = "qcom,msm8974-cci", .data = &cci_v1_5_data}, { .compatible = "qcom,msm8996-cci", .data = &cci_v2_data}, /* * Legacy compatibles kept for backwards compatibility. * Do not add any new ones unless they introduce a new config */ { .compatible = "qcom,msm8916-cci", .data = &cci_v1_data}, { .compatible = "qcom,sdm845-cci", .data = &cci_v2_data}, { .compatible = "qcom,sm8250-cci", .data = &cci_v2_data}, { .compatible = "qcom,sm8450-cci", .data = &cci_v2_data}, {} }; MODULE_DEVICE_TABLE(of, cci_dt_match); static struct platform_driver qcom_cci_driver = { .probe = cci_probe, .remove_new = cci_remove, .driver = { .name = "i2c-qcom-cci", .of_match_table = cci_dt_match, .pm = &qcom_cci_pm, }, }; module_platform_driver(qcom_cci_driver); MODULE_DESCRIPTION("Qualcomm Camera Control Interface driver"); MODULE_AUTHOR("Todor Tomov <[email protected]>"); MODULE_AUTHOR("Loic Poulain <[email protected]>"); MODULE_LICENSE("GPL v2");
linux-master
drivers/i2c/busses/i2c-qcom-cci.c
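cci_i2c_write() above packs the whole command stream, opcode plus payload bytes, little-endian into 32-bit words before loading the queue. A standalone sketch that prints the words such a transfer would produce, for an assumed example payload:

/*
 * Standalone sketch (not part of the driver): shows how cci_i2c_write()
 * packs a command stream into 32-bit words.  The values below are
 * printed instead of being written to CCI_I2C_Mm_Qn_LOAD_DATA().
 */
#include <stdio.h>

#define CCI_I2C_SET_PARAM	1
#define CCI_I2C_REPORT		8
#define CCI_I2C_WRITE		9
#define CCI_I2C_REPORT_IRQ_EN	(1U << 8)

int main(void)
{
	/* Assumed example transfer: write 0xde 0xad to register 0x10 of slave 0x36 */
	unsigned char addr = 0x36, buf[] = { 0x10, 0xde, 0xad };
	unsigned char load[12] = { 0 };
	unsigned int i = 0, j, len = sizeof(buf), val;

	printf("word 0x%08x  (SET_PARAM | addr << 4)\n",
	       (unsigned int)(CCI_I2C_SET_PARAM | (addr & 0x7f) << 4));

	load[i++] = CCI_I2C_WRITE | len << 4;		/* opcode + byte count */
	for (j = 0; j < len; j++)
		load[i++] = buf[j];

	for (j = 0; j < i; j += 4) {			/* little-endian packing, as in cci_i2c_write() */
		val = load[j] | load[j + 1] << 8 | load[j + 2] << 16 |
		      (unsigned int)load[j + 3] << 24;
		printf("word 0x%08x  (WRITE opcode + data)\n", val);
	}

	printf("word 0x%08x  (REPORT | REPORT_IRQ_EN)\n",
	       CCI_I2C_REPORT | CCI_I2C_REPORT_IRQ_EN);
	return 0;
}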
// SPDX-License-Identifier: GPL-2.0-or-later /* * Intel CHT Whiskey Cove PMIC I2C Master driver * Copyright (C) 2017 Hans de Goede <[email protected]> * * Based on various non upstream patches to support the CHT Whiskey Cove PMIC: * Copyright (C) 2011 - 2014 Intel Corporation. All rights reserved. */ #include <linux/acpi.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/irqdomain.h> #include <linux/mfd/intel_soc_pmic.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/power/bq24190_charger.h> #include <linux/power/bq25890_charger.h> #include <linux/slab.h> #define CHT_WC_I2C_CTRL 0x5e24 #define CHT_WC_I2C_CTRL_WR BIT(0) #define CHT_WC_I2C_CTRL_RD BIT(1) #define CHT_WC_I2C_CLIENT_ADDR 0x5e25 #define CHT_WC_I2C_REG_OFFSET 0x5e26 #define CHT_WC_I2C_WRDATA 0x5e27 #define CHT_WC_I2C_RDDATA 0x5e28 #define CHT_WC_EXTCHGRIRQ 0x6e0a #define CHT_WC_EXTCHGRIRQ_CLIENT_IRQ BIT(0) #define CHT_WC_EXTCHGRIRQ_WRITE_IRQ BIT(1) #define CHT_WC_EXTCHGRIRQ_READ_IRQ BIT(2) #define CHT_WC_EXTCHGRIRQ_NACK_IRQ BIT(3) #define CHT_WC_EXTCHGRIRQ_ADAP_IRQMASK ((u8)GENMASK(3, 1)) #define CHT_WC_EXTCHGRIRQ_MSK 0x6e17 struct cht_wc_i2c_adap { struct i2c_adapter adapter; wait_queue_head_t wait; struct irq_chip irqchip; struct mutex adap_lock; struct mutex irqchip_lock; struct regmap *regmap; struct irq_domain *irq_domain; struct i2c_client *client; int client_irq; u8 irq_mask; u8 old_irq_mask; int read_data; bool io_error; bool done; }; static irqreturn_t cht_wc_i2c_adap_thread_handler(int id, void *data) { struct cht_wc_i2c_adap *adap = data; int ret, reg; mutex_lock(&adap->adap_lock); /* Read IRQs */ ret = regmap_read(adap->regmap, CHT_WC_EXTCHGRIRQ, &reg); if (ret) { dev_err(&adap->adapter.dev, "Error reading extchgrirq reg\n"); mutex_unlock(&adap->adap_lock); return IRQ_NONE; } reg &= ~adap->irq_mask; /* Reads must be acked after reading the received data. */ ret = regmap_read(adap->regmap, CHT_WC_I2C_RDDATA, &adap->read_data); if (ret) adap->io_error = true; /* * Immediately ack IRQs, so that if new IRQs arrives while we're * handling the previous ones our irq will re-trigger when we're done. */ ret = regmap_write(adap->regmap, CHT_WC_EXTCHGRIRQ, reg); if (ret) dev_err(&adap->adapter.dev, "Error writing extchgrirq reg\n"); if (reg & CHT_WC_EXTCHGRIRQ_ADAP_IRQMASK) { adap->io_error |= !!(reg & CHT_WC_EXTCHGRIRQ_NACK_IRQ); adap->done = true; } mutex_unlock(&adap->adap_lock); if (reg & CHT_WC_EXTCHGRIRQ_ADAP_IRQMASK) wake_up(&adap->wait); /* * Do NOT use handle_nested_irq here, the client irq handler will * likely want to do i2c transfers and the i2c controller uses this * interrupt handler as well, so running the client irq handler from * this thread will cause things to lock up. 
*/ if (reg & CHT_WC_EXTCHGRIRQ_CLIENT_IRQ) generic_handle_irq_safe(adap->client_irq); return IRQ_HANDLED; } static u32 cht_wc_i2c_adap_master_func(struct i2c_adapter *adap) { /* This i2c adapter only supports SMBUS byte transfers */ return I2C_FUNC_SMBUS_BYTE_DATA; } static int cht_wc_i2c_adap_smbus_xfer(struct i2c_adapter *_adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { struct cht_wc_i2c_adap *adap = i2c_get_adapdata(_adap); int ret; mutex_lock(&adap->adap_lock); adap->io_error = false; adap->done = false; mutex_unlock(&adap->adap_lock); ret = regmap_write(adap->regmap, CHT_WC_I2C_CLIENT_ADDR, addr); if (ret) return ret; if (read_write == I2C_SMBUS_WRITE) { ret = regmap_write(adap->regmap, CHT_WC_I2C_WRDATA, data->byte); if (ret) return ret; } ret = regmap_write(adap->regmap, CHT_WC_I2C_REG_OFFSET, command); if (ret) return ret; ret = regmap_write(adap->regmap, CHT_WC_I2C_CTRL, (read_write == I2C_SMBUS_WRITE) ? CHT_WC_I2C_CTRL_WR : CHT_WC_I2C_CTRL_RD); if (ret) return ret; ret = wait_event_timeout(adap->wait, adap->done, msecs_to_jiffies(30)); if (ret == 0) { /* * The CHT GPIO controller serializes all IRQs, sometimes * causing significant delays, check status manually. */ cht_wc_i2c_adap_thread_handler(0, adap); if (!adap->done) return -ETIMEDOUT; } ret = 0; mutex_lock(&adap->adap_lock); if (adap->io_error) ret = -EIO; else if (read_write == I2C_SMBUS_READ) data->byte = adap->read_data; mutex_unlock(&adap->adap_lock); return ret; } static const struct i2c_algorithm cht_wc_i2c_adap_algo = { .functionality = cht_wc_i2c_adap_master_func, .smbus_xfer = cht_wc_i2c_adap_smbus_xfer, }; /* * We are an i2c-adapter which itself is part of an i2c-client. This means that * transfers done through us take adapter->bus_lock twice, once for our parent * i2c-adapter and once to take our own bus_lock. Lockdep does not like this * nested locking, to make lockdep happy in the case of busses with muxes, the * i2c-core's i2c_adapter_lock_bus function calls: * rt_mutex_lock_nested(&adapter->bus_lock, i2c_adapter_depth(adapter)); * * But i2c_adapter_depth only works when the direct parent of the adapter is * another adapter, as it is only meant for muxes. In our case there is an * i2c-client and MFD instantiated platform_device in the parent->child chain * between the 2 devices. * * So we override the default i2c_lock_operations and pass a hardcoded * depth of 1 to rt_mutex_lock_nested, to make lockdep happy. * * Note that if there were to be a mux attached to our adapter, this would * break things again since the i2c-mux code expects the root-adapter to have * a locking depth of 0. But we always have only 1 client directly attached * in the form of the Charger IC paired with the CHT Whiskey Cove PMIC. 
*/ static void cht_wc_i2c_adap_lock_bus(struct i2c_adapter *adapter, unsigned int flags) { rt_mutex_lock_nested(&adapter->bus_lock, 1); } static int cht_wc_i2c_adap_trylock_bus(struct i2c_adapter *adapter, unsigned int flags) { return rt_mutex_trylock(&adapter->bus_lock); } static void cht_wc_i2c_adap_unlock_bus(struct i2c_adapter *adapter, unsigned int flags) { rt_mutex_unlock(&adapter->bus_lock); } static const struct i2c_lock_operations cht_wc_i2c_adap_lock_ops = { .lock_bus = cht_wc_i2c_adap_lock_bus, .trylock_bus = cht_wc_i2c_adap_trylock_bus, .unlock_bus = cht_wc_i2c_adap_unlock_bus, }; /**** irqchip for the client connected to the extchgr i2c adapter ****/ static void cht_wc_i2c_irq_lock(struct irq_data *data) { struct cht_wc_i2c_adap *adap = irq_data_get_irq_chip_data(data); mutex_lock(&adap->irqchip_lock); } static void cht_wc_i2c_irq_sync_unlock(struct irq_data *data) { struct cht_wc_i2c_adap *adap = irq_data_get_irq_chip_data(data); int ret; if (adap->irq_mask != adap->old_irq_mask) { ret = regmap_write(adap->regmap, CHT_WC_EXTCHGRIRQ_MSK, adap->irq_mask); if (ret == 0) adap->old_irq_mask = adap->irq_mask; else dev_err(&adap->adapter.dev, "Error writing EXTCHGRIRQ_MSK\n"); } mutex_unlock(&adap->irqchip_lock); } static void cht_wc_i2c_irq_enable(struct irq_data *data) { struct cht_wc_i2c_adap *adap = irq_data_get_irq_chip_data(data); adap->irq_mask &= ~CHT_WC_EXTCHGRIRQ_CLIENT_IRQ; } static void cht_wc_i2c_irq_disable(struct irq_data *data) { struct cht_wc_i2c_adap *adap = irq_data_get_irq_chip_data(data); adap->irq_mask |= CHT_WC_EXTCHGRIRQ_CLIENT_IRQ; } static const struct irq_chip cht_wc_i2c_irq_chip = { .irq_bus_lock = cht_wc_i2c_irq_lock, .irq_bus_sync_unlock = cht_wc_i2c_irq_sync_unlock, .irq_disable = cht_wc_i2c_irq_disable, .irq_enable = cht_wc_i2c_irq_enable, .name = "cht_wc_ext_chrg_irq_chip", }; /********** GPD Win / Pocket charger IC settings **********/ static const char * const bq24190_suppliers[] = { "tcpm-source-psy-i2c-fusb302" }; static const struct property_entry bq24190_props[] = { PROPERTY_ENTRY_STRING_ARRAY("supplied-from", bq24190_suppliers), PROPERTY_ENTRY_BOOL("omit-battery-class"), PROPERTY_ENTRY_BOOL("disable-reset"), { } }; static const struct software_node bq24190_node = { .properties = bq24190_props, }; static struct regulator_consumer_supply fusb302_consumer = { .supply = "vbus", /* Must match fusb302 dev_name in intel_cht_int33fe.c */ .dev_name = "i2c-fusb302", }; static const struct regulator_init_data bq24190_vbus_init_data = { .constraints = { /* The name is used in intel_cht_int33fe.c do not change. 
*/ .name = "cht_wc_usb_typec_vbus", .valid_ops_mask = REGULATOR_CHANGE_STATUS, }, .consumer_supplies = &fusb302_consumer, .num_consumer_supplies = 1, }; static struct bq24190_platform_data bq24190_pdata = { .regulator_init_data = &bq24190_vbus_init_data, }; static struct i2c_board_info gpd_win_board_info = { .type = "bq24190", .addr = 0x6b, .dev_name = "bq24190", .swnode = &bq24190_node, .platform_data = &bq24190_pdata, }; /********** Xiaomi Mi Pad 2 charger IC settings **********/ static struct regulator_consumer_supply bq2589x_vbus_consumer = { .supply = "vbus", .dev_name = "cht_wcove_pwrsrc", }; static const struct regulator_init_data bq2589x_vbus_init_data = { .constraints = { .valid_ops_mask = REGULATOR_CHANGE_STATUS, }, .consumer_supplies = &bq2589x_vbus_consumer, .num_consumer_supplies = 1, }; static struct bq25890_platform_data bq2589x_pdata = { .regulator_init_data = &bq2589x_vbus_init_data, }; static const struct property_entry xiaomi_mipad2_props[] = { PROPERTY_ENTRY_BOOL("linux,skip-reset"), PROPERTY_ENTRY_BOOL("linux,read-back-settings"), { } }; static const struct software_node xiaomi_mipad2_node = { .properties = xiaomi_mipad2_props, }; static struct i2c_board_info xiaomi_mipad2_board_info = { .type = "bq25890", .addr = 0x6a, .dev_name = "bq25890", .swnode = &xiaomi_mipad2_node, .platform_data = &bq2589x_pdata, }; /********** Lenovo Yogabook YB1-X90F/-X91F/-X91L charger settings **********/ static const char * const lenovo_yb1_bq25892_suppliers[] = { "cht_wcove_pwrsrc" }; static const struct property_entry lenovo_yb1_bq25892_props[] = { PROPERTY_ENTRY_STRING_ARRAY("supplied-from", lenovo_yb1_bq25892_suppliers), PROPERTY_ENTRY_U32("linux,pump-express-vbus-max", 12000000), PROPERTY_ENTRY_BOOL("linux,skip-reset"), /* * The firmware sets everything to the defaults, which leads to a * somewhat low charge-current of 2048mA and worse to a battery-voltage * of 4.2V instead of 4.35V (when booted without a charger connected). * Use our own values instead of "linux,read-back-settings" to fix this. */ PROPERTY_ENTRY_U32("ti,charge-current", 4224000), PROPERTY_ENTRY_U32("ti,battery-regulation-voltage", 4352000), PROPERTY_ENTRY_U32("ti,termination-current", 256000), PROPERTY_ENTRY_U32("ti,precharge-current", 128000), PROPERTY_ENTRY_U32("ti,minimum-sys-voltage", 3500000), PROPERTY_ENTRY_U32("ti,boost-voltage", 4998000), PROPERTY_ENTRY_U32("ti,boost-max-current", 1400000), PROPERTY_ENTRY_BOOL("ti,use-ilim-pin"), { } }; static const struct software_node lenovo_yb1_bq25892_node = { .properties = lenovo_yb1_bq25892_props, }; static struct i2c_board_info lenovo_yogabook1_board_info = { .type = "bq25892", .addr = 0x6b, .dev_name = "bq25892", .swnode = &lenovo_yb1_bq25892_node, .platform_data = &bq2589x_pdata, }; /********** Lenovo Yogabook YT3-X90F charger settings **********/ static const char * const lenovo_yt3_bq25892_1_suppliers[] = { "cht_wcove_pwrsrc" }; /* * bq25892 charger settings for the round li-ion cells in the hinge, * this is the main / biggest battery. */ static const struct property_entry lenovo_yt3_bq25892_1_props[] = { PROPERTY_ENTRY_STRING_ARRAY("supplied-from", lenovo_yt3_bq25892_1_suppliers), PROPERTY_ENTRY_STRING("linux,secondary-charger-name", "bq25890-charger-0"), PROPERTY_ENTRY_U32("linux,iinlim-percentage", 60), PROPERTY_ENTRY_U32("linux,pump-express-vbus-max", 12000000), PROPERTY_ENTRY_BOOL("linux,skip-reset"), /* * The firmware sets everything to the defaults, leading to a low(ish) * charge-current and battery-voltage of 2048mA resp 4.2V. 
Use the * Android values instead of "linux,read-back-settings" to fix this. */ PROPERTY_ENTRY_U32("ti,charge-current", 3072000), PROPERTY_ENTRY_U32("ti,battery-regulation-voltage", 4352000), PROPERTY_ENTRY_U32("ti,termination-current", 128000), PROPERTY_ENTRY_U32("ti,precharge-current", 128000), PROPERTY_ENTRY_U32("ti,minimum-sys-voltage", 3700000), PROPERTY_ENTRY_BOOL("ti,use-ilim-pin"), /* Set 5V boost current-limit to 1.2A (MAX/POR values are 2.45A/1.4A) */ PROPERTY_ENTRY_U32("ti,boost-voltage", 4998000), PROPERTY_ENTRY_U32("ti,boost-max-current", 1200000), { } }; static const struct software_node lenovo_yt3_bq25892_1_node = { .properties = lenovo_yt3_bq25892_1_props, }; /* bq25892 charger for the round li-ion cells in the hinge */ static struct i2c_board_info lenovo_yoga_tab3_board_info = { .type = "bq25892", .addr = 0x6b, .dev_name = "bq25892_1", .swnode = &lenovo_yt3_bq25892_1_node, .platform_data = &bq2589x_pdata, }; static int cht_wc_i2c_adap_i2c_probe(struct platform_device *pdev) { struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent); struct i2c_board_info *board_info = NULL; struct cht_wc_i2c_adap *adap; int ret, reg, irq; irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; adap = devm_kzalloc(&pdev->dev, sizeof(*adap), GFP_KERNEL); if (!adap) return -ENOMEM; init_waitqueue_head(&adap->wait); mutex_init(&adap->adap_lock); mutex_init(&adap->irqchip_lock); adap->irqchip = cht_wc_i2c_irq_chip; adap->regmap = pmic->regmap; adap->adapter.owner = THIS_MODULE; adap->adapter.class = I2C_CLASS_HWMON; adap->adapter.algo = &cht_wc_i2c_adap_algo; adap->adapter.lock_ops = &cht_wc_i2c_adap_lock_ops; strscpy(adap->adapter.name, "PMIC I2C Adapter", sizeof(adap->adapter.name)); adap->adapter.dev.parent = &pdev->dev; /* Clear and activate i2c-adapter interrupts, disable client IRQ */ adap->old_irq_mask = adap->irq_mask = ~CHT_WC_EXTCHGRIRQ_ADAP_IRQMASK; ret = regmap_read(adap->regmap, CHT_WC_I2C_RDDATA, &reg); if (ret) return ret; ret = regmap_write(adap->regmap, CHT_WC_EXTCHGRIRQ, ~adap->irq_mask); if (ret) return ret; ret = regmap_write(adap->regmap, CHT_WC_EXTCHGRIRQ_MSK, adap->irq_mask); if (ret) return ret; /* Alloc and register client IRQ */ adap->irq_domain = irq_domain_add_linear(NULL, 1, &irq_domain_simple_ops, NULL); if (!adap->irq_domain) return -ENOMEM; adap->client_irq = irq_create_mapping(adap->irq_domain, 0); if (!adap->client_irq) { ret = -ENOMEM; goto remove_irq_domain; } irq_set_chip_data(adap->client_irq, adap); irq_set_chip_and_handler(adap->client_irq, &adap->irqchip, handle_simple_irq); ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, cht_wc_i2c_adap_thread_handler, IRQF_ONESHOT, "PMIC I2C Adapter", adap); if (ret) goto remove_irq_domain; i2c_set_adapdata(&adap->adapter, adap); ret = i2c_add_adapter(&adap->adapter); if (ret) goto remove_irq_domain; switch (pmic->cht_wc_model) { case INTEL_CHT_WC_GPD_WIN_POCKET: board_info = &gpd_win_board_info; break; case INTEL_CHT_WC_XIAOMI_MIPAD2: board_info = &xiaomi_mipad2_board_info; break; case INTEL_CHT_WC_LENOVO_YOGABOOK1: board_info = &lenovo_yogabook1_board_info; break; case INTEL_CHT_WC_LENOVO_YT3_X90: board_info = &lenovo_yoga_tab3_board_info; break; default: dev_warn(&pdev->dev, "Unknown model, not instantiating charger device\n"); break; } if (board_info) { board_info->irq = adap->client_irq; adap->client = i2c_new_client_device(&adap->adapter, board_info); if (IS_ERR(adap->client)) { ret = PTR_ERR(adap->client); goto del_adapter; } } platform_set_drvdata(pdev, adap); return 0; del_adapter: 
i2c_del_adapter(&adap->adapter); remove_irq_domain: irq_domain_remove(adap->irq_domain); return ret; } static void cht_wc_i2c_adap_i2c_remove(struct platform_device *pdev) { struct cht_wc_i2c_adap *adap = platform_get_drvdata(pdev); i2c_unregister_device(adap->client); i2c_del_adapter(&adap->adapter); irq_domain_remove(adap->irq_domain); } static const struct platform_device_id cht_wc_i2c_adap_id_table[] = { { .name = "cht_wcove_ext_chgr" }, {}, }; MODULE_DEVICE_TABLE(platform, cht_wc_i2c_adap_id_table); static struct platform_driver cht_wc_i2c_adap_driver = { .probe = cht_wc_i2c_adap_i2c_probe, .remove_new = cht_wc_i2c_adap_i2c_remove, .driver = { .name = "cht_wcove_ext_chgr", }, .id_table = cht_wc_i2c_adap_id_table, }; module_platform_driver(cht_wc_i2c_adap_driver); MODULE_DESCRIPTION("Intel CHT Whiskey Cove PMIC I2C Master driver"); MODULE_AUTHOR("Hans de Goede <[email protected]>"); MODULE_LICENSE("GPL");
linux-master
drivers/i2c/busses/i2c-cht-wc.c
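A single SMBus byte transfer on this adapter is just a handful of PMIC register writes followed by an interrupt-signalled read of the result, as cht_wc_i2c_adap_smbus_xfer() and the threaded handler show. A register-level sketch of the read path, with hypothetical pmic_reg_read()/pmic_reg_write() helpers standing in for the regmap calls and a polling loop standing in for the IRQ wait:

/*
 * Register-level sketch (not part of the driver): the sequence
 * cht_wc_i2c_adap_smbus_xfer() programs for one SMBus byte read.
 * pmic_reg_read()/pmic_reg_write() are hypothetical accessors;
 * the real driver uses regmap and waits on an IRQ with a timeout.
 */
#include <stdint.h>

#define CHT_WC_I2C_CTRL			0x5e24
#define CHT_WC_I2C_CTRL_RD		(1 << 1)
#define CHT_WC_I2C_CLIENT_ADDR		0x5e25
#define CHT_WC_I2C_REG_OFFSET		0x5e26
#define CHT_WC_I2C_RDDATA		0x5e28
#define CHT_WC_EXTCHGRIRQ		0x6e0a
#define CHT_WC_EXTCHGRIRQ_READ_IRQ	(1 << 2)
#define CHT_WC_EXTCHGRIRQ_NACK_IRQ	(1 << 3)

/* Hypothetical PMIC register accessors (regmap in the real driver). */
int pmic_reg_write(uint16_t reg, uint8_t val);
int pmic_reg_read(uint16_t reg, uint8_t *val);

int cht_wc_smbus_read_byte(uint8_t addr, uint8_t command, uint8_t *val)
{
	uint8_t irq;

	pmic_reg_write(CHT_WC_I2C_CLIENT_ADDR, addr);		/* 7-bit client address */
	pmic_reg_write(CHT_WC_I2C_REG_OFFSET, command);		/* register to read */
	pmic_reg_write(CHT_WC_I2C_CTRL, CHT_WC_I2C_CTRL_RD);	/* start the read */

	do {	/* IRQ-driven with a 30 ms timeout in the real driver */
		pmic_reg_read(CHT_WC_EXTCHGRIRQ, &irq);
	} while (!(irq & (CHT_WC_EXTCHGRIRQ_READ_IRQ | CHT_WC_EXTCHGRIRQ_NACK_IRQ)));

	pmic_reg_read(CHT_WC_I2C_RDDATA, val);	/* data must be read before the ack */
	pmic_reg_write(CHT_WC_EXTCHGRIRQ, irq);	/* ack the IRQ bits */

	return (irq & CHT_WC_EXTCHGRIRQ_NACK_IRQ) ? -1 : 0;
}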
// SPDX-License-Identifier: GPL-2.0-or-later /* * I2C bus driver for the Cadence I2C controller. * * Copyright (C) 2009 - 2014 Xilinx, Inc. */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/of.h> #include <linux/pm_runtime.h> #include <linux/pinctrl/consumer.h> #include <linux/reset.h> /* Register offsets for the I2C device. */ #define CDNS_I2C_CR_OFFSET 0x00 /* Control Register, RW */ #define CDNS_I2C_SR_OFFSET 0x04 /* Status Register, RO */ #define CDNS_I2C_ADDR_OFFSET 0x08 /* I2C Address Register, RW */ #define CDNS_I2C_DATA_OFFSET 0x0C /* I2C Data Register, RW */ #define CDNS_I2C_ISR_OFFSET 0x10 /* IRQ Status Register, RW */ #define CDNS_I2C_XFER_SIZE_OFFSET 0x14 /* Transfer Size Register, RW */ #define CDNS_I2C_TIME_OUT_OFFSET 0x1C /* Time Out Register, RW */ #define CDNS_I2C_IMR_OFFSET 0x20 /* IRQ Mask Register, RO */ #define CDNS_I2C_IER_OFFSET 0x24 /* IRQ Enable Register, WO */ #define CDNS_I2C_IDR_OFFSET 0x28 /* IRQ Disable Register, WO */ /* Control Register Bit mask definitions */ #define CDNS_I2C_CR_HOLD BIT(4) /* Hold Bus bit */ #define CDNS_I2C_CR_ACK_EN BIT(3) #define CDNS_I2C_CR_NEA BIT(2) #define CDNS_I2C_CR_MS BIT(1) /* Read or Write Master transfer 0 = Transmitter, 1 = Receiver */ #define CDNS_I2C_CR_RW BIT(0) /* 1 = Auto init FIFO to zeroes */ #define CDNS_I2C_CR_CLR_FIFO BIT(6) #define CDNS_I2C_CR_DIVA_SHIFT 14 #define CDNS_I2C_CR_DIVA_MASK (3 << CDNS_I2C_CR_DIVA_SHIFT) #define CDNS_I2C_CR_DIVB_SHIFT 8 #define CDNS_I2C_CR_DIVB_MASK (0x3f << CDNS_I2C_CR_DIVB_SHIFT) #define CDNS_I2C_CR_MASTER_EN_MASK (CDNS_I2C_CR_NEA | \ CDNS_I2C_CR_ACK_EN | \ CDNS_I2C_CR_MS) #define CDNS_I2C_CR_SLAVE_EN_MASK ~CDNS_I2C_CR_MASTER_EN_MASK /* Status Register Bit mask definitions */ #define CDNS_I2C_SR_BA BIT(8) #define CDNS_I2C_SR_TXDV BIT(6) #define CDNS_I2C_SR_RXDV BIT(5) #define CDNS_I2C_SR_RXRW BIT(3) /* * I2C Address Register Bit mask definitions * Normal addressing mode uses [6:0] bits. Extended addressing mode uses [9:0] * bits. A write access to this register always initiates a transfer if the I2C * is in master mode. */ #define CDNS_I2C_ADDR_MASK 0x000003FF /* I2C Address Mask */ /* * I2C Interrupt Registers Bit mask definitions * All the four interrupt registers (Status/Mask/Enable/Disable) have the same * bit definitions. 
*/ #define CDNS_I2C_IXR_ARB_LOST BIT(9) #define CDNS_I2C_IXR_RX_UNF BIT(7) #define CDNS_I2C_IXR_TX_OVF BIT(6) #define CDNS_I2C_IXR_RX_OVF BIT(5) #define CDNS_I2C_IXR_SLV_RDY BIT(4) #define CDNS_I2C_IXR_TO BIT(3) #define CDNS_I2C_IXR_NACK BIT(2) #define CDNS_I2C_IXR_DATA BIT(1) #define CDNS_I2C_IXR_COMP BIT(0) #define CDNS_I2C_IXR_ALL_INTR_MASK (CDNS_I2C_IXR_ARB_LOST | \ CDNS_I2C_IXR_RX_UNF | \ CDNS_I2C_IXR_TX_OVF | \ CDNS_I2C_IXR_RX_OVF | \ CDNS_I2C_IXR_SLV_RDY | \ CDNS_I2C_IXR_TO | \ CDNS_I2C_IXR_NACK | \ CDNS_I2C_IXR_DATA | \ CDNS_I2C_IXR_COMP) #define CDNS_I2C_IXR_ERR_INTR_MASK (CDNS_I2C_IXR_ARB_LOST | \ CDNS_I2C_IXR_RX_UNF | \ CDNS_I2C_IXR_TX_OVF | \ CDNS_I2C_IXR_RX_OVF | \ CDNS_I2C_IXR_NACK) #define CDNS_I2C_ENABLED_INTR_MASK (CDNS_I2C_IXR_ARB_LOST | \ CDNS_I2C_IXR_RX_UNF | \ CDNS_I2C_IXR_TX_OVF | \ CDNS_I2C_IXR_RX_OVF | \ CDNS_I2C_IXR_NACK | \ CDNS_I2C_IXR_DATA | \ CDNS_I2C_IXR_COMP) #define CDNS_I2C_IXR_SLAVE_INTR_MASK (CDNS_I2C_IXR_RX_UNF | \ CDNS_I2C_IXR_TX_OVF | \ CDNS_I2C_IXR_RX_OVF | \ CDNS_I2C_IXR_TO | \ CDNS_I2C_IXR_NACK | \ CDNS_I2C_IXR_DATA | \ CDNS_I2C_IXR_COMP) #define CDNS_I2C_TIMEOUT msecs_to_jiffies(1000) /* timeout for pm runtime autosuspend */ #define CNDS_I2C_PM_TIMEOUT 1000 /* ms */ #define CDNS_I2C_FIFO_DEPTH_DEFAULT 16 #define CDNS_I2C_MAX_TRANSFER_SIZE 255 /* Transfer size in multiples of data interrupt depth */ #define CDNS_I2C_TRANSFER_SIZE(max) ((max) - 3) #define DRIVER_NAME "cdns-i2c" #define CDNS_I2C_DIVA_MAX 4 #define CDNS_I2C_DIVB_MAX 64 #define CDNS_I2C_TIMEOUT_MAX 0xFF #define CDNS_I2C_BROKEN_HOLD_BIT BIT(0) #define CDNS_I2C_POLL_US 100000 #define CDNS_I2C_TIMEOUT_US 500000 #define cdns_i2c_readreg(offset) readl_relaxed(id->membase + offset) #define cdns_i2c_writereg(val, offset) writel_relaxed(val, id->membase + offset) #if IS_ENABLED(CONFIG_I2C_SLAVE) /** * enum cdns_i2c_mode - I2C Controller current operating mode * * @CDNS_I2C_MODE_SLAVE: I2C controller operating in slave mode * @CDNS_I2C_MODE_MASTER: I2C Controller operating in master mode */ enum cdns_i2c_mode { CDNS_I2C_MODE_SLAVE, CDNS_I2C_MODE_MASTER, }; /** * enum cdns_i2c_slave_state - Slave state when I2C is operating in slave mode * * @CDNS_I2C_SLAVE_STATE_IDLE: I2C slave idle * @CDNS_I2C_SLAVE_STATE_SEND: I2C slave sending data to master * @CDNS_I2C_SLAVE_STATE_RECV: I2C slave receiving data from master */ enum cdns_i2c_slave_state { CDNS_I2C_SLAVE_STATE_IDLE, CDNS_I2C_SLAVE_STATE_SEND, CDNS_I2C_SLAVE_STATE_RECV, }; #endif /** * struct cdns_i2c - I2C device private data structure * * @dev: Pointer to device structure * @membase: Base address of the I2C device * @adap: I2C adapter instance * @p_msg: Message pointer * @err_status: Error status in Interrupt Status Register * @xfer_done: Transfer complete status * @p_send_buf: Pointer to transmit buffer * @p_recv_buf: Pointer to receive buffer * @send_count: Number of bytes still expected to send * @recv_count: Number of bytes still expected to receive * @curr_recv_count: Number of bytes to be received in current transfer * @input_clk: Input clock to I2C controller * @i2c_clk: Maximum I2C clock speed * @bus_hold_flag: Flag used in repeated start for clearing HOLD bit * @clk: Pointer to struct clk * @clk_rate_change_nb: Notifier block for clock rate changes * @reset: Reset control for the device * @quirks: flag for broken hold bit usage in r1p10 * @ctrl_reg: Cached value of the control register. * @rinfo: I2C GPIO recovery information * @ctrl_reg_diva_divb: value of fields DIV_A and DIV_B from CR register * @slave: Registered slave instance. 
* @dev_mode: I2C operating role(master/slave). * @slave_state: I2C Slave state(idle/read/write). * @fifo_depth: The depth of the transfer FIFO * @transfer_size: The maximum number of bytes in one transfer */ struct cdns_i2c { struct device *dev; void __iomem *membase; struct i2c_adapter adap; struct i2c_msg *p_msg; int err_status; struct completion xfer_done; unsigned char *p_send_buf; unsigned char *p_recv_buf; unsigned int send_count; unsigned int recv_count; unsigned int curr_recv_count; unsigned long input_clk; unsigned int i2c_clk; unsigned int bus_hold_flag; struct clk *clk; struct notifier_block clk_rate_change_nb; struct reset_control *reset; u32 quirks; u32 ctrl_reg; struct i2c_bus_recovery_info rinfo; #if IS_ENABLED(CONFIG_I2C_SLAVE) u16 ctrl_reg_diva_divb; struct i2c_client *slave; enum cdns_i2c_mode dev_mode; enum cdns_i2c_slave_state slave_state; #endif u32 fifo_depth; unsigned int transfer_size; }; struct cdns_platform_data { u32 quirks; }; #define to_cdns_i2c(_nb) container_of(_nb, struct cdns_i2c, \ clk_rate_change_nb) /** * cdns_i2c_clear_bus_hold - Clear bus hold bit * @id: Pointer to driver data struct * * Helper to clear the controller's bus hold bit. */ static void cdns_i2c_clear_bus_hold(struct cdns_i2c *id) { u32 reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET); if (reg & CDNS_I2C_CR_HOLD) cdns_i2c_writereg(reg & ~CDNS_I2C_CR_HOLD, CDNS_I2C_CR_OFFSET); } static inline bool cdns_is_holdquirk(struct cdns_i2c *id, bool hold_wrkaround) { return (hold_wrkaround && (id->curr_recv_count == id->fifo_depth + 1)); } #if IS_ENABLED(CONFIG_I2C_SLAVE) static void cdns_i2c_set_mode(enum cdns_i2c_mode mode, struct cdns_i2c *id) { /* Disable all interrupts */ cdns_i2c_writereg(CDNS_I2C_IXR_ALL_INTR_MASK, CDNS_I2C_IDR_OFFSET); /* Clear FIFO and transfer size */ cdns_i2c_writereg(CDNS_I2C_CR_CLR_FIFO, CDNS_I2C_CR_OFFSET); /* Update device mode and state */ id->dev_mode = mode; id->slave_state = CDNS_I2C_SLAVE_STATE_IDLE; switch (mode) { case CDNS_I2C_MODE_MASTER: /* Enable i2c master */ cdns_i2c_writereg(id->ctrl_reg_diva_divb | CDNS_I2C_CR_MASTER_EN_MASK, CDNS_I2C_CR_OFFSET); /* * This delay is needed to give the IP some time to switch to * the master mode. With lower values(like 110 us) i2cdetect * will not detect any slave and without this delay, the IP will * trigger a timeout interrupt. 
*/ usleep_range(115, 125); break; case CDNS_I2C_MODE_SLAVE: /* Enable i2c slave */ cdns_i2c_writereg(id->ctrl_reg_diva_divb & CDNS_I2C_CR_SLAVE_EN_MASK, CDNS_I2C_CR_OFFSET); /* Setting slave address */ cdns_i2c_writereg(id->slave->addr & CDNS_I2C_ADDR_MASK, CDNS_I2C_ADDR_OFFSET); /* Enable slave send/receive interrupts */ cdns_i2c_writereg(CDNS_I2C_IXR_SLAVE_INTR_MASK, CDNS_I2C_IER_OFFSET); break; } } static void cdns_i2c_slave_rcv_data(struct cdns_i2c *id) { u8 bytes; unsigned char data; /* Prepare backend for data reception */ if (id->slave_state == CDNS_I2C_SLAVE_STATE_IDLE) { id->slave_state = CDNS_I2C_SLAVE_STATE_RECV; i2c_slave_event(id->slave, I2C_SLAVE_WRITE_REQUESTED, NULL); } /* Fetch number of bytes to receive */ bytes = cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET); /* Read data and send to backend */ while (bytes--) { data = cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET); i2c_slave_event(id->slave, I2C_SLAVE_WRITE_RECEIVED, &data); } } static void cdns_i2c_slave_send_data(struct cdns_i2c *id) { u8 data; /* Prepare backend for data transmission */ if (id->slave_state == CDNS_I2C_SLAVE_STATE_IDLE) { id->slave_state = CDNS_I2C_SLAVE_STATE_SEND; i2c_slave_event(id->slave, I2C_SLAVE_READ_REQUESTED, &data); } else { i2c_slave_event(id->slave, I2C_SLAVE_READ_PROCESSED, &data); } /* Send data over bus */ cdns_i2c_writereg(data, CDNS_I2C_DATA_OFFSET); } /** * cdns_i2c_slave_isr - Interrupt handler for the I2C device in slave role * @ptr: Pointer to I2C device private data * * This function handles the data interrupt and transfer complete interrupt of * the I2C device in slave role. * * Return: IRQ_HANDLED always */ static irqreturn_t cdns_i2c_slave_isr(void *ptr) { struct cdns_i2c *id = ptr; unsigned int isr_status, i2c_status; /* Fetch the interrupt status */ isr_status = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET); cdns_i2c_writereg(isr_status, CDNS_I2C_ISR_OFFSET); /* Ignore masked interrupts */ isr_status &= ~cdns_i2c_readreg(CDNS_I2C_IMR_OFFSET); /* Fetch transfer mode (send/receive) */ i2c_status = cdns_i2c_readreg(CDNS_I2C_SR_OFFSET); /* Handle data send/receive */ if (i2c_status & CDNS_I2C_SR_RXRW) { /* Send data to master */ if (isr_status & CDNS_I2C_IXR_DATA) cdns_i2c_slave_send_data(id); if (isr_status & CDNS_I2C_IXR_COMP) { id->slave_state = CDNS_I2C_SLAVE_STATE_IDLE; i2c_slave_event(id->slave, I2C_SLAVE_STOP, NULL); } } else { /* Receive data from master */ if (isr_status & CDNS_I2C_IXR_DATA) cdns_i2c_slave_rcv_data(id); if (isr_status & CDNS_I2C_IXR_COMP) { cdns_i2c_slave_rcv_data(id); id->slave_state = CDNS_I2C_SLAVE_STATE_IDLE; i2c_slave_event(id->slave, I2C_SLAVE_STOP, NULL); } } /* Master indicated xfer stop or fifo underflow/overflow */ if (isr_status & (CDNS_I2C_IXR_NACK | CDNS_I2C_IXR_RX_OVF | CDNS_I2C_IXR_RX_UNF | CDNS_I2C_IXR_TX_OVF)) { id->slave_state = CDNS_I2C_SLAVE_STATE_IDLE; i2c_slave_event(id->slave, I2C_SLAVE_STOP, NULL); cdns_i2c_writereg(CDNS_I2C_CR_CLR_FIFO, CDNS_I2C_CR_OFFSET); } return IRQ_HANDLED; } #endif /** * cdns_i2c_master_isr - Interrupt handler for the I2C device in master role * @ptr: Pointer to I2C device private data * * This function handles the data interrupt, transfer complete interrupt and * the error interrupts of the I2C device in master role. 
* * Return: IRQ_HANDLED always */ static irqreturn_t cdns_i2c_master_isr(void *ptr) { unsigned int isr_status, avail_bytes; unsigned int bytes_to_send; bool updatetx; struct cdns_i2c *id = ptr; /* Signal completion only after everything is updated */ int done_flag = 0; irqreturn_t status = IRQ_NONE; isr_status = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET); cdns_i2c_writereg(isr_status, CDNS_I2C_ISR_OFFSET); id->err_status = 0; /* Handling nack and arbitration lost interrupt */ if (isr_status & (CDNS_I2C_IXR_NACK | CDNS_I2C_IXR_ARB_LOST)) { done_flag = 1; status = IRQ_HANDLED; } /* * Check if transfer size register needs to be updated again for a * large data receive operation. */ updatetx = id->recv_count > id->curr_recv_count; /* When receiving, handle data interrupt and completion interrupt */ if (id->p_recv_buf && ((isr_status & CDNS_I2C_IXR_COMP) || (isr_status & CDNS_I2C_IXR_DATA))) { /* Read data if receive data valid is set */ while (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) & CDNS_I2C_SR_RXDV) { if (id->recv_count > 0) { *(id->p_recv_buf)++ = cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET); id->recv_count--; id->curr_recv_count--; /* * Clear hold bit that was set for FIFO control * if RX data left is less than or equal to * FIFO DEPTH unless repeated start is selected */ if (id->recv_count <= id->fifo_depth && !id->bus_hold_flag) cdns_i2c_clear_bus_hold(id); } else { dev_err(id->adap.dev.parent, "xfer_size reg rollover. xfer aborted!\n"); id->err_status |= CDNS_I2C_IXR_TO; break; } if (cdns_is_holdquirk(id, updatetx)) break; } /* * The controller sends NACK to the slave when transfer size * register reaches zero without considering the HOLD bit. * This workaround is implemented for large data transfers to * maintain transfer size non-zero while performing a large * receive operation. */ if (cdns_is_holdquirk(id, updatetx)) { /* wait while fifo is full */ while (cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET) != (id->curr_recv_count - id->fifo_depth)) ; /* * Check number of bytes to be received against maximum * transfer size and update register accordingly. */ if (((int)(id->recv_count) - id->fifo_depth) > id->transfer_size) { cdns_i2c_writereg(id->transfer_size, CDNS_I2C_XFER_SIZE_OFFSET); id->curr_recv_count = id->transfer_size + id->fifo_depth; } else { cdns_i2c_writereg(id->recv_count - id->fifo_depth, CDNS_I2C_XFER_SIZE_OFFSET); id->curr_recv_count = id->recv_count; } } /* Clear hold (if not repeated start) and signal completion */ if ((isr_status & CDNS_I2C_IXR_COMP) && !id->recv_count) { if (!id->bus_hold_flag) cdns_i2c_clear_bus_hold(id); done_flag = 1; } status = IRQ_HANDLED; } /* When sending, handle transfer complete interrupt */ if ((isr_status & CDNS_I2C_IXR_COMP) && !id->p_recv_buf) { /* * If there is more data to be sent, calculate the * space available in FIFO and fill with that many bytes. */ if (id->send_count) { avail_bytes = id->fifo_depth - cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET); if (id->send_count > avail_bytes) bytes_to_send = avail_bytes; else bytes_to_send = id->send_count; while (bytes_to_send--) { cdns_i2c_writereg( (*(id->p_send_buf)++), CDNS_I2C_DATA_OFFSET); id->send_count--; } } else { /* * Signal the completion of transaction and * clear the hold bus bit if there are no * further messages to be processed. 
*/ done_flag = 1; } if (!id->send_count && !id->bus_hold_flag) cdns_i2c_clear_bus_hold(id); status = IRQ_HANDLED; } /* Update the status for errors */ id->err_status |= isr_status & CDNS_I2C_IXR_ERR_INTR_MASK; if (id->err_status) status = IRQ_HANDLED; if (done_flag) complete(&id->xfer_done); return status; } /** * cdns_i2c_isr - Interrupt handler for the I2C device * @irq: irq number for the I2C device * @ptr: void pointer to cdns_i2c structure * * This function passes the control to slave/master based on current role of * i2c controller. * * Return: IRQ_HANDLED always */ static irqreturn_t cdns_i2c_isr(int irq, void *ptr) { #if IS_ENABLED(CONFIG_I2C_SLAVE) struct cdns_i2c *id = ptr; if (id->dev_mode == CDNS_I2C_MODE_SLAVE) return cdns_i2c_slave_isr(ptr); #endif return cdns_i2c_master_isr(ptr); } /** * cdns_i2c_mrecv - Prepare and start a master receive operation * @id: pointer to the i2c device structure */ static void cdns_i2c_mrecv(struct cdns_i2c *id) { unsigned int ctrl_reg; unsigned int isr_status; unsigned long flags; bool hold_clear = false; bool irq_save = false; u32 addr; id->p_recv_buf = id->p_msg->buf; id->recv_count = id->p_msg->len; /* Put the controller in master receive mode and clear the FIFO */ ctrl_reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET); ctrl_reg |= CDNS_I2C_CR_RW | CDNS_I2C_CR_CLR_FIFO; /* * Receive up to I2C_SMBUS_BLOCK_MAX data bytes, plus one message length * byte, plus one checksum byte if PEC is enabled. p_msg->len will be 2 if * PEC is enabled, otherwise 1. */ if (id->p_msg->flags & I2C_M_RECV_LEN) id->recv_count = I2C_SMBUS_BLOCK_MAX + id->p_msg->len; id->curr_recv_count = id->recv_count; /* * Check for the message size against FIFO depth and set the * 'hold bus' bit if it is greater than FIFO depth. */ if (id->recv_count > id->fifo_depth) ctrl_reg |= CDNS_I2C_CR_HOLD; cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); /* Clear the interrupts in interrupt status register */ isr_status = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET); cdns_i2c_writereg(isr_status, CDNS_I2C_ISR_OFFSET); /* * The no. of bytes to receive is checked against the limit of * max transfer size. Set transfer size register with no of bytes * receive if it is less than transfer size and transfer size if * it is more. Enable the interrupts. */ if (id->recv_count > id->transfer_size) { cdns_i2c_writereg(id->transfer_size, CDNS_I2C_XFER_SIZE_OFFSET); id->curr_recv_count = id->transfer_size; } else { cdns_i2c_writereg(id->recv_count, CDNS_I2C_XFER_SIZE_OFFSET); } /* Determine hold_clear based on number of bytes to receive and hold flag */ if (!id->bus_hold_flag && id->recv_count <= id->fifo_depth) { if (ctrl_reg & CDNS_I2C_CR_HOLD) { hold_clear = true; if (id->quirks & CDNS_I2C_BROKEN_HOLD_BIT) irq_save = true; } } addr = id->p_msg->addr; addr &= CDNS_I2C_ADDR_MASK; if (hold_clear) { ctrl_reg &= ~CDNS_I2C_CR_HOLD; /* * In case of Xilinx Zynq SOC, clear the HOLD bit before transfer size * register reaches '0'. This is an IP bug which causes transfer size * register overflow to 0xFF. To satisfy this timing requirement, * disable the interrupts on current processor core between register * writes to slave address register and control register. 
*/ if (irq_save) local_irq_save(flags); cdns_i2c_writereg(addr, CDNS_I2C_ADDR_OFFSET); cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); /* Read it back to avoid bufferring and make sure write happens */ cdns_i2c_readreg(CDNS_I2C_CR_OFFSET); if (irq_save) local_irq_restore(flags); } else { cdns_i2c_writereg(addr, CDNS_I2C_ADDR_OFFSET); } cdns_i2c_writereg(CDNS_I2C_ENABLED_INTR_MASK, CDNS_I2C_IER_OFFSET); } /** * cdns_i2c_msend - Prepare and start a master send operation * @id: pointer to the i2c device */ static void cdns_i2c_msend(struct cdns_i2c *id) { unsigned int avail_bytes; unsigned int bytes_to_send; unsigned int ctrl_reg; unsigned int isr_status; id->p_recv_buf = NULL; id->p_send_buf = id->p_msg->buf; id->send_count = id->p_msg->len; /* Set the controller in Master transmit mode and clear the FIFO. */ ctrl_reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET); ctrl_reg &= ~CDNS_I2C_CR_RW; ctrl_reg |= CDNS_I2C_CR_CLR_FIFO; /* * Check for the message size against FIFO depth and set the * 'hold bus' bit if it is greater than FIFO depth. */ if (id->send_count > id->fifo_depth) ctrl_reg |= CDNS_I2C_CR_HOLD; cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); /* Clear the interrupts in interrupt status register. */ isr_status = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET); cdns_i2c_writereg(isr_status, CDNS_I2C_ISR_OFFSET); /* * Calculate the space available in FIFO. Check the message length * against the space available, and fill the FIFO accordingly. * Enable the interrupts. */ avail_bytes = id->fifo_depth - cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET); if (id->send_count > avail_bytes) bytes_to_send = avail_bytes; else bytes_to_send = id->send_count; while (bytes_to_send--) { cdns_i2c_writereg((*(id->p_send_buf)++), CDNS_I2C_DATA_OFFSET); id->send_count--; } /* * Clear the bus hold flag if there is no more data * and if it is the last message. */ if (!id->bus_hold_flag && !id->send_count) cdns_i2c_clear_bus_hold(id); /* Set the slave address in address register - triggers operation. */ cdns_i2c_writereg(id->p_msg->addr & CDNS_I2C_ADDR_MASK, CDNS_I2C_ADDR_OFFSET); cdns_i2c_writereg(CDNS_I2C_ENABLED_INTR_MASK, CDNS_I2C_IER_OFFSET); } /** * cdns_i2c_master_reset - Reset the interface * @adap: pointer to the i2c adapter driver instance * * This function cleanup the fifos, clear the hold bit and status * and disable the interrupts. 
*/ static void cdns_i2c_master_reset(struct i2c_adapter *adap) { struct cdns_i2c *id = adap->algo_data; u32 regval; /* Disable the interrupts */ cdns_i2c_writereg(CDNS_I2C_IXR_ALL_INTR_MASK, CDNS_I2C_IDR_OFFSET); /* Clear the hold bit and fifos */ regval = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET); regval &= ~CDNS_I2C_CR_HOLD; regval |= CDNS_I2C_CR_CLR_FIFO; cdns_i2c_writereg(regval, CDNS_I2C_CR_OFFSET); /* Update the transfercount register to zero */ cdns_i2c_writereg(0, CDNS_I2C_XFER_SIZE_OFFSET); /* Clear the interrupt status register */ regval = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET); cdns_i2c_writereg(regval, CDNS_I2C_ISR_OFFSET); /* Clear the status register */ regval = cdns_i2c_readreg(CDNS_I2C_SR_OFFSET); cdns_i2c_writereg(regval, CDNS_I2C_SR_OFFSET); } static int cdns_i2c_process_msg(struct cdns_i2c *id, struct i2c_msg *msg, struct i2c_adapter *adap) { unsigned long time_left, msg_timeout; u32 reg; id->p_msg = msg; id->err_status = 0; reinit_completion(&id->xfer_done); /* Check for the TEN Bit mode on each msg */ reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET); if (msg->flags & I2C_M_TEN) { if (reg & CDNS_I2C_CR_NEA) cdns_i2c_writereg(reg & ~CDNS_I2C_CR_NEA, CDNS_I2C_CR_OFFSET); } else { if (!(reg & CDNS_I2C_CR_NEA)) cdns_i2c_writereg(reg | CDNS_I2C_CR_NEA, CDNS_I2C_CR_OFFSET); } /* Check for the R/W flag on each msg */ if (msg->flags & I2C_M_RD) cdns_i2c_mrecv(id); else cdns_i2c_msend(id); /* Minimal time to execute this message */ msg_timeout = msecs_to_jiffies((1000 * msg->len * BITS_PER_BYTE) / id->i2c_clk); /* Plus some wiggle room */ msg_timeout += msecs_to_jiffies(500); if (msg_timeout < adap->timeout) msg_timeout = adap->timeout; /* Wait for the signal of completion */ time_left = wait_for_completion_timeout(&id->xfer_done, msg_timeout); if (time_left == 0) { cdns_i2c_master_reset(adap); dev_err(id->adap.dev.parent, "timeout waiting on completion\n"); return -ETIMEDOUT; } cdns_i2c_writereg(CDNS_I2C_IXR_ALL_INTR_MASK, CDNS_I2C_IDR_OFFSET); /* If it is bus arbitration error, try again */ if (id->err_status & CDNS_I2C_IXR_ARB_LOST) return -EAGAIN; if (msg->flags & I2C_M_RECV_LEN) msg->len += min_t(unsigned int, msg->buf[0], I2C_SMBUS_BLOCK_MAX); return 0; } /** * cdns_i2c_master_xfer - The main i2c transfer function * @adap: pointer to the i2c adapter driver instance * @msgs: pointer to the i2c message structure * @num: the number of messages to transfer * * Initiates the send/recv activity based on the transfer message received. 
* * Return: number of msgs processed on success, negative error otherwise */ static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { int ret, count; u32 reg; struct cdns_i2c *id = adap->algo_data; bool hold_quirk; #if IS_ENABLED(CONFIG_I2C_SLAVE) bool change_role = false; #endif ret = pm_runtime_resume_and_get(id->dev); if (ret < 0) return ret; #if IS_ENABLED(CONFIG_I2C_SLAVE) /* Check i2c operating mode and switch if possible */ if (id->dev_mode == CDNS_I2C_MODE_SLAVE) { if (id->slave_state != CDNS_I2C_SLAVE_STATE_IDLE) { ret = -EAGAIN; goto out; } /* Set mode to master */ cdns_i2c_set_mode(CDNS_I2C_MODE_MASTER, id); /* Mark flag to change role once xfer is completed */ change_role = true; } #endif /* Check if the bus is free */ ret = readl_relaxed_poll_timeout(id->membase + CDNS_I2C_SR_OFFSET, reg, !(reg & CDNS_I2C_SR_BA), CDNS_I2C_POLL_US, CDNS_I2C_TIMEOUT_US); if (ret) { ret = -EAGAIN; if (id->adap.bus_recovery_info) i2c_recover_bus(adap); goto out; } hold_quirk = !!(id->quirks & CDNS_I2C_BROKEN_HOLD_BIT); /* * Set the flag to one when multiple messages are to be * processed with a repeated start. */ if (num > 1) { /* * This controller does not give completion interrupt after a * master receive message if HOLD bit is set (repeated start), * resulting in SW timeout. Hence, if a receive message is * followed by any other message, an error is returned * indicating that this sequence is not supported. */ for (count = 0; (count < num - 1 && hold_quirk); count++) { if (msgs[count].flags & I2C_M_RD) { dev_warn(adap->dev.parent, "Can't do repeated start after a receive message\n"); ret = -EOPNOTSUPP; goto out; } } id->bus_hold_flag = 1; reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET); reg |= CDNS_I2C_CR_HOLD; cdns_i2c_writereg(reg, CDNS_I2C_CR_OFFSET); } else { id->bus_hold_flag = 0; } /* Process the msg one by one */ for (count = 0; count < num; count++, msgs++) { if (count == (num - 1)) id->bus_hold_flag = 0; ret = cdns_i2c_process_msg(id, msgs, adap); if (ret) goto out; /* Report the other error interrupts to application */ if (id->err_status) { cdns_i2c_master_reset(adap); if (id->err_status & CDNS_I2C_IXR_NACK) { ret = -ENXIO; goto out; } ret = -EIO; goto out; } } ret = num; out: #if IS_ENABLED(CONFIG_I2C_SLAVE) /* Switch i2c mode to slave */ if (change_role) cdns_i2c_set_mode(CDNS_I2C_MODE_SLAVE, id); #endif pm_runtime_mark_last_busy(id->dev); pm_runtime_put_autosuspend(id->dev); return ret; } /** * cdns_i2c_func - Returns the supported features of the I2C driver * @adap: pointer to the i2c adapter structure * * Return: 32 bit value, each bit corresponding to a feature */ static u32 cdns_i2c_func(struct i2c_adapter *adap) { u32 func = I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK) | I2C_FUNC_SMBUS_BLOCK_DATA; #if IS_ENABLED(CONFIG_I2C_SLAVE) func |= I2C_FUNC_SLAVE; #endif return func; } #if IS_ENABLED(CONFIG_I2C_SLAVE) static int cdns_reg_slave(struct i2c_client *slave) { int ret; struct cdns_i2c *id = container_of(slave->adapter, struct cdns_i2c, adap); if (id->slave) return -EBUSY; if (slave->flags & I2C_CLIENT_TEN) return -EAFNOSUPPORT; ret = pm_runtime_resume_and_get(id->dev); if (ret < 0) return ret; /* Store slave information */ id->slave = slave; /* Enable I2C slave */ cdns_i2c_set_mode(CDNS_I2C_MODE_SLAVE, id); return 0; } static int cdns_unreg_slave(struct i2c_client *slave) { struct cdns_i2c *id = container_of(slave->adapter, struct cdns_i2c, adap); pm_runtime_put(id->dev); /* Remove slave information */ 
id->slave = NULL; /* Enable I2C master */ cdns_i2c_set_mode(CDNS_I2C_MODE_MASTER, id); return 0; } #endif static const struct i2c_algorithm cdns_i2c_algo = { .master_xfer = cdns_i2c_master_xfer, .functionality = cdns_i2c_func, #if IS_ENABLED(CONFIG_I2C_SLAVE) .reg_slave = cdns_reg_slave, .unreg_slave = cdns_unreg_slave, #endif }; /** * cdns_i2c_calc_divs - Calculate clock dividers * @f: I2C clock frequency * @input_clk: Input clock frequency * @a: First divider (return value) * @b: Second divider (return value) * * f is used as input and output variable. As input it is used as target I2C * frequency. On function exit f holds the actually resulting I2C frequency. * * Return: 0 on success, negative errno otherwise. */ static int cdns_i2c_calc_divs(unsigned long *f, unsigned long input_clk, unsigned int *a, unsigned int *b) { unsigned long fscl = *f, best_fscl = *f, actual_fscl, temp; unsigned int div_a, div_b, calc_div_a = 0, calc_div_b = 0; unsigned int last_error, current_error; /* calculate (divisor_a+1) x (divisor_b+1) */ temp = input_clk / (22 * fscl); /* * If the calculated value is negative or 0, the fscl input is out of * range. Return error. */ if (!temp || (temp > (CDNS_I2C_DIVA_MAX * CDNS_I2C_DIVB_MAX))) return -EINVAL; last_error = -1; for (div_a = 0; div_a < CDNS_I2C_DIVA_MAX; div_a++) { div_b = DIV_ROUND_UP(input_clk, 22 * fscl * (div_a + 1)); if ((div_b < 1) || (div_b > CDNS_I2C_DIVB_MAX)) continue; div_b--; actual_fscl = input_clk / (22 * (div_a + 1) * (div_b + 1)); if (actual_fscl > fscl) continue; current_error = fscl - actual_fscl; if (last_error > current_error) { calc_div_a = div_a; calc_div_b = div_b; best_fscl = actual_fscl; last_error = current_error; } } *a = calc_div_a; *b = calc_div_b; *f = best_fscl; return 0; } /** * cdns_i2c_setclk - This function sets the serial clock rate for the I2C device * @clk_in: I2C clock input frequency in Hz * @id: Pointer to the I2C device structure * * The device must be idle rather than busy transferring data before setting * these device options. * The data rate is set by values in the control register. * The formula for determining the correct register values is * Fscl = Fpclk/(22 x (divisor_a+1) x (divisor_b+1)) * See the hardware data sheet for a full explanation of setting the serial * clock rate. The clock can not be faster than the input clock divide by 22. * The two most common clock rates are 100KHz and 400KHz. * * Return: 0 on success, negative error otherwise */ static int cdns_i2c_setclk(unsigned long clk_in, struct cdns_i2c *id) { unsigned int div_a, div_b; unsigned int ctrl_reg; int ret = 0; unsigned long fscl = id->i2c_clk; ret = cdns_i2c_calc_divs(&fscl, clk_in, &div_a, &div_b); if (ret) return ret; ctrl_reg = id->ctrl_reg; ctrl_reg &= ~(CDNS_I2C_CR_DIVA_MASK | CDNS_I2C_CR_DIVB_MASK); ctrl_reg |= ((div_a << CDNS_I2C_CR_DIVA_SHIFT) | (div_b << CDNS_I2C_CR_DIVB_SHIFT)); id->ctrl_reg = ctrl_reg; cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); #if IS_ENABLED(CONFIG_I2C_SLAVE) id->ctrl_reg_diva_divb = ctrl_reg & (CDNS_I2C_CR_DIVA_MASK | CDNS_I2C_CR_DIVB_MASK); #endif return 0; } /** * cdns_i2c_clk_notifier_cb - Clock rate change callback * @nb: Pointer to notifier block * @event: Notification reason * @data: Pointer to notification data object * * This function is called when the cdns_i2c input clock frequency changes. * The callback checks whether a valid bus frequency can be generated after the * change. If so, the change is acknowledged, otherwise the change is aborted. 
* New dividers are written to the HW in the pre- or post change notification * depending on the scaling direction. * * Return: NOTIFY_STOP if the rate change should be aborted, NOTIFY_OK * to acknowledge the change, NOTIFY_DONE if the notification is * considered irrelevant. */ static int cdns_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long event, void *data) { struct clk_notifier_data *ndata = data; struct cdns_i2c *id = to_cdns_i2c(nb); if (pm_runtime_suspended(id->dev)) return NOTIFY_OK; switch (event) { case PRE_RATE_CHANGE: { unsigned long input_clk = ndata->new_rate; unsigned long fscl = id->i2c_clk; unsigned int div_a, div_b; int ret; ret = cdns_i2c_calc_divs(&fscl, input_clk, &div_a, &div_b); if (ret) { dev_warn(id->adap.dev.parent, "clock rate change rejected\n"); return NOTIFY_STOP; } /* scale up */ if (ndata->new_rate > ndata->old_rate) cdns_i2c_setclk(ndata->new_rate, id); return NOTIFY_OK; } case POST_RATE_CHANGE: id->input_clk = ndata->new_rate; /* scale down */ if (ndata->new_rate < ndata->old_rate) cdns_i2c_setclk(ndata->new_rate, id); return NOTIFY_OK; case ABORT_RATE_CHANGE: /* scale up */ if (ndata->new_rate > ndata->old_rate) cdns_i2c_setclk(ndata->old_rate, id); return NOTIFY_OK; default: return NOTIFY_DONE; } } /** * cdns_i2c_runtime_suspend - Runtime suspend method for the driver * @dev: Address of the platform_device structure * * Put the driver into low power mode. * * Return: 0 always */ static int __maybe_unused cdns_i2c_runtime_suspend(struct device *dev) { struct cdns_i2c *xi2c = dev_get_drvdata(dev); clk_disable(xi2c->clk); return 0; } /** * cdns_i2c_init - Controller initialisation * @id: Device private data structure * * Initialise the i2c controller. * */ static void cdns_i2c_init(struct cdns_i2c *id) { cdns_i2c_writereg(id->ctrl_reg, CDNS_I2C_CR_OFFSET); /* * Cadence I2C controller has a bug wherein it generates * invalid read transaction after HW timeout in master receiver mode. * HW timeout is not used by this driver and the interrupt is disabled. * But the feature itself cannot be disabled. Hence maximum value * is written to this register to reduce the chances of error. */ cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET); } /** * cdns_i2c_runtime_resume - Runtime resume * @dev: Address of the platform_device structure * * Runtime resume callback. * * Return: 0 on success and error value on error */ static int __maybe_unused cdns_i2c_runtime_resume(struct device *dev) { struct cdns_i2c *xi2c = dev_get_drvdata(dev); int ret; ret = clk_enable(xi2c->clk); if (ret) { dev_err(dev, "Cannot enable clock.\n"); return ret; } cdns_i2c_init(xi2c); return 0; } static const struct dev_pm_ops cdns_i2c_dev_pm_ops = { SET_RUNTIME_PM_OPS(cdns_i2c_runtime_suspend, cdns_i2c_runtime_resume, NULL) }; static const struct cdns_platform_data r1p10_i2c_def = { .quirks = CDNS_I2C_BROKEN_HOLD_BIT, }; static const struct of_device_id cdns_i2c_of_match[] = { { .compatible = "cdns,i2c-r1p10", .data = &r1p10_i2c_def }, { .compatible = "cdns,i2c-r1p14",}, { /* end of table */ } }; MODULE_DEVICE_TABLE(of, cdns_i2c_of_match); /** * cdns_i2c_detect_transfer_size - Detect the maximum transfer size supported * @id: Device private data structure * * Detect the maximum transfer size that is supported by this instance of the * Cadence I2C controller. */ static void cdns_i2c_detect_transfer_size(struct cdns_i2c *id) { u32 val; /* * Writing to the transfer size register is only possible if these two bits * are set in the control register. 
*/ cdns_i2c_writereg(CDNS_I2C_CR_MS | CDNS_I2C_CR_RW, CDNS_I2C_CR_OFFSET); /* * The number of writable bits of the transfer size register can be between * 4 and 8. This is a controlled through a synthesis parameter of the IP * core and can vary from instance to instance. The unused MSBs always read * back as 0. Writing 0xff and then reading the value back will report the * maximum supported transfer size. */ cdns_i2c_writereg(CDNS_I2C_MAX_TRANSFER_SIZE, CDNS_I2C_XFER_SIZE_OFFSET); val = cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET); id->transfer_size = CDNS_I2C_TRANSFER_SIZE(val); cdns_i2c_writereg(0, CDNS_I2C_XFER_SIZE_OFFSET); cdns_i2c_writereg(0, CDNS_I2C_CR_OFFSET); } /** * cdns_i2c_probe - Platform registration call * @pdev: Handle to the platform device structure * * This function does all the memory allocation and registration for the i2c * device. User can modify the address mode to 10 bit address mode using the * ioctl call with option I2C_TENBIT. * * Return: 0 on success, negative error otherwise */ static int cdns_i2c_probe(struct platform_device *pdev) { struct resource *r_mem; struct cdns_i2c *id; int ret, irq; const struct of_device_id *match; id = devm_kzalloc(&pdev->dev, sizeof(*id), GFP_KERNEL); if (!id) return -ENOMEM; id->dev = &pdev->dev; platform_set_drvdata(pdev, id); match = of_match_node(cdns_i2c_of_match, pdev->dev.of_node); if (match && match->data) { const struct cdns_platform_data *data = match->data; id->quirks = data->quirks; } id->rinfo.pinctrl = devm_pinctrl_get(&pdev->dev); if (IS_ERR(id->rinfo.pinctrl)) { int err = PTR_ERR(id->rinfo.pinctrl); dev_info(&pdev->dev, "can't get pinctrl, bus recovery not supported\n"); if (err != -ENODEV) return err; } else { id->adap.bus_recovery_info = &id->rinfo; } id->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &r_mem); if (IS_ERR(id->membase)) return PTR_ERR(id->membase); irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; id->adap.owner = THIS_MODULE; id->adap.dev.of_node = pdev->dev.of_node; id->adap.algo = &cdns_i2c_algo; id->adap.timeout = CDNS_I2C_TIMEOUT; id->adap.retries = 3; /* Default retry value. 
*/ id->adap.algo_data = id; id->adap.dev.parent = &pdev->dev; init_completion(&id->xfer_done); snprintf(id->adap.name, sizeof(id->adap.name), "Cadence I2C at %08lx", (unsigned long)r_mem->start); id->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(id->clk)) return dev_err_probe(&pdev->dev, PTR_ERR(id->clk), "input clock not found.\n"); id->reset = devm_reset_control_get_optional_shared(&pdev->dev, NULL); if (IS_ERR(id->reset)) return dev_err_probe(&pdev->dev, PTR_ERR(id->reset), "Failed to request reset.\n"); ret = clk_prepare_enable(id->clk); if (ret) dev_err(&pdev->dev, "Unable to enable clock.\n"); ret = reset_control_deassert(id->reset); if (ret) { dev_err_probe(&pdev->dev, ret, "Failed to de-assert reset.\n"); goto err_clk_dis; } pm_runtime_set_autosuspend_delay(id->dev, CNDS_I2C_PM_TIMEOUT); pm_runtime_use_autosuspend(id->dev); pm_runtime_set_active(id->dev); pm_runtime_enable(id->dev); id->clk_rate_change_nb.notifier_call = cdns_i2c_clk_notifier_cb; if (clk_notifier_register(id->clk, &id->clk_rate_change_nb)) dev_warn(&pdev->dev, "Unable to register clock notifier.\n"); id->input_clk = clk_get_rate(id->clk); ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency", &id->i2c_clk); if (ret || (id->i2c_clk > I2C_MAX_FAST_MODE_FREQ)) id->i2c_clk = I2C_MAX_STANDARD_MODE_FREQ; #if IS_ENABLED(CONFIG_I2C_SLAVE) /* Set initial mode to master */ id->dev_mode = CDNS_I2C_MODE_MASTER; id->slave_state = CDNS_I2C_SLAVE_STATE_IDLE; #endif id->ctrl_reg = CDNS_I2C_CR_ACK_EN | CDNS_I2C_CR_NEA | CDNS_I2C_CR_MS; id->fifo_depth = CDNS_I2C_FIFO_DEPTH_DEFAULT; of_property_read_u32(pdev->dev.of_node, "fifo-depth", &id->fifo_depth); cdns_i2c_detect_transfer_size(id); ret = cdns_i2c_setclk(id->input_clk, id); if (ret) { dev_err(&pdev->dev, "invalid SCL clock: %u Hz\n", id->i2c_clk); ret = -EINVAL; goto err_clk_notifier_unregister; } ret = devm_request_irq(&pdev->dev, irq, cdns_i2c_isr, 0, DRIVER_NAME, id); if (ret) { dev_err(&pdev->dev, "cannot get irq %d\n", irq); goto err_clk_notifier_unregister; } cdns_i2c_init(id); ret = i2c_add_adapter(&id->adap); if (ret < 0) goto err_clk_notifier_unregister; dev_info(&pdev->dev, "%u kHz mmio %08lx irq %d\n", id->i2c_clk / 1000, (unsigned long)r_mem->start, irq); return 0; err_clk_notifier_unregister: clk_notifier_unregister(id->clk, &id->clk_rate_change_nb); reset_control_assert(id->reset); err_clk_dis: clk_disable_unprepare(id->clk); pm_runtime_disable(&pdev->dev); pm_runtime_set_suspended(&pdev->dev); return ret; } /** * cdns_i2c_remove - Unregister the device after releasing the resources * @pdev: Handle to the platform device structure * * This function frees all the resources allocated to the device. * * Return: 0 always */ static void cdns_i2c_remove(struct platform_device *pdev) { struct cdns_i2c *id = platform_get_drvdata(pdev); pm_runtime_disable(&pdev->dev); pm_runtime_set_suspended(&pdev->dev); pm_runtime_dont_use_autosuspend(&pdev->dev); i2c_del_adapter(&id->adap); clk_notifier_unregister(id->clk, &id->clk_rate_change_nb); reset_control_assert(id->reset); clk_disable_unprepare(id->clk); } static struct platform_driver cdns_i2c_drv = { .driver = { .name = DRIVER_NAME, .of_match_table = cdns_i2c_of_match, .pm = &cdns_i2c_dev_pm_ops, }, .probe = cdns_i2c_probe, .remove_new = cdns_i2c_remove, }; module_platform_driver(cdns_i2c_drv); MODULE_AUTHOR("Xilinx Inc."); MODULE_DESCRIPTION("Cadence I2C bus driver"); MODULE_LICENSE("GPL");
linux-master
drivers/i2c/busses/i2c-cadence.c
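Editor's illustration (not part of the kernel sources): a minimal user-space sketch of the divider search that cdns_i2c_calc_divs() performs for the relation Fscl = Fpclk / (22 x (div_a + 1) x (div_b + 1)) documented above. The DIVA_MAX/DIVB_MAX limits (4 and 64) and the 111 MHz input clock are assumptions chosen for illustration only; the in-kernel routine additionally rounds div_b up instead of brute-forcing it.

#include <stdio.h>

#define DIVA_MAX	4	/* assumed divisor_a limit */
#define DIVB_MAX	64	/* assumed divisor_b limit */

/*
 * Brute-force search for the divider pair whose resulting SCL frequency is
 * closest to, but never above, the requested one.
 */
static unsigned long calc_divs(unsigned long target_fscl,
			       unsigned long input_clk,
			       unsigned int *a, unsigned int *b)
{
	unsigned long best = 0, actual;
	unsigned int div_a, div_b;

	for (div_a = 0; div_a < DIVA_MAX; div_a++) {
		for (div_b = 0; div_b < DIVB_MAX; div_b++) {
			actual = input_clk /
				 (22 * (div_a + 1) * (div_b + 1));
			if (actual <= target_fscl && actual > best) {
				best = actual;
				*a = div_a;
				*b = div_b;
			}
		}
	}
	return best;
}

int main(void)
{
	unsigned int a = 0, b = 0;
	/* hypothetical 111 MHz input clock, 400 kHz fast-mode target */
	unsigned long f = calc_divs(400000, 111111111, &a, &b);

	printf("div_a=%u div_b=%u -> actual SCL %lu Hz\n", a, b, f);
	return 0;
}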
// SPDX-License-Identifier: GPL-2.0-or-later /* * Instantiate UCSI device for Cypress CCGx Type-C controller. * Derived from i2c-designware-pcidrv.c and i2c-nvidia-gpu.c. */ #include <linux/i2c.h> #include <linux/export.h> #include <linux/module.h> #include <linux/string.h> #include "i2c-ccgx-ucsi.h" struct software_node; struct i2c_client *i2c_new_ccgx_ucsi(struct i2c_adapter *adapter, int irq, const struct software_node *swnode) { struct i2c_board_info info = {}; strscpy(info.type, "ccgx-ucsi", sizeof(info.type)); info.addr = 0x08; info.irq = irq; info.swnode = swnode; return i2c_new_client_device(adapter, &info); } EXPORT_SYMBOL_GPL(i2c_new_ccgx_ucsi); MODULE_LICENSE("GPL");
linux-master
drivers/i2c/busses/i2c-ccgx-ucsi.c
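Editor's sketch (not part of the kernel sources): a hypothetical fragment showing how a bus driver might hand its adapter and IRQ to the helper above. The my_i2c_ctrl structure and my_i2c_register_ucsi() name are invented for illustration; only i2c_new_ccgx_ucsi() and the standard err-pointer helpers come from the sources.

#include <linux/err.h>
#include <linux/i2c.h>

#include "i2c-ccgx-ucsi.h"

/* Hypothetical controller state owned by the calling bus driver */
struct my_i2c_ctrl {
	struct i2c_adapter adap;
	struct i2c_client *ccgx_client;
};

/* Instantiate the "ccgx-ucsi" client (address 0x08) on our adapter */
static int my_i2c_register_ucsi(struct my_i2c_ctrl *ctrl, int irq)
{
	ctrl->ccgx_client = i2c_new_ccgx_ucsi(&ctrl->adap, irq, NULL);
	if (IS_ERR(ctrl->ccgx_client))
		return PTR_ERR(ctrl->ccgx_client);

	return 0;
}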
// SPDX-License-Identifier: GPL-2.0-or-later /* SMBus driver for nVidia nForce2 MCP Added nForce3 Pro 150 Thomas Leibold <[email protected]>, Ported to 2.5 Patrick Dreker <[email protected]>, Copyright (c) 2003 Hans-Frieder Vogt <[email protected]>, Based on SMBus 2.0 driver for AMD-8111 IO-Hub Copyright (c) 2002 Vojtech Pavlik */ /* SUPPORTED DEVICES PCI ID nForce2 MCP 0064 nForce2 Ultra 400 MCP 0084 nForce3 Pro150 MCP 00D4 nForce3 250Gb MCP 00E4 nForce4 MCP 0052 nForce4 MCP-04 0034 nForce MCP51 0264 nForce MCP55 0368 nForce MCP61 03EB nForce MCP65 0446 nForce MCP67 0542 nForce MCP73 07D8 nForce MCP78S 0752 nForce MCP79 0AA2 This driver supports the 2 SMBuses that are included in the MCP of the nForce2/3/4/5xx chipsets. */ /* Note: we assume there can only be one nForce2, with two SMBus interfaces */ #include <linux/module.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/stddef.h> #include <linux/ioport.h> #include <linux/i2c.h> #include <linux/delay.h> #include <linux/dmi.h> #include <linux/acpi.h> #include <linux/slab.h> #include <linux/io.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Hans-Frieder Vogt <[email protected]>"); MODULE_DESCRIPTION("nForce2/3/4/5xx SMBus driver"); struct nforce2_smbus { struct i2c_adapter adapter; int base; int size; int blockops; int can_abort; }; /* * nVidia nForce2 SMBus control register definitions * (Newer incarnations use standard BARs 4 and 5 instead) */ #define NFORCE_PCI_SMB1 0x50 #define NFORCE_PCI_SMB2 0x54 /* * ACPI 2.0 chapter 13 SMBus 2.0 EC register model */ #define NVIDIA_SMB_PRTCL (smbus->base + 0x00) /* protocol, PEC */ #define NVIDIA_SMB_STS (smbus->base + 0x01) /* status */ #define NVIDIA_SMB_ADDR (smbus->base + 0x02) /* address */ #define NVIDIA_SMB_CMD (smbus->base + 0x03) /* command */ #define NVIDIA_SMB_DATA (smbus->base + 0x04) /* 32 data registers */ #define NVIDIA_SMB_BCNT (smbus->base + 0x24) /* number of data bytes */ #define NVIDIA_SMB_STATUS_ABRT (smbus->base + 0x3c) /* register used to check the status of the abort command */ #define NVIDIA_SMB_CTRL (smbus->base + 0x3e) /* control register */ #define NVIDIA_SMB_STATUS_ABRT_STS 0x01 /* Bit to notify that abort succeeded */ #define NVIDIA_SMB_CTRL_ABORT 0x20 #define NVIDIA_SMB_STS_DONE 0x80 #define NVIDIA_SMB_STS_ALRM 0x40 #define NVIDIA_SMB_STS_RES 0x20 #define NVIDIA_SMB_STS_STATUS 0x1f #define NVIDIA_SMB_PRTCL_WRITE 0x00 #define NVIDIA_SMB_PRTCL_READ 0x01 #define NVIDIA_SMB_PRTCL_QUICK 0x02 #define NVIDIA_SMB_PRTCL_BYTE 0x04 #define NVIDIA_SMB_PRTCL_BYTE_DATA 0x06 #define NVIDIA_SMB_PRTCL_WORD_DATA 0x08 #define NVIDIA_SMB_PRTCL_BLOCK_DATA 0x0a #define NVIDIA_SMB_PRTCL_PEC 0x80 /* Misc definitions */ #define MAX_TIMEOUT 100 /* We disable the second SMBus channel on these boards */ static const struct dmi_system_id nforce2_dmi_blacklist2[] = { { .ident = "DFI Lanparty NF4 Expert", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "DFI Corp,LTD"), DMI_MATCH(DMI_BOARD_NAME, "LP UT NF4 Expert"), }, }, { } }; static struct pci_driver nforce2_driver; /* For multiplexing support, we need a global reference to the 1st SMBus channel */ #if IS_ENABLED(CONFIG_I2C_NFORCE2_S4985) struct i2c_adapter *nforce2_smbus; EXPORT_SYMBOL_GPL(nforce2_smbus); static void nforce2_set_reference(struct i2c_adapter *adap) { nforce2_smbus = adap; } #else static inline void nforce2_set_reference(struct i2c_adapter *adap) { } #endif static void nforce2_abort(struct i2c_adapter *adap) { struct nforce2_smbus *smbus = adap->algo_data; int timeout = 0; unsigned char temp; dev_dbg(&adap->dev, "Aborting 
current transaction\n"); outb_p(NVIDIA_SMB_CTRL_ABORT, NVIDIA_SMB_CTRL); do { msleep(1); temp = inb_p(NVIDIA_SMB_STATUS_ABRT); } while (!(temp & NVIDIA_SMB_STATUS_ABRT_STS) && (timeout++ < MAX_TIMEOUT)); if (!(temp & NVIDIA_SMB_STATUS_ABRT_STS)) dev_err(&adap->dev, "Can't reset the smbus\n"); outb_p(NVIDIA_SMB_STATUS_ABRT_STS, NVIDIA_SMB_STATUS_ABRT); } static int nforce2_check_status(struct i2c_adapter *adap) { struct nforce2_smbus *smbus = adap->algo_data; int timeout = 0; unsigned char temp; do { msleep(1); temp = inb_p(NVIDIA_SMB_STS); } while ((!temp) && (timeout++ < MAX_TIMEOUT)); if (timeout > MAX_TIMEOUT) { dev_dbg(&adap->dev, "SMBus Timeout!\n"); if (smbus->can_abort) nforce2_abort(adap); return -ETIMEDOUT; } if (!(temp & NVIDIA_SMB_STS_DONE) || (temp & NVIDIA_SMB_STS_STATUS)) { dev_dbg(&adap->dev, "Transaction failed (0x%02x)!\n", temp); return -EIO; } return 0; } /* Return negative errno on error */ static s32 nforce2_access(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { struct nforce2_smbus *smbus = adap->algo_data; unsigned char protocol, pec; u8 len; int i, status; protocol = (read_write == I2C_SMBUS_READ) ? NVIDIA_SMB_PRTCL_READ : NVIDIA_SMB_PRTCL_WRITE; pec = (flags & I2C_CLIENT_PEC) ? NVIDIA_SMB_PRTCL_PEC : 0; switch (size) { case I2C_SMBUS_QUICK: protocol |= NVIDIA_SMB_PRTCL_QUICK; read_write = I2C_SMBUS_WRITE; break; case I2C_SMBUS_BYTE: if (read_write == I2C_SMBUS_WRITE) outb_p(command, NVIDIA_SMB_CMD); protocol |= NVIDIA_SMB_PRTCL_BYTE; break; case I2C_SMBUS_BYTE_DATA: outb_p(command, NVIDIA_SMB_CMD); if (read_write == I2C_SMBUS_WRITE) outb_p(data->byte, NVIDIA_SMB_DATA); protocol |= NVIDIA_SMB_PRTCL_BYTE_DATA; break; case I2C_SMBUS_WORD_DATA: outb_p(command, NVIDIA_SMB_CMD); if (read_write == I2C_SMBUS_WRITE) { outb_p(data->word, NVIDIA_SMB_DATA); outb_p(data->word >> 8, NVIDIA_SMB_DATA + 1); } protocol |= NVIDIA_SMB_PRTCL_WORD_DATA | pec; break; case I2C_SMBUS_BLOCK_DATA: outb_p(command, NVIDIA_SMB_CMD); if (read_write == I2C_SMBUS_WRITE) { len = data->block[0]; if ((len == 0) || (len > I2C_SMBUS_BLOCK_MAX)) { dev_err(&adap->dev, "Transaction failed (requested block size: %d)\n", len); return -EINVAL; } outb_p(len, NVIDIA_SMB_BCNT); for (i = 0; i < I2C_SMBUS_BLOCK_MAX; i++) outb_p(data->block[i + 1], NVIDIA_SMB_DATA + i); } protocol |= NVIDIA_SMB_PRTCL_BLOCK_DATA | pec; break; default: dev_err(&adap->dev, "Unsupported transaction %d\n", size); return -EOPNOTSUPP; } outb_p((addr & 0x7f) << 1, NVIDIA_SMB_ADDR); outb_p(protocol, NVIDIA_SMB_PRTCL); status = nforce2_check_status(adap); if (status) return status; if (read_write == I2C_SMBUS_WRITE) return 0; switch (size) { case I2C_SMBUS_BYTE: case I2C_SMBUS_BYTE_DATA: data->byte = inb_p(NVIDIA_SMB_DATA); break; case I2C_SMBUS_WORD_DATA: data->word = inb_p(NVIDIA_SMB_DATA) | (inb_p(NVIDIA_SMB_DATA + 1) << 8); break; case I2C_SMBUS_BLOCK_DATA: len = inb_p(NVIDIA_SMB_BCNT); if ((len <= 0) || (len > I2C_SMBUS_BLOCK_MAX)) { dev_err(&adap->dev, "Transaction failed (received block size: 0x%02x)\n", len); return -EPROTO; } for (i = 0; i < len; i++) data->block[i + 1] = inb_p(NVIDIA_SMB_DATA + i); data->block[0] = len; break; } return 0; } static u32 nforce2_func(struct i2c_adapter *adapter) { /* other functionality might be possible, but is not tested */ return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_PEC | (((struct nforce2_smbus *)adapter->algo_data)->blockops ? 
I2C_FUNC_SMBUS_BLOCK_DATA : 0); } static const struct i2c_algorithm smbus_algorithm = { .smbus_xfer = nforce2_access, .functionality = nforce2_func, }; static const struct pci_device_id nforce2_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE4_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP78S_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP79_SMBUS) }, { 0 } }; MODULE_DEVICE_TABLE(pci, nforce2_ids); static int nforce2_probe_smb(struct pci_dev *dev, int bar, int alt_reg, struct nforce2_smbus *smbus, const char *name) { int error; smbus->base = pci_resource_start(dev, bar); if (smbus->base) { smbus->size = pci_resource_len(dev, bar); } else { /* Older incarnations of the device used non-standard BARs */ u16 iobase; error = pci_read_config_word(dev, alt_reg, &iobase); if (error != PCIBIOS_SUCCESSFUL) { dev_err(&dev->dev, "Error reading PCI config for %s\n", name); return -EIO; } smbus->base = iobase & PCI_BASE_ADDRESS_IO_MASK; smbus->size = 64; } error = acpi_check_region(smbus->base, smbus->size, nforce2_driver.name); if (error) return error; if (!request_region(smbus->base, smbus->size, nforce2_driver.name)) { dev_err(&smbus->adapter.dev, "Error requesting region %02x .. 
%02X for %s\n", smbus->base, smbus->base+smbus->size-1, name); return -EBUSY; } smbus->adapter.owner = THIS_MODULE; smbus->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD; smbus->adapter.algo = &smbus_algorithm; smbus->adapter.algo_data = smbus; smbus->adapter.dev.parent = &dev->dev; snprintf(smbus->adapter.name, sizeof(smbus->adapter.name), "SMBus nForce2 adapter at %04x", smbus->base); error = i2c_add_adapter(&smbus->adapter); if (error) { release_region(smbus->base, smbus->size); return error; } dev_info(&smbus->adapter.dev, "nForce2 SMBus adapter at %#x\n", smbus->base); return 0; } static int nforce2_probe(struct pci_dev *dev, const struct pci_device_id *id) { struct nforce2_smbus *smbuses; int res1, res2; /* we support 2 SMBus adapters */ smbuses = kcalloc(2, sizeof(struct nforce2_smbus), GFP_KERNEL); if (!smbuses) return -ENOMEM; pci_set_drvdata(dev, smbuses); switch (dev->device) { case PCI_DEVICE_ID_NVIDIA_NFORCE2_SMBUS: case PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SMBUS: case PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS: smbuses[0].blockops = 1; smbuses[1].blockops = 1; smbuses[0].can_abort = 1; smbuses[1].can_abort = 1; } /* SMBus adapter 1 */ res1 = nforce2_probe_smb(dev, 4, NFORCE_PCI_SMB1, &smbuses[0], "SMB1"); if (res1 < 0) smbuses[0].base = 0; /* to have a check value */ /* SMBus adapter 2 */ if (dmi_check_system(nforce2_dmi_blacklist2)) { dev_err(&dev->dev, "Disabling SMB2 for safety reasons.\n"); res2 = -EPERM; smbuses[1].base = 0; } else { res2 = nforce2_probe_smb(dev, 5, NFORCE_PCI_SMB2, &smbuses[1], "SMB2"); if (res2 < 0) smbuses[1].base = 0; /* to have a check value */ } if ((res1 < 0) && (res2 < 0)) { /* we did not find even one of the SMBuses, so we give up */ kfree(smbuses); return -ENODEV; } nforce2_set_reference(&smbuses[0].adapter); return 0; } static void nforce2_remove(struct pci_dev *dev) { struct nforce2_smbus *smbuses = pci_get_drvdata(dev); nforce2_set_reference(NULL); if (smbuses[0].base) { i2c_del_adapter(&smbuses[0].adapter); release_region(smbuses[0].base, smbuses[0].size); } if (smbuses[1].base) { i2c_del_adapter(&smbuses[1].adapter); release_region(smbuses[1].base, smbuses[1].size); } kfree(smbuses); } static struct pci_driver nforce2_driver = { .name = "nForce2_smbus", .id_table = nforce2_ids, .probe = nforce2_probe, .remove = nforce2_remove, }; module_pci_driver(nforce2_driver);
linux-master
drivers/i2c/busses/i2c-nforce2.c
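Editor's illustration (not part of the kernel sources): a user-space sketch of an SMBus word-data read as serviced by nforce2_access() above, using the standard i2c-dev ioctl interface. The /dev/i2c-0 node, the 0x2d slave address and register 0x00 are hypothetical placeholders.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

int main(void)
{
	union i2c_smbus_data data;
	struct i2c_smbus_ioctl_data args;
	int fd = open("/dev/i2c-0", O_RDWR);	/* adapter exported by the driver */

	/* Bind the file descriptor to a hypothetical slave at 0x2d */
	if (fd < 0 || ioctl(fd, I2C_SLAVE, 0x2d) < 0)
		return 1;

	args.read_write = I2C_SMBUS_READ;
	args.command = 0x00;			/* hypothetical register */
	args.size = I2C_SMBUS_WORD_DATA;
	args.data = &data;

	if (ioctl(fd, I2C_SMBUS, &args) < 0)
		return 1;

	/* The driver assembles the word from NVIDIA_SMB_DATA/+1, LSB first */
	printf("word = 0x%04x\n", data.word);
	close(fd);
	return 0;
}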
// SPDX-License-Identifier: GPL-2.0-or-later /* * i2c-pca-isa.c driver for PCA9564 on ISA boards * Copyright (C) 2004 Arcom Control Systems * Copyright (C) 2008 Pengutronix */ #include <linux/kernel.h> #include <linux/ioport.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/delay.h> #include <linux/jiffies.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/wait.h> #include <linux/isa.h> #include <linux/i2c.h> #include <linux/i2c-algo-pca.h> #include <linux/io.h> #include <asm/irq.h> #define DRIVER "i2c-pca-isa" #define IO_SIZE 4 static unsigned long base; static int irq = -1; /* Data sheet recommends 59kHz for 100kHz operation due to variation * in the actual clock rate */ static int clock = 59000; static struct i2c_adapter pca_isa_ops; static wait_queue_head_t pca_wait; static void pca_isa_writebyte(void *pd, int reg, int val) { #ifdef DEBUG_IO static char *names[] = { "T/O", "DAT", "ADR", "CON" }; printk(KERN_DEBUG "*** write %s at %#lx <= %#04x\n", names[reg], base+reg, val); #endif outb(val, base+reg); } static int pca_isa_readbyte(void *pd, int reg) { int res = inb(base+reg); #ifdef DEBUG_IO { static char *names[] = { "STA", "DAT", "ADR", "CON" }; printk(KERN_DEBUG "*** read %s => %#04x\n", names[reg], res); } #endif return res; } static int pca_isa_waitforcompletion(void *pd) { unsigned long timeout; long ret; if (irq > -1) { ret = wait_event_timeout(pca_wait, pca_isa_readbyte(pd, I2C_PCA_CON) & I2C_PCA_CON_SI, pca_isa_ops.timeout); } else { /* Do polling */ timeout = jiffies + pca_isa_ops.timeout; do { ret = time_before(jiffies, timeout); if (pca_isa_readbyte(pd, I2C_PCA_CON) & I2C_PCA_CON_SI) break; udelay(100); } while (ret); } return ret > 0; } static void pca_isa_resetchip(void *pd) { /* apparently only an external reset will do it. not a lot can be done */ printk(KERN_WARNING DRIVER ": Haven't figured out how to do a reset yet\n"); } static irqreturn_t pca_handler(int this_irq, void *dev_id) { wake_up(&pca_wait); return IRQ_HANDLED; } static struct i2c_algo_pca_data pca_isa_data = { /* .data intentionally left NULL, not needed with ISA */ .write_byte = pca_isa_writebyte, .read_byte = pca_isa_readbyte, .wait_for_completion = pca_isa_waitforcompletion, .reset_chip = pca_isa_resetchip, }; static struct i2c_adapter pca_isa_ops = { .owner = THIS_MODULE, .algo_data = &pca_isa_data, .name = "PCA9564/PCA9665 ISA Adapter", .timeout = HZ, }; static int pca_isa_match(struct device *dev, unsigned int id) { int match = base != 0; if (match) { if (irq <= -1) dev_warn(dev, "Using polling mode (specify irq)\n"); } else dev_err(dev, "Please specify I/O base\n"); return match; } static int pca_isa_probe(struct device *dev, unsigned int id) { init_waitqueue_head(&pca_wait); dev_info(dev, "i/o base %#08lx. 
irq %d\n", base, irq); #ifdef CONFIG_PPC if (check_legacy_ioport(base)) { dev_err(dev, "I/O address %#08lx is not available\n", base); goto out; } #endif if (!request_region(base, IO_SIZE, "i2c-pca-isa")) { dev_err(dev, "I/O address %#08lx is in use\n", base); goto out; } if (irq > -1) { if (request_irq(irq, pca_handler, 0, "i2c-pca-isa", &pca_isa_ops) < 0) { dev_err(dev, "Request irq%d failed\n", irq); goto out_region; } } pca_isa_data.i2c_clock = clock; if (i2c_pca_add_bus(&pca_isa_ops) < 0) { dev_err(dev, "Failed to add i2c bus\n"); goto out_irq; } return 0; out_irq: if (irq > -1) free_irq(irq, &pca_isa_ops); out_region: release_region(base, IO_SIZE); out: return -ENODEV; } static void pca_isa_remove(struct device *dev, unsigned int id) { i2c_del_adapter(&pca_isa_ops); if (irq > -1) { disable_irq(irq); free_irq(irq, &pca_isa_ops); } release_region(base, IO_SIZE); } static struct isa_driver pca_isa_driver = { .match = pca_isa_match, .probe = pca_isa_probe, .remove = pca_isa_remove, .driver = { .owner = THIS_MODULE, .name = DRIVER, } }; MODULE_AUTHOR("Ian Campbell <[email protected]>"); MODULE_DESCRIPTION("ISA base PCA9564/PCA9665 driver"); MODULE_LICENSE("GPL"); module_param_hw(base, ulong, ioport, 0); MODULE_PARM_DESC(base, "I/O base address"); module_param_hw(irq, int, irq, 0); MODULE_PARM_DESC(irq, "IRQ"); module_param(clock, int, 0); MODULE_PARM_DESC(clock, "Clock rate in hertz.\n\t\t" "For PCA9564: 330000,288000,217000,146000," "88000,59000,44000,36000\n" "\t\tFor PCA9665:\tStandard: 60300 - 100099\n" "\t\t\t\tFast: 100100 - 400099\n" "\t\t\t\tFast+: 400100 - 10000099\n" "\t\t\t\tTurbo: Up to 1265800"); module_isa_driver(pca_isa_driver, 1);
linux-master
drivers/i2c/busses/i2c-pca-isa.c
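Editor's usage note (not part of the kernel sources): with hypothetical board resources, the ISA adapter above would typically be brought up with something like "modprobe i2c-pca-isa base=0x330 irq=5 clock=59000"; the base/irq values here are placeholders, and omitting irq falls back to the polling path in pca_isa_waitforcompletion().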
// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (c) 1998 - 2002 Frodo Looijaard <[email protected]> and Philip Edelbrock <[email protected]> */ /* Supports: Intel PIIX4, 440MX Serverworks OSB4, CSB5, CSB6, HT-1000, HT-1100 ATI IXP200, IXP300, IXP400, SB600, SB700/SP5100, SB800 AMD Hudson-2, ML, CZ Hygon CZ SMSC Victory66 Note: we assume there can only be one device, with one or more SMBus interfaces. The device can register multiple i2c_adapters (up to PIIX4_MAX_ADAPTERS). For devices supporting multiple ports the i2c_adapter should provide an i2c_algorithm to access them. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/stddef.h> #include <linux/ioport.h> #include <linux/i2c.h> #include <linux/slab.h> #include <linux/dmi.h> #include <linux/acpi.h> #include <linux/io.h> /* PIIX4 SMBus address offsets */ #define SMBHSTSTS (0 + piix4_smba) #define SMBHSLVSTS (1 + piix4_smba) #define SMBHSTCNT (2 + piix4_smba) #define SMBHSTCMD (3 + piix4_smba) #define SMBHSTADD (4 + piix4_smba) #define SMBHSTDAT0 (5 + piix4_smba) #define SMBHSTDAT1 (6 + piix4_smba) #define SMBBLKDAT (7 + piix4_smba) #define SMBSLVCNT (8 + piix4_smba) #define SMBSHDWCMD (9 + piix4_smba) #define SMBSLVEVT (0xA + piix4_smba) #define SMBSLVDAT (0xC + piix4_smba) /* count for request_region */ #define SMBIOSIZE 9 /* PCI Address Constants */ #define SMBBA 0x090 #define SMBHSTCFG 0x0D2 #define SMBSLVC 0x0D3 #define SMBSHDW1 0x0D4 #define SMBSHDW2 0x0D5 #define SMBREV 0x0D6 /* Other settings */ #define MAX_TIMEOUT 500 #define ENABLE_INT9 0 /* PIIX4 constants */ #define PIIX4_QUICK 0x00 #define PIIX4_BYTE 0x04 #define PIIX4_BYTE_DATA 0x08 #define PIIX4_WORD_DATA 0x0C #define PIIX4_BLOCK_DATA 0x14 /* Multi-port constants */ #define PIIX4_MAX_ADAPTERS 4 #define HUDSON2_MAIN_PORTS 2 /* HUDSON2, KERNCZ reserves ports 3, 4 */ /* SB800 constants */ #define SB800_PIIX4_SMB_IDX 0xcd6 #define SB800_PIIX4_SMB_MAP_SIZE 2 #define KERNCZ_IMC_IDX 0x3e #define KERNCZ_IMC_DATA 0x3f /* * SB800 port is selected by bits 2:1 of the smb_en register (0x2c) * or the smb_sel register (0x2e), depending on bit 0 of register 0x2f. * Hudson-2/Bolton port is always selected by bits 2:1 of register 0x2f. */ #define SB800_PIIX4_PORT_IDX 0x2c #define SB800_PIIX4_PORT_IDX_ALT 0x2e #define SB800_PIIX4_PORT_IDX_SEL 0x2f #define SB800_PIIX4_PORT_IDX_MASK 0x06 #define SB800_PIIX4_PORT_IDX_SHIFT 1 /* On kerncz and Hudson2, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */ #define SB800_PIIX4_PORT_IDX_KERNCZ 0x02 #define SB800_PIIX4_PORT_IDX_MASK_KERNCZ 0x18 #define SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ 3 #define SB800_PIIX4_FCH_PM_ADDR 0xFED80300 #define SB800_PIIX4_FCH_PM_SIZE 8 /* insmod parameters */ /* If force is set to anything different from 0, we forcibly enable the PIIX4. DANGEROUS! */ static int force; module_param (force, int, 0); MODULE_PARM_DESC(force, "Forcibly enable the PIIX4. DANGEROUS!"); /* If force_addr is set to anything different from 0, we forcibly enable the PIIX4 at the given address. VERY DANGEROUS! */ static int force_addr; module_param_hw(force_addr, int, ioport, 0); MODULE_PARM_DESC(force_addr, "Forcibly enable the PIIX4 at the given address. 
" "EXTREMELY DANGEROUS!"); static int srvrworks_csb5_delay; static struct pci_driver piix4_driver; static const struct dmi_system_id piix4_dmi_blacklist[] = { { .ident = "Sapphire AM2RD790", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "SAPPHIRE Inc."), DMI_MATCH(DMI_BOARD_NAME, "PC-AM2RD790"), }, }, { .ident = "DFI Lanparty UT 790FX", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "DFI Inc."), DMI_MATCH(DMI_BOARD_NAME, "LP UT 790FX"), }, }, { } }; /* The IBM entry is in a separate table because we only check it on Intel-based systems */ static const struct dmi_system_id piix4_dmi_ibm[] = { { .ident = "IBM", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "IBM"), }, }, { }, }; /* * SB800 globals */ static u8 piix4_port_sel_sb800; static u8 piix4_port_mask_sb800; static u8 piix4_port_shift_sb800; static const char *piix4_main_port_names_sb800[PIIX4_MAX_ADAPTERS] = { " port 0", " port 2", " port 3", " port 4" }; static const char *piix4_aux_port_name_sb800 = " port 1"; struct sb800_mmio_cfg { void __iomem *addr; bool use_mmio; }; struct i2c_piix4_adapdata { unsigned short smba; /* SB800 */ bool sb800_main; bool notify_imc; u8 port; /* Port number, shifted */ struct sb800_mmio_cfg mmio_cfg; }; static int piix4_sb800_region_request(struct device *dev, struct sb800_mmio_cfg *mmio_cfg) { if (mmio_cfg->use_mmio) { void __iomem *addr; if (!request_mem_region_muxed(SB800_PIIX4_FCH_PM_ADDR, SB800_PIIX4_FCH_PM_SIZE, "sb800_piix4_smb")) { dev_err(dev, "SMBus base address memory region 0x%x already in use.\n", SB800_PIIX4_FCH_PM_ADDR); return -EBUSY; } addr = ioremap(SB800_PIIX4_FCH_PM_ADDR, SB800_PIIX4_FCH_PM_SIZE); if (!addr) { release_mem_region(SB800_PIIX4_FCH_PM_ADDR, SB800_PIIX4_FCH_PM_SIZE); dev_err(dev, "SMBus base address mapping failed.\n"); return -ENOMEM; } mmio_cfg->addr = addr; return 0; } if (!request_muxed_region(SB800_PIIX4_SMB_IDX, SB800_PIIX4_SMB_MAP_SIZE, "sb800_piix4_smb")) { dev_err(dev, "SMBus base address index region 0x%x already in use.\n", SB800_PIIX4_SMB_IDX); return -EBUSY; } return 0; } static void piix4_sb800_region_release(struct device *dev, struct sb800_mmio_cfg *mmio_cfg) { if (mmio_cfg->use_mmio) { iounmap(mmio_cfg->addr); release_mem_region(SB800_PIIX4_FCH_PM_ADDR, SB800_PIIX4_FCH_PM_SIZE); return; } release_region(SB800_PIIX4_SMB_IDX, SB800_PIIX4_SMB_MAP_SIZE); } static bool piix4_sb800_use_mmio(struct pci_dev *PIIX4_dev) { /* * cd6h/cd7h port I/O accesses can be disabled on AMD processors * w/ SMBus PCI revision ID 0x51 or greater. MMIO is supported on * the same processors and is the recommended access method. */ return (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD && PIIX4_dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS && PIIX4_dev->revision >= 0x51); } static int piix4_setup(struct pci_dev *PIIX4_dev, const struct pci_device_id *id) { unsigned char temp; unsigned short piix4_smba; if ((PIIX4_dev->vendor == PCI_VENDOR_ID_SERVERWORKS) && (PIIX4_dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5)) srvrworks_csb5_delay = 1; /* On some motherboards, it was reported that accessing the SMBus caused severe hardware problems */ if (dmi_check_system(piix4_dmi_blacklist)) { dev_err(&PIIX4_dev->dev, "Accessing the SMBus on this system is unsafe!\n"); return -EPERM; } /* Don't access SMBus on IBM systems which get corrupted eeproms */ if (dmi_check_system(piix4_dmi_ibm) && PIIX4_dev->vendor == PCI_VENDOR_ID_INTEL) { dev_err(&PIIX4_dev->dev, "IBM system detected; this module " "may corrupt your serial eeprom! 
Refusing to load " "module!\n"); return -EPERM; } /* Determine the address of the SMBus areas */ if (force_addr) { piix4_smba = force_addr & 0xfff0; force = 0; } else { pci_read_config_word(PIIX4_dev, SMBBA, &piix4_smba); piix4_smba &= 0xfff0; if(piix4_smba == 0) { dev_err(&PIIX4_dev->dev, "SMBus base address " "uninitialized - upgrade BIOS or use " "force_addr=0xaddr\n"); return -ENODEV; } } if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) return -ENODEV; if (!request_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) { dev_err(&PIIX4_dev->dev, "SMBus region 0x%x already in use!\n", piix4_smba); return -EBUSY; } pci_read_config_byte(PIIX4_dev, SMBHSTCFG, &temp); /* If force_addr is set, we program the new address here. Just to make sure, we disable the PIIX4 first. */ if (force_addr) { pci_write_config_byte(PIIX4_dev, SMBHSTCFG, temp & 0xfe); pci_write_config_word(PIIX4_dev, SMBBA, piix4_smba); pci_write_config_byte(PIIX4_dev, SMBHSTCFG, temp | 0x01); dev_info(&PIIX4_dev->dev, "WARNING: SMBus interface set to " "new address %04x!\n", piix4_smba); } else if ((temp & 1) == 0) { if (force) { /* This should never need to be done, but has been * noted that many Dell machines have the SMBus * interface on the PIIX4 disabled!? NOTE: This assumes * I/O space and other allocations WERE done by the * Bios! Don't complain if your hardware does weird * things after enabling this. :') Check for Bios * updates before resorting to this. */ pci_write_config_byte(PIIX4_dev, SMBHSTCFG, temp | 1); dev_notice(&PIIX4_dev->dev, "WARNING: SMBus interface has been FORCEFULLY ENABLED!\n"); } else { dev_err(&PIIX4_dev->dev, "SMBus Host Controller not enabled!\n"); release_region(piix4_smba, SMBIOSIZE); return -ENODEV; } } if (((temp & 0x0E) == 8) || ((temp & 0x0E) == 2)) dev_dbg(&PIIX4_dev->dev, "Using IRQ for SMBus\n"); else if ((temp & 0x0E) == 0) dev_dbg(&PIIX4_dev->dev, "Using SMI# for SMBus\n"); else dev_err(&PIIX4_dev->dev, "Illegal Interrupt configuration " "(or code out of date)!\n"); pci_read_config_byte(PIIX4_dev, SMBREV, &temp); dev_info(&PIIX4_dev->dev, "SMBus Host Controller at 0x%x, revision %d\n", piix4_smba, temp); return piix4_smba; } static int piix4_setup_sb800_smba(struct pci_dev *PIIX4_dev, u8 smb_en, u8 aux, u8 *smb_en_status, unsigned short *piix4_smba) { struct sb800_mmio_cfg mmio_cfg; u8 smba_en_lo; u8 smba_en_hi; int retval; mmio_cfg.use_mmio = piix4_sb800_use_mmio(PIIX4_dev); retval = piix4_sb800_region_request(&PIIX4_dev->dev, &mmio_cfg); if (retval) return retval; if (mmio_cfg.use_mmio) { smba_en_lo = ioread8(mmio_cfg.addr); smba_en_hi = ioread8(mmio_cfg.addr + 1); } else { outb_p(smb_en, SB800_PIIX4_SMB_IDX); smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1); outb_p(smb_en + 1, SB800_PIIX4_SMB_IDX); smba_en_hi = inb_p(SB800_PIIX4_SMB_IDX + 1); } piix4_sb800_region_release(&PIIX4_dev->dev, &mmio_cfg); if (!smb_en) { *smb_en_status = smba_en_lo & 0x10; *piix4_smba = smba_en_hi << 8; if (aux) *piix4_smba |= 0x20; } else { *smb_en_status = smba_en_lo & 0x01; *piix4_smba = ((smba_en_hi << 8) | smba_en_lo) & 0xffe0; } if (!*smb_en_status) { dev_err(&PIIX4_dev->dev, "SMBus Host Controller not enabled!\n"); return -ENODEV; } return 0; } static int piix4_setup_sb800(struct pci_dev *PIIX4_dev, const struct pci_device_id *id, u8 aux) { unsigned short piix4_smba; u8 smb_en, smb_en_status, port_sel; u8 i2ccfg, i2ccfg_offset = 0x10; struct sb800_mmio_cfg mmio_cfg; int retval; /* SB800 and later SMBus does not support forcing address */ if (force || force_addr) { dev_err(&PIIX4_dev->dev, 
"SMBus does not support " "forcing address!\n"); return -EINVAL; } /* Determine the address of the SMBus areas */ if ((PIIX4_dev->vendor == PCI_VENDOR_ID_AMD && PIIX4_dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS && PIIX4_dev->revision >= 0x41) || (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD && PIIX4_dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS && PIIX4_dev->revision >= 0x49) || (PIIX4_dev->vendor == PCI_VENDOR_ID_HYGON && PIIX4_dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS)) smb_en = 0x00; else smb_en = (aux) ? 0x28 : 0x2c; retval = piix4_setup_sb800_smba(PIIX4_dev, smb_en, aux, &smb_en_status, &piix4_smba); if (retval) return retval; if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) return -ENODEV; if (!request_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) { dev_err(&PIIX4_dev->dev, "SMBus region 0x%x already in use!\n", piix4_smba); return -EBUSY; } /* Aux SMBus does not support IRQ information */ if (aux) { dev_info(&PIIX4_dev->dev, "Auxiliary SMBus Host Controller at 0x%x\n", piix4_smba); return piix4_smba; } /* Request the SMBus I2C bus config region */ if (!request_region(piix4_smba + i2ccfg_offset, 1, "i2ccfg")) { dev_err(&PIIX4_dev->dev, "SMBus I2C bus config region " "0x%x already in use!\n", piix4_smba + i2ccfg_offset); release_region(piix4_smba, SMBIOSIZE); return -EBUSY; } i2ccfg = inb_p(piix4_smba + i2ccfg_offset); release_region(piix4_smba + i2ccfg_offset, 1); if (i2ccfg & 1) dev_dbg(&PIIX4_dev->dev, "Using IRQ for SMBus\n"); else dev_dbg(&PIIX4_dev->dev, "Using SMI# for SMBus\n"); dev_info(&PIIX4_dev->dev, "SMBus Host Controller at 0x%x, revision %d\n", piix4_smba, i2ccfg >> 4); /* Find which register is used for port selection */ if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD || PIIX4_dev->vendor == PCI_VENDOR_ID_HYGON) { if (PIIX4_dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS || (PIIX4_dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS && PIIX4_dev->revision >= 0x1F)) { piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_KERNCZ; piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK_KERNCZ; piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ; } else { piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT; piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK; piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT; } } else { mmio_cfg.use_mmio = piix4_sb800_use_mmio(PIIX4_dev); retval = piix4_sb800_region_request(&PIIX4_dev->dev, &mmio_cfg); if (retval) { release_region(piix4_smba, SMBIOSIZE); return retval; } outb_p(SB800_PIIX4_PORT_IDX_SEL, SB800_PIIX4_SMB_IDX); port_sel = inb_p(SB800_PIIX4_SMB_IDX + 1); piix4_port_sel_sb800 = (port_sel & 0x01) ? SB800_PIIX4_PORT_IDX_ALT : SB800_PIIX4_PORT_IDX; piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK; piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT; piix4_sb800_region_release(&PIIX4_dev->dev, &mmio_cfg); } dev_info(&PIIX4_dev->dev, "Using register 0x%02x for SMBus port selection\n", (unsigned int)piix4_port_sel_sb800); return piix4_smba; } static int piix4_setup_aux(struct pci_dev *PIIX4_dev, const struct pci_device_id *id, unsigned short base_reg_addr) { /* Set up auxiliary SMBus controllers found on some * AMD chipsets e.g. 
SP5100 (SB700 derivative) */ unsigned short piix4_smba; /* Read address of auxiliary SMBus controller */ pci_read_config_word(PIIX4_dev, base_reg_addr, &piix4_smba); if ((piix4_smba & 1) == 0) { dev_dbg(&PIIX4_dev->dev, "Auxiliary SMBus controller not enabled\n"); return -ENODEV; } piix4_smba &= 0xfff0; if (piix4_smba == 0) { dev_dbg(&PIIX4_dev->dev, "Auxiliary SMBus base address uninitialized\n"); return -ENODEV; } if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) return -ENODEV; if (!request_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) { dev_err(&PIIX4_dev->dev, "Auxiliary SMBus region 0x%x " "already in use!\n", piix4_smba); return -EBUSY; } dev_info(&PIIX4_dev->dev, "Auxiliary SMBus Host Controller at 0x%x\n", piix4_smba); return piix4_smba; } static int piix4_transaction(struct i2c_adapter *piix4_adapter) { struct i2c_piix4_adapdata *adapdata = i2c_get_adapdata(piix4_adapter); unsigned short piix4_smba = adapdata->smba; int temp; int result = 0; int timeout = 0; dev_dbg(&piix4_adapter->dev, "Transaction (pre): CNT=%02x, CMD=%02x, " "ADD=%02x, DAT0=%02x, DAT1=%02x\n", inb_p(SMBHSTCNT), inb_p(SMBHSTCMD), inb_p(SMBHSTADD), inb_p(SMBHSTDAT0), inb_p(SMBHSTDAT1)); /* Make sure the SMBus host is ready to start transmitting */ if ((temp = inb_p(SMBHSTSTS)) != 0x00) { dev_dbg(&piix4_adapter->dev, "SMBus busy (%02x). " "Resetting...\n", temp); outb_p(temp, SMBHSTSTS); if ((temp = inb_p(SMBHSTSTS)) != 0x00) { dev_err(&piix4_adapter->dev, "Failed! (%02x)\n", temp); return -EBUSY; } else { dev_dbg(&piix4_adapter->dev, "Successful!\n"); } } /* start the transaction by setting bit 6 */ outb_p(inb(SMBHSTCNT) | 0x040, SMBHSTCNT); /* We will always wait for a fraction of a second! (See PIIX4 docs errata) */ if (srvrworks_csb5_delay) /* Extra delay for SERVERWORKS_CSB5 */ usleep_range(2000, 2100); else usleep_range(250, 500); while ((++timeout < MAX_TIMEOUT) && ((temp = inb_p(SMBHSTSTS)) & 0x01)) usleep_range(250, 500); /* If the SMBus is still busy, we give up */ if (timeout == MAX_TIMEOUT) { dev_err(&piix4_adapter->dev, "SMBus Timeout!\n"); result = -ETIMEDOUT; } if (temp & 0x10) { result = -EIO; dev_err(&piix4_adapter->dev, "Error: Failed bus transaction\n"); } if (temp & 0x08) { result = -EIO; dev_dbg(&piix4_adapter->dev, "Bus collision! SMBus may be " "locked until next hard reset. (sorry!)\n"); /* Clock stops and slave is stuck in mid-transmission */ } if (temp & 0x04) { result = -ENXIO; dev_dbg(&piix4_adapter->dev, "Error: no response!\n"); } if (inb_p(SMBHSTSTS) != 0x00) outb_p(inb(SMBHSTSTS), SMBHSTSTS); if ((temp = inb_p(SMBHSTSTS)) != 0x00) { dev_err(&piix4_adapter->dev, "Failed reset at end of " "transaction (%02x)\n", temp); } dev_dbg(&piix4_adapter->dev, "Transaction (post): CNT=%02x, CMD=%02x, " "ADD=%02x, DAT0=%02x, DAT1=%02x\n", inb_p(SMBHSTCNT), inb_p(SMBHSTCMD), inb_p(SMBHSTADD), inb_p(SMBHSTDAT0), inb_p(SMBHSTDAT1)); return result; } /* Return negative errno on error. 
*/ static s32 piix4_access(struct i2c_adapter * adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data * data) { struct i2c_piix4_adapdata *adapdata = i2c_get_adapdata(adap); unsigned short piix4_smba = adapdata->smba; int i, len; int status; switch (size) { case I2C_SMBUS_QUICK: outb_p((addr << 1) | read_write, SMBHSTADD); size = PIIX4_QUICK; break; case I2C_SMBUS_BYTE: outb_p((addr << 1) | read_write, SMBHSTADD); if (read_write == I2C_SMBUS_WRITE) outb_p(command, SMBHSTCMD); size = PIIX4_BYTE; break; case I2C_SMBUS_BYTE_DATA: outb_p((addr << 1) | read_write, SMBHSTADD); outb_p(command, SMBHSTCMD); if (read_write == I2C_SMBUS_WRITE) outb_p(data->byte, SMBHSTDAT0); size = PIIX4_BYTE_DATA; break; case I2C_SMBUS_WORD_DATA: outb_p((addr << 1) | read_write, SMBHSTADD); outb_p(command, SMBHSTCMD); if (read_write == I2C_SMBUS_WRITE) { outb_p(data->word & 0xff, SMBHSTDAT0); outb_p((data->word & 0xff00) >> 8, SMBHSTDAT1); } size = PIIX4_WORD_DATA; break; case I2C_SMBUS_BLOCK_DATA: outb_p((addr << 1) | read_write, SMBHSTADD); outb_p(command, SMBHSTCMD); if (read_write == I2C_SMBUS_WRITE) { len = data->block[0]; if (len == 0 || len > I2C_SMBUS_BLOCK_MAX) return -EINVAL; outb_p(len, SMBHSTDAT0); inb_p(SMBHSTCNT); /* Reset SMBBLKDAT */ for (i = 1; i <= len; i++) outb_p(data->block[i], SMBBLKDAT); } size = PIIX4_BLOCK_DATA; break; default: dev_warn(&adap->dev, "Unsupported transaction %d\n", size); return -EOPNOTSUPP; } outb_p((size & 0x1C) + (ENABLE_INT9 & 1), SMBHSTCNT); status = piix4_transaction(adap); if (status) return status; if ((read_write == I2C_SMBUS_WRITE) || (size == PIIX4_QUICK)) return 0; switch (size) { case PIIX4_BYTE: case PIIX4_BYTE_DATA: data->byte = inb_p(SMBHSTDAT0); break; case PIIX4_WORD_DATA: data->word = inb_p(SMBHSTDAT0) + (inb_p(SMBHSTDAT1) << 8); break; case PIIX4_BLOCK_DATA: data->block[0] = inb_p(SMBHSTDAT0); if (data->block[0] == 0 || data->block[0] > I2C_SMBUS_BLOCK_MAX) return -EPROTO; inb_p(SMBHSTCNT); /* Reset SMBBLKDAT */ for (i = 1; i <= data->block[0]; i++) data->block[i] = inb_p(SMBBLKDAT); break; } return 0; } static uint8_t piix4_imc_read(uint8_t idx) { outb_p(idx, KERNCZ_IMC_IDX); return inb_p(KERNCZ_IMC_DATA); } static void piix4_imc_write(uint8_t idx, uint8_t value) { outb_p(idx, KERNCZ_IMC_IDX); outb_p(value, KERNCZ_IMC_DATA); } static int piix4_imc_sleep(void) { int timeout = MAX_TIMEOUT; if (!request_muxed_region(KERNCZ_IMC_IDX, 2, "smbus_kerncz_imc")) return -EBUSY; /* clear response register */ piix4_imc_write(0x82, 0x00); /* request ownership flag */ piix4_imc_write(0x83, 0xB4); /* kick off IMC Mailbox command 96 */ piix4_imc_write(0x80, 0x96); while (timeout--) { if (piix4_imc_read(0x82) == 0xfa) { release_region(KERNCZ_IMC_IDX, 2); return 0; } usleep_range(1000, 2000); } release_region(KERNCZ_IMC_IDX, 2); return -ETIMEDOUT; } static void piix4_imc_wakeup(void) { int timeout = MAX_TIMEOUT; if (!request_muxed_region(KERNCZ_IMC_IDX, 2, "smbus_kerncz_imc")) return; /* clear response register */ piix4_imc_write(0x82, 0x00); /* release ownership flag */ piix4_imc_write(0x83, 0xB5); /* kick off IMC Mailbox command 96 */ piix4_imc_write(0x80, 0x96); while (timeout--) { if (piix4_imc_read(0x82) == 0xfa) break; usleep_range(1000, 2000); } release_region(KERNCZ_IMC_IDX, 2); } static int piix4_sb800_port_sel(u8 port, struct sb800_mmio_cfg *mmio_cfg) { u8 smba_en_lo, val; if (mmio_cfg->use_mmio) { smba_en_lo = ioread8(mmio_cfg->addr + piix4_port_sel_sb800); val = (smba_en_lo & ~piix4_port_mask_sb800) | port; if 
(smba_en_lo != val) iowrite8(val, mmio_cfg->addr + piix4_port_sel_sb800); return (smba_en_lo & piix4_port_mask_sb800); } outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX); smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1); val = (smba_en_lo & ~piix4_port_mask_sb800) | port; if (smba_en_lo != val) outb_p(val, SB800_PIIX4_SMB_IDX + 1); return (smba_en_lo & piix4_port_mask_sb800); } /* * Handles access to multiple SMBus ports on the SB800. * The port is selected by bits 2:1 of the smb_en register (0x2c). * Returns negative errno on error. * * Note: The selected port must be returned to the initial selection to avoid * problems on certain systems. */ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { struct i2c_piix4_adapdata *adapdata = i2c_get_adapdata(adap); unsigned short piix4_smba = adapdata->smba; int retries = MAX_TIMEOUT; int smbslvcnt; u8 prev_port; int retval; retval = piix4_sb800_region_request(&adap->dev, &adapdata->mmio_cfg); if (retval) return retval; /* Request the SMBUS semaphore, avoid conflicts with the IMC */ smbslvcnt = inb_p(SMBSLVCNT); do { outb_p(smbslvcnt | 0x10, SMBSLVCNT); /* Check the semaphore status */ smbslvcnt = inb_p(SMBSLVCNT); if (smbslvcnt & 0x10) break; usleep_range(1000, 2000); } while (--retries); /* SMBus is still owned by the IMC, we give up */ if (!retries) { retval = -EBUSY; goto release; } /* * Notify the IMC (Integrated Micro Controller) if required. * Among other responsibilities, the IMC is in charge of monitoring * the System fans and temperature sensors, and act accordingly. * All this is done through SMBus and can/will collide * with our transactions if they are long (BLOCK_DATA). * Therefore we need to request the ownership flag during those * transactions. 
*/ if ((size == I2C_SMBUS_BLOCK_DATA) && adapdata->notify_imc) { int ret; ret = piix4_imc_sleep(); switch (ret) { case -EBUSY: dev_warn(&adap->dev, "IMC base address index region 0x%x already in use.\n", KERNCZ_IMC_IDX); break; case -ETIMEDOUT: dev_warn(&adap->dev, "Failed to communicate with the IMC.\n"); break; default: break; } /* If IMC communication fails do not retry */ if (ret) { dev_warn(&adap->dev, "Continuing without IMC notification.\n"); adapdata->notify_imc = false; } } prev_port = piix4_sb800_port_sel(adapdata->port, &adapdata->mmio_cfg); retval = piix4_access(adap, addr, flags, read_write, command, size, data); piix4_sb800_port_sel(prev_port, &adapdata->mmio_cfg); /* Release the semaphore */ outb_p(smbslvcnt | 0x20, SMBSLVCNT); if ((size == I2C_SMBUS_BLOCK_DATA) && adapdata->notify_imc) piix4_imc_wakeup(); release: piix4_sb800_region_release(&adap->dev, &adapdata->mmio_cfg); return retval; } static u32 piix4_func(struct i2c_adapter *adapter) { return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA; } static const struct i2c_algorithm smbus_algorithm = { .smbus_xfer = piix4_access, .functionality = piix4_func, }; static const struct i2c_algorithm piix4_smbus_algorithm_sb800 = { .smbus_xfer = piix4_access_sb800, .functionality = piix4_func, }; static const struct pci_device_id piix4_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3) }, { PCI_DEVICE(PCI_VENDOR_ID_EFAR, PCI_DEVICE_ID_EFAR_SLC90E66_3) }, { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP200_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP300_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_KERNCZ_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_KERNCZ_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4) }, { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5) }, { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6) }, { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB) }, { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1100LD) }, { 0, } }; MODULE_DEVICE_TABLE (pci, piix4_ids); static struct i2c_adapter *piix4_main_adapters[PIIX4_MAX_ADAPTERS]; static struct i2c_adapter *piix4_aux_adapter; static int piix4_adapter_count; static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba, bool sb800_main, u8 port, bool notify_imc, u8 hw_port_nr, const char *name, struct i2c_adapter **padap) { struct i2c_adapter *adap; struct i2c_piix4_adapdata *adapdata; int retval; adap = kzalloc(sizeof(*adap), GFP_KERNEL); if (adap == NULL) { release_region(smba, SMBIOSIZE); return -ENOMEM; } adap->owner = THIS_MODULE; adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD; adap->algo = sb800_main ? 
&piix4_smbus_algorithm_sb800 : &smbus_algorithm; adapdata = kzalloc(sizeof(*adapdata), GFP_KERNEL); if (adapdata == NULL) { kfree(adap); release_region(smba, SMBIOSIZE); return -ENOMEM; } adapdata->mmio_cfg.use_mmio = piix4_sb800_use_mmio(dev); adapdata->smba = smba; adapdata->sb800_main = sb800_main; adapdata->port = port << piix4_port_shift_sb800; adapdata->notify_imc = notify_imc; /* set up the sysfs linkage to our parent device */ adap->dev.parent = &dev->dev; if (has_acpi_companion(&dev->dev)) { acpi_preset_companion(&adap->dev, ACPI_COMPANION(&dev->dev), hw_port_nr); } snprintf(adap->name, sizeof(adap->name), "SMBus PIIX4 adapter%s at %04x", name, smba); i2c_set_adapdata(adap, adapdata); retval = i2c_add_adapter(adap); if (retval) { kfree(adapdata); kfree(adap); release_region(smba, SMBIOSIZE); return retval; } *padap = adap; return 0; } static int piix4_add_adapters_sb800(struct pci_dev *dev, unsigned short smba, bool notify_imc) { struct i2c_piix4_adapdata *adapdata; int port; int retval; if (dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS || (dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS && dev->revision >= 0x1F)) { piix4_adapter_count = HUDSON2_MAIN_PORTS; } else { piix4_adapter_count = PIIX4_MAX_ADAPTERS; } for (port = 0; port < piix4_adapter_count; port++) { u8 hw_port_nr = port == 0 ? 0 : port + 1; retval = piix4_add_adapter(dev, smba, true, port, notify_imc, hw_port_nr, piix4_main_port_names_sb800[port], &piix4_main_adapters[port]); if (retval < 0) goto error; } return retval; error: dev_err(&dev->dev, "Error setting up SB800 adapters. Unregistering!\n"); while (--port >= 0) { adapdata = i2c_get_adapdata(piix4_main_adapters[port]); if (adapdata->smba) { i2c_del_adapter(piix4_main_adapters[port]); kfree(adapdata); kfree(piix4_main_adapters[port]); piix4_main_adapters[port] = NULL; } } return retval; } static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) { int retval; bool is_sb800 = false; if ((dev->vendor == PCI_VENDOR_ID_ATI && dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS && dev->revision >= 0x40) || dev->vendor == PCI_VENDOR_ID_AMD || dev->vendor == PCI_VENDOR_ID_HYGON) { bool notify_imc = false; is_sb800 = true; if ((dev->vendor == PCI_VENDOR_ID_AMD || dev->vendor == PCI_VENDOR_ID_HYGON) && dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS) { u8 imc; /* * Detect if IMC is active or not, this method is * described on coreboot's AMD IMC notes */ pci_bus_read_config_byte(dev->bus, PCI_DEVFN(0x14, 3), 0x40, &imc); if (imc & 0x80) notify_imc = true; } /* base address location etc changed in SB800 */ retval = piix4_setup_sb800(dev, id, 0); if (retval < 0) return retval; /* * Try to register multiplexed main SMBus adapter, * give up if we can't */ retval = piix4_add_adapters_sb800(dev, retval, notify_imc); if (retval < 0) return retval; } else { retval = piix4_setup(dev, id); if (retval < 0) return retval; /* Try to register main SMBus adapter, give up if we can't */ retval = piix4_add_adapter(dev, retval, false, 0, false, 0, "", &piix4_main_adapters[0]); if (retval < 0) return retval; piix4_adapter_count = 1; } /* Check for auxiliary SMBus on some AMD chipsets */ retval = -ENODEV; if (dev->vendor == PCI_VENDOR_ID_ATI && dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS) { if (dev->revision < 0x40) { retval = piix4_setup_aux(dev, id, 0x58); } else { /* SB800 added aux bus too */ retval = piix4_setup_sb800(dev, id, 1); } } if (dev->vendor == PCI_VENDOR_ID_AMD && (dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS || dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS)) { retval 
= piix4_setup_sb800(dev, id, 1); } if (retval > 0) { /* Try to add the aux adapter if it exists, * piix4_add_adapter will clean up if this fails */ piix4_add_adapter(dev, retval, false, 0, false, 1, is_sb800 ? piix4_aux_port_name_sb800 : "", &piix4_aux_adapter); } return 0; } static void piix4_adap_remove(struct i2c_adapter *adap) { struct i2c_piix4_adapdata *adapdata = i2c_get_adapdata(adap); if (adapdata->smba) { i2c_del_adapter(adap); if (adapdata->port == (0 << piix4_port_shift_sb800)) release_region(adapdata->smba, SMBIOSIZE); kfree(adapdata); kfree(adap); } } static void piix4_remove(struct pci_dev *dev) { int port = piix4_adapter_count; while (--port >= 0) { if (piix4_main_adapters[port]) { piix4_adap_remove(piix4_main_adapters[port]); piix4_main_adapters[port] = NULL; } } if (piix4_aux_adapter) { piix4_adap_remove(piix4_aux_adapter); piix4_aux_adapter = NULL; } } static struct pci_driver piix4_driver = { .name = "piix4_smbus", .id_table = piix4_ids, .probe = piix4_probe, .remove = piix4_remove, }; module_pci_driver(piix4_driver); MODULE_AUTHOR("Frodo Looijaard <[email protected]>"); MODULE_AUTHOR("Philip Edelbrock <[email protected]>"); MODULE_DESCRIPTION("PIIX4 SMBus driver"); MODULE_LICENSE("GPL");
linux-master
drivers/i2c/busses/i2c-piix4.c
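A quick way to see the piix4_access() path above in action is from user space through the generic /dev/i2c-N character device exposed by i2c-dev. The sketch below is illustrative only: the bus number 1, the client address 0x50 and the register 0x00 are assumptions, not values from the driver. The I2C_SMBUS ioctl is routed by the i2c core to the adapter's smbus_xfer hook, which for this bus is piix4_access() (or piix4_access_sb800() on the multiplexed SB800-class chips).

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

int main(void)
{
	union i2c_smbus_data data;
	struct i2c_smbus_ioctl_data args;
	int fd = open("/dev/i2c-1", O_RDWR);		/* assumed bus number */

	if (fd < 0)
		return 1;
	if (ioctl(fd, I2C_SLAVE, 0x50) < 0)		/* assumed client address */
		goto err;

	args.read_write = I2C_SMBUS_READ;
	args.command = 0x00;				/* assumed register/command byte */
	args.size = I2C_SMBUS_BYTE_DATA;
	args.data = &data;
	if (ioctl(fd, I2C_SMBUS, &args) < 0)		/* ends up in the adapter's smbus_xfer */
		goto err;

	printf("reg 0x00 = 0x%02x\n", data.byte);
	close(fd);
	return 0;
err:
	close(fd);
	return 1;
}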
// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (c) 1999 Frodo Looijaard <[email protected]> and Philip Edelbrock <[email protected]> and Mark D. Studebaker <[email protected]> */ /* This is the driver for the SMB Host controller on Acer Labs Inc. (ALI) M1541 and M1543C South Bridges. The M1543C is a South bridge for desktop systems. The M1533 is a South bridge for portable systems. They are part of the following ALI chipsets: "Aladdin Pro 2": Includes the M1621 Slot 1 North bridge with AGP and 100MHz CPU Front Side bus "Aladdin V": Includes the M1541 Socket 7 North bridge with AGP and 100MHz CPU Front Side bus "Aladdin IV": Includes the M1541 Socket 7 North bridge with host bus up to 83.3 MHz. For an overview of these chips see http://www.acerlabs.com The M1533/M1543C devices appear as FOUR separate devices on the PCI bus. An output of lspci will show something similar to the following: 00:02.0 USB Controller: Acer Laboratories Inc. M5237 00:03.0 Bridge: Acer Laboratories Inc. M7101 00:07.0 ISA bridge: Acer Laboratories Inc. M1533 00:0f.0 IDE interface: Acer Laboratories Inc. M5229 The SMB controller is part of the 7101 device, which is an ACPI-compliant Power Management Unit (PMU). The whole 7101 device has to be enabled for the SMB to work. You can't just enable the SMB alone. The SMB and the ACPI have separate I/O spaces. We make sure that the SMB is enabled. We leave the ACPI alone. This driver controls the SMB Host only. The SMB Slave controller on the M15X3 is not enabled. This driver does not use interrupts. */ /* Note: we assume there can only be one ALI15X3, with one SMBus interface */ #include <linux/module.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/stddef.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/acpi.h> #include <linux/io.h> /* ALI15X3 SMBus address offsets */ #define SMBHSTSTS (0 + ali15x3_smba) #define SMBHSTCNT (1 + ali15x3_smba) #define SMBHSTSTART (2 + ali15x3_smba) #define SMBHSTCMD (7 + ali15x3_smba) #define SMBHSTADD (3 + ali15x3_smba) #define SMBHSTDAT0 (4 + ali15x3_smba) #define SMBHSTDAT1 (5 + ali15x3_smba) #define SMBBLKDAT (6 + ali15x3_smba) /* PCI Address Constants */ #define SMBCOM 0x004 #define SMBBA 0x014 #define SMBATPC 0x05B /* used to unlock xxxBA registers */ #define SMBHSTCFG 0x0E0 #define SMBSLVC 0x0E1 #define SMBCLK 0x0E2 #define SMBREV 0x008 /* Other settings */ #define MAX_TIMEOUT 200 /* times 1/100 sec */ #define ALI15X3_SMB_IOSIZE 32 /* this is what the Award 1004 BIOS sets them to on a ASUS P5A MB. We don't use these here. If the bases aren't set to some value we tell user to upgrade BIOS and we fail. */ #define ALI15X3_SMB_DEFAULTBASE 0xE800 /* ALI15X3 address lock bits */ #define ALI15X3_LOCK 0x06 /* ALI15X3 command constants */ #define ALI15X3_ABORT 0x02 #define ALI15X3_T_OUT 0x04 #define ALI15X3_QUICK 0x00 #define ALI15X3_BYTE 0x10 #define ALI15X3_BYTE_DATA 0x20 #define ALI15X3_WORD_DATA 0x30 #define ALI15X3_BLOCK_DATA 0x40 #define ALI15X3_BLOCK_CLR 0x80 /* ALI15X3 status register bits */ #define ALI15X3_STS_IDLE 0x04 #define ALI15X3_STS_BUSY 0x08 #define ALI15X3_STS_DONE 0x10 #define ALI15X3_STS_DEV 0x20 /* device error */ #define ALI15X3_STS_COLL 0x40 /* collision or no response */ #define ALI15X3_STS_TERM 0x80 /* terminated by abort */ #define ALI15X3_STS_ERR 0xE0 /* all the bad error bits */ /* If force_addr is set to anything different from 0, we forcibly enable the device at the given address. 
*/ static u16 force_addr; module_param_hw(force_addr, ushort, ioport, 0); MODULE_PARM_DESC(force_addr, "Initialize the base address of the i2c controller"); static struct pci_driver ali15x3_driver; static unsigned short ali15x3_smba; static int ali15x3_setup(struct pci_dev *ALI15X3_dev) { u16 a; unsigned char temp; /* Check the following things: - SMB I/O address is initialized - Device is enabled - We can use the addresses */ /* Unlock the register. The data sheet says that the address registers are read-only if the lock bits are 1, but in fact the address registers are zero unless you clear the lock bits. */ pci_read_config_byte(ALI15X3_dev, SMBATPC, &temp); if (temp & ALI15X3_LOCK) { temp &= ~ALI15X3_LOCK; pci_write_config_byte(ALI15X3_dev, SMBATPC, temp); } /* Determine the address of the SMBus area */ pci_read_config_word(ALI15X3_dev, SMBBA, &ali15x3_smba); ali15x3_smba &= (0xffff & ~(ALI15X3_SMB_IOSIZE - 1)); if (ali15x3_smba == 0 && force_addr == 0) { dev_err(&ALI15X3_dev->dev, "ALI15X3_smb region uninitialized " "- upgrade BIOS or use force_addr=0xaddr\n"); return -ENODEV; } if(force_addr) ali15x3_smba = force_addr & ~(ALI15X3_SMB_IOSIZE - 1); if (acpi_check_region(ali15x3_smba, ALI15X3_SMB_IOSIZE, ali15x3_driver.name)) return -EBUSY; if (!request_region(ali15x3_smba, ALI15X3_SMB_IOSIZE, ali15x3_driver.name)) { dev_err(&ALI15X3_dev->dev, "ALI15X3_smb region 0x%x already in use!\n", ali15x3_smba); return -ENODEV; } if(force_addr) { int ret; dev_info(&ALI15X3_dev->dev, "forcing ISA address 0x%04X\n", ali15x3_smba); ret = pci_write_config_word(ALI15X3_dev, SMBBA, ali15x3_smba); if (ret != PCIBIOS_SUCCESSFUL) goto error; ret = pci_read_config_word(ALI15X3_dev, SMBBA, &a); if (ret != PCIBIOS_SUCCESSFUL) goto error; if ((a & ~(ALI15X3_SMB_IOSIZE - 1)) != ali15x3_smba) { /* make sure it works */ dev_err(&ALI15X3_dev->dev, "force address failed - not supported?\n"); goto error; } } /* check if whole device is enabled */ pci_read_config_byte(ALI15X3_dev, SMBCOM, &temp); if ((temp & 1) == 0) { dev_info(&ALI15X3_dev->dev, "enabling SMBus device\n"); pci_write_config_byte(ALI15X3_dev, SMBCOM, temp | 0x01); } /* Is SMB Host controller enabled? */ pci_read_config_byte(ALI15X3_dev, SMBHSTCFG, &temp); if ((temp & 1) == 0) { dev_info(&ALI15X3_dev->dev, "enabling SMBus controller\n"); pci_write_config_byte(ALI15X3_dev, SMBHSTCFG, temp | 0x01); } /* set SMB clock to 74KHz as recommended in data sheet */ pci_write_config_byte(ALI15X3_dev, SMBCLK, 0x20); /* The interrupt routing for SMB is set up in register 0x77 in the 1533 ISA Bridge device, NOT in the 7101 device. Don't bother with finding the 1533 device and reading the register. if ((....... 
& 0x0F) == 1) dev_dbg(&ALI15X3_dev->dev, "ALI15X3 using Interrupt 9 for SMBus.\n"); */ pci_read_config_byte(ALI15X3_dev, SMBREV, &temp); dev_dbg(&ALI15X3_dev->dev, "SMBREV = 0x%X\n", temp); dev_dbg(&ALI15X3_dev->dev, "iALI15X3_smba = 0x%X\n", ali15x3_smba); return 0; error: release_region(ali15x3_smba, ALI15X3_SMB_IOSIZE); return -ENODEV; } /* Another internally used function */ static int ali15x3_transaction(struct i2c_adapter *adap) { int temp; int result = 0; int timeout = 0; dev_dbg(&adap->dev, "Transaction (pre): STS=%02x, CNT=%02x, CMD=%02x, " "ADD=%02x, DAT0=%02x, DAT1=%02x\n", inb_p(SMBHSTSTS), inb_p(SMBHSTCNT), inb_p(SMBHSTCMD), inb_p(SMBHSTADD), inb_p(SMBHSTDAT0), inb_p(SMBHSTDAT1)); /* get status */ temp = inb_p(SMBHSTSTS); /* Make sure the SMBus host is ready to start transmitting */ /* Check the busy bit first */ if (temp & ALI15X3_STS_BUSY) { /* If the host controller is still busy, it may have timed out in the previous transaction, resulting in a "SMBus Timeout" Dev. I've tried the following to reset a stuck busy bit. 1. Reset the controller with an ABORT command. (this doesn't seem to clear the controller if an external device is hung) 2. Reset the controller and the other SMBus devices with a T_OUT command. (this clears the host busy bit if an external device is hung, but it comes back upon a new access to a device) 3. Disable and reenable the controller in SMBHSTCFG Worst case, nothing seems to work except power reset. */ /* Abort - reset the host controller */ /* Try resetting entire SMB bus, including other devices - This may not work either - it clears the BUSY bit but then the BUSY bit may come back on when you try and use the chip again. If that's the case you are stuck. */ dev_info(&adap->dev, "Resetting entire SMB Bus to " "clear busy condition (%02x)\n", temp); outb_p(ALI15X3_T_OUT, SMBHSTCNT); temp = inb_p(SMBHSTSTS); } /* now check the error bits and the busy bit */ if (temp & (ALI15X3_STS_ERR | ALI15X3_STS_BUSY)) { /* do a clear-on-write */ outb_p(0xFF, SMBHSTSTS); if ((temp = inb_p(SMBHSTSTS)) & (ALI15X3_STS_ERR | ALI15X3_STS_BUSY)) { /* this is probably going to be correctable only by a power reset as one of the bits now appears to be stuck */ /* This may be a bus or device with electrical problems. */ dev_err(&adap->dev, "SMBus reset failed! (0x%02x) - " "controller or device on bus is probably hung\n", temp); return -EBUSY; } } else { /* check and clear done bit */ if (temp & ALI15X3_STS_DONE) { outb_p(temp, SMBHSTSTS); } } /* start the transaction by writing anything to the start register */ outb_p(0xFF, SMBHSTSTART); /* We will always wait for a fraction of a second! */ timeout = 0; do { msleep(1); temp = inb_p(SMBHSTSTS); } while ((!(temp & (ALI15X3_STS_ERR | ALI15X3_STS_DONE))) && (timeout++ < MAX_TIMEOUT)); /* If the SMBus is still busy, we give up */ if (timeout > MAX_TIMEOUT) { result = -ETIMEDOUT; dev_err(&adap->dev, "SMBus Timeout!\n"); } if (temp & ALI15X3_STS_TERM) { result = -EIO; dev_dbg(&adap->dev, "Error: Failed bus transaction\n"); } /* Unfortunately the ALI SMB controller maps "no response" and "bus collision" into a single bit. No response is the usual case so don't do a printk. This means that bus collisions go unreported. 
*/ if (temp & ALI15X3_STS_COLL) { result = -ENXIO; dev_dbg(&adap->dev, "Error: no response or bus collision ADD=%02x\n", inb_p(SMBHSTADD)); } /* haven't ever seen this */ if (temp & ALI15X3_STS_DEV) { result = -EIO; dev_err(&adap->dev, "Error: device error\n"); } dev_dbg(&adap->dev, "Transaction (post): STS=%02x, CNT=%02x, CMD=%02x, " "ADD=%02x, DAT0=%02x, DAT1=%02x\n", inb_p(SMBHSTSTS), inb_p(SMBHSTCNT), inb_p(SMBHSTCMD), inb_p(SMBHSTADD), inb_p(SMBHSTDAT0), inb_p(SMBHSTDAT1)); return result; } /* Return negative errno on error. */ static s32 ali15x3_access(struct i2c_adapter * adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data * data) { int i, len; int temp; int timeout; /* clear all the bits (clear-on-write) */ outb_p(0xFF, SMBHSTSTS); /* make sure SMBus is idle */ temp = inb_p(SMBHSTSTS); for (timeout = 0; (timeout < MAX_TIMEOUT) && !(temp & ALI15X3_STS_IDLE); timeout++) { msleep(1); temp = inb_p(SMBHSTSTS); } if (timeout >= MAX_TIMEOUT) { dev_err(&adap->dev, "Idle wait Timeout! STS=0x%02x\n", temp); } switch (size) { case I2C_SMBUS_QUICK: outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMBHSTADD); size = ALI15X3_QUICK; break; case I2C_SMBUS_BYTE: outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMBHSTADD); if (read_write == I2C_SMBUS_WRITE) outb_p(command, SMBHSTCMD); size = ALI15X3_BYTE; break; case I2C_SMBUS_BYTE_DATA: outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMBHSTADD); outb_p(command, SMBHSTCMD); if (read_write == I2C_SMBUS_WRITE) outb_p(data->byte, SMBHSTDAT0); size = ALI15X3_BYTE_DATA; break; case I2C_SMBUS_WORD_DATA: outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMBHSTADD); outb_p(command, SMBHSTCMD); if (read_write == I2C_SMBUS_WRITE) { outb_p(data->word & 0xff, SMBHSTDAT0); outb_p((data->word & 0xff00) >> 8, SMBHSTDAT1); } size = ALI15X3_WORD_DATA; break; case I2C_SMBUS_BLOCK_DATA: outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMBHSTADD); outb_p(command, SMBHSTCMD); if (read_write == I2C_SMBUS_WRITE) { len = data->block[0]; if (len < 0) { len = 0; data->block[0] = len; } if (len > 32) { len = 32; data->block[0] = len; } outb_p(len, SMBHSTDAT0); /* Reset SMBBLKDAT */ outb_p(inb_p(SMBHSTCNT) | ALI15X3_BLOCK_CLR, SMBHSTCNT); for (i = 1; i <= len; i++) outb_p(data->block[i], SMBBLKDAT); } size = ALI15X3_BLOCK_DATA; break; default: dev_warn(&adap->dev, "Unsupported transaction %d\n", size); return -EOPNOTSUPP; } outb_p(size, SMBHSTCNT); /* output command */ temp = ali15x3_transaction(adap); if (temp) return temp; if ((read_write == I2C_SMBUS_WRITE) || (size == ALI15X3_QUICK)) return 0; switch (size) { case ALI15X3_BYTE: /* Result put in SMBHSTDAT0 */ data->byte = inb_p(SMBHSTDAT0); break; case ALI15X3_BYTE_DATA: data->byte = inb_p(SMBHSTDAT0); break; case ALI15X3_WORD_DATA: data->word = inb_p(SMBHSTDAT0) + (inb_p(SMBHSTDAT1) << 8); break; case ALI15X3_BLOCK_DATA: len = inb_p(SMBHSTDAT0); if (len > 32) len = 32; data->block[0] = len; /* Reset SMBBLKDAT */ outb_p(inb_p(SMBHSTCNT) | ALI15X3_BLOCK_CLR, SMBHSTCNT); for (i = 1; i <= data->block[0]; i++) { data->block[i] = inb_p(SMBBLKDAT); dev_dbg(&adap->dev, "Blk: len=%d, i=%d, data=%02x\n", len, i, data->block[i]); } break; } return 0; } static u32 ali15x3_func(struct i2c_adapter *adapter) { return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA; } static const struct i2c_algorithm smbus_algorithm = { .smbus_xfer = ali15x3_access, .functionality = ali15x3_func, }; static struct 
i2c_adapter ali15x3_adapter = { .owner = THIS_MODULE, .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, .algo = &smbus_algorithm, }; static const struct pci_device_id ali15x3_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) }, { 0, } }; MODULE_DEVICE_TABLE (pci, ali15x3_ids); static int ali15x3_probe(struct pci_dev *dev, const struct pci_device_id *id) { if (ali15x3_setup(dev)) { dev_err(&dev->dev, "ALI15X3 not detected, module not inserted.\n"); return -ENODEV; } /* set up the sysfs linkage to our parent device */ ali15x3_adapter.dev.parent = &dev->dev; snprintf(ali15x3_adapter.name, sizeof(ali15x3_adapter.name), "SMBus ALI15X3 adapter at %04x", ali15x3_smba); return i2c_add_adapter(&ali15x3_adapter); } static void ali15x3_remove(struct pci_dev *dev) { i2c_del_adapter(&ali15x3_adapter); release_region(ali15x3_smba, ALI15X3_SMB_IOSIZE); } static struct pci_driver ali15x3_driver = { .name = "ali15x3_smbus", .id_table = ali15x3_ids, .probe = ali15x3_probe, .remove = ali15x3_remove, }; module_pci_driver(ali15x3_driver); MODULE_AUTHOR("Frodo Looijaard <[email protected]>"); MODULE_AUTHOR("Philip Edelbrock <[email protected]>"); MODULE_AUTHOR("Mark D. Studebaker <[email protected]>"); MODULE_DESCRIPTION("ALI15X3 SMBus driver"); MODULE_LICENSE("GPL");
linux-master
drivers/i2c/busses/i2c-ali15x3.c
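For context on the ALI15X3_BLOCK_DATA handling above (length in data->block[0], payload in block[1..len]), here is a minimal in-kernel sketch of a client driver exercising that path through the standard SMBus helpers; the i2c core packs the block buffer and reaches ali15x3_access() via the adapter's smbus_xfer hook. The function name and the 0x20 command byte are invented for the example.

#include <linux/i2c.h>

static int example_block_xfer(struct i2c_client *client)
{
	u8 out[4] = { 0xde, 0xad, 0xbe, 0xef };
	u8 in[I2C_SMBUS_BLOCK_MAX];
	int ret, len;

	/* The core builds block[0] = 4 and block[1..4] = out[] before the transfer */
	ret = i2c_smbus_write_block_data(client, 0x20, sizeof(out), out);
	if (ret < 0)
		return ret;

	/* On a read, the returned length comes back from SMBHSTDAT0 in the driver */
	len = i2c_smbus_read_block_data(client, 0x20, in);
	return len < 0 ? len : 0;
}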
// SPDX-License-Identifier: GPL-2.0-or-later /* * Synopsys DesignWare I2C adapter driver. * * Based on the TI DAVINCI I2C adapter driver. * * Copyright (C) 2006 Texas Instruments. * Copyright (C) 2007 MontaVista Software Inc. * Copyright (C) 2009 Provigent Ltd. */ #include <linux/acpi.h> #include <linux/clk-provider.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/dmi.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm.h> #include <linux/pm_runtime.h> #include <linux/property.h> #include <linux/regmap.h> #include <linux/reset.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/suspend.h> #include <linux/units.h> #include "i2c-designware-core.h" static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev) { return clk_get_rate(dev->clk) / KILO; } #ifdef CONFIG_ACPI static const struct acpi_device_id dw_i2c_acpi_match[] = { { "INT33C2", 0 }, { "INT33C3", 0 }, { "INT3432", 0 }, { "INT3433", 0 }, { "80860F41", ACCESS_NO_IRQ_SUSPEND }, { "808622C1", ACCESS_NO_IRQ_SUSPEND }, { "AMD0010", ACCESS_INTR_MASK }, { "AMDI0010", ACCESS_INTR_MASK }, { "AMDI0019", ACCESS_INTR_MASK | ARBITRATION_SEMAPHORE }, { "AMDI0510", 0 }, { "APMC0D0F", 0 }, { "HISI02A1", 0 }, { "HISI02A2", 0 }, { "HISI02A3", 0 }, { "HYGO0010", ACCESS_INTR_MASK }, { } }; MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match); #endif #ifdef CONFIG_OF #define BT1_I2C_CTL 0x100 #define BT1_I2C_CTL_ADDR_MASK GENMASK(7, 0) #define BT1_I2C_CTL_WR BIT(8) #define BT1_I2C_CTL_GO BIT(31) #define BT1_I2C_DI 0x104 #define BT1_I2C_DO 0x108 static int bt1_i2c_read(void *context, unsigned int reg, unsigned int *val) { struct dw_i2c_dev *dev = context; int ret; /* * Note these methods shouldn't ever fail because the system controller * registers are memory mapped. We check the return value just in case. 
*/ ret = regmap_write(dev->sysmap, BT1_I2C_CTL, BT1_I2C_CTL_GO | (reg & BT1_I2C_CTL_ADDR_MASK)); if (ret) return ret; return regmap_read(dev->sysmap, BT1_I2C_DO, val); } static int bt1_i2c_write(void *context, unsigned int reg, unsigned int val) { struct dw_i2c_dev *dev = context; int ret; ret = regmap_write(dev->sysmap, BT1_I2C_DI, val); if (ret) return ret; return regmap_write(dev->sysmap, BT1_I2C_CTL, BT1_I2C_CTL_GO | BT1_I2C_CTL_WR | (reg & BT1_I2C_CTL_ADDR_MASK)); } static struct regmap_config bt1_i2c_cfg = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, .fast_io = true, .reg_read = bt1_i2c_read, .reg_write = bt1_i2c_write, .max_register = DW_IC_COMP_TYPE, }; static int bt1_i2c_request_regs(struct dw_i2c_dev *dev) { dev->sysmap = syscon_node_to_regmap(dev->dev->of_node->parent); if (IS_ERR(dev->sysmap)) return PTR_ERR(dev->sysmap); dev->map = devm_regmap_init(dev->dev, NULL, dev, &bt1_i2c_cfg); return PTR_ERR_OR_ZERO(dev->map); } #define MSCC_ICPU_CFG_TWI_DELAY 0x0 #define MSCC_ICPU_CFG_TWI_DELAY_ENABLE BIT(0) #define MSCC_ICPU_CFG_TWI_SPIKE_FILTER 0x4 static int mscc_twi_set_sda_hold_time(struct dw_i2c_dev *dev) { writel((dev->sda_hold_time << 1) | MSCC_ICPU_CFG_TWI_DELAY_ENABLE, dev->ext + MSCC_ICPU_CFG_TWI_DELAY); return 0; } static int dw_i2c_of_configure(struct platform_device *pdev) { struct dw_i2c_dev *dev = platform_get_drvdata(pdev); switch (dev->flags & MODEL_MASK) { case MODEL_MSCC_OCELOT: dev->ext = devm_platform_ioremap_resource(pdev, 1); if (!IS_ERR(dev->ext)) dev->set_sda_hold_time = mscc_twi_set_sda_hold_time; break; default: break; } return 0; } static const struct of_device_id dw_i2c_of_match[] = { { .compatible = "snps,designware-i2c", }, { .compatible = "mscc,ocelot-i2c", .data = (void *)MODEL_MSCC_OCELOT }, { .compatible = "baikal,bt1-sys-i2c", .data = (void *)MODEL_BAIKAL_BT1 }, {}, }; MODULE_DEVICE_TABLE(of, dw_i2c_of_match); #else static int bt1_i2c_request_regs(struct dw_i2c_dev *dev) { return -ENODEV; } static inline int dw_i2c_of_configure(struct platform_device *pdev) { return -ENODEV; } #endif static int txgbe_i2c_request_regs(struct dw_i2c_dev *dev) { dev->map = dev_get_regmap(dev->dev->parent, NULL); if (!dev->map) return -ENODEV; return 0; } static void dw_i2c_plat_pm_cleanup(struct dw_i2c_dev *dev) { pm_runtime_disable(dev->dev); if (dev->shared_with_punit) pm_runtime_put_noidle(dev->dev); } static int dw_i2c_plat_request_regs(struct dw_i2c_dev *dev) { struct platform_device *pdev = to_platform_device(dev->dev); int ret; switch (dev->flags & MODEL_MASK) { case MODEL_BAIKAL_BT1: ret = bt1_i2c_request_regs(dev); break; case MODEL_WANGXUN_SP: ret = txgbe_i2c_request_regs(dev); break; default: dev->base = devm_platform_ioremap_resource(pdev, 0); ret = PTR_ERR_OR_ZERO(dev->base); break; } return ret; } static const struct dmi_system_id dw_i2c_hwmon_class_dmi[] = { { .ident = "Qtechnology QT5222", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Qtechnology"), DMI_MATCH(DMI_PRODUCT_NAME, "QT5222"), }, }, { } /* terminate list */ }; static const struct i2c_dw_semaphore_callbacks i2c_dw_semaphore_cb_table[] = { #ifdef CONFIG_I2C_DESIGNWARE_BAYTRAIL { .probe = i2c_dw_baytrail_probe_lock_support, }, #endif #ifdef CONFIG_I2C_DESIGNWARE_AMDPSP { .probe = i2c_dw_amdpsp_probe_lock_support, }, #endif {} }; static int i2c_dw_probe_lock_support(struct dw_i2c_dev *dev) { const struct i2c_dw_semaphore_callbacks *ptr; int i = 0; int ret; ptr = i2c_dw_semaphore_cb_table; dev->semaphore_idx = -1; while (ptr->probe) { ret = ptr->probe(dev); if (ret) { /* * If there is no semaphore 
device attached to this * controller, we shouldn't abort general i2c_controller * probe. */ if (ret != -ENODEV) return ret; i++; ptr++; continue; } dev->semaphore_idx = i; break; } return 0; } static void i2c_dw_remove_lock_support(struct dw_i2c_dev *dev) { if (dev->semaphore_idx < 0) return; if (i2c_dw_semaphore_cb_table[dev->semaphore_idx].remove) i2c_dw_semaphore_cb_table[dev->semaphore_idx].remove(dev); } static int dw_i2c_plat_probe(struct platform_device *pdev) { struct i2c_adapter *adap; struct dw_i2c_dev *dev; struct i2c_timings *t; int irq, ret; irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; dev = devm_kzalloc(&pdev->dev, sizeof(struct dw_i2c_dev), GFP_KERNEL); if (!dev) return -ENOMEM; dev->flags = (uintptr_t)device_get_match_data(&pdev->dev); if (device_property_present(&pdev->dev, "wx,i2c-snps-model")) dev->flags = MODEL_WANGXUN_SP; dev->dev = &pdev->dev; dev->irq = irq; platform_set_drvdata(pdev, dev); ret = dw_i2c_plat_request_regs(dev); if (ret) return ret; dev->rst = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL); if (IS_ERR(dev->rst)) return PTR_ERR(dev->rst); reset_control_deassert(dev->rst); t = &dev->timings; i2c_parse_fw_timings(&pdev->dev, t, false); i2c_dw_adjust_bus_speed(dev); if (pdev->dev.of_node) dw_i2c_of_configure(pdev); if (has_acpi_companion(&pdev->dev)) i2c_dw_acpi_configure(&pdev->dev); ret = i2c_dw_validate_speed(dev); if (ret) goto exit_reset; ret = i2c_dw_probe_lock_support(dev); if (ret) goto exit_reset; i2c_dw_configure(dev); /* Optional interface clock */ dev->pclk = devm_clk_get_optional(&pdev->dev, "pclk"); if (IS_ERR(dev->pclk)) { ret = PTR_ERR(dev->pclk); goto exit_reset; } dev->clk = devm_clk_get_optional(&pdev->dev, NULL); if (IS_ERR(dev->clk)) { ret = PTR_ERR(dev->clk); goto exit_reset; } ret = i2c_dw_prepare_clk(dev, true); if (ret) goto exit_reset; if (dev->clk) { u64 clk_khz; dev->get_clk_rate_khz = i2c_dw_get_clk_rate_khz; clk_khz = dev->get_clk_rate_khz(dev); if (!dev->sda_hold_time && t->sda_hold_ns) dev->sda_hold_time = DIV_S64_ROUND_CLOSEST(clk_khz * t->sda_hold_ns, MICRO); } adap = &dev->adapter; adap->owner = THIS_MODULE; adap->class = dmi_check_system(dw_i2c_hwmon_class_dmi) ? I2C_CLASS_HWMON : I2C_CLASS_DEPRECATED; ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev)); adap->dev.of_node = pdev->dev.of_node; adap->nr = -1; if (dev->flags & ACCESS_NO_IRQ_SUSPEND) { dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_PREPARE); } else { dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_PREPARE | DPM_FLAG_SMART_SUSPEND); } device_enable_async_suspend(&pdev->dev); /* The code below assumes runtime PM to be disabled. 
*/ WARN_ON(pm_runtime_enabled(&pdev->dev)); pm_runtime_set_autosuspend_delay(&pdev->dev, 1000); pm_runtime_use_autosuspend(&pdev->dev); pm_runtime_set_active(&pdev->dev); if (dev->shared_with_punit) pm_runtime_get_noresume(&pdev->dev); pm_runtime_enable(&pdev->dev); ret = i2c_dw_probe(dev); if (ret) goto exit_probe; return ret; exit_probe: dw_i2c_plat_pm_cleanup(dev); exit_reset: reset_control_assert(dev->rst); return ret; } static void dw_i2c_plat_remove(struct platform_device *pdev) { struct dw_i2c_dev *dev = platform_get_drvdata(pdev); pm_runtime_get_sync(&pdev->dev); i2c_del_adapter(&dev->adapter); dev->disable(dev); pm_runtime_dont_use_autosuspend(&pdev->dev); pm_runtime_put_sync(&pdev->dev); dw_i2c_plat_pm_cleanup(dev); i2c_dw_remove_lock_support(dev); reset_control_assert(dev->rst); } static int dw_i2c_plat_prepare(struct device *dev) { /* * If the ACPI companion device object is present for this device, it * may be accessed during suspend and resume of other devices via I2C * operation regions, so tell the PM core and middle layers to avoid * skipping system suspend/resume callbacks for it in that case. */ return !has_acpi_companion(dev); } static int dw_i2c_plat_runtime_suspend(struct device *dev) { struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); if (i_dev->shared_with_punit) return 0; i_dev->disable(i_dev); i2c_dw_prepare_clk(i_dev, false); return 0; } static int dw_i2c_plat_suspend(struct device *dev) { struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); i2c_mark_adapter_suspended(&i_dev->adapter); return dw_i2c_plat_runtime_suspend(dev); } static int dw_i2c_plat_runtime_resume(struct device *dev) { struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); if (!i_dev->shared_with_punit) i2c_dw_prepare_clk(i_dev, true); i_dev->init(i_dev); return 0; } static int dw_i2c_plat_resume(struct device *dev) { struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); dw_i2c_plat_runtime_resume(dev); i2c_mark_adapter_resumed(&i_dev->adapter); return 0; } static const struct dev_pm_ops dw_i2c_dev_pm_ops = { .prepare = pm_sleep_ptr(dw_i2c_plat_prepare), LATE_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume) RUNTIME_PM_OPS(dw_i2c_plat_runtime_suspend, dw_i2c_plat_runtime_resume, NULL) }; /* Work with hotplug and coldplug */ MODULE_ALIAS("platform:i2c_designware"); static struct platform_driver dw_i2c_driver = { .probe = dw_i2c_plat_probe, .remove_new = dw_i2c_plat_remove, .driver = { .name = "i2c_designware", .of_match_table = of_match_ptr(dw_i2c_of_match), .acpi_match_table = ACPI_PTR(dw_i2c_acpi_match), .pm = pm_ptr(&dw_i2c_dev_pm_ops), }, }; static int __init dw_i2c_init_driver(void) { return platform_driver_register(&dw_i2c_driver); } subsys_initcall(dw_i2c_init_driver); static void __exit dw_i2c_exit_driver(void) { platform_driver_unregister(&dw_i2c_driver); } module_exit(dw_i2c_exit_driver); MODULE_AUTHOR("Baruch Siach <[email protected]>"); MODULE_DESCRIPTION("Synopsys DesignWare I2C bus adapter"); MODULE_LICENSE("GPL");
linux-master
drivers/i2c/busses/i2c-designware-platdrv.c
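The MODEL_WANGXUN_SP branch above does not ioremap anything itself; txgbe_i2c_request_regs() simply borrows a regmap that the parent device registered. A hedged sketch of the parent side of that hand-off is shown below; the function and config names are invented for illustration and the real Wangxun parent driver is not reproduced here.

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

static const struct regmap_config example_parent_regmap_cfg = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
};

static int example_parent_probe(struct platform_device *pdev)
{
	void __iomem *base = devm_platform_ioremap_resource(pdev, 0);
	struct regmap *map;

	if (IS_ERR(base))
		return PTR_ERR(base);

	/*
	 * The regmap is registered against &pdev->dev, so a child device
	 * (e.g. an "i2c_designware" platform device created with this
	 * device as parent) can later pick it up with dev_get_regmap().
	 */
	map = devm_regmap_init_mmio(&pdev->dev, base, &example_parent_regmap_cfg);
	return PTR_ERR_OR_ZERO(map);
}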
// SPDX-License-Identifier: GPL-2.0-or-later /* * Wondermedia I2C Master Mode Driver * * Copyright (C) 2012 Tony Prisk <[email protected]> * * Derived from GPLv2+ licensed source: * - Copyright (C) 2008 WonderMedia Technologies, Inc. */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/platform_device.h> #define REG_CR 0x00 #define REG_TCR 0x02 #define REG_CSR 0x04 #define REG_ISR 0x06 #define REG_IMR 0x08 #define REG_CDR 0x0A #define REG_TR 0x0C #define REG_MCR 0x0E #define REG_SLAVE_CR 0x10 #define REG_SLAVE_SR 0x12 #define REG_SLAVE_ISR 0x14 #define REG_SLAVE_IMR 0x16 #define REG_SLAVE_DR 0x18 #define REG_SLAVE_TR 0x1A /* REG_CR Bit fields */ #define CR_TX_NEXT_ACK 0x0000 #define CR_ENABLE 0x0001 #define CR_TX_NEXT_NO_ACK 0x0002 #define CR_TX_END 0x0004 #define CR_CPU_RDY 0x0008 #define SLAV_MODE_SEL 0x8000 /* REG_TCR Bit fields */ #define TCR_STANDARD_MODE 0x0000 #define TCR_MASTER_WRITE 0x0000 #define TCR_HS_MODE 0x2000 #define TCR_MASTER_READ 0x4000 #define TCR_FAST_MODE 0x8000 #define TCR_SLAVE_ADDR_MASK 0x007F /* REG_ISR Bit fields */ #define ISR_NACK_ADDR 0x0001 #define ISR_BYTE_END 0x0002 #define ISR_SCL_TIMEOUT 0x0004 #define ISR_WRITE_ALL 0x0007 /* REG_IMR Bit fields */ #define IMR_ENABLE_ALL 0x0007 /* REG_CSR Bit fields */ #define CSR_RCV_NOT_ACK 0x0001 #define CSR_RCV_ACK_MASK 0x0001 #define CSR_READY_MASK 0x0002 /* REG_TR */ #define SCL_TIMEOUT(x) (((x) & 0xFF) << 8) #define TR_STD 0x0064 #define TR_HS 0x0019 /* REG_MCR */ #define MCR_APB_96M 7 #define MCR_APB_166M 12 #define I2C_MODE_STANDARD 0 #define I2C_MODE_FAST 1 #define WMT_I2C_TIMEOUT (msecs_to_jiffies(1000)) struct wmt_i2c_dev { struct i2c_adapter adapter; struct completion complete; struct device *dev; void __iomem *base; struct clk *clk; int mode; int irq; u16 cmd_status; }; static int wmt_i2c_wait_bus_not_busy(struct wmt_i2c_dev *i2c_dev) { unsigned long timeout; timeout = jiffies + WMT_I2C_TIMEOUT; while (!(readw(i2c_dev->base + REG_CSR) & CSR_READY_MASK)) { if (time_after(jiffies, timeout)) { dev_warn(i2c_dev->dev, "timeout waiting for bus ready\n"); return -EBUSY; } msleep(20); } return 0; } static int wmt_check_status(struct wmt_i2c_dev *i2c_dev) { int ret = 0; if (i2c_dev->cmd_status & ISR_NACK_ADDR) ret = -EIO; if (i2c_dev->cmd_status & ISR_SCL_TIMEOUT) ret = -ETIMEDOUT; return ret; } static int wmt_i2c_write(struct i2c_adapter *adap, struct i2c_msg *pmsg, int last) { struct wmt_i2c_dev *i2c_dev = i2c_get_adapdata(adap); u16 val, tcr_val; int ret; unsigned long wait_result; int xfer_len = 0; if (!(pmsg->flags & I2C_M_NOSTART)) { ret = wmt_i2c_wait_bus_not_busy(i2c_dev); if (ret < 0) return ret; } if (pmsg->len == 0) { /* * We still need to run through the while (..) 
once, so * start at -1 and break out early from the loop */ xfer_len = -1; writew(0, i2c_dev->base + REG_CDR); } else { writew(pmsg->buf[0] & 0xFF, i2c_dev->base + REG_CDR); } if (!(pmsg->flags & I2C_M_NOSTART)) { val = readw(i2c_dev->base + REG_CR); val &= ~CR_TX_END; writew(val, i2c_dev->base + REG_CR); val = readw(i2c_dev->base + REG_CR); val |= CR_CPU_RDY; writew(val, i2c_dev->base + REG_CR); } reinit_completion(&i2c_dev->complete); if (i2c_dev->mode == I2C_MODE_STANDARD) tcr_val = TCR_STANDARD_MODE; else tcr_val = TCR_FAST_MODE; tcr_val |= (TCR_MASTER_WRITE | (pmsg->addr & TCR_SLAVE_ADDR_MASK)); writew(tcr_val, i2c_dev->base + REG_TCR); if (pmsg->flags & I2C_M_NOSTART) { val = readw(i2c_dev->base + REG_CR); val |= CR_CPU_RDY; writew(val, i2c_dev->base + REG_CR); } while (xfer_len < pmsg->len) { wait_result = wait_for_completion_timeout(&i2c_dev->complete, msecs_to_jiffies(500)); if (wait_result == 0) return -ETIMEDOUT; ret = wmt_check_status(i2c_dev); if (ret) return ret; xfer_len++; val = readw(i2c_dev->base + REG_CSR); if ((val & CSR_RCV_ACK_MASK) == CSR_RCV_NOT_ACK) { dev_dbg(i2c_dev->dev, "write RCV NACK error\n"); return -EIO; } if (pmsg->len == 0) { val = CR_TX_END | CR_CPU_RDY | CR_ENABLE; writew(val, i2c_dev->base + REG_CR); break; } if (xfer_len == pmsg->len) { if (last != 1) writew(CR_ENABLE, i2c_dev->base + REG_CR); } else { writew(pmsg->buf[xfer_len] & 0xFF, i2c_dev->base + REG_CDR); writew(CR_CPU_RDY | CR_ENABLE, i2c_dev->base + REG_CR); } } return 0; } static int wmt_i2c_read(struct i2c_adapter *adap, struct i2c_msg *pmsg, int last) { struct wmt_i2c_dev *i2c_dev = i2c_get_adapdata(adap); u16 val, tcr_val; int ret; unsigned long wait_result; u32 xfer_len = 0; if (!(pmsg->flags & I2C_M_NOSTART)) { ret = wmt_i2c_wait_bus_not_busy(i2c_dev); if (ret < 0) return ret; } val = readw(i2c_dev->base + REG_CR); val &= ~CR_TX_END; writew(val, i2c_dev->base + REG_CR); val = readw(i2c_dev->base + REG_CR); val &= ~CR_TX_NEXT_NO_ACK; writew(val, i2c_dev->base + REG_CR); if (!(pmsg->flags & I2C_M_NOSTART)) { val = readw(i2c_dev->base + REG_CR); val |= CR_CPU_RDY; writew(val, i2c_dev->base + REG_CR); } if (pmsg->len == 1) { val = readw(i2c_dev->base + REG_CR); val |= CR_TX_NEXT_NO_ACK; writew(val, i2c_dev->base + REG_CR); } reinit_completion(&i2c_dev->complete); if (i2c_dev->mode == I2C_MODE_STANDARD) tcr_val = TCR_STANDARD_MODE; else tcr_val = TCR_FAST_MODE; tcr_val |= TCR_MASTER_READ | (pmsg->addr & TCR_SLAVE_ADDR_MASK); writew(tcr_val, i2c_dev->base + REG_TCR); if (pmsg->flags & I2C_M_NOSTART) { val = readw(i2c_dev->base + REG_CR); val |= CR_CPU_RDY; writew(val, i2c_dev->base + REG_CR); } while (xfer_len < pmsg->len) { wait_result = wait_for_completion_timeout(&i2c_dev->complete, msecs_to_jiffies(500)); if (!wait_result) return -ETIMEDOUT; ret = wmt_check_status(i2c_dev); if (ret) return ret; pmsg->buf[xfer_len] = readw(i2c_dev->base + REG_CDR) >> 8; xfer_len++; if (xfer_len == pmsg->len - 1) { val = readw(i2c_dev->base + REG_CR); val |= (CR_TX_NEXT_NO_ACK | CR_CPU_RDY); writew(val, i2c_dev->base + REG_CR); } else { val = readw(i2c_dev->base + REG_CR); val |= CR_CPU_RDY; writew(val, i2c_dev->base + REG_CR); } } return 0; } static int wmt_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) { struct i2c_msg *pmsg; int i, is_last; int ret = 0; for (i = 0; ret >= 0 && i < num; i++) { is_last = ((i + 1) == num); pmsg = &msgs[i]; if (pmsg->flags & I2C_M_RD) ret = wmt_i2c_read(adap, pmsg, is_last); else ret = wmt_i2c_write(adap, pmsg, is_last); } return (ret < 0) ? 
ret : i; } static u32 wmt_i2c_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_NOSTART; } static const struct i2c_algorithm wmt_i2c_algo = { .master_xfer = wmt_i2c_xfer, .functionality = wmt_i2c_func, }; static irqreturn_t wmt_i2c_isr(int irq, void *data) { struct wmt_i2c_dev *i2c_dev = data; /* save the status and write-clear it */ i2c_dev->cmd_status = readw(i2c_dev->base + REG_ISR); writew(i2c_dev->cmd_status, i2c_dev->base + REG_ISR); complete(&i2c_dev->complete); return IRQ_HANDLED; } static int wmt_i2c_reset_hardware(struct wmt_i2c_dev *i2c_dev) { int err; err = clk_prepare_enable(i2c_dev->clk); if (err) { dev_err(i2c_dev->dev, "failed to enable clock\n"); return err; } err = clk_set_rate(i2c_dev->clk, 20000000); if (err) { dev_err(i2c_dev->dev, "failed to set clock = 20Mhz\n"); clk_disable_unprepare(i2c_dev->clk); return err; } writew(0, i2c_dev->base + REG_CR); writew(MCR_APB_166M, i2c_dev->base + REG_MCR); writew(ISR_WRITE_ALL, i2c_dev->base + REG_ISR); writew(IMR_ENABLE_ALL, i2c_dev->base + REG_IMR); writew(CR_ENABLE, i2c_dev->base + REG_CR); readw(i2c_dev->base + REG_CSR); /* read clear */ writew(ISR_WRITE_ALL, i2c_dev->base + REG_ISR); if (i2c_dev->mode == I2C_MODE_STANDARD) writew(SCL_TIMEOUT(128) | TR_STD, i2c_dev->base + REG_TR); else writew(SCL_TIMEOUT(128) | TR_HS, i2c_dev->base + REG_TR); return 0; } static int wmt_i2c_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct wmt_i2c_dev *i2c_dev; struct i2c_adapter *adap; int err; u32 clk_rate; i2c_dev = devm_kzalloc(&pdev->dev, sizeof(*i2c_dev), GFP_KERNEL); if (!i2c_dev) return -ENOMEM; i2c_dev->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); if (IS_ERR(i2c_dev->base)) return PTR_ERR(i2c_dev->base); i2c_dev->irq = irq_of_parse_and_map(np, 0); if (!i2c_dev->irq) { dev_err(&pdev->dev, "irq missing or invalid\n"); return -EINVAL; } i2c_dev->clk = of_clk_get(np, 0); if (IS_ERR(i2c_dev->clk)) { dev_err(&pdev->dev, "unable to request clock\n"); return PTR_ERR(i2c_dev->clk); } i2c_dev->mode = I2C_MODE_STANDARD; err = of_property_read_u32(np, "clock-frequency", &clk_rate); if (!err && (clk_rate == I2C_MAX_FAST_MODE_FREQ)) i2c_dev->mode = I2C_MODE_FAST; i2c_dev->dev = &pdev->dev; err = devm_request_irq(&pdev->dev, i2c_dev->irq, wmt_i2c_isr, 0, "i2c", i2c_dev); if (err) { dev_err(&pdev->dev, "failed to request irq %i\n", i2c_dev->irq); return err; } adap = &i2c_dev->adapter; i2c_set_adapdata(adap, i2c_dev); strscpy(adap->name, "WMT I2C adapter", sizeof(adap->name)); adap->owner = THIS_MODULE; adap->algo = &wmt_i2c_algo; adap->dev.parent = &pdev->dev; adap->dev.of_node = pdev->dev.of_node; init_completion(&i2c_dev->complete); err = wmt_i2c_reset_hardware(i2c_dev); if (err) { dev_err(&pdev->dev, "error initializing hardware\n"); return err; } err = i2c_add_adapter(adap); if (err) return err; platform_set_drvdata(pdev, i2c_dev); return 0; } static void wmt_i2c_remove(struct platform_device *pdev) { struct wmt_i2c_dev *i2c_dev = platform_get_drvdata(pdev); /* Disable interrupts, clock and delete adapter */ writew(0, i2c_dev->base + REG_IMR); clk_disable_unprepare(i2c_dev->clk); i2c_del_adapter(&i2c_dev->adapter); } static const struct of_device_id wmt_i2c_dt_ids[] = { { .compatible = "wm,wm8505-i2c" }, { /* Sentinel */ }, }; static struct platform_driver wmt_i2c_driver = { .probe = wmt_i2c_probe, .remove_new = wmt_i2c_remove, .driver = { .name = "wmt-i2c", .of_match_table = wmt_i2c_dt_ids, }, }; module_platform_driver(wmt_i2c_driver); 
MODULE_DESCRIPTION("Wondermedia I2C master-mode bus adapter"); MODULE_AUTHOR("Tony Prisk <[email protected]>"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(of, wmt_i2c_dt_ids);
linux-master
drivers/i2c/busses/i2c-wmt.c
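wmt_i2c_xfer() above serves plain i2c_msg arrays, dispatching each message to wmt_i2c_write() or wmt_i2c_read(). A minimal sketch of the classic two-message register read that lands there is shown below: message 0 writes the register index, message 1 (I2C_M_RD) reads the value back. The 0x1a client address and 0x03 register are placeholders, not values taken from the driver.

#include <linux/i2c.h>
#include <linux/kernel.h>

static int example_read_reg(struct i2c_adapter *adap, u8 *val)
{
	u8 reg = 0x03;				/* assumed register index */
	struct i2c_msg msgs[] = {
		{ .addr = 0x1a, .flags = 0,        .len = 1, .buf = &reg },
		{ .addr = 0x1a, .flags = I2C_M_RD, .len = 1, .buf = val },
	};
	int ret = i2c_transfer(adap, msgs, ARRAY_SIZE(msgs));

	/* i2c_transfer() returns the number of messages it completed */
	if (ret < 0)
		return ret;
	return ret == ARRAY_SIZE(msgs) ? 0 : -EIO;
}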
// SPDX-License-Identifier: GPL-2.0-or-later /* * Synopsys DesignWare I2C adapter driver. * * Based on the TI DAVINCI I2C adapter driver. * * Copyright (C) 2006 Texas Instruments. * Copyright (C) 2007 MontaVista Software Inc. * Copyright (C) 2009 Provigent Ltd. */ #include <linux/acpi.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/export.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pm_runtime.h> #include <linux/regmap.h> #include <linux/swab.h> #include <linux/types.h> #include <linux/units.h> #include "i2c-designware-core.h" static char *abort_sources[] = { [ABRT_7B_ADDR_NOACK] = "slave address not acknowledged (7bit mode)", [ABRT_10ADDR1_NOACK] = "first address byte not acknowledged (10bit mode)", [ABRT_10ADDR2_NOACK] = "second address byte not acknowledged (10bit mode)", [ABRT_TXDATA_NOACK] = "data not acknowledged", [ABRT_GCALL_NOACK] = "no acknowledgement for a general call", [ABRT_GCALL_READ] = "read after general call", [ABRT_SBYTE_ACKDET] = "start byte acknowledged", [ABRT_SBYTE_NORSTRT] = "trying to send start byte when restart is disabled", [ABRT_10B_RD_NORSTRT] = "trying to read when restart is disabled (10bit mode)", [ABRT_MASTER_DIS] = "trying to use disabled adapter", [ARB_LOST] = "lost arbitration", [ABRT_SLAVE_FLUSH_TXFIFO] = "read command so flush old data in the TX FIFO", [ABRT_SLAVE_ARBLOST] = "slave lost the bus while transmitting data to a remote master", [ABRT_SLAVE_RD_INTX] = "incorrect slave-transmitter mode configuration", }; static int dw_reg_read(void *context, unsigned int reg, unsigned int *val) { struct dw_i2c_dev *dev = context; *val = readl_relaxed(dev->base + reg); return 0; } static int dw_reg_write(void *context, unsigned int reg, unsigned int val) { struct dw_i2c_dev *dev = context; writel_relaxed(val, dev->base + reg); return 0; } static int dw_reg_read_swab(void *context, unsigned int reg, unsigned int *val) { struct dw_i2c_dev *dev = context; *val = swab32(readl_relaxed(dev->base + reg)); return 0; } static int dw_reg_write_swab(void *context, unsigned int reg, unsigned int val) { struct dw_i2c_dev *dev = context; writel_relaxed(swab32(val), dev->base + reg); return 0; } static int dw_reg_read_word(void *context, unsigned int reg, unsigned int *val) { struct dw_i2c_dev *dev = context; *val = readw_relaxed(dev->base + reg) | (readw_relaxed(dev->base + reg + 2) << 16); return 0; } static int dw_reg_write_word(void *context, unsigned int reg, unsigned int val) { struct dw_i2c_dev *dev = context; writew_relaxed(val, dev->base + reg); writew_relaxed(val >> 16, dev->base + reg + 2); return 0; } /** * i2c_dw_init_regmap() - Initialize registers map * @dev: device private data * * Autodetects needed register access mode and creates the regmap with * corresponding read/write callbacks. This must be called before doing any * other register access. */ int i2c_dw_init_regmap(struct dw_i2c_dev *dev) { struct regmap_config map_cfg = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, .disable_locking = true, .reg_read = dw_reg_read, .reg_write = dw_reg_write, .max_register = DW_IC_COMP_TYPE, }; u32 reg; int ret; /* * Skip detecting the registers map configuration if the regmap has * already been provided by a higher code. 
*/ if (dev->map) return 0; ret = i2c_dw_acquire_lock(dev); if (ret) return ret; reg = readl(dev->base + DW_IC_COMP_TYPE); i2c_dw_release_lock(dev); if ((dev->flags & MODEL_MASK) == MODEL_AMD_NAVI_GPU) map_cfg.max_register = AMD_UCSI_INTR_REG; if (reg == swab32(DW_IC_COMP_TYPE_VALUE)) { map_cfg.reg_read = dw_reg_read_swab; map_cfg.reg_write = dw_reg_write_swab; } else if (reg == (DW_IC_COMP_TYPE_VALUE & 0x0000ffff)) { map_cfg.reg_read = dw_reg_read_word; map_cfg.reg_write = dw_reg_write_word; } else if (reg != DW_IC_COMP_TYPE_VALUE) { dev_err(dev->dev, "Unknown Synopsys component type: 0x%08x\n", reg); return -ENODEV; } /* * Note we'll check the return value of the regmap IO accessors only * at the probe stage. The rest of the code won't do this because * basically we have MMIO-based regmap so non of the read/write methods * can fail. */ dev->map = devm_regmap_init(dev->dev, NULL, dev, &map_cfg); if (IS_ERR(dev->map)) { dev_err(dev->dev, "Failed to init the registers map\n"); return PTR_ERR(dev->map); } return 0; } static const u32 supported_speeds[] = { I2C_MAX_HIGH_SPEED_MODE_FREQ, I2C_MAX_FAST_MODE_PLUS_FREQ, I2C_MAX_FAST_MODE_FREQ, I2C_MAX_STANDARD_MODE_FREQ, }; int i2c_dw_validate_speed(struct dw_i2c_dev *dev) { struct i2c_timings *t = &dev->timings; unsigned int i; /* * Only standard mode at 100kHz, fast mode at 400kHz, * fast mode plus at 1MHz and high speed mode at 3.4MHz are supported. */ for (i = 0; i < ARRAY_SIZE(supported_speeds); i++) { if (t->bus_freq_hz == supported_speeds[i]) return 0; } dev_err(dev->dev, "%d Hz is unsupported, only 100kHz, 400kHz, 1MHz and 3.4MHz are supported\n", t->bus_freq_hz); return -EINVAL; } EXPORT_SYMBOL_GPL(i2c_dw_validate_speed); #ifdef CONFIG_ACPI #include <linux/dmi.h> /* * The HCNT/LCNT information coming from ACPI should be the most accurate * for given platform. However, some systems get it wrong. On such systems * we get better results by calculating those based on the input clock. */ static const struct dmi_system_id i2c_dw_no_acpi_params[] = { { .ident = "Dell Inspiron 7348", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7348"), }, }, {} }; static void i2c_dw_acpi_params(struct device *device, char method[], u16 *hcnt, u16 *lcnt, u32 *sda_hold) { struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER }; acpi_handle handle = ACPI_HANDLE(device); union acpi_object *obj; if (dmi_check_system(i2c_dw_no_acpi_params)) return; if (ACPI_FAILURE(acpi_evaluate_object(handle, method, NULL, &buf))) return; obj = (union acpi_object *)buf.pointer; if (obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 3) { const union acpi_object *objs = obj->package.elements; *hcnt = (u16)objs[0].integer.value; *lcnt = (u16)objs[1].integer.value; *sda_hold = (u32)objs[2].integer.value; } kfree(buf.pointer); } int i2c_dw_acpi_configure(struct device *device) { struct dw_i2c_dev *dev = dev_get_drvdata(device); struct i2c_timings *t = &dev->timings; u32 ss_ht = 0, fp_ht = 0, hs_ht = 0, fs_ht = 0; /* * Try to get SDA hold time and *CNT values from an ACPI method for * selected speed modes. 
*/ i2c_dw_acpi_params(device, "SSCN", &dev->ss_hcnt, &dev->ss_lcnt, &ss_ht); i2c_dw_acpi_params(device, "FMCN", &dev->fs_hcnt, &dev->fs_lcnt, &fs_ht); i2c_dw_acpi_params(device, "FPCN", &dev->fp_hcnt, &dev->fp_lcnt, &fp_ht); i2c_dw_acpi_params(device, "HSCN", &dev->hs_hcnt, &dev->hs_lcnt, &hs_ht); switch (t->bus_freq_hz) { case I2C_MAX_STANDARD_MODE_FREQ: dev->sda_hold_time = ss_ht; break; case I2C_MAX_FAST_MODE_PLUS_FREQ: dev->sda_hold_time = fp_ht; break; case I2C_MAX_HIGH_SPEED_MODE_FREQ: dev->sda_hold_time = hs_ht; break; case I2C_MAX_FAST_MODE_FREQ: default: dev->sda_hold_time = fs_ht; break; } return 0; } EXPORT_SYMBOL_GPL(i2c_dw_acpi_configure); static u32 i2c_dw_acpi_round_bus_speed(struct device *device) { u32 acpi_speed; int i; acpi_speed = i2c_acpi_find_bus_speed(device); /* * Some DSTDs use a non standard speed, round down to the lowest * standard speed. */ for (i = 0; i < ARRAY_SIZE(supported_speeds); i++) { if (acpi_speed >= supported_speeds[i]) return supported_speeds[i]; } return 0; } #else /* CONFIG_ACPI */ static inline u32 i2c_dw_acpi_round_bus_speed(struct device *device) { return 0; } #endif /* CONFIG_ACPI */ void i2c_dw_adjust_bus_speed(struct dw_i2c_dev *dev) { u32 acpi_speed = i2c_dw_acpi_round_bus_speed(dev->dev); struct i2c_timings *t = &dev->timings; /* * Find bus speed from the "clock-frequency" device property, ACPI * or by using fast mode if neither is set. */ if (acpi_speed && t->bus_freq_hz) t->bus_freq_hz = min(t->bus_freq_hz, acpi_speed); else if (acpi_speed || t->bus_freq_hz) t->bus_freq_hz = max(t->bus_freq_hz, acpi_speed); else t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ; } EXPORT_SYMBOL_GPL(i2c_dw_adjust_bus_speed); u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset) { /* * DesignWare I2C core doesn't seem to have solid strategy to meet * the tHD;STA timing spec. Configuring _HCNT based on tHIGH spec * will result in violation of the tHD;STA spec. */ if (cond) /* * Conditional expression: * * IC_[FS]S_SCL_HCNT + (1+4+3) >= IC_CLK * tHIGH * * This is based on the DW manuals, and represents an ideal * configuration. The resulting I2C bus speed will be * faster than any of the others. * * If your hardware is free from tHD;STA issue, try this one. */ return DIV_ROUND_CLOSEST_ULL((u64)ic_clk * tSYMBOL, MICRO) - 8 + offset; else /* * Conditional expression: * * IC_[FS]S_SCL_HCNT + 3 >= IC_CLK * (tHD;STA + tf) * * This is just experimental rule; the tHD;STA period turned * out to be proportinal to (_HCNT + 3). With this setting, * we could meet both tHIGH and tHD;STA timing specs. * * If unsure, you'd better to take this alternative. * * The reason why we need to take into account "tf" here, * is the same as described in i2c_dw_scl_lcnt(). */ return DIV_ROUND_CLOSEST_ULL((u64)ic_clk * (tSYMBOL + tf), MICRO) - 3 + offset; } u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset) { /* * Conditional expression: * * IC_[FS]S_SCL_LCNT + 1 >= IC_CLK * (tLOW + tf) * * DW I2C core starts counting the SCL CNTs for the LOW period * of the SCL clock (tLOW) as soon as it pulls the SCL line. * In order to meet the tLOW timing spec, we need to take into * account the fall time of SCL signal (tf). Default tf value * should be 0.3 us, for safety. 
*/ return DIV_ROUND_CLOSEST_ULL((u64)ic_clk * (tLOW + tf), MICRO) - 1 + offset; } int i2c_dw_set_sda_hold(struct dw_i2c_dev *dev) { unsigned int reg; int ret; ret = i2c_dw_acquire_lock(dev); if (ret) return ret; /* Configure SDA Hold Time if required */ ret = regmap_read(dev->map, DW_IC_COMP_VERSION, &reg); if (ret) goto err_release_lock; if (reg >= DW_IC_SDA_HOLD_MIN_VERS) { if (!dev->sda_hold_time) { /* Keep previous hold time setting if no one set it */ ret = regmap_read(dev->map, DW_IC_SDA_HOLD, &dev->sda_hold_time); if (ret) goto err_release_lock; } /* * Workaround for avoiding TX arbitration lost in case I2C * slave pulls SDA down "too quickly" after falling edge of * SCL by enabling non-zero SDA RX hold. Specification says it * extends incoming SDA low to high transition while SCL is * high but it appears to help also above issue. */ if (!(dev->sda_hold_time & DW_IC_SDA_HOLD_RX_MASK)) dev->sda_hold_time |= 1 << DW_IC_SDA_HOLD_RX_SHIFT; dev_dbg(dev->dev, "SDA Hold Time TX:RX = %d:%d\n", dev->sda_hold_time & ~(u32)DW_IC_SDA_HOLD_RX_MASK, dev->sda_hold_time >> DW_IC_SDA_HOLD_RX_SHIFT); } else if (dev->set_sda_hold_time) { dev->set_sda_hold_time(dev); } else if (dev->sda_hold_time) { dev_warn(dev->dev, "Hardware too old to adjust SDA hold time.\n"); dev->sda_hold_time = 0; } err_release_lock: i2c_dw_release_lock(dev); return ret; } void __i2c_dw_disable(struct dw_i2c_dev *dev) { unsigned int raw_intr_stats; unsigned int enable; int timeout = 100; bool abort_needed; unsigned int status; int ret; regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &raw_intr_stats); regmap_read(dev->map, DW_IC_ENABLE, &enable); abort_needed = raw_intr_stats & DW_IC_INTR_MST_ON_HOLD; if (abort_needed) { regmap_write(dev->map, DW_IC_ENABLE, enable | DW_IC_ENABLE_ABORT); ret = regmap_read_poll_timeout(dev->map, DW_IC_ENABLE, enable, !(enable & DW_IC_ENABLE_ABORT), 10, 100); if (ret) dev_err(dev->dev, "timeout while trying to abort current transfer\n"); } do { __i2c_dw_disable_nowait(dev); /* * The enable status register may be unimplemented, but * in that case this test reads zero and exits the loop. */ regmap_read(dev->map, DW_IC_ENABLE_STATUS, &status); if ((status & 1) == 0) return; /* * Wait 10 times the signaling period of the highest I2C * transfer supported by the driver (for 400KHz this is * 25us) as described in the DesignWare I2C databook. */ usleep_range(25, 250); } while (timeout--); dev_warn(dev->dev, "timeout in disabling adapter\n"); } u32 i2c_dw_clk_rate(struct dw_i2c_dev *dev) { /* * Clock is not necessary if we got LCNT/HCNT values directly from * the platform code. 
*/ if (WARN_ON_ONCE(!dev->get_clk_rate_khz)) return 0; return dev->get_clk_rate_khz(dev); } int i2c_dw_prepare_clk(struct dw_i2c_dev *dev, bool prepare) { int ret; if (prepare) { /* Optional interface clock */ ret = clk_prepare_enable(dev->pclk); if (ret) return ret; ret = clk_prepare_enable(dev->clk); if (ret) clk_disable_unprepare(dev->pclk); return ret; } clk_disable_unprepare(dev->clk); clk_disable_unprepare(dev->pclk); return 0; } EXPORT_SYMBOL_GPL(i2c_dw_prepare_clk); int i2c_dw_acquire_lock(struct dw_i2c_dev *dev) { int ret; if (!dev->acquire_lock) return 0; ret = dev->acquire_lock(); if (!ret) return 0; dev_err(dev->dev, "couldn't acquire bus ownership\n"); return ret; } void i2c_dw_release_lock(struct dw_i2c_dev *dev) { if (dev->release_lock) dev->release_lock(); } /* * Waiting for bus not busy */ int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev) { unsigned int status; int ret; ret = regmap_read_poll_timeout(dev->map, DW_IC_STATUS, status, !(status & DW_IC_STATUS_ACTIVITY), 1100, 20000); if (ret) { dev_warn(dev->dev, "timeout waiting for bus ready\n"); i2c_recover_bus(&dev->adapter); regmap_read(dev->map, DW_IC_STATUS, &status); if (!(status & DW_IC_STATUS_ACTIVITY)) ret = 0; } return ret; } int i2c_dw_handle_tx_abort(struct dw_i2c_dev *dev) { unsigned long abort_source = dev->abort_source; int i; if (abort_source & DW_IC_TX_ABRT_NOACK) { for_each_set_bit(i, &abort_source, ARRAY_SIZE(abort_sources)) dev_dbg(dev->dev, "%s: %s\n", __func__, abort_sources[i]); return -EREMOTEIO; } for_each_set_bit(i, &abort_source, ARRAY_SIZE(abort_sources)) dev_err(dev->dev, "%s: %s\n", __func__, abort_sources[i]); if (abort_source & DW_IC_TX_ARB_LOST) return -EAGAIN; else if (abort_source & DW_IC_TX_ABRT_GCALL_READ) return -EINVAL; /* wrong msgs[] data */ else return -EIO; } int i2c_dw_set_fifo_size(struct dw_i2c_dev *dev) { u32 tx_fifo_depth, rx_fifo_depth; unsigned int param; int ret; /* DW_IC_COMP_PARAM_1 not implement for IP issue */ if ((dev->flags & MODEL_MASK) == MODEL_WANGXUN_SP) { dev->tx_fifo_depth = TXGBE_TX_FIFO_DEPTH; dev->rx_fifo_depth = TXGBE_RX_FIFO_DEPTH; return 0; } /* * Try to detect the FIFO depth if not set by interface driver, * the depth could be from 2 to 256 from HW spec. */ ret = i2c_dw_acquire_lock(dev); if (ret) return ret; ret = regmap_read(dev->map, DW_IC_COMP_PARAM_1, &param); i2c_dw_release_lock(dev); if (ret) return ret; tx_fifo_depth = ((param >> 16) & 0xff) + 1; rx_fifo_depth = ((param >> 8) & 0xff) + 1; if (!dev->tx_fifo_depth) { dev->tx_fifo_depth = tx_fifo_depth; dev->rx_fifo_depth = rx_fifo_depth; } else if (tx_fifo_depth >= 2) { dev->tx_fifo_depth = min_t(u32, dev->tx_fifo_depth, tx_fifo_depth); dev->rx_fifo_depth = min_t(u32, dev->rx_fifo_depth, rx_fifo_depth); } return 0; } u32 i2c_dw_func(struct i2c_adapter *adap) { struct dw_i2c_dev *dev = i2c_get_adapdata(adap); return dev->functionality; } void i2c_dw_disable(struct dw_i2c_dev *dev) { unsigned int dummy; int ret; ret = i2c_dw_acquire_lock(dev); if (ret) return; /* Disable controller */ __i2c_dw_disable(dev); /* Disable all interrupts */ regmap_write(dev->map, DW_IC_INTR_MASK, 0); regmap_read(dev->map, DW_IC_CLR_INTR, &dummy); i2c_dw_release_lock(dev); } MODULE_DESCRIPTION("Synopsys DesignWare I2C bus adapter core"); MODULE_LICENSE("GPL");
linux-master
drivers/i2c/busses/i2c-designware-common.c
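The i2c_dw_scl_hcnt()/i2c_dw_scl_lcnt() helpers in the file above reduce the databook timing constraints to a couple of multiply-and-round divisions, with the input clock in kHz and the timing parameters in nanoseconds. The following is a minimal standalone sketch of that arithmetic; the 100 MHz input clock and the standard-mode tHIGH/tLOW/tf figures are illustrative assumptions, not values taken from any particular platform.

/*
 * Standalone sketch of the SCL count arithmetic in i2c_dw_scl_hcnt() and
 * i2c_dw_scl_lcnt() above ("DW default" HCNT rule). The 100 MHz input
 * clock and the standard-mode tHIGH/tLOW/tf values are illustrative
 * assumptions only.
 */
#include <stdio.h>

#define MICRO 1000000ULL

/* round-to-nearest division, like DIV_ROUND_CLOSEST_ULL for positive values */
static unsigned long long div_round_closest(unsigned long long x,
					     unsigned long long d)
{
	return (x + d / 2) / d;
}

int main(void)
{
	unsigned long long ic_clk = 100000;	/* input clock in kHz, assumed */
	unsigned long long thigh = 4000;	/* ns, standard-mode tHIGH */
	unsigned long long tlow = 4700;		/* ns, standard-mode tLOW */
	unsigned long long tf = 300;		/* ns, default SCL fall time */

	/* IC_SS_SCL_HCNT + 3 >= IC_CLK * (tHD;STA + tf) */
	unsigned long long hcnt = div_round_closest(ic_clk * (thigh + tf), MICRO) - 3;
	/* IC_SS_SCL_LCNT + 1 >= IC_CLK * (tLOW + tf) */
	unsigned long long lcnt = div_round_closest(ic_clk * (tlow + tf), MICRO) - 1;

	printf("SS HCNT=%llu LCNT=%llu\n", hcnt, lcnt);	/* 427 and 499 here */
	return 0;
}

With these assumed inputs the sketch yields HCNT=427 and LCNT=499, which is the kind of value pair the driver would then program into DW_IC_SS_SCL_HCNT/LCNT.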
// SPDX-License-Identifier: GPL-2.0-or-later /* ------------------------------------------------------------------------- */ /* i2c-elektor.c i2c-hw access for PCF8584 style isa bus adaptes */ /* ------------------------------------------------------------------------- */ /* Copyright (C) 1995-97 Simon G. Vogl 1998-99 Hans Berglund */ /* ------------------------------------------------------------------------- */ /* With some changes from Kyösti Mälkki <[email protected]> and even Frodo Looijaard <[email protected]> */ /* Partially rewriten by Oleg I. Vdovikin for mmapped support of for Alpha Processor Inc. UP-2000(+) boards */ #include <linux/kernel.h> #include <linux/ioport.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/wait.h> #include <linux/isa.h> #include <linux/i2c.h> #include <linux/i2c-algo-pcf.h> #include <linux/io.h> #include <asm/irq.h> #include "../algos/i2c-algo-pcf.h" #define DEFAULT_BASE 0x330 static int base; static u8 __iomem *base_iomem; static int irq; static int clock = 0x1c; static int own = 0x55; static int mmapped; /* vdovikin: removed static struct i2c_pcf_isa gpi; code - this module in real supports only one device, due to missing arguments in some functions, called from the algo-pcf module. Sometimes it's need to be rewriten - but for now just remove this for simpler reading */ static wait_queue_head_t pcf_wait; static int pcf_pending; static DEFINE_SPINLOCK(lock); static struct i2c_adapter pcf_isa_ops; /* ----- local functions ---------------------------------------------- */ static void pcf_isa_setbyte(void *data, int ctl, int val) { u8 __iomem *address = ctl ? (base_iomem + 1) : base_iomem; /* enable irq if any specified for serial operation */ if (ctl && irq && (val & I2C_PCF_ESO)) { val |= I2C_PCF_ENI; } pr_debug("%s: Write %p 0x%02X\n", pcf_isa_ops.name, address, val); iowrite8(val, address); #ifdef __alpha__ /* API UP2000 needs some hardware fudging to make the write stick */ iowrite8(val, address); #endif } static int pcf_isa_getbyte(void *data, int ctl) { u8 __iomem *address = ctl ? 
(base_iomem + 1) : base_iomem; int val = ioread8(address); pr_debug("%s: Read %p 0x%02X\n", pcf_isa_ops.name, address, val); return (val); } static int pcf_isa_getown(void *data) { return (own); } static int pcf_isa_getclock(void *data) { return (clock); } static void pcf_isa_waitforpin(void *data) { DEFINE_WAIT(wait); int timeout = 2; unsigned long flags; if (irq > 0) { spin_lock_irqsave(&lock, flags); if (pcf_pending == 0) { spin_unlock_irqrestore(&lock, flags); prepare_to_wait(&pcf_wait, &wait, TASK_INTERRUPTIBLE); if (schedule_timeout(timeout*HZ)) { spin_lock_irqsave(&lock, flags); if (pcf_pending == 1) { pcf_pending = 0; } spin_unlock_irqrestore(&lock, flags); } finish_wait(&pcf_wait, &wait); } else { pcf_pending = 0; spin_unlock_irqrestore(&lock, flags); } } else { udelay(100); } } static irqreturn_t pcf_isa_handler(int this_irq, void *dev_id) { spin_lock(&lock); pcf_pending = 1; spin_unlock(&lock); wake_up_interruptible(&pcf_wait); return IRQ_HANDLED; } static int pcf_isa_init(void) { if (!mmapped) { if (!request_region(base, 2, pcf_isa_ops.name)) { printk(KERN_ERR "%s: requested I/O region (%#x:2) is " "in use\n", pcf_isa_ops.name, base); return -ENODEV; } base_iomem = ioport_map(base, 2); if (!base_iomem) { printk(KERN_ERR "%s: remap of I/O region %#x failed\n", pcf_isa_ops.name, base); release_region(base, 2); return -ENODEV; } } else { if (!request_mem_region(base, 2, pcf_isa_ops.name)) { printk(KERN_ERR "%s: requested memory region (%#x:2) " "is in use\n", pcf_isa_ops.name, base); return -ENODEV; } base_iomem = ioremap(base, 2); if (base_iomem == NULL) { printk(KERN_ERR "%s: remap of memory region %#x " "failed\n", pcf_isa_ops.name, base); release_mem_region(base, 2); return -ENODEV; } } pr_debug("%s: registers %#x remapped to %p\n", pcf_isa_ops.name, base, base_iomem); if (irq > 0) { if (request_irq(irq, pcf_isa_handler, 0, pcf_isa_ops.name, NULL) < 0) { printk(KERN_ERR "%s: Request irq%d failed\n", pcf_isa_ops.name, irq); irq = 0; } else enable_irq(irq); } return 0; } /* ------------------------------------------------------------------------ * Encapsulate the above functions in the correct operations structure. * This is only done when more than one hardware adapter is supported. */ static struct i2c_algo_pcf_data pcf_isa_data = { .setpcf = pcf_isa_setbyte, .getpcf = pcf_isa_getbyte, .getown = pcf_isa_getown, .getclock = pcf_isa_getclock, .waitforpin = pcf_isa_waitforpin, }; static struct i2c_adapter pcf_isa_ops = { .owner = THIS_MODULE, .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, .algo_data = &pcf_isa_data, .name = "i2c-elektor", }; static int elektor_match(struct device *dev, unsigned int id) { #ifdef __alpha__ /* check to see we have memory mapped PCF8584 connected to the Cypress cy82c693 PCI-ISA bridge as on UP2000 board */ if (base == 0) { struct pci_dev *cy693_dev; cy693_dev = pci_get_device(PCI_VENDOR_ID_CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693, NULL); if (cy693_dev) { unsigned char config; /* yeap, we've found cypress, let's check config */ if (!pci_read_config_byte(cy693_dev, 0x47, &config)) { dev_dbg(dev, "found cy82c693, config " "register 0x47 = 0x%02x\n", config); /* UP2000 board has this register set to 0xe1, but the most significant bit as seems can be reset during the proper initialisation sequence if guys from API decides to do that (so, we can even enable Tsunami Pchip window for the upper 1 Gb) */ /* so just check for ROMCS at 0xe0000, ROMCS enabled for writes and external XD Bus buffer in use. 
*/ if ((config & 0x7f) == 0x61) { /* seems to be UP2000 like board */ base = 0xe0000; mmapped = 1; /* UP2000 drives ISA with 8.25 MHz (PCI/4) clock (this can be read from cypress) */ clock = I2C_PCF_CLK | I2C_PCF_TRNS90; dev_info(dev, "found API UP2000 like " "board, will probe PCF8584 " "later\n"); } } pci_dev_put(cy693_dev); } } #endif /* sanity checks for mmapped I/O */ if (mmapped && base < 0xc8000) { dev_err(dev, "incorrect base address (%#x) specified " "for mmapped I/O\n", base); return 0; } if (base == 0) { base = DEFAULT_BASE; } return 1; } static int elektor_probe(struct device *dev, unsigned int id) { init_waitqueue_head(&pcf_wait); if (pcf_isa_init()) return -ENODEV; pcf_isa_ops.dev.parent = dev; if (i2c_pcf_add_bus(&pcf_isa_ops) < 0) goto fail; dev_info(dev, "found device at %#x\n", base); return 0; fail: if (irq > 0) { disable_irq(irq); free_irq(irq, NULL); } if (!mmapped) { ioport_unmap(base_iomem); release_region(base, 2); } else { iounmap(base_iomem); release_mem_region(base, 2); } return -ENODEV; } static void elektor_remove(struct device *dev, unsigned int id) { i2c_del_adapter(&pcf_isa_ops); if (irq > 0) { disable_irq(irq); free_irq(irq, NULL); } if (!mmapped) { ioport_unmap(base_iomem); release_region(base, 2); } else { iounmap(base_iomem); release_mem_region(base, 2); } } static struct isa_driver i2c_elektor_driver = { .match = elektor_match, .probe = elektor_probe, .remove = elektor_remove, .driver = { .owner = THIS_MODULE, .name = "i2c-elektor", }, }; MODULE_AUTHOR("Hans Berglund <[email protected]>"); MODULE_DESCRIPTION("I2C-Bus adapter routines for PCF8584 ISA bus adapter"); MODULE_LICENSE("GPL"); module_param_hw(base, int, ioport_or_iomem, 0); module_param_hw(irq, int, irq, 0); module_param(clock, int, 0); module_param(own, int, 0); module_param_hw(mmapped, int, other, 0); module_isa_driver(i2c_elektor_driver, 1);
linux-master
drivers/i2c/busses/i2c-elektor.c
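pcf_isa_waitforpin() in the file above sleeps until pcf_isa_handler() sets pcf_pending, and falls back to a short delay when no IRQ is wired up. The sketch below is only a userspace analogue of that pending-flag-plus-wakeup pattern, built on POSIX threads rather than the kernel wait-queue API; the 100 ms "conversion" delay and the 2 s timeout are illustrative assumptions.

/*
 * Userspace analogue of the pcf_isa_waitforpin()/pcf_isa_handler() pairing
 * above: the "interrupt" side sets a pending flag under the lock and wakes
 * the waiter, which re-checks the flag with a timeout before giving up.
 * Illustrative only; POSIX threads stand in for kernel wait queues.
 */
#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int pending;

static void *fake_irq(void *arg)
{
	usleep(100 * 1000);			/* pretend the chip needs 100 ms */
	pthread_mutex_lock(&lock);
	pending = 1;				/* like pcf_pending = 1 in the handler */
	pthread_cond_signal(&cond);		/* like wake_up_interruptible() */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t tid;
	struct timespec deadline;

	pthread_create(&tid, NULL, fake_irq, NULL);

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 2;			/* roughly schedule_timeout(2 * HZ) */

	pthread_mutex_lock(&lock);
	while (!pending) {
		if (pthread_cond_timedwait(&cond, &lock, &deadline))
			break;			/* timed out, stop waiting */
	}
	printf("%s\n", pending ? "pin event consumed" : "timeout, poll instead");
	pending = 0;
	pthread_mutex_unlock(&lock);

	pthread_join(tid, NULL);
	return 0;
}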
// SPDX-License-Identifier: GPL-2.0-or-later /* * i2c support for Silicon Labs' CP2615 Digital Audio Bridge * * (c) 2021, Bence Csókás <[email protected]> */ #include <linux/errno.h> #include <linux/i2c.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/string.h> #include <linux/usb.h> /** CP2615 I/O Protocol implementation */ #define CP2615_VID 0x10c4 #define CP2615_PID 0xeac1 #define IOP_EP_IN 0x82 #define IOP_EP_OUT 0x02 #define IOP_IFN 1 #define IOP_ALTSETTING 2 #define MAX_IOP_SIZE 64 #define MAX_IOP_PAYLOAD_SIZE (MAX_IOP_SIZE - 6) #define MAX_I2C_SIZE (MAX_IOP_PAYLOAD_SIZE - 4) enum cp2615_iop_msg_type { iop_GetAccessoryInfo = 0xD100, iop_AccessoryInfo = 0xA100, iop_GetPortConfiguration = 0xD203, iop_PortConfiguration = 0xA203, iop_DoI2cTransfer = 0xD400, iop_I2cTransferResult = 0xA400, iop_GetSerialState = 0xD501, iop_SerialState = 0xA501 }; struct __packed cp2615_iop_msg { __be16 preamble, length, msg; u8 data[MAX_IOP_PAYLOAD_SIZE]; }; #define PART_ID_A01 0x1400 #define PART_ID_A02 0x1500 struct __packed cp2615_iop_accessory_info { __be16 part_id, option_id, proto_ver; }; struct __packed cp2615_i2c_transfer { u8 tag, i2caddr, read_len, write_len; u8 data[MAX_I2C_SIZE]; }; /* Possible values for struct cp2615_i2c_transfer_result.status */ enum cp2615_i2c_status { /* Writing to the internal EEPROM failed, because it is locked */ CP2615_CFG_LOCKED = -6, /* read_len or write_len out of range */ CP2615_INVALID_PARAM = -4, /* I2C slave did not ACK in time */ CP2615_TIMEOUT, /* I2C bus busy */ CP2615_BUS_BUSY, /* I2C bus error (ie. device NAK'd the request) */ CP2615_BUS_ERROR, CP2615_SUCCESS }; struct __packed cp2615_i2c_transfer_result { u8 tag, i2caddr; s8 status; u8 read_len; u8 data[MAX_I2C_SIZE]; }; static int cp2615_init_iop_msg(struct cp2615_iop_msg *ret, enum cp2615_iop_msg_type msg, const void *data, size_t data_len) { if (data_len > MAX_IOP_PAYLOAD_SIZE) return -EFBIG; if (!ret) return -EINVAL; ret->preamble = 0x2A2A; ret->length = htons(data_len + 6); ret->msg = htons(msg); if (data && data_len) memcpy(&ret->data, data, data_len); return 0; } static int cp2615_init_i2c_msg(struct cp2615_iop_msg *ret, const struct cp2615_i2c_transfer *data) { return cp2615_init_iop_msg(ret, iop_DoI2cTransfer, data, 4 + data->write_len); } /* Translates status codes to Linux errno's */ static int cp2615_check_status(enum cp2615_i2c_status status) { switch (status) { case CP2615_SUCCESS: return 0; case CP2615_BUS_ERROR: return -ENXIO; case CP2615_BUS_BUSY: return -EAGAIN; case CP2615_TIMEOUT: return -ETIMEDOUT; case CP2615_INVALID_PARAM: return -EINVAL; case CP2615_CFG_LOCKED: return -EPERM; } /* Unknown error code */ return -EPROTO; } /** Driver code */ static int cp2615_i2c_send(struct usb_interface *usbif, struct cp2615_i2c_transfer *i2c_w) { struct cp2615_iop_msg *msg = kzalloc(sizeof(*msg), GFP_KERNEL); struct usb_device *usbdev = interface_to_usbdev(usbif); int res = cp2615_init_i2c_msg(msg, i2c_w); if (!res) res = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, IOP_EP_OUT), msg, ntohs(msg->length), NULL, 0); kfree(msg); return res; } static int cp2615_i2c_recv(struct usb_interface *usbif, unsigned char tag, void *buf) { struct usb_device *usbdev = interface_to_usbdev(usbif); struct cp2615_iop_msg *msg; struct cp2615_i2c_transfer_result *i2c_r; int res; msg = kzalloc(sizeof(*msg), GFP_KERNEL); if (!msg) return -ENOMEM; res = usb_bulk_msg(usbdev, usb_rcvbulkpipe(usbdev, IOP_EP_IN), msg, sizeof(struct cp2615_iop_msg), NULL, 0); if (res < 0) { kfree(msg); return res; } i2c_r = 
(struct cp2615_i2c_transfer_result *)&msg->data; if (msg->msg != htons(iop_I2cTransferResult) || i2c_r->tag != tag) { kfree(msg); return -EIO; } res = cp2615_check_status(i2c_r->status); if (!res) memcpy(buf, &i2c_r->data, i2c_r->read_len); kfree(msg); return res; } /* Checks if the IOP is functional by querying the part's ID */ static int cp2615_check_iop(struct usb_interface *usbif) { struct cp2615_iop_msg *msg = kzalloc(sizeof(*msg), GFP_KERNEL); struct cp2615_iop_accessory_info *info = (struct cp2615_iop_accessory_info *)&msg->data; struct usb_device *usbdev = interface_to_usbdev(usbif); int res = cp2615_init_iop_msg(msg, iop_GetAccessoryInfo, NULL, 0); if (res) goto out; res = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, IOP_EP_OUT), msg, ntohs(msg->length), NULL, 0); if (res) goto out; res = usb_bulk_msg(usbdev, usb_rcvbulkpipe(usbdev, IOP_EP_IN), msg, sizeof(struct cp2615_iop_msg), NULL, 0); if (res) goto out; if (msg->msg != htons(iop_AccessoryInfo)) { res = -EIO; goto out; } switch (ntohs(info->part_id)) { case PART_ID_A01: dev_dbg(&usbif->dev, "Found A01 part. (WARNING: errata exists!)\n"); break; case PART_ID_A02: dev_dbg(&usbif->dev, "Found good A02 part.\n"); break; default: dev_warn(&usbif->dev, "Unknown part ID %04X\n", ntohs(info->part_id)); } out: kfree(msg); return res; } static int cp2615_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct usb_interface *usbif = adap->algo_data; int i = 0, ret = 0; struct i2c_msg *msg; struct cp2615_i2c_transfer i2c_w = {0}; dev_dbg(&usbif->dev, "Doing %d I2C transactions\n", num); for (; !ret && i < num; i++) { msg = &msgs[i]; i2c_w.tag = 0xdd; i2c_w.i2caddr = i2c_8bit_addr_from_msg(msg); if (msg->flags & I2C_M_RD) { i2c_w.read_len = msg->len; i2c_w.write_len = 0; } else { i2c_w.read_len = 0; i2c_w.write_len = msg->len; memcpy(&i2c_w.data, msg->buf, i2c_w.write_len); } ret = cp2615_i2c_send(usbif, &i2c_w); if (ret) break; ret = cp2615_i2c_recv(usbif, i2c_w.tag, msg->buf); } if (ret < 0) return ret; return i; } static u32 cp2615_i2c_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static const struct i2c_algorithm cp2615_i2c_algo = { .master_xfer = cp2615_i2c_master_xfer, .functionality = cp2615_i2c_func, }; /* * This chip has some limitations: one is that the USB endpoint * can only receive 64 bytes/transfer, that leaves 54 bytes for * the I2C transfer. On top of that, EITHER read_len OR write_len * may be zero, but not both. If both are non-zero, the adapter * issues a write followed by a read. And the chip does not * support repeated START between the write and read phases. 
*/ static struct i2c_adapter_quirks cp2615_i2c_quirks = { .max_write_len = MAX_I2C_SIZE, .max_read_len = MAX_I2C_SIZE, .flags = I2C_AQ_COMB_WRITE_THEN_READ | I2C_AQ_NO_ZERO_LEN | I2C_AQ_NO_REP_START, .max_comb_1st_msg_len = MAX_I2C_SIZE, .max_comb_2nd_msg_len = MAX_I2C_SIZE }; static void cp2615_i2c_remove(struct usb_interface *usbif) { struct i2c_adapter *adap = usb_get_intfdata(usbif); usb_set_intfdata(usbif, NULL); i2c_del_adapter(adap); } static int cp2615_i2c_probe(struct usb_interface *usbif, const struct usb_device_id *id) { int ret = 0; struct i2c_adapter *adap; struct usb_device *usbdev = interface_to_usbdev(usbif); ret = usb_set_interface(usbdev, IOP_IFN, IOP_ALTSETTING); if (ret) return ret; ret = cp2615_check_iop(usbif); if (ret) return ret; adap = devm_kzalloc(&usbif->dev, sizeof(struct i2c_adapter), GFP_KERNEL); if (!adap) return -ENOMEM; strncpy(adap->name, usbdev->serial, sizeof(adap->name) - 1); adap->owner = THIS_MODULE; adap->dev.parent = &usbif->dev; adap->dev.of_node = usbif->dev.of_node; adap->timeout = HZ; adap->algo = &cp2615_i2c_algo; adap->quirks = &cp2615_i2c_quirks; adap->algo_data = usbif; ret = i2c_add_adapter(adap); if (ret) return ret; usb_set_intfdata(usbif, adap); return 0; } static const struct usb_device_id id_table[] = { { USB_DEVICE_INTERFACE_NUMBER(CP2615_VID, CP2615_PID, IOP_IFN) }, { } }; MODULE_DEVICE_TABLE(usb, id_table); static struct usb_driver cp2615_i2c_driver = { .name = "i2c-cp2615", .probe = cp2615_i2c_probe, .disconnect = cp2615_i2c_remove, .id_table = id_table, }; module_usb_driver(cp2615_i2c_driver); MODULE_AUTHOR("Bence Csókás <[email protected]>"); MODULE_DESCRIPTION("CP2615 I2C bus driver"); MODULE_LICENSE("GPL");
linux-master
drivers/i2c/busses/i2c-cp2615.c
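cp2615_init_iop_msg() and cp2615_init_i2c_msg() in the file above wrap every bulk transfer in a fixed 6-byte IOP header (preamble 0x2A2A, big-endian length and message type) followed by the 4-byte I2C transfer header and the write payload. A self-contained userspace sketch of the same framing follows; the tag, slave address and data bytes are made-up illustration values, and standard headers replace the kernel ones.

/*
 * Standalone sketch of the CP2615 I/O Protocol framing implemented by
 * cp2615_init_iop_msg()/cp2615_init_i2c_msg() above. All payload values
 * are illustrative assumptions.
 */
#include <arpa/inet.h>		/* htons()/ntohs() */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_IOP_SIZE		64
#define MAX_IOP_PAYLOAD_SIZE	(MAX_IOP_SIZE - 6)
#define MAX_I2C_SIZE		(MAX_IOP_PAYLOAD_SIZE - 4)

struct __attribute__((packed)) iop_msg {
	uint16_t preamble, length, msg;
	uint8_t data[MAX_IOP_PAYLOAD_SIZE];
};

struct __attribute__((packed)) i2c_transfer {
	uint8_t tag, i2caddr, read_len, write_len;
	uint8_t data[MAX_I2C_SIZE];
};

int main(void)
{
	/* assumed transfer: write two bytes to 7-bit address 0x2a */
	struct i2c_transfer xfer = {
		.tag = 0xdd, .i2caddr = 0x2a << 1,
		.read_len = 0, .write_len = 2,
		.data = { 0x01, 0x02 },
	};
	struct iop_msg msg = { .preamble = 0x2A2A };	/* byte-order symmetric */
	size_t payload = 4 + xfer.write_len;		/* transfer header + data */

	msg.length = htons(payload + 6);		/* 6-byte IOP header */
	msg.msg = htons(0xD400);			/* iop_DoI2cTransfer */
	memcpy(msg.data, &xfer, payload);

	printf("bulk-out length on the wire: %u bytes\n",
	       (unsigned int)ntohs(msg.length));
	return 0;
}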
// SPDX-License-Identifier: GPL-2.0-or-later /* * Synopsys DesignWare I2C adapter driver (master only). * * Based on the TI DAVINCI I2C adapter driver. * * Copyright (C) 2006 Texas Instruments. * Copyright (C) 2007 MontaVista Software Inc. * Copyright (C) 2009 Provigent Ltd. */ #include <linux/delay.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/export.h> #include <linux/gpio/consumer.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> #include <linux/pinctrl/consumer.h> #include <linux/pm_runtime.h> #include <linux/regmap.h> #include <linux/reset.h> #include "i2c-designware-core.h" #define AMD_TIMEOUT_MIN_US 25 #define AMD_TIMEOUT_MAX_US 250 #define AMD_MASTERCFG_MASK GENMASK(15, 0) static void i2c_dw_configure_fifo_master(struct dw_i2c_dev *dev) { /* Configure Tx/Rx FIFO threshold levels */ regmap_write(dev->map, DW_IC_TX_TL, dev->tx_fifo_depth / 2); regmap_write(dev->map, DW_IC_RX_TL, 0); /* Configure the I2C master */ regmap_write(dev->map, DW_IC_CON, dev->master_cfg); } static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev) { unsigned int comp_param1; u32 sda_falling_time, scl_falling_time; struct i2c_timings *t = &dev->timings; const char *fp_str = ""; u32 ic_clk; int ret; ret = i2c_dw_acquire_lock(dev); if (ret) return ret; ret = regmap_read(dev->map, DW_IC_COMP_PARAM_1, &comp_param1); i2c_dw_release_lock(dev); if (ret) return ret; /* Set standard and fast speed dividers for high/low periods */ sda_falling_time = t->sda_fall_ns ?: 300; /* ns */ scl_falling_time = t->scl_fall_ns ?: 300; /* ns */ /* Calculate SCL timing parameters for standard mode if not set */ if (!dev->ss_hcnt || !dev->ss_lcnt) { ic_clk = i2c_dw_clk_rate(dev); dev->ss_hcnt = i2c_dw_scl_hcnt(ic_clk, 4000, /* tHD;STA = tHIGH = 4.0 us */ sda_falling_time, 0, /* 0: DW default, 1: Ideal */ 0); /* No offset */ dev->ss_lcnt = i2c_dw_scl_lcnt(ic_clk, 4700, /* tLOW = 4.7 us */ scl_falling_time, 0); /* No offset */ } dev_dbg(dev->dev, "Standard Mode HCNT:LCNT = %d:%d\n", dev->ss_hcnt, dev->ss_lcnt); /* * Set SCL timing parameters for fast mode or fast mode plus. Only * difference is the timing parameter values since the registers are * the same. */ if (t->bus_freq_hz == I2C_MAX_FAST_MODE_PLUS_FREQ) { /* * Check are Fast Mode Plus parameters available. Calculate * SCL timing parameters for Fast Mode Plus if not set. */ if (dev->fp_hcnt && dev->fp_lcnt) { dev->fs_hcnt = dev->fp_hcnt; dev->fs_lcnt = dev->fp_lcnt; } else { ic_clk = i2c_dw_clk_rate(dev); dev->fs_hcnt = i2c_dw_scl_hcnt(ic_clk, 260, /* tHIGH = 260 ns */ sda_falling_time, 0, /* DW default */ 0); /* No offset */ dev->fs_lcnt = i2c_dw_scl_lcnt(ic_clk, 500, /* tLOW = 500 ns */ scl_falling_time, 0); /* No offset */ } fp_str = " Plus"; } /* * Calculate SCL timing parameters for fast mode if not set. They are * needed also in high speed mode. 
*/ if (!dev->fs_hcnt || !dev->fs_lcnt) { ic_clk = i2c_dw_clk_rate(dev); dev->fs_hcnt = i2c_dw_scl_hcnt(ic_clk, 600, /* tHD;STA = tHIGH = 0.6 us */ sda_falling_time, 0, /* 0: DW default, 1: Ideal */ 0); /* No offset */ dev->fs_lcnt = i2c_dw_scl_lcnt(ic_clk, 1300, /* tLOW = 1.3 us */ scl_falling_time, 0); /* No offset */ } dev_dbg(dev->dev, "Fast Mode%s HCNT:LCNT = %d:%d\n", fp_str, dev->fs_hcnt, dev->fs_lcnt); /* Check is high speed possible and fall back to fast mode if not */ if ((dev->master_cfg & DW_IC_CON_SPEED_MASK) == DW_IC_CON_SPEED_HIGH) { if ((comp_param1 & DW_IC_COMP_PARAM_1_SPEED_MODE_MASK) != DW_IC_COMP_PARAM_1_SPEED_MODE_HIGH) { dev_err(dev->dev, "High Speed not supported!\n"); t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ; dev->master_cfg &= ~DW_IC_CON_SPEED_MASK; dev->master_cfg |= DW_IC_CON_SPEED_FAST; dev->hs_hcnt = 0; dev->hs_lcnt = 0; } else if (!dev->hs_hcnt || !dev->hs_lcnt) { ic_clk = i2c_dw_clk_rate(dev); dev->hs_hcnt = i2c_dw_scl_hcnt(ic_clk, 160, /* tHIGH = 160 ns */ sda_falling_time, 0, /* DW default */ 0); /* No offset */ dev->hs_lcnt = i2c_dw_scl_lcnt(ic_clk, 320, /* tLOW = 320 ns */ scl_falling_time, 0); /* No offset */ } dev_dbg(dev->dev, "High Speed Mode HCNT:LCNT = %d:%d\n", dev->hs_hcnt, dev->hs_lcnt); } ret = i2c_dw_set_sda_hold(dev); if (ret) return ret; dev_dbg(dev->dev, "Bus speed: %s\n", i2c_freq_mode_string(t->bus_freq_hz)); return 0; } /** * i2c_dw_init_master() - Initialize the designware I2C master hardware * @dev: device private data * * This functions configures and enables the I2C master. * This function is called during I2C init function, and in case of timeout at * run time. */ static int i2c_dw_init_master(struct dw_i2c_dev *dev) { int ret; ret = i2c_dw_acquire_lock(dev); if (ret) return ret; /* Disable the adapter */ __i2c_dw_disable(dev); /* Write standard speed timing parameters */ regmap_write(dev->map, DW_IC_SS_SCL_HCNT, dev->ss_hcnt); regmap_write(dev->map, DW_IC_SS_SCL_LCNT, dev->ss_lcnt); /* Write fast mode/fast mode plus timing parameters */ regmap_write(dev->map, DW_IC_FS_SCL_HCNT, dev->fs_hcnt); regmap_write(dev->map, DW_IC_FS_SCL_LCNT, dev->fs_lcnt); /* Write high speed timing parameters if supported */ if (dev->hs_hcnt && dev->hs_lcnt) { regmap_write(dev->map, DW_IC_HS_SCL_HCNT, dev->hs_hcnt); regmap_write(dev->map, DW_IC_HS_SCL_LCNT, dev->hs_lcnt); } /* Write SDA hold time if supported */ if (dev->sda_hold_time) regmap_write(dev->map, DW_IC_SDA_HOLD, dev->sda_hold_time); i2c_dw_configure_fifo_master(dev); i2c_dw_release_lock(dev); return 0; } static void i2c_dw_xfer_init(struct dw_i2c_dev *dev) { struct i2c_msg *msgs = dev->msgs; u32 ic_con = 0, ic_tar = 0; unsigned int dummy; /* Disable the adapter */ __i2c_dw_disable(dev); /* If the slave address is ten bit address, enable 10BITADDR */ if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) { ic_con = DW_IC_CON_10BITADDR_MASTER; /* * If I2C_DYNAMIC_TAR_UPDATE is set, the 10-bit addressing * mode has to be enabled via bit 12 of IC_TAR register. * We set it always as I2C_DYNAMIC_TAR_UPDATE can't be * detected from registers. */ ic_tar = DW_IC_TAR_10BITADDR_MASTER; } regmap_update_bits(dev->map, DW_IC_CON, DW_IC_CON_10BITADDR_MASTER, ic_con); /* * Set the slave (target) address and enable 10-bit addressing mode * if applicable. 
*/ regmap_write(dev->map, DW_IC_TAR, msgs[dev->msg_write_idx].addr | ic_tar); /* Enforce disabled interrupts (due to HW issues) */ regmap_write(dev->map, DW_IC_INTR_MASK, 0); /* Enable the adapter */ __i2c_dw_enable(dev); /* Dummy read to avoid the register getting stuck on Bay Trail */ regmap_read(dev->map, DW_IC_ENABLE_STATUS, &dummy); /* Clear and enable interrupts */ regmap_read(dev->map, DW_IC_CLR_INTR, &dummy); regmap_write(dev->map, DW_IC_INTR_MASK, DW_IC_INTR_MASTER_MASK); } static int i2c_dw_check_stopbit(struct dw_i2c_dev *dev) { u32 val; int ret; ret = regmap_read_poll_timeout(dev->map, DW_IC_INTR_STAT, val, !(val & DW_IC_INTR_STOP_DET), 1100, 20000); if (ret) dev_err(dev->dev, "i2c timeout error %d\n", ret); return ret; } static int i2c_dw_status(struct dw_i2c_dev *dev) { int status; status = i2c_dw_wait_bus_not_busy(dev); if (status) return status; return i2c_dw_check_stopbit(dev); } /* * Initiate and continue master read/write transaction with polling * based transfer routine afterward write messages into the Tx buffer. */ static int amd_i2c_dw_xfer_quirk(struct i2c_adapter *adap, struct i2c_msg *msgs, int num_msgs) { struct dw_i2c_dev *dev = i2c_get_adapdata(adap); int msg_wrt_idx, msg_itr_lmt, buf_len, data_idx; int cmd = 0, status; u8 *tx_buf; unsigned int val; /* * In order to enable the interrupt for UCSI i.e. AMD NAVI GPU card, * it is mandatory to set the right value in specific register * (offset:0x474) as per the hardware IP specification. */ regmap_write(dev->map, AMD_UCSI_INTR_REG, AMD_UCSI_INTR_EN); dev->msgs = msgs; dev->msgs_num = num_msgs; i2c_dw_xfer_init(dev); regmap_write(dev->map, DW_IC_INTR_MASK, 0); /* Initiate messages read/write transaction */ for (msg_wrt_idx = 0; msg_wrt_idx < num_msgs; msg_wrt_idx++) { tx_buf = msgs[msg_wrt_idx].buf; buf_len = msgs[msg_wrt_idx].len; if (!(msgs[msg_wrt_idx].flags & I2C_M_RD)) regmap_write(dev->map, DW_IC_TX_TL, buf_len - 1); /* * Initiate the i2c read/write transaction of buffer length, * and poll for bus busy status. For the last message transfer, * update the command with stopbit enable. */ for (msg_itr_lmt = buf_len; msg_itr_lmt > 0; msg_itr_lmt--) { if (msg_wrt_idx == num_msgs - 1 && msg_itr_lmt == 1) cmd |= BIT(9); if (msgs[msg_wrt_idx].flags & I2C_M_RD) { /* Due to hardware bug, need to write the same command twice. */ regmap_write(dev->map, DW_IC_DATA_CMD, 0x100); regmap_write(dev->map, DW_IC_DATA_CMD, 0x100 | cmd); if (cmd) { regmap_write(dev->map, DW_IC_TX_TL, 2 * (buf_len - 1)); regmap_write(dev->map, DW_IC_RX_TL, 2 * (buf_len - 1)); /* * Need to check the stop bit. However, it cannot be * detected from the registers so we check it always * when read/write the last byte. 
*/ status = i2c_dw_status(dev); if (status) return status; for (data_idx = 0; data_idx < buf_len; data_idx++) { regmap_read(dev->map, DW_IC_DATA_CMD, &val); tx_buf[data_idx] = val; } status = i2c_dw_check_stopbit(dev); if (status) return status; } } else { regmap_write(dev->map, DW_IC_DATA_CMD, *tx_buf++ | cmd); usleep_range(AMD_TIMEOUT_MIN_US, AMD_TIMEOUT_MAX_US); } } status = i2c_dw_check_stopbit(dev); if (status) return status; } return 0; } static int i2c_dw_poll_tx_empty(struct dw_i2c_dev *dev) { u32 val; return regmap_read_poll_timeout(dev->map, DW_IC_RAW_INTR_STAT, val, val & DW_IC_INTR_TX_EMPTY, 100, 1000); } static int i2c_dw_poll_rx_full(struct dw_i2c_dev *dev) { u32 val; return regmap_read_poll_timeout(dev->map, DW_IC_RAW_INTR_STAT, val, val & DW_IC_INTR_RX_FULL, 100, 1000); } static int txgbe_i2c_dw_xfer_quirk(struct i2c_adapter *adap, struct i2c_msg *msgs, int num_msgs) { struct dw_i2c_dev *dev = i2c_get_adapdata(adap); int msg_idx, buf_len, data_idx, ret; unsigned int val, stop = 0; u8 *buf; dev->msgs = msgs; dev->msgs_num = num_msgs; i2c_dw_xfer_init(dev); regmap_write(dev->map, DW_IC_INTR_MASK, 0); for (msg_idx = 0; msg_idx < num_msgs; msg_idx++) { buf = msgs[msg_idx].buf; buf_len = msgs[msg_idx].len; for (data_idx = 0; data_idx < buf_len; data_idx++) { if (msg_idx == num_msgs - 1 && data_idx == buf_len - 1) stop |= BIT(9); if (msgs[msg_idx].flags & I2C_M_RD) { regmap_write(dev->map, DW_IC_DATA_CMD, 0x100 | stop); ret = i2c_dw_poll_rx_full(dev); if (ret) return ret; regmap_read(dev->map, DW_IC_DATA_CMD, &val); buf[data_idx] = val; } else { ret = i2c_dw_poll_tx_empty(dev); if (ret) return ret; regmap_write(dev->map, DW_IC_DATA_CMD, buf[data_idx] | stop); } } } return num_msgs; } /* * Initiate (and continue) low level master read/write transaction. * This function is only called from i2c_dw_isr, and pumping i2c_msg * messages into the tx buffer. Even if the size of i2c_msg data is * longer than the size of the tx buffer, it handles everything. */ static void i2c_dw_xfer_msg(struct dw_i2c_dev *dev) { struct i2c_msg *msgs = dev->msgs; u32 intr_mask; int tx_limit, rx_limit; u32 addr = msgs[dev->msg_write_idx].addr; u32 buf_len = dev->tx_buf_len; u8 *buf = dev->tx_buf; bool need_restart = false; unsigned int flr; intr_mask = DW_IC_INTR_MASTER_MASK; for (; dev->msg_write_idx < dev->msgs_num; dev->msg_write_idx++) { u32 flags = msgs[dev->msg_write_idx].flags; /* * If target address has changed, we need to * reprogram the target address in the I2C * adapter when we are done with this transfer. */ if (msgs[dev->msg_write_idx].addr != addr) { dev_err(dev->dev, "%s: invalid target address\n", __func__); dev->msg_err = -EINVAL; break; } if (!(dev->status & STATUS_WRITE_IN_PROGRESS)) { /* new i2c_msg */ buf = msgs[dev->msg_write_idx].buf; buf_len = msgs[dev->msg_write_idx].len; /* If both IC_EMPTYFIFO_HOLD_MASTER_EN and * IC_RESTART_EN are set, we must manually * set restart bit between messages. */ if ((dev->master_cfg & DW_IC_CON_RESTART_EN) && (dev->msg_write_idx > 0)) need_restart = true; } regmap_read(dev->map, DW_IC_TXFLR, &flr); tx_limit = dev->tx_fifo_depth - flr; regmap_read(dev->map, DW_IC_RXFLR, &flr); rx_limit = dev->rx_fifo_depth - flr; while (buf_len > 0 && tx_limit > 0 && rx_limit > 0) { u32 cmd = 0; /* * If IC_EMPTYFIFO_HOLD_MASTER_EN is set we must * manually set the stop bit. However, it cannot be * detected from the registers so we set it always * when writing/reading the last byte. */ /* * i2c-core always sets the buffer length of * I2C_FUNC_SMBUS_BLOCK_DATA to 1. 
The length will * be adjusted when receiving the first byte. * Thus we can't stop the transaction here. */ if (dev->msg_write_idx == dev->msgs_num - 1 && buf_len == 1 && !(flags & I2C_M_RECV_LEN)) cmd |= BIT(9); if (need_restart) { cmd |= BIT(10); need_restart = false; } if (msgs[dev->msg_write_idx].flags & I2C_M_RD) { /* Avoid rx buffer overrun */ if (dev->rx_outstanding >= dev->rx_fifo_depth) break; regmap_write(dev->map, DW_IC_DATA_CMD, cmd | 0x100); rx_limit--; dev->rx_outstanding++; } else { regmap_write(dev->map, DW_IC_DATA_CMD, cmd | *buf++); } tx_limit--; buf_len--; } dev->tx_buf = buf; dev->tx_buf_len = buf_len; /* * Because we don't know the buffer length in the * I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop * the transaction here. */ if (buf_len > 0 || flags & I2C_M_RECV_LEN) { /* more bytes to be written */ dev->status |= STATUS_WRITE_IN_PROGRESS; break; } else dev->status &= ~STATUS_WRITE_IN_PROGRESS; } /* * If i2c_msg index search is completed, we don't need TX_EMPTY * interrupt any more. */ if (dev->msg_write_idx == dev->msgs_num) intr_mask &= ~DW_IC_INTR_TX_EMPTY; if (dev->msg_err) intr_mask = 0; regmap_write(dev->map, DW_IC_INTR_MASK, intr_mask); } static u8 i2c_dw_recv_len(struct dw_i2c_dev *dev, u8 len) { struct i2c_msg *msgs = dev->msgs; u32 flags = msgs[dev->msg_read_idx].flags; /* * Adjust the buffer length and mask the flag * after receiving the first byte. */ len += (flags & I2C_CLIENT_PEC) ? 2 : 1; dev->tx_buf_len = len - min_t(u8, len, dev->rx_outstanding); msgs[dev->msg_read_idx].len = len; msgs[dev->msg_read_idx].flags &= ~I2C_M_RECV_LEN; return len; } static void i2c_dw_read(struct dw_i2c_dev *dev) { struct i2c_msg *msgs = dev->msgs; unsigned int rx_valid; for (; dev->msg_read_idx < dev->msgs_num; dev->msg_read_idx++) { unsigned int tmp; u32 len; u8 *buf; if (!(msgs[dev->msg_read_idx].flags & I2C_M_RD)) continue; if (!(dev->status & STATUS_READ_IN_PROGRESS)) { len = msgs[dev->msg_read_idx].len; buf = msgs[dev->msg_read_idx].buf; } else { len = dev->rx_buf_len; buf = dev->rx_buf; } regmap_read(dev->map, DW_IC_RXFLR, &rx_valid); for (; len > 0 && rx_valid > 0; len--, rx_valid--) { u32 flags = msgs[dev->msg_read_idx].flags; regmap_read(dev->map, DW_IC_DATA_CMD, &tmp); tmp &= DW_IC_DATA_CMD_DAT; /* Ensure length byte is a valid value */ if (flags & I2C_M_RECV_LEN) { /* * if IC_EMPTYFIFO_HOLD_MASTER_EN is set, which cannot be * detected from the registers, the controller can be * disabled if the STOP bit is set. But it is only set * after receiving block data response length in * I2C_FUNC_SMBUS_BLOCK_DATA case. That needs to read * another byte with STOP bit set when the block data * response length is invalid to complete the transaction. */ if (!tmp || tmp > I2C_SMBUS_BLOCK_MAX) tmp = 1; len = i2c_dw_recv_len(dev, tmp); } *buf++ = tmp; dev->rx_outstanding--; } if (len > 0) { dev->status |= STATUS_READ_IN_PROGRESS; dev->rx_buf_len = len; dev->rx_buf = buf; return; } else dev->status &= ~STATUS_READ_IN_PROGRESS; } } /* * Prepare controller for a transaction and call i2c_dw_xfer_msg. */ static int i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) { struct dw_i2c_dev *dev = i2c_get_adapdata(adap); int ret; dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num); pm_runtime_get_sync(dev->dev); /* * Initiate I2C message transfer when polling mode is enabled, * As it is polling based transfer mechanism, which does not support * interrupt based functionalities of existing DesignWare driver. 
*/ switch (dev->flags & MODEL_MASK) { case MODEL_AMD_NAVI_GPU: ret = amd_i2c_dw_xfer_quirk(adap, msgs, num); goto done_nolock; case MODEL_WANGXUN_SP: ret = txgbe_i2c_dw_xfer_quirk(adap, msgs, num); goto done_nolock; default: break; } reinit_completion(&dev->cmd_complete); dev->msgs = msgs; dev->msgs_num = num; dev->cmd_err = 0; dev->msg_write_idx = 0; dev->msg_read_idx = 0; dev->msg_err = 0; dev->status = 0; dev->abort_source = 0; dev->rx_outstanding = 0; ret = i2c_dw_acquire_lock(dev); if (ret) goto done_nolock; ret = i2c_dw_wait_bus_not_busy(dev); if (ret < 0) goto done; /* Start the transfers */ i2c_dw_xfer_init(dev); /* Wait for tx to complete */ if (!wait_for_completion_timeout(&dev->cmd_complete, adap->timeout)) { dev_err(dev->dev, "controller timed out\n"); /* i2c_dw_init implicitly disables the adapter */ i2c_recover_bus(&dev->adapter); i2c_dw_init_master(dev); ret = -ETIMEDOUT; goto done; } /* * We must disable the adapter before returning and signaling the end * of the current transfer. Otherwise the hardware might continue * generating interrupts which in turn causes a race condition with * the following transfer. Needs some more investigation if the * additional interrupts are a hardware bug or this driver doesn't * handle them correctly yet. */ __i2c_dw_disable_nowait(dev); if (dev->msg_err) { ret = dev->msg_err; goto done; } /* No error */ if (likely(!dev->cmd_err && !dev->status)) { ret = num; goto done; } /* We have an error */ if (dev->cmd_err == DW_IC_ERR_TX_ABRT) { ret = i2c_dw_handle_tx_abort(dev); goto done; } if (dev->status) dev_err(dev->dev, "transfer terminated early - interrupt latency too high?\n"); ret = -EIO; done: i2c_dw_release_lock(dev); done_nolock: pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return ret; } static const struct i2c_algorithm i2c_dw_algo = { .master_xfer = i2c_dw_xfer, .functionality = i2c_dw_func, }; static const struct i2c_adapter_quirks i2c_dw_quirks = { .flags = I2C_AQ_NO_ZERO_LEN, }; static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev) { unsigned int stat, dummy; /* * The IC_INTR_STAT register just indicates "enabled" interrupts. * The unmasked raw version of interrupt status bits is available * in the IC_RAW_INTR_STAT register. * * That is, * stat = readl(IC_INTR_STAT); * equals to, * stat = readl(IC_RAW_INTR_STAT) & readl(IC_INTR_MASK); * * The raw version might be useful for debugging purposes. */ regmap_read(dev->map, DW_IC_INTR_STAT, &stat); /* * Do not use the IC_CLR_INTR register to clear interrupts, or * you'll miss some interrupts, triggered during the period from * readl(IC_INTR_STAT) to readl(IC_CLR_INTR). * * Instead, use the separately-prepared IC_CLR_* registers. */ if (stat & DW_IC_INTR_RX_UNDER) regmap_read(dev->map, DW_IC_CLR_RX_UNDER, &dummy); if (stat & DW_IC_INTR_RX_OVER) regmap_read(dev->map, DW_IC_CLR_RX_OVER, &dummy); if (stat & DW_IC_INTR_TX_OVER) regmap_read(dev->map, DW_IC_CLR_TX_OVER, &dummy); if (stat & DW_IC_INTR_RD_REQ) regmap_read(dev->map, DW_IC_CLR_RD_REQ, &dummy); if (stat & DW_IC_INTR_TX_ABRT) { /* * The IC_TX_ABRT_SOURCE register is cleared whenever * the IC_CLR_TX_ABRT is read. Preserve it beforehand. 
*/ regmap_read(dev->map, DW_IC_TX_ABRT_SOURCE, &dev->abort_source); regmap_read(dev->map, DW_IC_CLR_TX_ABRT, &dummy); } if (stat & DW_IC_INTR_RX_DONE) regmap_read(dev->map, DW_IC_CLR_RX_DONE, &dummy); if (stat & DW_IC_INTR_ACTIVITY) regmap_read(dev->map, DW_IC_CLR_ACTIVITY, &dummy); if ((stat & DW_IC_INTR_STOP_DET) && ((dev->rx_outstanding == 0) || (stat & DW_IC_INTR_RX_FULL))) regmap_read(dev->map, DW_IC_CLR_STOP_DET, &dummy); if (stat & DW_IC_INTR_START_DET) regmap_read(dev->map, DW_IC_CLR_START_DET, &dummy); if (stat & DW_IC_INTR_GEN_CALL) regmap_read(dev->map, DW_IC_CLR_GEN_CALL, &dummy); return stat; } /* * Interrupt service routine. This gets called whenever an I2C master interrupt * occurs. */ static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id) { struct dw_i2c_dev *dev = dev_id; unsigned int stat, enabled; regmap_read(dev->map, DW_IC_ENABLE, &enabled); regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &stat); if (!enabled || !(stat & ~DW_IC_INTR_ACTIVITY)) return IRQ_NONE; if (pm_runtime_suspended(dev->dev) || stat == GENMASK(31, 0)) return IRQ_NONE; dev_dbg(dev->dev, "enabled=%#x stat=%#x\n", enabled, stat); stat = i2c_dw_read_clear_intrbits(dev); if (!(dev->status & STATUS_ACTIVE)) { /* * Unexpected interrupt in driver point of view. State * variables are either unset or stale so acknowledge and * disable interrupts for suppressing further interrupts if * interrupt really came from this HW (E.g. firmware has left * the HW active). */ regmap_write(dev->map, DW_IC_INTR_MASK, 0); return IRQ_HANDLED; } if (stat & DW_IC_INTR_TX_ABRT) { dev->cmd_err |= DW_IC_ERR_TX_ABRT; dev->status &= ~STATUS_MASK; dev->rx_outstanding = 0; /* * Anytime TX_ABRT is set, the contents of the tx/rx * buffers are flushed. Make sure to skip them. */ regmap_write(dev->map, DW_IC_INTR_MASK, 0); goto tx_aborted; } if (stat & DW_IC_INTR_RX_FULL) i2c_dw_read(dev); if (stat & DW_IC_INTR_TX_EMPTY) i2c_dw_xfer_msg(dev); /* * No need to modify or disable the interrupt mask here. * i2c_dw_xfer_msg() will take care of it according to * the current transmit status. 
*/ tx_aborted: if (((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err) && (dev->rx_outstanding == 0)) complete(&dev->cmd_complete); else if (unlikely(dev->flags & ACCESS_INTR_MASK)) { /* Workaround to trigger pending interrupt */ regmap_read(dev->map, DW_IC_INTR_MASK, &stat); regmap_write(dev->map, DW_IC_INTR_MASK, 0); regmap_write(dev->map, DW_IC_INTR_MASK, stat); } return IRQ_HANDLED; } void i2c_dw_configure_master(struct dw_i2c_dev *dev) { struct i2c_timings *t = &dev->timings; dev->functionality = I2C_FUNC_10BIT_ADDR | DW_IC_DEFAULT_FUNCTIONALITY; dev->master_cfg = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE | DW_IC_CON_RESTART_EN; dev->mode = DW_IC_MASTER; switch (t->bus_freq_hz) { case I2C_MAX_STANDARD_MODE_FREQ: dev->master_cfg |= DW_IC_CON_SPEED_STD; break; case I2C_MAX_HIGH_SPEED_MODE_FREQ: dev->master_cfg |= DW_IC_CON_SPEED_HIGH; break; default: dev->master_cfg |= DW_IC_CON_SPEED_FAST; } } EXPORT_SYMBOL_GPL(i2c_dw_configure_master); static void i2c_dw_prepare_recovery(struct i2c_adapter *adap) { struct dw_i2c_dev *dev = i2c_get_adapdata(adap); i2c_dw_disable(dev); reset_control_assert(dev->rst); i2c_dw_prepare_clk(dev, false); } static void i2c_dw_unprepare_recovery(struct i2c_adapter *adap) { struct dw_i2c_dev *dev = i2c_get_adapdata(adap); i2c_dw_prepare_clk(dev, true); reset_control_deassert(dev->rst); i2c_dw_init_master(dev); } static int i2c_dw_init_recovery_info(struct dw_i2c_dev *dev) { struct i2c_bus_recovery_info *rinfo = &dev->rinfo; struct i2c_adapter *adap = &dev->adapter; struct gpio_desc *gpio; gpio = devm_gpiod_get_optional(dev->dev, "scl", GPIOD_OUT_HIGH); if (IS_ERR_OR_NULL(gpio)) return PTR_ERR_OR_ZERO(gpio); rinfo->scl_gpiod = gpio; gpio = devm_gpiod_get_optional(dev->dev, "sda", GPIOD_IN); if (IS_ERR(gpio)) return PTR_ERR(gpio); rinfo->sda_gpiod = gpio; rinfo->pinctrl = devm_pinctrl_get(dev->dev); if (IS_ERR(rinfo->pinctrl)) { if (PTR_ERR(rinfo->pinctrl) == -EPROBE_DEFER) return PTR_ERR(rinfo->pinctrl); rinfo->pinctrl = NULL; dev_err(dev->dev, "getting pinctrl info failed: bus recovery might not work\n"); } else if (!rinfo->pinctrl) { dev_dbg(dev->dev, "pinctrl is disabled, bus recovery might not work\n"); } rinfo->recover_bus = i2c_generic_scl_recovery; rinfo->prepare_recovery = i2c_dw_prepare_recovery; rinfo->unprepare_recovery = i2c_dw_unprepare_recovery; adap->bus_recovery_info = rinfo; dev_info(dev->dev, "running with gpio recovery mode! scl%s", rinfo->sda_gpiod ? 
",sda" : ""); return 0; } static int i2c_dw_poll_adap_quirk(struct dw_i2c_dev *dev) { struct i2c_adapter *adap = &dev->adapter; int ret; pm_runtime_get_noresume(dev->dev); ret = i2c_add_numbered_adapter(adap); if (ret) dev_err(dev->dev, "Failed to add adapter: %d\n", ret); pm_runtime_put_noidle(dev->dev); return ret; } static bool i2c_dw_is_model_poll(struct dw_i2c_dev *dev) { switch (dev->flags & MODEL_MASK) { case MODEL_AMD_NAVI_GPU: case MODEL_WANGXUN_SP: return true; default: return false; } } int i2c_dw_probe_master(struct dw_i2c_dev *dev) { struct i2c_adapter *adap = &dev->adapter; unsigned long irq_flags; unsigned int ic_con; int ret; init_completion(&dev->cmd_complete); dev->init = i2c_dw_init_master; dev->disable = i2c_dw_disable; ret = i2c_dw_init_regmap(dev); if (ret) return ret; ret = i2c_dw_set_timings_master(dev); if (ret) return ret; ret = i2c_dw_set_fifo_size(dev); if (ret) return ret; /* Lock the bus for accessing DW_IC_CON */ ret = i2c_dw_acquire_lock(dev); if (ret) return ret; /* * On AMD platforms BIOS advertises the bus clear feature * and enables the SCL/SDA stuck low. SMU FW does the * bus recovery process. Driver should not ignore this BIOS * advertisement of bus clear feature. */ ret = regmap_read(dev->map, DW_IC_CON, &ic_con); i2c_dw_release_lock(dev); if (ret) return ret; if (ic_con & DW_IC_CON_BUS_CLEAR_CTRL) dev->master_cfg |= DW_IC_CON_BUS_CLEAR_CTRL; ret = dev->init(dev); if (ret) return ret; snprintf(adap->name, sizeof(adap->name), "Synopsys DesignWare I2C adapter"); adap->retries = 3; adap->algo = &i2c_dw_algo; adap->quirks = &i2c_dw_quirks; adap->dev.parent = dev->dev; i2c_set_adapdata(adap, dev); if (i2c_dw_is_model_poll(dev)) return i2c_dw_poll_adap_quirk(dev); if (dev->flags & ACCESS_NO_IRQ_SUSPEND) { irq_flags = IRQF_NO_SUSPEND; } else { irq_flags = IRQF_SHARED | IRQF_COND_SUSPEND; } ret = i2c_dw_acquire_lock(dev); if (ret) return ret; regmap_write(dev->map, DW_IC_INTR_MASK, 0); i2c_dw_release_lock(dev); ret = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr, irq_flags, dev_name(dev->dev), dev); if (ret) { dev_err(dev->dev, "failure requesting irq %i: %d\n", dev->irq, ret); return ret; } ret = i2c_dw_init_recovery_info(dev); if (ret) return ret; /* * Increment PM usage count during adapter registration in order to * avoid possible spurious runtime suspend when adapter device is * registered to the device core and immediate resume in case bus has * registered I2C slaves that do I2C transfers in their probe. */ pm_runtime_get_noresume(dev->dev); ret = i2c_add_numbered_adapter(adap); if (ret) dev_err(dev->dev, "failure adding adapter: %d\n", ret); pm_runtime_put_noidle(dev->dev); return ret; } EXPORT_SYMBOL_GPL(i2c_dw_probe_master); MODULE_DESCRIPTION("Synopsys DesignWare I2C bus master adapter"); MODULE_LICENSE("GPL");
linux-master
drivers/i2c/busses/i2c-designware-master.c
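i2c_dw_xfer_msg() in the file above drives the controller by writing entries to DW_IC_DATA_CMD: bit 8 marks a read slot, bit 10 is set on the first entry of a follow-on message when the driver must issue the RESTART manually, and bit 9 adds a STOP on the very last entry. The sketch below only prints the command words such a sequence would produce for an assumed one-byte write followed by a two-byte read; the register value and lengths are illustrative and nothing touches real hardware.

/*
 * Sketch of the DW_IC_DATA_CMD encoding used by i2c_dw_xfer_msg() above.
 * The one-byte write plus two-byte read is an assumed example transfer.
 */
#include <stdio.h>

#define CMD_READ	(1u << 8)
#define CMD_STOP	(1u << 9)
#define CMD_RESTART	(1u << 10)

int main(void)
{
	unsigned int reg = 0x10;	/* assumed register address byte */
	unsigned int read_len = 2, i;

	printf("write entry: 0x%03x\n", reg);		/* data only, no flags */
	for (i = 0; i < read_len; i++) {
		unsigned int cmd = CMD_READ;

		if (i == 0)
			cmd |= CMD_RESTART;	/* new message after the write */
		if (i == read_len - 1)
			cmd |= CMD_STOP;	/* last entry of the last message */
		printf("read entry:  0x%03x\n", cmd);
	}
	return 0;
}

Running the sketch prints 0x010 for the write slot, then 0x500 (read plus RESTART) and 0x300 (read plus STOP), mirroring the order in which the driver fills the TX FIFO.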
// SPDX-License-Identifier: GPL-2.0-or-later /* * Ingenic JZ4780 I2C bus driver * * Copyright (C) 2006 - 2009 Ingenic Semiconductor Inc. * Copyright (C) 2015 Imagination Technologies * Copyright (C) 2019 周琰杰 (Zhou Yanjie) <[email protected]> */ #include <linux/bitops.h> #include <linux/clk.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/time.h> #define JZ4780_I2C_CTRL 0x00 #define JZ4780_I2C_TAR 0x04 #define JZ4780_I2C_SAR 0x08 #define JZ4780_I2C_DC 0x10 #define JZ4780_I2C_SHCNT 0x14 #define JZ4780_I2C_SLCNT 0x18 #define JZ4780_I2C_FHCNT 0x1C #define JZ4780_I2C_FLCNT 0x20 #define JZ4780_I2C_INTST 0x2C #define JZ4780_I2C_INTM 0x30 #define JZ4780_I2C_RXTL 0x38 #define JZ4780_I2C_TXTL 0x3C #define JZ4780_I2C_CINTR 0x40 #define JZ4780_I2C_CRXUF 0x44 #define JZ4780_I2C_CRXOF 0x48 #define JZ4780_I2C_CTXOF 0x4C #define JZ4780_I2C_CRXREQ 0x50 #define JZ4780_I2C_CTXABRT 0x54 #define JZ4780_I2C_CRXDONE 0x58 #define JZ4780_I2C_CACT 0x5C #define JZ4780_I2C_CSTP 0x60 #define JZ4780_I2C_CSTT 0x64 #define JZ4780_I2C_CGC 0x68 #define JZ4780_I2C_ENB 0x6C #define JZ4780_I2C_STA 0x70 #define JZ4780_I2C_TXABRT 0x80 #define JZ4780_I2C_DMACR 0x88 #define JZ4780_I2C_DMATDLR 0x8C #define JZ4780_I2C_DMARDLR 0x90 #define JZ4780_I2C_SDASU 0x94 #define JZ4780_I2C_ACKGC 0x98 #define JZ4780_I2C_ENSTA 0x9C #define JZ4780_I2C_SDAHD 0xD0 #define X1000_I2C_SDAHD 0x7C #define JZ4780_I2C_CTRL_STPHLD BIT(7) #define JZ4780_I2C_CTRL_SLVDIS BIT(6) #define JZ4780_I2C_CTRL_REST BIT(5) #define JZ4780_I2C_CTRL_MATP BIT(4) #define JZ4780_I2C_CTRL_SATP BIT(3) #define JZ4780_I2C_CTRL_SPDF BIT(2) #define JZ4780_I2C_CTRL_SPDS BIT(1) #define JZ4780_I2C_CTRL_MD BIT(0) #define JZ4780_I2C_STA_SLVACT BIT(6) #define JZ4780_I2C_STA_MSTACT BIT(5) #define JZ4780_I2C_STA_RFF BIT(4) #define JZ4780_I2C_STA_RFNE BIT(3) #define JZ4780_I2C_STA_TFE BIT(2) #define JZ4780_I2C_STA_TFNF BIT(1) #define JZ4780_I2C_STA_ACT BIT(0) #define X1000_I2C_DC_STOP BIT(9) #define JZ4780_I2C_INTST_IGC BIT(11) #define JZ4780_I2C_INTST_ISTT BIT(10) #define JZ4780_I2C_INTST_ISTP BIT(9) #define JZ4780_I2C_INTST_IACT BIT(8) #define JZ4780_I2C_INTST_RXDN BIT(7) #define JZ4780_I2C_INTST_TXABT BIT(6) #define JZ4780_I2C_INTST_RDREQ BIT(5) #define JZ4780_I2C_INTST_TXEMP BIT(4) #define JZ4780_I2C_INTST_TXOF BIT(3) #define JZ4780_I2C_INTST_RXFL BIT(2) #define JZ4780_I2C_INTST_RXOF BIT(1) #define JZ4780_I2C_INTST_RXUF BIT(0) #define JZ4780_I2C_INTM_MIGC BIT(11) #define JZ4780_I2C_INTM_MISTT BIT(10) #define JZ4780_I2C_INTM_MISTP BIT(9) #define JZ4780_I2C_INTM_MIACT BIT(8) #define JZ4780_I2C_INTM_MRXDN BIT(7) #define JZ4780_I2C_INTM_MTXABT BIT(6) #define JZ4780_I2C_INTM_MRDREQ BIT(5) #define JZ4780_I2C_INTM_MTXEMP BIT(4) #define JZ4780_I2C_INTM_MTXOF BIT(3) #define JZ4780_I2C_INTM_MRXFL BIT(2) #define JZ4780_I2C_INTM_MRXOF BIT(1) #define JZ4780_I2C_INTM_MRXUF BIT(0) #define JZ4780_I2C_DC_READ BIT(8) #define JZ4780_I2C_SDAHD_HDENB BIT(8) #define JZ4780_I2C_ENB_I2C BIT(0) #define JZ4780_I2CSHCNT_ADJUST(n) (((n) - 8) < 6 ? 6 : ((n) - 8)) #define JZ4780_I2CSLCNT_ADJUST(n) (((n) - 1) < 8 ? 8 : ((n) - 1)) #define JZ4780_I2CFHCNT_ADJUST(n) (((n) - 8) < 6 ? 6 : ((n) - 8)) #define JZ4780_I2CFLCNT_ADJUST(n) (((n) - 1) < 8 ? 
8 : ((n) - 1)) #define JZ4780_I2C_FIFO_LEN 16 #define X1000_I2C_FIFO_LEN 64 #define JZ4780_I2C_TIMEOUT 300 #define BUFSIZE 200 enum ingenic_i2c_version { ID_JZ4780, ID_X1000, }; /* ingenic_i2c_config: SoC specific config data. */ struct ingenic_i2c_config { enum ingenic_i2c_version version; int fifosize; int tx_level; int rx_level; }; struct jz4780_i2c { void __iomem *iomem; int irq; struct clk *clk; struct i2c_adapter adap; const struct ingenic_i2c_config *cdata; /* lock to protect rbuf and wbuf between xfer_rd/wr and irq handler */ spinlock_t lock; /* beginning of lock scope */ unsigned char *rbuf; int rd_total_len; int rd_data_xfered; int rd_cmd_xfered; unsigned char *wbuf; int wt_len; int is_write; int stop_hold; int speed; int data_buf[BUFSIZE]; int cmd_buf[BUFSIZE]; int cmd; /* end of lock scope */ struct completion trans_waitq; }; static inline unsigned short jz4780_i2c_readw(struct jz4780_i2c *i2c, unsigned long offset) { return readw(i2c->iomem + offset); } static inline void jz4780_i2c_writew(struct jz4780_i2c *i2c, unsigned long offset, unsigned short val) { writew(val, i2c->iomem + offset); } static int jz4780_i2c_disable(struct jz4780_i2c *i2c) { unsigned short regval; unsigned long loops = 5; jz4780_i2c_writew(i2c, JZ4780_I2C_ENB, 0); do { regval = jz4780_i2c_readw(i2c, JZ4780_I2C_ENSTA); if (!(regval & JZ4780_I2C_ENB_I2C)) return 0; usleep_range(5000, 15000); } while (--loops); dev_err(&i2c->adap.dev, "disable failed: ENSTA=0x%04x\n", regval); return -ETIMEDOUT; } static int jz4780_i2c_enable(struct jz4780_i2c *i2c) { unsigned short regval; unsigned long loops = 5; jz4780_i2c_writew(i2c, JZ4780_I2C_ENB, 1); do { regval = jz4780_i2c_readw(i2c, JZ4780_I2C_ENSTA); if (regval & JZ4780_I2C_ENB_I2C) return 0; usleep_range(5000, 15000); } while (--loops); dev_err(&i2c->adap.dev, "enable failed: ENSTA=0x%04x\n", regval); return -ETIMEDOUT; } static int jz4780_i2c_set_target(struct jz4780_i2c *i2c, unsigned char address) { unsigned short regval; unsigned long loops = 5; do { regval = jz4780_i2c_readw(i2c, JZ4780_I2C_STA); if ((regval & JZ4780_I2C_STA_TFE) && !(regval & JZ4780_I2C_STA_MSTACT)) break; usleep_range(5000, 15000); } while (--loops); if (loops) { jz4780_i2c_writew(i2c, JZ4780_I2C_TAR, address); return 0; } dev_err(&i2c->adap.dev, "set device to address 0x%02x failed, STA=0x%04x\n", address, regval); return -ENXIO; } static int jz4780_i2c_set_speed(struct jz4780_i2c *i2c) { int dev_clk_khz = clk_get_rate(i2c->clk) / 1000; int cnt_high = 0; /* HIGH period count of the SCL clock */ int cnt_low = 0; /* LOW period count of the SCL clock */ int cnt_period = 0; /* period count of the SCL clock */ int setup_time = 0; int hold_time = 0; unsigned short tmp = 0; int i2c_clk = i2c->speed; if (jz4780_i2c_disable(i2c)) dev_dbg(&i2c->adap.dev, "i2c not disabled\n"); /* * 1 JZ4780_I2C cycle equals to cnt_period PCLK(i2c_clk) * standard mode, min LOW and HIGH period are 4700 ns and 4000 ns * fast mode, min LOW and HIGH period are 1300 ns and 600 ns */ cnt_period = dev_clk_khz / i2c_clk; if (i2c_clk <= 100) cnt_high = (cnt_period * 4000) / (4700 + 4000); else cnt_high = (cnt_period * 600) / (1300 + 600); cnt_low = cnt_period - cnt_high; /* * NOTE: JZ4780_I2C_CTRL_REST can't set when i2c enabled, because * normal read are 2 messages, we cannot disable i2c controller * between these two messages, this means that we must always set * JZ4780_I2C_CTRL_REST when init JZ4780_I2C_CTRL * */ if (i2c_clk <= 100) { tmp = JZ4780_I2C_CTRL_SPDS | JZ4780_I2C_CTRL_REST | JZ4780_I2C_CTRL_SLVDIS | 
JZ4780_I2C_CTRL_MD; jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp); jz4780_i2c_writew(i2c, JZ4780_I2C_SHCNT, JZ4780_I2CSHCNT_ADJUST(cnt_high)); jz4780_i2c_writew(i2c, JZ4780_I2C_SLCNT, JZ4780_I2CSLCNT_ADJUST(cnt_low)); } else { tmp = JZ4780_I2C_CTRL_SPDF | JZ4780_I2C_CTRL_REST | JZ4780_I2C_CTRL_SLVDIS | JZ4780_I2C_CTRL_MD; jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp); jz4780_i2c_writew(i2c, JZ4780_I2C_FHCNT, JZ4780_I2CFHCNT_ADJUST(cnt_high)); jz4780_i2c_writew(i2c, JZ4780_I2C_FLCNT, JZ4780_I2CFLCNT_ADJUST(cnt_low)); } /* * a i2c device must internally provide a hold time at least 300ns * tHD:DAT * Standard Mode: min=300ns, max=3450ns * Fast Mode: min=0ns, max=900ns * tSU:DAT * Standard Mode: min=250ns, max=infinite * Fast Mode: min=100(250ns is recommended), max=infinite * * 1i2c_clk = 10^6 / dev_clk_khz * on FPGA, dev_clk_khz = 12000, so 1i2c_clk = 1000/12 = 83ns * on Pisces(1008M), dev_clk_khz=126000, so 1i2c_clk = 1000 / 126 = 8ns * * The actual hold time is (SDAHD + 1) * (i2c_clk period). * * Length of setup time calculated using (SDASU - 1) * (ic_clk_period) * */ if (i2c_clk <= 100) { /* standard mode */ setup_time = 300; hold_time = 400; } else { setup_time = 450; hold_time = 450; } hold_time = ((hold_time * dev_clk_khz) / 1000000) - 1; setup_time = ((setup_time * dev_clk_khz) / 1000000) + 1; if (setup_time > 255) setup_time = 255; if (setup_time <= 0) setup_time = 1; jz4780_i2c_writew(i2c, JZ4780_I2C_SDASU, setup_time); if (hold_time > 255) hold_time = 255; if (hold_time >= 0) { /*i2c hold time enable */ if (i2c->cdata->version >= ID_X1000) { jz4780_i2c_writew(i2c, X1000_I2C_SDAHD, hold_time); } else { hold_time |= JZ4780_I2C_SDAHD_HDENB; jz4780_i2c_writew(i2c, JZ4780_I2C_SDAHD, hold_time); } } else { /* disable hold time */ if (i2c->cdata->version >= ID_X1000) jz4780_i2c_writew(i2c, X1000_I2C_SDAHD, 0); else jz4780_i2c_writew(i2c, JZ4780_I2C_SDAHD, 0); } return 0; } static int jz4780_i2c_cleanup(struct jz4780_i2c *i2c) { int ret; unsigned long flags; unsigned short tmp; spin_lock_irqsave(&i2c->lock, flags); /* can send stop now if need */ if (i2c->cdata->version < ID_X1000) { tmp = jz4780_i2c_readw(i2c, JZ4780_I2C_CTRL); tmp &= ~JZ4780_I2C_CTRL_STPHLD; jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp); } /* disable all interrupts first */ jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, 0); /* then clear all interrupts */ jz4780_i2c_readw(i2c, JZ4780_I2C_CTXABRT); jz4780_i2c_readw(i2c, JZ4780_I2C_CINTR); /* then disable the controller */ tmp = jz4780_i2c_readw(i2c, JZ4780_I2C_CTRL); tmp &= ~JZ4780_I2C_ENB_I2C; jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp); udelay(10); tmp |= JZ4780_I2C_ENB_I2C; jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp); spin_unlock_irqrestore(&i2c->lock, flags); ret = jz4780_i2c_disable(i2c); if (ret) dev_err(&i2c->adap.dev, "unable to disable device during cleanup!\n"); if (unlikely(jz4780_i2c_readw(i2c, JZ4780_I2C_INTM) & jz4780_i2c_readw(i2c, JZ4780_I2C_INTST))) dev_err(&i2c->adap.dev, "device has interrupts after a complete cleanup!\n"); return ret; } static int jz4780_i2c_prepare(struct jz4780_i2c *i2c) { jz4780_i2c_set_speed(i2c); return jz4780_i2c_enable(i2c); } static void jz4780_i2c_send_rcmd(struct jz4780_i2c *i2c, int cmd_count, int cmd_left) { int i; for (i = 0; i < cmd_count - 1; i++) jz4780_i2c_writew(i2c, JZ4780_I2C_DC, JZ4780_I2C_DC_READ); if ((cmd_left == 0) && (i2c->cdata->version >= ID_X1000)) jz4780_i2c_writew(i2c, JZ4780_I2C_DC, JZ4780_I2C_DC_READ | X1000_I2C_DC_STOP); else jz4780_i2c_writew(i2c, JZ4780_I2C_DC, JZ4780_I2C_DC_READ); } static void 
jz4780_i2c_trans_done(struct jz4780_i2c *i2c) { jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, 0); complete(&i2c->trans_waitq); } static irqreturn_t jz4780_i2c_irq(int irqno, void *dev_id) { unsigned short tmp; unsigned short intst; unsigned short intmsk; struct jz4780_i2c *i2c = dev_id; spin_lock(&i2c->lock); intmsk = jz4780_i2c_readw(i2c, JZ4780_I2C_INTM); intst = jz4780_i2c_readw(i2c, JZ4780_I2C_INTST); intst &= intmsk; if (intst & JZ4780_I2C_INTST_TXABT) { jz4780_i2c_trans_done(i2c); goto done; } if (intst & JZ4780_I2C_INTST_RXOF) { dev_dbg(&i2c->adap.dev, "received fifo overflow!\n"); jz4780_i2c_trans_done(i2c); goto done; } /* * When reading, always drain RX FIFO before we send more Read * Commands to avoid fifo overrun */ if (i2c->is_write == 0) { int rd_left; while ((jz4780_i2c_readw(i2c, JZ4780_I2C_STA) & JZ4780_I2C_STA_RFNE)) { *(i2c->rbuf++) = jz4780_i2c_readw(i2c, JZ4780_I2C_DC) & 0xff; i2c->rd_data_xfered++; if (i2c->rd_data_xfered == i2c->rd_total_len) { jz4780_i2c_trans_done(i2c); goto done; } } rd_left = i2c->rd_total_len - i2c->rd_data_xfered; if (rd_left <= i2c->cdata->fifosize) jz4780_i2c_writew(i2c, JZ4780_I2C_RXTL, rd_left - 1); } if (intst & JZ4780_I2C_INTST_TXEMP) { if (i2c->is_write == 0) { int cmd_left = i2c->rd_total_len - i2c->rd_cmd_xfered; int max_send = (i2c->cdata->fifosize - 1) - (i2c->rd_cmd_xfered - i2c->rd_data_xfered); int cmd_to_send = min(cmd_left, max_send); if (i2c->rd_cmd_xfered != 0) cmd_to_send = min(cmd_to_send, i2c->cdata->fifosize - i2c->cdata->tx_level - 1); if (cmd_to_send) { i2c->rd_cmd_xfered += cmd_to_send; cmd_left = i2c->rd_total_len - i2c->rd_cmd_xfered; jz4780_i2c_send_rcmd(i2c, cmd_to_send, cmd_left); } if (cmd_left == 0) { intmsk = jz4780_i2c_readw(i2c, JZ4780_I2C_INTM); intmsk &= ~JZ4780_I2C_INTM_MTXEMP; jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, intmsk); if (i2c->cdata->version < ID_X1000) { tmp = jz4780_i2c_readw(i2c, JZ4780_I2C_CTRL); tmp &= ~JZ4780_I2C_CTRL_STPHLD; jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp); } } } else { unsigned short data; unsigned short i2c_sta; i2c_sta = jz4780_i2c_readw(i2c, JZ4780_I2C_STA); while ((i2c_sta & JZ4780_I2C_STA_TFNF) && (i2c->wt_len > 0)) { i2c_sta = jz4780_i2c_readw(i2c, JZ4780_I2C_STA); data = *i2c->wbuf; data &= ~JZ4780_I2C_DC_READ; if ((i2c->wt_len == 1) && (!i2c->stop_hold) && (i2c->cdata->version >= ID_X1000)) data |= X1000_I2C_DC_STOP; jz4780_i2c_writew(i2c, JZ4780_I2C_DC, data); i2c->wbuf++; i2c->wt_len--; } if (i2c->wt_len == 0) { if ((!i2c->stop_hold) && (i2c->cdata->version < ID_X1000)) { tmp = jz4780_i2c_readw(i2c, JZ4780_I2C_CTRL); tmp &= ~JZ4780_I2C_CTRL_STPHLD; jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp); } jz4780_i2c_trans_done(i2c); goto done; } } } done: spin_unlock(&i2c->lock); return IRQ_HANDLED; } static void jz4780_i2c_txabrt(struct jz4780_i2c *i2c, int src) { dev_dbg(&i2c->adap.dev, "txabrt: 0x%08x, cmd: %d, send: %d, recv: %d\n", src, i2c->cmd, i2c->cmd_buf[i2c->cmd], i2c->data_buf[i2c->cmd]); } static inline int jz4780_i2c_xfer_read(struct jz4780_i2c *i2c, unsigned char *buf, int len, int cnt, int idx) { int ret = 0; long timeout; int wait_time = JZ4780_I2C_TIMEOUT * (len + 5); unsigned short tmp; unsigned long flags; memset(buf, 0, len); spin_lock_irqsave(&i2c->lock, flags); i2c->stop_hold = 0; i2c->is_write = 0; i2c->rbuf = buf; i2c->rd_total_len = len; i2c->rd_data_xfered = 0; i2c->rd_cmd_xfered = 0; if (len <= i2c->cdata->fifosize) jz4780_i2c_writew(i2c, JZ4780_I2C_RXTL, len - 1); else jz4780_i2c_writew(i2c, JZ4780_I2C_RXTL, i2c->cdata->rx_level); jz4780_i2c_writew(i2c, 
JZ4780_I2C_TXTL, i2c->cdata->tx_level); jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, JZ4780_I2C_INTM_MRXFL | JZ4780_I2C_INTM_MTXEMP | JZ4780_I2C_INTM_MTXABT | JZ4780_I2C_INTM_MRXOF); if (i2c->cdata->version < ID_X1000) { tmp = jz4780_i2c_readw(i2c, JZ4780_I2C_CTRL); tmp |= JZ4780_I2C_CTRL_STPHLD; jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp); } spin_unlock_irqrestore(&i2c->lock, flags); timeout = wait_for_completion_timeout(&i2c->trans_waitq, msecs_to_jiffies(wait_time)); if (!timeout) { dev_err(&i2c->adap.dev, "irq read timeout\n"); dev_dbg(&i2c->adap.dev, "send cmd count:%d %d\n", i2c->cmd, i2c->cmd_buf[i2c->cmd]); dev_dbg(&i2c->adap.dev, "receive data count:%d %d\n", i2c->cmd, i2c->data_buf[i2c->cmd]); ret = -EIO; } tmp = jz4780_i2c_readw(i2c, JZ4780_I2C_TXABRT); if (tmp) { jz4780_i2c_txabrt(i2c, tmp); ret = -EIO; } return ret; } static inline int jz4780_i2c_xfer_write(struct jz4780_i2c *i2c, unsigned char *buf, int len, int cnt, int idx) { int ret = 0; int wait_time = JZ4780_I2C_TIMEOUT * (len + 5); long timeout; unsigned short tmp; unsigned long flags; spin_lock_irqsave(&i2c->lock, flags); if (idx < (cnt - 1)) i2c->stop_hold = 1; else i2c->stop_hold = 0; i2c->is_write = 1; i2c->wbuf = buf; i2c->wt_len = len; jz4780_i2c_writew(i2c, JZ4780_I2C_TXTL, i2c->cdata->tx_level); jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, JZ4780_I2C_INTM_MTXEMP | JZ4780_I2C_INTM_MTXABT); if (i2c->cdata->version < ID_X1000) { tmp = jz4780_i2c_readw(i2c, JZ4780_I2C_CTRL); tmp |= JZ4780_I2C_CTRL_STPHLD; jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp); } spin_unlock_irqrestore(&i2c->lock, flags); timeout = wait_for_completion_timeout(&i2c->trans_waitq, msecs_to_jiffies(wait_time)); if (timeout && !i2c->stop_hold) { unsigned short i2c_sta; int write_in_process; timeout = JZ4780_I2C_TIMEOUT * 100; for (; timeout > 0; timeout--) { i2c_sta = jz4780_i2c_readw(i2c, JZ4780_I2C_STA); write_in_process = (i2c_sta & JZ4780_I2C_STA_MSTACT) || !(i2c_sta & JZ4780_I2C_STA_TFE); if (!write_in_process) break; udelay(10); } } if (!timeout) { dev_err(&i2c->adap.dev, "write wait timeout\n"); ret = -EIO; } tmp = jz4780_i2c_readw(i2c, JZ4780_I2C_TXABRT); if (tmp) { jz4780_i2c_txabrt(i2c, tmp); ret = -EIO; } return ret; } static int jz4780_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int count) { int i = -EIO; int ret = 0; struct jz4780_i2c *i2c = adap->algo_data; ret = jz4780_i2c_prepare(i2c); if (ret) { dev_err(&i2c->adap.dev, "I2C prepare failed\n"); goto out; } if (msg->addr != jz4780_i2c_readw(i2c, JZ4780_I2C_TAR)) { ret = jz4780_i2c_set_target(i2c, msg->addr); if (ret) goto out; } for (i = 0; i < count; i++, msg++) { if (msg->flags & I2C_M_RD) ret = jz4780_i2c_xfer_read(i2c, msg->buf, msg->len, count, i); else ret = jz4780_i2c_xfer_write(i2c, msg->buf, msg->len, count, i); if (ret) goto out; } ret = i; out: jz4780_i2c_cleanup(i2c); return ret; } static u32 jz4780_i2c_functionality(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static const struct i2c_algorithm jz4780_i2c_algorithm = { .master_xfer = jz4780_i2c_xfer, .functionality = jz4780_i2c_functionality, }; static const struct ingenic_i2c_config jz4780_i2c_config = { .version = ID_JZ4780, .fifosize = JZ4780_I2C_FIFO_LEN, .tx_level = JZ4780_I2C_FIFO_LEN / 2, .rx_level = JZ4780_I2C_FIFO_LEN / 2 - 1, }; static const struct ingenic_i2c_config x1000_i2c_config = { .version = ID_X1000, .fifosize = X1000_I2C_FIFO_LEN, .tx_level = X1000_I2C_FIFO_LEN / 2, .rx_level = X1000_I2C_FIFO_LEN / 2 - 1, }; static const struct of_device_id jz4780_i2c_of_matches[] = { { 
.compatible = "ingenic,jz4770-i2c", .data = &jz4780_i2c_config }, { .compatible = "ingenic,jz4780-i2c", .data = &jz4780_i2c_config }, { .compatible = "ingenic,x1000-i2c", .data = &x1000_i2c_config }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, jz4780_i2c_of_matches); static int jz4780_i2c_probe(struct platform_device *pdev) { int ret = 0; unsigned int clk_freq = 0; unsigned short tmp; struct jz4780_i2c *i2c; i2c = devm_kzalloc(&pdev->dev, sizeof(struct jz4780_i2c), GFP_KERNEL); if (!i2c) return -ENOMEM; i2c->cdata = device_get_match_data(&pdev->dev); if (!i2c->cdata) { dev_err(&pdev->dev, "Error: No device match found\n"); return -ENODEV; } i2c->adap.owner = THIS_MODULE; i2c->adap.algo = &jz4780_i2c_algorithm; i2c->adap.algo_data = i2c; i2c->adap.retries = 5; i2c->adap.dev.parent = &pdev->dev; i2c->adap.dev.of_node = pdev->dev.of_node; sprintf(i2c->adap.name, "%s", pdev->name); init_completion(&i2c->trans_waitq); spin_lock_init(&i2c->lock); i2c->iomem = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(i2c->iomem)) return PTR_ERR(i2c->iomem); platform_set_drvdata(pdev, i2c); i2c->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(i2c->clk)) return PTR_ERR(i2c->clk); ret = clk_prepare_enable(i2c->clk); if (ret) return ret; ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency", &clk_freq); if (ret) { dev_err(&pdev->dev, "clock-frequency not specified in DT\n"); goto err; } i2c->speed = clk_freq / 1000; if (i2c->speed == 0) { ret = -EINVAL; dev_err(&pdev->dev, "clock-frequency minimum is 1000\n"); goto err; } jz4780_i2c_set_speed(i2c); dev_info(&pdev->dev, "Bus frequency is %d KHz\n", i2c->speed); if (i2c->cdata->version < ID_X1000) { tmp = jz4780_i2c_readw(i2c, JZ4780_I2C_CTRL); tmp &= ~JZ4780_I2C_CTRL_STPHLD; jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp); } jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, 0x0); ret = platform_get_irq(pdev, 0); if (ret < 0) goto err; i2c->irq = ret; ret = devm_request_irq(&pdev->dev, i2c->irq, jz4780_i2c_irq, 0, dev_name(&pdev->dev), i2c); if (ret) goto err; ret = i2c_add_adapter(&i2c->adap); if (ret < 0) goto err; return 0; err: clk_disable_unprepare(i2c->clk); return ret; } static void jz4780_i2c_remove(struct platform_device *pdev) { struct jz4780_i2c *i2c = platform_get_drvdata(pdev); clk_disable_unprepare(i2c->clk); i2c_del_adapter(&i2c->adap); } static struct platform_driver jz4780_i2c_driver = { .probe = jz4780_i2c_probe, .remove_new = jz4780_i2c_remove, .driver = { .name = "jz4780-i2c", .of_match_table = jz4780_i2c_of_matches, }, }; module_platform_driver(jz4780_i2c_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("ztyan<[email protected]>"); MODULE_DESCRIPTION("i2c driver for JZ4780 SoCs");
linux-master
drivers/i2c/busses/i2c-jz4780.c
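The jz4780 driver above only registers a struct i2c_adapter; clients reach jz4780_i2c_xfer() through the generic i2c core. As a hedged illustration (not part of the driver), the user-space sketch below shows the kind of two-message write-then-read transfer that would arrive there via the i2c-dev interface; the bus number /dev/i2c-1, slave address 0x50 and register offset are hypothetical.

/*
 * Illustrative only: a user-space combined write/read that ends up in
 * the adapter's master_xfer hook (jz4780_i2c_xfer) as two i2c_msg
 * entries. Bus number, slave address and register offset are made up.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

int main(void)
{
	uint8_t reg = 0x00;	/* register offset to read (hypothetical) */
	uint8_t val = 0;
	struct i2c_msg msgs[2] = {
		{ .addr = 0x50, .flags = 0,        .len = 1, .buf = &reg },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = &val },
	};
	struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 2 };
	int fd = open("/dev/i2c-1", O_RDWR);

	if (fd < 0)
		return 1;
	/* One ioctl hands both messages to the adapter as a single transfer. */
	if (ioctl(fd, I2C_RDWR, &xfer) < 0) {
		perror("I2C_RDWR");
		close(fd);
		return 1;
	}
	printf("reg 0x%02x = 0x%02x\n", reg, val);
	close(fd);
	return 0;
}

Because both messages are submitted in one I2C_RDWR call, the driver sees count == 2 in jz4780_i2c_xfer() and can keep the bus held between the write and the read.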
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2009-2013, 2016-2018, The Linux Foundation. All rights reserved. * Copyright (c) 2014, Sony Mobile Communications AB. * */ #include <linux/acpi.h> #include <linux/atomic.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/dmaengine.h> #include <linux/dmapool.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/scatterlist.h> /* QUP Registers */ #define QUP_CONFIG 0x000 #define QUP_STATE 0x004 #define QUP_IO_MODE 0x008 #define QUP_SW_RESET 0x00c #define QUP_OPERATIONAL 0x018 #define QUP_ERROR_FLAGS 0x01c #define QUP_ERROR_FLAGS_EN 0x020 #define QUP_OPERATIONAL_MASK 0x028 #define QUP_HW_VERSION 0x030 #define QUP_MX_OUTPUT_CNT 0x100 #define QUP_OUT_FIFO_BASE 0x110 #define QUP_MX_WRITE_CNT 0x150 #define QUP_MX_INPUT_CNT 0x200 #define QUP_MX_READ_CNT 0x208 #define QUP_IN_FIFO_BASE 0x218 #define QUP_I2C_CLK_CTL 0x400 #define QUP_I2C_STATUS 0x404 #define QUP_I2C_MASTER_GEN 0x408 /* QUP States and reset values */ #define QUP_RESET_STATE 0 #define QUP_RUN_STATE 1 #define QUP_PAUSE_STATE 3 #define QUP_STATE_MASK 3 #define QUP_STATE_VALID BIT(2) #define QUP_I2C_MAST_GEN BIT(4) #define QUP_I2C_FLUSH BIT(6) #define QUP_OPERATIONAL_RESET 0x000ff0 #define QUP_I2C_STATUS_RESET 0xfffffc /* QUP OPERATIONAL FLAGS */ #define QUP_I2C_NACK_FLAG BIT(3) #define QUP_OUT_NOT_EMPTY BIT(4) #define QUP_IN_NOT_EMPTY BIT(5) #define QUP_OUT_FULL BIT(6) #define QUP_OUT_SVC_FLAG BIT(8) #define QUP_IN_SVC_FLAG BIT(9) #define QUP_MX_OUTPUT_DONE BIT(10) #define QUP_MX_INPUT_DONE BIT(11) #define OUT_BLOCK_WRITE_REQ BIT(12) #define IN_BLOCK_READ_REQ BIT(13) /* I2C mini core related values */ #define QUP_NO_INPUT BIT(7) #define QUP_CLOCK_AUTO_GATE BIT(13) #define I2C_MINI_CORE (2 << 8) #define I2C_N_VAL 15 #define I2C_N_VAL_V2 7 /* Most significant word offset in FIFO port */ #define QUP_MSW_SHIFT (I2C_N_VAL + 1) /* Packing/Unpacking words in FIFOs, and IO modes */ #define QUP_OUTPUT_BLK_MODE (1 << 10) #define QUP_OUTPUT_BAM_MODE (3 << 10) #define QUP_INPUT_BLK_MODE (1 << 12) #define QUP_INPUT_BAM_MODE (3 << 12) #define QUP_BAM_MODE (QUP_OUTPUT_BAM_MODE | QUP_INPUT_BAM_MODE) #define QUP_UNPACK_EN BIT(14) #define QUP_PACK_EN BIT(15) #define QUP_REPACK_EN (QUP_UNPACK_EN | QUP_PACK_EN) #define QUP_V2_TAGS_EN 1 #define QUP_OUTPUT_BLOCK_SIZE(x)(((x) >> 0) & 0x03) #define QUP_OUTPUT_FIFO_SIZE(x) (((x) >> 2) & 0x07) #define QUP_INPUT_BLOCK_SIZE(x) (((x) >> 5) & 0x03) #define QUP_INPUT_FIFO_SIZE(x) (((x) >> 7) & 0x07) /* QUP tags */ #define QUP_TAG_START (1 << 8) #define QUP_TAG_DATA (2 << 8) #define QUP_TAG_STOP (3 << 8) #define QUP_TAG_REC (4 << 8) #define QUP_BAM_INPUT_EOT 0x93 #define QUP_BAM_FLUSH_STOP 0x96 /* QUP v2 tags */ #define QUP_TAG_V2_START 0x81 #define QUP_TAG_V2_DATAWR 0x82 #define QUP_TAG_V2_DATAWR_STOP 0x83 #define QUP_TAG_V2_DATARD 0x85 #define QUP_TAG_V2_DATARD_NACK 0x86 #define QUP_TAG_V2_DATARD_STOP 0x87 /* Status, Error flags */ #define I2C_STATUS_WR_BUFFER_FULL BIT(0) #define I2C_STATUS_BUS_ACTIVE BIT(8) #define I2C_STATUS_ERROR_MASK 0x38000fc #define QUP_STATUS_ERROR_FLAGS 0x7c #define QUP_READ_LIMIT 256 #define SET_BIT 0x1 #define RESET_BIT 0x0 #define ONE_BYTE 0x1 #define QUP_I2C_MX_CONFIG_DURING_RUN BIT(31) /* Maximum transfer length for single DMA descriptor */ #define MX_TX_RX_LEN SZ_64K #define MX_BLOCKS (MX_TX_RX_LEN / 
QUP_READ_LIMIT) /* Maximum transfer length for all DMA descriptors */ #define MX_DMA_TX_RX_LEN (2 * MX_TX_RX_LEN) #define MX_DMA_BLOCKS (MX_DMA_TX_RX_LEN / QUP_READ_LIMIT) /* * Minimum transfer timeout for i2c transfers in seconds. It will be added on * the top of maximum transfer time calculated from i2c bus speed to compensate * the overheads. */ #define TOUT_MIN 2 /* Default values. Use these if FW query fails */ #define DEFAULT_CLK_FREQ I2C_MAX_STANDARD_MODE_FREQ #define DEFAULT_SRC_CLK 20000000 /* * Max tags length (start, stop and maximum 2 bytes address) for each QUP * data transfer */ #define QUP_MAX_TAGS_LEN 4 /* Max data length for each DATARD tags */ #define RECV_MAX_DATA_LEN 254 /* TAG length for DATA READ in RX FIFO */ #define READ_RX_TAGS_LEN 2 static unsigned int scl_freq; module_param_named(scl_freq, scl_freq, uint, 0444); MODULE_PARM_DESC(scl_freq, "SCL frequency override"); /* * count: no of blocks * pos: current block number * tx_tag_len: tx tag length for current block * rx_tag_len: rx tag length for current block * data_len: remaining data length for current message * cur_blk_len: data length for current block * total_tx_len: total tx length including tag bytes for current QUP transfer * total_rx_len: total rx length including tag bytes for current QUP transfer * tx_fifo_data_pos: current byte number in TX FIFO word * tx_fifo_free: number of free bytes in current QUP block write. * rx_fifo_data_pos: current byte number in RX FIFO word * fifo_available: number of available bytes in RX FIFO for current * QUP block read * tx_fifo_data: QUP TX FIFO write works on word basis (4 bytes). New byte write * to TX FIFO will be appended in this data and will be written to * TX FIFO when all the 4 bytes are available. * rx_fifo_data: QUP RX FIFO read works on word basis (4 bytes). This will * contains the 4 bytes of RX data. * cur_data: pointer to tell cur data position for current message * cur_tx_tags: pointer to tell cur position in tags * tx_tags_sent: all tx tag bytes have been written in FIFO word * send_last_word: for tx FIFO, last word send is pending in current block * rx_bytes_read: if all the bytes have been read from rx FIFO. * rx_tags_fetched: all the rx tag bytes have been fetched from rx fifo word * is_tx_blk_mode: whether tx uses block or FIFO mode in case of non BAM xfer. * is_rx_blk_mode: whether rx uses block or FIFO mode in case of non BAM xfer. 
* tags: contains tx tag bytes for current QUP transfer */ struct qup_i2c_block { int count; int pos; int tx_tag_len; int rx_tag_len; int data_len; int cur_blk_len; int total_tx_len; int total_rx_len; int tx_fifo_data_pos; int tx_fifo_free; int rx_fifo_data_pos; int fifo_available; u32 tx_fifo_data; u32 rx_fifo_data; u8 *cur_data; u8 *cur_tx_tags; bool tx_tags_sent; bool send_last_word; bool rx_tags_fetched; bool rx_bytes_read; bool is_tx_blk_mode; bool is_rx_blk_mode; u8 tags[6]; }; struct qup_i2c_tag { u8 *start; dma_addr_t addr; }; struct qup_i2c_bam { struct qup_i2c_tag tag; struct dma_chan *dma; struct scatterlist *sg; unsigned int sg_cnt; }; struct qup_i2c_dev { struct device *dev; void __iomem *base; int irq; struct clk *clk; struct clk *pclk; struct i2c_adapter adap; int clk_ctl; int out_fifo_sz; int in_fifo_sz; int out_blk_sz; int in_blk_sz; int blk_xfer_limit; unsigned long one_byte_t; unsigned long xfer_timeout; struct qup_i2c_block blk; struct i2c_msg *msg; /* Current posion in user message buffer */ int pos; /* I2C protocol errors */ u32 bus_err; /* QUP core errors */ u32 qup_err; /* To check if this is the last msg */ bool is_last; bool is_smbus_read; /* To configure when bus is in run state */ u32 config_run; /* dma parameters */ bool is_dma; /* To check if the current transfer is using DMA */ bool use_dma; unsigned int max_xfer_sg_len; unsigned int tag_buf_pos; /* The threshold length above which block mode will be used */ unsigned int blk_mode_threshold; struct dma_pool *dpool; struct qup_i2c_tag start_tag; struct qup_i2c_bam brx; struct qup_i2c_bam btx; struct completion xfer; /* function to write data in tx fifo */ void (*write_tx_fifo)(struct qup_i2c_dev *qup); /* function to read data from rx fifo */ void (*read_rx_fifo)(struct qup_i2c_dev *qup); /* function to write tags in tx fifo for i2c read transfer */ void (*write_rx_tags)(struct qup_i2c_dev *qup); }; static irqreturn_t qup_i2c_interrupt(int irq, void *dev) { struct qup_i2c_dev *qup = dev; struct qup_i2c_block *blk = &qup->blk; u32 bus_err; u32 qup_err; u32 opflags; bus_err = readl(qup->base + QUP_I2C_STATUS); qup_err = readl(qup->base + QUP_ERROR_FLAGS); opflags = readl(qup->base + QUP_OPERATIONAL); if (!qup->msg) { /* Clear Error interrupt */ writel(QUP_RESET_STATE, qup->base + QUP_STATE); return IRQ_HANDLED; } bus_err &= I2C_STATUS_ERROR_MASK; qup_err &= QUP_STATUS_ERROR_FLAGS; /* Clear the error bits in QUP_ERROR_FLAGS */ if (qup_err) writel(qup_err, qup->base + QUP_ERROR_FLAGS); /* Clear the error bits in QUP_I2C_STATUS */ if (bus_err) writel(bus_err, qup->base + QUP_I2C_STATUS); /* * Check for BAM mode and returns if already error has come for current * transfer. In Error case, sometimes, QUP generates more than one * interrupt. */ if (qup->use_dma && (qup->qup_err || qup->bus_err)) return IRQ_HANDLED; /* Reset the QUP State in case of error */ if (qup_err || bus_err) { /* * Don’t reset the QUP state in case of BAM mode. The BAM * flush operation needs to be scheduled in transfer function * which will clear the remaining schedule descriptors in BAM * HW FIFO and generates the BAM interrupt. 
*/ if (!qup->use_dma) writel(QUP_RESET_STATE, qup->base + QUP_STATE); goto done; } if (opflags & QUP_OUT_SVC_FLAG) { writel(QUP_OUT_SVC_FLAG, qup->base + QUP_OPERATIONAL); if (opflags & OUT_BLOCK_WRITE_REQ) { blk->tx_fifo_free += qup->out_blk_sz; if (qup->msg->flags & I2C_M_RD) qup->write_rx_tags(qup); else qup->write_tx_fifo(qup); } } if (opflags & QUP_IN_SVC_FLAG) { writel(QUP_IN_SVC_FLAG, qup->base + QUP_OPERATIONAL); if (!blk->is_rx_blk_mode) { blk->fifo_available += qup->in_fifo_sz; qup->read_rx_fifo(qup); } else if (opflags & IN_BLOCK_READ_REQ) { blk->fifo_available += qup->in_blk_sz; qup->read_rx_fifo(qup); } } if (qup->msg->flags & I2C_M_RD) { if (!blk->rx_bytes_read) return IRQ_HANDLED; } else { /* * Ideally, QUP_MAX_OUTPUT_DONE_FLAG should be checked * for FIFO mode also. But, QUP_MAX_OUTPUT_DONE_FLAG lags * behind QUP_OUTPUT_SERVICE_FLAG sometimes. The only reason * of interrupt for write message in FIFO mode is * QUP_MAX_OUTPUT_DONE_FLAG condition. */ if (blk->is_tx_blk_mode && !(opflags & QUP_MX_OUTPUT_DONE)) return IRQ_HANDLED; } done: qup->qup_err = qup_err; qup->bus_err = bus_err; complete(&qup->xfer); return IRQ_HANDLED; } static int qup_i2c_poll_state_mask(struct qup_i2c_dev *qup, u32 req_state, u32 req_mask) { int retries = 1; u32 state; /* * State transition takes 3 AHB clocks cycles + 3 I2C master clock * cycles. So retry once after a 1uS delay. */ do { state = readl(qup->base + QUP_STATE); if (state & QUP_STATE_VALID && (state & req_mask) == req_state) return 0; udelay(1); } while (retries--); return -ETIMEDOUT; } static int qup_i2c_poll_state(struct qup_i2c_dev *qup, u32 req_state) { return qup_i2c_poll_state_mask(qup, req_state, QUP_STATE_MASK); } static void qup_i2c_flush(struct qup_i2c_dev *qup) { u32 val = readl(qup->base + QUP_STATE); val |= QUP_I2C_FLUSH; writel(val, qup->base + QUP_STATE); } static int qup_i2c_poll_state_valid(struct qup_i2c_dev *qup) { return qup_i2c_poll_state_mask(qup, 0, 0); } static int qup_i2c_poll_state_i2c_master(struct qup_i2c_dev *qup) { return qup_i2c_poll_state_mask(qup, QUP_I2C_MAST_GEN, QUP_I2C_MAST_GEN); } static int qup_i2c_change_state(struct qup_i2c_dev *qup, u32 state) { if (qup_i2c_poll_state_valid(qup) != 0) return -EIO; writel(state, qup->base + QUP_STATE); if (qup_i2c_poll_state(qup, state) != 0) return -EIO; return 0; } /* Check if I2C bus returns to IDLE state */ static int qup_i2c_bus_active(struct qup_i2c_dev *qup, int len) { unsigned long timeout; u32 status; int ret = 0; timeout = jiffies + len * 4; for (;;) { status = readl(qup->base + QUP_I2C_STATUS); if (!(status & I2C_STATUS_BUS_ACTIVE)) break; if (time_after(jiffies, timeout)) ret = -ETIMEDOUT; usleep_range(len, len * 2); } return ret; } static void qup_i2c_write_tx_fifo_v1(struct qup_i2c_dev *qup) { struct qup_i2c_block *blk = &qup->blk; struct i2c_msg *msg = qup->msg; u32 addr = i2c_8bit_addr_from_msg(msg); u32 qup_tag; int idx; u32 val; if (qup->pos == 0) { val = QUP_TAG_START | addr; idx = 1; blk->tx_fifo_free--; } else { val = 0; idx = 0; } while (blk->tx_fifo_free && qup->pos < msg->len) { if (qup->pos == msg->len - 1) qup_tag = QUP_TAG_STOP; else qup_tag = QUP_TAG_DATA; if (idx & 1) val |= (qup_tag | msg->buf[qup->pos]) << QUP_MSW_SHIFT; else val = qup_tag | msg->buf[qup->pos]; /* Write out the pair and the last odd value */ if (idx & 1 || qup->pos == msg->len - 1) writel(val, qup->base + QUP_OUT_FIFO_BASE); qup->pos++; idx++; blk->tx_fifo_free--; } } static void qup_i2c_set_blk_data(struct qup_i2c_dev *qup, struct i2c_msg *msg) { qup->blk.pos = 0; 
qup->blk.data_len = msg->len; qup->blk.count = DIV_ROUND_UP(msg->len, qup->blk_xfer_limit); } static int qup_i2c_get_data_len(struct qup_i2c_dev *qup) { int data_len; if (qup->blk.data_len > qup->blk_xfer_limit) data_len = qup->blk_xfer_limit; else data_len = qup->blk.data_len; return data_len; } static bool qup_i2c_check_msg_len(struct i2c_msg *msg) { return ((msg->flags & I2C_M_RD) && (msg->flags & I2C_M_RECV_LEN)); } static int qup_i2c_set_tags_smb(u16 addr, u8 *tags, struct qup_i2c_dev *qup, struct i2c_msg *msg) { int len = 0; if (qup->is_smbus_read) { tags[len++] = QUP_TAG_V2_DATARD_STOP; tags[len++] = qup_i2c_get_data_len(qup); } else { tags[len++] = QUP_TAG_V2_START; tags[len++] = addr & 0xff; if (msg->flags & I2C_M_TEN) tags[len++] = addr >> 8; tags[len++] = QUP_TAG_V2_DATARD; /* Read 1 byte indicating the length of the SMBus message */ tags[len++] = 1; } return len; } static int qup_i2c_set_tags(u8 *tags, struct qup_i2c_dev *qup, struct i2c_msg *msg) { u16 addr = i2c_8bit_addr_from_msg(msg); int len = 0; int data_len; int last = (qup->blk.pos == (qup->blk.count - 1)) && (qup->is_last); /* Handle tags for SMBus block read */ if (qup_i2c_check_msg_len(msg)) return qup_i2c_set_tags_smb(addr, tags, qup, msg); if (qup->blk.pos == 0) { tags[len++] = QUP_TAG_V2_START; tags[len++] = addr & 0xff; if (msg->flags & I2C_M_TEN) tags[len++] = addr >> 8; } /* Send _STOP commands for the last block */ if (last) { if (msg->flags & I2C_M_RD) tags[len++] = QUP_TAG_V2_DATARD_STOP; else tags[len++] = QUP_TAG_V2_DATAWR_STOP; } else { if (msg->flags & I2C_M_RD) tags[len++] = qup->blk.pos == (qup->blk.count - 1) ? QUP_TAG_V2_DATARD_NACK : QUP_TAG_V2_DATARD; else tags[len++] = QUP_TAG_V2_DATAWR; } data_len = qup_i2c_get_data_len(qup); /* 0 implies 256 bytes */ if (data_len == QUP_READ_LIMIT) tags[len++] = 0; else tags[len++] = data_len; return len; } static void qup_i2c_bam_cb(void *data) { struct qup_i2c_dev *qup = data; complete(&qup->xfer); } static int qup_sg_set_buf(struct scatterlist *sg, void *buf, unsigned int buflen, struct qup_i2c_dev *qup, int dir) { int ret; sg_set_buf(sg, buf, buflen); ret = dma_map_sg(qup->dev, sg, 1, dir); if (!ret) return -EINVAL; return 0; } static void qup_i2c_rel_dma(struct qup_i2c_dev *qup) { if (qup->btx.dma) dma_release_channel(qup->btx.dma); if (qup->brx.dma) dma_release_channel(qup->brx.dma); qup->btx.dma = NULL; qup->brx.dma = NULL; } static int qup_i2c_req_dma(struct qup_i2c_dev *qup) { int err; if (!qup->btx.dma) { qup->btx.dma = dma_request_chan(qup->dev, "tx"); if (IS_ERR(qup->btx.dma)) { err = PTR_ERR(qup->btx.dma); qup->btx.dma = NULL; dev_err(qup->dev, "\n tx channel not available"); return err; } } if (!qup->brx.dma) { qup->brx.dma = dma_request_chan(qup->dev, "rx"); if (IS_ERR(qup->brx.dma)) { dev_err(qup->dev, "\n rx channel not available"); err = PTR_ERR(qup->brx.dma); qup->brx.dma = NULL; qup_i2c_rel_dma(qup); return err; } } return 0; } static int qup_i2c_bam_make_desc(struct qup_i2c_dev *qup, struct i2c_msg *msg) { int ret = 0, limit = QUP_READ_LIMIT; u32 len = 0, blocks, rem; u32 i = 0, tlen, tx_len = 0; u8 *tags; qup->blk_xfer_limit = QUP_READ_LIMIT; qup_i2c_set_blk_data(qup, msg); blocks = qup->blk.count; rem = msg->len - (blocks - 1) * limit; if (msg->flags & I2C_M_RD) { while (qup->blk.pos < blocks) { tlen = (i == (blocks - 1)) ? 
rem : limit; tags = &qup->start_tag.start[qup->tag_buf_pos + len]; len += qup_i2c_set_tags(tags, qup, msg); qup->blk.data_len -= tlen; /* scratch buf to read the start and len tags */ ret = qup_sg_set_buf(&qup->brx.sg[qup->brx.sg_cnt++], &qup->brx.tag.start[0], 2, qup, DMA_FROM_DEVICE); if (ret) return ret; ret = qup_sg_set_buf(&qup->brx.sg[qup->brx.sg_cnt++], &msg->buf[limit * i], tlen, qup, DMA_FROM_DEVICE); if (ret) return ret; i++; qup->blk.pos = i; } ret = qup_sg_set_buf(&qup->btx.sg[qup->btx.sg_cnt++], &qup->start_tag.start[qup->tag_buf_pos], len, qup, DMA_TO_DEVICE); if (ret) return ret; qup->tag_buf_pos += len; } else { while (qup->blk.pos < blocks) { tlen = (i == (blocks - 1)) ? rem : limit; tags = &qup->start_tag.start[qup->tag_buf_pos + tx_len]; len = qup_i2c_set_tags(tags, qup, msg); qup->blk.data_len -= tlen; ret = qup_sg_set_buf(&qup->btx.sg[qup->btx.sg_cnt++], tags, len, qup, DMA_TO_DEVICE); if (ret) return ret; tx_len += len; ret = qup_sg_set_buf(&qup->btx.sg[qup->btx.sg_cnt++], &msg->buf[limit * i], tlen, qup, DMA_TO_DEVICE); if (ret) return ret; i++; qup->blk.pos = i; } qup->tag_buf_pos += tx_len; } return 0; } static int qup_i2c_bam_schedule_desc(struct qup_i2c_dev *qup) { struct dma_async_tx_descriptor *txd, *rxd = NULL; int ret = 0; dma_cookie_t cookie_rx, cookie_tx; u32 len = 0; u32 tx_cnt = qup->btx.sg_cnt, rx_cnt = qup->brx.sg_cnt; /* schedule the EOT and FLUSH I2C tags */ len = 1; if (rx_cnt) { qup->btx.tag.start[0] = QUP_BAM_INPUT_EOT; len++; /* scratch buf to read the BAM EOT FLUSH tags */ ret = qup_sg_set_buf(&qup->brx.sg[rx_cnt++], &qup->brx.tag.start[0], 1, qup, DMA_FROM_DEVICE); if (ret) return ret; } qup->btx.tag.start[len - 1] = QUP_BAM_FLUSH_STOP; ret = qup_sg_set_buf(&qup->btx.sg[tx_cnt++], &qup->btx.tag.start[0], len, qup, DMA_TO_DEVICE); if (ret) return ret; txd = dmaengine_prep_slave_sg(qup->btx.dma, qup->btx.sg, tx_cnt, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_PREP_FENCE); if (!txd) { dev_err(qup->dev, "failed to get tx desc\n"); ret = -EINVAL; goto desc_err; } if (!rx_cnt) { txd->callback = qup_i2c_bam_cb; txd->callback_param = qup; } cookie_tx = dmaengine_submit(txd); if (dma_submit_error(cookie_tx)) { ret = -EINVAL; goto desc_err; } dma_async_issue_pending(qup->btx.dma); if (rx_cnt) { rxd = dmaengine_prep_slave_sg(qup->brx.dma, qup->brx.sg, rx_cnt, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT); if (!rxd) { dev_err(qup->dev, "failed to get rx desc\n"); ret = -EINVAL; /* abort TX descriptors */ dmaengine_terminate_sync(qup->btx.dma); goto desc_err; } rxd->callback = qup_i2c_bam_cb; rxd->callback_param = qup; cookie_rx = dmaengine_submit(rxd); if (dma_submit_error(cookie_rx)) { ret = -EINVAL; goto desc_err; } dma_async_issue_pending(qup->brx.dma); } if (!wait_for_completion_timeout(&qup->xfer, qup->xfer_timeout)) { dev_err(qup->dev, "normal trans timed out\n"); ret = -ETIMEDOUT; } if (ret || qup->bus_err || qup->qup_err) { reinit_completion(&qup->xfer); ret = qup_i2c_change_state(qup, QUP_RUN_STATE); if (ret) { dev_err(qup->dev, "change to run state timed out"); goto desc_err; } qup_i2c_flush(qup); /* wait for remaining interrupts to occur */ if (!wait_for_completion_timeout(&qup->xfer, HZ)) dev_err(qup->dev, "flush timed out\n"); ret = (qup->bus_err & QUP_I2C_NACK_FLAG) ? 
-ENXIO : -EIO; } desc_err: dma_unmap_sg(qup->dev, qup->btx.sg, tx_cnt, DMA_TO_DEVICE); if (rx_cnt) dma_unmap_sg(qup->dev, qup->brx.sg, rx_cnt, DMA_FROM_DEVICE); return ret; } static void qup_i2c_bam_clear_tag_buffers(struct qup_i2c_dev *qup) { qup->btx.sg_cnt = 0; qup->brx.sg_cnt = 0; qup->tag_buf_pos = 0; } static int qup_i2c_bam_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num) { struct qup_i2c_dev *qup = i2c_get_adapdata(adap); int ret = 0; int idx = 0; enable_irq(qup->irq); ret = qup_i2c_req_dma(qup); if (ret) goto out; writel(0, qup->base + QUP_MX_INPUT_CNT); writel(0, qup->base + QUP_MX_OUTPUT_CNT); /* set BAM mode */ writel(QUP_REPACK_EN | QUP_BAM_MODE, qup->base + QUP_IO_MODE); /* mask fifo irqs */ writel((0x3 << 8), qup->base + QUP_OPERATIONAL_MASK); /* set RUN STATE */ ret = qup_i2c_change_state(qup, QUP_RUN_STATE); if (ret) goto out; writel(qup->clk_ctl, qup->base + QUP_I2C_CLK_CTL); qup_i2c_bam_clear_tag_buffers(qup); for (idx = 0; idx < num; idx++) { qup->msg = msg + idx; qup->is_last = idx == (num - 1); ret = qup_i2c_bam_make_desc(qup, qup->msg); if (ret) break; /* * Make DMA descriptor and schedule the BAM transfer if its * already crossed the maximum length. Since the memory for all * tags buffers have been taken for 2 maximum possible * transfers length so it will never cross the buffer actual * length. */ if (qup->btx.sg_cnt > qup->max_xfer_sg_len || qup->brx.sg_cnt > qup->max_xfer_sg_len || qup->is_last) { ret = qup_i2c_bam_schedule_desc(qup); if (ret) break; qup_i2c_bam_clear_tag_buffers(qup); } } out: disable_irq(qup->irq); qup->msg = NULL; return ret; } static int qup_i2c_wait_for_complete(struct qup_i2c_dev *qup, struct i2c_msg *msg) { unsigned long left; int ret = 0; left = wait_for_completion_timeout(&qup->xfer, qup->xfer_timeout); if (!left) { writel(1, qup->base + QUP_SW_RESET); ret = -ETIMEDOUT; } if (qup->bus_err || qup->qup_err) ret = (qup->bus_err & QUP_I2C_NACK_FLAG) ? -ENXIO : -EIO; return ret; } static void qup_i2c_read_rx_fifo_v1(struct qup_i2c_dev *qup) { struct qup_i2c_block *blk = &qup->blk; struct i2c_msg *msg = qup->msg; u32 val = 0; int idx = 0; while (blk->fifo_available && qup->pos < msg->len) { if ((idx & 1) == 0) { /* Reading 2 words at time */ val = readl(qup->base + QUP_IN_FIFO_BASE); msg->buf[qup->pos++] = val & 0xFF; } else { msg->buf[qup->pos++] = val >> QUP_MSW_SHIFT; } idx++; blk->fifo_available--; } if (qup->pos == msg->len) blk->rx_bytes_read = true; } static void qup_i2c_write_rx_tags_v1(struct qup_i2c_dev *qup) { struct i2c_msg *msg = qup->msg; u32 addr, len, val; addr = i2c_8bit_addr_from_msg(msg); /* 0 is used to specify a length 256 (QUP_READ_LIMIT) */ len = (msg->len == QUP_READ_LIMIT) ? 
0 : msg->len; val = ((QUP_TAG_REC | len) << QUP_MSW_SHIFT) | QUP_TAG_START | addr; writel(val, qup->base + QUP_OUT_FIFO_BASE); } static void qup_i2c_conf_v1(struct qup_i2c_dev *qup) { struct qup_i2c_block *blk = &qup->blk; u32 qup_config = I2C_MINI_CORE | I2C_N_VAL; u32 io_mode = QUP_REPACK_EN; blk->is_tx_blk_mode = blk->total_tx_len > qup->out_fifo_sz; blk->is_rx_blk_mode = blk->total_rx_len > qup->in_fifo_sz; if (blk->is_tx_blk_mode) { io_mode |= QUP_OUTPUT_BLK_MODE; writel(0, qup->base + QUP_MX_WRITE_CNT); writel(blk->total_tx_len, qup->base + QUP_MX_OUTPUT_CNT); } else { writel(0, qup->base + QUP_MX_OUTPUT_CNT); writel(blk->total_tx_len, qup->base + QUP_MX_WRITE_CNT); } if (blk->total_rx_len) { if (blk->is_rx_blk_mode) { io_mode |= QUP_INPUT_BLK_MODE; writel(0, qup->base + QUP_MX_READ_CNT); writel(blk->total_rx_len, qup->base + QUP_MX_INPUT_CNT); } else { writel(0, qup->base + QUP_MX_INPUT_CNT); writel(blk->total_rx_len, qup->base + QUP_MX_READ_CNT); } } else { qup_config |= QUP_NO_INPUT; } writel(qup_config, qup->base + QUP_CONFIG); writel(io_mode, qup->base + QUP_IO_MODE); } static void qup_i2c_clear_blk_v1(struct qup_i2c_block *blk) { blk->tx_fifo_free = 0; blk->fifo_available = 0; blk->rx_bytes_read = false; } static int qup_i2c_conf_xfer_v1(struct qup_i2c_dev *qup, bool is_rx) { struct qup_i2c_block *blk = &qup->blk; int ret; qup_i2c_clear_blk_v1(blk); qup_i2c_conf_v1(qup); ret = qup_i2c_change_state(qup, QUP_RUN_STATE); if (ret) return ret; writel(qup->clk_ctl, qup->base + QUP_I2C_CLK_CTL); ret = qup_i2c_change_state(qup, QUP_PAUSE_STATE); if (ret) return ret; reinit_completion(&qup->xfer); enable_irq(qup->irq); if (!blk->is_tx_blk_mode) { blk->tx_fifo_free = qup->out_fifo_sz; if (is_rx) qup_i2c_write_rx_tags_v1(qup); else qup_i2c_write_tx_fifo_v1(qup); } ret = qup_i2c_change_state(qup, QUP_RUN_STATE); if (ret) goto err; ret = qup_i2c_wait_for_complete(qup, qup->msg); if (ret) goto err; ret = qup_i2c_bus_active(qup, ONE_BYTE); err: disable_irq(qup->irq); return ret; } static int qup_i2c_write_one(struct qup_i2c_dev *qup) { struct i2c_msg *msg = qup->msg; struct qup_i2c_block *blk = &qup->blk; qup->pos = 0; blk->total_tx_len = msg->len + 1; blk->total_rx_len = 0; return qup_i2c_conf_xfer_v1(qup, false); } static int qup_i2c_read_one(struct qup_i2c_dev *qup) { struct qup_i2c_block *blk = &qup->blk; qup->pos = 0; blk->total_tx_len = 2; blk->total_rx_len = qup->msg->len; return qup_i2c_conf_xfer_v1(qup, true); } static int qup_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) { struct qup_i2c_dev *qup = i2c_get_adapdata(adap); int ret, idx; ret = pm_runtime_get_sync(qup->dev); if (ret < 0) goto out; qup->bus_err = 0; qup->qup_err = 0; writel(1, qup->base + QUP_SW_RESET); ret = qup_i2c_poll_state(qup, QUP_RESET_STATE); if (ret) goto out; /* Configure QUP as I2C mini core */ writel(I2C_MINI_CORE | I2C_N_VAL, qup->base + QUP_CONFIG); for (idx = 0; idx < num; idx++) { if (qup_i2c_poll_state_i2c_master(qup)) { ret = -EIO; goto out; } if (qup_i2c_check_msg_len(&msgs[idx])) { ret = -EINVAL; goto out; } qup->msg = &msgs[idx]; if (msgs[idx].flags & I2C_M_RD) ret = qup_i2c_read_one(qup); else ret = qup_i2c_write_one(qup); if (ret) break; ret = qup_i2c_change_state(qup, QUP_RESET_STATE); if (ret) break; } if (ret == 0) ret = num; out: pm_runtime_mark_last_busy(qup->dev); pm_runtime_put_autosuspend(qup->dev); return ret; } /* * Configure registers related with reconfiguration during run and call it * before each i2c sub transfer. 
*/ static void qup_i2c_conf_count_v2(struct qup_i2c_dev *qup) { struct qup_i2c_block *blk = &qup->blk; u32 qup_config = I2C_MINI_CORE | I2C_N_VAL_V2; if (blk->is_tx_blk_mode) writel(qup->config_run | blk->total_tx_len, qup->base + QUP_MX_OUTPUT_CNT); else writel(qup->config_run | blk->total_tx_len, qup->base + QUP_MX_WRITE_CNT); if (blk->total_rx_len) { if (blk->is_rx_blk_mode) writel(qup->config_run | blk->total_rx_len, qup->base + QUP_MX_INPUT_CNT); else writel(qup->config_run | blk->total_rx_len, qup->base + QUP_MX_READ_CNT); } else { qup_config |= QUP_NO_INPUT; } writel(qup_config, qup->base + QUP_CONFIG); } /* * Configure registers related with transfer mode (FIFO/Block) * before starting of i2c transfer. It will be called only once in * QUP RESET state. */ static void qup_i2c_conf_mode_v2(struct qup_i2c_dev *qup) { struct qup_i2c_block *blk = &qup->blk; u32 io_mode = QUP_REPACK_EN; if (blk->is_tx_blk_mode) { io_mode |= QUP_OUTPUT_BLK_MODE; writel(0, qup->base + QUP_MX_WRITE_CNT); } else { writel(0, qup->base + QUP_MX_OUTPUT_CNT); } if (blk->is_rx_blk_mode) { io_mode |= QUP_INPUT_BLK_MODE; writel(0, qup->base + QUP_MX_READ_CNT); } else { writel(0, qup->base + QUP_MX_INPUT_CNT); } writel(io_mode, qup->base + QUP_IO_MODE); } /* Clear required variables before starting of any QUP v2 sub transfer. */ static void qup_i2c_clear_blk_v2(struct qup_i2c_block *blk) { blk->send_last_word = false; blk->tx_tags_sent = false; blk->tx_fifo_data = 0; blk->tx_fifo_data_pos = 0; blk->tx_fifo_free = 0; blk->rx_tags_fetched = false; blk->rx_bytes_read = false; blk->rx_fifo_data = 0; blk->rx_fifo_data_pos = 0; blk->fifo_available = 0; } /* Receive data from RX FIFO for read message in QUP v2 i2c transfer. */ static void qup_i2c_recv_data(struct qup_i2c_dev *qup) { struct qup_i2c_block *blk = &qup->blk; int j; for (j = blk->rx_fifo_data_pos; blk->cur_blk_len && blk->fifo_available; blk->cur_blk_len--, blk->fifo_available--) { if (j == 0) blk->rx_fifo_data = readl(qup->base + QUP_IN_FIFO_BASE); *(blk->cur_data++) = blk->rx_fifo_data; blk->rx_fifo_data >>= 8; if (j == 3) j = 0; else j++; } blk->rx_fifo_data_pos = j; } /* Receive tags for read message in QUP v2 i2c transfer. */ static void qup_i2c_recv_tags(struct qup_i2c_dev *qup) { struct qup_i2c_block *blk = &qup->blk; blk->rx_fifo_data = readl(qup->base + QUP_IN_FIFO_BASE); blk->rx_fifo_data >>= blk->rx_tag_len * 8; blk->rx_fifo_data_pos = blk->rx_tag_len; blk->fifo_available -= blk->rx_tag_len; } /* * Read the data and tags from RX FIFO. Since in read case, the tags will be * preceded by received data bytes so * 1. Check if rx_tags_fetched is false i.e. the start of QUP block so receive * all tag bytes and discard that. * 2. Read the data from RX FIFO. When all the data bytes have been read then * set rx_bytes_read to true. */ static void qup_i2c_read_rx_fifo_v2(struct qup_i2c_dev *qup) { struct qup_i2c_block *blk = &qup->blk; if (!blk->rx_tags_fetched) { qup_i2c_recv_tags(qup); blk->rx_tags_fetched = true; } qup_i2c_recv_data(qup); if (!blk->cur_blk_len) blk->rx_bytes_read = true; } /* * Write bytes in TX FIFO for write message in QUP v2 i2c transfer. QUP TX FIFO * write works on word basis (4 bytes). Append new data byte write for TX FIFO * in tx_fifo_data and write to TX FIFO when all the 4 bytes are present. 
*/ static void qup_i2c_write_blk_data(struct qup_i2c_dev *qup, u8 **data, unsigned int *len) { struct qup_i2c_block *blk = &qup->blk; unsigned int j; for (j = blk->tx_fifo_data_pos; *len && blk->tx_fifo_free; (*len)--, blk->tx_fifo_free--) { blk->tx_fifo_data |= *(*data)++ << (j * 8); if (j == 3) { writel(blk->tx_fifo_data, qup->base + QUP_OUT_FIFO_BASE); blk->tx_fifo_data = 0x0; j = 0; } else { j++; } } blk->tx_fifo_data_pos = j; } /* Transfer tags for read message in QUP v2 i2c transfer. */ static void qup_i2c_write_rx_tags_v2(struct qup_i2c_dev *qup) { struct qup_i2c_block *blk = &qup->blk; qup_i2c_write_blk_data(qup, &blk->cur_tx_tags, &blk->tx_tag_len); if (blk->tx_fifo_data_pos) writel(blk->tx_fifo_data, qup->base + QUP_OUT_FIFO_BASE); } /* * Write the data and tags in TX FIFO. Since in write case, both tags and data * need to be written and QUP write tags can have maximum 256 data length, so * * 1. Check if tx_tags_sent is false i.e. the start of QUP block so write the * tags to TX FIFO and set tx_tags_sent to true. * 2. Check if send_last_word is true. It will be set when last few data bytes * (less than 4 bytes) are remaining to be written in FIFO because of no FIFO * space. All this data bytes are available in tx_fifo_data so write this * in FIFO. * 3. Write the data to TX FIFO and check for cur_blk_len. If it is non zero * then more data is pending otherwise following 3 cases can be possible * a. if tx_fifo_data_pos is zero i.e. all the data bytes in this block * have been written in TX FIFO so nothing else is required. * b. tx_fifo_free is non zero i.e tx FIFO is free so copy the remaining data * from tx_fifo_data to tx FIFO. Since, qup_i2c_write_blk_data do write * in 4 bytes and FIFO space is in multiple of 4 bytes so tx_fifo_free * will be always greater than or equal to 4 bytes. * c. tx_fifo_free is zero. In this case, last few bytes (less than 4 * bytes) are copied to tx_fifo_data but couldn't be sent because of * FIFO full so make send_last_word true. */ static void qup_i2c_write_tx_fifo_v2(struct qup_i2c_dev *qup) { struct qup_i2c_block *blk = &qup->blk; if (!blk->tx_tags_sent) { qup_i2c_write_blk_data(qup, &blk->cur_tx_tags, &blk->tx_tag_len); blk->tx_tags_sent = true; } if (blk->send_last_word) goto send_last_word; qup_i2c_write_blk_data(qup, &blk->cur_data, &blk->cur_blk_len); if (!blk->cur_blk_len) { if (!blk->tx_fifo_data_pos) return; if (blk->tx_fifo_free) goto send_last_word; blk->send_last_word = true; } return; send_last_word: writel(blk->tx_fifo_data, qup->base + QUP_OUT_FIFO_BASE); } /* * Main transfer function which read or write i2c data. * The QUP v2 supports reconfiguration during run in which multiple i2c sub * transfers can be scheduled. */ static int qup_i2c_conf_xfer_v2(struct qup_i2c_dev *qup, bool is_rx, bool is_first, bool change_pause_state) { struct qup_i2c_block *blk = &qup->blk; struct i2c_msg *msg = qup->msg; int ret; /* * Check if its SMBus Block read for which the top level read will be * done into 2 QUP reads. One with message length 1 while other one is * with actual length. */ if (qup_i2c_check_msg_len(msg)) { if (qup->is_smbus_read) { /* * If the message length is already read in * the first byte of the buffer, account for * that by setting the offset */ blk->cur_data += 1; is_first = false; } else { change_pause_state = false; } } qup->config_run = is_first ? 
0 : QUP_I2C_MX_CONFIG_DURING_RUN; qup_i2c_clear_blk_v2(blk); qup_i2c_conf_count_v2(qup); /* If it is first sub transfer, then configure i2c bus clocks */ if (is_first) { ret = qup_i2c_change_state(qup, QUP_RUN_STATE); if (ret) return ret; writel(qup->clk_ctl, qup->base + QUP_I2C_CLK_CTL); ret = qup_i2c_change_state(qup, QUP_PAUSE_STATE); if (ret) return ret; } reinit_completion(&qup->xfer); enable_irq(qup->irq); /* * In FIFO mode, tx FIFO can be written directly while in block mode the * it will be written after getting OUT_BLOCK_WRITE_REQ interrupt */ if (!blk->is_tx_blk_mode) { blk->tx_fifo_free = qup->out_fifo_sz; if (is_rx) qup_i2c_write_rx_tags_v2(qup); else qup_i2c_write_tx_fifo_v2(qup); } ret = qup_i2c_change_state(qup, QUP_RUN_STATE); if (ret) goto err; ret = qup_i2c_wait_for_complete(qup, msg); if (ret) goto err; /* Move to pause state for all the transfers, except last one */ if (change_pause_state) { ret = qup_i2c_change_state(qup, QUP_PAUSE_STATE); if (ret) goto err; } err: disable_irq(qup->irq); return ret; } /* * Transfer one read/write message in i2c transfer. It splits the message into * multiple of blk_xfer_limit data length blocks and schedule each * QUP block individually. */ static int qup_i2c_xfer_v2_msg(struct qup_i2c_dev *qup, int msg_id, bool is_rx) { int ret = 0; unsigned int data_len, i; struct i2c_msg *msg = qup->msg; struct qup_i2c_block *blk = &qup->blk; u8 *msg_buf = msg->buf; qup->blk_xfer_limit = is_rx ? RECV_MAX_DATA_LEN : QUP_READ_LIMIT; qup_i2c_set_blk_data(qup, msg); for (i = 0; i < blk->count; i++) { data_len = qup_i2c_get_data_len(qup); blk->pos = i; blk->cur_tx_tags = blk->tags; blk->cur_blk_len = data_len; blk->tx_tag_len = qup_i2c_set_tags(blk->cur_tx_tags, qup, qup->msg); blk->cur_data = msg_buf; if (is_rx) { blk->total_tx_len = blk->tx_tag_len; blk->rx_tag_len = 2; blk->total_rx_len = blk->rx_tag_len + data_len; } else { blk->total_tx_len = blk->tx_tag_len + data_len; blk->total_rx_len = 0; } ret = qup_i2c_conf_xfer_v2(qup, is_rx, !msg_id && !i, !qup->is_last || i < blk->count - 1); if (ret) return ret; /* Handle SMBus block read length */ if (qup_i2c_check_msg_len(msg) && msg->len == 1 && !qup->is_smbus_read) { if (msg->buf[0] > I2C_SMBUS_BLOCK_MAX) return -EPROTO; msg->len = msg->buf[0]; qup->is_smbus_read = true; ret = qup_i2c_xfer_v2_msg(qup, msg_id, true); qup->is_smbus_read = false; if (ret) return ret; msg->len += 1; } msg_buf += data_len; blk->data_len -= qup->blk_xfer_limit; } return ret; } /* * QUP v2 supports 3 modes * Programmed IO using FIFO mode : Less than FIFO size * Programmed IO using Block mode : Greater than FIFO size * DMA using BAM : Appropriate for any transaction size but the address should * be DMA applicable * * This function determines the mode which will be used for this transfer. An * i2c transfer contains multiple message. Following are the rules to determine * the mode used. * 1. Determine complete length, maximum tx and rx length for complete transfer. * 2. If complete transfer length is greater than fifo size then use the DMA * mode. * 3. In FIFO or block mode, tx and rx can operate in different mode so check * for maximum tx and rx length to determine mode. 
*/ static int qup_i2c_determine_mode_v2(struct qup_i2c_dev *qup, struct i2c_msg msgs[], int num) { int idx; bool no_dma = false; unsigned int max_tx_len = 0, max_rx_len = 0, total_len = 0; /* All i2c_msgs should be transferred using either dma or cpu */ for (idx = 0; idx < num; idx++) { if (msgs[idx].flags & I2C_M_RD) max_rx_len = max_t(unsigned int, max_rx_len, msgs[idx].len); else max_tx_len = max_t(unsigned int, max_tx_len, msgs[idx].len); if (is_vmalloc_addr(msgs[idx].buf)) no_dma = true; total_len += msgs[idx].len; } if (!no_dma && qup->is_dma && (total_len > qup->out_fifo_sz || total_len > qup->in_fifo_sz)) { qup->use_dma = true; } else { qup->blk.is_tx_blk_mode = max_tx_len > qup->out_fifo_sz - QUP_MAX_TAGS_LEN; qup->blk.is_rx_blk_mode = max_rx_len > qup->in_fifo_sz - READ_RX_TAGS_LEN; } return 0; } static int qup_i2c_xfer_v2(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) { struct qup_i2c_dev *qup = i2c_get_adapdata(adap); int ret, idx = 0; qup->bus_err = 0; qup->qup_err = 0; ret = pm_runtime_get_sync(qup->dev); if (ret < 0) goto out; ret = qup_i2c_determine_mode_v2(qup, msgs, num); if (ret) goto out; writel(1, qup->base + QUP_SW_RESET); ret = qup_i2c_poll_state(qup, QUP_RESET_STATE); if (ret) goto out; /* Configure QUP as I2C mini core */ writel(I2C_MINI_CORE | I2C_N_VAL_V2, qup->base + QUP_CONFIG); writel(QUP_V2_TAGS_EN, qup->base + QUP_I2C_MASTER_GEN); if (qup_i2c_poll_state_i2c_master(qup)) { ret = -EIO; goto out; } if (qup->use_dma) { reinit_completion(&qup->xfer); ret = qup_i2c_bam_xfer(adap, &msgs[0], num); qup->use_dma = false; } else { qup_i2c_conf_mode_v2(qup); for (idx = 0; idx < num; idx++) { qup->msg = &msgs[idx]; qup->is_last = idx == (num - 1); ret = qup_i2c_xfer_v2_msg(qup, idx, !!(msgs[idx].flags & I2C_M_RD)); if (ret) break; } qup->msg = NULL; } if (!ret) ret = qup_i2c_bus_active(qup, ONE_BYTE); if (!ret) qup_i2c_change_state(qup, QUP_RESET_STATE); if (ret == 0) ret = num; out: pm_runtime_mark_last_busy(qup->dev); pm_runtime_put_autosuspend(qup->dev); return ret; } static u32 qup_i2c_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL_ALL & ~I2C_FUNC_SMBUS_QUICK); } static const struct i2c_algorithm qup_i2c_algo = { .master_xfer = qup_i2c_xfer, .functionality = qup_i2c_func, }; static const struct i2c_algorithm qup_i2c_algo_v2 = { .master_xfer = qup_i2c_xfer_v2, .functionality = qup_i2c_func, }; /* * The QUP block will issue a NACK and STOP on the bus when reaching * the end of the read, the length of the read is specified as one byte * which limits the possible read to 256 (QUP_READ_LIMIT) bytes. 
*/ static const struct i2c_adapter_quirks qup_i2c_quirks = { .flags = I2C_AQ_NO_ZERO_LEN, .max_read_len = QUP_READ_LIMIT, }; static const struct i2c_adapter_quirks qup_i2c_quirks_v2 = { .flags = I2C_AQ_NO_ZERO_LEN, }; static void qup_i2c_enable_clocks(struct qup_i2c_dev *qup) { clk_prepare_enable(qup->clk); clk_prepare_enable(qup->pclk); } static void qup_i2c_disable_clocks(struct qup_i2c_dev *qup) { u32 config; qup_i2c_change_state(qup, QUP_RESET_STATE); clk_disable_unprepare(qup->clk); config = readl(qup->base + QUP_CONFIG); config |= QUP_CLOCK_AUTO_GATE; writel(config, qup->base + QUP_CONFIG); clk_disable_unprepare(qup->pclk); } static const struct acpi_device_id qup_i2c_acpi_match[] = { { "QCOM8010"}, { }, }; MODULE_DEVICE_TABLE(acpi, qup_i2c_acpi_match); static int qup_i2c_probe(struct platform_device *pdev) { static const int blk_sizes[] = {4, 16, 32}; struct qup_i2c_dev *qup; unsigned long one_bit_t; u32 io_mode, hw_ver, size; int ret, fs_div, hs_div; u32 src_clk_freq = DEFAULT_SRC_CLK; u32 clk_freq = DEFAULT_CLK_FREQ; int blocks; bool is_qup_v1; qup = devm_kzalloc(&pdev->dev, sizeof(*qup), GFP_KERNEL); if (!qup) return -ENOMEM; qup->dev = &pdev->dev; init_completion(&qup->xfer); platform_set_drvdata(pdev, qup); if (scl_freq) { dev_notice(qup->dev, "Using override frequency of %u\n", scl_freq); clk_freq = scl_freq; } else { ret = device_property_read_u32(qup->dev, "clock-frequency", &clk_freq); if (ret) { dev_notice(qup->dev, "using default clock-frequency %d", DEFAULT_CLK_FREQ); } } if (of_device_is_compatible(pdev->dev.of_node, "qcom,i2c-qup-v1.1.1")) { qup->adap.algo = &qup_i2c_algo; qup->adap.quirks = &qup_i2c_quirks; is_qup_v1 = true; } else { qup->adap.algo = &qup_i2c_algo_v2; qup->adap.quirks = &qup_i2c_quirks_v2; is_qup_v1 = false; if (acpi_match_device(qup_i2c_acpi_match, qup->dev)) goto nodma; else ret = qup_i2c_req_dma(qup); if (ret == -EPROBE_DEFER) goto fail_dma; else if (ret != 0) goto nodma; qup->max_xfer_sg_len = (MX_BLOCKS << 1); blocks = (MX_DMA_BLOCKS << 1) + 1; qup->btx.sg = devm_kcalloc(&pdev->dev, blocks, sizeof(*qup->btx.sg), GFP_KERNEL); if (!qup->btx.sg) { ret = -ENOMEM; goto fail_dma; } sg_init_table(qup->btx.sg, blocks); qup->brx.sg = devm_kcalloc(&pdev->dev, blocks, sizeof(*qup->brx.sg), GFP_KERNEL); if (!qup->brx.sg) { ret = -ENOMEM; goto fail_dma; } sg_init_table(qup->brx.sg, blocks); /* 2 tag bytes for each block + 5 for start, stop tags */ size = blocks * 2 + 5; qup->start_tag.start = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); if (!qup->start_tag.start) { ret = -ENOMEM; goto fail_dma; } qup->brx.tag.start = devm_kzalloc(&pdev->dev, 2, GFP_KERNEL); if (!qup->brx.tag.start) { ret = -ENOMEM; goto fail_dma; } qup->btx.tag.start = devm_kzalloc(&pdev->dev, 2, GFP_KERNEL); if (!qup->btx.tag.start) { ret = -ENOMEM; goto fail_dma; } qup->is_dma = true; } nodma: /* We support frequencies up to FAST Mode Plus (1MHz) */ if (!clk_freq || clk_freq > I2C_MAX_FAST_MODE_PLUS_FREQ) { dev_err(qup->dev, "clock frequency not supported %d\n", clk_freq); ret = -EINVAL; goto fail_dma; } qup->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(qup->base)) { ret = PTR_ERR(qup->base); goto fail_dma; } qup->irq = platform_get_irq(pdev, 0); if (qup->irq < 0) { ret = qup->irq; goto fail_dma; } if (has_acpi_companion(qup->dev)) { ret = device_property_read_u32(qup->dev, "src-clock-hz", &src_clk_freq); if (ret) { dev_notice(qup->dev, "using default src-clock-hz %d", DEFAULT_SRC_CLK); } ACPI_COMPANION_SET(&qup->adap.dev, ACPI_COMPANION(qup->dev)); } else { qup->clk = 
devm_clk_get(qup->dev, "core"); if (IS_ERR(qup->clk)) { dev_err(qup->dev, "Could not get core clock\n"); ret = PTR_ERR(qup->clk); goto fail_dma; } qup->pclk = devm_clk_get(qup->dev, "iface"); if (IS_ERR(qup->pclk)) { dev_err(qup->dev, "Could not get iface clock\n"); ret = PTR_ERR(qup->pclk); goto fail_dma; } qup_i2c_enable_clocks(qup); src_clk_freq = clk_get_rate(qup->clk); } /* * Bootloaders might leave a pending interrupt on certain QUP's, * so we reset the core before registering for interrupts. */ writel(1, qup->base + QUP_SW_RESET); ret = qup_i2c_poll_state_valid(qup); if (ret) goto fail; ret = devm_request_irq(qup->dev, qup->irq, qup_i2c_interrupt, IRQF_TRIGGER_HIGH | IRQF_NO_AUTOEN, "i2c_qup", qup); if (ret) { dev_err(qup->dev, "Request %d IRQ failed\n", qup->irq); goto fail; } hw_ver = readl(qup->base + QUP_HW_VERSION); dev_dbg(qup->dev, "Revision %x\n", hw_ver); io_mode = readl(qup->base + QUP_IO_MODE); /* * The block/fifo size w.r.t. 'actual data' is 1/2 due to 'tag' * associated with each byte written/received */ size = QUP_OUTPUT_BLOCK_SIZE(io_mode); if (size >= ARRAY_SIZE(blk_sizes)) { ret = -EIO; goto fail; } qup->out_blk_sz = blk_sizes[size]; size = QUP_INPUT_BLOCK_SIZE(io_mode); if (size >= ARRAY_SIZE(blk_sizes)) { ret = -EIO; goto fail; } qup->in_blk_sz = blk_sizes[size]; if (is_qup_v1) { /* * in QUP v1, QUP_CONFIG uses N as 15 i.e 16 bits constitutes a * single transfer but the block size is in bytes so divide the * in_blk_sz and out_blk_sz by 2 */ qup->in_blk_sz /= 2; qup->out_blk_sz /= 2; qup->write_tx_fifo = qup_i2c_write_tx_fifo_v1; qup->read_rx_fifo = qup_i2c_read_rx_fifo_v1; qup->write_rx_tags = qup_i2c_write_rx_tags_v1; } else { qup->write_tx_fifo = qup_i2c_write_tx_fifo_v2; qup->read_rx_fifo = qup_i2c_read_rx_fifo_v2; qup->write_rx_tags = qup_i2c_write_rx_tags_v2; } size = QUP_OUTPUT_FIFO_SIZE(io_mode); qup->out_fifo_sz = qup->out_blk_sz * (2 << size); size = QUP_INPUT_FIFO_SIZE(io_mode); qup->in_fifo_sz = qup->in_blk_sz * (2 << size); hs_div = 3; if (clk_freq <= I2C_MAX_STANDARD_MODE_FREQ) { fs_div = ((src_clk_freq / clk_freq) / 2) - 3; qup->clk_ctl = (hs_div << 8) | (fs_div & 0xff); } else { /* 33%/66% duty cycle */ fs_div = ((src_clk_freq / clk_freq) - 6) * 2 / 3; qup->clk_ctl = ((fs_div / 2) << 16) | (hs_div << 8) | (fs_div & 0xff); } /* * Time it takes for a byte to be clocked out on the bus. * Each byte takes 9 clock cycles (8 bits + 1 ack). 
*/ one_bit_t = (USEC_PER_SEC / clk_freq) + 1; qup->one_byte_t = one_bit_t * 9; qup->xfer_timeout = TOUT_MIN * HZ + usecs_to_jiffies(MX_DMA_TX_RX_LEN * qup->one_byte_t); dev_dbg(qup->dev, "IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n", qup->in_blk_sz, qup->in_fifo_sz, qup->out_blk_sz, qup->out_fifo_sz); i2c_set_adapdata(&qup->adap, qup); qup->adap.dev.parent = qup->dev; qup->adap.dev.of_node = pdev->dev.of_node; qup->is_last = true; strscpy(qup->adap.name, "QUP I2C adapter", sizeof(qup->adap.name)); pm_runtime_set_autosuspend_delay(qup->dev, MSEC_PER_SEC); pm_runtime_use_autosuspend(qup->dev); pm_runtime_set_active(qup->dev); pm_runtime_enable(qup->dev); ret = i2c_add_adapter(&qup->adap); if (ret) goto fail_runtime; return 0; fail_runtime: pm_runtime_disable(qup->dev); pm_runtime_set_suspended(qup->dev); fail: qup_i2c_disable_clocks(qup); fail_dma: if (qup->btx.dma) dma_release_channel(qup->btx.dma); if (qup->brx.dma) dma_release_channel(qup->brx.dma); return ret; } static void qup_i2c_remove(struct platform_device *pdev) { struct qup_i2c_dev *qup = platform_get_drvdata(pdev); if (qup->is_dma) { dma_release_channel(qup->btx.dma); dma_release_channel(qup->brx.dma); } disable_irq(qup->irq); qup_i2c_disable_clocks(qup); i2c_del_adapter(&qup->adap); pm_runtime_disable(qup->dev); pm_runtime_set_suspended(qup->dev); } static int qup_i2c_pm_suspend_runtime(struct device *device) { struct qup_i2c_dev *qup = dev_get_drvdata(device); dev_dbg(device, "pm_runtime: suspending...\n"); qup_i2c_disable_clocks(qup); return 0; } static int qup_i2c_pm_resume_runtime(struct device *device) { struct qup_i2c_dev *qup = dev_get_drvdata(device); dev_dbg(device, "pm_runtime: resuming...\n"); qup_i2c_enable_clocks(qup); return 0; } static int qup_i2c_suspend(struct device *device) { if (!pm_runtime_suspended(device)) return qup_i2c_pm_suspend_runtime(device); return 0; } static int qup_i2c_resume(struct device *device) { qup_i2c_pm_resume_runtime(device); pm_runtime_mark_last_busy(device); pm_request_autosuspend(device); return 0; } static const struct dev_pm_ops qup_i2c_qup_pm_ops = { SYSTEM_SLEEP_PM_OPS(qup_i2c_suspend, qup_i2c_resume) RUNTIME_PM_OPS(qup_i2c_pm_suspend_runtime, qup_i2c_pm_resume_runtime, NULL) }; static const struct of_device_id qup_i2c_dt_match[] = { { .compatible = "qcom,i2c-qup-v1.1.1" }, { .compatible = "qcom,i2c-qup-v2.1.1" }, { .compatible = "qcom,i2c-qup-v2.2.1" }, {} }; MODULE_DEVICE_TABLE(of, qup_i2c_dt_match); static struct platform_driver qup_i2c_driver = { .probe = qup_i2c_probe, .remove_new = qup_i2c_remove, .driver = { .name = "i2c_qup", .pm = pm_ptr(&qup_i2c_qup_pm_ops), .of_match_table = qup_i2c_dt_match, .acpi_match_table = ACPI_PTR(qup_i2c_acpi_match), }, }; module_platform_driver(qup_i2c_driver); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:i2c_qup");
linux-master
drivers/i2c/busses/i2c-qup.c
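As a worked example of the clock-divider arithmetic in qup_i2c_probe() above, the standalone sketch below (an illustration, not driver code) reproduces the QUP_I2C_CLK_CTL computation using the driver's DEFAULT_SRC_CLK of 20 MHz; for a 100 kHz bus it gives fs_div = 97 and clk_ctl = 0x361, and for 400 kHz it gives fs_div = 29 and clk_ctl = 0xe031d.

/*
 * Standalone sketch of the QUP_I2C_CLK_CTL arithmetic performed in
 * qup_i2c_probe(); it only mirrors the expressions shown above and
 * prints the resulting register values for inspection.
 */
#include <stdio.h>

static unsigned int qup_clk_ctl(unsigned int src_clk_freq, unsigned int clk_freq)
{
	int hs_div = 3;
	int fs_div;

	if (clk_freq <= 100000) {	/* standard mode */
		fs_div = ((src_clk_freq / clk_freq) / 2) - 3;
		return (hs_div << 8) | (fs_div & 0xff);
	}
	/* fast / fast-plus mode, 33%/66% duty cycle as in the driver */
	fs_div = ((src_clk_freq / clk_freq) - 6) * 2 / 3;
	return ((fs_div / 2) << 16) | (hs_div << 8) | (fs_div & 0xff);
}

int main(void)
{
	/* 20 MHz matches DEFAULT_SRC_CLK; 100 kHz and 400 kHz are typical buses */
	printf("100 kHz: clk_ctl = 0x%x\n", qup_clk_ctl(20000000, 100000));
	printf("400 kHz: clk_ctl = 0x%x\n", qup_clk_ctl(20000000, 400000));
	return 0;
}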
// SPDX-License-Identifier: GPL-2.0-only /* * i2c-ali1563.c - i2c driver for the ALi 1563 Southbridge * * Copyright (C) 2004 Patrick Mochel * 2005 Rudolf Marek <[email protected]> * * The 1563 southbridge is deceptively similar to the 1533, with a * few notable exceptions. One of those happens to be the fact they * upgraded the i2c core to be 2.0 compliant, and happens to be almost * identical to the i2c controller found in the Intel 801 south * bridges. * * This driver is based on a mix of the 15x3, 1535, and i801 drivers, * with a little help from the ALi 1563 spec. */ #include <linux/module.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/pci.h> #include <linux/acpi.h> #define ALI1563_MAX_TIMEOUT 500 #define ALI1563_SMBBA 0x80 #define ALI1563_SMB_IOEN 1 #define ALI1563_SMB_HOSTEN 2 #define ALI1563_SMB_IOSIZE 16 #define SMB_HST_STS (ali1563_smba + 0) #define SMB_HST_CNTL1 (ali1563_smba + 1) #define SMB_HST_CNTL2 (ali1563_smba + 2) #define SMB_HST_CMD (ali1563_smba + 3) #define SMB_HST_ADD (ali1563_smba + 4) #define SMB_HST_DAT0 (ali1563_smba + 5) #define SMB_HST_DAT1 (ali1563_smba + 6) #define SMB_BLK_DAT (ali1563_smba + 7) #define HST_STS_BUSY 0x01 #define HST_STS_INTR 0x02 #define HST_STS_DEVERR 0x04 #define HST_STS_BUSERR 0x08 #define HST_STS_FAIL 0x10 #define HST_STS_DONE 0x80 #define HST_STS_BAD 0x1c #define HST_CNTL1_TIMEOUT 0x80 #define HST_CNTL1_LAST 0x40 #define HST_CNTL2_KILL 0x04 #define HST_CNTL2_START 0x40 #define HST_CNTL2_QUICK 0x00 #define HST_CNTL2_BYTE 0x01 #define HST_CNTL2_BYTE_DATA 0x02 #define HST_CNTL2_WORD_DATA 0x03 #define HST_CNTL2_BLOCK 0x05 #define HST_CNTL2_SIZEMASK 0x38 static struct pci_driver ali1563_pci_driver; static unsigned short ali1563_smba; static int ali1563_transaction(struct i2c_adapter *a, int size) { u32 data; int timeout; int status = -EIO; dev_dbg(&a->dev, "Transaction (pre): STS=%02x, CNTL1=%02x, " "CNTL2=%02x, CMD=%02x, ADD=%02x, DAT0=%02x, DAT1=%02x\n", inb_p(SMB_HST_STS), inb_p(SMB_HST_CNTL1), inb_p(SMB_HST_CNTL2), inb_p(SMB_HST_CMD), inb_p(SMB_HST_ADD), inb_p(SMB_HST_DAT0), inb_p(SMB_HST_DAT1)); data = inb_p(SMB_HST_STS); if (data & HST_STS_BAD) { dev_err(&a->dev, "ali1563: Trying to reset busy device\n"); outb_p(data | HST_STS_BAD, SMB_HST_STS); data = inb_p(SMB_HST_STS); if (data & HST_STS_BAD) return -EBUSY; } outb_p(inb_p(SMB_HST_CNTL2) | HST_CNTL2_START, SMB_HST_CNTL2); timeout = ALI1563_MAX_TIMEOUT; do { msleep(1); } while (((data = inb_p(SMB_HST_STS)) & HST_STS_BUSY) && --timeout); dev_dbg(&a->dev, "Transaction (post): STS=%02x, CNTL1=%02x, " "CNTL2=%02x, CMD=%02x, ADD=%02x, DAT0=%02x, DAT1=%02x\n", inb_p(SMB_HST_STS), inb_p(SMB_HST_CNTL1), inb_p(SMB_HST_CNTL2), inb_p(SMB_HST_CMD), inb_p(SMB_HST_ADD), inb_p(SMB_HST_DAT0), inb_p(SMB_HST_DAT1)); if (timeout && !(data & HST_STS_BAD)) return 0; if (!timeout) { dev_err(&a->dev, "Timeout - Trying to KILL transaction!\n"); /* Issue 'kill' to host controller */ outb_p(HST_CNTL2_KILL, SMB_HST_CNTL2); data = inb_p(SMB_HST_STS); status = -ETIMEDOUT; } /* device error - no response, ignore the autodetection case */ if (data & HST_STS_DEVERR) { if (size != HST_CNTL2_QUICK) dev_err(&a->dev, "Device error!\n"); status = -ENXIO; } /* bus collision */ if (data & HST_STS_BUSERR) { dev_err(&a->dev, "Bus collision!\n"); /* Issue timeout, hoping it helps */ outb_p(HST_CNTL1_TIMEOUT, SMB_HST_CNTL1); } if (data & HST_STS_FAIL) { dev_err(&a->dev, "Cleaning fail after KILL!\n"); outb_p(0x0, SMB_HST_CNTL2); } return status; } static int ali1563_block_start(struct i2c_adapter *a) { u32 
data; int timeout; int status = -EIO; dev_dbg(&a->dev, "Block (pre): STS=%02x, CNTL1=%02x, " "CNTL2=%02x, CMD=%02x, ADD=%02x, DAT0=%02x, DAT1=%02x\n", inb_p(SMB_HST_STS), inb_p(SMB_HST_CNTL1), inb_p(SMB_HST_CNTL2), inb_p(SMB_HST_CMD), inb_p(SMB_HST_ADD), inb_p(SMB_HST_DAT0), inb_p(SMB_HST_DAT1)); data = inb_p(SMB_HST_STS); if (data & HST_STS_BAD) { dev_warn(&a->dev, "ali1563: Trying to reset busy device\n"); outb_p(data | HST_STS_BAD, SMB_HST_STS); data = inb_p(SMB_HST_STS); if (data & HST_STS_BAD) return -EBUSY; } /* Clear byte-ready bit */ outb_p(data | HST_STS_DONE, SMB_HST_STS); /* Start transaction and wait for byte-ready bit to be set */ outb_p(inb_p(SMB_HST_CNTL2) | HST_CNTL2_START, SMB_HST_CNTL2); timeout = ALI1563_MAX_TIMEOUT; do { msleep(1); } while (!((data = inb_p(SMB_HST_STS)) & HST_STS_DONE) && --timeout); dev_dbg(&a->dev, "Block (post): STS=%02x, CNTL1=%02x, " "CNTL2=%02x, CMD=%02x, ADD=%02x, DAT0=%02x, DAT1=%02x\n", inb_p(SMB_HST_STS), inb_p(SMB_HST_CNTL1), inb_p(SMB_HST_CNTL2), inb_p(SMB_HST_CMD), inb_p(SMB_HST_ADD), inb_p(SMB_HST_DAT0), inb_p(SMB_HST_DAT1)); if (timeout && !(data & HST_STS_BAD)) return 0; if (timeout == 0) status = -ETIMEDOUT; if (data & HST_STS_DEVERR) status = -ENXIO; dev_err(&a->dev, "SMBus Error: %s%s%s%s%s\n", timeout ? "" : "Timeout ", data & HST_STS_FAIL ? "Transaction Failed " : "", data & HST_STS_BUSERR ? "No response or Bus Collision " : "", data & HST_STS_DEVERR ? "Device Error " : "", !(data & HST_STS_DONE) ? "Transaction Never Finished " : ""); return status; } static int ali1563_block(struct i2c_adapter *a, union i2c_smbus_data *data, u8 rw) { int i, len; int error = 0; /* Do we need this? */ outb_p(HST_CNTL1_LAST, SMB_HST_CNTL1); if (rw == I2C_SMBUS_WRITE) { len = data->block[0]; if (len < 1) len = 1; else if (len > 32) len = 32; outb_p(len, SMB_HST_DAT0); outb_p(data->block[1], SMB_BLK_DAT); } else len = 32; outb_p(inb_p(SMB_HST_CNTL2) | HST_CNTL2_BLOCK, SMB_HST_CNTL2); for (i = 0; i < len; i++) { if (rw == I2C_SMBUS_WRITE) { outb_p(data->block[i + 1], SMB_BLK_DAT); error = ali1563_block_start(a); if (error) break; } else { error = ali1563_block_start(a); if (error) break; if (i == 0) { len = inb_p(SMB_HST_DAT0); if (len < 1) len = 1; else if (len > 32) len = 32; } data->block[i+1] = inb_p(SMB_BLK_DAT); } } /* Do we need this? */ outb_p(HST_CNTL1_LAST, SMB_HST_CNTL1); return error; } static s32 ali1563_access(struct i2c_adapter *a, u16 addr, unsigned short flags, char rw, u8 cmd, int size, union i2c_smbus_data *data) { int error = 0; int timeout; u32 reg; for (timeout = ALI1563_MAX_TIMEOUT; timeout; timeout--) { reg = inb_p(SMB_HST_STS); if (!(reg & HST_STS_BUSY)) break; } if (!timeout) dev_warn(&a->dev, "SMBus not idle. HST_STS = %02x\n", reg); outb_p(0xff, SMB_HST_STS); /* Map the size to what the chip understands */ switch (size) { case I2C_SMBUS_QUICK: size = HST_CNTL2_QUICK; break; case I2C_SMBUS_BYTE: size = HST_CNTL2_BYTE; break; case I2C_SMBUS_BYTE_DATA: size = HST_CNTL2_BYTE_DATA; break; case I2C_SMBUS_WORD_DATA: size = HST_CNTL2_WORD_DATA; break; case I2C_SMBUS_BLOCK_DATA: size = HST_CNTL2_BLOCK; break; default: dev_warn(&a->dev, "Unsupported transaction %d\n", size); error = -EOPNOTSUPP; goto Done; } outb_p(((addr & 0x7f) << 1) | (rw & 0x01), SMB_HST_ADD); outb_p((inb_p(SMB_HST_CNTL2) & ~HST_CNTL2_SIZEMASK) | (size << 3), SMB_HST_CNTL2); /* Write the command register */ switch (size) { case HST_CNTL2_BYTE: if (rw == I2C_SMBUS_WRITE) /* Beware it uses DAT0 register and not CMD! 
*/ outb_p(cmd, SMB_HST_DAT0); break; case HST_CNTL2_BYTE_DATA: outb_p(cmd, SMB_HST_CMD); if (rw == I2C_SMBUS_WRITE) outb_p(data->byte, SMB_HST_DAT0); break; case HST_CNTL2_WORD_DATA: outb_p(cmd, SMB_HST_CMD); if (rw == I2C_SMBUS_WRITE) { outb_p(data->word & 0xff, SMB_HST_DAT0); outb_p((data->word & 0xff00) >> 8, SMB_HST_DAT1); } break; case HST_CNTL2_BLOCK: outb_p(cmd, SMB_HST_CMD); error = ali1563_block(a, data, rw); goto Done; } error = ali1563_transaction(a, size); if (error) goto Done; if ((rw == I2C_SMBUS_WRITE) || (size == HST_CNTL2_QUICK)) goto Done; switch (size) { case HST_CNTL2_BYTE: /* Result put in SMBHSTDAT0 */ data->byte = inb_p(SMB_HST_DAT0); break; case HST_CNTL2_BYTE_DATA: data->byte = inb_p(SMB_HST_DAT0); break; case HST_CNTL2_WORD_DATA: data->word = inb_p(SMB_HST_DAT0) + (inb_p(SMB_HST_DAT1) << 8); break; } Done: return error; } static u32 ali1563_func(struct i2c_adapter *a) { return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA; } static int ali1563_setup(struct pci_dev *dev) { u16 ctrl; pci_read_config_word(dev, ALI1563_SMBBA, &ctrl); /* SMB I/O Base in high 12 bits and must be aligned with the * size of the I/O space. */ ali1563_smba = ctrl & ~(ALI1563_SMB_IOSIZE - 1); if (!ali1563_smba) { dev_warn(&dev->dev, "ali1563_smba Uninitialized\n"); goto Err; } /* Check if device is enabled */ if (!(ctrl & ALI1563_SMB_HOSTEN)) { dev_warn(&dev->dev, "Host Controller not enabled\n"); goto Err; } if (!(ctrl & ALI1563_SMB_IOEN)) { dev_warn(&dev->dev, "I/O space not enabled, trying manually\n"); pci_write_config_word(dev, ALI1563_SMBBA, ctrl | ALI1563_SMB_IOEN); pci_read_config_word(dev, ALI1563_SMBBA, &ctrl); if (!(ctrl & ALI1563_SMB_IOEN)) { dev_err(&dev->dev, "I/O space still not enabled, giving up\n"); goto Err; } } if (acpi_check_region(ali1563_smba, ALI1563_SMB_IOSIZE, ali1563_pci_driver.name)) goto Err; if (!request_region(ali1563_smba, ALI1563_SMB_IOSIZE, ali1563_pci_driver.name)) { dev_err(&dev->dev, "Could not allocate I/O space at 0x%04x\n", ali1563_smba); goto Err; } dev_info(&dev->dev, "Found ALi1563 SMBus at 0x%04x\n", ali1563_smba); return 0; Err: return -ENODEV; } static void ali1563_shutdown(struct pci_dev *dev) { release_region(ali1563_smba, ALI1563_SMB_IOSIZE); } static const struct i2c_algorithm ali1563_algorithm = { .smbus_xfer = ali1563_access, .functionality = ali1563_func, }; static struct i2c_adapter ali1563_adapter = { .owner = THIS_MODULE, .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, .algo = &ali1563_algorithm, }; static int ali1563_probe(struct pci_dev *dev, const struct pci_device_id *id_table) { int error; error = ali1563_setup(dev); if (error) goto exit; ali1563_adapter.dev.parent = &dev->dev; snprintf(ali1563_adapter.name, sizeof(ali1563_adapter.name), "SMBus ALi 1563 Adapter @ %04x", ali1563_smba); error = i2c_add_adapter(&ali1563_adapter); if (error) goto exit_shutdown; return 0; exit_shutdown: ali1563_shutdown(dev); exit: dev_warn(&dev->dev, "ALi1563 SMBus probe failed (%d)\n", error); return error; } static void ali1563_remove(struct pci_dev *dev) { i2c_del_adapter(&ali1563_adapter); ali1563_shutdown(dev); } static const struct pci_device_id ali1563_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1563) }, {}, }; MODULE_DEVICE_TABLE(pci, ali1563_id_table); static struct pci_driver ali1563_pci_driver = { .name = "ali1563_smbus", .id_table = ali1563_id_table, .probe = ali1563_probe, .remove = ali1563_remove, }; module_pci_driver(ali1563_pci_driver); 
MODULE_LICENSE("GPL");
linux-master
drivers/i2c/busses/i2c-ali1563.c
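/*
 * Illustrative userspace sketch (not part of the driver above): reading one
 * SMBus byte-data register through the adapter that i2c-ali1563 registers,
 * using the raw I2C_SMBUS ioctl. The bus number (/dev/i2c-0), slave address
 * 0x50 and register 0x00 are assumptions chosen only for this example.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

int main(void)
{
	union i2c_smbus_data data;
	struct i2c_smbus_ioctl_data args = {
		.read_write = I2C_SMBUS_READ,
		.command = 0x00,			/* register offset (assumed) */
		.size = I2C_SMBUS_BYTE_DATA,
		.data = &data,
	};
	int fd = open("/dev/i2c-0", O_RDWR);		/* assumed bus number */

	if (fd < 0)
		return 1;
	if (ioctl(fd, I2C_SLAVE, 0x50) < 0 ||		/* assumed slave address */
	    ioctl(fd, I2C_SMBUS, &args) < 0) {
		close(fd);
		return 1;
	}
	printf("reg 0x00 = 0x%02x\n", data.byte);
	close(fd);
	return 0;
}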
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2006-2007 PA Semi, Inc * * SMBus host driver for PA Semi PWRficient */ #include <linux/module.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/stddef.h> #include <linux/sched.h> #include <linux/i2c.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/io.h> #include "i2c-pasemi-core.h" #define CLK_100K_DIV 84 #define CLK_400K_DIV 21 static struct pci_driver pasemi_smb_pci_driver; static int pasemi_smb_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) { struct pasemi_smbus *smbus; unsigned long base; int size; int error; if (!(pci_resource_flags(dev, 0) & IORESOURCE_IO)) return -ENODEV; smbus = devm_kzalloc(&dev->dev, sizeof(*smbus), GFP_KERNEL); if (!smbus) return -ENOMEM; smbus->dev = &dev->dev; base = pci_resource_start(dev, 0); size = pci_resource_len(dev, 0); smbus->clk_div = CLK_100K_DIV; /* * The original PASemi PCI controllers don't have a register for * their HW revision. */ smbus->hw_rev = PASEMI_HW_REV_PCI; if (!devm_request_region(&dev->dev, base, size, pasemi_smb_pci_driver.name)) return -EBUSY; smbus->ioaddr = pcim_iomap(dev, 0, 0); if (!smbus->ioaddr) return -EBUSY; smbus->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD; error = pasemi_i2c_common_probe(smbus); if (error) return error; pci_set_drvdata(dev, smbus); return 0; } static const struct pci_device_id pasemi_smb_pci_ids[] = { { PCI_DEVICE(0x1959, 0xa003) }, { 0, } }; MODULE_DEVICE_TABLE(pci, pasemi_smb_pci_ids); static struct pci_driver pasemi_smb_pci_driver = { .name = "i2c-pasemi", .id_table = pasemi_smb_pci_ids, .probe = pasemi_smb_pci_probe, }; module_pci_driver(pasemi_smb_pci_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Olof Johansson <[email protected]>"); MODULE_DESCRIPTION("PA Semi PWRficient SMBus driver");
linux-master
drivers/i2c/busses/i2c-pasemi-pci.c
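/*
 * Hypothetical kernel-module sketch (not part of the driver above): attaching
 * an I2C client to an adapter such as the one i2c-pasemi-pci registers. The
 * bus number (0), device type ("lm75") and address (0x48) are assumptions for
 * illustration only; a real system would discover the bus number at runtime.
 */
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/module.h>

static struct i2c_adapter *demo_adap;
static struct i2c_client *demo_client;

static int __init demo_attach_init(void)
{
	struct i2c_board_info info = { I2C_BOARD_INFO("lm75", 0x48) };

	demo_adap = i2c_get_adapter(0);			/* assumed bus number */
	if (!demo_adap)
		return -ENODEV;

	demo_client = i2c_new_client_device(demo_adap, &info);
	if (IS_ERR(demo_client)) {
		i2c_put_adapter(demo_adap);
		return PTR_ERR(demo_client);
	}
	return 0;
}

static void __exit demo_attach_exit(void)
{
	i2c_unregister_device(demo_client);
	i2c_put_adapter(demo_adap);
}

module_init(demo_attach_init);
module_exit(demo_attach_exit);
MODULE_DESCRIPTION("Illustrative I2C client attach example");
MODULE_LICENSE("GPL");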
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2015 Masahiro Yamada <[email protected]> */ #include <linux/clk.h> #include <linux/i2c.h> #include <linux/iopoll.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> #include <linux/platform_device.h> #define UNIPHIER_FI2C_CR 0x00 /* control register */ #define UNIPHIER_FI2C_CR_MST BIT(3) /* master mode */ #define UNIPHIER_FI2C_CR_STA BIT(2) /* start condition */ #define UNIPHIER_FI2C_CR_STO BIT(1) /* stop condition */ #define UNIPHIER_FI2C_CR_NACK BIT(0) /* do not return ACK */ #define UNIPHIER_FI2C_DTTX 0x04 /* TX FIFO */ #define UNIPHIER_FI2C_DTTX_CMD BIT(8) /* send command (slave addr) */ #define UNIPHIER_FI2C_DTTX_RD BIT(0) /* read transaction */ #define UNIPHIER_FI2C_DTRX 0x04 /* RX FIFO */ #define UNIPHIER_FI2C_SLAD 0x0c /* slave address */ #define UNIPHIER_FI2C_CYC 0x10 /* clock cycle control */ #define UNIPHIER_FI2C_LCTL 0x14 /* clock low period control */ #define UNIPHIER_FI2C_SSUT 0x18 /* restart/stop setup time control */ #define UNIPHIER_FI2C_DSUT 0x1c /* data setup time control */ #define UNIPHIER_FI2C_INT 0x20 /* interrupt status */ #define UNIPHIER_FI2C_IE 0x24 /* interrupt enable */ #define UNIPHIER_FI2C_IC 0x28 /* interrupt clear */ #define UNIPHIER_FI2C_INT_TE BIT(9) /* TX FIFO empty */ #define UNIPHIER_FI2C_INT_RF BIT(8) /* RX FIFO full */ #define UNIPHIER_FI2C_INT_TC BIT(7) /* send complete (STOP) */ #define UNIPHIER_FI2C_INT_RC BIT(6) /* receive complete (STOP) */ #define UNIPHIER_FI2C_INT_TB BIT(5) /* sent specified bytes */ #define UNIPHIER_FI2C_INT_RB BIT(4) /* received specified bytes */ #define UNIPHIER_FI2C_INT_NA BIT(2) /* no ACK */ #define UNIPHIER_FI2C_INT_AL BIT(1) /* arbitration lost */ #define UNIPHIER_FI2C_SR 0x2c /* status register */ #define UNIPHIER_FI2C_SR_DB BIT(12) /* device busy */ #define UNIPHIER_FI2C_SR_STS BIT(11) /* stop condition detected */ #define UNIPHIER_FI2C_SR_BB BIT(8) /* bus busy */ #define UNIPHIER_FI2C_SR_RFF BIT(3) /* RX FIFO full */ #define UNIPHIER_FI2C_SR_RNE BIT(2) /* RX FIFO not empty */ #define UNIPHIER_FI2C_SR_TNF BIT(1) /* TX FIFO not full */ #define UNIPHIER_FI2C_SR_TFE BIT(0) /* TX FIFO empty */ #define UNIPHIER_FI2C_RST 0x34 /* reset control */ #define UNIPHIER_FI2C_RST_TBRST BIT(2) /* clear TX FIFO */ #define UNIPHIER_FI2C_RST_RBRST BIT(1) /* clear RX FIFO */ #define UNIPHIER_FI2C_RST_RST BIT(0) /* forcible bus reset */ #define UNIPHIER_FI2C_BM 0x38 /* bus monitor */ #define UNIPHIER_FI2C_BM_SDAO BIT(3) /* output for SDA line */ #define UNIPHIER_FI2C_BM_SDAS BIT(2) /* readback of SDA line */ #define UNIPHIER_FI2C_BM_SCLO BIT(1) /* output for SCL line */ #define UNIPHIER_FI2C_BM_SCLS BIT(0) /* readback of SCL line */ #define UNIPHIER_FI2C_NOISE 0x3c /* noise filter control */ #define UNIPHIER_FI2C_TBC 0x40 /* TX byte count setting */ #define UNIPHIER_FI2C_RBC 0x44 /* RX byte count setting */ #define UNIPHIER_FI2C_TBCM 0x48 /* TX byte count monitor */ #define UNIPHIER_FI2C_RBCM 0x4c /* RX byte count monitor */ #define UNIPHIER_FI2C_BRST 0x50 /* bus reset */ #define UNIPHIER_FI2C_BRST_FOEN BIT(1) /* normal operation */ #define UNIPHIER_FI2C_BRST_RSCL BIT(0) /* release SCL */ #define UNIPHIER_FI2C_INT_FAULTS \ (UNIPHIER_FI2C_INT_NA | UNIPHIER_FI2C_INT_AL) #define UNIPHIER_FI2C_INT_STOP \ (UNIPHIER_FI2C_INT_TC | UNIPHIER_FI2C_INT_RC) #define UNIPHIER_FI2C_RD BIT(0) #define UNIPHIER_FI2C_STOP BIT(1) #define UNIPHIER_FI2C_MANUAL_NACK BIT(2) #define UNIPHIER_FI2C_BYTE_WISE BIT(3) #define UNIPHIER_FI2C_DEFER_STOP_COMP BIT(4) #define 
UNIPHIER_FI2C_FIFO_SIZE 8 struct uniphier_fi2c_priv { struct completion comp; struct i2c_adapter adap; void __iomem *membase; struct clk *clk; unsigned int len; u8 *buf; u32 enabled_irqs; int error; unsigned int flags; unsigned int busy_cnt; unsigned int clk_cycle; spinlock_t lock; /* IRQ synchronization */ }; static void uniphier_fi2c_fill_txfifo(struct uniphier_fi2c_priv *priv, bool first) { int fifo_space = UNIPHIER_FI2C_FIFO_SIZE; /* * TX-FIFO stores slave address in it for the first access. * Decrement the counter. */ if (first) fifo_space--; while (priv->len) { if (fifo_space-- <= 0) break; writel(*priv->buf++, priv->membase + UNIPHIER_FI2C_DTTX); priv->len--; } } static void uniphier_fi2c_drain_rxfifo(struct uniphier_fi2c_priv *priv) { int fifo_left = priv->flags & UNIPHIER_FI2C_BYTE_WISE ? 1 : UNIPHIER_FI2C_FIFO_SIZE; while (priv->len) { if (fifo_left-- <= 0) break; *priv->buf++ = readl(priv->membase + UNIPHIER_FI2C_DTRX); priv->len--; } } static void uniphier_fi2c_set_irqs(struct uniphier_fi2c_priv *priv) { writel(priv->enabled_irqs, priv->membase + UNIPHIER_FI2C_IE); } static void uniphier_fi2c_clear_irqs(struct uniphier_fi2c_priv *priv, u32 mask) { writel(mask, priv->membase + UNIPHIER_FI2C_IC); } static void uniphier_fi2c_stop(struct uniphier_fi2c_priv *priv) { priv->enabled_irqs |= UNIPHIER_FI2C_INT_STOP; uniphier_fi2c_set_irqs(priv); writel(UNIPHIER_FI2C_CR_MST | UNIPHIER_FI2C_CR_STO, priv->membase + UNIPHIER_FI2C_CR); } static irqreturn_t uniphier_fi2c_interrupt(int irq, void *dev_id) { struct uniphier_fi2c_priv *priv = dev_id; u32 irq_status; spin_lock(&priv->lock); irq_status = readl(priv->membase + UNIPHIER_FI2C_INT); irq_status &= priv->enabled_irqs; if (irq_status & UNIPHIER_FI2C_INT_STOP) goto complete; if (unlikely(irq_status & UNIPHIER_FI2C_INT_AL)) { priv->error = -EAGAIN; goto complete; } if (unlikely(irq_status & UNIPHIER_FI2C_INT_NA)) { priv->error = -ENXIO; if (priv->flags & UNIPHIER_FI2C_RD) { /* * work around a hardware bug: * The receive-completed interrupt is never set even if * STOP condition is detected after the address phase * of read transaction fails to get ACK. * To avoid time-out error, we issue STOP here, * but do not wait for its completion. * It should be checked after exiting this handler. */ uniphier_fi2c_stop(priv); priv->flags |= UNIPHIER_FI2C_DEFER_STOP_COMP; goto complete; } goto stop; } if (irq_status & UNIPHIER_FI2C_INT_TE) { if (!priv->len) goto data_done; uniphier_fi2c_fill_txfifo(priv, false); goto handled; } if (irq_status & (UNIPHIER_FI2C_INT_RF | UNIPHIER_FI2C_INT_RB)) { uniphier_fi2c_drain_rxfifo(priv); /* * If the number of bytes to read is multiple of the FIFO size * (msg->len == 8, 16, 24, ...), the INT_RF bit is set a little * earlier than INT_RB. We wait for INT_RB to confirm the * completion of the current message. 
*/ if (!priv->len && (irq_status & UNIPHIER_FI2C_INT_RB)) goto data_done; if (unlikely(priv->flags & UNIPHIER_FI2C_MANUAL_NACK)) { if (priv->len <= UNIPHIER_FI2C_FIFO_SIZE && !(priv->flags & UNIPHIER_FI2C_BYTE_WISE)) { priv->enabled_irqs |= UNIPHIER_FI2C_INT_RB; uniphier_fi2c_set_irqs(priv); priv->flags |= UNIPHIER_FI2C_BYTE_WISE; } if (priv->len <= 1) writel(UNIPHIER_FI2C_CR_MST | UNIPHIER_FI2C_CR_NACK, priv->membase + UNIPHIER_FI2C_CR); } goto handled; } spin_unlock(&priv->lock); return IRQ_NONE; data_done: if (priv->flags & UNIPHIER_FI2C_STOP) { stop: uniphier_fi2c_stop(priv); } else { complete: priv->enabled_irqs = 0; uniphier_fi2c_set_irqs(priv); complete(&priv->comp); } handled: /* * This controller makes a pause while any bit of the IRQ status is * asserted. Clear the asserted bit to kick the controller just before * exiting the handler. */ uniphier_fi2c_clear_irqs(priv, irq_status); spin_unlock(&priv->lock); return IRQ_HANDLED; } static void uniphier_fi2c_tx_init(struct uniphier_fi2c_priv *priv, u16 addr, bool repeat) { priv->enabled_irqs |= UNIPHIER_FI2C_INT_TE; uniphier_fi2c_set_irqs(priv); /* do not use TX byte counter */ writel(0, priv->membase + UNIPHIER_FI2C_TBC); /* set slave address */ writel(UNIPHIER_FI2C_DTTX_CMD | addr << 1, priv->membase + UNIPHIER_FI2C_DTTX); /* * First chunk of data. For a repeated START condition, do not write * data to the TX fifo here to avoid the timing issue. */ if (!repeat) uniphier_fi2c_fill_txfifo(priv, true); } static void uniphier_fi2c_rx_init(struct uniphier_fi2c_priv *priv, u16 addr) { priv->flags |= UNIPHIER_FI2C_RD; if (likely(priv->len < 256)) { /* * If possible, use RX byte counter. * It can automatically handle NACK for the last byte. */ writel(priv->len, priv->membase + UNIPHIER_FI2C_RBC); priv->enabled_irqs |= UNIPHIER_FI2C_INT_RF | UNIPHIER_FI2C_INT_RB; } else { /* * The byte counter can not count over 256. In this case, * do not use it at all. Drain data when FIFO gets full, * but treat the last portion as a special case. 
*/ writel(0, priv->membase + UNIPHIER_FI2C_RBC); priv->flags |= UNIPHIER_FI2C_MANUAL_NACK; priv->enabled_irqs |= UNIPHIER_FI2C_INT_RF; } uniphier_fi2c_set_irqs(priv); /* set slave address with RD bit */ writel(UNIPHIER_FI2C_DTTX_CMD | UNIPHIER_FI2C_DTTX_RD | addr << 1, priv->membase + UNIPHIER_FI2C_DTTX); } static void uniphier_fi2c_reset(struct uniphier_fi2c_priv *priv) { writel(UNIPHIER_FI2C_RST_RST, priv->membase + UNIPHIER_FI2C_RST); } static void uniphier_fi2c_prepare_operation(struct uniphier_fi2c_priv *priv) { writel(UNIPHIER_FI2C_BRST_FOEN | UNIPHIER_FI2C_BRST_RSCL, priv->membase + UNIPHIER_FI2C_BRST); } static void uniphier_fi2c_recover(struct uniphier_fi2c_priv *priv) { uniphier_fi2c_reset(priv); i2c_recover_bus(&priv->adap); } static int uniphier_fi2c_master_xfer_one(struct i2c_adapter *adap, struct i2c_msg *msg, bool repeat, bool stop) { struct uniphier_fi2c_priv *priv = i2c_get_adapdata(adap); bool is_read = msg->flags & I2C_M_RD; unsigned long time_left, flags; priv->len = msg->len; priv->buf = msg->buf; priv->enabled_irqs = UNIPHIER_FI2C_INT_FAULTS; priv->error = 0; priv->flags = 0; if (stop) priv->flags |= UNIPHIER_FI2C_STOP; reinit_completion(&priv->comp); uniphier_fi2c_clear_irqs(priv, U32_MAX); writel(UNIPHIER_FI2C_RST_TBRST | UNIPHIER_FI2C_RST_RBRST, priv->membase + UNIPHIER_FI2C_RST); /* reset TX/RX FIFO */ spin_lock_irqsave(&priv->lock, flags); if (is_read) uniphier_fi2c_rx_init(priv, msg->addr); else uniphier_fi2c_tx_init(priv, msg->addr, repeat); /* * For a repeated START condition, writing a slave address to the FIFO * kicks the controller. So, the UNIPHIER_FI2C_CR register should be * written only for a non-repeated START condition. */ if (!repeat) writel(UNIPHIER_FI2C_CR_MST | UNIPHIER_FI2C_CR_STA, priv->membase + UNIPHIER_FI2C_CR); spin_unlock_irqrestore(&priv->lock, flags); time_left = wait_for_completion_timeout(&priv->comp, adap->timeout); spin_lock_irqsave(&priv->lock, flags); priv->enabled_irqs = 0; uniphier_fi2c_set_irqs(priv); spin_unlock_irqrestore(&priv->lock, flags); if (!time_left) { dev_err(&adap->dev, "transaction timeout.\n"); uniphier_fi2c_recover(priv); return -ETIMEDOUT; } if (unlikely(priv->flags & UNIPHIER_FI2C_DEFER_STOP_COMP)) { u32 status; int ret; ret = readl_poll_timeout(priv->membase + UNIPHIER_FI2C_SR, status, (status & UNIPHIER_FI2C_SR_STS) && !(status & UNIPHIER_FI2C_SR_BB), 1, 20); if (ret) { dev_err(&adap->dev, "stop condition was not completed.\n"); uniphier_fi2c_recover(priv); return ret; } } return priv->error; } static int uniphier_fi2c_check_bus_busy(struct i2c_adapter *adap) { struct uniphier_fi2c_priv *priv = i2c_get_adapdata(adap); if (readl(priv->membase + UNIPHIER_FI2C_SR) & UNIPHIER_FI2C_SR_DB) { if (priv->busy_cnt++ > 3) { /* * If bus busy continues too long, it is probably * in a wrong state. Try bus recovery. */ uniphier_fi2c_recover(priv); priv->busy_cnt = 0; } return -EAGAIN; } priv->busy_cnt = 0; return 0; } static int uniphier_fi2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct i2c_msg *msg, *emsg = msgs + num; bool repeat = false; int ret; ret = uniphier_fi2c_check_bus_busy(adap); if (ret) return ret; for (msg = msgs; msg < emsg; msg++) { /* Emit STOP if it is the last message or I2C_M_STOP is set. 
*/ bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP); ret = uniphier_fi2c_master_xfer_one(adap, msg, repeat, stop); if (ret) return ret; repeat = !stop; } return num; } static u32 uniphier_fi2c_functionality(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static const struct i2c_algorithm uniphier_fi2c_algo = { .master_xfer = uniphier_fi2c_master_xfer, .functionality = uniphier_fi2c_functionality, }; static int uniphier_fi2c_get_scl(struct i2c_adapter *adap) { struct uniphier_fi2c_priv *priv = i2c_get_adapdata(adap); return !!(readl(priv->membase + UNIPHIER_FI2C_BM) & UNIPHIER_FI2C_BM_SCLS); } static void uniphier_fi2c_set_scl(struct i2c_adapter *adap, int val) { struct uniphier_fi2c_priv *priv = i2c_get_adapdata(adap); writel(val ? UNIPHIER_FI2C_BRST_RSCL : 0, priv->membase + UNIPHIER_FI2C_BRST); } static int uniphier_fi2c_get_sda(struct i2c_adapter *adap) { struct uniphier_fi2c_priv *priv = i2c_get_adapdata(adap); return !!(readl(priv->membase + UNIPHIER_FI2C_BM) & UNIPHIER_FI2C_BM_SDAS); } static void uniphier_fi2c_unprepare_recovery(struct i2c_adapter *adap) { uniphier_fi2c_prepare_operation(i2c_get_adapdata(adap)); } static struct i2c_bus_recovery_info uniphier_fi2c_bus_recovery_info = { .recover_bus = i2c_generic_scl_recovery, .get_scl = uniphier_fi2c_get_scl, .set_scl = uniphier_fi2c_set_scl, .get_sda = uniphier_fi2c_get_sda, .unprepare_recovery = uniphier_fi2c_unprepare_recovery, }; static void uniphier_fi2c_hw_init(struct uniphier_fi2c_priv *priv) { unsigned int cyc = priv->clk_cycle; u32 tmp; tmp = readl(priv->membase + UNIPHIER_FI2C_CR); tmp |= UNIPHIER_FI2C_CR_MST; writel(tmp, priv->membase + UNIPHIER_FI2C_CR); uniphier_fi2c_reset(priv); /* * Standard-mode: tLOW + tHIGH = 10 us * Fast-mode: tLOW + tHIGH = 2.5 us */ writel(cyc, priv->membase + UNIPHIER_FI2C_CYC); /* * Standard-mode: tLOW = 4.7 us, tHIGH = 4.0 us, tBUF = 4.7 us * Fast-mode: tLOW = 1.3 us, tHIGH = 0.6 us, tBUF = 1.3 us * "tLow/tHIGH = 5/4" meets both. 
*/ writel(cyc * 5 / 9, priv->membase + UNIPHIER_FI2C_LCTL); /* * Standard-mode: tHD;STA = 4.0 us, tSU;STA = 4.7 us, tSU;STO = 4.0 us * Fast-mode: tHD;STA = 0.6 us, tSU;STA = 0.6 us, tSU;STO = 0.6 us */ writel(cyc / 2, priv->membase + UNIPHIER_FI2C_SSUT); /* * Standard-mode: tSU;DAT = 250 ns * Fast-mode: tSU;DAT = 100 ns */ writel(cyc / 16, priv->membase + UNIPHIER_FI2C_DSUT); uniphier_fi2c_prepare_operation(priv); } static int uniphier_fi2c_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct uniphier_fi2c_priv *priv; u32 bus_speed; unsigned long clk_rate; int irq, ret; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->membase = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->membase)) return PTR_ERR(priv->membase); irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; if (of_property_read_u32(dev->of_node, "clock-frequency", &bus_speed)) bus_speed = I2C_MAX_STANDARD_MODE_FREQ; if (!bus_speed || bus_speed > I2C_MAX_FAST_MODE_FREQ) { dev_err(dev, "invalid clock-frequency %d\n", bus_speed); return -EINVAL; } priv->clk = devm_clk_get_enabled(dev, NULL); if (IS_ERR(priv->clk)) { dev_err(dev, "failed to enable clock\n"); return PTR_ERR(priv->clk); } clk_rate = clk_get_rate(priv->clk); if (!clk_rate) { dev_err(dev, "input clock rate should not be zero\n"); return -EINVAL; } priv->clk_cycle = clk_rate / bus_speed; init_completion(&priv->comp); spin_lock_init(&priv->lock); priv->adap.owner = THIS_MODULE; priv->adap.algo = &uniphier_fi2c_algo; priv->adap.dev.parent = dev; priv->adap.dev.of_node = dev->of_node; strscpy(priv->adap.name, "UniPhier FI2C", sizeof(priv->adap.name)); priv->adap.bus_recovery_info = &uniphier_fi2c_bus_recovery_info; i2c_set_adapdata(&priv->adap, priv); platform_set_drvdata(pdev, priv); uniphier_fi2c_hw_init(priv); ret = devm_request_irq(dev, irq, uniphier_fi2c_interrupt, 0, pdev->name, priv); if (ret) { dev_err(dev, "failed to request irq %d\n", irq); return ret; } return i2c_add_adapter(&priv->adap); } static void uniphier_fi2c_remove(struct platform_device *pdev) { struct uniphier_fi2c_priv *priv = platform_get_drvdata(pdev); i2c_del_adapter(&priv->adap); } static int __maybe_unused uniphier_fi2c_suspend(struct device *dev) { struct uniphier_fi2c_priv *priv = dev_get_drvdata(dev); clk_disable_unprepare(priv->clk); return 0; } static int __maybe_unused uniphier_fi2c_resume(struct device *dev) { struct uniphier_fi2c_priv *priv = dev_get_drvdata(dev); int ret; ret = clk_prepare_enable(priv->clk); if (ret) return ret; uniphier_fi2c_hw_init(priv); return 0; } static const struct dev_pm_ops uniphier_fi2c_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(uniphier_fi2c_suspend, uniphier_fi2c_resume) }; static const struct of_device_id uniphier_fi2c_match[] = { { .compatible = "socionext,uniphier-fi2c" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, uniphier_fi2c_match); static struct platform_driver uniphier_fi2c_drv = { .probe = uniphier_fi2c_probe, .remove_new = uniphier_fi2c_remove, .driver = { .name = "uniphier-fi2c", .of_match_table = uniphier_fi2c_match, .pm = &uniphier_fi2c_pm_ops, }, }; module_platform_driver(uniphier_fi2c_drv); MODULE_AUTHOR("Masahiro Yamada <[email protected]>"); MODULE_DESCRIPTION("UniPhier FIFO-builtin I2C bus driver"); MODULE_LICENSE("GPL");
linux-master
drivers/i2c/busses/i2c-uniphier-f.c
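/*
 * Illustrative userspace sketch (not part of the driver above): a combined
 * write-then-read transfer issued through the I2C_RDWR ioctl. The second
 * message begins with a repeated START, which is the case the driver above
 * handles by deferring the TX FIFO fill and the CR write. The bus number,
 * slave address (0x3c) and register (0x10) are assumptions for this example.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

int main(void)
{
	unsigned char reg = 0x10, val = 0;
	struct i2c_msg msgs[2] = {
		{ .addr = 0x3c, .flags = 0,        .len = 1, .buf = &reg },
		{ .addr = 0x3c, .flags = I2C_M_RD, .len = 1, .buf = &val },
	};
	struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 2 };
	int fd = open("/dev/i2c-0", O_RDWR);		/* assumed bus number */

	if (fd < 0)
		return 1;
	if (ioctl(fd, I2C_RDWR, &xfer) < 0) {
		perror("I2C_RDWR");
		close(fd);
		return 1;
	}
	printf("reg 0x10 = 0x%02x\n", val);
	close(fd);
	return 0;
}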
// SPDX-License-Identifier: GPL-2.0 /* * Renesas Solutions Highlander FPGA I2C/SMBus support. * * Supported devices: R0P7780LC0011RL, R0P7785LC0011RL * * Copyright (C) 2008 Paul Mundt * Copyright (C) 2008 Renesas Solutions Corp. * Copyright (C) 2008 Atom Create Engineering Co., Ltd. */ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/i2c.h> #include <linux/platform_device.h> #include <linux/completion.h> #include <linux/io.h> #include <linux/delay.h> #include <linux/slab.h> #define SMCR 0x00 #define SMCR_START (1 << 0) #define SMCR_IRIC (1 << 1) #define SMCR_BBSY (1 << 2) #define SMCR_ACKE (1 << 3) #define SMCR_RST (1 << 4) #define SMCR_IEIC (1 << 6) #define SMSMADR 0x02 #define SMMR 0x04 #define SMMR_MODE0 (1 << 0) #define SMMR_MODE1 (1 << 1) #define SMMR_CAP (1 << 3) #define SMMR_TMMD (1 << 4) #define SMMR_SP (1 << 7) #define SMSADR 0x06 #define SMTRDR 0x46 struct highlander_i2c_dev { struct device *dev; void __iomem *base; struct i2c_adapter adapter; struct completion cmd_complete; unsigned long last_read_time; int irq; u8 *buf; size_t buf_len; }; static bool iic_force_poll, iic_force_normal; static int iic_timeout = 1000, iic_read_delay; static inline void highlander_i2c_irq_enable(struct highlander_i2c_dev *dev) { iowrite16(ioread16(dev->base + SMCR) | SMCR_IEIC, dev->base + SMCR); } static inline void highlander_i2c_irq_disable(struct highlander_i2c_dev *dev) { iowrite16(ioread16(dev->base + SMCR) & ~SMCR_IEIC, dev->base + SMCR); } static inline void highlander_i2c_start(struct highlander_i2c_dev *dev) { iowrite16(ioread16(dev->base + SMCR) | SMCR_START, dev->base + SMCR); } static inline void highlander_i2c_done(struct highlander_i2c_dev *dev) { iowrite16(ioread16(dev->base + SMCR) | SMCR_IRIC, dev->base + SMCR); } static void highlander_i2c_setup(struct highlander_i2c_dev *dev) { u16 smmr; smmr = ioread16(dev->base + SMMR); smmr |= SMMR_TMMD; if (iic_force_normal) smmr &= ~SMMR_SP; else smmr |= SMMR_SP; iowrite16(smmr, dev->base + SMMR); } static void smbus_write_data(u8 *src, u16 *dst, int len) { for (; len > 1; len -= 2) { *dst++ = be16_to_cpup((__be16 *)src); src += 2; } if (len) *dst = *src << 8; } static void smbus_read_data(u16 *src, u8 *dst, int len) { for (; len > 1; len -= 2) { *(__be16 *)dst = cpu_to_be16p(src++); dst += 2; } if (len) *dst = *src >> 8; } static void highlander_i2c_command(struct highlander_i2c_dev *dev, u8 command, int len) { unsigned int i; u16 cmd = (command << 8) | command; for (i = 0; i < len; i += 2) { if (len - i == 1) cmd = command << 8; iowrite16(cmd, dev->base + SMSADR + i); dev_dbg(dev->dev, "command data[%x] 0x%04x\n", i/2, cmd); } } static int highlander_i2c_wait_for_bbsy(struct highlander_i2c_dev *dev) { unsigned long timeout; timeout = jiffies + msecs_to_jiffies(iic_timeout); while (ioread16(dev->base + SMCR) & SMCR_BBSY) { if (time_after(jiffies, timeout)) { dev_warn(dev->dev, "timeout waiting for bus ready\n"); return -ETIMEDOUT; } msleep(1); } return 0; } static int highlander_i2c_reset(struct highlander_i2c_dev *dev) { iowrite16(ioread16(dev->base + SMCR) | SMCR_RST, dev->base + SMCR); return highlander_i2c_wait_for_bbsy(dev); } static int highlander_i2c_wait_for_ack(struct highlander_i2c_dev *dev) { u16 tmp = ioread16(dev->base + SMCR); if ((tmp & (SMCR_IRIC | SMCR_ACKE)) == SMCR_ACKE) { dev_warn(dev->dev, "ack abnormality\n"); return highlander_i2c_reset(dev); } return 0; } static irqreturn_t highlander_i2c_irq(int irq, void *dev_id) { struct highlander_i2c_dev *dev = dev_id; highlander_i2c_done(dev); 
complete(&dev->cmd_complete); return IRQ_HANDLED; } static void highlander_i2c_poll(struct highlander_i2c_dev *dev) { unsigned long timeout; u16 smcr; timeout = jiffies + msecs_to_jiffies(iic_timeout); for (;;) { smcr = ioread16(dev->base + SMCR); /* * Don't bother checking ACKE here, this and the reset * are handled in highlander_i2c_wait_xfer_done() when * waiting for the ACK. */ if (smcr & SMCR_IRIC) return; if (time_after(jiffies, timeout)) break; cpu_relax(); cond_resched(); } dev_err(dev->dev, "polling timed out\n"); } static inline int highlander_i2c_wait_xfer_done(struct highlander_i2c_dev *dev) { if (dev->irq) wait_for_completion_timeout(&dev->cmd_complete, msecs_to_jiffies(iic_timeout)); else /* busy looping, the IRQ of champions */ highlander_i2c_poll(dev); return highlander_i2c_wait_for_ack(dev); } static int highlander_i2c_read(struct highlander_i2c_dev *dev) { int i, cnt; u16 data[16]; if (highlander_i2c_wait_for_bbsy(dev)) return -EAGAIN; highlander_i2c_start(dev); if (highlander_i2c_wait_xfer_done(dev)) { dev_err(dev->dev, "Arbitration loss\n"); return -EAGAIN; } /* * The R0P7780LC0011RL FPGA needs a significant delay between * data read cycles, otherwise the transceiver gets confused and * garbage is returned when the read is subsequently aborted. * * It is not sufficient to wait for BBSY. * * While this generally only applies to the older SH7780-based * Highlanders, the same issue can be observed on SH7785 ones, * albeit less frequently. SH7780-based Highlanders may need * this to be as high as 1000 ms. */ if (iic_read_delay && time_before(jiffies, dev->last_read_time + msecs_to_jiffies(iic_read_delay))) msleep(jiffies_to_msecs((dev->last_read_time + msecs_to_jiffies(iic_read_delay)) - jiffies)); cnt = (dev->buf_len + 1) >> 1; for (i = 0; i < cnt; i++) { data[i] = ioread16(dev->base + SMTRDR + (i * sizeof(u16))); dev_dbg(dev->dev, "read data[%x] 0x%04x\n", i, data[i]); } smbus_read_data(data, dev->buf, dev->buf_len); dev->last_read_time = jiffies; return 0; } static int highlander_i2c_write(struct highlander_i2c_dev *dev) { int i, cnt; u16 data[16]; smbus_write_data(dev->buf, data, dev->buf_len); cnt = (dev->buf_len + 1) >> 1; for (i = 0; i < cnt; i++) { iowrite16(data[i], dev->base + SMTRDR + (i * sizeof(u16))); dev_dbg(dev->dev, "write data[%x] 0x%04x\n", i, data[i]); } if (highlander_i2c_wait_for_bbsy(dev)) return -EAGAIN; highlander_i2c_start(dev); return highlander_i2c_wait_xfer_done(dev); } static int highlander_i2c_smbus_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { struct highlander_i2c_dev *dev = i2c_get_adapdata(adap); u16 tmp; init_completion(&dev->cmd_complete); dev_dbg(dev->dev, "addr %04x, command %02x, read_write %d, size %d\n", addr, command, read_write, size); /* * Set up the buffer and transfer size */ switch (size) { case I2C_SMBUS_BYTE_DATA: dev->buf = &data->byte; dev->buf_len = 1; break; case I2C_SMBUS_I2C_BLOCK_DATA: dev->buf = &data->block[1]; dev->buf_len = data->block[0]; break; default: dev_err(dev->dev, "unsupported command %d\n", size); return -EINVAL; } /* * Encode the mode setting */ tmp = ioread16(dev->base + SMMR); tmp &= ~(SMMR_MODE0 | SMMR_MODE1); switch (dev->buf_len) { case 1: /* default */ break; case 8: tmp |= SMMR_MODE0; break; case 16: tmp |= SMMR_MODE1; break; case 32: tmp |= (SMMR_MODE0 | SMMR_MODE1); break; default: dev_err(dev->dev, "unsupported xfer size %zu\n", dev->buf_len); return -EINVAL; } iowrite16(tmp, dev->base + SMMR); /* Ensure 
we're in a sane state */ highlander_i2c_done(dev); /* Set slave address */ iowrite16((addr << 1) | read_write, dev->base + SMSMADR); highlander_i2c_command(dev, command, dev->buf_len); if (read_write == I2C_SMBUS_READ) return highlander_i2c_read(dev); else return highlander_i2c_write(dev); } static u32 highlander_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_I2C_BLOCK; } static const struct i2c_algorithm highlander_i2c_algo = { .smbus_xfer = highlander_i2c_smbus_xfer, .functionality = highlander_i2c_func, }; static int highlander_i2c_probe(struct platform_device *pdev) { struct highlander_i2c_dev *dev; struct i2c_adapter *adap; struct resource *res; int ret; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (unlikely(!res)) { dev_err(&pdev->dev, "no mem resource\n"); return -ENODEV; } dev = kzalloc(sizeof(struct highlander_i2c_dev), GFP_KERNEL); if (unlikely(!dev)) return -ENOMEM; dev->base = ioremap(res->start, resource_size(res)); if (unlikely(!dev->base)) { ret = -ENXIO; goto err; } dev->dev = &pdev->dev; platform_set_drvdata(pdev, dev); dev->irq = platform_get_irq(pdev, 0); if (dev->irq < 0 || iic_force_poll) dev->irq = 0; if (dev->irq) { ret = request_irq(dev->irq, highlander_i2c_irq, 0, pdev->name, dev); if (unlikely(ret)) goto err_unmap; highlander_i2c_irq_enable(dev); } else { dev_notice(&pdev->dev, "no IRQ, using polling mode\n"); highlander_i2c_irq_disable(dev); } dev->last_read_time = jiffies; /* initial read jiffies */ highlander_i2c_setup(dev); adap = &dev->adapter; i2c_set_adapdata(adap, dev); adap->owner = THIS_MODULE; adap->class = I2C_CLASS_HWMON; strscpy(adap->name, "HL FPGA I2C adapter", sizeof(adap->name)); adap->algo = &highlander_i2c_algo; adap->dev.parent = &pdev->dev; adap->nr = pdev->id; /* * Reset the adapter */ ret = highlander_i2c_reset(dev); if (unlikely(ret)) { dev_err(&pdev->dev, "controller didn't come up\n"); goto err_free_irq; } ret = i2c_add_numbered_adapter(adap); if (unlikely(ret)) { dev_err(&pdev->dev, "failure adding adapter\n"); goto err_free_irq; } return 0; err_free_irq: if (dev->irq) free_irq(dev->irq, dev); err_unmap: iounmap(dev->base); err: kfree(dev); return ret; } static void highlander_i2c_remove(struct platform_device *pdev) { struct highlander_i2c_dev *dev = platform_get_drvdata(pdev); i2c_del_adapter(&dev->adapter); if (dev->irq) free_irq(dev->irq, dev); iounmap(dev->base); kfree(dev); } static struct platform_driver highlander_i2c_driver = { .driver = { .name = "i2c-highlander", }, .probe = highlander_i2c_probe, .remove_new = highlander_i2c_remove, }; module_platform_driver(highlander_i2c_driver); MODULE_AUTHOR("Paul Mundt"); MODULE_DESCRIPTION("Renesas Highlander FPGA I2C/SMBus adapter"); MODULE_LICENSE("GPL v2"); module_param(iic_force_poll, bool, 0); module_param(iic_force_normal, bool, 0); module_param(iic_timeout, int, 0); module_param(iic_read_delay, int, 0); MODULE_PARM_DESC(iic_force_poll, "Force polling mode"); MODULE_PARM_DESC(iic_force_normal, "Force normal mode (100 kHz), default is fast mode (400 kHz)"); MODULE_PARM_DESC(iic_timeout, "Set timeout value in msecs (default 1000 ms)"); MODULE_PARM_DESC(iic_read_delay, "Delay between data read cycles (default 0 ms)");
linux-master
drivers/i2c/busses/i2c-highlander.c
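/*
 * Standalone sketch (illustrative only) of the byte packing performed by
 * smbus_write_data() above: the FPGA transfer registers are 16 bits wide and
 * take two bytes per word, most significant byte first, with a trailing odd
 * byte placed in the high half of the last word. Plain C with no kernel
 * dependencies; the test values are arbitrary.
 */
#include <stdint.h>
#include <stdio.h>

static void pack_be16(const uint8_t *src, uint16_t *dst, int len)
{
	for (; len > 1; len -= 2, src += 2)
		*dst++ = (uint16_t)((src[0] << 8) | src[1]);
	if (len)
		*dst = (uint16_t)(src[0] << 8);
}

int main(void)
{
	const uint8_t bytes[3] = { 0x12, 0x34, 0x56 };
	uint16_t words[2] = { 0, 0 };

	pack_be16(bytes, words, 3);
	printf("0x%04x 0x%04x\n", words[0], words[1]);	/* 0x1234 0x5600 */
	return 0;
}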
// SPDX-License-Identifier: GPL-2.0-or-later /* * Nano River Technologies viperboard i2c master driver * * (C) 2012 by Lemonage GmbH * Author: Lars Poeschel <[email protected]> * All rights reserved. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/mutex.h> #include <linux/platform_device.h> #include <linux/usb.h> #include <linux/i2c.h> #include <linux/mfd/viperboard.h> struct vprbrd_i2c { struct i2c_adapter i2c; u8 bus_freq_param; }; /* i2c bus frequency module parameter */ static u8 i2c_bus_param; static unsigned int i2c_bus_freq = 100; module_param(i2c_bus_freq, int, 0); MODULE_PARM_DESC(i2c_bus_freq, "i2c bus frequency in khz (default is 100) valid values: 10, 100, 200, 400, 1000, 3000, 6000"); static int vprbrd_i2c_status(struct i2c_adapter *i2c, struct vprbrd_i2c_status *status, bool prev_error) { u16 bytes_xfer; int ret; struct vprbrd *vb = (struct vprbrd *)i2c->algo_data; /* check for protocol error */ bytes_xfer = sizeof(struct vprbrd_i2c_status); ret = usb_control_msg(vb->usb_dev, usb_rcvctrlpipe(vb->usb_dev, 0), VPRBRD_USB_REQUEST_I2C, VPRBRD_USB_TYPE_IN, 0x0000, 0x0000, status, bytes_xfer, VPRBRD_USB_TIMEOUT_MS); if (ret != bytes_xfer) prev_error = true; if (prev_error) { dev_err(&i2c->dev, "failure in usb communication\n"); return -EREMOTEIO; } dev_dbg(&i2c->dev, " status = %d\n", status->status); if (status->status != 0x00) { dev_err(&i2c->dev, "failure: i2c protocol error\n"); return -EPROTO; } return 0; } static int vprbrd_i2c_receive(struct usb_device *usb_dev, struct vprbrd_i2c_read_msg *rmsg, int bytes_xfer) { int ret, bytes_actual; int error = 0; /* send the read request */ ret = usb_bulk_msg(usb_dev, usb_sndbulkpipe(usb_dev, VPRBRD_EP_OUT), rmsg, sizeof(struct vprbrd_i2c_read_hdr), &bytes_actual, VPRBRD_USB_TIMEOUT_MS); if ((ret < 0) || (bytes_actual != sizeof(struct vprbrd_i2c_read_hdr))) { dev_err(&usb_dev->dev, "failure transmitting usb\n"); error = -EREMOTEIO; } /* read the actual data */ ret = usb_bulk_msg(usb_dev, usb_rcvbulkpipe(usb_dev, VPRBRD_EP_IN), rmsg, bytes_xfer, &bytes_actual, VPRBRD_USB_TIMEOUT_MS); if ((ret < 0) || (bytes_xfer != bytes_actual)) { dev_err(&usb_dev->dev, "failure receiving usb\n"); error = -EREMOTEIO; } return error; } static int vprbrd_i2c_addr(struct usb_device *usb_dev, struct vprbrd_i2c_addr_msg *amsg) { int ret, bytes_actual; ret = usb_bulk_msg(usb_dev, usb_sndbulkpipe(usb_dev, VPRBRD_EP_OUT), amsg, sizeof(struct vprbrd_i2c_addr_msg), &bytes_actual, VPRBRD_USB_TIMEOUT_MS); if ((ret < 0) || (sizeof(struct vprbrd_i2c_addr_msg) != bytes_actual)) { dev_err(&usb_dev->dev, "failure transmitting usb\n"); return -EREMOTEIO; } return 0; } static int vprbrd_i2c_read(struct vprbrd *vb, struct i2c_msg *msg) { int ret; u16 remain_len, len1, len2, start = 0x0000; struct vprbrd_i2c_read_msg *rmsg = (struct vprbrd_i2c_read_msg *)vb->buf; remain_len = msg->len; rmsg->header.cmd = VPRBRD_I2C_CMD_READ; while (remain_len > 0) { rmsg->header.addr = cpu_to_le16(start + 0x4000); if (remain_len <= 255) { len1 = remain_len; len2 = 0x00; rmsg->header.len0 = remain_len; rmsg->header.len1 = 0x00; rmsg->header.len2 = 0x00; rmsg->header.len3 = 0x00; rmsg->header.len4 = 0x00; rmsg->header.len5 = 0x00; remain_len = 0; } else if (remain_len <= 510) { len1 = remain_len; len2 = 0x00; rmsg->header.len0 = remain_len - 255; rmsg->header.len1 = 0xff; rmsg->header.len2 = 0x00; rmsg->header.len3 = 0x00; rmsg->header.len4 = 0x00; rmsg->header.len5 = 0x00; remain_len = 0; } else 
if (remain_len <= 512) { len1 = remain_len; len2 = 0x00; rmsg->header.len0 = remain_len - 510; rmsg->header.len1 = 0xff; rmsg->header.len2 = 0xff; rmsg->header.len3 = 0x00; rmsg->header.len4 = 0x00; rmsg->header.len5 = 0x00; remain_len = 0; } else if (remain_len <= 767) { len1 = 512; len2 = remain_len - 512; rmsg->header.len0 = 0x02; rmsg->header.len1 = 0xff; rmsg->header.len2 = 0xff; rmsg->header.len3 = remain_len - 512; rmsg->header.len4 = 0x00; rmsg->header.len5 = 0x00; remain_len = 0; } else if (remain_len <= 1022) { len1 = 512; len2 = remain_len - 512; rmsg->header.len0 = 0x02; rmsg->header.len1 = 0xff; rmsg->header.len2 = 0xff; rmsg->header.len3 = remain_len - 767; rmsg->header.len4 = 0xff; rmsg->header.len5 = 0x00; remain_len = 0; } else if (remain_len <= 1024) { len1 = 512; len2 = remain_len - 512; rmsg->header.len0 = 0x02; rmsg->header.len1 = 0xff; rmsg->header.len2 = 0xff; rmsg->header.len3 = remain_len - 1022; rmsg->header.len4 = 0xff; rmsg->header.len5 = 0xff; remain_len = 0; } else { len1 = 512; len2 = 512; rmsg->header.len0 = 0x02; rmsg->header.len1 = 0xff; rmsg->header.len2 = 0xff; rmsg->header.len3 = 0x02; rmsg->header.len4 = 0xff; rmsg->header.len5 = 0xff; remain_len -= 1024; start += 1024; } rmsg->header.tf1 = cpu_to_le16(len1); rmsg->header.tf2 = cpu_to_le16(len2); /* first read transfer */ ret = vprbrd_i2c_receive(vb->usb_dev, rmsg, len1); if (ret < 0) return ret; /* copy the received data */ memcpy(msg->buf + start, rmsg, len1); /* second read transfer if neccessary */ if (len2 > 0) { ret = vprbrd_i2c_receive(vb->usb_dev, rmsg, len2); if (ret < 0) return ret; /* copy the received data */ memcpy(msg->buf + start + 512, rmsg, len2); } } return 0; } static int vprbrd_i2c_write(struct vprbrd *vb, struct i2c_msg *msg) { int ret, bytes_actual; u16 remain_len, bytes_xfer, start = 0x0000; struct vprbrd_i2c_write_msg *wmsg = (struct vprbrd_i2c_write_msg *)vb->buf; remain_len = msg->len; wmsg->header.cmd = VPRBRD_I2C_CMD_WRITE; wmsg->header.last = 0x00; wmsg->header.chan = 0x00; wmsg->header.spi = 0x0000; while (remain_len > 0) { wmsg->header.addr = cpu_to_le16(start + 0x4000); if (remain_len > 503) { wmsg->header.len1 = 0xff; wmsg->header.len2 = 0xf8; remain_len -= 503; bytes_xfer = 503 + sizeof(struct vprbrd_i2c_write_hdr); start += 503; } else if (remain_len > 255) { wmsg->header.len1 = 0xff; wmsg->header.len2 = (remain_len - 255); bytes_xfer = remain_len + sizeof(struct vprbrd_i2c_write_hdr); remain_len = 0; } else { wmsg->header.len1 = remain_len; wmsg->header.len2 = 0x00; bytes_xfer = remain_len + sizeof(struct vprbrd_i2c_write_hdr); remain_len = 0; } memcpy(wmsg->data, msg->buf + start, bytes_xfer - sizeof(struct vprbrd_i2c_write_hdr)); ret = usb_bulk_msg(vb->usb_dev, usb_sndbulkpipe(vb->usb_dev, VPRBRD_EP_OUT), wmsg, bytes_xfer, &bytes_actual, VPRBRD_USB_TIMEOUT_MS); if ((ret < 0) || (bytes_xfer != bytes_actual)) return -EREMOTEIO; } return 0; } static int vprbrd_i2c_xfer(struct i2c_adapter *i2c, struct i2c_msg *msgs, int num) { struct i2c_msg *pmsg; int i, ret, error = 0; struct vprbrd *vb = (struct vprbrd *)i2c->algo_data; struct vprbrd_i2c_addr_msg *amsg = (struct vprbrd_i2c_addr_msg *)vb->buf; struct vprbrd_i2c_status *smsg = (struct vprbrd_i2c_status *)vb->buf; dev_dbg(&i2c->dev, "master xfer %d messages:\n", num); for (i = 0 ; i < num ; i++) { pmsg = &msgs[i]; dev_dbg(&i2c->dev, " %d: %s (flags %d) %d bytes to 0x%02x\n", i, pmsg->flags & I2C_M_RD ? 
"read" : "write", pmsg->flags, pmsg->len, pmsg->addr); mutex_lock(&vb->lock); /* directly send the message */ if (pmsg->flags & I2C_M_RD) { /* read data */ amsg->cmd = VPRBRD_I2C_CMD_ADDR; amsg->unknown2 = 0x00; amsg->unknown3 = 0x00; amsg->addr = pmsg->addr; amsg->unknown1 = 0x01; amsg->len = cpu_to_le16(pmsg->len); /* send the addr and len, we're interested to board */ ret = vprbrd_i2c_addr(vb->usb_dev, amsg); if (ret < 0) error = ret; ret = vprbrd_i2c_read(vb, pmsg); if (ret < 0) error = ret; ret = vprbrd_i2c_status(i2c, smsg, error); if (ret < 0) error = ret; /* in case of protocol error, return the error */ if (error < 0) goto error; } else { /* write data */ ret = vprbrd_i2c_write(vb, pmsg); amsg->cmd = VPRBRD_I2C_CMD_ADDR; amsg->unknown2 = 0x00; amsg->unknown3 = 0x00; amsg->addr = pmsg->addr; amsg->unknown1 = 0x00; amsg->len = cpu_to_le16(pmsg->len); /* send the addr, the data goes to to board */ ret = vprbrd_i2c_addr(vb->usb_dev, amsg); if (ret < 0) error = ret; ret = vprbrd_i2c_status(i2c, smsg, error); if (ret < 0) error = ret; if (error < 0) goto error; } mutex_unlock(&vb->lock); } return num; error: mutex_unlock(&vb->lock); return error; } static u32 vprbrd_i2c_func(struct i2c_adapter *i2c) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } /* This is the actual algorithm we define */ static const struct i2c_algorithm vprbrd_algorithm = { .master_xfer = vprbrd_i2c_xfer, .functionality = vprbrd_i2c_func, }; static const struct i2c_adapter_quirks vprbrd_quirks = { .max_read_len = 2048, .max_write_len = 2048, }; static int vprbrd_i2c_probe(struct platform_device *pdev) { struct vprbrd *vb = dev_get_drvdata(pdev->dev.parent); struct vprbrd_i2c *vb_i2c; int ret; int pipe; vb_i2c = devm_kzalloc(&pdev->dev, sizeof(*vb_i2c), GFP_KERNEL); if (vb_i2c == NULL) return -ENOMEM; /* setup i2c adapter description */ vb_i2c->i2c.owner = THIS_MODULE; vb_i2c->i2c.class = I2C_CLASS_HWMON; vb_i2c->i2c.algo = &vprbrd_algorithm; vb_i2c->i2c.quirks = &vprbrd_quirks; vb_i2c->i2c.algo_data = vb; /* save the param in usb capabable memory */ vb_i2c->bus_freq_param = i2c_bus_param; snprintf(vb_i2c->i2c.name, sizeof(vb_i2c->i2c.name), "viperboard at bus %03d device %03d", vb->usb_dev->bus->busnum, vb->usb_dev->devnum); /* setting the bus frequency */ if ((i2c_bus_param <= VPRBRD_I2C_FREQ_10KHZ) && (i2c_bus_param >= VPRBRD_I2C_FREQ_6MHZ)) { pipe = usb_sndctrlpipe(vb->usb_dev, 0); ret = usb_control_msg(vb->usb_dev, pipe, VPRBRD_USB_REQUEST_I2C_FREQ, VPRBRD_USB_TYPE_OUT, 0x0000, 0x0000, &vb_i2c->bus_freq_param, 1, VPRBRD_USB_TIMEOUT_MS); if (ret != 1) { dev_err(&pdev->dev, "failure setting i2c_bus_freq to %d\n", i2c_bus_freq); return -EIO; } } else { dev_err(&pdev->dev, "invalid i2c_bus_freq setting:%d\n", i2c_bus_freq); return -EIO; } vb_i2c->i2c.dev.parent = &pdev->dev; /* attach to i2c layer */ i2c_add_adapter(&vb_i2c->i2c); platform_set_drvdata(pdev, vb_i2c); return 0; } static void vprbrd_i2c_remove(struct platform_device *pdev) { struct vprbrd_i2c *vb_i2c = platform_get_drvdata(pdev); i2c_del_adapter(&vb_i2c->i2c); } static struct platform_driver vprbrd_i2c_driver = { .driver.name = "viperboard-i2c", .driver.owner = THIS_MODULE, .probe = vprbrd_i2c_probe, .remove_new = vprbrd_i2c_remove, }; static int __init vprbrd_i2c_init(void) { switch (i2c_bus_freq) { case 6000: i2c_bus_param = VPRBRD_I2C_FREQ_6MHZ; break; case 3000: i2c_bus_param = VPRBRD_I2C_FREQ_3MHZ; break; case 1000: i2c_bus_param = VPRBRD_I2C_FREQ_1MHZ; break; case 400: i2c_bus_param = VPRBRD_I2C_FREQ_400KHZ; break; case 200: i2c_bus_param = 
VPRBRD_I2C_FREQ_200KHZ; break; case 100: i2c_bus_param = VPRBRD_I2C_FREQ_100KHZ; break; case 10: i2c_bus_param = VPRBRD_I2C_FREQ_10KHZ; break; default: pr_warn("invalid i2c_bus_freq (%d)\n", i2c_bus_freq); i2c_bus_param = VPRBRD_I2C_FREQ_100KHZ; } return platform_driver_register(&vprbrd_i2c_driver); } subsys_initcall(vprbrd_i2c_init); static void __exit vprbrd_i2c_exit(void) { platform_driver_unregister(&vprbrd_i2c_driver); } module_exit(vprbrd_i2c_exit); MODULE_AUTHOR("Lars Poeschel <[email protected]>"); MODULE_DESCRIPTION("I2C master driver for Nano River Techs Viperboard"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:viperboard-i2c");
linux-master
drivers/i2c/busses/i2c-viperboard.c
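/*
 * Illustrative userspace sketch (not part of the driver above): querying an
 * adapter's capabilities with the I2C_FUNCS ioctl. The Viperboard adapter
 * reports I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL, so both raw I2C messages and
 * emulated SMBus calls are available. The bus number is an assumption.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

int main(void)
{
	unsigned long funcs = 0;
	int fd = open("/dev/i2c-0", O_RDWR);		/* assumed bus number */

	if (fd < 0 || ioctl(fd, I2C_FUNCS, &funcs) < 0) {
		perror("I2C_FUNCS");
		return 1;
	}
	printf("raw I2C: %s, SMBus byte data: %s\n",
	       (funcs & I2C_FUNC_I2C) ? "yes" : "no",
	       (funcs & I2C_FUNC_SMBUS_BYTE_DATA) ? "yes" : "no");
	close(fd);
	return 0;
}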
// SPDX-License-Identifier: GPL-2.0-only /* i2c-isch.c - Linux kernel driver for Intel SCH chipset SMBus - Based on i2c-piix4.c Copyright (c) 1998 - 2002 Frodo Looijaard <[email protected]> and Philip Edelbrock <[email protected]> - Intel SCH support Copyright (c) 2007 - 2008 Jacob Jun Pan <[email protected]> */ /* Supports: Intel SCH chipsets (AF82US15W, AF82US15L, AF82UL11L) Note: we assume there can only be one device, with one SMBus interface. */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/stddef.h> #include <linux/ioport.h> #include <linux/i2c.h> #include <linux/io.h> /* SCH SMBus address offsets */ #define SMBHSTCNT (0 + sch_smba) #define SMBHSTSTS (1 + sch_smba) #define SMBHSTCLK (2 + sch_smba) #define SMBHSTADD (4 + sch_smba) /* TSA */ #define SMBHSTCMD (5 + sch_smba) #define SMBHSTDAT0 (6 + sch_smba) #define SMBHSTDAT1 (7 + sch_smba) #define SMBBLKDAT (0x20 + sch_smba) /* Other settings */ #define MAX_RETRIES 5000 /* I2C constants */ #define SCH_QUICK 0x00 #define SCH_BYTE 0x01 #define SCH_BYTE_DATA 0x02 #define SCH_WORD_DATA 0x03 #define SCH_BLOCK_DATA 0x05 static unsigned short sch_smba; static struct i2c_adapter sch_adapter; static int backbone_speed = 33000; /* backbone speed in kHz */ module_param(backbone_speed, int, S_IRUSR | S_IWUSR); MODULE_PARM_DESC(backbone_speed, "Backbone speed in kHz, (default = 33000)"); /* * Start the i2c transaction -- the i2c_access will prepare the transaction * and this function will execute it. * return 0 for success and others for failure. */ static int sch_transaction(void) { int temp; int result = 0; int retries = 0; dev_dbg(&sch_adapter.dev, "Transaction (pre): CNT=%02x, CMD=%02x, " "ADD=%02x, DAT0=%02x, DAT1=%02x\n", inb(SMBHSTCNT), inb(SMBHSTCMD), inb(SMBHSTADD), inb(SMBHSTDAT0), inb(SMBHSTDAT1)); /* Make sure the SMBus host is ready to start transmitting */ temp = inb(SMBHSTSTS) & 0x0f; if (temp) { /* Can not be busy since we checked it in sch_access */ if (temp & 0x01) { dev_dbg(&sch_adapter.dev, "Completion (%02x). " "Clear...\n", temp); } if (temp & 0x06) { dev_dbg(&sch_adapter.dev, "SMBus error (%02x). " "Resetting...\n", temp); } outb(temp, SMBHSTSTS); temp = inb(SMBHSTSTS) & 0x0f; if (temp) { dev_err(&sch_adapter.dev, "SMBus is not ready: (%02x)\n", temp); return -EAGAIN; } } /* start the transaction by setting bit 4 */ outb(inb(SMBHSTCNT) | 0x10, SMBHSTCNT); do { usleep_range(100, 200); temp = inb(SMBHSTSTS) & 0x0f; } while ((temp & 0x08) && (retries++ < MAX_RETRIES)); /* If the SMBus is still busy, we give up */ if (retries > MAX_RETRIES) { dev_err(&sch_adapter.dev, "SMBus Timeout!\n"); result = -ETIMEDOUT; } if (temp & 0x04) { result = -EIO; dev_dbg(&sch_adapter.dev, "Bus collision! SMBus may be " "locked until next hard reset. 
(sorry!)\n"); /* Clock stops and slave is stuck in mid-transmission */ } else if (temp & 0x02) { result = -EIO; dev_err(&sch_adapter.dev, "Error: no response!\n"); } else if (temp & 0x01) { dev_dbg(&sch_adapter.dev, "Post complete!\n"); outb(temp, SMBHSTSTS); temp = inb(SMBHSTSTS) & 0x07; if (temp & 0x06) { /* Completion clear failed */ dev_dbg(&sch_adapter.dev, "Failed reset at end of " "transaction (%02x), Bus error!\n", temp); } } else { result = -ENXIO; dev_dbg(&sch_adapter.dev, "No such address.\n"); } dev_dbg(&sch_adapter.dev, "Transaction (post): CNT=%02x, CMD=%02x, " "ADD=%02x, DAT0=%02x, DAT1=%02x\n", inb(SMBHSTCNT), inb(SMBHSTCMD), inb(SMBHSTADD), inb(SMBHSTDAT0), inb(SMBHSTDAT1)); return result; } /* * This is the main access entry for i2c-sch access * adap is i2c_adapter pointer, addr is the i2c device bus address, read_write * (0 for read and 1 for write), size is i2c transaction type and data is the * union of transaction for data to be transferred or data read from bus. * return 0 for success and others for failure. */ static s32 sch_access(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { int i, len, temp, rc; /* Make sure the SMBus host is not busy */ temp = inb(SMBHSTSTS) & 0x0f; if (temp & 0x08) { dev_dbg(&sch_adapter.dev, "SMBus busy (%02x)\n", temp); return -EAGAIN; } temp = inw(SMBHSTCLK); if (!temp) { /* * We can't determine if we have 33 or 25 MHz clock for * SMBus, so expect 33 MHz and calculate a bus clock of * 100 kHz. If we actually run at 25 MHz the bus will be * run ~75 kHz instead which should do no harm. */ dev_notice(&sch_adapter.dev, "Clock divider uninitialized. Setting defaults\n"); outw(backbone_speed / (4 * 100), SMBHSTCLK); } dev_dbg(&sch_adapter.dev, "access size: %d %s\n", size, (read_write)?"READ":"WRITE"); switch (size) { case I2C_SMBUS_QUICK: outb((addr << 1) | read_write, SMBHSTADD); size = SCH_QUICK; break; case I2C_SMBUS_BYTE: outb((addr << 1) | read_write, SMBHSTADD); if (read_write == I2C_SMBUS_WRITE) outb(command, SMBHSTCMD); size = SCH_BYTE; break; case I2C_SMBUS_BYTE_DATA: outb((addr << 1) | read_write, SMBHSTADD); outb(command, SMBHSTCMD); if (read_write == I2C_SMBUS_WRITE) outb(data->byte, SMBHSTDAT0); size = SCH_BYTE_DATA; break; case I2C_SMBUS_WORD_DATA: outb((addr << 1) | read_write, SMBHSTADD); outb(command, SMBHSTCMD); if (read_write == I2C_SMBUS_WRITE) { outb(data->word & 0xff, SMBHSTDAT0); outb((data->word & 0xff00) >> 8, SMBHSTDAT1); } size = SCH_WORD_DATA; break; case I2C_SMBUS_BLOCK_DATA: outb((addr << 1) | read_write, SMBHSTADD); outb(command, SMBHSTCMD); if (read_write == I2C_SMBUS_WRITE) { len = data->block[0]; if (len == 0 || len > I2C_SMBUS_BLOCK_MAX) return -EINVAL; outb(len, SMBHSTDAT0); for (i = 1; i <= len; i++) outb(data->block[i], SMBBLKDAT+i-1); } size = SCH_BLOCK_DATA; break; default: dev_warn(&adap->dev, "Unsupported transaction %d\n", size); return -EOPNOTSUPP; } dev_dbg(&sch_adapter.dev, "write size %d to 0x%04x\n", size, SMBHSTCNT); outb((inb(SMBHSTCNT) & 0xb0) | (size & 0x7), SMBHSTCNT); rc = sch_transaction(); if (rc) /* Error in transaction */ return rc; if ((read_write == I2C_SMBUS_WRITE) || (size == SCH_QUICK)) return 0; switch (size) { case SCH_BYTE: case SCH_BYTE_DATA: data->byte = inb(SMBHSTDAT0); break; case SCH_WORD_DATA: data->word = inb(SMBHSTDAT0) + (inb(SMBHSTDAT1) << 8); break; case SCH_BLOCK_DATA: data->block[0] = inb(SMBHSTDAT0); if (data->block[0] == 0 || data->block[0] > I2C_SMBUS_BLOCK_MAX) return -EPROTO; for (i = 
1; i <= data->block[0]; i++) data->block[i] = inb(SMBBLKDAT+i-1); break; } return 0; } static u32 sch_func(struct i2c_adapter *adapter) { return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA; } static const struct i2c_algorithm smbus_algorithm = { .smbus_xfer = sch_access, .functionality = sch_func, }; static struct i2c_adapter sch_adapter = { .owner = THIS_MODULE, .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, .algo = &smbus_algorithm, }; static int smbus_sch_probe(struct platform_device *dev) { struct resource *res; int retval; res = platform_get_resource(dev, IORESOURCE_IO, 0); if (!res) return -EBUSY; if (!devm_request_region(&dev->dev, res->start, resource_size(res), dev->name)) { dev_err(&dev->dev, "SMBus region 0x%x already in use!\n", sch_smba); return -EBUSY; } sch_smba = res->start; dev_dbg(&dev->dev, "SMBA = 0x%X\n", sch_smba); /* set up the sysfs linkage to our parent device */ sch_adapter.dev.parent = &dev->dev; snprintf(sch_adapter.name, sizeof(sch_adapter.name), "SMBus SCH adapter at %04x", sch_smba); retval = i2c_add_adapter(&sch_adapter); if (retval) sch_smba = 0; return retval; } static void smbus_sch_remove(struct platform_device *pdev) { if (sch_smba) { i2c_del_adapter(&sch_adapter); sch_smba = 0; } } static struct platform_driver smbus_sch_driver = { .driver = { .name = "isch_smbus", }, .probe = smbus_sch_probe, .remove_new = smbus_sch_remove, }; module_platform_driver(smbus_sch_driver); MODULE_AUTHOR("Jacob Pan <[email protected]>"); MODULE_DESCRIPTION("Intel SCH SMBus driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:isch_smbus");
linux-master
drivers/i2c/busses/i2c-isch.c
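/*
 * Illustrative userspace sketch (not part of the driver above): an SMBus
 * block write through the raw I2C_SMBUS ioctl, exercising the
 * I2C_SMBUS_BLOCK_DATA path handled by sch_access() above. The length byte
 * goes in data.block[0] and the payload in data.block[1..]. The bus number,
 * slave address (0x50) and command byte (0x20) are assumptions for the
 * example only.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

int main(void)
{
	static const unsigned char payload[] = { 0xde, 0xad, 0xbe, 0xef };
	union i2c_smbus_data data;
	struct i2c_smbus_ioctl_data args = {
		.read_write = I2C_SMBUS_WRITE,
		.command = 0x20,			/* assumed command/register */
		.size = I2C_SMBUS_BLOCK_DATA,
		.data = &data,
	};
	int fd = open("/dev/i2c-0", O_RDWR);		/* assumed bus number */

	data.block[0] = sizeof(payload);		/* block length, max 32 */
	memcpy(&data.block[1], payload, sizeof(payload));

	if (fd < 0)
		return 1;
	if (ioctl(fd, I2C_SLAVE, 0x50) < 0 ||		/* assumed slave address */
	    ioctl(fd, I2C_SMBUS, &args) < 0) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}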
// SPDX-License-Identifier: GPL-2.0-only /* * This driver implements I2C master functionality using the LSI API2C * controller. * * NOTE: The controller has a limitation in that it can only do transfers of * maximum 255 bytes at a time. If a larger transfer is attempted, error code * (-EINVAL) is returned. */ #include <linux/clk.h> #include <linux/clkdev.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/platform_device.h> #define SCL_WAIT_TIMEOUT_NS 25000000 #define I2C_XFER_TIMEOUT (msecs_to_jiffies(250)) #define I2C_STOP_TIMEOUT (msecs_to_jiffies(100)) #define FIFO_SIZE 8 #define SEQ_LEN 2 #define GLOBAL_CONTROL 0x00 #define GLOBAL_MST_EN BIT(0) #define GLOBAL_SLV_EN BIT(1) #define GLOBAL_IBML_EN BIT(2) #define INTERRUPT_STATUS 0x04 #define INTERRUPT_ENABLE 0x08 #define INT_SLV BIT(1) #define INT_MST BIT(0) #define WAIT_TIMER_CONTROL 0x0c #define WT_EN BIT(15) #define WT_VALUE(_x) ((_x) & 0x7fff) #define IBML_TIMEOUT 0x10 #define IBML_LOW_MEXT 0x14 #define IBML_LOW_SEXT 0x18 #define TIMER_CLOCK_DIV 0x1c #define I2C_BUS_MONITOR 0x20 #define BM_SDAC BIT(3) #define BM_SCLC BIT(2) #define BM_SDAS BIT(1) #define BM_SCLS BIT(0) #define SOFT_RESET 0x24 #define MST_COMMAND 0x28 #define CMD_BUSY (1<<3) #define CMD_MANUAL (0x00 | CMD_BUSY) #define CMD_AUTO (0x01 | CMD_BUSY) #define CMD_SEQUENCE (0x02 | CMD_BUSY) #define MST_RX_XFER 0x2c #define MST_TX_XFER 0x30 #define MST_ADDR_1 0x34 #define MST_ADDR_2 0x38 #define MST_DATA 0x3c #define MST_TX_FIFO 0x40 #define MST_RX_FIFO 0x44 #define MST_INT_ENABLE 0x48 #define MST_INT_STATUS 0x4c #define MST_STATUS_RFL (1 << 13) /* RX FIFO serivce */ #define MST_STATUS_TFL (1 << 12) /* TX FIFO service */ #define MST_STATUS_SNS (1 << 11) /* Manual mode done */ #define MST_STATUS_SS (1 << 10) /* Automatic mode done */ #define MST_STATUS_SCC (1 << 9) /* Stop complete */ #define MST_STATUS_IP (1 << 8) /* Invalid parameter */ #define MST_STATUS_TSS (1 << 7) /* Timeout */ #define MST_STATUS_AL (1 << 6) /* Arbitration lost */ #define MST_STATUS_ND (1 << 5) /* NAK on data phase */ #define MST_STATUS_NA (1 << 4) /* NAK on address phase */ #define MST_STATUS_NAK (MST_STATUS_NA | \ MST_STATUS_ND) #define MST_STATUS_ERR (MST_STATUS_NAK | \ MST_STATUS_AL | \ MST_STATUS_IP) #define MST_TX_BYTES_XFRD 0x50 #define MST_RX_BYTES_XFRD 0x54 #define SLV_ADDR_DEC_CTL 0x58 #define SLV_ADDR_DEC_GCE BIT(0) /* ACK to General Call Address from own master (loopback) */ #define SLV_ADDR_DEC_OGCE BIT(1) /* ACK to General Call Address from external masters */ #define SLV_ADDR_DEC_SA1E BIT(2) /* ACK to addr_1 enabled */ #define SLV_ADDR_DEC_SA1M BIT(3) /* 10-bit addressing for addr_1 enabled */ #define SLV_ADDR_DEC_SA2E BIT(4) /* ACK to addr_2 enabled */ #define SLV_ADDR_DEC_SA2M BIT(5) /* 10-bit addressing for addr_2 enabled */ #define SLV_ADDR_1 0x5c #define SLV_ADDR_2 0x60 #define SLV_RX_CTL 0x64 #define SLV_RX_ACSA1 BIT(0) /* Generate ACK for writes to addr_1 */ #define SLV_RX_ACSA2 BIT(1) /* Generate ACK for writes to addr_2 */ #define SLV_RX_ACGCA BIT(2) /* ACK data phase transfers to General Call Address */ #define SLV_DATA 0x68 #define SLV_RX_FIFO 0x6c #define SLV_FIFO_DV1 BIT(0) /* Data Valid for addr_1 */ #define SLV_FIFO_DV2 BIT(1) /* Data Valid for addr_2 */ #define SLV_FIFO_AS BIT(2) /* (N)ACK Sent */ #define SLV_FIFO_TNAK BIT(3) /* Timeout NACK */ #define SLV_FIFO_STRC BIT(4) /* First byte after start condition 
received */ #define SLV_FIFO_RSC BIT(5) /* Repeated Start Condition */ #define SLV_FIFO_STPC BIT(6) /* Stop Condition */ #define SLV_FIFO_DV (SLV_FIFO_DV1 | SLV_FIFO_DV2) #define SLV_INT_ENABLE 0x70 #define SLV_INT_STATUS 0x74 #define SLV_STATUS_RFH BIT(0) /* FIFO service */ #define SLV_STATUS_WTC BIT(1) /* Write transfer complete */ #define SLV_STATUS_SRS1 BIT(2) /* Slave read from addr 1 */ #define SLV_STATUS_SRRS1 BIT(3) /* Repeated start from addr 1 */ #define SLV_STATUS_SRND1 BIT(4) /* Read request not following start condition */ #define SLV_STATUS_SRC1 BIT(5) /* Read canceled */ #define SLV_STATUS_SRAT1 BIT(6) /* Slave Read timed out */ #define SLV_STATUS_SRDRE1 BIT(7) /* Data written after timed out */ #define SLV_READ_DUMMY 0x78 #define SCL_HIGH_PERIOD 0x80 #define SCL_LOW_PERIOD 0x84 #define SPIKE_FLTR_LEN 0x88 #define SDA_SETUP_TIME 0x8c #define SDA_HOLD_TIME 0x90 /** * struct axxia_i2c_dev - I2C device context * @base: pointer to register struct * @msg: pointer to current message * @msg_r: pointer to current read message (sequence transfer) * @msg_xfrd: number of bytes transferred in tx_fifo * @msg_xfrd_r: number of bytes transferred in rx_fifo * @msg_err: error code for completed message * @msg_complete: xfer completion object * @dev: device reference * @adapter: core i2c abstraction * @i2c_clk: clock reference for i2c input clock * @bus_clk_rate: current i2c bus clock rate * @last: a flag indicating is this is last message in transfer */ struct axxia_i2c_dev { void __iomem *base; struct i2c_msg *msg; struct i2c_msg *msg_r; size_t msg_xfrd; size_t msg_xfrd_r; int msg_err; struct completion msg_complete; struct device *dev; struct i2c_adapter adapter; struct clk *i2c_clk; u32 bus_clk_rate; bool last; struct i2c_client *slave; int irq; }; static void i2c_int_disable(struct axxia_i2c_dev *idev, u32 mask) { u32 int_en; int_en = readl(idev->base + MST_INT_ENABLE); writel(int_en & ~mask, idev->base + MST_INT_ENABLE); } static void i2c_int_enable(struct axxia_i2c_dev *idev, u32 mask) { u32 int_en; int_en = readl(idev->base + MST_INT_ENABLE); writel(int_en | mask, idev->base + MST_INT_ENABLE); } /** * ns_to_clk - Convert time (ns) to clock cycles for the given clock frequency. 
*/ static u32 ns_to_clk(u64 ns, u32 clk_mhz) { return div_u64(ns * clk_mhz, 1000); } static int axxia_i2c_init(struct axxia_i2c_dev *idev) { u32 divisor = clk_get_rate(idev->i2c_clk) / idev->bus_clk_rate; u32 clk_mhz = clk_get_rate(idev->i2c_clk) / 1000000; u32 t_setup; u32 t_high, t_low; u32 tmo_clk; u32 prescale; unsigned long timeout; dev_dbg(idev->dev, "rate=%uHz per_clk=%uMHz -> ratio=1:%u\n", idev->bus_clk_rate, clk_mhz, divisor); /* Reset controller */ writel(0x01, idev->base + SOFT_RESET); timeout = jiffies + msecs_to_jiffies(100); while (readl(idev->base + SOFT_RESET) & 1) { if (time_after(jiffies, timeout)) { dev_warn(idev->dev, "Soft reset failed\n"); break; } } /* Enable Master Mode */ writel(0x1, idev->base + GLOBAL_CONTROL); if (idev->bus_clk_rate <= I2C_MAX_STANDARD_MODE_FREQ) { /* Standard mode SCL 50/50, tSU:DAT = 250 ns */ t_high = divisor * 1 / 2; t_low = divisor * 1 / 2; t_setup = ns_to_clk(250, clk_mhz); } else { /* Fast mode SCL 33/66, tSU:DAT = 100 ns */ t_high = divisor * 1 / 3; t_low = divisor * 2 / 3; t_setup = ns_to_clk(100, clk_mhz); } /* SCL High Time */ writel(t_high, idev->base + SCL_HIGH_PERIOD); /* SCL Low Time */ writel(t_low, idev->base + SCL_LOW_PERIOD); /* SDA Setup Time */ writel(t_setup, idev->base + SDA_SETUP_TIME); /* SDA Hold Time, 300ns */ writel(ns_to_clk(300, clk_mhz), idev->base + SDA_HOLD_TIME); /* Filter <50ns spikes */ writel(ns_to_clk(50, clk_mhz), idev->base + SPIKE_FLTR_LEN); /* Configure Time-Out Registers */ tmo_clk = ns_to_clk(SCL_WAIT_TIMEOUT_NS, clk_mhz); /* Find prescaler value that makes tmo_clk fit in 15-bits counter. */ for (prescale = 0; prescale < 15; ++prescale) { if (tmo_clk <= 0x7fff) break; tmo_clk >>= 1; } if (tmo_clk > 0x7fff) tmo_clk = 0x7fff; /* Prescale divider (log2) */ writel(prescale, idev->base + TIMER_CLOCK_DIV); /* Timeout in divided clocks */ writel(WT_EN | WT_VALUE(tmo_clk), idev->base + WAIT_TIMER_CONTROL); /* Mask all master interrupt bits */ i2c_int_disable(idev, ~0); /* Interrupt enable */ writel(0x01, idev->base + INTERRUPT_ENABLE); return 0; } static int i2c_m_rd(const struct i2c_msg *msg) { return (msg->flags & I2C_M_RD) != 0; } static int i2c_m_ten(const struct i2c_msg *msg) { return (msg->flags & I2C_M_TEN) != 0; } static int i2c_m_recv_len(const struct i2c_msg *msg) { return (msg->flags & I2C_M_RECV_LEN) != 0; } /** * axxia_i2c_empty_rx_fifo - Fetch data from RX FIFO and update SMBus block * transfer length if this is the first byte of such a transfer. */ static int axxia_i2c_empty_rx_fifo(struct axxia_i2c_dev *idev) { struct i2c_msg *msg = idev->msg_r; size_t rx_fifo_avail = readl(idev->base + MST_RX_FIFO); int bytes_to_transfer = min(rx_fifo_avail, msg->len - idev->msg_xfrd_r); while (bytes_to_transfer-- > 0) { int c = readl(idev->base + MST_DATA); if (idev->msg_xfrd_r == 0 && i2c_m_recv_len(msg)) { /* * Check length byte for SMBus block read */ if (c <= 0 || c > I2C_SMBUS_BLOCK_MAX) { idev->msg_err = -EPROTO; i2c_int_disable(idev, ~MST_STATUS_TSS); complete(&idev->msg_complete); break; } msg->len = 1 + c; writel(msg->len, idev->base + MST_RX_XFER); } msg->buf[idev->msg_xfrd_r++] = c; } return 0; } /** * axxia_i2c_fill_tx_fifo - Fill TX FIFO from current message buffer. * @return: Number of bytes left to transfer. 
*/ static int axxia_i2c_fill_tx_fifo(struct axxia_i2c_dev *idev) { struct i2c_msg *msg = idev->msg; size_t tx_fifo_avail = FIFO_SIZE - readl(idev->base + MST_TX_FIFO); int bytes_to_transfer = min(tx_fifo_avail, msg->len - idev->msg_xfrd); int ret = msg->len - idev->msg_xfrd - bytes_to_transfer; while (bytes_to_transfer-- > 0) writel(msg->buf[idev->msg_xfrd++], idev->base + MST_DATA); return ret; } static void axxia_i2c_slv_fifo_event(struct axxia_i2c_dev *idev) { u32 fifo_status = readl(idev->base + SLV_RX_FIFO); u8 val; dev_dbg(idev->dev, "slave irq fifo_status=0x%x\n", fifo_status); if (fifo_status & SLV_FIFO_DV1) { if (fifo_status & SLV_FIFO_STRC) i2c_slave_event(idev->slave, I2C_SLAVE_WRITE_REQUESTED, &val); val = readl(idev->base + SLV_DATA); i2c_slave_event(idev->slave, I2C_SLAVE_WRITE_RECEIVED, &val); } if (fifo_status & SLV_FIFO_STPC) { readl(idev->base + SLV_DATA); /* dummy read */ i2c_slave_event(idev->slave, I2C_SLAVE_STOP, &val); } if (fifo_status & SLV_FIFO_RSC) readl(idev->base + SLV_DATA); /* dummy read */ } static irqreturn_t axxia_i2c_slv_isr(struct axxia_i2c_dev *idev) { u32 status = readl(idev->base + SLV_INT_STATUS); u8 val; dev_dbg(idev->dev, "slave irq status=0x%x\n", status); if (status & SLV_STATUS_RFH) axxia_i2c_slv_fifo_event(idev); if (status & SLV_STATUS_SRS1) { i2c_slave_event(idev->slave, I2C_SLAVE_READ_REQUESTED, &val); writel(val, idev->base + SLV_DATA); } if (status & SLV_STATUS_SRND1) { i2c_slave_event(idev->slave, I2C_SLAVE_READ_PROCESSED, &val); writel(val, idev->base + SLV_DATA); } if (status & SLV_STATUS_SRC1) i2c_slave_event(idev->slave, I2C_SLAVE_STOP, &val); writel(INT_SLV, idev->base + INTERRUPT_STATUS); return IRQ_HANDLED; } static irqreturn_t axxia_i2c_isr(int irq, void *_dev) { struct axxia_i2c_dev *idev = _dev; irqreturn_t ret = IRQ_NONE; u32 status; status = readl(idev->base + INTERRUPT_STATUS); if (status & INT_SLV) ret = axxia_i2c_slv_isr(idev); if (!(status & INT_MST)) return ret; /* Read interrupt status bits */ status = readl(idev->base + MST_INT_STATUS); if (!idev->msg) { dev_warn(idev->dev, "unexpected interrupt\n"); goto out; } /* RX FIFO needs service? */ if (i2c_m_rd(idev->msg_r) && (status & MST_STATUS_RFL)) axxia_i2c_empty_rx_fifo(idev); /* TX FIFO needs service? */ if (!i2c_m_rd(idev->msg) && (status & MST_STATUS_TFL)) { if (axxia_i2c_fill_tx_fifo(idev) == 0) i2c_int_disable(idev, MST_STATUS_TFL); } if (unlikely(status & MST_STATUS_ERR)) { /* Transfer error */ i2c_int_disable(idev, ~0); if (status & MST_STATUS_AL) idev->msg_err = -EAGAIN; else if (status & MST_STATUS_NAK) idev->msg_err = -ENXIO; else idev->msg_err = -EIO; dev_dbg(idev->dev, "error %#x, addr=%#x rx=%u/%u tx=%u/%u\n", status, idev->msg->addr, readl(idev->base + MST_RX_BYTES_XFRD), readl(idev->base + MST_RX_XFER), readl(idev->base + MST_TX_BYTES_XFRD), readl(idev->base + MST_TX_XFER)); complete(&idev->msg_complete); } else if (status & MST_STATUS_SCC) { /* Stop completed */ i2c_int_disable(idev, ~MST_STATUS_TSS); complete(&idev->msg_complete); } else if (status & (MST_STATUS_SNS | MST_STATUS_SS)) { /* Transfer done */ int mask = idev->last ? 
~0 : ~MST_STATUS_TSS; i2c_int_disable(idev, mask); if (i2c_m_rd(idev->msg_r) && idev->msg_xfrd_r < idev->msg_r->len) axxia_i2c_empty_rx_fifo(idev); complete(&idev->msg_complete); } else if (status & MST_STATUS_TSS) { /* Transfer timeout */ idev->msg_err = -ETIMEDOUT; i2c_int_disable(idev, ~MST_STATUS_TSS); complete(&idev->msg_complete); } out: /* Clear interrupt */ writel(INT_MST, idev->base + INTERRUPT_STATUS); return IRQ_HANDLED; } static void axxia_i2c_set_addr(struct axxia_i2c_dev *idev, struct i2c_msg *msg) { u32 addr_1, addr_2; if (i2c_m_ten(msg)) { /* 10-bit address * addr_1: 5'b11110 | addr[9:8] | (R/nW) * addr_2: addr[7:0] */ addr_1 = 0xF0 | ((msg->addr >> 7) & 0x06); if (i2c_m_rd(msg)) addr_1 |= 1; /* Set the R/nW bit of the address */ addr_2 = msg->addr & 0xFF; } else { /* 7-bit address * addr_1: addr[6:0] | (R/nW) * addr_2: dont care */ addr_1 = i2c_8bit_addr_from_msg(msg); addr_2 = 0; } writel(addr_1, idev->base + MST_ADDR_1); writel(addr_2, idev->base + MST_ADDR_2); } /* The NAK interrupt will be sent _before_ issuing STOP command * so the controller might still be busy processing it. No * interrupt will be sent at the end so we have to poll for it */ static int axxia_i2c_handle_seq_nak(struct axxia_i2c_dev *idev) { unsigned long timeout = jiffies + I2C_XFER_TIMEOUT; do { if ((readl(idev->base + MST_COMMAND) & CMD_BUSY) == 0) return 0; usleep_range(1, 100); } while (time_before(jiffies, timeout)); return -ETIMEDOUT; } static int axxia_i2c_xfer_seq(struct axxia_i2c_dev *idev, struct i2c_msg msgs[]) { u32 int_mask = MST_STATUS_ERR | MST_STATUS_SS | MST_STATUS_RFL; u32 rlen = i2c_m_recv_len(&msgs[1]) ? I2C_SMBUS_BLOCK_MAX : msgs[1].len; unsigned long time_left; axxia_i2c_set_addr(idev, &msgs[0]); writel(msgs[0].len, idev->base + MST_TX_XFER); writel(rlen, idev->base + MST_RX_XFER); idev->msg = &msgs[0]; idev->msg_r = &msgs[1]; idev->msg_xfrd = 0; idev->msg_xfrd_r = 0; idev->last = true; axxia_i2c_fill_tx_fifo(idev); writel(CMD_SEQUENCE, idev->base + MST_COMMAND); reinit_completion(&idev->msg_complete); i2c_int_enable(idev, int_mask); time_left = wait_for_completion_timeout(&idev->msg_complete, I2C_XFER_TIMEOUT); if (idev->msg_err == -ENXIO) { if (axxia_i2c_handle_seq_nak(idev)) axxia_i2c_init(idev); } else if (readl(idev->base + MST_COMMAND) & CMD_BUSY) { dev_warn(idev->dev, "busy after xfer\n"); } if (time_left == 0) { idev->msg_err = -ETIMEDOUT; i2c_recover_bus(&idev->adapter); axxia_i2c_init(idev); } if (unlikely(idev->msg_err) && idev->msg_err != -ENXIO) axxia_i2c_init(idev); return idev->msg_err; } static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg, bool last) { u32 int_mask = MST_STATUS_ERR; u32 rx_xfer, tx_xfer; unsigned long time_left; unsigned int wt_value; idev->msg = msg; idev->msg_r = msg; idev->msg_xfrd = 0; idev->msg_xfrd_r = 0; idev->last = last; reinit_completion(&idev->msg_complete); axxia_i2c_set_addr(idev, msg); if (i2c_m_rd(msg)) { /* I2C read transfer */ rx_xfer = i2c_m_recv_len(msg) ? 
I2C_SMBUS_BLOCK_MAX : msg->len; tx_xfer = 0; } else { /* I2C write transfer */ rx_xfer = 0; tx_xfer = msg->len; } writel(rx_xfer, idev->base + MST_RX_XFER); writel(tx_xfer, idev->base + MST_TX_XFER); if (i2c_m_rd(msg)) int_mask |= MST_STATUS_RFL; else if (axxia_i2c_fill_tx_fifo(idev) != 0) int_mask |= MST_STATUS_TFL; wt_value = WT_VALUE(readl(idev->base + WAIT_TIMER_CONTROL)); /* Disable wait timer temporarly */ writel(wt_value, idev->base + WAIT_TIMER_CONTROL); /* Check if timeout error happened */ if (idev->msg_err) goto out; if (!last) { writel(CMD_MANUAL, idev->base + MST_COMMAND); int_mask |= MST_STATUS_SNS; } else { writel(CMD_AUTO, idev->base + MST_COMMAND); int_mask |= MST_STATUS_SS; } writel(WT_EN | wt_value, idev->base + WAIT_TIMER_CONTROL); i2c_int_enable(idev, int_mask); time_left = wait_for_completion_timeout(&idev->msg_complete, I2C_XFER_TIMEOUT); i2c_int_disable(idev, int_mask); if (readl(idev->base + MST_COMMAND) & CMD_BUSY) dev_warn(idev->dev, "busy after xfer\n"); if (time_left == 0) { idev->msg_err = -ETIMEDOUT; i2c_recover_bus(&idev->adapter); axxia_i2c_init(idev); } out: if (unlikely(idev->msg_err) && idev->msg_err != -ENXIO && idev->msg_err != -ETIMEDOUT) axxia_i2c_init(idev); return idev->msg_err; } /* This function checks if the msgs[] array contains messages compatible with * Sequence mode of operation. This mode assumes there will be exactly one * write of non-zero length followed by exactly one read of non-zero length, * both targeted at the same client device. */ static bool axxia_i2c_sequence_ok(struct i2c_msg msgs[], int num) { return num == SEQ_LEN && !i2c_m_rd(&msgs[0]) && i2c_m_rd(&msgs[1]) && msgs[0].len > 0 && msgs[0].len <= FIFO_SIZE && msgs[1].len > 0 && msgs[0].addr == msgs[1].addr; } static int axxia_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) { struct axxia_i2c_dev *idev = i2c_get_adapdata(adap); int i; int ret = 0; idev->msg_err = 0; if (axxia_i2c_sequence_ok(msgs, num)) { ret = axxia_i2c_xfer_seq(idev, msgs); return ret ? : SEQ_LEN; } i2c_int_enable(idev, MST_STATUS_TSS); for (i = 0; ret == 0 && i < num; ++i) ret = axxia_i2c_xfer_msg(idev, &msgs[i], i == (num - 1)); return ret ? 
: i; } static int axxia_i2c_get_scl(struct i2c_adapter *adap) { struct axxia_i2c_dev *idev = i2c_get_adapdata(adap); return !!(readl(idev->base + I2C_BUS_MONITOR) & BM_SCLS); } static void axxia_i2c_set_scl(struct i2c_adapter *adap, int val) { struct axxia_i2c_dev *idev = i2c_get_adapdata(adap); u32 tmp; /* Preserve SDA Control */ tmp = readl(idev->base + I2C_BUS_MONITOR) & BM_SDAC; if (!val) tmp |= BM_SCLC; writel(tmp, idev->base + I2C_BUS_MONITOR); } static int axxia_i2c_get_sda(struct i2c_adapter *adap) { struct axxia_i2c_dev *idev = i2c_get_adapdata(adap); return !!(readl(idev->base + I2C_BUS_MONITOR) & BM_SDAS); } static struct i2c_bus_recovery_info axxia_i2c_recovery_info = { .recover_bus = i2c_generic_scl_recovery, .get_scl = axxia_i2c_get_scl, .set_scl = axxia_i2c_set_scl, .get_sda = axxia_i2c_get_sda, }; static u32 axxia_i2c_func(struct i2c_adapter *adap) { u32 caps = (I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SMBUS_BLOCK_DATA); return caps; } static int axxia_i2c_reg_slave(struct i2c_client *slave) { struct axxia_i2c_dev *idev = i2c_get_adapdata(slave->adapter); u32 slv_int_mask = SLV_STATUS_RFH; u32 dec_ctl; if (idev->slave) return -EBUSY; idev->slave = slave; /* Enable slave mode as well */ writel(GLOBAL_MST_EN | GLOBAL_SLV_EN, idev->base + GLOBAL_CONTROL); writel(INT_MST | INT_SLV, idev->base + INTERRUPT_ENABLE); /* Set slave address */ dec_ctl = SLV_ADDR_DEC_SA1E; if (slave->flags & I2C_CLIENT_TEN) dec_ctl |= SLV_ADDR_DEC_SA1M; writel(SLV_RX_ACSA1, idev->base + SLV_RX_CTL); writel(dec_ctl, idev->base + SLV_ADDR_DEC_CTL); writel(slave->addr, idev->base + SLV_ADDR_1); /* Enable interrupts */ slv_int_mask |= SLV_STATUS_SRS1 | SLV_STATUS_SRRS1 | SLV_STATUS_SRND1; slv_int_mask |= SLV_STATUS_SRC1; writel(slv_int_mask, idev->base + SLV_INT_ENABLE); return 0; } static int axxia_i2c_unreg_slave(struct i2c_client *slave) { struct axxia_i2c_dev *idev = i2c_get_adapdata(slave->adapter); /* Disable slave mode */ writel(GLOBAL_MST_EN, idev->base + GLOBAL_CONTROL); writel(INT_MST, idev->base + INTERRUPT_ENABLE); synchronize_irq(idev->irq); idev->slave = NULL; return 0; } static const struct i2c_algorithm axxia_i2c_algo = { .master_xfer = axxia_i2c_xfer, .functionality = axxia_i2c_func, .reg_slave = axxia_i2c_reg_slave, .unreg_slave = axxia_i2c_unreg_slave, }; static const struct i2c_adapter_quirks axxia_i2c_quirks = { .max_read_len = 255, .max_write_len = 255, }; static int axxia_i2c_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct axxia_i2c_dev *idev = NULL; void __iomem *base; int ret = 0; idev = devm_kzalloc(&pdev->dev, sizeof(*idev), GFP_KERNEL); if (!idev) return -ENOMEM; base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) return PTR_ERR(base); idev->irq = platform_get_irq(pdev, 0); if (idev->irq < 0) return idev->irq; idev->i2c_clk = devm_clk_get(&pdev->dev, "i2c"); if (IS_ERR(idev->i2c_clk)) { dev_err(&pdev->dev, "missing clock\n"); return PTR_ERR(idev->i2c_clk); } idev->base = base; idev->dev = &pdev->dev; init_completion(&idev->msg_complete); of_property_read_u32(np, "clock-frequency", &idev->bus_clk_rate); if (idev->bus_clk_rate == 0) idev->bus_clk_rate = I2C_MAX_STANDARD_MODE_FREQ; /* default clock rate */ ret = clk_prepare_enable(idev->i2c_clk); if (ret) { dev_err(&pdev->dev, "failed to enable clock\n"); return ret; } ret = axxia_i2c_init(idev); if (ret) { dev_err(&pdev->dev, "failed to initialize\n"); goto error_disable_clk; } ret = devm_request_irq(&pdev->dev, idev->irq, axxia_i2c_isr, 0, 
pdev->name, idev); if (ret) { dev_err(&pdev->dev, "failed to claim IRQ%d\n", idev->irq); goto error_disable_clk; } i2c_set_adapdata(&idev->adapter, idev); strscpy(idev->adapter.name, pdev->name, sizeof(idev->adapter.name)); idev->adapter.owner = THIS_MODULE; idev->adapter.algo = &axxia_i2c_algo; idev->adapter.bus_recovery_info = &axxia_i2c_recovery_info; idev->adapter.quirks = &axxia_i2c_quirks; idev->adapter.dev.parent = &pdev->dev; idev->adapter.dev.of_node = pdev->dev.of_node; platform_set_drvdata(pdev, idev); ret = i2c_add_adapter(&idev->adapter); if (ret) goto error_disable_clk; return 0; error_disable_clk: clk_disable_unprepare(idev->i2c_clk); return ret; } static void axxia_i2c_remove(struct platform_device *pdev) { struct axxia_i2c_dev *idev = platform_get_drvdata(pdev); clk_disable_unprepare(idev->i2c_clk); i2c_del_adapter(&idev->adapter); } /* Match table for of_platform binding */ static const struct of_device_id axxia_i2c_of_match[] = { { .compatible = "lsi,api2c", }, {}, }; MODULE_DEVICE_TABLE(of, axxia_i2c_of_match); static struct platform_driver axxia_i2c_driver = { .probe = axxia_i2c_probe, .remove_new = axxia_i2c_remove, .driver = { .name = "axxia-i2c", .of_match_table = axxia_i2c_of_match, }, }; module_platform_driver(axxia_i2c_driver); MODULE_DESCRIPTION("Axxia I2C Bus driver"); MODULE_AUTHOR("Anders Berg <[email protected]>"); MODULE_LICENSE("GPL v2");
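A minimal userspace sketch of the timing math used by axxia_i2c_init() above, not part of the driver itself. It assumes a hypothetical 125 MHz peripheral clock and a 100 kHz (standard-mode) bus purely for demonstration, and reproduces ns_to_clk(), the 50/50 standard-mode SCL duty split, the fixed 300 ns SDA hold and 50 ns spike-filter values, and the wait-timer prescale loop that shifts the 25 ms SCL timeout until it fits in the 15-bit counter.

#include <stdio.h>
#include <stdint.h>

/* Same conversion as the driver's ns_to_clk(): time in ns -> clock cycles. */
static uint32_t ns_to_clk(uint64_t ns, uint32_t clk_mhz)
{
	return (uint32_t)((ns * clk_mhz) / 1000);
}

int main(void)
{
	uint32_t per_clk_hz = 125000000;	/* assumed input clock */
	uint32_t bus_hz = 100000;		/* standard mode */
	uint32_t divisor = per_clk_hz / bus_hz;
	uint32_t clk_mhz = per_clk_hz / 1000000;
	/* Standard mode: SCL 50/50, tSU:DAT = 250 ns
	 * (fast mode would use a 33/66 split and 100 ns). */
	uint32_t t_high = divisor / 2;
	uint32_t t_low = divisor / 2;
	uint32_t t_setup = ns_to_clk(250, clk_mhz);
	uint32_t t_hold = ns_to_clk(300, clk_mhz);
	uint32_t spike = ns_to_clk(50, clk_mhz);

	/* Wait timer: halve the 25 ms timeout until it fits in 15 bits,
	 * counting the number of halvings as the prescale (log2) value. */
	uint32_t tmo_clk = ns_to_clk(25000000, clk_mhz);
	uint32_t prescale;

	for (prescale = 0; prescale < 15; ++prescale) {
		if (tmo_clk <= 0x7fff)
			break;
		tmo_clk >>= 1;
	}

	printf("SCL_HIGH=%u SCL_LOW=%u SDA_SETUP=%u SDA_HOLD=%u SPIKE=%u\n",
	       t_high, t_low, t_setup, t_hold, spike);
	printf("TIMER_CLOCK_DIV=%u WAIT_TIMER=%u\n", prescale, tmo_clk);
	return 0;
}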
linux-master
drivers/i2c/busses/i2c-axxia.c
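Another standalone sketch, this time of the address encoding performed by axxia_i2c_set_addr() in the driver above: for 10-bit targets MST_ADDR_1 carries the 11110 marker, address bits 9:8 and the R/nW bit while MST_ADDR_2 carries bits 7:0; for 7-bit targets MST_ADDR_1 is the usual 8-bit address byte and MST_ADDR_2 is unused. The helper name encode_addr() and the example addresses are hypothetical and only illustrate the bit layout.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical helper mirroring the register layout written by the driver. */
static void encode_addr(uint16_t addr, int is_read, int ten_bit,
			uint32_t *addr_1, uint32_t *addr_2)
{
	if (ten_bit) {
		/* addr_1 = 5'b11110 | addr[9:8] | R/nW, addr_2 = addr[7:0] */
		*addr_1 = 0xF0 | ((addr >> 7) & 0x06) | (is_read ? 1 : 0);
		*addr_2 = addr & 0xFF;
	} else {
		/* addr_1 = addr[6:0] shifted up one, with R/nW in bit 0 */
		*addr_1 = ((uint32_t)addr << 1) | (is_read ? 1 : 0);
		*addr_2 = 0;
	}
}

int main(void)
{
	uint32_t a1, a2;

	encode_addr(0x50, 1, 0, &a1, &a2);	/* 7-bit read, example address */
	printf("7-bit:  MST_ADDR_1=0x%02x MST_ADDR_2=0x%02x\n", a1, a2);

	encode_addr(0x2A5, 0, 1, &a1, &a2);	/* 10-bit write, example address */
	printf("10-bit: MST_ADDR_1=0x%02x MST_ADDR_2=0x%02x\n", a1, a2);
	return 0;
}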
// SPDX-License-Identifier: GPL-2.0 /* * Nuvoton NPCM7xx I2C Controller driver * * Copyright (C) 2020 Nuvoton Technologies [email protected] */ #include <linux/bitfield.h> #include <linux/clk.h> #include <linux/debugfs.h> #include <linux/errno.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/iopoll.h> #include <linux/irq.h> #include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/regmap.h> enum i2c_mode { I2C_MASTER, I2C_SLAVE, }; /* * External I2C Interface driver xfer indication values, which indicate status * of the bus. */ enum i2c_state_ind { I2C_NO_STATUS_IND = 0, I2C_SLAVE_RCV_IND, I2C_SLAVE_XMIT_IND, I2C_SLAVE_XMIT_MISSING_DATA_IND, I2C_SLAVE_RESTART_IND, I2C_SLAVE_DONE_IND, I2C_MASTER_DONE_IND, I2C_NACK_IND, I2C_BUS_ERR_IND, I2C_WAKE_UP_IND, I2C_BLOCK_BYTES_ERR_IND, I2C_SLAVE_RCV_MISSING_DATA_IND, }; /* * Operation type values (used to define the operation currently running) * module is interrupt driven, on each interrupt the current operation is * checked to see if the module is currently reading or writing. */ enum i2c_oper { I2C_NO_OPER = 0, I2C_WRITE_OPER, I2C_READ_OPER, }; /* I2C Bank (module had 2 banks of registers) */ enum i2c_bank { I2C_BANK_0 = 0, I2C_BANK_1, }; /* Internal I2C states values (for the I2C module state machine). */ enum i2c_state { I2C_DISABLE = 0, I2C_IDLE, I2C_MASTER_START, I2C_SLAVE_MATCH, I2C_OPER_STARTED, I2C_STOP_PENDING, }; #if IS_ENABLED(CONFIG_I2C_SLAVE) /* Module supports setting multiple own slave addresses */ enum i2c_addr { I2C_SLAVE_ADDR1 = 0, I2C_SLAVE_ADDR2, I2C_SLAVE_ADDR3, I2C_SLAVE_ADDR4, I2C_SLAVE_ADDR5, I2C_SLAVE_ADDR6, I2C_SLAVE_ADDR7, I2C_SLAVE_ADDR8, I2C_SLAVE_ADDR9, I2C_SLAVE_ADDR10, I2C_GC_ADDR, I2C_ARP_ADDR, }; #endif /* init register and default value required to enable module */ #define NPCM_I2CSEGCTL 0xE4 /* Common regs */ #define NPCM_I2CSDA 0x00 #define NPCM_I2CST 0x02 #define NPCM_I2CCST 0x04 #define NPCM_I2CCTL1 0x06 #define NPCM_I2CADDR1 0x08 #define NPCM_I2CCTL2 0x0A #define NPCM_I2CADDR2 0x0C #define NPCM_I2CCTL3 0x0E #define NPCM_I2CCST2 0x18 #define NPCM_I2CCST3 0x19 #define I2C_VER 0x1F /* BANK 0 regs */ #define NPCM_I2CADDR3 0x10 #define NPCM_I2CADDR7 0x11 #define NPCM_I2CADDR4 0x12 #define NPCM_I2CADDR8 0x13 #define NPCM_I2CADDR5 0x14 #define NPCM_I2CADDR9 0x15 #define NPCM_I2CADDR6 0x16 #define NPCM_I2CADDR10 0x17 #define NPCM_I2CCTL4 0x1A #define NPCM_I2CCTL5 0x1B #define NPCM_I2CSCLLT 0x1C /* SCL Low Time */ #define NPCM_I2CFIF_CTL 0x1D /* FIFO Control */ #define NPCM_I2CSCLHT 0x1E /* SCL High Time */ /* BANK 1 regs */ #define NPCM_I2CFIF_CTS 0x10 /* Both FIFOs Control and Status */ #define NPCM_I2CTXF_CTL 0x12 /* Tx-FIFO Control */ #define NPCM_I2CT_OUT 0x14 /* Bus T.O. */ #define NPCM_I2CPEC 0x16 /* PEC Data */ #define NPCM_I2CTXF_STS 0x1A /* Tx-FIFO Status */ #define NPCM_I2CRXF_STS 0x1C /* Rx-FIFO Status */ #define NPCM_I2CRXF_CTL 0x1E /* Rx-FIFO Control */ #if IS_ENABLED(CONFIG_I2C_SLAVE) /* * npcm_i2caddr array: * The module supports having multiple own slave addresses. * Since the addr regs are sprinkled all over the address space, * use this array to get the address or each register. 
*/ #define I2C_NUM_OWN_ADDR 2 #define I2C_NUM_OWN_ADDR_SUPPORTED 2 static const int npcm_i2caddr[I2C_NUM_OWN_ADDR] = { NPCM_I2CADDR1, NPCM_I2CADDR2, }; #endif /* NPCM_I2CST reg fields */ #define NPCM_I2CST_XMIT BIT(0) /* Transmit mode */ #define NPCM_I2CST_MASTER BIT(1) /* Master mode */ #define NPCM_I2CST_NMATCH BIT(2) /* New match */ #define NPCM_I2CST_STASTR BIT(3) /* Stall after start */ #define NPCM_I2CST_NEGACK BIT(4) /* Negative ACK */ #define NPCM_I2CST_BER BIT(5) /* Bus error */ #define NPCM_I2CST_SDAST BIT(6) /* SDA status */ #define NPCM_I2CST_SLVSTP BIT(7) /* Slave stop */ /* NPCM_I2CCST reg fields */ #define NPCM_I2CCST_BUSY BIT(0) /* Busy */ #define NPCM_I2CCST_BB BIT(1) /* Bus busy */ #define NPCM_I2CCST_MATCH BIT(2) /* Address match */ #define NPCM_I2CCST_GCMATCH BIT(3) /* Global call match */ #define NPCM_I2CCST_TSDA BIT(4) /* Test SDA line */ #define NPCM_I2CCST_TGSCL BIT(5) /* Toggle SCL line */ #define NPCM_I2CCST_MATCHAF BIT(6) /* Match address field */ #define NPCM_I2CCST_ARPMATCH BIT(7) /* ARP address match */ /* NPCM_I2CCTL1 reg fields */ #define NPCM_I2CCTL1_START BIT(0) /* Generate start condition */ #define NPCM_I2CCTL1_STOP BIT(1) /* Generate stop condition */ #define NPCM_I2CCTL1_INTEN BIT(2) /* Interrupt enable */ #define NPCM_I2CCTL1_EOBINTE BIT(3) #define NPCM_I2CCTL1_ACK BIT(4) #define NPCM_I2CCTL1_GCMEN BIT(5) /* Global call match enable */ #define NPCM_I2CCTL1_NMINTE BIT(6) /* New match interrupt enable */ #define NPCM_I2CCTL1_STASTRE BIT(7) /* Stall after start enable */ /* RW1S fields (inside a RW reg): */ #define NPCM_I2CCTL1_RWS \ (NPCM_I2CCTL1_START | NPCM_I2CCTL1_STOP | NPCM_I2CCTL1_ACK) /* npcm_i2caddr reg fields */ #define NPCM_I2CADDR_A GENMASK(6, 0) /* Address */ #define NPCM_I2CADDR_SAEN BIT(7) /* Slave address enable */ /* NPCM_I2CCTL2 reg fields */ #define I2CCTL2_ENABLE BIT(0) /* Module enable */ #define I2CCTL2_SCLFRQ6_0 GENMASK(7, 1) /* Bits 0:6 of frequency divisor */ /* NPCM_I2CCTL3 reg fields */ #define I2CCTL3_SCLFRQ8_7 GENMASK(1, 0) /* Bits 7:8 of frequency divisor */ #define I2CCTL3_ARPMEN BIT(2) /* ARP match enable */ #define I2CCTL3_IDL_START BIT(3) #define I2CCTL3_400K_MODE BIT(4) #define I2CCTL3_BNK_SEL BIT(5) #define I2CCTL3_SDA_LVL BIT(6) #define I2CCTL3_SCL_LVL BIT(7) /* NPCM_I2CCST2 reg fields */ #define NPCM_I2CCST2_MATCHA1F BIT(0) #define NPCM_I2CCST2_MATCHA2F BIT(1) #define NPCM_I2CCST2_MATCHA3F BIT(2) #define NPCM_I2CCST2_MATCHA4F BIT(3) #define NPCM_I2CCST2_MATCHA5F BIT(4) #define NPCM_I2CCST2_MATCHA6F BIT(5) #define NPCM_I2CCST2_MATCHA7F BIT(5) #define NPCM_I2CCST2_INTSTS BIT(7) /* NPCM_I2CCST3 reg fields */ #define NPCM_I2CCST3_MATCHA8F BIT(0) #define NPCM_I2CCST3_MATCHA9F BIT(1) #define NPCM_I2CCST3_MATCHA10F BIT(2) #define NPCM_I2CCST3_EO_BUSY BIT(7) /* NPCM_I2CCTL4 reg fields */ #define I2CCTL4_HLDT GENMASK(5, 0) #define I2CCTL4_LVL_WE BIT(7) /* NPCM_I2CCTL5 reg fields */ #define I2CCTL5_DBNCT GENMASK(3, 0) /* NPCM_I2CFIF_CTS reg fields */ #define NPCM_I2CFIF_CTS_RXF_TXE BIT(1) #define NPCM_I2CFIF_CTS_RFTE_IE BIT(3) #define NPCM_I2CFIF_CTS_CLR_FIFO BIT(6) #define NPCM_I2CFIF_CTS_SLVRSTR BIT(7) /* NPCM_I2CTXF_CTL reg field */ #define NPCM_I2CTXF_CTL_THR_TXIE BIT(6) /* NPCM_I2CT_OUT reg fields */ #define NPCM_I2CT_OUT_TO_CKDIV GENMASK(5, 0) #define NPCM_I2CT_OUT_T_OUTIE BIT(6) #define NPCM_I2CT_OUT_T_OUTST BIT(7) /* NPCM_I2CTXF_STS reg fields */ #define NPCM_I2CTXF_STS_TX_THST BIT(6) /* NPCM_I2CRXF_STS reg fields */ #define NPCM_I2CRXF_STS_RX_THST BIT(6) /* NPCM_I2CFIF_CTL reg fields */ #define NPCM_I2CFIF_CTL_FIFO_EN 
BIT(4) /* NPCM_I2CRXF_CTL reg fields */ #define NPCM_I2CRXF_CTL_THR_RXIE BIT(6) #define MAX_I2C_HW_FIFO_SIZE 32 /* I2C_VER reg fields */ #define I2C_VER_VERSION GENMASK(6, 0) #define I2C_VER_FIFO_EN BIT(7) /* stall/stuck timeout in us */ #define DEFAULT_STALL_COUNT 25 /* SCLFRQ field position */ #define SCLFRQ_0_TO_6 GENMASK(6, 0) #define SCLFRQ_7_TO_8 GENMASK(8, 7) /* supported clk settings. values in Hz. */ #define I2C_FREQ_MIN_HZ 10000 #define I2C_FREQ_MAX_HZ I2C_MAX_FAST_MODE_PLUS_FREQ struct npcm_i2c_data { u8 fifo_size; u32 segctl_init_val; u8 txf_sts_tx_bytes; u8 rxf_sts_rx_bytes; u8 rxf_ctl_last_pec; }; static const struct npcm_i2c_data npxm7xx_i2c_data = { .fifo_size = 16, .segctl_init_val = 0x0333F000, .txf_sts_tx_bytes = GENMASK(4, 0), .rxf_sts_rx_bytes = GENMASK(4, 0), .rxf_ctl_last_pec = BIT(5), }; static const struct npcm_i2c_data npxm8xx_i2c_data = { .fifo_size = 32, .segctl_init_val = 0x9333F000, .txf_sts_tx_bytes = GENMASK(5, 0), .rxf_sts_rx_bytes = GENMASK(5, 0), .rxf_ctl_last_pec = BIT(7), }; /* Status of one I2C module */ struct npcm_i2c { struct i2c_adapter adap; struct device *dev; unsigned char __iomem *reg; const struct npcm_i2c_data *data; spinlock_t lock; /* IRQ synchronization */ struct completion cmd_complete; int cmd_err; struct i2c_msg *msgs; int msgs_num; int num; u32 apb_clk; struct i2c_bus_recovery_info rinfo; enum i2c_state state; enum i2c_oper operation; enum i2c_mode master_or_slave; enum i2c_state_ind stop_ind; u8 dest_addr; u8 *rd_buf; u16 rd_size; u16 rd_ind; u8 *wr_buf; u16 wr_size; u16 wr_ind; bool fifo_use; u16 PEC_mask; /* PEC bit mask per slave address */ bool PEC_use; bool read_block_use; unsigned long int_time_stamp; unsigned long bus_freq; /* in Hz */ #if IS_ENABLED(CONFIG_I2C_SLAVE) u8 own_slave_addr; struct i2c_client *slave; int slv_rd_size; int slv_rd_ind; int slv_wr_size; int slv_wr_ind; u8 slv_rd_buf[MAX_I2C_HW_FIFO_SIZE]; u8 slv_wr_buf[MAX_I2C_HW_FIFO_SIZE]; #endif struct dentry *debugfs; /* debugfs device directory */ u64 ber_cnt; u64 rec_succ_cnt; u64 rec_fail_cnt; u64 nack_cnt; u64 timeout_cnt; u64 tx_complete_cnt; }; static inline void npcm_i2c_select_bank(struct npcm_i2c *bus, enum i2c_bank bank) { u8 i2cctl3 = ioread8(bus->reg + NPCM_I2CCTL3); if (bank == I2C_BANK_0) i2cctl3 = i2cctl3 & ~I2CCTL3_BNK_SEL; else i2cctl3 = i2cctl3 | I2CCTL3_BNK_SEL; iowrite8(i2cctl3, bus->reg + NPCM_I2CCTL3); } static void npcm_i2c_init_params(struct npcm_i2c *bus) { bus->stop_ind = I2C_NO_STATUS_IND; bus->rd_size = 0; bus->wr_size = 0; bus->rd_ind = 0; bus->wr_ind = 0; bus->read_block_use = false; bus->int_time_stamp = 0; bus->PEC_use = false; bus->PEC_mask = 0; #if IS_ENABLED(CONFIG_I2C_SLAVE) if (bus->slave) bus->master_or_slave = I2C_SLAVE; #endif } static inline void npcm_i2c_wr_byte(struct npcm_i2c *bus, u8 data) { iowrite8(data, bus->reg + NPCM_I2CSDA); } static inline u8 npcm_i2c_rd_byte(struct npcm_i2c *bus) { return ioread8(bus->reg + NPCM_I2CSDA); } static int npcm_i2c_get_SCL(struct i2c_adapter *_adap) { struct npcm_i2c *bus = container_of(_adap, struct npcm_i2c, adap); return !!(I2CCTL3_SCL_LVL & ioread8(bus->reg + NPCM_I2CCTL3)); } static int npcm_i2c_get_SDA(struct i2c_adapter *_adap) { struct npcm_i2c *bus = container_of(_adap, struct npcm_i2c, adap); return !!(I2CCTL3_SDA_LVL & ioread8(bus->reg + NPCM_I2CCTL3)); } static inline u16 npcm_i2c_get_index(struct npcm_i2c *bus) { if (bus->operation == I2C_READ_OPER) return bus->rd_ind; if (bus->operation == I2C_WRITE_OPER) return bus->wr_ind; return 0; } /* quick protocol (just address) */ 
static inline bool npcm_i2c_is_quick(struct npcm_i2c *bus) { return bus->wr_size == 0 && bus->rd_size == 0; } static void npcm_i2c_disable(struct npcm_i2c *bus) { u8 i2cctl2; #if IS_ENABLED(CONFIG_I2C_SLAVE) int i; /* Slave addresses removal */ for (i = I2C_SLAVE_ADDR1; i < I2C_NUM_OWN_ADDR_SUPPORTED; i++) iowrite8(0, bus->reg + npcm_i2caddr[i]); #endif /* Disable module */ i2cctl2 = ioread8(bus->reg + NPCM_I2CCTL2); i2cctl2 = i2cctl2 & ~I2CCTL2_ENABLE; iowrite8(i2cctl2, bus->reg + NPCM_I2CCTL2); bus->state = I2C_DISABLE; } static void npcm_i2c_enable(struct npcm_i2c *bus) { u8 i2cctl2 = ioread8(bus->reg + NPCM_I2CCTL2); i2cctl2 = i2cctl2 | I2CCTL2_ENABLE; iowrite8(i2cctl2, bus->reg + NPCM_I2CCTL2); bus->state = I2C_IDLE; } /* enable\disable end of busy (EOB) interrupts */ static inline void npcm_i2c_eob_int(struct npcm_i2c *bus, bool enable) { u8 val; /* Clear EO_BUSY pending bit: */ val = ioread8(bus->reg + NPCM_I2CCST3); val = val | NPCM_I2CCST3_EO_BUSY; iowrite8(val, bus->reg + NPCM_I2CCST3); val = ioread8(bus->reg + NPCM_I2CCTL1); val &= ~NPCM_I2CCTL1_RWS; if (enable) val |= NPCM_I2CCTL1_EOBINTE; else val &= ~NPCM_I2CCTL1_EOBINTE; iowrite8(val, bus->reg + NPCM_I2CCTL1); } static inline bool npcm_i2c_tx_fifo_empty(struct npcm_i2c *bus) { u8 tx_fifo_sts; tx_fifo_sts = ioread8(bus->reg + NPCM_I2CTXF_STS); /* check if TX FIFO is not empty */ if ((tx_fifo_sts & bus->data->txf_sts_tx_bytes) == 0) return false; /* check if TX FIFO status bit is set: */ return !!FIELD_GET(NPCM_I2CTXF_STS_TX_THST, tx_fifo_sts); } static inline bool npcm_i2c_rx_fifo_full(struct npcm_i2c *bus) { u8 rx_fifo_sts; rx_fifo_sts = ioread8(bus->reg + NPCM_I2CRXF_STS); /* check if RX FIFO is not empty: */ if ((rx_fifo_sts & bus->data->rxf_sts_rx_bytes) == 0) return false; /* check if rx fifo full status is set: */ return !!FIELD_GET(NPCM_I2CRXF_STS_RX_THST, rx_fifo_sts); } static inline void npcm_i2c_clear_fifo_int(struct npcm_i2c *bus) { u8 val; val = ioread8(bus->reg + NPCM_I2CFIF_CTS); val = (val & NPCM_I2CFIF_CTS_SLVRSTR) | NPCM_I2CFIF_CTS_RXF_TXE; iowrite8(val, bus->reg + NPCM_I2CFIF_CTS); } static inline void npcm_i2c_clear_tx_fifo(struct npcm_i2c *bus) { u8 val; val = ioread8(bus->reg + NPCM_I2CTXF_STS); val = val | NPCM_I2CTXF_STS_TX_THST; iowrite8(val, bus->reg + NPCM_I2CTXF_STS); } static inline void npcm_i2c_clear_rx_fifo(struct npcm_i2c *bus) { u8 val; val = ioread8(bus->reg + NPCM_I2CRXF_STS); val = val | NPCM_I2CRXF_STS_RX_THST; iowrite8(val, bus->reg + NPCM_I2CRXF_STS); } static void npcm_i2c_int_enable(struct npcm_i2c *bus, bool enable) { u8 val; val = ioread8(bus->reg + NPCM_I2CCTL1); val &= ~NPCM_I2CCTL1_RWS; if (enable) val |= NPCM_I2CCTL1_INTEN; else val &= ~NPCM_I2CCTL1_INTEN; iowrite8(val, bus->reg + NPCM_I2CCTL1); } static inline void npcm_i2c_master_start(struct npcm_i2c *bus) { u8 val; val = ioread8(bus->reg + NPCM_I2CCTL1); val &= ~(NPCM_I2CCTL1_STOP | NPCM_I2CCTL1_ACK); val |= NPCM_I2CCTL1_START; iowrite8(val, bus->reg + NPCM_I2CCTL1); } static inline void npcm_i2c_master_stop(struct npcm_i2c *bus) { u8 val; /* * override HW issue: I2C may fail to supply stop condition in Master * Write operation. 
* Need to delay at least 5 us from the last int, before issueing a stop */ udelay(10); /* function called from interrupt, can't sleep */ val = ioread8(bus->reg + NPCM_I2CCTL1); val &= ~(NPCM_I2CCTL1_START | NPCM_I2CCTL1_ACK); val |= NPCM_I2CCTL1_STOP; iowrite8(val, bus->reg + NPCM_I2CCTL1); if (!bus->fifo_use) return; npcm_i2c_select_bank(bus, I2C_BANK_1); if (bus->operation == I2C_READ_OPER) npcm_i2c_clear_rx_fifo(bus); else npcm_i2c_clear_tx_fifo(bus); npcm_i2c_clear_fifo_int(bus); iowrite8(0, bus->reg + NPCM_I2CTXF_CTL); } static inline void npcm_i2c_stall_after_start(struct npcm_i2c *bus, bool stall) { u8 val; val = ioread8(bus->reg + NPCM_I2CCTL1); val &= ~NPCM_I2CCTL1_RWS; if (stall) val |= NPCM_I2CCTL1_STASTRE; else val &= ~NPCM_I2CCTL1_STASTRE; iowrite8(val, bus->reg + NPCM_I2CCTL1); } static inline void npcm_i2c_nack(struct npcm_i2c *bus) { u8 val; val = ioread8(bus->reg + NPCM_I2CCTL1); val &= ~(NPCM_I2CCTL1_STOP | NPCM_I2CCTL1_START); val |= NPCM_I2CCTL1_ACK; iowrite8(val, bus->reg + NPCM_I2CCTL1); } static inline void npcm_i2c_clear_master_status(struct npcm_i2c *bus) { u8 val; /* Clear NEGACK, STASTR and BER bits */ val = NPCM_I2CST_BER | NPCM_I2CST_NEGACK | NPCM_I2CST_STASTR; iowrite8(val, bus->reg + NPCM_I2CST); } #if IS_ENABLED(CONFIG_I2C_SLAVE) static void npcm_i2c_slave_int_enable(struct npcm_i2c *bus, bool enable) { u8 i2cctl1; /* enable interrupt on slave match: */ i2cctl1 = ioread8(bus->reg + NPCM_I2CCTL1); i2cctl1 &= ~NPCM_I2CCTL1_RWS; if (enable) i2cctl1 |= NPCM_I2CCTL1_NMINTE; else i2cctl1 &= ~NPCM_I2CCTL1_NMINTE; iowrite8(i2cctl1, bus->reg + NPCM_I2CCTL1); } static int npcm_i2c_slave_enable(struct npcm_i2c *bus, enum i2c_addr addr_type, u8 addr, bool enable) { u8 i2cctl1; u8 i2cctl3; u8 sa_reg; sa_reg = (addr & 0x7F) | FIELD_PREP(NPCM_I2CADDR_SAEN, enable); if (addr_type == I2C_GC_ADDR) { i2cctl1 = ioread8(bus->reg + NPCM_I2CCTL1); if (enable) i2cctl1 |= NPCM_I2CCTL1_GCMEN; else i2cctl1 &= ~NPCM_I2CCTL1_GCMEN; iowrite8(i2cctl1, bus->reg + NPCM_I2CCTL1); return 0; } else if (addr_type == I2C_ARP_ADDR) { i2cctl3 = ioread8(bus->reg + NPCM_I2CCTL3); if (enable) i2cctl3 |= I2CCTL3_ARPMEN; else i2cctl3 &= ~I2CCTL3_ARPMEN; iowrite8(i2cctl3, bus->reg + NPCM_I2CCTL3); return 0; } if (addr_type > I2C_SLAVE_ADDR2 && addr_type <= I2C_SLAVE_ADDR10) dev_err(bus->dev, "try to enable more than 2 SA not supported\n"); if (addr_type >= I2C_ARP_ADDR) return -EFAULT; /* Set and enable the address */ iowrite8(sa_reg, bus->reg + npcm_i2caddr[addr_type]); npcm_i2c_slave_int_enable(bus, enable); return 0; } #endif static void npcm_i2c_reset(struct npcm_i2c *bus) { /* * Save I2CCTL1 relevant bits. It is being cleared when the module * is disabled. 
*/ u8 i2cctl1; #if IS_ENABLED(CONFIG_I2C_SLAVE) u8 addr; #endif i2cctl1 = ioread8(bus->reg + NPCM_I2CCTL1); npcm_i2c_disable(bus); npcm_i2c_enable(bus); /* Restore NPCM_I2CCTL1 Status */ i2cctl1 &= ~NPCM_I2CCTL1_RWS; iowrite8(i2cctl1, bus->reg + NPCM_I2CCTL1); /* Clear BB (BUS BUSY) bit */ iowrite8(NPCM_I2CCST_BB, bus->reg + NPCM_I2CCST); iowrite8(0xFF, bus->reg + NPCM_I2CST); /* Clear and disable EOB */ npcm_i2c_eob_int(bus, false); /* Clear all fifo bits: */ iowrite8(NPCM_I2CFIF_CTS_CLR_FIFO, bus->reg + NPCM_I2CFIF_CTS); #if IS_ENABLED(CONFIG_I2C_SLAVE) if (bus->slave) { addr = bus->slave->addr; npcm_i2c_slave_enable(bus, I2C_SLAVE_ADDR1, addr, true); } #endif /* Clear status bits for spurious interrupts */ npcm_i2c_clear_master_status(bus); bus->state = I2C_IDLE; } static inline bool npcm_i2c_is_master(struct npcm_i2c *bus) { return !!FIELD_GET(NPCM_I2CST_MASTER, ioread8(bus->reg + NPCM_I2CST)); } static void npcm_i2c_callback(struct npcm_i2c *bus, enum i2c_state_ind op_status, u16 info) { struct i2c_msg *msgs; int msgs_num; msgs = bus->msgs; msgs_num = bus->msgs_num; /* * check that transaction was not timed-out, and msgs still * holds a valid value. */ if (!msgs) return; if (completion_done(&bus->cmd_complete)) return; switch (op_status) { case I2C_MASTER_DONE_IND: bus->cmd_err = bus->msgs_num; if (bus->tx_complete_cnt < ULLONG_MAX) bus->tx_complete_cnt++; fallthrough; case I2C_BLOCK_BYTES_ERR_IND: /* Master tx finished and all transmit bytes were sent */ if (bus->msgs) { if (msgs[0].flags & I2C_M_RD) msgs[0].len = info; else if (msgs_num == 2 && msgs[1].flags & I2C_M_RD) msgs[1].len = info; } if (completion_done(&bus->cmd_complete) == false) complete(&bus->cmd_complete); break; case I2C_NACK_IND: /* MASTER transmit got a NACK before tx all bytes */ bus->cmd_err = -ENXIO; if (bus->master_or_slave == I2C_MASTER) complete(&bus->cmd_complete); break; case I2C_BUS_ERR_IND: /* Bus error */ bus->cmd_err = -EAGAIN; if (bus->master_or_slave == I2C_MASTER) complete(&bus->cmd_complete); break; case I2C_WAKE_UP_IND: /* I2C wake up */ break; default: break; } bus->operation = I2C_NO_OPER; #if IS_ENABLED(CONFIG_I2C_SLAVE) if (bus->slave) bus->master_or_slave = I2C_SLAVE; #endif } static u8 npcm_i2c_fifo_usage(struct npcm_i2c *bus) { if (bus->operation == I2C_WRITE_OPER) return (bus->data->txf_sts_tx_bytes & ioread8(bus->reg + NPCM_I2CTXF_STS)); if (bus->operation == I2C_READ_OPER) return (bus->data->rxf_sts_rx_bytes & ioread8(bus->reg + NPCM_I2CRXF_STS)); return 0; } static void npcm_i2c_write_to_fifo_master(struct npcm_i2c *bus, u16 max_bytes) { u8 size_free_fifo; /* * Fill the FIFO, while the FIFO is not full and there are more bytes * to write */ size_free_fifo = bus->data->fifo_size - npcm_i2c_fifo_usage(bus); while (max_bytes-- && size_free_fifo) { if (bus->wr_ind < bus->wr_size) npcm_i2c_wr_byte(bus, bus->wr_buf[bus->wr_ind++]); else npcm_i2c_wr_byte(bus, 0xFF); size_free_fifo = bus->data->fifo_size - npcm_i2c_fifo_usage(bus); } } /* * npcm_i2c_set_fifo: * configure the FIFO before using it. If nread is -1 RX FIFO will not be * configured. same for nwrite */ static void npcm_i2c_set_fifo(struct npcm_i2c *bus, int nread, int nwrite) { u8 rxf_ctl = 0; if (!bus->fifo_use) return; npcm_i2c_select_bank(bus, I2C_BANK_1); npcm_i2c_clear_tx_fifo(bus); npcm_i2c_clear_rx_fifo(bus); /* configure RX FIFO */ if (nread > 0) { rxf_ctl = min_t(int, nread, bus->data->fifo_size); /* set LAST bit. 
if LAST is set next FIFO packet is nacked */ if (nread <= bus->data->fifo_size) rxf_ctl |= bus->data->rxf_ctl_last_pec; /* * if we are about to read the first byte in blk rd mode, * don't NACK it. If slave returns zero size HW can't NACK * it immediately, it will read extra byte and then NACK. */ if (bus->rd_ind == 0 && bus->read_block_use) { /* set fifo to read one byte, no last: */ rxf_ctl = 1; } /* set fifo size: */ iowrite8(rxf_ctl, bus->reg + NPCM_I2CRXF_CTL); } /* configure TX FIFO */ if (nwrite > 0) { if (nwrite > bus->data->fifo_size) /* data to send is more then FIFO size. */ iowrite8(bus->data->fifo_size, bus->reg + NPCM_I2CTXF_CTL); else iowrite8(nwrite, bus->reg + NPCM_I2CTXF_CTL); npcm_i2c_clear_tx_fifo(bus); } } static void npcm_i2c_read_fifo(struct npcm_i2c *bus, u8 bytes_in_fifo) { u8 data; while (bytes_in_fifo--) { data = npcm_i2c_rd_byte(bus); if (bus->rd_ind < bus->rd_size) bus->rd_buf[bus->rd_ind++] = data; } } static void npcm_i2c_master_abort(struct npcm_i2c *bus) { /* Only current master is allowed to issue a stop condition */ if (!npcm_i2c_is_master(bus)) return; npcm_i2c_eob_int(bus, true); npcm_i2c_master_stop(bus); npcm_i2c_clear_master_status(bus); } #if IS_ENABLED(CONFIG_I2C_SLAVE) static u8 npcm_i2c_get_slave_addr(struct npcm_i2c *bus, enum i2c_addr addr_type) { u8 slave_add; if (addr_type > I2C_SLAVE_ADDR2 && addr_type <= I2C_SLAVE_ADDR10) dev_err(bus->dev, "get slave: try to use more than 2 SA not supported\n"); slave_add = ioread8(bus->reg + npcm_i2caddr[(int)addr_type]); return slave_add; } static int npcm_i2c_remove_slave_addr(struct npcm_i2c *bus, u8 slave_add) { int i; /* Set the enable bit */ slave_add |= 0x80; for (i = I2C_SLAVE_ADDR1; i < I2C_NUM_OWN_ADDR_SUPPORTED; i++) { if (ioread8(bus->reg + npcm_i2caddr[i]) == slave_add) iowrite8(0, bus->reg + npcm_i2caddr[i]); } return 0; } static void npcm_i2c_write_fifo_slave(struct npcm_i2c *bus, u16 max_bytes) { /* * Fill the FIFO, while the FIFO is not full and there are more bytes * to write */ npcm_i2c_clear_fifo_int(bus); npcm_i2c_clear_tx_fifo(bus); iowrite8(0, bus->reg + NPCM_I2CTXF_CTL); while (max_bytes-- && bus->data->fifo_size != npcm_i2c_fifo_usage(bus)) { if (bus->slv_wr_size <= 0) break; bus->slv_wr_ind = bus->slv_wr_ind & (bus->data->fifo_size - 1); npcm_i2c_wr_byte(bus, bus->slv_wr_buf[bus->slv_wr_ind]); bus->slv_wr_ind++; bus->slv_wr_ind = bus->slv_wr_ind & (bus->data->fifo_size - 1); bus->slv_wr_size--; } } static void npcm_i2c_read_fifo_slave(struct npcm_i2c *bus, u8 bytes_in_fifo) { u8 data; if (!bus->slave) return; while (bytes_in_fifo--) { data = npcm_i2c_rd_byte(bus); bus->slv_rd_ind = bus->slv_rd_ind & (bus->data->fifo_size - 1); bus->slv_rd_buf[bus->slv_rd_ind] = data; bus->slv_rd_ind++; /* 1st byte is length in block protocol: */ if (bus->slv_rd_ind == 1 && bus->read_block_use) bus->slv_rd_size = data + bus->PEC_use + 1; } } static int npcm_i2c_slave_get_wr_buf(struct npcm_i2c *bus) { int i; u8 value; int ind; int ret = bus->slv_wr_ind; /* fill a cyclic buffer */ for (i = 0; i < bus->data->fifo_size; i++) { if (bus->slv_wr_size >= bus->data->fifo_size) break; if (bus->state == I2C_SLAVE_MATCH) { i2c_slave_event(bus->slave, I2C_SLAVE_READ_REQUESTED, &value); bus->state = I2C_OPER_STARTED; } else { i2c_slave_event(bus->slave, I2C_SLAVE_READ_PROCESSED, &value); } ind = (bus->slv_wr_ind + bus->slv_wr_size) & (bus->data->fifo_size - 1); bus->slv_wr_buf[ind] = value; bus->slv_wr_size++; } return bus->data->fifo_size - ret; } static void npcm_i2c_slave_send_rd_buf(struct npcm_i2c *bus) { 
int i; for (i = 0; i < bus->slv_rd_ind; i++) i2c_slave_event(bus->slave, I2C_SLAVE_WRITE_RECEIVED, &bus->slv_rd_buf[i]); /* * once we send bytes up, need to reset the counter of the wr buf * got data from master (new offset in device), ignore wr fifo: */ if (bus->slv_rd_ind) { bus->slv_wr_size = 0; bus->slv_wr_ind = 0; } bus->slv_rd_ind = 0; bus->slv_rd_size = bus->adap.quirks->max_read_len; npcm_i2c_clear_fifo_int(bus); npcm_i2c_clear_rx_fifo(bus); } static void npcm_i2c_slave_receive(struct npcm_i2c *bus, u16 nread, u8 *read_data) { bus->state = I2C_OPER_STARTED; bus->operation = I2C_READ_OPER; bus->slv_rd_size = nread; bus->slv_rd_ind = 0; iowrite8(0, bus->reg + NPCM_I2CTXF_CTL); iowrite8(bus->data->fifo_size, bus->reg + NPCM_I2CRXF_CTL); npcm_i2c_clear_tx_fifo(bus); npcm_i2c_clear_rx_fifo(bus); } static void npcm_i2c_slave_xmit(struct npcm_i2c *bus, u16 nwrite, u8 *write_data) { if (nwrite == 0) return; bus->operation = I2C_WRITE_OPER; /* get the next buffer */ npcm_i2c_slave_get_wr_buf(bus); npcm_i2c_write_fifo_slave(bus, nwrite); } /* * npcm_i2c_slave_wr_buf_sync: * currently slave IF only supports single byte operations. * in order to utilize the npcm HW FIFO, the driver will ask for 16 bytes * at a time, pack them in buffer, and then transmit them all together * to the FIFO and onward to the bus. * NACK on read will be once reached to bus->adap->quirks->max_read_len. * sending a NACK wherever the backend requests for it is not supported. * the next two functions allow reading to local buffer before writing it all * to the HW FIFO. */ static void npcm_i2c_slave_wr_buf_sync(struct npcm_i2c *bus) { int left_in_fifo; left_in_fifo = bus->data->txf_sts_tx_bytes & ioread8(bus->reg + NPCM_I2CTXF_STS); /* fifo already full: */ if (left_in_fifo >= bus->data->fifo_size || bus->slv_wr_size >= bus->data->fifo_size) return; /* update the wr fifo index back to the untransmitted bytes: */ bus->slv_wr_ind = bus->slv_wr_ind - left_in_fifo; bus->slv_wr_size = bus->slv_wr_size + left_in_fifo; if (bus->slv_wr_ind < 0) bus->slv_wr_ind += bus->data->fifo_size; } static void npcm_i2c_slave_rd_wr(struct npcm_i2c *bus) { if (NPCM_I2CST_XMIT & ioread8(bus->reg + NPCM_I2CST)) { /* * Slave got an address match with direction bit 1 so it should * transmit data. Write till the master will NACK */ bus->operation = I2C_WRITE_OPER; npcm_i2c_slave_xmit(bus, bus->adap.quirks->max_write_len, bus->slv_wr_buf); } else { /* * Slave got an address match with direction bit 0 so it should * receive data. * this module does not support saying no to bytes. * it will always ACK. */ bus->operation = I2C_READ_OPER; npcm_i2c_read_fifo_slave(bus, npcm_i2c_fifo_usage(bus)); bus->stop_ind = I2C_SLAVE_RCV_IND; npcm_i2c_slave_send_rd_buf(bus); npcm_i2c_slave_receive(bus, bus->adap.quirks->max_read_len, bus->slv_rd_buf); } } static irqreturn_t npcm_i2c_int_slave_handler(struct npcm_i2c *bus) { u8 val; irqreturn_t ret = IRQ_NONE; u8 i2cst = ioread8(bus->reg + NPCM_I2CST); /* Slave: A NACK has occurred */ if (NPCM_I2CST_NEGACK & i2cst) { bus->stop_ind = I2C_NACK_IND; npcm_i2c_slave_wr_buf_sync(bus); if (bus->fifo_use) /* clear the FIFO */ iowrite8(NPCM_I2CFIF_CTS_CLR_FIFO, bus->reg + NPCM_I2CFIF_CTS); /* In slave write, NACK is OK, otherwise it is a problem */ bus->stop_ind = I2C_NO_STATUS_IND; bus->operation = I2C_NO_OPER; bus->own_slave_addr = 0xFF; /* * Slave has to wait for STOP to decide this is the end * of the transaction. 
tx is not yet considered as done */ iowrite8(NPCM_I2CST_NEGACK, bus->reg + NPCM_I2CST); ret = IRQ_HANDLED; } /* Slave mode: a Bus Error (BER) has been identified */ if (NPCM_I2CST_BER & i2cst) { /* * Check whether bus arbitration or Start or Stop during data * xfer bus arbitration problem should not result in recovery */ bus->stop_ind = I2C_BUS_ERR_IND; /* wait for bus busy before clear fifo */ iowrite8(NPCM_I2CFIF_CTS_CLR_FIFO, bus->reg + NPCM_I2CFIF_CTS); bus->state = I2C_IDLE; /* * in BER case we might get 2 interrupts: one for slave one for * master ( for a channel which is master\slave switching) */ if (completion_done(&bus->cmd_complete) == false) { bus->cmd_err = -EIO; complete(&bus->cmd_complete); } bus->own_slave_addr = 0xFF; iowrite8(NPCM_I2CST_BER, bus->reg + NPCM_I2CST); ret = IRQ_HANDLED; } /* A Slave Stop Condition has been identified */ if (NPCM_I2CST_SLVSTP & i2cst) { u8 bytes_in_fifo = npcm_i2c_fifo_usage(bus); bus->stop_ind = I2C_SLAVE_DONE_IND; if (bus->operation == I2C_READ_OPER) npcm_i2c_read_fifo_slave(bus, bytes_in_fifo); /* if the buffer is empty nothing will be sent */ npcm_i2c_slave_send_rd_buf(bus); /* Slave done transmitting or receiving */ bus->stop_ind = I2C_NO_STATUS_IND; /* * Note, just because we got here, it doesn't mean we through * away the wr buffer. * we keep it until the next received offset. */ bus->operation = I2C_NO_OPER; bus->own_slave_addr = 0xFF; i2c_slave_event(bus->slave, I2C_SLAVE_STOP, 0); iowrite8(NPCM_I2CST_SLVSTP, bus->reg + NPCM_I2CST); if (bus->fifo_use) { npcm_i2c_clear_fifo_int(bus); npcm_i2c_clear_rx_fifo(bus); npcm_i2c_clear_tx_fifo(bus); iowrite8(NPCM_I2CFIF_CTS_CLR_FIFO, bus->reg + NPCM_I2CFIF_CTS); } bus->state = I2C_IDLE; ret = IRQ_HANDLED; } /* restart condition occurred and Rx-FIFO was not empty */ if (bus->fifo_use && FIELD_GET(NPCM_I2CFIF_CTS_SLVRSTR, ioread8(bus->reg + NPCM_I2CFIF_CTS))) { bus->stop_ind = I2C_SLAVE_RESTART_IND; bus->master_or_slave = I2C_SLAVE; if (bus->operation == I2C_READ_OPER) npcm_i2c_read_fifo_slave(bus, npcm_i2c_fifo_usage(bus)); bus->operation = I2C_WRITE_OPER; iowrite8(0, bus->reg + NPCM_I2CRXF_CTL); val = NPCM_I2CFIF_CTS_CLR_FIFO | NPCM_I2CFIF_CTS_SLVRSTR | NPCM_I2CFIF_CTS_RXF_TXE; iowrite8(val, bus->reg + NPCM_I2CFIF_CTS); npcm_i2c_slave_rd_wr(bus); ret = IRQ_HANDLED; } /* A Slave Address Match has been identified */ if (NPCM_I2CST_NMATCH & i2cst) { u8 info = 0; /* Address match automatically implies slave mode */ bus->master_or_slave = I2C_SLAVE; npcm_i2c_clear_fifo_int(bus); npcm_i2c_clear_rx_fifo(bus); npcm_i2c_clear_tx_fifo(bus); iowrite8(0, bus->reg + NPCM_I2CTXF_CTL); iowrite8(bus->data->fifo_size, bus->reg + NPCM_I2CRXF_CTL); if (NPCM_I2CST_XMIT & i2cst) { bus->operation = I2C_WRITE_OPER; } else { i2c_slave_event(bus->slave, I2C_SLAVE_WRITE_REQUESTED, &info); bus->operation = I2C_READ_OPER; } if (bus->own_slave_addr == 0xFF) { /* Check which type of address match */ val = ioread8(bus->reg + NPCM_I2CCST); if (NPCM_I2CCST_MATCH & val) { u16 addr; enum i2c_addr eaddr; u8 i2ccst2; u8 i2ccst3; i2ccst3 = ioread8(bus->reg + NPCM_I2CCST3); i2ccst2 = ioread8(bus->reg + NPCM_I2CCST2); /* * the i2c module can response to 10 own SA. * check which one was addressed by the master. * respond to the first one. 
*/ addr = ((i2ccst3 & 0x07) << 7) | (i2ccst2 & 0x7F); info = ffs(addr); eaddr = (enum i2c_addr)info; addr = npcm_i2c_get_slave_addr(bus, eaddr); addr &= 0x7F; bus->own_slave_addr = addr; if (bus->PEC_mask & BIT(info)) bus->PEC_use = true; else bus->PEC_use = false; } else { if (NPCM_I2CCST_GCMATCH & val) bus->own_slave_addr = 0; if (NPCM_I2CCST_ARPMATCH & val) bus->own_slave_addr = 0x61; } } else { /* * Slave match can happen in two options: * 1. Start, SA, read (slave read without further ado) * 2. Start, SA, read, data, restart, SA, read, ... * (slave read in fragmented mode) * 3. Start, SA, write, data, restart, SA, read, .. * (regular write-read mode) */ if ((bus->state == I2C_OPER_STARTED && bus->operation == I2C_READ_OPER && bus->stop_ind == I2C_SLAVE_XMIT_IND) || bus->stop_ind == I2C_SLAVE_RCV_IND) { /* slave tx after slave rx w/o STOP */ bus->stop_ind = I2C_SLAVE_RESTART_IND; } } if (NPCM_I2CST_XMIT & i2cst) bus->stop_ind = I2C_SLAVE_XMIT_IND; else bus->stop_ind = I2C_SLAVE_RCV_IND; bus->state = I2C_SLAVE_MATCH; npcm_i2c_slave_rd_wr(bus); iowrite8(NPCM_I2CST_NMATCH, bus->reg + NPCM_I2CST); ret = IRQ_HANDLED; } /* Slave SDA status is set - tx or rx */ if ((NPCM_I2CST_SDAST & i2cst) || (bus->fifo_use && (npcm_i2c_tx_fifo_empty(bus) || npcm_i2c_rx_fifo_full(bus)))) { npcm_i2c_slave_rd_wr(bus); iowrite8(NPCM_I2CST_SDAST, bus->reg + NPCM_I2CST); ret = IRQ_HANDLED; } /* SDAST */ /* * If irq is not one of the above, make sure EOB is disabled and all * status bits are cleared. */ if (ret == IRQ_NONE) { npcm_i2c_eob_int(bus, false); npcm_i2c_clear_master_status(bus); } return IRQ_HANDLED; } static int npcm_i2c_reg_slave(struct i2c_client *client) { unsigned long lock_flags; struct npcm_i2c *bus = i2c_get_adapdata(client->adapter); bus->slave = client; if (!bus->slave) return -EINVAL; if (client->flags & I2C_CLIENT_TEN) return -EAFNOSUPPORT; spin_lock_irqsave(&bus->lock, lock_flags); npcm_i2c_init_params(bus); bus->slv_rd_size = 0; bus->slv_wr_size = 0; bus->slv_rd_ind = 0; bus->slv_wr_ind = 0; if (client->flags & I2C_CLIENT_PEC) bus->PEC_use = true; dev_info(bus->dev, "i2c%d register slave SA=0x%x, PEC=%d\n", bus->num, client->addr, bus->PEC_use); npcm_i2c_slave_enable(bus, I2C_SLAVE_ADDR1, client->addr, true); npcm_i2c_clear_fifo_int(bus); npcm_i2c_clear_rx_fifo(bus); npcm_i2c_clear_tx_fifo(bus); npcm_i2c_slave_int_enable(bus, true); spin_unlock_irqrestore(&bus->lock, lock_flags); return 0; } static int npcm_i2c_unreg_slave(struct i2c_client *client) { struct npcm_i2c *bus = client->adapter->algo_data; unsigned long lock_flags; spin_lock_irqsave(&bus->lock, lock_flags); if (!bus->slave) { spin_unlock_irqrestore(&bus->lock, lock_flags); return -EINVAL; } npcm_i2c_slave_int_enable(bus, false); npcm_i2c_remove_slave_addr(bus, client->addr); bus->slave = NULL; spin_unlock_irqrestore(&bus->lock, lock_flags); return 0; } #endif /* CONFIG_I2C_SLAVE */ static void npcm_i2c_master_fifo_read(struct npcm_i2c *bus) { int rcount; int fifo_bytes; enum i2c_state_ind ind = I2C_MASTER_DONE_IND; fifo_bytes = npcm_i2c_fifo_usage(bus); rcount = bus->rd_size - bus->rd_ind; /* * In order not to change the RX_TRH during transaction (we found that * this might be problematic if it takes too much time to read the FIFO) * we read the data in the following way. If the number of bytes to * read == FIFO Size + C (where C < FIFO Size)then first read C bytes * and in the next int we read rest of the data. 
*/ if (rcount < (2 * bus->data->fifo_size) && rcount > bus->data->fifo_size) fifo_bytes = rcount - bus->data->fifo_size; if (rcount <= fifo_bytes) { /* last bytes are about to be read - end of tx */ bus->state = I2C_STOP_PENDING; bus->stop_ind = ind; npcm_i2c_eob_int(bus, true); /* Stop should be set before reading last byte. */ npcm_i2c_master_stop(bus); npcm_i2c_read_fifo(bus, fifo_bytes); } else { npcm_i2c_read_fifo(bus, fifo_bytes); rcount = bus->rd_size - bus->rd_ind; npcm_i2c_set_fifo(bus, rcount, -1); } } static void npcm_i2c_irq_master_handler_write(struct npcm_i2c *bus) { u16 wcount; if (bus->fifo_use) npcm_i2c_clear_tx_fifo(bus); /* clear the TX fifo status bit */ /* Master write operation - last byte handling */ if (bus->wr_ind == bus->wr_size) { if (bus->fifo_use && npcm_i2c_fifo_usage(bus) > 0) /* * No more bytes to send (to add to the FIFO), * however the FIFO is not empty yet. It is * still in the middle of tx. Currently there's nothing * to do except for waiting to the end of the tx * We will get an int when the FIFO will get empty. */ return; if (bus->rd_size == 0) { /* all bytes have been written, in wr only operation */ npcm_i2c_eob_int(bus, true); bus->state = I2C_STOP_PENDING; bus->stop_ind = I2C_MASTER_DONE_IND; npcm_i2c_master_stop(bus); /* Clear SDA Status bit (by writing dummy byte) */ npcm_i2c_wr_byte(bus, 0xFF); } else { /* last write-byte written on previous int - restart */ npcm_i2c_set_fifo(bus, bus->rd_size, -1); /* Generate repeated start upon next write to SDA */ npcm_i2c_master_start(bus); /* * Receiving one byte only - stall after successful * completion of send address byte. If we NACK here, and * slave doesn't ACK the address, we might * unintentionally NACK the next multi-byte read. */ if (bus->rd_size == 1) npcm_i2c_stall_after_start(bus, true); /* Next int will occur on read */ bus->operation = I2C_READ_OPER; /* send the slave address in read direction */ npcm_i2c_wr_byte(bus, bus->dest_addr | 0x1); } } else { /* write next byte not last byte and not slave address */ if (!bus->fifo_use || bus->wr_size == 1) { npcm_i2c_wr_byte(bus, bus->wr_buf[bus->wr_ind++]); } else { wcount = bus->wr_size - bus->wr_ind; npcm_i2c_set_fifo(bus, -1, wcount); if (wcount) npcm_i2c_write_to_fifo_master(bus, wcount); } } } static void npcm_i2c_irq_master_handler_read(struct npcm_i2c *bus) { u16 block_extra_bytes_size; u8 data; /* added bytes to the packet: */ block_extra_bytes_size = bus->read_block_use + bus->PEC_use; /* * Perform master read, distinguishing between last byte and the rest of * the bytes. 
The last byte should be read when the clock is stopped */ if (bus->rd_ind == 0) { /* first byte handling: */ if (bus->read_block_use) { /* first byte in block protocol is the size: */ data = npcm_i2c_rd_byte(bus); data = clamp_val(data, 1, I2C_SMBUS_BLOCK_MAX); bus->rd_size = data + block_extra_bytes_size; bus->rd_buf[bus->rd_ind++] = data; /* clear RX FIFO interrupt status: */ if (bus->fifo_use) { data = ioread8(bus->reg + NPCM_I2CFIF_CTS); data = data | NPCM_I2CFIF_CTS_RXF_TXE; iowrite8(data, bus->reg + NPCM_I2CFIF_CTS); } npcm_i2c_set_fifo(bus, bus->rd_size - 1, -1); npcm_i2c_stall_after_start(bus, false); } else { npcm_i2c_clear_tx_fifo(bus); npcm_i2c_master_fifo_read(bus); } } else { if (bus->rd_size == block_extra_bytes_size && bus->read_block_use) { bus->state = I2C_STOP_PENDING; bus->stop_ind = I2C_BLOCK_BYTES_ERR_IND; bus->cmd_err = -EIO; npcm_i2c_eob_int(bus, true); npcm_i2c_master_stop(bus); npcm_i2c_read_fifo(bus, npcm_i2c_fifo_usage(bus)); } else { npcm_i2c_master_fifo_read(bus); } } } static void npcm_i2c_irq_handle_nmatch(struct npcm_i2c *bus) { iowrite8(NPCM_I2CST_NMATCH, bus->reg + NPCM_I2CST); npcm_i2c_nack(bus); bus->stop_ind = I2C_BUS_ERR_IND; npcm_i2c_callback(bus, bus->stop_ind, npcm_i2c_get_index(bus)); } /* A NACK has occurred */ static void npcm_i2c_irq_handle_nack(struct npcm_i2c *bus) { u8 val; if (bus->nack_cnt < ULLONG_MAX) bus->nack_cnt++; if (bus->fifo_use) { /* * if there are still untransmitted bytes in TX FIFO * reduce them from wr_ind */ if (bus->operation == I2C_WRITE_OPER) bus->wr_ind -= npcm_i2c_fifo_usage(bus); /* clear the FIFO */ iowrite8(NPCM_I2CFIF_CTS_CLR_FIFO, bus->reg + NPCM_I2CFIF_CTS); } /* In master write operation, got unexpected NACK */ bus->stop_ind = I2C_NACK_IND; /* Only current master is allowed to issue Stop Condition */ if (npcm_i2c_is_master(bus)) { /* stopping in the middle */ npcm_i2c_eob_int(bus, false); npcm_i2c_master_stop(bus); /* Clear SDA Status bit (by reading dummy byte) */ npcm_i2c_rd_byte(bus); /* * The bus is released from stall only after the SW clears * NEGACK bit. Then a Stop condition is sent. */ npcm_i2c_clear_master_status(bus); readx_poll_timeout_atomic(ioread8, bus->reg + NPCM_I2CCST, val, !(val & NPCM_I2CCST_BUSY), 10, 200); /* Verify no status bits are still set after bus is released */ npcm_i2c_clear_master_status(bus); } bus->state = I2C_IDLE; /* * In Master mode, NACK should be cleared only after STOP. * In such case, the bus is released from stall only after the * software clears NACK bit. Then a Stop condition is sent. 
*/ npcm_i2c_callback(bus, bus->stop_ind, bus->wr_ind); } /* Master mode: a Bus Error has been identified */ static void npcm_i2c_irq_handle_ber(struct npcm_i2c *bus) { if (bus->ber_cnt < ULLONG_MAX) bus->ber_cnt++; bus->stop_ind = I2C_BUS_ERR_IND; if (npcm_i2c_is_master(bus)) { npcm_i2c_master_abort(bus); } else { npcm_i2c_clear_master_status(bus); /* Clear BB (BUS BUSY) bit */ iowrite8(NPCM_I2CCST_BB, bus->reg + NPCM_I2CCST); bus->cmd_err = -EAGAIN; npcm_i2c_callback(bus, bus->stop_ind, npcm_i2c_get_index(bus)); } bus->state = I2C_IDLE; } /* EOB: a master End Of Busy (meaning STOP completed) */ static void npcm_i2c_irq_handle_eob(struct npcm_i2c *bus) { npcm_i2c_eob_int(bus, false); bus->state = I2C_IDLE; npcm_i2c_callback(bus, bus->stop_ind, bus->rd_ind); } /* Address sent and requested stall occurred (Master mode) */ static void npcm_i2c_irq_handle_stall_after_start(struct npcm_i2c *bus) { if (npcm_i2c_is_quick(bus)) { bus->state = I2C_STOP_PENDING; bus->stop_ind = I2C_MASTER_DONE_IND; npcm_i2c_eob_int(bus, true); npcm_i2c_master_stop(bus); } else if ((bus->rd_size == 1) && !bus->read_block_use) { /* * Receiving one byte only - set NACK after ensuring * slave ACKed the address byte. */ npcm_i2c_nack(bus); } /* Reset stall-after-address-byte */ npcm_i2c_stall_after_start(bus, false); /* Clear stall only after setting STOP */ iowrite8(NPCM_I2CST_STASTR, bus->reg + NPCM_I2CST); } /* SDA status is set - TX or RX, master */ static void npcm_i2c_irq_handle_sda(struct npcm_i2c *bus, u8 i2cst) { u8 fif_cts; if (!npcm_i2c_is_master(bus)) return; if (bus->state == I2C_IDLE) { bus->stop_ind = I2C_WAKE_UP_IND; if (npcm_i2c_is_quick(bus) || bus->read_block_use) /* * Need to stall after successful * completion of sending address byte */ npcm_i2c_stall_after_start(bus, true); else npcm_i2c_stall_after_start(bus, false); /* * Receiving one byte only - stall after successful completion * of sending address byte If we NACK here, and slave doesn't * ACK the address, we might unintentionally NACK the next * multi-byte read */ if (bus->wr_size == 0 && bus->rd_size == 1) npcm_i2c_stall_after_start(bus, true); /* Initiate I2C master tx */ /* select bank 1 for FIFO regs */ npcm_i2c_select_bank(bus, I2C_BANK_1); fif_cts = ioread8(bus->reg + NPCM_I2CFIF_CTS); fif_cts = fif_cts & ~NPCM_I2CFIF_CTS_SLVRSTR; /* clear FIFO and relevant status bits. */ fif_cts = fif_cts | NPCM_I2CFIF_CTS_CLR_FIFO; iowrite8(fif_cts, bus->reg + NPCM_I2CFIF_CTS); /* re-enable */ fif_cts = fif_cts | NPCM_I2CFIF_CTS_RXF_TXE; iowrite8(fif_cts, bus->reg + NPCM_I2CFIF_CTS); /* * Configure the FIFO threshold: * according to the needed # of bytes to read. * Note: due to HW limitation can't config the rx fifo before it * got and ACK on the restart. LAST bit will not be reset unless * RX completed. It will stay set on the next tx. 
*/ if (bus->wr_size) npcm_i2c_set_fifo(bus, -1, bus->wr_size); else npcm_i2c_set_fifo(bus, bus->rd_size, -1); bus->state = I2C_OPER_STARTED; if (npcm_i2c_is_quick(bus) || bus->wr_size) npcm_i2c_wr_byte(bus, bus->dest_addr); else npcm_i2c_wr_byte(bus, bus->dest_addr | BIT(0)); /* SDA interrupt, after start\restart */ } else { if (NPCM_I2CST_XMIT & i2cst) { bus->operation = I2C_WRITE_OPER; npcm_i2c_irq_master_handler_write(bus); } else { bus->operation = I2C_READ_OPER; npcm_i2c_irq_master_handler_read(bus); } } } static int npcm_i2c_int_master_handler(struct npcm_i2c *bus) { u8 i2cst; int ret = -EIO; i2cst = ioread8(bus->reg + NPCM_I2CST); if (FIELD_GET(NPCM_I2CST_NMATCH, i2cst)) { npcm_i2c_irq_handle_nmatch(bus); return 0; } /* A NACK has occurred */ if (FIELD_GET(NPCM_I2CST_NEGACK, i2cst)) { npcm_i2c_irq_handle_nack(bus); return 0; } /* Master mode: a Bus Error has been identified */ if (FIELD_GET(NPCM_I2CST_BER, i2cst)) { npcm_i2c_irq_handle_ber(bus); return 0; } /* EOB: a master End Of Busy (meaning STOP completed) */ if ((FIELD_GET(NPCM_I2CCTL1_EOBINTE, ioread8(bus->reg + NPCM_I2CCTL1)) == 1) && (FIELD_GET(NPCM_I2CCST3_EO_BUSY, ioread8(bus->reg + NPCM_I2CCST3)))) { npcm_i2c_irq_handle_eob(bus); return 0; } /* Address sent and requested stall occurred (Master mode) */ if (FIELD_GET(NPCM_I2CST_STASTR, i2cst)) { npcm_i2c_irq_handle_stall_after_start(bus); ret = 0; } /* SDA status is set - TX or RX, master */ if (FIELD_GET(NPCM_I2CST_SDAST, i2cst) || (bus->fifo_use && (npcm_i2c_tx_fifo_empty(bus) || npcm_i2c_rx_fifo_full(bus)))) { npcm_i2c_irq_handle_sda(bus, i2cst); ret = 0; } return ret; } /* recovery using TGCLK functionality of the module */ static int npcm_i2c_recovery_tgclk(struct i2c_adapter *_adap) { u8 val; u8 fif_cts; bool done = false; int status = -ENOTRECOVERABLE; struct npcm_i2c *bus = container_of(_adap, struct npcm_i2c, adap); /* Allow 3 bytes (27 toggles) to be read from the slave: */ int iter = 27; if ((npcm_i2c_get_SDA(_adap) == 1) && (npcm_i2c_get_SCL(_adap) == 1)) { dev_dbg(bus->dev, "bus%d-0x%x recovery skipped, bus not stuck", bus->num, bus->dest_addr); npcm_i2c_reset(bus); return 0; } npcm_i2c_int_enable(bus, false); npcm_i2c_disable(bus); npcm_i2c_enable(bus); iowrite8(NPCM_I2CCST_BB, bus->reg + NPCM_I2CCST); npcm_i2c_clear_tx_fifo(bus); npcm_i2c_clear_rx_fifo(bus); iowrite8(0, bus->reg + NPCM_I2CRXF_CTL); iowrite8(0, bus->reg + NPCM_I2CTXF_CTL); npcm_i2c_stall_after_start(bus, false); /* select bank 1 for FIFO regs */ npcm_i2c_select_bank(bus, I2C_BANK_1); /* clear FIFO and relevant status bits. */ fif_cts = ioread8(bus->reg + NPCM_I2CFIF_CTS); fif_cts &= ~NPCM_I2CFIF_CTS_SLVRSTR; fif_cts |= NPCM_I2CFIF_CTS_CLR_FIFO; iowrite8(fif_cts, bus->reg + NPCM_I2CFIF_CTS); npcm_i2c_set_fifo(bus, -1, 0); /* Repeat the following sequence until SDA is released */ do { /* Issue a single SCL toggle */ iowrite8(NPCM_I2CCST_TGSCL, bus->reg + NPCM_I2CCST); usleep_range(20, 30); /* If SDA line is inactive (high), stop */ if (npcm_i2c_get_SDA(_adap)) { done = true; status = 0; } } while (!done && iter--); /* If SDA line is released: send start-addr-stop, to re-sync. 
*/ if (npcm_i2c_get_SDA(_adap)) { /* Send an address byte in write direction: */ npcm_i2c_wr_byte(bus, bus->dest_addr); npcm_i2c_master_start(bus); /* Wait until START condition is sent */ status = readx_poll_timeout(npcm_i2c_get_SCL, _adap, val, !val, 20, 200); /* If START condition was sent */ if (npcm_i2c_is_master(bus) > 0) { usleep_range(20, 30); npcm_i2c_master_stop(bus); usleep_range(200, 500); } } npcm_i2c_reset(bus); npcm_i2c_int_enable(bus, true); if ((npcm_i2c_get_SDA(_adap) == 1) && (npcm_i2c_get_SCL(_adap) == 1)) status = 0; else status = -ENOTRECOVERABLE; if (status) { if (bus->rec_fail_cnt < ULLONG_MAX) bus->rec_fail_cnt++; } else { if (bus->rec_succ_cnt < ULLONG_MAX) bus->rec_succ_cnt++; } return status; } /* recovery using bit banging functionality of the module */ static void npcm_i2c_recovery_init(struct i2c_adapter *_adap) { struct npcm_i2c *bus = container_of(_adap, struct npcm_i2c, adap); struct i2c_bus_recovery_info *rinfo = &bus->rinfo; rinfo->recover_bus = npcm_i2c_recovery_tgclk; /* * npcm i2c HW allows direct reading of SCL and SDA. * However, it does not support setting SCL and SDA directly. * The recovery function can toggle SCL when SDA is low (but not set) * Getter functions used internally, and can be used externally. */ rinfo->get_scl = npcm_i2c_get_SCL; rinfo->get_sda = npcm_i2c_get_SDA; _adap->bus_recovery_info = rinfo; } /* SCLFRQ min/max field values */ #define SCLFRQ_MIN 10 #define SCLFRQ_MAX 511 #define clk_coef(freq, mul) DIV_ROUND_UP((freq) * (mul), 1000000) /* * npcm_i2c_init_clk: init HW timing parameters. * NPCM7XX i2c module timing parameters are dependent on module core clk (APB) * and bus frequency. * 100kHz bus requires tSCL = 4 * SCLFRQ * tCLK. LT and HT are symmetric. * 400kHz bus requires asymmetric HT and LT. A different equation is recommended * by the HW designer, given core clock range (equations in comments below). 
* */ static int npcm_i2c_init_clk(struct npcm_i2c *bus, u32 bus_freq_hz) { u32 k1 = 0; u32 k2 = 0; u8 dbnct = 0; u32 sclfrq = 0; u8 hldt = 7; u8 fast_mode = 0; u32 src_clk_khz; u32 bus_freq_khz; src_clk_khz = bus->apb_clk / 1000; bus_freq_khz = bus_freq_hz / 1000; bus->bus_freq = bus_freq_hz; /* 100KHz and below: */ if (bus_freq_hz <= I2C_MAX_STANDARD_MODE_FREQ) { sclfrq = src_clk_khz / (bus_freq_khz * 4); if (sclfrq < SCLFRQ_MIN || sclfrq > SCLFRQ_MAX) return -EDOM; if (src_clk_khz >= 40000) hldt = 17; else if (src_clk_khz >= 12500) hldt = 15; else hldt = 7; } /* 400KHz: */ else if (bus_freq_hz <= I2C_MAX_FAST_MODE_FREQ) { sclfrq = 0; fast_mode = I2CCTL3_400K_MODE; if (src_clk_khz < 7500) /* 400KHZ cannot be supported for core clock < 7.5MHz */ return -EDOM; else if (src_clk_khz >= 50000) { k1 = 80; k2 = 48; hldt = 12; dbnct = 7; } /* Master or Slave with frequency > 25MHz */ else if (src_clk_khz > 25000) { hldt = clk_coef(src_clk_khz, 300) + 7; k1 = clk_coef(src_clk_khz, 1600); k2 = clk_coef(src_clk_khz, 900); } } /* 1MHz: */ else if (bus_freq_hz <= I2C_MAX_FAST_MODE_PLUS_FREQ) { sclfrq = 0; fast_mode = I2CCTL3_400K_MODE; /* 1MHZ cannot be supported for core clock < 24 MHz */ if (src_clk_khz < 24000) return -EDOM; k1 = clk_coef(src_clk_khz, 620); k2 = clk_coef(src_clk_khz, 380); /* Core clk > 40 MHz */ if (src_clk_khz > 40000) { /* * Set HLDT: * SDA hold time: (HLDT-7) * T(CLK) >= 120 * HLDT = 120/T(CLK) + 7 = 120 * FREQ(CLK) + 7 */ hldt = clk_coef(src_clk_khz, 120) + 7; } else { hldt = 7; dbnct = 2; } } /* Frequency larger than 1 MHz is not supported */ else return -EINVAL; if (bus_freq_hz >= I2C_MAX_FAST_MODE_FREQ) { k1 = round_up(k1, 2); k2 = round_up(k2 + 1, 2); if (k1 < SCLFRQ_MIN || k1 > SCLFRQ_MAX || k2 < SCLFRQ_MIN || k2 > SCLFRQ_MAX) return -EDOM; } /* write sclfrq value. 
bits [6:0] are in I2CCTL2 reg */ iowrite8(FIELD_PREP(I2CCTL2_SCLFRQ6_0, sclfrq & 0x7F), bus->reg + NPCM_I2CCTL2); /* bits [8:7] are in I2CCTL3 reg */ iowrite8(fast_mode | FIELD_PREP(I2CCTL3_SCLFRQ8_7, (sclfrq >> 7) & 0x3), bus->reg + NPCM_I2CCTL3); /* Select Bank 0 to access NPCM_I2CCTL4/NPCM_I2CCTL5 */ npcm_i2c_select_bank(bus, I2C_BANK_0); if (bus_freq_hz >= I2C_MAX_FAST_MODE_FREQ) { /* * Set SCL Low/High Time: * k1 = 2 * SCLLT7-0 -> Low Time = k1 / 2 * k2 = 2 * SCLLT7-0 -> High Time = k2 / 2 */ iowrite8(k1 / 2, bus->reg + NPCM_I2CSCLLT); iowrite8(k2 / 2, bus->reg + NPCM_I2CSCLHT); iowrite8(dbnct, bus->reg + NPCM_I2CCTL5); } iowrite8(hldt, bus->reg + NPCM_I2CCTL4); /* Return to Bank 1, and stay there by default: */ npcm_i2c_select_bank(bus, I2C_BANK_1); return 0; } static int npcm_i2c_init_module(struct npcm_i2c *bus, enum i2c_mode mode, u32 bus_freq_hz) { u8 val; int ret; /* Check whether module already enabled or frequency is out of bounds */ if ((bus->state != I2C_DISABLE && bus->state != I2C_IDLE) || bus_freq_hz < I2C_FREQ_MIN_HZ || bus_freq_hz > I2C_FREQ_MAX_HZ) return -EINVAL; npcm_i2c_int_enable(bus, false); npcm_i2c_disable(bus); /* Configure FIFO mode : */ if (FIELD_GET(I2C_VER_FIFO_EN, ioread8(bus->reg + I2C_VER))) { bus->fifo_use = true; npcm_i2c_select_bank(bus, I2C_BANK_0); val = ioread8(bus->reg + NPCM_I2CFIF_CTL); val |= NPCM_I2CFIF_CTL_FIFO_EN; iowrite8(val, bus->reg + NPCM_I2CFIF_CTL); npcm_i2c_select_bank(bus, I2C_BANK_1); } else { bus->fifo_use = false; } /* Configure I2C module clock frequency */ ret = npcm_i2c_init_clk(bus, bus_freq_hz); if (ret) { dev_err(bus->dev, "npcm_i2c_init_clk failed\n"); return ret; } /* Enable module (before configuring CTL1) */ npcm_i2c_enable(bus); bus->state = I2C_IDLE; val = ioread8(bus->reg + NPCM_I2CCTL1); val = (val | NPCM_I2CCTL1_NMINTE) & ~NPCM_I2CCTL1_RWS; iowrite8(val, bus->reg + NPCM_I2CCTL1); npcm_i2c_reset(bus); /* Check HW is OK: SDA and SCL should be high at this point. 
*/ if ((npcm_i2c_get_SDA(&bus->adap) == 0) || (npcm_i2c_get_SCL(&bus->adap) == 0)) { dev_err(bus->dev, "I2C%d init fail: lines are low\n", bus->num); dev_err(bus->dev, "SDA=%d SCL=%d\n", npcm_i2c_get_SDA(&bus->adap), npcm_i2c_get_SCL(&bus->adap)); return -ENXIO; } npcm_i2c_int_enable(bus, true); return 0; } static int __npcm_i2c_init(struct npcm_i2c *bus, struct platform_device *pdev) { u32 clk_freq_hz; int ret; /* Initialize the internal data structures */ bus->state = I2C_DISABLE; bus->master_or_slave = I2C_SLAVE; bus->int_time_stamp = 0; #if IS_ENABLED(CONFIG_I2C_SLAVE) bus->slave = NULL; #endif ret = device_property_read_u32(&pdev->dev, "clock-frequency", &clk_freq_hz); if (ret) { dev_info(&pdev->dev, "Could not read clock-frequency property"); clk_freq_hz = I2C_MAX_STANDARD_MODE_FREQ; } ret = npcm_i2c_init_module(bus, I2C_MASTER, clk_freq_hz); if (ret) { dev_err(&pdev->dev, "npcm_i2c_init_module failed\n"); return ret; } return 0; } static irqreturn_t npcm_i2c_bus_irq(int irq, void *dev_id) { struct npcm_i2c *bus = dev_id; if (npcm_i2c_is_master(bus)) bus->master_or_slave = I2C_MASTER; if (bus->master_or_slave == I2C_MASTER) { bus->int_time_stamp = jiffies; if (!npcm_i2c_int_master_handler(bus)) return IRQ_HANDLED; } #if IS_ENABLED(CONFIG_I2C_SLAVE) if (bus->slave) { bus->master_or_slave = I2C_SLAVE; if (npcm_i2c_int_slave_handler(bus)) return IRQ_HANDLED; } #endif /* Clear status bits for spurious interrupts */ npcm_i2c_clear_master_status(bus); return IRQ_HANDLED; } static bool npcm_i2c_master_start_xmit(struct npcm_i2c *bus, u8 slave_addr, u16 nwrite, u16 nread, u8 *write_data, u8 *read_data, bool use_PEC, bool use_read_block) { if (bus->state != I2C_IDLE) { bus->cmd_err = -EBUSY; return false; } bus->dest_addr = slave_addr << 1; bus->wr_buf = write_data; bus->wr_size = nwrite; bus->wr_ind = 0; bus->rd_buf = read_data; bus->rd_size = nread; bus->rd_ind = 0; bus->PEC_use = 0; /* for tx PEC is appended to buffer from i2c IF. PEC flag is ignored */ if (nread) bus->PEC_use = use_PEC; bus->read_block_use = use_read_block; if (nread && !nwrite) bus->operation = I2C_READ_OPER; else bus->operation = I2C_WRITE_OPER; if (bus->fifo_use) { u8 i2cfif_cts; npcm_i2c_select_bank(bus, I2C_BANK_1); /* clear FIFO and relevant status bits. 
*/ i2cfif_cts = ioread8(bus->reg + NPCM_I2CFIF_CTS); i2cfif_cts &= ~NPCM_I2CFIF_CTS_SLVRSTR; i2cfif_cts |= NPCM_I2CFIF_CTS_CLR_FIFO; iowrite8(i2cfif_cts, bus->reg + NPCM_I2CFIF_CTS); } bus->state = I2C_IDLE; npcm_i2c_stall_after_start(bus, true); npcm_i2c_master_start(bus); return true; } static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct npcm_i2c *bus = container_of(adap, struct npcm_i2c, adap); struct i2c_msg *msg0, *msg1; unsigned long time_left, flags; u16 nwrite, nread; u8 *write_data, *read_data; u8 slave_addr; unsigned long timeout; bool read_block = false; bool read_PEC = false; u8 bus_busy; unsigned long timeout_usec; if (bus->state == I2C_DISABLE) { dev_err(bus->dev, "I2C%d module is disabled", bus->num); return -EINVAL; } msg0 = &msgs[0]; slave_addr = msg0->addr; if (msg0->flags & I2C_M_RD) { /* read */ nwrite = 0; write_data = NULL; read_data = msg0->buf; if (msg0->flags & I2C_M_RECV_LEN) { nread = 1; read_block = true; if (msg0->flags & I2C_CLIENT_PEC) read_PEC = true; } else { nread = msg0->len; } } else { /* write */ nwrite = msg0->len; write_data = msg0->buf; nread = 0; read_data = NULL; if (num == 2) { msg1 = &msgs[1]; read_data = msg1->buf; if (msg1->flags & I2C_M_RECV_LEN) { nread = 1; read_block = true; if (msg1->flags & I2C_CLIENT_PEC) read_PEC = true; } else { nread = msg1->len; read_block = false; } } } /* * Adaptive TimeOut: estimated time in usec + 100% margin: * 2: double the timeout for clock stretching case * 9: bits per transaction (including the ack/nack) */ timeout_usec = (2 * 9 * USEC_PER_SEC / bus->bus_freq) * (2 + nread + nwrite); timeout = max_t(unsigned long, bus->adap.timeout, usecs_to_jiffies(timeout_usec)); if (nwrite >= 32 * 1024 || nread >= 32 * 1024) { dev_err(bus->dev, "i2c%d buffer too big\n", bus->num); return -EINVAL; } time_left = jiffies + timeout + 1; do { /* * we must clear slave address immediately when the bus is not * busy, so we spinlock it, but we don't keep the lock for the * entire while since it is too long. */ spin_lock_irqsave(&bus->lock, flags); bus_busy = ioread8(bus->reg + NPCM_I2CCST) & NPCM_I2CCST_BB; #if IS_ENABLED(CONFIG_I2C_SLAVE) if (!bus_busy && bus->slave) iowrite8((bus->slave->addr & 0x7F), bus->reg + NPCM_I2CADDR1); #endif spin_unlock_irqrestore(&bus->lock, flags); } while (time_is_after_jiffies(time_left) && bus_busy); if (bus_busy) { iowrite8(NPCM_I2CCST_BB, bus->reg + NPCM_I2CCST); npcm_i2c_reset(bus); i2c_recover_bus(adap); return -EAGAIN; } npcm_i2c_init_params(bus); bus->dest_addr = slave_addr; bus->msgs = msgs; bus->msgs_num = num; bus->cmd_err = 0; bus->read_block_use = read_block; reinit_completion(&bus->cmd_complete); npcm_i2c_int_enable(bus, true); if (npcm_i2c_master_start_xmit(bus, slave_addr, nwrite, nread, write_data, read_data, read_PEC, read_block)) { time_left = wait_for_completion_timeout(&bus->cmd_complete, timeout); if (time_left == 0) { if (bus->timeout_cnt < ULLONG_MAX) bus->timeout_cnt++; if (bus->master_or_slave == I2C_MASTER) { i2c_recover_bus(adap); bus->cmd_err = -EIO; bus->state = I2C_IDLE; } } } /* if there was BER, check if need to recover the bus: */ if (bus->cmd_err == -EAGAIN) bus->cmd_err = i2c_recover_bus(adap); /* * After any type of error, check if LAST bit is still set, * due to a HW issue. * It cannot be cleared without resetting the module. 
*/ else if (bus->cmd_err && (bus->data->rxf_ctl_last_pec & ioread8(bus->reg + NPCM_I2CRXF_CTL))) npcm_i2c_reset(bus); /* After any xfer, successful or not, stall and EOB must be disabled */ npcm_i2c_stall_after_start(bus, false); npcm_i2c_eob_int(bus, false); #if IS_ENABLED(CONFIG_I2C_SLAVE) /* reenable slave if it was enabled */ if (bus->slave) iowrite8((bus->slave->addr & 0x7F) | NPCM_I2CADDR_SAEN, bus->reg + NPCM_I2CADDR1); #else npcm_i2c_int_enable(bus, false); #endif return bus->cmd_err; } static u32 npcm_i2c_functionality(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_SMBUS_PEC | I2C_FUNC_SLAVE; } static const struct i2c_adapter_quirks npcm_i2c_quirks = { .max_read_len = 32768, .max_write_len = 32768, .flags = I2C_AQ_COMB_WRITE_THEN_READ, }; static const struct i2c_algorithm npcm_i2c_algo = { .master_xfer = npcm_i2c_master_xfer, .functionality = npcm_i2c_functionality, #if IS_ENABLED(CONFIG_I2C_SLAVE) .reg_slave = npcm_i2c_reg_slave, .unreg_slave = npcm_i2c_unreg_slave, #endif }; /* i2c debugfs directory: used to keep health monitor of i2c devices */ static struct dentry *npcm_i2c_debugfs_dir; static void npcm_i2c_init_debugfs(struct platform_device *pdev, struct npcm_i2c *bus) { struct dentry *d; if (!npcm_i2c_debugfs_dir) return; d = debugfs_create_dir(dev_name(&pdev->dev), npcm_i2c_debugfs_dir); if (IS_ERR_OR_NULL(d)) return; debugfs_create_u64("ber_cnt", 0444, d, &bus->ber_cnt); debugfs_create_u64("nack_cnt", 0444, d, &bus->nack_cnt); debugfs_create_u64("rec_succ_cnt", 0444, d, &bus->rec_succ_cnt); debugfs_create_u64("rec_fail_cnt", 0444, d, &bus->rec_fail_cnt); debugfs_create_u64("timeout_cnt", 0444, d, &bus->timeout_cnt); debugfs_create_u64("tx_complete_cnt", 0444, d, &bus->tx_complete_cnt); bus->debugfs = d; } static int npcm_i2c_probe_bus(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; static struct regmap *gcr_regmap; struct device *dev = &pdev->dev; struct i2c_adapter *adap; struct npcm_i2c *bus; struct clk *i2c_clk; int irq; int ret; bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL); if (!bus) return -ENOMEM; bus->dev = &pdev->dev; bus->data = of_device_get_match_data(dev); if (!bus->data) { dev_err(dev, "OF data missing\n"); return -EINVAL; } bus->num = of_alias_get_id(pdev->dev.of_node, "i2c"); /* core clk must be acquired to calculate module timing settings */ i2c_clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(i2c_clk)) return PTR_ERR(i2c_clk); bus->apb_clk = clk_get_rate(i2c_clk); gcr_regmap = syscon_regmap_lookup_by_phandle(np, "nuvoton,sys-mgr"); if (IS_ERR(gcr_regmap)) gcr_regmap = syscon_regmap_lookup_by_compatible("nuvoton,npcm750-gcr"); if (IS_ERR(gcr_regmap)) return PTR_ERR(gcr_regmap); regmap_write(gcr_regmap, NPCM_I2CSEGCTL, bus->data->segctl_init_val); bus->reg = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(bus->reg)) return PTR_ERR(bus->reg); spin_lock_init(&bus->lock); init_completion(&bus->cmd_complete); adap = &bus->adap; adap->owner = THIS_MODULE; adap->retries = 3; adap->timeout = msecs_to_jiffies(35); adap->algo = &npcm_i2c_algo; adap->quirks = &npcm_i2c_quirks; adap->algo_data = bus; adap->dev.parent = &pdev->dev; adap->dev.of_node = pdev->dev.of_node; adap->nr = pdev->id; irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; ret = devm_request_irq(bus->dev, irq, npcm_i2c_bus_irq, 0, dev_name(bus->dev), bus); if (ret) return ret; ret = __npcm_i2c_init(bus, pdev); if (ret) return ret; npcm_i2c_recovery_init(adap); i2c_set_adapdata(adap, bus); 
snprintf(bus->adap.name, sizeof(bus->adap.name), "npcm_i2c_%d", bus->num); ret = i2c_add_numbered_adapter(&bus->adap); if (ret) return ret; platform_set_drvdata(pdev, bus); npcm_i2c_init_debugfs(pdev, bus); return 0; } static void npcm_i2c_remove_bus(struct platform_device *pdev) { unsigned long lock_flags; struct npcm_i2c *bus = platform_get_drvdata(pdev); debugfs_remove_recursive(bus->debugfs); spin_lock_irqsave(&bus->lock, lock_flags); npcm_i2c_disable(bus); spin_unlock_irqrestore(&bus->lock, lock_flags); i2c_del_adapter(&bus->adap); } static const struct of_device_id npcm_i2c_bus_of_table[] = { { .compatible = "nuvoton,npcm750-i2c", .data = &npxm7xx_i2c_data }, { .compatible = "nuvoton,npcm845-i2c", .data = &npxm8xx_i2c_data }, {} }; MODULE_DEVICE_TABLE(of, npcm_i2c_bus_of_table); static struct platform_driver npcm_i2c_bus_driver = { .probe = npcm_i2c_probe_bus, .remove_new = npcm_i2c_remove_bus, .driver = { .name = "nuvoton-i2c", .of_match_table = npcm_i2c_bus_of_table, } }; static int __init npcm_i2c_init(void) { int ret; npcm_i2c_debugfs_dir = debugfs_create_dir("npcm_i2c", NULL); ret = platform_driver_register(&npcm_i2c_bus_driver); if (ret) { debugfs_remove_recursive(npcm_i2c_debugfs_dir); return ret; } return 0; } module_init(npcm_i2c_init); static void __exit npcm_i2c_exit(void) { platform_driver_unregister(&npcm_i2c_bus_driver); debugfs_remove_recursive(npcm_i2c_debugfs_dir); } module_exit(npcm_i2c_exit); MODULE_AUTHOR("Avi Fishman <[email protected]>"); MODULE_AUTHOR("Tali Perry <[email protected]>"); MODULE_AUTHOR("Tyrone Ting <[email protected]>"); MODULE_DESCRIPTION("Nuvoton I2C Bus Driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/i2c/busses/i2c-npcm7xx.c
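The adaptive timeout computed in npcm_i2c_master_xfer() above is easy to sanity-check outside the driver. The following is a minimal user-space sketch, not driver code: the helper name npcm_timeout_usec and the example bus frequencies are hypothetical, while the expression itself is copied from the driver (roughly 9 clock cycles per byte including the ACK bit, doubled to allow for clock stretching, plus two bytes of margin for the address phase).

#include <stdio.h>

#define USEC_PER_SEC 1000000UL

static unsigned long npcm_timeout_usec(unsigned long bus_freq_hz,
				       unsigned long nwrite,
				       unsigned long nread)
{
	/* same expression as in npcm_i2c_master_xfer(), local names only */
	return (2 * 9 * USEC_PER_SEC / bus_freq_hz) * (2 + nread + nwrite);
}

int main(void)
{
	/* 32-byte read at 100 kHz, doubling margin already included: ~6.1 ms */
	printf("100 kHz, 32-byte read : %lu us\n",
	       npcm_timeout_usec(100000, 0, 32));
	/* 2-byte write at 400 kHz */
	printf("400 kHz, 2-byte write : %lu us\n",
	       npcm_timeout_usec(400000, 2, 0));
	return 0;
}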
// SPDX-License-Identifier: GPL-2.0 /* * Synopsys DesignWare I2C adapter driver (slave only). * * Based on the Synopsys DesignWare I2C adapter driver (master). * * Copyright (C) 2016 Synopsys Inc. */ #include <linux/delay.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> #include <linux/pm_runtime.h> #include <linux/regmap.h> #include "i2c-designware-core.h" static void i2c_dw_configure_fifo_slave(struct dw_i2c_dev *dev) { /* Configure Tx/Rx FIFO threshold levels. */ regmap_write(dev->map, DW_IC_TX_TL, 0); regmap_write(dev->map, DW_IC_RX_TL, 0); /* Configure the I2C slave. */ regmap_write(dev->map, DW_IC_CON, dev->slave_cfg); regmap_write(dev->map, DW_IC_INTR_MASK, DW_IC_INTR_SLAVE_MASK); } /** * i2c_dw_init_slave() - Initialize the designware i2c slave hardware * @dev: device private data * * This function configures and enables the I2C in slave mode. * This function is called during I2C init function, and in case of timeout at * run time. */ static int i2c_dw_init_slave(struct dw_i2c_dev *dev) { int ret; ret = i2c_dw_acquire_lock(dev); if (ret) return ret; /* Disable the adapter. */ __i2c_dw_disable(dev); /* Write SDA hold time if supported */ if (dev->sda_hold_time) regmap_write(dev->map, DW_IC_SDA_HOLD, dev->sda_hold_time); i2c_dw_configure_fifo_slave(dev); i2c_dw_release_lock(dev); return 0; } static int i2c_dw_reg_slave(struct i2c_client *slave) { struct dw_i2c_dev *dev = i2c_get_adapdata(slave->adapter); if (dev->slave) return -EBUSY; if (slave->flags & I2C_CLIENT_TEN) return -EAFNOSUPPORT; pm_runtime_get_sync(dev->dev); /* * Set slave address in the IC_SAR register, * the address to which the DW_apb_i2c responds. */ __i2c_dw_disable_nowait(dev); regmap_write(dev->map, DW_IC_SAR, slave->addr); dev->slave = slave; __i2c_dw_enable(dev); dev->status = 0; return 0; } static int i2c_dw_unreg_slave(struct i2c_client *slave) { struct dw_i2c_dev *dev = i2c_get_adapdata(slave->adapter); regmap_write(dev->map, DW_IC_INTR_MASK, 0); dev->disable(dev); synchronize_irq(dev->irq); dev->slave = NULL; pm_runtime_put(dev->dev); return 0; } static u32 i2c_dw_read_clear_intrbits_slave(struct dw_i2c_dev *dev) { unsigned int stat, dummy; /* * The IC_INTR_STAT register just indicates "enabled" interrupts. * The unmasked raw version of interrupt status bits is available * in the IC_RAW_INTR_STAT register. * * That is, * stat = readl(IC_INTR_STAT); * equals to, * stat = readl(IC_RAW_INTR_STAT) & readl(IC_INTR_MASK); * * The raw version might be useful for debugging purposes. */ regmap_read(dev->map, DW_IC_INTR_STAT, &stat); /* * Do not use the IC_CLR_INTR register to clear interrupts, or * you'll miss some interrupts, triggered during the period from * readl(IC_INTR_STAT) to readl(IC_CLR_INTR). * * Instead, use the separately-prepared IC_CLR_* registers. 
*/ if (stat & DW_IC_INTR_TX_ABRT) regmap_read(dev->map, DW_IC_CLR_TX_ABRT, &dummy); if (stat & DW_IC_INTR_RX_UNDER) regmap_read(dev->map, DW_IC_CLR_RX_UNDER, &dummy); if (stat & DW_IC_INTR_RX_OVER) regmap_read(dev->map, DW_IC_CLR_RX_OVER, &dummy); if (stat & DW_IC_INTR_TX_OVER) regmap_read(dev->map, DW_IC_CLR_TX_OVER, &dummy); if (stat & DW_IC_INTR_RX_DONE) regmap_read(dev->map, DW_IC_CLR_RX_DONE, &dummy); if (stat & DW_IC_INTR_ACTIVITY) regmap_read(dev->map, DW_IC_CLR_ACTIVITY, &dummy); if (stat & DW_IC_INTR_STOP_DET) regmap_read(dev->map, DW_IC_CLR_STOP_DET, &dummy); if (stat & DW_IC_INTR_START_DET) regmap_read(dev->map, DW_IC_CLR_START_DET, &dummy); if (stat & DW_IC_INTR_GEN_CALL) regmap_read(dev->map, DW_IC_CLR_GEN_CALL, &dummy); return stat; } /* * Interrupt service routine. This gets called whenever an I2C slave interrupt * occurs. */ static irqreturn_t i2c_dw_isr_slave(int this_irq, void *dev_id) { struct dw_i2c_dev *dev = dev_id; unsigned int raw_stat, stat, enabled, tmp; u8 val = 0, slave_activity; regmap_read(dev->map, DW_IC_ENABLE, &enabled); regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &raw_stat); regmap_read(dev->map, DW_IC_STATUS, &tmp); slave_activity = ((tmp & DW_IC_STATUS_SLAVE_ACTIVITY) >> 6); if (!enabled || !(raw_stat & ~DW_IC_INTR_ACTIVITY) || !dev->slave) return IRQ_NONE; stat = i2c_dw_read_clear_intrbits_slave(dev); dev_dbg(dev->dev, "%#x STATUS SLAVE_ACTIVITY=%#x : RAW_INTR_STAT=%#x : INTR_STAT=%#x\n", enabled, slave_activity, raw_stat, stat); if (stat & DW_IC_INTR_RX_FULL) { if (!(dev->status & STATUS_WRITE_IN_PROGRESS)) { dev->status |= STATUS_WRITE_IN_PROGRESS; dev->status &= ~STATUS_READ_IN_PROGRESS; i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_REQUESTED, &val); } do { regmap_read(dev->map, DW_IC_DATA_CMD, &tmp); if (tmp & DW_IC_DATA_CMD_FIRST_DATA_BYTE) i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_REQUESTED, &val); val = tmp; i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_RECEIVED, &val); regmap_read(dev->map, DW_IC_STATUS, &tmp); } while (tmp & DW_IC_STATUS_RFNE); } if (stat & DW_IC_INTR_RD_REQ) { if (slave_activity) { regmap_read(dev->map, DW_IC_CLR_RD_REQ, &tmp); if (!(dev->status & STATUS_READ_IN_PROGRESS)) { i2c_slave_event(dev->slave, I2C_SLAVE_READ_REQUESTED, &val); dev->status |= STATUS_READ_IN_PROGRESS; dev->status &= ~STATUS_WRITE_IN_PROGRESS; } else { i2c_slave_event(dev->slave, I2C_SLAVE_READ_PROCESSED, &val); } regmap_write(dev->map, DW_IC_DATA_CMD, val); } } if (stat & DW_IC_INTR_STOP_DET) i2c_slave_event(dev->slave, I2C_SLAVE_STOP, &val); return IRQ_HANDLED; } static const struct i2c_algorithm i2c_dw_algo = { .functionality = i2c_dw_func, .reg_slave = i2c_dw_reg_slave, .unreg_slave = i2c_dw_unreg_slave, }; void i2c_dw_configure_slave(struct dw_i2c_dev *dev) { dev->functionality = I2C_FUNC_SLAVE | DW_IC_DEFAULT_FUNCTIONALITY; dev->slave_cfg = DW_IC_CON_RX_FIFO_FULL_HLD_CTRL | DW_IC_CON_RESTART_EN | DW_IC_CON_STOP_DET_IFADDRESSED; dev->mode = DW_IC_SLAVE; } EXPORT_SYMBOL_GPL(i2c_dw_configure_slave); int i2c_dw_probe_slave(struct dw_i2c_dev *dev) { struct i2c_adapter *adap = &dev->adapter; int ret; dev->init = i2c_dw_init_slave; dev->disable = i2c_dw_disable; ret = i2c_dw_init_regmap(dev); if (ret) return ret; ret = i2c_dw_set_sda_hold(dev); if (ret) return ret; ret = i2c_dw_set_fifo_size(dev); if (ret) return ret; ret = dev->init(dev); if (ret) return ret; snprintf(adap->name, sizeof(adap->name), "Synopsys DesignWare I2C Slave adapter"); adap->retries = 3; adap->algo = &i2c_dw_algo; adap->dev.parent = dev->dev; i2c_set_adapdata(adap, dev); ret = 
devm_request_irq(dev->dev, dev->irq, i2c_dw_isr_slave, IRQF_SHARED, dev_name(dev->dev), dev); if (ret) { dev_err(dev->dev, "failure requesting irq %i: %d\n", dev->irq, ret); return ret; } ret = i2c_add_numbered_adapter(adap); if (ret) dev_err(dev->dev, "failure adding adapter: %d\n", ret); return ret; } EXPORT_SYMBOL_GPL(i2c_dw_probe_slave); MODULE_AUTHOR("Luis Oliveira <[email protected]>"); MODULE_DESCRIPTION("Synopsys DesignWare I2C bus slave adapter"); MODULE_LICENSE("GPL v2");
linux-master
drivers/i2c/busses/i2c-designware-slave.c
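The interrupt handler above reports events to whatever backend is registered on the bus through i2c_slave_event(). As a rough sketch of the other side of that API (assuming CONFIG_I2C_SLAVE is enabled), here is a hypothetical minimal register-file backend in the spirit of i2c-slave-eeprom; every demo_* name is made up for illustration and is not part of the driver shown above.

#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>

struct demo_slave {
	u8 reg;			/* register pointer, set by first written byte */
	u8 mem[16];		/* tiny register file exposed to the master */
	bool first_write;
};

static int demo_slave_cb(struct i2c_client *client,
			 enum i2c_slave_event event, u8 *val)
{
	struct demo_slave *d = i2c_get_clientdata(client);

	switch (event) {
	case I2C_SLAVE_WRITE_REQUESTED:
		d->first_write = true;
		break;
	case I2C_SLAVE_WRITE_RECEIVED:
		if (d->first_write) {
			d->reg = *val % ARRAY_SIZE(d->mem);
			d->first_write = false;
		} else {
			d->mem[d->reg] = *val;
			d->reg = (d->reg + 1) % ARRAY_SIZE(d->mem);
		}
		break;
	case I2C_SLAVE_READ_PROCESSED:
		/* previous byte went out on the wire, move to the next one */
		d->reg = (d->reg + 1) % ARRAY_SIZE(d->mem);
		fallthrough;
	case I2C_SLAVE_READ_REQUESTED:
		*val = d->mem[d->reg];
		break;
	case I2C_SLAVE_STOP:
		d->first_write = false;
		break;
	default:
		break;
	}
	return 0;
}

static int demo_slave_probe(struct i2c_client *client)
{
	struct demo_slave *d;

	d = devm_kzalloc(&client->dev, sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;
	i2c_set_clientdata(client, d);

	/* hooks the callback into adapters such as the one above */
	return i2c_slave_register(client, demo_slave_cb);
}

static void demo_slave_remove(struct i2c_client *client)
{
	i2c_slave_unregister(client);
}

static const struct i2c_device_id demo_slave_id[] = {
	{ "demo-slave" },
	{ }
};
MODULE_DEVICE_TABLE(i2c, demo_slave_id);

static struct i2c_driver demo_slave_driver = {
	.driver = {
		.name = "demo-slave",
	},
	.probe = demo_slave_probe,
	.remove = demo_slave_remove,
	.id_table = demo_slave_id,
};
module_i2c_driver(demo_slave_driver);

MODULE_DESCRIPTION("Illustrative I2C slave backend sketch");
MODULE_LICENSE("GPL");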
// SPDX-License-Identifier: GPL-2.0 /* * This is a combined i2c adapter and algorithm driver for the * MPC107/Tsi107 PowerPC northbridge and processors that include * the same I2C unit (8240, 8245, 85xx). * * Copyright (C) 2003-2004 Humboldt Solutions Ltd, [email protected] * Copyright (C) 2021 Allied Telesis Labs */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/sched/signal.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/platform_device.h> #include <linux/property.h> #include <linux/slab.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/fsl_devices.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <asm/mpc52xx.h> #include <asm/mpc85xx.h> #include <sysdev/fsl_soc.h> #define DRV_NAME "mpc-i2c" #define MPC_I2C_CLOCK_LEGACY 0 #define MPC_I2C_CLOCK_PRESERVE (~0U) #define MPC_I2C_FDR 0x04 #define MPC_I2C_CR 0x08 #define MPC_I2C_SR 0x0c #define MPC_I2C_DR 0x10 #define MPC_I2C_DFSRR 0x14 #define CCR_MEN 0x80 #define CCR_MIEN 0x40 #define CCR_MSTA 0x20 #define CCR_MTX 0x10 #define CCR_TXAK 0x08 #define CCR_RSTA 0x04 #define CCR_RSVD 0x02 #define CSR_MCF 0x80 #define CSR_MAAS 0x40 #define CSR_MBB 0x20 #define CSR_MAL 0x10 #define CSR_SRW 0x04 #define CSR_MIF 0x02 #define CSR_RXAK 0x01 enum mpc_i2c_action { MPC_I2C_ACTION_START = 1, MPC_I2C_ACTION_RESTART, MPC_I2C_ACTION_READ_BEGIN, MPC_I2C_ACTION_READ_BYTE, MPC_I2C_ACTION_WRITE, MPC_I2C_ACTION_STOP, __MPC_I2C_ACTION_CNT }; static const char * const action_str[] = { "invalid", "start", "restart", "read begin", "read", "write", "stop", }; static_assert(ARRAY_SIZE(action_str) == __MPC_I2C_ACTION_CNT); struct mpc_i2c { struct device *dev; void __iomem *base; u32 interrupt; wait_queue_head_t waitq; spinlock_t lock; struct i2c_adapter adap; int irq; u32 real_clk; u8 fdr, dfsrr; struct clk *clk_per; u32 cntl_bits; enum mpc_i2c_action action; struct i2c_msg *msgs; int num_msgs; int curr_msg; u32 byte_posn; u32 block; int rc; int expect_rxack; bool has_errata_A004447; }; struct mpc_i2c_divider { u16 divider; u16 fdr; /* including dfsrr */ }; struct mpc_i2c_data { void (*setup)(struct device_node *node, struct mpc_i2c *i2c, u32 clock); }; static inline void writeccr(struct mpc_i2c *i2c, u32 x) { writeb(x, i2c->base + MPC_I2C_CR); } /* Sometimes 9th clock pulse isn't generated, and slave doesn't release * the bus, because it wants to send ACK. * Following sequence of enabling/disabling and sending start/stop generates * the 9 pulses, each with a START then ending with STOP, so it's all OK. */ static void mpc_i2c_fixup(struct mpc_i2c *i2c) { int k; unsigned long flags; for (k = 9; k; k--) { writeccr(i2c, 0); writeb(0, i2c->base + MPC_I2C_SR); /* clear any status bits */ writeccr(i2c, CCR_MEN | CCR_MSTA); /* START */ readb(i2c->base + MPC_I2C_DR); /* init xfer */ udelay(15); /* let it hit the bus */ local_irq_save(flags); /* should not be delayed further */ writeccr(i2c, CCR_MEN | CCR_MSTA | CCR_RSTA); /* delay SDA */ readb(i2c->base + MPC_I2C_DR); if (k != 1) udelay(5); local_irq_restore(flags); } writeccr(i2c, CCR_MEN); /* Initiate STOP */ readb(i2c->base + MPC_I2C_DR); udelay(15); /* Let STOP propagate */ writeccr(i2c, 0); } static int i2c_mpc_wait_sr(struct mpc_i2c *i2c, int mask) { void __iomem *addr = i2c->base + MPC_I2C_SR; u8 val; return readb_poll_timeout(addr, val, val & mask, 0, 100); } /* * Workaround for Erratum A004447. From the P2040CE Rev Q * * 1. Set up the frequency divider and sampling rate. 
* 2. I2CCR - a0h * 3. Poll for I2CSR[MBB] to get set. * 4. If I2CSR[MAL] is set (an indication that SDA is stuck low), then go to * step 5. If MAL is not set, then go to step 13. * 5. I2CCR - 00h * 6. I2CCR - 22h * 7. I2CCR - a2h * 8. Poll for I2CSR[MBB] to get set. * 9. Issue read to I2CDR. * 10. Poll for I2CSR[MIF] to be set. * 11. I2CCR - 82h * 12. Workaround complete. Skip the next steps. * 13. Issue read to I2CDR. * 14. Poll for I2CSR[MIF] to be set. * 15. I2CCR - 80h */ static void mpc_i2c_fixup_A004447(struct mpc_i2c *i2c) { int ret; u32 val; writeccr(i2c, CCR_MEN | CCR_MSTA); ret = i2c_mpc_wait_sr(i2c, CSR_MBB); if (ret) { dev_err(i2c->dev, "timeout waiting for CSR_MBB\n"); return; } val = readb(i2c->base + MPC_I2C_SR); if (val & CSR_MAL) { writeccr(i2c, 0x00); writeccr(i2c, CCR_MSTA | CCR_RSVD); writeccr(i2c, CCR_MEN | CCR_MSTA | CCR_RSVD); ret = i2c_mpc_wait_sr(i2c, CSR_MBB); if (ret) { dev_err(i2c->dev, "timeout waiting for CSR_MBB\n"); return; } val = readb(i2c->base + MPC_I2C_DR); ret = i2c_mpc_wait_sr(i2c, CSR_MIF); if (ret) { dev_err(i2c->dev, "timeout waiting for CSR_MIF\n"); return; } writeccr(i2c, CCR_MEN | CCR_RSVD); } else { val = readb(i2c->base + MPC_I2C_DR); ret = i2c_mpc_wait_sr(i2c, CSR_MIF); if (ret) { dev_err(i2c->dev, "timeout waiting for CSR_MIF\n"); return; } writeccr(i2c, CCR_MEN); } } #if defined(CONFIG_PPC_MPC52xx) || defined(CONFIG_PPC_MPC512x) static const struct mpc_i2c_divider mpc_i2c_dividers_52xx[] = { {20, 0x20}, {22, 0x21}, {24, 0x22}, {26, 0x23}, {28, 0x24}, {30, 0x01}, {32, 0x25}, {34, 0x02}, {36, 0x26}, {40, 0x27}, {44, 0x04}, {48, 0x28}, {52, 0x63}, {56, 0x29}, {60, 0x41}, {64, 0x2a}, {68, 0x07}, {72, 0x2b}, {80, 0x2c}, {88, 0x09}, {96, 0x2d}, {104, 0x0a}, {112, 0x2e}, {120, 0x81}, {128, 0x2f}, {136, 0x47}, {144, 0x0c}, {160, 0x30}, {176, 0x49}, {192, 0x31}, {208, 0x4a}, {224, 0x32}, {240, 0x0f}, {256, 0x33}, {272, 0x87}, {288, 0x10}, {320, 0x34}, {352, 0x89}, {384, 0x35}, {416, 0x8a}, {448, 0x36}, {480, 0x13}, {512, 0x37}, {576, 0x14}, {640, 0x38}, {768, 0x39}, {896, 0x3a}, {960, 0x17}, {1024, 0x3b}, {1152, 0x18}, {1280, 0x3c}, {1536, 0x3d}, {1792, 0x3e}, {1920, 0x1b}, {2048, 0x3f}, {2304, 0x1c}, {2560, 0x1d}, {3072, 0x1e}, {3584, 0x7e}, {3840, 0x1f}, {4096, 0x7f}, {4608, 0x5c}, {5120, 0x5d}, {6144, 0x5e}, {7168, 0xbe}, {7680, 0x5f}, {8192, 0xbf}, {9216, 0x9c}, {10240, 0x9d}, {12288, 0x9e}, {15360, 0x9f} }; static int mpc_i2c_get_fdr_52xx(struct device_node *node, u32 clock, u32 *real_clk) { struct fwnode_handle *fwnode = of_fwnode_handle(node); const struct mpc_i2c_divider *div = NULL; unsigned int pvr = mfspr(SPRN_PVR); u32 divider; int i; if (clock == MPC_I2C_CLOCK_LEGACY) { /* see below - default fdr = 0x3f -> div = 2048 */ *real_clk = mpc5xxx_fwnode_get_bus_frequency(fwnode) / 2048; return -EINVAL; } /* Determine divider value */ divider = mpc5xxx_fwnode_get_bus_frequency(fwnode) / clock; /* * We want to choose an FDR/DFSR that generates an I2C bus speed that * is equal to or lower than the requested speed. 
*/ for (i = 0; i < ARRAY_SIZE(mpc_i2c_dividers_52xx); i++) { div = &mpc_i2c_dividers_52xx[i]; /* Old MPC5200 rev A CPUs do not support the high bits */ if (div->fdr & 0xc0 && pvr == 0x80822011) continue; if (div->divider >= divider) break; } *real_clk = mpc5xxx_fwnode_get_bus_frequency(fwnode) / div->divider; return (int)div->fdr; } static void mpc_i2c_setup_52xx(struct device_node *node, struct mpc_i2c *i2c, u32 clock) { int ret, fdr; if (clock == MPC_I2C_CLOCK_PRESERVE) { dev_dbg(i2c->dev, "using fdr %d\n", readb(i2c->base + MPC_I2C_FDR)); return; } ret = mpc_i2c_get_fdr_52xx(node, clock, &i2c->real_clk); fdr = (ret >= 0) ? ret : 0x3f; /* backward compatibility */ writeb(fdr & 0xff, i2c->base + MPC_I2C_FDR); if (ret >= 0) dev_info(i2c->dev, "clock %u Hz (fdr=%d)\n", i2c->real_clk, fdr); } #else /* !(CONFIG_PPC_MPC52xx || CONFIG_PPC_MPC512x) */ static void mpc_i2c_setup_52xx(struct device_node *node, struct mpc_i2c *i2c, u32 clock) { } #endif /* CONFIG_PPC_MPC52xx || CONFIG_PPC_MPC512x */ #ifdef CONFIG_PPC_MPC512x static void mpc_i2c_setup_512x(struct device_node *node, struct mpc_i2c *i2c, u32 clock) { struct device_node *node_ctrl; void __iomem *ctrl; u32 idx; /* Enable I2C interrupts for mpc5121 */ node_ctrl = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-i2c-ctrl"); if (node_ctrl) { ctrl = of_iomap(node_ctrl, 0); if (ctrl) { u64 addr; /* Interrupt enable bits for i2c-0/1/2: bit 24/26/28 */ of_property_read_reg(node, 0, &addr, NULL); idx = (addr & 0xff) / 0x20; setbits32(ctrl, 1 << (24 + idx * 2)); iounmap(ctrl); } of_node_put(node_ctrl); } /* The clock setup for the 52xx works also fine for the 512x */ mpc_i2c_setup_52xx(node, i2c, clock); } #else /* CONFIG_PPC_MPC512x */ static void mpc_i2c_setup_512x(struct device_node *node, struct mpc_i2c *i2c, u32 clock) { } #endif /* CONFIG_PPC_MPC512x */ #ifdef CONFIG_FSL_SOC static const struct mpc_i2c_divider mpc_i2c_dividers_8xxx[] = { {160, 0x0120}, {192, 0x0121}, {224, 0x0122}, {256, 0x0123}, {288, 0x0100}, {320, 0x0101}, {352, 0x0601}, {384, 0x0102}, {416, 0x0602}, {448, 0x0126}, {480, 0x0103}, {512, 0x0127}, {544, 0x0b03}, {576, 0x0104}, {608, 0x1603}, {640, 0x0105}, {672, 0x2003}, {704, 0x0b05}, {736, 0x2b03}, {768, 0x0106}, {800, 0x3603}, {832, 0x0b06}, {896, 0x012a}, {960, 0x0107}, {1024, 0x012b}, {1088, 0x1607}, {1152, 0x0108}, {1216, 0x2b07}, {1280, 0x0109}, {1408, 0x1609}, {1536, 0x010a}, {1664, 0x160a}, {1792, 0x012e}, {1920, 0x010b}, {2048, 0x012f}, {2176, 0x2b0b}, {2304, 0x010c}, {2560, 0x010d}, {2816, 0x2b0d}, {3072, 0x010e}, {3328, 0x2b0e}, {3584, 0x0132}, {3840, 0x010f}, {4096, 0x0133}, {4608, 0x0110}, {5120, 0x0111}, {6144, 0x0112}, {7168, 0x0136}, {7680, 0x0113}, {8192, 0x0137}, {9216, 0x0114}, {10240, 0x0115}, {12288, 0x0116}, {14336, 0x013a}, {15360, 0x0117}, {16384, 0x013b}, {18432, 0x0118}, {20480, 0x0119}, {24576, 0x011a}, {28672, 0x013e}, {30720, 0x011b}, {32768, 0x013f}, {36864, 0x011c}, {40960, 0x011d}, {49152, 0x011e}, {61440, 0x011f} }; static u32 mpc_i2c_get_sec_cfg_8xxx(void) { struct device_node *node; u32 __iomem *reg; u32 val = 0; node = of_find_node_by_name(NULL, "global-utilities"); if (node) { const u32 *prop = of_get_property(node, "reg", NULL); if (prop) { /* * Map and check POR Device Status Register 2 * (PORDEVSR2) at 0xE0014. Note than while MPC8533 * and MPC8544 indicate SEC frequency ratio * configuration as bit 26 in PORDEVSR2, other MPC8xxx * parts may store it differently or may not have it * at all. 
*/ reg = ioremap(get_immrbase() + *prop + 0x14, 0x4); if (!reg) printk(KERN_ERR "Error: couldn't map PORDEVSR2\n"); else val = in_be32(reg) & 0x00000020; /* sec-cfg */ iounmap(reg); } } of_node_put(node); return val; } static u32 mpc_i2c_get_prescaler_8xxx(void) { /* * According to the AN2919 all MPC824x have prescaler 1, while MPC83xx * may have prescaler 1, 2, or 3, depending on the power-on * configuration. */ u32 prescaler = 1; /* mpc85xx */ if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2) || pvr_version_is(PVR_VER_E500MC) || pvr_version_is(PVR_VER_E5500) || pvr_version_is(PVR_VER_E6500)) { unsigned int svr = mfspr(SPRN_SVR); if ((SVR_SOC_VER(svr) == SVR_8540) || (SVR_SOC_VER(svr) == SVR_8541) || (SVR_SOC_VER(svr) == SVR_8560) || (SVR_SOC_VER(svr) == SVR_8555) || (SVR_SOC_VER(svr) == SVR_8610)) /* the above 85xx SoCs have prescaler 1 */ prescaler = 1; else if ((SVR_SOC_VER(svr) == SVR_8533) || (SVR_SOC_VER(svr) == SVR_8544)) /* the above 85xx SoCs have prescaler 3 or 2 */ prescaler = mpc_i2c_get_sec_cfg_8xxx() ? 3 : 2; else /* all the other 85xx have prescaler 2 */ prescaler = 2; } return prescaler; } static int mpc_i2c_get_fdr_8xxx(struct device_node *node, u32 clock, u32 *real_clk) { const struct mpc_i2c_divider *div = NULL; u32 prescaler = mpc_i2c_get_prescaler_8xxx(); u32 divider; int i; if (clock == MPC_I2C_CLOCK_LEGACY) { /* see below - default fdr = 0x1031 -> div = 16 * 3072 */ *real_clk = fsl_get_sys_freq() / prescaler / (16 * 3072); return -EINVAL; } divider = fsl_get_sys_freq() / clock / prescaler; pr_debug("I2C: src_clock=%d clock=%d divider=%d\n", fsl_get_sys_freq(), clock, divider); /* * We want to choose an FDR/DFSR that generates an I2C bus speed that * is equal to or lower than the requested speed. */ for (i = 0; i < ARRAY_SIZE(mpc_i2c_dividers_8xxx); i++) { div = &mpc_i2c_dividers_8xxx[i]; if (div->divider >= divider) break; } *real_clk = fsl_get_sys_freq() / prescaler / div->divider; return (int)div->fdr; } static void mpc_i2c_setup_8xxx(struct device_node *node, struct mpc_i2c *i2c, u32 clock) { int ret, fdr; if (clock == MPC_I2C_CLOCK_PRESERVE) { dev_dbg(i2c->dev, "using dfsrr %d, fdr %d\n", readb(i2c->base + MPC_I2C_DFSRR), readb(i2c->base + MPC_I2C_FDR)); return; } ret = mpc_i2c_get_fdr_8xxx(node, clock, &i2c->real_clk); fdr = (ret >= 0) ? 
ret : 0x1031; /* backward compatibility */ writeb(fdr & 0xff, i2c->base + MPC_I2C_FDR); writeb((fdr >> 8) & 0xff, i2c->base + MPC_I2C_DFSRR); if (ret >= 0) dev_info(i2c->dev, "clock %d Hz (dfsrr=%d fdr=%d)\n", i2c->real_clk, fdr >> 8, fdr & 0xff); } #else /* !CONFIG_FSL_SOC */ static void mpc_i2c_setup_8xxx(struct device_node *node, struct mpc_i2c *i2c, u32 clock) { } #endif /* CONFIG_FSL_SOC */ static void mpc_i2c_finish(struct mpc_i2c *i2c, int rc) { i2c->rc = rc; i2c->block = 0; i2c->cntl_bits = CCR_MEN; writeccr(i2c, i2c->cntl_bits); wake_up(&i2c->waitq); } static void mpc_i2c_do_action(struct mpc_i2c *i2c) { struct i2c_msg *msg = NULL; int dir = 0; int recv_len = 0; u8 byte; dev_dbg(i2c->dev, "action = %s\n", action_str[i2c->action]); i2c->cntl_bits &= ~(CCR_RSTA | CCR_MTX | CCR_TXAK); if (i2c->action != MPC_I2C_ACTION_STOP) { msg = &i2c->msgs[i2c->curr_msg]; if (msg->flags & I2C_M_RD) dir = 1; if (msg->flags & I2C_M_RECV_LEN) recv_len = 1; } switch (i2c->action) { case MPC_I2C_ACTION_RESTART: i2c->cntl_bits |= CCR_RSTA; fallthrough; case MPC_I2C_ACTION_START: i2c->cntl_bits |= CCR_MSTA | CCR_MTX; writeccr(i2c, i2c->cntl_bits); writeb((msg->addr << 1) | dir, i2c->base + MPC_I2C_DR); i2c->expect_rxack = 1; i2c->action = dir ? MPC_I2C_ACTION_READ_BEGIN : MPC_I2C_ACTION_WRITE; break; case MPC_I2C_ACTION_READ_BEGIN: if (msg->len) { if (msg->len == 1 && !(msg->flags & I2C_M_RECV_LEN)) i2c->cntl_bits |= CCR_TXAK; writeccr(i2c, i2c->cntl_bits); /* Dummy read */ readb(i2c->base + MPC_I2C_DR); } i2c->action = MPC_I2C_ACTION_READ_BYTE; break; case MPC_I2C_ACTION_READ_BYTE: if (i2c->byte_posn || !recv_len) { /* Generate Tx ACK on next to last byte */ if (i2c->byte_posn == msg->len - 2) i2c->cntl_bits |= CCR_TXAK; /* Do not generate stop on last byte */ if (i2c->byte_posn == msg->len - 1) i2c->cntl_bits |= CCR_MTX; writeccr(i2c, i2c->cntl_bits); } byte = readb(i2c->base + MPC_I2C_DR); if (i2c->byte_posn == 0 && recv_len) { if (byte == 0 || byte > I2C_SMBUS_BLOCK_MAX) { mpc_i2c_finish(i2c, -EPROTO); return; } msg->len += byte; /* * For block reads, generate Tx ACK here if data length * is 1 byte (total length is 2 bytes). 
*/ if (msg->len == 2) { i2c->cntl_bits |= CCR_TXAK; writeccr(i2c, i2c->cntl_bits); } } dev_dbg(i2c->dev, "%s %02x\n", action_str[i2c->action], byte); msg->buf[i2c->byte_posn++] = byte; break; case MPC_I2C_ACTION_WRITE: dev_dbg(i2c->dev, "%s %02x\n", action_str[i2c->action], msg->buf[i2c->byte_posn]); writeb(msg->buf[i2c->byte_posn++], i2c->base + MPC_I2C_DR); i2c->expect_rxack = 1; break; case MPC_I2C_ACTION_STOP: mpc_i2c_finish(i2c, 0); break; default: WARN(1, "Unexpected action %d\n", i2c->action); break; } if (msg && msg->len == i2c->byte_posn) { i2c->curr_msg++; i2c->byte_posn = 0; if (i2c->curr_msg == i2c->num_msgs) { i2c->action = MPC_I2C_ACTION_STOP; /* * We don't get another interrupt on read so * finish the transfer now */ if (dir) mpc_i2c_finish(i2c, 0); } else { i2c->action = MPC_I2C_ACTION_RESTART; } } } static void mpc_i2c_do_intr(struct mpc_i2c *i2c, u8 status) { spin_lock(&i2c->lock); if (!(status & CSR_MCF)) { dev_dbg(i2c->dev, "unfinished\n"); mpc_i2c_finish(i2c, -EIO); goto out; } if (status & CSR_MAL) { dev_dbg(i2c->dev, "arbitration lost\n"); mpc_i2c_finish(i2c, -EAGAIN); goto out; } if (i2c->expect_rxack && (status & CSR_RXAK)) { dev_dbg(i2c->dev, "no Rx ACK\n"); mpc_i2c_finish(i2c, -ENXIO); goto out; } i2c->expect_rxack = 0; mpc_i2c_do_action(i2c); out: spin_unlock(&i2c->lock); } static irqreturn_t mpc_i2c_isr(int irq, void *dev_id) { struct mpc_i2c *i2c = dev_id; u8 status; status = readb(i2c->base + MPC_I2C_SR); if (status & CSR_MIF) { /* Wait up to 100us for transfer to properly complete */ readb_poll_timeout_atomic(i2c->base + MPC_I2C_SR, status, status & CSR_MCF, 0, 100); writeb(0, i2c->base + MPC_I2C_SR); mpc_i2c_do_intr(i2c, status); return IRQ_HANDLED; } return IRQ_NONE; } static int mpc_i2c_wait_for_completion(struct mpc_i2c *i2c) { long time_left; time_left = wait_event_timeout(i2c->waitq, !i2c->block, i2c->adap.timeout); if (!time_left) return -ETIMEDOUT; if (time_left < 0) return time_left; return 0; } static int mpc_i2c_execute_msg(struct mpc_i2c *i2c) { unsigned long orig_jiffies; unsigned long flags; int ret; spin_lock_irqsave(&i2c->lock, flags); i2c->curr_msg = 0; i2c->rc = 0; i2c->byte_posn = 0; i2c->block = 1; i2c->action = MPC_I2C_ACTION_START; i2c->cntl_bits = CCR_MEN | CCR_MIEN; writeb(0, i2c->base + MPC_I2C_SR); writeccr(i2c, i2c->cntl_bits); mpc_i2c_do_action(i2c); spin_unlock_irqrestore(&i2c->lock, flags); ret = mpc_i2c_wait_for_completion(i2c); if (ret) i2c->rc = ret; if (i2c->rc == -EIO || i2c->rc == -EAGAIN || i2c->rc == -ETIMEDOUT) i2c_recover_bus(&i2c->adap); orig_jiffies = jiffies; /* Wait until STOP is seen, allow up to 1 s */ while (readb(i2c->base + MPC_I2C_SR) & CSR_MBB) { if (time_after(jiffies, orig_jiffies + HZ)) { u8 status = readb(i2c->base + MPC_I2C_SR); dev_dbg(i2c->dev, "timeout\n"); if ((status & (CSR_MCF | CSR_MBB | CSR_RXAK)) != 0) { writeb(status & ~CSR_MAL, i2c->base + MPC_I2C_SR); i2c_recover_bus(&i2c->adap); } return -EIO; } cond_resched(); } return i2c->rc; } static int mpc_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { int rc, ret = num; struct mpc_i2c *i2c = i2c_get_adapdata(adap); int i; dev_dbg(i2c->dev, "num = %d\n", num); for (i = 0; i < num; i++) dev_dbg(i2c->dev, " addr = %02x, flags = %02x, len = %d, %*ph\n", msgs[i].addr, msgs[i].flags, msgs[i].len, msgs[i].flags & I2C_M_RD ? 
0 : msgs[i].len, msgs[i].buf); WARN_ON(i2c->msgs != NULL); i2c->msgs = msgs; i2c->num_msgs = num; rc = mpc_i2c_execute_msg(i2c); if (rc < 0) ret = rc; i2c->num_msgs = 0; i2c->msgs = NULL; return ret; } static u32 mpc_functionality(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SMBUS_READ_BLOCK_DATA | I2C_FUNC_SMBUS_BLOCK_PROC_CALL; } static int fsl_i2c_bus_recovery(struct i2c_adapter *adap) { struct mpc_i2c *i2c = i2c_get_adapdata(adap); if (i2c->has_errata_A004447) mpc_i2c_fixup_A004447(i2c); else mpc_i2c_fixup(i2c); return 0; } static const struct i2c_algorithm mpc_algo = { .master_xfer = mpc_xfer, .functionality = mpc_functionality, }; static struct i2c_adapter mpc_ops = { .owner = THIS_MODULE, .algo = &mpc_algo, }; static struct i2c_bus_recovery_info fsl_i2c_recovery_info = { .recover_bus = fsl_i2c_bus_recovery, }; static int fsl_i2c_probe(struct platform_device *op) { const struct mpc_i2c_data *data; struct mpc_i2c *i2c; struct clk *clk; int result; u32 clock; int err; i2c = devm_kzalloc(&op->dev, sizeof(*i2c), GFP_KERNEL); if (!i2c) return -ENOMEM; i2c->dev = &op->dev; /* for debug and error output */ init_waitqueue_head(&i2c->waitq); spin_lock_init(&i2c->lock); i2c->base = devm_platform_ioremap_resource(op, 0); if (IS_ERR(i2c->base)) return PTR_ERR(i2c->base); i2c->irq = platform_get_irq(op, 0); if (i2c->irq < 0) return i2c->irq; result = devm_request_irq(&op->dev, i2c->irq, mpc_i2c_isr, IRQF_SHARED, "i2c-mpc", i2c); if (result < 0) { dev_err(i2c->dev, "failed to attach interrupt\n"); return result; } /* * enable clock for the I2C peripheral (non fatal), * keep a reference upon successful allocation */ clk = devm_clk_get_optional(&op->dev, NULL); if (IS_ERR(clk)) return PTR_ERR(clk); err = clk_prepare_enable(clk); if (err) { dev_err(&op->dev, "failed to enable clock\n"); return err; } i2c->clk_per = clk; if (of_property_read_bool(op->dev.of_node, "fsl,preserve-clocking")) { clock = MPC_I2C_CLOCK_PRESERVE; } else { result = of_property_read_u32(op->dev.of_node, "clock-frequency", &clock); if (result) clock = MPC_I2C_CLOCK_LEGACY; } data = device_get_match_data(&op->dev); if (data) { data->setup(op->dev.of_node, i2c, clock); } else { /* Backwards compatibility */ if (of_property_read_bool(op->dev.of_node, "dfsrr")) mpc_i2c_setup_8xxx(op->dev.of_node, i2c, clock); } /* * "fsl,timeout" has been marked as deprecated and, to maintain * backward compatibility, we will only look for it if * "i2c-scl-clk-low-timeout-us" is not present. 
*/ result = of_property_read_u32(op->dev.of_node, "i2c-scl-clk-low-timeout-us", &mpc_ops.timeout); if (result == -EINVAL) result = of_property_read_u32(op->dev.of_node, "fsl,timeout", &mpc_ops.timeout); if (!result) { mpc_ops.timeout *= HZ / 1000000; if (mpc_ops.timeout < 5) mpc_ops.timeout = 5; } else { mpc_ops.timeout = HZ; } dev_info(i2c->dev, "timeout %u us\n", mpc_ops.timeout * 1000000 / HZ); if (of_property_read_bool(op->dev.of_node, "fsl,i2c-erratum-a004447")) i2c->has_errata_A004447 = true; i2c->adap = mpc_ops; scnprintf(i2c->adap.name, sizeof(i2c->adap.name), "MPC adapter (%s)", of_node_full_name(op->dev.of_node)); i2c->adap.dev.parent = &op->dev; i2c->adap.nr = op->id; i2c->adap.dev.of_node = of_node_get(op->dev.of_node); i2c->adap.bus_recovery_info = &fsl_i2c_recovery_info; platform_set_drvdata(op, i2c); i2c_set_adapdata(&i2c->adap, i2c); result = i2c_add_numbered_adapter(&i2c->adap); if (result) goto fail_add; return 0; fail_add: clk_disable_unprepare(i2c->clk_per); return result; }; static void fsl_i2c_remove(struct platform_device *op) { struct mpc_i2c *i2c = platform_get_drvdata(op); i2c_del_adapter(&i2c->adap); clk_disable_unprepare(i2c->clk_per); }; static int __maybe_unused mpc_i2c_suspend(struct device *dev) { struct mpc_i2c *i2c = dev_get_drvdata(dev); i2c->fdr = readb(i2c->base + MPC_I2C_FDR); i2c->dfsrr = readb(i2c->base + MPC_I2C_DFSRR); return 0; } static int __maybe_unused mpc_i2c_resume(struct device *dev) { struct mpc_i2c *i2c = dev_get_drvdata(dev); writeb(i2c->fdr, i2c->base + MPC_I2C_FDR); writeb(i2c->dfsrr, i2c->base + MPC_I2C_DFSRR); return 0; } static SIMPLE_DEV_PM_OPS(mpc_i2c_pm_ops, mpc_i2c_suspend, mpc_i2c_resume); static const struct mpc_i2c_data mpc_i2c_data_512x = { .setup = mpc_i2c_setup_512x, }; static const struct mpc_i2c_data mpc_i2c_data_52xx = { .setup = mpc_i2c_setup_52xx, }; static const struct mpc_i2c_data mpc_i2c_data_8313 = { .setup = mpc_i2c_setup_8xxx, }; static const struct mpc_i2c_data mpc_i2c_data_8543 = { .setup = mpc_i2c_setup_8xxx, }; static const struct mpc_i2c_data mpc_i2c_data_8544 = { .setup = mpc_i2c_setup_8xxx, }; static const struct of_device_id mpc_i2c_of_match[] = { {.compatible = "mpc5200-i2c", .data = &mpc_i2c_data_52xx, }, {.compatible = "fsl,mpc5200b-i2c", .data = &mpc_i2c_data_52xx, }, {.compatible = "fsl,mpc5200-i2c", .data = &mpc_i2c_data_52xx, }, {.compatible = "fsl,mpc5121-i2c", .data = &mpc_i2c_data_512x, }, {.compatible = "fsl,mpc8313-i2c", .data = &mpc_i2c_data_8313, }, {.compatible = "fsl,mpc8543-i2c", .data = &mpc_i2c_data_8543, }, {.compatible = "fsl,mpc8544-i2c", .data = &mpc_i2c_data_8544, }, /* Backward compatibility */ {.compatible = "fsl-i2c", }, {}, }; MODULE_DEVICE_TABLE(of, mpc_i2c_of_match); /* Structure for a device driver */ static struct platform_driver mpc_i2c_driver = { .probe = fsl_i2c_probe, .remove_new = fsl_i2c_remove, .driver = { .name = DRV_NAME, .of_match_table = mpc_i2c_of_match, .pm = &mpc_i2c_pm_ops, }, }; module_platform_driver(mpc_i2c_driver); MODULE_AUTHOR("Adrian Cox <[email protected]>"); MODULE_DESCRIPTION("I2C-Bus adapter for MPC107 bridge and " "MPC824x/83xx/85xx/86xx/512x/52xx processors"); MODULE_LICENSE("GPL");
linux-master
drivers/i2c/busses/i2c-mpc.c
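The divider lookup in mpc_i2c_get_fdr_52xx() and mpc_i2c_get_fdr_8xxx() above picks the first table entry whose divider is at least source_clock / requested_clock, so the resulting bus speed never exceeds the request. Below is a standalone sketch of that selection, not driver code: it uses only a short contiguous excerpt of mpc_i2c_dividers_52xx[], the 33 MHz source clock and 100 kHz target are made-up example numbers, and the MPC5200 rev A FDR restriction is ignored.

#include <stdio.h>

struct div_entry {
	unsigned int divider;
	unsigned int fdr;	/* includes the DFSR bits on 8xxx parts */
};

/* short contiguous excerpt of mpc_i2c_dividers_52xx[] */
static const struct div_entry dividers[] = {
	{256, 0x33}, {272, 0x87}, {288, 0x10},
	{320, 0x34}, {352, 0x89}, {384, 0x35},
};

int main(void)
{
	const unsigned int src_hz = 33000000;	/* hypothetical source clock */
	const unsigned int bus_hz = 100000;	/* requested bus clock */
	unsigned int target = src_hz / bus_hz;	/* 330 */
	const struct div_entry *div = &dividers[0];
	size_t i;

	/* first divider >= target, same rule as the driver loop */
	for (i = 0; i < sizeof(dividers) / sizeof(dividers[0]); i++) {
		div = &dividers[i];
		if (div->divider >= target)
			break;
	}

	/* prints fdr=0x89 divider=352 real clock=93750 Hz */
	printf("fdr=0x%02x divider=%u real clock=%u Hz (requested %u Hz)\n",
	       div->fdr, div->divider, src_hz / div->divider, bus_hz);
	return 0;
}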
/* * (C) Copyright 2009-2010 * Nokia Siemens Networks, [email protected] * * Portions Copyright (C) 2010 - 2016 Cavium, Inc. * * This is a driver for the i2c adapter in Cavium Networks' OCTEON processors. * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/atomic.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/sched.h> #include <linux/slab.h> #include <asm/octeon/octeon.h> #include "i2c-octeon-core.h" #define DRV_NAME "i2c-octeon" /** * octeon_i2c_int_enable - enable the CORE interrupt * @i2c: The struct octeon_i2c * * The interrupt will be asserted when there is non-STAT_IDLE state in * the SW_TWSI_EOP_TWSI_STAT register. */ static void octeon_i2c_int_enable(struct octeon_i2c *i2c) { octeon_i2c_write_int(i2c, TWSI_INT_CORE_EN); } /* disable the CORE interrupt */ static void octeon_i2c_int_disable(struct octeon_i2c *i2c) { /* clear TS/ST/IFLG events */ octeon_i2c_write_int(i2c, 0); } /** * octeon_i2c_int_enable78 - enable the CORE interrupt * @i2c: The struct octeon_i2c * * The interrupt will be asserted when there is non-STAT_IDLE state in the * SW_TWSI_EOP_TWSI_STAT register. */ static void octeon_i2c_int_enable78(struct octeon_i2c *i2c) { atomic_inc_return(&i2c->int_enable_cnt); enable_irq(i2c->irq); } static void __octeon_i2c_irq_disable(atomic_t *cnt, int irq) { int count; /* * The interrupt can be disabled in two places, but we only * want to make the disable_irq_nosync() call once, so keep * track with the atomic variable. */ count = atomic_dec_if_positive(cnt); if (count >= 0) disable_irq_nosync(irq); } /* disable the CORE interrupt */ static void octeon_i2c_int_disable78(struct octeon_i2c *i2c) { __octeon_i2c_irq_disable(&i2c->int_enable_cnt, i2c->irq); } /** * octeon_i2c_hlc_int_enable78 - enable the ST interrupt * @i2c: The struct octeon_i2c * * The interrupt will be asserted when there is non-STAT_IDLE state in * the SW_TWSI_EOP_TWSI_STAT register. 
*/ static void octeon_i2c_hlc_int_enable78(struct octeon_i2c *i2c) { atomic_inc_return(&i2c->hlc_int_enable_cnt); enable_irq(i2c->hlc_irq); } /* disable the ST interrupt */ static void octeon_i2c_hlc_int_disable78(struct octeon_i2c *i2c) { __octeon_i2c_irq_disable(&i2c->hlc_int_enable_cnt, i2c->hlc_irq); } /* HLC interrupt service routine */ static irqreturn_t octeon_i2c_hlc_isr78(int irq, void *dev_id) { struct octeon_i2c *i2c = dev_id; i2c->hlc_int_disable(i2c); wake_up(&i2c->queue); return IRQ_HANDLED; } static void octeon_i2c_hlc_int_enable(struct octeon_i2c *i2c) { octeon_i2c_write_int(i2c, TWSI_INT_ST_EN); } static u32 octeon_i2c_functionality(struct i2c_adapter *adap) { return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK) | I2C_FUNC_SMBUS_READ_BLOCK_DATA | I2C_SMBUS_BLOCK_PROC_CALL; } static const struct i2c_algorithm octeon_i2c_algo = { .master_xfer = octeon_i2c_xfer, .functionality = octeon_i2c_functionality, }; static const struct i2c_adapter octeon_i2c_ops = { .owner = THIS_MODULE, .name = "OCTEON adapter", .algo = &octeon_i2c_algo, }; static int octeon_i2c_probe(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; int irq, result = 0, hlc_irq = 0; struct octeon_i2c *i2c; bool cn78xx_style; cn78xx_style = of_device_is_compatible(node, "cavium,octeon-7890-twsi"); if (cn78xx_style) { hlc_irq = platform_get_irq(pdev, 0); if (hlc_irq < 0) return hlc_irq; irq = platform_get_irq(pdev, 2); if (irq < 0) return irq; } else { /* All adaptors have an irq. */ irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; } i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL); if (!i2c) { result = -ENOMEM; goto out; } i2c->dev = &pdev->dev; i2c->roff.sw_twsi = 0x00; i2c->roff.twsi_int = 0x10; i2c->roff.sw_twsi_ext = 0x18; i2c->twsi_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(i2c->twsi_base)) { result = PTR_ERR(i2c->twsi_base); goto out; } /* * "clock-rate" is a legacy binding, the official binding is * "clock-frequency". Try the official one first and then * fall back if it doesn't exist. 
*/ if (of_property_read_u32(node, "clock-frequency", &i2c->twsi_freq) && of_property_read_u32(node, "clock-rate", &i2c->twsi_freq)) { dev_err(i2c->dev, "no I2C 'clock-rate' or 'clock-frequency' property\n"); result = -ENXIO; goto out; } i2c->sys_freq = octeon_get_io_clock_rate(); init_waitqueue_head(&i2c->queue); i2c->irq = irq; if (cn78xx_style) { i2c->hlc_irq = hlc_irq; i2c->int_enable = octeon_i2c_int_enable78; i2c->int_disable = octeon_i2c_int_disable78; i2c->hlc_int_enable = octeon_i2c_hlc_int_enable78; i2c->hlc_int_disable = octeon_i2c_hlc_int_disable78; irq_set_status_flags(i2c->irq, IRQ_NOAUTOEN); irq_set_status_flags(i2c->hlc_irq, IRQ_NOAUTOEN); result = devm_request_irq(&pdev->dev, i2c->hlc_irq, octeon_i2c_hlc_isr78, 0, DRV_NAME, i2c); if (result < 0) { dev_err(i2c->dev, "failed to attach interrupt\n"); goto out; } } else { i2c->int_enable = octeon_i2c_int_enable; i2c->int_disable = octeon_i2c_int_disable; i2c->hlc_int_enable = octeon_i2c_hlc_int_enable; i2c->hlc_int_disable = octeon_i2c_int_disable; } result = devm_request_irq(&pdev->dev, i2c->irq, octeon_i2c_isr, 0, DRV_NAME, i2c); if (result < 0) { dev_err(i2c->dev, "failed to attach interrupt\n"); goto out; } if (OCTEON_IS_MODEL(OCTEON_CN38XX)) i2c->broken_irq_check = true; result = octeon_i2c_init_lowlevel(i2c); if (result) { dev_err(i2c->dev, "init low level failed\n"); goto out; } octeon_i2c_set_clock(i2c); i2c->adap = octeon_i2c_ops; i2c->adap.timeout = msecs_to_jiffies(2); i2c->adap.retries = 5; i2c->adap.bus_recovery_info = &octeon_i2c_recovery_info; i2c->adap.dev.parent = &pdev->dev; i2c->adap.dev.of_node = node; i2c_set_adapdata(&i2c->adap, i2c); platform_set_drvdata(pdev, i2c); result = i2c_add_adapter(&i2c->adap); if (result < 0) goto out; dev_info(i2c->dev, "probed\n"); return 0; out: return result; }; static void octeon_i2c_remove(struct platform_device *pdev) { struct octeon_i2c *i2c = platform_get_drvdata(pdev); i2c_del_adapter(&i2c->adap); }; static const struct of_device_id octeon_i2c_match[] = { { .compatible = "cavium,octeon-3860-twsi", }, { .compatible = "cavium,octeon-7890-twsi", }, {}, }; MODULE_DEVICE_TABLE(of, octeon_i2c_match); static struct platform_driver octeon_i2c_driver = { .probe = octeon_i2c_probe, .remove_new = octeon_i2c_remove, .driver = { .name = DRV_NAME, .of_match_table = octeon_i2c_match, }, }; module_platform_driver(octeon_i2c_driver); MODULE_AUTHOR("Michael Lawnick <[email protected]>"); MODULE_DESCRIPTION("I2C-Bus adapter for Cavium OCTEON processors"); MODULE_LICENSE("GPL");
linux-master
drivers/i2c/busses/i2c-octeon-platdrv.c
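The probe routine above prefers the official "clock-frequency" devicetree property and falls back to the legacy "clock-rate" binding only when the former is absent. A minimal sketch of that fallback pattern, assuming a hypothetical helper name and a 100 kHz default that the real driver does not have (it returns -ENXIO instead):

#include <linux/of.h>
#include <linux/types.h>

/*
 * Illustrative only: of_property_read_u32() returns 0 on success, so
 * the first property that exists wins. The 100 kHz fallback is an
 * assumption for this sketch, not behaviour of the driver.
 */
static u32 example_twsi_bus_speed(struct device_node *node)
{
	u32 freq;

	if (!of_property_read_u32(node, "clock-frequency", &freq))
		return freq;
	if (!of_property_read_u32(node, "clock-rate", &freq))
		return freq;

	return 100000;
}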
// SPDX-License-Identifier: GPL-2.0-only /* * I2C bus driver for Conexant Digicolor SoCs * * Author: Baruch Siach <[email protected]> * * Copyright (C) 2015 Paradox Innovation Ltd. */ #include <linux/clk.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #define TIMEOUT_MS 100 #define II_CONTROL 0x0 #define II_CONTROL_LOCAL_RESET BIT(0) #define II_CLOCKTIME 0x1 #define II_COMMAND 0x2 #define II_CMD_START 1 #define II_CMD_RESTART 2 #define II_CMD_SEND_ACK 3 #define II_CMD_GET_ACK 6 #define II_CMD_GET_NOACK 7 #define II_CMD_STOP 10 #define II_COMMAND_GO BIT(7) #define II_COMMAND_COMPLETION_STATUS(r) (((r) >> 5) & 3) #define II_CMD_STATUS_NORMAL 0 #define II_CMD_STATUS_ACK_GOOD 1 #define II_CMD_STATUS_ACK_BAD 2 #define II_CMD_STATUS_ABORT 3 #define II_DATA 0x3 #define II_INTFLAG_CLEAR 0x8 #define II_INTENABLE 0xa struct dc_i2c { struct i2c_adapter adap; struct device *dev; void __iomem *regs; struct clk *clk; unsigned int frequency; struct i2c_msg *msg; unsigned int msgbuf_ptr; int last; spinlock_t lock; struct completion done; int state; int error; }; enum { STATE_IDLE, STATE_START, STATE_ADDR, STATE_WRITE, STATE_READ, STATE_STOP, }; static void dc_i2c_cmd(struct dc_i2c *i2c, u8 cmd) { writeb_relaxed(cmd | II_COMMAND_GO, i2c->regs + II_COMMAND); } static u8 dc_i2c_addr_cmd(struct i2c_msg *msg) { u8 addr = (msg->addr & 0x7f) << 1; if (msg->flags & I2C_M_RD) addr |= 1; return addr; } static void dc_i2c_data(struct dc_i2c *i2c, u8 data) { writeb_relaxed(data, i2c->regs + II_DATA); } static void dc_i2c_write_byte(struct dc_i2c *i2c, u8 byte) { dc_i2c_data(i2c, byte); dc_i2c_cmd(i2c, II_CMD_SEND_ACK); } static void dc_i2c_write_buf(struct dc_i2c *i2c) { dc_i2c_write_byte(i2c, i2c->msg->buf[i2c->msgbuf_ptr++]); } static void dc_i2c_next_read(struct dc_i2c *i2c) { bool last = (i2c->msgbuf_ptr + 1 == i2c->msg->len); dc_i2c_cmd(i2c, last ? II_CMD_GET_NOACK : II_CMD_GET_ACK); } static void dc_i2c_stop(struct dc_i2c *i2c) { i2c->state = STATE_STOP; if (i2c->last) dc_i2c_cmd(i2c, II_CMD_STOP); else complete(&i2c->done); } static u8 dc_i2c_read_byte(struct dc_i2c *i2c) { return readb_relaxed(i2c->regs + II_DATA); } static void dc_i2c_read_buf(struct dc_i2c *i2c) { i2c->msg->buf[i2c->msgbuf_ptr++] = dc_i2c_read_byte(i2c); dc_i2c_next_read(i2c); } static void dc_i2c_set_irq(struct dc_i2c *i2c, int enable) { if (enable) writeb_relaxed(1, i2c->regs + II_INTFLAG_CLEAR); writeb_relaxed(!!enable, i2c->regs + II_INTENABLE); } static int dc_i2c_cmd_status(struct dc_i2c *i2c) { u8 cmd = readb_relaxed(i2c->regs + II_COMMAND); return II_COMMAND_COMPLETION_STATUS(cmd); } static void dc_i2c_start_msg(struct dc_i2c *i2c, int first) { struct i2c_msg *msg = i2c->msg; if (!(msg->flags & I2C_M_NOSTART)) { i2c->state = STATE_START; dc_i2c_cmd(i2c, first ? 
II_CMD_START : II_CMD_RESTART); } else if (msg->flags & I2C_M_RD) { i2c->state = STATE_READ; dc_i2c_next_read(i2c); } else { i2c->state = STATE_WRITE; dc_i2c_write_buf(i2c); } } static irqreturn_t dc_i2c_irq(int irq, void *dev_id) { struct dc_i2c *i2c = dev_id; int cmd_status = dc_i2c_cmd_status(i2c); u8 addr_cmd; writeb_relaxed(1, i2c->regs + II_INTFLAG_CLEAR); spin_lock(&i2c->lock); if (cmd_status == II_CMD_STATUS_ACK_BAD || cmd_status == II_CMD_STATUS_ABORT) { i2c->error = -EIO; complete(&i2c->done); goto out; } switch (i2c->state) { case STATE_START: addr_cmd = dc_i2c_addr_cmd(i2c->msg); dc_i2c_write_byte(i2c, addr_cmd); i2c->state = STATE_ADDR; break; case STATE_ADDR: if (i2c->msg->flags & I2C_M_RD) { dc_i2c_next_read(i2c); i2c->state = STATE_READ; break; } i2c->state = STATE_WRITE; fallthrough; case STATE_WRITE: if (i2c->msgbuf_ptr < i2c->msg->len) dc_i2c_write_buf(i2c); else dc_i2c_stop(i2c); break; case STATE_READ: if (i2c->msgbuf_ptr < i2c->msg->len) dc_i2c_read_buf(i2c); else dc_i2c_stop(i2c); break; case STATE_STOP: i2c->state = STATE_IDLE; complete(&i2c->done); break; } out: spin_unlock(&i2c->lock); return IRQ_HANDLED; } static int dc_i2c_xfer_msg(struct dc_i2c *i2c, struct i2c_msg *msg, int first, int last) { unsigned long timeout = msecs_to_jiffies(TIMEOUT_MS); unsigned long flags; spin_lock_irqsave(&i2c->lock, flags); i2c->msg = msg; i2c->msgbuf_ptr = 0; i2c->last = last; i2c->error = 0; reinit_completion(&i2c->done); dc_i2c_set_irq(i2c, 1); dc_i2c_start_msg(i2c, first); spin_unlock_irqrestore(&i2c->lock, flags); timeout = wait_for_completion_timeout(&i2c->done, timeout); dc_i2c_set_irq(i2c, 0); if (timeout == 0) { i2c->state = STATE_IDLE; return -ETIMEDOUT; } if (i2c->error) return i2c->error; return 0; } static int dc_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct dc_i2c *i2c = adap->algo_data; int i, ret; for (i = 0; i < num; i++) { ret = dc_i2c_xfer_msg(i2c, &msgs[i], i == 0, i == num - 1); if (ret) return ret; } return num; } static int dc_i2c_init_hw(struct dc_i2c *i2c) { unsigned long clk_rate = clk_get_rate(i2c->clk); unsigned int clocktime; writeb_relaxed(II_CONTROL_LOCAL_RESET, i2c->regs + II_CONTROL); udelay(100); writeb_relaxed(0, i2c->regs + II_CONTROL); udelay(100); clocktime = DIV_ROUND_UP(clk_rate, 64 * i2c->frequency); if (clocktime < 1 || clocktime > 0xff) { dev_err(i2c->dev, "can't set bus speed of %u Hz\n", i2c->frequency); return -EINVAL; } writeb_relaxed(clocktime - 1, i2c->regs + II_CLOCKTIME); return 0; } static u32 dc_i2c_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_NOSTART; } static const struct i2c_algorithm dc_i2c_algorithm = { .master_xfer = dc_i2c_xfer, .functionality = dc_i2c_func, }; static int dc_i2c_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct dc_i2c *i2c; int ret = 0, irq; i2c = devm_kzalloc(&pdev->dev, sizeof(struct dc_i2c), GFP_KERNEL); if (!i2c) return -ENOMEM; if (of_property_read_u32(pdev->dev.of_node, "clock-frequency", &i2c->frequency)) i2c->frequency = I2C_MAX_STANDARD_MODE_FREQ; i2c->dev = &pdev->dev; platform_set_drvdata(pdev, i2c); spin_lock_init(&i2c->lock); init_completion(&i2c->done); i2c->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(i2c->clk)) return PTR_ERR(i2c->clk); i2c->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(i2c->regs)) return PTR_ERR(i2c->regs); irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; ret = devm_request_irq(&pdev->dev, irq, dc_i2c_irq, 0, dev_name(&pdev->dev), i2c); if 
(ret < 0) return ret; strscpy(i2c->adap.name, "Conexant Digicolor I2C adapter", sizeof(i2c->adap.name)); i2c->adap.owner = THIS_MODULE; i2c->adap.algo = &dc_i2c_algorithm; i2c->adap.dev.parent = &pdev->dev; i2c->adap.dev.of_node = np; i2c->adap.algo_data = i2c; ret = dc_i2c_init_hw(i2c); if (ret) return ret; ret = clk_prepare_enable(i2c->clk); if (ret < 0) return ret; ret = i2c_add_adapter(&i2c->adap); if (ret < 0) { clk_disable_unprepare(i2c->clk); return ret; } return 0; } static void dc_i2c_remove(struct platform_device *pdev) { struct dc_i2c *i2c = platform_get_drvdata(pdev); i2c_del_adapter(&i2c->adap); clk_disable_unprepare(i2c->clk); } static const struct of_device_id dc_i2c_match[] = { { .compatible = "cnxt,cx92755-i2c" }, { }, }; MODULE_DEVICE_TABLE(of, dc_i2c_match); static struct platform_driver dc_i2c_driver = { .probe = dc_i2c_probe, .remove_new = dc_i2c_remove, .driver = { .name = "digicolor-i2c", .of_match_table = dc_i2c_match, }, }; module_platform_driver(dc_i2c_driver); MODULE_AUTHOR("Baruch Siach <[email protected]>"); MODULE_DESCRIPTION("Conexant Digicolor I2C master driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/i2c/busses/i2c-digicolor.c
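dc_i2c_init_hw() derives the II_CLOCKTIME divider with DIV_ROUND_UP(clk_rate, 64 * frequency) and programs clocktime - 1 into the register. A small worked example as a standalone program; the 200 MHz input clock is an assumed figure, and the "resulting bus rate" line presumes the hardware divides by 64 * (register + 1), which is what the driver's rounding implies:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long clk_rate = 200000000UL;	/* assumed example input clock */
	unsigned long frequency = 100000UL;	/* requested 100 kHz bus */
	unsigned long clocktime = DIV_ROUND_UP(clk_rate, 64UL * frequency);

	/* The driver rejects clocktime values outside 1..0xff. */
	printf("clocktime = %lu (register value %lu)\n",
	       clocktime, clocktime - 1);
	printf("resulting bus rate ~ %lu Hz (rounding up keeps it at or below the request)\n",
	       clk_rate / (64UL * clocktime));
	return 0;
}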
// SPDX-License-Identifier: GPL-2.0-or-later /* * Virtio I2C Bus Driver * * The Virtio I2C Specification: * https://raw.githubusercontent.com/oasis-tcs/virtio-spec/master/virtio-i2c.tex * * Copyright (c) 2021 Intel Corporation. All rights reserved. */ #include <linux/acpi.h> #include <linux/completion.h> #include <linux/err.h> #include <linux/i2c.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/virtio.h> #include <linux/virtio_ids.h> #include <linux/virtio_config.h> #include <linux/virtio_i2c.h> /** * struct virtio_i2c - virtio I2C data * @vdev: virtio device for this controller * @adap: I2C adapter for this controller * @vq: the virtio virtqueue for communication */ struct virtio_i2c { struct virtio_device *vdev; struct i2c_adapter adap; struct virtqueue *vq; }; /** * struct virtio_i2c_req - the virtio I2C request structure * @completion: completion of virtio I2C message * @out_hdr: the OUT header of the virtio I2C message * @buf: the buffer into which data is read, or from which it's written * @in_hdr: the IN header of the virtio I2C message */ struct virtio_i2c_req { struct completion completion; struct virtio_i2c_out_hdr out_hdr ____cacheline_aligned; uint8_t *buf ____cacheline_aligned; struct virtio_i2c_in_hdr in_hdr ____cacheline_aligned; }; static void virtio_i2c_msg_done(struct virtqueue *vq) { struct virtio_i2c_req *req; unsigned int len; while ((req = virtqueue_get_buf(vq, &len))) complete(&req->completion); } static int virtio_i2c_prepare_reqs(struct virtqueue *vq, struct virtio_i2c_req *reqs, struct i2c_msg *msgs, int num) { struct scatterlist *sgs[3], out_hdr, msg_buf, in_hdr; int i; for (i = 0; i < num; i++) { int outcnt = 0, incnt = 0; init_completion(&reqs[i].completion); /* * Only 7-bit mode supported for this moment. For the address * format, Please check the Virtio I2C Specification. */ reqs[i].out_hdr.addr = cpu_to_le16(msgs[i].addr << 1); if (msgs[i].flags & I2C_M_RD) reqs[i].out_hdr.flags |= cpu_to_le32(VIRTIO_I2C_FLAGS_M_RD); if (i != num - 1) reqs[i].out_hdr.flags |= cpu_to_le32(VIRTIO_I2C_FLAGS_FAIL_NEXT); sg_init_one(&out_hdr, &reqs[i].out_hdr, sizeof(reqs[i].out_hdr)); sgs[outcnt++] = &out_hdr; if (msgs[i].len) { reqs[i].buf = i2c_get_dma_safe_msg_buf(&msgs[i], 1); if (!reqs[i].buf) break; sg_init_one(&msg_buf, reqs[i].buf, msgs[i].len); if (msgs[i].flags & I2C_M_RD) sgs[outcnt + incnt++] = &msg_buf; else sgs[outcnt++] = &msg_buf; } sg_init_one(&in_hdr, &reqs[i].in_hdr, sizeof(reqs[i].in_hdr)); sgs[outcnt + incnt++] = &in_hdr; if (virtqueue_add_sgs(vq, sgs, outcnt, incnt, &reqs[i], GFP_KERNEL)) { i2c_put_dma_safe_msg_buf(reqs[i].buf, &msgs[i], false); break; } } return i; } static int virtio_i2c_complete_reqs(struct virtqueue *vq, struct virtio_i2c_req *reqs, struct i2c_msg *msgs, int num) { bool failed = false; int i, j = 0; for (i = 0; i < num; i++) { struct virtio_i2c_req *req = &reqs[i]; wait_for_completion(&req->completion); if (!failed && req->in_hdr.status != VIRTIO_I2C_MSG_OK) failed = true; i2c_put_dma_safe_msg_buf(reqs[i].buf, &msgs[i], !failed); if (!failed) j++; } return j; } static int virtio_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct virtio_i2c *vi = i2c_get_adapdata(adap); struct virtqueue *vq = vi->vq; struct virtio_i2c_req *reqs; int count; reqs = kcalloc(num, sizeof(*reqs), GFP_KERNEL); if (!reqs) return -ENOMEM; count = virtio_i2c_prepare_reqs(vq, reqs, msgs, num); if (!count) goto err_free; /* * For the case where count < num, i.e. 
we weren't able to queue all the * msgs, ideally we should abort right away and return early, but some * of the messages are already sent to the remote I2C controller and the * virtqueue will be left in undefined state in that case. We kick the * remote here to clear the virtqueue, so we can try another set of * messages later on. */ virtqueue_kick(vq); count = virtio_i2c_complete_reqs(vq, reqs, msgs, count); err_free: kfree(reqs); return count; } static void virtio_i2c_del_vqs(struct virtio_device *vdev) { virtio_reset_device(vdev); vdev->config->del_vqs(vdev); } static int virtio_i2c_setup_vqs(struct virtio_i2c *vi) { struct virtio_device *vdev = vi->vdev; vi->vq = virtio_find_single_vq(vdev, virtio_i2c_msg_done, "msg"); return PTR_ERR_OR_ZERO(vi->vq); } static u32 virtio_i2c_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static struct i2c_algorithm virtio_algorithm = { .master_xfer = virtio_i2c_xfer, .functionality = virtio_i2c_func, }; static int virtio_i2c_probe(struct virtio_device *vdev) { struct virtio_i2c *vi; int ret; if (!virtio_has_feature(vdev, VIRTIO_I2C_F_ZERO_LENGTH_REQUEST)) { dev_err(&vdev->dev, "Zero-length request feature is mandatory\n"); return -EINVAL; } vi = devm_kzalloc(&vdev->dev, sizeof(*vi), GFP_KERNEL); if (!vi) return -ENOMEM; vdev->priv = vi; vi->vdev = vdev; ret = virtio_i2c_setup_vqs(vi); if (ret) return ret; vi->adap.owner = THIS_MODULE; snprintf(vi->adap.name, sizeof(vi->adap.name), "i2c_virtio at virtio bus %d", vdev->index); vi->adap.algo = &virtio_algorithm; vi->adap.dev.parent = &vdev->dev; vi->adap.dev.of_node = vdev->dev.of_node; i2c_set_adapdata(&vi->adap, vi); /* * Setup ACPI node for controlled devices which will be probed through * ACPI. */ ACPI_COMPANION_SET(&vi->adap.dev, ACPI_COMPANION(vdev->dev.parent)); ret = i2c_add_adapter(&vi->adap); if (ret) virtio_i2c_del_vqs(vdev); return ret; } static void virtio_i2c_remove(struct virtio_device *vdev) { struct virtio_i2c *vi = vdev->priv; i2c_del_adapter(&vi->adap); virtio_i2c_del_vqs(vdev); } static struct virtio_device_id id_table[] = { { VIRTIO_ID_I2C_ADAPTER, VIRTIO_DEV_ANY_ID }, {} }; MODULE_DEVICE_TABLE(virtio, id_table); static int virtio_i2c_freeze(struct virtio_device *vdev) { virtio_i2c_del_vqs(vdev); return 0; } static int virtio_i2c_restore(struct virtio_device *vdev) { return virtio_i2c_setup_vqs(vdev->priv); } static const unsigned int features[] = { VIRTIO_I2C_F_ZERO_LENGTH_REQUEST, }; static struct virtio_driver virtio_i2c_driver = { .feature_table = features, .feature_table_size = ARRAY_SIZE(features), .id_table = id_table, .probe = virtio_i2c_probe, .remove = virtio_i2c_remove, .driver = { .name = "i2c_virtio", }, .freeze = pm_sleep_ptr(virtio_i2c_freeze), .restore = pm_sleep_ptr(virtio_i2c_restore), }; module_virtio_driver(virtio_i2c_driver); MODULE_AUTHOR("Jie Deng <[email protected]>"); MODULE_AUTHOR("Conghui Chen <[email protected]>"); MODULE_DESCRIPTION("Virtio i2c bus driver"); MODULE_LICENSE("GPL");
linux-master
drivers/i2c/busses/i2c-virtio.c
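Each i2c_msg handed to virtio_i2c_xfer() becomes one request of up to three scatterlist entries: the out_hdr, the data buffer (device-readable for writes, device-writable for reads) and the in_hdr, with VIRTIO_I2C_FLAGS_FAIL_NEXT set on every message but the last. For reference, a generic client-side transfer that this adapter would serve; the helper name and the register-read usage are illustrative, not part of the driver:

#include <linux/i2c.h>
#include <linux/kernel.h>

/*
 * Write a register index, then read one byte back: two i2c_msg entries,
 * which the adapter above turns into two chained virtio requests.
 */
static int example_read_reg(struct i2c_client *client, u8 reg, u8 *val)
{
	struct i2c_msg msgs[] = {
		{ .addr = client->addr, .flags = 0,        .len = 1, .buf = &reg },
		{ .addr = client->addr, .flags = I2C_M_RD, .len = 1, .buf = val },
	};
	int ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));

	if (ret < 0)
		return ret;
	return ret == ARRAY_SIZE(msgs) ? 0 : -EIO;
}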
// SPDX-License-Identifier: GPL-2.0-or-later /* * i2c-algo-pcf.c i2c driver algorithms for PCF8584 adapters * * Copyright (C) 1995-1997 Simon G. Vogl * 1998-2000 Hans Berglund * * With some changes from Kyösti Mälkki <[email protected]> and * Frodo Looijaard <[email protected]>, and also from Martin Bailey * <[email protected]> * * Partially rewriten by Oleg I. Vdovikin <[email protected]> to handle multiple * messages, proper stop/repstart signaling during receive, added detect code */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/i2c.h> #include <linux/i2c-algo-pcf.h> #include "i2c-algo-pcf.h" #define DEB2(x) if (i2c_debug >= 2) x #define DEB3(x) if (i2c_debug >= 3) x /* print several statistical values */ #define DEBPROTO(x) if (i2c_debug >= 9) x; /* debug the protocol by showing transferred bits */ #define DEF_TIMEOUT 16 /* * module parameters: */ static int i2c_debug; /* setting states on the bus with the right timing: */ #define set_pcf(adap, ctl, val) adap->setpcf(adap->data, ctl, val) #define get_pcf(adap, ctl) adap->getpcf(adap->data, ctl) #define get_own(adap) adap->getown(adap->data) #define get_clock(adap) adap->getclock(adap->data) #define i2c_outb(adap, val) adap->setpcf(adap->data, 0, val) #define i2c_inb(adap) adap->getpcf(adap->data, 0) /* other auxiliary functions */ static void i2c_start(struct i2c_algo_pcf_data *adap) { DEBPROTO(printk(KERN_DEBUG "S ")); set_pcf(adap, 1, I2C_PCF_START); } static void i2c_repstart(struct i2c_algo_pcf_data *adap) { DEBPROTO(printk(" Sr ")); set_pcf(adap, 1, I2C_PCF_REPSTART); } static void i2c_stop(struct i2c_algo_pcf_data *adap) { DEBPROTO(printk("P\n")); set_pcf(adap, 1, I2C_PCF_STOP); } static void handle_lab(struct i2c_algo_pcf_data *adap, const int *status) { DEB2(printk(KERN_INFO "i2c-algo-pcf.o: lost arbitration (CSR 0x%02x)\n", *status)); /* * Cleanup from LAB -- reset and enable ESO. * This resets the PCF8584; since we've lost the bus, no * further attempts should be made by callers to clean up * (no i2c_stop() etc.) */ set_pcf(adap, 1, I2C_PCF_PIN); set_pcf(adap, 1, I2C_PCF_ESO); /* * We pause for a time period sufficient for any running * I2C transaction to complete -- the arbitration logic won't * work properly until the next START is seen. * It is assumed the bus driver or client has set a proper value. * * REVISIT: should probably use msleep instead of mdelay if we * know we can sleep. */ if (adap->lab_mdelay) mdelay(adap->lab_mdelay); DEB2(printk(KERN_INFO "i2c-algo-pcf.o: reset LAB condition (CSR 0x%02x)\n", get_pcf(adap, 1))); } static int wait_for_bb(struct i2c_algo_pcf_data *adap) { int timeout = DEF_TIMEOUT; int status; status = get_pcf(adap, 1); while (!(status & I2C_PCF_BB) && --timeout) { udelay(100); /* wait for 100 us */ status = get_pcf(adap, 1); } if (timeout == 0) { printk(KERN_ERR "Timeout waiting for Bus Busy\n"); return -ETIMEDOUT; } return 0; } static int wait_for_pin(struct i2c_algo_pcf_data *adap, int *status) { int timeout = DEF_TIMEOUT; *status = get_pcf(adap, 1); while ((*status & I2C_PCF_PIN) && --timeout) { adap->waitforpin(adap->data); *status = get_pcf(adap, 1); } if (*status & I2C_PCF_LAB) { handle_lab(adap, status); return -EINTR; } if (timeout == 0) return -ETIMEDOUT; return 0; } /* * This should perform the 'PCF8584 initialization sequence' as described * in the Philips IC12 data book (1995, Aug 29). * There should be a 30 clock cycle wait after reset, I assume this * has been fulfilled. 
* There should be a delay at the end equal to the longest I2C message * to synchronize the BB-bit (in multimaster systems). How long is * this? I assume 1 second is always long enough. * * vdovikin: added detect code for PCF8584 */ static int pcf_init_8584 (struct i2c_algo_pcf_data *adap) { unsigned char temp; DEB3(printk(KERN_DEBUG "i2c-algo-pcf.o: PCF state 0x%02x\n", get_pcf(adap, 1))); /* S1=0x80: S0 selected, serial interface off */ set_pcf(adap, 1, I2C_PCF_PIN); /* * check to see S1 now used as R/W ctrl - * PCF8584 does that when ESO is zero */ if (((temp = get_pcf(adap, 1)) & 0x7f) != (0)) { DEB2(printk(KERN_ERR "i2c-algo-pcf.o: PCF detection failed -- can't select S0 (0x%02x).\n", temp)); return -ENXIO; /* definitely not PCF8584 */ } /* load own address in S0, effective address is (own << 1) */ i2c_outb(adap, get_own(adap)); /* check it's really written */ if ((temp = i2c_inb(adap)) != get_own(adap)) { DEB2(printk(KERN_ERR "i2c-algo-pcf.o: PCF detection failed -- can't set S0 (0x%02x).\n", temp)); return -ENXIO; } /* S1=0xA0, next byte in S2 */ set_pcf(adap, 1, I2C_PCF_PIN | I2C_PCF_ES1); /* check to see S2 now selected */ if (((temp = get_pcf(adap, 1)) & 0x7f) != I2C_PCF_ES1) { DEB2(printk(KERN_ERR "i2c-algo-pcf.o: PCF detection failed -- can't select S2 (0x%02x).\n", temp)); return -ENXIO; } /* load clock register S2 */ i2c_outb(adap, get_clock(adap)); /* check it's really written, the only 5 lowest bits does matter */ if (((temp = i2c_inb(adap)) & 0x1f) != get_clock(adap)) { DEB2(printk(KERN_ERR "i2c-algo-pcf.o: PCF detection failed -- can't set S2 (0x%02x).\n", temp)); return -ENXIO; } /* Enable serial interface, idle, S0 selected */ set_pcf(adap, 1, I2C_PCF_IDLE); /* check to see PCF is really idled and we can access status register */ if ((temp = get_pcf(adap, 1)) != (I2C_PCF_PIN | I2C_PCF_BB)) { DEB2(printk(KERN_ERR "i2c-algo-pcf.o: PCF detection failed -- can't select S1` (0x%02x).\n", temp)); return -ENXIO; } printk(KERN_DEBUG "i2c-algo-pcf.o: detected and initialized PCF8584.\n"); return 0; } static int pcf_sendbytes(struct i2c_adapter *i2c_adap, const char *buf, int count, int last) { struct i2c_algo_pcf_data *adap = i2c_adap->algo_data; int wrcount, status, timeout; for (wrcount=0; wrcount<count; ++wrcount) { DEB2(dev_dbg(&i2c_adap->dev, "i2c_write: writing %2.2X\n", buf[wrcount] & 0xff)); i2c_outb(adap, buf[wrcount]); timeout = wait_for_pin(adap, &status); if (timeout) { if (timeout == -EINTR) return -EINTR; /* arbitration lost */ i2c_stop(adap); dev_err(&i2c_adap->dev, "i2c_write: error - timeout.\n"); return -EREMOTEIO; /* got a better one ?? */ } if (status & I2C_PCF_LRB) { i2c_stop(adap); dev_err(&i2c_adap->dev, "i2c_write: error - no ack.\n"); return -EREMOTEIO; /* got a better one ?? 
*/ } } if (last) i2c_stop(adap); else i2c_repstart(adap); return wrcount; } static int pcf_readbytes(struct i2c_adapter *i2c_adap, char *buf, int count, int last) { int i, status; struct i2c_algo_pcf_data *adap = i2c_adap->algo_data; int wfp; /* increment number of bytes to read by one -- read dummy byte */ for (i = 0; i <= count; i++) { if ((wfp = wait_for_pin(adap, &status))) { if (wfp == -EINTR) return -EINTR; /* arbitration lost */ i2c_stop(adap); dev_err(&i2c_adap->dev, "pcf_readbytes timed out.\n"); return -1; } if ((status & I2C_PCF_LRB) && (i != count)) { i2c_stop(adap); dev_err(&i2c_adap->dev, "i2c_read: i2c_inb, No ack.\n"); return -1; } if (i == count - 1) { set_pcf(adap, 1, I2C_PCF_ESO); } else if (i == count) { if (last) i2c_stop(adap); else i2c_repstart(adap); } if (i) buf[i - 1] = i2c_inb(adap); else i2c_inb(adap); /* dummy read */ } return i - 1; } static int pcf_doAddress(struct i2c_algo_pcf_data *adap, struct i2c_msg *msg) { unsigned char addr = i2c_8bit_addr_from_msg(msg); if (msg->flags & I2C_M_REV_DIR_ADDR) addr ^= 1; i2c_outb(adap, addr); return 0; } static int pcf_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, int num) { struct i2c_algo_pcf_data *adap = i2c_adap->algo_data; struct i2c_msg *pmsg; int i; int ret=0, timeout, status; if (adap->xfer_begin) adap->xfer_begin(adap->data); /* Check for bus busy */ timeout = wait_for_bb(adap); if (timeout) { DEB2(printk(KERN_ERR "i2c-algo-pcf.o: " "Timeout waiting for BB in pcf_xfer\n");) i = -EIO; goto out; } for (i = 0;ret >= 0 && i < num; i++) { pmsg = &msgs[i]; DEB2(printk(KERN_DEBUG "i2c-algo-pcf.o: Doing %s %d bytes to 0x%02x - %d of %d messages\n", pmsg->flags & I2C_M_RD ? "read" : "write", pmsg->len, pmsg->addr, i + 1, num);) ret = pcf_doAddress(adap, pmsg); /* Send START */ if (i == 0) i2c_start(adap); /* Wait for PIN (pending interrupt NOT) */ timeout = wait_for_pin(adap, &status); if (timeout) { if (timeout == -EINTR) { /* arbitration lost */ i = -EINTR; goto out; } i2c_stop(adap); DEB2(printk(KERN_ERR "i2c-algo-pcf.o: Timeout waiting " "for PIN(1) in pcf_xfer\n");) i = -EREMOTEIO; goto out; } /* Check LRB (last rcvd bit - slave ack) */ if (status & I2C_PCF_LRB) { i2c_stop(adap); DEB2(printk(KERN_ERR "i2c-algo-pcf.o: No LRB(1) in pcf_xfer\n");) i = -EREMOTEIO; goto out; } DEB3(printk(KERN_DEBUG "i2c-algo-pcf.o: Msg %d, addr=0x%x, flags=0x%x, len=%d\n", i, msgs[i].addr, msgs[i].flags, msgs[i].len);) if (pmsg->flags & I2C_M_RD) { ret = pcf_readbytes(i2c_adap, pmsg->buf, pmsg->len, (i + 1 == num)); if (ret != pmsg->len) { DEB2(printk(KERN_DEBUG "i2c-algo-pcf.o: fail: " "only read %d bytes.\n",ret)); } else { DEB2(printk(KERN_DEBUG "i2c-algo-pcf.o: read %d bytes.\n",ret)); } } else { ret = pcf_sendbytes(i2c_adap, pmsg->buf, pmsg->len, (i + 1 == num)); if (ret != pmsg->len) { DEB2(printk(KERN_DEBUG "i2c-algo-pcf.o: fail: " "only wrote %d bytes.\n",ret)); } else { DEB2(printk(KERN_DEBUG "i2c-algo-pcf.o: wrote %d bytes.\n",ret)); } } } out: if (adap->xfer_end) adap->xfer_end(adap->data); return i; } static u32 pcf_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING; } /* exported algorithm data: */ static const struct i2c_algorithm pcf_algo = { .master_xfer = pcf_xfer, .functionality = pcf_func, }; /* * registering functions to load algorithms at runtime */ int i2c_pcf_add_bus(struct i2c_adapter *adap) { struct i2c_algo_pcf_data *pcf_adap = adap->algo_data; int rval; DEB2(dev_dbg(&adap->dev, "hw routines registered.\n")); /* register new adapter to i2c module... 
*/ adap->algo = &pcf_algo; if ((rval = pcf_init_8584(pcf_adap))) return rval; rval = i2c_add_adapter(adap); return rval; } EXPORT_SYMBOL(i2c_pcf_add_bus); MODULE_AUTHOR("Hans Berglund <[email protected]>"); MODULE_DESCRIPTION("I2C-Bus PCF8584 algorithm"); MODULE_LICENSE("GPL"); module_param(i2c_debug, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(i2c_debug, "debug level - 0 off; 1 normal; 2,3 more verbose; 9 pcf-protocol");
linux-master
drivers/i2c/algos/i2c-algo-pcf.c
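i2c_pcf_add_bus() expects the adapter's algo_data to point at a struct i2c_algo_pcf_data whose callbacks match the set_pcf/get_pcf/get_own/get_clock/waitforpin macros at the top of the file. A hedged sketch of a bus driver wiring those up; the MMIO layout (S0 at offset 0, S1 at offset 4), the example own-address and clock values, and the "mypcf" naming are all hypothetical:

#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-pcf.h>
#include <linux/io.h>

struct mypcf {
	void __iomem *base;	/* hypothetical register window */
};

static void mypcf_setpcf(void *data, int ctl, int val)
{
	struct mypcf *pcf = data;

	writeb(val, pcf->base + (ctl ? 4 : 0));	/* ctl=1 -> S1, ctl=0 -> S0 */
}

static int mypcf_getpcf(void *data, int ctl)
{
	struct mypcf *pcf = data;

	return readb(pcf->base + (ctl ? 4 : 0));
}

static int mypcf_getown(void *data)
{
	return 0x55;	/* example own address, hypothetical */
}

static int mypcf_getclock(void *data)
{
	return 0x1c;	/* example S2 clock value (5 significant bits) */
}

static void mypcf_waitforpin(void *data)
{
	udelay(100);	/* simple polled wait for PIN */
}

static struct i2c_algo_pcf_data mypcf_algo_data = {
	.setpcf     = mypcf_setpcf,
	.getpcf     = mypcf_getpcf,
	.getown     = mypcf_getown,
	.getclock   = mypcf_getclock,
	.waitforpin = mypcf_waitforpin,
	.lab_mdelay = 10,	/* back-off after lost arbitration, ms */
};

A probe routine would then set mypcf_algo_data.data to its private struct, point the adapter's algo_data at mypcf_algo_data and call i2c_pcf_add_bus(), which runs the PCF8584 detection sequence before adding the adapter.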
// SPDX-License-Identifier: GPL-2.0+ /* * i2c-algo-bit.c: i2c driver algorithms for bit-shift adapters * * Copyright (C) 1995-2000 Simon G. Vogl * * With some changes from Frodo Looijaard <[email protected]>, Kyösti Mälkki * <[email protected]> and Jean Delvare <[email protected]> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> /* ----- global defines ----------------------------------------------- */ #ifdef DEBUG #define bit_dbg(level, dev, format, args...) \ do { \ if (i2c_debug >= level) \ dev_dbg(dev, format, ##args); \ } while (0) #else #define bit_dbg(level, dev, format, args...) \ do {} while (0) #endif /* DEBUG */ /* ----- global variables --------------------------------------------- */ static int bit_test; /* see if the line-setting functions work */ module_param(bit_test, int, S_IRUGO); MODULE_PARM_DESC(bit_test, "lines testing - 0 off; 1 report; 2 fail if stuck"); #ifdef DEBUG static int i2c_debug = 1; module_param(i2c_debug, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(i2c_debug, "debug level - 0 off; 1 normal; 2 verbose; 3 very verbose"); #endif /* --- setting states on the bus with the right timing: --------------- */ #define setsda(adap, val) adap->setsda(adap->data, val) #define setscl(adap, val) adap->setscl(adap->data, val) #define getsda(adap) adap->getsda(adap->data) #define getscl(adap) adap->getscl(adap->data) static inline void sdalo(struct i2c_algo_bit_data *adap) { setsda(adap, 0); udelay((adap->udelay + 1) / 2); } static inline void sdahi(struct i2c_algo_bit_data *adap) { setsda(adap, 1); udelay((adap->udelay + 1) / 2); } static inline void scllo(struct i2c_algo_bit_data *adap) { setscl(adap, 0); udelay(adap->udelay / 2); } /* * Raise scl line, and do checking for delays. This is necessary for slower * devices. */ static int sclhi(struct i2c_algo_bit_data *adap) { unsigned long start; setscl(adap, 1); /* Not all adapters have scl sense line... */ if (!adap->getscl) goto done; start = jiffies; while (!getscl(adap)) { /* This hw knows how to read the clock line, so we wait * until it actually gets high. This is safer as some * chips may hold it low ("clock stretching") while they * are processing data internally. */ if (time_after(jiffies, start + adap->timeout)) { /* Test one last time, as we may have been preempted * between last check and timeout test. */ if (getscl(adap)) break; return -ETIMEDOUT; } cpu_relax(); } #ifdef DEBUG if (jiffies != start && i2c_debug >= 3) pr_debug("i2c-algo-bit: needed %ld jiffies for SCL to go high\n", jiffies - start); #endif done: udelay(adap->udelay); return 0; } /* --- other auxiliary functions -------------------------------------- */ static void i2c_start(struct i2c_algo_bit_data *adap) { /* assert: scl, sda are high */ setsda(adap, 0); udelay(adap->udelay); scllo(adap); } static void i2c_repstart(struct i2c_algo_bit_data *adap) { /* assert: scl is low */ sdahi(adap); sclhi(adap); setsda(adap, 0); udelay(adap->udelay); scllo(adap); } static void i2c_stop(struct i2c_algo_bit_data *adap) { /* assert: scl is low */ sdalo(adap); sclhi(adap); setsda(adap, 1); udelay(adap->udelay); } /* send a byte without start cond., look for arbitration, check ackn. 
from slave */ /* returns: * 1 if the device acknowledged * 0 if the device did not ack * -ETIMEDOUT if an error occurred (while raising the scl line) */ static int i2c_outb(struct i2c_adapter *i2c_adap, unsigned char c) { int i; int sb; int ack; struct i2c_algo_bit_data *adap = i2c_adap->algo_data; /* assert: scl is low */ for (i = 7; i >= 0; i--) { sb = (c >> i) & 1; setsda(adap, sb); udelay((adap->udelay + 1) / 2); if (sclhi(adap) < 0) { /* timed out */ bit_dbg(1, &i2c_adap->dev, "i2c_outb: 0x%02x, timeout at bit #%d\n", (int)c, i); return -ETIMEDOUT; } /* FIXME do arbitration here: * if (sb && !getsda(adap)) -> ouch! Get out of here. * * Report a unique code, so higher level code can retry * the whole (combined) message and *NOT* issue STOP. */ scllo(adap); } sdahi(adap); if (sclhi(adap) < 0) { /* timeout */ bit_dbg(1, &i2c_adap->dev, "i2c_outb: 0x%02x, timeout at ack\n", (int)c); return -ETIMEDOUT; } /* read ack: SDA should be pulled down by slave, or it may * NAK (usually to report problems with the data we wrote). * Always report ACK if SDA is write-only. */ ack = !adap->getsda || !getsda(adap); /* ack: sda is pulled low -> success */ bit_dbg(2, &i2c_adap->dev, "i2c_outb: 0x%02x %s\n", (int)c, ack ? "A" : "NA"); scllo(adap); return ack; /* assert: scl is low (sda undef) */ } static int i2c_inb(struct i2c_adapter *i2c_adap) { /* read byte via i2c port, without start/stop sequence */ /* acknowledge is sent in i2c_read. */ int i; unsigned char indata = 0; struct i2c_algo_bit_data *adap = i2c_adap->algo_data; /* assert: scl is low */ sdahi(adap); for (i = 0; i < 8; i++) { if (sclhi(adap) < 0) { /* timeout */ bit_dbg(1, &i2c_adap->dev, "i2c_inb: timeout at bit #%d\n", 7 - i); return -ETIMEDOUT; } indata *= 2; if (getsda(adap)) indata |= 0x01; setscl(adap, 0); udelay(i == 7 ? adap->udelay / 2 : adap->udelay); } /* assert: scl is low */ return indata; } /* * Sanity check for the adapter hardware - check the reaction of * the bus lines only if it seems to be idle. */ static int test_bus(struct i2c_adapter *i2c_adap) { struct i2c_algo_bit_data *adap = i2c_adap->algo_data; const char *name = i2c_adap->name; int scl, sda, ret; if (adap->pre_xfer) { ret = adap->pre_xfer(i2c_adap); if (ret < 0) return -ENODEV; } if (adap->getsda == NULL) pr_info("%s: SDA is write-only, testing not possible\n", name); if (adap->getscl == NULL) pr_info("%s: SCL is write-only, testing not possible\n", name); sda = adap->getsda ? getsda(adap) : 1; scl = adap->getscl ? 
getscl(adap) : 1; if (!scl || !sda) { pr_warn("%s: bus seems to be busy (scl=%d, sda=%d)\n", name, scl, sda); goto bailout; } sdalo(adap); if (adap->getsda && getsda(adap)) { pr_warn("%s: SDA stuck high!\n", name); goto bailout; } if (adap->getscl && !getscl(adap)) { pr_warn("%s: SCL unexpected low while pulling SDA low!\n", name); goto bailout; } sdahi(adap); if (adap->getsda && !getsda(adap)) { pr_warn("%s: SDA stuck low!\n", name); goto bailout; } if (adap->getscl && !getscl(adap)) { pr_warn("%s: SCL unexpected low while pulling SDA high!\n", name); goto bailout; } scllo(adap); if (adap->getscl && getscl(adap)) { pr_warn("%s: SCL stuck high!\n", name); goto bailout; } if (adap->getsda && !getsda(adap)) { pr_warn("%s: SDA unexpected low while pulling SCL low!\n", name); goto bailout; } sclhi(adap); if (adap->getscl && !getscl(adap)) { pr_warn("%s: SCL stuck low!\n", name); goto bailout; } if (adap->getsda && !getsda(adap)) { pr_warn("%s: SDA unexpected low while pulling SCL high!\n", name); goto bailout; } if (adap->post_xfer) adap->post_xfer(i2c_adap); pr_info("%s: Test OK\n", name); return 0; bailout: sdahi(adap); sclhi(adap); if (adap->post_xfer) adap->post_xfer(i2c_adap); return -ENODEV; } /* ----- Utility functions */ /* try_address tries to contact a chip for a number of * times before it gives up. * return values: * 1 chip answered * 0 chip did not answer * -x transmission error */ static int try_address(struct i2c_adapter *i2c_adap, unsigned char addr, int retries) { struct i2c_algo_bit_data *adap = i2c_adap->algo_data; int i, ret = 0; for (i = 0; i <= retries; i++) { ret = i2c_outb(i2c_adap, addr); if (ret == 1 || i == retries) break; bit_dbg(3, &i2c_adap->dev, "emitting stop condition\n"); i2c_stop(adap); udelay(adap->udelay); yield(); bit_dbg(3, &i2c_adap->dev, "emitting start condition\n"); i2c_start(adap); } if (i && ret) bit_dbg(1, &i2c_adap->dev, "Used %d tries to %s client at 0x%02x: %s\n", i + 1, addr & 1 ? "read from" : "write to", addr >> 1, ret == 1 ? "success" : "failed, timeout?"); return ret; } static int sendbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msg) { const unsigned char *temp = msg->buf; int count = msg->len; unsigned short nak_ok = msg->flags & I2C_M_IGNORE_NAK; int retval; int wrcount = 0; while (count > 0) { retval = i2c_outb(i2c_adap, *temp); /* OK/ACK; or ignored NAK */ if ((retval > 0) || (nak_ok && (retval == 0))) { count--; temp++; wrcount++; /* A slave NAKing the master means the slave didn't like * something about the data it saw. For example, maybe * the SMBus PEC was wrong. */ } else if (retval == 0) { dev_err(&i2c_adap->dev, "sendbytes: NAK bailout.\n"); return -EIO; /* Timeout; or (someday) lost arbitration * * FIXME Lost ARB implies retrying the transaction from * the first message, after the "winning" master issues * its STOP. As a rule, upper layer code has no reason * to know or care about this ... it is *NOT* an error. 
*/ } else { dev_err(&i2c_adap->dev, "sendbytes: error %d\n", retval); return retval; } } return wrcount; } static int acknak(struct i2c_adapter *i2c_adap, int is_ack) { struct i2c_algo_bit_data *adap = i2c_adap->algo_data; /* assert: sda is high */ if (is_ack) /* send ack */ setsda(adap, 0); udelay((adap->udelay + 1) / 2); if (sclhi(adap) < 0) { /* timeout */ dev_err(&i2c_adap->dev, "readbytes: ack/nak timeout\n"); return -ETIMEDOUT; } scllo(adap); return 0; } static int readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msg) { int inval; int rdcount = 0; /* counts bytes read */ unsigned char *temp = msg->buf; int count = msg->len; const unsigned flags = msg->flags; struct i2c_algo_bit_data *adap = i2c_adap->algo_data; if (!adap->getsda) return -EOPNOTSUPP; while (count > 0) { inval = i2c_inb(i2c_adap); if (inval >= 0) { *temp = inval; rdcount++; } else { /* read timed out */ break; } temp++; count--; /* Some SMBus transactions require that we receive the transaction length as the first read byte. */ if (rdcount == 1 && (flags & I2C_M_RECV_LEN)) { if (inval <= 0 || inval > I2C_SMBUS_BLOCK_MAX) { if (!(flags & I2C_M_NO_RD_ACK)) acknak(i2c_adap, 0); dev_err(&i2c_adap->dev, "readbytes: invalid block length (%d)\n", inval); return -EPROTO; } /* The original count value accounts for the extra bytes, that is, either 1 for a regular transaction, or 2 for a PEC transaction. */ count += inval; msg->len += inval; } bit_dbg(2, &i2c_adap->dev, "readbytes: 0x%02x %s\n", inval, (flags & I2C_M_NO_RD_ACK) ? "(no ack/nak)" : (count ? "A" : "NA")); if (!(flags & I2C_M_NO_RD_ACK)) { inval = acknak(i2c_adap, count); if (inval < 0) return inval; } } return rdcount; } /* doAddress initiates the transfer by generating the start condition (in * try_address) and transmits the address in the necessary format to handle * reads, writes as well as 10bit-addresses. * returns: * 0 everything went okay, the chip ack'ed, or IGNORE_NAK flag was set * -x an error occurred (like: -ENXIO if the device did not answer, or * -ETIMEDOUT, for example if the lines are stuck...) */ static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg) { unsigned short flags = msg->flags; unsigned short nak_ok = msg->flags & I2C_M_IGNORE_NAK; struct i2c_algo_bit_data *adap = i2c_adap->algo_data; unsigned char addr; int ret, retries; retries = nak_ok ? 
0 : i2c_adap->retries; if (flags & I2C_M_TEN) { /* a ten bit address */ addr = 0xf0 | ((msg->addr >> 7) & 0x06); bit_dbg(2, &i2c_adap->dev, "addr0: %d\n", addr); /* try extended address code...*/ ret = try_address(i2c_adap, addr, retries); if ((ret != 1) && !nak_ok) { dev_err(&i2c_adap->dev, "died at extended address code\n"); return -ENXIO; } /* the remaining 8 bit address */ ret = i2c_outb(i2c_adap, msg->addr & 0xff); if ((ret != 1) && !nak_ok) { /* the chip did not ack / xmission error occurred */ dev_err(&i2c_adap->dev, "died at 2nd address code\n"); return -ENXIO; } if (flags & I2C_M_RD) { bit_dbg(3, &i2c_adap->dev, "emitting repeated start condition\n"); i2c_repstart(adap); /* okay, now switch into reading mode */ addr |= 0x01; ret = try_address(i2c_adap, addr, retries); if ((ret != 1) && !nak_ok) { dev_err(&i2c_adap->dev, "died at repeated address code\n"); return -EIO; } } } else { /* normal 7bit address */ addr = i2c_8bit_addr_from_msg(msg); if (flags & I2C_M_REV_DIR_ADDR) addr ^= 1; ret = try_address(i2c_adap, addr, retries); if ((ret != 1) && !nak_ok) return -ENXIO; } return 0; } static int bit_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msgs[], int num) { struct i2c_msg *pmsg; struct i2c_algo_bit_data *adap = i2c_adap->algo_data; int i, ret; unsigned short nak_ok; if (adap->pre_xfer) { ret = adap->pre_xfer(i2c_adap); if (ret < 0) return ret; } bit_dbg(3, &i2c_adap->dev, "emitting start condition\n"); i2c_start(adap); for (i = 0; i < num; i++) { pmsg = &msgs[i]; nak_ok = pmsg->flags & I2C_M_IGNORE_NAK; if (!(pmsg->flags & I2C_M_NOSTART)) { if (i) { if (msgs[i - 1].flags & I2C_M_STOP) { bit_dbg(3, &i2c_adap->dev, "emitting enforced stop/start condition\n"); i2c_stop(adap); i2c_start(adap); } else { bit_dbg(3, &i2c_adap->dev, "emitting repeated start condition\n"); i2c_repstart(adap); } } ret = bit_doAddress(i2c_adap, pmsg); if ((ret != 0) && !nak_ok) { bit_dbg(1, &i2c_adap->dev, "NAK from device addr 0x%02x msg #%d\n", msgs[i].addr, i); goto bailout; } } if (pmsg->flags & I2C_M_RD) { /* read bytes into buffer*/ ret = readbytes(i2c_adap, pmsg); if (ret >= 1) bit_dbg(2, &i2c_adap->dev, "read %d byte%s\n", ret, ret == 1 ? "" : "s"); if (ret < pmsg->len) { if (ret >= 0) ret = -EIO; goto bailout; } } else { /* write bytes from buffer */ ret = sendbytes(i2c_adap, pmsg); if (ret >= 1) bit_dbg(2, &i2c_adap->dev, "wrote %d byte%s\n", ret, ret == 1 ? "" : "s"); if (ret < pmsg->len) { if (ret >= 0) ret = -EIO; goto bailout; } } } ret = i; bailout: bit_dbg(3, &i2c_adap->dev, "emitting stop condition\n"); i2c_stop(adap); if (adap->post_xfer) adap->post_xfer(i2c_adap); return ret; } /* * We print a warning when we are not flagged to support atomic transfers but * will try anyhow. That's what the I2C core would do as well. Sadly, we can't * modify the algorithm struct at probe time because this struct is exported * 'const'. 
*/ static int bit_xfer_atomic(struct i2c_adapter *i2c_adap, struct i2c_msg msgs[], int num) { struct i2c_algo_bit_data *adap = i2c_adap->algo_data; if (!adap->can_do_atomic) dev_warn(&i2c_adap->dev, "not flagged for atomic transfers\n"); return bit_xfer(i2c_adap, msgs, num); } static u32 bit_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_NOSTART | I2C_FUNC_SMBUS_EMUL_ALL | I2C_FUNC_10BIT_ADDR | I2C_FUNC_PROTOCOL_MANGLING; } /* -----exported algorithm data: ------------------------------------- */ const struct i2c_algorithm i2c_bit_algo = { .master_xfer = bit_xfer, .master_xfer_atomic = bit_xfer_atomic, .functionality = bit_func, }; EXPORT_SYMBOL(i2c_bit_algo); static const struct i2c_adapter_quirks i2c_bit_quirk_no_clk_stretch = { .flags = I2C_AQ_NO_CLK_STRETCH, }; /* * registering functions to load algorithms at runtime */ static int __i2c_bit_add_bus(struct i2c_adapter *adap, int (*add_adapter)(struct i2c_adapter *)) { struct i2c_algo_bit_data *bit_adap = adap->algo_data; int ret; if (bit_test) { ret = test_bus(adap); if (bit_test >= 2 && ret < 0) return -ENODEV; } /* register new adapter to i2c module... */ adap->algo = &i2c_bit_algo; adap->retries = 3; if (bit_adap->getscl == NULL) adap->quirks = &i2c_bit_quirk_no_clk_stretch; /* * We tried forcing SCL/SDA to an initial state here. But that caused a * regression, sadly. Check Bugzilla #200045 for details. */ ret = add_adapter(adap); if (ret < 0) return ret; if (bit_adap->getsda == NULL) dev_warn(&adap->dev, "Not I2C compliant: can't read SDA\n"); if (bit_adap->getscl == NULL) dev_warn(&adap->dev, "Not I2C compliant: can't read SCL\n"); if (bit_adap->getsda == NULL || bit_adap->getscl == NULL) dev_warn(&adap->dev, "Bus may be unreliable\n"); return 0; } int i2c_bit_add_bus(struct i2c_adapter *adap) { return __i2c_bit_add_bus(adap, i2c_add_adapter); } EXPORT_SYMBOL(i2c_bit_add_bus); int i2c_bit_add_numbered_bus(struct i2c_adapter *adap) { return __i2c_bit_add_bus(adap, i2c_add_numbered_adapter); } EXPORT_SYMBOL(i2c_bit_add_numbered_bus); MODULE_AUTHOR("Simon G. Vogl <[email protected]>"); MODULE_DESCRIPTION("I2C-Bus bit-banging algorithm"); MODULE_LICENSE("GPL");
linux-master
drivers/i2c/algos/i2c-algo-bit.c
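i2c_bit_add_bus() only needs an i2c_algo_bit_data whose callbacks match the setsda/setscl/getsda/getscl macros above. A minimal sketch built on two GPIO descriptors, assuming they are configured open-drain so that writing 1 releases the line; the gpio field names, the 5 us half period and the "mybit" naming are illustrative:

#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/kernel.h>

struct mybit {
	struct gpio_desc *sda;	/* assumed open-drain GPIOs */
	struct gpio_desc *scl;
};

static void mybit_setsda(void *data, int state)
{
	struct mybit *b = data;

	gpiod_set_value(b->sda, state);
}

static void mybit_setscl(void *data, int state)
{
	struct mybit *b = data;

	gpiod_set_value(b->scl, state);
}

static int mybit_getsda(void *data)
{
	struct mybit *b = data;

	return gpiod_get_value(b->sda);
}

static int mybit_getscl(void *data)
{
	struct mybit *b = data;

	return gpiod_get_value(b->scl);
}

static struct i2c_algo_bit_data mybit_data = {
	.setsda  = mybit_setsda,
	.setscl  = mybit_setscl,
	.getsda  = mybit_getsda,
	.getscl  = mybit_getscl,
	.udelay  = 5,		/* ~100 kHz: 5 us per half period */
	.timeout = HZ / 10,	/* clock-stretching limit, 100 ms */
};

static int mybit_register(struct i2c_adapter *adap, struct mybit *b)
{
	mybit_data.data = b;
	adap->algo_data = &mybit_data;
	return i2c_bit_add_bus(adap);	/* installs &i2c_bit_algo, adds adapter */
}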
// SPDX-License-Identifier: GPL-2.0-or-later /* * i2c-algo-pca.c i2c driver algorithms for PCA9564 adapters * Copyright (C) 2004 Arcom Control Systems * Copyright (C) 2008 Pengutronix */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/delay.h> #include <linux/jiffies.h> #include <linux/errno.h> #include <linux/i2c.h> #include <linux/i2c-algo-pca.h> #define DEB1(fmt, args...) do { if (i2c_debug >= 1) \ printk(KERN_DEBUG fmt, ## args); } while (0) #define DEB2(fmt, args...) do { if (i2c_debug >= 2) \ printk(KERN_DEBUG fmt, ## args); } while (0) #define DEB3(fmt, args...) do { if (i2c_debug >= 3) \ printk(KERN_DEBUG fmt, ## args); } while (0) static int i2c_debug; #define pca_outw(adap, reg, val) adap->write_byte(adap->data, reg, val) #define pca_inw(adap, reg) adap->read_byte(adap->data, reg) #define pca_status(adap) pca_inw(adap, I2C_PCA_STA) #define pca_clock(adap) adap->i2c_clock #define pca_set_con(adap, val) pca_outw(adap, I2C_PCA_CON, val) #define pca_get_con(adap) pca_inw(adap, I2C_PCA_CON) #define pca_wait(adap) adap->wait_for_completion(adap->data) static void pca_reset(struct i2c_algo_pca_data *adap) { if (adap->chip == I2C_PCA_CHIP_9665) { /* Ignore the reset function from the module, * we can use the parallel bus reset. */ pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_IPRESET); pca_outw(adap, I2C_PCA_IND, 0xA5); pca_outw(adap, I2C_PCA_IND, 0x5A); /* * After a reset we need to re-apply any configuration * (calculated in pca_init) to get the bus in a working state. */ pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_IMODE); pca_outw(adap, I2C_PCA_IND, adap->bus_settings.mode); pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_ISCLL); pca_outw(adap, I2C_PCA_IND, adap->bus_settings.tlow); pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_ISCLH); pca_outw(adap, I2C_PCA_IND, adap->bus_settings.thi); pca_set_con(adap, I2C_PCA_CON_ENSIO); } else { adap->reset_chip(adap->data); pca_set_con(adap, I2C_PCA_CON_ENSIO | adap->bus_settings.clock_freq); } } /* * Generate a start condition on the i2c bus. * * returns after the start condition has occurred */ static int pca_start(struct i2c_algo_pca_data *adap) { int sta = pca_get_con(adap); DEB2("=== START\n"); sta |= I2C_PCA_CON_STA; sta &= ~(I2C_PCA_CON_STO|I2C_PCA_CON_SI); pca_set_con(adap, sta); return pca_wait(adap); } /* * Generate a repeated start condition on the i2c bus * * return after the repeated start condition has occurred */ static int pca_repeated_start(struct i2c_algo_pca_data *adap) { int sta = pca_get_con(adap); DEB2("=== REPEATED START\n"); sta |= I2C_PCA_CON_STA; sta &= ~(I2C_PCA_CON_STO|I2C_PCA_CON_SI); pca_set_con(adap, sta); return pca_wait(adap); } /* * Generate a stop condition on the i2c bus * * returns after the stop condition has been generated * * STOPs do not generate an interrupt or set the SI flag, since the * part returns the idle state (0xf8). Hence we don't need to * pca_wait here. */ static void pca_stop(struct i2c_algo_pca_data *adap) { int sta = pca_get_con(adap); DEB2("=== STOP\n"); sta |= I2C_PCA_CON_STO; sta &= ~(I2C_PCA_CON_STA|I2C_PCA_CON_SI); pca_set_con(adap, sta); } /* * Send the slave address and R/W bit * * returns after the address has been sent */ static int pca_address(struct i2c_algo_pca_data *adap, struct i2c_msg *msg) { int sta = pca_get_con(adap); int addr = i2c_8bit_addr_from_msg(msg); DEB2("=== SLAVE ADDRESS %#04x+%c=%#04x\n", msg->addr, msg->flags & I2C_M_RD ? 
'R' : 'W', addr); pca_outw(adap, I2C_PCA_DAT, addr); sta &= ~(I2C_PCA_CON_STO|I2C_PCA_CON_STA|I2C_PCA_CON_SI); pca_set_con(adap, sta); return pca_wait(adap); } /* * Transmit a byte. * * Returns after the byte has been transmitted */ static int pca_tx_byte(struct i2c_algo_pca_data *adap, __u8 b) { int sta = pca_get_con(adap); DEB2("=== WRITE %#04x\n", b); pca_outw(adap, I2C_PCA_DAT, b); sta &= ~(I2C_PCA_CON_STO|I2C_PCA_CON_STA|I2C_PCA_CON_SI); pca_set_con(adap, sta); return pca_wait(adap); } /* * Receive a byte * * returns immediately. */ static void pca_rx_byte(struct i2c_algo_pca_data *adap, __u8 *b, int ack) { *b = pca_inw(adap, I2C_PCA_DAT); DEB2("=== READ %#04x %s\n", *b, ack ? "ACK" : "NACK"); } /* * Setup ACK or NACK for next received byte and wait for it to arrive. * * Returns after next byte has arrived. */ static int pca_rx_ack(struct i2c_algo_pca_data *adap, int ack) { int sta = pca_get_con(adap); sta &= ~(I2C_PCA_CON_STO|I2C_PCA_CON_STA|I2C_PCA_CON_SI|I2C_PCA_CON_AA); if (ack) sta |= I2C_PCA_CON_AA; pca_set_con(adap, sta); return pca_wait(adap); } static int pca_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, int num) { struct i2c_algo_pca_data *adap = i2c_adap->algo_data; struct i2c_msg *msg = NULL; int curmsg; int numbytes = 0; int state; int ret; int completed = 1; unsigned long timeout = jiffies + i2c_adap->timeout; while ((state = pca_status(adap)) != 0xf8) { if (time_before(jiffies, timeout)) { msleep(10); } else { dev_dbg(&i2c_adap->dev, "bus is not idle. status is " "%#04x\n", state); return -EBUSY; } } DEB1("{{{ XFER %d messages\n", num); if (i2c_debug >= 2) { for (curmsg = 0; curmsg < num; curmsg++) { int addr, i; msg = &msgs[curmsg]; addr = (0x7f & msg->addr) ; if (msg->flags & I2C_M_RD) printk(KERN_INFO " [%02d] RD %d bytes from %#02x [%#02x, ...]\n", curmsg, msg->len, addr, (addr << 1) | 1); else { printk(KERN_INFO " [%02d] WR %d bytes to %#02x [%#02x%s", curmsg, msg->len, addr, addr << 1, msg->len == 0 ? "" : ", "); for (i = 0; i < msg->len; i++) printk("%#04x%s", msg->buf[i], i == msg->len - 1 ? 
"" : ", "); printk("]\n"); } } } curmsg = 0; ret = -EIO; while (curmsg < num) { state = pca_status(adap); DEB3("STATE is 0x%02x\n", state); msg = &msgs[curmsg]; switch (state) { case 0xf8: /* On reset or stop the bus is idle */ completed = pca_start(adap); break; case 0x08: /* A START condition has been transmitted */ case 0x10: /* A repeated start condition has been transmitted */ completed = pca_address(adap, msg); break; case 0x18: /* SLA+W has been transmitted; ACK has been received */ case 0x28: /* Data byte in I2CDAT has been transmitted; ACK has been received */ if (numbytes < msg->len) { completed = pca_tx_byte(adap, msg->buf[numbytes]); numbytes++; break; } curmsg++; numbytes = 0; if (curmsg == num) pca_stop(adap); else completed = pca_repeated_start(adap); break; case 0x20: /* SLA+W has been transmitted; NOT ACK has been received */ DEB2("NOT ACK received after SLA+W\n"); pca_stop(adap); ret = -ENXIO; goto out; case 0x40: /* SLA+R has been transmitted; ACK has been received */ completed = pca_rx_ack(adap, msg->len > 1); break; case 0x50: /* Data bytes has been received; ACK has been returned */ if (numbytes < msg->len) { pca_rx_byte(adap, &msg->buf[numbytes], 1); numbytes++; completed = pca_rx_ack(adap, numbytes < msg->len - 1); break; } curmsg++; numbytes = 0; if (curmsg == num) pca_stop(adap); else completed = pca_repeated_start(adap); break; case 0x48: /* SLA+R has been transmitted; NOT ACK has been received */ DEB2("NOT ACK received after SLA+R\n"); pca_stop(adap); ret = -ENXIO; goto out; case 0x30: /* Data byte in I2CDAT has been transmitted; NOT ACK has been received */ DEB2("NOT ACK received after data byte\n"); pca_stop(adap); goto out; case 0x38: /* Arbitration lost during SLA+W, SLA+R or data bytes */ DEB2("Arbitration lost\n"); /* * The PCA9564 data sheet (2006-09-01) says "A * START condition will be transmitted when the * bus becomes free (STOP or SCL and SDA high)" * when the STA bit is set (p. 11). * * In case this won't work, try pca_reset() * instead. */ pca_start(adap); goto out; case 0x58: /* Data byte has been received; NOT ACK has been returned */ if (numbytes == msg->len - 1) { pca_rx_byte(adap, &msg->buf[numbytes], 0); curmsg++; numbytes = 0; if (curmsg == num) pca_stop(adap); else completed = pca_repeated_start(adap); } else { DEB2("NOT ACK sent after data byte received. " "Not final byte. numbytes %d. len %d\n", numbytes, msg->len); pca_stop(adap); goto out; } break; case 0x70: /* Bus error - SDA stuck low */ DEB2("BUS ERROR - SDA Stuck low\n"); pca_reset(adap); goto out; case 0x78: /* Bus error - SCL stuck low (PCA9665) */ case 0x90: /* Bus error - SCL stuck low (PCA9564) */ DEB2("BUS ERROR - SCL Stuck low\n"); pca_reset(adap); goto out; case 0x00: /* Bus error during master or slave mode due to illegal START or STOP condition */ DEB2("BUS ERROR - Illegal START or STOP\n"); pca_reset(adap); goto out; default: dev_err(&i2c_adap->dev, "unhandled SIO state 0x%02x\n", state); break; } if (!completed) goto out; } ret = curmsg; out: DEB1("}}} transferred %d/%d messages. " "status is %#04x. control is %#04x\n", curmsg, num, pca_status(adap), pca_get_con(adap)); return ret; } static u32 pca_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static const struct i2c_algorithm pca_algo = { .master_xfer = pca_xfer, .functionality = pca_func, }; static unsigned int pca_probe_chip(struct i2c_adapter *adap) { struct i2c_algo_pca_data *pca_data = adap->algo_data; /* The trick here is to check if there is an indirect register * available. 
If there is one, we will read the value we first * wrote on I2C_PCA_IADR. Otherwise, we will read the last value * we wrote on I2C_PCA_ADR */ pca_outw(pca_data, I2C_PCA_INDPTR, I2C_PCA_IADR); pca_outw(pca_data, I2C_PCA_IND, 0xAA); pca_outw(pca_data, I2C_PCA_INDPTR, I2C_PCA_ITO); pca_outw(pca_data, I2C_PCA_IND, 0x00); pca_outw(pca_data, I2C_PCA_INDPTR, I2C_PCA_IADR); if (pca_inw(pca_data, I2C_PCA_IND) == 0xAA) { printk(KERN_INFO "%s: PCA9665 detected.\n", adap->name); pca_data->chip = I2C_PCA_CHIP_9665; } else { printk(KERN_INFO "%s: PCA9564 detected.\n", adap->name); pca_data->chip = I2C_PCA_CHIP_9564; } return pca_data->chip; } static int pca_init(struct i2c_adapter *adap) { struct i2c_algo_pca_data *pca_data = adap->algo_data; adap->algo = &pca_algo; if (pca_probe_chip(adap) == I2C_PCA_CHIP_9564) { static int freqs[] = {330, 288, 217, 146, 88, 59, 44, 36}; int clock; if (pca_data->i2c_clock > 7) { switch (pca_data->i2c_clock) { case 330000: pca_data->i2c_clock = I2C_PCA_CON_330kHz; break; case 288000: pca_data->i2c_clock = I2C_PCA_CON_288kHz; break; case 217000: pca_data->i2c_clock = I2C_PCA_CON_217kHz; break; case 146000: pca_data->i2c_clock = I2C_PCA_CON_146kHz; break; case 88000: pca_data->i2c_clock = I2C_PCA_CON_88kHz; break; case 59000: pca_data->i2c_clock = I2C_PCA_CON_59kHz; break; case 44000: pca_data->i2c_clock = I2C_PCA_CON_44kHz; break; case 36000: pca_data->i2c_clock = I2C_PCA_CON_36kHz; break; default: printk(KERN_WARNING "%s: Invalid I2C clock speed selected." " Using default 59kHz.\n", adap->name); pca_data->i2c_clock = I2C_PCA_CON_59kHz; } } else { printk(KERN_WARNING "%s: " "Choosing the clock frequency based on " "index is deprecated." " Use the nominal frequency.\n", adap->name); } clock = pca_clock(pca_data); printk(KERN_INFO "%s: Clock frequency is %dkHz\n", adap->name, freqs[clock]); /* Store settings as these will be needed when the PCA chip is reset */ pca_data->bus_settings.clock_freq = clock; pca_reset(pca_data); } else { int clock; int mode; int tlow, thi; /* Values can be found on PCA9665 datasheet section 7.3.2.6 */ int min_tlow, min_thi; /* These values are the maximum raise and fall values allowed * by the I2C operation mode (Standard, Fast or Fast+) * They are used (added) below to calculate the clock dividers * of PCA9665. Note that they are slightly different of the * real maximum, to allow the change on mode exactly on the * maximum clock rate for each mode */ int raise_fall_time; if (pca_data->i2c_clock > 1265800) { printk(KERN_WARNING "%s: I2C clock speed too high." " Using 1265.8kHz.\n", adap->name); pca_data->i2c_clock = 1265800; } if (pca_data->i2c_clock < 60300) { printk(KERN_WARNING "%s: I2C clock speed too low." 
" Using 60.3kHz.\n", adap->name); pca_data->i2c_clock = 60300; } /* To avoid integer overflow, use clock/100 for calculations */ clock = pca_clock(pca_data) / 100; if (pca_data->i2c_clock > I2C_MAX_FAST_MODE_PLUS_FREQ) { mode = I2C_PCA_MODE_TURBO; min_tlow = 14; min_thi = 5; raise_fall_time = 22; /* Raise 11e-8s, Fall 11e-8s */ } else if (pca_data->i2c_clock > I2C_MAX_FAST_MODE_FREQ) { mode = I2C_PCA_MODE_FASTP; min_tlow = 17; min_thi = 9; raise_fall_time = 22; /* Raise 11e-8s, Fall 11e-8s */ } else if (pca_data->i2c_clock > I2C_MAX_STANDARD_MODE_FREQ) { mode = I2C_PCA_MODE_FAST; min_tlow = 44; min_thi = 20; raise_fall_time = 58; /* Raise 29e-8s, Fall 29e-8s */ } else { mode = I2C_PCA_MODE_STD; min_tlow = 157; min_thi = 134; raise_fall_time = 127; /* Raise 29e-8s, Fall 98e-8s */ } /* The minimum clock that respects the thi/tlow = 134/157 is * 64800 Hz. Below that, we have to fix the tlow to 255 and * calculate the thi factor. */ if (clock < 648) { tlow = 255; thi = 1000000 - clock * raise_fall_time; thi /= (I2C_PCA_OSC_PER * clock) - tlow; } else { tlow = (1000000 - clock * raise_fall_time) * min_tlow; tlow /= I2C_PCA_OSC_PER * clock * (min_thi + min_tlow); thi = tlow * min_thi / min_tlow; } /* Store settings as these will be needed when the PCA chip is reset */ pca_data->bus_settings.mode = mode; pca_data->bus_settings.tlow = tlow; pca_data->bus_settings.thi = thi; pca_reset(pca_data); printk(KERN_INFO "%s: Clock frequency is %dHz\n", adap->name, clock * 100); } udelay(500); /* 500 us for oscillator to stabilise */ return 0; } /* * registering functions to load algorithms at runtime */ int i2c_pca_add_bus(struct i2c_adapter *adap) { int rval; rval = pca_init(adap); if (rval) return rval; return i2c_add_adapter(adap); } EXPORT_SYMBOL(i2c_pca_add_bus); int i2c_pca_add_numbered_bus(struct i2c_adapter *adap) { int rval; rval = pca_init(adap); if (rval) return rval; return i2c_add_numbered_adapter(adap); } EXPORT_SYMBOL(i2c_pca_add_numbered_bus); MODULE_AUTHOR("Ian Campbell <[email protected]>"); MODULE_AUTHOR("Wolfram Sang <[email protected]>"); MODULE_DESCRIPTION("I2C-Bus PCA9564/PCA9665 algorithm"); MODULE_LICENSE("GPL"); module_param(i2c_debug, int, 0);
linux-master
drivers/i2c/algos/i2c-algo-pca.c
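The PCA9665 branch of pca_init() above derives the SCL low/high dividers from the requested bus frequency. The standalone sketch below redoes that arithmetic outside the kernel so the mode thresholds and the 64.8 kHz clamp are easy to follow; the standard 100 kHz / 400 kHz / 1 MHz limits stand in for the I2C_MAX_*_FREQ constants, and I2C_PCA_OSC_PER (defined in <linux/i2c-algo-pca.h>, not reproduced here) is passed in as a parameter with a placeholder value.

#include <stdio.h>

struct pca9665_timing {
	int min_tlow;		/* minimum SCL low factor for the mode */
	int min_thi;		/* minimum SCL high factor for the mode */
	int raise_fall_time;	/* combined rise/fall allowance, as in the driver */
};

static struct pca9665_timing pca9665_pick_mode(unsigned int hz)
{
	if (hz > 1000000)	/* above Fast-mode Plus: turbo */
		return (struct pca9665_timing){ 14, 5, 22 };
	if (hz > 400000)	/* above Fast: Fast-mode Plus */
		return (struct pca9665_timing){ 17, 9, 22 };
	if (hz > 100000)	/* above Standard: Fast */
		return (struct pca9665_timing){ 44, 20, 58 };
	return (struct pca9665_timing){ 157, 134, 127 };	/* Standard */
}

/* osc_per stands in for I2C_PCA_OSC_PER, whose value is not shown here */
static void pca9665_dividers(unsigned int hz, int osc_per, int *tlow, int *thi)
{
	struct pca9665_timing t = pca9665_pick_mode(hz);
	int clock = hz / 100;	/* the driver works on clock/100 to avoid overflow */

	if (clock < 648) {	/* below 64.8 kHz: clamp tlow and derive thi */
		*tlow = 255;
		*thi = (1000000 - clock * t.raise_fall_time) /
		       (osc_per * clock - *tlow);
	} else {
		*tlow = (1000000 - clock * t.raise_fall_time) * t.min_tlow /
			(osc_per * clock * (t.min_thi + t.min_tlow));
		*thi = *tlow * t.min_thi / t.min_tlow;
	}
}

int main(void)
{
	int tlow, thi;

	pca9665_dividers(400000, 3, &tlow, &thi);	/* osc_per = 3 is a placeholder */
	printf("400 kHz: tlow=%d thi=%d\n", tlow, thi);
	pca9665_dividers(60300, 3, &tlow, &thi);	/* lowest clock the driver accepts */
	printf("60.3 kHz: tlow=%d thi=%d\n", tlow, thi);
	return 0;
}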
// SPDX-License-Identifier: GPL-2.0-only /* * Pinctrl based I2C DeMultiplexer * * Copyright (C) 2015-16 by Wolfram Sang, Sang Engineering <[email protected]> * Copyright (C) 2015-16 by Renesas Electronics Corporation * * See the bindings doc for DTS setup and the sysfs doc for usage information. * (look for filenames containing 'i2c-demux-pinctrl' in Documentation/) */ #include <linux/i2c.h> #include <linux/init.h> #include <linux/module.h> #include <linux/of.h> #include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/sysfs.h> struct i2c_demux_pinctrl_chan { struct device_node *parent_np; struct i2c_adapter *parent_adap; struct of_changeset chgset; }; struct i2c_demux_pinctrl_priv { int cur_chan; int num_chan; struct device *dev; const char *bus_name; struct i2c_adapter cur_adap; struct i2c_algorithm algo; struct i2c_demux_pinctrl_chan chan[]; }; static int i2c_demux_master_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) { struct i2c_demux_pinctrl_priv *priv = adap->algo_data; struct i2c_adapter *parent = priv->chan[priv->cur_chan].parent_adap; return __i2c_transfer(parent, msgs, num); } static u32 i2c_demux_functionality(struct i2c_adapter *adap) { struct i2c_demux_pinctrl_priv *priv = adap->algo_data; struct i2c_adapter *parent = priv->chan[priv->cur_chan].parent_adap; return parent->algo->functionality(parent); } static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 new_chan) { struct i2c_adapter *adap; struct pinctrl *p; int ret; ret = of_changeset_apply(&priv->chan[new_chan].chgset); if (ret) goto err; adap = of_find_i2c_adapter_by_node(priv->chan[new_chan].parent_np); if (!adap) { ret = -ENODEV; goto err_with_revert; } /* * Check if there are pinctrl states at all. Note: we cant' use * devm_pinctrl_get_select() because we need to distinguish between * the -ENODEV from devm_pinctrl_get() and pinctrl_lookup_state(). */ p = devm_pinctrl_get(adap->dev.parent); if (IS_ERR(p)) { ret = PTR_ERR(p); /* continue if just no pinctrl states (e.g. i2c-gpio), otherwise exit */ if (ret != -ENODEV) goto err_with_put; } else { /* there are states. check and use them */ struct pinctrl_state *s = pinctrl_lookup_state(p, priv->bus_name); if (IS_ERR(s)) { ret = PTR_ERR(s); goto err_with_put; } ret = pinctrl_select_state(p, s); if (ret < 0) goto err_with_put; } priv->chan[new_chan].parent_adap = adap; priv->cur_chan = new_chan; /* Now fill out current adapter structure. 
cur_chan must be up to date */ priv->algo.master_xfer = i2c_demux_master_xfer; if (adap->algo->master_xfer_atomic) priv->algo.master_xfer_atomic = i2c_demux_master_xfer; priv->algo.functionality = i2c_demux_functionality; snprintf(priv->cur_adap.name, sizeof(priv->cur_adap.name), "i2c-demux (master i2c-%d)", i2c_adapter_id(adap)); priv->cur_adap.owner = THIS_MODULE; priv->cur_adap.algo = &priv->algo; priv->cur_adap.algo_data = priv; priv->cur_adap.dev.parent = &adap->dev; priv->cur_adap.class = adap->class; priv->cur_adap.retries = adap->retries; priv->cur_adap.timeout = adap->timeout; priv->cur_adap.quirks = adap->quirks; priv->cur_adap.dev.of_node = priv->dev->of_node; ret = i2c_add_adapter(&priv->cur_adap); if (ret < 0) goto err_with_put; return 0; err_with_put: i2c_put_adapter(adap); err_with_revert: of_changeset_revert(&priv->chan[new_chan].chgset); err: dev_err(priv->dev, "failed to setup demux-adapter %d (%d)\n", new_chan, ret); priv->cur_chan = -EINVAL; return ret; } static int i2c_demux_deactivate_master(struct i2c_demux_pinctrl_priv *priv) { int ret, cur = priv->cur_chan; if (cur < 0) return 0; i2c_del_adapter(&priv->cur_adap); i2c_put_adapter(priv->chan[cur].parent_adap); ret = of_changeset_revert(&priv->chan[cur].chgset); priv->chan[cur].parent_adap = NULL; priv->cur_chan = -EINVAL; return ret; } static int i2c_demux_change_master(struct i2c_demux_pinctrl_priv *priv, u32 new_chan) { int ret; if (new_chan == priv->cur_chan) return 0; ret = i2c_demux_deactivate_master(priv); if (ret) return ret; return i2c_demux_activate_master(priv, new_chan); } static ssize_t available_masters_show(struct device *dev, struct device_attribute *attr, char *buf) { struct i2c_demux_pinctrl_priv *priv = dev_get_drvdata(dev); int count = 0, i; for (i = 0; i < priv->num_chan && count < PAGE_SIZE; i++) count += scnprintf(buf + count, PAGE_SIZE - count, "%d:%pOF%c", i, priv->chan[i].parent_np, i == priv->num_chan - 1 ? '\n' : ' '); return count; } static DEVICE_ATTR_RO(available_masters); static ssize_t current_master_show(struct device *dev, struct device_attribute *attr, char *buf) { struct i2c_demux_pinctrl_priv *priv = dev_get_drvdata(dev); return sprintf(buf, "%d\n", priv->cur_chan); } static ssize_t current_master_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_demux_pinctrl_priv *priv = dev_get_drvdata(dev); unsigned int val; int ret; ret = kstrtouint(buf, 0, &val); if (ret < 0) return ret; if (val >= priv->num_chan) return -EINVAL; ret = i2c_demux_change_master(priv, val); return ret < 0 ? 
ret : count; } static DEVICE_ATTR_RW(current_master); static int i2c_demux_pinctrl_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct i2c_demux_pinctrl_priv *priv; struct property *props; int num_chan, i, j, err; num_chan = of_count_phandle_with_args(np, "i2c-parent", NULL); if (num_chan < 2) { dev_err(&pdev->dev, "Need at least two I2C masters to switch\n"); return -EINVAL; } priv = devm_kzalloc(&pdev->dev, struct_size(priv, chan, num_chan), GFP_KERNEL); props = devm_kcalloc(&pdev->dev, num_chan, sizeof(*props), GFP_KERNEL); if (!priv || !props) return -ENOMEM; err = of_property_read_string(np, "i2c-bus-name", &priv->bus_name); if (err) return err; for (i = 0; i < num_chan; i++) { struct device_node *adap_np; adap_np = of_parse_phandle(np, "i2c-parent", i); if (!adap_np) { dev_err(&pdev->dev, "can't get phandle for parent %d\n", i); err = -ENOENT; goto err_rollback; } priv->chan[i].parent_np = adap_np; props[i].name = devm_kstrdup(&pdev->dev, "status", GFP_KERNEL); props[i].value = devm_kstrdup(&pdev->dev, "ok", GFP_KERNEL); if (!props[i].name || !props[i].value) { err = -ENOMEM; goto err_rollback; } props[i].length = 3; of_changeset_init(&priv->chan[i].chgset); of_changeset_update_property(&priv->chan[i].chgset, adap_np, &props[i]); } priv->num_chan = num_chan; priv->dev = &pdev->dev; platform_set_drvdata(pdev, priv); pm_runtime_no_callbacks(&pdev->dev); /* switch to first parent as active master */ i2c_demux_activate_master(priv, 0); err = device_create_file(&pdev->dev, &dev_attr_available_masters); if (err) goto err_rollback_activation; err = device_create_file(&pdev->dev, &dev_attr_current_master); if (err) goto err_rollback_available; return 0; err_rollback_available: device_remove_file(&pdev->dev, &dev_attr_available_masters); err_rollback_activation: i2c_demux_deactivate_master(priv); err_rollback: for (j = 0; j < i; j++) { of_node_put(priv->chan[j].parent_np); of_changeset_destroy(&priv->chan[j].chgset); } return err; } static void i2c_demux_pinctrl_remove(struct platform_device *pdev) { struct i2c_demux_pinctrl_priv *priv = platform_get_drvdata(pdev); int i; device_remove_file(&pdev->dev, &dev_attr_current_master); device_remove_file(&pdev->dev, &dev_attr_available_masters); i2c_demux_deactivate_master(priv); for (i = 0; i < priv->num_chan; i++) { of_node_put(priv->chan[i].parent_np); of_changeset_destroy(&priv->chan[i].chgset); } } static const struct of_device_id i2c_demux_pinctrl_of_match[] = { { .compatible = "i2c-demux-pinctrl", }, {}, }; MODULE_DEVICE_TABLE(of, i2c_demux_pinctrl_of_match); static struct platform_driver i2c_demux_pinctrl_driver = { .driver = { .name = "i2c-demux-pinctrl", .of_match_table = i2c_demux_pinctrl_of_match, }, .probe = i2c_demux_pinctrl_probe, .remove_new = i2c_demux_pinctrl_remove, }; module_platform_driver(i2c_demux_pinctrl_driver); MODULE_DESCRIPTION("pinctrl-based I2C demux driver"); MODULE_AUTHOR("Wolfram Sang <[email protected]>"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:i2c-demux-pinctrl");
linux-master
drivers/i2c/muxes/i2c-demux-pinctrl.c
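The demux driver above is controlled at runtime through the current_master and available_masters sysfs attributes created in probe(). A minimal userspace sketch of switching to parent 1 follows; the sysfs path is hypothetical and depends on how the platform device is named in the device tree.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical path; the real one depends on the DT node name */
	const char *attr = "/sys/devices/platform/i2c-demux/current_master";
	int fd = open(attr, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* the store callback rejects values >= num_chan with -EINVAL */
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}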
// SPDX-License-Identifier: GPL-2.0-only /* * General Purpose I2C multiplexer * * Copyright (C) 2017 Axentia Technologies AB * * Author: Peter Rosin <[email protected]> */ #include <linux/i2c.h> #include <linux/i2c-mux.h> #include <linux/module.h> #include <linux/mux/consumer.h> #include <linux/of.h> #include <linux/platform_device.h> struct mux { struct mux_control *control; bool do_not_deselect; }; static int i2c_mux_select(struct i2c_mux_core *muxc, u32 chan) { struct mux *mux = i2c_mux_priv(muxc); int ret; ret = mux_control_select(mux->control, chan); mux->do_not_deselect = ret < 0; return ret; } static int i2c_mux_deselect(struct i2c_mux_core *muxc, u32 chan) { struct mux *mux = i2c_mux_priv(muxc); if (mux->do_not_deselect) return 0; return mux_control_deselect(mux->control); } static struct i2c_adapter *mux_parent_adapter(struct device *dev) { struct device_node *np = dev->of_node; struct device_node *parent_np; struct i2c_adapter *parent; parent_np = of_parse_phandle(np, "i2c-parent", 0); if (!parent_np) { dev_err(dev, "Cannot parse i2c-parent\n"); return ERR_PTR(-ENODEV); } parent = of_find_i2c_adapter_by_node(parent_np); of_node_put(parent_np); if (!parent) return ERR_PTR(-EPROBE_DEFER); return parent; } static const struct of_device_id i2c_mux_of_match[] = { { .compatible = "i2c-mux", }, {}, }; MODULE_DEVICE_TABLE(of, i2c_mux_of_match); static int i2c_mux_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct device_node *child; struct i2c_mux_core *muxc; struct mux *mux; struct i2c_adapter *parent; int children; int ret; if (!np) return -ENODEV; mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL); if (!mux) return -ENOMEM; mux->control = devm_mux_control_get(dev, NULL); if (IS_ERR(mux->control)) return dev_err_probe(dev, PTR_ERR(mux->control), "failed to get control-mux\n"); parent = mux_parent_adapter(dev); if (IS_ERR(parent)) return dev_err_probe(dev, PTR_ERR(parent), "failed to get i2c-parent adapter\n"); children = of_get_child_count(np); muxc = i2c_mux_alloc(parent, dev, children, 0, 0, i2c_mux_select, i2c_mux_deselect); if (!muxc) { ret = -ENOMEM; goto err_parent; } muxc->priv = mux; platform_set_drvdata(pdev, muxc); muxc->mux_locked = of_property_read_bool(np, "mux-locked"); for_each_child_of_node(np, child) { u32 chan; ret = of_property_read_u32(child, "reg", &chan); if (ret < 0) { dev_err(dev, "no reg property for node '%pOFn'\n", child); goto err_children; } if (chan >= mux_control_states(mux->control)) { dev_err(dev, "invalid reg %u\n", chan); ret = -EINVAL; goto err_children; } ret = i2c_mux_add_adapter(muxc, 0, chan, 0); if (ret) goto err_children; } dev_info(dev, "%d-port mux on %s adapter\n", children, parent->name); return 0; err_children: of_node_put(child); i2c_mux_del_adapters(muxc); err_parent: i2c_put_adapter(parent); return ret; } static void i2c_mux_remove(struct platform_device *pdev) { struct i2c_mux_core *muxc = platform_get_drvdata(pdev); i2c_mux_del_adapters(muxc); i2c_put_adapter(muxc->parent); } static struct platform_driver i2c_mux_driver = { .probe = i2c_mux_probe, .remove_new = i2c_mux_remove, .driver = { .name = "i2c-mux-gpmux", .of_match_table = i2c_mux_of_match, }, }; module_platform_driver(i2c_mux_driver); MODULE_DESCRIPTION("General Purpose I2C multiplexer driver"); MODULE_AUTHOR("Peter Rosin <[email protected]>"); MODULE_LICENSE("GPL v2");
linux-master
drivers/i2c/muxes/i2c-mux-gpmux.c
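i2c_mux_select() and i2c_mux_deselect() above encode one rule of the mux consumer API: mux_control_deselect() may only balance a successful mux_control_select(), which is what the do_not_deselect flag tracks. The kernel-style sketch below shows the same pairing for a generic consumer; the work callback is a placeholder for whatever is done while the channel is selected.

#include <linux/mux/consumer.h>

/*
 * Sketch only: run some work with a mux channel selected.  A failed
 * mux_control_select() is returned as-is and, crucially, is not
 * followed by mux_control_deselect(), the same rule the
 * do_not_deselect flag implements for the I2C callbacks above.
 */
static int run_on_channel(struct mux_control *control, unsigned int chan,
			  int (*work)(void *ctx), void *ctx)
{
	int ret = mux_control_select(control, chan);

	if (ret < 0)
		return ret;		/* nothing to deselect */

	ret = work(ctx);		/* placeholder for the real transfer */
	mux_control_deselect(control);
	return ret;
}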
// SPDX-License-Identifier: GPL-2.0-only /* * Linear Technology LTC4306 and LTC4305 I2C multiplexer/switch * * Copyright (C) 2017 Analog Devices Inc. * * Based on: i2c-mux-pca954x.c * * Datasheet: http://cds.linear.com/docs/en/datasheet/4306.pdf */ #include <linux/gpio/consumer.h> #include <linux/gpio/driver.h> #include <linux/i2c-mux.h> #include <linux/i2c.h> #include <linux/module.h> #include <linux/of.h> #include <linux/property.h> #include <linux/regmap.h> #include <linux/slab.h> #define LTC4305_MAX_NCHANS 2 #define LTC4306_MAX_NCHANS 4 #define LTC_REG_STATUS 0x0 #define LTC_REG_CONFIG 0x1 #define LTC_REG_MODE 0x2 #define LTC_REG_SWITCH 0x3 #define LTC_DOWNSTREAM_ACCL_EN BIT(6) #define LTC_UPSTREAM_ACCL_EN BIT(7) #define LTC_GPIO_ALL_INPUT 0xC0 #define LTC_SWITCH_MASK 0xF0 enum ltc_type { ltc_4305, ltc_4306, }; struct chip_desc { u8 nchans; u8 num_gpios; }; struct ltc4306 { struct regmap *regmap; struct gpio_chip gpiochip; const struct chip_desc *chip; }; static const struct chip_desc chips[] = { [ltc_4305] = { .nchans = LTC4305_MAX_NCHANS, }, [ltc_4306] = { .nchans = LTC4306_MAX_NCHANS, .num_gpios = 2, }, }; static bool ltc4306_is_volatile_reg(struct device *dev, unsigned int reg) { return reg == LTC_REG_CONFIG; } static const struct regmap_config ltc4306_regmap_config = { .reg_bits = 8, .val_bits = 8, .max_register = LTC_REG_SWITCH, .volatile_reg = ltc4306_is_volatile_reg, .cache_type = REGCACHE_FLAT, }; static int ltc4306_gpio_get(struct gpio_chip *chip, unsigned int offset) { struct ltc4306 *data = gpiochip_get_data(chip); unsigned int val; int ret; ret = regmap_read(data->regmap, LTC_REG_CONFIG, &val); if (ret < 0) return ret; return !!(val & BIT(1 - offset)); } static void ltc4306_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) { struct ltc4306 *data = gpiochip_get_data(chip); regmap_update_bits(data->regmap, LTC_REG_CONFIG, BIT(5 - offset), value ? 
BIT(5 - offset) : 0); } static int ltc4306_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) { struct ltc4306 *data = gpiochip_get_data(chip); unsigned int val; int ret; ret = regmap_read(data->regmap, LTC_REG_MODE, &val); if (ret < 0) return ret; return !!(val & BIT(7 - offset)); } static int ltc4306_gpio_direction_input(struct gpio_chip *chip, unsigned int offset) { struct ltc4306 *data = gpiochip_get_data(chip); return regmap_update_bits(data->regmap, LTC_REG_MODE, BIT(7 - offset), BIT(7 - offset)); } static int ltc4306_gpio_direction_output(struct gpio_chip *chip, unsigned int offset, int value) { struct ltc4306 *data = gpiochip_get_data(chip); ltc4306_gpio_set(chip, offset, value); return regmap_update_bits(data->regmap, LTC_REG_MODE, BIT(7 - offset), 0); } static int ltc4306_gpio_set_config(struct gpio_chip *chip, unsigned int offset, unsigned long config) { struct ltc4306 *data = gpiochip_get_data(chip); unsigned int val; switch (pinconf_to_config_param(config)) { case PIN_CONFIG_DRIVE_OPEN_DRAIN: val = 0; break; case PIN_CONFIG_DRIVE_PUSH_PULL: val = BIT(4 - offset); break; default: return -ENOTSUPP; } return regmap_update_bits(data->regmap, LTC_REG_MODE, BIT(4 - offset), val); } static int ltc4306_gpio_init(struct ltc4306 *data) { struct device *dev = regmap_get_device(data->regmap); if (!data->chip->num_gpios) return 0; data->gpiochip.label = dev_name(dev); data->gpiochip.base = -1; data->gpiochip.ngpio = data->chip->num_gpios; data->gpiochip.parent = dev; data->gpiochip.can_sleep = true; data->gpiochip.get_direction = ltc4306_gpio_get_direction; data->gpiochip.direction_input = ltc4306_gpio_direction_input; data->gpiochip.direction_output = ltc4306_gpio_direction_output; data->gpiochip.get = ltc4306_gpio_get; data->gpiochip.set = ltc4306_gpio_set; data->gpiochip.set_config = ltc4306_gpio_set_config; data->gpiochip.owner = THIS_MODULE; /* gpiolib assumes all GPIOs default input */ regmap_write(data->regmap, LTC_REG_MODE, LTC_GPIO_ALL_INPUT); return devm_gpiochip_add_data(dev, &data->gpiochip, data); } static int ltc4306_select_mux(struct i2c_mux_core *muxc, u32 chan) { struct ltc4306 *data = i2c_mux_priv(muxc); return regmap_update_bits(data->regmap, LTC_REG_SWITCH, LTC_SWITCH_MASK, BIT(7 - chan)); } static int ltc4306_deselect_mux(struct i2c_mux_core *muxc, u32 chan) { struct ltc4306 *data = i2c_mux_priv(muxc); return regmap_update_bits(data->regmap, LTC_REG_SWITCH, LTC_SWITCH_MASK, 0); } static const struct i2c_device_id ltc4306_id[] = { { "ltc4305", ltc_4305 }, { "ltc4306", ltc_4306 }, { } }; MODULE_DEVICE_TABLE(i2c, ltc4306_id); static const struct of_device_id ltc4306_of_match[] = { { .compatible = "lltc,ltc4305", .data = &chips[ltc_4305] }, { .compatible = "lltc,ltc4306", .data = &chips[ltc_4306] }, { } }; MODULE_DEVICE_TABLE(of, ltc4306_of_match); static int ltc4306_probe(struct i2c_client *client) { struct i2c_adapter *adap = client->adapter; const struct chip_desc *chip; struct i2c_mux_core *muxc; struct ltc4306 *data; struct gpio_desc *gpio; bool idle_disc; unsigned int val = 0; int num, ret; chip = of_device_get_match_data(&client->dev); if (!chip) chip = &chips[i2c_match_id(ltc4306_id, client)->driver_data]; idle_disc = device_property_read_bool(&client->dev, "i2c-mux-idle-disconnect"); muxc = i2c_mux_alloc(adap, &client->dev, chip->nchans, sizeof(*data), I2C_MUX_LOCKED, ltc4306_select_mux, idle_disc ? 
ltc4306_deselect_mux : NULL); if (!muxc) return -ENOMEM; data = i2c_mux_priv(muxc); data->chip = chip; i2c_set_clientdata(client, muxc); data->regmap = devm_regmap_init_i2c(client, &ltc4306_regmap_config); if (IS_ERR(data->regmap)) { ret = PTR_ERR(data->regmap); dev_err(&client->dev, "Failed to allocate register map: %d\n", ret); return ret; } /* Reset and enable the mux if an enable GPIO is specified. */ gpio = devm_gpiod_get_optional(&client->dev, "enable", GPIOD_OUT_LOW); if (IS_ERR(gpio)) return PTR_ERR(gpio); if (gpio) { udelay(1); gpiod_set_value(gpio, 1); } /* * Write the mux register at addr to verify * that the mux is in fact present. This also * initializes the mux to disconnected state. */ if (regmap_write(data->regmap, LTC_REG_SWITCH, 0) < 0) { dev_warn(&client->dev, "probe failed\n"); return -ENODEV; } if (device_property_read_bool(&client->dev, "ltc,downstream-accelerators-enable")) val |= LTC_DOWNSTREAM_ACCL_EN; if (device_property_read_bool(&client->dev, "ltc,upstream-accelerators-enable")) val |= LTC_UPSTREAM_ACCL_EN; if (regmap_write(data->regmap, LTC_REG_CONFIG, val) < 0) return -ENODEV; ret = ltc4306_gpio_init(data); if (ret < 0) return ret; /* Now create an adapter for each channel */ for (num = 0; num < chip->nchans; num++) { ret = i2c_mux_add_adapter(muxc, 0, num, 0); if (ret) { i2c_mux_del_adapters(muxc); return ret; } } dev_info(&client->dev, "registered %d multiplexed busses for I2C switch %s\n", num, client->name); return 0; } static void ltc4306_remove(struct i2c_client *client) { struct i2c_mux_core *muxc = i2c_get_clientdata(client); i2c_mux_del_adapters(muxc); } static struct i2c_driver ltc4306_driver = { .driver = { .name = "ltc4306", .of_match_table = of_match_ptr(ltc4306_of_match), }, .probe = ltc4306_probe, .remove = ltc4306_remove, .id_table = ltc4306_id, }; module_i2c_driver(ltc4306_driver); MODULE_AUTHOR("Michael Hennerich <[email protected]>"); MODULE_DESCRIPTION("Linear Technology LTC4306, LTC4305 I2C mux/switch driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/i2c/muxes/i2c-mux-ltc4306.c
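The LTC4306 callbacks above all address fixed bit positions counted down from the top of each register. The small standalone program below just prints those masks for the two GPIOs and the four downstream channels, mirroring the BIT() arithmetic used in the driver.

#include <stdio.h>

#define BIT(n)	(1u << (n))

int main(void)
{
	unsigned int offset, chan;

	for (offset = 0; offset < 2; offset++)	/* the LTC4306 exposes two GPIOs */
		printf("gpio%u: level=0x%02x out=0x%02x dir=0x%02x push-pull=0x%02x\n",
		       offset,
		       BIT(1 - offset),		/* input level, LTC_REG_CONFIG */
		       BIT(5 - offset),		/* output value, LTC_REG_CONFIG */
		       BIT(7 - offset),		/* 1 = input, LTC_REG_MODE */
		       BIT(4 - offset));	/* push-pull enable, LTC_REG_MODE */

	for (chan = 0; chan < 4; chan++)	/* connect bits live in the 0xF0 mask */
		printf("chan%u: LTC_REG_SWITCH bit=0x%02x\n", chan, BIT(7 - chan));

	return 0;
}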
// SPDX-License-Identifier: GPL-2.0 /* * I2C multiplexer * * Copyright (c) 2008-2009 Rodolfo Giometti <[email protected]> * Copyright (c) 2008-2009 Eurotech S.p.A. <[email protected]> * * This module supports the PCA954x and PCA984x series of I2C multiplexer/switch * chips made by NXP Semiconductors. * This includes the: * PCA9540, PCA9542, PCA9543, PCA9544, PCA9545, PCA9546, PCA9547, * PCA9548, PCA9846, PCA9847, PCA9848 and PCA9849. * * It's also compatible to Maxims MAX735x I2C switch chips, which are controlled * as the NXP PCA9548 and the MAX736x chips that act like the PCA9544. * * This includes the: * MAX7356, MAX7357, MAX7358, MAX7367, MAX7368 and MAX7369 * * These chips are all controlled via the I2C bus itself, and all have a * single 8-bit register. The upstream "parent" bus fans out to two, * four, or eight downstream busses or channels; which of these * are selected is determined by the chip type and register contents. A * mux can select only one sub-bus at a time; a switch can select any * combination simultaneously. * * Based on: * pca954x.c from Kumar Gala <[email protected]> * Copyright (C) 2006 * * Based on: * pca954x.c from Ken Harrenstien * Copyright (C) 2004 Google, Inc. (Ken Harrenstien) * * Based on: * i2c-virtual_cb.c from Brian Kuschak <[email protected]> * and * pca9540.c from Jean Delvare <[email protected]>. */ #include <linux/device.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/i2c.h> #include <linux/i2c-mux.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/module.h> #include <linux/pm.h> #include <linux/property.h> #include <linux/regulator/consumer.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <dt-bindings/mux/mux.h> #define PCA954X_MAX_NCHANS 8 #define PCA954X_IRQ_OFFSET 4 enum pca_type { max_7356, max_7357, max_7358, max_7367, max_7368, max_7369, pca_9540, pca_9542, pca_9543, pca_9544, pca_9545, pca_9546, pca_9547, pca_9548, pca_9846, pca_9847, pca_9848, pca_9849, }; struct chip_desc { u8 nchans; u8 enable; /* used for muxes only */ u8 has_irq; enum muxtype { pca954x_ismux = 0, pca954x_isswi } muxtype; struct i2c_device_identity id; }; struct pca954x { const struct chip_desc *chip; u8 last_chan; /* last register value */ /* MUX_IDLE_AS_IS, MUX_IDLE_DISCONNECT or >= 0 for channel */ s32 idle_state; struct i2c_client *client; struct irq_domain *irq; unsigned int irq_mask; raw_spinlock_t lock; struct regulator *supply; }; /* Provide specs for the MAX735x, PCA954x and PCA984x types we know about */ static const struct chip_desc chips[] = { [max_7356] = { .nchans = 8, .muxtype = pca954x_isswi, .id = { .manufacturer_id = I2C_DEVICE_ID_NONE }, }, [max_7357] = { .nchans = 8, .muxtype = pca954x_isswi, .id = { .manufacturer_id = I2C_DEVICE_ID_NONE }, /* * No interrupt controller support. The interrupt * provides information about stuck channels. */ }, [max_7358] = { .nchans = 8, .muxtype = pca954x_isswi, .id = { .manufacturer_id = I2C_DEVICE_ID_NONE }, /* * No interrupt controller support. The interrupt * provides information about stuck channels. 
*/ }, [max_7367] = { .nchans = 4, .muxtype = pca954x_isswi, .has_irq = 1, .id = { .manufacturer_id = I2C_DEVICE_ID_NONE }, }, [max_7368] = { .nchans = 4, .muxtype = pca954x_isswi, .id = { .manufacturer_id = I2C_DEVICE_ID_NONE }, }, [max_7369] = { .nchans = 4, .enable = 0x4, .muxtype = pca954x_ismux, .has_irq = 1, .id = { .manufacturer_id = I2C_DEVICE_ID_NONE }, }, [pca_9540] = { .nchans = 2, .enable = 0x4, .muxtype = pca954x_ismux, .id = { .manufacturer_id = I2C_DEVICE_ID_NONE }, }, [pca_9542] = { .nchans = 2, .enable = 0x4, .has_irq = 1, .muxtype = pca954x_ismux, .id = { .manufacturer_id = I2C_DEVICE_ID_NONE }, }, [pca_9543] = { .nchans = 2, .has_irq = 1, .muxtype = pca954x_isswi, .id = { .manufacturer_id = I2C_DEVICE_ID_NONE }, }, [pca_9544] = { .nchans = 4, .enable = 0x4, .has_irq = 1, .muxtype = pca954x_ismux, .id = { .manufacturer_id = I2C_DEVICE_ID_NONE }, }, [pca_9545] = { .nchans = 4, .has_irq = 1, .muxtype = pca954x_isswi, .id = { .manufacturer_id = I2C_DEVICE_ID_NONE }, }, [pca_9546] = { .nchans = 4, .muxtype = pca954x_isswi, .id = { .manufacturer_id = I2C_DEVICE_ID_NONE }, }, [pca_9547] = { .nchans = 8, .enable = 0x8, .muxtype = pca954x_ismux, .id = { .manufacturer_id = I2C_DEVICE_ID_NONE }, }, [pca_9548] = { .nchans = 8, .muxtype = pca954x_isswi, .id = { .manufacturer_id = I2C_DEVICE_ID_NONE }, }, [pca_9846] = { .nchans = 4, .muxtype = pca954x_isswi, .id = { .manufacturer_id = I2C_DEVICE_ID_NXP_SEMICONDUCTORS, .part_id = 0x10b, }, }, [pca_9847] = { .nchans = 8, .enable = 0x8, .muxtype = pca954x_ismux, .id = { .manufacturer_id = I2C_DEVICE_ID_NXP_SEMICONDUCTORS, .part_id = 0x108, }, }, [pca_9848] = { .nchans = 8, .muxtype = pca954x_isswi, .id = { .manufacturer_id = I2C_DEVICE_ID_NXP_SEMICONDUCTORS, .part_id = 0x10a, }, }, [pca_9849] = { .nchans = 4, .enable = 0x4, .muxtype = pca954x_ismux, .id = { .manufacturer_id = I2C_DEVICE_ID_NXP_SEMICONDUCTORS, .part_id = 0x109, }, }, }; static const struct i2c_device_id pca954x_id[] = { { "max7356", max_7356 }, { "max7357", max_7357 }, { "max7358", max_7358 }, { "max7367", max_7367 }, { "max7368", max_7368 }, { "max7369", max_7369 }, { "pca9540", pca_9540 }, { "pca9542", pca_9542 }, { "pca9543", pca_9543 }, { "pca9544", pca_9544 }, { "pca9545", pca_9545 }, { "pca9546", pca_9546 }, { "pca9547", pca_9547 }, { "pca9548", pca_9548 }, { "pca9846", pca_9846 }, { "pca9847", pca_9847 }, { "pca9848", pca_9848 }, { "pca9849", pca_9849 }, { } }; MODULE_DEVICE_TABLE(i2c, pca954x_id); static const struct of_device_id pca954x_of_match[] = { { .compatible = "maxim,max7356", .data = &chips[max_7356] }, { .compatible = "maxim,max7357", .data = &chips[max_7357] }, { .compatible = "maxim,max7358", .data = &chips[max_7358] }, { .compatible = "maxim,max7367", .data = &chips[max_7367] }, { .compatible = "maxim,max7368", .data = &chips[max_7368] }, { .compatible = "maxim,max7369", .data = &chips[max_7369] }, { .compatible = "nxp,pca9540", .data = &chips[pca_9540] }, { .compatible = "nxp,pca9542", .data = &chips[pca_9542] }, { .compatible = "nxp,pca9543", .data = &chips[pca_9543] }, { .compatible = "nxp,pca9544", .data = &chips[pca_9544] }, { .compatible = "nxp,pca9545", .data = &chips[pca_9545] }, { .compatible = "nxp,pca9546", .data = &chips[pca_9546] }, { .compatible = "nxp,pca9547", .data = &chips[pca_9547] }, { .compatible = "nxp,pca9548", .data = &chips[pca_9548] }, { .compatible = "nxp,pca9846", .data = &chips[pca_9846] }, { .compatible = "nxp,pca9847", .data = &chips[pca_9847] }, { .compatible = "nxp,pca9848", .data = &chips[pca_9848] }, { .compatible = 
"nxp,pca9849", .data = &chips[pca_9849] }, {} }; MODULE_DEVICE_TABLE(of, pca954x_of_match); /* Write to mux register. Don't use i2c_transfer()/i2c_smbus_xfer() for this as they will try to lock adapter a second time */ static int pca954x_reg_write(struct i2c_adapter *adap, struct i2c_client *client, u8 val) { union i2c_smbus_data dummy; return __i2c_smbus_xfer(adap, client->addr, client->flags, I2C_SMBUS_WRITE, val, I2C_SMBUS_BYTE, &dummy); } static u8 pca954x_regval(struct pca954x *data, u8 chan) { /* We make switches look like muxes, not sure how to be smarter. */ if (data->chip->muxtype == pca954x_ismux) return chan | data->chip->enable; else return 1 << chan; } static int pca954x_select_chan(struct i2c_mux_core *muxc, u32 chan) { struct pca954x *data = i2c_mux_priv(muxc); struct i2c_client *client = data->client; u8 regval; int ret = 0; regval = pca954x_regval(data, chan); /* Only select the channel if its different from the last channel */ if (data->last_chan != regval) { ret = pca954x_reg_write(muxc->parent, client, regval); data->last_chan = ret < 0 ? 0 : regval; } return ret; } static int pca954x_deselect_mux(struct i2c_mux_core *muxc, u32 chan) { struct pca954x *data = i2c_mux_priv(muxc); struct i2c_client *client = data->client; s32 idle_state; idle_state = READ_ONCE(data->idle_state); if (idle_state >= 0) /* Set the mux back to a predetermined channel */ return pca954x_select_chan(muxc, idle_state); if (idle_state == MUX_IDLE_DISCONNECT) { /* Deselect active channel */ data->last_chan = 0; return pca954x_reg_write(muxc->parent, client, data->last_chan); } /* otherwise leave as-is */ return 0; } static ssize_t idle_state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct i2c_client *client = to_i2c_client(dev); struct i2c_mux_core *muxc = i2c_get_clientdata(client); struct pca954x *data = i2c_mux_priv(muxc); return sprintf(buf, "%d\n", READ_ONCE(data->idle_state)); } static ssize_t idle_state_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct i2c_mux_core *muxc = i2c_get_clientdata(client); struct pca954x *data = i2c_mux_priv(muxc); int val; int ret; ret = kstrtoint(buf, 0, &val); if (ret < 0) return ret; if (val != MUX_IDLE_AS_IS && val != MUX_IDLE_DISCONNECT && (val < 0 || val >= data->chip->nchans)) return -EINVAL; i2c_lock_bus(muxc->parent, I2C_LOCK_SEGMENT); WRITE_ONCE(data->idle_state, val); /* * Set the mux into a state consistent with the new * idle_state. */ if (data->last_chan || val != MUX_IDLE_DISCONNECT) ret = pca954x_deselect_mux(muxc, 0); i2c_unlock_bus(muxc->parent, I2C_LOCK_SEGMENT); return ret < 0 ? 
ret : count; } static DEVICE_ATTR_RW(idle_state); static irqreturn_t pca954x_irq_handler(int irq, void *dev_id) { struct pca954x *data = dev_id; unsigned long pending; int ret, i; ret = i2c_smbus_read_byte(data->client); if (ret < 0) return IRQ_NONE; pending = (ret >> PCA954X_IRQ_OFFSET) & (BIT(data->chip->nchans) - 1); for_each_set_bit(i, &pending, data->chip->nchans) handle_nested_irq(irq_linear_revmap(data->irq, i)); return IRQ_RETVAL(pending); } static int pca954x_irq_set_type(struct irq_data *idata, unsigned int type) { if ((type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_LEVEL_LOW) return -EINVAL; return 0; } static struct irq_chip pca954x_irq_chip = { .name = "i2c-mux-pca954x", .irq_set_type = pca954x_irq_set_type, }; static int pca954x_irq_setup(struct i2c_mux_core *muxc) { struct pca954x *data = i2c_mux_priv(muxc); struct i2c_client *client = data->client; int c, irq; if (!data->chip->has_irq || client->irq <= 0) return 0; raw_spin_lock_init(&data->lock); data->irq = irq_domain_add_linear(client->dev.of_node, data->chip->nchans, &irq_domain_simple_ops, data); if (!data->irq) return -ENODEV; for (c = 0; c < data->chip->nchans; c++) { irq = irq_create_mapping(data->irq, c); if (!irq) { dev_err(&client->dev, "failed irq create map\n"); return -EINVAL; } irq_set_chip_data(irq, data); irq_set_chip_and_handler(irq, &pca954x_irq_chip, handle_simple_irq); } return 0; } static void pca954x_cleanup(struct i2c_mux_core *muxc) { struct pca954x *data = i2c_mux_priv(muxc); int c, irq; regulator_disable(data->supply); if (data->irq) { for (c = 0; c < data->chip->nchans; c++) { irq = irq_find_mapping(data->irq, c); irq_dispose_mapping(irq); } irq_domain_remove(data->irq); } i2c_mux_del_adapters(muxc); } static int pca954x_init(struct i2c_client *client, struct pca954x *data) { int ret; if (data->idle_state >= 0) data->last_chan = pca954x_regval(data, data->idle_state); else data->last_chan = 0; /* Disconnect multiplexer */ ret = i2c_smbus_write_byte(client, data->last_chan); if (ret < 0) data->last_chan = 0; return ret; } /* * I2C init/probing/exit functions */ static int pca954x_probe(struct i2c_client *client) { const struct i2c_device_id *id = i2c_client_get_device_id(client); struct i2c_adapter *adap = client->adapter; struct device *dev = &client->dev; struct gpio_desc *gpio; struct i2c_mux_core *muxc; struct pca954x *data; int num; int ret; if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_BYTE)) return -ENODEV; muxc = i2c_mux_alloc(adap, dev, PCA954X_MAX_NCHANS, sizeof(*data), 0, pca954x_select_chan, pca954x_deselect_mux); if (!muxc) return -ENOMEM; data = i2c_mux_priv(muxc); i2c_set_clientdata(client, muxc); data->client = client; data->supply = devm_regulator_get(dev, "vdd"); if (IS_ERR(data->supply)) return dev_err_probe(dev, PTR_ERR(data->supply), "Failed to request regulator\n"); ret = regulator_enable(data->supply); if (ret) return dev_err_probe(dev, ret, "Failed to enable vdd supply\n"); /* Reset the mux if a reset GPIO is specified. */ gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(gpio)) { ret = PTR_ERR(gpio); goto fail_cleanup; } if (gpio) { udelay(1); gpiod_set_value_cansleep(gpio, 0); /* Give the chip some time to recover. 
*/ udelay(1); } data->chip = device_get_match_data(dev); if (!data->chip) data->chip = &chips[id->driver_data]; if (data->chip->id.manufacturer_id != I2C_DEVICE_ID_NONE) { struct i2c_device_identity id; ret = i2c_get_device_id(client, &id); if (ret && ret != -EOPNOTSUPP) goto fail_cleanup; if (!ret && (id.manufacturer_id != data->chip->id.manufacturer_id || id.part_id != data->chip->id.part_id)) { dev_warn(dev, "unexpected device id %03x-%03x-%x\n", id.manufacturer_id, id.part_id, id.die_revision); ret = -ENODEV; goto fail_cleanup; } } data->idle_state = MUX_IDLE_AS_IS; if (device_property_read_u32(dev, "idle-state", &data->idle_state)) { if (device_property_read_bool(dev, "i2c-mux-idle-disconnect")) data->idle_state = MUX_IDLE_DISCONNECT; } /* * Write the mux register at addr to verify * that the mux is in fact present. This also * initializes the mux to a channel * or disconnected state. */ ret = pca954x_init(client, data); if (ret < 0) { dev_warn(dev, "probe failed\n"); ret = -ENODEV; goto fail_cleanup; } ret = pca954x_irq_setup(muxc); if (ret) goto fail_cleanup; /* Now create an adapter for each channel */ for (num = 0; num < data->chip->nchans; num++) { ret = i2c_mux_add_adapter(muxc, 0, num, 0); if (ret) goto fail_cleanup; } if (data->irq) { ret = devm_request_threaded_irq(dev, data->client->irq, NULL, pca954x_irq_handler, IRQF_ONESHOT | IRQF_SHARED, "pca954x", data); if (ret) goto fail_cleanup; } /* * The attr probably isn't going to be needed in most cases, * so don't fail completely on error. */ device_create_file(dev, &dev_attr_idle_state); dev_info(dev, "registered %d multiplexed busses for I2C %s %s\n", num, data->chip->muxtype == pca954x_ismux ? "mux" : "switch", client->name); return 0; fail_cleanup: pca954x_cleanup(muxc); return ret; } static void pca954x_remove(struct i2c_client *client) { struct i2c_mux_core *muxc = i2c_get_clientdata(client); device_remove_file(&client->dev, &dev_attr_idle_state); pca954x_cleanup(muxc); } static int pca954x_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct i2c_mux_core *muxc = i2c_get_clientdata(client); struct pca954x *data = i2c_mux_priv(muxc); int ret; ret = pca954x_init(client, data); if (ret < 0) dev_err(&client->dev, "failed to verify mux presence\n"); return ret; } static DEFINE_SIMPLE_DEV_PM_OPS(pca954x_pm, NULL, pca954x_resume); static struct i2c_driver pca954x_driver = { .driver = { .name = "pca954x", .pm = pm_sleep_ptr(&pca954x_pm), .of_match_table = pca954x_of_match, }, .probe = pca954x_probe, .remove = pca954x_remove, .id_table = pca954x_id, }; module_i2c_driver(pca954x_driver); MODULE_AUTHOR("Rodolfo Giometti <[email protected]>"); MODULE_DESCRIPTION("PCA954x I2C mux/switch driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/i2c/muxes/i2c-mux-pca954x.c
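pca954x_regval() above is the only place where mux and switch variants differ when selecting a channel: muxes program the channel number OR'ed with the chip's enable bit, switches program a one-hot bit. The standalone sketch below prints both encodings side by side, using the PCA9547 (mux, enable 0x8) and PCA9548 (switch) entries from the chip table.

#include <stdio.h>

static unsigned char regval(int is_mux, unsigned char enable, unsigned char chan)
{
	return is_mux ? (chan | enable) : (1u << chan);
}

int main(void)
{
	unsigned int chan;

	for (chan = 0; chan < 8; chan++)
		printf("chan %u: PCA9547 (mux) -> 0x%02x, PCA9548 (switch) -> 0x%02x\n",
		       chan,
		       regval(1, 0x8, chan),	/* .enable = 0x8 in the chip table */
		       regval(0, 0x0, chan));	/* switches: one-hot channel bit */
	return 0;
}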
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /* * Mellanox i2c mux driver * * Copyright (C) 2016-2020 Mellanox Technologies */ #include <linux/device.h> #include <linux/i2c.h> #include <linux/i2c-mux.h> #include <linux/io.h> #include <linux/init.h> #include <linux/module.h> #include <linux/platform_data/mlxcpld.h> #include <linux/platform_device.h> #include <linux/slab.h> /* mlxcpld_mux - mux control structure: * @last_val - last selected register value or -1 if mux deselected * @client - I2C device client * @pdata: platform data */ struct mlxcpld_mux { int last_val; struct i2c_client *client; struct mlxcpld_mux_plat_data pdata; }; /* MUX logic description. * Driver can support different mux control logic, according to CPLD * implementation. * * Connectivity schema. * * i2c-mlxcpld Digital Analog * driver * *--------* * -> mux1 (virt bus2) -> mux -> | * | I2CLPC | i2c physical * -> mux2 (virt bus3) -> mux -> | * | bridge | bus 1 *---------* | * | logic |---------------------> * mux reg * | * | in CPLD| *---------* | * *--------* i2c-mux-mlxpcld ^ * -> muxn (virt busn) -> mux -> | * | driver | | * | *---------------* | Devices * | * CPLD (i2c bus)* select | * | * registers for *--------* * | * mux selection * deselect * | *---------------* * | | * <--------> <-----------> * i2c cntrl Board cntrl reg * reg space space (mux select, * IO, LED, WD, info) * */ /* Write to mux register. Don't use i2c_transfer() and i2c_smbus_xfer() * for this as they will try to lock adapter a second time. */ static int mlxcpld_mux_reg_write(struct i2c_adapter *adap, struct mlxcpld_mux *mux, u32 val) { struct i2c_client *client = mux->client; union i2c_smbus_data data; struct i2c_msg msg; u8 buf[3]; switch (mux->pdata.reg_size) { case 1: data.byte = val; return __i2c_smbus_xfer(adap, client->addr, client->flags, I2C_SMBUS_WRITE, mux->pdata.sel_reg_addr, I2C_SMBUS_BYTE_DATA, &data); case 2: buf[0] = mux->pdata.sel_reg_addr >> 8; buf[1] = mux->pdata.sel_reg_addr; buf[2] = val; msg.addr = client->addr; msg.buf = buf; msg.len = mux->pdata.reg_size + 1; msg.flags = 0; return __i2c_transfer(adap, &msg, 1); default: return -EINVAL; } } static int mlxcpld_mux_select_chan(struct i2c_mux_core *muxc, u32 chan) { struct mlxcpld_mux *mux = i2c_mux_priv(muxc); u32 regval = chan; int err = 0; if (mux->pdata.reg_size == 1) regval += 1; /* Only select the channel if its different from the last channel */ if (mux->last_val != regval) { err = mlxcpld_mux_reg_write(muxc->parent, mux, regval); mux->last_val = err < 0 ? 
-1 : regval; } return err; } static int mlxcpld_mux_deselect(struct i2c_mux_core *muxc, u32 chan) { struct mlxcpld_mux *mux = i2c_mux_priv(muxc); /* Deselect active channel */ mux->last_val = -1; return mlxcpld_mux_reg_write(muxc->parent, mux, 0); } /* Probe/reomove functions */ static int mlxcpld_mux_probe(struct platform_device *pdev) { struct mlxcpld_mux_plat_data *pdata = dev_get_platdata(&pdev->dev); struct i2c_client *client = to_i2c_client(pdev->dev.parent); struct i2c_mux_core *muxc; struct mlxcpld_mux *data; int num, err; u32 func; if (!pdata) return -EINVAL; switch (pdata->reg_size) { case 1: func = I2C_FUNC_SMBUS_WRITE_BYTE_DATA; break; case 2: func = I2C_FUNC_I2C; break; default: return -EINVAL; } if (!i2c_check_functionality(client->adapter, func)) return -ENODEV; muxc = i2c_mux_alloc(client->adapter, &pdev->dev, pdata->num_adaps, sizeof(*data), 0, mlxcpld_mux_select_chan, mlxcpld_mux_deselect); if (!muxc) return -ENOMEM; platform_set_drvdata(pdev, muxc); data = i2c_mux_priv(muxc); data->client = client; memcpy(&data->pdata, pdata, sizeof(*pdata)); data->last_val = -1; /* force the first selection */ /* Create an adapter for each channel. */ for (num = 0; num < pdata->num_adaps; num++) { err = i2c_mux_add_adapter(muxc, 0, pdata->chan_ids[num], 0); if (err) goto virt_reg_failed; } /* Notify caller when all channels' adapters are created. */ if (pdata->completion_notify) pdata->completion_notify(pdata->handle, muxc->parent, muxc->adapter); return 0; virt_reg_failed: i2c_mux_del_adapters(muxc); return err; } static void mlxcpld_mux_remove(struct platform_device *pdev) { struct i2c_mux_core *muxc = platform_get_drvdata(pdev); i2c_mux_del_adapters(muxc); } static struct platform_driver mlxcpld_mux_driver = { .driver = { .name = "i2c-mux-mlxcpld", }, .probe = mlxcpld_mux_probe, .remove_new = mlxcpld_mux_remove, }; module_platform_driver(mlxcpld_mux_driver); MODULE_AUTHOR("Michael Shych ([email protected])"); MODULE_DESCRIPTION("Mellanox I2C-CPLD-MUX driver"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_ALIAS("platform:i2c-mux-mlxcpld");
linux-master
drivers/i2c/muxes/i2c-mux-mlxcpld.c
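mlxcpld_mux_reg_write() above supports CPLDs with either an 8-bit or a 16-bit select-register address; in the 16-bit case it builds a single raw I2C message with the address sent big-endian followed by the channel value (and note that the 8-bit path in select_chan() additionally offsets the channel by one). The standalone sketch below shows that 3-byte buffer layout with made-up register and channel values.

#include <stdio.h>

int main(void)
{
	unsigned short sel_reg_addr = 0x20da;	/* example CPLD select-register address */
	unsigned char val = 3;			/* example channel value */
	unsigned char buf[3];

	buf[0] = sel_reg_addr >> 8;		/* register address, high byte first */
	buf[1] = sel_reg_addr & 0xff;
	buf[2] = val;				/* channel written to the register */

	/* the driver sends this as one i2c_msg with len = reg_size + 1 = 3 */
	printf("raw write: %02x %02x %02x\n", buf[0], buf[1], buf[2]);
	return 0;
}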
// SPDX-License-Identifier: GPL-2.0-only /* * GPIO-based I2C Arbitration Using a Challenge & Response Mechanism * * Copyright (C) 2012 Google, Inc */ #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/kernel.h> #include <linux/i2c.h> #include <linux/i2c-mux.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> /** * struct i2c_arbitrator_data - Driver data for I2C arbitrator * * @our_gpio: GPIO descriptor we'll use to claim. * @their_gpio: GPIO descriptor that the other side will use to claim. * @slew_delay_us: microseconds to wait for a GPIO to go high. * @wait_retry_us: we'll attempt another claim after this many microseconds. * @wait_free_us: we'll give up after this many microseconds. */ struct i2c_arbitrator_data { struct gpio_desc *our_gpio; struct gpio_desc *their_gpio; unsigned int slew_delay_us; unsigned int wait_retry_us; unsigned int wait_free_us; }; /* * i2c_arbitrator_select - claim the I2C bus * * Use the GPIO-based signalling protocol; return -EBUSY if we fail. */ static int i2c_arbitrator_select(struct i2c_mux_core *muxc, u32 chan) { const struct i2c_arbitrator_data *arb = i2c_mux_priv(muxc); unsigned long stop_retry, stop_time; /* Start a round of trying to claim the bus */ stop_time = jiffies + usecs_to_jiffies(arb->wait_free_us) + 1; do { /* Indicate that we want to claim the bus */ gpiod_set_value(arb->our_gpio, 1); udelay(arb->slew_delay_us); /* Wait for the other master to release it */ stop_retry = jiffies + usecs_to_jiffies(arb->wait_retry_us) + 1; while (time_before(jiffies, stop_retry)) { int gpio_val = gpiod_get_value(arb->their_gpio); if (!gpio_val) { /* We got it, so return */ return 0; } usleep_range(50, 200); } /* It didn't release, so give up, wait, and try again */ gpiod_set_value(arb->our_gpio, 0); usleep_range(arb->wait_retry_us, arb->wait_retry_us * 2); } while (time_before(jiffies, stop_time)); /* Give up, release our claim */ gpiod_set_value(arb->our_gpio, 0); udelay(arb->slew_delay_us); dev_err(muxc->dev, "Could not claim bus, timeout\n"); return -EBUSY; } /* * i2c_arbitrator_deselect - release the I2C bus * * Release the I2C bus using the GPIO-based signalling protocol. 
*/ static int i2c_arbitrator_deselect(struct i2c_mux_core *muxc, u32 chan) { const struct i2c_arbitrator_data *arb = i2c_mux_priv(muxc); /* Release the bus and wait for the other master to notice */ gpiod_set_value(arb->our_gpio, 0); udelay(arb->slew_delay_us); return 0; } static int i2c_arbitrator_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct device_node *parent_np; struct i2c_mux_core *muxc; struct i2c_arbitrator_data *arb; struct gpio_desc *dummy; int ret; /* We only support probing from device tree; no platform_data */ if (!np) { dev_err(dev, "Cannot find device tree node\n"); return -ENODEV; } if (dev_get_platdata(dev)) { dev_err(dev, "Platform data is not supported\n"); return -EINVAL; } muxc = i2c_mux_alloc(NULL, dev, 1, sizeof(*arb), I2C_MUX_ARBITRATOR, i2c_arbitrator_select, i2c_arbitrator_deselect); if (!muxc) return -ENOMEM; arb = i2c_mux_priv(muxc); platform_set_drvdata(pdev, muxc); /* Request GPIOs, our GPIO as unclaimed to begin with */ arb->our_gpio = devm_gpiod_get(dev, "our-claim", GPIOD_OUT_LOW); if (IS_ERR(arb->our_gpio)) { dev_err(dev, "could not get \"our-claim\" GPIO (%ld)\n", PTR_ERR(arb->our_gpio)); return PTR_ERR(arb->our_gpio); } arb->their_gpio = devm_gpiod_get(dev, "their-claim", GPIOD_IN); if (IS_ERR(arb->their_gpio)) { dev_err(dev, "could not get \"their-claim\" GPIO (%ld)\n", PTR_ERR(arb->their_gpio)); return PTR_ERR(arb->their_gpio); } /* At the moment we only support a single two master (us + 1 other) */ dummy = devm_gpiod_get_index(dev, "their-claim", 1, GPIOD_IN); if (!IS_ERR(dummy)) { dev_err(dev, "Only one other master is supported\n"); return -EINVAL; } else if (PTR_ERR(dummy) == -EPROBE_DEFER) { return -EPROBE_DEFER; } /* Arbitration parameters */ if (of_property_read_u32(np, "slew-delay-us", &arb->slew_delay_us)) arb->slew_delay_us = 10; if (of_property_read_u32(np, "wait-retry-us", &arb->wait_retry_us)) arb->wait_retry_us = 3000; if (of_property_read_u32(np, "wait-free-us", &arb->wait_free_us)) arb->wait_free_us = 50000; /* Find our parent */ parent_np = of_parse_phandle(np, "i2c-parent", 0); if (!parent_np) { dev_err(dev, "Cannot parse i2c-parent\n"); return -EINVAL; } muxc->parent = of_get_i2c_adapter_by_node(parent_np); of_node_put(parent_np); if (!muxc->parent) { dev_err(dev, "Cannot find parent bus\n"); return -EPROBE_DEFER; } /* Actually add the mux adapter */ ret = i2c_mux_add_adapter(muxc, 0, 0, 0); if (ret) i2c_put_adapter(muxc->parent); return ret; } static void i2c_arbitrator_remove(struct platform_device *pdev) { struct i2c_mux_core *muxc = platform_get_drvdata(pdev); i2c_mux_del_adapters(muxc); i2c_put_adapter(muxc->parent); } static const struct of_device_id i2c_arbitrator_of_match[] = { { .compatible = "i2c-arb-gpio-challenge", }, {}, }; MODULE_DEVICE_TABLE(of, i2c_arbitrator_of_match); static struct platform_driver i2c_arbitrator_driver = { .probe = i2c_arbitrator_probe, .remove_new = i2c_arbitrator_remove, .driver = { .name = "i2c-arb-gpio-challenge", .of_match_table = i2c_arbitrator_of_match, }, }; module_platform_driver(i2c_arbitrator_driver); MODULE_DESCRIPTION("GPIO-based I2C Arbitration"); MODULE_AUTHOR("Doug Anderson <[email protected]>"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:i2c-arb-gpio-challenge");
linux-master
drivers/i2c/muxes/i2c-arb-gpio-challenge.c
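The claim protocol in i2c_arbitrator_select() above is: assert our claim GPIO, poll the other master's claim line for wait_retry_us, back off and retry until wait_free_us expires. The toy userspace program below simulates that loop with counters instead of GPIOs and jiffies, purely to make the retry structure explicit.

#include <stdbool.h>
#include <stdio.h>

static int their_claim_polls_left = 7;	/* other master releases after 7 polls */

static bool their_claim_asserted(void)
{
	return their_claim_polls_left-- > 0;
}

static bool try_claim(int polls_per_round, int rounds)
{
	for (int round = 0; round < rounds; round++) {
		printf("round %d: assert our claim line\n", round);
		for (int poll = 0; poll < polls_per_round; poll++) {
			if (!their_claim_asserted()) {
				printf("other master released, bus is ours\n");
				return true;
			}
		}
		printf("still busy: release our claim and back off\n");
	}
	printf("wait_free timeout, giving up\n");
	return false;
}

int main(void)
{
	/*
	 * polls_per_round plays the role of wait_retry_us / poll period,
	 * rounds the role of wait_free_us / wait_retry_us.
	 */
	return try_claim(5, 3) ? 0 : 1;
}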
// SPDX-License-Identifier: GPL-2.0-only /* * I2C multiplexer using pinctrl API * * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. */ #include <linux/i2c.h> #include <linux/i2c-mux.h> #include <linux/module.h> #include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/of.h> #include "../../pinctrl/core.h" struct i2c_mux_pinctrl { struct pinctrl *pinctrl; struct pinctrl_state *states[]; }; static int i2c_mux_pinctrl_select(struct i2c_mux_core *muxc, u32 chan) { struct i2c_mux_pinctrl *mux = i2c_mux_priv(muxc); return pinctrl_select_state(mux->pinctrl, mux->states[chan]); } static int i2c_mux_pinctrl_deselect(struct i2c_mux_core *muxc, u32 chan) { return i2c_mux_pinctrl_select(muxc, muxc->num_adapters); } static struct i2c_adapter *i2c_mux_pinctrl_root_adapter( struct pinctrl_state *state) { struct i2c_adapter *root = NULL; struct pinctrl_setting *setting; struct i2c_adapter *pin_root; list_for_each_entry(setting, &state->settings, node) { pin_root = i2c_root_adapter(setting->pctldev->dev); if (!pin_root) return NULL; if (!root) root = pin_root; else if (root != pin_root) return NULL; } return root; } static struct i2c_adapter *i2c_mux_pinctrl_parent_adapter(struct device *dev) { struct device_node *np = dev->of_node; struct device_node *parent_np; struct i2c_adapter *parent; parent_np = of_parse_phandle(np, "i2c-parent", 0); if (!parent_np) { dev_err(dev, "Cannot parse i2c-parent\n"); return ERR_PTR(-ENODEV); } parent = of_find_i2c_adapter_by_node(parent_np); of_node_put(parent_np); if (!parent) return ERR_PTR(-EPROBE_DEFER); return parent; } static int i2c_mux_pinctrl_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct i2c_mux_core *muxc; struct i2c_mux_pinctrl *mux; struct i2c_adapter *parent; struct i2c_adapter *root; int num_names, i, ret; const char *name; num_names = of_property_count_strings(np, "pinctrl-names"); if (num_names < 0) { dev_err(dev, "Cannot parse pinctrl-names: %d\n", num_names); return num_names; } parent = i2c_mux_pinctrl_parent_adapter(dev); if (IS_ERR(parent)) return PTR_ERR(parent); muxc = i2c_mux_alloc(parent, dev, num_names, struct_size(mux, states, num_names), 0, i2c_mux_pinctrl_select, NULL); if (!muxc) { ret = -ENOMEM; goto err_put_parent; } mux = i2c_mux_priv(muxc); platform_set_drvdata(pdev, muxc); mux->pinctrl = devm_pinctrl_get(dev); if (IS_ERR(mux->pinctrl)) { ret = PTR_ERR(mux->pinctrl); dev_err(dev, "Cannot get pinctrl: %d\n", ret); goto err_put_parent; } for (i = 0; i < num_names; i++) { ret = of_property_read_string_index(np, "pinctrl-names", i, &name); if (ret < 0) { dev_err(dev, "Cannot parse pinctrl-names: %d\n", ret); goto err_put_parent; } mux->states[i] = pinctrl_lookup_state(mux->pinctrl, name); if (IS_ERR(mux->states[i])) { ret = PTR_ERR(mux->states[i]); dev_err(dev, "Cannot look up pinctrl state %s: %d\n", name, ret); goto err_put_parent; } if (strcmp(name, "idle")) continue; if (i != num_names - 1) { dev_err(dev, "idle state must be last\n"); ret = -EINVAL; goto err_put_parent; } muxc->deselect = i2c_mux_pinctrl_deselect; } root = i2c_root_adapter(&muxc->parent->dev); muxc->mux_locked = true; for (i = 0; i < num_names; i++) { if (root != i2c_mux_pinctrl_root_adapter(mux->states[i])) { muxc->mux_locked = false; break; } } if (muxc->mux_locked) dev_info(dev, "mux-locked i2c mux\n"); /* Do not add any adapter for the idle state (if it's there at all). 
*/ for (i = 0; i < num_names - !!muxc->deselect; i++) { ret = i2c_mux_add_adapter(muxc, 0, i, 0); if (ret) goto err_del_adapter; } return 0; err_del_adapter: i2c_mux_del_adapters(muxc); err_put_parent: i2c_put_adapter(parent); return ret; } static void i2c_mux_pinctrl_remove(struct platform_device *pdev) { struct i2c_mux_core *muxc = platform_get_drvdata(pdev); i2c_mux_del_adapters(muxc); i2c_put_adapter(muxc->parent); } static const struct of_device_id i2c_mux_pinctrl_of_match[] = { { .compatible = "i2c-mux-pinctrl", }, {}, }; MODULE_DEVICE_TABLE(of, i2c_mux_pinctrl_of_match); static struct platform_driver i2c_mux_pinctrl_driver = { .driver = { .name = "i2c-mux-pinctrl", .of_match_table = i2c_mux_pinctrl_of_match, }, .probe = i2c_mux_pinctrl_probe, .remove_new = i2c_mux_pinctrl_remove, }; module_platform_driver(i2c_mux_pinctrl_driver); MODULE_DESCRIPTION("pinctrl-based I2C multiplexer driver"); MODULE_AUTHOR("Stephen Warren <[email protected]>"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:i2c-mux-pinctrl");
linux-master
drivers/i2c/muxes/i2c-mux-pinctrl.c
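In the probe() above, every pinctrl-names entry becomes a child adapter except an optional trailing "idle" state, which is instead selected by i2c_mux_pinctrl_deselect() through index num_adapters. The standalone sketch below shows that mapping for an example name list (the names themselves are illustrative).

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* example pinctrl-names list; any names work, "idle" must be last */
	const char *names[] = { "bus-a", "bus-b", "idle" };
	int num_names = sizeof(names) / sizeof(names[0]);
	int has_idle = strcmp(names[num_names - 1], "idle") == 0;
	int num_adapters = num_names - has_idle;

	printf("%d pinctrl states -> %d child adapters\n", num_names, num_adapters);
	if (has_idle)
		printf("deselect uses state index %d (the idle entry)\n", num_adapters);
	return 0;
}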
// SPDX-License-Identifier: GPL-2.0-or-later /* * I2C multiplexer using a single register * * Copyright 2015 Freescale Semiconductor * York Sun <[email protected]> */ #include <linux/i2c.h> #include <linux/i2c-mux.h> #include <linux/init.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of_address.h> #include <linux/platform_data/i2c-mux-reg.h> #include <linux/platform_device.h> #include <linux/slab.h> struct regmux { struct i2c_mux_reg_platform_data data; }; static int i2c_mux_reg_set(const struct regmux *mux, unsigned int chan_id) { if (!mux->data.reg) return -EINVAL; /* * Write to the register, followed by a read to ensure the write is * completed on a "posted" bus, for example PCI or write buffers. * The endianness of reading doesn't matter and the return data * is not used. */ switch (mux->data.reg_size) { case 4: if (mux->data.little_endian) iowrite32(chan_id, mux->data.reg); else iowrite32be(chan_id, mux->data.reg); if (!mux->data.write_only) ioread32(mux->data.reg); break; case 2: if (mux->data.little_endian) iowrite16(chan_id, mux->data.reg); else iowrite16be(chan_id, mux->data.reg); if (!mux->data.write_only) ioread16(mux->data.reg); break; case 1: iowrite8(chan_id, mux->data.reg); if (!mux->data.write_only) ioread8(mux->data.reg); break; } return 0; } static int i2c_mux_reg_select(struct i2c_mux_core *muxc, u32 chan) { struct regmux *mux = i2c_mux_priv(muxc); return i2c_mux_reg_set(mux, chan); } static int i2c_mux_reg_deselect(struct i2c_mux_core *muxc, u32 chan) { struct regmux *mux = i2c_mux_priv(muxc); if (mux->data.idle_in_use) return i2c_mux_reg_set(mux, mux->data.idle); return 0; } #ifdef CONFIG_OF static int i2c_mux_reg_probe_dt(struct regmux *mux, struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct device_node *adapter_np, *child; struct i2c_adapter *adapter; struct resource res; unsigned *values; int i = 0; if (!np) return -ENODEV; adapter_np = of_parse_phandle(np, "i2c-parent", 0); if (!adapter_np) { dev_err(&pdev->dev, "Cannot parse i2c-parent\n"); return -ENODEV; } adapter = of_find_i2c_adapter_by_node(adapter_np); of_node_put(adapter_np); if (!adapter) return -EPROBE_DEFER; mux->data.parent = i2c_adapter_id(adapter); put_device(&adapter->dev); mux->data.n_values = of_get_child_count(np); if (of_property_read_bool(np, "little-endian")) { mux->data.little_endian = true; } else if (of_property_read_bool(np, "big-endian")) { mux->data.little_endian = false; } else { #if defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : \ defined(__LITTLE_ENDIAN) mux->data.little_endian = true; #elif defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : \ defined(__BIG_ENDIAN) mux->data.little_endian = false; #else #error Endianness not defined? 
#endif } mux->data.write_only = of_property_read_bool(np, "write-only"); values = devm_kcalloc(&pdev->dev, mux->data.n_values, sizeof(*mux->data.values), GFP_KERNEL); if (!values) return -ENOMEM; for_each_child_of_node(np, child) { of_property_read_u32(child, "reg", values + i); i++; } mux->data.values = values; if (!of_property_read_u32(np, "idle-state", &mux->data.idle)) mux->data.idle_in_use = true; /* map address from "reg" if exists */ if (of_address_to_resource(np, 0, &res) == 0) { mux->data.reg_size = resource_size(&res); mux->data.reg = devm_ioremap_resource(&pdev->dev, &res); if (IS_ERR(mux->data.reg)) return PTR_ERR(mux->data.reg); } return 0; } #else static int i2c_mux_reg_probe_dt(struct regmux *mux, struct platform_device *pdev) { return 0; } #endif static int i2c_mux_reg_probe(struct platform_device *pdev) { struct i2c_mux_core *muxc; struct regmux *mux; struct i2c_adapter *parent; struct resource *res; unsigned int class; int i, ret, nr; mux = devm_kzalloc(&pdev->dev, sizeof(*mux), GFP_KERNEL); if (!mux) return -ENOMEM; if (dev_get_platdata(&pdev->dev)) { memcpy(&mux->data, dev_get_platdata(&pdev->dev), sizeof(mux->data)); } else { ret = i2c_mux_reg_probe_dt(mux, pdev); if (ret < 0) return dev_err_probe(&pdev->dev, ret, "Error parsing device tree"); } parent = i2c_get_adapter(mux->data.parent); if (!parent) return -EPROBE_DEFER; if (!mux->data.reg) { dev_info(&pdev->dev, "Register not set, using platform resource\n"); mux->data.reg = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(mux->data.reg)) { ret = PTR_ERR(mux->data.reg); goto err_put_parent; } mux->data.reg_size = resource_size(res); } if (mux->data.reg_size != 4 && mux->data.reg_size != 2 && mux->data.reg_size != 1) { dev_err(&pdev->dev, "Invalid register size\n"); ret = -EINVAL; goto err_put_parent; } muxc = i2c_mux_alloc(parent, &pdev->dev, mux->data.n_values, 0, 0, i2c_mux_reg_select, NULL); if (!muxc) { ret = -ENOMEM; goto err_put_parent; } muxc->priv = mux; platform_set_drvdata(pdev, muxc); if (mux->data.idle_in_use) muxc->deselect = i2c_mux_reg_deselect; for (i = 0; i < mux->data.n_values; i++) { nr = mux->data.base_nr ? (mux->data.base_nr + i) : 0; class = mux->data.classes ? mux->data.classes[i] : 0; ret = i2c_mux_add_adapter(muxc, nr, mux->data.values[i], class); if (ret) goto err_del_mux_adapters; } dev_dbg(&pdev->dev, "%d port mux on %s adapter\n", mux->data.n_values, muxc->parent->name); return 0; err_del_mux_adapters: i2c_mux_del_adapters(muxc); err_put_parent: i2c_put_adapter(parent); return ret; } static void i2c_mux_reg_remove(struct platform_device *pdev) { struct i2c_mux_core *muxc = platform_get_drvdata(pdev); i2c_mux_del_adapters(muxc); i2c_put_adapter(muxc->parent); } static const struct of_device_id i2c_mux_reg_of_match[] = { { .compatible = "i2c-mux-reg", }, {}, }; MODULE_DEVICE_TABLE(of, i2c_mux_reg_of_match); static struct platform_driver i2c_mux_reg_driver = { .probe = i2c_mux_reg_probe, .remove_new = i2c_mux_reg_remove, .driver = { .name = "i2c-mux-reg", .of_match_table = of_match_ptr(i2c_mux_reg_of_match), }, }; module_platform_driver(i2c_mux_reg_driver); MODULE_DESCRIPTION("Register-based I2C multiplexer driver"); MODULE_AUTHOR("York Sun <[email protected]>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:i2c-mux-reg");
linux-master
drivers/i2c/muxes/i2c-mux-reg.c
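For non-DT platforms the driver above takes everything from struct i2c_mux_reg_platform_data. A hedged board-file sketch follows, using only the fields referenced in the code; the parent bus number and channel values are made up, and the select register itself would come from a MEM resource attached to the "i2c-mux-reg" platform device (probe() maps it when .reg is left NULL).

#include <linux/kernel.h>
#include <linux/platform_data/i2c-mux-reg.h>
#include <linux/platform_device.h>

static unsigned int demo_mux_reg_values[] = { 0, 1, 2, 3 };	/* register value per channel */

static struct i2c_mux_reg_platform_data demo_mux_reg_pdata = {
	.parent		= 1,			/* upstream adapter number (example) */
	.values		= demo_mux_reg_values,
	.n_values	= ARRAY_SIZE(demo_mux_reg_values),
	.write_only	= true,			/* skip the read-back after writing */
	.idle		= 0,
	.idle_in_use	= true,			/* write .idle on deselect */
	/*
	 * .reg and .reg_size are left unset here: probe() then takes the
	 * register from the MEM resource of the "i2c-mux-reg" platform
	 * device that this pdata would be attached to.
	 */
};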
// SPDX-License-Identifier: GPL-2.0-only /* * I2C multiplexer using GPIO API * * Peter Korsgaard <[email protected]> */ #include <linux/i2c.h> #include <linux/i2c-mux.h> #include <linux/overflow.h> #include <linux/platform_data/i2c-mux-gpio.h> #include <linux/platform_device.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/bits.h> #include <linux/gpio/consumer.h> /* FIXME: stop poking around inside gpiolib */ #include "../../gpio/gpiolib.h" struct gpiomux { struct i2c_mux_gpio_platform_data data; int ngpios; struct gpio_desc **gpios; }; static void i2c_mux_gpio_set(const struct gpiomux *mux, unsigned val) { DECLARE_BITMAP(values, BITS_PER_TYPE(val)); values[0] = val; gpiod_set_array_value_cansleep(mux->ngpios, mux->gpios, NULL, values); } static int i2c_mux_gpio_select(struct i2c_mux_core *muxc, u32 chan) { struct gpiomux *mux = i2c_mux_priv(muxc); i2c_mux_gpio_set(mux, chan); return 0; } static int i2c_mux_gpio_deselect(struct i2c_mux_core *muxc, u32 chan) { struct gpiomux *mux = i2c_mux_priv(muxc); i2c_mux_gpio_set(mux, mux->data.idle); return 0; } static int i2c_mux_gpio_probe_fw(struct gpiomux *mux, struct platform_device *pdev) { struct device *dev = &pdev->dev; struct fwnode_handle *fwnode = dev_fwnode(dev); struct device_node *np = dev->of_node; struct device_node *adapter_np; struct i2c_adapter *adapter = NULL; struct fwnode_handle *child; unsigned *values; int rc, i = 0; if (is_of_node(fwnode)) { if (!np) return -ENODEV; adapter_np = of_parse_phandle(np, "i2c-parent", 0); if (!adapter_np) { dev_err(&pdev->dev, "Cannot parse i2c-parent\n"); return -ENODEV; } adapter = of_find_i2c_adapter_by_node(adapter_np); of_node_put(adapter_np); } else if (is_acpi_node(fwnode)) { /* * In ACPI land the mux should be a direct child of the i2c * bus it muxes. 
*/ acpi_handle dev_handle = ACPI_HANDLE(dev->parent); adapter = i2c_acpi_find_adapter_by_handle(dev_handle); } if (!adapter) return -EPROBE_DEFER; mux->data.parent = i2c_adapter_id(adapter); put_device(&adapter->dev); mux->data.n_values = device_get_child_node_count(dev); values = devm_kcalloc(dev, mux->data.n_values, sizeof(*mux->data.values), GFP_KERNEL); if (!values) { dev_err(dev, "Cannot allocate values array"); return -ENOMEM; } device_for_each_child_node(dev, child) { if (is_of_node(child)) { fwnode_property_read_u32(child, "reg", values + i); } else if (is_acpi_node(child)) { rc = acpi_get_local_address(ACPI_HANDLE_FWNODE(child), values + i); if (rc) { fwnode_handle_put(child); return dev_err_probe(dev, rc, "Cannot get address\n"); } } i++; } mux->data.values = values; if (device_property_read_u32(dev, "idle-state", &mux->data.idle)) mux->data.idle = I2C_MUX_GPIO_NO_IDLE; return 0; } static int i2c_mux_gpio_probe(struct platform_device *pdev) { struct i2c_mux_core *muxc; struct gpiomux *mux; struct i2c_adapter *parent; struct i2c_adapter *root; unsigned initial_state; int i, ngpios, ret; mux = devm_kzalloc(&pdev->dev, sizeof(*mux), GFP_KERNEL); if (!mux) return -ENOMEM; if (!dev_get_platdata(&pdev->dev)) { ret = i2c_mux_gpio_probe_fw(mux, pdev); if (ret < 0) return ret; } else { memcpy(&mux->data, dev_get_platdata(&pdev->dev), sizeof(mux->data)); } ngpios = gpiod_count(&pdev->dev, "mux"); if (ngpios <= 0) { dev_err(&pdev->dev, "no valid gpios provided\n"); return ngpios ?: -EINVAL; } mux->ngpios = ngpios; parent = i2c_get_adapter(mux->data.parent); if (!parent) return -EPROBE_DEFER; muxc = i2c_mux_alloc(parent, &pdev->dev, mux->data.n_values, array_size(ngpios, sizeof(*mux->gpios)), 0, i2c_mux_gpio_select, NULL); if (!muxc) { ret = -ENOMEM; goto alloc_failed; } mux->gpios = muxc->priv; muxc->priv = mux; platform_set_drvdata(pdev, muxc); root = i2c_root_adapter(&parent->dev); muxc->mux_locked = true; if (mux->data.idle != I2C_MUX_GPIO_NO_IDLE) { initial_state = mux->data.idle; muxc->deselect = i2c_mux_gpio_deselect; } else { initial_state = mux->data.values[0]; } for (i = 0; i < ngpios; i++) { struct device *gpio_dev; struct gpio_desc *gpiod; enum gpiod_flags flag; if (initial_state & BIT(i)) flag = GPIOD_OUT_HIGH; else flag = GPIOD_OUT_LOW; gpiod = devm_gpiod_get_index(&pdev->dev, "mux", i, flag); if (IS_ERR(gpiod)) { ret = PTR_ERR(gpiod); goto alloc_failed; } mux->gpios[i] = gpiod; if (!muxc->mux_locked) continue; /* FIXME: find a proper way to access the GPIO device */ gpio_dev = &gpiod->gdev->dev; muxc->mux_locked = i2c_root_adapter(gpio_dev) == root; } if (muxc->mux_locked) dev_info(&pdev->dev, "mux-locked i2c mux\n"); for (i = 0; i < mux->data.n_values; i++) { u32 nr = mux->data.base_nr ? (mux->data.base_nr + i) : 0; unsigned int class = mux->data.classes ? 
mux->data.classes[i] : 0; ret = i2c_mux_add_adapter(muxc, nr, mux->data.values[i], class); if (ret) goto add_adapter_failed; } dev_info(&pdev->dev, "%d port mux on %s adapter\n", mux->data.n_values, parent->name); return 0; add_adapter_failed: i2c_mux_del_adapters(muxc); alloc_failed: i2c_put_adapter(parent); return ret; } static void i2c_mux_gpio_remove(struct platform_device *pdev) { struct i2c_mux_core *muxc = platform_get_drvdata(pdev); i2c_mux_del_adapters(muxc); i2c_put_adapter(muxc->parent); } static const struct of_device_id i2c_mux_gpio_of_match[] = { { .compatible = "i2c-mux-gpio", }, {}, }; MODULE_DEVICE_TABLE(of, i2c_mux_gpio_of_match); static struct platform_driver i2c_mux_gpio_driver = { .probe = i2c_mux_gpio_probe, .remove_new = i2c_mux_gpio_remove, .driver = { .name = "i2c-mux-gpio", .of_match_table = i2c_mux_gpio_of_match, }, }; module_platform_driver(i2c_mux_gpio_driver); MODULE_DESCRIPTION("GPIO-based I2C multiplexer driver"); MODULE_AUTHOR("Peter Korsgaard <[email protected]>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:i2c-mux-gpio");
linux-master
drivers/i2c/muxes/i2c-mux-gpio.c
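i2c_mux_gpio_set() above packs the selected channel number into a one-word bitmap and hands it to gpiod_set_array_value_cansleep(), so bit i of the channel value drives mux GPIO i. The standalone plain-C sketch below (user space, no kernel APIs) only illustrates that mapping.

/* Plain-C illustration of the channel-to-GPIO mapping used by
 * i2c_mux_gpio_set(): bit i of the channel value sets mux line i. */
#include <stdio.h>

int main(void)
{
	unsigned int ngpios = 3;	/* three mux lines -> up to 8 channels */

	for (unsigned int chan = 0; chan < (1u << ngpios); chan++) {
		printf("channel %u ->", chan);
		for (unsigned int i = 0; i < ngpios; i++)
			printf(" gpio%u=%u", i, (chan >> i) & 1);
		printf("\n");
	}
	return 0;
}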
/* * I2C multiplexer driver for PCA9541 bus master selector * * Copyright (c) 2010 Ericsson AB. * * Author: Guenter Roeck <[email protected]> * * Derived from: * pca954x.c * * Copyright (c) 2008-2009 Rodolfo Giometti <[email protected]> * Copyright (c) 2008-2009 Eurotech S.p.A. <[email protected]> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/bitops.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/i2c.h> #include <linux/i2c-mux.h> #include <linux/jiffies.h> #include <linux/module.h> #include <linux/slab.h> /* * The PCA9541 is a bus master selector. It supports two I2C masters connected * to a single slave bus. * * Before each bus transaction, a master has to acquire bus ownership. After the * transaction is complete, bus ownership has to be released. This fits well * into the I2C multiplexer framework, which provides select and release * functions for this purpose. For this reason, this driver is modeled as * single-channel I2C bus multiplexer. * * This driver assumes that the two bus masters are controlled by two different * hosts. If a single host controls both masters, platform code has to ensure * that only one of the masters is instantiated at any given time. */ #define PCA9541_CONTROL 0x01 #define PCA9541_ISTAT 0x02 #define PCA9541_CTL_MYBUS BIT(0) #define PCA9541_CTL_NMYBUS BIT(1) #define PCA9541_CTL_BUSON BIT(2) #define PCA9541_CTL_NBUSON BIT(3) #define PCA9541_CTL_BUSINIT BIT(4) #define PCA9541_CTL_TESTON BIT(6) #define PCA9541_CTL_NTESTON BIT(7) #define PCA9541_ISTAT_INTIN BIT(0) #define PCA9541_ISTAT_BUSINIT BIT(1) #define PCA9541_ISTAT_BUSOK BIT(2) #define PCA9541_ISTAT_BUSLOST BIT(3) #define PCA9541_ISTAT_MYTEST BIT(6) #define PCA9541_ISTAT_NMYTEST BIT(7) #define BUSON (PCA9541_CTL_BUSON | PCA9541_CTL_NBUSON) #define MYBUS (PCA9541_CTL_MYBUS | PCA9541_CTL_NMYBUS) #define mybus(x) (!((x) & MYBUS) || ((x) & MYBUS) == MYBUS) #define busoff(x) (!((x) & BUSON) || ((x) & BUSON) == BUSON) /* arbitration timeouts, in jiffies */ #define ARB_TIMEOUT (HZ / 8) /* 125 ms until forcing bus ownership */ #define ARB2_TIMEOUT (HZ / 4) /* 250 ms until acquisition failure */ /* arbitration retry delays, in us */ #define SELECT_DELAY_SHORT 50 #define SELECT_DELAY_LONG 1000 struct pca9541 { struct i2c_client *client; unsigned long select_timeout; unsigned long arb_timeout; }; static const struct i2c_device_id pca9541_id[] = { {"pca9541", 0}, {} }; MODULE_DEVICE_TABLE(i2c, pca9541_id); #ifdef CONFIG_OF static const struct of_device_id pca9541_of_match[] = { { .compatible = "nxp,pca9541" }, {} }; MODULE_DEVICE_TABLE(of, pca9541_of_match); #endif /* * Write to chip register. Don't use i2c_transfer()/i2c_smbus_xfer() * as they will try to lock the adapter a second time. */ static int pca9541_reg_write(struct i2c_client *client, u8 command, u8 val) { struct i2c_adapter *adap = client->adapter; union i2c_smbus_data data = { .byte = val }; return __i2c_smbus_xfer(adap, client->addr, client->flags, I2C_SMBUS_WRITE, command, I2C_SMBUS_BYTE_DATA, &data); } /* * Read from chip register. Don't use i2c_transfer()/i2c_smbus_xfer() * as they will try to lock adapter a second time. 
*/ static int pca9541_reg_read(struct i2c_client *client, u8 command) { struct i2c_adapter *adap = client->adapter; union i2c_smbus_data data; int ret; ret = __i2c_smbus_xfer(adap, client->addr, client->flags, I2C_SMBUS_READ, command, I2C_SMBUS_BYTE_DATA, &data); return ret ?: data.byte; } /* * Arbitration management functions */ /* Release bus. Also reset NTESTON and BUSINIT if it was set. */ static void pca9541_release_bus(struct i2c_client *client) { int reg; reg = pca9541_reg_read(client, PCA9541_CONTROL); if (reg >= 0 && !busoff(reg) && mybus(reg)) pca9541_reg_write(client, PCA9541_CONTROL, (reg & PCA9541_CTL_NBUSON) >> 1); } /* * Arbitration is defined as a two-step process. A bus master can only activate * the slave bus if it owns it; otherwise it has to request ownership first. * This multi-step process ensures that access contention is resolved * gracefully. * * Bus Ownership Other master Action * state requested access * ---------------------------------------------------- * off - yes wait for arbitration timeout or * for other master to drop request * off no no take ownership * off yes no turn on bus * on yes - done * on no - wait for arbitration timeout or * for other master to release bus * * The main contention point occurs if the slave bus is off and both masters * request ownership at the same time. In this case, one master will turn on * the slave bus, believing that it owns it. The other master will request * bus ownership. Result is that the bus is turned on, and master which did * _not_ own the slave bus before ends up owning it. */ /* Control commands per PCA9541 datasheet */ static const u8 pca9541_control[16] = { 4, 0, 1, 5, 4, 4, 5, 5, 0, 0, 1, 1, 0, 4, 5, 1 }; /* * Channel arbitration * * Return values: * <0: error * 0 : bus not acquired * 1 : bus acquired */ static int pca9541_arbitrate(struct i2c_client *client) { struct i2c_mux_core *muxc = i2c_get_clientdata(client); struct pca9541 *data = i2c_mux_priv(muxc); int reg; reg = pca9541_reg_read(client, PCA9541_CONTROL); if (reg < 0) return reg; if (busoff(reg)) { int istat; /* * Bus is off. Request ownership or turn it on unless * other master requested ownership. */ istat = pca9541_reg_read(client, PCA9541_ISTAT); if (!(istat & PCA9541_ISTAT_NMYTEST) || time_is_before_eq_jiffies(data->arb_timeout)) { /* * Other master did not request ownership, * or arbitration timeout expired. Take the bus. */ pca9541_reg_write(client, PCA9541_CONTROL, pca9541_control[reg & 0x0f] | PCA9541_CTL_NTESTON); data->select_timeout = SELECT_DELAY_SHORT; } else { /* * Other master requested ownership. * Set extra long timeout to give it time to acquire it. */ data->select_timeout = SELECT_DELAY_LONG * 2; } } else if (mybus(reg)) { /* * Bus is on, and we own it. We are done with acquisition. * Reset NTESTON and BUSINIT, then return success. */ if (reg & (PCA9541_CTL_NTESTON | PCA9541_CTL_BUSINIT)) pca9541_reg_write(client, PCA9541_CONTROL, reg & ~(PCA9541_CTL_NTESTON | PCA9541_CTL_BUSINIT)); return 1; } else { /* * Other master owns the bus. * If arbitration timeout has expired, force ownership. * Otherwise request it. */ data->select_timeout = SELECT_DELAY_LONG; if (time_is_before_eq_jiffies(data->arb_timeout)) { /* Time is up, take the bus and reset it. 
*/ pca9541_reg_write(client, PCA9541_CONTROL, pca9541_control[reg & 0x0f] | PCA9541_CTL_BUSINIT | PCA9541_CTL_NTESTON); } else { /* Request bus ownership if needed */ if (!(reg & PCA9541_CTL_NTESTON)) pca9541_reg_write(client, PCA9541_CONTROL, reg | PCA9541_CTL_NTESTON); } } return 0; } static int pca9541_select_chan(struct i2c_mux_core *muxc, u32 chan) { struct pca9541 *data = i2c_mux_priv(muxc); struct i2c_client *client = data->client; int ret; unsigned long timeout = jiffies + ARB2_TIMEOUT; /* give up after this time */ data->arb_timeout = jiffies + ARB_TIMEOUT; /* force bus ownership after this time */ do { ret = pca9541_arbitrate(client); if (ret) return ret < 0 ? ret : 0; if (data->select_timeout == SELECT_DELAY_SHORT) udelay(data->select_timeout); else msleep(data->select_timeout / 1000); } while (time_is_after_eq_jiffies(timeout)); return -ETIMEDOUT; } static int pca9541_release_chan(struct i2c_mux_core *muxc, u32 chan) { struct pca9541 *data = i2c_mux_priv(muxc); struct i2c_client *client = data->client; pca9541_release_bus(client); return 0; } /* * I2C init/probing/exit functions */ static int pca9541_probe(struct i2c_client *client) { struct i2c_adapter *adap = client->adapter; struct i2c_mux_core *muxc; struct pca9541 *data; int ret; if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_BYTE_DATA)) return -ENODEV; /* * I2C accesses are unprotected here. * We have to lock the I2C segment before releasing the bus. */ i2c_lock_bus(adap, I2C_LOCK_SEGMENT); pca9541_release_bus(client); i2c_unlock_bus(adap, I2C_LOCK_SEGMENT); /* Create mux adapter */ muxc = i2c_mux_alloc(adap, &client->dev, 1, sizeof(*data), I2C_MUX_ARBITRATOR, pca9541_select_chan, pca9541_release_chan); if (!muxc) return -ENOMEM; data = i2c_mux_priv(muxc); data->client = client; i2c_set_clientdata(client, muxc); ret = i2c_mux_add_adapter(muxc, 0, 0, 0); if (ret) return ret; dev_info(&client->dev, "registered master selector for I2C %s\n", client->name); return 0; } static void pca9541_remove(struct i2c_client *client) { struct i2c_mux_core *muxc = i2c_get_clientdata(client); i2c_mux_del_adapters(muxc); } static struct i2c_driver pca9541_driver = { .driver = { .name = "pca9541", .of_match_table = of_match_ptr(pca9541_of_match), }, .probe = pca9541_probe, .remove = pca9541_remove, .id_table = pca9541_id, }; module_i2c_driver(pca9541_driver); MODULE_AUTHOR("Guenter Roeck <[email protected]>"); MODULE_DESCRIPTION("PCA9541 I2C master selector driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/i2c/muxes/i2c-mux-pca9541.c
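The PCA9541 arbitration code above keys everything off two pairs of redundant control bits: the bus counts as "mine" when the MYBUS pair agrees and as "off" when the BUSON pair agrees. The plain-C model below copies the bit definitions and the mybus()/busoff() predicates from the driver and prints the decode for all sixteen control values; it is only an illustration, not kernel code.

/* Plain-C model of the PCA9541 control-register predicates; the bit
 * values and expressions are copied from the driver's #defines. */
#include <stdio.h>

#define CTL_MYBUS	(1u << 0)
#define CTL_NMYBUS	(1u << 1)
#define CTL_BUSON	(1u << 2)
#define CTL_NBUSON	(1u << 3)

#define MYBUS		(CTL_MYBUS | CTL_NMYBUS)
#define BUSON		(CTL_BUSON | CTL_NBUSON)

/* true when both bits of the pair are equal, as in the driver macros */
static int mybus(unsigned int x)  { return !(x & MYBUS) || (x & MYBUS) == MYBUS; }
static int busoff(unsigned int x) { return !(x & BUSON) || (x & BUSON) == BUSON; }

int main(void)
{
	for (unsigned int ctl = 0; ctl < 16; ctl++)
		printf("ctl=0x%02x  busoff=%d  mybus=%d\n",
		       ctl, busoff(ctl), mybus(ctl));
	return 0;
}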
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014 Intel Corporation
 *
 * Authors:
 *	Chen, Gong <[email protected]>
 */
#include <linux/init.h>
#include <linux/ras.h>
#include <linux/uuid.h>

#define CREATE_TRACE_POINTS
#define TRACE_INCLUDE_PATH ../../include/ras
#include <ras/ras_event.h>

void log_non_standard_event(const guid_t *sec_type, const guid_t *fru_id,
			    const char *fru_text, const u8 sev, const u8 *err,
			    const u32 len)
{
	trace_non_standard_event(sec_type, fru_id, fru_text, sev, err, len);
}

void log_arm_hw_error(struct cper_sec_proc_arm *err)
{
	trace_arm_event(err);
}

static int __init ras_init(void)
{
	int rc = 0;

	ras_debugfs_init();
	rc = ras_add_daemon_trace();

	return rc;
}
subsys_initcall(ras_init);

#if defined(CONFIG_ACPI_EXTLOG) || defined(CONFIG_ACPI_EXTLOG_MODULE)
EXPORT_TRACEPOINT_SYMBOL_GPL(extlog_mem_event);
#endif
EXPORT_TRACEPOINT_SYMBOL_GPL(mc_event);
EXPORT_TRACEPOINT_SYMBOL_GPL(non_standard_event);
EXPORT_TRACEPOINT_SYMBOL_GPL(arm_event);

static int __init parse_ras_param(char *str)
{
#ifdef CONFIG_RAS_CEC
	parse_cec_param(str);
#endif

	return 1;
}
__setup("ras", parse_ras_param);
linux-master
drivers/ras/ras.c
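parse_ras_param() above is registered with __setup("ras", ...), so it receives the text following the "ras" prefix on the kernel command line and forwards it to parse_cec_param() (defined in cec.c further down in this dump), which strips a leading '=' and recognises "cec_disable". The plain-C sketch below mirrors that option handling; parse_cec_option() is a hypothetical stand-in name.

/* Plain-C sketch of the option handling forwarded to parse_cec_param();
 * parse_cec_option() is a hypothetical stand-in name. */
#include <stdio.h>
#include <string.h>

static int cec_disabled;

static int parse_cec_option(char *str)
{
	if (!str)
		return 0;
	if (*str == '=')
		str++;
	if (!strcmp(str, "cec_disable"))
		cec_disabled = 1;
	else
		return 0;
	return 1;
}

int main(void)
{
	char opt[] = "=cec_disable";	/* what "ras=cec_disable" hands down */

	printf("handled=%d, cec disabled=%d\n", parse_cec_option(opt), cec_disabled);
	return 0;
}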
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/debugfs.h>
#include <linux/ras.h>
#include "debugfs.h"

struct dentry *ras_debugfs_dir;

static atomic_t trace_count = ATOMIC_INIT(0);

int ras_userspace_consumers(void)
{
	return atomic_read(&trace_count);
}
EXPORT_SYMBOL_GPL(ras_userspace_consumers);

static int trace_show(struct seq_file *m, void *v)
{
	return 0;
}

static int trace_open(struct inode *inode, struct file *file)
{
	atomic_inc(&trace_count);
	return single_open(file, trace_show, NULL);
}

static int trace_release(struct inode *inode, struct file *file)
{
	atomic_dec(&trace_count);
	return single_release(inode, file);
}

static const struct file_operations trace_fops = {
	.open    = trace_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = trace_release,
};

int __init ras_add_daemon_trace(void)
{
	struct dentry *fentry;

	if (!ras_debugfs_dir)
		return -ENOENT;

	fentry = debugfs_create_file("daemon_active", S_IRUSR, ras_debugfs_dir,
				     NULL, &trace_fops);
	if (IS_ERR(fentry))
		return -ENODEV;

	return 0;
}

void __init ras_debugfs_init(void)
{
	ras_debugfs_dir = debugfs_create_dir("ras", NULL);
}
linux-master
drivers/ras/debugfs.c
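The daemon_active file created above exists only so user space can advertise that a RAS consumer (for example a logging daemon) is running: every open bumps trace_count, which ras_userspace_consumers() reports to the rest of the kernel. A minimal user-space sketch follows; the /sys/kernel/debug mount point for debugfs is the conventional one and is an assumption here.

/* User-space sketch: holding /sys/kernel/debug/ras/daemon_active open
 * keeps ras_userspace_consumers() non-zero until the file is closed. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/ras/daemon_active", O_RDONLY);

	if (fd < 0) {
		perror("open daemon_active");
		return 1;
	}

	/* ... consume RAS trace events here; the open file descriptor is
	 * what signals the kernel that a consumer is present ... */
	pause();

	close(fd);
	return 0;
}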
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2017-2019 Borislav Petkov, SUSE Labs. */ #include <linux/mm.h> #include <linux/gfp.h> #include <linux/ras.h> #include <linux/kernel.h> #include <linux/workqueue.h> #include <asm/mce.h> #include "debugfs.h" /* * RAS Correctable Errors Collector * * This is a simple gadget which collects correctable errors and counts their * occurrence per physical page address. * * We've opted for possibly the simplest data structure to collect those - an * array of the size of a memory page. It stores 512 u64's with the following * structure: * * [63 ... PFN ... 12 | 11 ... generation ... 10 | 9 ... count ... 0] * * The generation in the two highest order bits is two bits which are set to 11b * on every insertion. During the course of each entry's existence, the * generation field gets decremented during spring cleaning to 10b, then 01b and * then 00b. * * This way we're employing the natural numeric ordering to make sure that newly * inserted/touched elements have higher 12-bit counts (which we've manufactured) * and thus iterating over the array initially won't kick out those elements * which were inserted last. * * Spring cleaning is what we do when we reach a certain number CLEAN_ELEMS of * elements entered into the array, during which, we're decaying all elements. * If, after decay, an element gets inserted again, its generation is set to 11b * to make sure it has higher numerical count than other, older elements and * thus emulate an LRU-like behavior when deleting elements to free up space * in the page. * * When an element reaches it's max count of action_threshold, we try to poison * it by assuming that errors triggered action_threshold times in a single page * are excessive and that page shouldn't be used anymore. action_threshold is * initialized to COUNT_MASK which is the maximum. * * That error event entry causes cec_add_elem() to return !0 value and thus * signal to its callers to log the error. * * To the question why we've chosen a page and moving elements around with * memmove(), it is because it is a very simple structure to handle and max data * movement is 4K which on highly optimized modern CPUs is almost unnoticeable. * We wanted to avoid the pointer traversal of more complex structures like a * linked list or some sort of a balancing search tree. * * Deleting an element takes O(n) but since it is only a single page, it should * be fast enough and it shouldn't happen all too often depending on error * patterns. */ #undef pr_fmt #define pr_fmt(fmt) "RAS: " fmt /* * We use DECAY_BITS bits of PAGE_SHIFT bits for counting decay, i.e., how long * elements have stayed in the array without having been accessed again. */ #define DECAY_BITS 2 #define DECAY_MASK ((1ULL << DECAY_BITS) - 1) #define MAX_ELEMS (PAGE_SIZE / sizeof(u64)) /* * Threshold amount of inserted elements after which we start spring * cleaning. */ #define CLEAN_ELEMS (MAX_ELEMS >> DECAY_BITS) /* Bits which count the number of errors happened in this 4K page. */ #define COUNT_BITS (PAGE_SHIFT - DECAY_BITS) #define COUNT_MASK ((1ULL << COUNT_BITS) - 1) #define FULL_COUNT_MASK (PAGE_SIZE - 1) /* * u64: [ 63 ... 
12 | DECAY_BITS | COUNT_BITS ] */ #define PFN(e) ((e) >> PAGE_SHIFT) #define DECAY(e) (((e) >> COUNT_BITS) & DECAY_MASK) #define COUNT(e) ((unsigned int)(e) & COUNT_MASK) #define FULL_COUNT(e) ((e) & (PAGE_SIZE - 1)) static struct ce_array { u64 *array; /* container page */ unsigned int n; /* number of elements in the array */ unsigned int decay_count; /* * number of element insertions/increments * since the last spring cleaning. */ u64 pfns_poisoned; /* * number of PFNs which got poisoned. */ u64 ces_entered; /* * The number of correctable errors * entered into the collector. */ u64 decays_done; /* * Times we did spring cleaning. */ union { struct { __u32 disabled : 1, /* cmdline disabled */ __resv : 31; }; __u32 flags; }; } ce_arr; static DEFINE_MUTEX(ce_mutex); static u64 dfs_pfn; /* Amount of errors after which we offline */ static u64 action_threshold = COUNT_MASK; /* Each element "decays" each decay_interval which is 24hrs by default. */ #define CEC_DECAY_DEFAULT_INTERVAL 24 * 60 * 60 /* 24 hrs */ #define CEC_DECAY_MIN_INTERVAL 1 * 60 * 60 /* 1h */ #define CEC_DECAY_MAX_INTERVAL 30 * 24 * 60 * 60 /* one month */ static struct delayed_work cec_work; static u64 decay_interval = CEC_DECAY_DEFAULT_INTERVAL; /* * Decrement decay value. We're using DECAY_BITS bits to denote decay of an * element in the array. On insertion and any access, it gets reset to max. */ static void do_spring_cleaning(struct ce_array *ca) { int i; for (i = 0; i < ca->n; i++) { u8 decay = DECAY(ca->array[i]); if (!decay) continue; decay--; ca->array[i] &= ~(DECAY_MASK << COUNT_BITS); ca->array[i] |= (decay << COUNT_BITS); } ca->decay_count = 0; ca->decays_done++; } /* * @interval in seconds */ static void cec_mod_work(unsigned long interval) { unsigned long iv; iv = interval * HZ; mod_delayed_work(system_wq, &cec_work, round_jiffies(iv)); } static void cec_work_fn(struct work_struct *work) { mutex_lock(&ce_mutex); do_spring_cleaning(&ce_arr); mutex_unlock(&ce_mutex); cec_mod_work(decay_interval); } /* * @to: index of the smallest element which is >= then @pfn. * * Return the index of the pfn if found, otherwise negative value. */ static int __find_elem(struct ce_array *ca, u64 pfn, unsigned int *to) { int min = 0, max = ca->n - 1; u64 this_pfn; while (min <= max) { int i = (min + max) >> 1; this_pfn = PFN(ca->array[i]); if (this_pfn < pfn) min = i + 1; else if (this_pfn > pfn) max = i - 1; else if (this_pfn == pfn) { if (to) *to = i; return i; } } /* * When the loop terminates without finding @pfn, min has the index of * the element slot where the new @pfn should be inserted. The loop * terminates when min > max, which means the min index points to the * bigger element while the max index to the smaller element, in-between * which the new @pfn belongs to. * * For more details, see exercise 1, Section 6.2.1 in TAOCP, vol. 3. */ if (to) *to = min; return -ENOKEY; } static int find_elem(struct ce_array *ca, u64 pfn, unsigned int *to) { WARN_ON(!to); if (!ca->n) { *to = 0; return -ENOKEY; } return __find_elem(ca, pfn, to); } static void del_elem(struct ce_array *ca, int idx) { /* Save us a function call when deleting the last element. 
*/ if (ca->n - (idx + 1)) memmove((void *)&ca->array[idx], (void *)&ca->array[idx + 1], (ca->n - (idx + 1)) * sizeof(u64)); ca->n--; } static u64 del_lru_elem_unlocked(struct ce_array *ca) { unsigned int min = FULL_COUNT_MASK; int i, min_idx = 0; for (i = 0; i < ca->n; i++) { unsigned int this = FULL_COUNT(ca->array[i]); if (min > this) { min = this; min_idx = i; } } del_elem(ca, min_idx); return PFN(ca->array[min_idx]); } /* * We return the 0th pfn in the error case under the assumption that it cannot * be poisoned and excessive CEs in there are a serious deal anyway. */ static u64 __maybe_unused del_lru_elem(void) { struct ce_array *ca = &ce_arr; u64 pfn; if (!ca->n) return 0; mutex_lock(&ce_mutex); pfn = del_lru_elem_unlocked(ca); mutex_unlock(&ce_mutex); return pfn; } static bool sanity_check(struct ce_array *ca) { bool ret = false; u64 prev = 0; int i; for (i = 0; i < ca->n; i++) { u64 this = PFN(ca->array[i]); if (WARN(prev > this, "prev: 0x%016llx <-> this: 0x%016llx\n", prev, this)) ret = true; prev = this; } if (!ret) return ret; pr_info("Sanity check dump:\n{ n: %d\n", ca->n); for (i = 0; i < ca->n; i++) { u64 this = PFN(ca->array[i]); pr_info(" %03d: [%016llx|%03llx]\n", i, this, FULL_COUNT(ca->array[i])); } pr_info("}\n"); return ret; } /** * cec_add_elem - Add an element to the CEC array. * @pfn: page frame number to insert * * Return values: * - <0: on error * - 0: on success * - >0: when the inserted pfn was offlined */ static int cec_add_elem(u64 pfn) { struct ce_array *ca = &ce_arr; int count, err, ret = 0; unsigned int to = 0; /* * We can be called very early on the identify_cpu() path where we are * not initialized yet. We ignore the error for simplicity. */ if (!ce_arr.array || ce_arr.disabled) return -ENODEV; mutex_lock(&ce_mutex); ca->ces_entered++; /* Array full, free the LRU slot. */ if (ca->n == MAX_ELEMS) WARN_ON(!del_lru_elem_unlocked(ca)); err = find_elem(ca, pfn, &to); if (err < 0) { /* * Shift range [to-end] to make room for one more element. */ memmove((void *)&ca->array[to + 1], (void *)&ca->array[to], (ca->n - to) * sizeof(u64)); ca->array[to] = pfn << PAGE_SHIFT; ca->n++; } /* Add/refresh element generation and increment count */ ca->array[to] |= DECAY_MASK << COUNT_BITS; ca->array[to]++; /* Check action threshold and soft-offline, if reached. */ count = COUNT(ca->array[to]); if (count >= action_threshold) { u64 pfn = ca->array[to] >> PAGE_SHIFT; if (!pfn_valid(pfn)) { pr_warn("CEC: Invalid pfn: 0x%llx\n", pfn); } else { /* We have reached max count for this page, soft-offline it. */ pr_err("Soft-offlining pfn: 0x%llx\n", pfn); memory_failure_queue(pfn, MF_SOFT_OFFLINE); ca->pfns_poisoned++; } del_elem(ca, to); /* * Return a >0 value to callers, to denote that we've reached * the offlining threshold. 
*/ ret = 1; goto unlock; } ca->decay_count++; if (ca->decay_count >= CLEAN_ELEMS) do_spring_cleaning(ca); WARN_ON_ONCE(sanity_check(ca)); unlock: mutex_unlock(&ce_mutex); return ret; } static int u64_get(void *data, u64 *val) { *val = *(u64 *)data; return 0; } static int pfn_set(void *data, u64 val) { *(u64 *)data = val; cec_add_elem(val); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(pfn_ops, u64_get, pfn_set, "0x%llx\n"); static int decay_interval_set(void *data, u64 val) { if (val < CEC_DECAY_MIN_INTERVAL) return -EINVAL; if (val > CEC_DECAY_MAX_INTERVAL) return -EINVAL; *(u64 *)data = val; decay_interval = val; cec_mod_work(decay_interval); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(decay_interval_ops, u64_get, decay_interval_set, "%lld\n"); static int action_threshold_set(void *data, u64 val) { *(u64 *)data = val; if (val > COUNT_MASK) val = COUNT_MASK; action_threshold = val; return 0; } DEFINE_DEBUGFS_ATTRIBUTE(action_threshold_ops, u64_get, action_threshold_set, "%lld\n"); static const char * const bins[] = { "00", "01", "10", "11" }; static int array_show(struct seq_file *m, void *v) { struct ce_array *ca = &ce_arr; int i; mutex_lock(&ce_mutex); seq_printf(m, "{ n: %d\n", ca->n); for (i = 0; i < ca->n; i++) { u64 this = PFN(ca->array[i]); seq_printf(m, " %3d: [%016llx|%s|%03llx]\n", i, this, bins[DECAY(ca->array[i])], COUNT(ca->array[i])); } seq_printf(m, "}\n"); seq_printf(m, "Stats:\nCEs: %llu\nofflined pages: %llu\n", ca->ces_entered, ca->pfns_poisoned); seq_printf(m, "Flags: 0x%x\n", ca->flags); seq_printf(m, "Decay interval: %lld seconds\n", decay_interval); seq_printf(m, "Decays: %lld\n", ca->decays_done); seq_printf(m, "Action threshold: %lld\n", action_threshold); mutex_unlock(&ce_mutex); return 0; } DEFINE_SHOW_ATTRIBUTE(array); static int __init create_debugfs_nodes(void) { struct dentry *d, *pfn, *decay, *count, *array; d = debugfs_create_dir("cec", ras_debugfs_dir); if (!d) { pr_warn("Error creating cec debugfs node!\n"); return -1; } decay = debugfs_create_file("decay_interval", S_IRUSR | S_IWUSR, d, &decay_interval, &decay_interval_ops); if (!decay) { pr_warn("Error creating decay_interval debugfs node!\n"); goto err; } count = debugfs_create_file("action_threshold", S_IRUSR | S_IWUSR, d, &action_threshold, &action_threshold_ops); if (!count) { pr_warn("Error creating action_threshold debugfs node!\n"); goto err; } if (!IS_ENABLED(CONFIG_RAS_CEC_DEBUG)) return 0; pfn = debugfs_create_file("pfn", S_IRUSR | S_IWUSR, d, &dfs_pfn, &pfn_ops); if (!pfn) { pr_warn("Error creating pfn debugfs node!\n"); goto err; } array = debugfs_create_file("array", S_IRUSR, d, NULL, &array_fops); if (!array) { pr_warn("Error creating array debugfs node!\n"); goto err; } return 0; err: debugfs_remove_recursive(d); return 1; } static int cec_notifier(struct notifier_block *nb, unsigned long val, void *data) { struct mce *m = (struct mce *)data; if (!m) return NOTIFY_DONE; /* We eat only correctable DRAM errors with usable addresses. */ if (mce_is_memory_error(m) && mce_is_correctable(m) && mce_usable_address(m)) { if (!cec_add_elem(m->addr >> PAGE_SHIFT)) { m->kflags |= MCE_HANDLED_CEC; return NOTIFY_OK; } } return NOTIFY_DONE; } static struct notifier_block cec_nb = { .notifier_call = cec_notifier, .priority = MCE_PRIO_CEC, }; static int __init cec_init(void) { if (ce_arr.disabled) return -ENODEV; /* * Intel systems may avoid uncorrectable errors * if pages with corrected errors are aggressively * taken offline. 
*/ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) action_threshold = 2; ce_arr.array = (void *)get_zeroed_page(GFP_KERNEL); if (!ce_arr.array) { pr_err("Error allocating CE array page!\n"); return -ENOMEM; } if (create_debugfs_nodes()) { free_page((unsigned long)ce_arr.array); return -ENOMEM; } INIT_DELAYED_WORK(&cec_work, cec_work_fn); schedule_delayed_work(&cec_work, CEC_DECAY_DEFAULT_INTERVAL); mce_register_decode_chain(&cec_nb); pr_info("Correctable Errors collector initialized.\n"); return 0; } late_initcall(cec_init); int __init parse_cec_param(char *str) { if (!str) return 0; if (*str == '=') str++; if (!strcmp(str, "cec_disable")) ce_arr.disabled = 1; else return 0; return 1; }
linux-master
drivers/ras/cec.c
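The long comment at the top of cec.c describes each array element as a packed u64: page frame number in the upper bits, a two-bit generation ("decay") field, and an error count in the low bits. The standalone plain-C model below reuses the driver's macros to show how cec_add_elem() refreshes the generation and bumps the count; PAGE_SHIFT is assumed to be 12 (4 KiB pages) for the illustration.

/* Standalone model (plain C) of the CEC element layout described in
 * the comment at the top of cec.c: [ PFN | 2 decay bits | count ].
 * PAGE_SHIFT is assumed to be 12 (4 KiB pages) for this illustration. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define DECAY_BITS	2
#define DECAY_MASK	((1ULL << DECAY_BITS) - 1)
#define COUNT_BITS	(PAGE_SHIFT - DECAY_BITS)
#define COUNT_MASK	((1ULL << COUNT_BITS) - 1)

#define PFN(e)		((e) >> PAGE_SHIFT)
#define DECAY(e)	(((e) >> COUNT_BITS) & DECAY_MASK)
#define COUNT(e)	((unsigned int)(e) & COUNT_MASK)

int main(void)
{
	uint64_t elem = (uint64_t)0x1234 << PAGE_SHIFT;	/* new entry for pfn 0x1234 */

	/* what cec_add_elem() does on insert/refresh: reset the generation
	 * to 11b and bump the error count */
	elem |= DECAY_MASK << COUNT_BITS;
	elem++;

	printf("pfn=0x%llx decay=%llu count=%u\n",
	       (unsigned long long)PFN(elem),
	       (unsigned long long)DECAY(elem), COUNT(elem));
	return 0;
}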