/*
 * Broadcom specific AMBA
 * ChipCommon core driver
 *
 * Copyright 2005, Broadcom Corporation
 * Copyright 2006, 2007, Michael Buesch <[email protected]>
 * Copyright 2012, Hauke Mehrtens <[email protected]>
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bcma_private.h"
#include <linux/bcm47xx_wdt.h>
#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/bcma/bcma.h>

static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset,
					 u32 mask, u32 value)
{
	value &= mask;
	value |= bcma_cc_read32(cc, offset) & ~mask;
	bcma_cc_write32(cc, offset, value);

	return value;
}

u32 bcma_chipco_get_alp_clock(struct bcma_drv_cc *cc)
{
	if (cc->capabilities & BCMA_CC_CAP_PMU)
		return bcma_pmu_get_alp_clock(cc);

	return 20000000;
}
EXPORT_SYMBOL_GPL(bcma_chipco_get_alp_clock);

static bool bcma_core_cc_has_pmu_watchdog(struct bcma_drv_cc *cc)
{
	struct bcma_bus *bus = cc->core->bus;

	if (cc->capabilities & BCMA_CC_CAP_PMU) {
		if (bus->chipinfo.id == BCMA_CHIP_ID_BCM53573) {
			WARN(bus->chipinfo.rev <= 1, "No watchdog available\n");
			/* 53573B0 and 53573B1 have bugged PMU watchdog. It can
			 * be enabled but timer can't be bumped. Use CC one
			 * instead.
			 */
			return false;
		}
		return true;
	} else {
		return false;
	}
}

static u32 bcma_chipco_watchdog_get_max_timer(struct bcma_drv_cc *cc)
{
	struct bcma_bus *bus = cc->core->bus;
	u32 nb;

	if (bcma_core_cc_has_pmu_watchdog(cc)) {
		if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706)
			nb = 32;
		else if (cc->core->id.rev < 26)
			nb = 16;
		else
			nb = (cc->core->id.rev >= 37) ? 32 : 24;
	} else {
		nb = 28;
	}
	if (nb == 32)
		return 0xffffffff;
	else
		return (1 << nb) - 1;
}

static u32 bcma_chipco_watchdog_timer_set_wdt(struct bcm47xx_wdt *wdt,
					      u32 ticks)
{
	struct bcma_drv_cc *cc = bcm47xx_wdt_get_drvdata(wdt);

	return bcma_chipco_watchdog_timer_set(cc, ticks);
}

static u32 bcma_chipco_watchdog_timer_set_ms_wdt(struct bcm47xx_wdt *wdt,
						 u32 ms)
{
	struct bcma_drv_cc *cc = bcm47xx_wdt_get_drvdata(wdt);
	u32 ticks;

	ticks = bcma_chipco_watchdog_timer_set(cc, cc->ticks_per_ms * ms);
	return ticks / cc->ticks_per_ms;
}

static int bcma_chipco_watchdog_ticks_per_ms(struct bcma_drv_cc *cc)
{
	struct bcma_bus *bus = cc->core->bus;

	if (cc->capabilities & BCMA_CC_CAP_PMU) {
		if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706)
			/* 4706 CC and PMU watchdogs are clocked at 1/4 of ALP
			 * clock
			 */
			return bcma_chipco_get_alp_clock(cc) / 4000;
		else
			/* based on 32KHz ILP clock */
			return 32;
	} else {
		return bcma_chipco_get_alp_clock(cc) / 1000;
	}
}

int bcma_chipco_watchdog_register(struct bcma_drv_cc *cc)
{
	struct bcma_bus *bus = cc->core->bus;
	struct bcm47xx_wdt wdt = {};
	struct platform_device *pdev;

	if (bus->chipinfo.id == BCMA_CHIP_ID_BCM53573 &&
	    bus->chipinfo.rev <= 1) {
		pr_debug("No watchdog on 53573A0 / 53573A1\n");
		return 0;
	}

	wdt.driver_data = cc;
	wdt.timer_set = bcma_chipco_watchdog_timer_set_wdt;
	wdt.timer_set_ms = bcma_chipco_watchdog_timer_set_ms_wdt;
	wdt.max_timer_ms =
		bcma_chipco_watchdog_get_max_timer(cc) / cc->ticks_per_ms;

	pdev = platform_device_register_data(NULL, "bcm47xx-wdt",
					     bus->num, &wdt, sizeof(wdt));
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	cc->watchdog = pdev;

	return 0;
}

static void bcma_core_chipcommon_flash_detect(struct bcma_drv_cc *cc)
{
	struct bcma_bus *bus = cc->core->bus;

	switch (cc->capabilities & BCMA_CC_CAP_FLASHT) {
	case BCMA_CC_FLASHT_STSER:
	case BCMA_CC_FLASHT_ATSER:
		bcma_debug(bus, "Found serial flash\n");
		bcma_sflash_init(cc);
		break;
	case BCMA_CC_FLASHT_PARA:
		bcma_debug(bus, "Found parallel flash\n");
		bcma_pflash_init(cc);
		break;
	default:
		bcma_err(bus, "Flash type not supported\n");
	}

	if (cc->core->id.rev == 38 ||
	    bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
		if (cc->capabilities & BCMA_CC_CAP_NFLASH) {
			bcma_debug(bus, "Found NAND flash\n");
			bcma_nflash_init(cc);
		}
	}
}

void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc)
{
	struct bcma_bus *bus = cc->core->bus;

	if (cc->early_setup_done)
		return;

	spin_lock_init(&cc->gpio_lock);

	if (cc->core->id.rev >= 11)
		cc->status = bcma_cc_read32(cc, BCMA_CC_CHIPSTAT);
	cc->capabilities = bcma_cc_read32(cc, BCMA_CC_CAP);
	if (cc->core->id.rev >= 35)
		cc->capabilities_ext = bcma_cc_read32(cc, BCMA_CC_CAP_EXT);

	if (cc->capabilities & BCMA_CC_CAP_PMU)
		bcma_pmu_early_init(cc);

	if (bus->hosttype == BCMA_HOSTTYPE_SOC)
		bcma_core_chipcommon_flash_detect(cc);

	cc->early_setup_done = true;
}

void bcma_core_chipcommon_init(struct bcma_drv_cc *cc)
{
	u32 leddc_on = 10;
	u32 leddc_off = 90;

	if (cc->setup_done)
		return;

	bcma_core_chipcommon_early_init(cc);

	if (cc->core->id.rev >= 20) {
		u32 pullup = 0, pulldown = 0;

		if (cc->core->bus->chipinfo.id == BCMA_CHIP_ID_BCM43142) {
			pullup = 0x402e0;
			pulldown = 0x20500;
		}

		bcma_cc_write32(cc, BCMA_CC_GPIOPULLUP, pullup);
		bcma_cc_write32(cc, BCMA_CC_GPIOPULLDOWN, pulldown);
	}

	if (cc->capabilities & BCMA_CC_CAP_PMU)
		bcma_pmu_init(cc);
	if (cc->capabilities & BCMA_CC_CAP_PCTL)
		bcma_err(cc->core->bus, "Power control not implemented!\n");

	if (cc->core->id.rev >= 16) {
		if (cc->core->bus->sprom.leddc_on_time &&
		    cc->core->bus->sprom.leddc_off_time) {
			leddc_on = cc->core->bus->sprom.leddc_on_time;
			leddc_off = cc->core->bus->sprom.leddc_off_time;
		}
		bcma_cc_write32(cc, BCMA_CC_GPIOTIMER,
			((leddc_on << BCMA_CC_GPIOTIMER_ONTIME_SHIFT) |
			 (leddc_off << BCMA_CC_GPIOTIMER_OFFTIME_SHIFT)));
	}
	cc->ticks_per_ms = bcma_chipco_watchdog_ticks_per_ms(cc);

	cc->setup_done = true;
}

/* Set chip watchdog reset timer to fire in 'ticks' backplane cycles */
u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks)
{
	u32 maxt;

	maxt = bcma_chipco_watchdog_get_max_timer(cc);
	if (bcma_core_cc_has_pmu_watchdog(cc)) {
		if (ticks == 1)
			ticks = 2;
		else if (ticks > maxt)
			ticks = maxt;
		bcma_pmu_write32(cc, BCMA_CC_PMU_WATCHDOG, ticks);
	} else {
		struct bcma_bus *bus = cc->core->bus;

		if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4707 &&
		    bus->chipinfo.id != BCMA_CHIP_ID_BCM47094 &&
		    bus->chipinfo.id != BCMA_CHIP_ID_BCM53018)
			bcma_core_set_clockmode(cc->core,
						ticks ? BCMA_CLKMODE_FAST : BCMA_CLKMODE_DYNAMIC);

		if (ticks > maxt)
			ticks = maxt;
		/* instant NMI */
		bcma_cc_write32(cc, BCMA_CC_WATCHDOG, ticks);
	}
	return ticks;
}

void bcma_chipco_irq_mask(struct bcma_drv_cc *cc, u32 mask, u32 value)
{
	bcma_cc_write32_masked(cc, BCMA_CC_IRQMASK, mask, value);
}

u32 bcma_chipco_irq_status(struct bcma_drv_cc *cc, u32 mask)
{
	return bcma_cc_read32(cc, BCMA_CC_IRQSTAT) & mask;
}

u32 bcma_chipco_gpio_in(struct bcma_drv_cc *cc, u32 mask)
{
	return bcma_cc_read32(cc, BCMA_CC_GPIOIN) & mask;
}

u32 bcma_chipco_gpio_out(struct bcma_drv_cc *cc, u32 mask, u32 value)
{
	unsigned long flags;
	u32 res;

	spin_lock_irqsave(&cc->gpio_lock, flags);
	res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOOUT, mask, value);
	spin_unlock_irqrestore(&cc->gpio_lock, flags);

	return res;
}
EXPORT_SYMBOL_GPL(bcma_chipco_gpio_out);

u32 bcma_chipco_gpio_outen(struct bcma_drv_cc *cc, u32 mask, u32 value)
{
	unsigned long flags;
	u32 res;

	spin_lock_irqsave(&cc->gpio_lock, flags);
	res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOOUTEN, mask, value);
	spin_unlock_irqrestore(&cc->gpio_lock, flags);

	return res;
}
EXPORT_SYMBOL_GPL(bcma_chipco_gpio_outen);

/*
 * If the bit is set to 0, chipcommon controls this GPIO,
 * if the bit is set to 1, it is used by some part of the chip and not our code.
 */
u32 bcma_chipco_gpio_control(struct bcma_drv_cc *cc, u32 mask, u32 value)
{
	unsigned long flags;
	u32 res;

	spin_lock_irqsave(&cc->gpio_lock, flags);
	res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOCTL, mask, value);
	spin_unlock_irqrestore(&cc->gpio_lock, flags);

	return res;
}
EXPORT_SYMBOL_GPL(bcma_chipco_gpio_control);

u32 bcma_chipco_gpio_intmask(struct bcma_drv_cc *cc, u32 mask, u32 value)
{
	unsigned long flags;
	u32 res;

	spin_lock_irqsave(&cc->gpio_lock, flags);
	res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOIRQ, mask, value);
	spin_unlock_irqrestore(&cc->gpio_lock, flags);

	return res;
}

u32 bcma_chipco_gpio_polarity(struct bcma_drv_cc *cc, u32 mask, u32 value)
{
	unsigned long flags;
	u32 res;

	spin_lock_irqsave(&cc->gpio_lock, flags);
	res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOPOL, mask, value);
	spin_unlock_irqrestore(&cc->gpio_lock, flags);

	return res;
}

u32 bcma_chipco_gpio_pullup(struct bcma_drv_cc *cc, u32 mask, u32 value)
{
	unsigned long flags;
	u32 res;

	if (cc->core->id.rev < 20)
		return 0;

	spin_lock_irqsave(&cc->gpio_lock, flags);
	res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOPULLUP, mask, value);
	spin_unlock_irqrestore(&cc->gpio_lock, flags);

	return res;
}

u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value)
{
	unsigned long flags;
	u32 res;

	if (cc->core->id.rev < 20)
		return 0;

	spin_lock_irqsave(&cc->gpio_lock, flags);
	res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOPULLDOWN, mask, value);
	spin_unlock_irqrestore(&cc->gpio_lock, flags);

	return res;
}

#ifdef CONFIG_BCMA_DRIVER_MIPS
void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
{
	unsigned int irq;
	u32 baud_base;
	u32 i;
	unsigned int ccrev = cc->core->id.rev;
	struct bcma_serial_port *ports = cc->serial_ports;

	if (ccrev >= 11 && ccrev != 15) {
		baud_base = bcma_chipco_get_alp_clock(cc);
		if (ccrev >= 21) {
			/* Turn off UART clock before switching clocksource. */
			bcma_cc_write32(cc, BCMA_CC_CORECTL,
					bcma_cc_read32(cc, BCMA_CC_CORECTL)
					& ~BCMA_CC_CORECTL_UARTCLKEN);
		}
		/* Set the override bit so we don't divide it */
		bcma_cc_write32(cc, BCMA_CC_CORECTL,
				bcma_cc_read32(cc, BCMA_CC_CORECTL)
				| BCMA_CC_CORECTL_UARTCLK0);
		if (ccrev >= 21) {
			/* Re-enable the UART clock. */
			bcma_cc_write32(cc, BCMA_CC_CORECTL,
					bcma_cc_read32(cc, BCMA_CC_CORECTL)
					| BCMA_CC_CORECTL_UARTCLKEN);
		}
	} else {
		bcma_err(cc->core->bus,
			 "serial not supported on this device ccrev: 0x%x\n",
			 ccrev);
		return;
	}

	irq = bcma_core_irq(cc->core, 0);

	/* Determine the registers of the UARTs */
	cc->nr_serial_ports = (cc->capabilities & BCMA_CC_CAP_NRUART);
	for (i = 0; i < cc->nr_serial_ports; i++) {
		ports[i].regs = cc->core->io_addr + BCMA_CC_UART0_DATA +
				(i * 256);
		ports[i].irq = irq;
		ports[i].baud_base = baud_base;
		ports[i].reg_shift = 0;
	}
}
#endif /* CONFIG_BCMA_DRIVER_MIPS */
linux-master
drivers/bcma/driver_chipcommon.c
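Two small idioms from the file above are worth isolating: the masked read-modify-write used by bcma_cc_write32_masked(), and the overflow-safe maximum-timer computation in bcma_chipco_watchdog_get_max_timer(). The following standalone userspace sketch mirrors both; write32_masked(), watchdog_max_ticks(), and the sample values are invented for illustration, not part of the kernel tree.

#include <stdint.h>
#include <stdio.h>

/* Model of bcma_cc_write32_masked(): only bits selected by mask are
 * replaced; all other bits keep their current register value. */
static uint32_t write32_masked(uint32_t reg, uint32_t mask, uint32_t value)
{
	value &= mask;        /* discard value bits outside the mask */
	value |= reg & ~mask; /* preserve the unmasked register bits */
	return value;         /* the driver writes this back */
}

/* Model of bcma_chipco_watchdog_get_max_timer(): an nb-bit watchdog
 * counter holds (1 << nb) - 1 ticks, with nb == 32 special-cased
 * because shifting a 32-bit value by 32 is undefined behavior. */
static uint32_t watchdog_max_ticks(unsigned int nb)
{
	return (nb == 32) ? 0xffffffff : (1u << nb) - 1;
}

int main(void)
{
	/* Replace the low byte of a register, keeping the rest. */
	printf("%#x\n", write32_masked(0xdeadbeef, 0xff, 0x42)); /* 0xdeadbe42 */
	/* 28-bit ChipCommon watchdog: 0x0fffffff max ticks. */
	printf("%#x\n", watchdog_max_ticks(28));
	return 0;
}

The same masked-write helper backs every bcma_chipco_gpio_* accessor above, which is why each of them takes a (mask, value) pair rather than a raw register value.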
/*
 * Broadcom specific AMBA
 * ChipCommon NAND flash interface
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bcma_private.h"
#include <linux/bitops.h>
#include <linux/platform_device.h>
#include <linux/platform_data/brcmnand.h>
#include <linux/bcma/bcma.h>

/* Alternate NAND controller driver name in order to allow both bcm47xxnflash
 * and bcma_brcmnand to be built into the same kernel image.
 */
static const char *bcma_nflash_alt_name = "bcma_brcmnand";

struct platform_device bcma_nflash_dev = {
	.name		= "bcma_nflash",
	.num_resources	= 0,
};

static const char *probes[] = { "bcm47xxpart", NULL };

/* Initialize NAND flash access */
int bcma_nflash_init(struct bcma_drv_cc *cc)
{
	struct bcma_bus *bus = cc->core->bus;
	u32 reg;

	if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4706 &&
	    cc->core->id.rev != 38) {
		bcma_err(bus, "NAND flash on unsupported board!\n");
		return -ENOTSUPP;
	}

	if (!(cc->capabilities & BCMA_CC_CAP_NFLASH)) {
		bcma_err(bus, "NAND flash not present according to ChipCommon\n");
		return -ENODEV;
	}

	cc->nflash.present = true;
	if (cc->core->id.rev == 38 &&
	    (cc->status & BCMA_CC_CHIPST_5357_NAND_BOOT)) {
		cc->nflash.boot = true;

		/* Determine the chip select that is being used */
		reg = bcma_cc_read32(cc, BCMA_CC_NAND_CS_NAND_SELECT) & 0xff;
		cc->nflash.brcmnand_info.chip_select = ffs(reg) - 1;
		cc->nflash.brcmnand_info.part_probe_types = probes;
		cc->nflash.brcmnand_info.ecc_stepsize = 512;
		cc->nflash.brcmnand_info.ecc_strength = 1;
		bcma_nflash_dev.name = bcma_nflash_alt_name;
	}

	/* Prepare platform device, but don't register it yet. It's too early,
	 * malloc (required by device_private_init) is not available yet.
	 */
	bcma_nflash_dev.dev.platform_data = &cc->nflash;

	return 0;
}
linux-master
drivers/bcma/driver_chipcommon_nflash.c
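The chip-select decode above relies on ffs() ("find first set"), which returns the 1-based position of the lowest set bit, so ffs(reg) - 1 turns a one-hot select mask into a 0-based chip-select number. A minimal userspace sketch of that decode, with an invented sample register value:

#include <strings.h>	/* POSIX ffs() */
#include <stdio.h>

int main(void)
{
	/* The low byte of BCMA_CC_NAND_CS_NAND_SELECT acts as a one-hot
	 * mask of the active chip select; bit 3 set means CS 3. */
	unsigned int reg = 0x08;
	printf("chip_select = %d\n", ffs(reg) - 1);	/* prints 3 */
	return 0;
}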
/*
 * Broadcom specific AMBA
 * GPIO driver
 *
 * Copyright 2011, Broadcom Corporation
 * Copyright 2012, Hauke Mehrtens <[email protected]>
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/property.h>
#include <linux/bcma/bcma.h>

#include "bcma_private.h"

#define BCMA_GPIO_MAX_PINS	32

static int bcma_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
{
	struct bcma_drv_cc *cc = gpiochip_get_data(chip);

	return !!bcma_chipco_gpio_in(cc, 1 << gpio);
}

static void bcma_gpio_set_value(struct gpio_chip *chip, unsigned gpio,
				int value)
{
	struct bcma_drv_cc *cc = gpiochip_get_data(chip);

	bcma_chipco_gpio_out(cc, 1 << gpio, value ? 1 << gpio : 0);
}

static int bcma_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
{
	struct bcma_drv_cc *cc = gpiochip_get_data(chip);

	bcma_chipco_gpio_outen(cc, 1 << gpio, 0);
	return 0;
}

static int bcma_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
				      int value)
{
	struct bcma_drv_cc *cc = gpiochip_get_data(chip);

	bcma_chipco_gpio_outen(cc, 1 << gpio, 1 << gpio);
	bcma_chipco_gpio_out(cc, 1 << gpio, value ? 1 << gpio : 0);
	return 0;
}

static int bcma_gpio_request(struct gpio_chip *chip, unsigned gpio)
{
	struct bcma_drv_cc *cc = gpiochip_get_data(chip);

	bcma_chipco_gpio_control(cc, 1 << gpio, 0);
	/* clear pulldown */
	bcma_chipco_gpio_pulldown(cc, 1 << gpio, 0);
	/* Set pullup */
	bcma_chipco_gpio_pullup(cc, 1 << gpio, 1 << gpio);

	return 0;
}

static void bcma_gpio_free(struct gpio_chip *chip, unsigned gpio)
{
	struct bcma_drv_cc *cc = gpiochip_get_data(chip);

	/* clear pullup */
	bcma_chipco_gpio_pullup(cc, 1 << gpio, 0);
}

#if IS_BUILTIN(CONFIG_BCM47XX) || IS_BUILTIN(CONFIG_ARCH_BCM_5301X)

static void bcma_gpio_irq_unmask(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct bcma_drv_cc *cc = gpiochip_get_data(gc);
	int gpio = irqd_to_hwirq(d);
	u32 val = bcma_chipco_gpio_in(cc, BIT(gpio));

	gpiochip_enable_irq(gc, gpio);
	bcma_chipco_gpio_polarity(cc, BIT(gpio), val);
	bcma_chipco_gpio_intmask(cc, BIT(gpio), BIT(gpio));
}

static void bcma_gpio_irq_mask(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct bcma_drv_cc *cc = gpiochip_get_data(gc);
	int gpio = irqd_to_hwirq(d);

	bcma_chipco_gpio_intmask(cc, BIT(gpio), 0);
	gpiochip_disable_irq(gc, gpio);
}

static const struct irq_chip bcma_gpio_irq_chip = {
	.name		= "BCMA-GPIO",
	.irq_mask	= bcma_gpio_irq_mask,
	.irq_unmask	= bcma_gpio_irq_unmask,
	.flags		= IRQCHIP_IMMUTABLE,
	GPIOCHIP_IRQ_RESOURCE_HELPERS,
};

static irqreturn_t bcma_gpio_irq_handler(int irq, void *dev_id)
{
	struct bcma_drv_cc *cc = dev_id;
	struct gpio_chip *gc = &cc->gpio;
	u32 val = bcma_cc_read32(cc, BCMA_CC_GPIOIN);
	u32 mask = bcma_cc_read32(cc, BCMA_CC_GPIOIRQ);
	u32 pol = bcma_cc_read32(cc, BCMA_CC_GPIOPOL);
	unsigned long irqs = (val ^ pol) & mask;
	int gpio;

	if (!irqs)
		return IRQ_NONE;

	for_each_set_bit(gpio, &irqs, gc->ngpio)
		generic_handle_domain_irq_safe(gc->irq.domain, gpio);
	bcma_chipco_gpio_polarity(cc, irqs, val & irqs);

	return IRQ_HANDLED;
}

static int bcma_gpio_irq_init(struct bcma_drv_cc *cc)
{
	struct gpio_chip *chip = &cc->gpio;
	struct gpio_irq_chip *girq = &chip->irq;
	int hwirq, err;

	if (cc->core->bus->hosttype != BCMA_HOSTTYPE_SOC)
		return 0;

	hwirq = bcma_core_irq(cc->core, 0);
	err = request_irq(hwirq, bcma_gpio_irq_handler, IRQF_SHARED, "gpio",
			  cc);
	if (err)
		return err;

	bcma_chipco_gpio_intmask(cc, ~0, 0);
	bcma_cc_set32(cc, BCMA_CC_IRQMASK, BCMA_CC_IRQ_GPIO);

	gpio_irq_chip_set_chip(girq, &bcma_gpio_irq_chip);
	/* This will let us handle the parent IRQ in the driver */
	girq->parent_handler = NULL;
	girq->num_parents = 0;
	girq->parents = NULL;
	girq->default_type = IRQ_TYPE_NONE;
	girq->handler = handle_simple_irq;

	return 0;
}

static void bcma_gpio_irq_exit(struct bcma_drv_cc *cc)
{
	if (cc->core->bus->hosttype != BCMA_HOSTTYPE_SOC)
		return;

	bcma_cc_mask32(cc, BCMA_CC_IRQMASK, ~BCMA_CC_IRQ_GPIO);
	free_irq(bcma_core_irq(cc->core, 0), cc);
}
#else
static int bcma_gpio_irq_init(struct bcma_drv_cc *cc)
{
	return 0;
}

static void bcma_gpio_irq_exit(struct bcma_drv_cc *cc)
{
}
#endif

int bcma_gpio_init(struct bcma_drv_cc *cc)
{
	struct bcma_bus *bus = cc->core->bus;
	struct gpio_chip *chip = &cc->gpio;
	int err;

	chip->label		= "bcma_gpio";
	chip->owner		= THIS_MODULE;
	chip->request		= bcma_gpio_request;
	chip->free		= bcma_gpio_free;
	chip->get		= bcma_gpio_get_value;
	chip->set		= bcma_gpio_set_value;
	chip->direction_input	= bcma_gpio_direction_input;
	chip->direction_output	= bcma_gpio_direction_output;
	chip->parent		= bus->dev;
	chip->fwnode		= dev_fwnode(&cc->core->dev);

	switch (bus->chipinfo.id) {
	case BCMA_CHIP_ID_BCM4707:
	case BCMA_CHIP_ID_BCM5357:
	case BCMA_CHIP_ID_BCM53572:
	case BCMA_CHIP_ID_BCM53573:
	case BCMA_CHIP_ID_BCM47094:
		chip->ngpio	= 32;
		break;
	default:
		chip->ngpio	= 16;
	}

	/*
	 * Register SoC GPIO devices with absolute GPIO pin base.
	 * On MIPS, we don't have Device Tree and we can't use relative (per
	 * chip) GPIO numbers.
	 * On some ARM devices, user space may want to access some system GPIO
	 * pins directly, which is easier to do with a predictable GPIO base.
	 */
	if (IS_BUILTIN(CONFIG_BCM47XX) ||
	    cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC)
		chip->base	= bus->num * BCMA_GPIO_MAX_PINS;
	else
		chip->base	= -1;

	err = bcma_gpio_irq_init(cc);
	if (err)
		return err;

	err = gpiochip_add_data(chip, cc);
	if (err) {
		bcma_gpio_irq_exit(cc);
		return err;
	}

	return 0;
}

int bcma_gpio_unregister(struct bcma_drv_cc *cc)
{
	bcma_gpio_irq_exit(cc);
	gpiochip_remove(&cc->gpio);
	return 0;
}
linux-master
drivers/bcma/driver_gpio.c
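The interrupt handler above detects pending GPIOs as (val ^ pol) & mask: a line fires when its input level differs from its programmed polarity and it is unmasked, and the handler then reprograms the polarity of each fired line to its current level, effectively re-arming it for the next level change. A minimal userspace sketch of that bit logic, with invented sample values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t val  = 0x5;	/* current input levels (bits 0 and 2 high) */
	uint32_t pol  = 0x1;	/* programmed polarity (bit 0) */
	uint32_t mask = 0xf;	/* unmasked interrupt lines */

	/* Lines whose level differs from their polarity have fired. */
	uint32_t irqs = (val ^ pol) & mask;	/* 0x4 */

	/* Re-arm: set the polarity of each fired line to its current
	 * level, mirroring bcma_chipco_gpio_polarity(cc, irqs, val & irqs). */
	pol = (pol & ~irqs) | (val & irqs);	/* 0x5 */

	printf("irqs=%#x new pol=%#x\n", irqs, pol);
	return 0;
}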
/*
 * Broadcom specific AMBA
 * PCI Core
 *
 * Copyright 2005, 2011, Broadcom Corporation
 * Copyright 2006, 2007, Michael Buesch <[email protected]>
 * Copyright 2011, 2012, Hauke Mehrtens <[email protected]>
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bcma_private.h"
#include <linux/export.h>
#include <linux/bcma/bcma.h>

/**************************************************
 * R/W ops.
 **************************************************/

u32 bcma_pcie_read(struct bcma_drv_pci *pc, u32 address)
{
	pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_ADDR, address);
	pcicore_read32(pc, BCMA_CORE_PCI_PCIEIND_ADDR);
	return pcicore_read32(pc, BCMA_CORE_PCI_PCIEIND_DATA);
}

static void bcma_pcie_write(struct bcma_drv_pci *pc, u32 address, u32 data)
{
	pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_ADDR, address);
	pcicore_read32(pc, BCMA_CORE_PCI_PCIEIND_ADDR);
	pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_DATA, data);
}

static void bcma_pcie_mdio_set_phy(struct bcma_drv_pci *pc, u16 phy)
{
	u32 v;
	int i;

	v = BCMA_CORE_PCI_MDIODATA_START;
	v |= BCMA_CORE_PCI_MDIODATA_WRITE;
	v |= (BCMA_CORE_PCI_MDIODATA_DEV_ADDR <<
	      BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF);
	v |= (BCMA_CORE_PCI_MDIODATA_BLK_ADDR <<
	      BCMA_CORE_PCI_MDIODATA_REGADDR_SHF);
	v |= BCMA_CORE_PCI_MDIODATA_TA;
	v |= (phy << 4);
	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_DATA, v);

	udelay(10);
	for (i = 0; i < 200; i++) {
		v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL);
		if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE)
			break;
		usleep_range(1000, 2000);
	}
}

static u16 bcma_pcie_mdio_read(struct bcma_drv_pci *pc, u16 device, u8 address)
{
	int max_retries = 10;
	u16 ret = 0;
	u32 v;
	int i;

	/* enable mdio access to SERDES */
	v = BCMA_CORE_PCI_MDIOCTL_PREAM_EN;
	v |= BCMA_CORE_PCI_MDIOCTL_DIVISOR_VAL;
	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, v);

	if (pc->core->id.rev >= 10) {
		max_retries = 200;
		bcma_pcie_mdio_set_phy(pc, device);
		v = (BCMA_CORE_PCI_MDIODATA_DEV_ADDR <<
		     BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF);
		v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF);
	} else {
		v = (device << BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF_OLD);
		v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF_OLD);
	}

	v |= BCMA_CORE_PCI_MDIODATA_START;
	v |= BCMA_CORE_PCI_MDIODATA_READ;
	v |= BCMA_CORE_PCI_MDIODATA_TA;
	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_DATA, v);

	/* Wait for the device to complete the transaction */
	udelay(10);
	for (i = 0; i < max_retries; i++) {
		v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL);
		if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE) {
			udelay(10);
			ret = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_DATA);
			break;
		}
		usleep_range(1000, 2000);
	}
	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, 0);
	return ret;
}

static void bcma_pcie_mdio_write(struct bcma_drv_pci *pc, u16 device,
				 u8 address, u16 data)
{
	int max_retries = 10;
	u32 v;
	int i;

	/* enable mdio access to SERDES */
	v = BCMA_CORE_PCI_MDIOCTL_PREAM_EN;
	v |= BCMA_CORE_PCI_MDIOCTL_DIVISOR_VAL;
	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, v);

	if (pc->core->id.rev >= 10) {
		max_retries = 200;
		bcma_pcie_mdio_set_phy(pc, device);
		v = (BCMA_CORE_PCI_MDIODATA_DEV_ADDR <<
		     BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF);
		v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF);
	} else {
		v = (device << BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF_OLD);
		v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF_OLD);
	}

	v |= BCMA_CORE_PCI_MDIODATA_START;
	v |= BCMA_CORE_PCI_MDIODATA_WRITE;
	v |= BCMA_CORE_PCI_MDIODATA_TA;
	v |= data;
	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_DATA, v);

	/* Wait for the device to complete the transaction */
	udelay(10);
	for (i = 0; i < max_retries; i++) {
		v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL);
		if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE)
			break;
		usleep_range(1000, 2000);
	}
	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, 0);
}

static u16 bcma_pcie_mdio_writeread(struct bcma_drv_pci *pc, u16 device,
				    u8 address, u16 data)
{
	bcma_pcie_mdio_write(pc, device, address, data);
	return bcma_pcie_mdio_read(pc, device, address);
}

/**************************************************
 * Early init.
 **************************************************/

static void bcma_core_pci_fixcfg(struct bcma_drv_pci *pc)
{
	struct bcma_device *core = pc->core;
	u16 val16, core_index;
	uint regoff;

	regoff = BCMA_CORE_PCI_SPROM(BCMA_CORE_PCI_SPROM_PI_OFFSET);
	core_index = (u16)core->core_index;

	val16 = pcicore_read16(pc, regoff);
	if (((val16 & BCMA_CORE_PCI_SPROM_PI_MASK) >>
	     BCMA_CORE_PCI_SPROM_PI_SHIFT) != core_index) {
		val16 = (core_index << BCMA_CORE_PCI_SPROM_PI_SHIFT) |
			(val16 & ~BCMA_CORE_PCI_SPROM_PI_MASK);
		pcicore_write16(pc, regoff, val16);
	}
}

/*
 * Apply some early fixes required before accessing SPROM.
 * See also si_pci_fixcfg.
 */
void bcma_core_pci_early_init(struct bcma_drv_pci *pc)
{
	if (pc->early_setup_done)
		return;

	pc->hostmode = bcma_core_pci_is_in_hostmode(pc);
	if (pc->hostmode)
		goto out;

	bcma_core_pci_fixcfg(pc);

out:
	pc->early_setup_done = true;
}

/**************************************************
 * Workarounds.
 **************************************************/

static u8 bcma_pcicore_polarity_workaround(struct bcma_drv_pci *pc)
{
	u32 tmp;

	tmp = bcma_pcie_read(pc, BCMA_CORE_PCI_PLP_STATUSREG);
	if (tmp & BCMA_CORE_PCI_PLP_POLARITYINV_STAT)
		return BCMA_CORE_PCI_SERDES_RX_CTRL_FORCE |
		       BCMA_CORE_PCI_SERDES_RX_CTRL_POLARITY;
	else
		return BCMA_CORE_PCI_SERDES_RX_CTRL_FORCE;
}

static void bcma_pcicore_serdes_workaround(struct bcma_drv_pci *pc)
{
	u16 tmp;

	bcma_pcie_mdio_write(pc, BCMA_CORE_PCI_MDIODATA_DEV_RX,
			     BCMA_CORE_PCI_SERDES_RX_CTRL,
			     bcma_pcicore_polarity_workaround(pc));
	tmp = bcma_pcie_mdio_read(pc, BCMA_CORE_PCI_MDIODATA_DEV_PLL,
				  BCMA_CORE_PCI_SERDES_PLL_CTRL);
	if (tmp & BCMA_CORE_PCI_PLL_CTRL_FREQDET_EN)
		bcma_pcie_mdio_write(pc, BCMA_CORE_PCI_MDIODATA_DEV_PLL,
				     BCMA_CORE_PCI_SERDES_PLL_CTRL,
				     tmp & ~BCMA_CORE_PCI_PLL_CTRL_FREQDET_EN);
}

/* Fix MISC config to allow coming out of L2/L3-Ready state w/o PRST */
/* Needs to happen when coming out of 'standby'/'hibernate' */
static void bcma_core_pci_config_fixup(struct bcma_drv_pci *pc)
{
	u16 val16;
	uint regoff;

	regoff = BCMA_CORE_PCI_SPROM(BCMA_CORE_PCI_SPROM_MISC_CONFIG);

	val16 = pcicore_read16(pc, regoff);

	if (!(val16 & BCMA_CORE_PCI_SPROM_L23READY_EXIT_NOPERST)) {
		val16 |= BCMA_CORE_PCI_SPROM_L23READY_EXIT_NOPERST;
		pcicore_write16(pc, regoff, val16);
	}
}

/**************************************************
 * Init.
 **************************************************/

static void bcma_core_pci_clientmode_init(struct bcma_drv_pci *pc)
{
	bcma_pcicore_serdes_workaround(pc);
	bcma_core_pci_config_fixup(pc);
}

void bcma_core_pci_init(struct bcma_drv_pci *pc)
{
	if (pc->setup_done)
		return;

	bcma_core_pci_early_init(pc);

	if (pc->hostmode)
		bcma_core_pci_hostmode_init(pc);
	else
		bcma_core_pci_clientmode_init(pc);
}

void bcma_core_pci_power_save(struct bcma_bus *bus, bool up)
{
	struct bcma_drv_pci *pc;
	u16 data;

	if (bus->hosttype != BCMA_HOSTTYPE_PCI)
		return;

	pc = &bus->drv_pci[0];

	if (pc->core->id.rev >= 15 && pc->core->id.rev <= 20) {
		data = up ? 0x74 : 0x7C;
		bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
					 BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7F64);
		bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
					 BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
	} else if (pc->core->id.rev >= 21 && pc->core->id.rev <= 22) {
		data = up ? 0x75 : 0x7D;
		bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
					 BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7E65);
		bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
					 BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
	}
}
EXPORT_SYMBOL_GPL(bcma_core_pci_power_save);

static void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend)
{
	u32 w;

	w = bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG);
	if (extend)
		w |= BCMA_CORE_PCI_ASPMTIMER_EXTEND;
	else
		w &= ~BCMA_CORE_PCI_ASPMTIMER_EXTEND;
	bcma_pcie_write(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG, w);
	bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG);
}

void bcma_core_pci_up(struct bcma_drv_pci *pc)
{
	bcma_core_pci_extend_L1timer(pc, true);
}

void bcma_core_pci_down(struct bcma_drv_pci *pc)
{
	bcma_core_pci_extend_L1timer(pc, false);
}
linux-master
drivers/bcma/driver_pci.c
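Both MDIO paths above share one shape: kick off a transaction, then poll a status register for a done bit with a bounded retry count, sleeping between polls, and fall back to a default on timeout. A standalone sketch of that bounded-retry poll; read_status() and the 0x1 done bit are invented stand-ins for pcicore_read32() and BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Fake status register that reports completion after a few polls. */
static uint32_t read_status(void)
{
	static int calls;
	return (++calls >= 3) ? 0x1 /* "ACCESS_DONE" */ : 0;
}

/* Check a done bit up to max_retries times; the kernel code sleeps
 * between polls via usleep_range(1000, 2000). */
static bool wait_access_done(int max_retries)
{
	for (int i = 0; i < max_retries; i++) {
		if (read_status() & 0x1)
			return true;
	}
	return false;	/* timed out; the caller returns a default */
}

int main(void)
{
	printf("done=%d\n", wait_access_done(10));
	return 0;
}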
/*
 * Broadcom specific AMBA
 * GBIT MAC COMMON Core
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bcma_private.h"
#include <linux/bcma/bcma.h>

void bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc)
{
	mutex_init(&gc->phy_mutex);
}
linux-master
drivers/bcma/driver_gmac_cmn.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARM/ARM64 generic CPU idle driver.
 *
 * Copyright (C) 2014 ARM Ltd.
 * Author: Lorenzo Pieralisi <[email protected]>
 */

#define pr_fmt(fmt) "CPUidle arm: " fmt

#include <linux/cpu_cooling.h>
#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>

#include <asm/cpuidle.h>

#include "dt_idle_states.h"

/*
 * arm_enter_idle_state - Programs CPU to enter the specified state
 *
 * dev: cpuidle device
 * drv: cpuidle driver
 * idx: state index
 *
 * Called from the CPUidle framework to program the device to the
 * specified target state selected by the governor.
 */
static __cpuidle int arm_enter_idle_state(struct cpuidle_device *dev,
					  struct cpuidle_driver *drv, int idx)
{
	/*
	 * Pass idle state index to arm_cpuidle_suspend which in turn
	 * will call the CPU ops suspend protocol with idle index as a
	 * parameter.
	 */
	return CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, idx);
}

static struct cpuidle_driver arm_idle_driver __initdata = {
	.name = "arm_idle",
	.owner = THIS_MODULE,
	/*
	 * State at index 0 is standby wfi and considered standard
	 * on all ARM platforms. If in some platforms simple wfi
	 * can't be used as "state 0", DT bindings must be implemented
	 * to work around this issue and allow installing a special
	 * handler for idle state index 0.
	 */
	.states[0] = {
		.enter			= arm_enter_idle_state,
		.exit_latency		= 1,
		.target_residency	= 1,
		.power_usage		= UINT_MAX,
		.name			= "WFI",
		.desc			= "ARM WFI",
	}
};

static const struct of_device_id arm_idle_state_match[] __initconst = {
	{ .compatible = "arm,idle-state",
	  .data = arm_enter_idle_state },
	{ },
};

/*
 * arm_idle_init_cpu
 *
 * Registers the arm specific cpuidle driver with the cpuidle
 * framework. It relies on core code to parse the idle states
 * and initialize them using driver data structures accordingly.
 */
static int __init arm_idle_init_cpu(int cpu)
{
	int ret;
	struct cpuidle_driver *drv;

	drv = kmemdup(&arm_idle_driver, sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;

	drv->cpumask = (struct cpumask *)cpumask_of(cpu);

	/*
	 * Initialize idle states data, starting at index 1. This
	 * driver is DT only, if no DT idle states are detected (ret
	 * == 0) let the driver initialization fail accordingly since
	 * there is no reason to initialize the idle driver if only
	 * wfi is supported.
	 */
	ret = dt_init_idle_driver(drv, arm_idle_state_match, 1);
	if (ret <= 0) {
		ret = ret ? : -ENODEV;
		goto out_kfree_drv;
	}

	/*
	 * Call arch CPU operations in order to initialize
	 * idle states suspend back-end specific data
	 */
	ret = arm_cpuidle_init(cpu);

	/*
	 * Allow the initialization to continue for other CPUs, if the
	 * reported failure is a HW misconfiguration/breakage (-ENXIO).
	 *
	 * Some platforms do not support idle operations
	 * (arm_cpuidle_init() returning -EOPNOTSUPP), we should
	 * not flag this case as an error, it is a valid
	 * configuration.
	 */
	if (ret) {
		if (ret != -EOPNOTSUPP)
			pr_err("CPU %d failed to init idle CPU ops\n", cpu);
		ret = ret == -ENXIO ? 0 : ret;
		goto out_kfree_drv;
	}

	ret = cpuidle_register(drv, NULL);
	if (ret)
		goto out_kfree_drv;

	cpuidle_cooling_register(drv);

	return 0;

out_kfree_drv:
	kfree(drv);
	return ret;
}

/*
 * arm_idle_init - Initializes arm cpuidle driver
 *
 * Initializes arm cpuidle driver for all CPUs. If any CPU fails
 * to register its cpuidle driver, roll back and cancel the
 * registration of all previously registered CPUs.
 */
static int __init arm_idle_init(void)
{
	int cpu, ret;
	struct cpuidle_driver *drv;
	struct cpuidle_device *dev;

	for_each_possible_cpu(cpu) {
		ret = arm_idle_init_cpu(cpu);
		if (ret)
			goto out_fail;
	}

	return 0;

out_fail:
	while (--cpu >= 0) {
		dev = per_cpu(cpuidle_devices, cpu);
		drv = cpuidle_get_cpu_driver(dev);
		cpuidle_unregister(drv);
		kfree(drv);
	}

	return ret;
}
device_initcall(arm_idle_init);
linux-master
drivers/cpuidle/cpuidle-arm.c
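arm_idle_init() above uses a common kernel rollback idiom: initialize items in a forward loop, and on failure unwind exactly the items that succeeded, in reverse, with while (--i >= 0). The failing item is not torn down because the init routine is expected to clean up after itself when it fails. A standalone sketch of the pattern; init_one(), teardown_one(), and NR_ITEMS are invented for illustration.

#include <stdio.h>

#define NR_ITEMS 4

static int init_one(int i)	{ return (i == 2) ? -1 : 0; } /* fail at 2 */
static void teardown_one(int i)	{ printf("teardown %d\n", i); }

int main(void)
{
	int i, ret = 0;

	for (i = 0; i < NR_ITEMS; i++) {
		ret = init_one(i);
		if (ret)
			goto out_fail;	/* unwind items 0..i-1 */
	}
	return 0;

out_fail:
	/* Reverse-order teardown of everything that succeeded. */
	while (--i >= 0)
		teardown_one(i);
	return ret;
}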
// SPDX-License-Identifier: GPL-2.0
/*
 * PM domains for CPUs via genpd - managed by cpuidle-psci.
 *
 * Copyright (C) 2019 Linaro Ltd.
 * Author: Ulf Hansson <[email protected]>
 *
 */

#define pr_fmt(fmt) "CPUidle PSCI: " fmt

#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/psci.h>
#include <linux/slab.h>
#include <linux/string.h>

#include "cpuidle-psci.h"

struct psci_pd_provider {
	struct list_head link;
	struct device_node *node;
};

static LIST_HEAD(psci_pd_providers);
static bool psci_pd_allow_domain_state;

static int psci_pd_power_off(struct generic_pm_domain *pd)
{
	struct genpd_power_state *state = &pd->states[pd->state_idx];
	u32 *pd_state;

	if (!state->data)
		return 0;

	if (!psci_pd_allow_domain_state)
		return -EBUSY;

	/* OSI mode is enabled, set the corresponding domain state. */
	pd_state = state->data;
	psci_set_domain_state(*pd_state);

	return 0;
}

static int psci_pd_init(struct device_node *np, bool use_osi)
{
	struct generic_pm_domain *pd;
	struct psci_pd_provider *pd_provider;
	struct dev_power_governor *pd_gov;
	int ret = -ENOMEM;

	pd = dt_idle_pd_alloc(np, psci_dt_parse_state_node);
	if (!pd)
		goto out;

	pd_provider = kzalloc(sizeof(*pd_provider), GFP_KERNEL);
	if (!pd_provider)
		goto free_pd;

	pd->flags |= GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_CPU_DOMAIN;

	/*
	 * Allow power off when OSI has been successfully enabled.
	 * PREEMPT_RT is not yet ready to enter domain idle states.
	 */
	if (use_osi && !IS_ENABLED(CONFIG_PREEMPT_RT))
		pd->power_off = psci_pd_power_off;
	else
		pd->flags |= GENPD_FLAG_ALWAYS_ON;

	/* Use governor for CPU PM domains if it has some states to manage. */
	pd_gov = pd->states ? &pm_domain_cpu_gov : NULL;

	ret = pm_genpd_init(pd, pd_gov, false);
	if (ret)
		goto free_pd_prov;

	ret = of_genpd_add_provider_simple(np, pd);
	if (ret)
		goto remove_pd;

	pd_provider->node = of_node_get(np);
	list_add(&pd_provider->link, &psci_pd_providers);

	pr_debug("init PM domain %s\n", pd->name);
	return 0;

remove_pd:
	pm_genpd_remove(pd);
free_pd_prov:
	kfree(pd_provider);
free_pd:
	dt_idle_pd_free(pd);
out:
	pr_err("failed to init PM domain ret=%d %pOF\n", ret, np);
	return ret;
}

static void psci_pd_remove(void)
{
	struct psci_pd_provider *pd_provider, *it;
	struct generic_pm_domain *genpd;

	list_for_each_entry_safe_reverse(pd_provider, it,
					 &psci_pd_providers, link) {
		of_genpd_del_provider(pd_provider->node);

		genpd = of_genpd_remove_last(pd_provider->node);
		if (!IS_ERR(genpd))
			kfree(genpd);

		of_node_put(pd_provider->node);
		list_del(&pd_provider->link);
		kfree(pd_provider);
	}
}

static void psci_cpuidle_domain_sync_state(struct device *dev)
{
	/*
	 * All devices have now been attached/probed to the PM domain topology,
	 * hence it's fine to allow domain states to be picked.
	 */
	psci_pd_allow_domain_state = true;
}

static const struct of_device_id psci_of_match[] = {
	{ .compatible = "arm,psci-1.0" },
	{}
};

static int psci_cpuidle_domain_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct device_node *node;
	bool use_osi = psci_has_osi_support();
	int ret = 0, pd_count = 0;

	if (!np)
		return -ENODEV;

	/*
	 * Parse child nodes for the "#power-domain-cells" property and
	 * initialize a genpd/genpd-of-provider pair when it's found.
	 */
	for_each_child_of_node(np, node) {
		if (!of_property_present(node, "#power-domain-cells"))
			continue;

		ret = psci_pd_init(node, use_osi);
		if (ret) {
			of_node_put(node);
			goto exit;
		}

		pd_count++;
	}

	/* Bail out if not using the hierarchical CPU topology. */
	if (!pd_count)
		return 0;

	/* Link genpd masters/subdomains to model the CPU topology. */
	ret = dt_idle_pd_init_topology(np);
	if (ret)
		goto remove_pd;

	/* let's try to enable OSI. */
	ret = psci_set_osi_mode(use_osi);
	if (ret)
		goto remove_pd;

	pr_info("Initialized CPU PM domain topology using %s mode\n",
		use_osi ? "OSI" : "PC");
	return 0;

remove_pd:
	dt_idle_pd_remove_topology(np);
	psci_pd_remove();
exit:
	pr_err("failed to create CPU PM domains ret=%d\n", ret);
	return ret;
}

static struct platform_driver psci_cpuidle_domain_driver = {
	.probe = psci_cpuidle_domain_probe,
	.driver = {
		.name = "psci-cpuidle-domain",
		.of_match_table = psci_of_match,
		.sync_state = psci_cpuidle_domain_sync_state,
	},
};

static int __init psci_idle_init_domains(void)
{
	return platform_driver_register(&psci_cpuidle_domain_driver);
}
subsys_initcall(psci_idle_init_domains);
linux-master
drivers/cpuidle/cpuidle-psci-domain.c
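psci_pd_init() above illustrates the kernel's goto-ladder error handling: each failure jumps to a label that undoes only the steps that had already succeeded, in reverse order, so there is a single exit path per partial state. A compact userspace sketch of the same shape; setup(), the mallocs, and the simulated step-3 failure are invented for illustration.

#include <stdio.h>
#include <stdlib.h>

static int setup(void)
{
	char *a, *b;
	int ret = -1;

	a = malloc(16);		/* step 1 */
	if (!a)
		goto out;

	b = malloc(16);		/* step 2 */
	if (!b)
		goto free_a;

	/* pretend step 3 fails: unwind steps 2 then 1 */
	goto free_b;

free_b:
	free(b);
free_a:
	free(a);
out:
	return ret;
}

int main(void)
{
	printf("setup() = %d\n", setup());
	return 0;
}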
// SPDX-License-Identifier: GPL-2.0-only
/*
 * RISC-V SBI CPU idle driver.
 *
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 * Copyright (c) 2022 Ventana Micro Systems Inc.
 */

#define pr_fmt(fmt) "cpuidle-riscv-sbi: " fmt

#include <linux/cpuhotplug.h>
#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/cpu_cooling.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <asm/cpuidle.h>
#include <asm/sbi.h>
#include <asm/smp.h>
#include <asm/suspend.h>

#include "dt_idle_states.h"
#include "dt_idle_genpd.h"

struct sbi_cpuidle_data {
	u32 *states;
	struct device *dev;
};

struct sbi_domain_state {
	bool available;
	u32 state;
};

static DEFINE_PER_CPU_READ_MOSTLY(struct sbi_cpuidle_data, sbi_cpuidle_data);
static DEFINE_PER_CPU(struct sbi_domain_state, domain_state);
static bool sbi_cpuidle_use_osi;
static bool sbi_cpuidle_use_cpuhp;
static bool sbi_cpuidle_pd_allow_domain_state;

static inline void sbi_set_domain_state(u32 state)
{
	struct sbi_domain_state *data = this_cpu_ptr(&domain_state);

	data->available = true;
	data->state = state;
}

static inline u32 sbi_get_domain_state(void)
{
	struct sbi_domain_state *data = this_cpu_ptr(&domain_state);

	return data->state;
}

static inline void sbi_clear_domain_state(void)
{
	struct sbi_domain_state *data = this_cpu_ptr(&domain_state);

	data->available = false;
}

static inline bool sbi_is_domain_state_available(void)
{
	struct sbi_domain_state *data = this_cpu_ptr(&domain_state);

	return data->available;
}

static int sbi_suspend_finisher(unsigned long suspend_type,
				unsigned long resume_addr,
				unsigned long opaque)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND,
			suspend_type, resume_addr, opaque, 0, 0, 0);

	return (ret.error) ? sbi_err_map_linux_errno(ret.error) : 0;
}

static int sbi_suspend(u32 state)
{
	if (state & SBI_HSM_SUSP_NON_RET_BIT)
		return cpu_suspend(state, sbi_suspend_finisher);
	else
		return sbi_suspend_finisher(state, 0, 0);
}

static __cpuidle int sbi_cpuidle_enter_state(struct cpuidle_device *dev,
					     struct cpuidle_driver *drv, int idx)
{
	u32 *states = __this_cpu_read(sbi_cpuidle_data.states);
	u32 state = states[idx];

	if (state & SBI_HSM_SUSP_NON_RET_BIT)
		return CPU_PM_CPU_IDLE_ENTER_PARAM(sbi_suspend, idx, state);
	else
		return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(sbi_suspend,
							     idx, state);
}

static __cpuidle int __sbi_enter_domain_idle_state(struct cpuidle_device *dev,
						   struct cpuidle_driver *drv,
						   int idx, bool s2idle)
{
	struct sbi_cpuidle_data *data = this_cpu_ptr(&sbi_cpuidle_data);
	u32 *states = data->states;
	struct device *pd_dev = data->dev;
	u32 state;
	int ret;

	ret = cpu_pm_enter();
	if (ret)
		return -1;

	/* Do runtime PM to manage a hierarchical CPU topology. */
	if (s2idle)
		dev_pm_genpd_suspend(pd_dev);
	else
		pm_runtime_put_sync_suspend(pd_dev);

	ct_cpuidle_enter();

	if (sbi_is_domain_state_available())
		state = sbi_get_domain_state();
	else
		state = states[idx];

	ret = sbi_suspend(state) ? -1 : idx;

	ct_cpuidle_exit();

	if (s2idle)
		dev_pm_genpd_resume(pd_dev);
	else
		pm_runtime_get_sync(pd_dev);

	cpu_pm_exit();

	/* Clear the domain state to start fresh when back from idle. */
	sbi_clear_domain_state();

	return ret;
}

static int sbi_enter_domain_idle_state(struct cpuidle_device *dev,
				       struct cpuidle_driver *drv, int idx)
{
	return __sbi_enter_domain_idle_state(dev, drv, idx, false);
}

static int sbi_enter_s2idle_domain_idle_state(struct cpuidle_device *dev,
					      struct cpuidle_driver *drv,
					      int idx)
{
	return __sbi_enter_domain_idle_state(dev, drv, idx, true);
}

static int sbi_cpuidle_cpuhp_up(unsigned int cpu)
{
	struct device *pd_dev = __this_cpu_read(sbi_cpuidle_data.dev);

	if (pd_dev)
		pm_runtime_get_sync(pd_dev);

	return 0;
}

static int sbi_cpuidle_cpuhp_down(unsigned int cpu)
{
	struct device *pd_dev = __this_cpu_read(sbi_cpuidle_data.dev);

	if (pd_dev) {
		pm_runtime_put_sync(pd_dev);
		/* Clear domain state to start fresh at next online. */
		sbi_clear_domain_state();
	}

	return 0;
}

static void sbi_idle_init_cpuhp(void)
{
	int err;

	if (!sbi_cpuidle_use_cpuhp)
		return;

	err = cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING,
					"cpuidle/sbi:online",
					sbi_cpuidle_cpuhp_up,
					sbi_cpuidle_cpuhp_down);
	if (err)
		pr_warn("Failed %d while setup cpuhp state\n", err);
}

static const struct of_device_id sbi_cpuidle_state_match[] = {
	{ .compatible = "riscv,idle-state",
	  .data = sbi_cpuidle_enter_state },
	{ },
};

static bool sbi_suspend_state_is_valid(u32 state)
{
	if (state > SBI_HSM_SUSPEND_RET_DEFAULT &&
	    state < SBI_HSM_SUSPEND_RET_PLATFORM)
		return false;
	if (state > SBI_HSM_SUSPEND_NON_RET_DEFAULT &&
	    state < SBI_HSM_SUSPEND_NON_RET_PLATFORM)
		return false;
	return true;
}

static int sbi_dt_parse_state_node(struct device_node *np, u32 *state)
{
	int err = of_property_read_u32(np, "riscv,sbi-suspend-param", state);

	if (err) {
		pr_warn("%pOF missing riscv,sbi-suspend-param property\n", np);
		return err;
	}

	if (!sbi_suspend_state_is_valid(*state)) {
		pr_warn("Invalid SBI suspend state %#x\n", *state);
		return -EINVAL;
	}

	return 0;
}

static int sbi_dt_cpu_init_topology(struct cpuidle_driver *drv,
				    struct sbi_cpuidle_data *data,
				    unsigned int state_count, int cpu)
{
	/* Currently limit the hierarchical topology to be used in OSI mode. */
	if (!sbi_cpuidle_use_osi)
		return 0;

	data->dev = dt_idle_attach_cpu(cpu, "sbi");
	if (IS_ERR_OR_NULL(data->dev))
		return PTR_ERR_OR_ZERO(data->dev);

	/*
	 * Using the deepest state for the CPU to trigger a potential selection
	 * of a shared state for the domain, assumes the domain states are all
	 * deeper states.
	 */
	drv->states[state_count - 1].flags |= CPUIDLE_FLAG_RCU_IDLE;
	drv->states[state_count - 1].enter = sbi_enter_domain_idle_state;
	drv->states[state_count - 1].enter_s2idle =
		sbi_enter_s2idle_domain_idle_state;
	sbi_cpuidle_use_cpuhp = true;

	return 0;
}

static int sbi_cpuidle_dt_init_states(struct device *dev,
				      struct cpuidle_driver *drv,
				      unsigned int cpu,
				      unsigned int state_count)
{
	struct sbi_cpuidle_data *data = per_cpu_ptr(&sbi_cpuidle_data, cpu);
	struct device_node *state_node;
	struct device_node *cpu_node;
	u32 *states;
	int i, ret;

	cpu_node = of_cpu_device_node_get(cpu);
	if (!cpu_node)
		return -ENODEV;

	states = devm_kcalloc(dev, state_count, sizeof(*states), GFP_KERNEL);
	if (!states) {
		ret = -ENOMEM;
		goto fail;
	}

	/* Parse SBI specific details from state DT nodes */
	for (i = 1; i < state_count; i++) {
		state_node = of_get_cpu_state_node(cpu_node, i - 1);
		if (!state_node)
			break;

		ret = sbi_dt_parse_state_node(state_node, &states[i]);
		of_node_put(state_node);

		if (ret)
			return ret;

		pr_debug("sbi-state %#x index %d\n", states[i], i);
	}
	if (i != state_count) {
		ret = -ENODEV;
		goto fail;
	}

	/* Initialize optional data, used for the hierarchical topology. */
	ret = sbi_dt_cpu_init_topology(drv, data, state_count, cpu);
	if (ret < 0)
		return ret;

	/* Store states in the per-cpu struct. */
	data->states = states;

fail:
	of_node_put(cpu_node);

	return ret;
}

static void sbi_cpuidle_deinit_cpu(int cpu)
{
	struct sbi_cpuidle_data *data = per_cpu_ptr(&sbi_cpuidle_data, cpu);

	dt_idle_detach_cpu(data->dev);
	sbi_cpuidle_use_cpuhp = false;
}

static int sbi_cpuidle_init_cpu(struct device *dev, int cpu)
{
	struct cpuidle_driver *drv;
	unsigned int state_count = 0;
	int ret = 0;

	drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;

	drv->name = "sbi_cpuidle";
	drv->owner = THIS_MODULE;
	drv->cpumask = (struct cpumask *)cpumask_of(cpu);

	/* RISC-V architectural WFI to be represented as state index 0. */
	drv->states[0].enter = sbi_cpuidle_enter_state;
	drv->states[0].exit_latency = 1;
	drv->states[0].target_residency = 1;
	drv->states[0].power_usage = UINT_MAX;
	strcpy(drv->states[0].name, "WFI");
	strcpy(drv->states[0].desc, "RISC-V WFI");

	/*
	 * If no DT idle states are detected (ret == 0) let the driver
	 * initialization fail accordingly since there is no reason to
	 * initialize the idle driver if only wfi is supported, the
	 * default architectural back-end already executes wfi
	 * on idle entry.
	 */
	ret = dt_init_idle_driver(drv, sbi_cpuidle_state_match, 1);
	if (ret <= 0) {
		pr_debug("HART%ld: failed to parse DT idle states\n",
			 cpuid_to_hartid_map(cpu));
		return ret ? : -ENODEV;
	}
	state_count = ret + 1; /* Include WFI state as well */

	/* Initialize idle states from DT. */
	ret = sbi_cpuidle_dt_init_states(dev, drv, cpu, state_count);
	if (ret) {
		pr_err("HART%ld: failed to init idle states\n",
		       cpuid_to_hartid_map(cpu));
		return ret;
	}

	ret = cpuidle_register(drv, NULL);
	if (ret)
		goto deinit;

	cpuidle_cooling_register(drv);

	return 0;
deinit:
	sbi_cpuidle_deinit_cpu(cpu);
	return ret;
}

static void sbi_cpuidle_domain_sync_state(struct device *dev)
{
	/*
	 * All devices have now been attached/probed to the PM domain
	 * topology, hence it's fine to allow domain states to be picked.
	 */
	sbi_cpuidle_pd_allow_domain_state = true;
}

#ifdef CONFIG_DT_IDLE_GENPD

static int sbi_cpuidle_pd_power_off(struct generic_pm_domain *pd)
{
	struct genpd_power_state *state = &pd->states[pd->state_idx];
	u32 *pd_state;

	if (!state->data)
		return 0;

	if (!sbi_cpuidle_pd_allow_domain_state)
		return -EBUSY;

	/* OSI mode is enabled, set the corresponding domain state. */
	pd_state = state->data;
	sbi_set_domain_state(*pd_state);

	return 0;
}

struct sbi_pd_provider {
	struct list_head link;
	struct device_node *node;
};

static LIST_HEAD(sbi_pd_providers);

static int sbi_pd_init(struct device_node *np)
{
	struct generic_pm_domain *pd;
	struct sbi_pd_provider *pd_provider;
	struct dev_power_governor *pd_gov;
	int ret = -ENOMEM;

	pd = dt_idle_pd_alloc(np, sbi_dt_parse_state_node);
	if (!pd)
		goto out;

	pd_provider = kzalloc(sizeof(*pd_provider), GFP_KERNEL);
	if (!pd_provider)
		goto free_pd;

	pd->flags |= GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_CPU_DOMAIN;

	/* Allow power off when OSI is available. */
	if (sbi_cpuidle_use_osi)
		pd->power_off = sbi_cpuidle_pd_power_off;
	else
		pd->flags |= GENPD_FLAG_ALWAYS_ON;

	/* Use governor for CPU PM domains if it has some states to manage. */
	pd_gov = pd->states ? &pm_domain_cpu_gov : NULL;

	ret = pm_genpd_init(pd, pd_gov, false);
	if (ret)
		goto free_pd_prov;

	ret = of_genpd_add_provider_simple(np, pd);
	if (ret)
		goto remove_pd;

	pd_provider->node = of_node_get(np);
	list_add(&pd_provider->link, &sbi_pd_providers);

	pr_debug("init PM domain %s\n", pd->name);
	return 0;

remove_pd:
	pm_genpd_remove(pd);
free_pd_prov:
	kfree(pd_provider);
free_pd:
	dt_idle_pd_free(pd);
out:
	pr_err("failed to init PM domain ret=%d %pOF\n", ret, np);
	return ret;
}

static void sbi_pd_remove(void)
{
	struct sbi_pd_provider *pd_provider, *it;
	struct generic_pm_domain *genpd;

	list_for_each_entry_safe(pd_provider, it, &sbi_pd_providers, link) {
		of_genpd_del_provider(pd_provider->node);

		genpd = of_genpd_remove_last(pd_provider->node);
		if (!IS_ERR(genpd))
			kfree(genpd);

		of_node_put(pd_provider->node);
		list_del(&pd_provider->link);
		kfree(pd_provider);
	}
}

static int sbi_genpd_probe(struct device_node *np)
{
	struct device_node *node;
	int ret = 0, pd_count = 0;

	if (!np)
		return -ENODEV;

	/*
	 * Parse child nodes for the "#power-domain-cells" property and
	 * initialize a genpd/genpd-of-provider pair when it's found.
	 */
	for_each_child_of_node(np, node) {
		if (!of_property_present(node, "#power-domain-cells"))
			continue;

		ret = sbi_pd_init(node);
		if (ret)
			goto put_node;

		pd_count++;
	}

	/* Bail out if not using the hierarchical CPU topology. */
	if (!pd_count)
		goto no_pd;

	/* Link genpd masters/subdomains to model the CPU topology. */
	ret = dt_idle_pd_init_topology(np);
	if (ret)
		goto remove_pd;

	return 0;

put_node:
	of_node_put(node);
remove_pd:
	sbi_pd_remove();
	pr_err("failed to create CPU PM domains ret=%d\n", ret);
no_pd:
	return ret;
}

#else

static inline int sbi_genpd_probe(struct device_node *np)
{
	return 0;
}

#endif

static int sbi_cpuidle_probe(struct platform_device *pdev)
{
	int cpu, ret;
	struct cpuidle_driver *drv;
	struct cpuidle_device *dev;
	struct device_node *np, *pds_node;

	/* Detect OSI support based on CPU DT nodes */
	sbi_cpuidle_use_osi = true;
	for_each_possible_cpu(cpu) {
		np = of_cpu_device_node_get(cpu);
		if (np &&
		    of_property_present(np, "power-domains") &&
		    of_property_present(np, "power-domain-names")) {
			continue;
		} else {
			sbi_cpuidle_use_osi = false;
			break;
		}
	}

	/* Populate generic power domains from DT nodes */
	pds_node = of_find_node_by_path("/cpus/power-domains");
	if (pds_node) {
		ret = sbi_genpd_probe(pds_node);
		of_node_put(pds_node);
		if (ret)
			return ret;
	}

	/* Initialize CPU idle driver for each CPU */
	for_each_possible_cpu(cpu) {
		ret = sbi_cpuidle_init_cpu(&pdev->dev, cpu);
		if (ret) {
			pr_debug("HART%ld: idle driver init failed\n",
				 cpuid_to_hartid_map(cpu));
			goto out_fail;
		}
	}

	/* Setup CPU hotplug notifiers */
	sbi_idle_init_cpuhp();

	pr_info("idle driver registered for all CPUs\n");

	return 0;

out_fail:
	while (--cpu >= 0) {
		dev = per_cpu(cpuidle_devices, cpu);
		drv = cpuidle_get_cpu_driver(dev);
		cpuidle_unregister(drv);
		sbi_cpuidle_deinit_cpu(cpu);
	}

	return ret;
}

static struct platform_driver sbi_cpuidle_driver = {
	.probe = sbi_cpuidle_probe,
	.driver = {
		.name = "sbi-cpuidle",
		.sync_state = sbi_cpuidle_domain_sync_state,
	},
};

static int __init sbi_cpuidle_init(void)
{
	int ret;
	struct platform_device *pdev;

	/*
	 * The SBI HSM suspend function is only available when:
	 * 1) SBI version is 0.3 or higher
	 * 2) SBI HSM extension is available
	 */
	if ((sbi_spec_version < sbi_mk_version(0, 3)) ||
	    !sbi_probe_extension(SBI_EXT_HSM)) {
		pr_info("HSM suspend not available\n");
		return 0;
	}

	ret = platform_driver_register(&sbi_cpuidle_driver);
	if (ret)
		return ret;

	pdev = platform_device_register_simple("sbi-cpuidle",
					       -1, NULL, 0);
	if (IS_ERR(pdev)) {
		platform_driver_unregister(&sbi_cpuidle_driver);
		return PTR_ERR(pdev);
	}

	return 0;
}
device_initcall(sbi_cpuidle_init);
linux-master
drivers/cpuidle/cpuidle-riscv-sbi.c
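sbi_suspend_state_is_valid() above rejects a suspend parameter that falls in either reserved gap between a standard default value and the start of the corresponding platform-specific range. A userspace mirror of that check; the four constants here are illustrative stand-ins for the SBI_HSM_SUSPEND_* values defined in <asm/sbi.h>, not authoritative.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the SBI HSM suspend-type ranges. */
#define RET_DEFAULT       0x00000000u
#define RET_PLATFORM      0x10000000u
#define NON_RET_DEFAULT   0x80000000u
#define NON_RET_PLATFORM  0x90000000u

/* Reject values inside either reserved gap (exclusive bounds). */
static bool suspend_state_is_valid(uint32_t state)
{
	if (state > RET_DEFAULT && state < RET_PLATFORM)
		return false;
	if (state > NON_RET_DEFAULT && state < NON_RET_PLATFORM)
		return false;
	return true;
}

int main(void)
{
	printf("%d %d\n",
	       suspend_state_is_valid(RET_DEFAULT),	/* 1: standard value */
	       suspend_state_is_valid(0x00000001u));	/* 0: reserved gap */
	return 0;
}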
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * coupled.c - helper functions to enter the same idle state on multiple cpus
 *
 * Copyright (c) 2011 Google, Inc.
 *
 * Author: Colin Cross <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "cpuidle.h"

/**
 * DOC: Coupled cpuidle states
 *
 * On some ARM SMP SoCs (OMAP4460, Tegra 2, and probably more), the
 * cpus cannot be independently powered down, either due to
 * sequencing restrictions (on Tegra 2, cpu 0 must be the last to
 * power down), or due to HW bugs (on OMAP4460, a cpu powering up
 * will corrupt the gic state unless the other cpu runs a work
 * around). Each cpu has a power state that it can enter without
 * coordinating with the other cpu (usually Wait For Interrupt, or
 * WFI), and one or more "coupled" power states that affect blocks
 * shared between the cpus (L2 cache, interrupt controller, and
 * sometimes the whole SoC). Entering a coupled power state must
 * be tightly controlled on both cpus.
 *
 * This file implements a solution, where each cpu will wait in the
 * WFI state until all cpus are ready to enter a coupled state, at
 * which point the coupled state function will be called on all
 * cpus at approximately the same time.
 *
 * Once all cpus are ready to enter idle, they are woken by an smp
 * cross call. At this point, there is a chance that one of the
 * cpus will find work to do, and choose not to enter idle. A
 * final pass is needed to guarantee that all cpus will call the
 * power state enter function at the same time. During this pass,
 * each cpu will increment the ready counter, and continue once the
 * ready counter matches the number of online coupled cpus. If any
 * cpu exits idle, the other cpus will decrement their counter and
 * retry.
 *
 * requested_state stores the deepest coupled idle state each cpu
 * is ready for. It is assumed that the states are indexed from
 * shallowest (highest power, lowest exit latency) to deepest
 * (lowest power, highest exit latency). The requested_state
 * variable is not locked. It is only written from the cpu that
 * it stores (or by the on/offlining cpu if that cpu is offline),
 * and only read once all the cpus that are ready for the coupled
 * idle state are no longer updating it.
 *
 * Three atomic counters are used. alive_count tracks the number
 * of cpus in the coupled set that are currently or soon will be
 * online. waiting_count tracks the number of cpus that are in
 * the waiting loop, in the ready loop, or in the coupled idle state.
 * ready_count tracks the number of cpus that are in the ready loop
 * or in the coupled idle state.
 *
 * To use coupled cpuidle states, a cpuidle driver must:
 *
 *    Set struct cpuidle_device.coupled_cpus to the mask of all
 *    coupled cpus, usually the same as cpu_possible_mask if all cpus
 *    are part of the same cluster. The coupled_cpus mask must be
 *    set in the struct cpuidle_device for each cpu.
 *
 *    Set struct cpuidle_device.safe_state to a state that is not a
 *    coupled state. This is usually WFI.
 *
 *    Set CPUIDLE_FLAG_COUPLED in struct cpuidle_state.flags for each
 *    state that affects multiple cpus. This function is guaranteed to be
 *    called on all cpus at approximately the same time. The driver
 *    should ensure that the cpus all abort together if any cpu tries
 *    to abort once the function is called. The function should return
 *    with interrupts still disabled.
 */

/**
 * struct cpuidle_coupled - data for set of cpus that share a coupled idle state
 * @coupled_cpus: mask of cpus that are part of the coupled set
 * @requested_state: array of requested states for cpus in the coupled set
 * @ready_waiting_counts: combined count of cpus in ready or waiting loops
 * @abort_barrier: synchronisation point for abort cases
 * @online_count: count of cpus that are online
 * @refcnt: reference count of cpuidle devices that are using this struct
 * @prevent: flag to prevent coupled idle while a cpu is hotplugging
 */
struct cpuidle_coupled {
	cpumask_t coupled_cpus;
	int requested_state[NR_CPUS];
	atomic_t ready_waiting_counts;
	atomic_t abort_barrier;
	int online_count;
	int refcnt;
	int prevent;
};

#define WAITING_BITS 16
#define MAX_WAITING_CPUS (1 << WAITING_BITS)
#define WAITING_MASK (MAX_WAITING_CPUS - 1)
#define READY_MASK (~WAITING_MASK)

#define CPUIDLE_COUPLED_NOT_IDLE	(-1)

static DEFINE_PER_CPU(call_single_data_t, cpuidle_coupled_poke_cb);

/*
 * The cpuidle_coupled_poke_pending mask is used to avoid calling
 * __smp_call_function_single with the per cpu call_single_data_t struct already
 * in use. This prevents a deadlock where two cpus are waiting for each other's
 * call_single_data_t struct to be available.
 */
static cpumask_t cpuidle_coupled_poke_pending;

/*
 * The cpuidle_coupled_poked mask is used to ensure that each cpu has been poked
 * once to minimize entering the ready loop with a poke pending, which would
 * require aborting and retrying.
 */
static cpumask_t cpuidle_coupled_poked;

/**
 * cpuidle_coupled_parallel_barrier - synchronize all online coupled cpus
 * @dev: cpuidle_device of the calling cpu
 * @a:   atomic variable to hold the barrier
 *
 * No caller to this function will return from this function until all online
 * cpus in the same coupled group have called this function. Once any caller
 * has returned from this function, the barrier is immediately available for
 * reuse.
 *
 * The atomic variable must be initialized to 0 before any cpu calls
 * this function, and will be reset to 0 before any cpu returns from this
 * function.
 *
 * Must only be called from within a coupled idle state handler
 * (state.enter when state.flags has CPUIDLE_FLAG_COUPLED set).
 *
 * Provides full smp barrier semantics before and after calling.
 */
void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a)
{
	int n = dev->coupled->online_count;

	smp_mb__before_atomic();
	atomic_inc(a);

	while (atomic_read(a) < n)
		cpu_relax();

	if (atomic_inc_return(a) == n * 2) {
		atomic_set(a, 0);
		return;
	}

	while (atomic_read(a) > n)
		cpu_relax();
}

/**
 * cpuidle_state_is_coupled - check if a state is part of a coupled set
 * @drv: struct cpuidle_driver for the platform
 * @state: index of the target state in drv->states
 *
 * Returns true if the target state is coupled with cpus besides this one
 */
bool cpuidle_state_is_coupled(struct cpuidle_driver *drv, int state)
{
	return drv->states[state].flags & CPUIDLE_FLAG_COUPLED;
}

/**
 * cpuidle_coupled_state_verify - check if the coupled states are correctly set.
 * @drv: struct cpuidle_driver for the platform
 *
 * Returns 0 for valid state values, a negative error code otherwise:
 *  * -EINVAL if any coupled state(safe_state_index) is wrongly set.
 */
int cpuidle_coupled_state_verify(struct cpuidle_driver *drv)
{
	int i;

	for (i = drv->state_count - 1; i >= 0; i--) {
		if (cpuidle_state_is_coupled(drv, i) &&
		    (drv->safe_state_index == i ||
		     drv->safe_state_index < 0 ||
		     drv->safe_state_index >= drv->state_count))
			return -EINVAL;
	}

	return 0;
}

/**
 * cpuidle_coupled_set_ready - mark a cpu as ready
 * @coupled: the struct coupled that contains the current cpu
 */
static inline void cpuidle_coupled_set_ready(struct cpuidle_coupled *coupled)
{
	atomic_add(MAX_WAITING_CPUS, &coupled->ready_waiting_counts);
}

/**
 * cpuidle_coupled_set_not_ready - mark a cpu as not ready
 * @coupled: the struct coupled that contains the current cpu
 *
 * Decrements the ready counter, unless the ready (and thus the waiting) counter
 * is equal to the number of online cpus. Prevents a race where one cpu
 * decrements the waiting counter and then re-increments it just before another
 * cpu has decremented its ready counter, leading to the ready counter going
 * down from the number of online cpus without going through the coupled idle
 * state.
 *
 * Returns 0 if the counter was decremented successfully, -EINVAL if the ready
 * counter was equal to the number of online cpus.
 */
static inline int cpuidle_coupled_set_not_ready(struct cpuidle_coupled *coupled)
{
	int all;
	int ret;

	all = coupled->online_count | (coupled->online_count << WAITING_BITS);
	ret = atomic_add_unless(&coupled->ready_waiting_counts,
				-MAX_WAITING_CPUS, all);

	return ret ? 0 : -EINVAL;
}

/**
 * cpuidle_coupled_no_cpus_ready - check if no cpus in a coupled set are ready
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns true if all of the cpus in a coupled set are out of the ready loop.
 */
static inline int cpuidle_coupled_no_cpus_ready(struct cpuidle_coupled *coupled)
{
	int r = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS;

	return r == 0;
}

/**
 * cpuidle_coupled_cpus_ready - check if all cpus in a coupled set are ready
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns true if all cpus coupled to this target state are in the ready loop
 */
static inline bool cpuidle_coupled_cpus_ready(struct cpuidle_coupled *coupled)
{
	int r = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS;

	return r == coupled->online_count;
}

/**
 * cpuidle_coupled_cpus_waiting - check if all cpus in a coupled set are waiting
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns true if all cpus coupled to this target state are in the wait loop
 */
static inline bool cpuidle_coupled_cpus_waiting(struct cpuidle_coupled *coupled)
{
	int w = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK;

	return w == coupled->online_count;
}

/**
 * cpuidle_coupled_no_cpus_waiting - check if no cpus in coupled set are waiting
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns true if all of the cpus in a coupled set are out of the waiting loop.
 */
static inline int cpuidle_coupled_no_cpus_waiting(struct cpuidle_coupled *coupled)
{
	int w = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK;

	return w == 0;
}

/**
 * cpuidle_coupled_get_state - determine the deepest idle state
 * @dev: struct cpuidle_device for this cpu
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns the deepest idle state that all coupled cpus can enter
 */
static inline int cpuidle_coupled_get_state(struct cpuidle_device *dev,
		struct cpuidle_coupled *coupled)
{
	int i;
	int state = INT_MAX;

	/*
	 * Read barrier ensures that read of requested_state is ordered after
	 * reads of ready_count. Matches the write barriers
	 * cpuidle_set_state_waiting.
	 */
	smp_rmb();

	for_each_cpu(i, &coupled->coupled_cpus)
		if (cpu_online(i) && coupled->requested_state[i] < state)
			state = coupled->requested_state[i];

	return state;
}

static void cpuidle_coupled_handle_poke(void *info)
{
	int cpu = (unsigned long)info;

	cpumask_set_cpu(cpu, &cpuidle_coupled_poked);
	cpumask_clear_cpu(cpu, &cpuidle_coupled_poke_pending);
}

/**
 * cpuidle_coupled_poke - wake up a cpu that may be waiting
 * @cpu: target cpu
 *
 * Ensures that the target cpu exits its waiting idle state (if it is in it)
 * and will see updates to waiting_count before it re-enters its waiting idle
 * state.
 *
 * If cpuidle_coupled_poked_mask is already set for the target cpu, that cpu
 * either has or will soon have a pending IPI that will wake it out of idle,
 * or it is currently processing the IPI and is not in idle.
 */
static void cpuidle_coupled_poke(int cpu)
{
	call_single_data_t *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);

	if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poke_pending))
		smp_call_function_single_async(cpu, csd);
}

/**
 * cpuidle_coupled_poke_others - wake up all other cpus that may be waiting
 * @this_cpu: target cpu
 * @coupled: the struct coupled that contains the current cpu
 *
 * Calls cpuidle_coupled_poke on all other online cpus.
 */
static void cpuidle_coupled_poke_others(int this_cpu,
		struct cpuidle_coupled *coupled)
{
	int cpu;

	for_each_cpu(cpu, &coupled->coupled_cpus)
		if (cpu != this_cpu && cpu_online(cpu))
			cpuidle_coupled_poke(cpu);
}

/**
 * cpuidle_coupled_set_waiting - mark this cpu as in the wait loop
 * @cpu: target cpu
 * @coupled: the struct coupled that contains the current cpu
 * @next_state: the index in drv->states of the requested state for this cpu
 *
 * Updates the requested idle state for the specified cpuidle device.
 * Returns the number of waiting cpus.
 */
static int cpuidle_coupled_set_waiting(int cpu,
		struct cpuidle_coupled *coupled, int next_state)
{
	coupled->requested_state[cpu] = next_state;

	/*
	 * The atomic_inc_return provides a write barrier to order the write
	 * to requested_state with the later write that increments ready_count.
	 */
	return atomic_inc_return(&coupled->ready_waiting_counts) & WAITING_MASK;
}

/**
 * cpuidle_coupled_set_not_waiting - mark this cpu as leaving the wait loop
 * @cpu: target cpu
 * @coupled: the struct coupled that contains the current cpu
 *
 * Removes the requested idle state for the specified cpuidle device.
 */
static void cpuidle_coupled_set_not_waiting(int cpu,
		struct cpuidle_coupled *coupled)
{
	/*
	 * Decrementing waiting count can race with incrementing it in
	 * cpuidle_coupled_set_waiting, but that's OK. Worst case, some
	 * cpus will increment ready_count and then spin until they
	 * notice that this cpu has cleared its requested_state.
	 */
	atomic_dec(&coupled->ready_waiting_counts);

	coupled->requested_state[cpu] = CPUIDLE_COUPLED_NOT_IDLE;
}

/**
 * cpuidle_coupled_set_done - mark this cpu as leaving the ready loop
 * @cpu: the current cpu
 * @coupled: the struct coupled that contains the current cpu
 *
 * Marks this cpu as no longer in the ready and waiting loops. Decrements
 * the waiting count first to prevent another cpu looping back in and seeing
 * this cpu as waiting just before it exits idle.
 */
static void cpuidle_coupled_set_done(int cpu, struct cpuidle_coupled *coupled)
{
	cpuidle_coupled_set_not_waiting(cpu, coupled);
	atomic_sub(MAX_WAITING_CPUS, &coupled->ready_waiting_counts);
}

/**
 * cpuidle_coupled_clear_pokes - spin until the poke interrupt is processed
 * @cpu: this cpu
 *
 * Turns on interrupts and spins until any outstanding poke interrupts have
 * been processed and the poke bit has been cleared.
 *
 * Other interrupts may also be processed while interrupts are enabled, so
 * need_resched() must be tested after this function returns to make sure
 * the interrupt didn't schedule work that should take the cpu out of idle.
 *
 * Returns 0 if no poke was pending, 1 if a poke was cleared.
 */
static int cpuidle_coupled_clear_pokes(int cpu)
{
	if (!cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending))
		return 0;

	local_irq_enable();
	while (cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending))
		cpu_relax();
	local_irq_disable();

	return 1;
}

static bool cpuidle_coupled_any_pokes_pending(struct cpuidle_coupled *coupled)
{
	cpumask_t cpus;
	int ret;

	cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus);
	ret = cpumask_and(&cpus, &cpuidle_coupled_poke_pending, &cpus);

	return ret;
}

/**
 * cpuidle_enter_state_coupled - attempt to enter a state with coupled cpus
 * @dev: struct cpuidle_device for the current cpu
 * @drv: struct cpuidle_driver for the platform
 * @next_state: index of the requested state in drv->states
 *
 * Coordinate with coupled cpus to enter the target state. This is a two
 * stage process. In the first stage, the cpus are operating independently,
 * and may call into cpuidle_enter_state_coupled at completely different times.
 * To save as much power as possible, the first cpus to call this function will
 * go to an intermediate state (the cpuidle_device's safe state), and wait for
 * all the other cpus to call this function. Once all coupled cpus are idle,
 * the second stage will start. Each coupled cpu will spin until all cpus have
 * guaranteed that they will call the target_state.
 *
 * This function must be called with interrupts disabled. It may enable
 * interrupts while preparing for idle, and it will always return with
 * interrupts enabled.
 */
int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int next_state)
{
	int entered_state = -1;
	struct cpuidle_coupled *coupled = dev->coupled;
	int w;

	if (!coupled)
		return -EINVAL;

	while (coupled->prevent) {
		cpuidle_coupled_clear_pokes(dev->cpu);
		if (need_resched()) {
			local_irq_enable();
			return entered_state;
		}
		entered_state = cpuidle_enter_state(dev, drv,
			drv->safe_state_index);
		local_irq_disable();
	}

	/* Read barrier ensures online_count is read after prevent is cleared */
	smp_rmb();

reset:
	cpumask_clear_cpu(dev->cpu, &cpuidle_coupled_poked);

	w = cpuidle_coupled_set_waiting(dev->cpu, coupled, next_state);
	/*
	 * If this is the last cpu to enter the waiting state, poke
	 * all the other cpus out of their waiting state so they can
	 * enter a deeper state.
This can race with one of the cpus * exiting the waiting state due to an interrupt and * decrementing waiting_count, see comment below. */ if (w == coupled->online_count) { cpumask_set_cpu(dev->cpu, &cpuidle_coupled_poked); cpuidle_coupled_poke_others(dev->cpu, coupled); } retry: /* * Wait for all coupled cpus to be idle, using the deepest state * allowed for a single cpu. If this was not the poking cpu, wait * for at least one poke before leaving to avoid a race where * two cpus could arrive at the waiting loop at the same time, * but the first of the two to arrive could skip the loop without * processing the pokes from the last to arrive. */ while (!cpuidle_coupled_cpus_waiting(coupled) || !cpumask_test_cpu(dev->cpu, &cpuidle_coupled_poked)) { if (cpuidle_coupled_clear_pokes(dev->cpu)) continue; if (need_resched()) { cpuidle_coupled_set_not_waiting(dev->cpu, coupled); goto out; } if (coupled->prevent) { cpuidle_coupled_set_not_waiting(dev->cpu, coupled); goto out; } entered_state = cpuidle_enter_state(dev, drv, drv->safe_state_index); local_irq_disable(); } cpuidle_coupled_clear_pokes(dev->cpu); if (need_resched()) { cpuidle_coupled_set_not_waiting(dev->cpu, coupled); goto out; } /* * Make sure final poke status for this cpu is visible before setting * cpu as ready. */ smp_wmb(); /* * All coupled cpus are probably idle. There is a small chance that * one of the other cpus just became active. Increment the ready count, * and spin until all coupled cpus have incremented the counter. Once a * cpu has incremented the ready counter, it cannot abort idle and must * spin until either all cpus have incremented the ready counter, or * another cpu leaves idle and decrements the waiting counter. */ cpuidle_coupled_set_ready(coupled); while (!cpuidle_coupled_cpus_ready(coupled)) { /* Check if any other cpus bailed out of idle. */ if (!cpuidle_coupled_cpus_waiting(coupled)) if (!cpuidle_coupled_set_not_ready(coupled)) goto retry; cpu_relax(); } /* * Make sure read of all cpus ready is done before reading pending pokes */ smp_rmb(); /* * There is a small chance that a cpu left and reentered idle after this * cpu saw that all cpus were waiting. The cpu that reentered idle will * have sent this cpu a poke, which will still be pending after the * ready loop. The pending interrupt may be lost by the interrupt * controller when entering the deep idle state. It's not possible to * clear a pending interrupt without turning interrupts on and handling * it, and it's too late to turn on interrupts here, so reset the * coupled idle state of all cpus and retry. */ if (cpuidle_coupled_any_pokes_pending(coupled)) { cpuidle_coupled_set_done(dev->cpu, coupled); /* Wait for all cpus to see the pending pokes */ cpuidle_coupled_parallel_barrier(dev, &coupled->abort_barrier); goto reset; } /* all cpus have acked the coupled state */ next_state = cpuidle_coupled_get_state(dev, coupled); entered_state = cpuidle_enter_state(dev, drv, next_state); cpuidle_coupled_set_done(dev->cpu, coupled); out: /* * Normal cpuidle states are expected to return with irqs enabled. * That leads to an inefficiency where a cpu receiving an interrupt * that brings it out of idle will process that interrupt before * exiting the idle enter function and decrementing ready_count. All * other cpus will need to spin waiting for the cpu that is processing * the interrupt. If the driver returns with interrupts disabled, * all other cpus will loop back into the safe idle state instead of * spinning, saving power. 
* * Calling local_irq_enable here allows coupled states to return with * interrupts disabled, but won't cause problems for drivers that * exit with interrupts enabled. */ local_irq_enable(); /* * Wait until all coupled cpus have exited idle. There is no risk that * a cpu exits and re-enters the ready state because this cpu has * already decremented its waiting_count. */ while (!cpuidle_coupled_no_cpus_ready(coupled)) cpu_relax(); return entered_state; } static void cpuidle_coupled_update_online_cpus(struct cpuidle_coupled *coupled) { cpumask_t cpus; cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus); coupled->online_count = cpumask_weight(&cpus); } /** * cpuidle_coupled_register_device - register a coupled cpuidle device * @dev: struct cpuidle_device for the current cpu * * Called from cpuidle_register_device to handle coupled idle init. Finds the * cpuidle_coupled struct for this set of coupled cpus, or creates one if none * exists yet. */ int cpuidle_coupled_register_device(struct cpuidle_device *dev) { int cpu; struct cpuidle_device *other_dev; call_single_data_t *csd; struct cpuidle_coupled *coupled; if (cpumask_empty(&dev->coupled_cpus)) return 0; for_each_cpu(cpu, &dev->coupled_cpus) { other_dev = per_cpu(cpuidle_devices, cpu); if (other_dev && other_dev->coupled) { coupled = other_dev->coupled; goto have_coupled; } } /* No existing coupled info found, create a new one */ coupled = kzalloc(sizeof(struct cpuidle_coupled), GFP_KERNEL); if (!coupled) return -ENOMEM; coupled->coupled_cpus = dev->coupled_cpus; have_coupled: dev->coupled = coupled; if (WARN_ON(!cpumask_equal(&dev->coupled_cpus, &coupled->coupled_cpus))) coupled->prevent++; cpuidle_coupled_update_online_cpus(coupled); coupled->refcnt++; csd = &per_cpu(cpuidle_coupled_poke_cb, dev->cpu); INIT_CSD(csd, cpuidle_coupled_handle_poke, (void *)(unsigned long)dev->cpu); return 0; } /** * cpuidle_coupled_unregister_device - unregister a coupled cpuidle device * @dev: struct cpuidle_device for the current cpu * * Called from cpuidle_unregister_device to tear down coupled idle. Removes the * cpu from the coupled idle set, and frees the cpuidle_coupled struct if * this was the last cpu in the set. */ void cpuidle_coupled_unregister_device(struct cpuidle_device *dev) { struct cpuidle_coupled *coupled = dev->coupled; if (cpumask_empty(&dev->coupled_cpus)) return; /* Free only when the last device in the set goes away. */ if (!--coupled->refcnt) kfree(coupled); dev->coupled = NULL; } /** * cpuidle_coupled_prevent_idle - prevent cpus from entering a coupled state * @coupled: the struct coupled that contains the cpu that is changing state * * Disables coupled cpuidle on a coupled set of cpus. Used to ensure that * cpu_online_mask doesn't change while cpus are coordinating coupled idle. */ static void cpuidle_coupled_prevent_idle(struct cpuidle_coupled *coupled) { int cpu = get_cpu(); /* Force all cpus out of the waiting loop. */ coupled->prevent++; cpuidle_coupled_poke_others(cpu, coupled); put_cpu(); while (!cpuidle_coupled_no_cpus_waiting(coupled)) cpu_relax(); } /** * cpuidle_coupled_allow_idle - allows cpus to enter a coupled state * @coupled: the struct coupled that contains the cpu that is changing state * * Enables coupled cpuidle on a coupled set of cpus. Used to ensure that * cpu_online_mask doesn't change while cpus are coordinating coupled idle. */ static void cpuidle_coupled_allow_idle(struct cpuidle_coupled *coupled) { int cpu = get_cpu(); /* * Write barrier ensures readers see the new online_count when they * see prevent == 0.
*/ smp_wmb(); coupled->prevent--; /* Force cpus out of the prevent loop. */ cpuidle_coupled_poke_others(cpu, coupled); put_cpu(); } static int coupled_cpu_online(unsigned int cpu) { struct cpuidle_device *dev; mutex_lock(&cpuidle_lock); dev = per_cpu(cpuidle_devices, cpu); if (dev && dev->coupled) { cpuidle_coupled_update_online_cpus(dev->coupled); cpuidle_coupled_allow_idle(dev->coupled); } mutex_unlock(&cpuidle_lock); return 0; } static int coupled_cpu_up_prepare(unsigned int cpu) { struct cpuidle_device *dev; mutex_lock(&cpuidle_lock); dev = per_cpu(cpuidle_devices, cpu); if (dev && dev->coupled) cpuidle_coupled_prevent_idle(dev->coupled); mutex_unlock(&cpuidle_lock); return 0; } static int __init cpuidle_coupled_init(void) { int ret; ret = cpuhp_setup_state_nocalls(CPUHP_CPUIDLE_COUPLED_PREPARE, "cpuidle/coupled:prepare", coupled_cpu_up_prepare, coupled_cpu_online); if (ret) return ret; ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "cpuidle/coupled:online", coupled_cpu_online, coupled_cpu_up_prepare); if (ret < 0) cpuhp_remove_state_nocalls(CPUHP_CPUIDLE_COUPLED_PREPARE); return ret; } core_initcall(cpuidle_coupled_init);
linux-master
drivers/cpuidle/coupled.c
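/*
 * Editor's addition: a minimal sketch (not part of the file above) of
 * what a client of coupled.c looks like. It assumes only what that code
 * guarantees: a coupled state carries CPUIDLE_FLAG_COUPLED,
 * safe_state_index must name a valid non-coupled state (enforced by
 * cpuidle_coupled_state_verify()), and all cpus in dev->coupled_cpus
 * enter the coupled handler together. Every "foo_*" name is hypothetical.
 */
#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/module.h>

static int foo_wfi_enter(struct cpuidle_device *dev,
			 struct cpuidle_driver *drv, int index)
{
	/* Per-cpu safe state: the platform's plain WFI-style idle. */
	return index;
}

static int foo_cluster_off_enter(struct cpuidle_device *dev,
				 struct cpuidle_driver *drv, int index)
{
	/*
	 * coupled.c guarantees every online cpu in dev->coupled_cpus
	 * reaches this handler before any of them proceeds; platform
	 * code would power the cluster down here, optionally lining
	 * the cpus up with cpuidle_coupled_parallel_barrier().
	 */
	return index;
}

static struct cpuidle_driver foo_idle_driver = {
	.name = "foo_idle",
	.owner = THIS_MODULE,
	.states = {
		[0] = {
			.enter = foo_wfi_enter,
			.name = "WFI",
			.desc = "per-cpu clock gating",
			.exit_latency = 1,
			.target_residency = 1,
		},
		[1] = {
			.enter = foo_cluster_off_enter,
			.name = "OFF",
			.desc = "coupled cluster power-off",
			.exit_latency = 5000,
			.target_residency = 10000,
			.flags = CPUIDLE_FLAG_COUPLED,
		},
	},
	.state_count = 2,
	.safe_state_index = 0,	/* must not be the coupled state */
};

/* A probe function would then call cpuidle_register(&foo_idle_driver, cpu_possible_mask). */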
// SPDX-License-Identifier: GPL-2.0-or-later /* * CLPS711X CPU idle driver * * Copyright (C) 2014 Alexander Shiyan <[email protected]> */ #include <linux/cpuidle.h> #include <linux/err.h> #include <linux/io.h> #include <linux/init.h> #include <linux/platform_device.h> #define CLPS711X_CPUIDLE_NAME "clps711x-cpuidle" static void __iomem *clps711x_halt; static int clps711x_cpuidle_halt(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { writel(0xaa, clps711x_halt); return index; } static struct cpuidle_driver clps711x_idle_driver = { .name = CLPS711X_CPUIDLE_NAME, .owner = THIS_MODULE, .states[0] = { .name = "HALT", .desc = "CLPS711X HALT", .enter = clps711x_cpuidle_halt, .exit_latency = 1, }, .state_count = 1, }; static int __init clps711x_cpuidle_probe(struct platform_device *pdev) { clps711x_halt = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(clps711x_halt)) return PTR_ERR(clps711x_halt); return cpuidle_register(&clps711x_idle_driver, NULL); } static struct platform_driver clps711x_cpuidle_driver = { .driver = { .name = CLPS711X_CPUIDLE_NAME, }, }; builtin_platform_driver_probe(clps711x_cpuidle_driver, clps711x_cpuidle_probe);
linux-master
drivers/cpuidle/cpuidle-clps711x.c
// SPDX-License-Identifier: GPL-2.0 /* * cpuidle-pseries - idle state cpuidle driver. * Adapted from drivers/idle/intel_idle.c and * drivers/acpi/processor_idle.c * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/moduleparam.h> #include <linux/cpuidle.h> #include <linux/cpu.h> #include <linux/notifier.h> #include <asm/paca.h> #include <asm/reg.h> #include <asm/machdep.h> #include <asm/firmware.h> #include <asm/runlatch.h> #include <asm/idle.h> #include <asm/plpar_wrappers.h> #include <asm/rtas.h> static struct cpuidle_driver pseries_idle_driver = { .name = "pseries_idle", .owner = THIS_MODULE, }; static int max_idle_state __read_mostly; static struct cpuidle_state *cpuidle_state_table __read_mostly; static u64 snooze_timeout __read_mostly; static bool snooze_timeout_en __read_mostly; static __cpuidle int snooze_loop(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { u64 snooze_exit_time; set_thread_flag(TIF_POLLING_NRFLAG); pseries_idle_prolog(); raw_local_irq_enable(); snooze_exit_time = get_tb() + snooze_timeout; dev->poll_time_limit = false; while (!need_resched()) { HMT_low(); HMT_very_low(); if (likely(snooze_timeout_en) && get_tb() > snooze_exit_time) { /* * Task has not woken up but we are exiting the polling * loop anyway. Require a barrier after polling is * cleared to order subsequent test of need_resched(). */ dev->poll_time_limit = true; clear_thread_flag(TIF_POLLING_NRFLAG); smp_mb(); break; } } HMT_medium(); clear_thread_flag(TIF_POLLING_NRFLAG); raw_local_irq_disable(); pseries_idle_epilog(); return index; } static __cpuidle void check_and_cede_processor(void) { /* * Ensure our interrupt state is properly tracked, * also checks if no interrupt has occurred while we * were soft-disabled */ if (prep_irq_for_idle()) { cede_processor(); #ifdef CONFIG_TRACE_IRQFLAGS /* Ensure that H_CEDE returns with IRQs on */ if (WARN_ON(!(mfmsr() & MSR_EE))) __hard_irq_enable(); #endif } } /* * XCEDE: Extended CEDE states discovered through the * "ibm,get-systems-parameter" RTAS call with the token * CEDE_LATENCY_TOKEN */ /* * Section 7.3.16 System Parameters Option of PAPR version 2.8.1 has a * table with all the parameters to ibm,get-system-parameters. * CEDE_LATENCY_TOKEN corresponds to the token value for Cede Latency * Settings Information. */ #define CEDE_LATENCY_TOKEN 45 /* * If the platform supports the cede latency settings information system * parameter it must provide the following information in the NULL terminated * parameter string: * * a. The first byte is the length “N” of each cede latency setting record minus * one (zero indicates a length of 1 byte). * * b. For each supported cede latency setting a cede latency setting record * consisting of the first “N” bytes as per the following table. * * ----------------------------- * | Field | Field | * | Name | Length | * ----------------------------- * | Cede Latency | 1 Byte | * | Specifier Value | | * ----------------------------- * | Maximum wakeup | | * | latency in | 8 Bytes | * | tb-ticks | | * ----------------------------- * | Responsive to | | * | external | 1 Byte | * | interrupts | | * ----------------------------- * * This version has cede latency record size = 10. * * The structure xcede_latency_payload represents a) and b) with * xcede_latency_record representing the table in b). * * xcede_latency_parameter is what gets returned by * ibm,get-systems-parameter RTAS call when made with * CEDE_LATENCY_TOKEN. 
* * These structures are only used to represent the data obtained by the RTAS * call. The data is in big-endian. */ struct xcede_latency_record { u8 hint; __be64 latency_ticks; u8 wake_on_irqs; } __packed; // Make space for 16 records, which "should be enough". struct xcede_latency_payload { u8 record_size; struct xcede_latency_record records[16]; } __packed; struct xcede_latency_parameter { __be16 payload_size; struct xcede_latency_payload payload; u8 null_char; } __packed; static unsigned int nr_xcede_records; static struct xcede_latency_parameter xcede_latency_parameter __initdata; static int __init parse_cede_parameters(void) { struct xcede_latency_payload *payload; u32 total_xcede_records_size; u8 xcede_record_size; u16 payload_size; int ret, i; ret = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1, NULL, CEDE_LATENCY_TOKEN, __pa(&xcede_latency_parameter), sizeof(xcede_latency_parameter)); if (ret) { pr_err("xcede: Error parsing CEDE_LATENCY_TOKEN\n"); return ret; } payload_size = be16_to_cpu(xcede_latency_parameter.payload_size); payload = &xcede_latency_parameter.payload; xcede_record_size = payload->record_size + 1; if (xcede_record_size != sizeof(struct xcede_latency_record)) { pr_err("xcede: Expected record-size %lu. Observed size %u.\n", sizeof(struct xcede_latency_record), xcede_record_size); return -EINVAL; } pr_info("xcede: xcede_record_size = %d\n", xcede_record_size); /* * Since the payload_size includes the last NULL byte and the * xcede_record_size, the remaining bytes correspond to array of all * cede_latency settings. */ total_xcede_records_size = payload_size - 2; nr_xcede_records = total_xcede_records_size / xcede_record_size; for (i = 0; i < nr_xcede_records; i++) { struct xcede_latency_record *record = &payload->records[i]; u64 latency_ticks = be64_to_cpu(record->latency_ticks); u8 wake_on_irqs = record->wake_on_irqs; u8 hint = record->hint; pr_info("xcede: Record %d : hint = %u, latency = 0x%llx tb ticks, Wake-on-irq = %u\n", i, hint, latency_ticks, wake_on_irqs); } return 0; } #define NR_DEDICATED_STATES 2 /* snooze, CEDE */ static u8 cede_latency_hint[NR_DEDICATED_STATES]; static __cpuidle int dedicated_cede_loop(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { u8 old_latency_hint; pseries_idle_prolog(); get_lppaca()->donate_dedicated_cpu = 1; old_latency_hint = get_lppaca()->cede_latency_hint; get_lppaca()->cede_latency_hint = cede_latency_hint[index]; HMT_medium(); check_and_cede_processor(); raw_local_irq_disable(); get_lppaca()->donate_dedicated_cpu = 0; get_lppaca()->cede_latency_hint = old_latency_hint; pseries_idle_epilog(); return index; } static __cpuidle int shared_cede_loop(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { pseries_idle_prolog(); /* * Yield the processor to the hypervisor. We return if * an external interrupt occurs (which are driven prior * to returning here) or if a prod occurs from another * processor. When returning here, external interrupts * are enabled. */ check_and_cede_processor(); raw_local_irq_disable(); pseries_idle_epilog(); return index; } /* * States for dedicated partition case. */ static struct cpuidle_state dedicated_states[NR_DEDICATED_STATES] = { { /* Snooze */ .name = "snooze", .desc = "snooze", .exit_latency = 0, .target_residency = 0, .enter = &snooze_loop, .flags = CPUIDLE_FLAG_POLLING }, { /* CEDE */ .name = "CEDE", .desc = "CEDE", .exit_latency = 10, .target_residency = 100, .enter = &dedicated_cede_loop }, }; /* * States for shared partition case. 
*/ static struct cpuidle_state shared_states[] = { { /* Snooze */ .name = "snooze", .desc = "snooze", .exit_latency = 0, .target_residency = 0, .enter = &snooze_loop, .flags = CPUIDLE_FLAG_POLLING }, { /* Shared Cede */ .name = "Shared Cede", .desc = "Shared Cede", .exit_latency = 10, .target_residency = 100, .enter = &shared_cede_loop }, }; static int pseries_cpuidle_cpu_online(unsigned int cpu) { struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu); if (dev && cpuidle_get_driver()) { cpuidle_pause_and_lock(); cpuidle_enable_device(dev); cpuidle_resume_and_unlock(); } return 0; } static int pseries_cpuidle_cpu_dead(unsigned int cpu) { struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu); if (dev && cpuidle_get_driver()) { cpuidle_pause_and_lock(); cpuidle_disable_device(dev); cpuidle_resume_and_unlock(); } return 0; } /* * pseries_cpuidle_driver_init() */ static int pseries_cpuidle_driver_init(void) { int idle_state; struct cpuidle_driver *drv = &pseries_idle_driver; drv->state_count = 0; for (idle_state = 0; idle_state < max_idle_state; ++idle_state) { /* Is the state not enabled? */ if (cpuidle_state_table[idle_state].enter == NULL) continue; drv->states[drv->state_count] = /* structure copy */ cpuidle_state_table[idle_state]; drv->state_count += 1; } return 0; } static void __init fixup_cede0_latency(void) { struct xcede_latency_payload *payload; u64 min_xcede_latency_us = UINT_MAX; int i; if (parse_cede_parameters()) return; pr_info("cpuidle: Skipping the %d Extended CEDE idle states\n", nr_xcede_records); payload = &xcede_latency_parameter.payload; /* * The CEDE idle state maps to CEDE(0). While the hypervisor * does not advertise CEDE(0) exit latency values, it does * advertise the latency values of the extended CEDE states. * We use the lowest advertised exit latency value as a proxy * for the exit latency of CEDE(0). */ for (i = 0; i < nr_xcede_records; i++) { struct xcede_latency_record *record = &payload->records[i]; u8 hint = record->hint; u64 latency_tb = be64_to_cpu(record->latency_ticks); u64 latency_us = DIV_ROUND_UP_ULL(tb_to_ns(latency_tb), NSEC_PER_USEC); /* * We expect the exit latency of an extended CEDE * state to be non-zero, it to since it takes at least * a few nanoseconds to wakeup the idle CPU and * dispatch the virtual processor into the Linux * Guest. * * So we consider only non-zero value for performing * the fixup of CEDE(0) latency. */ if (latency_us == 0) { pr_warn("cpuidle: Skipping xcede record %d [hint=%d]. Exit latency = 0us\n", i, hint); continue; } if (latency_us < min_xcede_latency_us) min_xcede_latency_us = latency_us; } if (min_xcede_latency_us != UINT_MAX) { dedicated_states[1].exit_latency = min_xcede_latency_us; dedicated_states[1].target_residency = 10 * (min_xcede_latency_us); pr_info("cpuidle: Fixed up CEDE exit latency to %llu us\n", min_xcede_latency_us); } } /* * pseries_idle_probe() * Choose state table for shared versus dedicated partition */ static int __init pseries_idle_probe(void) { if (cpuidle_disable != IDLE_NO_OVERRIDE) return -ENODEV; if (firmware_has_feature(FW_FEATURE_SPLPAR)) { if (lppaca_shared_proc()) { cpuidle_state_table = shared_states; max_idle_state = ARRAY_SIZE(shared_states); } else { /* * Use firmware provided latency values * starting with POWER10 platforms. In the * case that we are running on a POWER10 * platform but in an earlier compat mode, we * can still use the firmware provided values. 
* * However, on platforms prior to POWER10, we * cannot rely on the accuracy of the firmware * provided latency values. On such platforms, * go with the conservative default estimate * of 10us. */ if (cpu_has_feature(CPU_FTR_ARCH_31) || pvr_version_is(PVR_POWER10)) fixup_cede0_latency(); cpuidle_state_table = dedicated_states; max_idle_state = NR_DEDICATED_STATES; } } else return -ENODEV; if (max_idle_state > 1) { snooze_timeout_en = true; snooze_timeout = cpuidle_state_table[1].target_residency * tb_ticks_per_usec; } return 0; } static int __init pseries_processor_idle_init(void) { int retval; retval = pseries_idle_probe(); if (retval) return retval; pseries_cpuidle_driver_init(); retval = cpuidle_register(&pseries_idle_driver, NULL); if (retval) { printk(KERN_DEBUG "Registration of pseries driver failed.\n"); return retval; } retval = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "cpuidle/pseries:online", pseries_cpuidle_cpu_online, NULL); WARN_ON(retval < 0); retval = cpuhp_setup_state_nocalls(CPUHP_CPUIDLE_DEAD, "cpuidle/pseries:DEAD", NULL, pseries_cpuidle_cpu_dead); WARN_ON(retval < 0); printk(KERN_DEBUG "pseries_idle_driver registered\n"); return 0; } device_initcall(pseries_processor_idle_init);
linux-master
drivers/cpuidle/cpuidle-pseries.c
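/*
 * Editor's addition: a standalone, runnable sketch (not kernel code) of
 * the Cede Latency Settings record decode performed above: 1 byte hint,
 * 8 bytes of big-endian latency in timebase ticks, 1 byte wake-on-irq.
 * The 512 MHz timebase and the record contents are made-up example
 * values; real systems take the timebase frequency from firmware.
 */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_TB_HZ 512000000ULL	/* assumed timebase frequency */

int main(void)
{
	/* hint 1, latency 0x2000 tb ticks, responsive to ext. interrupts */
	uint8_t rec[10] = { 0x01, 0, 0, 0, 0, 0, 0, 0x20, 0x00, 0x01 };
	uint64_t ticks = 0;
	int i;

	for (i = 0; i < 8; i++)		/* big-endian 64-bit field */
		ticks = (ticks << 8) | rec[1 + i];

	printf("hint=%u latency=%llu ticks (%llu us) wake_on_irq=%u\n",
	       rec[0], (unsigned long long)ticks,
	       (unsigned long long)(ticks * 1000000ULL / EXAMPLE_TB_HZ),
	       rec[9]);
	return 0;
}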
// SPDX-License-Identifier: GPL-2.0-only /* * CPU idle driver for Tegra CPUs * * Copyright (c) 2010-2013, NVIDIA Corporation. * Copyright (c) 2011 Google, Inc. * Author: Colin Cross <[email protected]> * Gary King <[email protected]> * * Rework for 3.3 by Peter De Schrijver <[email protected]> * * Tegra20/124 driver unification by Dmitry Osipenko <[email protected]> */ #define pr_fmt(fmt) "tegra-cpuidle: " fmt #include <linux/atomic.h> #include <linux/cpuidle.h> #include <linux/cpumask.h> #include <linux/cpu_pm.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/platform_device.h> #include <linux/types.h> #include <linux/clk/tegra.h> #include <linux/firmware/trusted_foundations.h> #include <soc/tegra/cpuidle.h> #include <soc/tegra/flowctrl.h> #include <soc/tegra/fuse.h> #include <soc/tegra/irq.h> #include <soc/tegra/pm.h> #include <soc/tegra/pmc.h> #include <asm/cpuidle.h> #include <asm/firmware.h> #include <asm/smp_plat.h> #include <asm/suspend.h> enum tegra_state { TEGRA_C1, TEGRA_C7, TEGRA_CC6, TEGRA_STATE_COUNT, }; static atomic_t tegra_idle_barrier; static atomic_t tegra_abort_flag; static void tegra_cpuidle_report_cpus_state(void) { unsigned long cpu, lcpu, csr; for_each_cpu(lcpu, cpu_possible_mask) { cpu = cpu_logical_map(lcpu); csr = flowctrl_read_cpu_csr(cpu); pr_err("cpu%lu: online=%d flowctrl_csr=0x%08lx\n", cpu, cpu_online(lcpu), csr); } } static int tegra_cpuidle_wait_for_secondary_cpus_parking(void) { unsigned int retries = 3; while (retries--) { unsigned int delay_us = 10; unsigned int timeout_us = 500 * 1000 / delay_us; /* * The primary CPU0 core shall wait for the secondaries * shutdown in order to power-off CPU's cluster safely. * The timeout value depends on the current CPU frequency, * it takes about 40-150us in average and over 1000us in * a worst case scenario. */ do { if (tegra_cpu_rail_off_ready()) return 0; udelay(delay_us); } while (timeout_us--); pr_err("secondary CPU taking too long to park\n"); tegra_cpuidle_report_cpus_state(); } pr_err("timed out waiting secondaries to park\n"); return -ETIMEDOUT; } static void tegra_cpuidle_unpark_secondary_cpus(void) { unsigned int cpu, lcpu; for_each_cpu(lcpu, cpu_online_mask) { cpu = cpu_logical_map(lcpu); if (cpu > 0) { tegra_enable_cpu_clock(cpu); tegra_cpu_out_of_reset(cpu); flowctrl_write_cpu_halt(cpu, 0); } } } static int tegra_cpuidle_cc6_enter(unsigned int cpu) { int ret; if (cpu > 0) { ret = cpu_suspend(cpu, tegra_pm_park_secondary_cpu); } else { ret = tegra_cpuidle_wait_for_secondary_cpus_parking(); if (!ret) ret = tegra_pm_enter_lp2(); tegra_cpuidle_unpark_secondary_cpus(); } return ret; } static int tegra_cpuidle_c7_enter(void) { int err; err = call_firmware_op(prepare_idle, TF_PM_MODE_LP2_NOFLUSH_L2); if (err && err != -ENOSYS) return err; return cpu_suspend(0, tegra30_pm_secondary_cpu_suspend); } static int tegra_cpuidle_coupled_barrier(struct cpuidle_device *dev) { if (tegra_pending_sgi()) { /* * CPU got local interrupt that will be lost after GIC's * shutdown because GIC driver doesn't save/restore the * pending SGI state across CPU cluster PM. Abort and retry * next time. 
*/ atomic_set(&tegra_abort_flag, 1); } cpuidle_coupled_parallel_barrier(dev, &tegra_idle_barrier); if (atomic_read(&tegra_abort_flag)) { cpuidle_coupled_parallel_barrier(dev, &tegra_idle_barrier); atomic_set(&tegra_abort_flag, 0); return -EINTR; } return 0; } static __cpuidle int tegra_cpuidle_state_enter(struct cpuidle_device *dev, int index, unsigned int cpu) { int err; /* * CC6 state is the "CPU cluster power-off" state. In order to * enter this state, at first the secondary CPU cores need to be * parked into offline mode, then the last CPU should clean out * remaining dirty cache lines into DRAM and trigger Flow Controller * logic that turns off the cluster's power domain (which includes * CPU cores, GIC and L2 cache). */ if (index == TEGRA_CC6) { err = tegra_cpuidle_coupled_barrier(dev); if (err) return err; } local_fiq_disable(); tegra_pm_set_cpu_in_lp2(); cpu_pm_enter(); ct_cpuidle_enter(); switch (index) { case TEGRA_C7: err = tegra_cpuidle_c7_enter(); break; case TEGRA_CC6: err = tegra_cpuidle_cc6_enter(cpu); break; default: err = -EINVAL; break; } ct_cpuidle_exit(); cpu_pm_exit(); tegra_pm_clear_cpu_in_lp2(); local_fiq_enable(); return err ?: index; } static int tegra_cpuidle_adjust_state_index(int index, unsigned int cpu) { /* * On Tegra30 CPU0 can't be power-gated separately from secondary * cores because it gates the whole CPU cluster. */ if (cpu > 0 || index != TEGRA_C7 || tegra_get_chip_id() != TEGRA30) return index; /* put CPU0 into C1 if C7 is requested and secondaries are online */ if (!IS_ENABLED(CONFIG_PM_SLEEP) || num_online_cpus() > 1) index = TEGRA_C1; else index = TEGRA_CC6; return index; } static __cpuidle int tegra_cpuidle_enter(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { bool do_rcu = drv->states[index].flags & CPUIDLE_FLAG_RCU_IDLE; unsigned int cpu = cpu_logical_map(dev->cpu); int ret; index = tegra_cpuidle_adjust_state_index(index, cpu); if (dev->states_usage[index].disable) return -1; if (index == TEGRA_C1) { if (do_rcu) ct_cpuidle_enter(); ret = arm_cpuidle_simple_enter(dev, drv, index); if (do_rcu) ct_cpuidle_exit(); } else ret = tegra_cpuidle_state_enter(dev, index, cpu); if (ret < 0) { if (ret != -EINTR || index != TEGRA_CC6) pr_err_once("failed to enter state %d err: %d\n", index, ret); index = -1; } else { index = ret; } return index; } static int tegra114_enter_s2idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { tegra_cpuidle_enter(dev, drv, index); return 0; } /* * The previous versions of Tegra CPUIDLE driver used a different "legacy" * terminology for naming of the idling states, while this driver uses the * new terminology. * * Mapping of the old terms into the new ones: * * Old | New * --------- * LP3 | C1 (CPU core clock gating) * LP2 | C7 (CPU core power gating) * LP2 | CC6 (CPU cluster power gating) * * Note that that the older CPUIDLE driver versions didn't explicitly * differentiate the LP2 states because these states either used the same * code path or because CC6 wasn't supported. 
*/ static struct cpuidle_driver tegra_idle_driver = { .name = "tegra_idle", .states = { [TEGRA_C1] = ARM_CPUIDLE_WFI_STATE_PWR(600), [TEGRA_C7] = { .enter = tegra_cpuidle_enter, .exit_latency = 2000, .target_residency = 2200, .power_usage = 100, .flags = CPUIDLE_FLAG_TIMER_STOP | CPUIDLE_FLAG_RCU_IDLE, .name = "C7", .desc = "CPU core powered off", }, [TEGRA_CC6] = { .enter = tegra_cpuidle_enter, .exit_latency = 5000, .target_residency = 10000, .power_usage = 0, .flags = CPUIDLE_FLAG_TIMER_STOP | CPUIDLE_FLAG_RCU_IDLE | CPUIDLE_FLAG_COUPLED, .name = "CC6", .desc = "CPU cluster powered off", }, }, .state_count = TEGRA_STATE_COUNT, .safe_state_index = TEGRA_C1, }; static inline void tegra_cpuidle_disable_state(enum tegra_state state) { cpuidle_driver_state_disabled(&tegra_idle_driver, state, true); } /* * Tegra20 HW appears to have a bug such that PCIe device interrupts, whether * they are legacy IRQs or MSI, are lost when CC6 is enabled. To work around * this, simply disable CC6 if the PCI driver and DT node are both enabled. */ void tegra_cpuidle_pcie_irqs_in_use(void) { struct cpuidle_state *state_cc6 = &tegra_idle_driver.states[TEGRA_CC6]; if ((state_cc6->flags & CPUIDLE_FLAG_UNUSABLE) || tegra_get_chip_id() != TEGRA20) return; pr_info("disabling CC6 state, since PCIe IRQs are in use\n"); tegra_cpuidle_disable_state(TEGRA_CC6); } static void tegra_cpuidle_setup_tegra114_c7_state(void) { struct cpuidle_state *s = &tegra_idle_driver.states[TEGRA_C7]; s->enter_s2idle = tegra114_enter_s2idle; s->target_residency = 1000; s->exit_latency = 500; } static int tegra_cpuidle_probe(struct platform_device *pdev) { if (tegra_pmc_get_suspend_mode() == TEGRA_SUSPEND_NOT_READY) return -EPROBE_DEFER; /* LP2 could be disabled in device-tree */ if (tegra_pmc_get_suspend_mode() < TEGRA_SUSPEND_LP2) tegra_cpuidle_disable_state(TEGRA_CC6); /* * Required suspend-resume functionality, which is provided by the * Tegra-arch core and PMC driver, is unavailable if PM-sleep option * is disabled. */ if (!IS_ENABLED(CONFIG_PM_SLEEP)) { tegra_cpuidle_disable_state(TEGRA_C7); tegra_cpuidle_disable_state(TEGRA_CC6); } /* * Generic WFI state (also known as C1 or LP3) and the coupled CPU * cluster power-off (CC6 or LP2) states are common for all Tegra SoCs. */ switch (tegra_get_chip_id()) { case TEGRA20: /* Tegra20 isn't capable to power-off individual CPU cores */ tegra_cpuidle_disable_state(TEGRA_C7); break; case TEGRA30: break; case TEGRA114: case TEGRA124: tegra_cpuidle_setup_tegra114_c7_state(); /* coupled CC6 (LP2) state isn't implemented yet */ tegra_cpuidle_disable_state(TEGRA_CC6); break; default: return -EINVAL; } return cpuidle_register(&tegra_idle_driver, cpu_possible_mask); } static struct platform_driver tegra_cpuidle_driver = { .probe = tegra_cpuidle_probe, .driver = { .name = "tegra-cpuidle", }, }; builtin_platform_driver(tegra_cpuidle_driver);
linux-master
drivers/cpuidle/cpuidle-tegra.c
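/*
 * Editor's addition: the two-barrier abort protocol used by
 * tegra_cpuidle_coupled_barrier() above, modelled as a standalone
 * pthreads program (build with -pthread). Any thread may raise the
 * abort flag before the first barrier; the second barrier ensures
 * every thread has observed the flag before it is cleared for the
 * next round, mirroring the pending-SGI abort path.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NR_THREADS 4

static pthread_barrier_t barrier;
static atomic_int abort_flag;

static void *enter_idle(void *arg)
{
	long id = (long)arg;

	if (id == 0)			/* thread 0 plays the pending SGI */
		atomic_store(&abort_flag, 1);

	pthread_barrier_wait(&barrier);	/* everyone arrives */

	if (atomic_load(&abort_flag)) {
		pthread_barrier_wait(&barrier);	/* all saw the flag */
		atomic_store(&abort_flag, 0);	/* now safe to reset */
		printf("thread %ld: aborted idle entry\n", id);
		return NULL;
	}

	printf("thread %ld: would power down the cluster\n", id);
	return NULL;
}

int main(void)
{
	pthread_t t[NR_THREADS];
	long i;

	pthread_barrier_init(&barrier, NULL, NR_THREADS);
	for (i = 0; i < NR_THREADS; i++)
		pthread_create(&t[i], NULL, enter_idle, (void *)i);
	for (i = 0; i < NR_THREADS; i++)
		pthread_join(t[i], NULL);
	pthread_barrier_destroy(&barrier);
	return 0;
}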
/* * sysfs.c - sysfs support * * (C) 2006-2007 Shaohua Li <[email protected]> * * This code is licenced under the GPL. */ #include <linux/kernel.h> #include <linux/cpuidle.h> #include <linux/sysfs.h> #include <linux/slab.h> #include <linux/cpu.h> #include <linux/completion.h> #include <linux/capability.h> #include <linux/device.h> #include <linux/kobject.h> #include "cpuidle.h" static ssize_t show_available_governors(struct device *dev, struct device_attribute *attr, char *buf) { ssize_t i = 0; struct cpuidle_governor *tmp; mutex_lock(&cpuidle_lock); list_for_each_entry(tmp, &cpuidle_governors, governor_list) { if (i >= (ssize_t) (PAGE_SIZE - (CPUIDLE_NAME_LEN + 2))) goto out; i += scnprintf(&buf[i], CPUIDLE_NAME_LEN + 1, "%s ", tmp->name); } out: i+= sprintf(&buf[i], "\n"); mutex_unlock(&cpuidle_lock); return i; } static ssize_t show_current_driver(struct device *dev, struct device_attribute *attr, char *buf) { ssize_t ret; struct cpuidle_driver *drv; spin_lock(&cpuidle_driver_lock); drv = cpuidle_get_driver(); if (drv) ret = sprintf(buf, "%s\n", drv->name); else ret = sprintf(buf, "none\n"); spin_unlock(&cpuidle_driver_lock); return ret; } static ssize_t show_current_governor(struct device *dev, struct device_attribute *attr, char *buf) { ssize_t ret; mutex_lock(&cpuidle_lock); if (cpuidle_curr_governor) ret = sprintf(buf, "%s\n", cpuidle_curr_governor->name); else ret = sprintf(buf, "none\n"); mutex_unlock(&cpuidle_lock); return ret; } static ssize_t store_current_governor(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { char gov_name[CPUIDLE_NAME_LEN + 1]; int ret; struct cpuidle_governor *gov; ret = sscanf(buf, "%" __stringify(CPUIDLE_NAME_LEN) "s", gov_name); if (ret != 1) return -EINVAL; mutex_lock(&cpuidle_lock); ret = -EINVAL; list_for_each_entry(gov, &cpuidle_governors, governor_list) { if (!strncmp(gov->name, gov_name, CPUIDLE_NAME_LEN)) { ret = cpuidle_switch_governor(gov); break; } } mutex_unlock(&cpuidle_lock); return ret ? 
ret : count; } static DEVICE_ATTR(available_governors, 0444, show_available_governors, NULL); static DEVICE_ATTR(current_driver, 0444, show_current_driver, NULL); static DEVICE_ATTR(current_governor, 0644, show_current_governor, store_current_governor); static DEVICE_ATTR(current_governor_ro, 0444, show_current_governor, NULL); static struct attribute *cpuidle_attrs[] = { &dev_attr_available_governors.attr, &dev_attr_current_driver.attr, &dev_attr_current_governor.attr, &dev_attr_current_governor_ro.attr, NULL }; static struct attribute_group cpuidle_attr_group = { .attrs = cpuidle_attrs, .name = "cpuidle", }; /** * cpuidle_add_interface - add CPU global sysfs attributes */ int cpuidle_add_interface(void) { struct device *dev_root = bus_get_dev_root(&cpu_subsys); int retval; if (!dev_root) return -EINVAL; retval = sysfs_create_group(&dev_root->kobj, &cpuidle_attr_group); put_device(dev_root); return retval; } /** * cpuidle_remove_interface - remove CPU global sysfs attributes * @dev: the target device */ void cpuidle_remove_interface(struct device *dev) { sysfs_remove_group(&dev->kobj, &cpuidle_attr_group); } struct cpuidle_attr { struct attribute attr; ssize_t (*show)(struct cpuidle_device *, char *); ssize_t (*store)(struct cpuidle_device *, const char *, size_t count); }; #define attr_to_cpuidleattr(a) container_of(a, struct cpuidle_attr, attr) struct cpuidle_device_kobj { struct cpuidle_device *dev; struct completion kobj_unregister; struct kobject kobj; }; static inline struct cpuidle_device *to_cpuidle_device(struct kobject *kobj) { struct cpuidle_device_kobj *kdev = container_of(kobj, struct cpuidle_device_kobj, kobj); return kdev->dev; } static ssize_t cpuidle_show(struct kobject *kobj, struct attribute *attr, char *buf) { int ret = -EIO; struct cpuidle_device *dev = to_cpuidle_device(kobj); struct cpuidle_attr *cattr = attr_to_cpuidleattr(attr); if (cattr->show) { mutex_lock(&cpuidle_lock); ret = cattr->show(dev, buf); mutex_unlock(&cpuidle_lock); } return ret; } static ssize_t cpuidle_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { int ret = -EIO; struct cpuidle_device *dev = to_cpuidle_device(kobj); struct cpuidle_attr *cattr = attr_to_cpuidleattr(attr); if (cattr->store) { mutex_lock(&cpuidle_lock); ret = cattr->store(dev, buf, count); mutex_unlock(&cpuidle_lock); } return ret; } static const struct sysfs_ops cpuidle_sysfs_ops = { .show = cpuidle_show, .store = cpuidle_store, }; static void cpuidle_sysfs_release(struct kobject *kobj) { struct cpuidle_device_kobj *kdev = container_of(kobj, struct cpuidle_device_kobj, kobj); complete(&kdev->kobj_unregister); } static const struct kobj_type ktype_cpuidle = { .sysfs_ops = &cpuidle_sysfs_ops, .release = cpuidle_sysfs_release, }; struct cpuidle_state_attr { struct attribute attr; ssize_t (*show)(struct cpuidle_state *, \ struct cpuidle_state_usage *, char *); ssize_t (*store)(struct cpuidle_state *, \ struct cpuidle_state_usage *, const char *, size_t); }; #define define_one_state_ro(_name, show) \ static struct cpuidle_state_attr attr_##_name = __ATTR(_name, 0444, show, NULL) #define define_one_state_rw(_name, show, store) \ static struct cpuidle_state_attr attr_##_name = __ATTR(_name, 0644, show, store) #define define_show_state_function(_name) \ static ssize_t show_state_##_name(struct cpuidle_state *state, \ struct cpuidle_state_usage *state_usage, char *buf) \ { \ return sprintf(buf, "%u\n", state->_name);\ } #define define_show_state_ull_function(_name) \ static ssize_t 
show_state_##_name(struct cpuidle_state *state, \ struct cpuidle_state_usage *state_usage, \ char *buf) \ { \ return sprintf(buf, "%llu\n", state_usage->_name);\ } #define define_show_state_str_function(_name) \ static ssize_t show_state_##_name(struct cpuidle_state *state, \ struct cpuidle_state_usage *state_usage, \ char *buf) \ { \ if (state->_name[0] == '\0')\ return sprintf(buf, "<null>\n");\ return sprintf(buf, "%s\n", state->_name);\ } #define define_show_state_time_function(_name) \ static ssize_t show_state_##_name(struct cpuidle_state *state, \ struct cpuidle_state_usage *state_usage, \ char *buf) \ { \ return sprintf(buf, "%llu\n", ktime_to_us(state->_name##_ns)); \ } define_show_state_time_function(exit_latency) define_show_state_time_function(target_residency) define_show_state_function(power_usage) define_show_state_ull_function(usage) define_show_state_ull_function(rejected) define_show_state_str_function(name) define_show_state_str_function(desc) define_show_state_ull_function(above) define_show_state_ull_function(below) static ssize_t show_state_time(struct cpuidle_state *state, struct cpuidle_state_usage *state_usage, char *buf) { return sprintf(buf, "%llu\n", ktime_to_us(state_usage->time_ns)); } static ssize_t show_state_disable(struct cpuidle_state *state, struct cpuidle_state_usage *state_usage, char *buf) { return sprintf(buf, "%llu\n", state_usage->disable & CPUIDLE_STATE_DISABLED_BY_USER); } static ssize_t store_state_disable(struct cpuidle_state *state, struct cpuidle_state_usage *state_usage, const char *buf, size_t size) { unsigned int value; int err; if (!capable(CAP_SYS_ADMIN)) return -EPERM; err = kstrtouint(buf, 0, &value); if (err) return err; if (value) state_usage->disable |= CPUIDLE_STATE_DISABLED_BY_USER; else state_usage->disable &= ~CPUIDLE_STATE_DISABLED_BY_USER; return size; } static ssize_t show_state_default_status(struct cpuidle_state *state, struct cpuidle_state_usage *state_usage, char *buf) { return sprintf(buf, "%s\n", state->flags & CPUIDLE_FLAG_OFF ? 
"disabled" : "enabled"); } define_one_state_ro(name, show_state_name); define_one_state_ro(desc, show_state_desc); define_one_state_ro(latency, show_state_exit_latency); define_one_state_ro(residency, show_state_target_residency); define_one_state_ro(power, show_state_power_usage); define_one_state_ro(usage, show_state_usage); define_one_state_ro(rejected, show_state_rejected); define_one_state_ro(time, show_state_time); define_one_state_rw(disable, show_state_disable, store_state_disable); define_one_state_ro(above, show_state_above); define_one_state_ro(below, show_state_below); define_one_state_ro(default_status, show_state_default_status); static struct attribute *cpuidle_state_default_attrs[] = { &attr_name.attr, &attr_desc.attr, &attr_latency.attr, &attr_residency.attr, &attr_power.attr, &attr_usage.attr, &attr_rejected.attr, &attr_time.attr, &attr_disable.attr, &attr_above.attr, &attr_below.attr, &attr_default_status.attr, NULL }; ATTRIBUTE_GROUPS(cpuidle_state_default); struct cpuidle_state_kobj { struct cpuidle_state *state; struct cpuidle_state_usage *state_usage; struct completion kobj_unregister; struct kobject kobj; struct cpuidle_device *device; }; #ifdef CONFIG_SUSPEND #define define_show_state_s2idle_ull_function(_name) \ static ssize_t show_state_s2idle_##_name(struct cpuidle_state *state, \ struct cpuidle_state_usage *state_usage, \ char *buf) \ { \ return sprintf(buf, "%llu\n", state_usage->s2idle_##_name);\ } define_show_state_s2idle_ull_function(usage); define_show_state_s2idle_ull_function(time); #define define_one_state_s2idle_ro(_name, show) \ static struct cpuidle_state_attr attr_s2idle_##_name = \ __ATTR(_name, 0444, show, NULL) define_one_state_s2idle_ro(usage, show_state_s2idle_usage); define_one_state_s2idle_ro(time, show_state_s2idle_time); static struct attribute *cpuidle_state_s2idle_attrs[] = { &attr_s2idle_usage.attr, &attr_s2idle_time.attr, NULL }; static const struct attribute_group cpuidle_state_s2idle_group = { .name = "s2idle", .attrs = cpuidle_state_s2idle_attrs, }; static void cpuidle_add_s2idle_attr_group(struct cpuidle_state_kobj *kobj) { int ret; if (!kobj->state->enter_s2idle) return; ret = sysfs_create_group(&kobj->kobj, &cpuidle_state_s2idle_group); if (ret) pr_debug("%s: sysfs attribute group not created\n", __func__); } static void cpuidle_remove_s2idle_attr_group(struct cpuidle_state_kobj *kobj) { if (kobj->state->enter_s2idle) sysfs_remove_group(&kobj->kobj, &cpuidle_state_s2idle_group); } #else static inline void cpuidle_add_s2idle_attr_group(struct cpuidle_state_kobj *kobj) { } static inline void cpuidle_remove_s2idle_attr_group(struct cpuidle_state_kobj *kobj) { } #endif /* CONFIG_SUSPEND */ #define kobj_to_state_obj(k) container_of(k, struct cpuidle_state_kobj, kobj) #define kobj_to_state(k) (kobj_to_state_obj(k)->state) #define kobj_to_state_usage(k) (kobj_to_state_obj(k)->state_usage) #define kobj_to_device(k) (kobj_to_state_obj(k)->device) #define attr_to_stateattr(a) container_of(a, struct cpuidle_state_attr, attr) static ssize_t cpuidle_state_show(struct kobject *kobj, struct attribute *attr, char *buf) { int ret = -EIO; struct cpuidle_state *state = kobj_to_state(kobj); struct cpuidle_state_usage *state_usage = kobj_to_state_usage(kobj); struct cpuidle_state_attr *cattr = attr_to_stateattr(attr); if (cattr->show) ret = cattr->show(state, state_usage, buf); return ret; } static ssize_t cpuidle_state_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t size) { int ret = -EIO; struct cpuidle_state *state = 
kobj_to_state(kobj); struct cpuidle_state_usage *state_usage = kobj_to_state_usage(kobj); struct cpuidle_state_attr *cattr = attr_to_stateattr(attr); struct cpuidle_device *dev = kobj_to_device(kobj); if (cattr->store) ret = cattr->store(state, state_usage, buf, size); /* reset poll time cache */ dev->poll_limit_ns = 0; return ret; } static const struct sysfs_ops cpuidle_state_sysfs_ops = { .show = cpuidle_state_show, .store = cpuidle_state_store, }; static void cpuidle_state_sysfs_release(struct kobject *kobj) { struct cpuidle_state_kobj *state_obj = kobj_to_state_obj(kobj); complete(&state_obj->kobj_unregister); } static const struct kobj_type ktype_state_cpuidle = { .sysfs_ops = &cpuidle_state_sysfs_ops, .default_groups = cpuidle_state_default_groups, .release = cpuidle_state_sysfs_release, }; static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i) { cpuidle_remove_s2idle_attr_group(device->kobjs[i]); kobject_put(&device->kobjs[i]->kobj); wait_for_completion(&device->kobjs[i]->kobj_unregister); kfree(device->kobjs[i]); device->kobjs[i] = NULL; } /** * cpuidle_add_state_sysfs - adds cpuidle states sysfs attributes * @device: the target device */ static int cpuidle_add_state_sysfs(struct cpuidle_device *device) { int i, ret = -ENOMEM; struct cpuidle_state_kobj *kobj; struct cpuidle_device_kobj *kdev = device->kobj_dev; struct cpuidle_driver *drv = cpuidle_get_cpu_driver(device); /* state statistics */ for (i = 0; i < drv->state_count; i++) { kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL); if (!kobj) { ret = -ENOMEM; goto error_state; } kobj->state = &drv->states[i]; kobj->state_usage = &device->states_usage[i]; kobj->device = device; init_completion(&kobj->kobj_unregister); ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle, &kdev->kobj, "state%d", i); if (ret) { kobject_put(&kobj->kobj); kfree(kobj); goto error_state; } cpuidle_add_s2idle_attr_group(kobj); kobject_uevent(&kobj->kobj, KOBJ_ADD); device->kobjs[i] = kobj; } return 0; error_state: for (i = i - 1; i >= 0; i--) cpuidle_free_state_kobj(device, i); return ret; } /** * cpuidle_remove_state_sysfs - removes the cpuidle states sysfs attributes * @device: the target device */ static void cpuidle_remove_state_sysfs(struct cpuidle_device *device) { struct cpuidle_driver *drv = cpuidle_get_cpu_driver(device); int i; for (i = 0; i < drv->state_count; i++) cpuidle_free_state_kobj(device, i); } #ifdef CONFIG_CPU_IDLE_MULTIPLE_DRIVERS #define kobj_to_driver_kobj(k) container_of(k, struct cpuidle_driver_kobj, kobj) #define attr_to_driver_attr(a) container_of(a, struct cpuidle_driver_attr, attr) #define define_one_driver_ro(_name, show) \ static struct cpuidle_driver_attr attr_driver_##_name = \ __ATTR(_name, 0444, show, NULL) struct cpuidle_driver_kobj { struct cpuidle_driver *drv; struct completion kobj_unregister; struct kobject kobj; }; struct cpuidle_driver_attr { struct attribute attr; ssize_t (*show)(struct cpuidle_driver *, char *); ssize_t (*store)(struct cpuidle_driver *, const char *, size_t); }; static ssize_t show_driver_name(struct cpuidle_driver *drv, char *buf) { ssize_t ret; spin_lock(&cpuidle_driver_lock); ret = sprintf(buf, "%s\n", drv ? 
drv->name : "none"); spin_unlock(&cpuidle_driver_lock); return ret; } static void cpuidle_driver_sysfs_release(struct kobject *kobj) { struct cpuidle_driver_kobj *driver_kobj = kobj_to_driver_kobj(kobj); complete(&driver_kobj->kobj_unregister); } static ssize_t cpuidle_driver_show(struct kobject *kobj, struct attribute *attr, char *buf) { int ret = -EIO; struct cpuidle_driver_kobj *driver_kobj = kobj_to_driver_kobj(kobj); struct cpuidle_driver_attr *dattr = attr_to_driver_attr(attr); if (dattr->show) ret = dattr->show(driver_kobj->drv, buf); return ret; } static ssize_t cpuidle_driver_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t size) { int ret = -EIO; struct cpuidle_driver_kobj *driver_kobj = kobj_to_driver_kobj(kobj); struct cpuidle_driver_attr *dattr = attr_to_driver_attr(attr); if (dattr->store) ret = dattr->store(driver_kobj->drv, buf, size); return ret; } define_one_driver_ro(name, show_driver_name); static const struct sysfs_ops cpuidle_driver_sysfs_ops = { .show = cpuidle_driver_show, .store = cpuidle_driver_store, }; static struct attribute *cpuidle_driver_default_attrs[] = { &attr_driver_name.attr, NULL }; ATTRIBUTE_GROUPS(cpuidle_driver_default); static const struct kobj_type ktype_driver_cpuidle = { .sysfs_ops = &cpuidle_driver_sysfs_ops, .default_groups = cpuidle_driver_default_groups, .release = cpuidle_driver_sysfs_release, }; /** * cpuidle_add_driver_sysfs - adds the driver name sysfs attribute * @dev: the target device */ static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev) { struct cpuidle_driver_kobj *kdrv; struct cpuidle_device_kobj *kdev = dev->kobj_dev; struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); int ret; kdrv = kzalloc(sizeof(*kdrv), GFP_KERNEL); if (!kdrv) return -ENOMEM; kdrv->drv = drv; init_completion(&kdrv->kobj_unregister); ret = kobject_init_and_add(&kdrv->kobj, &ktype_driver_cpuidle, &kdev->kobj, "driver"); if (ret) { kobject_put(&kdrv->kobj); kfree(kdrv); return ret; } kobject_uevent(&kdrv->kobj, KOBJ_ADD); dev->kobj_driver = kdrv; return ret; } /** * cpuidle_remove_driver_sysfs - removes the driver name sysfs attribute * @dev: the target device */ static void cpuidle_remove_driver_sysfs(struct cpuidle_device *dev) { struct cpuidle_driver_kobj *kdrv = dev->kobj_driver; kobject_put(&kdrv->kobj); wait_for_completion(&kdrv->kobj_unregister); kfree(kdrv); } #else static inline int cpuidle_add_driver_sysfs(struct cpuidle_device *dev) { return 0; } static inline void cpuidle_remove_driver_sysfs(struct cpuidle_device *dev) { ; } #endif /** * cpuidle_add_device_sysfs - adds device specific sysfs attributes * @device: the target device */ int cpuidle_add_device_sysfs(struct cpuidle_device *device) { int ret; ret = cpuidle_add_state_sysfs(device); if (ret) return ret; ret = cpuidle_add_driver_sysfs(device); if (ret) cpuidle_remove_state_sysfs(device); return ret; } /** * cpuidle_remove_device_sysfs : removes device specific sysfs attributes * @device : the target device */ void cpuidle_remove_device_sysfs(struct cpuidle_device *device) { cpuidle_remove_driver_sysfs(device); cpuidle_remove_state_sysfs(device); } /** * cpuidle_add_sysfs - creates a sysfs instance for the target device * @dev: the target device */ int cpuidle_add_sysfs(struct cpuidle_device *dev) { struct cpuidle_device_kobj *kdev; struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu); int error; /* * Return if cpu_device is not setup for this CPU. 
* * This could happen if the arch did not set up cpu_device * since this CPU is not in cpu_present mask and the * driver did not send a correct CPU mask during registration. * Without this check we would end up passing bogus * value for &cpu_dev->kobj in kobject_init_and_add() */ if (!cpu_dev) return -ENODEV; kdev = kzalloc(sizeof(*kdev), GFP_KERNEL); if (!kdev) return -ENOMEM; kdev->dev = dev; init_completion(&kdev->kobj_unregister); error = kobject_init_and_add(&kdev->kobj, &ktype_cpuidle, &cpu_dev->kobj, "cpuidle"); if (error) { kobject_put(&kdev->kobj); kfree(kdev); return error; } dev->kobj_dev = kdev; kobject_uevent(&kdev->kobj, KOBJ_ADD); return 0; } /** * cpuidle_remove_sysfs - deletes a sysfs instance on the target device * @dev: the target device */ void cpuidle_remove_sysfs(struct cpuidle_device *dev) { struct cpuidle_device_kobj *kdev = dev->kobj_dev; kobject_put(&kdev->kobj); wait_for_completion(&kdev->kobj_unregister); kfree(kdev); }
linux-master
drivers/cpuidle/sysfs.c
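/*
 * Editor's addition: a runnable userspace sketch (not kernel code)
 * that reads back the attributes sysfs.c creates above. The paths
 * follow the layout built there: a global "cpuidle" group on the cpu
 * subsystem plus per-cpu "cpuidle/stateN" directories; the number of
 * states probed (4) is arbitrary.
 */
#include <stdio.h>

static void show(const char *path)
{
	char buf[64];
	FILE *f = fopen(path, "r");

	if (!f)
		return;		/* attribute does not exist, skip quietly */
	if (fgets(buf, sizeof(buf), f))
		printf("%-60s %s", path, buf);
	fclose(f);
}

int main(void)
{
	char path[128];
	int i;

	show("/sys/devices/system/cpu/cpuidle/current_driver");
	show("/sys/devices/system/cpu/cpuidle/current_governor");

	for (i = 0; i < 4; i++) {
		snprintf(path, sizeof(path),
			 "/sys/devices/system/cpu/cpu0/cpuidle/state%d/name", i);
		show(path);
		snprintf(path, sizeof(path),
			 "/sys/devices/system/cpu/cpu0/cpuidle/state%d/usage", i);
		show(path);
	}
	return 0;
}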
// SPDX-License-Identifier: GPL-2.0-only
/*
 * PSCI CPU idle driver.
 *
 * Copyright (C) 2019 ARM Ltd.
 * Author: Lorenzo Pieralisi <[email protected]>
 */

#define pr_fmt(fmt) "CPUidle PSCI: " fmt

#include <linux/cpuhotplug.h>
#include <linux/cpu_cooling.h>
#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/psci.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/syscore_ops.h>

#include <asm/cpuidle.h>

#include "cpuidle-psci.h"
#include "dt_idle_states.h"

struct psci_cpuidle_data {
	u32 *psci_states;
	struct device *dev;
};

static DEFINE_PER_CPU_READ_MOSTLY(struct psci_cpuidle_data, psci_cpuidle_data);
static DEFINE_PER_CPU(u32, domain_state);
static bool psci_cpuidle_use_cpuhp;

void psci_set_domain_state(u32 state)
{
	__this_cpu_write(domain_state, state);
}

static inline u32 psci_get_domain_state(void)
{
	return __this_cpu_read(domain_state);
}

static __cpuidle int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
						    struct cpuidle_driver *drv, int idx,
						    bool s2idle)
{
	struct psci_cpuidle_data *data = this_cpu_ptr(&psci_cpuidle_data);
	u32 *states = data->psci_states;
	struct device *pd_dev = data->dev;
	u32 state;
	int ret;

	ret = cpu_pm_enter();
	if (ret)
		return -1;

	/* Do runtime PM to manage a hierarchical CPU topology. */
	if (s2idle)
		dev_pm_genpd_suspend(pd_dev);
	else
		pm_runtime_put_sync_suspend(pd_dev);

	state = psci_get_domain_state();
	if (!state)
		state = states[idx];

	ret = psci_cpu_suspend_enter(state) ? -1 : idx;

	if (s2idle)
		dev_pm_genpd_resume(pd_dev);
	else
		pm_runtime_get_sync(pd_dev);

	cpu_pm_exit();

	/* Clear the domain state to start fresh when back from idle. */
	psci_set_domain_state(0);
	return ret;
}

static int psci_enter_domain_idle_state(struct cpuidle_device *dev,
					struct cpuidle_driver *drv, int idx)
{
	return __psci_enter_domain_idle_state(dev, drv, idx, false);
}

static int psci_enter_s2idle_domain_idle_state(struct cpuidle_device *dev,
					       struct cpuidle_driver *drv,
					       int idx)
{
	return __psci_enter_domain_idle_state(dev, drv, idx, true);
}

static int psci_idle_cpuhp_up(unsigned int cpu)
{
	struct device *pd_dev = __this_cpu_read(psci_cpuidle_data.dev);

	if (pd_dev)
		pm_runtime_get_sync(pd_dev);

	return 0;
}

static int psci_idle_cpuhp_down(unsigned int cpu)
{
	struct device *pd_dev = __this_cpu_read(psci_cpuidle_data.dev);

	if (pd_dev) {
		pm_runtime_put_sync(pd_dev);
		/* Clear domain state to start fresh at next online. */
		psci_set_domain_state(0);
	}

	return 0;
}

static void psci_idle_syscore_switch(bool suspend)
{
	bool cleared = false;
	struct device *dev;
	int cpu;

	for_each_possible_cpu(cpu) {
		dev = per_cpu_ptr(&psci_cpuidle_data, cpu)->dev;

		if (dev && suspend) {
			dev_pm_genpd_suspend(dev);
		} else if (dev) {
			dev_pm_genpd_resume(dev);

			/* Account for userspace having offlined a CPU. */
			if (pm_runtime_status_suspended(dev))
				pm_runtime_set_active(dev);

			/* Clear domain state to re-start fresh. */
			if (!cleared) {
				psci_set_domain_state(0);
				cleared = true;
			}
		}
	}
}

static int psci_idle_syscore_suspend(void)
{
	psci_idle_syscore_switch(true);
	return 0;
}

static void psci_idle_syscore_resume(void)
{
	psci_idle_syscore_switch(false);
}

static struct syscore_ops psci_idle_syscore_ops = {
	.suspend = psci_idle_syscore_suspend,
	.resume = psci_idle_syscore_resume,
};

static void psci_idle_init_cpuhp(void)
{
	int err;

	if (!psci_cpuidle_use_cpuhp)
		return;

	register_syscore_ops(&psci_idle_syscore_ops);

	err = cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING,
					"cpuidle/psci:online",
					psci_idle_cpuhp_up,
					psci_idle_cpuhp_down);
	if (err)
		pr_warn("Failed %d while setting up cpuhp state\n", err);
}

static __cpuidle int psci_enter_idle_state(struct cpuidle_device *dev,
					   struct cpuidle_driver *drv, int idx)
{
	u32 *state = __this_cpu_read(psci_cpuidle_data.psci_states);

	return CPU_PM_CPU_IDLE_ENTER_PARAM_RCU(psci_cpu_suspend_enter, idx, state[idx]);
}

static const struct of_device_id psci_idle_state_match[] = {
	{ .compatible = "arm,idle-state",
	  .data = psci_enter_idle_state },
	{ },
};

int psci_dt_parse_state_node(struct device_node *np, u32 *state)
{
	int err = of_property_read_u32(np, "arm,psci-suspend-param", state);

	if (err) {
		pr_warn("%pOF missing arm,psci-suspend-param property\n", np);
		return err;
	}

	if (!psci_power_state_is_valid(*state)) {
		pr_warn("Invalid PSCI power state %#x\n", *state);
		return -EINVAL;
	}

	return 0;
}

static int psci_dt_cpu_init_topology(struct cpuidle_driver *drv,
				     struct psci_cpuidle_data *data,
				     unsigned int state_count, int cpu)
{
	/* Currently limit the hierarchical topology to be used in OSI mode. */
	if (!psci_has_osi_support())
		return 0;

	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		return 0;

	data->dev = psci_dt_attach_cpu(cpu);
	if (IS_ERR_OR_NULL(data->dev))
		return PTR_ERR_OR_ZERO(data->dev);

	/*
	 * Using the deepest state for the CPU to trigger a potential selection
	 * of a shared state for the domain, assumes the domain states are all
	 * deeper states.
	 */
	drv->states[state_count - 1].flags |= CPUIDLE_FLAG_RCU_IDLE;
	drv->states[state_count - 1].enter = psci_enter_domain_idle_state;
	drv->states[state_count - 1].enter_s2idle = psci_enter_s2idle_domain_idle_state;
	psci_cpuidle_use_cpuhp = true;

	return 0;
}

static int psci_dt_cpu_init_idle(struct device *dev, struct cpuidle_driver *drv,
				 struct device_node *cpu_node,
				 unsigned int state_count, int cpu)
{
	int i, ret = 0;
	u32 *psci_states;
	struct device_node *state_node;
	struct psci_cpuidle_data *data = per_cpu_ptr(&psci_cpuidle_data, cpu);

	state_count++; /* Add WFI state too */
	psci_states = devm_kcalloc(dev, state_count, sizeof(*psci_states),
				   GFP_KERNEL);
	if (!psci_states)
		return -ENOMEM;

	for (i = 1; i < state_count; i++) {
		state_node = of_get_cpu_state_node(cpu_node, i - 1);
		if (!state_node)
			break;

		ret = psci_dt_parse_state_node(state_node, &psci_states[i]);
		of_node_put(state_node);

		if (ret)
			return ret;

		pr_debug("psci-power-state %#x index %d\n", psci_states[i], i);
	}

	if (i != state_count)
		return -ENODEV;

	/* Initialize optional data, used for the hierarchical topology. */
	ret = psci_dt_cpu_init_topology(drv, data, state_count, cpu);
	if (ret < 0)
		return ret;

	/* Idle states parsed correctly, store them in the per-cpu struct. */
	data->psci_states = psci_states;
	return 0;
}

static int psci_cpu_init_idle(struct device *dev, struct cpuidle_driver *drv,
			      unsigned int cpu, unsigned int state_count)
{
	struct device_node *cpu_node;
	int ret;

	/*
	 * If the PSCI cpu_suspend function hook has not been initialized,
	 * idle states must not be enabled, so bail out.
	 */
	if (!psci_ops.cpu_suspend)
		return -EOPNOTSUPP;

	cpu_node = of_cpu_device_node_get(cpu);
	if (!cpu_node)
		return -ENODEV;

	ret = psci_dt_cpu_init_idle(dev, drv, cpu_node, state_count, cpu);

	of_node_put(cpu_node);

	return ret;
}

static void psci_cpu_deinit_idle(int cpu)
{
	struct psci_cpuidle_data *data = per_cpu_ptr(&psci_cpuidle_data, cpu);

	psci_dt_detach_cpu(data->dev);
	psci_cpuidle_use_cpuhp = false;
}

static int psci_idle_init_cpu(struct device *dev, int cpu)
{
	struct cpuidle_driver *drv;
	struct device_node *cpu_node;
	const char *enable_method;
	int ret = 0;

	cpu_node = of_cpu_device_node_get(cpu);
	if (!cpu_node)
		return -ENODEV;

	/*
	 * Check whether the enable-method for the cpu is PSCI, fail
	 * if it is not.
	 */
	enable_method = of_get_property(cpu_node, "enable-method", NULL);
	if (!enable_method || (strcmp(enable_method, "psci")))
		ret = -ENODEV;

	of_node_put(cpu_node);
	if (ret)
		return ret;

	drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;

	drv->name = "psci_idle";
	drv->owner = THIS_MODULE;
	drv->cpumask = (struct cpumask *)cpumask_of(cpu);

	/*
	 * PSCI idle states rely on architectural WFI to be represented as
	 * state index 0.
	 */
	drv->states[0].enter = psci_enter_idle_state;
	drv->states[0].exit_latency = 1;
	drv->states[0].target_residency = 1;
	drv->states[0].power_usage = UINT_MAX;
	strcpy(drv->states[0].name, "WFI");
	strcpy(drv->states[0].desc, "ARM WFI");

	/*
	 * If no DT idle states are detected (ret == 0), let the driver
	 * initialization fail accordingly, since there is no reason to
	 * initialize the idle driver if only WFI is supported: the default
	 * architectural back-end already executes WFI on idle entry.
	 */
	ret = dt_init_idle_driver(drv, psci_idle_state_match, 1);
	if (ret <= 0)
		return ret ? : -ENODEV;

	/*
	 * Initialize PSCI idle states.
	 */
	ret = psci_cpu_init_idle(dev, drv, cpu, ret);
	if (ret) {
		pr_err("CPU %d failed to initialize PSCI idle states\n", cpu);
		return ret;
	}

	ret = cpuidle_register(drv, NULL);
	if (ret)
		goto deinit;

	cpuidle_cooling_register(drv);

	return 0;
deinit:
	psci_cpu_deinit_idle(cpu);
	return ret;
}

/*
 * psci_cpuidle_probe - Initializes the PSCI cpuidle driver
 *
 * Initializes the PSCI cpuidle driver for all CPUs; if any CPU fails
 * to register its cpuidle driver, roll back and cancel the
 * registration of all CPUs.
 */
static int psci_cpuidle_probe(struct platform_device *pdev)
{
	int cpu, ret;
	struct cpuidle_driver *drv;
	struct cpuidle_device *dev;

	for_each_possible_cpu(cpu) {
		ret = psci_idle_init_cpu(&pdev->dev, cpu);
		if (ret)
			goto out_fail;
	}

	psci_idle_init_cpuhp();
	return 0;

out_fail:
	while (--cpu >= 0) {
		dev = per_cpu(cpuidle_devices, cpu);
		drv = cpuidle_get_cpu_driver(dev);
		cpuidle_unregister(drv);
		psci_cpu_deinit_idle(cpu);
	}

	return ret;
}

static struct platform_driver psci_cpuidle_driver = {
	.probe = psci_cpuidle_probe,
	.driver = {
		.name = "psci-cpuidle",
	},
};

static int __init psci_idle_init(void)
{
	struct platform_device *pdev;
	int ret;

	ret = platform_driver_register(&psci_cpuidle_driver);
	if (ret)
		return ret;

	pdev = platform_device_register_simple("psci-cpuidle", -1, NULL, 0);
	if (IS_ERR(pdev)) {
		platform_driver_unregister(&psci_cpuidle_driver);
		return PTR_ERR(pdev);
	}

	return 0;
}
device_initcall(psci_idle_init);
linux-master
drivers/cpuidle/cpuidle-psci.c
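Illustrative aside: psci_set_domain_state() above is the hook through which a genpd provider tells the CPU about to suspend which domain idle state was selected. A hedged sketch of how a genpd ->power_off() callback could use it follows; it mirrors the pattern used by the in-tree PSCI PM-domain glue, but demo_pd_power_off and the assumption that each state's data pointer holds a composed PSCI suspend parameter are illustrative, not a definitive implementation.

/* Hypothetical sketch; assumes OSI mode and per-state data set up elsewhere. */
#include <linux/pm_domain.h>

#include "cpuidle-psci.h"

static int demo_pd_power_off(struct generic_pm_domain *pd)
{
	struct genpd_power_state *state = &pd->states[pd->state_idx];
	u32 *pd_state;

	/* Nothing to do if no PSCI parameter was attached to this state. */
	if (!state->data)
		return 0;

	/* Hand the composed suspend parameter to the CPU entering idle. */
	pd_state = state->data;
	psci_set_domain_state(*pd_state);

	return 0;
}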
/* * Marvell Armada 370, 38x and XP SoC cpuidle driver * * Copyright (C) 2014 Marvell * * Nadav Haklai <[email protected]> * Gregory CLEMENT <[email protected]> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. * * Maintainer: Gregory CLEMENT <[email protected]> */ #include <linux/cpu_pm.h> #include <linux/cpuidle.h> #include <linux/module.h> #include <linux/of.h> #include <linux/suspend.h> #include <linux/platform_device.h> #include <asm/cpuidle.h> #define MVEBU_V7_FLAG_DEEP_IDLE 0x10000 static int (*mvebu_v7_cpu_suspend)(int); static __cpuidle int mvebu_v7_enter_idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { int ret; bool deepidle = false; cpu_pm_enter(); if (drv->states[index].flags & MVEBU_V7_FLAG_DEEP_IDLE) deepidle = true; ct_cpuidle_enter(); ret = mvebu_v7_cpu_suspend(deepidle); ct_cpuidle_exit(); cpu_pm_exit(); if (ret) return ret; return index; } static struct cpuidle_driver armadaxp_idle_driver = { .name = "armada_xp_idle", .states[0] = ARM_CPUIDLE_WFI_STATE, .states[1] = { .enter = mvebu_v7_enter_idle, .exit_latency = 100, .power_usage = 50, .target_residency = 1000, .flags = CPUIDLE_FLAG_RCU_IDLE, .name = "MV CPU IDLE", .desc = "CPU power down", }, .states[2] = { .enter = mvebu_v7_enter_idle, .exit_latency = 1000, .power_usage = 5, .target_residency = 10000, .flags = MVEBU_V7_FLAG_DEEP_IDLE | CPUIDLE_FLAG_RCU_IDLE, .name = "MV CPU DEEP IDLE", .desc = "CPU and L2 Fabric power down", }, .state_count = 3, }; static struct cpuidle_driver armada370_idle_driver = { .name = "armada_370_idle", .states[0] = ARM_CPUIDLE_WFI_STATE, .states[1] = { .enter = mvebu_v7_enter_idle, .exit_latency = 100, .power_usage = 5, .target_residency = 1000, .flags = MVEBU_V7_FLAG_DEEP_IDLE | CPUIDLE_FLAG_RCU_IDLE, .name = "Deep Idle", .desc = "CPU and L2 Fabric power down", }, .state_count = 2, }; static struct cpuidle_driver armada38x_idle_driver = { .name = "armada_38x_idle", .states[0] = ARM_CPUIDLE_WFI_STATE, .states[1] = { .enter = mvebu_v7_enter_idle, .exit_latency = 10, .power_usage = 5, .target_residency = 100, .flags = CPUIDLE_FLAG_RCU_IDLE, .name = "Idle", .desc = "CPU and SCU power down", }, .state_count = 2, }; static int mvebu_v7_cpuidle_probe(struct platform_device *pdev) { const struct platform_device_id *id = pdev->id_entry; if (!id) return -EINVAL; mvebu_v7_cpu_suspend = pdev->dev.platform_data; return cpuidle_register((struct cpuidle_driver *)id->driver_data, NULL); } static const struct platform_device_id mvebu_cpuidle_ids[] = { { .name = "cpuidle-armada-xp", .driver_data = (unsigned long)&armadaxp_idle_driver, }, { .name = "cpuidle-armada-370", .driver_data = (unsigned long)&armada370_idle_driver, }, { .name = "cpuidle-armada-38x", .driver_data = (unsigned long)&armada38x_idle_driver, }, {} }; static struct platform_driver mvebu_cpuidle_driver = { .probe = mvebu_v7_cpuidle_probe, .driver = { .name = "cpuidle-mbevu", .suppress_bind_attrs = true, }, .id_table = mvebu_cpuidle_ids, }; builtin_platform_driver(mvebu_cpuidle_driver); MODULE_AUTHOR("Gregory CLEMENT <[email protected]>"); MODULE_DESCRIPTION("Marvell EBU v7 cpuidle driver"); MODULE_LICENSE("GPL");
linux-master
drivers/cpuidle/cpuidle-mvebu-v7.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2012-2013 Xilinx * * CPU idle support for Xilinx Zynq * * based on arch/arm/mach-at91/cpuidle.c * * The cpu idle uses wait-for-interrupt and RAM self refresh in order * to implement two idle states - * #1 wait-for-interrupt * #2 wait-for-interrupt and RAM self refresh * * Maintainer: Michal Simek <[email protected]> */ #include <linux/init.h> #include <linux/cpuidle.h> #include <linux/platform_device.h> #include <asm/cpuidle.h> #define ZYNQ_MAX_STATES 2 /* Actual code that puts the SoC in different idle states */ static int zynq_enter_idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { /* Add code for DDR self refresh start */ cpu_do_idle(); return index; } static struct cpuidle_driver zynq_idle_driver = { .name = "zynq_idle", .owner = THIS_MODULE, .states = { ARM_CPUIDLE_WFI_STATE, { .enter = zynq_enter_idle, .exit_latency = 10, .target_residency = 10000, .name = "RAM_SR", .desc = "WFI and RAM Self Refresh", }, }, .safe_state_index = 0, .state_count = ZYNQ_MAX_STATES, }; /* Initialize CPU idle by registering the idle states */ static int zynq_cpuidle_probe(struct platform_device *pdev) { pr_info("Xilinx Zynq CpuIdle Driver started\n"); return cpuidle_register(&zynq_idle_driver, NULL); } static struct platform_driver zynq_cpuidle_driver = { .driver = { .name = "cpuidle-zynq", }, .probe = zynq_cpuidle_probe, }; builtin_platform_driver(zynq_cpuidle_driver);
linux-master
drivers/cpuidle/cpuidle-zynq.c
// SPDX-License-Identifier: GPL-2.0 /* * cpuidle driver for haltpoll governor. * * Copyright 2019 Red Hat, Inc. and/or its affiliates. * * This work is licensed under the terms of the GNU GPL, version 2. See * the COPYING file in the top-level directory. * * Authors: Marcelo Tosatti <[email protected]> */ #include <linux/init.h> #include <linux/cpu.h> #include <linux/cpuidle.h> #include <linux/module.h> #include <linux/sched/idle.h> #include <linux/kvm_para.h> #include <linux/cpuidle_haltpoll.h> static bool force __read_mostly; module_param(force, bool, 0444); MODULE_PARM_DESC(force, "Load unconditionally"); static struct cpuidle_device __percpu *haltpoll_cpuidle_devices; static enum cpuhp_state haltpoll_hp_state; static int default_enter_idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { if (current_clr_polling_and_test()) { local_irq_enable(); return index; } arch_cpu_idle(); return index; } static struct cpuidle_driver haltpoll_driver = { .name = "haltpoll", .governor = "haltpoll", .states = { { /* entry 0 is for polling */ }, { .enter = default_enter_idle, .exit_latency = 1, .target_residency = 1, .power_usage = -1, .name = "haltpoll idle", .desc = "default architecture idle", }, }, .safe_state_index = 0, .state_count = 2, }; static int haltpoll_cpu_online(unsigned int cpu) { struct cpuidle_device *dev; dev = per_cpu_ptr(haltpoll_cpuidle_devices, cpu); if (!dev->registered) { dev->cpu = cpu; if (cpuidle_register_device(dev)) { pr_notice("cpuidle_register_device %d failed!\n", cpu); return -EIO; } arch_haltpoll_enable(cpu); } return 0; } static int haltpoll_cpu_offline(unsigned int cpu) { struct cpuidle_device *dev; dev = per_cpu_ptr(haltpoll_cpuidle_devices, cpu); if (dev->registered) { arch_haltpoll_disable(cpu); cpuidle_unregister_device(dev); } return 0; } static void haltpoll_uninit(void) { if (haltpoll_hp_state) cpuhp_remove_state(haltpoll_hp_state); cpuidle_unregister_driver(&haltpoll_driver); free_percpu(haltpoll_cpuidle_devices); haltpoll_cpuidle_devices = NULL; } static bool haltpoll_want(void) { return kvm_para_has_hint(KVM_HINTS_REALTIME) || force; } static int __init haltpoll_init(void) { int ret; struct cpuidle_driver *drv = &haltpoll_driver; /* Do not load haltpoll if idle= is passed */ if (boot_option_idle_override != IDLE_NO_OVERRIDE) return -ENODEV; if (!kvm_para_available() || !haltpoll_want()) return -ENODEV; cpuidle_poll_state_init(drv); ret = cpuidle_register_driver(drv); if (ret < 0) return ret; haltpoll_cpuidle_devices = alloc_percpu(struct cpuidle_device); if (haltpoll_cpuidle_devices == NULL) { cpuidle_unregister_driver(drv); return -ENOMEM; } ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "cpuidle/haltpoll:online", haltpoll_cpu_online, haltpoll_cpu_offline); if (ret < 0) { haltpoll_uninit(); } else { haltpoll_hp_state = ret; ret = 0; } return ret; } static void __exit haltpoll_exit(void) { haltpoll_uninit(); } module_init(haltpoll_init); module_exit(haltpoll_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Marcelo Tosatti <[email protected]>");
linux-master
drivers/cpuidle/cpuidle-haltpoll.c
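Aside: the hotplug wiring above relies on CPUHP_AP_ONLINE_DYN, where a successful cpuhp_setup_state() returns the dynamically allocated state number (a positive value) rather than 0, and that value must be kept for the matching cpuhp_remove_state(). A minimal sketch of this return-value convention, with hypothetical demo_* names:

/* Hypothetical sketch of dynamic cpuhp state allocation, as used above. */
#include <linux/cpuhotplug.h>

static enum cpuhp_state demo_hp_state;

static int demo_online(unsigned int cpu)
{
	/* Per-CPU bring-up work would go here. */
	return 0;
}

static int demo_offline(unsigned int cpu)
{
	/* Per-CPU tear-down work would go here. */
	return 0;
}

static int demo_hp_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo:online",
				demo_online, demo_offline);
	if (ret < 0)
		return ret;

	/* For CPUHP_AP_ONLINE_DYN, a positive return is the allocated state. */
	demo_hp_state = ret;
	return 0;
}

static void demo_hp_exit(void)
{
	cpuhp_remove_state(demo_hp_state);
}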
// SPDX-License-Identifier: GPL-2.0-only /* * DT idle states parsing code. * * Copyright (C) 2014 ARM Ltd. * Author: Lorenzo Pieralisi <[email protected]> */ #define pr_fmt(fmt) "DT idle-states: " fmt #include <linux/cpuidle.h> #include <linux/cpumask.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include "dt_idle_states.h" static int init_state_node(struct cpuidle_state *idle_state, const struct of_device_id *match_id, struct device_node *state_node) { int err; const char *desc; /* * CPUidle drivers are expected to initialize the const void *data * pointer of the passed in struct of_device_id array to the idle * state enter function. */ idle_state->enter = match_id->data; /* * Since this is not a "coupled" state, it's safe to assume interrupts * won't be enabled when it exits, allowing the tick to be frozen * safely. So the enter() callback can also be used as the * enter_s2idle() callback. */ idle_state->enter_s2idle = match_id->data; err = of_property_read_u32(state_node, "wakeup-latency-us", &idle_state->exit_latency); if (err) { u32 entry_latency, exit_latency; err = of_property_read_u32(state_node, "entry-latency-us", &entry_latency); if (err) { pr_debug(" * %pOF missing entry-latency-us property\n", state_node); return -EINVAL; } err = of_property_read_u32(state_node, "exit-latency-us", &exit_latency); if (err) { pr_debug(" * %pOF missing exit-latency-us property\n", state_node); return -EINVAL; } /* * If wakeup-latency-us is missing, default to entry+exit * latencies as defined in idle states bindings */ idle_state->exit_latency = entry_latency + exit_latency; } err = of_property_read_u32(state_node, "min-residency-us", &idle_state->target_residency); if (err) { pr_debug(" * %pOF missing min-residency-us property\n", state_node); return -EINVAL; } err = of_property_read_string(state_node, "idle-state-name", &desc); if (err) desc = state_node->name; idle_state->flags = CPUIDLE_FLAG_RCU_IDLE; if (of_property_read_bool(state_node, "local-timer-stop")) idle_state->flags |= CPUIDLE_FLAG_TIMER_STOP; /* * TODO: * replace with kstrdup and pointer assignment when name * and desc become string pointers */ strncpy(idle_state->name, state_node->name, CPUIDLE_NAME_LEN - 1); strncpy(idle_state->desc, desc, CPUIDLE_DESC_LEN - 1); return 0; } /* * Check that the idle state is uniform across all CPUs in the CPUidle driver * cpumask */ static bool idle_state_valid(struct device_node *state_node, unsigned int idx, const cpumask_t *cpumask) { int cpu; struct device_node *cpu_node, *curr_state_node; bool valid = true; /* * Compare idle state phandles for index idx on all CPUs in the * CPUidle driver cpumask. Start from next logical cpu following * cpumask_first(cpumask) since that's the CPU state_node was * retrieved from. If a mismatch is found bail out straight * away since we certainly hit a firmware misconfiguration. */ for (cpu = cpumask_next(cpumask_first(cpumask), cpumask); cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpumask)) { cpu_node = of_cpu_device_node_get(cpu); curr_state_node = of_get_cpu_state_node(cpu_node, idx); if (state_node != curr_state_node) valid = false; of_node_put(curr_state_node); of_node_put(cpu_node); if (!valid) break; } return valid; } /** * dt_init_idle_driver() - Parse the DT idle states and initialize the * idle driver states array * @drv: Pointer to CPU idle driver to be initialized * @matches: Array of of_device_id match structures to search in for * compatible idle state nodes.
The data pointer for each valid * struct of_device_id entry in the matches array must point to * a function with the following signature, that corresponds to * the CPUidle state enter function signature: * * int (*)(struct cpuidle_device *dev, * struct cpuidle_driver *drv, * int index); * * @start_idx: First idle state index to be initialized * * If DT idle states are detected and are valid the state count and states * array entries in the cpuidle driver are initialized accordingly starting * from index start_idx. * * Return: number of valid DT idle states parsed, <0 on failure */ int dt_init_idle_driver(struct cpuidle_driver *drv, const struct of_device_id *matches, unsigned int start_idx) { struct cpuidle_state *idle_state; struct device_node *state_node, *cpu_node; const struct of_device_id *match_id; int i, err = 0; const cpumask_t *cpumask; unsigned int state_idx = start_idx; if (state_idx >= CPUIDLE_STATE_MAX) return -EINVAL; /* * We get the idle states for the first logical cpu in the * driver mask (or cpu_possible_mask if the driver cpumask is not set) * and we check through idle_state_valid() if they are uniform * across CPUs, otherwise we hit a firmware misconfiguration. */ cpumask = drv->cpumask ? : cpu_possible_mask; cpu_node = of_cpu_device_node_get(cpumask_first(cpumask)); for (i = 0; ; i++) { state_node = of_get_cpu_state_node(cpu_node, i); if (!state_node) break; match_id = of_match_node(matches, state_node); if (!match_id) { err = -ENODEV; break; } if (!of_device_is_available(state_node)) { of_node_put(state_node); continue; } if (!idle_state_valid(state_node, i, cpumask)) { pr_warn("%pOF idle state not valid, bailing out\n", state_node); err = -EINVAL; break; } if (state_idx == CPUIDLE_STATE_MAX) { pr_warn("State index reached static CPU idle driver states array size\n"); break; } idle_state = &drv->states[state_idx++]; err = init_state_node(idle_state, match_id, state_node); if (err) { pr_err("Parsing idle state node %pOF failed with err %d\n", state_node, err); err = -EINVAL; break; } of_node_put(state_node); } of_node_put(state_node); of_node_put(cpu_node); if (err) return err; /* Set the number of total supported idle states. */ drv->state_count = state_idx; /* * Return the number of present and valid DT idle states, which can * also be 0 on platforms with missing DT idle states or legacy DT * configuration predating the DT idle states bindings. */ return state_idx - start_idx; } EXPORT_SYMBOL_GPL(dt_init_idle_driver);
linux-master
drivers/cpuidle/dt_idle_states.c
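Aside: the kernel-doc above spells out the contract for dt_init_idle_driver(): each of_device_id's data pointer must be the state enter callback, and parsing starts at a caller-chosen index. A hedged consumer sketch follows, mirroring how the PSCI driver earlier in this file set uses the helper; the compatible string "vendor,idle-state" and all demo_* names are hypothetical, and the low-power entry itself is left as a stub.

/* Hypothetical consumer of dt_init_idle_driver(). */
#include <linux/cpuidle.h>
#include <linux/of.h>

#include "dt_idle_states.h"

static int demo_enter_idle(struct cpuidle_device *dev,
			   struct cpuidle_driver *drv, int index)
{
	/* Platform-specific low-power entry would go here. */
	return index;
}

static const struct of_device_id demo_idle_state_match[] = {
	{ .compatible = "vendor,idle-state", .data = demo_enter_idle },
	{ },
};

static int demo_init_states(struct cpuidle_driver *drv)
{
	int ret;

	/*
	 * Parse DT idle states into drv->states[] starting at index 1;
	 * index 0 is conventionally a WFI/polling state the driver sets
	 * up itself.
	 */
	ret = dt_init_idle_driver(drv, demo_idle_state_match, 1);
	if (ret <= 0)
		return ret ? : -ENODEV;

	return 0;
}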
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2011-2014 Samsung Electronics Co., Ltd. * http://www.samsung.com * * Coupled cpuidle support based on the work of: * Colin Cross <[email protected]> * Daniel Lezcano <[email protected]> */ #include <linux/cpuidle.h> #include <linux/cpu_pm.h> #include <linux/export.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/of.h> #include <linux/platform_data/cpuidle-exynos.h> #include <asm/suspend.h> #include <asm/cpuidle.h> static atomic_t exynos_idle_barrier; static struct cpuidle_exynos_data *exynos_cpuidle_pdata; static void (*exynos_enter_aftr)(void); static int exynos_enter_coupled_lowpower(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { int ret; exynos_cpuidle_pdata->pre_enter_aftr(); /* * Waiting all cpus to reach this point at the same moment */ cpuidle_coupled_parallel_barrier(dev, &exynos_idle_barrier); /* * Both cpus will reach this point at the same time */ ret = dev->cpu ? exynos_cpuidle_pdata->cpu1_powerdown() : exynos_cpuidle_pdata->cpu0_enter_aftr(); if (ret) index = ret; /* * Waiting all cpus to finish the power sequence before going further */ cpuidle_coupled_parallel_barrier(dev, &exynos_idle_barrier); exynos_cpuidle_pdata->post_enter_aftr(); return index; } static int exynos_enter_lowpower(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { int new_index = index; /* AFTR can only be entered when cores other than CPU0 are offline */ if (num_online_cpus() > 1 || dev->cpu != 0) new_index = drv->safe_state_index; if (new_index == 0) return arm_cpuidle_simple_enter(dev, drv, new_index); exynos_enter_aftr(); return new_index; } static struct cpuidle_driver exynos_idle_driver = { .name = "exynos_idle", .owner = THIS_MODULE, .states = { [0] = ARM_CPUIDLE_WFI_STATE, [1] = { .enter = exynos_enter_lowpower, .exit_latency = 300, .target_residency = 10000, .name = "C1", .desc = "ARM power down", }, }, .state_count = 2, .safe_state_index = 0, }; static struct cpuidle_driver exynos_coupled_idle_driver = { .name = "exynos_coupled_idle", .owner = THIS_MODULE, .states = { [0] = ARM_CPUIDLE_WFI_STATE, [1] = { .enter = exynos_enter_coupled_lowpower, .exit_latency = 5000, .target_residency = 10000, .flags = CPUIDLE_FLAG_COUPLED | CPUIDLE_FLAG_TIMER_STOP, .name = "C1", .desc = "ARM power down", }, }, .state_count = 2, .safe_state_index = 0, }; static int exynos_cpuidle_probe(struct platform_device *pdev) { int ret; if (IS_ENABLED(CONFIG_SMP) && (of_machine_is_compatible("samsung,exynos4210") || of_machine_is_compatible("samsung,exynos3250"))) { exynos_cpuidle_pdata = pdev->dev.platform_data; ret = cpuidle_register(&exynos_coupled_idle_driver, cpu_possible_mask); } else { exynos_enter_aftr = (void *)(pdev->dev.platform_data); ret = cpuidle_register(&exynos_idle_driver, NULL); } if (ret) { dev_err(&pdev->dev, "failed to register cpuidle driver\n"); return ret; } return 0; } static struct platform_driver exynos_cpuidle_driver = { .probe = exynos_cpuidle_probe, .driver = { .name = "exynos_cpuidle", }, }; builtin_platform_driver(exynos_cpuidle_driver);
linux-master
drivers/cpuidle/cpuidle-exynos.c
/* * cpuidle.c - core cpuidle infrastructure * * (C) 2006-2007 Venkatesh Pallipadi <[email protected]> * Shaohua Li <[email protected]> * Adam Belay <[email protected]> * * This code is licenced under the GPL. */ #include "linux/percpu-defs.h" #include <linux/clockchips.h> #include <linux/kernel.h> #include <linux/mutex.h> #include <linux/sched.h> #include <linux/sched/clock.h> #include <linux/sched/idle.h> #include <linux/notifier.h> #include <linux/pm_qos.h> #include <linux/cpu.h> #include <linux/cpuidle.h> #include <linux/ktime.h> #include <linux/hrtimer.h> #include <linux/module.h> #include <linux/suspend.h> #include <linux/tick.h> #include <linux/mmu_context.h> #include <linux/context_tracking.h> #include <trace/events/power.h> #include "cpuidle.h" DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices); DEFINE_PER_CPU(struct cpuidle_device, cpuidle_dev); DEFINE_MUTEX(cpuidle_lock); LIST_HEAD(cpuidle_detected_devices); static int enabled_devices; static int off __read_mostly; static int initialized __read_mostly; int cpuidle_disabled(void) { return off; } void disable_cpuidle(void) { off = 1; } bool cpuidle_not_available(struct cpuidle_driver *drv, struct cpuidle_device *dev) { return off || !initialized || !drv || !dev || !dev->enabled; } /** * cpuidle_play_dead - cpu off-lining * * Returns in case of an error or no driver */ int cpuidle_play_dead(void) { struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); int i; if (!drv) return -ENODEV; /* Find lowest-power state that supports long-term idle */ for (i = drv->state_count - 1; i >= 0; i--) if (drv->states[i].enter_dead) return drv->states[i].enter_dead(dev, i); return -ENODEV; } static int find_deepest_state(struct cpuidle_driver *drv, struct cpuidle_device *dev, u64 max_latency_ns, unsigned int forbidden_flags, bool s2idle) { u64 latency_req = 0; int i, ret = 0; for (i = 1; i < drv->state_count; i++) { struct cpuidle_state *s = &drv->states[i]; if (dev->states_usage[i].disable || s->exit_latency_ns <= latency_req || s->exit_latency_ns > max_latency_ns || (s->flags & forbidden_flags) || (s2idle && !s->enter_s2idle)) continue; latency_req = s->exit_latency_ns; ret = i; } return ret; } /** * cpuidle_use_deepest_state - Set/unset governor override mode. * @latency_limit_ns: Idle state exit latency limit (or no override if 0). * * If @latency_limit_ns is nonzero, set the current CPU to use the deepest idle * state with exit latency within @latency_limit_ns (override governors going * forward), or do not override governors if it is zero. */ void cpuidle_use_deepest_state(u64 latency_limit_ns) { struct cpuidle_device *dev; preempt_disable(); dev = cpuidle_get_device(); if (dev) dev->forced_idle_latency_limit_ns = latency_limit_ns; preempt_enable(); } /** * cpuidle_find_deepest_state - Find the deepest available idle state. * @drv: cpuidle driver for the given CPU. * @dev: cpuidle device for the given CPU. * @latency_limit_ns: Idle state exit latency limit * * Return: the index of the deepest available idle state. 
*/ int cpuidle_find_deepest_state(struct cpuidle_driver *drv, struct cpuidle_device *dev, u64 latency_limit_ns) { return find_deepest_state(drv, dev, latency_limit_ns, 0, false); } #ifdef CONFIG_SUSPEND static noinstr void enter_s2idle_proper(struct cpuidle_driver *drv, struct cpuidle_device *dev, int index) { struct cpuidle_state *target_state = &drv->states[index]; ktime_t time_start, time_end; instrumentation_begin(); time_start = ns_to_ktime(local_clock_noinstr()); tick_freeze(); /* * The state used here cannot be a "coupled" one, because the "coupled" * cpuidle mechanism enables interrupts and doing that with timekeeping * suspended is generally unsafe. */ stop_critical_timings(); if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE)) { ct_cpuidle_enter(); /* Annotate away the indirect call */ instrumentation_begin(); } target_state->enter_s2idle(dev, drv, index); if (WARN_ON_ONCE(!irqs_disabled())) raw_local_irq_disable(); if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE)) { instrumentation_end(); ct_cpuidle_exit(); } tick_unfreeze(); start_critical_timings(); time_end = ns_to_ktime(local_clock_noinstr()); dev->states_usage[index].s2idle_time += ktime_us_delta(time_end, time_start); dev->states_usage[index].s2idle_usage++; instrumentation_end(); } /** * cpuidle_enter_s2idle - Enter an idle state suitable for suspend-to-idle. * @drv: cpuidle driver for the given CPU. * @dev: cpuidle device for the given CPU. * * If there are states with the ->enter_s2idle callback, find the deepest of * them and enter it with frozen tick. */ int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev) { int index; /* * Find the deepest state with ->enter_s2idle present, which guarantees * that interrupts won't be enabled when it exits and allows the tick to * be frozen safely. */ index = find_deepest_state(drv, dev, U64_MAX, 0, true); if (index > 0) { enter_s2idle_proper(drv, dev, index); local_irq_enable(); } return index; } #endif /* CONFIG_SUSPEND */ /** * cpuidle_enter_state - enter the state and update stats * @dev: cpuidle device for this cpu * @drv: cpuidle driver for this cpu * @index: index into the states table in @drv of the state to enter */ noinstr int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { int entered_state; struct cpuidle_state *target_state = &drv->states[index]; bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP); ktime_t time_start, time_end; instrumentation_begin(); /* * Tell the time framework to switch to a broadcast timer because our * local timer will be shut down. If a local timer is used from another * CPU as a broadcast timer, this call may fail if it is not available. */ if (broadcast && tick_broadcast_enter()) { index = find_deepest_state(drv, dev, target_state->exit_latency_ns, CPUIDLE_FLAG_TIMER_STOP, false); if (index < 0) { default_idle_call(); return -EBUSY; } target_state = &drv->states[index]; broadcast = false; } if (target_state->flags & CPUIDLE_FLAG_TLB_FLUSHED) leave_mm(dev->cpu); /* Take note of the planned idle state. */ sched_idle_set_state(target_state); trace_cpu_idle(index, dev->cpu); time_start = ns_to_ktime(local_clock_noinstr()); stop_critical_timings(); if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE)) { ct_cpuidle_enter(); /* Annotate away the indirect call */ instrumentation_begin(); } /* * NOTE!! * * For cpuidle_state::enter() methods that do *NOT* set * CPUIDLE_FLAG_RCU_IDLE RCU will be disabled here and these functions * must be marked either noinstr or __cpuidle. 
* * For cpuidle_state::enter() methods that *DO* set * CPUIDLE_FLAG_RCU_IDLE this isn't required, but they must mark the * function calling ct_cpuidle_enter() as noinstr/__cpuidle and all * functions called within the RCU-idle region. */ entered_state = target_state->enter(dev, drv, index); if (WARN_ONCE(!irqs_disabled(), "%ps leaked IRQ state", target_state->enter)) raw_local_irq_disable(); if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE)) { instrumentation_end(); ct_cpuidle_exit(); } start_critical_timings(); sched_clock_idle_wakeup_event(); time_end = ns_to_ktime(local_clock_noinstr()); trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu); /* The cpu is no longer idle or about to enter idle. */ sched_idle_set_state(NULL); if (broadcast) tick_broadcast_exit(); if (!cpuidle_state_is_coupled(drv, index)) local_irq_enable(); if (entered_state >= 0) { s64 diff, delay = drv->states[entered_state].exit_latency_ns; int i; /* * Update the cpuidle counters. * This could be moved into the driver's enter routine, * but that would result in multiple copies of the same code. */ diff = ktime_sub(time_end, time_start); dev->last_residency_ns = diff; dev->states_usage[entered_state].time_ns += diff; dev->states_usage[entered_state].usage++; if (diff < drv->states[entered_state].target_residency_ns) { for (i = entered_state - 1; i >= 0; i--) { if (dev->states_usage[i].disable) continue; /* Shallower states are enabled, so update. */ dev->states_usage[entered_state].above++; trace_cpu_idle_miss(dev->cpu, entered_state, false); break; } } else if (diff > delay) { for (i = entered_state + 1; i < drv->state_count; i++) { if (dev->states_usage[i].disable) continue; /* * Update if a deeper state would have been a * better match for the observed idle duration. */ if (diff - delay >= drv->states[i].target_residency_ns) { dev->states_usage[entered_state].below++; trace_cpu_idle_miss(dev->cpu, entered_state, true); } break; } } } else { dev->last_residency_ns = 0; dev->states_usage[index].rejected++; } instrumentation_end(); return entered_state; } /** * cpuidle_select - ask the cpuidle framework to choose an idle state * * @drv: the cpuidle driver * @dev: the cpuidle device * @stop_tick: indication on whether or not to stop the tick * * Returns the index of the idle state. The return value must not be negative. * * The memory location pointed to by @stop_tick is expected to be written the * 'false' boolean value if the scheduler tick should not be stopped before * entering the returned state. */ int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, bool *stop_tick) { return cpuidle_curr_governor->select(drv, dev, stop_tick); } /** * cpuidle_enter - enter into the specified idle state * * @drv: the cpuidle driver tied with the cpu * @dev: the cpuidle device * @index: the index in the idle state table * * Returns the index in the idle state, < 0 in case of error. * The error code depends on the backend driver */ int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev, int index) { int ret = 0; /* * Store the next hrtimer, which becomes either next tick or the next * timer event, whatever expires first. Additionally, to make this data * useful for consumers outside cpuidle, we rely on the governor's * ->select() callback having decided whether to stop the tick or not.
*/ WRITE_ONCE(dev->next_hrtimer, tick_nohz_get_next_hrtimer()); if (cpuidle_state_is_coupled(drv, index)) ret = cpuidle_enter_state_coupled(dev, drv, index); else ret = cpuidle_enter_state(dev, drv, index); WRITE_ONCE(dev->next_hrtimer, 0); return ret; } /** * cpuidle_reflect - tell the underlying governor what was the state * we were in * * @dev: the cpuidle device * @index: the index in the idle state table * */ void cpuidle_reflect(struct cpuidle_device *dev, int index) { if (cpuidle_curr_governor->reflect && index >= 0) cpuidle_curr_governor->reflect(dev, index); } /* * Min polling interval of 10usec is a guess. It assumes that * for most users, the time for a single ping-pong workload like * perf bench pipe would generally complete within 10usec, but * this is hardware dependent. Actual time can be estimated with * * perf bench sched pipe -l 10000 * * Run multiple times to avoid cpufreq effects. */ #define CPUIDLE_POLL_MIN 10000 #define CPUIDLE_POLL_MAX (TICK_NSEC / 16) /** * cpuidle_poll_time - return amount of time to poll for, * governors can override dev->poll_limit_ns if necessary * * @drv: the cpuidle driver tied with the cpu * @dev: the cpuidle device * */ __cpuidle u64 cpuidle_poll_time(struct cpuidle_driver *drv, struct cpuidle_device *dev) { int i; u64 limit_ns; BUILD_BUG_ON(CPUIDLE_POLL_MIN > CPUIDLE_POLL_MAX); if (dev->poll_limit_ns) return dev->poll_limit_ns; limit_ns = CPUIDLE_POLL_MAX; for (i = 1; i < drv->state_count; i++) { u64 state_limit; if (dev->states_usage[i].disable) continue; state_limit = drv->states[i].target_residency_ns; if (state_limit < CPUIDLE_POLL_MIN) continue; limit_ns = min_t(u64, state_limit, CPUIDLE_POLL_MAX); break; } dev->poll_limit_ns = limit_ns; return dev->poll_limit_ns; } /** * cpuidle_install_idle_handler - installs the cpuidle idle loop handler */ void cpuidle_install_idle_handler(void) { if (enabled_devices) { /* Make sure all changes finished before we switch to new idle */ smp_wmb(); initialized = 1; } } /** * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler */ void cpuidle_uninstall_idle_handler(void) { if (enabled_devices) { initialized = 0; wake_up_all_idle_cpus(); } /* * Make sure external observers (such as the scheduler) * are done looking at pointed idle states. */ synchronize_rcu(); } /** * cpuidle_pause_and_lock - temporarily disables CPUIDLE */ void cpuidle_pause_and_lock(void) { mutex_lock(&cpuidle_lock); cpuidle_uninstall_idle_handler(); } EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock); /** * cpuidle_resume_and_unlock - resumes CPUIDLE operation */ void cpuidle_resume_and_unlock(void) { cpuidle_install_idle_handler(); mutex_unlock(&cpuidle_lock); } EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock); /* Currently used in suspend/resume path to suspend cpuidle */ void cpuidle_pause(void) { mutex_lock(&cpuidle_lock); cpuidle_uninstall_idle_handler(); mutex_unlock(&cpuidle_lock); } /* Currently used in suspend/resume path to resume cpuidle */ void cpuidle_resume(void) { mutex_lock(&cpuidle_lock); cpuidle_install_idle_handler(); mutex_unlock(&cpuidle_lock); } /** * cpuidle_enable_device - enables idle PM for a CPU * @dev: the CPU * * This function must be called between cpuidle_pause_and_lock and * cpuidle_resume_and_unlock when used externally.
*/ int cpuidle_enable_device(struct cpuidle_device *dev) { int ret; struct cpuidle_driver *drv; if (!dev) return -EINVAL; if (dev->enabled) return 0; if (!cpuidle_curr_governor) return -EIO; drv = cpuidle_get_cpu_driver(dev); if (!drv) return -EIO; if (!dev->registered) return -EINVAL; ret = cpuidle_add_device_sysfs(dev); if (ret) return ret; if (cpuidle_curr_governor->enable) { ret = cpuidle_curr_governor->enable(drv, dev); if (ret) goto fail_sysfs; } smp_wmb(); dev->enabled = 1; enabled_devices++; return 0; fail_sysfs: cpuidle_remove_device_sysfs(dev); return ret; } EXPORT_SYMBOL_GPL(cpuidle_enable_device); /** * cpuidle_disable_device - disables idle PM for a CPU * @dev: the CPU * * This function must be called between cpuidle_pause_and_lock and * cpuidle_resume_and_unlock when used externally. */ void cpuidle_disable_device(struct cpuidle_device *dev) { struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); if (!dev || !dev->enabled) return; if (!drv || !cpuidle_curr_governor) return; dev->enabled = 0; if (cpuidle_curr_governor->disable) cpuidle_curr_governor->disable(drv, dev); cpuidle_remove_device_sysfs(dev); enabled_devices--; } EXPORT_SYMBOL_GPL(cpuidle_disable_device); static void __cpuidle_unregister_device(struct cpuidle_device *dev) { struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); list_del(&dev->device_list); per_cpu(cpuidle_devices, dev->cpu) = NULL; module_put(drv->owner); dev->registered = 0; } static void __cpuidle_device_init(struct cpuidle_device *dev) { memset(dev->states_usage, 0, sizeof(dev->states_usage)); dev->last_residency_ns = 0; dev->next_hrtimer = 0; } /** * __cpuidle_register_device - internal register function called before register * and enable routines * @dev: the cpu * * cpuidle_lock mutex must be held before this is called */ static int __cpuidle_register_device(struct cpuidle_device *dev) { struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); int i, ret; if (!try_module_get(drv->owner)) return -EINVAL; for (i = 0; i < drv->state_count; i++) { if (drv->states[i].flags & CPUIDLE_FLAG_UNUSABLE) dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_DRIVER; if (drv->states[i].flags & CPUIDLE_FLAG_OFF) dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_USER; } per_cpu(cpuidle_devices, dev->cpu) = dev; list_add(&dev->device_list, &cpuidle_detected_devices); ret = cpuidle_coupled_register_device(dev); if (ret) __cpuidle_unregister_device(dev); else dev->registered = 1; return ret; } /** * cpuidle_register_device - registers a CPU's idle PM feature * @dev: the cpu */ int cpuidle_register_device(struct cpuidle_device *dev) { int ret = -EBUSY; if (!dev) return -EINVAL; mutex_lock(&cpuidle_lock); if (dev->registered) goto out_unlock; __cpuidle_device_init(dev); ret = __cpuidle_register_device(dev); if (ret) goto out_unlock; ret = cpuidle_add_sysfs(dev); if (ret) goto out_unregister; ret = cpuidle_enable_device(dev); if (ret) goto out_sysfs; cpuidle_install_idle_handler(); out_unlock: mutex_unlock(&cpuidle_lock); return ret; out_sysfs: cpuidle_remove_sysfs(dev); out_unregister: __cpuidle_unregister_device(dev); goto out_unlock; } EXPORT_SYMBOL_GPL(cpuidle_register_device); /** * cpuidle_unregister_device - unregisters a CPU's idle PM feature * @dev: the cpu */ void cpuidle_unregister_device(struct cpuidle_device *dev) { if (!dev || dev->registered == 0) return; cpuidle_pause_and_lock(); cpuidle_disable_device(dev); cpuidle_remove_sysfs(dev); __cpuidle_unregister_device(dev); cpuidle_coupled_unregister_device(dev); 
cpuidle_resume_and_unlock(); } EXPORT_SYMBOL_GPL(cpuidle_unregister_device); /** * cpuidle_unregister: unregister a driver and the devices. This function * can be used only if the driver has been previously registered through * the cpuidle_register function. * * @drv: a valid pointer to a struct cpuidle_driver */ void cpuidle_unregister(struct cpuidle_driver *drv) { int cpu; struct cpuidle_device *device; for_each_cpu(cpu, drv->cpumask) { device = &per_cpu(cpuidle_dev, cpu); cpuidle_unregister_device(device); } cpuidle_unregister_driver(drv); } EXPORT_SYMBOL_GPL(cpuidle_unregister); /** * cpuidle_register: registers the driver and the cpu devices with the * coupled_cpus passed as parameter. This function is used for all common * initialization pattern there are in the arch specific drivers. The * devices is globally defined in this file. * * @drv : a valid pointer to a struct cpuidle_driver * @coupled_cpus: a cpumask for the coupled states * * Returns 0 on success, < 0 otherwise */ int cpuidle_register(struct cpuidle_driver *drv, const struct cpumask *const coupled_cpus) { int ret, cpu; struct cpuidle_device *device; ret = cpuidle_register_driver(drv); if (ret) { pr_err("failed to register cpuidle driver\n"); return ret; } for_each_cpu(cpu, drv->cpumask) { device = &per_cpu(cpuidle_dev, cpu); device->cpu = cpu; #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED /* * On multiplatform for ARM, the coupled idle states could be * enabled in the kernel even if the cpuidle driver does not * use it. Note, coupled_cpus is a struct copy. */ if (coupled_cpus) device->coupled_cpus = *coupled_cpus; #endif ret = cpuidle_register_device(device); if (!ret) continue; pr_err("Failed to register cpuidle device for cpu%d\n", cpu); cpuidle_unregister(drv); break; } return ret; } EXPORT_SYMBOL_GPL(cpuidle_register); /** * cpuidle_init - core initializer */ static int __init cpuidle_init(void) { if (cpuidle_disabled()) return -ENODEV; return cpuidle_add_interface(); } module_param(off, int, 0444); module_param_string(governor, param_governor, CPUIDLE_NAME_LEN, 0444); core_initcall(cpuidle_init);
linux-master
drivers/cpuidle/cpuidle.c
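Aside: the above/below accounting in cpuidle_enter_state() classifies each wakeup as having used a state that was too deep (measured residency below the state's target) or too shallow (residency long enough that the next enabled deeper state would also have paid off). The following self-contained sketch restates just that classification as a pure function; the demo_* names are hypothetical and no kernel state is touched.

/* Hypothetical restatement of the idle-miss classification above. */
#include <linux/types.h>

enum demo_idle_miss {
	DEMO_IDLE_OK,
	DEMO_IDLE_TOO_DEEP,	/* would increment ->above */
	DEMO_IDLE_TOO_SHALLOW,	/* would increment ->below */
};

static enum demo_idle_miss
demo_classify_idle_miss(s64 diff_ns, s64 target_ns, s64 exit_latency_ns,
			bool have_shallower, bool have_deeper,
			s64 deeper_target_ns)
{
	/* Residency shorter than the state's target: state was too deep. */
	if (diff_ns < target_ns)
		return have_shallower ? DEMO_IDLE_TOO_DEEP : DEMO_IDLE_OK;

	/*
	 * Residency beyond the state's exit latency, and long enough that
	 * the next enabled deeper state's target residency would also have
	 * been met: the state was too shallow.
	 */
	if (diff_ns > exit_latency_ns && have_deeper &&
	    diff_ns - exit_latency_ns >= deeper_target_ns)
		return DEMO_IDLE_TOO_SHALLOW;

	return DEMO_IDLE_OK;
}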
// SPDX-License-Identifier: GPL-2.0-only /* * PM domains for CPUs via genpd. * * Copyright (C) 2019 Linaro Ltd. * Author: Ulf Hansson <[email protected]> * * Copyright (c) 2021 Western Digital Corporation or its affiliates. * Copyright (c) 2022 Ventana Micro Systems Inc. */ #define pr_fmt(fmt) "dt-idle-genpd: " fmt #include <linux/cpu.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/pm_domain.h> #include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/string.h> #include "dt_idle_genpd.h" static int pd_parse_state_nodes( int (*parse_state)(struct device_node *, u32 *), struct genpd_power_state *states, int state_count) { int i, ret; u32 state, *state_buf; for (i = 0; i < state_count; i++) { ret = parse_state(to_of_node(states[i].fwnode), &state); if (ret) goto free_state; state_buf = kmalloc(sizeof(u32), GFP_KERNEL); if (!state_buf) { ret = -ENOMEM; goto free_state; } *state_buf = state; states[i].data = state_buf; } return 0; free_state: i--; for (; i >= 0; i--) kfree(states[i].data); return ret; } static int pd_parse_states(struct device_node *np, int (*parse_state)(struct device_node *, u32 *), struct genpd_power_state **states, int *state_count) { int ret; /* Parse the domain idle states. */ ret = of_genpd_parse_idle_states(np, states, state_count); if (ret) return ret; /* Fill out the dt specifics for each found state. */ ret = pd_parse_state_nodes(parse_state, *states, *state_count); if (ret) kfree(*states); return ret; } static void pd_free_states(struct genpd_power_state *states, unsigned int state_count) { int i; for (i = 0; i < state_count; i++) kfree(states[i].data); kfree(states); } void dt_idle_pd_free(struct generic_pm_domain *pd) { pd_free_states(pd->states, pd->state_count); kfree(pd->name); kfree(pd); } struct generic_pm_domain *dt_idle_pd_alloc(struct device_node *np, int (*parse_state)(struct device_node *, u32 *)) { struct generic_pm_domain *pd; struct genpd_power_state *states = NULL; int ret, state_count = 0; pd = kzalloc(sizeof(*pd), GFP_KERNEL); if (!pd) goto out; pd->name = kasprintf(GFP_KERNEL, "%pOF", np); if (!pd->name) goto free_pd; /* * Parse the domain idle states and let genpd manage the state selection * for those being compatible with "domain-idle-state". 
*/ ret = pd_parse_states(np, parse_state, &states, &state_count); if (ret) goto free_name; pd->free_states = pd_free_states; pd->name = kbasename(pd->name); pd->states = states; pd->state_count = state_count; pr_debug("alloc PM domain %s\n", pd->name); return pd; free_name: kfree(pd->name); free_pd: kfree(pd); out: pr_err("failed to alloc PM domain %pOF\n", np); return NULL; } int dt_idle_pd_init_topology(struct device_node *np) { struct device_node *node; struct of_phandle_args child, parent; int ret; for_each_child_of_node(np, node) { if (of_parse_phandle_with_args(node, "power-domains", "#power-domain-cells", 0, &parent)) continue; child.np = node; child.args_count = 0; ret = of_genpd_add_subdomain(&parent, &child); of_node_put(parent.np); if (ret) { of_node_put(node); return ret; } } return 0; } int dt_idle_pd_remove_topology(struct device_node *np) { struct device_node *node; struct of_phandle_args child, parent; int ret; for_each_child_of_node(np, node) { if (of_parse_phandle_with_args(node, "power-domains", "#power-domain-cells", 0, &parent)) continue; child.np = node; child.args_count = 0; ret = of_genpd_remove_subdomain(&parent, &child); of_node_put(parent.np); if (ret) { of_node_put(node); return ret; } } return 0; } struct device *dt_idle_attach_cpu(int cpu, const char *name) { struct device *dev; dev = dev_pm_domain_attach_by_name(get_cpu_device(cpu), name); if (IS_ERR_OR_NULL(dev)) return dev; pm_runtime_irq_safe(dev); if (cpu_online(cpu)) pm_runtime_get_sync(dev); dev_pm_syscore_device(dev, true); return dev; } void dt_idle_detach_cpu(struct device *dev) { if (IS_ERR_OR_NULL(dev)) return; dev_pm_domain_detach(dev, false); }
linux-master
drivers/cpuidle/dt_idle_genpd.c
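Aside: a short usage sketch for the attach/detach helpers above. The domain name "psci" matches what the in-tree PSCI glue passes; the demo_* wrappers are hypothetical.

/* Hypothetical usage of dt_idle_attach_cpu()/dt_idle_detach_cpu(). */
#include <linux/err.h>
#include <linux/pm_runtime.h>

#include "dt_idle_genpd.h"

static struct device *demo_attach_cpu(int cpu)
{
	struct device *dev = dt_idle_attach_cpu(cpu, "psci");

	/* NULL means no domain to attach to; an ERR_PTR is a real error. */
	if (IS_ERR_OR_NULL(dev))
		return dev;

	/*
	 * The returned device is runtime-PM capable and IRQ safe; callers
	 * bracket idle entry with pm_runtime_put_sync_suspend() and
	 * pm_runtime_get_sync(), as the PSCI driver earlier in this file
	 * set does.
	 */
	return dev;
}

static void demo_detach_cpu(struct device *dev)
{
	/* Safe for NULL and ERR_PTR values; the helper checks them. */
	dt_idle_detach_cpu(dev);
}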
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2014 Imagination Technologies * Author: Paul Burton <[email protected]> */ #include <linux/cpu_pm.h> #include <linux/cpuidle.h> #include <linux/init.h> #include <asm/idle.h> #include <asm/pm-cps.h> /* Enumeration of the various idle states this driver may enter */ enum cps_idle_state { STATE_WAIT = 0, /* MIPS wait instruction, coherent */ STATE_NC_WAIT, /* MIPS wait instruction, non-coherent */ STATE_CLOCK_GATED, /* Core clock gated */ STATE_POWER_GATED, /* Core power gated */ STATE_COUNT }; static int cps_nc_enter(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { enum cps_pm_state pm_state; int err; /* * At least one core must remain powered up & clocked in order for the * system to have any hope of functioning. * * TODO: don't treat core 0 specially, just prevent the final core * TODO: remap interrupt affinity temporarily */ if (cpus_are_siblings(0, dev->cpu) && (index > STATE_NC_WAIT)) index = STATE_NC_WAIT; /* Select the appropriate cps_pm_state */ switch (index) { case STATE_NC_WAIT: pm_state = CPS_PM_NC_WAIT; break; case STATE_CLOCK_GATED: pm_state = CPS_PM_CLOCK_GATED; break; case STATE_POWER_GATED: pm_state = CPS_PM_POWER_GATED; break; default: BUG(); return -EINVAL; } /* Notify listeners the CPU is about to power down */ if ((pm_state == CPS_PM_POWER_GATED) && cpu_pm_enter()) return -EINTR; /* Enter that state */ err = cps_pm_enter_state(pm_state); /* Notify listeners the CPU is back up */ if (pm_state == CPS_PM_POWER_GATED) cpu_pm_exit(); return err ?: index; } static struct cpuidle_driver cps_driver = { .name = "cpc_cpuidle", .owner = THIS_MODULE, .states = { [STATE_WAIT] = MIPS_CPUIDLE_WAIT_STATE, [STATE_NC_WAIT] = { .enter = cps_nc_enter, .exit_latency = 200, .target_residency = 450, .name = "nc-wait", .desc = "non-coherent MIPS wait", }, [STATE_CLOCK_GATED] = { .enter = cps_nc_enter, .exit_latency = 300, .target_residency = 700, .flags = CPUIDLE_FLAG_TIMER_STOP, .name = "clock-gated", .desc = "core clock gated", }, [STATE_POWER_GATED] = { .enter = cps_nc_enter, .exit_latency = 600, .target_residency = 1000, .flags = CPUIDLE_FLAG_TIMER_STOP, .name = "power-gated", .desc = "core power gated", }, }, .state_count = STATE_COUNT, .safe_state_index = 0, }; static void __init cps_cpuidle_unregister(void) { int cpu; struct cpuidle_device *device; for_each_possible_cpu(cpu) { device = &per_cpu(cpuidle_dev, cpu); cpuidle_unregister_device(device); } cpuidle_unregister_driver(&cps_driver); } static int __init cps_cpuidle_init(void) { int err, cpu, i; struct cpuidle_device *device; /* Detect supported states */ if (!cps_pm_support_state(CPS_PM_POWER_GATED)) cps_driver.state_count = STATE_CLOCK_GATED + 1; if (!cps_pm_support_state(CPS_PM_CLOCK_GATED)) cps_driver.state_count = STATE_NC_WAIT + 1; if (!cps_pm_support_state(CPS_PM_NC_WAIT)) cps_driver.state_count = STATE_WAIT + 1; /* Inform the user if some states are unavailable */ if (cps_driver.state_count < STATE_COUNT) { pr_info("cpuidle-cps: limited to "); switch (cps_driver.state_count - 1) { case STATE_WAIT: pr_cont("coherent wait\n"); break; case STATE_NC_WAIT: pr_cont("non-coherent wait\n"); break; case STATE_CLOCK_GATED: pr_cont("clock gating\n"); break; } } /* * Set the coupled flag on the appropriate states if this system * requires it. 
*/ if (coupled_coherence) for (i = STATE_NC_WAIT; i < cps_driver.state_count; i++) cps_driver.states[i].flags |= CPUIDLE_FLAG_COUPLED; err = cpuidle_register_driver(&cps_driver); if (err) { pr_err("Failed to register CPS cpuidle driver\n"); return err; } for_each_possible_cpu(cpu) { device = &per_cpu(cpuidle_dev, cpu); device->cpu = cpu; #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED cpumask_copy(&device->coupled_cpus, &cpu_sibling_map[cpu]); #endif err = cpuidle_register_device(device); if (err) { pr_err("Failed to register CPU%d cpuidle device\n", cpu); goto err_out; } } return 0; err_out: cps_cpuidle_unregister(); return err; } device_initcall(cps_cpuidle_init);
linux-master
drivers/cpuidle/cpuidle-cps.c
// SPDX-License-Identifier: GPL-2.0 /* * cpuidle-powernv - idle state cpuidle driver. * Adapted from drivers/cpuidle/cpuidle-pseries * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/moduleparam.h> #include <linux/cpuidle.h> #include <linux/cpu.h> #include <linux/notifier.h> #include <linux/clockchips.h> #include <linux/of.h> #include <linux/slab.h> #include <asm/machdep.h> #include <asm/firmware.h> #include <asm/opal.h> #include <asm/runlatch.h> #include <asm/cpuidle.h> /* * Expose only those Hardware idle states via the cpuidle framework * that have latency value below POWERNV_THRESHOLD_LATENCY_NS. */ #define POWERNV_THRESHOLD_LATENCY_NS 200000 static struct cpuidle_driver powernv_idle_driver = { .name = "powernv_idle", .owner = THIS_MODULE, }; static int max_idle_state __read_mostly; static struct cpuidle_state *cpuidle_state_table __read_mostly; struct stop_psscr_table { u64 val; u64 mask; }; static struct stop_psscr_table stop_psscr_table[CPUIDLE_STATE_MAX] __read_mostly; static u64 default_snooze_timeout __read_mostly; static bool snooze_timeout_en __read_mostly; static u64 get_snooze_timeout(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { int i; if (unlikely(!snooze_timeout_en)) return default_snooze_timeout; for (i = index + 1; i < drv->state_count; i++) { if (dev->states_usage[i].disable) continue; return drv->states[i].target_residency * tb_ticks_per_usec; } return default_snooze_timeout; } static int snooze_loop(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { u64 snooze_exit_time; set_thread_flag(TIF_POLLING_NRFLAG); local_irq_enable(); snooze_exit_time = get_tb() + get_snooze_timeout(dev, drv, index); dev->poll_time_limit = false; ppc64_runlatch_off(); HMT_very_low(); while (!need_resched()) { if (likely(snooze_timeout_en) && get_tb() > snooze_exit_time) { /* * Task has not woken up but we are exiting the polling * loop anyway. Require a barrier after polling is * cleared to order subsequent test of need_resched(). */ clear_thread_flag(TIF_POLLING_NRFLAG); dev->poll_time_limit = true; smp_mb(); break; } } HMT_medium(); ppc64_runlatch_on(); clear_thread_flag(TIF_POLLING_NRFLAG); local_irq_disable(); return index; } static int nap_loop(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { power7_idle_type(PNV_THREAD_NAP); return index; } /* Register for fastsleep only in oneshot mode of broadcast */ #ifdef CONFIG_TICK_ONESHOT static int fastsleep_loop(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { unsigned long old_lpcr = mfspr(SPRN_LPCR); unsigned long new_lpcr; if (unlikely(system_state < SYSTEM_RUNNING)) return index; new_lpcr = old_lpcr; /* Do not exit powersave upon decrementer as we've setup the timer * offload. */ new_lpcr &= ~LPCR_PECE1; mtspr(SPRN_LPCR, new_lpcr); power7_idle_type(PNV_THREAD_SLEEP); mtspr(SPRN_LPCR, old_lpcr); return index; } #endif static int stop_loop(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { arch300_idle_type(stop_psscr_table[index].val, stop_psscr_table[index].mask); return index; } /* * States for dedicated partition case. 
*/ static struct cpuidle_state powernv_states[CPUIDLE_STATE_MAX] = { { /* Snooze */ .name = "snooze", .desc = "snooze", .exit_latency = 0, .target_residency = 0, .enter = snooze_loop, .flags = CPUIDLE_FLAG_POLLING }, }; static int powernv_cpuidle_cpu_online(unsigned int cpu) { struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu); if (dev && cpuidle_get_driver()) { cpuidle_pause_and_lock(); cpuidle_enable_device(dev); cpuidle_resume_and_unlock(); } return 0; } static int powernv_cpuidle_cpu_dead(unsigned int cpu) { struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu); if (dev && cpuidle_get_driver()) { cpuidle_pause_and_lock(); cpuidle_disable_device(dev); cpuidle_resume_and_unlock(); } return 0; } /* * powernv_cpuidle_driver_init() */ static int powernv_cpuidle_driver_init(void) { int idle_state; struct cpuidle_driver *drv = &powernv_idle_driver; drv->state_count = 0; for (idle_state = 0; idle_state < max_idle_state; ++idle_state) { /* Is the state not enabled? */ if (cpuidle_state_table[idle_state].enter == NULL) continue; drv->states[drv->state_count] = /* structure copy */ cpuidle_state_table[idle_state]; drv->state_count += 1; } /* * On the PowerNV platform cpu_present may be less than cpu_possible in * cases when firmware detects the CPU, but it is not available to the * OS. If CONFIG_HOTPLUG_CPU=n, then such CPUs are not hotpluggable at * run time and hence cpu_devices are not created for those CPUs by the * generic topology_init(). * * drv->cpumask defaults to cpu_possible_mask in * __cpuidle_driver_init(). This breaks cpuidle on PowerNV where * cpu_devices are not created for CPUs in cpu_possible_mask that * cannot be hot-added later at run time. * * Trying cpuidle_register_device() on a CPU without a cpu_device is * incorrect, so pass a correct CPU mask to the generic cpuidle driver. */ drv->cpumask = (struct cpumask *)cpu_present_mask; return 0; } static inline void add_powernv_state(int index, const char *name, unsigned int flags, int (*idle_fn)(struct cpuidle_device *, struct cpuidle_driver *, int), unsigned int target_residency, unsigned int exit_latency, u64 psscr_val, u64 psscr_mask) { strscpy(powernv_states[index].name, name, CPUIDLE_NAME_LEN); strscpy(powernv_states[index].desc, name, CPUIDLE_NAME_LEN); powernv_states[index].flags = flags; powernv_states[index].target_residency = target_residency; powernv_states[index].exit_latency = exit_latency; powernv_states[index].enter = idle_fn; /* For power8 and below psscr_* will be 0 */ stop_psscr_table[index].val = psscr_val; stop_psscr_table[index].mask = psscr_mask; } extern u32 pnv_get_supported_cpuidle_states(void); static int powernv_add_idle_states(void) { int nr_idle_states = 1; /* Snooze */ int dt_idle_states; u32 has_stop_states = 0; int i; u32 supported_flags = pnv_get_supported_cpuidle_states(); /* Currently we have snooze statically defined */ if (nr_pnv_idle_states <= 0) { pr_warn("cpuidle-powernv: Only snooze is available\n"); goto out; } /* TODO: Count only states which are eligible for cpuidle */ dt_idle_states = nr_pnv_idle_states; /* * Since snooze is used as the first idle state, the max idle states * allowed is CPUIDLE_STATE_MAX - 1 */ if (nr_pnv_idle_states > CPUIDLE_STATE_MAX - 1) { pr_warn("cpuidle-powernv: discovered more idle states than allowed\n"); dt_idle_states = CPUIDLE_STATE_MAX - 1; } /* * If the idle states use stop instruction, probe for psscr values * and psscr mask which are necessary to specify required stop level.
*/ has_stop_states = (pnv_idle_states[0].flags & (OPAL_PM_STOP_INST_FAST | OPAL_PM_STOP_INST_DEEP)); for (i = 0; i < dt_idle_states; i++) { unsigned int exit_latency, target_residency; bool stops_timebase = false; struct pnv_idle_states_t *state = &pnv_idle_states[i]; /* * Skip any platform idle state whose flags are not in * the supported_cpuidle_states flag mask. */ if ((state->flags & supported_flags) != state->flags) continue; /* * If an idle state has an exit latency beyond * POWERNV_THRESHOLD_LATENCY_NS then don't use it * in cpuidle. */ if (state->latency_ns > POWERNV_THRESHOLD_LATENCY_NS) continue; /* * Firmware passes residency and latency values in ns. * cpuidle expects them in us. */ exit_latency = DIV_ROUND_UP(state->latency_ns, 1000); target_residency = DIV_ROUND_UP(state->residency_ns, 1000); if (has_stop_states && !(state->valid)) continue; if (state->flags & OPAL_PM_TIMEBASE_STOP) stops_timebase = true; if (state->flags & OPAL_PM_NAP_ENABLED) { /* Add NAP state */ add_powernv_state(nr_idle_states, "Nap", CPUIDLE_FLAG_NONE, nap_loop, target_residency, exit_latency, 0, 0); } else if (has_stop_states && !stops_timebase) { add_powernv_state(nr_idle_states, state->name, CPUIDLE_FLAG_NONE, stop_loop, target_residency, exit_latency, state->psscr_val, state->psscr_mask); } /* * All cpuidle states with CPUIDLE_FLAG_TIMER_STOP set must come * within this config dependency check. */ #ifdef CONFIG_TICK_ONESHOT else if (state->flags & OPAL_PM_SLEEP_ENABLED || state->flags & OPAL_PM_SLEEP_ENABLED_ER1) { /* Add FASTSLEEP state */ add_powernv_state(nr_idle_states, "FastSleep", CPUIDLE_FLAG_TIMER_STOP, fastsleep_loop, target_residency, exit_latency, 0, 0); } else if (has_stop_states && stops_timebase) { add_powernv_state(nr_idle_states, state->name, CPUIDLE_FLAG_TIMER_STOP, stop_loop, target_residency, exit_latency, state->psscr_val, state->psscr_mask); } #endif else continue; nr_idle_states++; } out: return nr_idle_states; } /* * powernv_idle_probe() * Choose the state table for the shared versus dedicated partition case */ static int powernv_idle_probe(void) { if (cpuidle_disable != IDLE_NO_OVERRIDE) return -ENODEV; if (firmware_has_feature(FW_FEATURE_OPAL)) { cpuidle_state_table = powernv_states; /* Device tree can indicate more idle states */ max_idle_state = powernv_add_idle_states(); default_snooze_timeout = TICK_USEC * tb_ticks_per_usec; if (max_idle_state > 1) snooze_timeout_en = true; } else return -ENODEV; return 0; } static int __init powernv_processor_idle_init(void) { int retval; retval = powernv_idle_probe(); if (retval) return retval; powernv_cpuidle_driver_init(); retval = cpuidle_register(&powernv_idle_driver, NULL); if (retval) { printk(KERN_DEBUG "Registration of powernv driver failed.\n"); return retval; } retval = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "cpuidle/powernv:online", powernv_cpuidle_cpu_online, NULL); WARN_ON(retval < 0); retval = cpuhp_setup_state_nocalls(CPUHP_CPUIDLE_DEAD, "cpuidle/powernv:dead", NULL, powernv_cpuidle_cpu_dead); WARN_ON(retval < 0); printk(KERN_DEBUG "powernv_idle_driver registered\n"); return 0; } device_initcall(powernv_processor_idle_init);
linux-master
drivers/cpuidle/cpuidle-powernv.c
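The snooze-timeout rule in get_snooze_timeout() above deserves a worked illustration: the polling deadline is the target residency of the next enabled, deeper state, scaled by a ticks-per-microsecond factor. Below is a minimal standalone sketch in plain userspace C, not kernel code; the function name pick_snooze_timeout, the state table, and the 512 ticks/us figure are illustrative assumptions.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Illustrative stand-ins for drv->states[] and dev->states_usage[]. */
struct state { unsigned int target_residency_us; bool disabled; };

static uint64_t pick_snooze_timeout(const struct state *states, int count,
				    int index, uint64_t ticks_per_us,
				    uint64_t fallback)
{
	/* Scan the states deeper than 'index' and use the first enabled one. */
	for (int i = index + 1; i < count; i++) {
		if (states[i].disabled)
			continue;
		return (uint64_t)states[i].target_residency_us * ticks_per_us;
	}
	return fallback;	/* no deeper state enabled: default timeout */
}

int main(void)
{
	struct state states[] = {
		{ 0, false },	/* snooze itself */
		{ 100, true },	/* e.g. Nap, disabled via sysfs */
		{ 300, false },	/* e.g. a stop state */
	};

	/* With state 1 disabled, the deadline comes from state 2. */
	printf("%llu ticks\n",
	       (unsigned long long)pick_snooze_timeout(states, 3, 0, 512, 0));
	return 0;
}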
// SPDX-License-Identifier: GPL-2.0-only /* * poll_state.c - Polling idle state */ #include <linux/cpuidle.h> #include <linux/sched.h> #include <linux/sched/clock.h> #include <linux/sched/idle.h> #define POLL_IDLE_RELAX_COUNT 200 static int __cpuidle poll_idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { u64 time_start; time_start = local_clock_noinstr(); dev->poll_time_limit = false; raw_local_irq_enable(); if (!current_set_polling_and_test()) { unsigned int loop_count = 0; u64 limit; limit = cpuidle_poll_time(drv, dev); while (!need_resched()) { cpu_relax(); if (loop_count++ < POLL_IDLE_RELAX_COUNT) continue; loop_count = 0; if (local_clock_noinstr() - time_start > limit) { dev->poll_time_limit = true; break; } } } raw_local_irq_disable(); current_clr_polling(); return index; } void cpuidle_poll_state_init(struct cpuidle_driver *drv) { struct cpuidle_state *state = &drv->states[0]; snprintf(state->name, CPUIDLE_NAME_LEN, "POLL"); snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE"); state->exit_latency = 0; state->target_residency = 0; state->exit_latency_ns = 0; state->target_residency_ns = 0; state->power_usage = -1; state->enter = poll_idle; state->flags = CPUIDLE_FLAG_POLLING; } EXPORT_SYMBOL_GPL(cpuidle_poll_state_init);
linux-master
drivers/cpuidle/poll_state.c
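poll_idle() above reads the clock only once per POLL_IDLE_RELAX_COUNT iterations so the deadline test stays off the hot path. A standalone sketch of the same batching trick, with clock_gettime() standing in for local_clock_noinstr() and a made-up 5 ms budget:

#include <stdio.h>
#include <stdint.h>
#include <time.h>

#define RELAX_COUNT 200		/* batch size copied from the driver */

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

int main(void)
{
	uint64_t start = now_ns(), limit = 5 * 1000000ull;	/* 5 ms budget */
	unsigned int loops = 0, clock_reads = 1;	/* 1: the initial read */
	int timed_out = 0;

	for (;;) {
		/* The need_resched() test would go here in the real loop. */
		if (++loops < RELAX_COUNT)
			continue;
		loops = 0;
		clock_reads++;
		if (now_ns() - start > limit) {
			timed_out = 1;	/* mirrors dev->poll_time_limit */
			break;
		}
	}
	printf("timed out=%d after %u clock reads\n", timed_out, clock_reads);
	return 0;
}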
// SPDX-License-Identifier: GPL-2.0-only /* * based on arch/arm/mach-kirkwood/cpuidle.c * * CPU idle support for AT91 SoC * * The cpu idle uses wait-for-interrupt and RAM self refresh in order * to implement two idle states - * #1 wait-for-interrupt * #2 wait-for-interrupt and RAM self refresh */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/cpuidle.h> #include <linux/io.h> #include <linux/export.h> #include <asm/cpuidle.h> #define AT91_MAX_STATES 2 static void (*at91_standby)(void); /* Actual code that puts the SoC in different idle states */ static int at91_enter_idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { at91_standby(); return index; } static struct cpuidle_driver at91_idle_driver = { .name = "at91_idle", .owner = THIS_MODULE, .states[0] = ARM_CPUIDLE_WFI_STATE, .states[1] = { .enter = at91_enter_idle, .exit_latency = 10, .target_residency = 10000, .name = "RAM_SR", .desc = "WFI and DDR Self Refresh", }, .state_count = AT91_MAX_STATES, }; /* Initialize CPU idle by registering the idle states */ static int at91_cpuidle_probe(struct platform_device *dev) { at91_standby = (void *)(dev->dev.platform_data); return cpuidle_register(&at91_idle_driver, NULL); } static struct platform_driver at91_cpuidle_driver = { .driver = { .name = "cpuidle-at91", }, .probe = at91_cpuidle_probe, }; builtin_platform_driver(at91_cpuidle_driver);
linux-master
drivers/cpuidle/cpuidle-at91.c
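The at91 driver above receives the SoC-specific standby routine through platform_data instead of linking against it directly. A standalone sketch of that callback-injection pattern in plain C; the names and the probe/board split are illustrative, and the object-to-function-pointer cast mirrors the driver's own cast (it assumes a conventional ABI, as the kernel code does):

#include <stdio.h>

/* Set at "probe" time from data supplied by platform code. */
static void (*soc_standby)(void);

static int enter_deep_idle(void)
{
	soc_standby();		/* defer to whatever the platform provided */
	return 1;		/* index of the state entered */
}

/* Board-specific implementation, unknown to the generic driver. */
static void board_ram_self_refresh(void)
{
	puts("RAM self refresh + WFI");
}

static int probe(void *platform_data)
{
	/* Same style of cast as at91_cpuidle_probe(); not strict ISO C. */
	soc_standby = (void (*)(void))platform_data;
	return 0;
}

int main(void)
{
	probe((void *)board_ram_self_refresh);
	return enter_deep_idle() == 1 ? 0 : 1;
}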
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2013 ARM/Linaro * * Authors: Daniel Lezcano <[email protected]> * Lorenzo Pieralisi <[email protected]> * Nicolas Pitre <[email protected]> * * Maintainer: Lorenzo Pieralisi <[email protected]> * Maintainer: Daniel Lezcano <[email protected]> */ #include <linux/cpuidle.h> #include <linux/cpu_pm.h> #include <linux/slab.h> #include <linux/of.h> #include <asm/cpu.h> #include <asm/cputype.h> #include <asm/cpuidle.h> #include <asm/mcpm.h> #include <asm/smp_plat.h> #include <asm/suspend.h> #include "dt_idle_states.h" static int bl_enter_powerdown(struct cpuidle_device *dev, struct cpuidle_driver *drv, int idx); /* * NB: Owing to current menu governor behaviour big and LITTLE * index 1 states have to define exit_latency and target_residency for * the cluster state since, when all CPUs in a cluster hit it, the cluster * can be shut down. This means that when a single CPU enters this state * the exit_latency and target_residency values are somewhat overkill. * There is no notion of cluster states in the menu governor, so CPUs * have to define CPU states where possibly the cluster will be shut down * depending on the state of other CPUs. Idle state entry and exit happen * at random times; however the cluster state provides target_residency * values as if all CPUs in a cluster enter the state at once; this is * somewhat optimistic and behaviour should be fixed either in the governor * or in the MCPM back-ends. * To make this driver 100% generic the number of states and the * exit_latency and target_residency values must be obtained from device * tree bindings. * * exit_latency: refers to the TC2 vexpress test chip and depends on the * current cluster operating point. It is the time it takes to get the CPU * up and running when the CPU is powered up on cluster wake-up from shutdown. * Current values for big and LITTLE clusters are provided for clusters * running at default operating points. * * target_residency: the minimum amount of time the cluster has * to be down to break even in terms of power consumption. Cluster * shutdown has inherent dynamic power costs (L2 writebacks to DRAM * being the main factor) that depend on the current operating points. * The current values for both clusters are provided for a CPU where half * of the L2 lines are dirty and require cleaning to DRAM; they also take * into account leakage static power values related to the vexpress TC2 testchip. */ static struct cpuidle_driver bl_idle_little_driver = { .name = "little_idle", .owner = THIS_MODULE, .states[0] = ARM_CPUIDLE_WFI_STATE, .states[1] = { .enter = bl_enter_powerdown, .exit_latency = 700, .target_residency = 2500, .flags = CPUIDLE_FLAG_TIMER_STOP | CPUIDLE_FLAG_RCU_IDLE, .name = "C1", .desc = "ARM little-cluster power down", }, .state_count = 2, }; static const struct of_device_id bl_idle_state_match[] __initconst = { { .compatible = "arm,idle-state", .data = bl_enter_powerdown }, { }, }; static struct cpuidle_driver bl_idle_big_driver = { .name = "big_idle", .owner = THIS_MODULE, .states[0] = ARM_CPUIDLE_WFI_STATE, .states[1] = { .enter = bl_enter_powerdown, .exit_latency = 500, .target_residency = 2000, .flags = CPUIDLE_FLAG_TIMER_STOP | CPUIDLE_FLAG_RCU_IDLE, .name = "C1", .desc = "ARM big-cluster power down", }, .state_count = 2, }; /* * notrace prevents trace shims from getting inserted where they * should not. Global jumps and ldrex/strex must not be inserted * in power down sequences where caches and MMU may be turned off.
*/ static int notrace bl_powerdown_finisher(unsigned long arg) { /* MCPM works with HW CPU identifiers */ unsigned int mpidr = read_cpuid_mpidr(); unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); mcpm_set_entry_vector(cpu, cluster, cpu_resume); mcpm_cpu_suspend(); /* return value != 0 means failure */ return 1; } /** * bl_enter_powerdown - Programs CPU to enter the specified state * @dev: cpuidle device * @drv: cpuidle driver containing the state data * @idx: state index * * Called from the cpuidle framework to program the device to the * specified target state selected by the governor. */ static __cpuidle int bl_enter_powerdown(struct cpuidle_device *dev, struct cpuidle_driver *drv, int idx) { cpu_pm_enter(); ct_cpuidle_enter(); cpu_suspend(0, bl_powerdown_finisher); /* signals the MCPM core that the CPU is out of low power state */ mcpm_cpu_powered_up(); ct_cpuidle_exit(); cpu_pm_exit(); return idx; } static int __init bl_idle_driver_init(struct cpuidle_driver *drv, int part_id) { struct cpumask *cpumask; int cpu; cpumask = kzalloc(cpumask_size(), GFP_KERNEL); if (!cpumask) return -ENOMEM; for_each_possible_cpu(cpu) if (smp_cpuid_part(cpu) == part_id) cpumask_set_cpu(cpu, cpumask); drv->cpumask = cpumask; return 0; } static const struct of_device_id compatible_machine_match[] = { { .compatible = "arm,vexpress,v2p-ca15_a7" }, { .compatible = "google,peach" }, {}, }; static int __init bl_idle_init(void) { int ret; struct device_node *root = of_find_node_by_path("/"); const struct of_device_id *match_id; if (!root) return -ENODEV; /* * Initialize the driver just for a compliant set of machines */ match_id = of_match_node(compatible_machine_match, root); of_node_put(root); if (!match_id) return -ENODEV; if (!mcpm_is_available()) return -EUNATCH; /* * For now the differentiation between little and big cores * is based on the part number. A7 cores are considered little * cores, A15 are considered big cores. This distinction may * evolve in the future with a more generic matching approach. */ ret = bl_idle_driver_init(&bl_idle_little_driver, ARM_CPU_PART_CORTEX_A7); if (ret) return ret; ret = bl_idle_driver_init(&bl_idle_big_driver, ARM_CPU_PART_CORTEX_A15); if (ret) goto out_uninit_little; /* Start at index 1, index 0 standard WFI */ ret = dt_init_idle_driver(&bl_idle_big_driver, bl_idle_state_match, 1); if (ret < 0) goto out_uninit_big; /* Start at index 1, index 0 standard WFI */ ret = dt_init_idle_driver(&bl_idle_little_driver, bl_idle_state_match, 1); if (ret < 0) goto out_uninit_big; ret = cpuidle_register(&bl_idle_little_driver, NULL); if (ret) goto out_uninit_big; ret = cpuidle_register(&bl_idle_big_driver, NULL); if (ret) goto out_unregister_little; return 0; out_unregister_little: cpuidle_unregister(&bl_idle_little_driver); out_uninit_big: kfree(bl_idle_big_driver.cpumask); out_uninit_little: kfree(bl_idle_little_driver.cpumask); return ret; } device_initcall(bl_idle_init);
linux-master
drivers/cpuidle/cpuidle-big_little.c
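bl_idle_driver_init() above partitions the CPUs into per-cluster masks by part number. A standalone sketch of that step using plain bitmasks; the 4-CPU table is made up, though 0xc07/0xc0f match the Cortex-A7/A15 part numbers the driver selects via ARM_CPU_PART_*:

#include <stdio.h>

#define PART_A7		0xc07	/* Cortex-A7: "little" */
#define PART_A15	0xc0f	/* Cortex-A15: "big" */

int main(void)
{
	/* Hypothetical 4-CPU system: two A7s then two A15s. */
	const unsigned int part_of_cpu[] = { PART_A7, PART_A7, PART_A15, PART_A15 };
	unsigned long little_mask = 0, big_mask = 0;

	for (int cpu = 0; cpu < 4; cpu++) {
		if (part_of_cpu[cpu] == PART_A7)
			little_mask |= 1ul << cpu;	/* cpumask_set_cpu() */
		else if (part_of_cpu[cpu] == PART_A15)
			big_mask |= 1ul << cpu;
	}
	printf("little=0x%lx big=0x%lx\n", little_mask, big_mask);
	return 0;
}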
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2012 Linaro : Daniel Lezcano <[email protected]> (IBM) * * Based on the work of Rickard Andersson <[email protected]> * and Jonas Aaberg <[email protected]>. */ #include <linux/init.h> #include <linux/cpuidle.h> #include <linux/spinlock.h> #include <linux/atomic.h> #include <linux/smp.h> #include <linux/mfd/dbx500-prcmu.h> #include <linux/platform_data/arm-ux500-pm.h> #include <linux/platform_device.h> #include <asm/cpuidle.h> static atomic_t master = ATOMIC_INIT(0); static DEFINE_SPINLOCK(master_lock); static inline int ux500_enter_idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { int this_cpu = smp_processor_id(); bool recouple = false; if (atomic_inc_return(&master) == num_online_cpus()) { /* With this lock, we prevent the other cpu from exiting and * entering this function again and becoming the master */ if (!spin_trylock(&master_lock)) goto wfi; /* decouple the gic from the A9 cores */ if (prcmu_gic_decouple()) { spin_unlock(&master_lock); goto out; } /* If an error occurs, we will have to recouple the gic * manually */ recouple = true; /* At this point, as the gic is decoupled, if the other * cpu is in WFI, we have the guarantee that it won't be * woken up, so we can safely go to retention */ if (!prcmu_is_cpu_in_wfi(this_cpu ? 0 : 1)) goto out; /* The prcmu will be in charge of watching the interrupts * and waking up the cpus */ if (prcmu_copy_gic_settings()) goto out; /* Check that in the meantime no interrupt occurred * on the gic ... */ if (prcmu_gic_pending_irq()) goto out; /* ... or on the prcmu */ if (prcmu_pending_irq()) goto out; /* Go to the retention state, the prcmu will wait for the * cpu to go to WFI and this is what happens after exiting this * 'master' critical section */ if (prcmu_set_power_state(PRCMU_AP_IDLE, true, true)) goto out; /* When we switch to retention, the prcmu is in charge * of recoupling the gic automatically */ recouple = false; spin_unlock(&master_lock); } wfi: cpu_do_idle(); out: atomic_dec(&master); if (recouple) { prcmu_gic_recouple(); spin_unlock(&master_lock); } return index; } static struct cpuidle_driver ux500_idle_driver = { .name = "ux500_idle", .owner = THIS_MODULE, .states = { ARM_CPUIDLE_WFI_STATE, { .enter = ux500_enter_idle, .exit_latency = 70, .target_residency = 260, .flags = CPUIDLE_FLAG_TIMER_STOP, .name = "ApIdle", .desc = "ARM Retention", }, }, .safe_state_index = 0, .state_count = 2, }; static int dbx500_cpuidle_probe(struct platform_device *pdev) { /* Configure wake up reasons */ prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) | PRCMU_WAKEUP(ABB)); return cpuidle_register(&ux500_idle_driver, NULL); } static struct platform_driver dbx500_cpuidle_plat_driver = { .driver = { .name = "db8500-cpuidle", }, .probe = dbx500_cpuidle_probe, }; builtin_platform_driver(dbx500_cpuidle_plat_driver);
linux-master
drivers/cpuidle/cpuidle-ux500.c
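ux500_enter_idle() above combines an atomic entry counter with a trylock so that only the last CPU entering idle attempts the cluster-level retention path, while everyone else (and any loser of the race) falls back to plain WFI. A standalone sketch of that "last man in" pattern with C11 atomics and pthreads (compile with -pthread); the thread count and prints are illustrative, and the real driver's error/recouple handling is omitted:

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

#define NCPUS 4

static atomic_int master;
static pthread_mutex_t master_lock = PTHREAD_MUTEX_INITIALIZER;

static void *enter_idle(void *arg)
{
	int cpu = (int)(long)arg;

	/* atomic_inc_return(&master) == num_online_cpus() equivalent. */
	if (atomic_fetch_add(&master, 1) + 1 == NCPUS &&
	    pthread_mutex_trylock(&master_lock) == 0) {
		/* Last CPU in and lock won: do the cluster-level work. */
		printf("cpu%d: last man, takes the retention path\n", cpu);
		pthread_mutex_unlock(&master_lock);
	} else {
		printf("cpu%d: plain WFI\n", cpu);	/* the 'wfi:' fallback */
	}
	atomic_fetch_sub(&master, 1);
	return NULL;
}

int main(void)
{
	pthread_t t[NCPUS];

	for (long i = 0; i < NCPUS; i++)
		pthread_create(&t[i], NULL, enter_idle, (void *)i);
	for (int i = 0; i < NCPUS; i++)
		pthread_join(t[i], NULL);
	return 0;
}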
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved. * Copyright (c) 2014,2015, Linaro Ltd. * * SAW power controller driver */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/err.h> #include <linux/platform_device.h> #include <linux/cpuidle.h> #include <linux/cpu_pm.h> #include <linux/firmware/qcom/qcom_scm.h> #include <soc/qcom/spm.h> #include <asm/proc-fns.h> #include <asm/suspend.h> #include "dt_idle_states.h" struct cpuidle_qcom_spm_data { struct cpuidle_driver cpuidle_driver; struct spm_driver_data *spm; }; static int qcom_pm_collapse(unsigned long int unused) { qcom_scm_cpu_power_down(QCOM_SCM_CPU_PWR_DOWN_L2_ON); /* * Returns here only if there was a pending interrupt and we did not * power down as a result. */ return -1; } static int qcom_cpu_spc(struct spm_driver_data *drv) { int ret; spm_set_low_power_mode(drv, PM_SLEEP_MODE_SPC); ret = cpu_suspend(0, qcom_pm_collapse); /* * ARM common code executes WFI without calling into our driver and * if the SPM mode is not reset, then we may accidentally power down the * cpu when we intended only to gate the cpu clock. * Ensure the state is set to standby before returning. */ spm_set_low_power_mode(drv, PM_SLEEP_MODE_STBY); return ret; } static __cpuidle int spm_enter_idle_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, int idx) { struct cpuidle_qcom_spm_data *data = container_of(drv, struct cpuidle_qcom_spm_data, cpuidle_driver); return CPU_PM_CPU_IDLE_ENTER_PARAM(qcom_cpu_spc, idx, data->spm); } static struct cpuidle_driver qcom_spm_idle_driver = { .name = "qcom_spm", .owner = THIS_MODULE, .states[0] = { .enter = spm_enter_idle_state, .exit_latency = 1, .target_residency = 1, .power_usage = UINT_MAX, .name = "WFI", .desc = "ARM WFI", } }; static const struct of_device_id qcom_idle_state_match[] = { { .compatible = "qcom,idle-state-spc", .data = spm_enter_idle_state }, { }, }; static int spm_cpuidle_register(struct device *cpuidle_dev, int cpu) { struct platform_device *pdev = NULL; struct device_node *cpu_node, *saw_node; struct cpuidle_qcom_spm_data *data = NULL; int ret; cpu_node = of_cpu_device_node_get(cpu); if (!cpu_node) return -ENODEV; saw_node = of_parse_phandle(cpu_node, "qcom,saw", 0); if (!saw_node) return -ENODEV; pdev = of_find_device_by_node(saw_node); of_node_put(saw_node); of_node_put(cpu_node); if (!pdev) return -ENODEV; data = devm_kzalloc(cpuidle_dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; data->spm = dev_get_drvdata(&pdev->dev); if (!data->spm) return -EINVAL; data->cpuidle_driver = qcom_spm_idle_driver; data->cpuidle_driver.cpumask = (struct cpumask *)cpumask_of(cpu); ret = dt_init_idle_driver(&data->cpuidle_driver, qcom_idle_state_match, 1); if (ret <= 0) return ret ?
: -ENODEV; return cpuidle_register(&data->cpuidle_driver, NULL); } static int spm_cpuidle_drv_probe(struct platform_device *pdev) { int cpu, ret; if (!qcom_scm_is_available()) return -EPROBE_DEFER; ret = qcom_scm_set_warm_boot_addr(cpu_resume_arm); if (ret) return dev_err_probe(&pdev->dev, ret, "set warm boot addr failed"); for_each_possible_cpu(cpu) { ret = spm_cpuidle_register(&pdev->dev, cpu); if (ret && ret != -ENODEV) { dev_err(&pdev->dev, "Cannot register for CPU%d: %d\n", cpu, ret); } } return 0; } static struct platform_driver spm_cpuidle_driver = { .probe = spm_cpuidle_drv_probe, .driver = { .name = "qcom-spm-cpuidle", .suppress_bind_attrs = true, }, }; static bool __init qcom_spm_find_any_cpu(void) { struct device_node *cpu_node, *saw_node; for_each_of_cpu_node(cpu_node) { saw_node = of_parse_phandle(cpu_node, "qcom,saw", 0); if (of_device_is_available(saw_node)) { of_node_put(saw_node); of_node_put(cpu_node); return true; } of_node_put(saw_node); } return false; } static int __init qcom_spm_cpuidle_init(void) { struct platform_device *pdev; int ret; ret = platform_driver_register(&spm_cpuidle_driver); if (ret) return ret; /* Make sure there is actually any CPU managed by the SPM */ if (!qcom_spm_find_any_cpu()) return 0; pdev = platform_device_register_simple("qcom-spm-cpuidle", -1, NULL, 0); if (IS_ERR(pdev)) { platform_driver_unregister(&spm_cpuidle_driver); return PTR_ERR(pdev); } return 0; } device_initcall(qcom_spm_cpuidle_init);
linux-master
drivers/cpuidle/cpuidle-qcom-spm.c
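spm_cpuidle_register() above maps the "number of states found" convention of dt_init_idle_driver() onto an error code using the GNU `ret ?: -ENODEV` shorthand. A standalone sketch of that convention (the `?:` form is a gcc/clang extension; the DT parser is replaced by a stub):

#include <stdio.h>
#include <errno.h>

/* Stub with the same convention as dt_init_idle_driver(): negative on
 * error, otherwise the number of idle states found in the DT. */
static int count_dt_states(int pretend_found)
{
	return pretend_found;
}

static int register_cpu(int found)
{
	int ret = count_dt_states(found);

	if (ret <= 0)
		return ret ?: -ENODEV;	/* zero states is an error too */
	return 0;	/* would go on to cpuidle_register() */
}

int main(void)
{
	printf("%d %d %d\n",
	       register_cpu(2),		/* success: 0 */
	       register_cpu(0),		/* no states: -ENODEV */
	       register_cpu(-EINVAL));	/* parse error passed through */
	return 0;
}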
/* * governor.c - governor support * * (C) 2006-2007 Venkatesh Pallipadi <[email protected]> * Shaohua Li <[email protected]> * Adam Belay <[email protected]> * * This code is licenced under the GPL. */ #include <linux/cpu.h> #include <linux/cpuidle.h> #include <linux/mutex.h> #include <linux/module.h> #include <linux/pm_qos.h> #include "cpuidle.h" char param_governor[CPUIDLE_NAME_LEN]; LIST_HEAD(cpuidle_governors); struct cpuidle_governor *cpuidle_curr_governor; struct cpuidle_governor *cpuidle_prev_governor; /** * cpuidle_find_governor - finds a governor of the specified name * @str: the name * * Must be called with cpuidle_lock acquired. */ struct cpuidle_governor *cpuidle_find_governor(const char *str) { struct cpuidle_governor *gov; list_for_each_entry(gov, &cpuidle_governors, governor_list) if (!strncasecmp(str, gov->name, CPUIDLE_NAME_LEN)) return gov; return NULL; } /** * cpuidle_switch_governor - changes the governor * @gov: the new target governor * Must be called with cpuidle_lock acquired. */ int cpuidle_switch_governor(struct cpuidle_governor *gov) { struct cpuidle_device *dev; if (!gov) return -EINVAL; if (gov == cpuidle_curr_governor) return 0; cpuidle_uninstall_idle_handler(); if (cpuidle_curr_governor) { list_for_each_entry(dev, &cpuidle_detected_devices, device_list) cpuidle_disable_device(dev); } cpuidle_curr_governor = gov; list_for_each_entry(dev, &cpuidle_detected_devices, device_list) cpuidle_enable_device(dev); cpuidle_install_idle_handler(); pr_info("cpuidle: using governor %s\n", gov->name); return 0; } /** * cpuidle_register_governor - registers a governor * @gov: the governor */ int cpuidle_register_governor(struct cpuidle_governor *gov) { int ret = -EEXIST; if (!gov || !gov->select) return -EINVAL; if (cpuidle_disabled()) return -ENODEV; mutex_lock(&cpuidle_lock); if (cpuidle_find_governor(gov->name) == NULL) { ret = 0; list_add_tail(&gov->governor_list, &cpuidle_governors); if (!cpuidle_curr_governor || !strncasecmp(param_governor, gov->name, CPUIDLE_NAME_LEN) || (cpuidle_curr_governor->rating < gov->rating && strncasecmp(param_governor, cpuidle_curr_governor->name, CPUIDLE_NAME_LEN))) cpuidle_switch_governor(gov); } mutex_unlock(&cpuidle_lock); return ret; } /** * cpuidle_governor_latency_req - Compute a latency constraint for CPU * @cpu: Target CPU */ s64 cpuidle_governor_latency_req(unsigned int cpu) { struct device *device = get_cpu_device(cpu); int device_req = dev_pm_qos_raw_resume_latency(device); int global_req = cpu_latency_qos_limit(); if (device_req > global_req) device_req = global_req; return (s64)device_req * NSEC_PER_USEC; }
linux-master
drivers/cpuidle/governor.c
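cpuidle_register_governor() above switches to a newly registered governor when it is named on the command line, or when it outrates the current one and the current one was not itself requested by name. A standalone sketch of that decision in plain C; the governor structs are illustrative, with ratings chosen to match the in-tree menu (20) and teo (19) defaults:

#include <stdio.h>
#include <strings.h>	/* strcasecmp() */

struct gov { const char *name; int rating; };

/* Mirrors the test in cpuidle_register_governor(): switch when there is
 * no current governor, when the new one is named in the boot parameter,
 * or when it has a higher rating and the current one was not explicitly
 * requested by name. */
static int should_switch(const struct gov *cur, const struct gov *new,
			 const char *param)
{
	if (!cur)
		return 1;
	if (!strcasecmp(param, new->name))
		return 1;
	return cur->rating < new->rating && strcasecmp(param, cur->name);
}

int main(void)
{
	struct gov menu = { "menu", 20 }, teo = { "teo", 19 };

	printf("%d\n", should_switch(&menu, &teo, ""));	/* 0: lower rating */
	printf("%d\n", should_switch(&menu, &teo, "teo"));	/* 1: named on cmdline */
	return 0;
}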
// SPDX-License-Identifier: GPL-2.0-only /* * CPU idle Marvell Kirkwood SoCs * * The cpu idle uses wait-for-interrupt and DDR self refresh in order * to implement two idle states - * #1 wait-for-interrupt * #2 wait-for-interrupt and DDR self refresh * * Maintainer: Jason Cooper <[email protected]> * Maintainer: Andrew Lunn <[email protected]> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/cpuidle.h> #include <linux/io.h> #include <linux/export.h> #include <asm/cpuidle.h> #define KIRKWOOD_MAX_STATES 2 static void __iomem *ddr_operation_base; /* Actual code that puts the SoC in different idle states */ static int kirkwood_enter_idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { writel(0x7, ddr_operation_base); cpu_do_idle(); return index; } static struct cpuidle_driver kirkwood_idle_driver = { .name = "kirkwood_idle", .owner = THIS_MODULE, .states[0] = ARM_CPUIDLE_WFI_STATE, .states[1] = { .enter = kirkwood_enter_idle, .exit_latency = 10, .target_residency = 100000, .name = "DDR SR", .desc = "WFI and DDR Self Refresh", }, .state_count = KIRKWOOD_MAX_STATES, }; /* Initialize CPU idle by registering the idle states */ static int kirkwood_cpuidle_probe(struct platform_device *pdev) { ddr_operation_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ddr_operation_base)) return PTR_ERR(ddr_operation_base); return cpuidle_register(&kirkwood_idle_driver, NULL); } static int kirkwood_cpuidle_remove(struct platform_device *pdev) { cpuidle_unregister(&kirkwood_idle_driver); return 0; } static struct platform_driver kirkwood_cpuidle_driver = { .probe = kirkwood_cpuidle_probe, .remove = kirkwood_cpuidle_remove, .driver = { .name = "kirkwood_cpuidle", }, }; module_platform_driver(kirkwood_cpuidle_driver); MODULE_AUTHOR("Andrew Lunn <[email protected]>"); MODULE_DESCRIPTION("Kirkwood cpu idle driver"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:kirkwood-cpuidle");
linux-master
drivers/cpuidle/cpuidle-kirkwood.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2012 Calxeda, Inc. * * Based on arch/arm/plat-mxc/cpuidle.c: #v3.7 * Copyright 2012 Freescale Semiconductor, Inc. * Copyright 2012 Linaro Ltd. * * Maintainer: Rob Herring <[email protected]> */ #include <linux/cpuidle.h> #include <linux/cpu_pm.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/platform_device.h> #include <linux/psci.h> #include <asm/cpuidle.h> #include <asm/suspend.h> #include <uapi/linux/psci.h> #define CALXEDA_IDLE_PARAM \ ((0 << PSCI_0_2_POWER_STATE_ID_SHIFT) | \ (0 << PSCI_0_2_POWER_STATE_AFFL_SHIFT) | \ (PSCI_POWER_STATE_TYPE_POWER_DOWN << PSCI_0_2_POWER_STATE_TYPE_SHIFT)) static int calxeda_idle_finish(unsigned long val) { return psci_ops.cpu_suspend(CALXEDA_IDLE_PARAM, __pa(cpu_resume)); } static int calxeda_pwrdown_idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { cpu_pm_enter(); cpu_suspend(0, calxeda_idle_finish); cpu_pm_exit(); return index; } static struct cpuidle_driver calxeda_idle_driver = { .name = "calxeda_idle", .states = { ARM_CPUIDLE_WFI_STATE, { .name = "PG", .desc = "Power Gate", .exit_latency = 30, .power_usage = 50, .target_residency = 200, .enter = calxeda_pwrdown_idle, }, }, .state_count = 2, }; static int calxeda_cpuidle_probe(struct platform_device *pdev) { return cpuidle_register(&calxeda_idle_driver, NULL); } static struct platform_driver calxeda_cpuidle_plat_driver = { .driver = { .name = "cpuidle-calxeda", }, .probe = calxeda_cpuidle_probe, }; builtin_platform_driver(calxeda_cpuidle_plat_driver);
linux-master
drivers/cpuidle/cpuidle-calxeda.c
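CALXEDA_IDLE_PARAM above packs the PSCI 0.2 power_state fields into a single parameter word by shifting. A standalone sketch of the packing; the shift positions follow the PSCI 0.2 layout (state ID bits 15:0, state type bit 16, affinity level bits 25:24), spelled out here as local constants rather than the uapi macros:

#include <stdio.h>

#define ID_SHIFT	0	/* StateID */
#define TYPE_SHIFT	16	/* StateType */
#define AFFL_SHIFT	24	/* AffinityLevel */
#define TYPE_POWER_DOWN	1

int main(void)
{
	/* State ID 0, affinity level 0, type "power down": 0x10000. */
	unsigned int param = (0u << ID_SHIFT) |
			     (0u << AFFL_SHIFT) |
			     ((unsigned int)TYPE_POWER_DOWN << TYPE_SHIFT);

	printf("power_state parameter: 0x%x\n", param);
	return 0;
}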
/* * driver.c - driver support * * (C) 2006-2007 Venkatesh Pallipadi <[email protected]> * Shaohua Li <[email protected]> * Adam Belay <[email protected]> * * This code is licenced under the GPL. */ #include <linux/mutex.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/sched/idle.h> #include <linux/cpuidle.h> #include <linux/cpumask.h> #include <linux/tick.h> #include <linux/cpu.h> #include "cpuidle.h" DEFINE_SPINLOCK(cpuidle_driver_lock); #ifdef CONFIG_CPU_IDLE_MULTIPLE_DRIVERS static DEFINE_PER_CPU(struct cpuidle_driver *, cpuidle_drivers); /** * __cpuidle_get_cpu_driver - return the cpuidle driver tied to a CPU. * @cpu: the CPU handled by the driver * * Returns a pointer to struct cpuidle_driver or NULL if no driver has been * registered for @cpu. */ static struct cpuidle_driver *__cpuidle_get_cpu_driver(int cpu) { return per_cpu(cpuidle_drivers, cpu); } /** * __cpuidle_unset_driver - unset per CPU driver variables. * @drv: a valid pointer to a struct cpuidle_driver * * For each CPU in the driver's CPU mask, unset the registered driver per CPU * variable. If @drv is different from the registered driver, the corresponding * variable is not cleared. */ static inline void __cpuidle_unset_driver(struct cpuidle_driver *drv) { int cpu; for_each_cpu(cpu, drv->cpumask) { if (drv != __cpuidle_get_cpu_driver(cpu)) continue; per_cpu(cpuidle_drivers, cpu) = NULL; } } /** * __cpuidle_set_driver - set per CPU driver variables for the given driver. * @drv: a valid pointer to a struct cpuidle_driver * * Returns 0 on success, -EBUSY if any CPU in the cpumask already has a * driver different from @drv. */ static inline int __cpuidle_set_driver(struct cpuidle_driver *drv) { int cpu; for_each_cpu(cpu, drv->cpumask) { struct cpuidle_driver *old_drv; old_drv = __cpuidle_get_cpu_driver(cpu); if (old_drv && old_drv != drv) return -EBUSY; } for_each_cpu(cpu, drv->cpumask) per_cpu(cpuidle_drivers, cpu) = drv; return 0; } #else static struct cpuidle_driver *cpuidle_curr_driver; /** * __cpuidle_get_cpu_driver - return the global cpuidle driver pointer. * @cpu: ignored without the multiple driver support * * Return a pointer to a struct cpuidle_driver object or NULL if no driver was * previously registered. */ static inline struct cpuidle_driver *__cpuidle_get_cpu_driver(int cpu) { return cpuidle_curr_driver; } /** * __cpuidle_set_driver - assign the global cpuidle driver variable. * @drv: pointer to a struct cpuidle_driver object * * Returns 0 on success, -EBUSY if the driver is already registered. */ static inline int __cpuidle_set_driver(struct cpuidle_driver *drv) { if (cpuidle_curr_driver) return -EBUSY; cpuidle_curr_driver = drv; return 0; } /** * __cpuidle_unset_driver - unset the global cpuidle driver variable. * @drv: a pointer to a struct cpuidle_driver * * Reset the global cpuidle variable to NULL. If @drv does not match the * registered driver, do nothing. */ static inline void __cpuidle_unset_driver(struct cpuidle_driver *drv) { if (drv == cpuidle_curr_driver) cpuidle_curr_driver = NULL; } #endif /** * cpuidle_setup_broadcast_timer - enable/disable the broadcast timer on a cpu * @arg: a void pointer used to match the SMP cross call API * * If @arg is NULL, broadcast is disabled, otherwise it is enabled. * * This function is executed per CPU by an SMP cross call. It's not * supposed to be called directly.
*/ static void cpuidle_setup_broadcast_timer(void *arg) { if (arg) tick_broadcast_enable(); else tick_broadcast_disable(); } /** * __cpuidle_driver_init - initialize the driver's internal data * @drv: a valid pointer to a struct cpuidle_driver */ static void __cpuidle_driver_init(struct cpuidle_driver *drv) { int i; /* * Use all possible CPUs as the default, because if the kernel boots * with some CPUs offline and then we online one of them, the CPU * notifier has to know which driver to assign. */ if (!drv->cpumask) drv->cpumask = (struct cpumask *)cpu_possible_mask; for (i = 0; i < drv->state_count; i++) { struct cpuidle_state *s = &drv->states[i]; /* * Look for the timer stop flag in the different states and if * it is found, indicate that the broadcast timer has to be set * up. */ if (s->flags & CPUIDLE_FLAG_TIMER_STOP) drv->bctimer = 1; /* * The core will use the target residency and exit latency * values in nanoseconds, but allow drivers to provide them in * microseconds too. */ if (s->target_residency > 0) s->target_residency_ns = s->target_residency * NSEC_PER_USEC; else if (s->target_residency_ns < 0) s->target_residency_ns = 0; else s->target_residency = div_u64(s->target_residency_ns, NSEC_PER_USEC); if (s->exit_latency > 0) s->exit_latency_ns = s->exit_latency * NSEC_PER_USEC; else if (s->exit_latency_ns < 0) s->exit_latency_ns = 0; else s->exit_latency = div_u64(s->exit_latency_ns, NSEC_PER_USEC); } } /** * __cpuidle_register_driver - register the driver * @drv: a valid pointer to a struct cpuidle_driver * * Do some sanity checks, initialize the driver, assign the driver to the * global cpuidle driver variable(s) and set up the broadcast timer if the * cpuidle driver has some states that shut down the local timer. * * Returns 0 on success, a negative error code otherwise: * * -EINVAL if the driver pointer is NULL or no idle states are available * * -ENODEV if the cpuidle framework is disabled * * -EBUSY if the driver is already assigned to the global variable(s) */ static int __cpuidle_register_driver(struct cpuidle_driver *drv) { int ret; if (!drv || !drv->state_count) return -EINVAL; ret = cpuidle_coupled_state_verify(drv); if (ret) return ret; if (cpuidle_disabled()) return -ENODEV; __cpuidle_driver_init(drv); ret = __cpuidle_set_driver(drv); if (ret) return ret; if (drv->bctimer) on_each_cpu_mask(drv->cpumask, cpuidle_setup_broadcast_timer, (void *)1, 1); return 0; } /** * __cpuidle_unregister_driver - unregister the driver * @drv: a valid pointer to a struct cpuidle_driver * * Check if the driver is no longer in use, reset the global cpuidle driver * variable(s) and disable the timer broadcast notification mechanism if it was * in use. * */ static void __cpuidle_unregister_driver(struct cpuidle_driver *drv) { if (drv->bctimer) { drv->bctimer = 0; on_each_cpu_mask(drv->cpumask, cpuidle_setup_broadcast_timer, NULL, 1); } __cpuidle_unset_driver(drv); } /** * cpuidle_register_driver - registers a driver * @drv: a pointer to a valid struct cpuidle_driver * * Register the driver under a lock to prevent concurrent attempts to * [un]register the driver from occurring at the same time. * * Returns 0 on success, a negative error code (returned by * __cpuidle_register_driver()) otherwise.
*/ int cpuidle_register_driver(struct cpuidle_driver *drv) { struct cpuidle_governor *gov; int ret; spin_lock(&cpuidle_driver_lock); ret = __cpuidle_register_driver(drv); spin_unlock(&cpuidle_driver_lock); if (!ret && !strlen(param_governor) && drv->governor && (cpuidle_get_driver() == drv)) { mutex_lock(&cpuidle_lock); gov = cpuidle_find_governor(drv->governor); if (gov) { cpuidle_prev_governor = cpuidle_curr_governor; if (cpuidle_switch_governor(gov) < 0) cpuidle_prev_governor = NULL; } mutex_unlock(&cpuidle_lock); } return ret; } EXPORT_SYMBOL_GPL(cpuidle_register_driver); /** * cpuidle_unregister_driver - unregisters a driver * @drv: a pointer to a valid struct cpuidle_driver * * Unregisters the cpuidle driver under a lock to prevent concurrent attempts * to [un]register the driver from occurring at the same time. @drv has to * match the currently registered driver. */ void cpuidle_unregister_driver(struct cpuidle_driver *drv) { bool enabled = (cpuidle_get_driver() == drv); spin_lock(&cpuidle_driver_lock); __cpuidle_unregister_driver(drv); spin_unlock(&cpuidle_driver_lock); if (!enabled) return; mutex_lock(&cpuidle_lock); if (cpuidle_prev_governor) { if (!cpuidle_switch_governor(cpuidle_prev_governor)) cpuidle_prev_governor = NULL; } mutex_unlock(&cpuidle_lock); } EXPORT_SYMBOL_GPL(cpuidle_unregister_driver); /** * cpuidle_get_driver - return the driver tied to the current CPU. * * Returns a struct cpuidle_driver pointer, or NULL if no driver is registered. */ struct cpuidle_driver *cpuidle_get_driver(void) { struct cpuidle_driver *drv; int cpu; cpu = get_cpu(); drv = __cpuidle_get_cpu_driver(cpu); put_cpu(); return drv; } EXPORT_SYMBOL_GPL(cpuidle_get_driver); /** * cpuidle_get_cpu_driver - return the driver registered for a CPU. * @dev: a valid pointer to a struct cpuidle_device * * Returns a struct cpuidle_driver pointer, or NULL if no driver is registered * for the CPU associated with @dev. */ struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev) { if (!dev) return NULL; return __cpuidle_get_cpu_driver(dev->cpu); } EXPORT_SYMBOL_GPL(cpuidle_get_cpu_driver); /** * cpuidle_driver_state_disabled - Disable or enable an idle state * @drv: cpuidle driver owning the state * @idx: State index * @disable: Whether or not to disable the state */ void cpuidle_driver_state_disabled(struct cpuidle_driver *drv, int idx, bool disable) { unsigned int cpu; mutex_lock(&cpuidle_lock); spin_lock(&cpuidle_driver_lock); if (!drv->cpumask) { drv->states[idx].flags |= CPUIDLE_FLAG_UNUSABLE; goto unlock; } for_each_cpu(cpu, drv->cpumask) { struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu); if (!dev) continue; if (disable) dev->states_usage[idx].disable |= CPUIDLE_STATE_DISABLED_BY_DRIVER; else dev->states_usage[idx].disable &= ~CPUIDLE_STATE_DISABLED_BY_DRIVER; } unlock: spin_unlock(&cpuidle_driver_lock); mutex_unlock(&cpuidle_lock); }
linux-master
drivers/cpuidle/driver.c
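__cpuidle_driver_init() above lets a driver give each state's exit latency and target residency in either microseconds or nanoseconds and derives the missing unit. A standalone sketch of that normalization for one field, with NSEC_PER_USEC spelled out as a local constant:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_USEC 1000ull

struct state { unsigned int us; int64_t ns; };	/* e.g. target residency */

static void normalize(struct state *s)
{
	if (s->us > 0)			/* microseconds given: derive ns */
		s->ns = s->us * NSEC_PER_USEC;
	else if (s->ns < 0)		/* negative ns is clamped to zero */
		s->ns = 0;
	else				/* nanoseconds given: derive us */
		s->us = (unsigned int)(s->ns / NSEC_PER_USEC);
}

int main(void)
{
	struct state a = { 100, 0 }, b = { 0, 250000 };

	normalize(&a);	/* 100 us -> 100000 ns */
	normalize(&b);	/* 250000 ns -> 250 us */
	printf("a: %u us = %lld ns, b: %u us = %lld ns\n",
	       a.us, (long long)a.ns, b.us, (long long)b.ns);
	return 0;
}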
// SPDX-License-Identifier: GPL-2.0 /* * Timer events oriented CPU idle governor * * TEO governor: * Copyright (C) 2018 - 2021 Intel Corporation * Author: Rafael J. Wysocki <[email protected]> * * Util-awareness mechanism: * Copyright (C) 2022 Arm Ltd. * Author: Kajetan Puchalski <[email protected]> */ /** * DOC: teo-description * * The idea of this governor is based on the observation that on many systems * timer events are two or more orders of magnitude more frequent than any * other interrupts, so they are likely to be the most significant cause of CPU * wakeups from idle states. Moreover, information about what happened in the * (relatively recent) past can be used to estimate whether or not the deepest * idle state with target residency within the (known) time till the closest * timer event, referred to as the sleep length, is likely to be suitable for * the upcoming CPU idle period and, if not, then which of the shallower idle * states to choose instead of it. * * Of course, non-timer wakeup sources are more important in some use cases * which can be covered by taking a few most recent idle time intervals of the * CPU into account. However, even in that context it is not necessary to * consider idle duration values greater than the sleep length, because the * closest timer will ultimately wake up the CPU anyway unless it is woken up * earlier. * * Thus this governor estimates whether or not the prospective idle duration of * a CPU is likely to be significantly shorter than the sleep length and selects * an idle state for it accordingly. * * The computations carried out by this governor are based on using bins whose * boundaries are aligned with the target residency parameter values of the CPU * idle states provided by the %CPUIdle driver in the ascending order. That is, * the first bin spans from 0 up to, but not including, the target residency of * the second idle state (idle state 1), the second bin spans from the target * residency of idle state 1 up to, but not including, the target residency of * idle state 2, the third bin spans from the target residency of idle state 2 * up to, but not including, the target residency of idle state 3 and so on. * The last bin spans from the target residency of the deepest idle state * supplied by the driver to infinity. * * Two metrics called "hits" and "intercepts" are associated with each bin. * They are updated every time before selecting an idle state for the given CPU * in accordance with what happened last time. * * The "hits" metric reflects the relative frequency of situations in which the * sleep length and the idle duration measured after CPU wakeup fall into the * same bin (that is, the CPU appears to wake up "on time" relative to the sleep * length). In turn, the "intercepts" metric reflects the relative frequency of * situations in which the measured idle duration is so much shorter than the * sleep length that the bin it falls into corresponds to an idle state * shallower than the one whose bin is fallen into by the sleep length (these * situations are referred to as "intercepts" below). * * In addition to the metrics described above, the governor counts recent * intercepts (that is, intercepts that have occurred during the last * %NR_RECENT invocations of it for the given CPU) for each bin. * * In order to select an idle state for a CPU, the governor takes the following * steps (modulo the possible latency constraint that must be taken into account * too): * * 1. 
Find the deepest CPU idle state whose target residency does not exceed * the current sleep length (the candidate idle state) and compute 3 sums as * follows: * * - The sum of the "hits" and "intercepts" metrics for the candidate state * and all of the deeper idle states (it represents the cases in which the * CPU was idle long enough to avoid being intercepted if the sleep length * had been equal to the current one). * * - The sum of the "intercepts" metrics for all of the idle states shallower * than the candidate one (it represents the cases in which the CPU was not * idle long enough to avoid being intercepted if the sleep length had been * equal to the current one). * * - The sum of the numbers of recent intercepts for all of the idle states * shallower than the candidate one. * * 2. If the second sum is greater than the first one or the third sum is * greater than %NR_RECENT / 2, the CPU is likely to wake up early, so look * for an alternative idle state to select. * * - Traverse the idle states shallower than the candidate one in the * descending order. * * - For each of them compute the sum of the "intercepts" metrics and the sum * of the numbers of recent intercepts over all of the idle states between * it and the candidate one (including the former and excluding the * latter). * * - If each of these sums that needs to be taken into account (because the * check related to it has indicated that the CPU is likely to wake up * early) is greater than a half of the corresponding sum computed in step * 1 (which means that the target residency of the state in question had * not exceeded the idle duration in over a half of the relevant cases), * select the given idle state instead of the candidate one. * * 3. By default, select the candidate state. * * Util-awareness mechanism: * * The idea behind the util-awareness extension is that there are two distinct * scenarios for the CPU which should result in two different approaches to idle * state selection - utilized and not utilized. * * In this case, 'utilized' means that the average runqueue util of the CPU is * above a certain threshold. * * When the CPU is utilized while going into idle, more likely than not it will * be woken up to do more work soon and so a shallower idle state should be * selected to minimise latency and maximise performance. When the CPU is not * being utilized, the usual metrics-based approach to selecting the deepest * available idle state should be preferred to take advantage of the power * saving. * * In order to achieve this, the governor uses a utilization threshold. * The threshold is computed per-CPU as a percentage of the CPU's capacity * by bit shifting the capacity value. Based on testing, the shift of 6 (~1.56%) * seems to give the best results. * * Before selecting the next idle state, the governor compares the current CPU * util to the precomputed util threshold. If it's below, it defaults to the * TEO metrics mechanism. If it's above, the closest shallower idle state will * be selected instead, as long as it is not a polling state. */ #include <linux/cpuidle.h> #include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/sched/clock.h> #include <linux/sched/topology.h> #include <linux/tick.h> #include "gov.h" /* * The number of bits to shift the CPU's capacity by in order to determine * the utilization threshold. * * 6 was chosen based on testing as the number that achieved the best balance * of power and performance on average.
* * The resulting threshold is high enough to not be triggered by background * noise and low enough to react quickly when activity starts to ramp up. */ #define UTIL_THRESHOLD_SHIFT 6 /* * The PULSE value is added to metrics when they grow and the DECAY_SHIFT value * is used for decreasing metrics on a regular basis. */ #define PULSE 1024 #define DECAY_SHIFT 3 /* * Number of the most recent idle duration values to take into consideration for * the detection of recent early wakeup patterns. */ #define NR_RECENT 9 /** * struct teo_bin - Metrics used by the TEO cpuidle governor. * @intercepts: The "intercepts" metric. * @hits: The "hits" metric. * @recent: The number of recent "intercepts". */ struct teo_bin { unsigned int intercepts; unsigned int hits; unsigned int recent; }; /** * struct teo_cpu - CPU data used by the TEO cpuidle governor. * @time_span_ns: Time between idle state selection and post-wakeup update. * @sleep_length_ns: Time till the closest timer event (at the selection time). * @state_bins: Idle state data bins for this CPU. * @total: Grand total of the "intercepts" and "hits" metrics for all bins. * @next_recent_idx: Index of the next @recent_idx entry to update. * @recent_idx: Indices of bins corresponding to recent "intercepts". * @tick_hits: Number of "hits" after TICK_NSEC. * @util_threshold: Threshold above which the CPU is considered utilized */ struct teo_cpu { s64 time_span_ns; s64 sleep_length_ns; struct teo_bin state_bins[CPUIDLE_STATE_MAX]; unsigned int total; int next_recent_idx; int recent_idx[NR_RECENT]; unsigned int tick_hits; unsigned long util_threshold; }; static DEFINE_PER_CPU(struct teo_cpu, teo_cpus); /** * teo_cpu_is_utilized - Check if the CPU's util is above the threshold * @cpu: Target CPU * @cpu_data: Governor CPU data for the target CPU */ #ifdef CONFIG_SMP static bool teo_cpu_is_utilized(int cpu, struct teo_cpu *cpu_data) { return sched_cpu_util(cpu) > cpu_data->util_threshold; } #else static bool teo_cpu_is_utilized(int cpu, struct teo_cpu *cpu_data) { return false; } #endif /** * teo_update - Update CPU metrics after wakeup. * @drv: cpuidle driver containing state data. * @dev: Target CPU. */ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev) { struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu); int i, idx_timer = 0, idx_duration = 0; s64 target_residency_ns; u64 measured_ns; if (cpu_data->time_span_ns >= cpu_data->sleep_length_ns) { /* * One of the safety nets has triggered or the wakeup was close * enough to the closest timer event expected at the idle state * selection time to be discarded. */ measured_ns = U64_MAX; } else { u64 lat_ns = drv->states[dev->last_state_idx].exit_latency_ns; /* * The computations below are to determine whether or not the * (saved) time till the next timer event and the measured idle * duration fall into the same "bin", so use last_residency_ns * for that instead of time_span_ns which includes the cpuidle * overhead. */ measured_ns = dev->last_residency_ns; /* * The delay between the wakeup and the first instruction * executed by the CPU is not likely to be worst-case every * time, so take 1/2 of the exit latency as a very rough * approximation of the average of it. */ if (measured_ns >= lat_ns) measured_ns -= lat_ns / 2; else measured_ns /= 2; } cpu_data->total = 0; /* * Decay the "hits" and "intercepts" metrics for all of the bins and * find the bins that the sleep length and the measured idle duration * fall into. 
*/ for (i = 0; i < drv->state_count; i++) { struct teo_bin *bin = &cpu_data->state_bins[i]; bin->hits -= bin->hits >> DECAY_SHIFT; bin->intercepts -= bin->intercepts >> DECAY_SHIFT; cpu_data->total += bin->hits + bin->intercepts; target_residency_ns = drv->states[i].target_residency_ns; if (target_residency_ns <= cpu_data->sleep_length_ns) { idx_timer = i; if (target_residency_ns <= measured_ns) idx_duration = i; } } i = cpu_data->next_recent_idx++; if (cpu_data->next_recent_idx >= NR_RECENT) cpu_data->next_recent_idx = 0; if (cpu_data->recent_idx[i] >= 0) cpu_data->state_bins[cpu_data->recent_idx[i]].recent--; /* * If the deepest state's target residency is below the tick length, * make a record of it to help teo_select() decide whether or not * to stop the tick. This effectively adds an extra hits-only bin * beyond the last state-related one. */ if (target_residency_ns < TICK_NSEC) { cpu_data->tick_hits -= cpu_data->tick_hits >> DECAY_SHIFT; cpu_data->total += cpu_data->tick_hits; if (TICK_NSEC <= cpu_data->sleep_length_ns) { idx_timer = drv->state_count; if (TICK_NSEC <= measured_ns) { cpu_data->tick_hits += PULSE; goto end; } } } /* * If the measured idle duration falls into the same bin as the sleep * length, this is a "hit", so update the "hits" metric for that bin. * Otherwise, update the "intercepts" metric for the bin fallen into by * the measured idle duration. */ if (idx_timer == idx_duration) { cpu_data->state_bins[idx_timer].hits += PULSE; cpu_data->recent_idx[i] = -1; } else { cpu_data->state_bins[idx_duration].intercepts += PULSE; cpu_data->state_bins[idx_duration].recent++; cpu_data->recent_idx[i] = idx_duration; } end: cpu_data->total += PULSE; } static bool teo_state_ok(int i, struct cpuidle_driver *drv) { return !tick_nohz_tick_stopped() || drv->states[i].target_residency_ns >= TICK_NSEC; } /** * teo_find_shallower_state - Find shallower idle state matching given duration. * @drv: cpuidle driver containing state data. * @dev: Target CPU. * @state_idx: Index of the capping idle state. * @duration_ns: Idle duration value to match. * @no_poll: Don't consider polling states. */ static int teo_find_shallower_state(struct cpuidle_driver *drv, struct cpuidle_device *dev, int state_idx, s64 duration_ns, bool no_poll) { int i; for (i = state_idx - 1; i >= 0; i--) { if (dev->states_usage[i].disable || (no_poll && drv->states[i].flags & CPUIDLE_FLAG_POLLING)) continue; state_idx = i; if (drv->states[i].target_residency_ns <= duration_ns) break; } return state_idx; } /** * teo_select - Selects the next idle state to enter. * @drv: cpuidle driver containing state data. * @dev: Target CPU. * @stop_tick: Indication on whether or not to stop the scheduler tick. */ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, bool *stop_tick) { struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu); s64 latency_req = cpuidle_governor_latency_req(dev->cpu); ktime_t delta_tick = TICK_NSEC / 2; unsigned int tick_intercept_sum = 0; unsigned int idx_intercept_sum = 0; unsigned int intercept_sum = 0; unsigned int idx_recent_sum = 0; unsigned int recent_sum = 0; unsigned int idx_hit_sum = 0; unsigned int hit_sum = 0; int constraint_idx = 0; int idx0 = 0, idx = -1; bool alt_intercepts, alt_recent; bool cpu_utilized; s64 duration_ns; int i; if (dev->last_state_idx >= 0) { teo_update(drv, dev); dev->last_state_idx = -1; } cpu_data->time_span_ns = local_clock(); /* * Set the expected sleep length to infinity in case of an early * return. 
*/ cpu_data->sleep_length_ns = KTIME_MAX; /* Check if there is any choice in the first place. */ if (drv->state_count < 2) { idx = 0; goto out_tick; } if (!dev->states_usage[0].disable) idx = 0; cpu_utilized = teo_cpu_is_utilized(dev->cpu, cpu_data); /* * If the CPU is being utilized over the threshold and there are only 2 * states to choose from, the metrics need not be considered, so choose * the shallowest non-polling state and exit. */ if (drv->state_count < 3 && cpu_utilized) { /* * If state 0 is enabled and it is not a polling one, select it * right away unless the scheduler tick has been stopped, in * which case care needs to be taken to leave the CPU in a deep * enough state in case it is not woken up any time soon after * all. If state 1 is disabled, though, state 0 must be used * anyway. */ if ((!idx && !(drv->states[0].flags & CPUIDLE_FLAG_POLLING) && teo_state_ok(0, drv)) || dev->states_usage[1].disable) { idx = 0; goto out_tick; } /* Assume that state 1 is not a polling one and use it. */ idx = 1; duration_ns = drv->states[1].target_residency_ns; goto end; } /* Compute the sums of metrics for early wakeup pattern detection. */ for (i = 1; i < drv->state_count; i++) { struct teo_bin *prev_bin = &cpu_data->state_bins[i-1]; struct cpuidle_state *s = &drv->states[i]; /* * Update the sums of idle state metrics for all of the states * shallower than the current one. */ intercept_sum += prev_bin->intercepts; hit_sum += prev_bin->hits; recent_sum += prev_bin->recent; if (dev->states_usage[i].disable) continue; if (idx < 0) idx0 = i; /* first enabled state */ idx = i; if (s->exit_latency_ns <= latency_req) constraint_idx = i; /* Save the sums for the current state. */ idx_intercept_sum = intercept_sum; idx_hit_sum = hit_sum; idx_recent_sum = recent_sum; } /* Avoid unnecessary overhead. */ if (idx < 0) { idx = 0; /* No states enabled, must use 0. */ goto out_tick; } if (idx == idx0) { /* * Only one idle state is enabled, so use it, but do not * allow the tick to be stopped if it is too shallow. */ duration_ns = drv->states[idx].target_residency_ns; goto end; } tick_intercept_sum = intercept_sum + cpu_data->state_bins[drv->state_count-1].intercepts; /* * If the sum of the intercepts metric for all of the idle states * shallower than the current candidate one (idx) is greater than the * sum of the intercepts and hits metrics for the candidate state and * all of the deeper states, or the sum of the numbers of recent * intercepts over all of the states shallower than the candidate one * is greater than a half of the number of recent events taken into * account, a shallower idle state is likely to be a better choice. */ alt_intercepts = 2 * idx_intercept_sum > cpu_data->total - idx_hit_sum; alt_recent = idx_recent_sum > NR_RECENT / 2; if (alt_recent || alt_intercepts) { int first_suitable_idx = idx; /* * Look for the deepest idle state whose target residency had * not exceeded the idle duration in over a half of the relevant * cases (both with respect to intercepts overall and with * respect to the recent intercepts only) in the past. * * Take the possible duration limitation present if the tick * has been stopped already into account.
*/ intercept_sum = 0; recent_sum = 0; for (i = idx - 1; i >= 0; i--) { struct teo_bin *bin = &cpu_data->state_bins[i]; intercept_sum += bin->intercepts; recent_sum += bin->recent; if ((!alt_recent || 2 * recent_sum > idx_recent_sum) && (!alt_intercepts || 2 * intercept_sum > idx_intercept_sum)) { /* * Use the current state unless it is too * shallow or disabled, in which case take the * first enabled state that is deep enough. */ if (teo_state_ok(i, drv) && !dev->states_usage[i].disable) idx = i; else idx = first_suitable_idx; break; } if (dev->states_usage[i].disable) continue; if (!teo_state_ok(i, drv)) { /* * The current state is too shallow, but if an * alternative candidate state has been found, * it may still turn out to be a better choice. */ if (first_suitable_idx != idx) continue; break; } first_suitable_idx = i; } } /* * If there is a latency constraint, it may be necessary to select an * idle state shallower than the current candidate one. */ if (idx > constraint_idx) idx = constraint_idx; /* * If the CPU is being utilized over the threshold, choose a shallower * non-polling state to improve latency, unless the scheduler tick has * been stopped already and the shallower state's target residency is * not sufficiently large. */ if (cpu_utilized) { i = teo_find_shallower_state(drv, dev, idx, KTIME_MAX, true); if (teo_state_ok(i, drv)) idx = i; } /* * Skip the timers check if state 0 is the current candidate one, * because an immediate non-timer wakeup is expected in that case. */ if (!idx) goto out_tick; /* * If state 0 is a polling one, check if the target residency of * the current candidate state is low enough and skip the timers * check in that case too. */ if ((drv->states[0].flags & CPUIDLE_FLAG_POLLING) && drv->states[idx].target_residency_ns < RESIDENCY_THRESHOLD_NS) goto out_tick; duration_ns = tick_nohz_get_sleep_length(&delta_tick); cpu_data->sleep_length_ns = duration_ns; /* * If the closest expected timer is before the target residency of the * candidate state, a shallower one needs to be found. */ if (drv->states[idx].target_residency_ns > duration_ns) { i = teo_find_shallower_state(drv, dev, idx, duration_ns, false); if (teo_state_ok(i, drv)) idx = i; } /* * If the selected state's target residency is below the tick length * and intercepts occurring before the tick length are the majority of * total wakeup events, do not stop the tick. */ if (drv->states[idx].target_residency_ns < TICK_NSEC && tick_intercept_sum > cpu_data->total / 2 + cpu_data->total / 8) duration_ns = TICK_NSEC / 2; end: /* * Allow the tick to be stopped unless the selected state is a polling * one or the expected idle duration is shorter than the tick period * length. */ if ((!(drv->states[idx].flags & CPUIDLE_FLAG_POLLING) && duration_ns >= TICK_NSEC) || tick_nohz_tick_stopped()) return idx; /* * The tick is not going to be stopped, so if the target residency of * the state to be returned is not within the time till the closest * timer including the tick, try to correct that. */ if (idx > idx0 && drv->states[idx].target_residency_ns > delta_tick) idx = teo_find_shallower_state(drv, dev, idx, delta_tick, false); out_tick: *stop_tick = false; return idx; } /** * teo_reflect - Note that governor data for the CPU needs to be updated. * @dev: Target CPU. * @state: Entered state.
*/ static void teo_reflect(struct cpuidle_device *dev, int state) { struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu); dev->last_state_idx = state; /* * If the wakeup was not "natural", but triggered by one of the safety * nets, assume that the CPU might have been idle for the entire sleep * length time. */ if (dev->poll_time_limit || (tick_nohz_idle_got_tick() && cpu_data->sleep_length_ns > TICK_NSEC)) { dev->poll_time_limit = false; cpu_data->time_span_ns = cpu_data->sleep_length_ns; } else { cpu_data->time_span_ns = local_clock() - cpu_data->time_span_ns; } } /** * teo_enable_device - Initialize the governor's data for the target CPU. * @drv: cpuidle driver (not used). * @dev: Target CPU. */ static int teo_enable_device(struct cpuidle_driver *drv, struct cpuidle_device *dev) { struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu); unsigned long max_capacity = arch_scale_cpu_capacity(dev->cpu); int i; memset(cpu_data, 0, sizeof(*cpu_data)); cpu_data->util_threshold = max_capacity >> UTIL_THRESHOLD_SHIFT; for (i = 0; i < NR_RECENT; i++) cpu_data->recent_idx[i] = -1; return 0; } static struct cpuidle_governor teo_governor = { .name = "teo", .rating = 19, .enable = teo_enable_device, .select = teo_select, .reflect = teo_reflect, }; static int __init teo_governor_init(void) { return cpuidle_register_governor(&teo_governor); } postcore_initcall(teo_governor_init);
linux-master
drivers/cpuidle/governors/teo.c
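The selection loop near the top of the file above demotes the candidate idle state when the bins below it hold a majority of the recorded wakeup events. What follows is a minimal user-space sketch of just that majority test, with made-up bin contents and sums; it omits the teo_state_ok()/disabled-state handling and is illustrative only, not kernel API.

/*
 * Sketch of the teo bin-majority check: candidate state idx is demoted
 * to the shallowest state below which the accumulated "intercepts" and
 * "recent" counts exceed half of the totals below the candidate.
 * All names and numbers here are hypothetical.
 */
#include <stdio.h>

struct bin { unsigned int intercepts; unsigned int recent; };

static int pick_state(const struct bin *bins, int idx,
		      unsigned int idx_intercept_sum,
		      unsigned int idx_recent_sum)
{
	unsigned int intercept_sum = 0, recent_sum = 0;
	int i;

	for (i = idx - 1; i >= 0; i--) {
		intercept_sum += bins[i].intercepts;
		recent_sum += bins[i].recent;

		/* Majority of events fall below bin i: prefer state i. */
		if (2 * recent_sum > idx_recent_sum &&
		    2 * intercept_sum > idx_intercept_sum)
			return i;
	}
	return idx;
}

int main(void)
{
	/* Hypothetical history: most wakeups land in the middle bin. */
	struct bin bins[3] = { { 10, 4 }, { 30, 9 }, { 5, 1 } };
	/* Totals over the bins below the candidate (bins 0 and 1). */
	unsigned int isum = 40, rsum = 13;

	printf("candidate 2 -> selected %d\n", pick_state(bins, 2, isum, rsum));
	return 0;
}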
// SPDX-License-Identifier: GPL-2.0-only
/*
 * menu.c - the menu idle governor
 *
 * Copyright (C) 2006-2007 Adam Belay <[email protected]>
 * Copyright (C) 2009 Intel Corporation
 * Author:
 *        Arjan van de Ven <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/stat.h>
#include <linux/math64.h>

#include "gov.h"

#define BUCKETS 12
#define INTERVAL_SHIFT 3
#define INTERVALS (1UL << INTERVAL_SHIFT)
#define RESOLUTION 1024
#define DECAY 8
#define MAX_INTERESTING (50000 * NSEC_PER_USEC)

/*
 * Concepts and ideas behind the menu governor
 *
 * For the menu governor, there are 3 decision factors for picking a C
 * state:
 * 1) Energy break even point
 * 2) Performance impact
 * 3) Latency tolerance (from pmqos infrastructure)
 * These three factors are treated independently.
 *
 * Energy break even point
 * -----------------------
 * C state entry and exit have an energy cost, and a certain amount of time in
 * the C state is required to actually break even on this cost. CPUIDLE
 * provides us this duration in the "target_residency" field. So all that we
 * need is a good prediction of how long we'll be idle. Like the traditional
 * menu governor, we start with the actual known "next timer event" time.
 *
 * Since there are other sources of wakeups (interrupts for example) than
 * the next timer event, this estimation is rather optimistic. To get a
 * more realistic estimate, a correction factor is applied to the estimate,
 * that is based on historic behavior. For example, if in the past the actual
 * duration always was 50% of the next timer tick, the correction factor will
 * be 0.5.
 *
 * menu uses a running average for this correction factor, however it uses a
 * set of factors, not just a single factor. This stems from the realization
 * that the ratio is dependent on the order of magnitude of the expected
 * duration; if we expect 500 milliseconds of idle time the likelihood of
 * getting an interrupt very early is much higher than if we expect 50
 * microseconds of idle time. A second independent factor that has big impact
 * on the actual factor is if there is (disk) IO outstanding or not.
 * (as a special twist, we consider every sleep longer than 50 milliseconds
 * as perfect; there are no power gains for sleeping longer than this)
 *
 * For these two reasons we keep an array of 12 independent factors, that gets
 * indexed based on the magnitude of the expected duration as well as the
 * "is IO outstanding" property.
 *
 * Repeatable-interval-detector
 * ----------------------------
 * There are some cases where "next timer" is a completely unusable predictor:
 * Those cases where the interval is fixed, for example due to hardware
 * interrupt mitigation, but also due to fixed transfer rate devices such as
 * mice.
 * For this, we use a different predictor: We track the duration of the last 8
 * intervals and if the standard deviation of these 8 intervals is below a
 * threshold value, we use the average of these intervals as prediction.
 *
 * Limiting Performance Impact
 * ---------------------------
 * C states, especially those with large exit latencies, can have a real
 * noticeable impact on workloads, which is not acceptable for most sysadmins,
 * and in addition, less performance has a power price of its own.
 *
 * As a general rule of thumb, menu assumes that the following heuristic
 * holds:
 *     The busier the system, the less impact of C states is acceptable
 *
 * This rule-of-thumb is implemented using a performance-multiplier:
 * If the exit latency times the performance multiplier is longer than
 * the predicted duration, the C state is not considered a candidate
 * for selection due to a too high performance impact. So the higher
 * this multiplier is, the longer we need to be idle to pick a deep C
 * state, and thus the less likely a busy CPU will hit such a deep
 * C state.
 *
 * Two factors are used in determining this multiplier:
 * a value of 10 is added for each point of "per cpu load average" we have.
 * a value of 5 points is added for each process that is waiting for
 * IO on this CPU.
 * (these values are experimentally determined)
 *
 * The load average factor gives a longer term (few seconds) input to the
 * decision, while the iowait value gives a cpu local instantaneous input.
 * The iowait factor may look low, but realize that this is also already
 * represented in the system load average.
 */

struct menu_device {
	int		needs_update;
	int		tick_wakeup;

	u64		next_timer_ns;
	unsigned int	bucket;
	unsigned int	correction_factor[BUCKETS];
	unsigned int	intervals[INTERVALS];
	int		interval_ptr;
};

static inline int which_bucket(u64 duration_ns, unsigned int nr_iowaiters)
{
	int bucket = 0;

	/*
	 * We keep two groups of stats; one with IO pending, one without.
	 * This allows us to calculate
	 * E(duration)|iowait
	 */
	if (nr_iowaiters)
		bucket = BUCKETS/2;

	if (duration_ns < 10ULL * NSEC_PER_USEC)
		return bucket;
	if (duration_ns < 100ULL * NSEC_PER_USEC)
		return bucket + 1;
	if (duration_ns < 1000ULL * NSEC_PER_USEC)
		return bucket + 2;
	if (duration_ns < 10000ULL * NSEC_PER_USEC)
		return bucket + 3;
	if (duration_ns < 100000ULL * NSEC_PER_USEC)
		return bucket + 4;
	return bucket + 5;
}

/*
 * Return a multiplier for the exit latency that is intended
 * to take performance requirements into account.
 * The more performance critical we estimate the system
 * to be, the higher this multiplier, and thus the higher
 * the barrier to go to an expensive C state.
 */
static inline int performance_multiplier(unsigned int nr_iowaiters)
{
	/* for IO wait tasks (per cpu!) we add 10x each */
	return 1 + 10 * nr_iowaiters;
}

static DEFINE_PER_CPU(struct menu_device, menu_devices);

static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);

/*
 * Try detecting repeating patterns by keeping track of the last 8
 * intervals, and checking if the standard deviation of that set
 * of points is below a threshold. If it is... then use the
 * average of these 8 points as the estimated value.
*/ static unsigned int get_typical_interval(struct menu_device *data) { int i, divisor; unsigned int min, max, thresh, avg; uint64_t sum, variance; thresh = INT_MAX; /* Discard outliers above this value */ again: /* First calculate the average of past intervals */ min = UINT_MAX; max = 0; sum = 0; divisor = 0; for (i = 0; i < INTERVALS; i++) { unsigned int value = data->intervals[i]; if (value <= thresh) { sum += value; divisor++; if (value > max) max = value; if (value < min) min = value; } } if (!max) return UINT_MAX; if (divisor == INTERVALS) avg = sum >> INTERVAL_SHIFT; else avg = div_u64(sum, divisor); /* Then try to determine variance */ variance = 0; for (i = 0; i < INTERVALS; i++) { unsigned int value = data->intervals[i]; if (value <= thresh) { int64_t diff = (int64_t)value - avg; variance += diff * diff; } } if (divisor == INTERVALS) variance >>= INTERVAL_SHIFT; else do_div(variance, divisor); /* * The typical interval is obtained when standard deviation is * small (stddev <= 20 us, variance <= 400 us^2) or standard * deviation is small compared to the average interval (avg > * 6*stddev, avg^2 > 36*variance). The average is smaller than * UINT_MAX aka U32_MAX, so computing its square does not * overflow a u64. We simply reject this candidate average if * the standard deviation is greater than 715 s (which is * rather unlikely). * * Use this result only if there is no timer to wake us up sooner. */ if (likely(variance <= U64_MAX/36)) { if ((((u64)avg*avg > variance*36) && (divisor * 4 >= INTERVALS * 3)) || variance <= 400) { return avg; } } /* * If we have outliers to the upside in our distribution, discard * those by setting the threshold to exclude these outliers, then * calculate the average and standard deviation again. Once we get * down to the bottom 3/4 of our samples, stop excluding samples. * * This can deal with workloads that have long pauses interspersed * with sporadic activity with a bunch of short pauses. */ if ((divisor * 4) <= INTERVALS * 3) return UINT_MAX; thresh = max - 1; goto again; } /** * menu_select - selects the next idle state to enter * @drv: cpuidle driver containing state data * @dev: the CPU * @stop_tick: indication on whether or not to stop the tick */ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, bool *stop_tick) { struct menu_device *data = this_cpu_ptr(&menu_devices); s64 latency_req = cpuidle_governor_latency_req(dev->cpu); u64 predicted_ns; u64 interactivity_req; unsigned int nr_iowaiters; ktime_t delta, delta_tick; int i, idx; if (data->needs_update) { menu_update(drv, dev); data->needs_update = 0; } nr_iowaiters = nr_iowait_cpu(dev->cpu); /* Find the shortest expected idle interval. */ predicted_ns = get_typical_interval(data) * NSEC_PER_USEC; if (predicted_ns > RESIDENCY_THRESHOLD_NS) { unsigned int timer_us; /* Determine the time till the closest timer. */ delta = tick_nohz_get_sleep_length(&delta_tick); if (unlikely(delta < 0)) { delta = 0; delta_tick = 0; } data->next_timer_ns = delta; data->bucket = which_bucket(data->next_timer_ns, nr_iowaiters); /* Round up the result for half microseconds. */ timer_us = div_u64((RESOLUTION * DECAY * NSEC_PER_USEC) / 2 + data->next_timer_ns * data->correction_factor[data->bucket], RESOLUTION * DECAY * NSEC_PER_USEC); /* Use the lowest expected idle interval to pick the idle state. 
*/ predicted_ns = min((u64)timer_us * NSEC_PER_USEC, predicted_ns); } else { /* * Because the next timer event is not going to be determined * in this case, assume that without the tick the closest timer * will be in distant future and that the closest tick will occur * after 1/2 of the tick period. */ data->next_timer_ns = KTIME_MAX; delta_tick = TICK_NSEC / 2; data->bucket = which_bucket(KTIME_MAX, nr_iowaiters); } if (unlikely(drv->state_count <= 1 || latency_req == 0) || ((data->next_timer_ns < drv->states[1].target_residency_ns || latency_req < drv->states[1].exit_latency_ns) && !dev->states_usage[0].disable)) { /* * In this case state[0] will be used no matter what, so return * it right away and keep the tick running if state[0] is a * polling one. */ *stop_tick = !(drv->states[0].flags & CPUIDLE_FLAG_POLLING); return 0; } if (tick_nohz_tick_stopped()) { /* * If the tick is already stopped, the cost of possible short * idle duration misprediction is much higher, because the CPU * may be stuck in a shallow idle state for a long time as a * result of it. In that case say we might mispredict and use * the known time till the closest timer event for the idle * state selection. */ if (predicted_ns < TICK_NSEC) predicted_ns = data->next_timer_ns; } else { /* * Use the performance multiplier and the user-configurable * latency_req to determine the maximum exit latency. */ interactivity_req = div64_u64(predicted_ns, performance_multiplier(nr_iowaiters)); if (latency_req > interactivity_req) latency_req = interactivity_req; } /* * Find the idle state with the lowest power while satisfying * our constraints. */ idx = -1; for (i = 0; i < drv->state_count; i++) { struct cpuidle_state *s = &drv->states[i]; if (dev->states_usage[i].disable) continue; if (idx == -1) idx = i; /* first enabled state */ if (s->target_residency_ns > predicted_ns) { /* * Use a physical idle state, not busy polling, unless * a timer is going to trigger soon enough. */ if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) && s->exit_latency_ns <= latency_req && s->target_residency_ns <= data->next_timer_ns) { predicted_ns = s->target_residency_ns; idx = i; break; } if (predicted_ns < TICK_NSEC) break; if (!tick_nohz_tick_stopped()) { /* * If the state selected so far is shallow, * waking up early won't hurt, so retain the * tick in that case and let the governor run * again in the next iteration of the loop. */ predicted_ns = drv->states[idx].target_residency_ns; break; } /* * If the state selected so far is shallow and this * state's target residency matches the time till the * closest timer event, select this one to avoid getting * stuck in the shallow one for too long. */ if (drv->states[idx].target_residency_ns < TICK_NSEC && s->target_residency_ns <= delta_tick) idx = i; return idx; } if (s->exit_latency_ns > latency_req) break; idx = i; } if (idx == -1) idx = 0; /* No states enabled. Must use 0. */ /* * Don't stop the tick if the selected state is a polling one or if the * expected idle duration is shorter than the tick period length. */ if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) || predicted_ns < TICK_NSEC) && !tick_nohz_tick_stopped()) { *stop_tick = false; if (idx > 0 && drv->states[idx].target_residency_ns > delta_tick) { /* * The tick is not going to be stopped and the target * residency of the state to be returned is not within * the time until the next timer event including the * tick, so try to correct that. 
 */
			for (i = idx - 1; i >= 0; i--) {
				if (dev->states_usage[i].disable)
					continue;

				idx = i;
				if (drv->states[i].target_residency_ns <= delta_tick)
					break;
			}
		}
	}

	return idx;
}

/**
 * menu_reflect - records that data structures need update
 * @dev: the CPU
 * @index: the index of actual entered state
 *
 * NOTE: it's important to be fast here because this operation will add to
 * the overall exit latency.
 */
static void menu_reflect(struct cpuidle_device *dev, int index)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);

	dev->last_state_idx = index;
	data->needs_update = 1;
	data->tick_wakeup = tick_nohz_idle_got_tick();
}

/**
 * menu_update - attempts to guess what happened after entry
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);
	int last_idx = dev->last_state_idx;
	struct cpuidle_state *target = &drv->states[last_idx];
	u64 measured_ns;
	unsigned int new_factor;

	/*
	 * Try to figure out how much time passed between entry to low
	 * power state and occurrence of the wakeup event.
	 *
	 * If the entered idle state didn't support residency measurements,
	 * we use them anyway if they are short, and if long,
	 * truncate to the whole expected time.
	 *
	 * Any measured amount of time will include the exit latency.
	 * Since we are interested in when the wakeup began, not when it
	 * was completed, we must subtract the exit latency. However, if
	 * the measured amount of time is less than the exit latency,
	 * assume the state was never reached and the exit latency is 0.
	 */

	if (data->tick_wakeup && data->next_timer_ns > TICK_NSEC) {
		/*
		 * The nohz code said that there wouldn't be any events within
		 * the tick boundary (if the tick was stopped), but the idle
		 * duration predictor had a differing opinion. Since the CPU
		 * was woken up by a tick (that wasn't stopped after all), the
		 * predictor was not quite right, so assume that the CPU could
		 * have been idle long (but not forever) to help the idle
		 * duration predictor do a better job next time.
		 */
		measured_ns = 9 * MAX_INTERESTING / 10;
	} else if ((drv->states[last_idx].flags & CPUIDLE_FLAG_POLLING) &&
		   dev->poll_time_limit) {
		/*
		 * The CPU exited the "polling" state due to a time limit, so
		 * the idle duration prediction leading to the selection of that
		 * state was inaccurate. If a better prediction had been made,
		 * the CPU might have been woken up from idle by the next timer.
		 * Assume that to be the case.
		 */
		measured_ns = data->next_timer_ns;
	} else {
		/* measured value */
		measured_ns = dev->last_residency_ns;

		/* Deduct exit latency */
		if (measured_ns > 2 * target->exit_latency_ns)
			measured_ns -= target->exit_latency_ns;
		else
			measured_ns /= 2;
	}

	/* Make sure our coefficients do not exceed unity */
	if (measured_ns > data->next_timer_ns)
		measured_ns = data->next_timer_ns;

	/* Update our correction ratio */
	new_factor = data->correction_factor[data->bucket];
	new_factor -= new_factor / DECAY;

	if (data->next_timer_ns > 0 && measured_ns < MAX_INTERESTING)
		new_factor += div64_u64(RESOLUTION * measured_ns,
					data->next_timer_ns);
	else
		/*
		 * we were idle so long that we count it as a perfect
		 * prediction
		 */
		new_factor += RESOLUTION;

	/*
	 * We don't want 0 as factor; we always want at least
	 * a tiny bit of estimated time. Fortunately, due to rounding,
	 * new_factor will stay nonzero regardless of measured_ns values
	 * and the compiler can eliminate this test as long as DECAY > 1.
 */
	if (DECAY == 1 && unlikely(new_factor == 0))
		new_factor = 1;

	data->correction_factor[data->bucket] = new_factor;

	/* update the repeating-pattern data */
	data->intervals[data->interval_ptr++] = ktime_to_us(measured_ns);
	if (data->interval_ptr >= INTERVALS)
		data->interval_ptr = 0;
}

/**
 * menu_enable_device - scans a CPU's states and does setup
 * @drv: cpuidle driver
 * @dev: the CPU
 */
static int menu_enable_device(struct cpuidle_driver *drv,
				struct cpuidle_device *dev)
{
	struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
	int i;

	memset(data, 0, sizeof(struct menu_device));

	/*
	 * if the correction factor is 0 (e.g. first-time init or cpu
	 * hotplug etc), we actually want to start out with a unity factor.
	 */
	for (i = 0; i < BUCKETS; i++)
		data->correction_factor[i] = RESOLUTION * DECAY;

	return 0;
}

static struct cpuidle_governor menu_governor = {
	.name =		"menu",
	.rating =	20,
	.enable =	menu_enable_device,
	.select =	menu_select,
	.reflect =	menu_reflect,
};

/**
 * init_menu - initializes the governor
 */
static int __init init_menu(void)
{
	return cpuidle_register_governor(&menu_governor);
}

postcore_initcall(init_menu);
linux-master
drivers/cpuidle/governors/menu.c
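menu_update() in the file above maintains a per-bucket correction factor as a decaying average of the measured/next-timer ratio scaled by RESOLUTION. Below is a small stand-alone sketch of just that arithmetic, with hypothetical inputs rather than kernel data, showing that the factor converges toward RESOLUTION * DECAY times the observed ratio.

/*
 * Sketch of the menu correction-factor update: each step discards
 * 1/DECAY of the old factor and adds the newly observed ratio.
 * Inputs are made up for illustration.
 */
#include <stdio.h>
#include <stdint.h>

#define RESOLUTION 1024
#define DECAY 8

static unsigned int update_factor(unsigned int factor,
				  uint64_t measured_ns, uint64_t next_timer_ns)
{
	factor -= factor / DECAY;				/* decay old history */
	factor += (unsigned int)(RESOLUTION * measured_ns / next_timer_ns);
	return factor;
}

int main(void)
{
	unsigned int factor = RESOLUTION * DECAY;	/* unity start, as at enable time */
	int i;

	/* CPU repeatedly wakes after half of the predicted timer distance. */
	for (i = 0; i < 20; i++)
		factor = update_factor(factor, 500000, 1000000);

	/* Converges toward RESOLUTION * DECAY / 2, i.e. a 0.5 ratio. */
	printf("factor = %u (unity = %u)\n", factor, RESOLUTION * DECAY);
	return 0;
}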
/*
 * ladder.c - the residency ladder algorithm
 *
 * Copyright (C) 2001, 2002 Andy Grover <[email protected]>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <[email protected]>
 * Copyright (C) 2004, 2005 Dominik Brodowski <[email protected]>
 *
 * (C) 2006-2007 Venkatesh Pallipadi <[email protected]>
 *               Shaohua Li <[email protected]>
 *               Adam Belay <[email protected]>
 *
 * This code is licenced under the GPL.
 */

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/jiffies.h>
#include <linux/tick.h>

#include <asm/io.h>
#include <linux/uaccess.h>

#define PROMOTION_COUNT 4
#define DEMOTION_COUNT 1

struct ladder_device_state {
	struct {
		u32 promotion_count;
		u32 demotion_count;
		u64 promotion_time_ns;
		u64 demotion_time_ns;
	} threshold;
	struct {
		int promotion_count;
		int demotion_count;
	} stats;
};

struct ladder_device {
	struct ladder_device_state states[CPUIDLE_STATE_MAX];
};

static DEFINE_PER_CPU(struct ladder_device, ladder_devices);

/**
 * ladder_do_selection - prepares private data for a state change
 * @dev: the CPU
 * @ldev: the ladder device
 * @old_idx: the current state index
 * @new_idx: the new target state index
 */
static inline void ladder_do_selection(struct cpuidle_device *dev,
				       struct ladder_device *ldev,
				       int old_idx, int new_idx)
{
	ldev->states[old_idx].stats.promotion_count = 0;
	ldev->states[old_idx].stats.demotion_count = 0;
	dev->last_state_idx = new_idx;
}

/**
 * ladder_select_state - selects the next state to enter
 * @drv: cpuidle driver
 * @dev: the CPU
 * @dummy: not used
 */
static int ladder_select_state(struct cpuidle_driver *drv,
			       struct cpuidle_device *dev, bool *dummy)
{
	struct ladder_device *ldev = this_cpu_ptr(&ladder_devices);
	struct ladder_device_state *last_state;
	int last_idx = dev->last_state_idx;
	int first_idx = drv->states[0].flags & CPUIDLE_FLAG_POLLING ?
1 : 0; s64 latency_req = cpuidle_governor_latency_req(dev->cpu); s64 last_residency; /* Special case when user has set very strict latency requirement */ if (unlikely(latency_req == 0)) { ladder_do_selection(dev, ldev, last_idx, 0); return 0; } last_state = &ldev->states[last_idx]; last_residency = dev->last_residency_ns - drv->states[last_idx].exit_latency_ns; /* consider promotion */ if (last_idx < drv->state_count - 1 && !dev->states_usage[last_idx + 1].disable && last_residency > last_state->threshold.promotion_time_ns && drv->states[last_idx + 1].exit_latency_ns <= latency_req) { last_state->stats.promotion_count++; last_state->stats.demotion_count = 0; if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) { ladder_do_selection(dev, ldev, last_idx, last_idx + 1); return last_idx + 1; } } /* consider demotion */ if (last_idx > first_idx && (dev->states_usage[last_idx].disable || drv->states[last_idx].exit_latency_ns > latency_req)) { int i; for (i = last_idx - 1; i > first_idx; i--) { if (drv->states[i].exit_latency_ns <= latency_req) break; } ladder_do_selection(dev, ldev, last_idx, i); return i; } if (last_idx > first_idx && last_residency < last_state->threshold.demotion_time_ns) { last_state->stats.demotion_count++; last_state->stats.promotion_count = 0; if (last_state->stats.demotion_count >= last_state->threshold.demotion_count) { ladder_do_selection(dev, ldev, last_idx, last_idx - 1); return last_idx - 1; } } /* otherwise remain at the current state */ return last_idx; } /** * ladder_enable_device - setup for the governor * @drv: cpuidle driver * @dev: the CPU */ static int ladder_enable_device(struct cpuidle_driver *drv, struct cpuidle_device *dev) { int i; int first_idx = drv->states[0].flags & CPUIDLE_FLAG_POLLING ? 1 : 0; struct ladder_device *ldev = &per_cpu(ladder_devices, dev->cpu); struct ladder_device_state *lstate; struct cpuidle_state *state; dev->last_state_idx = first_idx; for (i = first_idx; i < drv->state_count; i++) { state = &drv->states[i]; lstate = &ldev->states[i]; lstate->stats.promotion_count = 0; lstate->stats.demotion_count = 0; lstate->threshold.promotion_count = PROMOTION_COUNT; lstate->threshold.demotion_count = DEMOTION_COUNT; if (i < drv->state_count - 1) lstate->threshold.promotion_time_ns = state->exit_latency_ns; if (i > first_idx) lstate->threshold.demotion_time_ns = state->exit_latency_ns; } return 0; } /** * ladder_reflect - update the correct last_state_idx * @dev: the CPU * @index: the index of actual state entered */ static void ladder_reflect(struct cpuidle_device *dev, int index) { if (index > 0) dev->last_state_idx = index; } static struct cpuidle_governor ladder_governor = { .name = "ladder", .rating = 10, .enable = ladder_enable_device, .select = ladder_select_state, .reflect = ladder_reflect, }; /** * init_ladder - initializes the governor */ static int __init init_ladder(void) { /* * When NO_HZ is disabled, or when booting with nohz=off, the ladder * governor is better so give it a higher rating than the menu * governor. */ if (!tick_nohz_enabled) ladder_governor.rating = 25; return cpuidle_register_governor(&ladder_governor); } postcore_initcall(init_ladder);
linux-master
drivers/cpuidle/governors/ladder.c
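ladder_select_state() in the file above climbs or drops one state at a time, promoting after PROMOTION_COUNT consecutive long residencies and demoting after DEMOTION_COUNT short ones. The toy simulation below reproduces only that promote/demote bookkeeping with invented thresholds and residencies; it is a sketch of the rule, not the kernel implementation (it omits latency constraints and disabled states).

/* Toy ladder: counters per rung, step up on long sleeps, down on short. */
#include <stdio.h>

#define PROMOTION_COUNT 4
#define DEMOTION_COUNT 1

struct rung { long promo_ns; long demo_ns; int promos; int demos; };

static int step(struct rung *r, int idx, int max_idx, long residency_ns)
{
	if (idx < max_idx && residency_ns > r[idx].promo_ns) {
		r[idx].demos = 0;
		if (++r[idx].promos >= PROMOTION_COUNT) {
			r[idx].promos = 0;
			return idx + 1;		/* promote one rung */
		}
	} else if (idx > 0 && residency_ns < r[idx].demo_ns) {
		r[idx].promos = 0;
		if (++r[idx].demos >= DEMOTION_COUNT) {
			r[idx].demos = 0;
			return idx - 1;		/* demote one rung */
		}
	}
	return idx;
}

int main(void)
{
	/* Hypothetical promotion/demotion thresholds in ns. */
	struct rung rungs[3] = {
		{ 1000, 0, 0, 0 }, { 5000, 1000, 0, 0 }, { 0, 5000, 0, 0 },
	};
	int idx = 0, i;

	for (i = 0; i < 8; i++) {		/* long idle periods: climb */
		idx = step(rungs, idx, 2, 10000);
		printf("after long sleep %d: state %d\n", i, idx);
	}
	idx = step(rungs, idx, 2, 100);		/* one short sleep: demote */
	printf("after short sleep: state %d\n", idx);
	return 0;
}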
// SPDX-License-Identifier: GPL-2.0 /* * haltpoll.c - haltpoll idle governor * * Copyright 2019 Red Hat, Inc. and/or its affiliates. * * This work is licensed under the terms of the GNU GPL, version 2. See * the COPYING file in the top-level directory. * * Authors: Marcelo Tosatti <[email protected]> */ #include <linux/kernel.h> #include <linux/cpuidle.h> #include <linux/time.h> #include <linux/ktime.h> #include <linux/hrtimer.h> #include <linux/tick.h> #include <linux/sched.h> #include <linux/module.h> #include <linux/kvm_para.h> #include <trace/events/power.h> static unsigned int guest_halt_poll_ns __read_mostly = 200000; module_param(guest_halt_poll_ns, uint, 0644); /* division factor to shrink halt_poll_ns */ static unsigned int guest_halt_poll_shrink __read_mostly = 2; module_param(guest_halt_poll_shrink, uint, 0644); /* multiplication factor to grow per-cpu poll_limit_ns */ static unsigned int guest_halt_poll_grow __read_mostly = 2; module_param(guest_halt_poll_grow, uint, 0644); /* value in us to start growing per-cpu halt_poll_ns */ static unsigned int guest_halt_poll_grow_start __read_mostly = 50000; module_param(guest_halt_poll_grow_start, uint, 0644); /* allow shrinking guest halt poll */ static bool guest_halt_poll_allow_shrink __read_mostly = true; module_param(guest_halt_poll_allow_shrink, bool, 0644); /** * haltpoll_select - selects the next idle state to enter * @drv: cpuidle driver containing state data * @dev: the CPU * @stop_tick: indication on whether or not to stop the tick */ static int haltpoll_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, bool *stop_tick) { s64 latency_req = cpuidle_governor_latency_req(dev->cpu); if (!drv->state_count || latency_req == 0) { *stop_tick = false; return 0; } if (dev->poll_limit_ns == 0) return 1; /* Last state was poll? 
 */
	if (dev->last_state_idx == 0) {
		/* Halt if no event occurred on poll window */
		if (dev->poll_time_limit)
			return 1;

		*stop_tick = false;
		/* Otherwise, poll again */
		return 0;
	}

	*stop_tick = false;
	/* Last state was halt: poll */
	return 0;
}

static void adjust_poll_limit(struct cpuidle_device *dev, u64 block_ns)
{
	unsigned int val;

	/* Grow poll_limit_ns if
	 * poll_limit_ns < block_ns <= guest_halt_poll_ns
	 */
	if (block_ns > dev->poll_limit_ns && block_ns <= guest_halt_poll_ns) {
		val = dev->poll_limit_ns * guest_halt_poll_grow;

		if (val < guest_halt_poll_grow_start)
			val = guest_halt_poll_grow_start;
		if (val > guest_halt_poll_ns)
			val = guest_halt_poll_ns;

		trace_guest_halt_poll_ns_grow(val, dev->poll_limit_ns);
		dev->poll_limit_ns = val;
	} else if (block_ns > guest_halt_poll_ns &&
		   guest_halt_poll_allow_shrink) {
		unsigned int shrink = guest_halt_poll_shrink;

		val = dev->poll_limit_ns;
		if (shrink == 0)
			val = 0;
		else
			val /= shrink;
		trace_guest_halt_poll_ns_shrink(val, dev->poll_limit_ns);
		dev->poll_limit_ns = val;
	}
}

/**
 * haltpoll_reflect - update variables and update poll time
 * @dev: the CPU
 * @index: the index of actual entered state
 */
static void haltpoll_reflect(struct cpuidle_device *dev, int index)
{
	dev->last_state_idx = index;

	if (index != 0)
		adjust_poll_limit(dev, dev->last_residency_ns);
}

/**
 * haltpoll_enable_device - scans a CPU's states and does setup
 * @drv: cpuidle driver
 * @dev: the CPU
 */
static int haltpoll_enable_device(struct cpuidle_driver *drv,
				  struct cpuidle_device *dev)
{
	dev->poll_limit_ns = 0;

	return 0;
}

static struct cpuidle_governor haltpoll_governor = {
	.name =			"haltpoll",
	.rating =		9,
	.enable =		haltpoll_enable_device,
	.select =		haltpoll_select,
	.reflect =		haltpoll_reflect,
};

static int __init init_haltpoll(void)
{
	if (kvm_para_available())
		return cpuidle_register_governor(&haltpoll_governor);

	return 0;
}

postcore_initcall(init_haltpoll);
linux-master
drivers/cpuidle/governors/haltpoll.c
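adjust_poll_limit() in the file above grows the per-CPU poll window geometrically while blocking intervals stay at or under guest_halt_poll_ns, and shrinks it once they exceed that ceiling. The user-space sketch below mirrors that arithmetic with the default module parameters; the allow-shrink toggle is assumed enabled, and all block times are invented for illustration.

/* Sketch of the haltpoll grow/shrink rule with the default parameters. */
#include <stdio.h>
#include <stdint.h>

static unsigned int ceiling = 200000;	/* mirrors guest_halt_poll_ns */
static unsigned int grow = 2, shrink = 2, grow_start = 50000;

static unsigned int adjust(unsigned int limit, uint64_t block_ns)
{
	if (block_ns > limit && block_ns <= ceiling) {
		unsigned int val = limit * grow;	/* grow, clamped */

		if (val < grow_start)
			val = grow_start;
		if (val > ceiling)
			val = ceiling;
		return val;
	}
	if (block_ns > ceiling)				/* blocked too long */
		return shrink ? limit / shrink : 0;
	return limit;
}

int main(void)
{
	unsigned int limit = 0;

	limit = adjust(limit, 10000);	/* short block: jump to grow_start */
	printf("limit = %u\n", limit);	/* 50000 */
	limit = adjust(limit, 60000);	/* still short: double */
	printf("limit = %u\n", limit);	/* 100000 */
	limit = adjust(limit, 500000);	/* long block: halve */
	printf("limit = %u\n", limit);	/* 50000 */
	return 0;
}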
// SPDX-License-Identifier: GPL-2.0-or-later /* * Linux I2C core * * Copyright (C) 1995-99 Simon G. Vogl * With some changes from Kyösti Mälkki <[email protected]> * Mux support by Rodolfo Giometti <[email protected]> and * Michael Lawnick <[email protected]> * * Copyright (C) 2013-2017 Wolfram Sang <[email protected]> */ #define pr_fmt(fmt) "i2c-core: " fmt #include <dt-bindings/i2c/i2c.h> #include <linux/acpi.h> #include <linux/clk/clk-conf.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/gpio/consumer.h> #include <linux/i2c.h> #include <linux/i2c-smbus.h> #include <linux/idr.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irqflags.h> #include <linux/jump_label.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/of_device.h> #include <linux/of.h> #include <linux/of_irq.h> #include <linux/pinctrl/consumer.h> #include <linux/pinctrl/devinfo.h> #include <linux/pm_domain.h> #include <linux/pm_runtime.h> #include <linux/pm_wakeirq.h> #include <linux/property.h> #include <linux/rwsem.h> #include <linux/slab.h> #include "i2c-core.h" #define CREATE_TRACE_POINTS #include <trace/events/i2c.h> #define I2C_ADDR_OFFSET_TEN_BIT 0xa000 #define I2C_ADDR_OFFSET_SLAVE 0x1000 #define I2C_ADDR_7BITS_MAX 0x77 #define I2C_ADDR_7BITS_COUNT (I2C_ADDR_7BITS_MAX + 1) #define I2C_ADDR_DEVICE_ID 0x7c /* * core_lock protects i2c_adapter_idr, and guarantees that device detection, * deletion of detected devices are serialized */ static DEFINE_MUTEX(core_lock); static DEFINE_IDR(i2c_adapter_idr); static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver); static DEFINE_STATIC_KEY_FALSE(i2c_trace_msg_key); static bool is_registered; int i2c_transfer_trace_reg(void) { static_branch_inc(&i2c_trace_msg_key); return 0; } void i2c_transfer_trace_unreg(void) { static_branch_dec(&i2c_trace_msg_key); } const char *i2c_freq_mode_string(u32 bus_freq_hz) { switch (bus_freq_hz) { case I2C_MAX_STANDARD_MODE_FREQ: return "Standard Mode (100 kHz)"; case I2C_MAX_FAST_MODE_FREQ: return "Fast Mode (400 kHz)"; case I2C_MAX_FAST_MODE_PLUS_FREQ: return "Fast Mode Plus (1.0 MHz)"; case I2C_MAX_TURBO_MODE_FREQ: return "Turbo Mode (1.4 MHz)"; case I2C_MAX_HIGH_SPEED_MODE_FREQ: return "High Speed Mode (3.4 MHz)"; case I2C_MAX_ULTRA_FAST_MODE_FREQ: return "Ultra Fast Mode (5.0 MHz)"; default: return "Unknown Mode"; } } EXPORT_SYMBOL_GPL(i2c_freq_mode_string); const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id, const struct i2c_client *client) { if (!(id && client)) return NULL; while (id->name[0]) { if (strcmp(client->name, id->name) == 0) return id; id++; } return NULL; } EXPORT_SYMBOL_GPL(i2c_match_id); const void *i2c_get_match_data(const struct i2c_client *client) { struct i2c_driver *driver = to_i2c_driver(client->dev.driver); const struct i2c_device_id *match; const void *data; data = device_get_match_data(&client->dev); if (!data) { match = i2c_match_id(driver->id_table, client); if (!match) return NULL; data = (const void *)match->driver_data; } return data; } EXPORT_SYMBOL(i2c_get_match_data); static int i2c_device_match(struct device *dev, struct device_driver *drv) { struct i2c_client *client = i2c_verify_client(dev); struct i2c_driver *driver; /* Attempt an OF style match */ if (i2c_of_match_device(drv->of_match_table, client)) return 1; /* Then ACPI style match */ if (acpi_driver_match_device(dev, drv)) return 1; driver = to_i2c_driver(drv); /* Finally an 
I2C match */
	if (i2c_match_id(driver->id_table, client))
		return 1;

	return 0;
}

static int i2c_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct i2c_client *client = to_i2c_client(dev);
	int rc;

	rc = of_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	return add_uevent_var(env, "MODALIAS=%s%s", I2C_MODULE_PREFIX, client->name);
}

/* i2c bus recovery routines */
static int get_scl_gpio_value(struct i2c_adapter *adap)
{
	return gpiod_get_value_cansleep(adap->bus_recovery_info->scl_gpiod);
}

static void set_scl_gpio_value(struct i2c_adapter *adap, int val)
{
	gpiod_set_value_cansleep(adap->bus_recovery_info->scl_gpiod, val);
}

static int get_sda_gpio_value(struct i2c_adapter *adap)
{
	return gpiod_get_value_cansleep(adap->bus_recovery_info->sda_gpiod);
}

static void set_sda_gpio_value(struct i2c_adapter *adap, int val)
{
	gpiod_set_value_cansleep(adap->bus_recovery_info->sda_gpiod, val);
}

static int i2c_generic_bus_free(struct i2c_adapter *adap)
{
	struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
	int ret = -EOPNOTSUPP;

	if (bri->get_bus_free)
		ret = bri->get_bus_free(adap);
	else if (bri->get_sda)
		ret = bri->get_sda(adap);

	if (ret < 0)
		return ret;

	return ret ? 0 : -EBUSY;
}

/*
 * We are generating clock pulses. ndelay() determines the duration of the
 * clock pulses. We will generate a 100 kHz clock, so the duration of each
 * clock level is: delay in ns = (10^9 / 100000) / 2 = 5000
 */
#define RECOVERY_NDELAY 5000
#define RECOVERY_CLK_CNT 9

int i2c_generic_scl_recovery(struct i2c_adapter *adap)
{
	struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
	int i = 0, scl = 1, ret = 0;

	if (bri->prepare_recovery)
		bri->prepare_recovery(adap);
	if (bri->pinctrl)
		pinctrl_select_state(bri->pinctrl, bri->pins_gpio);

	/*
	 * If we can set SDA, we will always create a STOP to ensure additional
	 * pulses will do no harm. This is achieved by letting SDA follow SCL
	 * half a cycle later. Check the 'incomplete_write_byte' fault injector
	 * for details. Note that we must honour tsu:sto, 4us, but let's use
	 * 5us here for simplicity.
*/ bri->set_scl(adap, scl); ndelay(RECOVERY_NDELAY); if (bri->set_sda) bri->set_sda(adap, scl); ndelay(RECOVERY_NDELAY / 2); /* * By this time SCL is high, as we need to give 9 falling-rising edges */ while (i++ < RECOVERY_CLK_CNT * 2) { if (scl) { /* SCL shouldn't be low here */ if (!bri->get_scl(adap)) { dev_err(&adap->dev, "SCL is stuck low, exit recovery\n"); ret = -EBUSY; break; } } scl = !scl; bri->set_scl(adap, scl); /* Creating STOP again, see above */ if (scl) { /* Honour minimum tsu:sto */ ndelay(RECOVERY_NDELAY); } else { /* Honour minimum tf and thd:dat */ ndelay(RECOVERY_NDELAY / 2); } if (bri->set_sda) bri->set_sda(adap, scl); ndelay(RECOVERY_NDELAY / 2); if (scl) { ret = i2c_generic_bus_free(adap); if (ret == 0) break; } } /* If we can't check bus status, assume recovery worked */ if (ret == -EOPNOTSUPP) ret = 0; if (bri->unprepare_recovery) bri->unprepare_recovery(adap); if (bri->pinctrl) pinctrl_select_state(bri->pinctrl, bri->pins_default); return ret; } EXPORT_SYMBOL_GPL(i2c_generic_scl_recovery); int i2c_recover_bus(struct i2c_adapter *adap) { if (!adap->bus_recovery_info) return -EBUSY; dev_dbg(&adap->dev, "Trying i2c bus recovery\n"); return adap->bus_recovery_info->recover_bus(adap); } EXPORT_SYMBOL_GPL(i2c_recover_bus); static void i2c_gpio_init_pinctrl_recovery(struct i2c_adapter *adap) { struct i2c_bus_recovery_info *bri = adap->bus_recovery_info; struct device *dev = &adap->dev; struct pinctrl *p = bri->pinctrl ?: dev_pinctrl(dev->parent); bri->pinctrl = p; /* * we can't change states without pinctrl, so remove the states if * populated */ if (!p) { bri->pins_default = NULL; bri->pins_gpio = NULL; return; } if (!bri->pins_default) { bri->pins_default = pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT); if (IS_ERR(bri->pins_default)) { dev_dbg(dev, PINCTRL_STATE_DEFAULT " state not found for GPIO recovery\n"); bri->pins_default = NULL; } } if (!bri->pins_gpio) { bri->pins_gpio = pinctrl_lookup_state(p, "gpio"); if (IS_ERR(bri->pins_gpio)) bri->pins_gpio = pinctrl_lookup_state(p, "recovery"); if (IS_ERR(bri->pins_gpio)) { dev_dbg(dev, "no gpio or recovery state found for GPIO recovery\n"); bri->pins_gpio = NULL; } } /* for pinctrl state changes, we need all the information */ if (bri->pins_default && bri->pins_gpio) { dev_info(dev, "using pinctrl states for GPIO recovery"); } else { bri->pinctrl = NULL; bri->pins_default = NULL; bri->pins_gpio = NULL; } } static int i2c_gpio_init_generic_recovery(struct i2c_adapter *adap) { struct i2c_bus_recovery_info *bri = adap->bus_recovery_info; struct device *dev = &adap->dev; struct gpio_desc *gpiod; int ret = 0; /* * don't touch the recovery information if the driver is not using * generic SCL recovery */ if (bri->recover_bus && bri->recover_bus != i2c_generic_scl_recovery) return 0; /* * pins might be taken as GPIO, so we should inform pinctrl about * this and move the state to GPIO */ if (bri->pinctrl) pinctrl_select_state(bri->pinctrl, bri->pins_gpio); /* * if there is incomplete or no recovery information, see if generic * GPIO recovery is available */ if (!bri->scl_gpiod) { gpiod = devm_gpiod_get(dev, "scl", GPIOD_OUT_HIGH_OPEN_DRAIN); if (PTR_ERR(gpiod) == -EPROBE_DEFER) { ret = -EPROBE_DEFER; goto cleanup_pinctrl_state; } if (!IS_ERR(gpiod)) { bri->scl_gpiod = gpiod; bri->recover_bus = i2c_generic_scl_recovery; dev_info(dev, "using generic GPIOs for recovery\n"); } } /* SDA GPIOD line is optional, so we care about DEFER only */ if (!bri->sda_gpiod) { /* * We have SCL. 
Pull SCL low and wait a bit so that SDA glitches * have no effect. */ gpiod_direction_output(bri->scl_gpiod, 0); udelay(10); gpiod = devm_gpiod_get(dev, "sda", GPIOD_IN); /* Wait a bit in case of a SDA glitch, and then release SCL. */ udelay(10); gpiod_direction_output(bri->scl_gpiod, 1); if (PTR_ERR(gpiod) == -EPROBE_DEFER) { ret = -EPROBE_DEFER; goto cleanup_pinctrl_state; } if (!IS_ERR(gpiod)) bri->sda_gpiod = gpiod; } cleanup_pinctrl_state: /* change the state of the pins back to their default state */ if (bri->pinctrl) pinctrl_select_state(bri->pinctrl, bri->pins_default); return ret; } static int i2c_gpio_init_recovery(struct i2c_adapter *adap) { i2c_gpio_init_pinctrl_recovery(adap); return i2c_gpio_init_generic_recovery(adap); } static int i2c_init_recovery(struct i2c_adapter *adap) { struct i2c_bus_recovery_info *bri = adap->bus_recovery_info; bool is_error_level = true; char *err_str; if (!bri) return 0; if (i2c_gpio_init_recovery(adap) == -EPROBE_DEFER) return -EPROBE_DEFER; if (!bri->recover_bus) { err_str = "no suitable method provided"; is_error_level = false; goto err; } if (bri->scl_gpiod && bri->recover_bus == i2c_generic_scl_recovery) { bri->get_scl = get_scl_gpio_value; bri->set_scl = set_scl_gpio_value; if (bri->sda_gpiod) { bri->get_sda = get_sda_gpio_value; /* FIXME: add proper flag instead of '0' once available */ if (gpiod_get_direction(bri->sda_gpiod) == 0) bri->set_sda = set_sda_gpio_value; } } else if (bri->recover_bus == i2c_generic_scl_recovery) { /* Generic SCL recovery */ if (!bri->set_scl || !bri->get_scl) { err_str = "no {get|set}_scl() found"; goto err; } if (!bri->set_sda && !bri->get_sda) { err_str = "either get_sda() or set_sda() needed"; goto err; } } return 0; err: if (is_error_level) dev_err(&adap->dev, "Not using recovery: %s\n", err_str); else dev_dbg(&adap->dev, "Not using recovery: %s\n", err_str); adap->bus_recovery_info = NULL; return -EINVAL; } static int i2c_smbus_host_notify_to_irq(const struct i2c_client *client) { struct i2c_adapter *adap = client->adapter; unsigned int irq; if (!adap->host_notify_domain) return -ENXIO; if (client->flags & I2C_CLIENT_TEN) return -EINVAL; irq = irq_create_mapping(adap->host_notify_domain, client->addr); return irq > 0 ? irq : -ENXIO; } static int i2c_device_probe(struct device *dev) { struct i2c_client *client = i2c_verify_client(dev); struct i2c_driver *driver; bool do_power_on; int status; if (!client) return 0; client->irq = client->init_irq; if (!client->irq) { int irq = -ENOENT; if (client->flags & I2C_CLIENT_HOST_NOTIFY) { dev_dbg(dev, "Using Host Notify IRQ\n"); /* Keep adapter active when Host Notify is required */ pm_runtime_get_sync(&client->adapter->dev); irq = i2c_smbus_host_notify_to_irq(client); } else if (dev->of_node) { irq = of_irq_get_byname(dev->of_node, "irq"); if (irq == -EINVAL || irq == -ENODATA) irq = of_irq_get(dev->of_node, 0); } else if (ACPI_COMPANION(dev)) { bool wake_capable; irq = i2c_acpi_get_irq(client, &wake_capable); if (irq > 0 && wake_capable) client->flags |= I2C_CLIENT_WAKE; } if (irq == -EPROBE_DEFER) { status = irq; goto put_sync_adapter; } if (irq < 0) irq = 0; client->irq = irq; } driver = to_i2c_driver(dev->driver); /* * An I2C ID table is not mandatory, if and only if, a suitable OF * or ACPI ID table is supplied for the probing device. 
*/ if (!driver->id_table && !acpi_driver_match_device(dev, dev->driver) && !i2c_of_match_device(dev->driver->of_match_table, client)) { status = -ENODEV; goto put_sync_adapter; } if (client->flags & I2C_CLIENT_WAKE) { int wakeirq; wakeirq = of_irq_get_byname(dev->of_node, "wakeup"); if (wakeirq == -EPROBE_DEFER) { status = wakeirq; goto put_sync_adapter; } device_init_wakeup(&client->dev, true); if (wakeirq > 0 && wakeirq != client->irq) status = dev_pm_set_dedicated_wake_irq(dev, wakeirq); else if (client->irq > 0) status = dev_pm_set_wake_irq(dev, client->irq); else status = 0; if (status) dev_warn(&client->dev, "failed to set up wakeup irq\n"); } dev_dbg(dev, "probe\n"); status = of_clk_set_defaults(dev->of_node, false); if (status < 0) goto err_clear_wakeup_irq; do_power_on = !i2c_acpi_waive_d0_probe(dev); status = dev_pm_domain_attach(&client->dev, do_power_on); if (status) goto err_clear_wakeup_irq; client->devres_group_id = devres_open_group(&client->dev, NULL, GFP_KERNEL); if (!client->devres_group_id) { status = -ENOMEM; goto err_detach_pm_domain; } if (driver->probe) status = driver->probe(client); else status = -EINVAL; /* * Note that we are not closing the devres group opened above so * even resources that were attached to the device after probe is * run are released when i2c_device_remove() is executed. This is * needed as some drivers would allocate additional resources, * for example when updating firmware. */ if (status) goto err_release_driver_resources; return 0; err_release_driver_resources: devres_release_group(&client->dev, client->devres_group_id); err_detach_pm_domain: dev_pm_domain_detach(&client->dev, do_power_on); err_clear_wakeup_irq: dev_pm_clear_wake_irq(&client->dev); device_init_wakeup(&client->dev, false); put_sync_adapter: if (client->flags & I2C_CLIENT_HOST_NOTIFY) pm_runtime_put_sync(&client->adapter->dev); return status; } static void i2c_device_remove(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct i2c_driver *driver; driver = to_i2c_driver(dev->driver); if (driver->remove) { dev_dbg(dev, "remove\n"); driver->remove(client); } devres_release_group(&client->dev, client->devres_group_id); dev_pm_domain_detach(&client->dev, true); dev_pm_clear_wake_irq(&client->dev); device_init_wakeup(&client->dev, false); client->irq = 0; if (client->flags & I2C_CLIENT_HOST_NOTIFY) pm_runtime_put(&client->adapter->dev); } static void i2c_device_shutdown(struct device *dev) { struct i2c_client *client = i2c_verify_client(dev); struct i2c_driver *driver; if (!client || !dev->driver) return; driver = to_i2c_driver(dev->driver); if (driver->shutdown) driver->shutdown(client); else if (client->irq > 0) disable_irq(client->irq); } static void i2c_client_dev_release(struct device *dev) { kfree(to_i2c_client(dev)); } static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%s\n", dev->type == &i2c_client_type ? 
to_i2c_client(dev)->name : to_i2c_adapter(dev)->name); } static DEVICE_ATTR_RO(name); static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf) { struct i2c_client *client = to_i2c_client(dev); int len; len = of_device_modalias(dev, buf, PAGE_SIZE); if (len != -ENODEV) return len; len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1); if (len != -ENODEV) return len; return sprintf(buf, "%s%s\n", I2C_MODULE_PREFIX, client->name); } static DEVICE_ATTR_RO(modalias); static struct attribute *i2c_dev_attrs[] = { &dev_attr_name.attr, /* modalias helps coldplug: modprobe $(cat .../modalias) */ &dev_attr_modalias.attr, NULL }; ATTRIBUTE_GROUPS(i2c_dev); struct bus_type i2c_bus_type = { .name = "i2c", .match = i2c_device_match, .probe = i2c_device_probe, .remove = i2c_device_remove, .shutdown = i2c_device_shutdown, }; EXPORT_SYMBOL_GPL(i2c_bus_type); struct device_type i2c_client_type = { .groups = i2c_dev_groups, .uevent = i2c_device_uevent, .release = i2c_client_dev_release, }; EXPORT_SYMBOL_GPL(i2c_client_type); /** * i2c_verify_client - return parameter as i2c_client, or NULL * @dev: device, probably from some driver model iterator * * When traversing the driver model tree, perhaps using driver model * iterators like @device_for_each_child(), you can't assume very much * about the nodes you find. Use this function to avoid oopses caused * by wrongly treating some non-I2C device as an i2c_client. */ struct i2c_client *i2c_verify_client(struct device *dev) { return (dev->type == &i2c_client_type) ? to_i2c_client(dev) : NULL; } EXPORT_SYMBOL(i2c_verify_client); /* Return a unique address which takes the flags of the client into account */ static unsigned short i2c_encode_flags_to_addr(struct i2c_client *client) { unsigned short addr = client->addr; /* For some client flags, add an arbitrary offset to avoid collisions */ if (client->flags & I2C_CLIENT_TEN) addr |= I2C_ADDR_OFFSET_TEN_BIT; if (client->flags & I2C_CLIENT_SLAVE) addr |= I2C_ADDR_OFFSET_SLAVE; return addr; } /* This is a permissive address validity check, I2C address map constraints * are purposely not enforced, except for the general call address. */ static int i2c_check_addr_validity(unsigned int addr, unsigned short flags) { if (flags & I2C_CLIENT_TEN) { /* 10-bit address, all values are valid */ if (addr > 0x3ff) return -EINVAL; } else { /* 7-bit address, reject the general call address */ if (addr == 0x00 || addr > 0x7f) return -EINVAL; } return 0; } /* And this is a strict address validity check, used when probing. If a * device uses a reserved address, then it shouldn't be probed. 7-bit * addressing is assumed, 10-bit address devices are rare and should be * explicitly enumerated. 
*/ int i2c_check_7bit_addr_validity_strict(unsigned short addr) { /* * Reserved addresses per I2C specification: * 0x00 General call address / START byte * 0x01 CBUS address * 0x02 Reserved for different bus format * 0x03 Reserved for future purposes * 0x04-0x07 Hs-mode master code * 0x78-0x7b 10-bit slave addressing * 0x7c-0x7f Reserved for future purposes */ if (addr < 0x08 || addr > 0x77) return -EINVAL; return 0; } static int __i2c_check_addr_busy(struct device *dev, void *addrp) { struct i2c_client *client = i2c_verify_client(dev); int addr = *(int *)addrp; if (client && i2c_encode_flags_to_addr(client) == addr) return -EBUSY; return 0; } /* walk up mux tree */ static int i2c_check_mux_parents(struct i2c_adapter *adapter, int addr) { struct i2c_adapter *parent = i2c_parent_is_i2c_adapter(adapter); int result; result = device_for_each_child(&adapter->dev, &addr, __i2c_check_addr_busy); if (!result && parent) result = i2c_check_mux_parents(parent, addr); return result; } /* recurse down mux tree */ static int i2c_check_mux_children(struct device *dev, void *addrp) { int result; if (dev->type == &i2c_adapter_type) result = device_for_each_child(dev, addrp, i2c_check_mux_children); else result = __i2c_check_addr_busy(dev, addrp); return result; } static int i2c_check_addr_busy(struct i2c_adapter *adapter, int addr) { struct i2c_adapter *parent = i2c_parent_is_i2c_adapter(adapter); int result = 0; if (parent) result = i2c_check_mux_parents(parent, addr); if (!result) result = device_for_each_child(&adapter->dev, &addr, i2c_check_mux_children); return result; } /** * i2c_adapter_lock_bus - Get exclusive access to an I2C bus segment * @adapter: Target I2C bus segment * @flags: I2C_LOCK_ROOT_ADAPTER locks the root i2c adapter, I2C_LOCK_SEGMENT * locks only this branch in the adapter tree */ static void i2c_adapter_lock_bus(struct i2c_adapter *adapter, unsigned int flags) { rt_mutex_lock_nested(&adapter->bus_lock, i2c_adapter_depth(adapter)); } /** * i2c_adapter_trylock_bus - Try to get exclusive access to an I2C bus segment * @adapter: Target I2C bus segment * @flags: I2C_LOCK_ROOT_ADAPTER trylocks the root i2c adapter, I2C_LOCK_SEGMENT * trylocks only this branch in the adapter tree */ static int i2c_adapter_trylock_bus(struct i2c_adapter *adapter, unsigned int flags) { return rt_mutex_trylock(&adapter->bus_lock); } /** * i2c_adapter_unlock_bus - Release exclusive access to an I2C bus segment * @adapter: Target I2C bus segment * @flags: I2C_LOCK_ROOT_ADAPTER unlocks the root i2c adapter, I2C_LOCK_SEGMENT * unlocks only this branch in the adapter tree */ static void i2c_adapter_unlock_bus(struct i2c_adapter *adapter, unsigned int flags) { rt_mutex_unlock(&adapter->bus_lock); } static void i2c_dev_set_name(struct i2c_adapter *adap, struct i2c_client *client, struct i2c_board_info const *info) { struct acpi_device *adev = ACPI_COMPANION(&client->dev); if (info && info->dev_name) { dev_set_name(&client->dev, "i2c-%s", info->dev_name); return; } if (adev) { dev_set_name(&client->dev, "i2c-%s", acpi_dev_name(adev)); return; } dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap), i2c_encode_flags_to_addr(client)); } int i2c_dev_irq_from_resources(const struct resource *resources, unsigned int num_resources) { struct irq_data *irqd; int i; for (i = 0; i < num_resources; i++) { const struct resource *r = &resources[i]; if (resource_type(r) != IORESOURCE_IRQ) continue; if (r->flags & IORESOURCE_BITS) { irqd = irq_get_irq_data(r->start); if (!irqd) break; irqd_set_trigger_type(irqd, r->flags & 
IORESOURCE_BITS); } return r->start; } return 0; } /** * i2c_new_client_device - instantiate an i2c device * @adap: the adapter managing the device * @info: describes one I2C device; bus_num is ignored * Context: can sleep * * Create an i2c device. Binding is handled through driver model * probe()/remove() methods. A driver may be bound to this device when we * return from this function, or any later moment (e.g. maybe hotplugging will * load the driver module). This call is not appropriate for use by mainboard * initialization logic, which usually runs during an arch_initcall() long * before any i2c_adapter could exist. * * This returns the new i2c client, which may be saved for later use with * i2c_unregister_device(); or an ERR_PTR to describe the error. */ struct i2c_client * i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *info) { struct i2c_client *client; int status; client = kzalloc(sizeof *client, GFP_KERNEL); if (!client) return ERR_PTR(-ENOMEM); client->adapter = adap; client->dev.platform_data = info->platform_data; client->flags = info->flags; client->addr = info->addr; client->init_irq = info->irq; if (!client->init_irq) client->init_irq = i2c_dev_irq_from_resources(info->resources, info->num_resources); strscpy(client->name, info->type, sizeof(client->name)); status = i2c_check_addr_validity(client->addr, client->flags); if (status) { dev_err(&adap->dev, "Invalid %d-bit I2C address 0x%02hx\n", client->flags & I2C_CLIENT_TEN ? 10 : 7, client->addr); goto out_err_silent; } /* Check for address business */ status = i2c_check_addr_busy(adap, i2c_encode_flags_to_addr(client)); if (status) goto out_err; client->dev.parent = &client->adapter->dev; client->dev.bus = &i2c_bus_type; client->dev.type = &i2c_client_type; client->dev.of_node = of_node_get(info->of_node); client->dev.fwnode = info->fwnode; device_enable_async_suspend(&client->dev); i2c_dev_set_name(adap, client, info); if (info->swnode) { status = device_add_software_node(&client->dev, info->swnode); if (status) { dev_err(&adap->dev, "Failed to add software node to client %s: %d\n", client->name, status); goto out_err_put_of_node; } } status = device_register(&client->dev); if (status) goto out_remove_swnode; dev_dbg(&adap->dev, "client [%s] registered with bus id %s\n", client->name, dev_name(&client->dev)); return client; out_remove_swnode: device_remove_software_node(&client->dev); out_err_put_of_node: of_node_put(info->of_node); out_err: dev_err(&adap->dev, "Failed to register i2c client %s at 0x%02x (%d)\n", client->name, client->addr, status); out_err_silent: kfree(client); return ERR_PTR(status); } EXPORT_SYMBOL_GPL(i2c_new_client_device); /** * i2c_unregister_device - reverse effect of i2c_new_*_device() * @client: value returned from i2c_new_*_device() * Context: can sleep */ void i2c_unregister_device(struct i2c_client *client) { if (IS_ERR_OR_NULL(client)) return; if (client->dev.of_node) { of_node_clear_flag(client->dev.of_node, OF_POPULATED); of_node_put(client->dev.of_node); } if (ACPI_COMPANION(&client->dev)) acpi_device_clear_enumerated(ACPI_COMPANION(&client->dev)); device_remove_software_node(&client->dev); device_unregister(&client->dev); } EXPORT_SYMBOL_GPL(i2c_unregister_device); /** * i2c_find_device_by_fwnode() - find an i2c_client for the fwnode * @fwnode: &struct fwnode_handle corresponding to the &struct i2c_client * * Look up and return the &struct i2c_client corresponding to the @fwnode. * If no client can be found, or @fwnode is NULL, this returns NULL. 
* * The user must call put_device(&client->dev) once done with the i2c client. */ struct i2c_client *i2c_find_device_by_fwnode(struct fwnode_handle *fwnode) { struct i2c_client *client; struct device *dev; if (!fwnode) return NULL; dev = bus_find_device_by_fwnode(&i2c_bus_type, fwnode); if (!dev) return NULL; client = i2c_verify_client(dev); if (!client) put_device(dev); return client; } EXPORT_SYMBOL(i2c_find_device_by_fwnode); static const struct i2c_device_id dummy_id[] = { { "dummy", 0 }, { }, }; static int dummy_probe(struct i2c_client *client) { return 0; } static struct i2c_driver dummy_driver = { .driver.name = "dummy", .probe = dummy_probe, .id_table = dummy_id, }; /** * i2c_new_dummy_device - return a new i2c device bound to a dummy driver * @adapter: the adapter managing the device * @address: seven bit address to be used * Context: can sleep * * This returns an I2C client bound to the "dummy" driver, intended for use * with devices that consume multiple addresses. Examples of such chips * include various EEPROMS (like 24c04 and 24c08 models). * * These dummy devices have two main uses. First, most I2C and SMBus calls * except i2c_transfer() need a client handle; the dummy will be that handle. * And second, this prevents the specified address from being bound to a * different driver. * * This returns the new i2c client, which should be saved for later use with * i2c_unregister_device(); or an ERR_PTR to describe the error. */ struct i2c_client *i2c_new_dummy_device(struct i2c_adapter *adapter, u16 address) { struct i2c_board_info info = { I2C_BOARD_INFO("dummy", address), }; return i2c_new_client_device(adapter, &info); } EXPORT_SYMBOL_GPL(i2c_new_dummy_device); static void devm_i2c_release_dummy(void *client) { i2c_unregister_device(client); } /** * devm_i2c_new_dummy_device - return a new i2c device bound to a dummy driver * @dev: device the managed resource is bound to * @adapter: the adapter managing the device * @address: seven bit address to be used * Context: can sleep * * This is the device-managed version of @i2c_new_dummy_device. It returns the * new i2c client or an ERR_PTR in case of an error. */ struct i2c_client *devm_i2c_new_dummy_device(struct device *dev, struct i2c_adapter *adapter, u16 address) { struct i2c_client *client; int ret; client = i2c_new_dummy_device(adapter, address); if (IS_ERR(client)) return client; ret = devm_add_action_or_reset(dev, devm_i2c_release_dummy, client); if (ret) return ERR_PTR(ret); return client; } EXPORT_SYMBOL_GPL(devm_i2c_new_dummy_device); /** * i2c_new_ancillary_device - Helper to get the instantiated secondary address * and create the associated device * @client: Handle to the primary client * @name: Handle to specify which secondary address to get * @default_addr: Used as a fallback if no secondary address was specified * Context: can sleep * * I2C clients can be composed of multiple I2C slaves bound together in a single * component. The I2C client driver then binds to the master I2C slave and needs * to create I2C dummy clients to communicate with all the other slaves. * * This function creates and returns an I2C dummy client whose I2C address is * retrieved from the platform firmware based on the given slave name. If no * address is specified by the firmware default_addr is used. * * On DT-based platforms the address is retrieved from the "reg" property entry * cell whose "reg-names" value matches the slave name. 
* * This returns the new i2c client, which should be saved for later use with * i2c_unregister_device(); or an ERR_PTR to describe the error. */ struct i2c_client *i2c_new_ancillary_device(struct i2c_client *client, const char *name, u16 default_addr) { struct device_node *np = client->dev.of_node; u32 addr = default_addr; int i; if (np) { i = of_property_match_string(np, "reg-names", name); if (i >= 0) of_property_read_u32_index(np, "reg", i, &addr); } dev_dbg(&client->adapter->dev, "Address for %s : 0x%x\n", name, addr); return i2c_new_dummy_device(client->adapter, addr); } EXPORT_SYMBOL_GPL(i2c_new_ancillary_device); /* ------------------------------------------------------------------------- */ /* I2C bus adapters -- one roots each I2C or SMBUS segment */ static void i2c_adapter_dev_release(struct device *dev) { struct i2c_adapter *adap = to_i2c_adapter(dev); complete(&adap->dev_released); } unsigned int i2c_adapter_depth(struct i2c_adapter *adapter) { unsigned int depth = 0; while ((adapter = i2c_parent_is_i2c_adapter(adapter))) depth++; WARN_ONCE(depth >= MAX_LOCKDEP_SUBCLASSES, "adapter depth exceeds lockdep subclass limit\n"); return depth; } EXPORT_SYMBOL_GPL(i2c_adapter_depth); /* * Let users instantiate I2C devices through sysfs. This can be used when * platform initialization code doesn't contain the proper data for * whatever reason. Also useful for drivers that do device detection and * detection fails, either because the device uses an unexpected address, * or this is a compatible device with different ID register values. * * Parameter checking may look overzealous, but we really don't want * the user to provide incorrect parameters. */ static ssize_t new_device_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_adapter *adap = to_i2c_adapter(dev); struct i2c_board_info info; struct i2c_client *client; char *blank, end; int res; memset(&info, 0, sizeof(struct i2c_board_info)); blank = strchr(buf, ' '); if (!blank) { dev_err(dev, "%s: Missing parameters\n", "new_device"); return -EINVAL; } if (blank - buf > I2C_NAME_SIZE - 1) { dev_err(dev, "%s: Invalid device name\n", "new_device"); return -EINVAL; } memcpy(info.type, buf, blank - buf); /* Parse remaining parameters, reject extra parameters */ res = sscanf(++blank, "%hi%c", &info.addr, &end); if (res < 1) { dev_err(dev, "%s: Can't parse I2C address\n", "new_device"); return -EINVAL; } if (res > 1 && end != '\n') { dev_err(dev, "%s: Extra parameters\n", "new_device"); return -EINVAL; } if ((info.addr & I2C_ADDR_OFFSET_TEN_BIT) == I2C_ADDR_OFFSET_TEN_BIT) { info.addr &= ~I2C_ADDR_OFFSET_TEN_BIT; info.flags |= I2C_CLIENT_TEN; } if (info.addr & I2C_ADDR_OFFSET_SLAVE) { info.addr &= ~I2C_ADDR_OFFSET_SLAVE; info.flags |= I2C_CLIENT_SLAVE; } client = i2c_new_client_device(adap, &info); if (IS_ERR(client)) return PTR_ERR(client); /* Keep track of the added device */ mutex_lock(&adap->userspace_clients_lock); list_add_tail(&client->detected, &adap->userspace_clients); mutex_unlock(&adap->userspace_clients_lock); dev_info(dev, "%s: Instantiated device %s at 0x%02hx\n", "new_device", info.type, info.addr); return count; } static DEVICE_ATTR_WO(new_device); /* * And of course let the users delete the devices they instantiated, if * they got it wrong. This interface can only be used to delete devices * instantiated by i2c_sysfs_new_device above. This guarantees that we * don't delete devices to which some kernel code still has references. 
* * Parameter checking may look overzealous, but we really don't want * the user to delete the wrong device. */ static ssize_t delete_device_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_adapter *adap = to_i2c_adapter(dev); struct i2c_client *client, *next; unsigned short addr; char end; int res; /* Parse parameters, reject extra parameters */ res = sscanf(buf, "%hi%c", &addr, &end); if (res < 1) { dev_err(dev, "%s: Can't parse I2C address\n", "delete_device"); return -EINVAL; } if (res > 1 && end != '\n') { dev_err(dev, "%s: Extra parameters\n", "delete_device"); return -EINVAL; } /* Make sure the device was added through sysfs */ res = -ENOENT; mutex_lock_nested(&adap->userspace_clients_lock, i2c_adapter_depth(adap)); list_for_each_entry_safe(client, next, &adap->userspace_clients, detected) { if (i2c_encode_flags_to_addr(client) == addr) { dev_info(dev, "%s: Deleting device %s at 0x%02hx\n", "delete_device", client->name, client->addr); list_del(&client->detected); i2c_unregister_device(client); res = count; break; } } mutex_unlock(&adap->userspace_clients_lock); if (res < 0) dev_err(dev, "%s: Can't find device in list\n", "delete_device"); return res; } static DEVICE_ATTR_IGNORE_LOCKDEP(delete_device, S_IWUSR, NULL, delete_device_store); static struct attribute *i2c_adapter_attrs[] = { &dev_attr_name.attr, &dev_attr_new_device.attr, &dev_attr_delete_device.attr, NULL }; ATTRIBUTE_GROUPS(i2c_adapter); struct device_type i2c_adapter_type = { .groups = i2c_adapter_groups, .release = i2c_adapter_dev_release, }; EXPORT_SYMBOL_GPL(i2c_adapter_type); /** * i2c_verify_adapter - return parameter as i2c_adapter or NULL * @dev: device, probably from some driver model iterator * * When traversing the driver model tree, perhaps using driver model * iterators like @device_for_each_child(), you can't assume very much * about the nodes you find. Use this function to avoid oopses caused * by wrongly treating some non-I2C device as an i2c_adapter. */ struct i2c_adapter *i2c_verify_adapter(struct device *dev) { return (dev->type == &i2c_adapter_type) ? 
to_i2c_adapter(dev) : NULL; } EXPORT_SYMBOL(i2c_verify_adapter); #ifdef CONFIG_I2C_COMPAT static struct class_compat *i2c_adapter_compat_class; #endif static void i2c_scan_static_board_info(struct i2c_adapter *adapter) { struct i2c_devinfo *devinfo; down_read(&__i2c_board_lock); list_for_each_entry(devinfo, &__i2c_board_list, list) { if (devinfo->busnum == adapter->nr && IS_ERR(i2c_new_client_device(adapter, &devinfo->board_info))) dev_err(&adapter->dev, "Can't create device at 0x%02x\n", devinfo->board_info.addr); } up_read(&__i2c_board_lock); } static int i2c_do_add_adapter(struct i2c_driver *driver, struct i2c_adapter *adap) { /* Detect supported devices on that bus, and instantiate them */ i2c_detect(adap, driver); return 0; } static int __process_new_adapter(struct device_driver *d, void *data) { return i2c_do_add_adapter(to_i2c_driver(d), data); } static const struct i2c_lock_operations i2c_adapter_lock_ops = { .lock_bus = i2c_adapter_lock_bus, .trylock_bus = i2c_adapter_trylock_bus, .unlock_bus = i2c_adapter_unlock_bus, }; static void i2c_host_notify_irq_teardown(struct i2c_adapter *adap) { struct irq_domain *domain = adap->host_notify_domain; irq_hw_number_t hwirq; if (!domain) return; for (hwirq = 0 ; hwirq < I2C_ADDR_7BITS_COUNT ; hwirq++) irq_dispose_mapping(irq_find_mapping(domain, hwirq)); irq_domain_remove(domain); adap->host_notify_domain = NULL; } static int i2c_host_notify_irq_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw_irq_num) { irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_simple_irq); return 0; } static const struct irq_domain_ops i2c_host_notify_irq_ops = { .map = i2c_host_notify_irq_map, }; static int i2c_setup_host_notify_irq_domain(struct i2c_adapter *adap) { struct irq_domain *domain; if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_HOST_NOTIFY)) return 0; domain = irq_domain_create_linear(adap->dev.parent->fwnode, I2C_ADDR_7BITS_COUNT, &i2c_host_notify_irq_ops, adap); if (!domain) return -ENOMEM; adap->host_notify_domain = domain; return 0; } /** * i2c_handle_smbus_host_notify - Forward a Host Notify event to the correct * I2C client. * @adap: the adapter * @addr: the I2C address of the notifying device * Context: can't sleep * * Helper function to be called from an I2C bus driver's interrupt * handler. It will schedule the Host Notify IRQ. 
 */
int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr)
{
	int irq;

	if (!adap)
		return -EINVAL;

	irq = irq_find_mapping(adap->host_notify_domain, addr);
	if (irq <= 0)
		return -ENXIO;

	generic_handle_irq_safe(irq);

	return 0;
}
EXPORT_SYMBOL_GPL(i2c_handle_smbus_host_notify);

static int i2c_register_adapter(struct i2c_adapter *adap)
{
	int res = -EINVAL;

	/* Can't register until after driver model init */
	if (WARN_ON(!is_registered)) {
		res = -EAGAIN;
		goto out_list;
	}

	/* Sanity checks */
	if (WARN(!adap->name[0], "i2c adapter has no name"))
		goto out_list;

	if (!adap->algo) {
		pr_err("adapter '%s': no algo supplied!\n", adap->name);
		goto out_list;
	}

	if (!adap->lock_ops)
		adap->lock_ops = &i2c_adapter_lock_ops;

	adap->locked_flags = 0;
	rt_mutex_init(&adap->bus_lock);
	rt_mutex_init(&adap->mux_lock);
	mutex_init(&adap->userspace_clients_lock);
	INIT_LIST_HEAD(&adap->userspace_clients);

	/* Set default timeout to 1 second if not already set */
	if (adap->timeout == 0)
		adap->timeout = HZ;

	/* register soft irqs for Host Notify */
	res = i2c_setup_host_notify_irq_domain(adap);
	if (res) {
		pr_err("adapter '%s': can't create Host Notify IRQs (%d)\n",
		       adap->name, res);
		goto out_list;
	}

	dev_set_name(&adap->dev, "i2c-%d", adap->nr);
	adap->dev.bus = &i2c_bus_type;
	adap->dev.type = &i2c_adapter_type;
	res = device_register(&adap->dev);
	if (res) {
		pr_err("adapter '%s': can't register device (%d)\n",
		       adap->name, res);
		goto out_list;
	}

	res = i2c_setup_smbus_alert(adap);
	if (res)
		goto out_reg;

	device_enable_async_suspend(&adap->dev);
	pm_runtime_no_callbacks(&adap->dev);
	pm_suspend_ignore_children(&adap->dev, true);
	pm_runtime_enable(&adap->dev);

	res = i2c_init_recovery(adap);
	if (res == -EPROBE_DEFER)
		goto out_reg;

	dev_dbg(&adap->dev, "adapter [%s] registered\n", adap->name);

#ifdef CONFIG_I2C_COMPAT
	res = class_compat_create_link(i2c_adapter_compat_class, &adap->dev,
				       adap->dev.parent);
	if (res)
		dev_warn(&adap->dev,
			 "Failed to create compatibility class link\n");
#endif

	/* create pre-declared device nodes */
	of_i2c_register_devices(adap);
	i2c_acpi_install_space_handler(adap);
	i2c_acpi_register_devices(adap);

	if (adap->nr < __i2c_first_dynamic_bus_num)
		i2c_scan_static_board_info(adap);

	/* Notify drivers */
	mutex_lock(&core_lock);
	bus_for_each_drv(&i2c_bus_type, NULL, adap, __process_new_adapter);
	mutex_unlock(&core_lock);

	return 0;

out_reg:
	init_completion(&adap->dev_released);
	device_unregister(&adap->dev);
	wait_for_completion(&adap->dev_released);
out_list:
	mutex_lock(&core_lock);
	idr_remove(&i2c_adapter_idr, adap->nr);
	mutex_unlock(&core_lock);
	return res;
}

/**
 * __i2c_add_numbered_adapter - i2c_add_numbered_adapter where nr is never -1
 * @adap: the adapter to register (with adap->nr initialized)
 * Context: can sleep
 *
 * See i2c_add_numbered_adapter() for details.
 */
static int __i2c_add_numbered_adapter(struct i2c_adapter *adap)
{
	int id;

	mutex_lock(&core_lock);
	id = idr_alloc(&i2c_adapter_idr, adap, adap->nr, adap->nr + 1,
		       GFP_KERNEL);
	mutex_unlock(&core_lock);
	if (WARN(id < 0, "couldn't get idr"))
		return id == -ENOSPC ? -EBUSY : id;

	return i2c_register_adapter(adap);
}

/**
 * i2c_add_adapter - declare i2c adapter, use dynamic bus number
 * @adapter: the adapter to add
 * Context: can sleep
 *
 * This routine is used to declare an I2C adapter when its bus number
 * doesn't matter or when its bus number is specified by a DT alias.
 * Examples of cases where the bus number doesn't matter: I2C adapters
 * dynamically added by USB links or PCI plugin cards.
 *
 * When this returns zero, a new bus number was allocated and stored
 * in adap->nr, and the specified adapter became available for clients.
 * Otherwise, a negative errno value is returned.
 */
int i2c_add_adapter(struct i2c_adapter *adapter)
{
	struct device *dev = &adapter->dev;
	int id;

	if (dev->of_node) {
		id = of_alias_get_id(dev->of_node, "i2c");
		if (id >= 0) {
			adapter->nr = id;
			return __i2c_add_numbered_adapter(adapter);
		}
	}

	mutex_lock(&core_lock);
	id = idr_alloc(&i2c_adapter_idr, adapter,
		       __i2c_first_dynamic_bus_num, 0, GFP_KERNEL);
	mutex_unlock(&core_lock);
	if (WARN(id < 0, "couldn't get idr"))
		return id;

	adapter->nr = id;

	return i2c_register_adapter(adapter);
}
EXPORT_SYMBOL(i2c_add_adapter);

/**
 * i2c_add_numbered_adapter - declare i2c adapter, use static bus number
 * @adap: the adapter to register (with adap->nr initialized)
 * Context: can sleep
 *
 * This routine is used to declare an I2C adapter when its bus number
 * matters. For example, use it for I2C adapters from system-on-chip CPUs,
 * or otherwise built into the system's mainboard, and where i2c_board_info
 * is used to properly configure I2C devices.
 *
 * If the requested bus number is set to -1, then this function will behave
 * identically to i2c_add_adapter, and will dynamically assign a bus number.
 *
 * If no devices have been pre-declared for this bus, then be sure to
 * register the adapter before any dynamically allocated ones. Otherwise
 * the required bus ID may not be available.
 *
 * When this returns zero, the specified adapter became available for
 * clients using the bus number provided in adap->nr. Also, the table
 * of I2C devices pre-declared using i2c_register_board_info() is scanned,
 * and the appropriate driver model device nodes are created. Otherwise, a
 * negative errno value is returned.
 */
int i2c_add_numbered_adapter(struct i2c_adapter *adap)
{
	if (adap->nr == -1) /* -1 means dynamically assign bus id */
		return i2c_add_adapter(adap);

	return __i2c_add_numbered_adapter(adap);
}
EXPORT_SYMBOL_GPL(i2c_add_numbered_adapter);

static void i2c_do_del_adapter(struct i2c_driver *driver,
			       struct i2c_adapter *adapter)
{
	struct i2c_client *client, *_n;

	/* Remove the devices we created ourselves as the result of hardware
	 * probing (using a driver's detect method) */
	list_for_each_entry_safe(client, _n, &driver->clients, detected) {
		if (client->adapter == adapter) {
			dev_dbg(&adapter->dev, "Removing %s at 0x%x\n",
				client->name, client->addr);
			list_del(&client->detected);
			i2c_unregister_device(client);
		}
	}
}

static int __unregister_client(struct device *dev, void *dummy)
{
	struct i2c_client *client = i2c_verify_client(dev);

	if (client && strcmp(client->name, "dummy"))
		i2c_unregister_device(client);
	return 0;
}

static int __unregister_dummy(struct device *dev, void *dummy)
{
	struct i2c_client *client = i2c_verify_client(dev);

	i2c_unregister_device(client);
	return 0;
}

static int __process_removed_adapter(struct device_driver *d, void *data)
{
	i2c_do_del_adapter(to_i2c_driver(d), data);
	return 0;
}

/**
 * i2c_del_adapter - unregister I2C adapter
 * @adap: the adapter being unregistered
 * Context: can sleep
 *
 * This unregisters an I2C adapter which was previously registered
 * by @i2c_add_adapter or @i2c_add_numbered_adapter.
*/ void i2c_del_adapter(struct i2c_adapter *adap) { struct i2c_adapter *found; struct i2c_client *client, *next; /* First make sure that this adapter was ever added */ mutex_lock(&core_lock); found = idr_find(&i2c_adapter_idr, adap->nr); mutex_unlock(&core_lock); if (found != adap) { pr_debug("attempting to delete unregistered adapter [%s]\n", adap->name); return; } i2c_acpi_remove_space_handler(adap); /* Tell drivers about this removal */ mutex_lock(&core_lock); bus_for_each_drv(&i2c_bus_type, NULL, adap, __process_removed_adapter); mutex_unlock(&core_lock); /* Remove devices instantiated from sysfs */ mutex_lock_nested(&adap->userspace_clients_lock, i2c_adapter_depth(adap)); list_for_each_entry_safe(client, next, &adap->userspace_clients, detected) { dev_dbg(&adap->dev, "Removing %s at 0x%x\n", client->name, client->addr); list_del(&client->detected); i2c_unregister_device(client); } mutex_unlock(&adap->userspace_clients_lock); /* Detach any active clients. This can't fail, thus we do not * check the returned value. This is a two-pass process, because * we can't remove the dummy devices during the first pass: they * could have been instantiated by real devices wishing to clean * them up properly, so we give them a chance to do that first. */ device_for_each_child(&adap->dev, NULL, __unregister_client); device_for_each_child(&adap->dev, NULL, __unregister_dummy); #ifdef CONFIG_I2C_COMPAT class_compat_remove_link(i2c_adapter_compat_class, &adap->dev, adap->dev.parent); #endif /* device name is gone after device_unregister */ dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name); pm_runtime_disable(&adap->dev); i2c_host_notify_irq_teardown(adap); /* wait until all references to the device are gone * * FIXME: This is old code and should ideally be replaced by an * alternative which results in decoupling the lifetime of the struct * device from the i2c_adapter, like spi or netdev do. Any solution * should be thoroughly tested with DEBUG_KOBJECT_RELEASE enabled! */ init_completion(&adap->dev_released); device_unregister(&adap->dev); wait_for_completion(&adap->dev_released); /* free bus id */ mutex_lock(&core_lock); idr_remove(&i2c_adapter_idr, adap->nr); mutex_unlock(&core_lock); /* Clear the device structure in case this adapter is ever going to be added again */ memset(&adap->dev, 0, sizeof(adap->dev)); } EXPORT_SYMBOL(i2c_del_adapter); static void devm_i2c_del_adapter(void *adapter) { i2c_del_adapter(adapter); } /** * devm_i2c_add_adapter - device-managed variant of i2c_add_adapter() * @dev: managing device for adding this I2C adapter * @adapter: the adapter to add * Context: can sleep * * Add adapter with dynamic bus number, same with i2c_add_adapter() * but the adapter will be auto deleted on driver detach. */ int devm_i2c_add_adapter(struct device *dev, struct i2c_adapter *adapter) { int ret; ret = i2c_add_adapter(adapter); if (ret) return ret; return devm_add_action_or_reset(dev, devm_i2c_del_adapter, adapter); } EXPORT_SYMBOL_GPL(devm_i2c_add_adapter); static int i2c_dev_or_parent_fwnode_match(struct device *dev, const void *data) { if (dev_fwnode(dev) == data) return 1; if (dev->parent && dev_fwnode(dev->parent) == data) return 1; return 0; } /** * i2c_find_adapter_by_fwnode() - find an i2c_adapter for the fwnode * @fwnode: &struct fwnode_handle corresponding to the &struct i2c_adapter * * Look up and return the &struct i2c_adapter corresponding to the @fwnode. * If no adapter can be found, or @fwnode is NULL, this returns NULL. 
 *
 * The user must call put_device(&adapter->dev) once done with the i2c adapter.
 */
struct i2c_adapter *i2c_find_adapter_by_fwnode(struct fwnode_handle *fwnode)
{
	struct i2c_adapter *adapter;
	struct device *dev;

	if (!fwnode)
		return NULL;

	dev = bus_find_device(&i2c_bus_type, NULL, fwnode,
			      i2c_dev_or_parent_fwnode_match);
	if (!dev)
		return NULL;

	adapter = i2c_verify_adapter(dev);
	if (!adapter)
		put_device(dev);

	return adapter;
}
EXPORT_SYMBOL(i2c_find_adapter_by_fwnode);

/**
 * i2c_get_adapter_by_fwnode() - find an i2c_adapter for the fwnode
 * @fwnode: &struct fwnode_handle corresponding to the &struct i2c_adapter
 *
 * Look up and return the &struct i2c_adapter corresponding to the @fwnode,
 * and increment the adapter module's use count. If no adapter can be found,
 * or @fwnode is NULL, this returns NULL.
 *
 * The user must call i2c_put_adapter(adapter) once done with the i2c adapter.
 * Note that this is different from i2c_find_adapter_by_fwnode(), which does
 * not take the module reference.
 */
struct i2c_adapter *i2c_get_adapter_by_fwnode(struct fwnode_handle *fwnode)
{
	struct i2c_adapter *adapter;

	adapter = i2c_find_adapter_by_fwnode(fwnode);
	if (!adapter)
		return NULL;

	if (!try_module_get(adapter->owner)) {
		put_device(&adapter->dev);
		adapter = NULL;
	}

	return adapter;
}
EXPORT_SYMBOL(i2c_get_adapter_by_fwnode);

static void i2c_parse_timing(struct device *dev, char *prop_name,
			     u32 *cur_val_p, u32 def_val, bool use_def)
{
	int ret;

	ret = device_property_read_u32(dev, prop_name, cur_val_p);
	if (ret && use_def)
		*cur_val_p = def_val;

	dev_dbg(dev, "%s: %u\n", prop_name, *cur_val_p);
}

/**
 * i2c_parse_fw_timings - get I2C related timing parameters from firmware
 * @dev: The device to scan for I2C timing properties
 * @t: the i2c_timings struct to be filled with values
 * @use_defaults: bool to use sane defaults derived from the I2C specification
 *		  when properties are not found, otherwise don't update
 *
 * Scan the device for the generic I2C properties describing timing parameters
 * for the signal and fill the given struct with the results. If a property was
 * not found and use_defaults was true, then maximum timings are assumed which
 * are derived from the I2C specification. If use_defaults is false, the
 * existing values are left untouched, so drivers can apply their own defaults
 * before calling this helper. The latter is mainly intended for avoiding
 * regressions of existing drivers which want to switch to this function. New
 * drivers should almost always use the defaults.
 */
void i2c_parse_fw_timings(struct device *dev, struct i2c_timings *t,
			  bool use_defaults)
{
	bool u = use_defaults;
	u32 d;

	i2c_parse_timing(dev, "clock-frequency", &t->bus_freq_hz,
			 I2C_MAX_STANDARD_MODE_FREQ, u);

	d = t->bus_freq_hz <= I2C_MAX_STANDARD_MODE_FREQ ? 1000 :
	    t->bus_freq_hz <= I2C_MAX_FAST_MODE_FREQ ? 300 : 120;
	i2c_parse_timing(dev, "i2c-scl-rising-time-ns", &t->scl_rise_ns, d, u);

	d = t->bus_freq_hz <= I2C_MAX_FAST_MODE_FREQ ?
300 : 120; i2c_parse_timing(dev, "i2c-scl-falling-time-ns", &t->scl_fall_ns, d, u); i2c_parse_timing(dev, "i2c-scl-internal-delay-ns", &t->scl_int_delay_ns, 0, u); i2c_parse_timing(dev, "i2c-sda-falling-time-ns", &t->sda_fall_ns, t->scl_fall_ns, u); i2c_parse_timing(dev, "i2c-sda-hold-time-ns", &t->sda_hold_ns, 0, u); i2c_parse_timing(dev, "i2c-digital-filter-width-ns", &t->digital_filter_width_ns, 0, u); i2c_parse_timing(dev, "i2c-analog-filter-cutoff-frequency", &t->analog_filter_cutoff_freq_hz, 0, u); } EXPORT_SYMBOL_GPL(i2c_parse_fw_timings); /* ------------------------------------------------------------------------- */ int i2c_for_each_dev(void *data, int (*fn)(struct device *dev, void *data)) { int res; mutex_lock(&core_lock); res = bus_for_each_dev(&i2c_bus_type, NULL, data, fn); mutex_unlock(&core_lock); return res; } EXPORT_SYMBOL_GPL(i2c_for_each_dev); static int __process_new_driver(struct device *dev, void *data) { if (dev->type != &i2c_adapter_type) return 0; return i2c_do_add_adapter(data, to_i2c_adapter(dev)); } /* * An i2c_driver is used with one or more i2c_client (device) nodes to access * i2c slave chips, on a bus instance associated with some i2c_adapter. */ int i2c_register_driver(struct module *owner, struct i2c_driver *driver) { int res; /* Can't register until after driver model init */ if (WARN_ON(!is_registered)) return -EAGAIN; /* add the driver to the list of i2c drivers in the driver core */ driver->driver.owner = owner; driver->driver.bus = &i2c_bus_type; INIT_LIST_HEAD(&driver->clients); /* When registration returns, the driver core * will have called probe() for all matching-but-unbound devices. */ res = driver_register(&driver->driver); if (res) return res; pr_debug("driver [%s] registered\n", driver->driver.name); /* Walk the adapters that are already present */ i2c_for_each_dev(driver, __process_new_driver); return 0; } EXPORT_SYMBOL(i2c_register_driver); static int __process_removed_driver(struct device *dev, void *data) { if (dev->type == &i2c_adapter_type) i2c_do_del_adapter(data, to_i2c_adapter(dev)); return 0; } /** * i2c_del_driver - unregister I2C driver * @driver: the driver being unregistered * Context: can sleep */ void i2c_del_driver(struct i2c_driver *driver) { i2c_for_each_dev(driver, __process_removed_driver); driver_unregister(&driver->driver); pr_debug("driver [%s] unregistered\n", driver->driver.name); } EXPORT_SYMBOL(i2c_del_driver); /* ------------------------------------------------------------------------- */ struct i2c_cmd_arg { unsigned cmd; void *arg; }; static int i2c_cmd(struct device *dev, void *_arg) { struct i2c_client *client = i2c_verify_client(dev); struct i2c_cmd_arg *arg = _arg; struct i2c_driver *driver; if (!client || !client->dev.driver) return 0; driver = to_i2c_driver(client->dev.driver); if (driver->command) driver->command(client, arg->cmd, arg->arg); return 0; } void i2c_clients_command(struct i2c_adapter *adap, unsigned int cmd, void *arg) { struct i2c_cmd_arg cmd_arg; cmd_arg.cmd = cmd; cmd_arg.arg = arg; device_for_each_child(&adap->dev, &cmd_arg, i2c_cmd); } EXPORT_SYMBOL(i2c_clients_command); static int __init i2c_init(void) { int retval; retval = of_alias_get_highest_id("i2c"); down_write(&__i2c_board_lock); if (retval >= __i2c_first_dynamic_bus_num) __i2c_first_dynamic_bus_num = retval + 1; up_write(&__i2c_board_lock); retval = bus_register(&i2c_bus_type); if (retval) return retval; is_registered = true; #ifdef CONFIG_I2C_COMPAT i2c_adapter_compat_class = class_compat_register("i2c-adapter"); if 
(!i2c_adapter_compat_class) { retval = -ENOMEM; goto bus_err; } #endif retval = i2c_add_driver(&dummy_driver); if (retval) goto class_err; if (IS_ENABLED(CONFIG_OF_DYNAMIC)) WARN_ON(of_reconfig_notifier_register(&i2c_of_notifier)); if (IS_ENABLED(CONFIG_ACPI)) WARN_ON(acpi_reconfig_notifier_register(&i2c_acpi_notifier)); return 0; class_err: #ifdef CONFIG_I2C_COMPAT class_compat_unregister(i2c_adapter_compat_class); bus_err: #endif is_registered = false; bus_unregister(&i2c_bus_type); return retval; } static void __exit i2c_exit(void) { if (IS_ENABLED(CONFIG_ACPI)) WARN_ON(acpi_reconfig_notifier_unregister(&i2c_acpi_notifier)); if (IS_ENABLED(CONFIG_OF_DYNAMIC)) WARN_ON(of_reconfig_notifier_unregister(&i2c_of_notifier)); i2c_del_driver(&dummy_driver); #ifdef CONFIG_I2C_COMPAT class_compat_unregister(i2c_adapter_compat_class); #endif bus_unregister(&i2c_bus_type); tracepoint_synchronize_unregister(); } /* We must initialize early, because some subsystems register i2c drivers * in subsys_initcall() code, but are linked (and initialized) before i2c. */ postcore_initcall(i2c_init); module_exit(i2c_exit); /* ---------------------------------------------------- * the functional interface to the i2c busses. * ---------------------------------------------------- */ /* Check if val is exceeding the quirk IFF quirk is non 0 */ #define i2c_quirk_exceeded(val, quirk) ((quirk) && ((val) > (quirk))) static int i2c_quirk_error(struct i2c_adapter *adap, struct i2c_msg *msg, char *err_msg) { dev_err_ratelimited(&adap->dev, "adapter quirk: %s (addr 0x%04x, size %u, %s)\n", err_msg, msg->addr, msg->len, msg->flags & I2C_M_RD ? "read" : "write"); return -EOPNOTSUPP; } static int i2c_check_for_quirks(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { const struct i2c_adapter_quirks *q = adap->quirks; int max_num = q->max_num_msgs, i; bool do_len_check = true; if (q->flags & I2C_AQ_COMB) { max_num = 2; /* special checks for combined messages */ if (num == 2) { if (q->flags & I2C_AQ_COMB_WRITE_FIRST && msgs[0].flags & I2C_M_RD) return i2c_quirk_error(adap, &msgs[0], "1st comb msg must be write"); if (q->flags & I2C_AQ_COMB_READ_SECOND && !(msgs[1].flags & I2C_M_RD)) return i2c_quirk_error(adap, &msgs[1], "2nd comb msg must be read"); if (q->flags & I2C_AQ_COMB_SAME_ADDR && msgs[0].addr != msgs[1].addr) return i2c_quirk_error(adap, &msgs[0], "comb msg only to same addr"); if (i2c_quirk_exceeded(msgs[0].len, q->max_comb_1st_msg_len)) return i2c_quirk_error(adap, &msgs[0], "msg too long"); if (i2c_quirk_exceeded(msgs[1].len, q->max_comb_2nd_msg_len)) return i2c_quirk_error(adap, &msgs[1], "msg too long"); do_len_check = false; } } if (i2c_quirk_exceeded(num, max_num)) return i2c_quirk_error(adap, &msgs[0], "too many messages"); for (i = 0; i < num; i++) { u16 len = msgs[i].len; if (msgs[i].flags & I2C_M_RD) { if (do_len_check && i2c_quirk_exceeded(len, q->max_read_len)) return i2c_quirk_error(adap, &msgs[i], "msg too long"); if (q->flags & I2C_AQ_NO_ZERO_LEN_READ && len == 0) return i2c_quirk_error(adap, &msgs[i], "no zero length"); } else { if (do_len_check && i2c_quirk_exceeded(len, q->max_write_len)) return i2c_quirk_error(adap, &msgs[i], "msg too long"); if (q->flags & I2C_AQ_NO_ZERO_LEN_WRITE && len == 0) return i2c_quirk_error(adap, &msgs[i], "no zero length"); } } return 0; } /** * __i2c_transfer - unlocked flavor of i2c_transfer * @adap: Handle to I2C bus * @msgs: One or more messages to execute before STOP is issued to * terminate the operation; each message begins with a START. 
* @num: Number of messages to be executed. * * Returns negative errno, else the number of messages executed. * * Adapter lock must be held when calling this function. No debug logging * takes place. adap->algo->master_xfer existence isn't checked. */ int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { unsigned long orig_jiffies; int ret, try; if (WARN_ON(!msgs || num < 1)) return -EINVAL; ret = __i2c_check_suspended(adap); if (ret) return ret; if (adap->quirks && i2c_check_for_quirks(adap, msgs, num)) return -EOPNOTSUPP; /* * i2c_trace_msg_key gets enabled when tracepoint i2c_transfer gets * enabled. This is an efficient way of keeping the for-loop from * being executed when not needed. */ if (static_branch_unlikely(&i2c_trace_msg_key)) { int i; for (i = 0; i < num; i++) if (msgs[i].flags & I2C_M_RD) trace_i2c_read(adap, &msgs[i], i); else trace_i2c_write(adap, &msgs[i], i); } /* Retry automatically on arbitration loss */ orig_jiffies = jiffies; for (ret = 0, try = 0; try <= adap->retries; try++) { if (i2c_in_atomic_xfer_mode() && adap->algo->master_xfer_atomic) ret = adap->algo->master_xfer_atomic(adap, msgs, num); else ret = adap->algo->master_xfer(adap, msgs, num); if (ret != -EAGAIN) break; if (time_after(jiffies, orig_jiffies + adap->timeout)) break; } if (static_branch_unlikely(&i2c_trace_msg_key)) { int i; for (i = 0; i < ret; i++) if (msgs[i].flags & I2C_M_RD) trace_i2c_reply(adap, &msgs[i], i); trace_i2c_result(adap, num, ret); } return ret; } EXPORT_SYMBOL(__i2c_transfer); /** * i2c_transfer - execute a single or combined I2C message * @adap: Handle to I2C bus * @msgs: One or more messages to execute before STOP is issued to * terminate the operation; each message begins with a START. * @num: Number of messages to be executed. * * Returns negative errno, else the number of messages executed. * * Note that there is no requirement that each message be sent to * the same slave address, although that is the most common model. */ int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { int ret; if (!adap->algo->master_xfer) { dev_dbg(&adap->dev, "I2C level transfers not supported\n"); return -EOPNOTSUPP; } /* REVISIT the fault reporting model here is weak: * * - When we get an error after receiving N bytes from a slave, * there is no way to report "N". * * - When we get a NAK after transmitting N bytes to a slave, * there is no way to report "N" ... or to let the master * continue executing the rest of this combined message, if * that's the appropriate response. * * - When for example "num" is two and we successfully complete * the first message but get an error part way through the * second, it's unclear whether that should be reported as * one (discarding status on the second message) or errno * (discarding status on the first one). */ ret = __i2c_lock_bus_helper(adap); if (ret) return ret; ret = __i2c_transfer(adap, msgs, num); i2c_unlock_bus(adap, I2C_LOCK_SEGMENT); return ret; } EXPORT_SYMBOL(i2c_transfer); /** * i2c_transfer_buffer_flags - issue a single I2C message transferring data * to/from a buffer * @client: Handle to slave device * @buf: Where the data is stored * @count: How many bytes to transfer, must be less than 64k since msg.len is u16 * @flags: The flags to be used for the message, e.g. I2C_M_RD for reads * * Returns negative errno, or else the number of bytes transferred. 
*/ int i2c_transfer_buffer_flags(const struct i2c_client *client, char *buf, int count, u16 flags) { int ret; struct i2c_msg msg = { .addr = client->addr, .flags = flags | (client->flags & I2C_M_TEN), .len = count, .buf = buf, }; ret = i2c_transfer(client->adapter, &msg, 1); /* * If everything went ok (i.e. 1 msg transferred), return #bytes * transferred, else error code. */ return (ret == 1) ? count : ret; } EXPORT_SYMBOL(i2c_transfer_buffer_flags); /** * i2c_get_device_id - get manufacturer, part id and die revision of a device * @client: The device to query * @id: The queried information * * Returns negative errno on error, zero on success. */ int i2c_get_device_id(const struct i2c_client *client, struct i2c_device_identity *id) { struct i2c_adapter *adap = client->adapter; union i2c_smbus_data raw_id; int ret; if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_READ_I2C_BLOCK)) return -EOPNOTSUPP; raw_id.block[0] = 3; ret = i2c_smbus_xfer(adap, I2C_ADDR_DEVICE_ID, 0, I2C_SMBUS_READ, client->addr << 1, I2C_SMBUS_I2C_BLOCK_DATA, &raw_id); if (ret) return ret; id->manufacturer_id = (raw_id.block[1] << 4) | (raw_id.block[2] >> 4); id->part_id = ((raw_id.block[2] & 0xf) << 5) | (raw_id.block[3] >> 3); id->die_revision = raw_id.block[3] & 0x7; return 0; } EXPORT_SYMBOL_GPL(i2c_get_device_id); /** * i2c_client_get_device_id - get the driver match table entry of a device * @client: the device to query. The device must be bound to a driver * * Returns a pointer to the matching entry if found, NULL otherwise. */ const struct i2c_device_id *i2c_client_get_device_id(const struct i2c_client *client) { const struct i2c_driver *drv = to_i2c_driver(client->dev.driver); return i2c_match_id(drv->id_table, client); } EXPORT_SYMBOL_GPL(i2c_client_get_device_id); /* ---------------------------------------------------- * the i2c address scanning function * Will not work for 10-bit addresses! * ---------------------------------------------------- */ /* * Legacy default probe function, mostly relevant for SMBus. The default * probe method is a quick write, but it is known to corrupt the 24RF08 * EEPROMs due to a state machine bug, and could also irreversibly * write-protect some EEPROMs, so for address ranges 0x30-0x37 and 0x50-0x5f, * we use a short byte read instead. Also, some bus drivers don't implement * quick write, so we fallback to a byte read in that case too. * On x86, there is another special case for FSC hardware monitoring chips, * which want regular byte reads (address 0x73.) Fortunately, these are the * only known chips using this I2C address on PC hardware. * Returns 1 if probe succeeded, 0 if not. 
*/ static int i2c_default_probe(struct i2c_adapter *adap, unsigned short addr) { int err; union i2c_smbus_data dummy; #ifdef CONFIG_X86 if (addr == 0x73 && (adap->class & I2C_CLASS_HWMON) && i2c_check_functionality(adap, I2C_FUNC_SMBUS_READ_BYTE_DATA)) err = i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_READ, 0, I2C_SMBUS_BYTE_DATA, &dummy); else #endif if (!((addr & ~0x07) == 0x30 || (addr & ~0x0f) == 0x50) && i2c_check_functionality(adap, I2C_FUNC_SMBUS_QUICK)) err = i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_WRITE, 0, I2C_SMBUS_QUICK, NULL); else if (i2c_check_functionality(adap, I2C_FUNC_SMBUS_READ_BYTE)) err = i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_READ, 0, I2C_SMBUS_BYTE, &dummy); else { dev_warn(&adap->dev, "No suitable probing method supported for address 0x%02X\n", addr); err = -EOPNOTSUPP; } return err >= 0; } static int i2c_detect_address(struct i2c_client *temp_client, struct i2c_driver *driver) { struct i2c_board_info info; struct i2c_adapter *adapter = temp_client->adapter; int addr = temp_client->addr; int err; /* Make sure the address is valid */ err = i2c_check_7bit_addr_validity_strict(addr); if (err) { dev_warn(&adapter->dev, "Invalid probe address 0x%02x\n", addr); return err; } /* Skip if already in use (7 bit, no need to encode flags) */ if (i2c_check_addr_busy(adapter, addr)) return 0; /* Make sure there is something at this address */ if (!i2c_default_probe(adapter, addr)) return 0; /* Finally call the custom detection function */ memset(&info, 0, sizeof(struct i2c_board_info)); info.addr = addr; err = driver->detect(temp_client, &info); if (err) { /* -ENODEV is returned if the detection fails. We catch it here as this isn't an error. */ return err == -ENODEV ? 0 : err; } /* Consistency check */ if (info.type[0] == '\0') { dev_err(&adapter->dev, "%s detection function provided no name for 0x%x\n", driver->driver.name, addr); } else { struct i2c_client *client; /* Detection succeeded, instantiate the device */ if (adapter->class & I2C_CLASS_DEPRECATED) dev_warn(&adapter->dev, "This adapter will soon drop class based instantiation of devices. " "Please make sure client 0x%02x gets instantiated by other means. " "Check 'Documentation/i2c/instantiating-devices.rst' for details.\n", info.addr); dev_dbg(&adapter->dev, "Creating %s at 0x%02x\n", info.type, info.addr); client = i2c_new_client_device(adapter, &info); if (!IS_ERR(client)) list_add_tail(&client->detected, &driver->clients); else dev_err(&adapter->dev, "Failed creating %s at 0x%02x\n", info.type, info.addr); } return 0; } static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver) { const unsigned short *address_list; struct i2c_client *temp_client; int i, err = 0; address_list = driver->address_list; if (!driver->detect || !address_list) return 0; /* Warn that the adapter lost class based instantiation */ if (adapter->class == I2C_CLASS_DEPRECATED) { dev_dbg(&adapter->dev, "This adapter dropped support for I2C classes and won't auto-detect %s devices anymore. 
" "If you need it, check 'Documentation/i2c/instantiating-devices.rst' for alternatives.\n", driver->driver.name); return 0; } /* Stop here if the classes do not match */ if (!(adapter->class & driver->class)) return 0; /* Set up a temporary client to help detect callback */ temp_client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL); if (!temp_client) return -ENOMEM; temp_client->adapter = adapter; for (i = 0; address_list[i] != I2C_CLIENT_END; i += 1) { dev_dbg(&adapter->dev, "found normal entry for adapter %d, addr 0x%02x\n", i2c_adapter_id(adapter), address_list[i]); temp_client->addr = address_list[i]; err = i2c_detect_address(temp_client, driver); if (unlikely(err)) break; } kfree(temp_client); return err; } int i2c_probe_func_quick_read(struct i2c_adapter *adap, unsigned short addr) { return i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_READ, 0, I2C_SMBUS_QUICK, NULL) >= 0; } EXPORT_SYMBOL_GPL(i2c_probe_func_quick_read); struct i2c_client * i2c_new_scanned_device(struct i2c_adapter *adap, struct i2c_board_info *info, unsigned short const *addr_list, int (*probe)(struct i2c_adapter *adap, unsigned short addr)) { int i; if (!probe) probe = i2c_default_probe; for (i = 0; addr_list[i] != I2C_CLIENT_END; i++) { /* Check address validity */ if (i2c_check_7bit_addr_validity_strict(addr_list[i]) < 0) { dev_warn(&adap->dev, "Invalid 7-bit address 0x%02x\n", addr_list[i]); continue; } /* Check address availability (7 bit, no need to encode flags) */ if (i2c_check_addr_busy(adap, addr_list[i])) { dev_dbg(&adap->dev, "Address 0x%02x already in use, not probing\n", addr_list[i]); continue; } /* Test address responsiveness */ if (probe(adap, addr_list[i])) break; } if (addr_list[i] == I2C_CLIENT_END) { dev_dbg(&adap->dev, "Probing failed, no device found\n"); return ERR_PTR(-ENODEV); } info->addr = addr_list[i]; return i2c_new_client_device(adap, info); } EXPORT_SYMBOL_GPL(i2c_new_scanned_device); struct i2c_adapter *i2c_get_adapter(int nr) { struct i2c_adapter *adapter; mutex_lock(&core_lock); adapter = idr_find(&i2c_adapter_idr, nr); if (!adapter) goto exit; if (try_module_get(adapter->owner)) get_device(&adapter->dev); else adapter = NULL; exit: mutex_unlock(&core_lock); return adapter; } EXPORT_SYMBOL(i2c_get_adapter); void i2c_put_adapter(struct i2c_adapter *adap) { if (!adap) return; module_put(adap->owner); /* Should be last, otherwise we risk use-after-free with 'adap' */ put_device(&adap->dev); } EXPORT_SYMBOL(i2c_put_adapter); /** * i2c_get_dma_safe_msg_buf() - get a DMA safe buffer for the given i2c_msg * @msg: the message to be checked * @threshold: the minimum number of bytes for which using DMA makes sense. * Should at least be 1. * * Return: NULL if a DMA safe buffer was not obtained. Use msg->buf with PIO. * Or a valid pointer to be used with DMA. After use, release it by * calling i2c_put_dma_safe_msg_buf(). * * This function must only be called from process context! 
*/ u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold) { /* also skip 0-length msgs for bogus thresholds of 0 */ if (!threshold) pr_debug("DMA buffer for addr=0x%02x with length 0 is bogus\n", msg->addr); if (msg->len < threshold || msg->len == 0) return NULL; if (msg->flags & I2C_M_DMA_SAFE) return msg->buf; pr_debug("using bounce buffer for addr=0x%02x, len=%d\n", msg->addr, msg->len); if (msg->flags & I2C_M_RD) return kzalloc(msg->len, GFP_KERNEL); else return kmemdup(msg->buf, msg->len, GFP_KERNEL); } EXPORT_SYMBOL_GPL(i2c_get_dma_safe_msg_buf); /** * i2c_put_dma_safe_msg_buf - release DMA safe buffer and sync with i2c_msg * @buf: the buffer obtained from i2c_get_dma_safe_msg_buf(). May be NULL. * @msg: the message which the buffer corresponds to * @xferred: bool saying if the message was transferred */ void i2c_put_dma_safe_msg_buf(u8 *buf, struct i2c_msg *msg, bool xferred) { if (!buf || buf == msg->buf) return; if (xferred && msg->flags & I2C_M_RD) memcpy(msg->buf, buf, msg->len); kfree(buf); } EXPORT_SYMBOL_GPL(i2c_put_dma_safe_msg_buf); MODULE_AUTHOR("Simon G. Vogl <[email protected]>"); MODULE_DESCRIPTION("I2C-Bus main module"); MODULE_LICENSE("GPL");
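/*
 * Editor's usage sketch (not part of the kernel sources): how a bus driver
 * might wrap a DMA transfer with the DMA-safe helpers above. The type
 * struct foo_dev and the functions foo_dma_xfer() and foo_pio_xfer() are
 * hypothetical placeholders standing in for driver-specific code, and the
 * threshold of 8 bytes is an arbitrary example value.
 */
struct foo_dev;
extern int foo_dma_xfer(struct foo_dev *fdev, u8 *buf, u16 len, bool read);
extern int foo_pio_xfer(struct foo_dev *fdev, struct i2c_msg *msg);

static int foo_xfer_one_msg(struct foo_dev *fdev, struct i2c_msg *msg)
{
	/* Bounce to a DMA-safe buffer for messages of 8 bytes or more */
	u8 *dma_buf = i2c_get_dma_safe_msg_buf(msg, 8);
	int ret;

	if (!dma_buf)
		return foo_pio_xfer(fdev, msg); /* short or zero-length: PIO */

	ret = foo_dma_xfer(fdev, dma_buf, msg->len, msg->flags & I2C_M_RD);

	/* Copies read data back to msg->buf and frees any bounce buffer */
	i2c_put_dma_safe_msg_buf(dma_buf, msg, ret == 0);

	return ret;
}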
linux-master
drivers/i2c/i2c-core-base.c
// SPDX-License-Identifier: GPL-2.0-only /* * I2C slave mode testunit * * Copyright (C) 2020 by Wolfram Sang, Sang Engineering <[email protected]> * Copyright (C) 2020 by Renesas Electronics Corporation */ #include <linux/bitops.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/module.h> #include <linux/of.h> #include <linux/slab.h> #include <linux/workqueue.h> /* FIXME: is system_long_wq the best choice? */ #define TU_CUR_VERSION 0x01 enum testunit_cmds { TU_CMD_READ_BYTES = 1, /* save 0 for ABORT, RESET or similar */ TU_CMD_HOST_NOTIFY, TU_CMD_SMBUS_BLOCK_PROC_CALL, TU_NUM_CMDS }; enum testunit_regs { TU_REG_CMD, TU_REG_DATAL, TU_REG_DATAH, TU_REG_DELAY, TU_NUM_REGS }; enum testunit_flags { TU_FLAG_IN_PROCESS, }; struct testunit_data { unsigned long flags; u8 regs[TU_NUM_REGS]; u8 reg_idx; struct i2c_client *client; struct delayed_work worker; }; static void i2c_slave_testunit_work(struct work_struct *work) { struct testunit_data *tu = container_of(work, struct testunit_data, worker.work); struct i2c_msg msg; u8 msgbuf[256]; int ret = 0; msg.addr = I2C_CLIENT_END; msg.buf = msgbuf; switch (tu->regs[TU_REG_CMD]) { case TU_CMD_READ_BYTES: msg.addr = tu->regs[TU_REG_DATAL]; msg.flags = I2C_M_RD; msg.len = tu->regs[TU_REG_DATAH]; break; case TU_CMD_HOST_NOTIFY: msg.addr = 0x08; msg.flags = 0; msg.len = 3; msgbuf[0] = tu->client->addr; msgbuf[1] = tu->regs[TU_REG_DATAL]; msgbuf[2] = tu->regs[TU_REG_DATAH]; break; default: break; } if (msg.addr != I2C_CLIENT_END) { ret = i2c_transfer(tu->client->adapter, &msg, 1); /* convert '0 msgs transferred' to errno */ ret = (ret == 0) ? -EIO : ret; } if (ret < 0) dev_err(&tu->client->dev, "CMD%02X failed (%d)\n", tu->regs[TU_REG_CMD], ret); clear_bit(TU_FLAG_IN_PROCESS, &tu->flags); } static int i2c_slave_testunit_slave_cb(struct i2c_client *client, enum i2c_slave_event event, u8 *val) { struct testunit_data *tu = i2c_get_clientdata(client); bool is_proc_call = tu->reg_idx == 3 && tu->regs[TU_REG_DATAL] == 1 && tu->regs[TU_REG_CMD] == TU_CMD_SMBUS_BLOCK_PROC_CALL; int ret = 0; switch (event) { case I2C_SLAVE_WRITE_RECEIVED: if (test_bit(TU_FLAG_IN_PROCESS, &tu->flags)) return -EBUSY; if (tu->reg_idx < TU_NUM_REGS) tu->regs[tu->reg_idx] = *val; else ret = -EMSGSIZE; if (tu->reg_idx <= TU_NUM_REGS) tu->reg_idx++; /* TU_REG_CMD always written at this point */ if (tu->regs[TU_REG_CMD] >= TU_NUM_CMDS) ret = -EINVAL; break; case I2C_SLAVE_STOP: if (tu->reg_idx == TU_NUM_REGS) { set_bit(TU_FLAG_IN_PROCESS, &tu->flags); queue_delayed_work(system_long_wq, &tu->worker, msecs_to_jiffies(10 * tu->regs[TU_REG_DELAY])); } fallthrough; case I2C_SLAVE_WRITE_REQUESTED: memset(tu->regs, 0, TU_NUM_REGS); tu->reg_idx = 0; break; case I2C_SLAVE_READ_PROCESSED: if (is_proc_call && tu->regs[TU_REG_DATAH]) tu->regs[TU_REG_DATAH]--; fallthrough; case I2C_SLAVE_READ_REQUESTED: *val = is_proc_call ? 
tu->regs[TU_REG_DATAH] : TU_CUR_VERSION; break; } return ret; } static int i2c_slave_testunit_probe(struct i2c_client *client) { struct testunit_data *tu; tu = devm_kzalloc(&client->dev, sizeof(struct testunit_data), GFP_KERNEL); if (!tu) return -ENOMEM; tu->client = client; i2c_set_clientdata(client, tu); INIT_DELAYED_WORK(&tu->worker, i2c_slave_testunit_work); return i2c_slave_register(client, i2c_slave_testunit_slave_cb); }; static void i2c_slave_testunit_remove(struct i2c_client *client) { struct testunit_data *tu = i2c_get_clientdata(client); cancel_delayed_work_sync(&tu->worker); i2c_slave_unregister(client); } static const struct i2c_device_id i2c_slave_testunit_id[] = { { "slave-testunit", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, i2c_slave_testunit_id); static struct i2c_driver i2c_slave_testunit_driver = { .driver = { .name = "i2c-slave-testunit", }, .probe = i2c_slave_testunit_probe, .remove = i2c_slave_testunit_remove, .id_table = i2c_slave_testunit_id, }; module_i2c_driver(i2c_slave_testunit_driver); MODULE_AUTHOR("Wolfram Sang <[email protected]>"); MODULE_DESCRIPTION("I2C slave mode test unit"); MODULE_LICENSE("GPL v2");
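/*
 * Editor's usage sketch (not part of the driver): how a remote bus master
 * could trigger TU_CMD_HOST_NOTIFY on a testunit listening at 'addr'. The
 * four bytes written map to TU_REG_CMD, TU_REG_DATAL, TU_REG_DATAH and
 * TU_REG_DELAY; the STOP after the write queues the delayed work, here
 * 10 * 10ms = 100ms later. The payload value 0x42 is an arbitrary example.
 */
static int testunit_trigger_host_notify(struct i2c_adapter *adap, u16 addr)
{
	u8 buf[TU_NUM_REGS] = { TU_CMD_HOST_NOTIFY, 0x42, 0x00, 10 };
	struct i2c_msg msg = {
		.addr = addr,
		.flags = 0,
		.len = sizeof(buf),
		.buf = buf,
	};

	/* One write covering all four registers, then STOP */
	return i2c_transfer(adap, &msg, 1) == 1 ? 0 : -EIO;
}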
linux-master
drivers/i2c/i2c-slave-testunit.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Linux I2C core SMBus and SMBus emulation code * * This file contains the SMBus functions which are always included in the I2C * core because they can be emulated via I2C. SMBus specific extensions * (e.g. smbalert) are handled in a separate i2c-smbus module. * * All SMBus-related things are written by Frodo Looijaard <[email protected]> * SMBus 2.0 support by Mark Studebaker <[email protected]> and * Jean Delvare <[email protected]> */ #include <linux/device.h> #include <linux/err.h> #include <linux/i2c.h> #include <linux/i2c-smbus.h> #include <linux/property.h> #include <linux/slab.h> #include "i2c-core.h" #define CREATE_TRACE_POINTS #include <trace/events/smbus.h> /* The SMBus parts */ #define POLY (0x1070U << 3) static u8 crc8(u16 data) { int i; for (i = 0; i < 8; i++) { if (data & 0x8000) data = data ^ POLY; data = data << 1; } return (u8)(data >> 8); } /** * i2c_smbus_pec - Incremental CRC8 over the given input data array * @crc: previous return crc8 value * @p: pointer to data buffer. * @count: number of bytes in data buffer. * * Incremental CRC8 over count bytes in the array pointed to by p */ u8 i2c_smbus_pec(u8 crc, u8 *p, size_t count) { int i; for (i = 0; i < count; i++) crc = crc8((crc ^ p[i]) << 8); return crc; } EXPORT_SYMBOL(i2c_smbus_pec); /* Assume a 7-bit address, which is reasonable for SMBus */ static u8 i2c_smbus_msg_pec(u8 pec, struct i2c_msg *msg) { /* The address will be sent first */ u8 addr = i2c_8bit_addr_from_msg(msg); pec = i2c_smbus_pec(pec, &addr, 1); /* The data buffer follows */ return i2c_smbus_pec(pec, msg->buf, msg->len); } /* Used for write only transactions */ static inline void i2c_smbus_add_pec(struct i2c_msg *msg) { msg->buf[msg->len] = i2c_smbus_msg_pec(0, msg); msg->len++; } /* Return <0 on CRC error If there was a write before this read (most cases) we need to take the partial CRC from the write part into account. Note that this function does modify the message (we need to decrease the message length to hide the CRC byte from the caller). */ static int i2c_smbus_check_pec(u8 cpec, struct i2c_msg *msg) { u8 rpec = msg->buf[--msg->len]; cpec = i2c_smbus_msg_pec(cpec, msg); if (rpec != cpec) { pr_debug("Bad PEC 0x%02x vs. 0x%02x\n", rpec, cpec); return -EBADMSG; } return 0; } /** * i2c_smbus_read_byte - SMBus "receive byte" protocol * @client: Handle to slave device * * This executes the SMBus "receive byte" protocol, returning negative errno * else the byte received from the device. */ s32 i2c_smbus_read_byte(const struct i2c_client *client) { union i2c_smbus_data data; int status; status = i2c_smbus_xfer(client->adapter, client->addr, client->flags, I2C_SMBUS_READ, 0, I2C_SMBUS_BYTE, &data); return (status < 0) ? status : data.byte; } EXPORT_SYMBOL(i2c_smbus_read_byte); /** * i2c_smbus_write_byte - SMBus "send byte" protocol * @client: Handle to slave device * @value: Byte to be sent * * This executes the SMBus "send byte" protocol, returning negative errno * else zero on success. */ s32 i2c_smbus_write_byte(const struct i2c_client *client, u8 value) { return i2c_smbus_xfer(client->adapter, client->addr, client->flags, I2C_SMBUS_WRITE, value, I2C_SMBUS_BYTE, NULL); } EXPORT_SYMBOL(i2c_smbus_write_byte); /** * i2c_smbus_read_byte_data - SMBus "read byte" protocol * @client: Handle to slave device * @command: Byte interpreted by slave * * This executes the SMBus "read byte" protocol, returning negative errno * else a data byte received from the device. 
*/ s32 i2c_smbus_read_byte_data(const struct i2c_client *client, u8 command) { union i2c_smbus_data data; int status; status = i2c_smbus_xfer(client->adapter, client->addr, client->flags, I2C_SMBUS_READ, command, I2C_SMBUS_BYTE_DATA, &data); return (status < 0) ? status : data.byte; } EXPORT_SYMBOL(i2c_smbus_read_byte_data); /** * i2c_smbus_write_byte_data - SMBus "write byte" protocol * @client: Handle to slave device * @command: Byte interpreted by slave * @value: Byte being written * * This executes the SMBus "write byte" protocol, returning negative errno * else zero on success. */ s32 i2c_smbus_write_byte_data(const struct i2c_client *client, u8 command, u8 value) { union i2c_smbus_data data; data.byte = value; return i2c_smbus_xfer(client->adapter, client->addr, client->flags, I2C_SMBUS_WRITE, command, I2C_SMBUS_BYTE_DATA, &data); } EXPORT_SYMBOL(i2c_smbus_write_byte_data); /** * i2c_smbus_read_word_data - SMBus "read word" protocol * @client: Handle to slave device * @command: Byte interpreted by slave * * This executes the SMBus "read word" protocol, returning negative errno * else a 16-bit unsigned "word" received from the device. */ s32 i2c_smbus_read_word_data(const struct i2c_client *client, u8 command) { union i2c_smbus_data data; int status; status = i2c_smbus_xfer(client->adapter, client->addr, client->flags, I2C_SMBUS_READ, command, I2C_SMBUS_WORD_DATA, &data); return (status < 0) ? status : data.word; } EXPORT_SYMBOL(i2c_smbus_read_word_data); /** * i2c_smbus_write_word_data - SMBus "write word" protocol * @client: Handle to slave device * @command: Byte interpreted by slave * @value: 16-bit "word" being written * * This executes the SMBus "write word" protocol, returning negative errno * else zero on success. */ s32 i2c_smbus_write_word_data(const struct i2c_client *client, u8 command, u16 value) { union i2c_smbus_data data; data.word = value; return i2c_smbus_xfer(client->adapter, client->addr, client->flags, I2C_SMBUS_WRITE, command, I2C_SMBUS_WORD_DATA, &data); } EXPORT_SYMBOL(i2c_smbus_write_word_data); /** * i2c_smbus_read_block_data - SMBus "block read" protocol * @client: Handle to slave device * @command: Byte interpreted by slave * @values: Byte array into which data will be read; big enough to hold * the data returned by the slave. SMBus allows at most 32 bytes. * * This executes the SMBus "block read" protocol, returning negative errno * else the number of data bytes in the slave's response. * * Note that using this function requires that the client's adapter support * the I2C_FUNC_SMBUS_READ_BLOCK_DATA functionality. Not all adapter drivers * support this; its emulation through I2C messaging relies on a specific * mechanism (I2C_M_RECV_LEN) which may not be implemented. */ s32 i2c_smbus_read_block_data(const struct i2c_client *client, u8 command, u8 *values) { union i2c_smbus_data data; int status; status = i2c_smbus_xfer(client->adapter, client->addr, client->flags, I2C_SMBUS_READ, command, I2C_SMBUS_BLOCK_DATA, &data); if (status) return status; memcpy(values, &data.block[1], data.block[0]); return data.block[0]; } EXPORT_SYMBOL(i2c_smbus_read_block_data); /** * i2c_smbus_write_block_data - SMBus "block write" protocol * @client: Handle to slave device * @command: Byte interpreted by slave * @length: Size of data block; SMBus allows at most 32 bytes * @values: Byte array which will be written. * * This executes the SMBus "block write" protocol, returning negative errno * else zero on success. 
*/ s32 i2c_smbus_write_block_data(const struct i2c_client *client, u8 command, u8 length, const u8 *values) { union i2c_smbus_data data; if (length > I2C_SMBUS_BLOCK_MAX) length = I2C_SMBUS_BLOCK_MAX; data.block[0] = length; memcpy(&data.block[1], values, length); return i2c_smbus_xfer(client->adapter, client->addr, client->flags, I2C_SMBUS_WRITE, command, I2C_SMBUS_BLOCK_DATA, &data); } EXPORT_SYMBOL(i2c_smbus_write_block_data); /* Returns the number of read bytes */ s32 i2c_smbus_read_i2c_block_data(const struct i2c_client *client, u8 command, u8 length, u8 *values) { union i2c_smbus_data data; int status; if (length > I2C_SMBUS_BLOCK_MAX) length = I2C_SMBUS_BLOCK_MAX; data.block[0] = length; status = i2c_smbus_xfer(client->adapter, client->addr, client->flags, I2C_SMBUS_READ, command, I2C_SMBUS_I2C_BLOCK_DATA, &data); if (status < 0) return status; memcpy(values, &data.block[1], data.block[0]); return data.block[0]; } EXPORT_SYMBOL(i2c_smbus_read_i2c_block_data); s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client, u8 command, u8 length, const u8 *values) { union i2c_smbus_data data; if (length > I2C_SMBUS_BLOCK_MAX) length = I2C_SMBUS_BLOCK_MAX; data.block[0] = length; memcpy(data.block + 1, values, length); return i2c_smbus_xfer(client->adapter, client->addr, client->flags, I2C_SMBUS_WRITE, command, I2C_SMBUS_I2C_BLOCK_DATA, &data); } EXPORT_SYMBOL(i2c_smbus_write_i2c_block_data); static void i2c_smbus_try_get_dmabuf(struct i2c_msg *msg, u8 init_val) { bool is_read = msg->flags & I2C_M_RD; unsigned char *dma_buf; dma_buf = kzalloc(I2C_SMBUS_BLOCK_MAX + (is_read ? 2 : 3), GFP_KERNEL); if (!dma_buf) return; msg->buf = dma_buf; msg->flags |= I2C_M_DMA_SAFE; if (init_val) msg->buf[0] = init_val; } /* * Simulate a SMBus command using the I2C protocol. * No checking of parameters is done! */ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { /* * So we need to generate a series of msgs. In the case of writing, we * need to use only one message; when reading, we need two. We * initialize most things with sane defaults, to keep the code below * somewhat simpler. */ unsigned char msgbuf0[I2C_SMBUS_BLOCK_MAX+3]; unsigned char msgbuf1[I2C_SMBUS_BLOCK_MAX+2]; int nmsgs = read_write == I2C_SMBUS_READ ? 2 : 1; u8 partial_pec = 0; int status; struct i2c_msg msg[2] = { { .addr = addr, .flags = flags, .len = 1, .buf = msgbuf0, }, { .addr = addr, .flags = flags | I2C_M_RD, .len = 0, .buf = msgbuf1, }, }; bool wants_pec = ((flags & I2C_CLIENT_PEC) && size != I2C_SMBUS_QUICK && size != I2C_SMBUS_I2C_BLOCK_DATA); msgbuf0[0] = command; switch (size) { case I2C_SMBUS_QUICK: msg[0].len = 0; /* Special case: The read/write field is used as data */ msg[0].flags = flags | (read_write == I2C_SMBUS_READ ? I2C_M_RD : 0); nmsgs = 1; break; case I2C_SMBUS_BYTE: if (read_write == I2C_SMBUS_READ) { /* Special case: only a read! 
*/ msg[0].flags = I2C_M_RD | flags; nmsgs = 1; } break; case I2C_SMBUS_BYTE_DATA: if (read_write == I2C_SMBUS_READ) msg[1].len = 1; else { msg[0].len = 2; msgbuf0[1] = data->byte; } break; case I2C_SMBUS_WORD_DATA: if (read_write == I2C_SMBUS_READ) msg[1].len = 2; else { msg[0].len = 3; msgbuf0[1] = data->word & 0xff; msgbuf0[2] = data->word >> 8; } break; case I2C_SMBUS_PROC_CALL: nmsgs = 2; /* Special case */ read_write = I2C_SMBUS_READ; msg[0].len = 3; msg[1].len = 2; msgbuf0[1] = data->word & 0xff; msgbuf0[2] = data->word >> 8; break; case I2C_SMBUS_BLOCK_DATA: if (read_write == I2C_SMBUS_READ) { msg[1].flags |= I2C_M_RECV_LEN; msg[1].len = 1; /* block length will be added by the underlying bus driver */ i2c_smbus_try_get_dmabuf(&msg[1], 0); } else { msg[0].len = data->block[0] + 2; if (msg[0].len > I2C_SMBUS_BLOCK_MAX + 2) { dev_err(&adapter->dev, "Invalid block write size %d\n", data->block[0]); return -EINVAL; } i2c_smbus_try_get_dmabuf(&msg[0], command); memcpy(msg[0].buf + 1, data->block, msg[0].len - 1); } break; case I2C_SMBUS_BLOCK_PROC_CALL: nmsgs = 2; /* Another special case */ read_write = I2C_SMBUS_READ; if (data->block[0] > I2C_SMBUS_BLOCK_MAX) { dev_err(&adapter->dev, "Invalid block write size %d\n", data->block[0]); return -EINVAL; } msg[0].len = data->block[0] + 2; i2c_smbus_try_get_dmabuf(&msg[0], command); memcpy(msg[0].buf + 1, data->block, msg[0].len - 1); msg[1].flags |= I2C_M_RECV_LEN; msg[1].len = 1; /* block length will be added by the underlying bus driver */ i2c_smbus_try_get_dmabuf(&msg[1], 0); break; case I2C_SMBUS_I2C_BLOCK_DATA: if (data->block[0] > I2C_SMBUS_BLOCK_MAX) { dev_err(&adapter->dev, "Invalid block %s size %d\n", read_write == I2C_SMBUS_READ ? "read" : "write", data->block[0]); return -EINVAL; } if (read_write == I2C_SMBUS_READ) { msg[1].len = data->block[0]; i2c_smbus_try_get_dmabuf(&msg[1], 0); } else { msg[0].len = data->block[0] + 1; i2c_smbus_try_get_dmabuf(&msg[0], command); memcpy(msg[0].buf + 1, data->block + 1, data->block[0]); } break; default: dev_err(&adapter->dev, "Unsupported transaction %d\n", size); return -EOPNOTSUPP; } if (wants_pec) { /* Compute PEC if first message is a write */ if (!(msg[0].flags & I2C_M_RD)) { if (nmsgs == 1) /* Write only */ i2c_smbus_add_pec(&msg[0]); else /* Write followed by read */ partial_pec = i2c_smbus_msg_pec(0, &msg[0]); } /* Ask for PEC if last message is a read */ if (msg[nmsgs - 1].flags & I2C_M_RD) msg[nmsgs - 1].len++; } status = __i2c_transfer(adapter, msg, nmsgs); if (status < 0) goto cleanup; if (status != nmsgs) { status = -EIO; goto cleanup; } status = 0; /* Check PEC if last message is a read */ if (wants_pec && (msg[nmsgs - 1].flags & I2C_M_RD)) { status = i2c_smbus_check_pec(partial_pec, &msg[nmsgs - 1]); if (status < 0) goto cleanup; } if (read_write == I2C_SMBUS_READ) switch (size) { case I2C_SMBUS_BYTE: data->byte = msgbuf0[0]; break; case I2C_SMBUS_BYTE_DATA: data->byte = msgbuf1[0]; break; case I2C_SMBUS_WORD_DATA: case I2C_SMBUS_PROC_CALL: data->word = msgbuf1[0] | (msgbuf1[1] << 8); break; case I2C_SMBUS_I2C_BLOCK_DATA: memcpy(data->block + 1, msg[1].buf, data->block[0]); break; case I2C_SMBUS_BLOCK_DATA: case I2C_SMBUS_BLOCK_PROC_CALL: if (msg[1].buf[0] > I2C_SMBUS_BLOCK_MAX) { dev_err(&adapter->dev, "Invalid block size returned: %d\n", msg[1].buf[0]); status = -EPROTO; goto cleanup; } memcpy(data->block, msg[1].buf, msg[1].buf[0] + 1); break; } cleanup: if (msg[0].flags & I2C_M_DMA_SAFE) kfree(msg[0].buf); if (msg[1].flags & I2C_M_DMA_SAFE) kfree(msg[1].buf); return status; } 
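/*
 * Editor's illustration (not part of this file): the i2c_msg pair that the
 * emulation above builds for an SMBus "read byte data" transaction, spelled
 * out by hand. It behaves like i2c_smbus_read_byte_data() on adapters that
 * only implement master_xfer; PEC handling is omitted for brevity.
 */
static __maybe_unused s32 smbus_read_byte_data_by_hand(struct i2c_adapter *adap,
						       u16 addr, u8 command)
{
	u8 out = command, in;
	struct i2c_msg msgs[2] = {
		/* msg 0: write the command byte */
		{ .addr = addr, .flags = 0, .len = 1, .buf = &out },
		/* msg 1: repeated START, then read one data byte */
		{ .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = &in },
	};
	int status = i2c_transfer(adap, msgs, 2);

	if (status < 0)
		return status;
	return status == 2 ? in : -EIO;
}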
/** * i2c_smbus_xfer - execute SMBus protocol operations * @adapter: Handle to I2C bus * @addr: Address of SMBus slave on that bus * @flags: I2C_CLIENT_* flags (usually zero or I2C_CLIENT_PEC) * @read_write: I2C_SMBUS_READ or I2C_SMBUS_WRITE * @command: Byte interpreted by slave, for protocols which use such bytes * @protocol: SMBus protocol operation to execute, such as I2C_SMBUS_PROC_CALL * @data: Data to be read or written * * This executes an SMBus protocol operation, and returns a negative * errno code else zero on success. */ s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, unsigned short flags, char read_write, u8 command, int protocol, union i2c_smbus_data *data) { s32 res; res = __i2c_lock_bus_helper(adapter); if (res) return res; res = __i2c_smbus_xfer(adapter, addr, flags, read_write, command, protocol, data); i2c_unlock_bus(adapter, I2C_LOCK_SEGMENT); return res; } EXPORT_SYMBOL(i2c_smbus_xfer); s32 __i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, unsigned short flags, char read_write, u8 command, int protocol, union i2c_smbus_data *data) { int (*xfer_func)(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data); unsigned long orig_jiffies; int try; s32 res; res = __i2c_check_suspended(adapter); if (res) return res; /* If enabled, the following two tracepoints are conditional on * read_write and protocol. */ trace_smbus_write(adapter, addr, flags, read_write, command, protocol, data); trace_smbus_read(adapter, addr, flags, read_write, command, protocol); flags &= I2C_M_TEN | I2C_CLIENT_PEC | I2C_CLIENT_SCCB; xfer_func = adapter->algo->smbus_xfer; if (i2c_in_atomic_xfer_mode()) { if (adapter->algo->smbus_xfer_atomic) xfer_func = adapter->algo->smbus_xfer_atomic; else if (adapter->algo->master_xfer_atomic) xfer_func = NULL; /* fallback to I2C emulation */ } if (xfer_func) { /* Retry automatically on arbitration loss */ orig_jiffies = jiffies; for (res = 0, try = 0; try <= adapter->retries; try++) { res = xfer_func(adapter, addr, flags, read_write, command, protocol, data); if (res != -EAGAIN) break; if (time_after(jiffies, orig_jiffies + adapter->timeout)) break; } if (res != -EOPNOTSUPP || !adapter->algo->master_xfer) goto trace; /* * Fall back to i2c_smbus_xfer_emulated if the adapter doesn't * implement native support for the SMBus operation. */ } res = i2c_smbus_xfer_emulated(adapter, addr, flags, read_write, command, protocol, data); trace: /* If enabled, the reply tracepoint is conditional on read_write. */ trace_smbus_reply(adapter, addr, flags, read_write, command, protocol, data, res); trace_smbus_result(adapter, addr, flags, read_write, command, protocol, res); return res; } EXPORT_SYMBOL(__i2c_smbus_xfer); /** * i2c_smbus_read_i2c_block_data_or_emulated - read block or emulate * @client: Handle to slave device * @command: Byte interpreted by slave * @length: Size of data block; SMBus allows at most I2C_SMBUS_BLOCK_MAX bytes * @values: Byte array into which data will be read; big enough to hold * the data returned by the slave. SMBus allows at most * I2C_SMBUS_BLOCK_MAX bytes. * * This executes the SMBus "block read" protocol if supported by the adapter. * If block read is not supported, it emulates it using either word or byte * read protocols depending on availability. * * The addresses of the I2C slave device that are accessed with this function * must be mapped to a linear region, so that a block read will have the same * effect as a byte read. 
Before using this function you must double-check * if the I2C slave does support exchanging a block transfer with a byte * transfer. */ s32 i2c_smbus_read_i2c_block_data_or_emulated(const struct i2c_client *client, u8 command, u8 length, u8 *values) { u8 i = 0; int status; if (length > I2C_SMBUS_BLOCK_MAX) length = I2C_SMBUS_BLOCK_MAX; if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_READ_I2C_BLOCK)) return i2c_smbus_read_i2c_block_data(client, command, length, values); if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_READ_BYTE_DATA)) return -EOPNOTSUPP; if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_READ_WORD_DATA)) { while ((i + 2) <= length) { status = i2c_smbus_read_word_data(client, command + i); if (status < 0) return status; values[i] = status & 0xff; values[i + 1] = status >> 8; i += 2; } } while (i < length) { status = i2c_smbus_read_byte_data(client, command + i); if (status < 0) return status; values[i] = status; i++; } return i; } EXPORT_SYMBOL(i2c_smbus_read_i2c_block_data_or_emulated); /** * i2c_new_smbus_alert_device - get ara client for SMBus alert support * @adapter: the target adapter * @setup: setup data for the SMBus alert handler * Context: can sleep * * Setup handling of the SMBus alert protocol on a given I2C bus segment. * * Handling can be done either through our IRQ handler, or by the * adapter (from its handler, periodic polling, or whatever). * * This returns the ara client, which should be saved for later use with * i2c_handle_smbus_alert() and ultimately i2c_unregister_device(); or an * ERRPTR to indicate an error. */ struct i2c_client *i2c_new_smbus_alert_device(struct i2c_adapter *adapter, struct i2c_smbus_alert_setup *setup) { struct i2c_board_info ara_board_info = { I2C_BOARD_INFO("smbus_alert", 0x0c), .platform_data = setup, }; return i2c_new_client_device(adapter, &ara_board_info); } EXPORT_SYMBOL_GPL(i2c_new_smbus_alert_device); #if IS_ENABLED(CONFIG_I2C_SMBUS) int i2c_setup_smbus_alert(struct i2c_adapter *adapter) { struct device *parent = adapter->dev.parent; int irq; /* Adapter instantiated without parent, skip the SMBus alert setup */ if (!parent) return 0; irq = device_property_match_string(parent, "interrupt-names", "smbus_alert"); if (irq == -EINVAL || irq == -ENODATA) return 0; else if (irq < 0) return irq; return PTR_ERR_OR_ZERO(i2c_new_smbus_alert_device(adapter, NULL)); } #endif
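/*
 * Example: a hedged sketch of a client driver using
 * i2c_smbus_read_i2c_block_data_or_emulated() above. The register offset
 * 0x10 and the surrounding driver are hypothetical; callers must first
 * verify that the slave maps its registers linearly, so the word/byte
 * fallbacks are equivalent to a real block read.
 */
static int example_read_id_block(struct i2c_client *client)
{
	u8 buf[4];
	int ret;

	ret = i2c_smbus_read_i2c_block_data_or_emulated(client, 0x10,
							sizeof(buf), buf);
	if (ret < 0)
		return ret;
	if (ret != sizeof(buf))
		return -EIO;

	dev_info(&client->dev, "id: %4phN\n", buf);
	return 0;
}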
linux-master
drivers/i2c/i2c-core-smbus.c
// SPDX-License-Identifier: GPL-2.0-or-later /* i2c-stub.c - I2C/SMBus chip emulator Copyright (c) 2004 Mark M. Hoffman <[email protected]> Copyright (C) 2007-2014 Jean Delvare <[email protected]> */ #define pr_fmt(fmt) "i2c-stub: " fmt #include <linux/errno.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/module.h> #include <linux/slab.h> #define MAX_CHIPS 10 /* * Support for I2C_FUNC_SMBUS_BLOCK_DATA is disabled by default and must * be enabled explicitly by setting the I2C_FUNC_SMBUS_BLOCK_DATA bits * in the 'functionality' module parameter. */ #define STUB_FUNC_DEFAULT \ (I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | \ I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | \ I2C_FUNC_SMBUS_I2C_BLOCK) #define STUB_FUNC_ALL \ (STUB_FUNC_DEFAULT | I2C_FUNC_SMBUS_BLOCK_DATA) static unsigned short chip_addr[MAX_CHIPS]; module_param_array(chip_addr, ushort, NULL, S_IRUGO); MODULE_PARM_DESC(chip_addr, "Chip addresses (up to 10, between 0x03 and 0x77)"); static unsigned long functionality = STUB_FUNC_DEFAULT; module_param(functionality, ulong, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(functionality, "Override functionality bitfield"); /* Some chips have banked register ranges */ static u8 bank_reg[MAX_CHIPS]; module_param_array(bank_reg, byte, NULL, S_IRUGO); MODULE_PARM_DESC(bank_reg, "Bank register"); static u8 bank_mask[MAX_CHIPS]; module_param_array(bank_mask, byte, NULL, S_IRUGO); MODULE_PARM_DESC(bank_mask, "Bank value mask"); static u8 bank_start[MAX_CHIPS]; module_param_array(bank_start, byte, NULL, S_IRUGO); MODULE_PARM_DESC(bank_start, "First banked register"); static u8 bank_end[MAX_CHIPS]; module_param_array(bank_end, byte, NULL, S_IRUGO); MODULE_PARM_DESC(bank_end, "Last banked register"); struct smbus_block_data { struct list_head node; u8 command; u8 len; u8 block[I2C_SMBUS_BLOCK_MAX]; }; struct stub_chip { u8 pointer; u16 words[256]; /* Byte operations use the LSB as per SMBus specification */ struct list_head smbus_blocks; /* For chips with banks, extra registers are allocated dynamically */ u8 bank_reg; u8 bank_shift; u8 bank_mask; u8 bank_sel; /* Currently selected bank */ u8 bank_start; u8 bank_end; u16 bank_size; u16 *bank_words; /* Room for bank_mask * bank_size registers */ }; static struct stub_chip *stub_chips; static int stub_chips_nr; static struct smbus_block_data *stub_find_block(struct device *dev, struct stub_chip *chip, u8 command, bool create) { struct smbus_block_data *b, *rb = NULL; list_for_each_entry(b, &chip->smbus_blocks, node) { if (b->command == command) { rb = b; break; } } if (rb == NULL && create) { rb = devm_kzalloc(dev, sizeof(*rb), GFP_KERNEL); if (rb == NULL) return rb; rb->command = command; list_add(&rb->node, &chip->smbus_blocks); } return rb; } static u16 *stub_get_wordp(struct stub_chip *chip, u8 offset) { if (chip->bank_sel && offset >= chip->bank_start && offset <= chip->bank_end) return chip->bank_words + (chip->bank_sel - 1) * chip->bank_size + offset - chip->bank_start; else return chip->words + offset; } /* Return negative errno on error. 
*/ static s32 stub_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { s32 ret; int i, len; struct stub_chip *chip = NULL; struct smbus_block_data *b; u16 *wordp; /* Search for the right chip */ for (i = 0; i < stub_chips_nr; i++) { if (addr == chip_addr[i]) { chip = stub_chips + i; break; } } if (!chip) return -ENODEV; switch (size) { case I2C_SMBUS_QUICK: dev_dbg(&adap->dev, "smbus quick - addr 0x%02x\n", addr); ret = 0; break; case I2C_SMBUS_BYTE: if (read_write == I2C_SMBUS_WRITE) { chip->pointer = command; dev_dbg(&adap->dev, "smbus byte - addr 0x%02x, wrote 0x%02x.\n", addr, command); } else { wordp = stub_get_wordp(chip, chip->pointer++); data->byte = *wordp & 0xff; dev_dbg(&adap->dev, "smbus byte - addr 0x%02x, read 0x%02x.\n", addr, data->byte); } ret = 0; break; case I2C_SMBUS_BYTE_DATA: wordp = stub_get_wordp(chip, command); if (read_write == I2C_SMBUS_WRITE) { *wordp &= 0xff00; *wordp |= data->byte; dev_dbg(&adap->dev, "smbus byte data - addr 0x%02x, wrote 0x%02x at 0x%02x.\n", addr, data->byte, command); /* Set the bank as needed */ if (chip->bank_words && command == chip->bank_reg) { chip->bank_sel = (data->byte >> chip->bank_shift) & chip->bank_mask; dev_dbg(&adap->dev, "switching to bank %u.\n", chip->bank_sel); } } else { data->byte = *wordp & 0xff; dev_dbg(&adap->dev, "smbus byte data - addr 0x%02x, read 0x%02x at 0x%02x.\n", addr, data->byte, command); } chip->pointer = command + 1; ret = 0; break; case I2C_SMBUS_WORD_DATA: wordp = stub_get_wordp(chip, command); if (read_write == I2C_SMBUS_WRITE) { *wordp = data->word; dev_dbg(&adap->dev, "smbus word data - addr 0x%02x, wrote 0x%04x at 0x%02x.\n", addr, data->word, command); } else { data->word = *wordp; dev_dbg(&adap->dev, "smbus word data - addr 0x%02x, read 0x%04x at 0x%02x.\n", addr, data->word, command); } ret = 0; break; case I2C_SMBUS_I2C_BLOCK_DATA: /* * We ignore banks here, because banked chips don't use I2C * block transfers */ if (data->block[0] > 256 - command) /* Avoid overrun */ data->block[0] = 256 - command; len = data->block[0]; if (read_write == I2C_SMBUS_WRITE) { for (i = 0; i < len; i++) { chip->words[command + i] &= 0xff00; chip->words[command + i] |= data->block[1 + i]; } dev_dbg(&adap->dev, "i2c block data - addr 0x%02x, wrote %d bytes at 0x%02x.\n", addr, len, command); } else { for (i = 0; i < len; i++) { data->block[1 + i] = chip->words[command + i] & 0xff; } dev_dbg(&adap->dev, "i2c block data - addr 0x%02x, read %d bytes at 0x%02x.\n", addr, len, command); } ret = 0; break; case I2C_SMBUS_BLOCK_DATA: /* * We ignore banks here, because chips typically don't use both * banks and SMBus block transfers */ b = stub_find_block(&adap->dev, chip, command, false); if (read_write == I2C_SMBUS_WRITE) { len = data->block[0]; if (len == 0 || len > I2C_SMBUS_BLOCK_MAX) { ret = -EINVAL; break; } if (b == NULL) { b = stub_find_block(&adap->dev, chip, command, true); if (b == NULL) { ret = -ENOMEM; break; } } /* Largest write sets read block length */ if (len > b->len) b->len = len; for (i = 0; i < len; i++) b->block[i] = data->block[i + 1]; /* update for byte and word commands */ chip->words[command] = (b->block[0] << 8) | b->len; dev_dbg(&adap->dev, "smbus block data - addr 0x%02x, wrote %d bytes at 0x%02x.\n", addr, len, command); } else { if (b == NULL) { dev_dbg(&adap->dev, "SMBus block read command without prior block write not supported\n"); ret = -EOPNOTSUPP; break; } len = b->len; data->block[0] = len; for (i = 0; i < 
len; i++) data->block[i + 1] = b->block[i]; dev_dbg(&adap->dev, "smbus block data - addr 0x%02x, read %d bytes at 0x%02x.\n", addr, len, command); } ret = 0; break; default: dev_dbg(&adap->dev, "Unsupported I2C/SMBus command\n"); ret = -EOPNOTSUPP; break; } /* switch (size) */ return ret; } static u32 stub_func(struct i2c_adapter *adapter) { return STUB_FUNC_ALL & functionality; } static const struct i2c_algorithm smbus_algorithm = { .functionality = stub_func, .smbus_xfer = stub_xfer, }; static struct i2c_adapter stub_adapter = { .owner = THIS_MODULE, .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, .algo = &smbus_algorithm, .name = "SMBus stub driver", }; static int __init i2c_stub_allocate_banks(int i) { struct stub_chip *chip = stub_chips + i; chip->bank_reg = bank_reg[i]; chip->bank_start = bank_start[i]; chip->bank_end = bank_end[i]; chip->bank_size = bank_end[i] - bank_start[i] + 1; /* We assume that all bits in the mask are contiguous */ chip->bank_mask = bank_mask[i]; while (!(chip->bank_mask & 1)) { chip->bank_shift++; chip->bank_mask >>= 1; } chip->bank_words = kcalloc(chip->bank_mask * chip->bank_size, sizeof(u16), GFP_KERNEL); if (!chip->bank_words) return -ENOMEM; pr_debug("Allocated %u banks of %u words each (registers 0x%02x to 0x%02x)\n", chip->bank_mask, chip->bank_size, chip->bank_start, chip->bank_end); return 0; } static void i2c_stub_free(void) { int i; for (i = 0; i < stub_chips_nr; i++) kfree(stub_chips[i].bank_words); kfree(stub_chips); } static int __init i2c_stub_init(void) { int i, ret; if (!chip_addr[0]) { pr_err("Please specify a chip address\n"); return -ENODEV; } for (i = 0; i < MAX_CHIPS && chip_addr[i]; i++) { if (chip_addr[i] < 0x03 || chip_addr[i] > 0x77) { pr_err("Invalid chip address 0x%02x\n", chip_addr[i]); return -EINVAL; } pr_info("Virtual chip at 0x%02x\n", chip_addr[i]); } /* Allocate memory for all chips at once */ stub_chips_nr = i; stub_chips = kcalloc(stub_chips_nr, sizeof(struct stub_chip), GFP_KERNEL); if (!stub_chips) return -ENOMEM; for (i = 0; i < stub_chips_nr; i++) { INIT_LIST_HEAD(&stub_chips[i].smbus_blocks); /* Allocate extra memory for banked register ranges */ if (bank_mask[i]) { ret = i2c_stub_allocate_banks(i); if (ret) goto fail_free; } } ret = i2c_add_adapter(&stub_adapter); if (ret) goto fail_free; return 0; fail_free: i2c_stub_free(); return ret; } static void __exit i2c_stub_exit(void) { i2c_del_adapter(&stub_adapter); i2c_stub_free(); } MODULE_AUTHOR("Mark M. Hoffman <[email protected]>"); MODULE_DESCRIPTION("I2C stub driver"); MODULE_LICENSE("GPL"); module_init(i2c_stub_init); module_exit(i2c_stub_exit);
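/*
 * Example usage (a sketch; the addresses and bus number are arbitrary):
 * load the stub with two emulated chips and exercise them from userspace
 * with i2c-tools against the "SMBus stub driver" adapter:
 *
 *   modprobe i2c-stub chip_addr=0x50,0x51
 *   i2cset -y <bus> 0x50 0x02 0xbe    # SMBus write byte data
 *   i2cget -y <bus> 0x50 0x02         # read it back
 *
 * SMBus block commands additionally require setting the
 * I2C_FUNC_SMBUS_BLOCK_DATA bits in the 'functionality' module parameter,
 * since they are disabled in STUB_FUNC_DEFAULT above.
 */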
linux-master
drivers/i2c/i2c-stub.c
// SPDX-License-Identifier: GPL-2.0 /* * I2C Address Translator * * Copyright (c) 2019,2022 Luca Ceresoli <[email protected]> * Copyright (c) 2022,2023 Tomi Valkeinen <[email protected]> * * Originally based on i2c-mux.c */ #include <linux/fwnode.h> #include <linux/i2c-atr.h> #include <linux/i2c.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/spinlock.h> #define ATR_MAX_ADAPTERS 100 /* Just a sanity limit */ #define ATR_MAX_SYMLINK_LEN 11 /* Longest name is 10 chars: "channel-99" */ /** * struct i2c_atr_alias_pair - Holds the alias assigned to a client. * @node: List node * @client: Pointer to the client on the child bus * @alias: I2C alias address assigned by the driver. * This is the address that will be used to issue I2C transactions * on the parent (physical) bus. */ struct i2c_atr_alias_pair { struct list_head node; const struct i2c_client *client; u16 alias; }; /** * struct i2c_atr_chan - Data for a channel. * @adap: The &struct i2c_adapter for the channel * @atr: The parent I2C ATR * @chan_id: The ID of this channel * @alias_list: List of @struct i2c_atr_alias_pair containing the * assigned aliases * @orig_addrs_lock: Mutex protecting @orig_addrs * @orig_addrs: Buffer used to store the original addresses during transmit * @orig_addrs_size: Size of @orig_addrs */ struct i2c_atr_chan { struct i2c_adapter adap; struct i2c_atr *atr; u32 chan_id; struct list_head alias_list; /* Lock orig_addrs during xfer */ struct mutex orig_addrs_lock; u16 *orig_addrs; unsigned int orig_addrs_size; }; /** * struct i2c_atr - The I2C ATR instance * @parent: The parent &struct i2c_adapter * @dev: The device that owns the I2C ATR instance * @ops: &struct i2c_atr_ops * @priv: Private driver data, set with i2c_atr_set_driver_data() * @algo: The &struct i2c_algorithm for adapters * @lock: Lock for the I2C bus segment (see &struct i2c_lock_operations) * @max_adapters: Maximum number of adapters this I2C ATR can have * @num_aliases: Number of aliases in the aliases array * @aliases: The aliases array * @alias_mask_lock: Lock protecting alias_use_mask * @alias_use_mask: Bitmask for used aliases in aliases array * @i2c_nb: Notifier for remote client add & del events * @adapter: Array of adapters */ struct i2c_atr { struct i2c_adapter *parent; struct device *dev; const struct i2c_atr_ops *ops; void *priv; struct i2c_algorithm algo; /* lock for the I2C bus segment (see struct i2c_lock_operations) */ struct mutex lock; int max_adapters; size_t num_aliases; const u16 *aliases; /* Protects alias_use_mask */ spinlock_t alias_mask_lock; unsigned long *alias_use_mask; struct notifier_block i2c_nb; struct i2c_adapter *adapter[]; }; static struct i2c_atr_alias_pair * i2c_atr_find_mapping_by_client(const struct list_head *list, const struct i2c_client *client) { struct i2c_atr_alias_pair *c2a; list_for_each_entry(c2a, list, node) { if (c2a->client == client) return c2a; } return NULL; } static struct i2c_atr_alias_pair * i2c_atr_find_mapping_by_addr(const struct list_head *list, u16 phys_addr) { struct i2c_atr_alias_pair *c2a; list_for_each_entry(c2a, list, node) { if (c2a->client->addr == phys_addr) return c2a; } return NULL; } /* * Replace all message addresses with their aliases, saving the original * addresses. * * This function is internal for use in i2c_atr_master_xfer(). It must be * followed by i2c_atr_unmap_msgs() to restore the original addresses. 
*/ static int i2c_atr_map_msgs(struct i2c_atr_chan *chan, struct i2c_msg *msgs, int num) { struct i2c_atr *atr = chan->atr; struct i2c_atr_alias_pair *c2a; int i; /* Ensure we have enough room to save the original addresses */ if (unlikely(chan->orig_addrs_size < num)) { u16 *new_buf; /* We don't care about old data, hence no realloc() */ new_buf = kmalloc_array(num, sizeof(*new_buf), GFP_KERNEL); if (!new_buf) return -ENOMEM; kfree(chan->orig_addrs); chan->orig_addrs = new_buf; chan->orig_addrs_size = num; } for (i = 0; i < num; i++) { chan->orig_addrs[i] = msgs[i].addr; c2a = i2c_atr_find_mapping_by_addr(&chan->alias_list, msgs[i].addr); if (!c2a) { dev_err(atr->dev, "client 0x%02x not mapped!\n", msgs[i].addr); while (i--) msgs[i].addr = chan->orig_addrs[i]; return -ENXIO; } msgs[i].addr = c2a->alias; } return 0; } /* * Restore all message address aliases with the original addresses. This * function is internal for use in i2c_atr_master_xfer() and for this reason it * needs no null and size checks on orig_addrs. * * @see i2c_atr_map_msgs() */ static void i2c_atr_unmap_msgs(struct i2c_atr_chan *chan, struct i2c_msg *msgs, int num) { int i; for (i = 0; i < num; i++) msgs[i].addr = chan->orig_addrs[i]; } static int i2c_atr_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct i2c_atr_chan *chan = adap->algo_data; struct i2c_atr *atr = chan->atr; struct i2c_adapter *parent = atr->parent; int ret; /* Translate addresses */ mutex_lock(&chan->orig_addrs_lock); ret = i2c_atr_map_msgs(chan, msgs, num); if (ret < 0) goto err_unlock; /* Perform the transfer */ ret = i2c_transfer(parent, msgs, num); /* Restore addresses */ i2c_atr_unmap_msgs(chan, msgs, num); err_unlock: mutex_unlock(&chan->orig_addrs_lock); return ret; } static int i2c_atr_smbus_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { struct i2c_atr_chan *chan = adap->algo_data; struct i2c_atr *atr = chan->atr; struct i2c_adapter *parent = atr->parent; struct i2c_atr_alias_pair *c2a; c2a = i2c_atr_find_mapping_by_addr(&chan->alias_list, addr); if (!c2a) { dev_err(atr->dev, "client 0x%02x not mapped!\n", addr); return -ENXIO; } return i2c_smbus_xfer(parent, c2a->alias, flags, read_write, command, size, data); } static u32 i2c_atr_functionality(struct i2c_adapter *adap) { struct i2c_atr_chan *chan = adap->algo_data; struct i2c_adapter *parent = chan->atr->parent; return parent->algo->functionality(parent); } static void i2c_atr_lock_bus(struct i2c_adapter *adapter, unsigned int flags) { struct i2c_atr_chan *chan = adapter->algo_data; struct i2c_atr *atr = chan->atr; mutex_lock(&atr->lock); } static int i2c_atr_trylock_bus(struct i2c_adapter *adapter, unsigned int flags) { struct i2c_atr_chan *chan = adapter->algo_data; struct i2c_atr *atr = chan->atr; return mutex_trylock(&atr->lock); } static void i2c_atr_unlock_bus(struct i2c_adapter *adapter, unsigned int flags) { struct i2c_atr_chan *chan = adapter->algo_data; struct i2c_atr *atr = chan->atr; mutex_unlock(&atr->lock); } static const struct i2c_lock_operations i2c_atr_lock_ops = { .lock_bus = i2c_atr_lock_bus, .trylock_bus = i2c_atr_trylock_bus, .unlock_bus = i2c_atr_unlock_bus, }; static int i2c_atr_reserve_alias(struct i2c_atr *atr) { unsigned long idx; spin_lock(&atr->alias_mask_lock); idx = find_first_zero_bit(atr->alias_use_mask, atr->num_aliases); if (idx >= atr->num_aliases) { spin_unlock(&atr->alias_mask_lock); dev_err(atr->dev, "failed to find a free alias\n"); return
-EBUSY; } set_bit(idx, atr->alias_use_mask); spin_unlock(&atr->alias_mask_lock); return atr->aliases[idx]; } static void i2c_atr_release_alias(struct i2c_atr *atr, u16 alias) { unsigned int idx; spin_lock(&atr->alias_mask_lock); for (idx = 0; idx < atr->num_aliases; ++idx) { if (atr->aliases[idx] == alias) { clear_bit(idx, atr->alias_use_mask); spin_unlock(&atr->alias_mask_lock); return; } } spin_unlock(&atr->alias_mask_lock); /* This should never happen */ dev_warn(atr->dev, "Unable to find mapped alias\n"); } static int i2c_atr_attach_client(struct i2c_adapter *adapter, const struct i2c_client *client) { struct i2c_atr_chan *chan = adapter->algo_data; struct i2c_atr *atr = chan->atr; struct i2c_atr_alias_pair *c2a; u16 alias; int ret; ret = i2c_atr_reserve_alias(atr); if (ret < 0) return ret; alias = ret; c2a = kzalloc(sizeof(*c2a), GFP_KERNEL); if (!c2a) { ret = -ENOMEM; goto err_release_alias; } ret = atr->ops->attach_client(atr, chan->chan_id, client, alias); if (ret) goto err_free; dev_dbg(atr->dev, "chan%u: client 0x%02x mapped at alias 0x%02x (%s)\n", chan->chan_id, client->addr, alias, client->name); c2a->client = client; c2a->alias = alias; list_add(&c2a->node, &chan->alias_list); return 0; err_free: kfree(c2a); err_release_alias: i2c_atr_release_alias(atr, alias); return ret; } static void i2c_atr_detach_client(struct i2c_adapter *adapter, const struct i2c_client *client) { struct i2c_atr_chan *chan = adapter->algo_data; struct i2c_atr *atr = chan->atr; struct i2c_atr_alias_pair *c2a; atr->ops->detach_client(atr, chan->chan_id, client); c2a = i2c_atr_find_mapping_by_client(&chan->alias_list, client); if (!c2a) { /* This should never happen */ dev_warn(atr->dev, "Unable to find address mapping\n"); return; } i2c_atr_release_alias(atr, c2a->alias); dev_dbg(atr->dev, "chan%u: client 0x%02x unmapped from alias 0x%02x (%s)\n", chan->chan_id, client->addr, c2a->alias, client->name); list_del(&c2a->node); kfree(c2a); } static int i2c_atr_bus_notifier_call(struct notifier_block *nb, unsigned long event, void *device) { struct i2c_atr *atr = container_of(nb, struct i2c_atr, i2c_nb); struct device *dev = device; struct i2c_client *client; u32 chan_id; int ret; client = i2c_verify_client(dev); if (!client) return NOTIFY_DONE; /* Is the client in one of our adapters? 
*/ for (chan_id = 0; chan_id < atr->max_adapters; ++chan_id) { if (client->adapter == atr->adapter[chan_id]) break; } if (chan_id == atr->max_adapters) return NOTIFY_DONE; switch (event) { case BUS_NOTIFY_ADD_DEVICE: ret = i2c_atr_attach_client(client->adapter, client); if (ret) dev_err(atr->dev, "Failed to attach remote client '%s': %d\n", dev_name(dev), ret); break; case BUS_NOTIFY_DEL_DEVICE: i2c_atr_detach_client(client->adapter, client); break; default: break; } return NOTIFY_DONE; } static int i2c_atr_parse_alias_pool(struct i2c_atr *atr) { struct device *dev = atr->dev; unsigned long *alias_use_mask; size_t num_aliases; unsigned int i; u32 *aliases32; u16 *aliases16; int ret; ret = fwnode_property_count_u32(dev_fwnode(dev), "i2c-alias-pool"); if (ret < 0) { dev_err(dev, "Failed to count 'i2c-alias-pool' property: %d\n", ret); return ret; } num_aliases = ret; if (!num_aliases) return 0; aliases32 = kcalloc(num_aliases, sizeof(*aliases32), GFP_KERNEL); if (!aliases32) return -ENOMEM; ret = fwnode_property_read_u32_array(dev_fwnode(dev), "i2c-alias-pool", aliases32, num_aliases); if (ret < 0) { dev_err(dev, "Failed to read 'i2c-alias-pool' property: %d\n", ret); goto err_free_aliases32; } aliases16 = kcalloc(num_aliases, sizeof(*aliases16), GFP_KERNEL); if (!aliases16) { ret = -ENOMEM; goto err_free_aliases32; } for (i = 0; i < num_aliases; i++) { if (!(aliases32[i] & 0xffff0000)) { aliases16[i] = aliases32[i]; continue; } dev_err(dev, "Failed to parse 'i2c-alias-pool' property: I2C flags are not supported\n"); ret = -EINVAL; goto err_free_aliases16; } alias_use_mask = bitmap_zalloc(num_aliases, GFP_KERNEL); if (!alias_use_mask) { ret = -ENOMEM; goto err_free_aliases16; } kfree(aliases32); atr->num_aliases = num_aliases; atr->aliases = aliases16; atr->alias_use_mask = alias_use_mask; dev_dbg(dev, "i2c-alias-pool has %zu aliases", atr->num_aliases); return 0; err_free_aliases16: kfree(aliases16); err_free_aliases32: kfree(aliases32); return ret; } struct i2c_atr *i2c_atr_new(struct i2c_adapter *parent, struct device *dev, const struct i2c_atr_ops *ops, int max_adapters) { struct i2c_atr *atr; int ret; if (max_adapters > ATR_MAX_ADAPTERS) return ERR_PTR(-EINVAL); if (!ops || !ops->attach_client || !ops->detach_client) return ERR_PTR(-EINVAL); atr = kzalloc(struct_size(atr, adapter, max_adapters), GFP_KERNEL); if (!atr) return ERR_PTR(-ENOMEM); mutex_init(&atr->lock); spin_lock_init(&atr->alias_mask_lock); atr->parent = parent; atr->dev = dev; atr->ops = ops; atr->max_adapters = max_adapters; if (parent->algo->master_xfer) atr->algo.master_xfer = i2c_atr_master_xfer; if (parent->algo->smbus_xfer) atr->algo.smbus_xfer = i2c_atr_smbus_xfer; atr->algo.functionality = i2c_atr_functionality; ret = i2c_atr_parse_alias_pool(atr); if (ret) goto err_destroy_mutex; atr->i2c_nb.notifier_call = i2c_atr_bus_notifier_call; ret = bus_register_notifier(&i2c_bus_type, &atr->i2c_nb); if (ret) goto err_free_aliases; return atr; err_free_aliases: bitmap_free(atr->alias_use_mask); kfree(atr->aliases); err_destroy_mutex: mutex_destroy(&atr->lock); kfree(atr); return ERR_PTR(ret); } EXPORT_SYMBOL_NS_GPL(i2c_atr_new, I2C_ATR); void i2c_atr_delete(struct i2c_atr *atr) { unsigned int i; for (i = 0; i < atr->max_adapters; ++i) WARN_ON(atr->adapter[i]); bus_unregister_notifier(&i2c_bus_type, &atr->i2c_nb); bitmap_free(atr->alias_use_mask); kfree(atr->aliases); mutex_destroy(&atr->lock); kfree(atr); } EXPORT_SYMBOL_NS_GPL(i2c_atr_delete, I2C_ATR); int i2c_atr_add_adapter(struct i2c_atr *atr, u32 chan_id, struct 
device *adapter_parent, struct fwnode_handle *bus_handle) { struct i2c_adapter *parent = atr->parent; struct device *dev = atr->dev; struct i2c_atr_chan *chan; char symlink_name[ATR_MAX_SYMLINK_LEN]; int ret; if (chan_id >= atr->max_adapters) { dev_err(dev, "No room for more i2c-atr adapters\n"); return -EINVAL; } if (atr->adapter[chan_id]) { dev_err(dev, "Adapter %d already present\n", chan_id); return -EEXIST; } chan = kzalloc(sizeof(*chan), GFP_KERNEL); if (!chan) return -ENOMEM; if (!adapter_parent) adapter_parent = dev; chan->atr = atr; chan->chan_id = chan_id; INIT_LIST_HEAD(&chan->alias_list); mutex_init(&chan->orig_addrs_lock); snprintf(chan->adap.name, sizeof(chan->adap.name), "i2c-%d-atr-%d", i2c_adapter_id(parent), chan_id); chan->adap.owner = THIS_MODULE; chan->adap.algo = &atr->algo; chan->adap.algo_data = chan; chan->adap.dev.parent = adapter_parent; chan->adap.retries = parent->retries; chan->adap.timeout = parent->timeout; chan->adap.quirks = parent->quirks; chan->adap.lock_ops = &i2c_atr_lock_ops; if (bus_handle) { device_set_node(&chan->adap.dev, fwnode_handle_get(bus_handle)); } else { struct fwnode_handle *atr_node; struct fwnode_handle *child; u32 reg; atr_node = device_get_named_child_node(dev, "i2c-atr"); fwnode_for_each_child_node(atr_node, child) { ret = fwnode_property_read_u32(child, "reg", &reg); if (ret) continue; if (chan_id == reg) break; } device_set_node(&chan->adap.dev, child); fwnode_handle_put(atr_node); } atr->adapter[chan_id] = &chan->adap; ret = i2c_add_adapter(&chan->adap); if (ret) { dev_err(dev, "failed to add atr-adapter %u (error=%d)\n", chan_id, ret); goto err_fwnode_put; } snprintf(symlink_name, sizeof(symlink_name), "channel-%u", chan->chan_id); ret = sysfs_create_link(&chan->adap.dev.kobj, &dev->kobj, "atr_device"); if (ret) dev_warn(dev, "can't create symlink to atr device\n"); ret = sysfs_create_link(&dev->kobj, &chan->adap.dev.kobj, symlink_name); if (ret) dev_warn(dev, "can't create symlink for channel %u\n", chan_id); dev_dbg(dev, "Added ATR child bus %d\n", i2c_adapter_id(&chan->adap)); return 0; err_fwnode_put: fwnode_handle_put(dev_fwnode(&chan->adap.dev)); mutex_destroy(&chan->orig_addrs_lock); kfree(chan); return ret; } EXPORT_SYMBOL_NS_GPL(i2c_atr_add_adapter, I2C_ATR); void i2c_atr_del_adapter(struct i2c_atr *atr, u32 chan_id) { char symlink_name[ATR_MAX_SYMLINK_LEN]; struct i2c_adapter *adap; struct i2c_atr_chan *chan; struct fwnode_handle *fwnode; struct device *dev = atr->dev; adap = atr->adapter[chan_id]; if (!adap) return; chan = adap->algo_data; fwnode = dev_fwnode(&adap->dev); dev_dbg(dev, "Removing ATR child bus %d\n", i2c_adapter_id(adap)); snprintf(symlink_name, sizeof(symlink_name), "channel-%u", chan->chan_id); sysfs_remove_link(&dev->kobj, symlink_name); sysfs_remove_link(&chan->adap.dev.kobj, "atr_device"); i2c_del_adapter(adap); atr->adapter[chan_id] = NULL; fwnode_handle_put(fwnode); mutex_destroy(&chan->orig_addrs_lock); kfree(chan->orig_addrs); kfree(chan); } EXPORT_SYMBOL_NS_GPL(i2c_atr_del_adapter, I2C_ATR); void i2c_atr_set_driver_data(struct i2c_atr *atr, void *data) { atr->priv = data; } EXPORT_SYMBOL_NS_GPL(i2c_atr_set_driver_data, I2C_ATR); void *i2c_atr_get_driver_data(struct i2c_atr *atr) { return atr->priv; } EXPORT_SYMBOL_NS_GPL(i2c_atr_get_driver_data, I2C_ATR); MODULE_AUTHOR("Luca Ceresoli <[email protected]>"); MODULE_AUTHOR("Tomi Valkeinen <[email protected]>"); MODULE_DESCRIPTION("I2C Address Translator"); MODULE_LICENSE("GPL");
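/*
 * Example: a hedged sketch of how a deserializer-style driver might create
 * an ATR with a single child bus. All example_* names are hypothetical; a
 * real attach_client() would program the hardware translation table so that
 * transfers addressed to "alias" on the parent bus reach client->addr on
 * the child bus.
 */
static int example_attach_client(struct i2c_atr *atr, u32 chan_id,
				 const struct i2c_client *client, u16 alias)
{
	/* Program the hw alias table: alias (parent) -> client->addr */
	return 0;
}

static void example_detach_client(struct i2c_atr *atr, u32 chan_id,
				  const struct i2c_client *client)
{
	/* Remove the hw alias table entry */
}

static const struct i2c_atr_ops example_atr_ops = {
	.attach_client = example_attach_client,
	.detach_client = example_detach_client,
};

static int example_setup_atr(struct device *dev, struct i2c_adapter *parent)
{
	struct i2c_atr *atr;
	int ret;

	atr = i2c_atr_new(parent, dev, &example_atr_ops, 1);
	if (IS_ERR(atr))
		return PTR_ERR(atr);

	/* Child bus 0; with a NULL bus_handle the "i2c-atr" fwnode is used */
	ret = i2c_atr_add_adapter(atr, 0, NULL, NULL);
	if (ret)
		i2c_atr_delete(atr);

	return ret;
}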
linux-master
drivers/i2c/i2c-atr.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Linux I2C core ACPI support code * * Copyright (C) 2014 Intel Corp, Author: Lan Tianyu <[email protected]> */ #include <linux/acpi.h> #include <linux/device.h> #include <linux/err.h> #include <linux/i2c.h> #include <linux/list.h> #include <linux/module.h> #include <linux/slab.h> #include "i2c-core.h" struct i2c_acpi_handler_data { struct acpi_connection_info info; struct i2c_adapter *adapter; }; struct gsb_buffer { u8 status; u8 len; union { u16 wdata; u8 bdata; DECLARE_FLEX_ARRAY(u8, data); }; } __packed; struct i2c_acpi_lookup { struct i2c_board_info *info; acpi_handle adapter_handle; acpi_handle device_handle; acpi_handle search_handle; int n; int index; u32 speed; u32 min_speed; u32 force_speed; }; /** * i2c_acpi_get_i2c_resource - Gets I2cSerialBus resource if type matches * @ares: ACPI resource * @i2c: Pointer to I2cSerialBus resource will be returned here * * Checks if the given ACPI resource is of type I2cSerialBus. * In this case, returns a pointer to it to the caller. * * Returns true if resource type is of I2cSerialBus, otherwise false. */ bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares, struct acpi_resource_i2c_serialbus **i2c) { struct acpi_resource_i2c_serialbus *sb; if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) return false; sb = &ares->data.i2c_serial_bus; if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_I2C) return false; *i2c = sb; return true; } EXPORT_SYMBOL_GPL(i2c_acpi_get_i2c_resource); static int i2c_acpi_resource_count(struct acpi_resource *ares, void *data) { struct acpi_resource_i2c_serialbus *sb; int *count = data; if (i2c_acpi_get_i2c_resource(ares, &sb)) *count = *count + 1; return 1; } /** * i2c_acpi_client_count - Count the number of I2cSerialBus resources * @adev: ACPI device * * Returns the number of I2cSerialBus resources in the ACPI-device's * resource-list; or a negative error code. */ int i2c_acpi_client_count(struct acpi_device *adev) { int ret, count = 0; LIST_HEAD(r); ret = acpi_dev_get_resources(adev, &r, i2c_acpi_resource_count, &count); if (ret < 0) return ret; acpi_dev_free_resource_list(&r); return count; } EXPORT_SYMBOL_GPL(i2c_acpi_client_count); static int i2c_acpi_fill_info(struct acpi_resource *ares, void *data) { struct i2c_acpi_lookup *lookup = data; struct i2c_board_info *info = lookup->info; struct acpi_resource_i2c_serialbus *sb; acpi_status status; if (info->addr || !i2c_acpi_get_i2c_resource(ares, &sb)) return 1; if (lookup->index != -1 && lookup->n++ != lookup->index) return 1; status = acpi_get_handle(lookup->device_handle, sb->resource_source.string_ptr, &lookup->adapter_handle); if (ACPI_FAILURE(status)) return 1; info->addr = sb->slave_address; lookup->speed = sb->connection_speed; if (sb->access_mode == ACPI_I2C_10BIT_MODE) info->flags |= I2C_CLIENT_TEN; return 1; } static const struct acpi_device_id i2c_acpi_ignored_device_ids[] = { /* * ACPI video acpi_devices, which are handled by the acpi-video driver * sometimes contain a SERIAL_TYPE_I2C ACPI resource, ignore these. 
*/ { ACPI_VIDEO_HID, 0 }, {} }; struct i2c_acpi_irq_context { int irq; bool wake_capable; }; static int i2c_acpi_do_lookup(struct acpi_device *adev, struct i2c_acpi_lookup *lookup) { struct i2c_board_info *info = lookup->info; struct list_head resource_list; int ret; if (acpi_bus_get_status(adev)) return -EINVAL; if (!acpi_dev_ready_for_enumeration(adev)) return -ENODEV; if (acpi_match_device_ids(adev, i2c_acpi_ignored_device_ids) == 0) return -ENODEV; memset(info, 0, sizeof(*info)); lookup->device_handle = acpi_device_handle(adev); /* Look up for I2cSerialBus resource */ INIT_LIST_HEAD(&resource_list); ret = acpi_dev_get_resources(adev, &resource_list, i2c_acpi_fill_info, lookup); acpi_dev_free_resource_list(&resource_list); if (ret < 0 || !info->addr) return -EINVAL; return 0; } static int i2c_acpi_add_irq_resource(struct acpi_resource *ares, void *data) { struct i2c_acpi_irq_context *irq_ctx = data; struct resource r; if (irq_ctx->irq > 0) return 1; if (!acpi_dev_resource_interrupt(ares, 0, &r)) return 1; irq_ctx->irq = i2c_dev_irq_from_resources(&r, 1); irq_ctx->wake_capable = r.flags & IORESOURCE_IRQ_WAKECAPABLE; return 1; /* No need to add resource to the list */ } /** * i2c_acpi_get_irq - get device IRQ number from ACPI * @client: Pointer to the I2C client device * @wake_capable: Set to true if the IRQ is wake capable * * Find the IRQ number used by a specific client device. * * Return: The IRQ number or an error code. */ int i2c_acpi_get_irq(struct i2c_client *client, bool *wake_capable) { struct acpi_device *adev = ACPI_COMPANION(&client->dev); struct list_head resource_list; struct i2c_acpi_irq_context irq_ctx = { .irq = -ENOENT, }; int ret; INIT_LIST_HEAD(&resource_list); ret = acpi_dev_get_resources(adev, &resource_list, i2c_acpi_add_irq_resource, &irq_ctx); if (ret < 0) return ret; acpi_dev_free_resource_list(&resource_list); if (irq_ctx.irq == -ENOENT) irq_ctx.irq = acpi_dev_gpio_irq_wake_get(adev, 0, &irq_ctx.wake_capable); if (irq_ctx.irq < 0) return irq_ctx.irq; if (wake_capable) *wake_capable = irq_ctx.wake_capable; return irq_ctx.irq; } static int i2c_acpi_get_info(struct acpi_device *adev, struct i2c_board_info *info, struct i2c_adapter *adapter, acpi_handle *adapter_handle) { struct i2c_acpi_lookup lookup; int ret; memset(&lookup, 0, sizeof(lookup)); lookup.info = info; lookup.index = -1; if (acpi_device_enumerated(adev)) return -EINVAL; ret = i2c_acpi_do_lookup(adev, &lookup); if (ret) return ret; if (adapter) { /* The adapter must match the one in I2cSerialBus() connector */ if (ACPI_HANDLE(&adapter->dev) != lookup.adapter_handle) return -ENODEV; } else { struct acpi_device *adapter_adev; /* The adapter must be present */ adapter_adev = acpi_fetch_acpi_dev(lookup.adapter_handle); if (!adapter_adev) return -ENODEV; if (acpi_bus_get_status(adapter_adev) || !adapter_adev->status.present) return -ENODEV; } info->fwnode = acpi_fwnode_handle(adev); if (adapter_handle) *adapter_handle = lookup.adapter_handle; acpi_set_modalias(adev, dev_name(&adev->dev), info->type, sizeof(info->type)); return 0; } static void i2c_acpi_register_device(struct i2c_adapter *adapter, struct acpi_device *adev, struct i2c_board_info *info) { /* * Skip registration on boards where the ACPI tables are * known to contain bogus I2C devices. 
*/ if (acpi_quirk_skip_i2c_client_enumeration(adev)) return; adev->power.flags.ignore_parent = true; acpi_device_set_enumerated(adev); if (IS_ERR(i2c_new_client_device(adapter, info))) adev->power.flags.ignore_parent = false; } static acpi_status i2c_acpi_add_device(acpi_handle handle, u32 level, void *data, void **return_value) { struct i2c_adapter *adapter = data; struct acpi_device *adev = acpi_fetch_acpi_dev(handle); struct i2c_board_info info; if (!adev || i2c_acpi_get_info(adev, &info, adapter, NULL)) return AE_OK; i2c_acpi_register_device(adapter, adev, &info); return AE_OK; } #define I2C_ACPI_MAX_SCAN_DEPTH 32 /** * i2c_acpi_register_devices - enumerate I2C slave devices behind adapter * @adap: pointer to adapter * * Enumerate all I2C slave devices behind this adapter by walking the ACPI * namespace. When a device is found it will be added to the Linux device * model and bound to the corresponding ACPI handle. */ void i2c_acpi_register_devices(struct i2c_adapter *adap) { struct acpi_device *adev; acpi_status status; if (!has_acpi_companion(&adap->dev)) return; status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, I2C_ACPI_MAX_SCAN_DEPTH, i2c_acpi_add_device, NULL, adap, NULL); if (ACPI_FAILURE(status)) dev_warn(&adap->dev, "failed to enumerate I2C slaves\n"); if (!adap->dev.parent) return; adev = ACPI_COMPANION(adap->dev.parent); if (!adev) return; acpi_dev_clear_dependencies(adev); } static const struct acpi_device_id i2c_acpi_force_400khz_device_ids[] = { /* * These Silead touchscreen controllers only work at 400KHz, for * some reason they do not work at 100KHz. On some devices the ACPI * tables list another device at their bus as only being capable * of 100KHz, testing has shown that these other devices work fine * at 400KHz (as can be expected of any recent i2c hw) so we force * the speed of the bus to 400 KHz if a Silead device is present. */ { "MSSL1680", 0 }, {} }; static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level, void *data, void **return_value) { struct i2c_acpi_lookup *lookup = data; struct acpi_device *adev = acpi_fetch_acpi_dev(handle); if (!adev || i2c_acpi_do_lookup(adev, lookup)) return AE_OK; if (lookup->search_handle != lookup->adapter_handle) return AE_OK; if (lookup->speed <= lookup->min_speed) lookup->min_speed = lookup->speed; if (acpi_match_device_ids(adev, i2c_acpi_force_400khz_device_ids) == 0) lookup->force_speed = I2C_MAX_FAST_MODE_FREQ; return AE_OK; } /** * i2c_acpi_find_bus_speed - find I2C bus speed from ACPI * @dev: The device owning the bus * * Find the I2C bus speed by walking the ACPI namespace for all I2C slaves * devices connected to this bus and use the speed of slowest device. 
* * Returns the speed in Hz or zero */ u32 i2c_acpi_find_bus_speed(struct device *dev) { struct i2c_acpi_lookup lookup; struct i2c_board_info dummy; acpi_status status; if (!has_acpi_companion(dev)) return 0; memset(&lookup, 0, sizeof(lookup)); lookup.search_handle = ACPI_HANDLE(dev); lookup.min_speed = UINT_MAX; lookup.info = &dummy; lookup.index = -1; status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, I2C_ACPI_MAX_SCAN_DEPTH, i2c_acpi_lookup_speed, NULL, &lookup, NULL); if (ACPI_FAILURE(status)) { dev_warn(dev, "unable to find I2C bus speed from ACPI\n"); return 0; } if (lookup.force_speed) { if (lookup.force_speed != lookup.min_speed) dev_warn(dev, FW_BUG "DSDT uses known not-working I2C bus speed %d, forcing it to %d\n", lookup.min_speed, lookup.force_speed); return lookup.force_speed; } else if (lookup.min_speed != UINT_MAX) { return lookup.min_speed; } else { return 0; } } EXPORT_SYMBOL_GPL(i2c_acpi_find_bus_speed); struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle) { struct i2c_adapter *adapter; struct device *dev; dev = bus_find_device(&i2c_bus_type, NULL, handle, device_match_acpi_handle); if (!dev) return NULL; adapter = i2c_verify_adapter(dev); if (!adapter) put_device(dev); return adapter; } EXPORT_SYMBOL_GPL(i2c_acpi_find_adapter_by_handle); static struct i2c_client *i2c_acpi_find_client_by_adev(struct acpi_device *adev) { return i2c_find_device_by_fwnode(acpi_fwnode_handle(adev)); } static int i2c_acpi_notify(struct notifier_block *nb, unsigned long value, void *arg) { struct acpi_device *adev = arg; struct i2c_board_info info; acpi_handle adapter_handle; struct i2c_adapter *adapter; struct i2c_client *client; switch (value) { case ACPI_RECONFIG_DEVICE_ADD: if (i2c_acpi_get_info(adev, &info, NULL, &adapter_handle)) break; adapter = i2c_acpi_find_adapter_by_handle(adapter_handle); if (!adapter) break; i2c_acpi_register_device(adapter, adev, &info); put_device(&adapter->dev); break; case ACPI_RECONFIG_DEVICE_REMOVE: if (!acpi_device_enumerated(adev)) break; client = i2c_acpi_find_client_by_adev(adev); if (!client) break; i2c_unregister_device(client); put_device(&client->dev); break; } return NOTIFY_OK; } struct notifier_block i2c_acpi_notifier = { .notifier_call = i2c_acpi_notify, }; /** * i2c_acpi_new_device_by_fwnode - Create i2c-client for the Nth I2cSerialBus resource * @fwnode: fwnode with the ACPI resources to get the client from * @index: Index of ACPI resource to get * @info: describes the I2C device; note this is modified (addr gets set) * Context: can sleep * * By default the i2c subsys creates an i2c-client for the first I2cSerialBus * resource of an acpi_device, but some acpi_devices have multiple I2cSerialBus * resources, in that case this function can be used to create an i2c-client * for other I2cSerialBus resources in the Current Resource Settings table. * * Also see i2c_new_client_device, which this function calls to create the * i2c-client. * * Returns a pointer to the new i2c-client, or error pointer in case of failure. * Specifically, -EPROBE_DEFER is returned if the adapter is not found. 
*/ struct i2c_client *i2c_acpi_new_device_by_fwnode(struct fwnode_handle *fwnode, int index, struct i2c_board_info *info) { struct i2c_acpi_lookup lookup; struct i2c_adapter *adapter; struct acpi_device *adev; LIST_HEAD(resource_list); int ret; adev = to_acpi_device_node(fwnode); if (!adev) return ERR_PTR(-ENODEV); memset(&lookup, 0, sizeof(lookup)); lookup.info = info; lookup.device_handle = acpi_device_handle(adev); lookup.index = index; ret = acpi_dev_get_resources(adev, &resource_list, i2c_acpi_fill_info, &lookup); if (ret < 0) return ERR_PTR(ret); acpi_dev_free_resource_list(&resource_list); if (!info->addr) return ERR_PTR(-EADDRNOTAVAIL); adapter = i2c_acpi_find_adapter_by_handle(lookup.adapter_handle); if (!adapter) return ERR_PTR(-EPROBE_DEFER); return i2c_new_client_device(adapter, info); } EXPORT_SYMBOL_GPL(i2c_acpi_new_device_by_fwnode); bool i2c_acpi_waive_d0_probe(struct device *dev) { struct i2c_driver *driver = to_i2c_driver(dev->driver); struct acpi_device *adev = ACPI_COMPANION(dev); return driver->flags & I2C_DRV_ACPI_WAIVE_D0_PROBE && adev && adev->power.state_for_enumeration >= adev->power.state; } EXPORT_SYMBOL_GPL(i2c_acpi_waive_d0_probe); #ifdef CONFIG_ACPI_I2C_OPREGION static int acpi_gsb_i2c_read_bytes(struct i2c_client *client, u8 cmd, u8 *data, u8 data_len) { struct i2c_msg msgs[2]; int ret; u8 *buffer; buffer = kzalloc(data_len, GFP_KERNEL); if (!buffer) return AE_NO_MEMORY; msgs[0].addr = client->addr; msgs[0].flags = client->flags; msgs[0].len = 1; msgs[0].buf = &cmd; msgs[1].addr = client->addr; msgs[1].flags = client->flags | I2C_M_RD; msgs[1].len = data_len; msgs[1].buf = buffer; ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); if (ret < 0) { /* Getting a NACK is unfortunately normal with some DSTDs */ if (ret == -EREMOTEIO) dev_dbg(&client->adapter->dev, "i2c read %d bytes from client@%#x starting at reg %#x failed, error: %d\n", data_len, client->addr, cmd, ret); else dev_err(&client->adapter->dev, "i2c read %d bytes from client@%#x starting at reg %#x failed, error: %d\n", data_len, client->addr, cmd, ret); /* 2 transfers must have completed successfully */ } else if (ret == 2) { memcpy(data, buffer, data_len); ret = 0; } else { ret = -EIO; } kfree(buffer); return ret; } static int acpi_gsb_i2c_write_bytes(struct i2c_client *client, u8 cmd, u8 *data, u8 data_len) { struct i2c_msg msgs[1]; u8 *buffer; int ret = AE_OK; buffer = kzalloc(data_len + 1, GFP_KERNEL); if (!buffer) return AE_NO_MEMORY; buffer[0] = cmd; memcpy(buffer + 1, data, data_len); msgs[0].addr = client->addr; msgs[0].flags = client->flags; msgs[0].len = data_len + 1; msgs[0].buf = buffer; ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); kfree(buffer); if (ret < 0) { dev_err(&client->adapter->dev, "i2c write failed: %d\n", ret); return ret; } /* 1 transfer must have completed successfully */ return (ret == 1) ? 
0 : -EIO; } static acpi_status i2c_acpi_space_handler(u32 function, acpi_physical_address command, u32 bits, u64 *value64, void *handler_context, void *region_context) { struct gsb_buffer *gsb = (struct gsb_buffer *)value64; struct i2c_acpi_handler_data *data = handler_context; struct acpi_connection_info *info = &data->info; struct acpi_resource_i2c_serialbus *sb; struct i2c_adapter *adapter = data->adapter; struct i2c_client *client; struct acpi_resource *ares; u32 accessor_type = function >> 16; u8 action = function & ACPI_IO_MASK; acpi_status ret; int status; ret = acpi_buffer_to_resource(info->connection, info->length, &ares); if (ACPI_FAILURE(ret)) return ret; client = kzalloc(sizeof(*client), GFP_KERNEL); if (!client) { ret = AE_NO_MEMORY; goto err; } if (!value64 || !i2c_acpi_get_i2c_resource(ares, &sb)) { ret = AE_BAD_PARAMETER; goto err; } client->adapter = adapter; client->addr = sb->slave_address; if (sb->access_mode == ACPI_I2C_10BIT_MODE) client->flags |= I2C_CLIENT_TEN; switch (accessor_type) { case ACPI_GSB_ACCESS_ATTRIB_SEND_RCV: if (action == ACPI_READ) { status = i2c_smbus_read_byte(client); if (status >= 0) { gsb->bdata = status; status = 0; } } else { status = i2c_smbus_write_byte(client, gsb->bdata); } break; case ACPI_GSB_ACCESS_ATTRIB_BYTE: if (action == ACPI_READ) { status = i2c_smbus_read_byte_data(client, command); if (status >= 0) { gsb->bdata = status; status = 0; } } else { status = i2c_smbus_write_byte_data(client, command, gsb->bdata); } break; case ACPI_GSB_ACCESS_ATTRIB_WORD: if (action == ACPI_READ) { status = i2c_smbus_read_word_data(client, command); if (status >= 0) { gsb->wdata = status; status = 0; } } else { status = i2c_smbus_write_word_data(client, command, gsb->wdata); } break; case ACPI_GSB_ACCESS_ATTRIB_BLOCK: if (action == ACPI_READ) { status = i2c_smbus_read_block_data(client, command, gsb->data); if (status >= 0) { gsb->len = status; status = 0; } } else { status = i2c_smbus_write_block_data(client, command, gsb->len, gsb->data); } break; case ACPI_GSB_ACCESS_ATTRIB_MULTIBYTE: if (action == ACPI_READ) { status = acpi_gsb_i2c_read_bytes(client, command, gsb->data, info->access_length); } else { status = acpi_gsb_i2c_write_bytes(client, command, gsb->data, info->access_length); } break; default: dev_warn(&adapter->dev, "protocol 0x%02x not supported for client 0x%02x\n", accessor_type, client->addr); ret = AE_BAD_PARAMETER; goto err; } gsb->status = status; err: kfree(client); ACPI_FREE(ares); return ret; } int i2c_acpi_install_space_handler(struct i2c_adapter *adapter) { acpi_handle handle; struct i2c_acpi_handler_data *data; acpi_status status; if (!adapter->dev.parent) return -ENODEV; handle = ACPI_HANDLE(adapter->dev.parent); if (!handle) return -ENODEV; data = kzalloc(sizeof(struct i2c_acpi_handler_data), GFP_KERNEL); if (!data) return -ENOMEM; data->adapter = adapter; status = acpi_bus_attach_private_data(handle, (void *)data); if (ACPI_FAILURE(status)) { kfree(data); return -ENOMEM; } status = acpi_install_address_space_handler(handle, ACPI_ADR_SPACE_GSBUS, &i2c_acpi_space_handler, NULL, data); if (ACPI_FAILURE(status)) { dev_err(&adapter->dev, "Error installing i2c space handler\n"); acpi_bus_detach_private_data(handle); kfree(data); return -ENOMEM; } return 0; } void i2c_acpi_remove_space_handler(struct i2c_adapter *adapter) { acpi_handle handle; struct i2c_acpi_handler_data *data; acpi_status status; if (!adapter->dev.parent) return; handle = ACPI_HANDLE(adapter->dev.parent); if (!handle) return; 
acpi_remove_address_space_handler(handle, ACPI_ADR_SPACE_GSBUS, &i2c_acpi_space_handler); status = acpi_bus_get_private_data(handle, (void **)&data); if (ACPI_SUCCESS(status)) kfree(data); acpi_bus_detach_private_data(handle); } #endif /* CONFIG_ACPI_I2C_OPREGION */
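/*
 * Example: a hedged sketch of a driver instantiating a client for the
 * second I2cSerialBus resource (index 1) of its ACPI companion. The type
 * string "example-aux" is hypothetical; info.addr is filled in from the
 * ACPI resource by the helper itself.
 */
static struct i2c_client *example_new_aux_client(struct device *dev)
{
	struct i2c_board_info info = {};

	strscpy(info.type, "example-aux", sizeof(info.type));

	return i2c_acpi_new_device_by_fwnode(dev_fwnode(dev), 1, &info);
}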
linux-master
drivers/i2c/i2c-core-acpi.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Linux I2C core OF support code * * Copyright (C) 2008 Jochen Friedrich <[email protected]> * based on a previous patch from Jon Smirl <[email protected]> * * Copyright (C) 2013, 2018 Wolfram Sang <[email protected]> */ #include <dt-bindings/i2c/i2c.h> #include <linux/device.h> #include <linux/err.h> #include <linux/i2c.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/sysfs.h> #include "i2c-core.h" int of_i2c_get_board_info(struct device *dev, struct device_node *node, struct i2c_board_info *info) { u32 addr; int ret; memset(info, 0, sizeof(*info)); if (of_alias_from_compatible(node, info->type, sizeof(info->type)) < 0) { dev_err(dev, "of_i2c: modalias failure on %pOF\n", node); return -EINVAL; } ret = of_property_read_u32(node, "reg", &addr); if (ret) { dev_err(dev, "of_i2c: invalid reg on %pOF\n", node); return ret; } if (addr & I2C_TEN_BIT_ADDRESS) { addr &= ~I2C_TEN_BIT_ADDRESS; info->flags |= I2C_CLIENT_TEN; } if (addr & I2C_OWN_SLAVE_ADDRESS) { addr &= ~I2C_OWN_SLAVE_ADDRESS; info->flags |= I2C_CLIENT_SLAVE; } info->addr = addr; info->of_node = node; info->fwnode = of_fwnode_handle(node); if (of_property_read_bool(node, "host-notify")) info->flags |= I2C_CLIENT_HOST_NOTIFY; if (of_property_read_bool(node, "wakeup-source")) info->flags |= I2C_CLIENT_WAKE; return 0; } EXPORT_SYMBOL_GPL(of_i2c_get_board_info); static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap, struct device_node *node) { struct i2c_client *client; struct i2c_board_info info; int ret; dev_dbg(&adap->dev, "of_i2c: register %pOF\n", node); ret = of_i2c_get_board_info(&adap->dev, node, &info); if (ret) return ERR_PTR(ret); client = i2c_new_client_device(adap, &info); if (IS_ERR(client)) dev_err(&adap->dev, "of_i2c: Failure registering %pOF\n", node); return client; } void of_i2c_register_devices(struct i2c_adapter *adap) { struct device_node *bus, *node; struct i2c_client *client; /* Only register child devices if the adapter has a node pointer set */ if (!adap->dev.of_node) return; dev_dbg(&adap->dev, "of_i2c: walking child nodes\n"); bus = of_get_child_by_name(adap->dev.of_node, "i2c-bus"); if (!bus) bus = of_node_get(adap->dev.of_node); for_each_available_child_of_node(bus, node) { if (of_node_test_and_set_flag(node, OF_POPULATED)) continue; client = of_i2c_register_device(adap, node); if (IS_ERR(client)) { dev_err(&adap->dev, "Failed to create I2C device for %pOF\n", node); of_node_clear_flag(node, OF_POPULATED); } } of_node_put(bus); } static const struct of_device_id* i2c_of_match_device_sysfs(const struct of_device_id *matches, struct i2c_client *client) { const char *name; for (; matches->compatible[0]; matches++) { /* * Adding devices through the i2c sysfs interface provides us * a string to match which may be compatible with the device * tree compatible strings, however with no actual of_node the * of_match_device() will not match */ if (sysfs_streq(client->name, matches->compatible)) return matches; name = strchr(matches->compatible, ','); if (!name) name = matches->compatible; else name++; if (sysfs_streq(client->name, name)) return matches; } return NULL; } const struct of_device_id *i2c_of_match_device(const struct of_device_id *matches, struct i2c_client *client) { const struct of_device_id *match; if (!(client && matches)) return NULL; match = of_match_device(matches, &client->dev); if (match) return match; return i2c_of_match_device_sysfs(matches, client); } 
EXPORT_SYMBOL_GPL(i2c_of_match_device); #if IS_ENABLED(CONFIG_OF_DYNAMIC) static int of_i2c_notify(struct notifier_block *nb, unsigned long action, void *arg) { struct of_reconfig_data *rd = arg; struct i2c_adapter *adap; struct i2c_client *client; switch (of_reconfig_get_state_change(action, rd)) { case OF_RECONFIG_CHANGE_ADD: adap = of_find_i2c_adapter_by_node(rd->dn->parent); if (adap == NULL) return NOTIFY_OK; /* not for us */ if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) { put_device(&adap->dev); return NOTIFY_OK; } /* * Clear the flag before adding the device so that fw_devlink * doesn't skip adding consumers to this device. */ rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE; client = of_i2c_register_device(adap, rd->dn); if (IS_ERR(client)) { dev_err(&adap->dev, "failed to create client for '%pOF'\n", rd->dn); put_device(&adap->dev); of_node_clear_flag(rd->dn, OF_POPULATED); return notifier_from_errno(PTR_ERR(client)); } put_device(&adap->dev); break; case OF_RECONFIG_CHANGE_REMOVE: /* already depopulated? */ if (!of_node_check_flag(rd->dn, OF_POPULATED)) return NOTIFY_OK; /* find our device by node */ client = of_find_i2c_device_by_node(rd->dn); if (client == NULL) return NOTIFY_OK; /* no? not meant for us */ /* unregister takes one ref away */ i2c_unregister_device(client); /* and put the reference of the find */ put_device(&client->dev); break; } return NOTIFY_OK; } struct notifier_block i2c_of_notifier = { .notifier_call = of_i2c_notify, }; #endif /* CONFIG_OF_DYNAMIC */
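/*
 * Example: a hedged sketch of the device-tree shape consumed by
 * of_i2c_register_devices() above; the controller address and client node
 * are hypothetical. The "reg" value may carry the I2C_TEN_BIT_ADDRESS or
 * I2C_OWN_SLAVE_ADDRESS flags from <dt-bindings/i2c/i2c.h>, which
 * of_i2c_get_board_info() strips into I2C_CLIENT_TEN / I2C_CLIENT_SLAVE.
 *
 *   i2c@40005400 {
 *           #address-cells = <1>;
 *           #size-cells = <0>;
 *
 *           eeprom@50 {
 *                   compatible = "atmel,24c02";
 *                   reg = <0x50>;
 *                   wakeup-source;
 *           };
 *   };
 */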
linux-master
drivers/i2c/i2c-core-of.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * i2c-smbus.c - SMBus extensions to the I2C protocol * * Copyright (C) 2008 David Brownell * Copyright (C) 2010-2019 Jean Delvare <[email protected]> */ #include <linux/device.h> #include <linux/dmi.h> #include <linux/i2c.h> #include <linux/i2c-smbus.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/property.h> #include <linux/slab.h> #include <linux/workqueue.h> struct i2c_smbus_alert { struct work_struct alert; struct i2c_client *ara; /* Alert response address */ }; struct alert_data { unsigned short addr; enum i2c_alert_protocol type; unsigned int data; }; /* If this is the alerting device, notify its driver */ static int smbus_do_alert(struct device *dev, void *addrp) { struct i2c_client *client = i2c_verify_client(dev); struct alert_data *data = addrp; struct i2c_driver *driver; if (!client || client->addr != data->addr) return 0; if (client->flags & I2C_CLIENT_TEN) return 0; /* * Drivers should either disable alerts, or provide at least * a minimal handler. Lock so the driver won't change. */ device_lock(dev); if (client->dev.driver) { driver = to_i2c_driver(client->dev.driver); if (driver->alert) driver->alert(client, data->type, data->data); else dev_warn(&client->dev, "no driver alert()!\n"); } else dev_dbg(&client->dev, "alert with no driver\n"); device_unlock(dev); /* Stop iterating after we find the device */ return -EBUSY; } /* * The alert IRQ handler needs to hand work off to a task which can issue * SMBus calls, because those sleeping calls can't be made in IRQ context. */ static irqreturn_t smbus_alert(int irq, void *d) { struct i2c_smbus_alert *alert = d; struct i2c_client *ara; ara = alert->ara; for (;;) { s32 status; struct alert_data data; /* * Devices with pending alerts reply in address order, low * to high, because of slave transmit arbitration. After * responding, an SMBus device stops asserting SMBALERT#. * * Note that SMBus 2.0 reserves 10-bit addresses for future * use. We neither handle them, nor try to use PEC here. 
*/ status = i2c_smbus_read_byte(ara); if (status < 0) break; data.data = status & 1; data.addr = status >> 1; data.type = I2C_PROTOCOL_SMBUS_ALERT; dev_dbg(&ara->dev, "SMBALERT# from dev 0x%02x, flag %d\n", data.addr, data.data); /* Notify driver for the device which issued the alert */ device_for_each_child(&ara->adapter->dev, &data, smbus_do_alert); } return IRQ_HANDLED; } static void smbalert_work(struct work_struct *work) { struct i2c_smbus_alert *alert; alert = container_of(work, struct i2c_smbus_alert, alert); smbus_alert(0, alert); } /* Setup SMBALERT# infrastructure */ static int smbalert_probe(struct i2c_client *ara) { struct i2c_smbus_alert_setup *setup = dev_get_platdata(&ara->dev); struct i2c_smbus_alert *alert; struct i2c_adapter *adapter = ara->adapter; int res, irq; alert = devm_kzalloc(&ara->dev, sizeof(struct i2c_smbus_alert), GFP_KERNEL); if (!alert) return -ENOMEM; if (setup) { irq = setup->irq; } else { irq = fwnode_irq_get_byname(dev_fwnode(adapter->dev.parent), "smbus_alert"); if (irq <= 0) return irq; } INIT_WORK(&alert->alert, smbalert_work); alert->ara = ara; if (irq > 0) { res = devm_request_threaded_irq(&ara->dev, irq, NULL, smbus_alert, IRQF_SHARED | IRQF_ONESHOT, "smbus_alert", alert); if (res) return res; } i2c_set_clientdata(ara, alert); dev_info(&adapter->dev, "supports SMBALERT#\n"); return 0; } /* IRQ and memory resources are managed so they are freed automatically */ static void smbalert_remove(struct i2c_client *ara) { struct i2c_smbus_alert *alert = i2c_get_clientdata(ara); cancel_work_sync(&alert->alert); } static const struct i2c_device_id smbalert_ids[] = { { "smbus_alert", 0 }, { /* LIST END */ } }; MODULE_DEVICE_TABLE(i2c, smbalert_ids); static struct i2c_driver smbalert_driver = { .driver = { .name = "smbus_alert", }, .probe = smbalert_probe, .remove = smbalert_remove, .id_table = smbalert_ids, }; /** * i2c_handle_smbus_alert - Handle an SMBus alert * @ara: the ARA client on the relevant adapter * Context: can't sleep * * Helper function to be called from an I2C bus driver's interrupt * handler. It will schedule the alert work, in turn calling the * corresponding I2C device driver's alert function. * * It is assumed that ara is a valid i2c client previously returned by * i2c_new_smbus_alert_device(). */ int i2c_handle_smbus_alert(struct i2c_client *ara) { struct i2c_smbus_alert *alert = i2c_get_clientdata(ara); return schedule_work(&alert->alert); } EXPORT_SYMBOL_GPL(i2c_handle_smbus_alert); module_i2c_driver(smbalert_driver); #if IS_ENABLED(CONFIG_I2C_SLAVE) #define SMBUS_HOST_NOTIFY_LEN 3 struct i2c_slave_host_notify_status { u8 index; u8 addr; }; static int i2c_slave_host_notify_cb(struct i2c_client *client, enum i2c_slave_event event, u8 *val) { struct i2c_slave_host_notify_status *status = client->dev.platform_data; switch (event) { case I2C_SLAVE_WRITE_RECEIVED: /* We only retrieve the first byte received (addr) * since there is currently no support to retrieve the data * parameter from the client. 
*/ if (status->index == 0) status->addr = *val; if (status->index < U8_MAX) status->index++; break; case I2C_SLAVE_STOP: if (status->index == SMBUS_HOST_NOTIFY_LEN) i2c_handle_smbus_host_notify(client->adapter, status->addr); fallthrough; case I2C_SLAVE_WRITE_REQUESTED: status->index = 0; break; case I2C_SLAVE_READ_REQUESTED: case I2C_SLAVE_READ_PROCESSED: *val = 0xff; break; } return 0; } /** * i2c_new_slave_host_notify_device - get a client for SMBus host-notify support * @adapter: the target adapter * Context: can sleep * * Setup handling of the SMBus host-notify protocol on a given I2C bus segment. * * Handling is done by creating a device and its callback and handling data * received via the SMBus host-notify address (0x8) * * This returns the client, which should be ultimately freed using * i2c_free_slave_host_notify_device(); or an ERRPTR to indicate an error. */ struct i2c_client *i2c_new_slave_host_notify_device(struct i2c_adapter *adapter) { struct i2c_board_info host_notify_board_info = { I2C_BOARD_INFO("smbus_host_notify", 0x08), .flags = I2C_CLIENT_SLAVE, }; struct i2c_slave_host_notify_status *status; struct i2c_client *client; int ret; status = kzalloc(sizeof(struct i2c_slave_host_notify_status), GFP_KERNEL); if (!status) return ERR_PTR(-ENOMEM); host_notify_board_info.platform_data = status; client = i2c_new_client_device(adapter, &host_notify_board_info); if (IS_ERR(client)) { kfree(status); return client; } ret = i2c_slave_register(client, i2c_slave_host_notify_cb); if (ret) { i2c_unregister_device(client); kfree(status); return ERR_PTR(ret); } return client; } EXPORT_SYMBOL_GPL(i2c_new_slave_host_notify_device); /** * i2c_free_slave_host_notify_device - free the client for SMBus host-notify * support * @client: the client to free * Context: can sleep * * Free the i2c_client allocated via i2c_new_slave_host_notify_device */ void i2c_free_slave_host_notify_device(struct i2c_client *client) { if (IS_ERR_OR_NULL(client)) return; i2c_slave_unregister(client); kfree(client->dev.platform_data); i2c_unregister_device(client); } EXPORT_SYMBOL_GPL(i2c_free_slave_host_notify_device); #endif /* * SPD is not part of SMBus but we include it here for convenience as the * target systems are the same. 
* Restrictions to automatic SPD instantiation: * - Only works if all filled slots have the same memory type * - Only works for DDR2, DDR3 and DDR4 for now * - Only works on systems with 1 to 4 memory slots */ #if IS_ENABLED(CONFIG_DMI) void i2c_register_spd(struct i2c_adapter *adap) { int n, slot_count = 0, dimm_count = 0; u16 handle; u8 common_mem_type = 0x0, mem_type; u64 mem_size; const char *name; while ((handle = dmi_memdev_handle(slot_count)) != 0xffff) { slot_count++; /* Skip empty slots */ mem_size = dmi_memdev_size(handle); if (!mem_size) continue; /* Skip undefined memory type */ mem_type = dmi_memdev_type(handle); if (mem_type <= 0x02) /* Invalid, Other, Unknown */ continue; if (!common_mem_type) { /* First filled slot */ common_mem_type = mem_type; } else { /* Check that all filled slots have the same type */ if (mem_type != common_mem_type) { dev_warn(&adap->dev, "Different memory types mixed, not instantiating SPD\n"); return; } } dimm_count++; } /* No useful DMI data, bail out */ if (!dimm_count) return; dev_info(&adap->dev, "%d/%d memory slots populated (from DMI)\n", dimm_count, slot_count); if (slot_count > 4) { dev_warn(&adap->dev, "Systems with more than 4 memory slots not supported yet, not instantiating SPD\n"); return; } /* * Memory types could be found at section 7.18.2 (Memory Device — Type), table 78 * https://www.dmtf.org/sites/default/files/standards/documents/DSP0134_3.6.0.pdf */ switch (common_mem_type) { case 0x12: /* DDR */ case 0x13: /* DDR2 */ case 0x18: /* DDR3 */ case 0x1B: /* LPDDR */ case 0x1C: /* LPDDR2 */ case 0x1D: /* LPDDR3 */ name = "spd"; break; case 0x1A: /* DDR4 */ case 0x1E: /* LPDDR4 */ name = "ee1004"; break; default: dev_info(&adap->dev, "Memory type 0x%02x not supported yet, not instantiating SPD\n", common_mem_type); return; } /* * We don't know in which slots the memory modules are. We could * try to guess from the slot names, but that would be rather complex * and unreliable, so better probe all possible addresses until we * have found all memory modules. */ for (n = 0; n < slot_count && dimm_count; n++) { struct i2c_board_info info; unsigned short addr_list[2]; memset(&info, 0, sizeof(struct i2c_board_info)); strscpy(info.type, name, I2C_NAME_SIZE); addr_list[0] = 0x50 + n; addr_list[1] = I2C_CLIENT_END; if (!IS_ERR(i2c_new_scanned_device(adap, &info, addr_list, NULL))) { dev_info(&adap->dev, "Successfully instantiated SPD at 0x%hx\n", addr_list[0]); dimm_count--; } } } EXPORT_SYMBOL_GPL(i2c_register_spd); #endif MODULE_AUTHOR("Jean Delvare <[email protected]>"); MODULE_DESCRIPTION("SMBus protocol extensions support"); MODULE_LICENSE("GPL");
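/*
 * Illustrative sketch, not part of the driver above: a minimal client
 * driver reacting to SMBALERT#. When smbus_do_alert() finds the client
 * matching the address read from the Alert Response Address, it calls
 * the driver's alert() hook. The device name "demo-sensor" and the
 * handler body are assumptions for the example, hence the #if 0 guard.
 */
#if 0
static void demo_alert(struct i2c_client *client,
		       enum i2c_alert_protocol protocol, unsigned int data)
{
	if (protocol != I2C_PROTOCOL_SMBUS_ALERT)
		return;
	/* "data" is the flag bit taken from the ARA reply */
	dev_info(&client->dev, "SMBALERT# received, flag=%u\n", data);
}

static const struct i2c_device_id demo_ids[] = {
	{ "demo-sensor" },
	{ }
};
MODULE_DEVICE_TABLE(i2c, demo_ids);

static struct i2c_driver demo_driver = {
	.driver		= { .name = "demo-sensor" },
	.alert		= demo_alert,
	.id_table	= demo_ids,
};
module_i2c_driver(demo_driver);
#endif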
linux-master
drivers/i2c/i2c-smbus.c
/* * Multiplexed I2C bus driver. * * Copyright (c) 2008-2009 Rodolfo Giometti <[email protected]> * Copyright (c) 2008-2009 Eurotech S.p.A. <[email protected]> * Copyright (c) 2009-2010 NSN GmbH & Co KG <[email protected]> * * Simplifies access to complex multiplexed I2C bus topologies, by presenting * each multiplexed bus segment as an additional I2C adapter. * Supports multi-level mux'ing (mux behind a mux). * * Based on: * i2c-virt.c from Kumar Gala <[email protected]> * i2c-virtual.c from Ken Harrenstien, Copyright (c) 2004 Google, Inc. * i2c-virtual.c from Brian Kuschak <[email protected]> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/acpi.h> #include <linux/i2c.h> #include <linux/i2c-mux.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/slab.h> #include <linux/sysfs.h> /* multiplexer per channel data */ struct i2c_mux_priv { struct i2c_adapter adap; struct i2c_algorithm algo; struct i2c_mux_core *muxc; u32 chan_id; }; static int __i2c_mux_master_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) { struct i2c_mux_priv *priv = adap->algo_data; struct i2c_mux_core *muxc = priv->muxc; struct i2c_adapter *parent = muxc->parent; int ret; /* Switch to the right mux port and perform the transfer. */ ret = muxc->select(muxc, priv->chan_id); if (ret >= 0) ret = __i2c_transfer(parent, msgs, num); if (muxc->deselect) muxc->deselect(muxc, priv->chan_id); return ret; } static int i2c_mux_master_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) { struct i2c_mux_priv *priv = adap->algo_data; struct i2c_mux_core *muxc = priv->muxc; struct i2c_adapter *parent = muxc->parent; int ret; /* Switch to the right mux port and perform the transfer. */ ret = muxc->select(muxc, priv->chan_id); if (ret >= 0) ret = i2c_transfer(parent, msgs, num); if (muxc->deselect) muxc->deselect(muxc, priv->chan_id); return ret; } static int __i2c_mux_smbus_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { struct i2c_mux_priv *priv = adap->algo_data; struct i2c_mux_core *muxc = priv->muxc; struct i2c_adapter *parent = muxc->parent; int ret; /* Select the right mux port and perform the transfer. */ ret = muxc->select(muxc, priv->chan_id); if (ret >= 0) ret = __i2c_smbus_xfer(parent, addr, flags, read_write, command, size, data); if (muxc->deselect) muxc->deselect(muxc, priv->chan_id); return ret; } static int i2c_mux_smbus_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { struct i2c_mux_priv *priv = adap->algo_data; struct i2c_mux_core *muxc = priv->muxc; struct i2c_adapter *parent = muxc->parent; int ret; /* Select the right mux port and perform the transfer. 
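 *
 * Note: this plain variant, like i2c_mux_master_xfer() above, is
 * installed for mux-locked muxes and takes the parent adapter lock
 * for each transfer via i2c_smbus_xfer(). The __-prefixed variants
 * are used for parent-locked muxes, where the parent bus lock is
 * already held around the whole select/transfer/deselect sequence.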
*/ ret = muxc->select(muxc, priv->chan_id); if (ret >= 0) ret = i2c_smbus_xfer(parent, addr, flags, read_write, command, size, data); if (muxc->deselect) muxc->deselect(muxc, priv->chan_id); return ret; } /* Return the parent's functionality */ static u32 i2c_mux_functionality(struct i2c_adapter *adap) { struct i2c_mux_priv *priv = adap->algo_data; struct i2c_adapter *parent = priv->muxc->parent; return parent->algo->functionality(parent); } /* Return all parent classes, merged */ static unsigned int i2c_mux_parent_classes(struct i2c_adapter *parent) { unsigned int class = 0; do { class |= parent->class; parent = i2c_parent_is_i2c_adapter(parent); } while (parent); return class; } static void i2c_mux_lock_bus(struct i2c_adapter *adapter, unsigned int flags) { struct i2c_mux_priv *priv = adapter->algo_data; struct i2c_adapter *parent = priv->muxc->parent; rt_mutex_lock_nested(&parent->mux_lock, i2c_adapter_depth(adapter)); if (!(flags & I2C_LOCK_ROOT_ADAPTER)) return; i2c_lock_bus(parent, flags); } static int i2c_mux_trylock_bus(struct i2c_adapter *adapter, unsigned int flags) { struct i2c_mux_priv *priv = adapter->algo_data; struct i2c_adapter *parent = priv->muxc->parent; if (!rt_mutex_trylock(&parent->mux_lock)) return 0; /* mux_lock not locked, failure */ if (!(flags & I2C_LOCK_ROOT_ADAPTER)) return 1; /* we only want mux_lock, success */ if (i2c_trylock_bus(parent, flags)) return 1; /* parent locked too, success */ rt_mutex_unlock(&parent->mux_lock); return 0; /* parent not locked, failure */ } static void i2c_mux_unlock_bus(struct i2c_adapter *adapter, unsigned int flags) { struct i2c_mux_priv *priv = adapter->algo_data; struct i2c_adapter *parent = priv->muxc->parent; if (flags & I2C_LOCK_ROOT_ADAPTER) i2c_unlock_bus(parent, flags); rt_mutex_unlock(&parent->mux_lock); } static void i2c_parent_lock_bus(struct i2c_adapter *adapter, unsigned int flags) { struct i2c_mux_priv *priv = adapter->algo_data; struct i2c_adapter *parent = priv->muxc->parent; rt_mutex_lock_nested(&parent->mux_lock, i2c_adapter_depth(adapter)); i2c_lock_bus(parent, flags); } static int i2c_parent_trylock_bus(struct i2c_adapter *adapter, unsigned int flags) { struct i2c_mux_priv *priv = adapter->algo_data; struct i2c_adapter *parent = priv->muxc->parent; if (!rt_mutex_trylock(&parent->mux_lock)) return 0; /* mux_lock not locked, failure */ if (i2c_trylock_bus(parent, flags)) return 1; /* parent locked too, success */ rt_mutex_unlock(&parent->mux_lock); return 0; /* parent not locked, failure */ } static void i2c_parent_unlock_bus(struct i2c_adapter *adapter, unsigned int flags) { struct i2c_mux_priv *priv = adapter->algo_data; struct i2c_adapter *parent = priv->muxc->parent; i2c_unlock_bus(parent, flags); rt_mutex_unlock(&parent->mux_lock); } struct i2c_adapter *i2c_root_adapter(struct device *dev) { struct device *i2c; struct i2c_adapter *i2c_root; /* * Walk up the device tree to find an i2c adapter, indicating * that this is an i2c client device. Check all ancestors to * handle mfd devices etc. 
*/ for (i2c = dev; i2c; i2c = i2c->parent) { if (i2c->type == &i2c_adapter_type) break; } if (!i2c) return NULL; /* Continue up the tree to find the root i2c adapter */ i2c_root = to_i2c_adapter(i2c); while (i2c_parent_is_i2c_adapter(i2c_root)) i2c_root = i2c_parent_is_i2c_adapter(i2c_root); return i2c_root; } EXPORT_SYMBOL_GPL(i2c_root_adapter); struct i2c_mux_core *i2c_mux_alloc(struct i2c_adapter *parent, struct device *dev, int max_adapters, int sizeof_priv, u32 flags, int (*select)(struct i2c_mux_core *, u32), int (*deselect)(struct i2c_mux_core *, u32)) { struct i2c_mux_core *muxc; size_t mux_size; mux_size = struct_size(muxc, adapter, max_adapters); muxc = devm_kzalloc(dev, size_add(mux_size, sizeof_priv), GFP_KERNEL); if (!muxc) return NULL; if (sizeof_priv) muxc->priv = &muxc->adapter[max_adapters]; muxc->parent = parent; muxc->dev = dev; if (flags & I2C_MUX_LOCKED) muxc->mux_locked = true; if (flags & I2C_MUX_ARBITRATOR) muxc->arbitrator = true; if (flags & I2C_MUX_GATE) muxc->gate = true; muxc->select = select; muxc->deselect = deselect; muxc->max_adapters = max_adapters; return muxc; } EXPORT_SYMBOL_GPL(i2c_mux_alloc); static const struct i2c_lock_operations i2c_mux_lock_ops = { .lock_bus = i2c_mux_lock_bus, .trylock_bus = i2c_mux_trylock_bus, .unlock_bus = i2c_mux_unlock_bus, }; static const struct i2c_lock_operations i2c_parent_lock_ops = { .lock_bus = i2c_parent_lock_bus, .trylock_bus = i2c_parent_trylock_bus, .unlock_bus = i2c_parent_unlock_bus, }; int i2c_mux_add_adapter(struct i2c_mux_core *muxc, u32 force_nr, u32 chan_id, unsigned int class) { struct i2c_adapter *parent = muxc->parent; struct i2c_mux_priv *priv; char symlink_name[20]; int ret; if (muxc->num_adapters >= muxc->max_adapters) { dev_err(muxc->dev, "No room for more i2c-mux adapters\n"); return -EINVAL; } priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; /* Set up private adapter data */ priv->muxc = muxc; priv->chan_id = chan_id; /* Need to do algo dynamically because we don't know ahead * of time what sort of physical adapter we'll be dealing with. */ if (parent->algo->master_xfer) { if (muxc->mux_locked) priv->algo.master_xfer = i2c_mux_master_xfer; else priv->algo.master_xfer = __i2c_mux_master_xfer; } if (parent->algo->master_xfer_atomic) priv->algo.master_xfer_atomic = priv->algo.master_xfer; if (parent->algo->smbus_xfer) { if (muxc->mux_locked) priv->algo.smbus_xfer = i2c_mux_smbus_xfer; else priv->algo.smbus_xfer = __i2c_mux_smbus_xfer; } if (parent->algo->smbus_xfer_atomic) priv->algo.smbus_xfer_atomic = priv->algo.smbus_xfer; priv->algo.functionality = i2c_mux_functionality; /* Now fill out new adapter structure */ snprintf(priv->adap.name, sizeof(priv->adap.name), "i2c-%d-mux (chan_id %d)", i2c_adapter_id(parent), chan_id); priv->adap.owner = THIS_MODULE; priv->adap.algo = &priv->algo; priv->adap.algo_data = priv; priv->adap.dev.parent = &parent->dev; priv->adap.retries = parent->retries; priv->adap.timeout = parent->timeout; priv->adap.quirks = parent->quirks; if (muxc->mux_locked) priv->adap.lock_ops = &i2c_mux_lock_ops; else priv->adap.lock_ops = &i2c_parent_lock_ops; /* Sanity check on class */ if (i2c_mux_parent_classes(parent) & class) dev_err(&parent->dev, "Segment %d behind mux can't share classes with ancestors\n", chan_id); else priv->adap.class = class; /* * Try to populate the mux adapter's of_node, expands to * nothing if !CONFIG_OF. 
*/ if (muxc->dev->of_node) { struct device_node *dev_node = muxc->dev->of_node; struct device_node *mux_node, *child = NULL; u32 reg; if (muxc->arbitrator) mux_node = of_get_child_by_name(dev_node, "i2c-arb"); else if (muxc->gate) mux_node = of_get_child_by_name(dev_node, "i2c-gate"); else mux_node = of_get_child_by_name(dev_node, "i2c-mux"); if (mux_node) { /* A "reg" property indicates an old-style DT entry */ if (!of_property_read_u32(mux_node, "reg", &reg)) { of_node_put(mux_node); mux_node = NULL; } } if (!mux_node) mux_node = of_node_get(dev_node); else if (muxc->arbitrator || muxc->gate) child = of_node_get(mux_node); if (!child) { for_each_child_of_node(mux_node, child) { ret = of_property_read_u32(child, "reg", &reg); if (ret) continue; if (chan_id == reg) break; } } priv->adap.dev.of_node = child; of_node_put(mux_node); } /* * Associate the mux channel with an ACPI node. */ if (has_acpi_companion(muxc->dev)) acpi_preset_companion(&priv->adap.dev, ACPI_COMPANION(muxc->dev), chan_id); if (force_nr) { priv->adap.nr = force_nr; ret = i2c_add_numbered_adapter(&priv->adap); if (ret < 0) { dev_err(&parent->dev, "failed to add mux-adapter %u as bus %u (error=%d)\n", chan_id, force_nr, ret); goto err_free_priv; } } else { ret = i2c_add_adapter(&priv->adap); if (ret < 0) { dev_err(&parent->dev, "failed to add mux-adapter %u (error=%d)\n", chan_id, ret); goto err_free_priv; } } WARN(sysfs_create_link(&priv->adap.dev.kobj, &muxc->dev->kobj, "mux_device"), "can't create symlink to mux device\n"); snprintf(symlink_name, sizeof(symlink_name), "channel-%u", chan_id); WARN(sysfs_create_link(&muxc->dev->kobj, &priv->adap.dev.kobj, symlink_name), "can't create symlink to channel %u\n", chan_id); dev_info(&parent->dev, "Added multiplexed i2c bus %d\n", i2c_adapter_id(&priv->adap)); muxc->adapter[muxc->num_adapters++] = &priv->adap; return 0; err_free_priv: kfree(priv); return ret; } EXPORT_SYMBOL_GPL(i2c_mux_add_adapter); void i2c_mux_del_adapters(struct i2c_mux_core *muxc) { char symlink_name[20]; while (muxc->num_adapters) { struct i2c_adapter *adap = muxc->adapter[--muxc->num_adapters]; struct i2c_mux_priv *priv = adap->algo_data; struct device_node *np = adap->dev.of_node; muxc->adapter[muxc->num_adapters] = NULL; snprintf(symlink_name, sizeof(symlink_name), "channel-%u", priv->chan_id); sysfs_remove_link(&muxc->dev->kobj, symlink_name); sysfs_remove_link(&priv->adap.dev.kobj, "mux_device"); i2c_del_adapter(adap); of_node_put(np); kfree(priv); } } EXPORT_SYMBOL_GPL(i2c_mux_del_adapters); MODULE_AUTHOR("Rodolfo Giometti <[email protected]>"); MODULE_DESCRIPTION("I2C driver for multiplexed I2C busses"); MODULE_LICENSE("GPL v2");
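/*
 * Illustrative sketch, not part of the core above: how a mux chip
 * driver would typically use this API. The 2-channel chip, its select
 * register and the register write helper are invented for the example
 * (real users live in drivers/i2c/muxes/), hence the #if 0 guard.
 */
#if 0
#define DEMO_MUX_SEL_REG	0x01	/* hypothetical channel select register */
#define DEMO_MUX_SEL_NONE	0xff	/* hypothetical "no channel" value */

struct demo_mux {
	struct i2c_client *client;
};

static int demo_mux_write_reg(struct demo_mux *mux, u8 reg, u8 val)
{
	return i2c_smbus_write_byte_data(mux->client, reg, val);
}

static int demo_mux_select(struct i2c_mux_core *muxc, u32 chan)
{
	struct demo_mux *mux = i2c_mux_priv(muxc);

	return demo_mux_write_reg(mux, DEMO_MUX_SEL_REG, chan);
}

static int demo_mux_deselect(struct i2c_mux_core *muxc, u32 chan)
{
	struct demo_mux *mux = i2c_mux_priv(muxc);

	return demo_mux_write_reg(mux, DEMO_MUX_SEL_REG, DEMO_MUX_SEL_NONE);
}

static int demo_mux_probe(struct i2c_client *client)
{
	struct i2c_mux_core *muxc;
	struct demo_mux *mux;
	int chan, ret;

	muxc = i2c_mux_alloc(client->adapter, &client->dev, 2,
			     sizeof(*mux), 0,
			     demo_mux_select, demo_mux_deselect);
	if (!muxc)
		return -ENOMEM;
	mux = i2c_mux_priv(muxc);
	mux->client = client;
	i2c_set_clientdata(client, muxc);

	/* one child adapter per downstream segment, dynamic bus numbers */
	for (chan = 0; chan < 2; chan++) {
		ret = i2c_mux_add_adapter(muxc, 0, chan, 0);
		if (ret) {
			i2c_mux_del_adapters(muxc);
			return ret;
		}
	}
	return 0;
}
#endif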
linux-master
drivers/i2c/i2c-mux.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * i2c-boardinfo.c - collect pre-declarations of I2C devices */ #include <linux/export.h> #include <linux/i2c.h> #include <linux/kernel.h> #include <linux/property.h> #include <linux/rwsem.h> #include <linux/slab.h> #include "i2c-core.h" /* These symbols are exported ONLY FOR the i2c core. * No other users will be supported. */ DECLARE_RWSEM(__i2c_board_lock); EXPORT_SYMBOL_GPL(__i2c_board_lock); LIST_HEAD(__i2c_board_list); EXPORT_SYMBOL_GPL(__i2c_board_list); int __i2c_first_dynamic_bus_num; EXPORT_SYMBOL_GPL(__i2c_first_dynamic_bus_num); /** * i2c_register_board_info - statically declare I2C devices * @busnum: identifies the bus to which these devices belong * @info: vector of i2c device descriptors * @len: how many descriptors in the vector; may be zero to reserve * the specified bus number. * * Systems using the Linux I2C driver stack can declare tables of board info * while they initialize. This should be done in board-specific init code * near arch_initcall() time, or equivalent, before any I2C adapter driver is * registered. For example, mainboard init code could define several devices, * as could the init code for each daughtercard in a board stack. * * The I2C devices will be created later, after the adapter for the relevant * bus has been registered. After that moment, standard driver model tools * are used to bind "new style" I2C drivers to the devices. The bus number * for any device declared using this routine is not available for dynamic * allocation. * * The board info passed can safely be __initdata, but be careful of embedded * pointers (for platform_data, functions, etc) since that won't be copied. */ int i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsigned len) { int status; down_write(&__i2c_board_lock); /* dynamic bus numbers will be assigned after the last static one */ if (busnum >= __i2c_first_dynamic_bus_num) __i2c_first_dynamic_bus_num = busnum + 1; for (status = 0; len; len--, info++) { struct i2c_devinfo *devinfo; devinfo = kzalloc(sizeof(*devinfo), GFP_KERNEL); if (!devinfo) { pr_debug("i2c-core: can't register boardinfo!\n"); status = -ENOMEM; break; } devinfo->busnum = busnum; devinfo->board_info = *info; if (info->resources) { devinfo->board_info.resources = kmemdup(info->resources, info->num_resources * sizeof(*info->resources), GFP_KERNEL); if (!devinfo->board_info.resources) { status = -ENOMEM; kfree(devinfo); break; } } list_add_tail(&devinfo->list, &__i2c_board_list); } up_write(&__i2c_board_lock); return status; }
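/*
 * Illustrative sketch, not part of the file above: board init code
 * pre-declaring two devices on bus 0, as described in the kernel-doc
 * above. Device names, addresses and the IRQ are invented for the
 * example, hence the #if 0 guard.
 */
#if 0
static struct i2c_board_info demo_board_i2c_devs[] __initdata = {
	{
		I2C_BOARD_INFO("rtc-demo", 0x68),
	},
	{
		I2C_BOARD_INFO("temp-demo", 0x48),
		.irq = 17,
	},
};

static int __init demo_board_i2c_init(void)
{
	/* must run before the adapter driver for bus 0 is registered */
	return i2c_register_board_info(0, demo_board_i2c_devs,
				       ARRAY_SIZE(demo_board_i2c_devs));
}
arch_initcall(demo_board_i2c_init);
#endif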
linux-master
drivers/i2c/i2c-boardinfo.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Linux I2C core slave support code * * Copyright (C) 2014 by Wolfram Sang <[email protected]> */ #include <dt-bindings/i2c/i2c.h> #include <linux/acpi.h> #include <linux/device.h> #include <linux/err.h> #include <linux/i2c.h> #include <linux/of.h> #include "i2c-core.h" #define CREATE_TRACE_POINTS #include <trace/events/i2c_slave.h> int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb) { int ret; if (WARN(IS_ERR_OR_NULL(client) || !slave_cb, "insufficient data\n")) return -EINVAL; if (!(client->flags & I2C_CLIENT_SLAVE)) dev_warn(&client->dev, "%s: client slave flag not set. You might see address collisions\n", __func__); if (!(client->flags & I2C_CLIENT_TEN)) { /* Enforce stricter address checking */ ret = i2c_check_7bit_addr_validity_strict(client->addr); if (ret) { dev_err(&client->dev, "%s: invalid address\n", __func__); return ret; } } if (!client->adapter->algo->reg_slave) { dev_err(&client->dev, "%s: not supported by adapter\n", __func__); return -EOPNOTSUPP; } client->slave_cb = slave_cb; i2c_lock_bus(client->adapter, I2C_LOCK_ROOT_ADAPTER); ret = client->adapter->algo->reg_slave(client); i2c_unlock_bus(client->adapter, I2C_LOCK_ROOT_ADAPTER); if (ret) { client->slave_cb = NULL; dev_err(&client->dev, "%s: adapter returned error %d\n", __func__, ret); } return ret; } EXPORT_SYMBOL_GPL(i2c_slave_register); int i2c_slave_unregister(struct i2c_client *client) { int ret; if (IS_ERR_OR_NULL(client)) return -EINVAL; if (!client->adapter->algo->unreg_slave) { dev_err(&client->dev, "%s: not supported by adapter\n", __func__); return -EOPNOTSUPP; } i2c_lock_bus(client->adapter, I2C_LOCK_ROOT_ADAPTER); ret = client->adapter->algo->unreg_slave(client); i2c_unlock_bus(client->adapter, I2C_LOCK_ROOT_ADAPTER); if (ret == 0) client->slave_cb = NULL; else dev_err(&client->dev, "%s: adapter returned error %d\n", __func__, ret); return ret; } EXPORT_SYMBOL_GPL(i2c_slave_unregister); int i2c_slave_event(struct i2c_client *client, enum i2c_slave_event event, u8 *val) { int ret = client->slave_cb(client, event, val); if (trace_i2c_slave_enabled()) trace_i2c_slave(client, event, val, ret); return ret; } EXPORT_SYMBOL_GPL(i2c_slave_event); /** * i2c_detect_slave_mode - detect operation mode * @dev: The device owning the bus * * This checks the device nodes for an I2C slave by checking the address * used in the reg property. If the address match the I2C_OWN_SLAVE_ADDRESS * flag this means the device is configured to act as a I2C slave and it will * be listening at that address. * * Returns true if an I2C own slave address is detected, otherwise returns * false. */ bool i2c_detect_slave_mode(struct device *dev) { if (IS_BUILTIN(CONFIG_OF) && dev->of_node) { struct device_node *child; u32 reg; for_each_child_of_node(dev->of_node, child) { of_property_read_u32(child, "reg", &reg); if (reg & I2C_OWN_SLAVE_ADDRESS) { of_node_put(child); return true; } } } else if (IS_BUILTIN(CONFIG_ACPI) && ACPI_HANDLE(dev)) { dev_dbg(dev, "ACPI slave is not supported yet\n"); } return false; } EXPORT_SYMBOL_GPL(i2c_detect_slave_mode);
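/*
 * Illustrative sketch, not part of the file above: the smallest useful
 * slave backend. The adapter driver calls i2c_slave_event() for each
 * bus event, which ends up in this callback. The probe/remove wiring
 * into an i2c_driver is omitted; the "demo" names are invented, hence
 * the #if 0 guard.
 */
#if 0
static int demo_slave_cb(struct i2c_client *client,
			 enum i2c_slave_event event, u8 *val)
{
	switch (event) {
	case I2C_SLAVE_WRITE_RECEIVED:
		dev_dbg(&client->dev, "got byte 0x%02x\n", *val);
		break;
	case I2C_SLAVE_READ_REQUESTED:
	case I2C_SLAVE_READ_PROCESSED:
		*val = 0x00;	/* nothing to transmit */
		break;
	default:
		break;
	}
	return 0;
}

static int demo_slave_attach(struct i2c_client *client)
{
	/*
	 * The client should carry I2C_CLIENT_SLAVE (e.g. from a DT "reg"
	 * entry flagged with I2C_OWN_SLAVE_ADDRESS), or the warning in
	 * i2c_slave_register() above fires.
	 */
	return i2c_slave_register(client, demo_slave_cb);
}
#endif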
linux-master
drivers/i2c/i2c-core-slave.c
// SPDX-License-Identifier: GPL-2.0-only /* * I2C slave mode EEPROM simulator * * Copyright (C) 2014 by Wolfram Sang, Sang Engineering <[email protected]> * Copyright (C) 2014 by Renesas Electronics Corporation * * Because most slave IP cores can only detect one I2C slave address anyhow, * this driver does not support simulating EEPROM types which take more than * one address. */ /* * FIXME: What to do if only 8 bits of a 16 bit address are sent? * The ST-M24C64 sends only 0xff then. Needs verification with other * EEPROMs, though. We currently use the 8 bit as a valid address. */ #include <linux/bitfield.h> #include <linux/firmware.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/module.h> #include <linux/of.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/sysfs.h> struct eeprom_data { struct bin_attribute bin; spinlock_t buffer_lock; u16 buffer_idx; u16 address_mask; u8 num_address_bytes; u8 idx_write_cnt; bool read_only; u8 buffer[]; }; #define I2C_SLAVE_BYTELEN GENMASK(15, 0) #define I2C_SLAVE_FLAG_ADDR16 BIT(16) #define I2C_SLAVE_FLAG_RO BIT(17) #define I2C_SLAVE_DEVICE_MAGIC(_len, _flags) ((_flags) | ((_len) - 1)) static int i2c_slave_eeprom_slave_cb(struct i2c_client *client, enum i2c_slave_event event, u8 *val) { struct eeprom_data *eeprom = i2c_get_clientdata(client); switch (event) { case I2C_SLAVE_WRITE_RECEIVED: if (eeprom->idx_write_cnt < eeprom->num_address_bytes) { if (eeprom->idx_write_cnt == 0) eeprom->buffer_idx = 0; eeprom->buffer_idx = *val | (eeprom->buffer_idx << 8); eeprom->idx_write_cnt++; } else { if (!eeprom->read_only) { spin_lock(&eeprom->buffer_lock); eeprom->buffer[eeprom->buffer_idx++ & eeprom->address_mask] = *val; spin_unlock(&eeprom->buffer_lock); } } break; case I2C_SLAVE_READ_PROCESSED: /* The previous byte made it to the bus, get next one */ eeprom->buffer_idx++; fallthrough; case I2C_SLAVE_READ_REQUESTED: spin_lock(&eeprom->buffer_lock); *val = eeprom->buffer[eeprom->buffer_idx & eeprom->address_mask]; spin_unlock(&eeprom->buffer_lock); /* * Do not increment buffer_idx here, because we don't know if * this byte will be actually used. Read Linux I2C slave docs * for details. 
*/ break; case I2C_SLAVE_STOP: case I2C_SLAVE_WRITE_REQUESTED: eeprom->idx_write_cnt = 0; break; default: break; } return 0; } static ssize_t i2c_slave_eeprom_bin_read(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct eeprom_data *eeprom; unsigned long flags; eeprom = dev_get_drvdata(kobj_to_dev(kobj)); spin_lock_irqsave(&eeprom->buffer_lock, flags); memcpy(buf, &eeprom->buffer[off], count); spin_unlock_irqrestore(&eeprom->buffer_lock, flags); return count; } static ssize_t i2c_slave_eeprom_bin_write(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct eeprom_data *eeprom; unsigned long flags; eeprom = dev_get_drvdata(kobj_to_dev(kobj)); spin_lock_irqsave(&eeprom->buffer_lock, flags); memcpy(&eeprom->buffer[off], buf, count); spin_unlock_irqrestore(&eeprom->buffer_lock, flags); return count; } static int i2c_slave_init_eeprom_data(struct eeprom_data *eeprom, struct i2c_client *client, unsigned int size) { const struct firmware *fw; const char *eeprom_data; int ret = device_property_read_string(&client->dev, "firmware-name", &eeprom_data); if (!ret) { ret = request_firmware_into_buf(&fw, eeprom_data, &client->dev, eeprom->buffer, size); if (ret) return ret; release_firmware(fw); } else { /* An empty eeprom typically has all bits set to 1 */ memset(eeprom->buffer, 0xff, size); } return 0; } static int i2c_slave_eeprom_probe(struct i2c_client *client) { const struct i2c_device_id *id = i2c_client_get_device_id(client); struct eeprom_data *eeprom; int ret; unsigned int size = FIELD_GET(I2C_SLAVE_BYTELEN, id->driver_data) + 1; unsigned int flag_addr16 = FIELD_GET(I2C_SLAVE_FLAG_ADDR16, id->driver_data); eeprom = devm_kzalloc(&client->dev, sizeof(struct eeprom_data) + size, GFP_KERNEL); if (!eeprom) return -ENOMEM; eeprom->num_address_bytes = flag_addr16 ? 
2 : 1; eeprom->address_mask = size - 1; eeprom->read_only = FIELD_GET(I2C_SLAVE_FLAG_RO, id->driver_data); spin_lock_init(&eeprom->buffer_lock); i2c_set_clientdata(client, eeprom); ret = i2c_slave_init_eeprom_data(eeprom, client, size); if (ret) return ret; sysfs_bin_attr_init(&eeprom->bin); eeprom->bin.attr.name = "slave-eeprom"; eeprom->bin.attr.mode = S_IRUSR | S_IWUSR; eeprom->bin.read = i2c_slave_eeprom_bin_read; eeprom->bin.write = i2c_slave_eeprom_bin_write; eeprom->bin.size = size; ret = sysfs_create_bin_file(&client->dev.kobj, &eeprom->bin); if (ret) return ret; ret = i2c_slave_register(client, i2c_slave_eeprom_slave_cb); if (ret) { sysfs_remove_bin_file(&client->dev.kobj, &eeprom->bin); return ret; } return 0; }; static void i2c_slave_eeprom_remove(struct i2c_client *client) { struct eeprom_data *eeprom = i2c_get_clientdata(client); i2c_slave_unregister(client); sysfs_remove_bin_file(&client->dev.kobj, &eeprom->bin); } static const struct i2c_device_id i2c_slave_eeprom_id[] = { { "slave-24c02", I2C_SLAVE_DEVICE_MAGIC(2048 / 8, 0) }, { "slave-24c02ro", I2C_SLAVE_DEVICE_MAGIC(2048 / 8, I2C_SLAVE_FLAG_RO) }, { "slave-24c32", I2C_SLAVE_DEVICE_MAGIC(32768 / 8, I2C_SLAVE_FLAG_ADDR16) }, { "slave-24c32ro", I2C_SLAVE_DEVICE_MAGIC(32768 / 8, I2C_SLAVE_FLAG_ADDR16 | I2C_SLAVE_FLAG_RO) }, { "slave-24c64", I2C_SLAVE_DEVICE_MAGIC(65536 / 8, I2C_SLAVE_FLAG_ADDR16) }, { "slave-24c64ro", I2C_SLAVE_DEVICE_MAGIC(65536 / 8, I2C_SLAVE_FLAG_ADDR16 | I2C_SLAVE_FLAG_RO) }, { "slave-24c512", I2C_SLAVE_DEVICE_MAGIC(524288 / 8, I2C_SLAVE_FLAG_ADDR16) }, { "slave-24c512ro", I2C_SLAVE_DEVICE_MAGIC(524288 / 8, I2C_SLAVE_FLAG_ADDR16 | I2C_SLAVE_FLAG_RO) }, { } }; MODULE_DEVICE_TABLE(i2c, i2c_slave_eeprom_id); static struct i2c_driver i2c_slave_eeprom_driver = { .driver = { .name = "i2c-slave-eeprom", }, .probe = i2c_slave_eeprom_probe, .remove = i2c_slave_eeprom_remove, .id_table = i2c_slave_eeprom_id, }; module_i2c_driver(i2c_slave_eeprom_driver); MODULE_AUTHOR("Wolfram Sang <[email protected]>"); MODULE_DESCRIPTION("I2C slave mode EEPROM simulator"); MODULE_LICENSE("GPL v2");
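/*
 * Illustrative sketch, not part of the driver above: instantiating the
 * simulator from another kernel module. The adapter pointer and the
 * own-slave address 0x64 are assumptions for the example, hence the
 * #if 0 guard. From user space the equivalent is writing, e.g.,
 * "slave-24c02 0x1064" to the adapter's new_device sysfs file.
 */
#if 0
static struct i2c_client *demo_attach_sim(struct i2c_adapter *adap)
{
	struct i2c_board_info info = {
		I2C_BOARD_INFO("slave-24c02", 0x64),
		.flags = I2C_CLIENT_SLAVE,
	};

	/*
	 * The probe() above then registers the slave callback and
	 * exposes the 256 byte backing store as the "slave-eeprom"
	 * sysfs file.
	 */
	return i2c_new_client_device(adap, &info);
}
#endif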
linux-master
drivers/i2c/i2c-slave-eeprom.c
// SPDX-License-Identifier: GPL-2.0-or-later /* i2c-dev.c - i2c-bus driver, char device interface Copyright (C) 1995-97 Simon G. Vogl Copyright (C) 1998-99 Frodo Looijaard <[email protected]> Copyright (C) 2003 Greg Kroah-Hartman <[email protected]> */ /* Note that this is a complete rewrite of Simon Vogl's i2c-dev module. But I have used so much of his original code and ideas that it seems only fair to recognize him as co-author -- Frodo */ /* The I2C_RDWR ioctl code is written by Kolja Waschk <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/cdev.h> #include <linux/compat.h> #include <linux/device.h> #include <linux/fs.h> #include <linux/i2c-dev.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/module.h> #include <linux/notifier.h> #include <linux/slab.h> #include <linux/uaccess.h> /* * An i2c_dev represents an i2c_adapter ... an I2C or SMBus master, not a * slave (i2c_client) with which messages will be exchanged. It's coupled * with a character special file which is accessed by user mode drivers. * * The list of i2c_dev structures is parallel to the i2c_adapter lists * maintained by the driver model, and is updated using bus notifications. */ struct i2c_dev { struct list_head list; struct i2c_adapter *adap; struct device dev; struct cdev cdev; }; #define I2C_MINORS (MINORMASK + 1) static LIST_HEAD(i2c_dev_list); static DEFINE_SPINLOCK(i2c_dev_list_lock); static struct i2c_dev *i2c_dev_get_by_minor(unsigned index) { struct i2c_dev *i2c_dev; spin_lock(&i2c_dev_list_lock); list_for_each_entry(i2c_dev, &i2c_dev_list, list) { if (i2c_dev->adap->nr == index) goto found; } i2c_dev = NULL; found: spin_unlock(&i2c_dev_list_lock); return i2c_dev; } static struct i2c_dev *get_free_i2c_dev(struct i2c_adapter *adap) { struct i2c_dev *i2c_dev; if (adap->nr >= I2C_MINORS) { pr_err("Out of device minors (%d)\n", adap->nr); return ERR_PTR(-ENODEV); } i2c_dev = kzalloc(sizeof(*i2c_dev), GFP_KERNEL); if (!i2c_dev) return ERR_PTR(-ENOMEM); i2c_dev->adap = adap; spin_lock(&i2c_dev_list_lock); list_add_tail(&i2c_dev->list, &i2c_dev_list); spin_unlock(&i2c_dev_list_lock); return i2c_dev; } static void put_i2c_dev(struct i2c_dev *i2c_dev, bool del_cdev) { spin_lock(&i2c_dev_list_lock); list_del(&i2c_dev->list); spin_unlock(&i2c_dev_list_lock); if (del_cdev) cdev_device_del(&i2c_dev->cdev, &i2c_dev->dev); put_device(&i2c_dev->dev); } static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf) { struct i2c_dev *i2c_dev = i2c_dev_get_by_minor(MINOR(dev->devt)); if (!i2c_dev) return -ENODEV; return sysfs_emit(buf, "%s\n", i2c_dev->adap->name); } static DEVICE_ATTR_RO(name); static struct attribute *i2c_attrs[] = { &dev_attr_name.attr, NULL, }; ATTRIBUTE_GROUPS(i2c); /* ------------------------------------------------------------------------- */ /* * After opening an instance of this character special file, a file * descriptor starts out associated only with an i2c_adapter (and bus). * * Using the I2C_RDWR ioctl(), you can then *immediately* issue i2c_msg * traffic to any devices on the bus used by that adapter. That's because * the i2c_msg vectors embed all the addressing information they need, and * are submitted directly to an i2c_adapter. However, SMBus-only adapters * don't support that interface. 
* * To use read()/write() system calls on that file descriptor, or to use * SMBus interfaces (and work with SMBus-only hosts!), you must first issue * an I2C_SLAVE (or I2C_SLAVE_FORCE) ioctl. That configures an anonymous * (never registered) i2c_client so it holds the addressing information * needed by those system calls and by this SMBus interface. */ static ssize_t i2cdev_read(struct file *file, char __user *buf, size_t count, loff_t *offset) { char *tmp; int ret; struct i2c_client *client = file->private_data; if (count > 8192) count = 8192; tmp = kzalloc(count, GFP_KERNEL); if (tmp == NULL) return -ENOMEM; pr_debug("i2c-%d reading %zu bytes.\n", iminor(file_inode(file)), count); ret = i2c_master_recv(client, tmp, count); if (ret >= 0) if (copy_to_user(buf, tmp, ret)) ret = -EFAULT; kfree(tmp); return ret; } static ssize_t i2cdev_write(struct file *file, const char __user *buf, size_t count, loff_t *offset) { int ret; char *tmp; struct i2c_client *client = file->private_data; if (count > 8192) count = 8192; tmp = memdup_user(buf, count); if (IS_ERR(tmp)) return PTR_ERR(tmp); pr_debug("i2c-%d writing %zu bytes.\n", iminor(file_inode(file)), count); ret = i2c_master_send(client, tmp, count); kfree(tmp); return ret; } static int i2cdev_check(struct device *dev, void *addrp) { struct i2c_client *client = i2c_verify_client(dev); if (!client || client->addr != *(unsigned int *)addrp) return 0; return dev->driver ? -EBUSY : 0; } /* walk up mux tree */ static int i2cdev_check_mux_parents(struct i2c_adapter *adapter, int addr) { struct i2c_adapter *parent = i2c_parent_is_i2c_adapter(adapter); int result; result = device_for_each_child(&adapter->dev, &addr, i2cdev_check); if (!result && parent) result = i2cdev_check_mux_parents(parent, addr); return result; } /* recurse down mux tree */ static int i2cdev_check_mux_children(struct device *dev, void *addrp) { int result; if (dev->type == &i2c_adapter_type) result = device_for_each_child(dev, addrp, i2cdev_check_mux_children); else result = i2cdev_check(dev, addrp); return result; } /* This address checking function differs from the one in i2c-core in that it considers an address with a registered device, but no driver bound to it, as NOT busy. */ static int i2cdev_check_addr(struct i2c_adapter *adapter, unsigned int addr) { struct i2c_adapter *parent = i2c_parent_is_i2c_adapter(adapter); int result = 0; if (parent) result = i2cdev_check_mux_parents(parent, addr); if (!result) result = device_for_each_child(&adapter->dev, &addr, i2cdev_check_mux_children); return result; } static noinline int i2cdev_ioctl_rdwr(struct i2c_client *client, unsigned nmsgs, struct i2c_msg *msgs) { u8 __user **data_ptrs; int i, res; data_ptrs = kmalloc_array(nmsgs, sizeof(u8 __user *), GFP_KERNEL); if (data_ptrs == NULL) { kfree(msgs); return -ENOMEM; } res = 0; for (i = 0; i < nmsgs; i++) { /* Limit the size of the message to a sane amount */ if (msgs[i].len > 8192) { res = -EINVAL; break; } data_ptrs[i] = (u8 __user *)msgs[i].buf; msgs[i].buf = memdup_user(data_ptrs[i], msgs[i].len); if (IS_ERR(msgs[i].buf)) { res = PTR_ERR(msgs[i].buf); break; } /* memdup_user allocates with GFP_KERNEL, so DMA is ok */ msgs[i].flags |= I2C_M_DMA_SAFE; /* * If the message length is received from the slave (similar * to SMBus block read), we must ensure that the buffer will * be large enough to cope with a message length of * I2C_SMBUS_BLOCK_MAX as this is the maximum underlying bus * drivers allow. 
The first byte in the buffer must be * pre-filled with the number of extra bytes, which must be * at least one to hold the message length, but can be * greater (for example to account for a checksum byte at * the end of the message.) */ if (msgs[i].flags & I2C_M_RECV_LEN) { if (!(msgs[i].flags & I2C_M_RD) || msgs[i].len < 1 || msgs[i].buf[0] < 1 || msgs[i].len < msgs[i].buf[0] + I2C_SMBUS_BLOCK_MAX) { i++; res = -EINVAL; break; } msgs[i].len = msgs[i].buf[0]; } } if (res < 0) { int j; for (j = 0; j < i; ++j) kfree(msgs[j].buf); kfree(data_ptrs); kfree(msgs); return res; } res = i2c_transfer(client->adapter, msgs, nmsgs); while (i-- > 0) { if (res >= 0 && (msgs[i].flags & I2C_M_RD)) { if (copy_to_user(data_ptrs[i], msgs[i].buf, msgs[i].len)) res = -EFAULT; } kfree(msgs[i].buf); } kfree(data_ptrs); kfree(msgs); return res; } static noinline int i2cdev_ioctl_smbus(struct i2c_client *client, u8 read_write, u8 command, u32 size, union i2c_smbus_data __user *data) { union i2c_smbus_data temp = {}; int datasize, res; if ((size != I2C_SMBUS_BYTE) && (size != I2C_SMBUS_QUICK) && (size != I2C_SMBUS_BYTE_DATA) && (size != I2C_SMBUS_WORD_DATA) && (size != I2C_SMBUS_PROC_CALL) && (size != I2C_SMBUS_BLOCK_DATA) && (size != I2C_SMBUS_I2C_BLOCK_BROKEN) && (size != I2C_SMBUS_I2C_BLOCK_DATA) && (size != I2C_SMBUS_BLOCK_PROC_CALL)) { dev_dbg(&client->adapter->dev, "size out of range (%x) in ioctl I2C_SMBUS.\n", size); return -EINVAL; } /* Note that I2C_SMBUS_READ and I2C_SMBUS_WRITE are 0 and 1, so the check is valid if size==I2C_SMBUS_QUICK too. */ if ((read_write != I2C_SMBUS_READ) && (read_write != I2C_SMBUS_WRITE)) { dev_dbg(&client->adapter->dev, "read_write out of range (%x) in ioctl I2C_SMBUS.\n", read_write); return -EINVAL; } /* Note that command values are always valid! */ if ((size == I2C_SMBUS_QUICK) || ((size == I2C_SMBUS_BYTE) && (read_write == I2C_SMBUS_WRITE))) /* These are special: we do not use data */ return i2c_smbus_xfer(client->adapter, client->addr, client->flags, read_write, command, size, NULL); if (data == NULL) { dev_dbg(&client->adapter->dev, "data is NULL pointer in ioctl I2C_SMBUS.\n"); return -EINVAL; } if ((size == I2C_SMBUS_BYTE_DATA) || (size == I2C_SMBUS_BYTE)) datasize = sizeof(data->byte); else if ((size == I2C_SMBUS_WORD_DATA) || (size == I2C_SMBUS_PROC_CALL)) datasize = sizeof(data->word); else /* size == smbus block, i2c block, or block proc. call */ datasize = sizeof(data->block); if ((size == I2C_SMBUS_PROC_CALL) || (size == I2C_SMBUS_BLOCK_PROC_CALL) || (size == I2C_SMBUS_I2C_BLOCK_DATA) || (read_write == I2C_SMBUS_WRITE)) { if (copy_from_user(&temp, data, datasize)) return -EFAULT; } if (size == I2C_SMBUS_I2C_BLOCK_BROKEN) { /* Convert old I2C block commands to the new convention. This preserves binary compatibility. 
*/ size = I2C_SMBUS_I2C_BLOCK_DATA; if (read_write == I2C_SMBUS_READ) temp.block[0] = I2C_SMBUS_BLOCK_MAX; } res = i2c_smbus_xfer(client->adapter, client->addr, client->flags, read_write, command, size, &temp); if (!res && ((size == I2C_SMBUS_PROC_CALL) || (size == I2C_SMBUS_BLOCK_PROC_CALL) || (read_write == I2C_SMBUS_READ))) { if (copy_to_user(data, &temp, datasize)) return -EFAULT; } return res; } static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct i2c_client *client = file->private_data; unsigned long funcs; dev_dbg(&client->adapter->dev, "ioctl, cmd=0x%02x, arg=0x%02lx\n", cmd, arg); switch (cmd) { case I2C_SLAVE: case I2C_SLAVE_FORCE: if ((arg > 0x3ff) || (((client->flags & I2C_M_TEN) == 0) && arg > 0x7f)) return -EINVAL; if (cmd == I2C_SLAVE && i2cdev_check_addr(client->adapter, arg)) return -EBUSY; /* REVISIT: address could become busy later */ client->addr = arg; return 0; case I2C_TENBIT: if (arg) client->flags |= I2C_M_TEN; else client->flags &= ~I2C_M_TEN; return 0; case I2C_PEC: /* * Setting the PEC flag here won't affect kernel drivers, * which will be using the i2c_client node registered with * the driver model core. Likewise, when that client has * the PEC flag already set, the i2c-dev driver won't see * (or use) this setting. */ if (arg) client->flags |= I2C_CLIENT_PEC; else client->flags &= ~I2C_CLIENT_PEC; return 0; case I2C_FUNCS: funcs = i2c_get_functionality(client->adapter); return put_user(funcs, (unsigned long __user *)arg); case I2C_RDWR: { struct i2c_rdwr_ioctl_data rdwr_arg; struct i2c_msg *rdwr_pa; if (copy_from_user(&rdwr_arg, (struct i2c_rdwr_ioctl_data __user *)arg, sizeof(rdwr_arg))) return -EFAULT; if (!rdwr_arg.msgs || rdwr_arg.nmsgs == 0) return -EINVAL; /* * Put an arbitrary limit on the number of messages that can * be sent at once */ if (rdwr_arg.nmsgs > I2C_RDWR_IOCTL_MAX_MSGS) return -EINVAL; rdwr_pa = memdup_user(rdwr_arg.msgs, rdwr_arg.nmsgs * sizeof(struct i2c_msg)); if (IS_ERR(rdwr_pa)) return PTR_ERR(rdwr_pa); return i2cdev_ioctl_rdwr(client, rdwr_arg.nmsgs, rdwr_pa); } case I2C_SMBUS: { struct i2c_smbus_ioctl_data data_arg; if (copy_from_user(&data_arg, (struct i2c_smbus_ioctl_data __user *) arg, sizeof(struct i2c_smbus_ioctl_data))) return -EFAULT; return i2cdev_ioctl_smbus(client, data_arg.read_write, data_arg.command, data_arg.size, data_arg.data); } case I2C_RETRIES: if (arg > INT_MAX) return -EINVAL; client->adapter->retries = arg; break; case I2C_TIMEOUT: if (arg > INT_MAX) return -EINVAL; /* For historical reasons, user-space sets the timeout * value in units of 10 ms. */ client->adapter->timeout = msecs_to_jiffies(arg * 10); break; default: /* NOTE: returning a fault code here could cause trouble * in buggy userspace code. Some old kernel bugs returned * zero in this case, and userspace code might accidentally * have depended on that bug. 
*/ return -ENOTTY; } return 0; } #ifdef CONFIG_COMPAT struct i2c_smbus_ioctl_data32 { u8 read_write; u8 command; u32 size; compat_caddr_t data; /* union i2c_smbus_data *data */ }; struct i2c_msg32 { u16 addr; u16 flags; u16 len; compat_caddr_t buf; }; struct i2c_rdwr_ioctl_data32 { compat_caddr_t msgs; /* struct i2c_msg __user *msgs */ u32 nmsgs; }; static long compat_i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct i2c_client *client = file->private_data; unsigned long funcs; switch (cmd) { case I2C_FUNCS: funcs = i2c_get_functionality(client->adapter); return put_user(funcs, (compat_ulong_t __user *)arg); case I2C_RDWR: { struct i2c_rdwr_ioctl_data32 rdwr_arg; struct i2c_msg32 __user *p; struct i2c_msg *rdwr_pa; int i; if (copy_from_user(&rdwr_arg, (struct i2c_rdwr_ioctl_data32 __user *)arg, sizeof(rdwr_arg))) return -EFAULT; if (!rdwr_arg.msgs || rdwr_arg.nmsgs == 0) return -EINVAL; if (rdwr_arg.nmsgs > I2C_RDWR_IOCTL_MAX_MSGS) return -EINVAL; rdwr_pa = kmalloc_array(rdwr_arg.nmsgs, sizeof(struct i2c_msg), GFP_KERNEL); if (!rdwr_pa) return -ENOMEM; p = compat_ptr(rdwr_arg.msgs); for (i = 0; i < rdwr_arg.nmsgs; i++) { struct i2c_msg32 umsg; if (copy_from_user(&umsg, p + i, sizeof(umsg))) { kfree(rdwr_pa); return -EFAULT; } rdwr_pa[i] = (struct i2c_msg) { .addr = umsg.addr, .flags = umsg.flags, .len = umsg.len, .buf = (__force __u8 *)compat_ptr(umsg.buf), }; } return i2cdev_ioctl_rdwr(client, rdwr_arg.nmsgs, rdwr_pa); } case I2C_SMBUS: { struct i2c_smbus_ioctl_data32 data32; if (copy_from_user(&data32, (void __user *) arg, sizeof(data32))) return -EFAULT; return i2cdev_ioctl_smbus(client, data32.read_write, data32.command, data32.size, compat_ptr(data32.data)); } default: return i2cdev_ioctl(file, cmd, arg); } } #else #define compat_i2cdev_ioctl NULL #endif static int i2cdev_open(struct inode *inode, struct file *file) { unsigned int minor = iminor(inode); struct i2c_client *client; struct i2c_adapter *adap; adap = i2c_get_adapter(minor); if (!adap) return -ENODEV; /* This creates an anonymous i2c_client, which may later be * pointed to some address using I2C_SLAVE or I2C_SLAVE_FORCE. * * This client is ** NEVER REGISTERED ** with the driver model * or I2C core code!! It just holds private copies of addressing * information and maybe a PEC flag. 
*/ client = kzalloc(sizeof(*client), GFP_KERNEL); if (!client) { i2c_put_adapter(adap); return -ENOMEM; } snprintf(client->name, I2C_NAME_SIZE, "i2c-dev %d", adap->nr); client->adapter = adap; file->private_data = client; return 0; } static int i2cdev_release(struct inode *inode, struct file *file) { struct i2c_client *client = file->private_data; i2c_put_adapter(client->adapter); kfree(client); file->private_data = NULL; return 0; } static const struct file_operations i2cdev_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .read = i2cdev_read, .write = i2cdev_write, .unlocked_ioctl = i2cdev_ioctl, .compat_ioctl = compat_i2cdev_ioctl, .open = i2cdev_open, .release = i2cdev_release, }; /* ------------------------------------------------------------------------- */ static struct class *i2c_dev_class; static void i2cdev_dev_release(struct device *dev) { struct i2c_dev *i2c_dev; i2c_dev = container_of(dev, struct i2c_dev, dev); kfree(i2c_dev); } static int i2cdev_attach_adapter(struct device *dev) { struct i2c_adapter *adap; struct i2c_dev *i2c_dev; int res; if (dev->type != &i2c_adapter_type) return NOTIFY_DONE; adap = to_i2c_adapter(dev); i2c_dev = get_free_i2c_dev(adap); if (IS_ERR(i2c_dev)) return NOTIFY_DONE; cdev_init(&i2c_dev->cdev, &i2cdev_fops); i2c_dev->cdev.owner = THIS_MODULE; device_initialize(&i2c_dev->dev); i2c_dev->dev.devt = MKDEV(I2C_MAJOR, adap->nr); i2c_dev->dev.class = i2c_dev_class; i2c_dev->dev.parent = &adap->dev; i2c_dev->dev.release = i2cdev_dev_release; res = dev_set_name(&i2c_dev->dev, "i2c-%d", adap->nr); if (res) goto err_put_i2c_dev; res = cdev_device_add(&i2c_dev->cdev, &i2c_dev->dev); if (res) goto err_put_i2c_dev; pr_debug("adapter [%s] registered as minor %d\n", adap->name, adap->nr); return NOTIFY_OK; err_put_i2c_dev: put_i2c_dev(i2c_dev, false); return NOTIFY_DONE; } static int i2cdev_detach_adapter(struct device *dev) { struct i2c_adapter *adap; struct i2c_dev *i2c_dev; if (dev->type != &i2c_adapter_type) return NOTIFY_DONE; adap = to_i2c_adapter(dev); i2c_dev = i2c_dev_get_by_minor(adap->nr); if (!i2c_dev) /* attach_adapter must have failed */ return NOTIFY_DONE; put_i2c_dev(i2c_dev, true); pr_debug("adapter [%s] unregistered\n", adap->name); return NOTIFY_OK; } static int i2cdev_notifier_call(struct notifier_block *nb, unsigned long action, void *data) { struct device *dev = data; switch (action) { case BUS_NOTIFY_ADD_DEVICE: return i2cdev_attach_adapter(dev); case BUS_NOTIFY_DEL_DEVICE: return i2cdev_detach_adapter(dev); } return NOTIFY_DONE; } static struct notifier_block i2cdev_notifier = { .notifier_call = i2cdev_notifier_call, }; /* ------------------------------------------------------------------------- */ static int __init i2c_dev_attach_adapter(struct device *dev, void *dummy) { i2cdev_attach_adapter(dev); return 0; } static int __exit i2c_dev_detach_adapter(struct device *dev, void *dummy) { i2cdev_detach_adapter(dev); return 0; } /* * module load/unload record keeping */ static int __init i2c_dev_init(void) { int res; pr_info("i2c /dev entries driver\n"); res = register_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS, "i2c"); if (res) goto out; i2c_dev_class = class_create("i2c-dev"); if (IS_ERR(i2c_dev_class)) { res = PTR_ERR(i2c_dev_class); goto out_unreg_chrdev; } i2c_dev_class->dev_groups = i2c_groups; /* Keep track of adapters which will be added or removed later */ res = bus_register_notifier(&i2c_bus_type, &i2cdev_notifier); if (res) goto out_unreg_class; /* Bind to already existing adapters right away */ i2c_for_each_dev(NULL, 
i2c_dev_attach_adapter); return 0; out_unreg_class: class_destroy(i2c_dev_class); out_unreg_chrdev: unregister_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS); out: pr_err("Driver Initialisation failed\n"); return res; } static void __exit i2c_dev_exit(void) { bus_unregister_notifier(&i2c_bus_type, &i2cdev_notifier); i2c_for_each_dev(NULL, i2c_dev_detach_adapter); class_destroy(i2c_dev_class); unregister_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS); } MODULE_AUTHOR("Frodo Looijaard <[email protected]>"); MODULE_AUTHOR("Simon G. Vogl <[email protected]>"); MODULE_DESCRIPTION("I2C /dev entries driver"); MODULE_LICENSE("GPL"); module_init(i2c_dev_init); module_exit(i2c_dev_exit);
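/*
 * Illustrative sketch, not part of the driver above: the user-space
 * view of this char device. Bus number and slave address are invented
 * and error handling is minimal; build it as a normal user-space
 * program, hence the #if 0 guard here.
 */
#if 0
#include <fcntl.h>
#include <linux/i2c-dev.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[4];
	int fd = open("/dev/i2c-1", O_RDWR);

	if (fd < 0)
		return 1;

	/*
	 * Bind this fd to slave address 0x50. i2cdev_ioctl() above
	 * rejects the address if a kernel driver already claimed it;
	 * I2C_SLAVE_FORCE overrides that check.
	 */
	if (ioctl(fd, I2C_SLAVE, 0x50) < 0)
		return 1;

	/* a plain read() becomes i2c_master_recv() via i2cdev_read() */
	if (read(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf))
		return 1;

	printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
	close(fd);
	return 0;
}
#endif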
linux-master
drivers/i2c/i2c-dev.c
// SPDX-License-Identifier: GPL-2.0 /* * Microchip PCI1XXXX I2C adapter driver for PCIe Switch * which has I2C controller in one of its downstream functions * * Copyright (C) 2021 - 2022 Microchip Technology Inc. * * Authors: Tharun Kumar P <[email protected]> * Kumaravel Thiagarajan <[email protected]> */ #include <linux/bits.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/i2c-smbus.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/types.h> #define SMBUS_MAST_CORE_ADDR_BASE 0x00000 #define SMBUS_MAST_SYS_REG_ADDR_BASE 0x01000 /* SMB register space. */ #define SMB_CORE_CTRL_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x00) #define SMB_CORE_CTRL_ESO BIT(6) #define SMB_CORE_CTRL_FW_ACK BIT(4) #define SMB_CORE_CTRL_ACK BIT(0) #define SMB_CORE_CMD_REG_OFF3 (SMBUS_MAST_CORE_ADDR_BASE + 0x0F) #define SMB_CORE_CMD_REG_OFF2 (SMBUS_MAST_CORE_ADDR_BASE + 0x0E) #define SMB_CORE_CMD_REG_OFF1 (SMBUS_MAST_CORE_ADDR_BASE + 0x0D) #define SMB_CORE_CMD_READM BIT(4) #define SMB_CORE_CMD_STOP BIT(2) #define SMB_CORE_CMD_START BIT(0) #define SMB_CORE_CMD_REG_OFF0 (SMBUS_MAST_CORE_ADDR_BASE + 0x0C) #define SMB_CORE_CMD_M_PROCEED BIT(1) #define SMB_CORE_CMD_M_RUN BIT(0) #define SMB_CORE_SR_HOLD_TIME_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x18) /* * SR_HOLD_TIME_XK_TICKS field will indicate the number of ticks of the * baud clock required to program 'Hold Time' at X KHz. */ #define SR_HOLD_TIME_100K_TICKS 150 #define SR_HOLD_TIME_400K_TICKS 20 #define SR_HOLD_TIME_1000K_TICKS 12 #define SMB_CORE_COMPLETION_REG_OFF3 (SMBUS_MAST_CORE_ADDR_BASE + 0x23) #define COMPLETION_MDONE BIT(6) #define COMPLETION_IDLE BIT(5) #define COMPLETION_MNAKX BIT(0) #define SMB_CORE_IDLE_SCALING_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x24) /* * FAIR_BUS_IDLE_MIN_XK_TICKS field will indicate the number of ticks of * the baud clock required to program 'fair idle delay' at X KHz. Fair idle * delay establishes the MCTP T(IDLE_DELAY) period. */ #define FAIR_BUS_IDLE_MIN_100K_TICKS 992 #define FAIR_BUS_IDLE_MIN_400K_TICKS 500 #define FAIR_BUS_IDLE_MIN_1000K_TICKS 500 /* * FAIR_IDLE_DELAY_XK_TICKS field will indicate the number of ticks of the * baud clock required to satisfy the fairness protocol at X KHz. */ #define FAIR_IDLE_DELAY_100K_TICKS 963 #define FAIR_IDLE_DELAY_400K_TICKS 156 #define FAIR_IDLE_DELAY_1000K_TICKS 156 #define SMB_IDLE_SCALING_100K \ ((FAIR_IDLE_DELAY_100K_TICKS << 16) | FAIR_BUS_IDLE_MIN_100K_TICKS) #define SMB_IDLE_SCALING_400K \ ((FAIR_IDLE_DELAY_400K_TICKS << 16) | FAIR_BUS_IDLE_MIN_400K_TICKS) #define SMB_IDLE_SCALING_1000K \ ((FAIR_IDLE_DELAY_1000K_TICKS << 16) | FAIR_BUS_IDLE_MIN_1000K_TICKS) #define SMB_CORE_CONFIG_REG3 (SMBUS_MAST_CORE_ADDR_BASE + 0x2B) #define SMB_CONFIG3_ENMI BIT(6) #define SMB_CONFIG3_ENIDI BIT(5) #define SMB_CORE_CONFIG_REG2 (SMBUS_MAST_CORE_ADDR_BASE + 0x2A) #define SMB_CORE_CONFIG_REG1 (SMBUS_MAST_CORE_ADDR_BASE + 0x29) #define SMB_CONFIG1_ASR BIT(7) #define SMB_CONFIG1_ENAB BIT(2) #define SMB_CONFIG1_RESET BIT(1) #define SMB_CONFIG1_FEN BIT(0) #define SMB_CORE_BUS_CLK_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x2C) /* * BUS_CLK_XK_LOW_PERIOD_TICKS field defines the number of I2C Baud Clock * periods that make up the low phase of the I2C/SMBus bus clock at X KHz. 
*/ #define BUS_CLK_100K_LOW_PERIOD_TICKS 156 #define BUS_CLK_400K_LOW_PERIOD_TICKS 41 #define BUS_CLK_1000K_LOW_PERIOD_TICKS 15 /* * BUS_CLK_XK_HIGH_PERIOD_TICKS field defines the number of I2C Baud Clock * periods that make up the high phase of the I2C/SMBus bus clock at X KHz. */ #define BUS_CLK_100K_HIGH_PERIOD_TICKS 154 #define BUS_CLK_400K_HIGH_PERIOD_TICKS 35 #define BUS_CLK_1000K_HIGH_PERIOD_TICKS 14 #define BUS_CLK_100K \ ((BUS_CLK_100K_HIGH_PERIOD_TICKS << 8) | BUS_CLK_100K_LOW_PERIOD_TICKS) #define BUS_CLK_400K \ ((BUS_CLK_400K_HIGH_PERIOD_TICKS << 8) | BUS_CLK_400K_LOW_PERIOD_TICKS) #define BUS_CLK_1000K \ ((BUS_CLK_1000K_HIGH_PERIOD_TICKS << 8) | BUS_CLK_1000K_LOW_PERIOD_TICKS) #define SMB_CORE_CLK_SYNC_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x3C) /* * CLK_SYNC_XK defines the number of clock cycles to sync up to the external * clock before comparing the internal and external clocks for clock stretching * at X KHz. */ #define CLK_SYNC_100K 4 #define CLK_SYNC_400K 4 #define CLK_SYNC_1000K 4 #define SMB_CORE_DATA_TIMING_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x40) /* * * FIRST_START_HOLD_XK_TICKS will indicate the number of ticks of the baud * clock required to program 'FIRST_START_HOLD' timer at X KHz. This timer * determines the SCLK hold time following SDAT driven low during the first * START bit in a transfer. */ #define FIRST_START_HOLD_100K_TICKS 23 #define FIRST_START_HOLD_400K_TICKS 8 #define FIRST_START_HOLD_1000K_TICKS 12 /* * STOP_SETUP_XK_TICKS will indicate the number of ticks of the baud clock * required to program 'STOP_SETUP' timer at X KHz. This timer determines the * SDAT setup time from the rising edge of SCLK for a STOP condition. */ #define STOP_SETUP_100K_TICKS 150 #define STOP_SETUP_400K_TICKS 20 #define STOP_SETUP_1000K_TICKS 12 /* * RESTART_SETUP_XK_TICKS will indicate the number of ticks of the baud clock * required to program 'RESTART_SETUP' timer at X KHz. This timer determines the * SDAT setup time from the rising edge of SCLK for a repeated START condition. */ #define RESTART_SETUP_100K_TICKS 156 #define RESTART_SETUP_400K_TICKS 20 #define RESTART_SETUP_1000K_TICKS 12 /* * DATA_HOLD_XK_TICKS will indicate the number of ticks of the baud clock * required to program 'DATA_HOLD' timer at X KHz. This timer determines the * SDAT hold time following SCLK driven low. */ #define DATA_HOLD_100K_TICKS 12 #define DATA_HOLD_400K_TICKS 2 #define DATA_HOLD_1000K_TICKS 2 #define DATA_TIMING_100K \ ((FIRST_START_HOLD_100K_TICKS << 24) | (STOP_SETUP_100K_TICKS << 16) | \ (RESTART_SETUP_100K_TICKS << 8) | DATA_HOLD_100K_TICKS) #define DATA_TIMING_400K \ ((FIRST_START_HOLD_400K_TICKS << 24) | (STOP_SETUP_400K_TICKS << 16) | \ (RESTART_SETUP_400K_TICKS << 8) | DATA_HOLD_400K_TICKS) #define DATA_TIMING_1000K \ ((FIRST_START_HOLD_1000K_TICKS << 24) | (STOP_SETUP_1000K_TICKS << 16) | \ (RESTART_SETUP_1000K_TICKS << 8) | DATA_HOLD_1000K_TICKS) #define SMB_CORE_TO_SCALING_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x44) /* * BUS_IDLE_MIN_XK_TICKS defines Bus Idle Minimum Time. * Bus Idle Minimum time = BUS_IDLE_MIN[7:0] x Baud_Clock_Period x * (BUS_IDLE_MIN_XK_TICKS[7] ? 4,1) */ #define BUS_IDLE_MIN_100K_TICKS 36UL #define BUS_IDLE_MIN_400K_TICKS 10UL #define BUS_IDLE_MIN_1000K_TICKS 4UL /* * CTRL_CUM_TIME_OUT_XK_TICKS defines SMBus Controller Cumulative Time-Out. 
* SMBus Controller Cumulative Time-Out duration = * CTRL_CUM_TIME_OUT_XK_TICKS[7:0] x Baud_Clock_Period x 2048 */ #define CTRL_CUM_TIME_OUT_100K_TICKS 76 #define CTRL_CUM_TIME_OUT_400K_TICKS 76 #define CTRL_CUM_TIME_OUT_1000K_TICKS 76 /* * TARGET_CUM_TIME_OUT_XK_TICKS defines SMBus Target Cumulative Time-Out duration. * SMBus Target Cumulative Time-Out duration = TARGET_CUM_TIME_OUT_XK_TICKS[7:0] x * Baud_Clock_Period x 4096 */ #define TARGET_CUM_TIME_OUT_100K_TICKS 95 #define TARGET_CUM_TIME_OUT_400K_TICKS 95 #define TARGET_CUM_TIME_OUT_1000K_TICKS 95 /* * CLOCK_HIGH_TIME_OUT_XK defines Clock High time out period. * Clock High time out period = CLOCK_HIGH_TIME_OUT_XK[7:0] x Baud_Clock_Period x 8 */ #define CLOCK_HIGH_TIME_OUT_100K_TICKS 97 #define CLOCK_HIGH_TIME_OUT_400K_TICKS 97 #define CLOCK_HIGH_TIME_OUT_1000K_TICKS 97 #define TO_SCALING_100K \ ((BUS_IDLE_MIN_100K_TICKS << 24) | (CTRL_CUM_TIME_OUT_100K_TICKS << 16) | \ (TARGET_CUM_TIME_OUT_100K_TICKS << 8) | CLOCK_HIGH_TIME_OUT_100K_TICKS) #define TO_SCALING_400K \ ((BUS_IDLE_MIN_400K_TICKS << 24) | (CTRL_CUM_TIME_OUT_400K_TICKS << 16) | \ (TARGET_CUM_TIME_OUT_400K_TICKS << 8) | CLOCK_HIGH_TIME_OUT_400K_TICKS) #define TO_SCALING_1000K \ ((BUS_IDLE_MIN_1000K_TICKS << 24) | (CTRL_CUM_TIME_OUT_1000K_TICKS << 16) | \ (TARGET_CUM_TIME_OUT_1000K_TICKS << 8) | CLOCK_HIGH_TIME_OUT_1000K_TICKS) #define I2C_SCL_PAD_CTRL_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x100) #define I2C_SDA_PAD_CTRL_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x101) #define I2C_FOD_EN BIT(4) #define I2C_PULL_UP_EN BIT(3) #define I2C_PULL_DOWN_EN BIT(2) #define I2C_INPUT_EN BIT(1) #define I2C_OUTPUT_EN BIT(0) #define SMBUS_CONTROL_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x200) #define CTL_RESET_COUNTERS BIT(3) #define CTL_TRANSFER_DIR BIT(2) #define CTL_HOST_FIFO_ENTRY BIT(1) #define CTL_RUN BIT(0) #define I2C_DIRN_WRITE 0 #define I2C_DIRN_READ 1 #define SMBUS_STATUS_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x204) #define STA_DMA_TERM BIT(7) #define STA_DMA_REQ BIT(6) #define STA_THRESHOLD BIT(2) #define STA_BUF_FULL BIT(1) #define STA_BUF_EMPTY BIT(0) #define SMBUS_INTR_STAT_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x208) #define INTR_STAT_DMA_TERM BIT(7) #define INTR_STAT_THRESHOLD BIT(2) #define INTR_STAT_BUF_FULL BIT(1) #define INTR_STAT_BUF_EMPTY BIT(0) #define SMBUS_INTR_MSK_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x20C) #define INTR_MSK_DMA_TERM BIT(7) #define INTR_MSK_THRESHOLD BIT(2) #define INTR_MSK_BUF_FULL BIT(1) #define INTR_MSK_BUF_EMPTY BIT(0) #define ALL_NW_LAYER_INTERRUPTS \ (INTR_MSK_DMA_TERM | INTR_MSK_THRESHOLD | INTR_MSK_BUF_FULL | \ INTR_MSK_BUF_EMPTY) #define SMBUS_MCU_COUNTER_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x214) #define SMBALERT_MST_PAD_CTRL_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x230) #define SMBALERT_MST_PU BIT(0) #define SMBUS_GEN_INT_STAT_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x23C) #define SMBUS_GEN_INT_MASK_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x240) #define SMBALERT_INTR_MASK BIT(10) #define I2C_BUF_MSTR_INTR_MASK BIT(9) #define I2C_INTR_MASK BIT(8) #define SMBALERT_WAKE_INTR_MASK BIT(2) #define I2C_BUF_MSTR_WAKE_INTR_MASK BIT(1) #define I2C_WAKE_INTR_MASK BIT(0) #define ALL_HIGH_LAYER_INTR \ (SMBALERT_INTR_MASK | I2C_BUF_MSTR_INTR_MASK | I2C_INTR_MASK | \ SMBALERT_WAKE_INTR_MASK | I2C_BUF_MSTR_WAKE_INTR_MASK | \ I2C_WAKE_INTR_MASK) #define SMBUS_RESET_REG (SMBUS_MAST_CORE_ADDR_BASE + 0x248) #define PERI_SMBUS_D3_RESET_DIS BIT(16) #define SMBUS_MST_BUF (SMBUS_MAST_CORE_ADDR_BASE + 0x280) #define SMBUS_BUF_MAX_SIZE 0x80 #define I2C_FLAGS_DIRECT_MODE BIT(7) 
#define I2C_FLAGS_POLLING_MODE BIT(6) #define I2C_FLAGS_STOP BIT(5) #define I2C_FLAGS_SMB_BLK_READ BIT(4) #define PCI1XXXX_I2C_TIMEOUT_MS 1000 /* General Purpose Register. */ #define SMB_GPR_REG (SMBUS_MAST_CORE_ADDR_BASE + 0x1000 + 0x0c00 + \ 0x00) /* Lock Register. */ #define SMB_GPR_LOCK_REG (SMBUS_MAST_CORE_ADDR_BASE + 0x1000 + 0x0000 + \ 0x00A0) #define SMBUS_PERI_LOCK BIT(3) struct pci1xxxx_i2c { struct completion i2c_xfer_done; bool i2c_xfer_in_progress; struct i2c_adapter adap; void __iomem *i2c_base; u32 freq; u32 flags; }; static int set_sys_lock(struct pci1xxxx_i2c *i2c) { void __iomem *p = i2c->i2c_base + SMB_GPR_LOCK_REG; u8 data; writel(SMBUS_PERI_LOCK, p); data = readl(p); if (data != SMBUS_PERI_LOCK) return -EPERM; return 0; } static int release_sys_lock(struct pci1xxxx_i2c *i2c) { void __iomem *p = i2c->i2c_base + SMB_GPR_LOCK_REG; u8 data; data = readl(p); if (data != SMBUS_PERI_LOCK) return 0; writel(0, p); data = readl(p); if (data & SMBUS_PERI_LOCK) return -EPERM; return 0; } static void pci1xxxx_ack_high_level_intr(struct pci1xxxx_i2c *i2c, u16 intr_msk) { writew(intr_msk, i2c->i2c_base + SMBUS_GEN_INT_STAT_REG_OFF); } static void pci1xxxx_i2c_configure_smbalert_pin(struct pci1xxxx_i2c *i2c, bool enable) { void __iomem *p = i2c->i2c_base + SMBALERT_MST_PAD_CTRL_REG_OFF; u8 regval; regval = readb(p); if (enable) regval |= SMBALERT_MST_PU; else regval &= ~SMBALERT_MST_PU; writeb(regval, p); } static void pci1xxxx_i2c_send_start_stop(struct pci1xxxx_i2c *i2c, bool start) { void __iomem *p = i2c->i2c_base + SMB_CORE_CMD_REG_OFF1; u8 regval; regval = readb(p); if (start) regval |= SMB_CORE_CMD_START; else regval |= SMB_CORE_CMD_STOP; writeb(regval, p); } /* * When accessing the core control reg, we should not do a read-modify-write * as it contains write-'1'-to-clear bits. Instead we need to write only the * specific bits that need to be set. */ static void pci1xxxx_i2c_set_clear_FW_ACK(struct pci1xxxx_i2c *i2c, bool set) { u8 regval; if (set) regval = SMB_CORE_CTRL_FW_ACK | SMB_CORE_CTRL_ESO | SMB_CORE_CTRL_ACK; else regval = SMB_CORE_CTRL_ESO | SMB_CORE_CTRL_ACK; writeb(regval, i2c->i2c_base + SMB_CORE_CTRL_REG_OFF); } static void pci1xxxx_i2c_buffer_write(struct pci1xxxx_i2c *i2c, u8 slaveaddr, u8 transferlen, unsigned char *buf) { void __iomem *p = i2c->i2c_base + SMBUS_MST_BUF; if (slaveaddr) writeb(slaveaddr, p++); if (buf) memcpy_toio(p, buf, transferlen); } /* * When accessing the core control reg, we should not do a read-modify-write * as it contains write-'1'-to-clear bits. Instead we need to write only the * specific bits that need to be set.
*/ static void pci1xxxx_i2c_enable_ESO(struct pci1xxxx_i2c *i2c) { writeb(SMB_CORE_CTRL_ESO, i2c->i2c_base + SMB_CORE_CTRL_REG_OFF); } static void pci1xxxx_i2c_reset_counters(struct pci1xxxx_i2c *i2c) { void __iomem *p = i2c->i2c_base + SMBUS_CONTROL_REG_OFF; u8 regval; regval = readb(p); regval |= CTL_RESET_COUNTERS; writeb(regval, p); } static void pci1xxxx_i2c_set_transfer_dir(struct pci1xxxx_i2c *i2c, u8 direction) { void __iomem *p = i2c->i2c_base + SMBUS_CONTROL_REG_OFF; u8 regval; regval = readb(p); if (direction == I2C_DIRN_WRITE) regval &= ~CTL_TRANSFER_DIR; else regval |= CTL_TRANSFER_DIR; writeb(regval, p); } static void pci1xxxx_i2c_set_mcu_count(struct pci1xxxx_i2c *i2c, u8 count) { writeb(count, i2c->i2c_base + SMBUS_MCU_COUNTER_REG_OFF); } static void pci1xxxx_i2c_set_read_count(struct pci1xxxx_i2c *i2c, u8 readcount) { writeb(readcount, i2c->i2c_base + SMB_CORE_CMD_REG_OFF3); } static void pci1xxxx_i2c_set_write_count(struct pci1xxxx_i2c *i2c, u8 writecount) { writeb(writecount, i2c->i2c_base + SMB_CORE_CMD_REG_OFF2); } static void pci1xxxx_i2c_set_DMA_run(struct pci1xxxx_i2c *i2c) { void __iomem *p = i2c->i2c_base + SMBUS_CONTROL_REG_OFF; u8 regval; regval = readb(p); regval |= CTL_RUN; writeb(regval, p); } static void pci1xxxx_i2c_set_mrun_proceed(struct pci1xxxx_i2c *i2c) { void __iomem *p = i2c->i2c_base + SMB_CORE_CMD_REG_OFF0; u8 regval; regval = readb(p); regval |= SMB_CORE_CMD_M_RUN; regval |= SMB_CORE_CMD_M_PROCEED; writeb(regval, p); } static void pci1xxxx_i2c_start_DMA(struct pci1xxxx_i2c *i2c) { pci1xxxx_i2c_set_DMA_run(i2c); pci1xxxx_i2c_set_mrun_proceed(i2c); } static void pci1xxxx_i2c_config_asr(struct pci1xxxx_i2c *i2c, bool enable) { void __iomem *p = i2c->i2c_base + SMB_CORE_CONFIG_REG1; u8 regval; regval = readb(p); if (enable) regval |= SMB_CONFIG1_ASR; else regval &= ~SMB_CONFIG1_ASR; writeb(regval, p); } static irqreturn_t pci1xxxx_i2c_isr(int irq, void *dev) { struct pci1xxxx_i2c *i2c = dev; void __iomem *p1 = i2c->i2c_base + SMBUS_GEN_INT_STAT_REG_OFF; void __iomem *p2 = i2c->i2c_base + SMBUS_INTR_STAT_REG_OFF; irqreturn_t intr_handled = IRQ_NONE; u16 reg1; u8 reg3; /* * Read the SMBus interrupt status register to see if the * DMA_TERM interrupt has caused this callback. 
*/ reg1 = readw(p1); if (reg1 & I2C_BUF_MSTR_INTR_MASK) { reg3 = readb(p2); if (reg3 & INTR_STAT_DMA_TERM) { complete(&i2c->i2c_xfer_done); intr_handled = IRQ_HANDLED; writeb(INTR_STAT_DMA_TERM, p2); } pci1xxxx_ack_high_level_intr(i2c, I2C_BUF_MSTR_INTR_MASK); } if (reg1 & SMBALERT_INTR_MASK) { intr_handled = IRQ_HANDLED; pci1xxxx_ack_high_level_intr(i2c, SMBALERT_INTR_MASK); } return intr_handled; } static void pci1xxxx_i2c_set_count(struct pci1xxxx_i2c *i2c, u8 mcucount, u8 writecount, u8 readcount) { pci1xxxx_i2c_set_mcu_count(i2c, mcucount); pci1xxxx_i2c_set_write_count(i2c, writecount); pci1xxxx_i2c_set_read_count(i2c, readcount); } static void pci1xxxx_i2c_set_readm(struct pci1xxxx_i2c *i2c, bool enable) { void __iomem *p = i2c->i2c_base + SMB_CORE_CMD_REG_OFF1; u8 regval; regval = readb(p); if (enable) regval |= SMB_CORE_CMD_READM; else regval &= ~SMB_CORE_CMD_READM; writeb(regval, p); } static void pci1xxxx_ack_nw_layer_intr(struct pci1xxxx_i2c *i2c, u8 ack_intr_msk) { writeb(ack_intr_msk, i2c->i2c_base + SMBUS_INTR_STAT_REG_OFF); } static void pci1xxxx_config_nw_layer_intr(struct pci1xxxx_i2c *i2c, u8 intr_msk, bool enable) { void __iomem *p = i2c->i2c_base + SMBUS_INTR_MSK_REG_OFF; u8 regval; regval = readb(p); if (enable) regval &= ~intr_msk; else regval |= intr_msk; writeb(regval, p); } static void pci1xxxx_i2c_config_padctrl(struct pci1xxxx_i2c *i2c, bool enable) { void __iomem *p1 = i2c->i2c_base + I2C_SCL_PAD_CTRL_REG_OFF; void __iomem *p2 = i2c->i2c_base + I2C_SDA_PAD_CTRL_REG_OFF; u8 regval; regval = readb(p1); if (enable) regval |= I2C_INPUT_EN | I2C_OUTPUT_EN; else regval &= ~(I2C_INPUT_EN | I2C_OUTPUT_EN); writeb(regval, p1); regval = readb(p2); if (enable) regval |= I2C_INPUT_EN | I2C_OUTPUT_EN; else regval &= ~(I2C_INPUT_EN | I2C_OUTPUT_EN); writeb(regval, p2); } static void pci1xxxx_i2c_set_mode(struct pci1xxxx_i2c *i2c) { void __iomem *p = i2c->i2c_base + SMBUS_CONTROL_REG_OFF; u8 regval; regval = readb(p); if (i2c->flags & I2C_FLAGS_DIRECT_MODE) regval &= ~CTL_HOST_FIFO_ENTRY; else regval |= CTL_HOST_FIFO_ENTRY; writeb(regval, p); } static void pci1xxxx_i2c_config_high_level_intr(struct pci1xxxx_i2c *i2c, u16 intr_msk, bool enable) { void __iomem *p = i2c->i2c_base + SMBUS_GEN_INT_MASK_REG_OFF; u16 regval; regval = readw(p); if (enable) regval &= ~intr_msk; else regval |= intr_msk; writew(regval, p); } static void pci1xxxx_i2c_configure_core_reg(struct pci1xxxx_i2c *i2c, bool enable) { void __iomem *p1 = i2c->i2c_base + SMB_CORE_CONFIG_REG1; void __iomem *p3 = i2c->i2c_base + SMB_CORE_CONFIG_REG3; u8 reg1; u8 reg3; reg1 = readb(p1); reg3 = readb(p3); if (enable) { reg1 |= SMB_CONFIG1_ENAB | SMB_CONFIG1_FEN; reg3 |= SMB_CONFIG3_ENMI | SMB_CONFIG3_ENIDI; } else { reg1 &= ~(SMB_CONFIG1_ENAB | SMB_CONFIG1_FEN); reg3 &= ~(SMB_CONFIG3_ENMI | SMB_CONFIG3_ENIDI); } writeb(reg1, p1); writeb(reg3, p3); } static void pci1xxxx_i2c_set_freq(struct pci1xxxx_i2c *i2c) { void __iomem *bp = i2c->i2c_base; void __iomem *p_idle_scaling = bp + SMB_CORE_IDLE_SCALING_REG_OFF; void __iomem *p_data_timing = bp + SMB_CORE_DATA_TIMING_REG_OFF; void __iomem *p_hold_time = bp + SMB_CORE_SR_HOLD_TIME_REG_OFF; void __iomem *p_to_scaling = bp + SMB_CORE_TO_SCALING_REG_OFF; void __iomem *p_clk_sync = bp + SMB_CORE_CLK_SYNC_REG_OFF; void __iomem *p_clk_reg = bp + SMB_CORE_BUS_CLK_REG_OFF; switch (i2c->freq) { case I2C_MAX_STANDARD_MODE_FREQ: writeb(SR_HOLD_TIME_100K_TICKS, p_hold_time); writel(SMB_IDLE_SCALING_100K, p_idle_scaling); writew(BUS_CLK_100K, p_clk_reg); writel(CLK_SYNC_100K, 
p_clk_sync); writel(DATA_TIMING_100K, p_data_timing); writel(TO_SCALING_100K, p_to_scaling); break; case I2C_MAX_FAST_MODE_PLUS_FREQ: writeb(SR_HOLD_TIME_1000K_TICKS, p_hold_time); writel(SMB_IDLE_SCALING_1000K, p_idle_scaling); writew(BUS_CLK_1000K, p_clk_reg); writel(CLK_SYNC_1000K, p_clk_sync); writel(DATA_TIMING_1000K, p_data_timing); writel(TO_SCALING_1000K, p_to_scaling); break; case I2C_MAX_FAST_MODE_FREQ: default: writeb(SR_HOLD_TIME_400K_TICKS, p_hold_time); writel(SMB_IDLE_SCALING_400K, p_idle_scaling); writew(BUS_CLK_400K, p_clk_reg); writel(CLK_SYNC_400K, p_clk_sync); writel(DATA_TIMING_400K, p_data_timing); writel(TO_SCALING_400K, p_to_scaling); break; } } static void pci1xxxx_i2c_init(struct pci1xxxx_i2c *i2c) { void __iomem *p2 = i2c->i2c_base + SMBUS_STATUS_REG_OFF; void __iomem *p1 = i2c->i2c_base + SMB_GPR_REG; u8 regval; int ret; ret = set_sys_lock(i2c); if (ret == -EPERM) { /* * Configure I2C Fast Mode as default frequency if unable * to acquire sys lock. */ regval = 0; } else { regval = readl(p1); release_sys_lock(i2c); } switch (regval) { case 0: i2c->freq = I2C_MAX_FAST_MODE_FREQ; pci1xxxx_i2c_set_freq(i2c); break; case 1: i2c->freq = I2C_MAX_STANDARD_MODE_FREQ; pci1xxxx_i2c_set_freq(i2c); break; case 2: i2c->freq = I2C_MAX_FAST_MODE_PLUS_FREQ; pci1xxxx_i2c_set_freq(i2c); break; case 3: default: break; } pci1xxxx_i2c_config_padctrl(i2c, true); i2c->flags |= I2C_FLAGS_DIRECT_MODE; pci1xxxx_i2c_set_mode(i2c); /* * Added as a precaution since BUF_EMPTY in the status register * also triggered an interrupt. */ writeb(STA_BUF_EMPTY, p2); /* Configure core I2C control registers. */ pci1xxxx_i2c_configure_core_reg(i2c, true); /* * Enable pull-up for the SMB alert pin which is just used for * wakeup right now. */ pci1xxxx_i2c_configure_smbalert_pin(i2c, true); } static void pci1xxxx_i2c_clear_flags(struct pci1xxxx_i2c *i2c) { u8 regval; /* Reset the internal buffer counters. */ pci1xxxx_i2c_reset_counters(i2c); /* Clear low level interrupts. */ regval = COMPLETION_MNAKX | COMPLETION_IDLE | COMPLETION_MDONE; writeb(regval, i2c->i2c_base + SMB_CORE_COMPLETION_REG_OFF3); reinit_completion(&i2c->i2c_xfer_done); pci1xxxx_ack_nw_layer_intr(i2c, ALL_NW_LAYER_INTERRUPTS); pci1xxxx_ack_high_level_intr(i2c, ALL_HIGH_LAYER_INTR); } static int pci1xxxx_i2c_read(struct pci1xxxx_i2c *i2c, u8 slaveaddr, unsigned char *buf, u16 total_len) { void __iomem *p2 = i2c->i2c_base + SMB_CORE_COMPLETION_REG_OFF3; void __iomem *p1 = i2c->i2c_base + SMB_CORE_CMD_REG_OFF1; void __iomem *p3 = i2c->i2c_base + SMBUS_MST_BUF; unsigned long time_left; u16 remainingbytes; u8 transferlen; int retval = 0; u8 read_count; u32 regval; u16 count; /* Enable the I2C host controller by setting the ESO bit in the CONTROL REG. */ pci1xxxx_i2c_enable_ESO(i2c); pci1xxxx_i2c_clear_flags(i2c); pci1xxxx_config_nw_layer_intr(i2c, INTR_MSK_DMA_TERM, true); pci1xxxx_i2c_config_high_level_intr(i2c, I2C_BUF_MSTR_INTR_MASK, true); /* * The I2C transfer could be more than 128 bytes. Our core is * capable of only sending 128 at a time. * As far as the I2C read is concerned, initially send the * read slave address along with the number of bytes to read in * ReadCount. After sending the slave address the interrupt * is generated. On seeing the ACK for the slave address, reverse the * buffer direction and run the DMA to initiate the read from the slave. */ for (count = 0; count < total_len; count += transferlen) { /* * Before the start of any transaction, clear the existing * START/STOP conditions.
*/ writeb(0, p1); remainingbytes = total_len - count; transferlen = min_t(u16, remainingbytes, SMBUS_BUF_MAX_SIZE); /* * Send the STOP bit for the last chunk in the transaction. * For an I2C read transaction of more than BUF_SIZE bytes, NACK * should only be sent for the last read. * Hence the FW_ACK bit is set for all the read chunks except for * the last chunk, for which NACK should be sent and FW_ACK is * cleared. Send STOP only when the I2C_FLAGS_STOP bit is set in * the flags and only for the last transaction. */ if ((count + transferlen >= total_len) && (i2c->flags & I2C_FLAGS_STOP)) { pci1xxxx_i2c_set_clear_FW_ACK(i2c, false); pci1xxxx_i2c_send_start_stop(i2c, 0); } else { pci1xxxx_i2c_set_clear_FW_ACK(i2c, true); } /* Send the START bit for the first transaction. */ if (count == 0) { pci1xxxx_i2c_set_transfer_dir(i2c, I2C_DIRN_WRITE); pci1xxxx_i2c_send_start_stop(i2c, 1); /* Write the I2C buffer with just the slave addr. */ pci1xxxx_i2c_buffer_write(i2c, slaveaddr, 0, NULL); /* Set the counts. The read count is the number of bytes to transfer. */ pci1xxxx_i2c_set_count(i2c, 1, 1, transferlen); /* * Set the Auto_start_read bit so that the HW itself * will take care of the read phase. */ pci1xxxx_i2c_config_asr(i2c, true); if (i2c->flags & I2C_FLAGS_SMB_BLK_READ) pci1xxxx_i2c_set_readm(i2c, true); } else { pci1xxxx_i2c_set_count(i2c, 0, 0, transferlen); pci1xxxx_i2c_config_asr(i2c, false); pci1xxxx_i2c_clear_flags(i2c); pci1xxxx_i2c_set_transfer_dir(i2c, I2C_DIRN_READ); } /* Start the DMA. */ pci1xxxx_i2c_start_DMA(i2c); /* Wait for the DMA_TERM interrupt. */ time_left = wait_for_completion_timeout(&i2c->i2c_xfer_done, msecs_to_jiffies(PCI1XXXX_I2C_TIMEOUT_MS)); if (time_left == 0) { /* Reset the I2C core to release the bus lock. */ pci1xxxx_i2c_init(i2c); retval = -ETIMEDOUT; goto cleanup; } /* Read the completion reg to know the reason for DMA_TERM. */ regval = readb(p2); /* Slave did not respond. */ if (regval & COMPLETION_MNAKX) { writeb(COMPLETION_MNAKX, p2); retval = -ETIMEDOUT; goto cleanup; } if (i2c->flags & I2C_FLAGS_SMB_BLK_READ) { buf[0] = readb(p3); read_count = buf[0]; memcpy_fromio(&buf[1], p3 + 1, read_count); } else { memcpy_fromio(&buf[count], p3, transferlen); } } cleanup: /* Disable all the interrupts. */ pci1xxxx_config_nw_layer_intr(i2c, INTR_MSK_DMA_TERM, false); pci1xxxx_i2c_config_high_level_intr(i2c, I2C_BUF_MSTR_INTR_MASK, false); pci1xxxx_i2c_config_asr(i2c, false); return retval; } static int pci1xxxx_i2c_write(struct pci1xxxx_i2c *i2c, u8 slaveaddr, unsigned char *buf, u16 total_len) { void __iomem *p2 = i2c->i2c_base + SMB_CORE_COMPLETION_REG_OFF3; void __iomem *p1 = i2c->i2c_base + SMB_CORE_CMD_REG_OFF1; unsigned long time_left; u16 remainingbytes; u8 actualwritelen; u8 transferlen; int retval = 0; u32 regval; u16 count; /* Enable the I2C host controller by setting the ESO bit in the CONTROL REG. */ pci1xxxx_i2c_enable_ESO(i2c); /* Set the buffer direction. */ pci1xxxx_i2c_set_transfer_dir(i2c, I2C_DIRN_WRITE); pci1xxxx_config_nw_layer_intr(i2c, INTR_MSK_DMA_TERM, true); pci1xxxx_i2c_config_high_level_intr(i2c, I2C_BUF_MSTR_INTR_MASK, true); /* * The I2C transfer could be more than 128 bytes. Our core is * capable of only sending 128 at a time. */ for (count = 0; count < total_len; count += transferlen) { /* * Before the start of any transaction, clear the existing * START/STOP conditions. */ writeb(0, p1); pci1xxxx_i2c_clear_flags(i2c); remainingbytes = total_len - count; /* If it is the start of the transaction, send START.
*/ if (count == 0) { pci1xxxx_i2c_send_start_stop(i2c, 1); /* -1 for the slave address. */ transferlen = min_t(u16, SMBUS_BUF_MAX_SIZE - 1, remainingbytes); pci1xxxx_i2c_buffer_write(i2c, slaveaddr, transferlen, &buf[count]); /* * The actual number of bytes written on the I2C bus * is including the slave address. */ actualwritelen = transferlen + 1; } else { transferlen = min_t(u16, SMBUS_BUF_MAX_SIZE, remainingbytes); pci1xxxx_i2c_buffer_write(i2c, 0, transferlen, &buf[count]); actualwritelen = transferlen; } pci1xxxx_i2c_set_count(i2c, actualwritelen, actualwritelen, 0); /* * Send STOP only when I2C_FLAGS_STOP bit is set in the flags and * only for the last transaction. */ if (remainingbytes <= transferlen && (i2c->flags & I2C_FLAGS_STOP)) pci1xxxx_i2c_send_start_stop(i2c, 0); pci1xxxx_i2c_start_DMA(i2c); /* * Wait for the DMA_TERM interrupt. */ time_left = wait_for_completion_timeout(&i2c->i2c_xfer_done, msecs_to_jiffies(PCI1XXXX_I2C_TIMEOUT_MS)); if (time_left == 0) { /* Reset the I2C core to release the bus lock. */ pci1xxxx_i2c_init(i2c); retval = -ETIMEDOUT; goto cleanup; } regval = readb(p2); if (regval & COMPLETION_MNAKX) { writeb(COMPLETION_MNAKX, p2); retval = -ETIMEDOUT; goto cleanup; } } cleanup: /* Disable all the interrupts. */ pci1xxxx_config_nw_layer_intr(i2c, INTR_MSK_DMA_TERM, false); pci1xxxx_i2c_config_high_level_intr(i2c, I2C_BUF_MSTR_INTR_MASK, false); return retval; } static int pci1xxxx_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct pci1xxxx_i2c *i2c = i2c_get_adapdata(adap); u8 slaveaddr; int retval; u32 i; i2c->i2c_xfer_in_progress = true; for (i = 0; i < num; i++) { slaveaddr = i2c_8bit_addr_from_msg(&msgs[i]); /* * Send the STOP bit if the transfer is the final one or * if the I2C_M_STOP flag is set. */ if ((i == num - 1) || (msgs[i].flags & I2C_M_STOP)) i2c->flags |= I2C_FLAGS_STOP; else i2c->flags &= ~I2C_FLAGS_STOP; if (msgs[i].flags & I2C_M_RECV_LEN) i2c->flags |= I2C_FLAGS_SMB_BLK_READ; else i2c->flags &= ~I2C_FLAGS_SMB_BLK_READ; if (msgs[i].flags & I2C_M_RD) retval = pci1xxxx_i2c_read(i2c, slaveaddr, msgs[i].buf, msgs[i].len); else retval = pci1xxxx_i2c_write(i2c, slaveaddr, msgs[i].buf, msgs[i].len); if (retval < 0) break; } i2c->i2c_xfer_in_progress = false; if (retval < 0) return retval; return num; } /* * List of supported functions by the driver. */ static u32 pci1xxxx_i2c_get_funcs(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_PROTOCOL_MANGLING | I2C_FUNC_SMBUS_BLOCK_PROC_CALL | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_PROC_CALL | I2C_FUNC_SMBUS_BLOCK_DATA; } static const struct i2c_algorithm pci1xxxx_i2c_algo = { .master_xfer = pci1xxxx_i2c_xfer, .functionality = pci1xxxx_i2c_get_funcs, }; static const struct i2c_adapter_quirks pci1xxxx_i2c_quirks = { .flags = I2C_AQ_NO_ZERO_LEN, }; static const struct i2c_adapter pci1xxxx_i2c_ops = { .owner = THIS_MODULE, .name = "PCI1xxxx I2C Adapter", .algo = &pci1xxxx_i2c_algo, .quirks = &pci1xxxx_i2c_quirks, }; static int pci1xxxx_i2c_suspend(struct device *dev) { struct pci1xxxx_i2c *i2c = dev_get_drvdata(dev); void __iomem *p = i2c->i2c_base + SMBUS_RESET_REG; struct pci_dev *pdev = to_pci_dev(dev); u32 regval; i2c_mark_adapter_suspended(&i2c->adap); /* * If the system is put into 'suspend' state when the I2C transfer is in * progress, wait until the transfer completes. 
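* Each chunk of a transfer is already bounded by the PCI1XXXX_I2C_TIMEOUT_MS * completion timeout, so polling every 20 ms here is cheap and this loop is * guaranteed to terminate.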
*/ while (i2c->i2c_xfer_in_progress) msleep(20); pci1xxxx_i2c_config_high_level_intr(i2c, SMBALERT_WAKE_INTR_MASK, true); /* * Enable the PERST_DIS bit to mask the PERST from resetting the core * registers. */ regval = readl(p); regval |= PERI_SMBUS_D3_RESET_DIS; writel(regval, p); /* Enable PCI wake in the PMCSR register. */ device_set_wakeup_enable(dev, true); pci_wake_from_d3(pdev, true); return 0; } static int pci1xxxx_i2c_resume(struct device *dev) { struct pci1xxxx_i2c *i2c = dev_get_drvdata(dev); void __iomem *p1 = i2c->i2c_base + SMBUS_GEN_INT_STAT_REG_OFF; void __iomem *p2 = i2c->i2c_base + SMBUS_RESET_REG; struct pci_dev *pdev = to_pci_dev(dev); u32 regval; regval = readw(p1); writew(regval, p1); pci1xxxx_i2c_config_high_level_intr(i2c, SMBALERT_WAKE_INTR_MASK, false); regval = readl(p2); regval &= ~PERI_SMBUS_D3_RESET_DIS; writel(regval, p2); i2c_mark_adapter_resumed(&i2c->adap); pci_wake_from_d3(pdev, false); return 0; } static DEFINE_SIMPLE_DEV_PM_OPS(pci1xxxx_i2c_pm_ops, pci1xxxx_i2c_suspend, pci1xxxx_i2c_resume); static void pci1xxxx_i2c_shutdown(void *data) { struct pci1xxxx_i2c *i2c = data; pci1xxxx_i2c_config_padctrl(i2c, false); pci1xxxx_i2c_configure_core_reg(i2c, false); } static int pci1xxxx_i2c_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent) { struct device *dev = &pdev->dev; struct pci1xxxx_i2c *i2c; int ret; i2c = devm_kzalloc(dev, sizeof(*i2c), GFP_KERNEL); if (!i2c) return -ENOMEM; pci_set_drvdata(pdev, i2c); i2c->i2c_xfer_in_progress = false; ret = pcim_enable_device(pdev); if (ret) return ret; pci_set_master(pdev); /* * We are getting the base address of the SMB core. SMB core uses * BAR0 and size is 32K. */ ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev)); if (ret < 0) return ret; i2c->i2c_base = pcim_iomap_table(pdev)[0]; init_completion(&i2c->i2c_xfer_done); pci1xxxx_i2c_init(i2c); ret = devm_add_action(dev, pci1xxxx_i2c_shutdown, i2c); if (ret) return ret; ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); if (ret < 0) return ret; ret = devm_request_irq(dev, pci_irq_vector(pdev, 0), pci1xxxx_i2c_isr, 0, pci_name(pdev), i2c); if (ret) return ret; i2c->adap = pci1xxxx_i2c_ops; i2c->adap.dev.parent = dev; snprintf(i2c->adap.name, sizeof(i2c->adap.name), "MCHP PCI1xxxx i2c adapter at %s", pci_name(pdev)); i2c_set_adapdata(&i2c->adap, i2c); ret = devm_i2c_add_adapter(dev, &i2c->adap); if (ret) return dev_err_probe(dev, ret, "i2c add adapter failed\n"); return 0; } static const struct pci_device_id pci1xxxx_i2c_pci_id_table[] = { { PCI_VDEVICE(EFAR, 0xA003) }, { PCI_VDEVICE(EFAR, 0xA013) }, { PCI_VDEVICE(EFAR, 0xA023) }, { PCI_VDEVICE(EFAR, 0xA033) }, { PCI_VDEVICE(EFAR, 0xA043) }, { } }; MODULE_DEVICE_TABLE(pci, pci1xxxx_i2c_pci_id_table); static struct pci_driver pci1xxxx_i2c_pci_driver = { .name = "i2c-mchp-pci1xxxx", .id_table = pci1xxxx_i2c_pci_id_table, .probe = pci1xxxx_i2c_probe_pci, .driver = { .pm = pm_sleep_ptr(&pci1xxxx_i2c_pm_ops), }, }; module_pci_driver(pci1xxxx_i2c_pci_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Tharun Kumar P<[email protected]>"); MODULE_AUTHOR("Kumaravel Thiagarajan <[email protected]>"); MODULE_DESCRIPTION("Microchip Technology Inc. pci1xxxx I2C bus driver");
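/*
 * Worked example (illustrative only, not part of the original driver):
 * the packed timing words defined above can be sanity-checked at compile
 * time. At 100 kHz, DATA_TIMING_100K packs FIRST_START_HOLD (23 = 0x17),
 * STOP_SETUP (150 = 0x96), RESTART_SETUP (156 = 0x9c) and DATA_HOLD
 * (12 = 0x0c) into one 32-bit word, and BUS_CLK_100K packs the high
 * (154 = 0x9a) and low (156 = 0x9c) phase tick counts into 16 bits.
 */
static_assert(DATA_TIMING_100K == 0x17969c0c);
static_assert(BUS_CLK_100K == 0x9a9c);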
linux-master
drivers/i2c/busses/i2c-mchp-pci1xxxx.c
// SPDX-License-Identifier: GPL-2.0-only /* * I2C adapter for the IMG Serial Control Bus (SCB) IP block. * * Copyright (C) 2009, 2010, 2012, 2014 Imagination Technologies Ltd. * * There are three ways that this I2C controller can be driven: * * - Raw control of the SDA and SCK signals. * * This corresponds to MODE_RAW, which takes control of the signals * directly for a certain number of clock cycles (the INT_TIMING * interrupt can be used for timing). * * - Atomic commands. A low level I2C symbol (such as generate * start/stop/ack/nack bit, generate byte, receive byte, and receive * ACK) is given to the hardware, with detection of completion by bits * in the LINESTAT register. * * This mode of operation is used by MODE_ATOMIC, which uses an I2C * state machine in the interrupt handler to compose/react to I2C * transactions using atomic mode commands, and also by MODE_SEQUENCE, * which emits a simple fixed sequence of atomic mode commands. * * Due to software control, the use of atomic commands usually results * in suboptimal use of the bus, with gaps between the I2C symbols while * the driver decides what to do next. * * - Automatic mode. A bus address, and whether to read or write, is * specified, and the hardware takes care of the I2C state machine, * using a FIFO to send/receive bytes of data to an I2C slave. The * driver just has to keep the FIFO drained or filled in response to the * appropriate FIFO interrupts. * * This corresponds to MODE_AUTOMATIC, which manages the FIFOs and deals * with control of repeated start bits between I2C messages. * * Use of automatic mode and the FIFO can make much more efficient use * of the bus compared to individual atomic commands, with potentially * no wasted time between I2C symbols or I2C messages. * * In most cases MODE_AUTOMATIC is used; however, if any of the messages in * a transaction are zero byte writes (e.g. used by i2cdetect for probing * the bus), MODE_ATOMIC must be used since automatic mode is normally * started by the writing of data into the FIFO. * * The other modes are used in specific circumstances where MODE_ATOMIC and * MODE_AUTOMATIC aren't appropriate. MODE_RAW is used to implement a bus * recovery routine. MODE_SEQUENCE is used to reset the bus and make sure * it is in a sane state. * * Notice that the driver implements a timer-based timeout mechanism. * The reason for this mechanism is to reduce the number of interrupts * received in automatic mode. * * The driver would get a slave event and transaction done interrupts for * each atomic mode command that gets completed. However, these events are * not needed in automatic mode, because those atomic mode commands are * managed automatically by the hardware. * * In practice, normal I2C transactions will be complete well before you * get the timer interrupt, as the timer is re-scheduled during FIFO * maintenance and disabled after the transaction is complete. * * In this way normal automatic mode operation isn't impacted by * unnecessary interrupts, but the exceptional abort condition can still be * detected (with a slight delay).
*/ #include <linux/bitops.h> #include <linux/clk.h> #include <linux/completion.h> #include <linux/err.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/timer.h> /* Register offsets */ #define SCB_STATUS_REG 0x00 #define SCB_OVERRIDE_REG 0x04 #define SCB_READ_ADDR_REG 0x08 #define SCB_READ_COUNT_REG 0x0c #define SCB_WRITE_ADDR_REG 0x10 #define SCB_READ_DATA_REG 0x14 #define SCB_WRITE_DATA_REG 0x18 #define SCB_FIFO_STATUS_REG 0x1c #define SCB_CONTROL_SOFT_RESET 0x1f #define SCB_CLK_SET_REG 0x3c #define SCB_INT_STATUS_REG 0x40 #define SCB_INT_CLEAR_REG 0x44 #define SCB_INT_MASK_REG 0x48 #define SCB_CONTROL_REG 0x4c #define SCB_TIME_TPL_REG 0x50 #define SCB_TIME_TPH_REG 0x54 #define SCB_TIME_TP2S_REG 0x58 #define SCB_TIME_TBI_REG 0x60 #define SCB_TIME_TSL_REG 0x64 #define SCB_TIME_TDL_REG 0x68 #define SCB_TIME_TSDL_REG 0x6c #define SCB_TIME_TSDH_REG 0x70 #define SCB_READ_XADDR_REG 0x74 #define SCB_WRITE_XADDR_REG 0x78 #define SCB_WRITE_COUNT_REG 0x7c #define SCB_CORE_REV_REG 0x80 #define SCB_TIME_TCKH_REG 0x84 #define SCB_TIME_TCKL_REG 0x88 #define SCB_FIFO_FLUSH_REG 0x8c #define SCB_READ_FIFO_REG 0x94 #define SCB_CLEAR_REG 0x98 /* SCB_CONTROL_REG bits */ #define SCB_CONTROL_CLK_ENABLE 0x1e0 #define SCB_CONTROL_TRANSACTION_HALT 0x200 #define FIFO_READ_FULL BIT(0) #define FIFO_READ_EMPTY BIT(1) #define FIFO_WRITE_FULL BIT(2) #define FIFO_WRITE_EMPTY BIT(3) /* SCB_CLK_SET_REG bits */ #define SCB_FILT_DISABLE BIT(31) #define SCB_FILT_BYPASS BIT(30) #define SCB_FILT_INC_MASK 0x7f #define SCB_FILT_INC_SHIFT 16 #define SCB_INC_MASK 0x7f #define SCB_INC_SHIFT 8 /* SCB_INT_*_REG bits */ #define INT_BUS_INACTIVE BIT(0) #define INT_UNEXPECTED_START BIT(1) #define INT_SCLK_LOW_TIMEOUT BIT(2) #define INT_SDAT_LOW_TIMEOUT BIT(3) #define INT_WRITE_ACK_ERR BIT(4) #define INT_ADDR_ACK_ERR BIT(5) #define INT_FIFO_FULL BIT(9) #define INT_FIFO_FILLING BIT(10) #define INT_FIFO_EMPTY BIT(11) #define INT_FIFO_EMPTYING BIT(12) #define INT_TRANSACTION_DONE BIT(15) #define INT_SLAVE_EVENT BIT(16) #define INT_MASTER_HALTED BIT(17) #define INT_TIMING BIT(18) #define INT_STOP_DETECTED BIT(19) #define INT_FIFO_FULL_FILLING (INT_FIFO_FULL | INT_FIFO_FILLING) /* Level interrupts need clearing after handling instead of before */ #define INT_LEVEL 0x01e00 /* Don't allow any interrupts while the clock may be off */ #define INT_ENABLE_MASK_INACTIVE 0x00000 /* Interrupt masks for the different driver modes */ #define INT_ENABLE_MASK_RAW INT_TIMING #define INT_ENABLE_MASK_ATOMIC (INT_TRANSACTION_DONE | \ INT_SLAVE_EVENT | \ INT_ADDR_ACK_ERR | \ INT_WRITE_ACK_ERR) #define INT_ENABLE_MASK_AUTOMATIC (INT_SCLK_LOW_TIMEOUT | \ INT_ADDR_ACK_ERR | \ INT_WRITE_ACK_ERR | \ INT_FIFO_FULL | \ INT_FIFO_FILLING | \ INT_FIFO_EMPTY | \ INT_MASTER_HALTED | \ INT_STOP_DETECTED) #define INT_ENABLE_MASK_WAITSTOP (INT_SLAVE_EVENT | \ INT_ADDR_ACK_ERR | \ INT_WRITE_ACK_ERR) /* SCB_STATUS_REG fields */ #define LINESTAT_SCLK_LINE_STATUS BIT(0) #define LINESTAT_SCLK_EN BIT(1) #define LINESTAT_SDAT_LINE_STATUS BIT(2) #define LINESTAT_SDAT_EN BIT(3) #define LINESTAT_DET_START_STATUS BIT(4) #define LINESTAT_DET_STOP_STATUS BIT(5) #define LINESTAT_DET_ACK_STATUS BIT(6) #define LINESTAT_DET_NACK_STATUS BIT(7) #define LINESTAT_BUS_IDLE BIT(8) #define LINESTAT_T_DONE_STATUS BIT(9) #define LINESTAT_SCLK_OUT_STATUS BIT(10) 
#define LINESTAT_SDAT_OUT_STATUS BIT(11) #define LINESTAT_GEN_LINE_MASK_STATUS BIT(12) #define LINESTAT_START_BIT_DET BIT(13) #define LINESTAT_STOP_BIT_DET BIT(14) #define LINESTAT_ACK_DET BIT(15) #define LINESTAT_NACK_DET BIT(16) #define LINESTAT_INPUT_HELD_V BIT(17) #define LINESTAT_ABORT_DET BIT(18) #define LINESTAT_ACK_OR_NACK_DET (LINESTAT_ACK_DET | LINESTAT_NACK_DET) #define LINESTAT_INPUT_DATA 0xff000000 #define LINESTAT_INPUT_DATA_SHIFT 24 #define LINESTAT_CLEAR_SHIFT 13 #define LINESTAT_LATCHED (0x3f << LINESTAT_CLEAR_SHIFT) /* SCB_OVERRIDE_REG fields */ #define OVERRIDE_SCLK_OVR BIT(0) #define OVERRIDE_SCLKEN_OVR BIT(1) #define OVERRIDE_SDAT_OVR BIT(2) #define OVERRIDE_SDATEN_OVR BIT(3) #define OVERRIDE_MASTER BIT(9) #define OVERRIDE_LINE_OVR_EN BIT(10) #define OVERRIDE_DIRECT BIT(11) #define OVERRIDE_CMD_SHIFT 4 #define OVERRIDE_CMD_MASK 0x1f #define OVERRIDE_DATA_SHIFT 24 #define OVERRIDE_SCLK_DOWN (OVERRIDE_LINE_OVR_EN | \ OVERRIDE_SCLKEN_OVR) #define OVERRIDE_SCLK_UP (OVERRIDE_LINE_OVR_EN | \ OVERRIDE_SCLKEN_OVR | \ OVERRIDE_SCLK_OVR) #define OVERRIDE_SDAT_DOWN (OVERRIDE_LINE_OVR_EN | \ OVERRIDE_SDATEN_OVR) #define OVERRIDE_SDAT_UP (OVERRIDE_LINE_OVR_EN | \ OVERRIDE_SDATEN_OVR | \ OVERRIDE_SDAT_OVR) /* OVERRIDE_CMD values */ #define CMD_PAUSE 0x00 #define CMD_GEN_DATA 0x01 #define CMD_GEN_START 0x02 #define CMD_GEN_STOP 0x03 #define CMD_GEN_ACK 0x04 #define CMD_GEN_NACK 0x05 #define CMD_RET_DATA 0x08 #define CMD_RET_ACK 0x09 /* Fixed timing values */ #define TIMEOUT_TBI 0x0 #define TIMEOUT_TSL 0xffff #define TIMEOUT_TDL 0x0 /* Transaction timeout */ #define IMG_I2C_TIMEOUT (msecs_to_jiffies(1000)) /* * Worst incs are 1 (inaccurate) and 16*256 (irregular). * So a sensible inc is the logarithmic mean: 64 (2^6), which is * in the middle of the valid range (0-127). */ #define SCB_OPT_INC 64 /* Setup the clock enable filtering for 25 ns */ #define SCB_FILT_GLITCH 25 /* * Bits to return from interrupt handler functions for different modes. * This delays completion until we've finished with the registers, so that the * function waiting for completion can safely disable the clock to save power. 
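* For example, ISR_FATAL(EIO) expands to ISR_COMPLETE_M | ISR_FATAL_M | EIO, * and the ISR later recovers the error code as -(hret & ISR_STATUS_M), * i.e. -EIO. */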
*/ #define ISR_COMPLETE_M BIT(31) #define ISR_FATAL_M BIT(30) #define ISR_WAITSTOP BIT(29) #define ISR_STATUS_M 0x0000ffff /* contains +ve errno */ #define ISR_COMPLETE(err) (ISR_COMPLETE_M | (ISR_STATUS_M & (err))) #define ISR_FATAL(err) (ISR_COMPLETE(err) | ISR_FATAL_M) #define IMG_I2C_PM_TIMEOUT 1000 /* ms */ enum img_i2c_mode { MODE_INACTIVE, MODE_RAW, MODE_ATOMIC, MODE_AUTOMATIC, MODE_SEQUENCE, MODE_FATAL, MODE_WAITSTOP, MODE_SUSPEND, }; /* Timing parameters for i2c modes (in ns) */ struct img_i2c_timings { const char *name; unsigned int max_bitrate; unsigned int tckh, tckl, tsdh, tsdl; unsigned int tp2s, tpl, tph; }; /* The timings array must be ordered from slower to faster */ static struct img_i2c_timings timings[] = { /* Standard mode */ { .name = "standard", .max_bitrate = I2C_MAX_STANDARD_MODE_FREQ, .tckh = 4000, .tckl = 4700, .tsdh = 4700, .tsdl = 8700, .tp2s = 4700, .tpl = 4700, .tph = 4000, }, /* Fast mode */ { .name = "fast", .max_bitrate = I2C_MAX_FAST_MODE_FREQ, .tckh = 600, .tckl = 1300, .tsdh = 600, .tsdl = 1200, .tp2s = 1300, .tpl = 600, .tph = 600, }, }; /* Reset dance */ static u8 img_i2c_reset_seq[] = { CMD_GEN_START, CMD_GEN_DATA, 0xff, CMD_RET_ACK, CMD_GEN_START, CMD_GEN_STOP, 0 }; /* Just issue a stop (after an abort condition) */ static u8 img_i2c_stop_seq[] = { CMD_GEN_STOP, 0 }; /* We're interested in different interrupts depending on the mode */ static unsigned int img_i2c_int_enable_by_mode[] = { [MODE_INACTIVE] = INT_ENABLE_MASK_INACTIVE, [MODE_RAW] = INT_ENABLE_MASK_RAW, [MODE_ATOMIC] = INT_ENABLE_MASK_ATOMIC, [MODE_AUTOMATIC] = INT_ENABLE_MASK_AUTOMATIC, [MODE_SEQUENCE] = INT_ENABLE_MASK_ATOMIC, [MODE_FATAL] = 0, [MODE_WAITSTOP] = INT_ENABLE_MASK_WAITSTOP, [MODE_SUSPEND] = 0, }; /* Atomic command names */ static const char * const img_i2c_atomic_cmd_names[] = { [CMD_PAUSE] = "PAUSE", [CMD_GEN_DATA] = "GEN_DATA", [CMD_GEN_START] = "GEN_START", [CMD_GEN_STOP] = "GEN_STOP", [CMD_GEN_ACK] = "GEN_ACK", [CMD_GEN_NACK] = "GEN_NACK", [CMD_RET_DATA] = "RET_DATA", [CMD_RET_ACK] = "RET_ACK", }; struct img_i2c { struct i2c_adapter adap; void __iomem *base; /* * The scb core clock is used to get the input frequency, and to disable * it after every set of transactions to save some power. */ struct clk *scb_clk, *sys_clk; unsigned int bitrate; bool need_wr_rd_fence; /* state */ struct completion msg_complete; spinlock_t lock; /* lock before doing anything with the state */ struct i2c_msg msg; /* After the last transaction, wait for a stop bit */ bool last_msg; int msg_status; enum img_i2c_mode mode; u32 int_enable; /* depends on mode */ u32 line_status; /* line status over command */ /* * To avoid slave event interrupts in automatic mode, use a timer to * poll the abort condition if we don't get an interrupt for too long. */ struct timer_list check_timer; bool t_halt; /* atomic mode state */ bool at_t_done; bool at_slave_event; int at_cur_cmd; u8 at_cur_data; /* Sequence: either reset or stop. See img_i2c_sequence. 
*/ u8 *seq; /* raw mode */ unsigned int raw_timeout; }; static int img_i2c_runtime_suspend(struct device *dev); static int img_i2c_runtime_resume(struct device *dev); static void img_i2c_writel(struct img_i2c *i2c, u32 offset, u32 value) { writel(value, i2c->base + offset); } static u32 img_i2c_readl(struct img_i2c *i2c, u32 offset) { return readl(i2c->base + offset); } /* * The code to read from the master read fifo, and write to the master * write fifo, checks a bit in an SCB register before every byte to * ensure that the fifo is not full (write fifo) or empty (read fifo). * Due to clock domain crossing inside the SCB block the updated value * of this bit is only visible after 2 cycles. * * The img_i2c_wr_rd_fence() function does 2 dummy writes (to the read-only * revision register), and it's called after reading from or writing to the * fifos to ensure that subsequent reads of the fifo status bits do not read * stale values. */ static void img_i2c_wr_rd_fence(struct img_i2c *i2c) { if (i2c->need_wr_rd_fence) { img_i2c_writel(i2c, SCB_CORE_REV_REG, 0); img_i2c_writel(i2c, SCB_CORE_REV_REG, 0); } } static void img_i2c_switch_mode(struct img_i2c *i2c, enum img_i2c_mode mode) { i2c->mode = mode; i2c->int_enable = img_i2c_int_enable_by_mode[mode]; i2c->line_status = 0; } static void img_i2c_raw_op(struct img_i2c *i2c) { i2c->raw_timeout = 0; img_i2c_writel(i2c, SCB_OVERRIDE_REG, OVERRIDE_SCLKEN_OVR | OVERRIDE_SDATEN_OVR | OVERRIDE_MASTER | OVERRIDE_LINE_OVR_EN | OVERRIDE_DIRECT | ((i2c->at_cur_cmd & OVERRIDE_CMD_MASK) << OVERRIDE_CMD_SHIFT) | (i2c->at_cur_data << OVERRIDE_DATA_SHIFT)); } static const char *img_i2c_atomic_op_name(unsigned int cmd) { if (unlikely(cmd >= ARRAY_SIZE(img_i2c_atomic_cmd_names))) return "UNKNOWN"; return img_i2c_atomic_cmd_names[cmd]; } /* Send a single atomic mode command to the hardware */ static void img_i2c_atomic_op(struct img_i2c *i2c, int cmd, u8 data) { i2c->at_cur_cmd = cmd; i2c->at_cur_data = data; /* work around lack of data setup time when generating data */ if (cmd == CMD_GEN_DATA && i2c->mode == MODE_ATOMIC) { u32 line_status = img_i2c_readl(i2c, SCB_STATUS_REG); if (line_status & LINESTAT_SDAT_LINE_STATUS && !(data & 0x80)) { /* hold the data line down for a moment */ img_i2c_switch_mode(i2c, MODE_RAW); img_i2c_raw_op(i2c); return; } } dev_dbg(i2c->adap.dev.parent, "atomic cmd=%s (%d) data=%#x\n", img_i2c_atomic_op_name(cmd), cmd, data); i2c->at_t_done = (cmd == CMD_RET_DATA || cmd == CMD_RET_ACK); i2c->at_slave_event = false; i2c->line_status = 0; img_i2c_writel(i2c, SCB_OVERRIDE_REG, ((cmd & OVERRIDE_CMD_MASK) << OVERRIDE_CMD_SHIFT) | OVERRIDE_MASTER | OVERRIDE_DIRECT | (data << OVERRIDE_DATA_SHIFT)); } /* Start a transaction in atomic mode */ static void img_i2c_atomic_start(struct img_i2c *i2c) { img_i2c_switch_mode(i2c, MODE_ATOMIC); img_i2c_writel(i2c, SCB_INT_MASK_REG, i2c->int_enable); img_i2c_atomic_op(i2c, CMD_GEN_START, 0x00); } static void img_i2c_soft_reset(struct img_i2c *i2c) { i2c->t_halt = false; img_i2c_writel(i2c, SCB_CONTROL_REG, 0); img_i2c_writel(i2c, SCB_CONTROL_REG, SCB_CONTROL_CLK_ENABLE | SCB_CONTROL_SOFT_RESET); } /* * Enable or release transaction halt for control of repeated starts. * In version 3.3 of the IP when transaction halt is set, an interrupt * will be generated after each byte of a transfer instead of after * every transfer but before the stop bit.
* Due to this behaviour we have to be careful that every time we * release the transaction halt we re-enable it straight away, so that * we only process a single byte; not doing so will result in all * remaining bytes being processed and a stop bit being issued, which * will prevent us from having a repeated start. */ static void img_i2c_transaction_halt(struct img_i2c *i2c, bool t_halt) { u32 val; if (i2c->t_halt == t_halt) return; i2c->t_halt = t_halt; val = img_i2c_readl(i2c, SCB_CONTROL_REG); if (t_halt) val |= SCB_CONTROL_TRANSACTION_HALT; else val &= ~SCB_CONTROL_TRANSACTION_HALT; img_i2c_writel(i2c, SCB_CONTROL_REG, val); } /* Drain data from the FIFO into the buffer (automatic mode) */ static void img_i2c_read_fifo(struct img_i2c *i2c) { while (i2c->msg.len) { u32 fifo_status; u8 data; img_i2c_wr_rd_fence(i2c); fifo_status = img_i2c_readl(i2c, SCB_FIFO_STATUS_REG); if (fifo_status & FIFO_READ_EMPTY) break; data = img_i2c_readl(i2c, SCB_READ_DATA_REG); *i2c->msg.buf = data; img_i2c_writel(i2c, SCB_READ_FIFO_REG, 0xff); i2c->msg.len--; i2c->msg.buf++; } } /* Fill the FIFO with data from the buffer (automatic mode) */ static void img_i2c_write_fifo(struct img_i2c *i2c) { while (i2c->msg.len) { u32 fifo_status; img_i2c_wr_rd_fence(i2c); fifo_status = img_i2c_readl(i2c, SCB_FIFO_STATUS_REG); if (fifo_status & FIFO_WRITE_FULL) break; img_i2c_writel(i2c, SCB_WRITE_DATA_REG, *i2c->msg.buf); i2c->msg.len--; i2c->msg.buf++; } /* Disable fifo emptying interrupt if nothing more to write */ if (!i2c->msg.len) i2c->int_enable &= ~INT_FIFO_EMPTYING; } /* Start a read transaction in automatic mode */ static void img_i2c_read(struct img_i2c *i2c) { img_i2c_switch_mode(i2c, MODE_AUTOMATIC); if (!i2c->last_msg) i2c->int_enable |= INT_SLAVE_EVENT; img_i2c_writel(i2c, SCB_INT_MASK_REG, i2c->int_enable); img_i2c_writel(i2c, SCB_READ_ADDR_REG, i2c->msg.addr); img_i2c_writel(i2c, SCB_READ_COUNT_REG, i2c->msg.len); mod_timer(&i2c->check_timer, jiffies + msecs_to_jiffies(1)); } /* Start a write transaction in automatic mode */ static void img_i2c_write(struct img_i2c *i2c) { img_i2c_switch_mode(i2c, MODE_AUTOMATIC); if (!i2c->last_msg) i2c->int_enable |= INT_SLAVE_EVENT; img_i2c_writel(i2c, SCB_WRITE_ADDR_REG, i2c->msg.addr); img_i2c_writel(i2c, SCB_WRITE_COUNT_REG, i2c->msg.len); mod_timer(&i2c->check_timer, jiffies + msecs_to_jiffies(1)); img_i2c_write_fifo(i2c); /* img_i2c_write_fifo() may modify int_enable */ img_i2c_writel(i2c, SCB_INT_MASK_REG, i2c->int_enable); } /* * Indicate that the transaction is complete. This is called from the * ISR to wake up the waiting thread, after which the ISR must not * access any more SCB registers.
*/ static void img_i2c_complete_transaction(struct img_i2c *i2c, int status) { img_i2c_switch_mode(i2c, MODE_INACTIVE); if (status) { i2c->msg_status = status; img_i2c_transaction_halt(i2c, false); } complete(&i2c->msg_complete); } static unsigned int img_i2c_raw_atomic_delay_handler(struct img_i2c *i2c, u32 int_status, u32 line_status) { /* Stay in raw mode for this, so we don't just loop infinitely */ img_i2c_atomic_op(i2c, i2c->at_cur_cmd, i2c->at_cur_data); img_i2c_switch_mode(i2c, MODE_ATOMIC); return 0; } static unsigned int img_i2c_raw(struct img_i2c *i2c, u32 int_status, u32 line_status) { if (int_status & INT_TIMING) { if (i2c->raw_timeout == 0) return img_i2c_raw_atomic_delay_handler(i2c, int_status, line_status); --i2c->raw_timeout; } return 0; } static unsigned int img_i2c_sequence(struct img_i2c *i2c, u32 int_status) { static const unsigned int continue_bits[] = { [CMD_GEN_START] = LINESTAT_START_BIT_DET, [CMD_GEN_DATA] = LINESTAT_INPUT_HELD_V, [CMD_RET_ACK] = LINESTAT_ACK_DET | LINESTAT_NACK_DET, [CMD_RET_DATA] = LINESTAT_INPUT_HELD_V, [CMD_GEN_STOP] = LINESTAT_STOP_BIT_DET, }; int next_cmd = -1; u8 next_data = 0x00; if (int_status & INT_SLAVE_EVENT) i2c->at_slave_event = true; if (int_status & INT_TRANSACTION_DONE) i2c->at_t_done = true; if (!i2c->at_slave_event || !i2c->at_t_done) return 0; /* wait if no continue bits are set */ if (i2c->at_cur_cmd >= 0 && i2c->at_cur_cmd < ARRAY_SIZE(continue_bits)) { unsigned int cont_bits = continue_bits[i2c->at_cur_cmd]; if (cont_bits) { cont_bits |= LINESTAT_ABORT_DET; if (!(i2c->line_status & cont_bits)) return 0; } } /* follow the sequence of commands in i2c->seq */ next_cmd = *i2c->seq; /* stop on a nil */ if (!next_cmd) { img_i2c_writel(i2c, SCB_OVERRIDE_REG, 0); return ISR_COMPLETE(0); } /* when generating data, the next byte is the data */ if (next_cmd == CMD_GEN_DATA) { ++i2c->seq; next_data = *i2c->seq; } ++i2c->seq; img_i2c_atomic_op(i2c, next_cmd, next_data); return 0; } static void img_i2c_reset_start(struct img_i2c *i2c) { /* Initiate the magic dance */ img_i2c_switch_mode(i2c, MODE_SEQUENCE); img_i2c_writel(i2c, SCB_INT_MASK_REG, i2c->int_enable); i2c->seq = img_i2c_reset_seq; i2c->at_slave_event = true; i2c->at_t_done = true; i2c->at_cur_cmd = -1; /* img_i2c_reset_seq isn't empty so the following won't fail */ img_i2c_sequence(i2c, 0); } static void img_i2c_stop_start(struct img_i2c *i2c) { /* Initiate a stop bit sequence */ img_i2c_switch_mode(i2c, MODE_SEQUENCE); img_i2c_writel(i2c, SCB_INT_MASK_REG, i2c->int_enable); i2c->seq = img_i2c_stop_seq; i2c->at_slave_event = true; i2c->at_t_done = true; i2c->at_cur_cmd = -1; /* img_i2c_stop_seq isn't empty so the following won't fail */ img_i2c_sequence(i2c, 0); } static unsigned int img_i2c_atomic(struct img_i2c *i2c, u32 int_status, u32 line_status) { int next_cmd = -1; u8 next_data = 0x00; if (int_status & INT_SLAVE_EVENT) i2c->at_slave_event = true; if (int_status & INT_TRANSACTION_DONE) i2c->at_t_done = true; if (!i2c->at_slave_event || !i2c->at_t_done) goto next_atomic_cmd; if (i2c->line_status & LINESTAT_ABORT_DET) { dev_dbg(i2c->adap.dev.parent, "abort condition detected\n"); next_cmd = CMD_GEN_STOP; i2c->msg_status = -EIO; goto next_atomic_cmd; } /* i2c->at_cur_cmd may have completed */ switch (i2c->at_cur_cmd) { case CMD_GEN_START: next_cmd = CMD_GEN_DATA; next_data = i2c_8bit_addr_from_msg(&i2c->msg); break; case CMD_GEN_DATA: if (i2c->line_status & LINESTAT_INPUT_HELD_V) next_cmd = CMD_RET_ACK; break; case CMD_RET_ACK: if (i2c->line_status & LINESTAT_ACK_DET || 
(i2c->line_status & LINESTAT_NACK_DET && i2c->msg.flags & I2C_M_IGNORE_NAK)) { if (i2c->msg.len == 0) { next_cmd = CMD_GEN_STOP; } else if (i2c->msg.flags & I2C_M_RD) { next_cmd = CMD_RET_DATA; } else { next_cmd = CMD_GEN_DATA; next_data = *i2c->msg.buf; --i2c->msg.len; ++i2c->msg.buf; } } else if (i2c->line_status & LINESTAT_NACK_DET) { i2c->msg_status = -EIO; next_cmd = CMD_GEN_STOP; } break; case CMD_RET_DATA: if (i2c->line_status & LINESTAT_INPUT_HELD_V) { *i2c->msg.buf = (i2c->line_status & LINESTAT_INPUT_DATA) >> LINESTAT_INPUT_DATA_SHIFT; --i2c->msg.len; ++i2c->msg.buf; if (i2c->msg.len) next_cmd = CMD_GEN_ACK; else next_cmd = CMD_GEN_NACK; } break; case CMD_GEN_ACK: if (i2c->line_status & LINESTAT_ACK_DET) { next_cmd = CMD_RET_DATA; } else { i2c->msg_status = -EIO; next_cmd = CMD_GEN_STOP; } break; case CMD_GEN_NACK: next_cmd = CMD_GEN_STOP; break; case CMD_GEN_STOP: img_i2c_writel(i2c, SCB_OVERRIDE_REG, 0); return ISR_COMPLETE(0); default: dev_err(i2c->adap.dev.parent, "bad atomic command %d\n", i2c->at_cur_cmd); i2c->msg_status = -EIO; next_cmd = CMD_GEN_STOP; break; } next_atomic_cmd: if (next_cmd != -1) { /* don't actually stop unless we're the last transaction */ if (next_cmd == CMD_GEN_STOP && !i2c->msg_status && !i2c->last_msg) return ISR_COMPLETE(0); img_i2c_atomic_op(i2c, next_cmd, next_data); } return 0; } /* * Timer function to check if something has gone wrong in automatic mode (so we * don't have to handle so many interrupts just to catch an exception). */ static void img_i2c_check_timer(struct timer_list *t) { struct img_i2c *i2c = from_timer(i2c, t, check_timer); unsigned long flags; unsigned int line_status; spin_lock_irqsave(&i2c->lock, flags); line_status = img_i2c_readl(i2c, SCB_STATUS_REG); /* check for an abort condition */ if (line_status & LINESTAT_ABORT_DET) { dev_dbg(i2c->adap.dev.parent, "abort condition detected by check timer\n"); /* enable slave event interrupt mask to trigger irq */ img_i2c_writel(i2c, SCB_INT_MASK_REG, i2c->int_enable | INT_SLAVE_EVENT); } spin_unlock_irqrestore(&i2c->lock, flags); } static unsigned int img_i2c_auto(struct img_i2c *i2c, unsigned int int_status, unsigned int line_status) { if (int_status & (INT_WRITE_ACK_ERR | INT_ADDR_ACK_ERR)) return ISR_COMPLETE(EIO); if (line_status & LINESTAT_ABORT_DET) { dev_dbg(i2c->adap.dev.parent, "abort condition detected\n"); /* empty the read fifo */ if ((i2c->msg.flags & I2C_M_RD) && (int_status & INT_FIFO_FULL_FILLING)) img_i2c_read_fifo(i2c); /* use atomic mode and try to force a stop bit */ i2c->msg_status = -EIO; img_i2c_stop_start(i2c); return 0; } /* Enable transaction halt on start bit */ if (!i2c->last_msg && line_status & LINESTAT_START_BIT_DET) { img_i2c_transaction_halt(i2c, !i2c->last_msg); /* we're no longer interested in the slave event */ i2c->int_enable &= ~INT_SLAVE_EVENT; } mod_timer(&i2c->check_timer, jiffies + msecs_to_jiffies(1)); if (int_status & INT_STOP_DETECTED) { /* Drain remaining data in FIFO and complete transaction */ if (i2c->msg.flags & I2C_M_RD) img_i2c_read_fifo(i2c); return ISR_COMPLETE(0); } if (i2c->msg.flags & I2C_M_RD) { if (int_status & (INT_FIFO_FULL_FILLING | INT_MASTER_HALTED)) { img_i2c_read_fifo(i2c); if (i2c->msg.len == 0) return ISR_WAITSTOP; } } else { if (int_status & (INT_FIFO_EMPTY | INT_MASTER_HALTED)) { if ((int_status & INT_FIFO_EMPTY) && i2c->msg.len == 0) return ISR_WAITSTOP; img_i2c_write_fifo(i2c); } } if (int_status & INT_MASTER_HALTED) { /* * Release and then enable transaction halt, to * allow only a single byte to proceed. 
*/ img_i2c_transaction_halt(i2c, false); img_i2c_transaction_halt(i2c, !i2c->last_msg); } return 0; } static irqreturn_t img_i2c_isr(int irq, void *dev_id) { struct img_i2c *i2c = dev_id; u32 int_status, line_status; /* We handle transaction completion AFTER accessing registers */ unsigned int hret; /* Read interrupt status register. */ int_status = img_i2c_readl(i2c, SCB_INT_STATUS_REG); /* Clear detected interrupts. */ img_i2c_writel(i2c, SCB_INT_CLEAR_REG, int_status); /* * Read line status and clear it until it actually is clear. We have * to be careful not to lose any line status bits that get latched. */ line_status = img_i2c_readl(i2c, SCB_STATUS_REG); if (line_status & LINESTAT_LATCHED) { img_i2c_writel(i2c, SCB_CLEAR_REG, (line_status & LINESTAT_LATCHED) >> LINESTAT_CLEAR_SHIFT); img_i2c_wr_rd_fence(i2c); } spin_lock(&i2c->lock); /* Keep track of line status bits received */ i2c->line_status &= ~LINESTAT_INPUT_DATA; i2c->line_status |= line_status; /* * Certain interrupts indicate that sclk low timeout is not * a problem. If any of these are set, just continue. */ if ((int_status & INT_SCLK_LOW_TIMEOUT) && !(int_status & (INT_SLAVE_EVENT | INT_FIFO_EMPTY | INT_FIFO_FULL))) { dev_crit(i2c->adap.dev.parent, "fatal: clock low timeout occurred %s addr 0x%02x\n", (i2c->msg.flags & I2C_M_RD) ? "reading" : "writing", i2c->msg.addr); hret = ISR_FATAL(EIO); goto out; } if (i2c->mode == MODE_ATOMIC) hret = img_i2c_atomic(i2c, int_status, line_status); else if (i2c->mode == MODE_AUTOMATIC) hret = img_i2c_auto(i2c, int_status, line_status); else if (i2c->mode == MODE_SEQUENCE) hret = img_i2c_sequence(i2c, int_status); else if (i2c->mode == MODE_WAITSTOP && (int_status & INT_SLAVE_EVENT) && (line_status & LINESTAT_STOP_BIT_DET)) hret = ISR_COMPLETE(0); else if (i2c->mode == MODE_RAW) hret = img_i2c_raw(i2c, int_status, line_status); else hret = 0; /* Clear detected level interrupts. */ img_i2c_writel(i2c, SCB_INT_CLEAR_REG, int_status & INT_LEVEL); out: if (hret & ISR_WAITSTOP) { /* * Only wait for stop on last message. * Also we may already have detected the stop bit. 
*/ if (!i2c->last_msg || i2c->line_status & LINESTAT_STOP_BIT_DET) hret = ISR_COMPLETE(0); else img_i2c_switch_mode(i2c, MODE_WAITSTOP); } /* now we've finished using regs, handle transaction completion */ if (hret & ISR_COMPLETE_M) { int status = -(hret & ISR_STATUS_M); img_i2c_complete_transaction(i2c, status); if (hret & ISR_FATAL_M) img_i2c_switch_mode(i2c, MODE_FATAL); } /* Enable interrupts (int_enable may be altered by changing mode) */ img_i2c_writel(i2c, SCB_INT_MASK_REG, i2c->int_enable); spin_unlock(&i2c->lock); return IRQ_HANDLED; } /* Force a bus reset sequence and wait for it to complete */ static int img_i2c_reset_bus(struct img_i2c *i2c) { unsigned long flags; unsigned long time_left; spin_lock_irqsave(&i2c->lock, flags); reinit_completion(&i2c->msg_complete); img_i2c_reset_start(i2c); spin_unlock_irqrestore(&i2c->lock, flags); time_left = wait_for_completion_timeout(&i2c->msg_complete, IMG_I2C_TIMEOUT); if (time_left == 0) return -ETIMEDOUT; return 0; } static int img_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct img_i2c *i2c = i2c_get_adapdata(adap); bool atomic = false; int i, ret; unsigned long time_left; if (i2c->mode == MODE_SUSPEND) { WARN(1, "refusing to service transaction in suspended state\n"); return -EIO; } if (i2c->mode == MODE_FATAL) return -EIO; for (i = 0; i < num; i++) { /* * 0 byte reads are not possible because the slave could try to * pull the data line low, preventing a stop bit. */ if (!msgs[i].len && msgs[i].flags & I2C_M_RD) return -EIO; /* * 0 byte writes are possible and used for probing, but we * cannot do them in automatic mode, so use atomic mode * instead. * * Also, the I2C_M_IGNORE_NAK mode can only be implemented * in atomic mode. */ if (!msgs[i].len || (msgs[i].flags & I2C_M_IGNORE_NAK)) atomic = true; } ret = pm_runtime_resume_and_get(adap->dev.parent); if (ret < 0) return ret; for (i = 0; i < num; i++) { struct i2c_msg *msg = &msgs[i]; unsigned long flags; spin_lock_irqsave(&i2c->lock, flags); /* * Make a copy of the message struct. We mustn't modify the * original or we'll confuse drivers and i2c-dev. */ i2c->msg = *msg; i2c->msg_status = 0; /* * After the last message we must have waited for a stop bit. * Not waiting can cause problems when the clock is disabled * before the stop bit is sent, and the Linux I2C interface * requires separate transfers not to be joined with a repeated * start. */ i2c->last_msg = (i == num - 1); reinit_completion(&i2c->msg_complete); /* * Clear line status and all interrupts before starting a * transfer, as we may have unserviced interrupts from * previous transfers that might be handled in the context * of the new transfer. */ img_i2c_writel(i2c, SCB_INT_CLEAR_REG, ~0); img_i2c_writel(i2c, SCB_CLEAR_REG, ~0); if (atomic) { img_i2c_atomic_start(i2c); } else { /* * Enable transaction halt if not the last message in * the queue so that we can control repeated starts. */ img_i2c_transaction_halt(i2c, !i2c->last_msg); if (msg->flags & I2C_M_RD) img_i2c_read(i2c); else img_i2c_write(i2c); /* * Release and then enable transaction halt, to * allow only a single byte to proceed. * This doesn't have an effect on the initial transfer * but will allow the following transfers to start * processing if the previous transfer was marked as * complete while the i2c block was halted.
*/ img_i2c_transaction_halt(i2c, false); img_i2c_transaction_halt(i2c, !i2c->last_msg); } spin_unlock_irqrestore(&i2c->lock, flags); time_left = wait_for_completion_timeout(&i2c->msg_complete, IMG_I2C_TIMEOUT); del_timer_sync(&i2c->check_timer); if (time_left == 0) { dev_err(adap->dev.parent, "i2c transfer timed out\n"); i2c->msg_status = -ETIMEDOUT; break; } if (i2c->msg_status) break; } pm_runtime_mark_last_busy(adap->dev.parent); pm_runtime_put_autosuspend(adap->dev.parent); return i2c->msg_status ? i2c->msg_status : num; } static u32 img_i2c_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static const struct i2c_algorithm img_i2c_algo = { .master_xfer = img_i2c_xfer, .functionality = img_i2c_func, }; static int img_i2c_init(struct img_i2c *i2c) { unsigned int clk_khz, bitrate_khz, clk_period, tckh, tckl, tsdh; unsigned int i, data, prescale, inc, int_bitrate, filt; struct img_i2c_timings timing; u32 rev; int ret; ret = pm_runtime_resume_and_get(i2c->adap.dev.parent); if (ret < 0) return ret; rev = img_i2c_readl(i2c, SCB_CORE_REV_REG); if ((rev & 0x00ffffff) < 0x00020200) { dev_info(i2c->adap.dev.parent, "Unknown hardware revision (%d.%d.%d.%d)\n", (rev >> 24) & 0xff, (rev >> 16) & 0xff, (rev >> 8) & 0xff, rev & 0xff); pm_runtime_mark_last_busy(i2c->adap.dev.parent); pm_runtime_put_autosuspend(i2c->adap.dev.parent); return -EINVAL; } /* Fencing enabled by default. */ i2c->need_wr_rd_fence = true; /* Determine what mode we're in from the bitrate */ timing = timings[0]; for (i = 0; i < ARRAY_SIZE(timings); i++) { if (i2c->bitrate <= timings[i].max_bitrate) { timing = timings[i]; break; } } if (i2c->bitrate > timings[ARRAY_SIZE(timings) - 1].max_bitrate) { dev_warn(i2c->adap.dev.parent, "requested bitrate (%u) is higher than the max bitrate supported (%u)\n", i2c->bitrate, timings[ARRAY_SIZE(timings) - 1].max_bitrate); timing = timings[ARRAY_SIZE(timings) - 1]; i2c->bitrate = timing.max_bitrate; } bitrate_khz = i2c->bitrate / 1000; clk_khz = clk_get_rate(i2c->scb_clk) / 1000; /* Find the prescale that would give us that inc (approx delay = 0) */ prescale = SCB_OPT_INC * clk_khz / (256 * 16 * bitrate_khz); prescale = clamp_t(unsigned int, prescale, 1, 8); clk_khz /= prescale; /* Setup the clock increment value */ inc = (256 * 16 * bitrate_khz) / clk_khz; /* * The clock generation logic allows glitches on the bus to be * filtered out. The filter is able to remove bus glitches shorter * than 50 ns. If the clock rate is below 20 MHz the filter is * disabled, and in the 20-40 MHz range there is no need to divide * the clock to get a filter, so the divider is bypassed. */
*/ if (clk_khz < 20000) { filt = SCB_FILT_DISABLE; } else if (clk_khz < 40000) { filt = SCB_FILT_BYPASS; } else { /* Calculate filter clock */ filt = (64000 / ((clk_khz / 1000) * SCB_FILT_GLITCH)); /* Scale up if needed */ if (64000 % ((clk_khz / 1000) * SCB_FILT_GLITCH)) inc++; if (filt > SCB_FILT_INC_MASK) filt = SCB_FILT_INC_MASK; filt = (filt & SCB_FILT_INC_MASK) << SCB_FILT_INC_SHIFT; } data = filt | ((inc & SCB_INC_MASK) << SCB_INC_SHIFT) | (prescale - 1); img_i2c_writel(i2c, SCB_CLK_SET_REG, data); /* Obtain the clock period of the fx16 clock in ns */ clk_period = (256 * 1000000) / (clk_khz * inc); /* Calculate the bitrate in terms of internal clock pulses */ int_bitrate = 1000000 / (bitrate_khz * clk_period); if ((1000000 % (bitrate_khz * clk_period)) >= ((bitrate_khz * clk_period) / 2)) int_bitrate++; /* * Setup clock duty cycle, start with 50% and adjust TCKH and TCKL * values from there if they don't meet minimum timing requirements */ tckh = int_bitrate / 2; tckl = int_bitrate - tckh; /* Adjust TCKH and TCKL values */ data = DIV_ROUND_UP(timing.tckl, clk_period); if (tckl < data) { tckl = data; tckh = int_bitrate - tckl; } if (tckh > 0) --tckh; if (tckl > 0) --tckl; img_i2c_writel(i2c, SCB_TIME_TCKH_REG, tckh); img_i2c_writel(i2c, SCB_TIME_TCKL_REG, tckl); /* Setup TSDH value */ tsdh = DIV_ROUND_UP(timing.tsdh, clk_period); if (tsdh > 1) data = tsdh - 1; else data = 0x01; img_i2c_writel(i2c, SCB_TIME_TSDH_REG, data); /* This value is used later */ tsdh = data; /* Setup TPL value */ data = timing.tpl / clk_period; if (data > 0) --data; img_i2c_writel(i2c, SCB_TIME_TPL_REG, data); /* Setup TPH value */ data = timing.tph / clk_period; if (data > 0) --data; img_i2c_writel(i2c, SCB_TIME_TPH_REG, data); /* Setup TSDL value to TPL + TSDH + 2 */ img_i2c_writel(i2c, SCB_TIME_TSDL_REG, data + tsdh + 2); /* Setup TP2S value */ data = timing.tp2s / clk_period; if (data > 0) --data; img_i2c_writel(i2c, SCB_TIME_TP2S_REG, data); img_i2c_writel(i2c, SCB_TIME_TBI_REG, TIMEOUT_TBI); img_i2c_writel(i2c, SCB_TIME_TSL_REG, TIMEOUT_TSL); img_i2c_writel(i2c, SCB_TIME_TDL_REG, TIMEOUT_TDL); /* Take module out of soft reset and enable clocks */ img_i2c_soft_reset(i2c); /* Disable all interrupts */ img_i2c_writel(i2c, SCB_INT_MASK_REG, 0); /* Clear all interrupts */ img_i2c_writel(i2c, SCB_INT_CLEAR_REG, ~0); /* Clear the scb_line_status events */ img_i2c_writel(i2c, SCB_CLEAR_REG, ~0); /* Enable interrupts */ img_i2c_writel(i2c, SCB_INT_MASK_REG, i2c->int_enable); /* Perform a synchronous sequence to reset the bus */ ret = img_i2c_reset_bus(i2c); pm_runtime_mark_last_busy(i2c->adap.dev.parent); pm_runtime_put_autosuspend(i2c->adap.dev.parent); return ret; } static int img_i2c_probe(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; struct img_i2c *i2c; int irq, ret; u32 val; i2c = devm_kzalloc(&pdev->dev, sizeof(struct img_i2c), GFP_KERNEL); if (!i2c) return -ENOMEM; i2c->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(i2c->base)) return PTR_ERR(i2c->base); irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; i2c->sys_clk = devm_clk_get(&pdev->dev, "sys"); if (IS_ERR(i2c->sys_clk)) { dev_err(&pdev->dev, "can't get system clock\n"); return PTR_ERR(i2c->sys_clk); } i2c->scb_clk = devm_clk_get(&pdev->dev, "scb"); if (IS_ERR(i2c->scb_clk)) { dev_err(&pdev->dev, "can't get core clock\n"); return PTR_ERR(i2c->scb_clk); } ret = devm_request_irq(&pdev->dev, irq, img_i2c_isr, 0, pdev->name, i2c); if (ret) { dev_err(&pdev->dev, "can't request irq %d\n", irq); return 
ret; } /* Set up the exception check timer */ timer_setup(&i2c->check_timer, img_i2c_check_timer, 0); i2c->bitrate = timings[0].max_bitrate; if (!of_property_read_u32(node, "clock-frequency", &val)) i2c->bitrate = val; i2c_set_adapdata(&i2c->adap, i2c); i2c->adap.dev.parent = &pdev->dev; i2c->adap.dev.of_node = node; i2c->adap.owner = THIS_MODULE; i2c->adap.algo = &img_i2c_algo; i2c->adap.retries = 5; i2c->adap.nr = pdev->id; snprintf(i2c->adap.name, sizeof(i2c->adap.name), "IMG SCB I2C"); img_i2c_switch_mode(i2c, MODE_INACTIVE); spin_lock_init(&i2c->lock); init_completion(&i2c->msg_complete); platform_set_drvdata(pdev, i2c); pm_runtime_set_autosuspend_delay(&pdev->dev, IMG_I2C_PM_TIMEOUT); pm_runtime_use_autosuspend(&pdev->dev); pm_runtime_enable(&pdev->dev); if (!pm_runtime_enabled(&pdev->dev)) { ret = img_i2c_runtime_resume(&pdev->dev); if (ret) return ret; } ret = img_i2c_init(i2c); if (ret) goto rpm_disable; ret = i2c_add_numbered_adapter(&i2c->adap); if (ret < 0) goto rpm_disable; return 0; rpm_disable: if (!pm_runtime_enabled(&pdev->dev)) img_i2c_runtime_suspend(&pdev->dev); pm_runtime_disable(&pdev->dev); pm_runtime_dont_use_autosuspend(&pdev->dev); return ret; } static void img_i2c_remove(struct platform_device *dev) { struct img_i2c *i2c = platform_get_drvdata(dev); i2c_del_adapter(&i2c->adap); pm_runtime_disable(&dev->dev); if (!pm_runtime_status_suspended(&dev->dev)) img_i2c_runtime_suspend(&dev->dev); } static int img_i2c_runtime_suspend(struct device *dev) { struct img_i2c *i2c = dev_get_drvdata(dev); clk_disable_unprepare(i2c->scb_clk); clk_disable_unprepare(i2c->sys_clk); return 0; } static int img_i2c_runtime_resume(struct device *dev) { struct img_i2c *i2c = dev_get_drvdata(dev); int ret; ret = clk_prepare_enable(i2c->sys_clk); if (ret) { dev_err(dev, "Unable to enable sys clock\n"); return ret; } ret = clk_prepare_enable(i2c->scb_clk); if (ret) { dev_err(dev, "Unable to enable scb clock\n"); clk_disable_unprepare(i2c->sys_clk); return ret; } return 0; } static int img_i2c_suspend(struct device *dev) { struct img_i2c *i2c = dev_get_drvdata(dev); int ret; ret = pm_runtime_force_suspend(dev); if (ret) return ret; img_i2c_switch_mode(i2c, MODE_SUSPEND); return 0; } static int img_i2c_resume(struct device *dev) { struct img_i2c *i2c = dev_get_drvdata(dev); int ret; ret = pm_runtime_force_resume(dev); if (ret) return ret; img_i2c_init(i2c); return 0; } static const struct dev_pm_ops img_i2c_pm = { RUNTIME_PM_OPS(img_i2c_runtime_suspend, img_i2c_runtime_resume, NULL) SYSTEM_SLEEP_PM_OPS(img_i2c_suspend, img_i2c_resume) }; static const struct of_device_id img_scb_i2c_match[] = { { .compatible = "img,scb-i2c" }, { } }; MODULE_DEVICE_TABLE(of, img_scb_i2c_match); static struct platform_driver img_scb_i2c_driver = { .driver = { .name = "img-i2c-scb", .of_match_table = img_scb_i2c_match, .pm = pm_ptr(&img_i2c_pm), }, .probe = img_i2c_probe, .remove_new = img_i2c_remove, }; module_platform_driver(img_scb_i2c_driver); MODULE_AUTHOR("James Hogan <[email protected]>"); MODULE_DESCRIPTION("IMG host I2C driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/i2c/busses/i2c-img-scb.c
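The divider arithmetic in img_i2c_init() above is plain integer math and can be sanity-checked outside the kernel. The sketch below mirrors the visible prescale, inc, clk_period and int_bitrate steps; SCB_OPT_INC is defined earlier in the driver and is not part of this excerpt, so the value 16 used here is an assumption, and the 33 MHz SCB clock and 400 kHz bitrate are example inputs only.

#include <stdio.h>

#define SCB_OPT_INC 16 /* assumed value; the real define is not in this excerpt */

static unsigned int clamp_uint(unsigned int v, unsigned int lo, unsigned int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	unsigned int clk_khz = 33000;   /* example: 33 MHz SCB clock */
	unsigned int bitrate_khz = 400; /* example: fast-mode bus */
	unsigned int prescale, inc, clk_period, int_bitrate;

	/* Prescale chosen so the clock increment lands near SCB_OPT_INC */
	prescale = SCB_OPT_INC * clk_khz / (256 * 16 * bitrate_khz);
	prescale = clamp_uint(prescale, 1, 8);
	clk_khz /= prescale;

	/* Clock increment value, as in the driver */
	inc = (256 * 16 * bitrate_khz) / clk_khz;

	/* Period of the fx16 clock in ns, then the SCL period in pulses */
	clk_period = (256 * 1000000) / (clk_khz * inc);
	int_bitrate = 1000000 / (bitrate_khz * clk_period);
	if ((1000000 % (bitrate_khz * clk_period)) >=
	    ((bitrate_khz * clk_period) / 2))
		int_bitrate++;

	printf("prescale=%u inc=%u clk_period=%uns int_bitrate=%u\n",
	       prescale, inc, clk_period, int_bitrate);
	return 0;
}

With these example inputs the sketch prints prescale=1, inc=49, a 158 ns internal clock period and an SCL period of 16 internal pulses, which is the kind of result the TCKH/TCKL duty-cycle code that follows in the driver then splits and adjusts.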
/* * Driver for the i2c controller on the Marvell line of host bridges * (e.g, gt642[46]0, mv643[46]0, mv644[46]0, and Orion SoC family). * * Author: Mark A. Greer <[email protected]> * * 2005 (c) MontaVista, Software, Inc. This file is licensed under * the terms of the GNU General Public License version 2. This program * is licensed "as is" without any warranty of any kind, whether express * or implied. */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/mv643xx_i2c.h> #include <linux/platform_device.h> #include <linux/pinctrl/consumer.h> #include <linux/pm_runtime.h> #include <linux/reset.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_irq.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/delay.h> #define MV64XXX_I2C_ADDR_ADDR(val) ((val & 0x7f) << 1) #define MV64XXX_I2C_BAUD_DIV_N(val) (val & 0x7) #define MV64XXX_I2C_BAUD_DIV_M(val) ((val & 0xf) << 3) #define MV64XXX_I2C_REG_CONTROL_ACK BIT(2) #define MV64XXX_I2C_REG_CONTROL_IFLG BIT(3) #define MV64XXX_I2C_REG_CONTROL_STOP BIT(4) #define MV64XXX_I2C_REG_CONTROL_START BIT(5) #define MV64XXX_I2C_REG_CONTROL_TWSIEN BIT(6) #define MV64XXX_I2C_REG_CONTROL_INTEN BIT(7) /* Ctlr status values */ #define MV64XXX_I2C_STATUS_BUS_ERR 0x00 #define MV64XXX_I2C_STATUS_MAST_START 0x08 #define MV64XXX_I2C_STATUS_MAST_REPEAT_START 0x10 #define MV64XXX_I2C_STATUS_MAST_WR_ADDR_ACK 0x18 #define MV64XXX_I2C_STATUS_MAST_WR_ADDR_NO_ACK 0x20 #define MV64XXX_I2C_STATUS_MAST_WR_ACK 0x28 #define MV64XXX_I2C_STATUS_MAST_WR_NO_ACK 0x30 #define MV64XXX_I2C_STATUS_MAST_LOST_ARB 0x38 #define MV64XXX_I2C_STATUS_MAST_RD_ADDR_ACK 0x40 #define MV64XXX_I2C_STATUS_MAST_RD_ADDR_NO_ACK 0x48 #define MV64XXX_I2C_STATUS_MAST_RD_DATA_ACK 0x50 #define MV64XXX_I2C_STATUS_MAST_RD_DATA_NO_ACK 0x58 #define MV64XXX_I2C_STATUS_MAST_WR_ADDR_2_ACK 0xd0 #define MV64XXX_I2C_STATUS_MAST_WR_ADDR_2_NO_ACK 0xd8 #define MV64XXX_I2C_STATUS_MAST_RD_ADDR_2_ACK 0xe0 #define MV64XXX_I2C_STATUS_MAST_RD_ADDR_2_NO_ACK 0xe8 #define MV64XXX_I2C_STATUS_NO_STATUS 0xf8 /* Register defines (I2C bridge) */ #define MV64XXX_I2C_REG_TX_DATA_LO 0xc0 #define MV64XXX_I2C_REG_TX_DATA_HI 0xc4 #define MV64XXX_I2C_REG_RX_DATA_LO 0xc8 #define MV64XXX_I2C_REG_RX_DATA_HI 0xcc #define MV64XXX_I2C_REG_BRIDGE_CONTROL 0xd0 #define MV64XXX_I2C_REG_BRIDGE_STATUS 0xd4 #define MV64XXX_I2C_REG_BRIDGE_INTR_CAUSE 0xd8 #define MV64XXX_I2C_REG_BRIDGE_INTR_MASK 0xdC #define MV64XXX_I2C_REG_BRIDGE_TIMING 0xe0 /* Bridge Control values */ #define MV64XXX_I2C_BRIDGE_CONTROL_WR BIT(0) #define MV64XXX_I2C_BRIDGE_CONTROL_RD BIT(1) #define MV64XXX_I2C_BRIDGE_CONTROL_ADDR_SHIFT 2 #define MV64XXX_I2C_BRIDGE_CONTROL_ADDR_EXT BIT(12) #define MV64XXX_I2C_BRIDGE_CONTROL_TX_SIZE_SHIFT 13 #define MV64XXX_I2C_BRIDGE_CONTROL_RX_SIZE_SHIFT 16 #define MV64XXX_I2C_BRIDGE_CONTROL_ENABLE BIT(19) #define MV64XXX_I2C_BRIDGE_CONTROL_REPEATED_START BIT(20) /* Bridge Status values */ #define MV64XXX_I2C_BRIDGE_STATUS_ERROR BIT(0) /* Driver states */ enum { MV64XXX_I2C_STATE_INVALID, MV64XXX_I2C_STATE_IDLE, MV64XXX_I2C_STATE_WAITING_FOR_START_COND, MV64XXX_I2C_STATE_WAITING_FOR_RESTART, MV64XXX_I2C_STATE_WAITING_FOR_ADDR_1_ACK, MV64XXX_I2C_STATE_WAITING_FOR_ADDR_2_ACK, MV64XXX_I2C_STATE_WAITING_FOR_SLAVE_ACK, MV64XXX_I2C_STATE_WAITING_FOR_SLAVE_DATA, }; /* Driver actions */ enum { MV64XXX_I2C_ACTION_INVALID, MV64XXX_I2C_ACTION_CONTINUE, MV64XXX_I2C_ACTION_SEND_RESTART, 
MV64XXX_I2C_ACTION_SEND_ADDR_1, MV64XXX_I2C_ACTION_SEND_ADDR_2, MV64XXX_I2C_ACTION_SEND_DATA, MV64XXX_I2C_ACTION_RCV_DATA, MV64XXX_I2C_ACTION_RCV_DATA_STOP, MV64XXX_I2C_ACTION_SEND_STOP, }; struct mv64xxx_i2c_regs { u8 addr; u8 ext_addr; u8 data; u8 control; u8 status; u8 clock; u8 soft_reset; }; struct mv64xxx_i2c_data { struct i2c_msg *msgs; int num_msgs; int irq; u32 state; u32 action; u32 aborting; u32 cntl_bits; void __iomem *reg_base; struct mv64xxx_i2c_regs reg_offsets; u32 addr1; u32 addr2; u32 bytes_left; u32 byte_posn; u32 send_stop; u32 block; int rc; u32 freq_m; u32 freq_n; struct clk *clk; struct clk *reg_clk; wait_queue_head_t waitq; spinlock_t lock; struct i2c_msg *msg; struct i2c_adapter adapter; bool offload_enabled; /* 5us delay in order to avoid repeated start timing violation */ bool errata_delay; struct reset_control *rstc; bool irq_clear_inverted; /* Clk div is 2 to the power n, not 2 to the power n + 1 */ bool clk_n_base_0; struct i2c_bus_recovery_info rinfo; bool atomic; }; static struct mv64xxx_i2c_regs mv64xxx_i2c_regs_mv64xxx = { .addr = 0x00, .ext_addr = 0x10, .data = 0x04, .control = 0x08, .status = 0x0c, .clock = 0x0c, .soft_reset = 0x1c, }; static struct mv64xxx_i2c_regs mv64xxx_i2c_regs_sun4i = { .addr = 0x00, .ext_addr = 0x04, .data = 0x08, .control = 0x0c, .status = 0x10, .clock = 0x14, .soft_reset = 0x18, }; static void mv64xxx_i2c_prepare_for_io(struct mv64xxx_i2c_data *drv_data, struct i2c_msg *msg) { u32 dir = 0; drv_data->cntl_bits = MV64XXX_I2C_REG_CONTROL_ACK | MV64XXX_I2C_REG_CONTROL_TWSIEN; if (!drv_data->atomic) drv_data->cntl_bits |= MV64XXX_I2C_REG_CONTROL_INTEN; if (msg->flags & I2C_M_RD) dir = 1; if (msg->flags & I2C_M_TEN) { drv_data->addr1 = 0xf0 | (((u32)msg->addr & 0x300) >> 7) | dir; drv_data->addr2 = (u32)msg->addr & 0xff; } else { drv_data->addr1 = MV64XXX_I2C_ADDR_ADDR((u32)msg->addr) | dir; drv_data->addr2 = 0; } } /* ***************************************************************************** * * Finite State Machine & Interrupt Routines * ***************************************************************************** */ /* Reset hardware and initialize FSM */ static void mv64xxx_i2c_hw_init(struct mv64xxx_i2c_data *drv_data) { if (drv_data->offload_enabled) { writel(0, drv_data->reg_base + MV64XXX_I2C_REG_BRIDGE_CONTROL); writel(0, drv_data->reg_base + MV64XXX_I2C_REG_BRIDGE_TIMING); writel(0, drv_data->reg_base + MV64XXX_I2C_REG_BRIDGE_INTR_CAUSE); writel(0, drv_data->reg_base + MV64XXX_I2C_REG_BRIDGE_INTR_MASK); } writel(0, drv_data->reg_base + drv_data->reg_offsets.soft_reset); writel(MV64XXX_I2C_BAUD_DIV_M(drv_data->freq_m) | MV64XXX_I2C_BAUD_DIV_N(drv_data->freq_n), drv_data->reg_base + drv_data->reg_offsets.clock); writel(0, drv_data->reg_base + drv_data->reg_offsets.addr); writel(0, drv_data->reg_base + drv_data->reg_offsets.ext_addr); writel(MV64XXX_I2C_REG_CONTROL_TWSIEN | MV64XXX_I2C_REG_CONTROL_STOP, drv_data->reg_base + drv_data->reg_offsets.control); if (drv_data->errata_delay) udelay(5); drv_data->state = MV64XXX_I2C_STATE_IDLE; } static void mv64xxx_i2c_fsm(struct mv64xxx_i2c_data *drv_data, u32 status) { /* * If state is idle, then this is likely the remnants of an old * operation that driver has given up on or the user has killed. * If so, issue the stop condition and go to idle. 
*/ if (drv_data->state == MV64XXX_I2C_STATE_IDLE) { drv_data->action = MV64XXX_I2C_ACTION_SEND_STOP; return; } /* The status from the ctlr [mostly] tells us what to do next */ switch (status) { /* Start condition interrupt */ case MV64XXX_I2C_STATUS_MAST_START: /* 0x08 */ case MV64XXX_I2C_STATUS_MAST_REPEAT_START: /* 0x10 */ drv_data->action = MV64XXX_I2C_ACTION_SEND_ADDR_1; drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_ADDR_1_ACK; break; /* Performing a write */ case MV64XXX_I2C_STATUS_MAST_WR_ADDR_ACK: /* 0x18 */ if (drv_data->msg->flags & I2C_M_TEN) { drv_data->action = MV64XXX_I2C_ACTION_SEND_ADDR_2; drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_ADDR_2_ACK; break; } fallthrough; case MV64XXX_I2C_STATUS_MAST_WR_ADDR_2_ACK: /* 0xd0 */ case MV64XXX_I2C_STATUS_MAST_WR_ACK: /* 0x28 */ if ((drv_data->bytes_left == 0) || (drv_data->aborting && (drv_data->byte_posn != 0))) { if (drv_data->send_stop || drv_data->aborting) { drv_data->action = MV64XXX_I2C_ACTION_SEND_STOP; drv_data->state = MV64XXX_I2C_STATE_IDLE; } else { drv_data->action = MV64XXX_I2C_ACTION_SEND_RESTART; drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_RESTART; } } else { drv_data->action = MV64XXX_I2C_ACTION_SEND_DATA; drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_SLAVE_ACK; drv_data->bytes_left--; } break; /* Performing a read */ case MV64XXX_I2C_STATUS_MAST_RD_ADDR_ACK: /* 40 */ if (drv_data->msg->flags & I2C_M_TEN) { drv_data->action = MV64XXX_I2C_ACTION_SEND_ADDR_2; drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_ADDR_2_ACK; break; } fallthrough; case MV64XXX_I2C_STATUS_MAST_RD_ADDR_2_ACK: /* 0xe0 */ if (drv_data->bytes_left == 0) { drv_data->action = MV64XXX_I2C_ACTION_SEND_STOP; drv_data->state = MV64XXX_I2C_STATE_IDLE; break; } fallthrough; case MV64XXX_I2C_STATUS_MAST_RD_DATA_ACK: /* 0x50 */ if (status != MV64XXX_I2C_STATUS_MAST_RD_DATA_ACK) drv_data->action = MV64XXX_I2C_ACTION_CONTINUE; else { drv_data->action = MV64XXX_I2C_ACTION_RCV_DATA; drv_data->bytes_left--; } drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_SLAVE_DATA; if ((drv_data->bytes_left == 1) || drv_data->aborting) drv_data->cntl_bits &= ~MV64XXX_I2C_REG_CONTROL_ACK; break; case MV64XXX_I2C_STATUS_MAST_RD_DATA_NO_ACK: /* 0x58 */ drv_data->action = MV64XXX_I2C_ACTION_RCV_DATA_STOP; drv_data->state = MV64XXX_I2C_STATE_IDLE; break; case MV64XXX_I2C_STATUS_MAST_WR_ADDR_NO_ACK: /* 0x20 */ case MV64XXX_I2C_STATUS_MAST_WR_NO_ACK: /* 30 */ case MV64XXX_I2C_STATUS_MAST_RD_ADDR_NO_ACK: /* 48 */ /* Doesn't seem to be a device at other end */ drv_data->action = MV64XXX_I2C_ACTION_SEND_STOP; drv_data->state = MV64XXX_I2C_STATE_IDLE; drv_data->rc = -ENXIO; break; default: dev_err(&drv_data->adapter.dev, "mv64xxx_i2c_fsm: Ctlr Error -- state: 0x%x, " "status: 0x%x, addr: 0x%x, flags: 0x%x\n", drv_data->state, status, drv_data->msg->addr, drv_data->msg->flags); drv_data->action = MV64XXX_I2C_ACTION_SEND_STOP; mv64xxx_i2c_hw_init(drv_data); i2c_recover_bus(&drv_data->adapter); drv_data->rc = -EAGAIN; } } static void mv64xxx_i2c_send_start(struct mv64xxx_i2c_data *drv_data) { drv_data->msg = drv_data->msgs; drv_data->byte_posn = 0; drv_data->bytes_left = drv_data->msg->len; drv_data->aborting = 0; drv_data->rc = 0; mv64xxx_i2c_prepare_for_io(drv_data, drv_data->msgs); writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_START, drv_data->reg_base + drv_data->reg_offsets.control); } static void mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data) { switch(drv_data->action) { case MV64XXX_I2C_ACTION_SEND_RESTART: /* We should only get here if we have further 
messages */ BUG_ON(drv_data->num_msgs == 0); drv_data->msgs++; drv_data->num_msgs--; mv64xxx_i2c_send_start(drv_data); if (drv_data->errata_delay) udelay(5); /* * We're never at the start of the message here, and by this * time it's already too late to do any protocol mangling. * Thankfully, do not advertise support for that feature. */ drv_data->send_stop = drv_data->num_msgs == 1; break; case MV64XXX_I2C_ACTION_CONTINUE: writel(drv_data->cntl_bits, drv_data->reg_base + drv_data->reg_offsets.control); break; case MV64XXX_I2C_ACTION_SEND_ADDR_1: writel(drv_data->addr1, drv_data->reg_base + drv_data->reg_offsets.data); writel(drv_data->cntl_bits, drv_data->reg_base + drv_data->reg_offsets.control); break; case MV64XXX_I2C_ACTION_SEND_ADDR_2: writel(drv_data->addr2, drv_data->reg_base + drv_data->reg_offsets.data); writel(drv_data->cntl_bits, drv_data->reg_base + drv_data->reg_offsets.control); break; case MV64XXX_I2C_ACTION_SEND_DATA: writel(drv_data->msg->buf[drv_data->byte_posn++], drv_data->reg_base + drv_data->reg_offsets.data); writel(drv_data->cntl_bits, drv_data->reg_base + drv_data->reg_offsets.control); break; case MV64XXX_I2C_ACTION_RCV_DATA: drv_data->msg->buf[drv_data->byte_posn++] = readl(drv_data->reg_base + drv_data->reg_offsets.data); writel(drv_data->cntl_bits, drv_data->reg_base + drv_data->reg_offsets.control); break; case MV64XXX_I2C_ACTION_RCV_DATA_STOP: drv_data->msg->buf[drv_data->byte_posn++] = readl(drv_data->reg_base + drv_data->reg_offsets.data); if (!drv_data->atomic) drv_data->cntl_bits &= ~MV64XXX_I2C_REG_CONTROL_INTEN; writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_STOP, drv_data->reg_base + drv_data->reg_offsets.control); drv_data->block = 0; if (drv_data->errata_delay) udelay(5); wake_up(&drv_data->waitq); break; case MV64XXX_I2C_ACTION_INVALID: default: dev_err(&drv_data->adapter.dev, "mv64xxx_i2c_do_action: Invalid action: %d\n", drv_data->action); drv_data->rc = -EIO; fallthrough; case MV64XXX_I2C_ACTION_SEND_STOP: if (!drv_data->atomic) drv_data->cntl_bits &= ~MV64XXX_I2C_REG_CONTROL_INTEN; writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_STOP, drv_data->reg_base + drv_data->reg_offsets.control); drv_data->block = 0; wake_up(&drv_data->waitq); break; } } static void mv64xxx_i2c_read_offload_rx_data(struct mv64xxx_i2c_data *drv_data, struct i2c_msg *msg) { u32 buf[2]; buf[0] = readl(drv_data->reg_base + MV64XXX_I2C_REG_RX_DATA_LO); buf[1] = readl(drv_data->reg_base + MV64XXX_I2C_REG_RX_DATA_HI); memcpy(msg->buf, buf, msg->len); } static int mv64xxx_i2c_intr_offload(struct mv64xxx_i2c_data *drv_data) { u32 cause, status; cause = readl(drv_data->reg_base + MV64XXX_I2C_REG_BRIDGE_INTR_CAUSE); if (!cause) return IRQ_NONE; status = readl(drv_data->reg_base + MV64XXX_I2C_REG_BRIDGE_STATUS); if (status & MV64XXX_I2C_BRIDGE_STATUS_ERROR) { drv_data->rc = -EIO; goto out; } drv_data->rc = 0; /* * Transaction is a one message read transaction, read data * for this message. */ if (drv_data->num_msgs == 1 && drv_data->msgs[0].flags & I2C_M_RD) { mv64xxx_i2c_read_offload_rx_data(drv_data, drv_data->msgs); drv_data->msgs++; drv_data->num_msgs--; } /* * Transaction is a two messages write/read transaction, read * data for the second (read) message. 
*/ else if (drv_data->num_msgs == 2 && !(drv_data->msgs[0].flags & I2C_M_RD) && drv_data->msgs[1].flags & I2C_M_RD) { mv64xxx_i2c_read_offload_rx_data(drv_data, drv_data->msgs + 1); drv_data->msgs += 2; drv_data->num_msgs -= 2; } out: writel(0, drv_data->reg_base + MV64XXX_I2C_REG_BRIDGE_CONTROL); writel(0, drv_data->reg_base + MV64XXX_I2C_REG_BRIDGE_INTR_CAUSE); drv_data->block = 0; wake_up(&drv_data->waitq); return IRQ_HANDLED; } static irqreturn_t mv64xxx_i2c_intr(int irq, void *dev_id) { struct mv64xxx_i2c_data *drv_data = dev_id; u32 status; irqreturn_t rc = IRQ_NONE; spin_lock(&drv_data->lock); if (drv_data->offload_enabled) rc = mv64xxx_i2c_intr_offload(drv_data); while (readl(drv_data->reg_base + drv_data->reg_offsets.control) & MV64XXX_I2C_REG_CONTROL_IFLG) { /* * It seems that sometimes the controller updates the status * register only after it asserts IFLG in the control register. * This may result in weird bugs when in atomic mode. A delay * of 100 ns before reading the status register solves this * issue. This bug does not seem to appear when using * interrupts. */ if (drv_data->atomic) ndelay(100); status = readl(drv_data->reg_base + drv_data->reg_offsets.status); mv64xxx_i2c_fsm(drv_data, status); mv64xxx_i2c_do_action(drv_data); if (drv_data->irq_clear_inverted) writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_IFLG, drv_data->reg_base + drv_data->reg_offsets.control); rc = IRQ_HANDLED; } spin_unlock(&drv_data->lock); return rc; } /* ***************************************************************************** * * I2C Msg Execution Routines * ***************************************************************************** */ static void mv64xxx_i2c_wait_for_completion(struct mv64xxx_i2c_data *drv_data) { long time_left; unsigned long flags; char abort = 0; time_left = wait_event_timeout(drv_data->waitq, !drv_data->block, drv_data->adapter.timeout); spin_lock_irqsave(&drv_data->lock, flags); if (!time_left) { /* Timed out */ drv_data->rc = -ETIMEDOUT; abort = 1; } else if (time_left < 0) { /* Interrupted/Error */ drv_data->rc = time_left; /* errno value */ abort = 1; } if (abort && drv_data->block) { drv_data->aborting = 1; spin_unlock_irqrestore(&drv_data->lock, flags); time_left = wait_event_timeout(drv_data->waitq, !drv_data->block, drv_data->adapter.timeout); if ((time_left <= 0) && drv_data->block) { drv_data->state = MV64XXX_I2C_STATE_IDLE; dev_err(&drv_data->adapter.dev, "mv64xxx: I2C bus locked, block: %d, " "time_left: %d\n", drv_data->block, (int)time_left); mv64xxx_i2c_hw_init(drv_data); i2c_recover_bus(&drv_data->adapter); } } else spin_unlock_irqrestore(&drv_data->lock, flags); } static void mv64xxx_i2c_wait_polling(struct mv64xxx_i2c_data *drv_data) { ktime_t timeout = ktime_add_ms(ktime_get(), drv_data->adapter.timeout); while (READ_ONCE(drv_data->block) && ktime_compare(ktime_get(), timeout) < 0) { udelay(5); mv64xxx_i2c_intr(0, drv_data); } } static int mv64xxx_i2c_execute_msg(struct mv64xxx_i2c_data *drv_data, struct i2c_msg *msg, int is_last) { unsigned long flags; spin_lock_irqsave(&drv_data->lock, flags); drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND; drv_data->send_stop = is_last; drv_data->block = 1; mv64xxx_i2c_send_start(drv_data); spin_unlock_irqrestore(&drv_data->lock, flags); if (!drv_data->atomic) mv64xxx_i2c_wait_for_completion(drv_data); else mv64xxx_i2c_wait_polling(drv_data); return drv_data->rc; } static void mv64xxx_i2c_prepare_tx(struct mv64xxx_i2c_data *drv_data) { struct i2c_msg *msg = drv_data->msgs; u32 buf[2]; memcpy(buf, 
msg->buf, msg->len); writel(buf[0], drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_LO); writel(buf[1], drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_HI); } static int mv64xxx_i2c_offload_xfer(struct mv64xxx_i2c_data *drv_data) { struct i2c_msg *msgs = drv_data->msgs; int num = drv_data->num_msgs; unsigned long ctrl_reg; unsigned long flags; spin_lock_irqsave(&drv_data->lock, flags); /* Build transaction */ ctrl_reg = MV64XXX_I2C_BRIDGE_CONTROL_ENABLE | (msgs[0].addr << MV64XXX_I2C_BRIDGE_CONTROL_ADDR_SHIFT); if (msgs[0].flags & I2C_M_TEN) ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_ADDR_EXT; /* Single write message transaction */ if (num == 1 && !(msgs[0].flags & I2C_M_RD)) { size_t len = msgs[0].len - 1; ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_WR | (len << MV64XXX_I2C_BRIDGE_CONTROL_TX_SIZE_SHIFT); mv64xxx_i2c_prepare_tx(drv_data); } /* Single read message transaction */ else if (num == 1 && msgs[0].flags & I2C_M_RD) { size_t len = msgs[0].len - 1; ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_RD | (len << MV64XXX_I2C_BRIDGE_CONTROL_RX_SIZE_SHIFT); } /* * Transaction with one write and one read message. This is * guaranteed by the mv64xxx_i2c_can_offload() checks. */ else if (num == 2) { size_t lentx = msgs[0].len - 1; size_t lenrx = msgs[1].len - 1; ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_RD | MV64XXX_I2C_BRIDGE_CONTROL_WR | (lentx << MV64XXX_I2C_BRIDGE_CONTROL_TX_SIZE_SHIFT) | (lenrx << MV64XXX_I2C_BRIDGE_CONTROL_RX_SIZE_SHIFT) | MV64XXX_I2C_BRIDGE_CONTROL_REPEATED_START; mv64xxx_i2c_prepare_tx(drv_data); } /* Execute transaction */ drv_data->block = 1; writel(ctrl_reg, drv_data->reg_base + MV64XXX_I2C_REG_BRIDGE_CONTROL); spin_unlock_irqrestore(&drv_data->lock, flags); mv64xxx_i2c_wait_for_completion(drv_data); return drv_data->rc; } static bool mv64xxx_i2c_valid_offload_sz(struct i2c_msg *msg) { return msg->len <= 8 && msg->len >= 1; } static bool mv64xxx_i2c_can_offload(struct mv64xxx_i2c_data *drv_data) { struct i2c_msg *msgs = drv_data->msgs; int num = drv_data->num_msgs; if (!drv_data->offload_enabled) return false; /* * We can offload a transaction consisting of a single * message, as long as the message has a length between 1 and * 8 bytes. */ if (num == 1 && mv64xxx_i2c_valid_offload_sz(msgs)) return true; /* * We can offload a transaction consisting of two messages, if * the first is a write and the second is a read, and both have * a length between 1 and 8 bytes. 
*/ if (num == 2 && mv64xxx_i2c_valid_offload_sz(msgs) && mv64xxx_i2c_valid_offload_sz(msgs + 1) && !(msgs[0].flags & I2C_M_RD) && msgs[1].flags & I2C_M_RD) return true; return false; } /* ***************************************************************************** * * I2C Core Support Routines (Interface to higher level I2C code) * ***************************************************************************** */ static u32 mv64xxx_i2c_functionality(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR | I2C_FUNC_SMBUS_EMUL; } static int mv64xxx_i2c_xfer_core(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) { struct mv64xxx_i2c_data *drv_data = i2c_get_adapdata(adap); int rc, ret = num; rc = pm_runtime_resume_and_get(&adap->dev); if (rc) return rc; BUG_ON(drv_data->msgs != NULL); drv_data->msgs = msgs; drv_data->num_msgs = num; if (mv64xxx_i2c_can_offload(drv_data) && !drv_data->atomic) rc = mv64xxx_i2c_offload_xfer(drv_data); else rc = mv64xxx_i2c_execute_msg(drv_data, &msgs[0], num == 1); if (rc < 0) ret = rc; drv_data->num_msgs = 0; drv_data->msgs = NULL; pm_runtime_mark_last_busy(&adap->dev); pm_runtime_put_autosuspend(&adap->dev); return ret; } static int mv64xxx_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) { struct mv64xxx_i2c_data *drv_data = i2c_get_adapdata(adap); drv_data->atomic = 0; return mv64xxx_i2c_xfer_core(adap, msgs, num); } static int mv64xxx_i2c_xfer_atomic(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) { struct mv64xxx_i2c_data *drv_data = i2c_get_adapdata(adap); drv_data->atomic = 1; return mv64xxx_i2c_xfer_core(adap, msgs, num); } static const struct i2c_algorithm mv64xxx_i2c_algo = { .master_xfer = mv64xxx_i2c_xfer, .master_xfer_atomic = mv64xxx_i2c_xfer_atomic, .functionality = mv64xxx_i2c_functionality, }; /* ***************************************************************************** * * Driver Interface & Early Init Routines * ***************************************************************************** */ static const struct of_device_id mv64xxx_i2c_of_match_table[] = { { .compatible = "allwinner,sun4i-a10-i2c", .data = &mv64xxx_i2c_regs_sun4i}, { .compatible = "allwinner,sun6i-a31-i2c", .data = &mv64xxx_i2c_regs_sun4i}, { .compatible = "marvell,mv64xxx-i2c", .data = &mv64xxx_i2c_regs_mv64xxx}, { .compatible = "marvell,mv78230-i2c", .data = &mv64xxx_i2c_regs_mv64xxx}, { .compatible = "marvell,mv78230-a0-i2c", .data = &mv64xxx_i2c_regs_mv64xxx}, {} }; MODULE_DEVICE_TABLE(of, mv64xxx_i2c_of_match_table); #ifdef CONFIG_OF static int mv64xxx_calc_freq(struct mv64xxx_i2c_data *drv_data, const int tclk, const int n, const int m) { if (drv_data->clk_n_base_0) return tclk / (10 * (m + 1) * (1 << n)); else return tclk / (10 * (m + 1) * (2 << n)); } static bool mv64xxx_find_baud_factors(struct mv64xxx_i2c_data *drv_data, const u32 req_freq, const u32 tclk) { int freq, delta, best_delta = INT_MAX; int m, n; for (n = 0; n <= 7; n++) for (m = 0; m <= 15; m++) { freq = mv64xxx_calc_freq(drv_data, tclk, n, m); delta = req_freq - freq; if (delta >= 0 && delta < best_delta) { drv_data->freq_m = m; drv_data->freq_n = n; best_delta = delta; } if (best_delta == 0) return true; } if (best_delta == INT_MAX) return false; return true; } static int mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data, struct device *dev) { const struct of_device_id *device; struct device_node *np = dev->of_node; u32 bus_freq, tclk; int rc = 0; /* CLK is mandatory when using DT to describe the i2c bus. 
We * need to know tclk in order to calculate bus clock * factors. */ if (!drv_data->clk) { rc = -ENODEV; goto out; } tclk = clk_get_rate(drv_data->clk); if (of_property_read_u32(np, "clock-frequency", &bus_freq)) bus_freq = I2C_MAX_STANDARD_MODE_FREQ; /* 100kHz by default */ if (of_device_is_compatible(np, "allwinner,sun4i-a10-i2c") || of_device_is_compatible(np, "allwinner,sun6i-a31-i2c")) drv_data->clk_n_base_0 = true; if (!mv64xxx_find_baud_factors(drv_data, bus_freq, tclk)) { rc = -EINVAL; goto out; } drv_data->rstc = devm_reset_control_get_optional_exclusive(dev, NULL); if (IS_ERR(drv_data->rstc)) { rc = PTR_ERR(drv_data->rstc); goto out; } /* It's not yet defined how timeouts will be specified in the device * tree. So hard code the value to 1 second. */ drv_data->adapter.timeout = HZ; device = of_match_device(mv64xxx_i2c_of_match_table, dev); if (!device) return -ENODEV; memcpy(&drv_data->reg_offsets, device->data, sizeof(drv_data->reg_offsets)); /* * For controllers embedded in new SoCs activate the * Transaction Generator support and the errata fix. */ if (of_device_is_compatible(np, "marvell,mv78230-i2c")) { drv_data->offload_enabled = true; /* The delay is only needed in standard mode (100kHz) */ if (bus_freq <= I2C_MAX_STANDARD_MODE_FREQ) drv_data->errata_delay = true; } if (of_device_is_compatible(np, "marvell,mv78230-a0-i2c")) { drv_data->offload_enabled = false; /* The delay is only needed in standard mode (100kHz) */ if (bus_freq <= I2C_MAX_STANDARD_MODE_FREQ) drv_data->errata_delay = true; } if (of_device_is_compatible(np, "allwinner,sun6i-a31-i2c")) drv_data->irq_clear_inverted = true; out: return rc; } #else /* CONFIG_OF */ static int mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data, struct device *dev) { return -ENODEV; } #endif /* CONFIG_OF */ static int mv64xxx_i2c_init_recovery_info(struct mv64xxx_i2c_data *drv_data, struct device *dev) { struct i2c_bus_recovery_info *rinfo = &drv_data->rinfo; rinfo->pinctrl = devm_pinctrl_get(dev); if (IS_ERR(rinfo->pinctrl)) { if (PTR_ERR(rinfo->pinctrl) == -EPROBE_DEFER) return -EPROBE_DEFER; dev_info(dev, "can't get pinctrl, bus recovery not supported\n"); return PTR_ERR(rinfo->pinctrl); } else if (!rinfo->pinctrl) { return -ENODEV; } drv_data->adapter.bus_recovery_info = rinfo; return 0; } static int mv64xxx_i2c_runtime_suspend(struct device *dev) { struct mv64xxx_i2c_data *drv_data = dev_get_drvdata(dev); reset_control_assert(drv_data->rstc); clk_disable_unprepare(drv_data->reg_clk); clk_disable_unprepare(drv_data->clk); return 0; } static int mv64xxx_i2c_runtime_resume(struct device *dev) { struct mv64xxx_i2c_data *drv_data = dev_get_drvdata(dev); clk_prepare_enable(drv_data->clk); clk_prepare_enable(drv_data->reg_clk); reset_control_reset(drv_data->rstc); mv64xxx_i2c_hw_init(drv_data); return 0; } static int mv64xxx_i2c_probe(struct platform_device *pd) { struct mv64xxx_i2c_data *drv_data; struct mv64xxx_i2c_pdata *pdata = dev_get_platdata(&pd->dev); int rc; if ((!pdata && !pd->dev.of_node)) return -ENODEV; drv_data = devm_kzalloc(&pd->dev, sizeof(struct mv64xxx_i2c_data), GFP_KERNEL); if (!drv_data) return -ENOMEM; drv_data->reg_base = devm_platform_ioremap_resource(pd, 0); if (IS_ERR(drv_data->reg_base)) return PTR_ERR(drv_data->reg_base); strscpy(drv_data->adapter.name, MV64XXX_I2C_CTLR_NAME " adapter", sizeof(drv_data->adapter.name)); init_waitqueue_head(&drv_data->waitq); spin_lock_init(&drv_data->lock); /* Not all platforms have clocks */ drv_data->clk = devm_clk_get(&pd->dev, NULL); if (IS_ERR(drv_data->clk)) { if 
(PTR_ERR(drv_data->clk) == -EPROBE_DEFER) return -EPROBE_DEFER; drv_data->clk = NULL; } drv_data->reg_clk = devm_clk_get(&pd->dev, "reg"); if (IS_ERR(drv_data->reg_clk)) { if (PTR_ERR(drv_data->reg_clk) == -EPROBE_DEFER) return -EPROBE_DEFER; drv_data->reg_clk = NULL; } drv_data->irq = platform_get_irq(pd, 0); if (drv_data->irq < 0) return drv_data->irq; if (pdata) { drv_data->freq_m = pdata->freq_m; drv_data->freq_n = pdata->freq_n; drv_data->adapter.timeout = msecs_to_jiffies(pdata->timeout); drv_data->offload_enabled = false; memcpy(&drv_data->reg_offsets, &mv64xxx_i2c_regs_mv64xxx, sizeof(drv_data->reg_offsets)); } else if (pd->dev.of_node) { rc = mv64xxx_of_config(drv_data, &pd->dev); if (rc) return rc; } rc = mv64xxx_i2c_init_recovery_info(drv_data, &pd->dev); if (rc == -EPROBE_DEFER) return rc; drv_data->adapter.dev.parent = &pd->dev; drv_data->adapter.algo = &mv64xxx_i2c_algo; drv_data->adapter.owner = THIS_MODULE; drv_data->adapter.class = I2C_CLASS_DEPRECATED; drv_data->adapter.nr = pd->id; drv_data->adapter.dev.of_node = pd->dev.of_node; platform_set_drvdata(pd, drv_data); i2c_set_adapdata(&drv_data->adapter, drv_data); pm_runtime_set_autosuspend_delay(&pd->dev, MSEC_PER_SEC); pm_runtime_use_autosuspend(&pd->dev); pm_runtime_enable(&pd->dev); if (!pm_runtime_enabled(&pd->dev)) { rc = mv64xxx_i2c_runtime_resume(&pd->dev); if (rc) goto exit_disable_pm; } rc = request_irq(drv_data->irq, mv64xxx_i2c_intr, 0, MV64XXX_I2C_CTLR_NAME, drv_data); if (rc) { dev_err(&drv_data->adapter.dev, "mv64xxx: Can't register intr handler irq%d: %d\n", drv_data->irq, rc); goto exit_disable_pm; } else if ((rc = i2c_add_numbered_adapter(&drv_data->adapter)) != 0) { dev_err(&drv_data->adapter.dev, "mv64xxx: Can't add i2c adapter, rc: %d\n", -rc); goto exit_free_irq; } return 0; exit_free_irq: free_irq(drv_data->irq, drv_data); exit_disable_pm: pm_runtime_disable(&pd->dev); if (!pm_runtime_status_suspended(&pd->dev)) mv64xxx_i2c_runtime_suspend(&pd->dev); return rc; } static void mv64xxx_i2c_remove(struct platform_device *pd) { struct mv64xxx_i2c_data *drv_data = platform_get_drvdata(pd); i2c_del_adapter(&drv_data->adapter); free_irq(drv_data->irq, drv_data); pm_runtime_disable(&pd->dev); if (!pm_runtime_status_suspended(&pd->dev)) mv64xxx_i2c_runtime_suspend(&pd->dev); } static const struct dev_pm_ops mv64xxx_i2c_pm_ops = { SET_RUNTIME_PM_OPS(mv64xxx_i2c_runtime_suspend, mv64xxx_i2c_runtime_resume, NULL) SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) }; static struct platform_driver mv64xxx_i2c_driver = { .probe = mv64xxx_i2c_probe, .remove_new = mv64xxx_i2c_remove, .driver = { .name = MV64XXX_I2C_CTLR_NAME, .pm = &mv64xxx_i2c_pm_ops, .of_match_table = mv64xxx_i2c_of_match_table, }, }; module_platform_driver(mv64xxx_i2c_driver); MODULE_AUTHOR("Mark A. Greer <[email protected]>"); MODULE_DESCRIPTION("Marvell mv64xxx host bridge i2c ctlr driver"); MODULE_LICENSE("GPL");
linux-master
drivers/i2c/busses/i2c-mv64xxx.c
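The baud-rate factor search in mv64xxx_find_baud_factors() above is self-contained integer arithmetic, so it can be exercised outside the kernel. The sketch below reproduces the scan over the 3-bit N and 4-bit M dividers using the default divider formula (the clk_n_base_0 == false case, divisor 10 * (m + 1) * (2 << n)); the 250 MHz tclk and 100 kHz request are example values only, not taken from any particular board.

#include <limits.h>
#include <stdio.h>

int main(void)
{
	const unsigned int tclk = 250000000; /* example: 250 MHz TCLK */
	const unsigned int req = 100000;     /* example: request 100 kHz */
	int best_delta = INT_MAX, best_m = 0, best_n = 0;

	for (int n = 0; n <= 7; n++) {
		for (int m = 0; m <= 15; m++) {
			/* clk_n_base_0 == false variant: divider 2 << n */
			int freq = tclk / (10 * (m + 1) * (2 << n));
			int delta = (int)req - freq;

			/* Keep the closest frequency not above the request */
			if (delta >= 0 && delta < best_delta) {
				best_delta = delta;
				best_m = m;
				best_n = n;
			}
		}
	}

	printf("freq_m=%d freq_n=%d (actual %d Hz)\n", best_m, best_n,
	       tclk / (10 * (best_m + 1) * (2 << best_n)));
	return 0;
}

For these inputs the search settles on M=15, N=3, giving roughly 97.7 kHz: like the driver, the sketch only accepts factors that do not overshoot the requested bus frequency.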
// SPDX-License-Identifier: GPL-2.0 /* * Driver for the Renesas RZ/V2M I2C unit * * Copyright (C) 2016-2022 Renesas Electronics Corporation */ #include <linux/bits.h> #include <linux/clk.h> #include <linux/device.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/i2c.h> #include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/math64.h> #include <linux/module.h> #include <linux/mod_devicetable.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/reset.h> /* Register offsets */ #define IICB0DAT 0x00 /* Data Register */ #define IICB0CTL0 0x08 /* Control Register 0 */ #define IICB0TRG 0x0C /* Trigger Register */ #define IICB0STR0 0x10 /* Status Register 0 */ #define IICB0CTL1 0x20 /* Control Register 1 */ #define IICB0WL 0x24 /* Low Level Width Setting Reg */ #define IICB0WH 0x28 /* High Level Width Setting Reg */ /* IICB0CTL0 */ #define IICB0IICE BIT(7) /* I2C Enable */ #define IICB0SLWT BIT(1) /* Interrupt Request Timing */ #define IICB0SLAC BIT(0) /* Acknowledge */ /* IICB0TRG */ #define IICB0WRET BIT(2) /* Quit Wait Trigger */ #define IICB0STT BIT(1) /* Create Start Condition Trigger */ #define IICB0SPT BIT(0) /* Create Stop Condition Trigger */ /* IICB0STR0 */ #define IICB0SSAC BIT(8) /* Ack Flag */ #define IICB0SSBS BIT(6) /* Bus Flag */ #define IICB0SSSP BIT(4) /* Stop Condition Flag */ /* IICB0CTL1 */ #define IICB0MDSC BIT(7) /* Bus Mode */ #define IICB0SLSE BIT(1) /* Start condition output */ struct rzv2m_i2c_priv { void __iomem *base; struct i2c_adapter adap; struct clk *clk; int bus_mode; struct completion msg_tia_done; u32 iicb0wl; u32 iicb0wh; }; enum bcr_index { RZV2M_I2C_100K = 0, RZV2M_I2C_400K, }; struct bitrate_config { unsigned int percent_low; unsigned int min_hold_time_ns; }; static const struct bitrate_config bitrate_configs[] = { [RZV2M_I2C_100K] = { 47, 3450 }, [RZV2M_I2C_400K] = { 52, 900 }, }; static inline void bit_setl(void __iomem *addr, u32 val) { writel(readl(addr) | val, addr); } static inline void bit_clrl(void __iomem *addr, u32 val) { writel(readl(addr) & ~val, addr); } static irqreturn_t rzv2m_i2c_tia_irq_handler(int this_irq, void *dev_id) { struct rzv2m_i2c_priv *priv = dev_id; complete(&priv->msg_tia_done); return IRQ_HANDLED; } /* Calculate IICB0WL and IICB0WH */ static int rzv2m_i2c_clock_calculate(struct device *dev, struct rzv2m_i2c_priv *priv) { const struct bitrate_config *config; unsigned int hold_time_ns; unsigned int total_pclks; unsigned int trf_pclks; unsigned long pclk_hz; struct i2c_timings t; u32 trf_ns; i2c_parse_fw_timings(dev, &t, true); pclk_hz = clk_get_rate(priv->clk); total_pclks = pclk_hz / t.bus_freq_hz; trf_ns = t.scl_rise_ns + t.scl_fall_ns; trf_pclks = mul_u64_u32_div(pclk_hz, trf_ns, NSEC_PER_SEC); /* Config setting */ switch (t.bus_freq_hz) { case I2C_MAX_FAST_MODE_FREQ: priv->bus_mode = RZV2M_I2C_400K; break; case I2C_MAX_STANDARD_MODE_FREQ: priv->bus_mode = RZV2M_I2C_100K; break; default: dev_err(dev, "transfer speed is invalid\n"); return -EINVAL; } config = &bitrate_configs[priv->bus_mode]; /* IICB0WL = (percent_low / Transfer clock) x PCLK */ priv->iicb0wl = total_pclks * config->percent_low / 100; if (priv->iicb0wl > (BIT(10) - 1)) return -EINVAL; /* IICB0WH = ((percent_high / Transfer clock) x PCLK) - (tR + tF) */ priv->iicb0wh = total_pclks - priv->iicb0wl - trf_pclks; if (priv->iicb0wh > (BIT(10) - 1)) return -EINVAL; /* * Data hold time must be less than 0.9us in fast mode and * 3.45us in standard mode. 
* Data hold time = IICB0WL[9:2] / PCLK */ hold_time_ns = div64_ul((u64)(priv->iicb0wl >> 2) * NSEC_PER_SEC, pclk_hz); if (hold_time_ns > config->min_hold_time_ns) { dev_err(dev, "data hold time %dns is over %dns\n", hold_time_ns, config->min_hold_time_ns); return -EINVAL; } return 0; } static void rzv2m_i2c_init(struct rzv2m_i2c_priv *priv) { u32 i2c_ctl0; u32 i2c_ctl1; /* i2c disable */ writel(0, priv->base + IICB0CTL0); /* IICB0CTL1 setting */ i2c_ctl1 = IICB0SLSE; if (priv->bus_mode == RZV2M_I2C_400K) i2c_ctl1 |= IICB0MDSC; writel(i2c_ctl1, priv->base + IICB0CTL1); /* IICB0WL IICB0WH setting */ writel(priv->iicb0wl, priv->base + IICB0WL); writel(priv->iicb0wh, priv->base + IICB0WH); /* i2c enable after setting */ i2c_ctl0 = IICB0SLWT | IICB0SLAC | IICB0IICE; writel(i2c_ctl0, priv->base + IICB0CTL0); } static int rzv2m_i2c_write_with_ack(struct rzv2m_i2c_priv *priv, u32 data) { unsigned long time_left; reinit_completion(&priv->msg_tia_done); writel(data, priv->base + IICB0DAT); time_left = wait_for_completion_timeout(&priv->msg_tia_done, priv->adap.timeout); if (!time_left) return -ETIMEDOUT; /* Confirm ACK */ if ((readl(priv->base + IICB0STR0) & IICB0SSAC) != IICB0SSAC) return -ENXIO; return 0; } static int rzv2m_i2c_read_with_ack(struct rzv2m_i2c_priv *priv, u8 *data, bool last) { unsigned long time_left; u32 data_tmp; reinit_completion(&priv->msg_tia_done); /* Interrupt request timing : 8th clock */ bit_clrl(priv->base + IICB0CTL0, IICB0SLWT); /* Exit the wait state */ writel(IICB0WRET, priv->base + IICB0TRG); /* Wait for transaction */ time_left = wait_for_completion_timeout(&priv->msg_tia_done, priv->adap.timeout); if (!time_left) return -ETIMEDOUT; if (last) { /* Disable ACK */ bit_clrl(priv->base + IICB0CTL0, IICB0SLAC); /* Read data*/ data_tmp = readl(priv->base + IICB0DAT); /* Interrupt request timing : 9th clock */ bit_setl(priv->base + IICB0CTL0, IICB0SLWT); /* Exit the wait state */ writel(IICB0WRET, priv->base + IICB0TRG); /* Wait for transaction */ time_left = wait_for_completion_timeout(&priv->msg_tia_done, priv->adap.timeout); if (!time_left) return -ETIMEDOUT; /* Enable ACK */ bit_setl(priv->base + IICB0CTL0, IICB0SLAC); } else { /* Read data */ data_tmp = readl(priv->base + IICB0DAT); } *data = data_tmp; return 0; } static int rzv2m_i2c_send(struct rzv2m_i2c_priv *priv, struct i2c_msg *msg, unsigned int *count) { unsigned int i; int ret; for (i = 0; i < msg->len; i++) { ret = rzv2m_i2c_write_with_ack(priv, msg->buf[i]); if (ret < 0) return ret; } *count = i; return 0; } static int rzv2m_i2c_receive(struct rzv2m_i2c_priv *priv, struct i2c_msg *msg, unsigned int *count) { unsigned int i; int ret; for (i = 0; i < msg->len; i++) { ret = rzv2m_i2c_read_with_ack(priv, &msg->buf[i], (msg->len - 1) == i); if (ret < 0) return ret; } *count = i; return 0; } static int rzv2m_i2c_send_address(struct rzv2m_i2c_priv *priv, struct i2c_msg *msg) { u32 addr; int ret; if (msg->flags & I2C_M_TEN) { /* * 10-bit address * addr_1: 5'b11110 | addr[9:8] | (R/nW) * addr_2: addr[7:0] */ addr = 0xf0 | ((msg->addr & GENMASK(9, 8)) >> 7); addr |= !!(msg->flags & I2C_M_RD); /* Send 1st address(extend code) */ ret = rzv2m_i2c_write_with_ack(priv, addr); if (ret) return ret; /* Send 2nd address */ ret = rzv2m_i2c_write_with_ack(priv, msg->addr & 0xff); } else { /* 7-bit address */ addr = i2c_8bit_addr_from_msg(msg); ret = rzv2m_i2c_write_with_ack(priv, addr); } return ret; } static int rzv2m_i2c_stop_condition(struct rzv2m_i2c_priv *priv) { u32 value; /* Send stop condition */ writel(IICB0SPT, 
priv->base + IICB0TRG); return readl_poll_timeout(priv->base + IICB0STR0, value, value & IICB0SSSP, 100, jiffies_to_usecs(priv->adap.timeout)); } static int rzv2m_i2c_master_xfer_msg(struct rzv2m_i2c_priv *priv, struct i2c_msg *msg, int stop) { unsigned int count = 0; int ret, read = !!(msg->flags & I2C_M_RD); /* Send start condition */ writel(IICB0STT, priv->base + IICB0TRG); ret = rzv2m_i2c_send_address(priv, msg); if (!ret) { if (read) ret = rzv2m_i2c_receive(priv, msg, &count); else ret = rzv2m_i2c_send(priv, msg, &count); if (!ret && stop) ret = rzv2m_i2c_stop_condition(priv); } if (ret == -ENXIO) rzv2m_i2c_stop_condition(priv); else if (ret < 0) rzv2m_i2c_init(priv); else ret = count; return ret; } static int rzv2m_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct rzv2m_i2c_priv *priv = i2c_get_adapdata(adap); struct device *dev = priv->adap.dev.parent; unsigned int i; int ret; ret = pm_runtime_resume_and_get(dev); if (ret < 0) return ret; if (readl(priv->base + IICB0STR0) & IICB0SSBS) { ret = -EAGAIN; goto out; } /* I2C main transfer */ for (i = 0; i < num; i++) { ret = rzv2m_i2c_master_xfer_msg(priv, &msgs[i], i == (num - 1)); if (ret < 0) goto out; } ret = num; out: pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); return ret; } static u32 rzv2m_i2c_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK) | I2C_FUNC_10BIT_ADDR; } static int rzv2m_i2c_disable(struct device *dev, struct rzv2m_i2c_priv *priv) { int ret; ret = pm_runtime_resume_and_get(dev); if (ret < 0) return ret; bit_clrl(priv->base + IICB0CTL0, IICB0IICE); pm_runtime_put(dev); return 0; } static const struct i2c_adapter_quirks rzv2m_i2c_quirks = { .flags = I2C_AQ_NO_ZERO_LEN, }; static struct i2c_algorithm rzv2m_i2c_algo = { .master_xfer = rzv2m_i2c_master_xfer, .functionality = rzv2m_i2c_func, }; static int rzv2m_i2c_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct rzv2m_i2c_priv *priv; struct reset_control *rstc; struct i2c_adapter *adap; struct resource *res; int irq, ret; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); priv->clk = devm_clk_get(dev, NULL); if (IS_ERR(priv->clk)) return dev_err_probe(dev, PTR_ERR(priv->clk), "Can't get clock\n"); rstc = devm_reset_control_get_shared(dev, NULL); if (IS_ERR(rstc)) return dev_err_probe(dev, PTR_ERR(rstc), "Missing reset ctrl\n"); /* * The reset also affects other HW that is not under the control * of Linux. Therefore, all we can do is deassert the reset. 
*/ reset_control_deassert(rstc); irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; ret = devm_request_irq(dev, irq, rzv2m_i2c_tia_irq_handler, 0, dev_name(dev), priv); if (ret < 0) return dev_err_probe(dev, ret, "Unable to request irq %d\n", irq); adap = &priv->adap; adap->nr = pdev->id; adap->algo = &rzv2m_i2c_algo; adap->quirks = &rzv2m_i2c_quirks; adap->dev.parent = dev; adap->owner = THIS_MODULE; device_set_node(&adap->dev, dev_fwnode(dev)); i2c_set_adapdata(adap, priv); strscpy(adap->name, pdev->name, sizeof(adap->name)); init_completion(&priv->msg_tia_done); ret = rzv2m_i2c_clock_calculate(dev, priv); if (ret < 0) return ret; pm_runtime_enable(dev); pm_runtime_get_sync(dev); rzv2m_i2c_init(priv); pm_runtime_put(dev); platform_set_drvdata(pdev, priv); ret = i2c_add_numbered_adapter(adap); if (ret < 0) { rzv2m_i2c_disable(dev, priv); pm_runtime_disable(dev); } return ret; } static void rzv2m_i2c_remove(struct platform_device *pdev) { struct rzv2m_i2c_priv *priv = platform_get_drvdata(pdev); struct device *dev = priv->adap.dev.parent; i2c_del_adapter(&priv->adap); rzv2m_i2c_disable(dev, priv); pm_runtime_disable(dev); } static int rzv2m_i2c_suspend(struct device *dev) { struct rzv2m_i2c_priv *priv = dev_get_drvdata(dev); return rzv2m_i2c_disable(dev, priv); } static int rzv2m_i2c_resume(struct device *dev) { struct rzv2m_i2c_priv *priv = dev_get_drvdata(dev); int ret; ret = rzv2m_i2c_clock_calculate(dev, priv); if (ret < 0) return ret; ret = pm_runtime_resume_and_get(dev); if (ret < 0) return ret; rzv2m_i2c_init(priv); pm_runtime_put(dev); return 0; } static const struct of_device_id rzv2m_i2c_ids[] = { { .compatible = "renesas,rzv2m-i2c" }, { } }; MODULE_DEVICE_TABLE(of, rzv2m_i2c_ids); static const struct dev_pm_ops rzv2m_i2c_pm_ops = { SYSTEM_SLEEP_PM_OPS(rzv2m_i2c_suspend, rzv2m_i2c_resume) }; static struct platform_driver rzv2m_i2c_driver = { .driver = { .name = "rzv2m-i2c", .of_match_table = rzv2m_i2c_ids, .pm = pm_sleep_ptr(&rzv2m_i2c_pm_ops), }, .probe = rzv2m_i2c_probe, .remove_new = rzv2m_i2c_remove, }; module_platform_driver(rzv2m_i2c_driver); MODULE_DESCRIPTION("RZ/V2M I2C bus driver"); MODULE_AUTHOR("Renesas Electronics Corporation"); MODULE_LICENSE("GPL");
linux-master
drivers/i2c/busses/i2c-rzv2m.c
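The IICB0WL/IICB0WH width calculation in rzv2m_i2c_clock_calculate() above can likewise be checked standalone. The sketch below applies the 100 kHz row of bitrate_configs (47% low, 3450 ns hold-time limit); the 100 MHz PCLK and 1000/300 ns SCL rise/fall times are assumed example inputs, whereas the driver takes them from i2c_parse_fw_timings() and the clock tree.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t pclk_hz = 100000000; /* assumed: 100 MHz PCLK */
	const unsigned int bus_hz = 100000; /* standard-mode bus */
	const unsigned int percent_low = 47;
	const unsigned int max_hold_ns = 3450;
	const unsigned int trf_ns = 1000 + 300; /* assumed SCL rise + fall */

	unsigned int total = pclk_hz / bus_hz;          /* PCLKs per SCL period */
	unsigned int trf_pclks = pclk_hz * trf_ns / 1000000000ULL;
	unsigned int wl = total * percent_low / 100;    /* IICB0WL */
	unsigned int wh = total - wl - trf_pclks;       /* IICB0WH */
	/* Data hold time = IICB0WL[9:2] / PCLK, as in the driver comment */
	unsigned int hold_ns = (uint64_t)(wl >> 2) * 1000000000ULL / pclk_hz;

	/* Both widths are 10-bit fields; hold time must stay under the limit */
	if (wl > 1023 || wh > 1023 || hold_ns > max_hold_ns) {
		printf("settings out of range\n");
		return 1;
	}
	printf("IICB0WL=%u IICB0WH=%u hold=%uns\n", wl, wh, hold_ns);
	return 0;
}

With these inputs the sketch yields IICB0WL=470, IICB0WH=400 and a 1170 ns data hold time, comfortably inside the 3450 ns standard-mode limit that the driver enforces.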
// SPDX-License-Identifier: GPL-2.0 /* * Driver for STMicroelectronics STM32 I2C controller * * This I2C controller is described in the STM32F429/439 SoC reference manual. * Please see the link to the documentation below: * http://www.st.com/resource/en/reference_manual/DM00031020.pdf * * Copyright (C) M'boumba Cedric Madianga 2016 * Copyright (C) STMicroelectronics 2017 * Author: M'boumba Cedric Madianga <[email protected]> * * This driver is based on i2c-st.c * */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/module.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/reset.h> #include "i2c-stm32.h" /* STM32F4 I2C offset registers */ #define STM32F4_I2C_CR1 0x00 #define STM32F4_I2C_CR2 0x04 #define STM32F4_I2C_DR 0x10 #define STM32F4_I2C_SR1 0x14 #define STM32F4_I2C_SR2 0x18 #define STM32F4_I2C_CCR 0x1C #define STM32F4_I2C_TRISE 0x20 #define STM32F4_I2C_FLTR 0x24 /* STM32F4 I2C control 1 */ #define STM32F4_I2C_CR1_POS BIT(11) #define STM32F4_I2C_CR1_ACK BIT(10) #define STM32F4_I2C_CR1_STOP BIT(9) #define STM32F4_I2C_CR1_START BIT(8) #define STM32F4_I2C_CR1_PE BIT(0) /* STM32F4 I2C control 2 */ #define STM32F4_I2C_CR2_FREQ_MASK GENMASK(5, 0) #define STM32F4_I2C_CR2_FREQ(n) ((n) & STM32F4_I2C_CR2_FREQ_MASK) #define STM32F4_I2C_CR2_ITBUFEN BIT(10) #define STM32F4_I2C_CR2_ITEVTEN BIT(9) #define STM32F4_I2C_CR2_ITERREN BIT(8) #define STM32F4_I2C_CR2_IRQ_MASK (STM32F4_I2C_CR2_ITBUFEN | \ STM32F4_I2C_CR2_ITEVTEN | \ STM32F4_I2C_CR2_ITERREN) /* STM32F4 I2C Status 1 */ #define STM32F4_I2C_SR1_AF BIT(10) #define STM32F4_I2C_SR1_ARLO BIT(9) #define STM32F4_I2C_SR1_BERR BIT(8) #define STM32F4_I2C_SR1_TXE BIT(7) #define STM32F4_I2C_SR1_RXNE BIT(6) #define STM32F4_I2C_SR1_BTF BIT(2) #define STM32F4_I2C_SR1_ADDR BIT(1) #define STM32F4_I2C_SR1_SB BIT(0) #define STM32F4_I2C_SR1_ITEVTEN_MASK (STM32F4_I2C_SR1_BTF | \ STM32F4_I2C_SR1_ADDR | \ STM32F4_I2C_SR1_SB) #define STM32F4_I2C_SR1_ITBUFEN_MASK (STM32F4_I2C_SR1_TXE | \ STM32F4_I2C_SR1_RXNE) #define STM32F4_I2C_SR1_ITERREN_MASK (STM32F4_I2C_SR1_AF | \ STM32F4_I2C_SR1_ARLO | \ STM32F4_I2C_SR1_BERR) /* STM32F4 I2C Status 2 */ #define STM32F4_I2C_SR2_BUSY BIT(1) /* STM32F4 I2C Control Clock */ #define STM32F4_I2C_CCR_CCR_MASK GENMASK(11, 0) #define STM32F4_I2C_CCR_CCR(n) ((n) & STM32F4_I2C_CCR_CCR_MASK) #define STM32F4_I2C_CCR_FS BIT(15) #define STM32F4_I2C_CCR_DUTY BIT(14) /* STM32F4 I2C Trise */ #define STM32F4_I2C_TRISE_VALUE_MASK GENMASK(5, 0) #define STM32F4_I2C_TRISE_VALUE(n) ((n) & STM32F4_I2C_TRISE_VALUE_MASK) #define STM32F4_I2C_MIN_STANDARD_FREQ 2U #define STM32F4_I2C_MIN_FAST_FREQ 6U #define STM32F4_I2C_MAX_FREQ 46U #define HZ_TO_MHZ 1000000 /** * struct stm32f4_i2c_msg - client specific data * @addr: 8-bit slave addr, including r/w bit * @count: number of bytes to be transferred * @buf: data buffer * @result: result of the transfer * @stop: last I2C msg to be sent, i.e. STOP to be generated */ struct stm32f4_i2c_msg { u8 addr; u32 count; u8 *buf; int result; bool stop; }; /** * struct stm32f4_i2c_dev - private data of the controller * @adap: I2C adapter for this controller * @dev: device for this controller * @base: virtual memory area * @complete: completion of I2C message * @clk: hw i2c clock * @speed: I2C clock frequency of the controller. 
Standard or Fast are supported * @parent_rate: I2C clock parent rate in MHz * @msg: I2C transfer information */ struct stm32f4_i2c_dev { struct i2c_adapter adap; struct device *dev; void __iomem *base; struct completion complete; struct clk *clk; int speed; int parent_rate; struct stm32f4_i2c_msg msg; }; static inline void stm32f4_i2c_set_bits(void __iomem *reg, u32 mask) { writel_relaxed(readl_relaxed(reg) | mask, reg); } static inline void stm32f4_i2c_clr_bits(void __iomem *reg, u32 mask) { writel_relaxed(readl_relaxed(reg) & ~mask, reg); } static void stm32f4_i2c_disable_irq(struct stm32f4_i2c_dev *i2c_dev) { void __iomem *reg = i2c_dev->base + STM32F4_I2C_CR2; stm32f4_i2c_clr_bits(reg, STM32F4_I2C_CR2_IRQ_MASK); } static int stm32f4_i2c_set_periph_clk_freq(struct stm32f4_i2c_dev *i2c_dev) { u32 freq; u32 cr2 = 0; i2c_dev->parent_rate = clk_get_rate(i2c_dev->clk); freq = DIV_ROUND_UP(i2c_dev->parent_rate, HZ_TO_MHZ); if (i2c_dev->speed == STM32_I2C_SPEED_STANDARD) { /* * To reach 100 kHz, the parent clk frequency should be between * a minimum value of 2 MHz and a maximum value of 46 MHz due * to hardware limitation */ if (freq < STM32F4_I2C_MIN_STANDARD_FREQ || freq > STM32F4_I2C_MAX_FREQ) { dev_err(i2c_dev->dev, "bad parent clk freq for standard mode\n"); return -EINVAL; } } else { /* * To be as close as possible to 400 kHz, the parent clk * frequency should be between a minimum value of 6 MHz and a * maximum value of 46 MHz due to hardware limitation */ if (freq < STM32F4_I2C_MIN_FAST_FREQ || freq > STM32F4_I2C_MAX_FREQ) { dev_err(i2c_dev->dev, "bad parent clk freq for fast mode\n"); return -EINVAL; } } cr2 |= STM32F4_I2C_CR2_FREQ(freq); writel_relaxed(cr2, i2c_dev->base + STM32F4_I2C_CR2); return 0; } static void stm32f4_i2c_set_rise_time(struct stm32f4_i2c_dev *i2c_dev) { u32 freq = DIV_ROUND_UP(i2c_dev->parent_rate, HZ_TO_MHZ); u32 trise; /* * These bits must be programmed with the maximum SCL rise time given in * the I2C bus specification, incremented by 1. * * In standard mode, the maximum allowed SCL rise time is 1000 ns. * If, in the I2C_CR2 register, the value of FREQ[5:0] bits is equal to * 0x08 so period = 125 ns therefore the TRISE[5:0] bits must be * programmed with 0x9. (1000 ns / 125 ns + 1) * So, for I2C standard mode TRISE = FREQ[5:0] + 1 * * In fast mode, the maximum allowed SCL rise time is 300 ns. * If, in the I2C_CR2 register, the value of FREQ[5:0] bits is equal to * 0x08 so period = 125 ns therefore the TRISE[5:0] bits must be * programmed with 0x3. (300 ns / 125 ns + 1) * So, for I2C fast mode TRISE = FREQ[5:0] * 300 / 1000 + 1 * * Function stm32f4_i2c_set_periph_clk_freq made sure that parent rate * is not higher than 46 MHz . As a result trise is at most 4 bits wide * and so fits into the TRISE bits [5:0]. 
*/ if (i2c_dev->speed == STM32_I2C_SPEED_STANDARD) trise = freq + 1; else trise = freq * 3 / 10 + 1; writel_relaxed(STM32F4_I2C_TRISE_VALUE(trise), i2c_dev->base + STM32F4_I2C_TRISE); } static void stm32f4_i2c_set_speed_mode(struct stm32f4_i2c_dev *i2c_dev) { u32 val; u32 ccr = 0; if (i2c_dev->speed == STM32_I2C_SPEED_STANDARD) { /* * In standard mode: * t_scl_high = t_scl_low = CCR * I2C parent clk period * So to reach 100 kHz, we have: * CCR = I2C parent rate / (100 kHz * 2) * * For example with parent rate = 2 MHz: * CCR = 2000000 / (100000 * 2) = 10 * t_scl_high = t_scl_low = 10 * (1 / 2000000) = 5000 ns * t_scl_high + t_scl_low = 10000 ns so 100 kHz is reached * * Function stm32f4_i2c_set_periph_clk_freq made sure that * parent rate is not higher than 46 MHz . As a result val * is at most 8 bits wide and so fits into the CCR bits [11:0]. */ val = i2c_dev->parent_rate / (I2C_MAX_STANDARD_MODE_FREQ * 2); } else { /* * In fast mode, we compute CCR with duty = 0 as with low * frequencies we are not able to reach 400 kHz. * In that case: * t_scl_high = CCR * I2C parent clk period * t_scl_low = 2 * CCR * I2C parent clk period * So, CCR = I2C parent rate / (400 kHz * 3) * * For example with parent rate = 6 MHz: * CCR = 6000000 / (400000 * 3) = 5 * t_scl_high = 5 * (1 / 6000000) = 833 ns > 600 ns * t_scl_low = 2 * 5 * (1 / 6000000) = 1667 ns > 1300 ns * t_scl_high + t_scl_low = 2500 ns so 400 kHz is reached * * Function stm32f4_i2c_set_periph_clk_freq made sure that * parent rate is not higher than 46 MHz . As a result val * is at most 6 bits wide and so fits into the CCR bits [11:0]. */ val = DIV_ROUND_UP(i2c_dev->parent_rate, I2C_MAX_FAST_MODE_FREQ * 3); /* Select Fast mode */ ccr |= STM32F4_I2C_CCR_FS; } ccr |= STM32F4_I2C_CCR_CCR(val); writel_relaxed(ccr, i2c_dev->base + STM32F4_I2C_CCR); } /** * stm32f4_i2c_hw_config() - Prepare I2C block * @i2c_dev: Controller's private data */ static int stm32f4_i2c_hw_config(struct stm32f4_i2c_dev *i2c_dev) { int ret; ret = stm32f4_i2c_set_periph_clk_freq(i2c_dev); if (ret) return ret; stm32f4_i2c_set_rise_time(i2c_dev); stm32f4_i2c_set_speed_mode(i2c_dev); /* Enable I2C */ writel_relaxed(STM32F4_I2C_CR1_PE, i2c_dev->base + STM32F4_I2C_CR1); return 0; } static int stm32f4_i2c_wait_free_bus(struct stm32f4_i2c_dev *i2c_dev) { u32 status; int ret; ret = readl_relaxed_poll_timeout(i2c_dev->base + STM32F4_I2C_SR2, status, !(status & STM32F4_I2C_SR2_BUSY), 10, 1000); if (ret) { dev_dbg(i2c_dev->dev, "bus not free\n"); ret = -EBUSY; } return ret; } /** * stm32f4_i2c_write_byte() - Write a byte in the data register * @i2c_dev: Controller's private data * @byte: Data to write in the register */ static void stm32f4_i2c_write_byte(struct stm32f4_i2c_dev *i2c_dev, u8 byte) { writel_relaxed(byte, i2c_dev->base + STM32F4_I2C_DR); } /** * stm32f4_i2c_write_msg() - Fill the data register in write mode * @i2c_dev: Controller's private data * * This function fills the data register with I2C transfer buffer */ static void stm32f4_i2c_write_msg(struct stm32f4_i2c_dev *i2c_dev) { struct stm32f4_i2c_msg *msg = &i2c_dev->msg; stm32f4_i2c_write_byte(i2c_dev, *msg->buf++); msg->count--; } static void stm32f4_i2c_read_msg(struct stm32f4_i2c_dev *i2c_dev) { struct stm32f4_i2c_msg *msg = &i2c_dev->msg; u32 rbuf; rbuf = readl_relaxed(i2c_dev->base + STM32F4_I2C_DR); *msg->buf++ = rbuf; msg->count--; } static void stm32f4_i2c_terminate_xfer(struct stm32f4_i2c_dev *i2c_dev) { struct stm32f4_i2c_msg *msg = &i2c_dev->msg; void __iomem *reg; stm32f4_i2c_disable_irq(i2c_dev); reg = 
i2c_dev->base + STM32F4_I2C_CR1; if (msg->stop) stm32f4_i2c_set_bits(reg, STM32F4_I2C_CR1_STOP); else stm32f4_i2c_set_bits(reg, STM32F4_I2C_CR1_START); complete(&i2c_dev->complete); } /** * stm32f4_i2c_handle_write() - Handle FIFO empty interrupt in case of write * @i2c_dev: Controller's private data */ static void stm32f4_i2c_handle_write(struct stm32f4_i2c_dev *i2c_dev) { struct stm32f4_i2c_msg *msg = &i2c_dev->msg; void __iomem *reg = i2c_dev->base + STM32F4_I2C_CR2; if (msg->count) { stm32f4_i2c_write_msg(i2c_dev); if (!msg->count) { /* * Disable buffer interrupts for RX not empty and TX * empty events */ stm32f4_i2c_clr_bits(reg, STM32F4_I2C_CR2_ITBUFEN); } } else { stm32f4_i2c_terminate_xfer(i2c_dev); } } /** * stm32f4_i2c_handle_read() - Handle FIFO empty interrupt in case of read * @i2c_dev: Controller's private data * * This function is called when a new data is received in data register */ static void stm32f4_i2c_handle_read(struct stm32f4_i2c_dev *i2c_dev) { struct stm32f4_i2c_msg *msg = &i2c_dev->msg; void __iomem *reg = i2c_dev->base + STM32F4_I2C_CR2; switch (msg->count) { case 1: stm32f4_i2c_disable_irq(i2c_dev); stm32f4_i2c_read_msg(i2c_dev); complete(&i2c_dev->complete); break; /* * For 2-byte reception, 3-byte reception and for Data N-2, N-1 and N * for N-byte reception with N > 3, we do not have to read the data * register when RX not empty event occurs as we have to wait for byte * transferred finished event before reading data. * So, here we just disable buffer interrupt in order to avoid another * system preemption due to RX not empty event. */ case 2: case 3: stm32f4_i2c_clr_bits(reg, STM32F4_I2C_CR2_ITBUFEN); break; /* * For N byte reception with N > 3 we directly read data register * until N-2 data. */ default: stm32f4_i2c_read_msg(i2c_dev); } } /** * stm32f4_i2c_handle_rx_done() - Handle byte transfer finished interrupt * in case of read * @i2c_dev: Controller's private data * * This function is called when a new data is received in the shift register * but data register has not been read yet. */ static void stm32f4_i2c_handle_rx_done(struct stm32f4_i2c_dev *i2c_dev) { struct stm32f4_i2c_msg *msg = &i2c_dev->msg; void __iomem *reg; u32 mask; int i; switch (msg->count) { case 2: /* * In order to correctly send the Stop or Repeated Start * condition on the I2C bus, the STOP/START bit has to be set * before reading the last two bytes (data N-1 and N). 
* After that, we could read the last two bytes, disable * remaining interrupts and notify the end of xfer to the * client */ reg = i2c_dev->base + STM32F4_I2C_CR1; if (msg->stop) stm32f4_i2c_set_bits(reg, STM32F4_I2C_CR1_STOP); else stm32f4_i2c_set_bits(reg, STM32F4_I2C_CR1_START); for (i = 2; i > 0; i--) stm32f4_i2c_read_msg(i2c_dev); reg = i2c_dev->base + STM32F4_I2C_CR2; mask = STM32F4_I2C_CR2_ITEVTEN | STM32F4_I2C_CR2_ITERREN; stm32f4_i2c_clr_bits(reg, mask); complete(&i2c_dev->complete); break; case 3: /* * In order to correctly generate the NACK pulse after the last * received data byte, we have to enable NACK before reading N-2 * data */ reg = i2c_dev->base + STM32F4_I2C_CR1; stm32f4_i2c_clr_bits(reg, STM32F4_I2C_CR1_ACK); stm32f4_i2c_read_msg(i2c_dev); break; default: stm32f4_i2c_read_msg(i2c_dev); } } /** * stm32f4_i2c_handle_rx_addr() - Handle address matched interrupt in case of * master receiver * @i2c_dev: Controller's private data */ static void stm32f4_i2c_handle_rx_addr(struct stm32f4_i2c_dev *i2c_dev) { struct stm32f4_i2c_msg *msg = &i2c_dev->msg; u32 cr1; switch (msg->count) { case 0: stm32f4_i2c_terminate_xfer(i2c_dev); /* Clear ADDR flag */ readl_relaxed(i2c_dev->base + STM32F4_I2C_SR2); break; case 1: /* * Single byte reception: * Enable NACK and reset POS (Acknowledge position). * Then, clear ADDR flag and set STOP or RepSTART. * In that way, the NACK and STOP or RepStart pulses will be * sent as soon as the byte will be received in shift register */ cr1 = readl_relaxed(i2c_dev->base + STM32F4_I2C_CR1); cr1 &= ~(STM32F4_I2C_CR1_ACK | STM32F4_I2C_CR1_POS); writel_relaxed(cr1, i2c_dev->base + STM32F4_I2C_CR1); readl_relaxed(i2c_dev->base + STM32F4_I2C_SR2); if (msg->stop) cr1 |= STM32F4_I2C_CR1_STOP; else cr1 |= STM32F4_I2C_CR1_START; writel_relaxed(cr1, i2c_dev->base + STM32F4_I2C_CR1); break; case 2: /* * 2-byte reception: * Enable NACK, set POS (NACK position) and clear ADDR flag. * In that way, NACK will be sent for the next byte which will * be received in the shift register instead of the current * one. */ cr1 = readl_relaxed(i2c_dev->base + STM32F4_I2C_CR1); cr1 &= ~STM32F4_I2C_CR1_ACK; cr1 |= STM32F4_I2C_CR1_POS; writel_relaxed(cr1, i2c_dev->base + STM32F4_I2C_CR1); readl_relaxed(i2c_dev->base + STM32F4_I2C_SR2); break; default: /* * N-byte reception: * Enable ACK, reset POS (ACK position) and clear ADDR flag. 
* In that way, ACK will be sent as soon as the current byte * will be received in the shift register */ cr1 = readl_relaxed(i2c_dev->base + STM32F4_I2C_CR1); cr1 |= STM32F4_I2C_CR1_ACK; cr1 &= ~STM32F4_I2C_CR1_POS; writel_relaxed(cr1, i2c_dev->base + STM32F4_I2C_CR1); readl_relaxed(i2c_dev->base + STM32F4_I2C_SR2); break; } } /** * stm32f4_i2c_isr_event() - Interrupt routine for I2C bus event * @irq: interrupt number * @data: Controller's private data */ static irqreturn_t stm32f4_i2c_isr_event(int irq, void *data) { struct stm32f4_i2c_dev *i2c_dev = data; struct stm32f4_i2c_msg *msg = &i2c_dev->msg; u32 possible_status = STM32F4_I2C_SR1_ITEVTEN_MASK; u32 status, ien, event, cr2; cr2 = readl_relaxed(i2c_dev->base + STM32F4_I2C_CR2); ien = cr2 & STM32F4_I2C_CR2_IRQ_MASK; /* Update possible_status if buffer interrupt is enabled */ if (ien & STM32F4_I2C_CR2_ITBUFEN) possible_status |= STM32F4_I2C_SR1_ITBUFEN_MASK; status = readl_relaxed(i2c_dev->base + STM32F4_I2C_SR1); event = status & possible_status; if (!event) { dev_dbg(i2c_dev->dev, "spurious evt irq (status=0x%08x, ien=0x%08x)\n", status, ien); return IRQ_NONE; } /* Start condition generated */ if (event & STM32F4_I2C_SR1_SB) stm32f4_i2c_write_byte(i2c_dev, msg->addr); /* I2C Address sent */ if (event & STM32F4_I2C_SR1_ADDR) { if (msg->addr & I2C_M_RD) stm32f4_i2c_handle_rx_addr(i2c_dev); else readl_relaxed(i2c_dev->base + STM32F4_I2C_SR2); /* * Enable buffer interrupts for RX not empty and TX empty * events */ cr2 |= STM32F4_I2C_CR2_ITBUFEN; writel_relaxed(cr2, i2c_dev->base + STM32F4_I2C_CR2); } /* TX empty */ if ((event & STM32F4_I2C_SR1_TXE) && !(msg->addr & I2C_M_RD)) stm32f4_i2c_handle_write(i2c_dev); /* RX not empty */ if ((event & STM32F4_I2C_SR1_RXNE) && (msg->addr & I2C_M_RD)) stm32f4_i2c_handle_read(i2c_dev); /* * The BTF (Byte Transfer finished) event occurs when: * - in reception : a new byte is received in the shift register * but the previous byte has not been read yet from data register * - in transmission: a new byte should be sent but the data register * has not been written yet */ if (event & STM32F4_I2C_SR1_BTF) { if (msg->addr & I2C_M_RD) stm32f4_i2c_handle_rx_done(i2c_dev); else stm32f4_i2c_handle_write(i2c_dev); } return IRQ_HANDLED; } /** * stm32f4_i2c_isr_error() - Interrupt routine for I2C bus error * @irq: interrupt number * @data: Controller's private data */ static irqreturn_t stm32f4_i2c_isr_error(int irq, void *data) { struct stm32f4_i2c_dev *i2c_dev = data; struct stm32f4_i2c_msg *msg = &i2c_dev->msg; void __iomem *reg; u32 status; status = readl_relaxed(i2c_dev->base + STM32F4_I2C_SR1); /* Arbitration lost */ if (status & STM32F4_I2C_SR1_ARLO) { status &= ~STM32F4_I2C_SR1_ARLO; writel_relaxed(status, i2c_dev->base + STM32F4_I2C_SR1); msg->result = -EAGAIN; } /* * Acknowledge failure: * In master transmitter mode a Stop must be generated by software */ if (status & STM32F4_I2C_SR1_AF) { if (!(msg->addr & I2C_M_RD)) { reg = i2c_dev->base + STM32F4_I2C_CR1; stm32f4_i2c_set_bits(reg, STM32F4_I2C_CR1_STOP); } status &= ~STM32F4_I2C_SR1_AF; writel_relaxed(status, i2c_dev->base + STM32F4_I2C_SR1); msg->result = -EIO; } /* Bus error */ if (status & STM32F4_I2C_SR1_BERR) { status &= ~STM32F4_I2C_SR1_BERR; writel_relaxed(status, i2c_dev->base + STM32F4_I2C_SR1); msg->result = -EIO; } stm32f4_i2c_disable_irq(i2c_dev); complete(&i2c_dev->complete); return IRQ_HANDLED; } /** * stm32f4_i2c_xfer_msg() - Transfer a single I2C message * @i2c_dev: Controller's private data * @msg: I2C message to transfer * @is_first: 
first message of the sequence * @is_last: last message of the sequence */ static int stm32f4_i2c_xfer_msg(struct stm32f4_i2c_dev *i2c_dev, struct i2c_msg *msg, bool is_first, bool is_last) { struct stm32f4_i2c_msg *f4_msg = &i2c_dev->msg; void __iomem *reg = i2c_dev->base + STM32F4_I2C_CR1; unsigned long timeout; u32 mask; int ret; f4_msg->addr = i2c_8bit_addr_from_msg(msg); f4_msg->buf = msg->buf; f4_msg->count = msg->len; f4_msg->result = 0; f4_msg->stop = is_last; reinit_completion(&i2c_dev->complete); /* Enable events and errors interrupts */ mask = STM32F4_I2C_CR2_ITEVTEN | STM32F4_I2C_CR2_ITERREN; stm32f4_i2c_set_bits(i2c_dev->base + STM32F4_I2C_CR2, mask); if (is_first) { ret = stm32f4_i2c_wait_free_bus(i2c_dev); if (ret) return ret; /* START generation */ stm32f4_i2c_set_bits(reg, STM32F4_I2C_CR1_START); } timeout = wait_for_completion_timeout(&i2c_dev->complete, i2c_dev->adap.timeout); ret = f4_msg->result; if (!timeout) ret = -ETIMEDOUT; return ret; } /** * stm32f4_i2c_xfer() - Transfer combined I2C message * @i2c_adap: Adapter pointer to the controller * @msgs: Pointer to data to be written. * @num: Number of messages to be executed */ static int stm32f4_i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msgs[], int num) { struct stm32f4_i2c_dev *i2c_dev = i2c_get_adapdata(i2c_adap); int ret, i; ret = clk_enable(i2c_dev->clk); if (ret) { dev_err(i2c_dev->dev, "Failed to enable clock\n"); return ret; } for (i = 0; i < num && !ret; i++) ret = stm32f4_i2c_xfer_msg(i2c_dev, &msgs[i], i == 0, i == num - 1); clk_disable(i2c_dev->clk); return (ret < 0) ? ret : num; } static u32 stm32f4_i2c_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static const struct i2c_algorithm stm32f4_i2c_algo = { .master_xfer = stm32f4_i2c_xfer, .functionality = stm32f4_i2c_func, }; static int stm32f4_i2c_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct stm32f4_i2c_dev *i2c_dev; struct resource *res; u32 irq_event, irq_error, clk_rate; struct i2c_adapter *adap; struct reset_control *rst; int ret; i2c_dev = devm_kzalloc(&pdev->dev, sizeof(*i2c_dev), GFP_KERNEL); if (!i2c_dev) return -ENOMEM; i2c_dev->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(i2c_dev->base)) return PTR_ERR(i2c_dev->base); irq_event = irq_of_parse_and_map(np, 0); if (!irq_event) { dev_err(&pdev->dev, "IRQ event missing or invalid\n"); return -EINVAL; } irq_error = irq_of_parse_and_map(np, 1); if (!irq_error) { dev_err(&pdev->dev, "IRQ error missing or invalid\n"); return -EINVAL; } i2c_dev->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(i2c_dev->clk)) { dev_err(&pdev->dev, "Error: Missing controller clock\n"); return PTR_ERR(i2c_dev->clk); } ret = clk_prepare_enable(i2c_dev->clk); if (ret) { dev_err(i2c_dev->dev, "Failed to prepare_enable clock\n"); return ret; } rst = devm_reset_control_get_exclusive(&pdev->dev, NULL); if (IS_ERR(rst)) { ret = dev_err_probe(&pdev->dev, PTR_ERR(rst), "Error: Missing reset ctrl\n"); goto clk_free; } reset_control_assert(rst); udelay(2); reset_control_deassert(rst); i2c_dev->speed = STM32_I2C_SPEED_STANDARD; ret = of_property_read_u32(np, "clock-frequency", &clk_rate); if (!ret && clk_rate >= I2C_MAX_FAST_MODE_FREQ) i2c_dev->speed = STM32_I2C_SPEED_FAST; i2c_dev->dev = &pdev->dev; ret = devm_request_irq(&pdev->dev, irq_event, stm32f4_i2c_isr_event, 0, pdev->name, i2c_dev); if (ret) { dev_err(&pdev->dev, "Failed to request irq event %i\n", irq_event); goto clk_free; } ret = devm_request_irq(&pdev->dev, 
irq_error, stm32f4_i2c_isr_error, 0, pdev->name, i2c_dev); if (ret) { dev_err(&pdev->dev, "Failed to request irq error %i\n", irq_error); goto clk_free; } ret = stm32f4_i2c_hw_config(i2c_dev); if (ret) goto clk_free; adap = &i2c_dev->adap; i2c_set_adapdata(adap, i2c_dev); snprintf(adap->name, sizeof(adap->name), "STM32 I2C(%pa)", &res->start); adap->owner = THIS_MODULE; adap->timeout = 2 * HZ; adap->retries = 0; adap->algo = &stm32f4_i2c_algo; adap->dev.parent = &pdev->dev; adap->dev.of_node = pdev->dev.of_node; init_completion(&i2c_dev->complete); ret = i2c_add_adapter(adap); if (ret) goto clk_free; platform_set_drvdata(pdev, i2c_dev); clk_disable(i2c_dev->clk); dev_info(i2c_dev->dev, "STM32F4 I2C driver registered\n"); return 0; clk_free: clk_disable_unprepare(i2c_dev->clk); return ret; } static void stm32f4_i2c_remove(struct platform_device *pdev) { struct stm32f4_i2c_dev *i2c_dev = platform_get_drvdata(pdev); i2c_del_adapter(&i2c_dev->adap); clk_unprepare(i2c_dev->clk); } static const struct of_device_id stm32f4_i2c_match[] = { { .compatible = "st,stm32f4-i2c", }, {}, }; MODULE_DEVICE_TABLE(of, stm32f4_i2c_match); static struct platform_driver stm32f4_i2c_driver = { .driver = { .name = "stm32f4-i2c", .of_match_table = stm32f4_i2c_match, }, .probe = stm32f4_i2c_probe, .remove_new = stm32f4_i2c_remove, }; module_platform_driver(stm32f4_i2c_driver); MODULE_AUTHOR("M'boumba Cedric Madianga <[email protected]>"); MODULE_DESCRIPTION("STMicroelectronics STM32F4 I2C driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/i2c/busses/i2c-stm32f4.c
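The CCR and TRISE arithmetic documented in stm32f4_i2c_set_rise_time() and stm32f4_i2c_set_speed_mode() above can be sanity-checked offline. Below is a minimal userspace sketch, not kernel code: it assumes only the formulas from the driver comments, reimplements DIV_ROUND_UP locally, and approximates the FREQ field as the parent rate in whole MHz; the frequency macro names simply mirror the driver's for readability.

#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define I2C_MAX_STANDARD_MODE_FREQ 100000U
#define I2C_MAX_FAST_MODE_FREQ 400000U

static void stm32f4_timings(uint32_t parent_rate, int fast)
{
	uint32_t freq = parent_rate / 1000000;	/* FREQ field: parent clock in MHz */
	uint32_t trise, ccr;

	if (!fast) {
		trise = freq + 1;		/* 1000 ns max SCL rise time in Sm */
		ccr = parent_rate / (I2C_MAX_STANDARD_MODE_FREQ * 2);
	} else {
		trise = freq * 3 / 10 + 1;	/* 300 ns max SCL rise time in Fm */
		ccr = DIV_ROUND_UP(parent_rate, I2C_MAX_FAST_MODE_FREQ * 3);
	}
	printf("parent=%8u Hz %s: TRISE=%2u CCR=%3u\n",
	       parent_rate, fast ? "fast" : "std ", trise, ccr);
}

int main(void)
{
	stm32f4_timings(2000000, 0);	/* CCR = 10, the standard-mode example above */
	stm32f4_timings(6000000, 1);	/* CCR = 5, the fast-mode example above */
	stm32f4_timings(46000000, 0);	/* max parent rate: CCR = 230 */
	stm32f4_timings(46000000, 1);	/* max parent rate: CCR = 39 */
	return 0;
}

Running it reproduces the values worked out in the driver comments and confirms that the 46 MHz parent-rate cap keeps CCR within its 12-bit field in both modes.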
// SPDX-License-Identifier: GPL-2.0-or-later /* * Freescale CPM1/CPM2 I2C interface. * Copyright (c) 1999 Dan Malek ([email protected]). * * moved into proper i2c interface; * Brad Parker ([email protected]) * * Parts from dbox2_i2c.c (cvs.tuxbox.org) * (C) 2000-2001 Felix Domke ([email protected]), Gillem ([email protected]) * * (C) 2007 Montavista Software, Inc. * Vitaly Bordug <[email protected]> * * Converted to of_platform_device. Renamed to i2c-cpm.c. * (C) 2007,2008 Jochen Friedrich <[email protected]> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/errno.h> #include <linux/stddef.h> #include <linux/i2c.h> #include <linux/io.h> #include <linux/dma-mapping.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/platform_device.h> #include <sysdev/fsl_soc.h> #include <asm/cpm.h> /* Try to define this if you have an older CPU (earlier than rev D4) */ /* However, better use a GPIO based bitbang driver in this case :/ */ #undef I2C_CHIP_ERRATA #define CPM_MAX_READ 513 #define CPM_MAXBD 4 #define I2C_EB (0x10) /* Big endian mode */ #define I2C_EB_CPM2 (0x30) /* Big endian mode, memory snoop */ #define DPRAM_BASE ((u8 __iomem __force *)cpm_muram_addr(0)) /* I2C parameter RAM. */ struct i2c_ram { ushort rbase; /* Rx Buffer descriptor base address */ ushort tbase; /* Tx Buffer descriptor base address */ u_char rfcr; /* Rx function code */ u_char tfcr; /* Tx function code */ ushort mrblr; /* Max receive buffer length */ uint rstate; /* Internal */ uint rdp; /* Internal */ ushort rbptr; /* Rx Buffer descriptor pointer */ ushort rbc; /* Internal */ uint rxtmp; /* Internal */ uint tstate; /* Internal */ uint tdp; /* Internal */ ushort tbptr; /* Tx Buffer descriptor pointer */ ushort tbc; /* Internal */ uint txtmp; /* Internal */ char res1[4]; /* Reserved */ ushort rpbase; /* Relocation pointer */ char res2[2]; /* Reserved */ /* The following elements are only for CPM2 */ char res3[4]; /* Reserved */ uint sdmatmp; /* Internal */ }; #define I2COM_START 0x80 #define I2COM_MASTER 0x01 #define I2CER_TXE 0x10 #define I2CER_BUSY 0x04 #define I2CER_TXB 0x02 #define I2CER_RXB 0x01 #define I2MOD_EN 0x01 /* I2C Registers */ struct i2c_reg { u8 i2mod; u8 res1[3]; u8 i2add; u8 res2[3]; u8 i2brg; u8 res3[3]; u8 i2com; u8 res4[3]; u8 i2cer; u8 res5[3]; u8 i2cmr; }; struct cpm_i2c { char *base; struct platform_device *ofdev; struct i2c_adapter adap; uint dp_addr; int version; /* CPM1=1, CPM2=2 */ int irq; int cp_command; int freq; struct i2c_reg __iomem *i2c_reg; struct i2c_ram __iomem *i2c_ram; u16 i2c_addr; wait_queue_head_t i2c_wait; cbd_t __iomem *tbase; cbd_t __iomem *rbase; u_char *txbuf[CPM_MAXBD]; u_char *rxbuf[CPM_MAXBD]; dma_addr_t txdma[CPM_MAXBD]; dma_addr_t rxdma[CPM_MAXBD]; }; static irqreturn_t cpm_i2c_interrupt(int irq, void *dev_id) { struct cpm_i2c *cpm; struct i2c_reg __iomem *i2c_reg; struct i2c_adapter *adap = dev_id; int i; cpm = i2c_get_adapdata(dev_id); i2c_reg = cpm->i2c_reg; /* Clear interrupt. */ i = in_8(&i2c_reg->i2cer); out_8(&i2c_reg->i2cer, i); dev_dbg(&adap->dev, "Interrupt: %x\n", i); wake_up(&cpm->i2c_wait); return i ? IRQ_HANDLED : IRQ_NONE; } static void cpm_reset_i2c_params(struct cpm_i2c *cpm) { struct i2c_ram __iomem *i2c_ram = cpm->i2c_ram; /* Set up the I2C parameters in the parameter ram. 
*/ out_be16(&i2c_ram->tbase, (u8 __iomem *)cpm->tbase - DPRAM_BASE); out_be16(&i2c_ram->rbase, (u8 __iomem *)cpm->rbase - DPRAM_BASE); if (cpm->version == 1) { out_8(&i2c_ram->tfcr, I2C_EB); out_8(&i2c_ram->rfcr, I2C_EB); } else { out_8(&i2c_ram->tfcr, I2C_EB_CPM2); out_8(&i2c_ram->rfcr, I2C_EB_CPM2); } out_be16(&i2c_ram->mrblr, CPM_MAX_READ); out_be32(&i2c_ram->rstate, 0); out_be32(&i2c_ram->rdp, 0); out_be16(&i2c_ram->rbptr, 0); out_be16(&i2c_ram->rbc, 0); out_be32(&i2c_ram->rxtmp, 0); out_be32(&i2c_ram->tstate, 0); out_be32(&i2c_ram->tdp, 0); out_be16(&i2c_ram->tbptr, 0); out_be16(&i2c_ram->tbc, 0); out_be32(&i2c_ram->txtmp, 0); } static void cpm_i2c_force_close(struct i2c_adapter *adap) { struct cpm_i2c *cpm = i2c_get_adapdata(adap); struct i2c_reg __iomem *i2c_reg = cpm->i2c_reg; dev_dbg(&adap->dev, "cpm_i2c_force_close()\n"); cpm_command(cpm->cp_command, CPM_CR_CLOSE_RX_BD); out_8(&i2c_reg->i2cmr, 0x00); /* Disable all interrupts */ out_8(&i2c_reg->i2cer, 0xff); } static void cpm_i2c_parse_message(struct i2c_adapter *adap, struct i2c_msg *pmsg, int num, int tx, int rx) { cbd_t __iomem *tbdf; cbd_t __iomem *rbdf; u_char addr; u_char *tb; u_char *rb; struct cpm_i2c *cpm = i2c_get_adapdata(adap); tbdf = cpm->tbase + tx; rbdf = cpm->rbase + rx; addr = i2c_8bit_addr_from_msg(pmsg); tb = cpm->txbuf[tx]; rb = cpm->rxbuf[rx]; /* Align read buffer */ rb = (u_char *) (((ulong) rb + 1) & ~1); tb[0] = addr; /* Device address byte w/rw flag */ out_be16(&tbdf->cbd_datlen, pmsg->len + 1); out_be16(&tbdf->cbd_sc, 0); if (!(pmsg->flags & I2C_M_NOSTART)) setbits16(&tbdf->cbd_sc, BD_I2C_START); if (tx + 1 == num) setbits16(&tbdf->cbd_sc, BD_SC_LAST | BD_SC_WRAP); if (pmsg->flags & I2C_M_RD) { /* * To read, we need an empty buffer of the proper length. * All that is used is the first byte for address, the remainder * is just used for timing (and doesn't really have to exist). 
*/ dev_dbg(&adap->dev, "cpm_i2c_read(abyte=0x%x)\n", addr); out_be16(&rbdf->cbd_datlen, 0); out_be16(&rbdf->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT); if (rx + 1 == CPM_MAXBD) setbits16(&rbdf->cbd_sc, BD_SC_WRAP); eieio(); setbits16(&tbdf->cbd_sc, BD_SC_READY); } else { dev_dbg(&adap->dev, "cpm_i2c_write(abyte=0x%x)\n", addr); memcpy(tb+1, pmsg->buf, pmsg->len); eieio(); setbits16(&tbdf->cbd_sc, BD_SC_READY | BD_SC_INTRPT); } } static int cpm_i2c_check_message(struct i2c_adapter *adap, struct i2c_msg *pmsg, int tx, int rx) { cbd_t __iomem *tbdf; cbd_t __iomem *rbdf; u_char *tb; u_char *rb; struct cpm_i2c *cpm = i2c_get_adapdata(adap); tbdf = cpm->tbase + tx; rbdf = cpm->rbase + rx; tb = cpm->txbuf[tx]; rb = cpm->rxbuf[rx]; /* Align read buffer */ rb = (u_char *) (((uint) rb + 1) & ~1); eieio(); if (pmsg->flags & I2C_M_RD) { dev_dbg(&adap->dev, "tx sc 0x%04x, rx sc 0x%04x\n", in_be16(&tbdf->cbd_sc), in_be16(&rbdf->cbd_sc)); if (in_be16(&tbdf->cbd_sc) & BD_SC_NAK) { dev_dbg(&adap->dev, "I2C read; No ack\n"); return -ENXIO; } if (in_be16(&rbdf->cbd_sc) & BD_SC_EMPTY) { dev_err(&adap->dev, "I2C read; complete but rbuf empty\n"); return -EREMOTEIO; } if (in_be16(&rbdf->cbd_sc) & BD_SC_OV) { dev_err(&adap->dev, "I2C read; Overrun\n"); return -EREMOTEIO; } memcpy(pmsg->buf, rb, pmsg->len); } else { dev_dbg(&adap->dev, "tx sc %d 0x%04x\n", tx, in_be16(&tbdf->cbd_sc)); if (in_be16(&tbdf->cbd_sc) & BD_SC_NAK) { dev_dbg(&adap->dev, "I2C write; No ack\n"); return -ENXIO; } if (in_be16(&tbdf->cbd_sc) & BD_SC_UN) { dev_err(&adap->dev, "I2C write; Underrun\n"); return -EIO; } if (in_be16(&tbdf->cbd_sc) & BD_SC_CL) { dev_err(&adap->dev, "I2C write; Collision\n"); return -EIO; } } return 0; } static int cpm_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct cpm_i2c *cpm = i2c_get_adapdata(adap); struct i2c_reg __iomem *i2c_reg = cpm->i2c_reg; struct i2c_ram __iomem *i2c_ram = cpm->i2c_ram; struct i2c_msg *pmsg; int ret; int tptr; int rptr; cbd_t __iomem *tbdf; cbd_t __iomem *rbdf; /* Reset to use first buffer */ out_be16(&i2c_ram->rbptr, in_be16(&i2c_ram->rbase)); out_be16(&i2c_ram->tbptr, in_be16(&i2c_ram->tbase)); tbdf = cpm->tbase; rbdf = cpm->rbase; tptr = 0; rptr = 0; /* * If there was a collision in the last i2c transaction, * Set I2COM_MASTER as it was cleared during collision. 
*/ if (in_be16(&tbdf->cbd_sc) & BD_SC_CL) { out_8(&cpm->i2c_reg->i2com, I2COM_MASTER); } while (tptr < num) { pmsg = &msgs[tptr]; dev_dbg(&adap->dev, "R: %d T: %d\n", rptr, tptr); cpm_i2c_parse_message(adap, pmsg, num, tptr, rptr); if (pmsg->flags & I2C_M_RD) rptr++; tptr++; } /* Start transfer now */ /* Enable RX/TX/Error interrupts */ out_8(&i2c_reg->i2cmr, I2CER_TXE | I2CER_TXB | I2CER_RXB); out_8(&i2c_reg->i2cer, 0xff); /* Clear interrupt status */ /* Chip bug, set enable here */ setbits8(&i2c_reg->i2mod, I2MOD_EN); /* Enable */ /* Begin transmission */ setbits8(&i2c_reg->i2com, I2COM_START); tptr = 0; rptr = 0; while (tptr < num) { /* Check for outstanding messages */ dev_dbg(&adap->dev, "test ready.\n"); pmsg = &msgs[tptr]; if (pmsg->flags & I2C_M_RD) ret = wait_event_timeout(cpm->i2c_wait, (in_be16(&tbdf[tptr].cbd_sc) & BD_SC_NAK) || !(in_be16(&rbdf[rptr].cbd_sc) & BD_SC_EMPTY), 1 * HZ); else ret = wait_event_timeout(cpm->i2c_wait, !(in_be16(&tbdf[tptr].cbd_sc) & BD_SC_READY), 1 * HZ); if (ret == 0) { ret = -EREMOTEIO; dev_err(&adap->dev, "I2C transfer: timeout\n"); goto out_err; } if (ret > 0) { dev_dbg(&adap->dev, "ready.\n"); ret = cpm_i2c_check_message(adap, pmsg, tptr, rptr); tptr++; if (pmsg->flags & I2C_M_RD) rptr++; if (ret) goto out_err; } } #ifdef I2C_CHIP_ERRATA /* * Chip errata, clear enable. This is not needed on rev D4 CPUs. * Disabling I2C too early may cause too short stop condition */ udelay(4); clrbits8(&i2c_reg->i2mod, I2MOD_EN); #endif return (num); out_err: cpm_i2c_force_close(adap); #ifdef I2C_CHIP_ERRATA /* * Chip errata, clear enable. This is not needed on rev D4 CPUs. */ clrbits8(&i2c_reg->i2mod, I2MOD_EN); #endif return ret; } static u32 cpm_i2c_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK); } /* -----exported algorithm data: ------------------------------------- */ static const struct i2c_algorithm cpm_i2c_algo = { .master_xfer = cpm_i2c_xfer, .functionality = cpm_i2c_func, }; /* CPM_MAX_READ is also limiting writes according to the code! */ static const struct i2c_adapter_quirks cpm_i2c_quirks = { .max_num_msgs = CPM_MAXBD, .max_read_len = CPM_MAX_READ, .max_write_len = CPM_MAX_READ, }; static const struct i2c_adapter cpm_ops = { .owner = THIS_MODULE, .name = "i2c-cpm", .algo = &cpm_i2c_algo, .quirks = &cpm_i2c_quirks, }; static int cpm_i2c_setup(struct cpm_i2c *cpm) { struct platform_device *ofdev = cpm->ofdev; const u32 *data; int len, ret, i; void __iomem *i2c_base; cbd_t __iomem *tbdf; cbd_t __iomem *rbdf; unsigned char brg; dev_dbg(&cpm->ofdev->dev, "cpm_i2c_setup()\n"); init_waitqueue_head(&cpm->i2c_wait); cpm->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0); if (!cpm->irq) return -EINVAL; /* Install interrupt handler. */ ret = request_irq(cpm->irq, cpm_i2c_interrupt, 0, "cpm_i2c", &cpm->adap); if (ret) return ret; /* I2C parameter RAM */ i2c_base = of_iomap(ofdev->dev.of_node, 1); if (i2c_base == NULL) { ret = -EINVAL; goto out_irq; } if (of_device_is_compatible(ofdev->dev.of_node, "fsl,cpm1-i2c")) { /* Check for and use a microcode relocation patch. 
*/ cpm->i2c_ram = i2c_base; cpm->i2c_addr = in_be16(&cpm->i2c_ram->rpbase); /* * Maybe should use cpm_muram_alloc instead of hardcoding * this in micropatch.c */ if (cpm->i2c_addr) { cpm->i2c_ram = cpm_muram_addr(cpm->i2c_addr); iounmap(i2c_base); } cpm->version = 1; } else if (of_device_is_compatible(ofdev->dev.of_node, "fsl,cpm2-i2c")) { cpm->i2c_addr = cpm_muram_alloc(sizeof(struct i2c_ram), 64); cpm->i2c_ram = cpm_muram_addr(cpm->i2c_addr); out_be16(i2c_base, cpm->i2c_addr); iounmap(i2c_base); cpm->version = 2; } else { iounmap(i2c_base); ret = -EINVAL; goto out_irq; } /* I2C control/status registers */ cpm->i2c_reg = of_iomap(ofdev->dev.of_node, 0); if (cpm->i2c_reg == NULL) { ret = -EINVAL; goto out_ram; } data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len); if (!data || len != 4) { ret = -EINVAL; goto out_reg; } cpm->cp_command = *data; data = of_get_property(ofdev->dev.of_node, "linux,i2c-class", &len); if (data && len == 4) cpm->adap.class = *data; data = of_get_property(ofdev->dev.of_node, "clock-frequency", &len); if (data && len == 4) cpm->freq = *data; else cpm->freq = 60000; /* use 60kHz i2c clock by default */ /* * Allocate space for CPM_MAXBD transmit and receive buffer * descriptors in the DP ram. */ cpm->dp_addr = cpm_muram_alloc(sizeof(cbd_t) * 2 * CPM_MAXBD, 8); if (!cpm->dp_addr) { ret = -ENOMEM; goto out_reg; } cpm->tbase = cpm_muram_addr(cpm->dp_addr); cpm->rbase = cpm_muram_addr(cpm->dp_addr + sizeof(cbd_t) * CPM_MAXBD); /* Allocate TX and RX buffers */ tbdf = cpm->tbase; rbdf = cpm->rbase; for (i = 0; i < CPM_MAXBD; i++) { cpm->rxbuf[i] = dma_alloc_coherent(&cpm->ofdev->dev, CPM_MAX_READ + 1, &cpm->rxdma[i], GFP_KERNEL); if (!cpm->rxbuf[i]) { ret = -ENOMEM; goto out_muram; } out_be32(&rbdf[i].cbd_bufaddr, ((cpm->rxdma[i] + 1) & ~1)); cpm->txbuf[i] = dma_alloc_coherent(&cpm->ofdev->dev, CPM_MAX_READ + 1, &cpm->txdma[i], GFP_KERNEL); if (!cpm->txbuf[i]) { ret = -ENOMEM; goto out_muram; } out_be32(&tbdf[i].cbd_bufaddr, cpm->txdma[i]); } /* Initialize Tx/Rx parameters. */ cpm_reset_i2c_params(cpm); dev_dbg(&cpm->ofdev->dev, "i2c_ram 0x%p, i2c_addr 0x%04x, freq %d\n", cpm->i2c_ram, cpm->i2c_addr, cpm->freq); dev_dbg(&cpm->ofdev->dev, "tbase 0x%04x, rbase 0x%04x\n", (u8 __iomem *)cpm->tbase - DPRAM_BASE, (u8 __iomem *)cpm->rbase - DPRAM_BASE); cpm_command(cpm->cp_command, CPM_CR_INIT_TRX); /* * Select an invalid address. Just make sure we don't use loopback mode */ out_8(&cpm->i2c_reg->i2add, 0x7f << 1); /* * PDIV is set to 00 in i2mod, so brgclk/32 is used as input to the * i2c baud rate generator. This is divided by 2 x (DIV + 3) to get * the actual i2c bus frequency. */ brg = get_brgfreq() / (32 * 2 * cpm->freq) - 3; out_8(&cpm->i2c_reg->i2brg, brg); out_8(&cpm->i2c_reg->i2mod, 0x00); out_8(&cpm->i2c_reg->i2com, I2COM_MASTER); /* Master mode */ /* Disable interrupts. */ out_8(&cpm->i2c_reg->i2cmr, 0); out_8(&cpm->i2c_reg->i2cer, 0xff); return 0; out_muram: for (i = 0; i < CPM_MAXBD; i++) { if (cpm->rxbuf[i]) dma_free_coherent(&cpm->ofdev->dev, CPM_MAX_READ + 1, cpm->rxbuf[i], cpm->rxdma[i]); if (cpm->txbuf[i]) dma_free_coherent(&cpm->ofdev->dev, CPM_MAX_READ + 1, cpm->txbuf[i], cpm->txdma[i]); } cpm_muram_free(cpm->dp_addr); out_reg: iounmap(cpm->i2c_reg); out_ram: if ((cpm->version == 1) && (!cpm->i2c_addr)) iounmap(cpm->i2c_ram); if (cpm->version == 2) cpm_muram_free(cpm->i2c_addr); out_irq: free_irq(cpm->irq, &cpm->adap); return ret; } static void cpm_i2c_shutdown(struct cpm_i2c *cpm) { int i; /* Shut down I2C. 
*/ clrbits8(&cpm->i2c_reg->i2mod, I2MOD_EN); /* Disable interrupts */ out_8(&cpm->i2c_reg->i2cmr, 0); out_8(&cpm->i2c_reg->i2cer, 0xff); free_irq(cpm->irq, &cpm->adap); /* Free all memory */ for (i = 0; i < CPM_MAXBD; i++) { dma_free_coherent(&cpm->ofdev->dev, CPM_MAX_READ + 1, cpm->rxbuf[i], cpm->rxdma[i]); dma_free_coherent(&cpm->ofdev->dev, CPM_MAX_READ + 1, cpm->txbuf[i], cpm->txdma[i]); } cpm_muram_free(cpm->dp_addr); iounmap(cpm->i2c_reg); if ((cpm->version == 1) && (!cpm->i2c_addr)) iounmap(cpm->i2c_ram); if (cpm->version == 2) cpm_muram_free(cpm->i2c_addr); } static int cpm_i2c_probe(struct platform_device *ofdev) { int result, len; struct cpm_i2c *cpm; const u32 *data; cpm = kzalloc(sizeof(struct cpm_i2c), GFP_KERNEL); if (!cpm) return -ENOMEM; cpm->ofdev = ofdev; platform_set_drvdata(ofdev, cpm); cpm->adap = cpm_ops; i2c_set_adapdata(&cpm->adap, cpm); cpm->adap.dev.parent = &ofdev->dev; cpm->adap.dev.of_node = of_node_get(ofdev->dev.of_node); result = cpm_i2c_setup(cpm); if (result) { dev_err(&ofdev->dev, "Unable to init hardware\n"); goto out_free; } /* register new adapter to i2c module... */ data = of_get_property(ofdev->dev.of_node, "linux,i2c-index", &len); cpm->adap.nr = (data && len == 4) ? be32_to_cpup(data) : -1; result = i2c_add_numbered_adapter(&cpm->adap); if (result < 0) goto out_shut; dev_dbg(&ofdev->dev, "hw routines for %s registered.\n", cpm->adap.name); return 0; out_shut: cpm_i2c_shutdown(cpm); out_free: kfree(cpm); return result; } static void cpm_i2c_remove(struct platform_device *ofdev) { struct cpm_i2c *cpm = platform_get_drvdata(ofdev); i2c_del_adapter(&cpm->adap); cpm_i2c_shutdown(cpm); kfree(cpm); } static const struct of_device_id cpm_i2c_match[] = { { .compatible = "fsl,cpm1-i2c", }, { .compatible = "fsl,cpm2-i2c", }, {}, }; MODULE_DEVICE_TABLE(of, cpm_i2c_match); static struct platform_driver cpm_i2c_driver = { .probe = cpm_i2c_probe, .remove_new = cpm_i2c_remove, .driver = { .name = "fsl-i2c-cpm", .of_match_table = cpm_i2c_match, }, }; module_platform_driver(cpm_i2c_driver); MODULE_AUTHOR("Jochen Friedrich <[email protected]>"); MODULE_DESCRIPTION("I2C-Bus adapter routines for CPM boards"); MODULE_LICENSE("GPL");
linux-master
drivers/i2c/busses/i2c-cpm.c
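The I2CBRG value computed near the end of cpm_i2c_setup() follows the comment's relation i2c_clk = brgclk / (32 * 2 * (DIV + 3)). A standalone sketch of that arithmetic is shown below; the 66 MHz BRG clock is an illustrative assumption only, since get_brgfreq() is board specific.

#include <stdio.h>
#include <stdint.h>

/* i2c_clk = brgclk / (32 * 2 * (DIV + 3))  =>  DIV = brgclk / (32 * 2 * i2c_clk) - 3 */
static uint32_t cpm_i2c_brg_div(uint32_t brgclk, uint32_t i2c_clk)
{
	return brgclk / (32 * 2 * i2c_clk) - 3;
}

int main(void)
{
	uint32_t brgclk = 66000000;	/* assumed BRG clock, board dependent */
	uint32_t freq = 60000;		/* the driver's default 60 kHz bus clock */
	uint32_t div = cpm_i2c_brg_div(brgclk, freq);

	/* Recompute the actual bus rate from the divider that was chosen. */
	printf("DIV=%u -> %u Hz\n", div, brgclk / (32 * 2 * (div + 3)));
	return 0;
}

Because of the integer division the achieved rate lands near, not exactly on, the requested one; with these assumed numbers DIV=14 gives about 60.7 kHz.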
// SPDX-License-Identifier: GPL-2.0-only /* * ARM IOC/IOMD i2c driver. * * Copyright (C) 2000 Russell King * * On Acorn machines, the following i2c devices are on the bus: * - PCF8583 real time clock & static RAM */ #include <linux/module.h> #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> #include <linux/io.h> #include <mach/hardware.h> #include <asm/hardware/ioc.h> #define FORCE_ONES 0xdc #define SCL 0x02 #define SDA 0x01 /* * We must preserve all non-i2c output bits in IOC_CONTROL. * Note also that we need to preserve the value of SCL and * SDA outputs as well (which may be different from the * values read back from IOC_CONTROL). */ static u_int force_ones; static void ioc_setscl(void *data, int state) { u_int ioc_control = ioc_readb(IOC_CONTROL) & ~(SCL | SDA); u_int ones = force_ones; if (state) ones |= SCL; else ones &= ~SCL; force_ones = ones; ioc_writeb(ioc_control | ones, IOC_CONTROL); } static void ioc_setsda(void *data, int state) { u_int ioc_control = ioc_readb(IOC_CONTROL) & ~(SCL | SDA); u_int ones = force_ones; if (state) ones |= SDA; else ones &= ~SDA; force_ones = ones; ioc_writeb(ioc_control | ones, IOC_CONTROL); } static int ioc_getscl(void *data) { return (ioc_readb(IOC_CONTROL) & SCL) != 0; } static int ioc_getsda(void *data) { return (ioc_readb(IOC_CONTROL) & SDA) != 0; } static struct i2c_algo_bit_data ioc_data = { .setsda = ioc_setsda, .setscl = ioc_setscl, .getsda = ioc_getsda, .getscl = ioc_getscl, .udelay = 80, .timeout = HZ, }; static struct i2c_adapter ioc_ops = { .nr = 0, .name = "ioc", .algo_data = &ioc_data, }; static int __init i2c_ioc_init(void) { force_ones = FORCE_ONES | SCL | SDA; return i2c_bit_add_numbered_bus(&ioc_ops); } module_init(i2c_ioc_init); MODULE_AUTHOR("Russell King <[email protected]>"); MODULE_DESCRIPTION("ARM IOC/IOMD i2c driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/i2c/busses/i2c-acorn.c
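The force_ones bookkeeping above is the subtle part of this driver: the SCL and SDA output values cannot reliably be read back from IOC_CONTROL, so the driver caches them and re-applies them on every write. The following userspace model of that masking uses a plain variable in place of the register; it is a sketch of the bit logic only, not of Acorn hardware access.

#include <stdio.h>
#include <stdint.h>

#define FORCE_ONES 0xdc
#define SCL 0x02
#define SDA 0x01

static uint8_t ioc_control;				/* stands in for IOC_CONTROL */
static uint8_t force_ones = FORCE_ONES | SCL | SDA;	/* init value from i2c_ioc_init() */

static void set_line(uint8_t mask, int state)
{
	uint8_t ctrl = ioc_control & ~(SCL | SDA);	/* keep non-i2c output bits */

	if (state)
		force_ones |= mask;
	else
		force_ones &= ~mask;
	ioc_control = ctrl | force_ones;
}

int main(void)
{
	set_line(SCL, 0);
	set_line(SDA, 1);
	/* Prints 0xdd: SCL driven low, SDA high, the 0xdc force-one bits preserved */
	printf("IOC_CONTROL = 0x%02x\n", ioc_control);
	return 0;
}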
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * AMD MP2 PCIe communication driver * * Authors: Shyam Sundar S K <[email protected]> * Elie Morisse <[email protected]> */ #include <linux/dma-mapping.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/slab.h> #include "i2c-amd-mp2.h" #include <linux/io-64-nonatomic-lo-hi.h> static void amd_mp2_c2p_mutex_lock(struct amd_i2c_common *i2c_common) { struct amd_mp2_dev *privdata = i2c_common->mp2_dev; /* there is only one data mailbox for two i2c adapters */ mutex_lock(&privdata->c2p_lock); privdata->c2p_lock_busid = i2c_common->bus_id; } static void amd_mp2_c2p_mutex_unlock(struct amd_i2c_common *i2c_common) { struct amd_mp2_dev *privdata = i2c_common->mp2_dev; if (unlikely(privdata->c2p_lock_busid != i2c_common->bus_id)) { pci_warn(privdata->pci_dev, "bus %d attempting to unlock C2P locked by bus %d\n", i2c_common->bus_id, privdata->c2p_lock_busid); return; } mutex_unlock(&privdata->c2p_lock); } static int amd_mp2_cmd(struct amd_i2c_common *i2c_common, union i2c_cmd_base i2c_cmd_base) { struct amd_mp2_dev *privdata = i2c_common->mp2_dev; void __iomem *reg; i2c_common->reqcmd = i2c_cmd_base.s.i2c_cmd; reg = privdata->mmio + ((i2c_cmd_base.s.bus_id == 1) ? AMD_C2P_MSG1 : AMD_C2P_MSG0); writel(i2c_cmd_base.ul, reg); return 0; } int amd_mp2_bus_enable_set(struct amd_i2c_common *i2c_common, bool enable) { struct amd_mp2_dev *privdata = i2c_common->mp2_dev; union i2c_cmd_base i2c_cmd_base; pci_dbg(privdata->pci_dev, "id: %d\n", i2c_common->bus_id); i2c_cmd_base.ul = 0; i2c_cmd_base.s.i2c_cmd = enable ? i2c_enable : i2c_disable; i2c_cmd_base.s.bus_id = i2c_common->bus_id; i2c_cmd_base.s.i2c_speed = i2c_common->i2c_speed; amd_mp2_c2p_mutex_lock(i2c_common); return amd_mp2_cmd(i2c_common, i2c_cmd_base); } EXPORT_SYMBOL_GPL(amd_mp2_bus_enable_set); static void amd_mp2_cmd_rw_fill(struct amd_i2c_common *i2c_common, union i2c_cmd_base *i2c_cmd_base, enum i2c_cmd reqcmd) { i2c_cmd_base->s.i2c_cmd = reqcmd; i2c_cmd_base->s.bus_id = i2c_common->bus_id; i2c_cmd_base->s.i2c_speed = i2c_common->i2c_speed; i2c_cmd_base->s.slave_addr = i2c_common->msg->addr; i2c_cmd_base->s.length = i2c_common->msg->len; } int amd_mp2_rw(struct amd_i2c_common *i2c_common, enum i2c_cmd reqcmd) { struct amd_mp2_dev *privdata = i2c_common->mp2_dev; union i2c_cmd_base i2c_cmd_base; amd_mp2_cmd_rw_fill(i2c_common, &i2c_cmd_base, reqcmd); amd_mp2_c2p_mutex_lock(i2c_common); if (i2c_common->msg->len <= 32) { i2c_cmd_base.s.mem_type = use_c2pmsg; if (reqcmd == i2c_write) memcpy_toio(privdata->mmio + AMD_C2P_MSG2, i2c_common->msg->buf, i2c_common->msg->len); } else { i2c_cmd_base.s.mem_type = use_dram; writeq((u64)i2c_common->dma_addr, privdata->mmio + AMD_C2P_MSG2); } return amd_mp2_cmd(i2c_common, i2c_cmd_base); } EXPORT_SYMBOL_GPL(amd_mp2_rw); static void amd_mp2_pci_check_rw_event(struct amd_i2c_common *i2c_common) { struct amd_mp2_dev *privdata = i2c_common->mp2_dev; struct pci_dev *pdev = privdata->pci_dev; int len = i2c_common->eventval.r.length; u32 slave_addr = i2c_common->eventval.r.slave_addr; bool err = false; if (unlikely(len != i2c_common->msg->len)) { pci_err(pdev, "length %d in event doesn't match buffer length %d!\n", len, i2c_common->msg->len); err = true; } if (unlikely(slave_addr != i2c_common->msg->addr)) { pci_err(pdev, "unexpected slave address %x (expected: %x)!\n", slave_addr, i2c_common->msg->addr); err = true; } if (!err) i2c_common->cmd_success = true; } static void __amd_mp2_process_event(struct 
amd_i2c_common *i2c_common) { struct amd_mp2_dev *privdata = i2c_common->mp2_dev; struct pci_dev *pdev = privdata->pci_dev; enum status_type sts = i2c_common->eventval.r.status; enum response_type res = i2c_common->eventval.r.response; int len = i2c_common->eventval.r.length; if (res != command_success) { if (res != command_failed) pci_err(pdev, "invalid response to i2c command!\n"); return; } switch (i2c_common->reqcmd) { case i2c_read: if (sts == i2c_readcomplete_event) { amd_mp2_pci_check_rw_event(i2c_common); if (len <= 32) memcpy_fromio(i2c_common->msg->buf, privdata->mmio + AMD_C2P_MSG2, len); } else if (sts != i2c_readfail_event) { pci_err(pdev, "invalid i2c status after read (%d)!\n", sts); } break; case i2c_write: if (sts == i2c_writecomplete_event) amd_mp2_pci_check_rw_event(i2c_common); else if (sts != i2c_writefail_event) pci_err(pdev, "invalid i2c status after write (%d)!\n", sts); break; case i2c_enable: if (sts == i2c_busenable_complete) i2c_common->cmd_success = true; else if (sts != i2c_busenable_failed) pci_err(pdev, "invalid i2c status after bus enable (%d)!\n", sts); break; case i2c_disable: if (sts == i2c_busdisable_complete) i2c_common->cmd_success = true; else if (sts != i2c_busdisable_failed) pci_err(pdev, "invalid i2c status after bus disable (%d)!\n", sts); break; default: break; } } void amd_mp2_process_event(struct amd_i2c_common *i2c_common) { struct amd_mp2_dev *privdata = i2c_common->mp2_dev; struct pci_dev *pdev = privdata->pci_dev; if (unlikely(i2c_common->reqcmd == i2c_none)) { pci_warn(pdev, "received msg but no cmd was sent (bus = %d)!\n", i2c_common->bus_id); return; } __amd_mp2_process_event(i2c_common); i2c_common->reqcmd = i2c_none; amd_mp2_c2p_mutex_unlock(i2c_common); } EXPORT_SYMBOL_GPL(amd_mp2_process_event); static irqreturn_t amd_mp2_irq_isr(int irq, void *dev) { struct amd_mp2_dev *privdata = dev; struct pci_dev *pdev = privdata->pci_dev; struct amd_i2c_common *i2c_common; u32 val; unsigned int bus_id; void __iomem *reg; enum irqreturn ret = IRQ_NONE; for (bus_id = 0; bus_id < 2; bus_id++) { i2c_common = privdata->busses[bus_id]; if (!i2c_common) continue; reg = privdata->mmio + ((bus_id == 0) ? 
AMD_P2C_MSG1 : AMD_P2C_MSG2); val = readl(reg); if (val != 0) { writel(0, reg); writel(0, privdata->mmio + AMD_P2C_MSG_INTEN); i2c_common->eventval.ul = val; i2c_common->cmd_completion(i2c_common); ret = IRQ_HANDLED; } } if (ret != IRQ_HANDLED) { val = readl(privdata->mmio + AMD_P2C_MSG_INTEN); if (val != 0) { writel(0, privdata->mmio + AMD_P2C_MSG_INTEN); pci_warn(pdev, "received irq without message\n"); ret = IRQ_HANDLED; } } return ret; } void amd_mp2_rw_timeout(struct amd_i2c_common *i2c_common) { i2c_common->reqcmd = i2c_none; amd_mp2_c2p_mutex_unlock(i2c_common); } EXPORT_SYMBOL_GPL(amd_mp2_rw_timeout); int amd_mp2_register_cb(struct amd_i2c_common *i2c_common) { struct amd_mp2_dev *privdata = i2c_common->mp2_dev; struct pci_dev *pdev = privdata->pci_dev; if (i2c_common->bus_id > 1) return -EINVAL; if (privdata->busses[i2c_common->bus_id]) { pci_err(pdev, "Bus %d already taken!\n", i2c_common->bus_id); return -EINVAL; } privdata->busses[i2c_common->bus_id] = i2c_common; return 0; } EXPORT_SYMBOL_GPL(amd_mp2_register_cb); int amd_mp2_unregister_cb(struct amd_i2c_common *i2c_common) { struct amd_mp2_dev *privdata = i2c_common->mp2_dev; privdata->busses[i2c_common->bus_id] = NULL; return 0; } EXPORT_SYMBOL_GPL(amd_mp2_unregister_cb); static void amd_mp2_clear_reg(struct amd_mp2_dev *privdata) { int reg; for (reg = AMD_C2P_MSG0; reg <= AMD_C2P_MSG9; reg += 4) writel(0, privdata->mmio + reg); for (reg = AMD_P2C_MSG1; reg <= AMD_P2C_MSG2; reg += 4) writel(0, privdata->mmio + reg); } static int amd_mp2_pci_init(struct amd_mp2_dev *privdata, struct pci_dev *pci_dev) { int irq_flag = 0, rc; pci_set_drvdata(pci_dev, privdata); rc = pcim_enable_device(pci_dev); if (rc) { pci_err(pci_dev, "Failed to enable MP2 PCI device\n"); goto err_pci_enable; } rc = pcim_iomap_regions(pci_dev, 1 << 2, pci_name(pci_dev)); if (rc) { pci_err(pci_dev, "I/O memory remapping failed\n"); goto err_pci_enable; } privdata->mmio = pcim_iomap_table(pci_dev)[2]; pci_set_master(pci_dev); rc = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(64)); if (rc) goto err_dma_mask; /* request and enable interrupt */ writel(0, privdata->mmio + AMD_P2C_MSG_INTEN); rc = pci_alloc_irq_vectors(pci_dev, 1, 1, PCI_IRQ_ALL_TYPES); if (rc < 0) { dev_err(&pci_dev->dev, "Failed to allocate single IRQ err=%d\n", rc); goto err_dma_mask; } privdata->dev_irq = pci_irq_vector(pci_dev, 0); if (!pci_dev->msix_enabled && !pci_dev->msi_enabled) irq_flag = IRQF_SHARED; rc = devm_request_irq(&pci_dev->dev, privdata->dev_irq, amd_mp2_irq_isr, irq_flag, dev_name(&pci_dev->dev), privdata); if (rc) { pci_err(pci_dev, "Failure requesting irq %i: %d\n", privdata->dev_irq, rc); goto free_irq_vectors; } return rc; free_irq_vectors: free_irq(privdata->dev_irq, privdata); err_dma_mask: pci_clear_master(pci_dev); err_pci_enable: pci_set_drvdata(pci_dev, NULL); return rc; } static int amd_mp2_pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) { struct amd_mp2_dev *privdata; int rc; privdata = devm_kzalloc(&pci_dev->dev, sizeof(*privdata), GFP_KERNEL); if (!privdata) return -ENOMEM; privdata->pci_dev = pci_dev; rc = amd_mp2_pci_init(privdata, pci_dev); if (rc) return rc; mutex_init(&privdata->c2p_lock); pm_runtime_set_autosuspend_delay(&pci_dev->dev, 1000); pm_runtime_use_autosuspend(&pci_dev->dev); pm_runtime_put_autosuspend(&pci_dev->dev); pm_runtime_allow(&pci_dev->dev); privdata->probed = true; pci_info(pci_dev, "MP2 device registered.\n"); return 0; } static void amd_mp2_pci_remove(struct pci_dev *pci_dev) { struct amd_mp2_dev *privdata = 
pci_get_drvdata(pci_dev); pm_runtime_forbid(&pci_dev->dev); pm_runtime_get_noresume(&pci_dev->dev); free_irq(privdata->dev_irq, privdata); pci_clear_master(pci_dev); amd_mp2_clear_reg(privdata); } #ifdef CONFIG_PM static int amd_mp2_pci_suspend(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); struct amd_mp2_dev *privdata = pci_get_drvdata(pci_dev); struct amd_i2c_common *i2c_common; unsigned int bus_id; int ret = 0; for (bus_id = 0; bus_id < 2; bus_id++) { i2c_common = privdata->busses[bus_id]; if (i2c_common) i2c_common->suspend(i2c_common); } ret = pci_save_state(pci_dev); if (ret) { pci_err(pci_dev, "pci_save_state failed = %d\n", ret); return ret; } pci_disable_device(pci_dev); return ret; } static int amd_mp2_pci_resume(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); struct amd_mp2_dev *privdata = pci_get_drvdata(pci_dev); struct amd_i2c_common *i2c_common; unsigned int bus_id; int ret = 0; pci_restore_state(pci_dev); ret = pci_enable_device(pci_dev); if (ret < 0) { pci_err(pci_dev, "pci_enable_device failed = %d\n", ret); return ret; } for (bus_id = 0; bus_id < 2; bus_id++) { i2c_common = privdata->busses[bus_id]; if (i2c_common) { ret = i2c_common->resume(i2c_common); if (ret < 0) return ret; } } return ret; } static UNIVERSAL_DEV_PM_OPS(amd_mp2_pci_pm_ops, amd_mp2_pci_suspend, amd_mp2_pci_resume, NULL); #endif /* CONFIG_PM */ static const struct pci_device_id amd_mp2_pci_tbl[] = { {PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_MP2)}, {0} }; MODULE_DEVICE_TABLE(pci, amd_mp2_pci_tbl); static struct pci_driver amd_mp2_pci_driver = { .name = "i2c_amd_mp2", .id_table = amd_mp2_pci_tbl, .probe = amd_mp2_pci_probe, .remove = amd_mp2_pci_remove, #ifdef CONFIG_PM .driver = { .pm = &amd_mp2_pci_pm_ops, }, #endif }; module_pci_driver(amd_mp2_pci_driver); struct amd_mp2_dev *amd_mp2_find_device(void) { struct device *dev; struct pci_dev *pci_dev; dev = driver_find_next_device(&amd_mp2_pci_driver.driver, NULL); if (!dev) return NULL; pci_dev = to_pci_dev(dev); return (struct amd_mp2_dev *)pci_get_drvdata(pci_dev); } EXPORT_SYMBOL_GPL(amd_mp2_find_device); MODULE_DESCRIPTION("AMD(R) PCI-E MP2 I2C Controller Driver"); MODULE_AUTHOR("Shyam Sundar S K <[email protected]>"); MODULE_AUTHOR("Elie Morisse <[email protected]>"); MODULE_LICENSE("Dual BSD/GPL");
linux-master
drivers/i2c/busses/i2c-amd-mp2-pci.c
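One detail worth pulling out of amd_mp2_rw() above is the payload-placement rule: transfers of up to 32 bytes are copied inline into the C2P message registers, while anything larger is handed to the MP2 firmware through a DMA buffer in DRAM. A tiny standalone model of that decision follows; the enum names are local to this sketch, where the driver itself uses use_c2pmsg and use_dram.

#include <stdio.h>
#include <stdint.h>

enum mem_type { USE_C2PMSG, USE_DRAM };

/* Mirrors amd_mp2_rw(): payloads of <= 32 bytes ride in the mailbox registers. */
static enum mem_type amd_mp2_mem_type(uint16_t len)
{
	return len <= 32 ? USE_C2PMSG : USE_DRAM;
}

int main(void)
{
	for (uint16_t len = 16; len <= 64; len += 16)
		printf("%2u bytes -> %s\n", len,
		       amd_mp2_mem_type(len) == USE_C2PMSG ?
		       "C2P registers" : "DRAM via DMA");
	return 0;
}

The 32-byte cutoff matches the memcpy_toio() window at AMD_C2P_MSG2 on the write path and the memcpy_fromio() window on the read-completion path.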
// SPDX-License-Identifier: GPL-2.0-or-later /* ------------------------------------------------------------------------ * * i2c-parport.c I2C bus over parallel port * * ------------------------------------------------------------------------ * Copyright (C) 2003-2011 Jean Delvare <[email protected]> Based on older i2c-philips-par.c driver Copyright (C) 1995-2000 Simon G. Vogl With some changes from: Frodo Looijaard <[email protected]> Kyösti Mälkki <[email protected]> * ------------------------------------------------------------------------ */ #define pr_fmt(fmt) "i2c-parport: " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/parport.h> #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> #include <linux/i2c-smbus.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/mutex.h> #define PORT_DATA 0 #define PORT_STAT 1 #define PORT_CTRL 2 struct lineop { u8 val; u8 port; u8 inverted; }; struct adapter_parm { struct lineop setsda; struct lineop setscl; struct lineop getsda; struct lineop getscl; struct lineop init; unsigned int smbus_alert:1; }; static const struct adapter_parm adapter_parm[] = { /* type 0: Philips adapter */ { .setsda = { 0x80, PORT_DATA, 1 }, .setscl = { 0x08, PORT_CTRL, 0 }, .getsda = { 0x80, PORT_STAT, 0 }, .getscl = { 0x08, PORT_STAT, 0 }, }, /* type 1: home brew teletext adapter */ { .setsda = { 0x02, PORT_DATA, 0 }, .setscl = { 0x01, PORT_DATA, 0 }, .getsda = { 0x80, PORT_STAT, 1 }, }, /* type 2: Velleman K8000 adapter */ { .setsda = { 0x02, PORT_CTRL, 1 }, .setscl = { 0x08, PORT_CTRL, 1 }, .getsda = { 0x10, PORT_STAT, 0 }, }, /* type 3: ELV adapter */ { .setsda = { 0x02, PORT_DATA, 1 }, .setscl = { 0x01, PORT_DATA, 1 }, .getsda = { 0x40, PORT_STAT, 1 }, .getscl = { 0x08, PORT_STAT, 1 }, }, /* type 4: ADM1032 evaluation board */ { .setsda = { 0x02, PORT_DATA, 1 }, .setscl = { 0x01, PORT_DATA, 1 }, .getsda = { 0x10, PORT_STAT, 1 }, .init = { 0xf0, PORT_DATA, 0 }, .smbus_alert = 1, }, /* type 5: ADM1025, ADM1030 and ADM1031 evaluation boards */ { .setsda = { 0x02, PORT_DATA, 1 }, .setscl = { 0x01, PORT_DATA, 1 }, .getsda = { 0x10, PORT_STAT, 1 }, }, /* type 6: Barco LPT->DVI (K5800236) adapter */ { .setsda = { 0x02, PORT_DATA, 1 }, .setscl = { 0x01, PORT_DATA, 1 }, .getsda = { 0x20, PORT_STAT, 0 }, .getscl = { 0x40, PORT_STAT, 0 }, .init = { 0xfc, PORT_DATA, 0 }, }, /* type 7: One For All JP1 parallel port adapter */ { .setsda = { 0x01, PORT_DATA, 0 }, .setscl = { 0x02, PORT_DATA, 0 }, .getsda = { 0x80, PORT_STAT, 1 }, .init = { 0x04, PORT_DATA, 1 }, }, /* type 8: VCT-jig */ { .setsda = { 0x04, PORT_DATA, 1 }, .setscl = { 0x01, PORT_DATA, 1 }, .getsda = { 0x40, PORT_STAT, 0 }, .getscl = { 0x80, PORT_STAT, 1 }, }, }; /* ----- Device list ------------------------------------------------------ */ struct i2c_par { struct pardevice *pdev; struct i2c_adapter adapter; struct i2c_algo_bit_data algo_data; struct i2c_smbus_alert_setup alert_data; struct i2c_client *ara; struct list_head node; }; static LIST_HEAD(adapter_list); static DEFINE_MUTEX(adapter_list_lock); #define MAX_DEVICE 4 static int parport[MAX_DEVICE] = {0, -1, -1, -1}; module_param_array(parport, int, NULL, 0); MODULE_PARM_DESC(parport, "List of parallel ports to bind to, by index.\n" " At most " __stringify(MAX_DEVICE) " devices are supported.\n" " Default is one device connected to parport0.\n" ); static int type = -1; module_param(type, int, 0); MODULE_PARM_DESC(type, "Type of adapter:\n" " 0 = Philips adapter\n" " 1 = home 
brew teletext adapter\n" " 2 = Velleman K8000 adapter\n" " 3 = ELV adapter\n" " 4 = ADM1032 evaluation board\n" " 5 = ADM1025, ADM1030 and ADM1031 evaluation boards\n" " 6 = Barco LPT->DVI (K5800236) adapter\n" " 7 = One For All JP1 parallel port adapter\n" " 8 = VCT-jig\n" ); /* ----- Low-level parallel port access ----------------------------------- */ static void port_write_data(struct parport *p, unsigned char d) { parport_write_data(p, d); } static void port_write_control(struct parport *p, unsigned char d) { parport_write_control(p, d); } static unsigned char port_read_data(struct parport *p) { return parport_read_data(p); } static unsigned char port_read_status(struct parport *p) { return parport_read_status(p); } static unsigned char port_read_control(struct parport *p) { return parport_read_control(p); } static void (* const port_write[])(struct parport *, unsigned char) = { port_write_data, NULL, port_write_control, }; static unsigned char (* const port_read[])(struct parport *) = { port_read_data, port_read_status, port_read_control, }; /* ----- Unified line operation functions --------------------------------- */ static inline void line_set(struct parport *data, int state, const struct lineop *op) { u8 oldval = port_read[op->port](data); /* Touch only the bit(s) needed */ if ((op->inverted && !state) || (!op->inverted && state)) port_write[op->port](data, oldval | op->val); else port_write[op->port](data, oldval & ~op->val); } static inline int line_get(struct parport *data, const struct lineop *op) { u8 oldval = port_read[op->port](data); return ((op->inverted && (oldval & op->val) != op->val) || (!op->inverted && (oldval & op->val) == op->val)); } /* ----- I2C algorithm call-back functions and structures ----------------- */ static void parport_setscl(void *data, int state) { line_set((struct parport *) data, state, &adapter_parm[type].setscl); } static void parport_setsda(void *data, int state) { line_set((struct parport *) data, state, &adapter_parm[type].setsda); } static int parport_getscl(void *data) { return line_get((struct parport *) data, &adapter_parm[type].getscl); } static int parport_getsda(void *data) { return line_get((struct parport *) data, &adapter_parm[type].getsda); } /* Encapsulate the functions above in the correct structure. Note that this is only a template, from which the real structures are copied. The attaching code will set getscl to NULL for adapters that cannot read SCL back, and will also make the data field point to the parallel port structure. 
*/ static const struct i2c_algo_bit_data parport_algo_data = { .setsda = parport_setsda, .setscl = parport_setscl, .getsda = parport_getsda, .getscl = parport_getscl, .udelay = 10, /* ~50 kbps */ .timeout = HZ, }; /* ----- I2c and parallel port call-back functions and structures --------- */ static void i2c_parport_irq(void *data) { struct i2c_par *adapter = data; struct i2c_client *ara = adapter->ara; if (ara) { dev_dbg(&ara->dev, "SMBus alert received\n"); i2c_handle_smbus_alert(ara); } else dev_dbg(&adapter->adapter.dev, "SMBus alert received but no ARA client!\n"); } static void i2c_parport_attach(struct parport *port) { struct i2c_par *adapter; int i; struct pardev_cb i2c_parport_cb; if (type < 0) { pr_warn("adapter type unspecified\n"); return; } if (type >= ARRAY_SIZE(adapter_parm)) { pr_warn("invalid type (%d)\n", type); return; } for (i = 0; i < MAX_DEVICE; i++) { if (parport[i] == -1) continue; if (port->number == parport[i]) break; } if (i == MAX_DEVICE) { pr_debug("Not using parport%d.\n", port->number); return; } adapter = kzalloc(sizeof(struct i2c_par), GFP_KERNEL); if (!adapter) return; memset(&i2c_parport_cb, 0, sizeof(i2c_parport_cb)); i2c_parport_cb.flags = PARPORT_FLAG_EXCL; i2c_parport_cb.irq_func = i2c_parport_irq; i2c_parport_cb.private = adapter; pr_debug("attaching to %s\n", port->name); parport_disable_irq(port); adapter->pdev = parport_register_dev_model(port, "i2c-parport", &i2c_parport_cb, i); if (!adapter->pdev) { pr_err("Unable to register with parport\n"); goto err_free; } /* Fill the rest of the structure */ adapter->adapter.owner = THIS_MODULE; adapter->adapter.class = I2C_CLASS_HWMON; strscpy(adapter->adapter.name, "Parallel port adapter", sizeof(adapter->adapter.name)); adapter->algo_data = parport_algo_data; /* Slow down if we can't sense SCL */ if (!adapter_parm[type].getscl.val) { adapter->algo_data.getscl = NULL; adapter->algo_data.udelay = 50; /* ~10 kbps */ } adapter->algo_data.data = port; adapter->adapter.algo_data = &adapter->algo_data; adapter->adapter.dev.parent = port->physport->dev; if (parport_claim_or_block(adapter->pdev) < 0) { dev_err(&adapter->pdev->dev, "Could not claim parallel port\n"); goto err_unregister; } /* Reset hardware to a sane state (SCL and SDA high) */ parport_setsda(port, 1); parport_setscl(port, 1); /* Other init if needed (power on...) 
*/ if (adapter_parm[type].init.val) { line_set(port, 1, &adapter_parm[type].init); /* Give powered devices some time to settle */ msleep(100); } if (i2c_bit_add_bus(&adapter->adapter) < 0) { dev_err(&adapter->pdev->dev, "Unable to register with I2C\n"); goto err_unregister; } /* Setup SMBus alert if supported */ if (adapter_parm[type].smbus_alert) { struct i2c_client *ara; ara = i2c_new_smbus_alert_device(&adapter->adapter, &adapter->alert_data); if (!IS_ERR(ara)) { adapter->ara = ara; parport_enable_irq(port); } else { dev_warn(&adapter->pdev->dev, "Failed to register ARA client\n"); } } /* Add the new adapter to the list */ mutex_lock(&adapter_list_lock); list_add_tail(&adapter->node, &adapter_list); mutex_unlock(&adapter_list_lock); return; err_unregister: parport_release(adapter->pdev); parport_unregister_device(adapter->pdev); err_free: kfree(adapter); } static void i2c_parport_detach(struct parport *port) { struct i2c_par *adapter, *_n; /* Walk the list */ mutex_lock(&adapter_list_lock); list_for_each_entry_safe(adapter, _n, &adapter_list, node) { if (adapter->pdev->port == port) { if (adapter->ara) { parport_disable_irq(port); i2c_unregister_device(adapter->ara); } i2c_del_adapter(&adapter->adapter); /* Un-init if needed (power off...) */ if (adapter_parm[type].init.val) line_set(port, 0, &adapter_parm[type].init); parport_release(adapter->pdev); parport_unregister_device(adapter->pdev); list_del(&adapter->node); kfree(adapter); } } mutex_unlock(&adapter_list_lock); } static struct parport_driver i2c_parport_driver = { .name = "i2c-parport", .match_port = i2c_parport_attach, .detach = i2c_parport_detach, .devmodel = true, }; module_parport_driver(i2c_parport_driver); MODULE_AUTHOR("Jean Delvare <[email protected]>"); MODULE_DESCRIPTION("I2C bus over parallel port"); MODULE_LICENSE("GPL");
linux-master
drivers/i2c/busses/i2c-parport.c
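The line_set()/line_get() helpers above fold each adapter's polarity into the lineop table, so one pair of functions serves all nine adapter types. Below is a minimal userspace rendering of the same truth table, using the type 0 Philips adapter's inverted SDA bit (0x80) as the example; the data/status/control port indirection is dropped and a plain byte stands in for the port register, so this sketches only the inversion logic.

#include <stdio.h>
#include <stdint.h>

struct lineop {
	uint8_t val;
	uint8_t inverted;
};

static uint8_t line_set(uint8_t regval, int state, const struct lineop *op)
{
	/* An inverted line is driven low for logical 1 and high for logical 0. */
	if ((op->inverted && !state) || (!op->inverted && state))
		return regval | op->val;
	return regval & ~op->val;
}

static int line_get(uint8_t regval, const struct lineop *op)
{
	return (op->inverted && (regval & op->val) != op->val) ||
	       (!op->inverted && (regval & op->val) == op->val);
}

int main(void)
{
	const struct lineop sda = { 0x80, 1 };	/* inverted SDA, as on the Philips adapter */
	uint8_t reg = 0x00;

	reg = line_set(reg, 1, &sda);
	/* Prints reg=0x00, logical SDA=1: logical 1 leaves the inverted bit clear */
	printf("reg=0x%02x, logical SDA=%d\n", reg, line_get(reg, &sda));
	return 0;
}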
// SPDX-License-Identifier: GPL-2.0 /* * Mellanox BlueField I2C bus driver * * Copyright (C) 2020 Mellanox Technologies, Ltd. */ #include <linux/acpi.h> #include <linux/bitfield.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/i2c.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/string.h> /* Defines what functionality is present. */ #define MLXBF_I2C_FUNC_SMBUS_BLOCK \ (I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_SMBUS_BLOCK_PROC_CALL) #define MLXBF_I2C_FUNC_SMBUS_DEFAULT \ (I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | \ I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_I2C_BLOCK | \ I2C_FUNC_SMBUS_PROC_CALL) #define MLXBF_I2C_FUNC_ALL \ (MLXBF_I2C_FUNC_SMBUS_DEFAULT | MLXBF_I2C_FUNC_SMBUS_BLOCK | \ I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SLAVE) /* Shared resources info in BlueField platforms. */ #define MLXBF_I2C_COALESCE_TYU_ADDR 0x02801300 #define MLXBF_I2C_COALESCE_TYU_SIZE 0x010 #define MLXBF_I2C_GPIO_TYU_ADDR 0x02802000 #define MLXBF_I2C_GPIO_TYU_SIZE 0x100 #define MLXBF_I2C_COREPLL_TYU_ADDR 0x02800358 #define MLXBF_I2C_COREPLL_TYU_SIZE 0x008 #define MLXBF_I2C_COREPLL_YU_ADDR 0x02800c30 #define MLXBF_I2C_COREPLL_YU_SIZE 0x00c #define MLXBF_I2C_COREPLL_RSH_YU_ADDR 0x13409824 #define MLXBF_I2C_COREPLL_RSH_YU_SIZE 0x00c #define MLXBF_I2C_SHARED_RES_MAX 3 /* * Note that the following SMBus, CAUSE, GPIO and PLL register addresses * refer to their respective offsets relative to the corresponding * memory-mapped region whose addresses are specified in either the DT or * the ACPI tables or above. */ /* * SMBus Master core clock frequency. Timing configurations are * strongly dependent on the core clock frequency of the SMBus * Master. Default value is set to 400MHz. */ #define MLXBF_I2C_TYU_PLL_OUT_FREQ (400 * 1000 * 1000) /* Reference clock for Bluefield - 156 MHz. */ #define MLXBF_I2C_PLL_IN_FREQ 156250000ULL /* Constant used to determine the PLL frequency. */ #define MLNXBF_I2C_COREPLL_CONST 16384ULL #define MLXBF_I2C_FREQUENCY_1GHZ 1000000000ULL /* PLL registers. */ #define MLXBF_I2C_CORE_PLL_REG1 0x4 #define MLXBF_I2C_CORE_PLL_REG2 0x8 /* OR cause register. */ #define MLXBF_I2C_CAUSE_OR_EVTEN0 0x14 #define MLXBF_I2C_CAUSE_OR_CLEAR 0x18 /* Arbiter Cause Register. */ #define MLXBF_I2C_CAUSE_ARBITER 0x1c /* * Cause Status flags. Note that those bits might be considered * as interrupt enabled bits. */ /* Transaction ended with STOP. */ #define MLXBF_I2C_CAUSE_TRANSACTION_ENDED BIT(0) /* Master arbitration lost. */ #define MLXBF_I2C_CAUSE_M_ARBITRATION_LOST BIT(1) /* Unexpected start detected. */ #define MLXBF_I2C_CAUSE_UNEXPECTED_START BIT(2) /* Unexpected stop detected. */ #define MLXBF_I2C_CAUSE_UNEXPECTED_STOP BIT(3) /* Wait for transfer continuation. */ #define MLXBF_I2C_CAUSE_WAIT_FOR_FW_DATA BIT(4) /* Failed to generate STOP. */ #define MLXBF_I2C_CAUSE_PUT_STOP_FAILED BIT(5) /* Failed to generate START. */ #define MLXBF_I2C_CAUSE_PUT_START_FAILED BIT(6) /* Clock toggle completed. */ #define MLXBF_I2C_CAUSE_CLK_TOGGLE_DONE BIT(7) /* Transfer timeout occurred. */ #define MLXBF_I2C_CAUSE_M_FW_TIMEOUT BIT(8) /* Master busy bit reset. 
*/ #define MLXBF_I2C_CAUSE_M_GW_BUSY_FALL BIT(9) #define MLXBF_I2C_CAUSE_MASTER_ARBITER_BITS_MASK GENMASK(9, 0) #define MLXBF_I2C_CAUSE_MASTER_STATUS_ERROR \ (MLXBF_I2C_CAUSE_M_ARBITRATION_LOST | \ MLXBF_I2C_CAUSE_UNEXPECTED_START | \ MLXBF_I2C_CAUSE_UNEXPECTED_STOP | \ MLXBF_I2C_CAUSE_PUT_STOP_FAILED | \ MLXBF_I2C_CAUSE_PUT_START_FAILED | \ MLXBF_I2C_CAUSE_CLK_TOGGLE_DONE | \ MLXBF_I2C_CAUSE_M_FW_TIMEOUT) /* * Slave cause status flags. Note that those bits might be considered * as interrupt enabled bits. */ /* Write transaction received successfully. */ #define MLXBF_I2C_CAUSE_WRITE_SUCCESS BIT(0) /* Read transaction received, waiting for response. */ #define MLXBF_I2C_CAUSE_READ_WAIT_FW_RESPONSE BIT(13) /* Slave busy bit reset. */ #define MLXBF_I2C_CAUSE_S_GW_BUSY_FALL BIT(18) /* Cause coalesce registers. */ #define MLXBF_I2C_CAUSE_COALESCE_0 0x00 #define MLXBF_I2C_CAUSE_TYU_SLAVE_BIT 3 #define MLXBF_I2C_CAUSE_YU_SLAVE_BIT 1 /* Functional enable register. */ #define MLXBF_I2C_GPIO_0_FUNC_EN_0 0x28 /* Force OE enable register. */ #define MLXBF_I2C_GPIO_0_FORCE_OE_EN 0x30 /* * Note that Smbus GWs are on GPIOs 30:25. Two pins are used to control * SDA/SCL lines: * * SMBUS GW0 -> bits[26:25] * SMBUS GW1 -> bits[28:27] * SMBUS GW2 -> bits[30:29] */ #define MLXBF_I2C_GPIO_SMBUS_GW_PINS(num) (25 + ((num) << 1)) /* Note that gw_id can be 0,1 or 2. */ #define MLXBF_I2C_GPIO_SMBUS_GW_MASK(num) \ (0xffffffff & (~(0x3 << MLXBF_I2C_GPIO_SMBUS_GW_PINS(num)))) #define MLXBF_I2C_GPIO_SMBUS_GW_RESET_PINS(num, val) \ ((val) & MLXBF_I2C_GPIO_SMBUS_GW_MASK(num)) #define MLXBF_I2C_GPIO_SMBUS_GW_ASSERT_PINS(num, val) \ ((val) | (0x3 << MLXBF_I2C_GPIO_SMBUS_GW_PINS(num))) /* * Defines SMBus operating frequency and core clock frequency. * According to ADB files, default values are compliant to 100KHz SMBus * @ 400MHz core clock. The driver should be able to calculate core * frequency based on PLL parameters. */ #define MLXBF_I2C_COREPLL_FREQ MLXBF_I2C_TYU_PLL_OUT_FREQ /* Core PLL TYU configuration. */ #define MLXBF_I2C_COREPLL_CORE_F_TYU_MASK GENMASK(15, 3) #define MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK GENMASK(19, 16) #define MLXBF_I2C_COREPLL_CORE_R_TYU_MASK GENMASK(25, 20) /* Core PLL YU configuration. */ #define MLXBF_I2C_COREPLL_CORE_F_YU_MASK GENMASK(25, 0) #define MLXBF_I2C_COREPLL_CORE_OD_YU_MASK GENMASK(3, 0) #define MLXBF_I2C_COREPLL_CORE_R_YU_MASK GENMASK(31, 26) /* SMBus timing parameters. */ #define MLXBF_I2C_SMBUS_TIMER_SCL_LOW_SCL_HIGH 0x00 #define MLXBF_I2C_SMBUS_TIMER_FALL_RISE_SPIKE 0x04 #define MLXBF_I2C_SMBUS_TIMER_THOLD 0x08 #define MLXBF_I2C_SMBUS_TIMER_TSETUP_START_STOP 0x0c #define MLXBF_I2C_SMBUS_TIMER_TSETUP_DATA 0x10 #define MLXBF_I2C_SMBUS_THIGH_MAX_TBUF 0x14 #define MLXBF_I2C_SMBUS_SCL_LOW_TIMEOUT 0x18 #define MLXBF_I2C_SHIFT_0 0 #define MLXBF_I2C_SHIFT_8 8 #define MLXBF_I2C_SHIFT_16 16 #define MLXBF_I2C_SHIFT_24 24 #define MLXBF_I2C_MASK_8 GENMASK(7, 0) #define MLXBF_I2C_MASK_16 GENMASK(15, 0) #define MLXBF_I2C_MST_ADDR_OFFSET 0x200 /* SMBus Master GW. */ #define MLXBF_I2C_SMBUS_MASTER_GW 0x0 /* Number of bytes received and sent. */ #define MLXBF_I2C_YU_SMBUS_RS_BYTES 0x100 #define MLXBF_I2C_RSH_YU_SMBUS_RS_BYTES 0x10c /* Packet error check (PEC) value. */ #define MLXBF_I2C_SMBUS_MASTER_PEC 0x104 /* Status bits (ACK/NACK/FW Timeout). */ #define MLXBF_I2C_SMBUS_MASTER_STATUS 0x108 /* SMbus Master Finite State Machine. */ #define MLXBF_I2C_YU_SMBUS_MASTER_FSM 0x110 #define MLXBF_I2C_RSH_YU_SMBUS_MASTER_FSM 0x100 /* SMBus master GW control bits offset in MLXBF_I2C_SMBUS_MASTER_GW[31:3]. 
 */
#define MLXBF_I2C_MASTER_LOCK_BIT         BIT(31) /* Lock bit. */
#define MLXBF_I2C_MASTER_BUSY_BIT         BIT(30) /* Busy bit. */
#define MLXBF_I2C_MASTER_START_BIT        BIT(29) /* Control start. */
#define MLXBF_I2C_MASTER_CTL_WRITE_BIT    BIT(28) /* Control write phase. */
#define MLXBF_I2C_MASTER_CTL_READ_BIT     BIT(19) /* Control read phase. */
#define MLXBF_I2C_MASTER_STOP_BIT         BIT(3)  /* Control stop. */

#define MLXBF_I2C_MASTER_ENABLE \
	(MLXBF_I2C_MASTER_LOCK_BIT | MLXBF_I2C_MASTER_BUSY_BIT | \
	 MLXBF_I2C_MASTER_START_BIT | MLXBF_I2C_MASTER_STOP_BIT)

#define MLXBF_I2C_MASTER_ENABLE_WRITE \
	(MLXBF_I2C_MASTER_ENABLE | MLXBF_I2C_MASTER_CTL_WRITE_BIT)

#define MLXBF_I2C_MASTER_ENABLE_READ \
	(MLXBF_I2C_MASTER_ENABLE | MLXBF_I2C_MASTER_CTL_READ_BIT)

#define MLXBF_I2C_MASTER_WRITE_SHIFT      21 /* Control write bytes */
#define MLXBF_I2C_MASTER_SEND_PEC_SHIFT   20 /* Send PEC byte when set to 1 */
#define MLXBF_I2C_MASTER_PARSE_EXP_SHIFT  11 /* Control parse expected bytes */
#define MLXBF_I2C_MASTER_SLV_ADDR_SHIFT   12 /* Slave address */
#define MLXBF_I2C_MASTER_READ_SHIFT       4  /* Control read bytes */

/* SMBus master GW Data descriptor. */
#define MLXBF_I2C_MASTER_DATA_DESC_ADDR   0x80
#define MLXBF_I2C_MASTER_DATA_DESC_SIZE   0x80 /* Size in bytes. */

/* Maximum bytes to read/write per SMBus transaction. */
#define MLXBF_I2C_MASTER_DATA_R_LENGTH  MLXBF_I2C_MASTER_DATA_DESC_SIZE
#define MLXBF_I2C_MASTER_DATA_W_LENGTH (MLXBF_I2C_MASTER_DATA_DESC_SIZE - 1)

/* All bytes were transmitted. */
#define MLXBF_I2C_SMBUS_STATUS_BYTE_CNT_DONE      BIT(0)
/* NACK received. */
#define MLXBF_I2C_SMBUS_STATUS_NACK_RCV           BIT(1)
/* Slave's byte count >128 bytes. */
#define MLXBF_I2C_SMBUS_STATUS_READ_ERR           BIT(2)
/* Timeout occurred. */
#define MLXBF_I2C_SMBUS_STATUS_FW_TIMEOUT         BIT(3)

#define MLXBF_I2C_SMBUS_MASTER_STATUS_MASK        GENMASK(3, 0)

#define MLXBF_I2C_SMBUS_MASTER_STATUS_ERROR \
	(MLXBF_I2C_SMBUS_STATUS_NACK_RCV | \
	 MLXBF_I2C_SMBUS_STATUS_READ_ERR | \
	 MLXBF_I2C_SMBUS_STATUS_FW_TIMEOUT)

#define MLXBF_I2C_SMBUS_MASTER_FSM_STOP_MASK      BIT(31)
#define MLXBF_I2C_SMBUS_MASTER_FSM_PS_STATE_MASK  BIT(15)

#define MLXBF_I2C_SLV_ADDR_OFFSET 0x400

/* SMBus slave GW. */
#define MLXBF_I2C_SMBUS_SLAVE_GW              0x0
/* Number of bytes received and sent from/to master. */
#define MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES 0x100
/* Packet error check (PEC) value. */
#define MLXBF_I2C_SMBUS_SLAVE_PEC             0x104
/* SMBus slave Finite State Machine (FSM). */
#define MLXBF_I2C_SMBUS_SLAVE_FSM             0x110
/*
 * Should be set when all raised causes handled, and cleared by HW on
 * every new cause.
 */
#define MLXBF_I2C_SMBUS_SLAVE_READY           0x12c

/* SMBus slave GW control bits offset in MLXBF_I2C_SMBUS_SLAVE_GW[31:19]. */
#define MLXBF_I2C_SLAVE_BUSY_BIT   BIT(30) /* Busy bit. */
#define MLXBF_I2C_SLAVE_WRITE_BIT  BIT(29) /* Control write enable. */

#define MLXBF_I2C_SLAVE_ENABLE \
	(MLXBF_I2C_SLAVE_BUSY_BIT | MLXBF_I2C_SLAVE_WRITE_BIT)

#define MLXBF_I2C_SLAVE_WRITE_BYTES_SHIFT 22 /* Number of bytes to write. */
#define MLXBF_I2C_SLAVE_SEND_PEC_SHIFT    21 /* Send PEC byte shift. */

/* SMBus slave GW Data descriptor. */
#define MLXBF_I2C_SLAVE_DATA_DESC_ADDR   0x80
#define MLXBF_I2C_SLAVE_DATA_DESC_SIZE   0x80 /* Size in bytes. */

/* SMBus slave configuration registers. */
#define MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG    0x114
#define MLXBF_I2C_SMBUS_SLAVE_ADDR_CNT    16
#define MLXBF_I2C_SMBUS_SLAVE_ADDR_EN_BIT BIT(7)
#define MLXBF_I2C_SMBUS_SLAVE_ADDR_MASK   GENMASK(6, 0)

/*
 * Timeout is given in microseconds. Note also that timeout handling is
 * not exact.
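 * For instance, with the constants below, a 300000 us timeout polled
 * every MLXBF_I2C_POLL_FREQ_IN_USEC (200 us) amounts to roughly 1500
 * iterations of mlxbf_i2c_poll() (illustrative arithmetic).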
*/ #define MLXBF_I2C_SMBUS_TIMEOUT (300 * 1000) /* 300ms */ #define MLXBF_I2C_SMBUS_LOCK_POLL_TIMEOUT (300 * 1000) /* 300ms */ /* Polling frequency in microseconds. */ #define MLXBF_I2C_POLL_FREQ_IN_USEC 200 #define MLXBF_I2C_SMBUS_OP_CNT_1 1 #define MLXBF_I2C_SMBUS_OP_CNT_2 2 #define MLXBF_I2C_SMBUS_OP_CNT_3 3 #define MLXBF_I2C_SMBUS_MAX_OP_CNT MLXBF_I2C_SMBUS_OP_CNT_3 /* Helper macro to define an I2C resource parameters. */ #define MLXBF_I2C_RES_PARAMS(addr, size, str) \ { \ .start = (addr), \ .end = (addr) + (size) - 1, \ .name = (str) \ } enum { MLXBF_I2C_TIMING_100KHZ = 100000, MLXBF_I2C_TIMING_400KHZ = 400000, MLXBF_I2C_TIMING_1000KHZ = 1000000, }; enum { MLXBF_I2C_F_READ = BIT(0), MLXBF_I2C_F_WRITE = BIT(1), MLXBF_I2C_F_NORESTART = BIT(3), MLXBF_I2C_F_SMBUS_OPERATION = BIT(4), MLXBF_I2C_F_SMBUS_BLOCK = BIT(5), MLXBF_I2C_F_SMBUS_PEC = BIT(6), MLXBF_I2C_F_SMBUS_PROCESS_CALL = BIT(7), }; /* Mellanox BlueField chip type. */ enum mlxbf_i2c_chip_type { MLXBF_I2C_CHIP_TYPE_1, /* Mellanox BlueField-1 chip. */ MLXBF_I2C_CHIP_TYPE_2, /* Mellanox BlueField-2 chip. */ MLXBF_I2C_CHIP_TYPE_3 /* Mellanox BlueField-3 chip. */ }; /* List of chip resources that are being accessed by the driver. */ enum { MLXBF_I2C_SMBUS_RES, MLXBF_I2C_MST_CAUSE_RES, MLXBF_I2C_SLV_CAUSE_RES, MLXBF_I2C_COALESCE_RES, MLXBF_I2C_SMBUS_TIMER_RES, MLXBF_I2C_SMBUS_MST_RES, MLXBF_I2C_SMBUS_SLV_RES, MLXBF_I2C_COREPLL_RES, MLXBF_I2C_GPIO_RES, MLXBF_I2C_END_RES }; /* Encapsulates timing parameters. */ struct mlxbf_i2c_timings { u16 scl_high; /* Clock high period. */ u16 scl_low; /* Clock low period. */ u8 sda_rise; /* Data rise time. */ u8 sda_fall; /* Data fall time. */ u8 scl_rise; /* Clock rise time. */ u8 scl_fall; /* Clock fall time. */ u16 hold_start; /* Hold time after (REPEATED) START. */ u16 hold_data; /* Data hold time. */ u16 setup_start; /* REPEATED START condition setup time. */ u16 setup_stop; /* STOP condition setup time. */ u16 setup_data; /* Data setup time. */ u16 pad; /* Padding. */ u16 buf; /* Bus free time between STOP and START. */ u16 thigh_max; /* Thigh max. */ u32 timeout; /* Detect clock low timeout. */ }; struct mlxbf_i2c_smbus_operation { u32 flags; u32 length; /* Buffer length in bytes. */ u8 *buffer; }; struct mlxbf_i2c_smbus_request { u8 slave; u8 operation_cnt; struct mlxbf_i2c_smbus_operation operation[MLXBF_I2C_SMBUS_MAX_OP_CNT]; }; struct mlxbf_i2c_resource { void __iomem *io; struct resource *params; struct mutex *lock; /* Mutex to protect mlxbf_i2c_resource. */ u8 type; }; struct mlxbf_i2c_chip_info { enum mlxbf_i2c_chip_type type; /* Chip shared resources that are being used by the I2C controller. */ struct mlxbf_i2c_resource *shared_res[MLXBF_I2C_SHARED_RES_MAX]; /* Callback to calculate the core PLL frequency. */ u64 (*calculate_freq)(struct mlxbf_i2c_resource *corepll_res); /* Registers' address offset */ u32 smbus_master_rs_bytes_off; u32 smbus_master_fsm_off; }; struct mlxbf_i2c_priv { const struct mlxbf_i2c_chip_info *chip; struct i2c_adapter adap; struct mlxbf_i2c_resource *smbus; struct mlxbf_i2c_resource *timer; struct mlxbf_i2c_resource *mst; struct mlxbf_i2c_resource *slv; struct mlxbf_i2c_resource *mst_cause; struct mlxbf_i2c_resource *slv_cause; struct mlxbf_i2c_resource *coalesce; u64 frequency; /* Core frequency in Hz. */ int bus; /* Physical bus identifier. */ int irq; struct i2c_client *slave[MLXBF_I2C_SMBUS_SLAVE_ADDR_CNT]; u32 resource_version; }; /* Core PLL frequency. 
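 * Computed once when the first bus instance is probed and then shared
 * by all bus instances (see mlxbf_i2c_calculate_corepll_freq()).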
*/ static u64 mlxbf_i2c_corepll_frequency; static struct resource mlxbf_i2c_coalesce_tyu_params = MLXBF_I2C_RES_PARAMS(MLXBF_I2C_COALESCE_TYU_ADDR, MLXBF_I2C_COALESCE_TYU_SIZE, "COALESCE_MEM"); static struct resource mlxbf_i2c_corepll_tyu_params = MLXBF_I2C_RES_PARAMS(MLXBF_I2C_COREPLL_TYU_ADDR, MLXBF_I2C_COREPLL_TYU_SIZE, "COREPLL_MEM"); static struct resource mlxbf_i2c_corepll_yu_params = MLXBF_I2C_RES_PARAMS(MLXBF_I2C_COREPLL_YU_ADDR, MLXBF_I2C_COREPLL_YU_SIZE, "COREPLL_MEM"); static struct resource mlxbf_i2c_corepll_rsh_yu_params = MLXBF_I2C_RES_PARAMS(MLXBF_I2C_COREPLL_RSH_YU_ADDR, MLXBF_I2C_COREPLL_RSH_YU_SIZE, "COREPLL_MEM"); static struct resource mlxbf_i2c_gpio_tyu_params = MLXBF_I2C_RES_PARAMS(MLXBF_I2C_GPIO_TYU_ADDR, MLXBF_I2C_GPIO_TYU_SIZE, "GPIO_MEM"); static struct mutex mlxbf_i2c_coalesce_lock; static struct mutex mlxbf_i2c_corepll_lock; static struct mutex mlxbf_i2c_gpio_lock; static struct mlxbf_i2c_resource mlxbf_i2c_coalesce_res[] = { [MLXBF_I2C_CHIP_TYPE_1] = { .params = &mlxbf_i2c_coalesce_tyu_params, .lock = &mlxbf_i2c_coalesce_lock, .type = MLXBF_I2C_COALESCE_RES }, {} }; static struct mlxbf_i2c_resource mlxbf_i2c_corepll_res[] = { [MLXBF_I2C_CHIP_TYPE_1] = { .params = &mlxbf_i2c_corepll_tyu_params, .lock = &mlxbf_i2c_corepll_lock, .type = MLXBF_I2C_COREPLL_RES }, [MLXBF_I2C_CHIP_TYPE_2] = { .params = &mlxbf_i2c_corepll_yu_params, .lock = &mlxbf_i2c_corepll_lock, .type = MLXBF_I2C_COREPLL_RES, }, [MLXBF_I2C_CHIP_TYPE_3] = { .params = &mlxbf_i2c_corepll_rsh_yu_params, .lock = &mlxbf_i2c_corepll_lock, .type = MLXBF_I2C_COREPLL_RES, } }; static struct mlxbf_i2c_resource mlxbf_i2c_gpio_res[] = { [MLXBF_I2C_CHIP_TYPE_1] = { .params = &mlxbf_i2c_gpio_tyu_params, .lock = &mlxbf_i2c_gpio_lock, .type = MLXBF_I2C_GPIO_RES }, {} }; static u8 mlxbf_i2c_bus_count; static struct mutex mlxbf_i2c_bus_lock; /* * Function to poll a set of bits at a specific address; it checks whether * the bits are equal to zero when eq_zero is set to 'true', and not equal * to zero when eq_zero is set to 'false'. * Note that the timeout is given in microseconds. */ static u32 mlxbf_i2c_poll(void __iomem *io, u32 addr, u32 mask, bool eq_zero, u32 timeout) { u32 bits; timeout = (timeout / MLXBF_I2C_POLL_FREQ_IN_USEC) + 1; do { bits = readl(io + addr) & mask; if (eq_zero ? bits == 0 : bits != 0) return eq_zero ? 1 : bits; udelay(MLXBF_I2C_POLL_FREQ_IN_USEC); } while (timeout-- != 0); return 0; } /* * SW must make sure that the SMBus Master GW is idle before starting * a transaction. Accordingly, this function polls the Master FSM stop * bit; it returns false when the bit is asserted, true if not. */ static bool mlxbf_i2c_smbus_master_wait_for_idle(struct mlxbf_i2c_priv *priv) { u32 mask = MLXBF_I2C_SMBUS_MASTER_FSM_STOP_MASK; u32 addr = priv->chip->smbus_master_fsm_off; u32 timeout = MLXBF_I2C_SMBUS_TIMEOUT; if (mlxbf_i2c_poll(priv->mst->io, addr, mask, true, timeout)) return true; return false; } /* * wait for the lock to be released before acquiring it. 
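 * Note that reading MLXBF_I2C_SMBUS_MASTER_GW sets the lock bit as a
 * side effect, so polling it for zero both waits for and takes the
 * lock (see the comment in mlxbf_i2c_smbus_start_transaction()).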
 */
static bool mlxbf_i2c_smbus_master_lock(struct mlxbf_i2c_priv *priv)
{
	if (mlxbf_i2c_poll(priv->mst->io, MLXBF_I2C_SMBUS_MASTER_GW,
			   MLXBF_I2C_MASTER_LOCK_BIT, true,
			   MLXBF_I2C_SMBUS_LOCK_POLL_TIMEOUT))
		return true;

	return false;
}

static void mlxbf_i2c_smbus_master_unlock(struct mlxbf_i2c_priv *priv)
{
	/* Clear the GW to release the lock. */
	writel(0, priv->mst->io + MLXBF_I2C_SMBUS_MASTER_GW);
}

static bool mlxbf_i2c_smbus_transaction_success(u32 master_status,
						u32 cause_status)
{
	/*
	 * When the transaction ended with STOP, all bytes were transmitted
	 * and no NACK was received, then the transaction completed
	 * successfully. On the other hand, when the GW is configured with
	 * the stop bit de-asserted then the SMBus expects the following GW
	 * configuration for transfer continuation.
	 */
	if ((cause_status & MLXBF_I2C_CAUSE_WAIT_FOR_FW_DATA) ||
	    ((cause_status & MLXBF_I2C_CAUSE_TRANSACTION_ENDED) &&
	     (master_status & MLXBF_I2C_SMBUS_STATUS_BYTE_CNT_DONE) &&
	     !(master_status & MLXBF_I2C_SMBUS_STATUS_NACK_RCV)))
		return true;

	return false;
}

/*
 * Poll SMBus master status and return the transaction status,
 * i.e. whether it succeeded or failed. I2C and SMBus fault codes
 * are returned as negative numbers from most calls, with zero
 * or some positive number indicating a non-fault return.
 */
static int mlxbf_i2c_smbus_check_status(struct mlxbf_i2c_priv *priv)
{
	u32 master_status_bits;
	u32 cause_status_bits;

	/*
	 * GW busy bit is raised by the driver and cleared by the HW
	 * when the transaction is completed. The busy bit is a good
	 * indicator of transaction status. So poll the busy bit, and
	 * then read the cause and master status bits to determine if
	 * errors occurred during the transaction.
	 */
	mlxbf_i2c_poll(priv->mst->io, MLXBF_I2C_SMBUS_MASTER_GW,
		       MLXBF_I2C_MASTER_BUSY_BIT, true,
		       MLXBF_I2C_SMBUS_TIMEOUT);

	/* Read cause status bits. */
	cause_status_bits = readl(priv->mst_cause->io +
					MLXBF_I2C_CAUSE_ARBITER);
	cause_status_bits &= MLXBF_I2C_CAUSE_MASTER_ARBITER_BITS_MASK;

	/*
	 * Parse both Cause and Master GW bits, then return transaction status.
	 */
	master_status_bits = readl(priv->mst->io +
					MLXBF_I2C_SMBUS_MASTER_STATUS);
	master_status_bits &= MLXBF_I2C_SMBUS_MASTER_STATUS_MASK;

	if (mlxbf_i2c_smbus_transaction_success(master_status_bits,
						cause_status_bits))
		return 0;

	/*
	 * In case of a timeout on GW busy, the ISR will clear the busy bit,
	 * but the 'transaction ended' cause bit will not be set, so the
	 * transaction fails. Then, we must check the Master GW status bits.
	 */
	if ((master_status_bits & MLXBF_I2C_SMBUS_MASTER_STATUS_ERROR) &&
	    (cause_status_bits & (MLXBF_I2C_CAUSE_TRANSACTION_ENDED |
				  MLXBF_I2C_CAUSE_M_GW_BUSY_FALL)))
		return -EIO;

	if (cause_status_bits & MLXBF_I2C_CAUSE_MASTER_STATUS_ERROR)
		return -EAGAIN;

	return -ETIMEDOUT;
}

static void mlxbf_i2c_smbus_write_data(struct mlxbf_i2c_priv *priv,
				       const u8 *data, u8 length, u32 addr,
				       bool is_master)
{
	u8 offset, aligned_length;
	u32 data32;

	aligned_length = round_up(length, 4);

	/*
	 * Copy data bytes from 4-byte aligned source buffer.
	 * Data copied to the Master GW Data Descriptor MUST be shifted
	 * left so the data starts at the MSB of the descriptor registers
	 * as required by the underlying hardware. Enable byte swapping
	 * when writing data bytes to the 32 * 32-bit HW Data registers
	 * a.k.a Master GW Data Descriptor.
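 * For example, on a little-endian host the buffer bytes {0x11, 0x22,
 * 0x33, 0x44} end up as the register value 0x11223344, i.e. the first
 * data byte occupies the most significant byte lane (illustrative
 * values).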
*/ for (offset = 0; offset < aligned_length; offset += sizeof(u32)) { data32 = *((u32 *)(data + offset)); if (is_master) iowrite32be(data32, priv->mst->io + addr + offset); else iowrite32be(data32, priv->slv->io + addr + offset); } } static void mlxbf_i2c_smbus_read_data(struct mlxbf_i2c_priv *priv, u8 *data, u8 length, u32 addr, bool is_master) { u32 data32, mask; u8 byte, offset; mask = sizeof(u32) - 1; /* * Data bytes in the Master GW Data Descriptor are shifted left * so the data starts at the MSB of the descriptor registers as * set by the underlying hardware. Enable byte swapping while * reading data bytes from the 32 * 32-bit HW Data registers * a.k.a Master GW Data Descriptor. */ for (offset = 0; offset < (length & ~mask); offset += sizeof(u32)) { if (is_master) data32 = ioread32be(priv->mst->io + addr + offset); else data32 = ioread32be(priv->slv->io + addr + offset); *((u32 *)(data + offset)) = data32; } if (!(length & mask)) return; if (is_master) data32 = ioread32be(priv->mst->io + addr + offset); else data32 = ioread32be(priv->slv->io + addr + offset); for (byte = 0; byte < (length & mask); byte++) { data[offset + byte] = data32 & GENMASK(7, 0); data32 = ror32(data32, MLXBF_I2C_SHIFT_8); } } static int mlxbf_i2c_smbus_enable(struct mlxbf_i2c_priv *priv, u8 slave, u8 len, u8 block_en, u8 pec_en, bool read) { u32 command; /* Set Master GW control word. */ if (read) { command = MLXBF_I2C_MASTER_ENABLE_READ; command |= rol32(len, MLXBF_I2C_MASTER_READ_SHIFT); } else { command = MLXBF_I2C_MASTER_ENABLE_WRITE; command |= rol32(len, MLXBF_I2C_MASTER_WRITE_SHIFT); } command |= rol32(slave, MLXBF_I2C_MASTER_SLV_ADDR_SHIFT); command |= rol32(block_en, MLXBF_I2C_MASTER_PARSE_EXP_SHIFT); command |= rol32(pec_en, MLXBF_I2C_MASTER_SEND_PEC_SHIFT); /* Clear status bits. */ writel(0x0, priv->mst->io + MLXBF_I2C_SMBUS_MASTER_STATUS); /* Set the cause data. */ writel(~0x0, priv->mst_cause->io + MLXBF_I2C_CAUSE_OR_CLEAR); /* Zero PEC byte. */ writel(0x0, priv->mst->io + MLXBF_I2C_SMBUS_MASTER_PEC); /* Zero byte count. */ writel(0x0, priv->mst->io + priv->chip->smbus_master_rs_bytes_off); /* GW activation. */ writel(command, priv->mst->io + MLXBF_I2C_SMBUS_MASTER_GW); /* * Poll master status and check status bits. An ACK is sent when * completing writing data to the bus (Master 'byte_count_done' bit * is set to 1). */ return mlxbf_i2c_smbus_check_status(priv); } static int mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv, struct mlxbf_i2c_smbus_request *request) { u8 data_desc[MLXBF_I2C_MASTER_DATA_DESC_SIZE] = { 0 }; u8 op_idx, data_idx, data_len, write_len, read_len; struct mlxbf_i2c_smbus_operation *operation; u8 read_en, write_en, block_en, pec_en; u8 slave, flags, addr; u8 *read_buf; int ret = 0; if (request->operation_cnt > MLXBF_I2C_SMBUS_MAX_OP_CNT) return -EINVAL; read_buf = NULL; data_idx = 0; read_en = 0; write_en = 0; write_len = 0; read_len = 0; block_en = 0; pec_en = 0; slave = request->slave & GENMASK(6, 0); addr = slave << 1; /* * Try to acquire the smbus gw lock before any reads of the GW register since * a read sets the lock. */ if (WARN_ON(!mlxbf_i2c_smbus_master_lock(priv))) return -EBUSY; /* Check whether the HW is idle */ if (WARN_ON(!mlxbf_i2c_smbus_master_wait_for_idle(priv))) { ret = -EBUSY; goto out_unlock; } /* Set first byte. 
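 * The first descriptor byte is the 8-bit wire address, i.e. the 7-bit
 * slave address shifted left by one (computed above).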
*/ data_desc[data_idx++] = addr; for (op_idx = 0; op_idx < request->operation_cnt; op_idx++) { operation = &request->operation[op_idx]; flags = operation->flags; /* * Note that read and write operations might be handled by a * single command. If the MLXBF_I2C_F_SMBUS_OPERATION is set * then write command byte and set the optional SMBus specific * bits such as block_en and pec_en. These bits MUST be * submitted by the first operation only. */ if (op_idx == 0 && flags & MLXBF_I2C_F_SMBUS_OPERATION) { block_en = flags & MLXBF_I2C_F_SMBUS_BLOCK; pec_en = flags & MLXBF_I2C_F_SMBUS_PEC; } if (flags & MLXBF_I2C_F_WRITE) { write_en = 1; write_len += operation->length; if (data_idx + operation->length > MLXBF_I2C_MASTER_DATA_DESC_SIZE) { ret = -ENOBUFS; goto out_unlock; } memcpy(data_desc + data_idx, operation->buffer, operation->length); data_idx += operation->length; } /* * We assume that read operations are performed only once per * SMBus transaction. *TBD* protect this statement so it won't * be executed twice? or return an error if we try to read more * than once? */ if (flags & MLXBF_I2C_F_READ) { read_en = 1; /* Subtract 1 as required by HW. */ read_len = operation->length - 1; read_buf = operation->buffer; } } /* Set Master GW data descriptor. */ data_len = write_len + 1; /* Add one byte of the slave address. */ /* * Note that data_len cannot be 0. Indeed, the slave address byte * must be written to the data registers. */ mlxbf_i2c_smbus_write_data(priv, (const u8 *)data_desc, data_len, MLXBF_I2C_MASTER_DATA_DESC_ADDR, true); if (write_en) { ret = mlxbf_i2c_smbus_enable(priv, slave, write_len, block_en, pec_en, 0); if (ret) goto out_unlock; } if (read_en) { /* Write slave address to Master GW data descriptor. */ mlxbf_i2c_smbus_write_data(priv, (const u8 *)&addr, 1, MLXBF_I2C_MASTER_DATA_DESC_ADDR, true); ret = mlxbf_i2c_smbus_enable(priv, slave, read_len, block_en, pec_en, 1); if (!ret) { /* Get Master GW data descriptor. */ mlxbf_i2c_smbus_read_data(priv, data_desc, read_len + 1, MLXBF_I2C_MASTER_DATA_DESC_ADDR, true); /* Get data from Master GW data descriptor. */ memcpy(read_buf, data_desc, read_len + 1); } /* * After a read operation the SMBus FSM ps (present state) * needs to be 'manually' reset. This should be removed in * next tag integration. */ writel(MLXBF_I2C_SMBUS_MASTER_FSM_PS_STATE_MASK, priv->mst->io + priv->chip->smbus_master_fsm_off); } out_unlock: mlxbf_i2c_smbus_master_unlock(priv); return ret; } /* I2C SMBus protocols. */ static void mlxbf_i2c_smbus_quick_command(struct mlxbf_i2c_smbus_request *request, u8 read) { request->operation_cnt = MLXBF_I2C_SMBUS_OP_CNT_1; request->operation[0].length = 0; request->operation[0].flags = MLXBF_I2C_F_WRITE; request->operation[0].flags |= read ? MLXBF_I2C_F_READ : 0; } static void mlxbf_i2c_smbus_byte_func(struct mlxbf_i2c_smbus_request *request, u8 *data, bool read, bool pec_check) { request->operation_cnt = MLXBF_I2C_SMBUS_OP_CNT_1; request->operation[0].length = 1; request->operation[0].length += pec_check; request->operation[0].flags = MLXBF_I2C_F_SMBUS_OPERATION; request->operation[0].flags |= read ? MLXBF_I2C_F_READ : MLXBF_I2C_F_WRITE; request->operation[0].flags |= pec_check ? 
MLXBF_I2C_F_SMBUS_PEC : 0; request->operation[0].buffer = data; } static void mlxbf_i2c_smbus_data_byte_func(struct mlxbf_i2c_smbus_request *request, u8 *command, u8 *data, bool read, bool pec_check) { request->operation_cnt = MLXBF_I2C_SMBUS_OP_CNT_2; request->operation[0].length = 1; request->operation[0].flags = MLXBF_I2C_F_SMBUS_OPERATION | MLXBF_I2C_F_WRITE; request->operation[0].flags |= pec_check ? MLXBF_I2C_F_SMBUS_PEC : 0; request->operation[0].buffer = command; request->operation[1].length = 1; request->operation[1].length += pec_check; request->operation[1].flags = read ? MLXBF_I2C_F_READ : MLXBF_I2C_F_WRITE; request->operation[1].buffer = data; } static void mlxbf_i2c_smbus_data_word_func(struct mlxbf_i2c_smbus_request *request, u8 *command, u8 *data, bool read, bool pec_check) { request->operation_cnt = MLXBF_I2C_SMBUS_OP_CNT_2; request->operation[0].length = 1; request->operation[0].flags = MLXBF_I2C_F_SMBUS_OPERATION | MLXBF_I2C_F_WRITE; request->operation[0].flags |= pec_check ? MLXBF_I2C_F_SMBUS_PEC : 0; request->operation[0].buffer = command; request->operation[1].length = 2; request->operation[1].length += pec_check; request->operation[1].flags = read ? MLXBF_I2C_F_READ : MLXBF_I2C_F_WRITE; request->operation[1].buffer = data; } static void mlxbf_i2c_smbus_i2c_block_func(struct mlxbf_i2c_smbus_request *request, u8 *command, u8 *data, u8 *data_len, bool read, bool pec_check) { request->operation_cnt = MLXBF_I2C_SMBUS_OP_CNT_2; request->operation[0].length = 1; request->operation[0].flags = MLXBF_I2C_F_SMBUS_OPERATION | MLXBF_I2C_F_WRITE; request->operation[0].flags |= pec_check ? MLXBF_I2C_F_SMBUS_PEC : 0; request->operation[0].buffer = command; /* * As specified in the standard, the max number of bytes to read/write * per block operation is 32 bytes. In Golan code, the controller can * read up to 128 bytes and write up to 127 bytes. */ request->operation[1].length = (*data_len + pec_check > I2C_SMBUS_BLOCK_MAX) ? I2C_SMBUS_BLOCK_MAX : *data_len + pec_check; request->operation[1].flags = read ? MLXBF_I2C_F_READ : MLXBF_I2C_F_WRITE; /* * Skip the first data byte, which corresponds to the number of bytes * to read/write. */ request->operation[1].buffer = data + 1; *data_len = request->operation[1].length; /* Set the number of byte to read. This will be used by userspace. */ if (read) data[0] = *data_len; } static void mlxbf_i2c_smbus_block_func(struct mlxbf_i2c_smbus_request *request, u8 *command, u8 *data, u8 *data_len, bool read, bool pec_check) { request->operation_cnt = MLXBF_I2C_SMBUS_OP_CNT_2; request->operation[0].length = 1; request->operation[0].flags = MLXBF_I2C_F_SMBUS_OPERATION | MLXBF_I2C_F_WRITE; request->operation[0].flags |= MLXBF_I2C_F_SMBUS_BLOCK; request->operation[0].flags |= pec_check ? MLXBF_I2C_F_SMBUS_PEC : 0; request->operation[0].buffer = command; request->operation[1].length = (*data_len + pec_check > I2C_SMBUS_BLOCK_MAX) ? I2C_SMBUS_BLOCK_MAX : *data_len + pec_check; request->operation[1].flags = read ? MLXBF_I2C_F_READ : MLXBF_I2C_F_WRITE; request->operation[1].buffer = data + 1; *data_len = request->operation[1].length; /* Set the number of bytes to read. This will be used by userspace. 
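 * (data[0] carries the SMBus block count byte that the I2C core hands
 * back to the caller.)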
*/ if (read) data[0] = *data_len; } static void mlxbf_i2c_smbus_process_call_func(struct mlxbf_i2c_smbus_request *request, u8 *command, u8 *data, bool pec_check) { request->operation_cnt = MLXBF_I2C_SMBUS_OP_CNT_3; request->operation[0].length = 1; request->operation[0].flags = MLXBF_I2C_F_SMBUS_OPERATION | MLXBF_I2C_F_WRITE; request->operation[0].flags |= MLXBF_I2C_F_SMBUS_BLOCK; request->operation[0].flags |= pec_check ? MLXBF_I2C_F_SMBUS_PEC : 0; request->operation[0].buffer = command; request->operation[1].length = 2; request->operation[1].flags = MLXBF_I2C_F_WRITE; request->operation[1].buffer = data; request->operation[2].length = 3; request->operation[2].flags = MLXBF_I2C_F_READ; request->operation[2].buffer = data; } static void mlxbf_i2c_smbus_blk_process_call_func(struct mlxbf_i2c_smbus_request *request, u8 *command, u8 *data, u8 *data_len, bool pec_check) { u32 length; request->operation_cnt = MLXBF_I2C_SMBUS_OP_CNT_3; request->operation[0].length = 1; request->operation[0].flags = MLXBF_I2C_F_SMBUS_OPERATION | MLXBF_I2C_F_WRITE; request->operation[0].flags |= MLXBF_I2C_F_SMBUS_BLOCK; request->operation[0].flags |= (pec_check) ? MLXBF_I2C_F_SMBUS_PEC : 0; request->operation[0].buffer = command; length = (*data_len + pec_check > I2C_SMBUS_BLOCK_MAX) ? I2C_SMBUS_BLOCK_MAX : *data_len + pec_check; request->operation[1].length = length - pec_check; request->operation[1].flags = MLXBF_I2C_F_WRITE; request->operation[1].buffer = data; request->operation[2].length = length; request->operation[2].flags = MLXBF_I2C_F_READ; request->operation[2].buffer = data; *data_len = length; /* including PEC byte. */ } /* Initialization functions. */ static bool mlxbf_i2c_has_chip_type(struct mlxbf_i2c_priv *priv, u8 type) { return priv->chip->type == type; } static struct mlxbf_i2c_resource * mlxbf_i2c_get_shared_resource(struct mlxbf_i2c_priv *priv, u8 type) { const struct mlxbf_i2c_chip_info *chip = priv->chip; struct mlxbf_i2c_resource *res; u8 res_idx = 0; for (res_idx = 0; res_idx < MLXBF_I2C_SHARED_RES_MAX; res_idx++) { res = chip->shared_res[res_idx]; if (res && res->type == type) return res; } return NULL; } static int mlxbf_i2c_init_resource(struct platform_device *pdev, struct mlxbf_i2c_resource **res, u8 type) { struct mlxbf_i2c_resource *tmp_res; struct device *dev = &pdev->dev; if (!res || *res || type >= MLXBF_I2C_END_RES) return -EINVAL; tmp_res = devm_kzalloc(dev, sizeof(struct mlxbf_i2c_resource), GFP_KERNEL); if (!tmp_res) return -ENOMEM; tmp_res->io = devm_platform_get_and_ioremap_resource(pdev, type, &tmp_res->params); if (IS_ERR(tmp_res->io)) { devm_kfree(dev, tmp_res); return PTR_ERR(tmp_res->io); } tmp_res->type = type; *res = tmp_res; return 0; } static u32 mlxbf_i2c_get_ticks(struct mlxbf_i2c_priv *priv, u64 nanoseconds, bool minimum) { u64 frequency; u32 ticks; /* * Compute ticks as follow: * * Ticks * Time = --------- x 10^9 => Ticks = Time x Frequency x 10^-9 * Frequency */ frequency = priv->frequency; ticks = (nanoseconds * frequency) / MLXBF_I2C_FREQUENCY_1GHZ; /* * The number of ticks is rounded down and if minimum is equal to 1 * then add one tick. 
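 * For example (illustrative): 5000 ns at a 400 MHz core frequency
 * yields 5000 * 400000000 / 10^9 = 2000 ticks.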
*/ if (minimum) ticks++; return ticks; } static u32 mlxbf_i2c_set_timer(struct mlxbf_i2c_priv *priv, u64 nsec, bool opt, u32 mask, u8 shift) { u32 val = (mlxbf_i2c_get_ticks(priv, nsec, opt) & mask) << shift; return val; } static void mlxbf_i2c_set_timings(struct mlxbf_i2c_priv *priv, const struct mlxbf_i2c_timings *timings) { u32 timer; timer = mlxbf_i2c_set_timer(priv, timings->scl_high, false, MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_0); timer |= mlxbf_i2c_set_timer(priv, timings->scl_low, false, MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_16); writel(timer, priv->timer->io + MLXBF_I2C_SMBUS_TIMER_SCL_LOW_SCL_HIGH); timer = mlxbf_i2c_set_timer(priv, timings->sda_rise, false, MLXBF_I2C_MASK_8, MLXBF_I2C_SHIFT_0); timer |= mlxbf_i2c_set_timer(priv, timings->sda_fall, false, MLXBF_I2C_MASK_8, MLXBF_I2C_SHIFT_8); timer |= mlxbf_i2c_set_timer(priv, timings->scl_rise, false, MLXBF_I2C_MASK_8, MLXBF_I2C_SHIFT_16); timer |= mlxbf_i2c_set_timer(priv, timings->scl_fall, false, MLXBF_I2C_MASK_8, MLXBF_I2C_SHIFT_24); writel(timer, priv->timer->io + MLXBF_I2C_SMBUS_TIMER_FALL_RISE_SPIKE); timer = mlxbf_i2c_set_timer(priv, timings->hold_start, true, MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_0); timer |= mlxbf_i2c_set_timer(priv, timings->hold_data, true, MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_16); writel(timer, priv->timer->io + MLXBF_I2C_SMBUS_TIMER_THOLD); timer = mlxbf_i2c_set_timer(priv, timings->setup_start, true, MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_0); timer |= mlxbf_i2c_set_timer(priv, timings->setup_stop, true, MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_16); writel(timer, priv->timer->io + MLXBF_I2C_SMBUS_TIMER_TSETUP_START_STOP); timer = mlxbf_i2c_set_timer(priv, timings->setup_data, true, MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_0); writel(timer, priv->timer->io + MLXBF_I2C_SMBUS_TIMER_TSETUP_DATA); timer = mlxbf_i2c_set_timer(priv, timings->buf, false, MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_0); timer |= mlxbf_i2c_set_timer(priv, timings->thigh_max, false, MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_16); writel(timer, priv->timer->io + MLXBF_I2C_SMBUS_THIGH_MAX_TBUF); timer = timings->timeout; writel(timer, priv->timer->io + MLXBF_I2C_SMBUS_SCL_LOW_TIMEOUT); } enum mlxbf_i2c_timings_config { MLXBF_I2C_TIMING_CONFIG_100KHZ, MLXBF_I2C_TIMING_CONFIG_400KHZ, MLXBF_I2C_TIMING_CONFIG_1000KHZ, }; /* * Note that the mlxbf_i2c_timings->timeout value is not related to the * bus frequency, it is impacted by the time it takes the driver to * complete data transmission before transaction abort. 
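 * (Accordingly, all three timing configurations below share the same
 * timeout value.)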
*/ static const struct mlxbf_i2c_timings mlxbf_i2c_timings[] = { [MLXBF_I2C_TIMING_CONFIG_100KHZ] = { .scl_high = 4810, .scl_low = 5000, .hold_start = 4000, .setup_start = 4800, .setup_stop = 4000, .setup_data = 250, .sda_rise = 50, .sda_fall = 50, .scl_rise = 50, .scl_fall = 50, .hold_data = 300, .buf = 20000, .thigh_max = 5000, .timeout = 106500 }, [MLXBF_I2C_TIMING_CONFIG_400KHZ] = { .scl_high = 1011, .scl_low = 1300, .hold_start = 600, .setup_start = 700, .setup_stop = 600, .setup_data = 100, .sda_rise = 50, .sda_fall = 50, .scl_rise = 50, .scl_fall = 50, .hold_data = 300, .buf = 20000, .thigh_max = 5000, .timeout = 106500 }, [MLXBF_I2C_TIMING_CONFIG_1000KHZ] = { .scl_high = 600, .scl_low = 1300, .hold_start = 600, .setup_start = 600, .setup_stop = 600, .setup_data = 100, .sda_rise = 50, .sda_fall = 50, .scl_rise = 50, .scl_fall = 50, .hold_data = 300, .buf = 20000, .thigh_max = 5000, .timeout = 106500 } }; static int mlxbf_i2c_init_timings(struct platform_device *pdev, struct mlxbf_i2c_priv *priv) { enum mlxbf_i2c_timings_config config_idx; struct device *dev = &pdev->dev; u32 config_khz; int ret; ret = device_property_read_u32(dev, "clock-frequency", &config_khz); if (ret < 0) config_khz = I2C_MAX_STANDARD_MODE_FREQ; switch (config_khz) { default: /* Default settings is 100 KHz. */ pr_warn("Illegal value %d: defaulting to 100 KHz\n", config_khz); fallthrough; case I2C_MAX_STANDARD_MODE_FREQ: config_idx = MLXBF_I2C_TIMING_CONFIG_100KHZ; break; case I2C_MAX_FAST_MODE_FREQ: config_idx = MLXBF_I2C_TIMING_CONFIG_400KHZ; break; case I2C_MAX_FAST_MODE_PLUS_FREQ: config_idx = MLXBF_I2C_TIMING_CONFIG_1000KHZ; break; } mlxbf_i2c_set_timings(priv, &mlxbf_i2c_timings[config_idx]); return 0; } static int mlxbf_i2c_get_gpio(struct platform_device *pdev, struct mlxbf_i2c_priv *priv) { struct mlxbf_i2c_resource *gpio_res; struct device *dev = &pdev->dev; struct resource *params; resource_size_t size; gpio_res = mlxbf_i2c_get_shared_resource(priv, MLXBF_I2C_GPIO_RES); if (!gpio_res) return -EPERM; /* * The GPIO region in TYU space is shared among I2C busses. * This function MUST be serialized to avoid racing when * claiming the memory region and/or setting up the GPIO. */ lockdep_assert_held(gpio_res->lock); /* Check whether the memory map exist. */ if (gpio_res->io) return 0; params = gpio_res->params; size = resource_size(params); if (!devm_request_mem_region(dev, params->start, size, params->name)) return -EFAULT; gpio_res->io = devm_ioremap(dev, params->start, size); if (!gpio_res->io) { devm_release_mem_region(dev, params->start, size); return -ENOMEM; } return 0; } static int mlxbf_i2c_release_gpio(struct platform_device *pdev, struct mlxbf_i2c_priv *priv) { struct mlxbf_i2c_resource *gpio_res; struct device *dev = &pdev->dev; struct resource *params; gpio_res = mlxbf_i2c_get_shared_resource(priv, MLXBF_I2C_GPIO_RES); if (!gpio_res) return 0; mutex_lock(gpio_res->lock); if (gpio_res->io) { /* Release the GPIO resource. */ params = gpio_res->params; devm_iounmap(dev, gpio_res->io); devm_release_mem_region(dev, params->start, resource_size(params)); } mutex_unlock(gpio_res->lock); return 0; } static int mlxbf_i2c_get_corepll(struct platform_device *pdev, struct mlxbf_i2c_priv *priv) { struct mlxbf_i2c_resource *corepll_res; struct device *dev = &pdev->dev; struct resource *params; resource_size_t size; corepll_res = mlxbf_i2c_get_shared_resource(priv, MLXBF_I2C_COREPLL_RES); if (!corepll_res) return -EPERM; /* * The COREPLL region in TYU space is shared among I2C busses. 
* This function MUST be serialized to avoid racing when * claiming the memory region. */ lockdep_assert_held(corepll_res->lock); /* Check whether the memory map exist. */ if (corepll_res->io) return 0; params = corepll_res->params; size = resource_size(params); if (!devm_request_mem_region(dev, params->start, size, params->name)) return -EFAULT; corepll_res->io = devm_ioremap(dev, params->start, size); if (!corepll_res->io) { devm_release_mem_region(dev, params->start, size); return -ENOMEM; } return 0; } static int mlxbf_i2c_release_corepll(struct platform_device *pdev, struct mlxbf_i2c_priv *priv) { struct mlxbf_i2c_resource *corepll_res; struct device *dev = &pdev->dev; struct resource *params; corepll_res = mlxbf_i2c_get_shared_resource(priv, MLXBF_I2C_COREPLL_RES); mutex_lock(corepll_res->lock); if (corepll_res->io) { /* Release the CorePLL resource. */ params = corepll_res->params; devm_iounmap(dev, corepll_res->io); devm_release_mem_region(dev, params->start, resource_size(params)); } mutex_unlock(corepll_res->lock); return 0; } static int mlxbf_i2c_init_master(struct platform_device *pdev, struct mlxbf_i2c_priv *priv) { struct mlxbf_i2c_resource *gpio_res; struct device *dev = &pdev->dev; u32 config_reg; int ret; /* This configuration is only needed for BlueField 1. */ if (!mlxbf_i2c_has_chip_type(priv, MLXBF_I2C_CHIP_TYPE_1)) return 0; gpio_res = mlxbf_i2c_get_shared_resource(priv, MLXBF_I2C_GPIO_RES); if (!gpio_res) return -EPERM; /* * The GPIO region in TYU space is shared among I2C busses. * This function MUST be serialized to avoid racing when * claiming the memory region and/or setting up the GPIO. */ mutex_lock(gpio_res->lock); ret = mlxbf_i2c_get_gpio(pdev, priv); if (ret < 0) { dev_err(dev, "Failed to get gpio resource"); mutex_unlock(gpio_res->lock); return ret; } /* * TYU - Configuration for GPIO pins. Those pins must be asserted in * MLXBF_I2C_GPIO_0_FUNC_EN_0, i.e. GPIO 0 is controlled by HW, and must * be reset in MLXBF_I2C_GPIO_0_FORCE_OE_EN, i.e. GPIO_OE will be driven * instead of HW_OE. * For now, we do not reset the GPIO state when the driver is removed. * First, it is not necessary to disable the bus since we are using * the same busses. Then, some busses might be shared among Linux and * platform firmware; disabling the bus might compromise the system * functionality. */ config_reg = readl(gpio_res->io + MLXBF_I2C_GPIO_0_FUNC_EN_0); config_reg = MLXBF_I2C_GPIO_SMBUS_GW_ASSERT_PINS(priv->bus, config_reg); writel(config_reg, gpio_res->io + MLXBF_I2C_GPIO_0_FUNC_EN_0); config_reg = readl(gpio_res->io + MLXBF_I2C_GPIO_0_FORCE_OE_EN); config_reg = MLXBF_I2C_GPIO_SMBUS_GW_RESET_PINS(priv->bus, config_reg); writel(config_reg, gpio_res->io + MLXBF_I2C_GPIO_0_FORCE_OE_EN); mutex_unlock(gpio_res->lock); return 0; } static u64 mlxbf_i2c_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res) { u64 core_frequency; u8 core_od, core_r; u32 corepll_val; u16 core_f; corepll_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG1); /* Get Core PLL configuration bits. */ core_f = FIELD_GET(MLXBF_I2C_COREPLL_CORE_F_TYU_MASK, corepll_val); core_od = FIELD_GET(MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK, corepll_val); core_r = FIELD_GET(MLXBF_I2C_COREPLL_CORE_R_TYU_MASK, corepll_val); /* * Compute PLL output frequency as follow: * * CORE_F + 1 * PLL_OUT_FREQ = PLL_IN_FREQ * ---------------------------- * (CORE_R + 1) * (CORE_OD + 1) * * Where PLL_OUT_FREQ and PLL_IN_FREQ refer to CoreFrequency * and PadFrequency, respectively. 
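 * Illustrative example with hypothetical register values: CORE_F = 63,
 * CORE_R = 24 and CORE_OD = 0 give 156.25 MHz * 64 / (25 * 1) =
 * 400 MHz.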
*/ core_frequency = MLXBF_I2C_PLL_IN_FREQ * (++core_f); core_frequency /= (++core_r) * (++core_od); return core_frequency; } static u64 mlxbf_i2c_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res) { u32 corepll_reg1_val, corepll_reg2_val; u64 corepll_frequency; u8 core_od, core_r; u32 core_f; corepll_reg1_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG1); corepll_reg2_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG2); /* Get Core PLL configuration bits */ core_f = FIELD_GET(MLXBF_I2C_COREPLL_CORE_F_YU_MASK, corepll_reg1_val); core_r = FIELD_GET(MLXBF_I2C_COREPLL_CORE_R_YU_MASK, corepll_reg1_val); core_od = FIELD_GET(MLXBF_I2C_COREPLL_CORE_OD_YU_MASK, corepll_reg2_val); /* * Compute PLL output frequency as follow: * * CORE_F / 16384 * PLL_OUT_FREQ = PLL_IN_FREQ * ---------------------------- * (CORE_R + 1) * (CORE_OD + 1) * * Where PLL_OUT_FREQ and PLL_IN_FREQ refer to CoreFrequency * and PadFrequency, respectively. */ corepll_frequency = (MLXBF_I2C_PLL_IN_FREQ * core_f) / MLNXBF_I2C_COREPLL_CONST; corepll_frequency /= (++core_r) * (++core_od); return corepll_frequency; } static int mlxbf_i2c_calculate_corepll_freq(struct platform_device *pdev, struct mlxbf_i2c_priv *priv) { const struct mlxbf_i2c_chip_info *chip = priv->chip; struct mlxbf_i2c_resource *corepll_res; struct device *dev = &pdev->dev; u64 *freq = &priv->frequency; int ret; corepll_res = mlxbf_i2c_get_shared_resource(priv, MLXBF_I2C_COREPLL_RES); if (!corepll_res) return -EPERM; /* * First, check whether the TYU core Clock frequency is set. * The TYU core frequency is the same for all I2C busses; when * the first device gets probed the frequency is determined and * stored into a globally visible variable. So, first of all, * check whether the frequency is already set. Here, we assume * that the frequency is expected to be greater than 0. */ mutex_lock(corepll_res->lock); if (!mlxbf_i2c_corepll_frequency) { if (!chip->calculate_freq) { mutex_unlock(corepll_res->lock); return -EPERM; } ret = mlxbf_i2c_get_corepll(pdev, priv); if (ret < 0) { dev_err(dev, "Failed to get corePLL resource"); mutex_unlock(corepll_res->lock); return ret; } mlxbf_i2c_corepll_frequency = chip->calculate_freq(corepll_res); } mutex_unlock(corepll_res->lock); *freq = mlxbf_i2c_corepll_frequency; return 0; } static int mlxbf_i2c_slave_enable(struct mlxbf_i2c_priv *priv, struct i2c_client *slave) { u8 reg, reg_cnt, byte, addr_tmp; u32 slave_reg, slave_reg_tmp; if (!priv) return -EPERM; reg_cnt = MLXBF_I2C_SMBUS_SLAVE_ADDR_CNT >> 2; /* * Read the slave registers. There are 4 * 32-bit slave registers. * Each slave register can hold up to 4 * 8-bit slave configuration: * 1) A 7-bit address * 2) And a status bit (1 if enabled, 0 if not). * Look for the next available slave register slot. */ for (reg = 0; reg < reg_cnt; reg++) { slave_reg = readl(priv->slv->io + MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG + reg * 0x4); /* * Each register holds 4 slave addresses. So, we have to keep * the byte order consistent with the value read in order to * update the register correctly, if needed. */ slave_reg_tmp = slave_reg; for (byte = 0; byte < 4; byte++) { addr_tmp = slave_reg_tmp & GENMASK(7, 0); /* * If an enable bit is not set in the * MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG register, then the * slave address slot associated with that bit is * free. So set the enable bit and write the * slave address bits. 
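 * Illustrative layout: a register byte of 0xd5 encodes the enabled
 * 7-bit address 0x55, i.e. 0x55 | MLXBF_I2C_SMBUS_SLAVE_ADDR_EN_BIT.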
 */
			if (!(addr_tmp & MLXBF_I2C_SMBUS_SLAVE_ADDR_EN_BIT)) {
				slave_reg &= ~(MLXBF_I2C_SMBUS_SLAVE_ADDR_MASK <<
						(byte * 8));
				slave_reg |= (slave->addr << (byte * 8));
				slave_reg |= MLXBF_I2C_SMBUS_SLAVE_ADDR_EN_BIT <<
						(byte * 8);
				writel(slave_reg, priv->slv->io +
					MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG +
					(reg * 0x4));

				/*
				 * Set the slave at the corresponding index.
				 */
				priv->slave[(reg * 4) + byte] = slave;

				return 0;
			}

			/* Parse next byte. */
			slave_reg_tmp >>= 8;
		}
	}

	return -EBUSY;
}

static int mlxbf_i2c_slave_disable(struct mlxbf_i2c_priv *priv, u8 addr)
{
	u8 addr_tmp, reg, reg_cnt, byte;
	u32 slave_reg, slave_reg_tmp;

	reg_cnt = MLXBF_I2C_SMBUS_SLAVE_ADDR_CNT >> 2;

	/*
	 * Read the slave registers. There are 4 * 32-bit slave registers.
	 * Each slave register can hold up to 4 * 8-bit slave configurations:
	 * 1) A 7-bit address
	 * 2) And a status bit (1 if enabled, 0 if not).
	 * Check if addr is present in the registers.
	 */
	for (reg = 0; reg < reg_cnt; reg++) {
		slave_reg = readl(priv->slv->io +
				MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG + reg * 0x4);

		/* Check whether the address slots are empty. */
		if (!slave_reg)
			continue;

		/*
		 * Check if addr matches any of the 4 slave addresses
		 * in the register.
		 */
		slave_reg_tmp = slave_reg;
		for (byte = 0; byte < 4; byte++) {
			addr_tmp = slave_reg_tmp &
					MLXBF_I2C_SMBUS_SLAVE_ADDR_MASK;
			/*
			 * Parse slave address bytes and check whether the
			 * slave address already exists.
			 */
			if (addr_tmp == addr) {
				/* Clear the slave address slot. */
				slave_reg &= ~(GENMASK(7, 0) << (byte * 8));
				writel(slave_reg, priv->slv->io +
					MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG +
					(reg * 0x4));
				/* Free slave at the corresponding index. */
				priv->slave[(reg * 4) + byte] = NULL;

				return 0;
			}

			/* Parse next byte. */
			slave_reg_tmp >>= 8;
		}
	}

	return -ENXIO;
}

static int mlxbf_i2c_init_coalesce(struct platform_device *pdev,
				   struct mlxbf_i2c_priv *priv)
{
	struct mlxbf_i2c_resource *coalesce_res;
	struct resource *params;
	resource_size_t size;
	int ret = 0;

	/*
	 * Unlike on the BlueField-1 platform, the coalesce registers are a
	 * dedicated resource in the next generations of BlueField.
	 */
	if (mlxbf_i2c_has_chip_type(priv, MLXBF_I2C_CHIP_TYPE_1)) {
		coalesce_res = mlxbf_i2c_get_shared_resource(priv,
						MLXBF_I2C_COALESCE_RES);
		if (!coalesce_res)
			return -EPERM;

		/*
		 * The Cause Coalesce group in TYU space is shared among
		 * I2C busses. This function MUST be serialized to avoid
		 * racing when claiming the memory region.
		 */
		lockdep_assert_held(mlxbf_i2c_gpio_res->lock);

		/*
		 * Check whether the memory map exists.
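		 * It may already have been mapped by another bus instance
		 * sharing this region.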
*/ if (coalesce_res->io) { priv->coalesce = coalesce_res; return 0; } params = coalesce_res->params; size = resource_size(params); if (!request_mem_region(params->start, size, params->name)) return -EFAULT; coalesce_res->io = ioremap(params->start, size); if (!coalesce_res->io) { release_mem_region(params->start, size); return -ENOMEM; } priv->coalesce = coalesce_res; } else { ret = mlxbf_i2c_init_resource(pdev, &priv->coalesce, MLXBF_I2C_COALESCE_RES); } return ret; } static int mlxbf_i2c_release_coalesce(struct platform_device *pdev, struct mlxbf_i2c_priv *priv) { struct mlxbf_i2c_resource *coalesce_res; struct device *dev = &pdev->dev; struct resource *params; resource_size_t size; coalesce_res = priv->coalesce; if (coalesce_res->io) { params = coalesce_res->params; size = resource_size(params); if (mlxbf_i2c_has_chip_type(priv, MLXBF_I2C_CHIP_TYPE_1)) { mutex_lock(coalesce_res->lock); iounmap(coalesce_res->io); release_mem_region(params->start, size); mutex_unlock(coalesce_res->lock); } else { devm_release_mem_region(dev, params->start, size); } } return 0; } static int mlxbf_i2c_init_slave(struct platform_device *pdev, struct mlxbf_i2c_priv *priv) { struct device *dev = &pdev->dev; u32 int_reg; int ret; /* Reset FSM. */ writel(0, priv->slv->io + MLXBF_I2C_SMBUS_SLAVE_FSM); /* * Enable slave cause interrupt bits. Drive * MLXBF_I2C_CAUSE_READ_WAIT_FW_RESPONSE and * MLXBF_I2C_CAUSE_WRITE_SUCCESS, these are enabled when an external * masters issue a Read and Write, respectively. But, clear all * interrupts first. */ writel(~0, priv->slv_cause->io + MLXBF_I2C_CAUSE_OR_CLEAR); int_reg = MLXBF_I2C_CAUSE_READ_WAIT_FW_RESPONSE; int_reg |= MLXBF_I2C_CAUSE_WRITE_SUCCESS; writel(int_reg, priv->slv_cause->io + MLXBF_I2C_CAUSE_OR_EVTEN0); /* Finally, set the 'ready' bit to start handling transactions. */ writel(0x1, priv->slv->io + MLXBF_I2C_SMBUS_SLAVE_READY); /* Initialize the cause coalesce resource. */ ret = mlxbf_i2c_init_coalesce(pdev, priv); if (ret < 0) { dev_err(dev, "failed to initialize cause coalesce\n"); return ret; } return 0; } static bool mlxbf_i2c_has_coalesce(struct mlxbf_i2c_priv *priv, bool *read, bool *write) { const struct mlxbf_i2c_chip_info *chip = priv->chip; u32 coalesce0_reg, cause_reg; u8 slave_shift, is_set; *write = false; *read = false; slave_shift = chip->type != MLXBF_I2C_CHIP_TYPE_1 ? MLXBF_I2C_CAUSE_YU_SLAVE_BIT : priv->bus + MLXBF_I2C_CAUSE_TYU_SLAVE_BIT; coalesce0_reg = readl(priv->coalesce->io + MLXBF_I2C_CAUSE_COALESCE_0); is_set = coalesce0_reg & (1 << slave_shift); if (!is_set) return false; /* Check the source of the interrupt, i.e. whether a Read or Write. */ cause_reg = readl(priv->slv_cause->io + MLXBF_I2C_CAUSE_ARBITER); if (cause_reg & MLXBF_I2C_CAUSE_READ_WAIT_FW_RESPONSE) *read = true; else if (cause_reg & MLXBF_I2C_CAUSE_WRITE_SUCCESS) *write = true; /* Clear cause bits. */ writel(~0x0, priv->slv_cause->io + MLXBF_I2C_CAUSE_OR_CLEAR); return true; } static bool mlxbf_i2c_slave_wait_for_idle(struct mlxbf_i2c_priv *priv, u32 timeout) { u32 mask = MLXBF_I2C_CAUSE_S_GW_BUSY_FALL; u32 addr = MLXBF_I2C_CAUSE_ARBITER; if (mlxbf_i2c_poll(priv->slv_cause->io, addr, mask, false, timeout)) return true; return false; } static struct i2c_client *mlxbf_i2c_get_slave_from_addr( struct mlxbf_i2c_priv *priv, u8 addr) { int i; for (i = 0; i < MLXBF_I2C_SMBUS_SLAVE_ADDR_CNT; i++) { if (!priv->slave[i]) continue; if (priv->slave[i]->addr == addr) return priv->slave[i]; } return NULL; } /* * Send byte to 'external' smbus master. 
This function is executed when * an external smbus master wants to read data from the BlueField. */ static int mlxbf_i2c_irq_send(struct mlxbf_i2c_priv *priv, u8 recv_bytes) { u8 data_desc[MLXBF_I2C_SLAVE_DATA_DESC_SIZE] = { 0 }; u8 write_size, pec_en, addr, value, byte_cnt; struct i2c_client *slave; u32 control32, data32; int ret = 0; /* * Read the first byte received from the external master to * determine the slave address. This byte is located in the * first data descriptor register of the slave GW. */ data32 = ioread32be(priv->slv->io + MLXBF_I2C_SLAVE_DATA_DESC_ADDR); addr = (data32 & GENMASK(7, 0)) >> 1; /* * Check if the slave address received in the data descriptor register * matches any of the slave addresses registered. If there is a match, * set the slave. */ slave = mlxbf_i2c_get_slave_from_addr(priv, addr); if (!slave) { ret = -ENXIO; goto clear_csr; } /* * An I2C read can consist of a WRITE bit transaction followed by * a READ bit transaction. Indeed, slave devices often expect * the slave address to be followed by the internal address. * So, write the internal address byte first, and then, send the * requested data to the master. */ if (recv_bytes > 1) { i2c_slave_event(slave, I2C_SLAVE_WRITE_REQUESTED, &value); value = (data32 >> 8) & GENMASK(7, 0); ret = i2c_slave_event(slave, I2C_SLAVE_WRITE_RECEIVED, &value); i2c_slave_event(slave, I2C_SLAVE_STOP, &value); if (ret < 0) goto clear_csr; } /* * Send data to the master. Currently, the driver supports * READ_BYTE, READ_WORD and BLOCK READ protocols. The * hardware can send up to 128 bytes per transfer which is * the total size of the data registers. */ i2c_slave_event(slave, I2C_SLAVE_READ_REQUESTED, &value); for (byte_cnt = 0; byte_cnt < MLXBF_I2C_SLAVE_DATA_DESC_SIZE; byte_cnt++) { data_desc[byte_cnt] = value; i2c_slave_event(slave, I2C_SLAVE_READ_PROCESSED, &value); } /* Send a stop condition to the backend. */ i2c_slave_event(slave, I2C_SLAVE_STOP, &value); /* Set the number of bytes to write to master. */ write_size = (byte_cnt - 1) & 0x7f; /* Write data to Slave GW data descriptor. */ mlxbf_i2c_smbus_write_data(priv, data_desc, byte_cnt, MLXBF_I2C_SLAVE_DATA_DESC_ADDR, false); pec_en = 0; /* Disable PEC since it is not supported. */ /* Prepare control word. */ control32 = MLXBF_I2C_SLAVE_ENABLE; control32 |= rol32(write_size, MLXBF_I2C_SLAVE_WRITE_BYTES_SHIFT); control32 |= rol32(pec_en, MLXBF_I2C_SLAVE_SEND_PEC_SHIFT); writel(control32, priv->slv->io + MLXBF_I2C_SMBUS_SLAVE_GW); /* * Wait until the transfer is completed; the driver will wait * until the GW is idle, a cause will rise on fall of GW busy. */ mlxbf_i2c_slave_wait_for_idle(priv, MLXBF_I2C_SMBUS_TIMEOUT); clear_csr: /* Release the Slave GW. */ writel(0x0, priv->slv->io + MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES); writel(0x0, priv->slv->io + MLXBF_I2C_SMBUS_SLAVE_PEC); writel(0x1, priv->slv->io + MLXBF_I2C_SMBUS_SLAVE_READY); return ret; } /* * Receive bytes from 'external' smbus master. This function is executed when * an external smbus master wants to write data to the BlueField. */ static int mlxbf_i2c_irq_recv(struct mlxbf_i2c_priv *priv, u8 recv_bytes) { u8 data_desc[MLXBF_I2C_SLAVE_DATA_DESC_SIZE] = { 0 }; struct i2c_client *slave; u8 value, byte, addr; int ret = 0; /* Read data from Slave GW data descriptor. */ mlxbf_i2c_smbus_read_data(priv, data_desc, recv_bytes, MLXBF_I2C_SLAVE_DATA_DESC_ADDR, false); addr = data_desc[0] >> 1; /* * Check if the slave address received in the data descriptor register * matches any of the slave addresses registered. 
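 * If there is no match, the transfer is dropped and the slave GW is
 * released below.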
 */
	slave = mlxbf_i2c_get_slave_from_addr(priv, addr);
	if (!slave) {
		ret = -EINVAL;
		goto clear_csr;
	}

	/*
	 * Notify the slave backend that an smbus master wants to write data
	 * to the BlueField.
	 */
	i2c_slave_event(slave, I2C_SLAVE_WRITE_REQUESTED, &value);

	/* Send the received data to the slave backend. */
	for (byte = 1; byte < recv_bytes; byte++) {
		value = data_desc[byte];
		ret = i2c_slave_event(slave, I2C_SLAVE_WRITE_RECEIVED,
				      &value);
		if (ret < 0)
			break;
	}

	/*
	 * Send a stop event to the slave backend, to signal
	 * the end of the write transactions.
	 */
	i2c_slave_event(slave, I2C_SLAVE_STOP, &value);

clear_csr:
	/* Release the Slave GW. */
	writel(0x0, priv->slv->io + MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES);
	writel(0x0, priv->slv->io + MLXBF_I2C_SMBUS_SLAVE_PEC);
	writel(0x1, priv->slv->io + MLXBF_I2C_SMBUS_SLAVE_READY);

	return ret;
}

static irqreturn_t mlxbf_i2c_irq(int irq, void *ptr)
{
	struct mlxbf_i2c_priv *priv = ptr;
	bool read, write, irq_is_set;
	u32 rw_bytes_reg;
	u8 recv_bytes;

	/*
	 * Read the TYU interrupt register and determine the source of the
	 * interrupt. Based on the source of the interrupt, one of the
	 * following actions is performed:
	 *  - Receive data and send response to master.
	 *  - Send data and release slave GW.
	 *
	 * Handle read/write transactions only. CRmaster and Iarp requests
	 * are ignored for now.
	 */
	irq_is_set = mlxbf_i2c_has_coalesce(priv, &read, &write);
	if (!irq_is_set || (!read && !write)) {
		/* Nothing to do here, interrupt was not from this device. */
		return IRQ_NONE;
	}

	/*
	 * The MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES register holds the
	 * number of bytes from/to the master, 8 bits each. If the lower
	 * 8 bits are set, the master expects to read N bytes from the
	 * slave; if the upper 8 bits are set, the slave expects N bytes
	 * from the master.
	 */
	rw_bytes_reg = readl(priv->slv->io +
			     MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES);
	recv_bytes = (rw_bytes_reg >> 8) & GENMASK(7, 0);

	/*
	 * For now, the slave supports 128-byte transfers. Discard remaining
	 * data bytes if the master wrote more than
	 * MLXBF_I2C_SLAVE_DATA_DESC_SIZE, i.e., the actual size of the slave
	 * data descriptor.
	 *
	 * Note that we will never expect to transfer more than 128 bytes; as
	 * specified in the SMBus standard, block transactions cannot exceed
	 * 32 bytes.
	 */
	recv_bytes = recv_bytes > MLXBF_I2C_SLAVE_DATA_DESC_SIZE ?
		MLXBF_I2C_SLAVE_DATA_DESC_SIZE : recv_bytes;

	if (read)
		mlxbf_i2c_irq_send(priv, recv_bytes);
	else
		mlxbf_i2c_irq_recv(priv, recv_bytes);

	return IRQ_HANDLED;
}

/* Return negative errno on error. */
static s32 mlxbf_i2c_smbus_xfer(struct i2c_adapter *adap, u16 addr,
				unsigned short flags, char read_write,
				u8 command, int size,
				union i2c_smbus_data *data)
{
	struct mlxbf_i2c_smbus_request request = { 0 };
	struct mlxbf_i2c_priv *priv;
	bool read, pec;
	u8 byte_cnt;

	request.slave = addr;

	read = (read_write == I2C_SMBUS_READ);
	pec = flags & I2C_FUNC_SMBUS_PEC;

	switch (size) {
	case I2C_SMBUS_QUICK:
		mlxbf_i2c_smbus_quick_command(&request, read);
		dev_dbg(&adap->dev, "smbus quick, slave 0x%02x\n", addr);
		break;

	case I2C_SMBUS_BYTE:
		mlxbf_i2c_smbus_byte_func(&request,
					  read ? &data->byte : &command,
					  read, pec);
		dev_dbg(&adap->dev, "smbus %s byte, slave 0x%02x.\n",
			read ? "read" : "write", addr);
		break;

	case I2C_SMBUS_BYTE_DATA:
		mlxbf_i2c_smbus_data_byte_func(&request, &command,
					       &data->byte, read, pec);
		dev_dbg(&adap->dev, "smbus %s byte data at 0x%02x, slave 0x%02x.\n",
			read ?
"read" : "write", command, addr); break; case I2C_SMBUS_WORD_DATA: mlxbf_i2c_smbus_data_word_func(&request, &command, (u8 *)&data->word, read, pec); dev_dbg(&adap->dev, "smbus %s word data at 0x%02x, slave 0x%02x.\n", read ? "read" : "write", command, addr); break; case I2C_SMBUS_I2C_BLOCK_DATA: byte_cnt = data->block[0]; mlxbf_i2c_smbus_i2c_block_func(&request, &command, data->block, &byte_cnt, read, pec); dev_dbg(&adap->dev, "i2c %s block data, %d bytes at 0x%02x, slave 0x%02x.\n", read ? "read" : "write", byte_cnt, command, addr); break; case I2C_SMBUS_BLOCK_DATA: byte_cnt = read ? I2C_SMBUS_BLOCK_MAX : data->block[0]; mlxbf_i2c_smbus_block_func(&request, &command, data->block, &byte_cnt, read, pec); dev_dbg(&adap->dev, "smbus %s block data, %d bytes at 0x%02x, slave 0x%02x.\n", read ? "read" : "write", byte_cnt, command, addr); break; case I2C_FUNC_SMBUS_PROC_CALL: mlxbf_i2c_smbus_process_call_func(&request, &command, (u8 *)&data->word, pec); dev_dbg(&adap->dev, "process call, wr/rd at 0x%02x, slave 0x%02x.\n", command, addr); break; case I2C_FUNC_SMBUS_BLOCK_PROC_CALL: byte_cnt = data->block[0]; mlxbf_i2c_smbus_blk_process_call_func(&request, &command, data->block, &byte_cnt, pec); dev_dbg(&adap->dev, "block process call, wr/rd %d bytes, slave 0x%02x.\n", byte_cnt, addr); break; default: dev_dbg(&adap->dev, "Unsupported I2C/SMBus command %d\n", size); return -EOPNOTSUPP; } priv = i2c_get_adapdata(adap); return mlxbf_i2c_smbus_start_transaction(priv, &request); } static int mlxbf_i2c_reg_slave(struct i2c_client *slave) { struct mlxbf_i2c_priv *priv = i2c_get_adapdata(slave->adapter); struct device *dev = &slave->dev; int ret; /* * Do not support ten bit chip address and do not use Packet Error * Checking (PEC). */ if (slave->flags & (I2C_CLIENT_TEN | I2C_CLIENT_PEC)) { dev_err(dev, "SMBus PEC and 10 bit address not supported\n"); return -EAFNOSUPPORT; } ret = mlxbf_i2c_slave_enable(priv, slave); if (ret) dev_err(dev, "Surpassed max number of registered slaves allowed\n"); return 0; } static int mlxbf_i2c_unreg_slave(struct i2c_client *slave) { struct mlxbf_i2c_priv *priv = i2c_get_adapdata(slave->adapter); struct device *dev = &slave->dev; int ret; /* * Unregister slave by: * 1) Disabling the slave address in hardware * 2) Freeing priv->slave at the corresponding index */ ret = mlxbf_i2c_slave_disable(priv, slave->addr); if (ret) dev_err(dev, "Unable to find slave 0x%x\n", slave->addr); return ret; } static u32 mlxbf_i2c_functionality(struct i2c_adapter *adap) { return MLXBF_I2C_FUNC_ALL; } static struct mlxbf_i2c_chip_info mlxbf_i2c_chip[] = { [MLXBF_I2C_CHIP_TYPE_1] = { .type = MLXBF_I2C_CHIP_TYPE_1, .shared_res = { [0] = &mlxbf_i2c_coalesce_res[MLXBF_I2C_CHIP_TYPE_1], [1] = &mlxbf_i2c_corepll_res[MLXBF_I2C_CHIP_TYPE_1], [2] = &mlxbf_i2c_gpio_res[MLXBF_I2C_CHIP_TYPE_1] }, .calculate_freq = mlxbf_i2c_calculate_freq_from_tyu, .smbus_master_rs_bytes_off = MLXBF_I2C_YU_SMBUS_RS_BYTES, .smbus_master_fsm_off = MLXBF_I2C_YU_SMBUS_MASTER_FSM }, [MLXBF_I2C_CHIP_TYPE_2] = { .type = MLXBF_I2C_CHIP_TYPE_2, .shared_res = { [0] = &mlxbf_i2c_corepll_res[MLXBF_I2C_CHIP_TYPE_2] }, .calculate_freq = mlxbf_i2c_calculate_freq_from_yu, .smbus_master_rs_bytes_off = MLXBF_I2C_YU_SMBUS_RS_BYTES, .smbus_master_fsm_off = MLXBF_I2C_YU_SMBUS_MASTER_FSM }, [MLXBF_I2C_CHIP_TYPE_3] = { .type = MLXBF_I2C_CHIP_TYPE_3, .shared_res = { [0] = &mlxbf_i2c_corepll_res[MLXBF_I2C_CHIP_TYPE_3] }, .calculate_freq = mlxbf_i2c_calculate_freq_from_yu, .smbus_master_rs_bytes_off = MLXBF_I2C_RSH_YU_SMBUS_RS_BYTES, 
.smbus_master_fsm_off = MLXBF_I2C_RSH_YU_SMBUS_MASTER_FSM } }; static const struct i2c_algorithm mlxbf_i2c_algo = { .smbus_xfer = mlxbf_i2c_smbus_xfer, .functionality = mlxbf_i2c_functionality, .reg_slave = mlxbf_i2c_reg_slave, .unreg_slave = mlxbf_i2c_unreg_slave, }; static struct i2c_adapter_quirks mlxbf_i2c_quirks = { .max_read_len = MLXBF_I2C_MASTER_DATA_R_LENGTH, .max_write_len = MLXBF_I2C_MASTER_DATA_W_LENGTH, }; static const struct acpi_device_id mlxbf_i2c_acpi_ids[] = { { "MLNXBF03", (kernel_ulong_t)&mlxbf_i2c_chip[MLXBF_I2C_CHIP_TYPE_1] }, { "MLNXBF23", (kernel_ulong_t)&mlxbf_i2c_chip[MLXBF_I2C_CHIP_TYPE_2] }, { "MLNXBF31", (kernel_ulong_t)&mlxbf_i2c_chip[MLXBF_I2C_CHIP_TYPE_3] }, {}, }; MODULE_DEVICE_TABLE(acpi, mlxbf_i2c_acpi_ids); static int mlxbf_i2c_acpi_probe(struct device *dev, struct mlxbf_i2c_priv *priv) { const struct acpi_device_id *aid; u64 bus_id; int ret; if (acpi_disabled) return -ENOENT; aid = acpi_match_device(mlxbf_i2c_acpi_ids, dev); if (!aid) return -ENODEV; priv->chip = (struct mlxbf_i2c_chip_info *)aid->driver_data; ret = acpi_dev_uid_to_integer(ACPI_COMPANION(dev), &bus_id); if (ret) { dev_err(dev, "Cannot retrieve UID\n"); return ret; } priv->bus = bus_id; return 0; } static int mlxbf_i2c_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct mlxbf_i2c_priv *priv; struct i2c_adapter *adap; u32 resource_version; int irq, ret; priv = devm_kzalloc(dev, sizeof(struct mlxbf_i2c_priv), GFP_KERNEL); if (!priv) return -ENOMEM; ret = mlxbf_i2c_acpi_probe(dev, priv); if (ret < 0) return ret; /* This property allows the driver to stay backward compatible with older * ACPI tables. * Starting BlueField-3 SoC, the "smbus" resource was broken down into 3 * separate resources "timer", "master" and "slave". */ if (device_property_read_u32(dev, "resource_version", &resource_version)) resource_version = 0; priv->resource_version = resource_version; if (priv->chip->type < MLXBF_I2C_CHIP_TYPE_3 && resource_version == 0) { priv->timer = devm_kzalloc(dev, sizeof(struct mlxbf_i2c_resource), GFP_KERNEL); if (!priv->timer) return -ENOMEM; priv->mst = devm_kzalloc(dev, sizeof(struct mlxbf_i2c_resource), GFP_KERNEL); if (!priv->mst) return -ENOMEM; priv->slv = devm_kzalloc(dev, sizeof(struct mlxbf_i2c_resource), GFP_KERNEL); if (!priv->slv) return -ENOMEM; ret = mlxbf_i2c_init_resource(pdev, &priv->smbus, MLXBF_I2C_SMBUS_RES); if (ret < 0) return dev_err_probe(dev, ret, "Cannot fetch smbus resource info"); priv->timer->io = priv->smbus->io; priv->mst->io = priv->smbus->io + MLXBF_I2C_MST_ADDR_OFFSET; priv->slv->io = priv->smbus->io + MLXBF_I2C_SLV_ADDR_OFFSET; } else { ret = mlxbf_i2c_init_resource(pdev, &priv->timer, MLXBF_I2C_SMBUS_TIMER_RES); if (ret < 0) return dev_err_probe(dev, ret, "Cannot fetch timer resource info"); ret = mlxbf_i2c_init_resource(pdev, &priv->mst, MLXBF_I2C_SMBUS_MST_RES); if (ret < 0) return dev_err_probe(dev, ret, "Cannot fetch master resource info"); ret = mlxbf_i2c_init_resource(pdev, &priv->slv, MLXBF_I2C_SMBUS_SLV_RES); if (ret < 0) return dev_err_probe(dev, ret, "Cannot fetch slave resource info"); } ret = mlxbf_i2c_init_resource(pdev, &priv->mst_cause, MLXBF_I2C_MST_CAUSE_RES); if (ret < 0) return dev_err_probe(dev, ret, "Cannot fetch cause master resource info"); ret = mlxbf_i2c_init_resource(pdev, &priv->slv_cause, MLXBF_I2C_SLV_CAUSE_RES); if (ret < 0) return dev_err_probe(dev, ret, "Cannot fetch cause slave resource info"); adap = &priv->adap; adap->owner = THIS_MODULE; adap->class = I2C_CLASS_HWMON; adap->algo = 
&mlxbf_i2c_algo; adap->quirks = &mlxbf_i2c_quirks; adap->dev.parent = dev; adap->dev.of_node = dev->of_node; adap->nr = priv->bus; snprintf(adap->name, sizeof(adap->name), "i2c%d", adap->nr); i2c_set_adapdata(adap, priv); /* Read Core PLL frequency. */ ret = mlxbf_i2c_calculate_corepll_freq(pdev, priv); if (ret < 0) { dev_err(dev, "cannot get core clock frequency\n"); /* Set to default value. */ priv->frequency = MLXBF_I2C_COREPLL_FREQ; } /* * Initialize master. * Note that a physical bus might be shared among Linux and firmware * (e.g., ATF). Thus, the bus should be initialized and ready and * bus initialization would be unnecessary. This requires additional * knowledge about physical busses. But, since an extra initialization * does not really hurt, then keep the code as is. */ ret = mlxbf_i2c_init_master(pdev, priv); if (ret < 0) return dev_err_probe(dev, ret, "failed to initialize smbus master %d", priv->bus); mlxbf_i2c_init_timings(pdev, priv); mlxbf_i2c_init_slave(pdev, priv); irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; ret = devm_request_irq(dev, irq, mlxbf_i2c_irq, IRQF_SHARED | IRQF_PROBE_SHARED, dev_name(dev), priv); if (ret < 0) return dev_err_probe(dev, ret, "Cannot get irq %d\n", irq); priv->irq = irq; platform_set_drvdata(pdev, priv); ret = i2c_add_numbered_adapter(adap); if (ret < 0) return ret; mutex_lock(&mlxbf_i2c_bus_lock); mlxbf_i2c_bus_count++; mutex_unlock(&mlxbf_i2c_bus_lock); return 0; } static void mlxbf_i2c_remove(struct platform_device *pdev) { struct mlxbf_i2c_priv *priv = platform_get_drvdata(pdev); struct device *dev = &pdev->dev; struct resource *params; if (priv->chip->type < MLXBF_I2C_CHIP_TYPE_3 && priv->resource_version == 0) { params = priv->smbus->params; devm_release_mem_region(dev, params->start, resource_size(params)); } else { params = priv->timer->params; devm_release_mem_region(dev, params->start, resource_size(params)); params = priv->mst->params; devm_release_mem_region(dev, params->start, resource_size(params)); params = priv->slv->params; devm_release_mem_region(dev, params->start, resource_size(params)); } params = priv->mst_cause->params; devm_release_mem_region(dev, params->start, resource_size(params)); params = priv->slv_cause->params; devm_release_mem_region(dev, params->start, resource_size(params)); /* * Release shared resources. This should be done when releasing * the I2C controller. 
*/ mutex_lock(&mlxbf_i2c_bus_lock); if (--mlxbf_i2c_bus_count == 0) { mlxbf_i2c_release_coalesce(pdev, priv); mlxbf_i2c_release_corepll(pdev, priv); mlxbf_i2c_release_gpio(pdev, priv); } mutex_unlock(&mlxbf_i2c_bus_lock); devm_free_irq(dev, priv->irq, priv); i2c_del_adapter(&priv->adap); } static struct platform_driver mlxbf_i2c_driver = { .probe = mlxbf_i2c_probe, .remove_new = mlxbf_i2c_remove, .driver = { .name = "i2c-mlxbf", .acpi_match_table = ACPI_PTR(mlxbf_i2c_acpi_ids), }, }; static int __init mlxbf_i2c_init(void) { mutex_init(&mlxbf_i2c_coalesce_lock); mutex_init(&mlxbf_i2c_corepll_lock); mutex_init(&mlxbf_i2c_gpio_lock); mutex_init(&mlxbf_i2c_bus_lock); return platform_driver_register(&mlxbf_i2c_driver); } module_init(mlxbf_i2c_init); static void __exit mlxbf_i2c_exit(void) { platform_driver_unregister(&mlxbf_i2c_driver); mutex_destroy(&mlxbf_i2c_bus_lock); mutex_destroy(&mlxbf_i2c_gpio_lock); mutex_destroy(&mlxbf_i2c_corepll_lock); mutex_destroy(&mlxbf_i2c_coalesce_lock); } module_exit(mlxbf_i2c_exit); MODULE_DESCRIPTION("Mellanox BlueField I2C bus driver"); MODULE_AUTHOR("Khalil Blaiech <[email protected]>"); MODULE_AUTHOR("Asmaa Mnebhi <[email protected]>"); MODULE_LICENSE("GPL v2");
linux-master
drivers/i2c/busses/i2c-mlxbf.c
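For context, a short sketch of how the mlxbf adapter above is exercised from the client side: the I2C core routes SMBus calls through the adapter's ->smbus_xfer, i.e. mlxbf_i2c_smbus_xfer(), which packs the request as in the switch above. The sketch is hypothetical, not part of the driver; the "exmpl" device name and register 0x00 are invented for illustration.

/* Hypothetical client sketch: a word-data read dispatched through
 * i2c_smbus_read_word_data() ends up in mlxbf_i2c_smbus_xfer() above. */
#include <linux/i2c.h>
#include <linux/module.h>

static int exmpl_probe(struct i2c_client *client)
{
	s32 val;

	val = i2c_smbus_read_word_data(client, 0x00);	/* invented register */
	if (val < 0)
		return val;

	dev_info(&client->dev, "reg 0x00 = 0x%04x\n", val);
	return 0;
}

static const struct i2c_device_id exmpl_id[] = {
	{ "exmpl" },
	{ }
};
MODULE_DEVICE_TABLE(i2c, exmpl_id);

static struct i2c_driver exmpl_driver = {
	.driver		= { .name = "exmpl" },
	.probe		= exmpl_probe,
	.id_table	= exmpl_id,
};
module_i2c_driver(exmpl_driver);

MODULE_DESCRIPTION("Illustrative I2C client (not part of the mlxbf driver)");
MODULE_LICENSE("GPL");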
// SPDX-License-Identifier: GPL-2.0 /* * I2C driver for stand-alone PCF8584 style adapters on Zorro cards * * Original ICY documentation can be found on Aminet: * https://aminet.net/package/docs/hard/icy * * There has been a modern community re-print of this design in 2019: * https://www.a1k.org/forum/index.php?threads/70106/ * * The card is basically a Philips PCF8584 connected straight to the * beginning of the AutoConfig'd address space (register S1 on base+2), * with /INT on /INT2 on the Zorro bus. * * Copyright (c) 2019 Max Staudt <[email protected]> * * This started as a fork of i2c-elektor.c and has evolved since. * Thanks go to its authors for providing a base to grow on. * * * IRQ support is currently not implemented. * * As it turns out, i2c-algo-pcf is really written with i2c-elektor's * edge-triggered ISA interrupts in mind, while the Amiga's Zorro bus has * level-triggered interrupts. This means that once an interrupt occurs, we * have to tell the PCF8584 to shut up immediately, or it will keep the * interrupt line busy and cause an IRQ storm. * However, because of the PCF8584's host-side protocol, there is no good * way to just quieten it without side effects. Rather, we have to perform * the next read/write operation straight away, which will reset the /INT * pin. This entails re-designing the core of i2c-algo-pcf in the future. * For now, we never request an IRQ from the PCF8584, and poll it instead. */ #include <linux/delay.h> #include <linux/init.h> #include <linux/io.h> #include <linux/ioport.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/i2c.h> #include <linux/i2c-algo-pcf.h> #include <asm/amigahw.h> #include <asm/amigaints.h> #include <linux/zorro.h> #include "../algos/i2c-algo-pcf.h" struct icy_i2c { struct i2c_adapter adapter; void __iomem *reg_s0; void __iomem *reg_s1; struct i2c_client *ltc2990_client; }; /* * Functions called by i2c-algo-pcf */ static void icy_pcf_setpcf(void *data, int ctl, int val) { struct icy_i2c *i2c = (struct icy_i2c *)data; u8 __iomem *address = ctl ? i2c->reg_s1 : i2c->reg_s0; z_writeb(val, address); } static int icy_pcf_getpcf(void *data, int ctl) { struct icy_i2c *i2c = (struct icy_i2c *)data; u8 __iomem *address = ctl ? i2c->reg_s1 : i2c->reg_s0; return z_readb(address); } static int icy_pcf_getown(void *data) { return 0x55; } static int icy_pcf_getclock(void *data) { return 0x1c; } static void icy_pcf_waitforpin(void *data) { usleep_range(50, 150); } /* * Main i2c-icy part */ static unsigned short const icy_ltc2990_addresses[] = { 0x4c, 0x4d, 0x4e, 0x4f, I2C_CLIENT_END }; /* * Additional sensors exposed once this property is applied: * * in1 will be the voltage of the 5V rail, divided by 2. * in2 will be the voltage of the 12V rail, divided by 4. * temp3 will be measured using a PCB loop next the chip. 
*/ static const u32 icy_ltc2990_meas_mode[] = {0, 3}; static const struct property_entry icy_ltc2990_props[] = { PROPERTY_ENTRY_U32_ARRAY("lltc,meas-mode", icy_ltc2990_meas_mode), { } }; static const struct software_node icy_ltc2990_node = { .properties = icy_ltc2990_props, }; static int icy_probe(struct zorro_dev *z, const struct zorro_device_id *ent) { struct icy_i2c *i2c; struct i2c_algo_pcf_data *algo_data; struct i2c_board_info ltc2990_info = { .type = "ltc2990", .swnode = &icy_ltc2990_node, }; i2c = devm_kzalloc(&z->dev, sizeof(*i2c), GFP_KERNEL); if (!i2c) return -ENOMEM; algo_data = devm_kzalloc(&z->dev, sizeof(*algo_data), GFP_KERNEL); if (!algo_data) return -ENOMEM; dev_set_drvdata(&z->dev, i2c); i2c->adapter.dev.parent = &z->dev; i2c->adapter.owner = THIS_MODULE; /* i2c->adapter.algo assigned by i2c_pcf_add_bus() */ i2c->adapter.algo_data = algo_data; strscpy(i2c->adapter.name, "ICY I2C Zorro adapter", sizeof(i2c->adapter.name)); if (!devm_request_mem_region(&z->dev, z->resource.start, 4, i2c->adapter.name)) return -ENXIO; /* Driver private data */ i2c->reg_s0 = ZTWO_VADDR(z->resource.start); i2c->reg_s1 = ZTWO_VADDR(z->resource.start + 2); algo_data->data = i2c; algo_data->setpcf = icy_pcf_setpcf; algo_data->getpcf = icy_pcf_getpcf; algo_data->getown = icy_pcf_getown; algo_data->getclock = icy_pcf_getclock; algo_data->waitforpin = icy_pcf_waitforpin; if (i2c_pcf_add_bus(&i2c->adapter)) { dev_err(&z->dev, "i2c_pcf_add_bus() failed\n"); return -ENXIO; } dev_info(&z->dev, "ICY I2C controller at %pa, IRQ not implemented\n", &z->resource.start); /* * The 2019 a1k.org PCBs have an LTC2990 at 0x4c, so start * it automatically once ltc2990 is modprobed. * * in0 is the voltage of the internal 5V power supply. * temp1 is the temperature inside the chip. * * See property_entry above for in1, in2, temp3. */ i2c->ltc2990_client = i2c_new_scanned_device(&i2c->adapter, &ltc2990_info, icy_ltc2990_addresses, NULL); return 0; } static void icy_remove(struct zorro_dev *z) { struct icy_i2c *i2c = dev_get_drvdata(&z->dev); i2c_unregister_device(i2c->ltc2990_client); i2c_del_adapter(&i2c->adapter); } static const struct zorro_device_id icy_zorro_tbl[] = { { ZORRO_ID(VMC, 15, 0), }, { 0 } }; MODULE_DEVICE_TABLE(zorro, icy_zorro_tbl); static struct zorro_driver icy_driver = { .name = "i2c-icy", .id_table = icy_zorro_tbl, .probe = icy_probe, .remove = icy_remove, }; module_driver(icy_driver, zorro_register_driver, zorro_unregister_driver); MODULE_AUTHOR("Max Staudt <[email protected]>"); MODULE_DESCRIPTION("I2C bus via PCF8584 on ICY Zorro card"); MODULE_LICENSE("GPL v2");
linux-master
drivers/i2c/busses/i2c-icy.c
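Because the ICY adapter registers as an ordinary numbered bus, the LTC2990 that icy_probe() instantiates at 0x4c can also be reached from userspace through the i2c-dev interface. A hedged sketch follows; the /dev/i2c-0 bus number is an assumption, and register 0x00 is taken to be the LTC2990 status register.

/* Userspace sketch (assumptions: adapter is /dev/i2c-0, LTC2990 at 0x4c,
 * register 0x00 = status); error handling kept minimal for brevity. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2c-dev.h>

int main(void)
{
	unsigned char reg = 0x00, val;
	int fd = open("/dev/i2c-0", O_RDWR);

	if (fd < 0 || ioctl(fd, I2C_SLAVE, 0x4c) < 0)
		return 1;

	/* write the register pointer, then read one byte back */
	if (write(fd, &reg, 1) != 1 || read(fd, &val, 1) != 1)
		return 1;

	printf("LTC2990 reg 0x00 = 0x%02x\n", val);
	close(fd);
	return 0;
}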
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2006-2007 PA Semi, Inc * * SMBus host driver for PA Semi PWRficient */ #include <linux/module.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/stddef.h> #include <linux/sched.h> #include <linux/i2c.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/io.h> #include "i2c-pasemi-core.h" /* Register offsets */ #define REG_MTXFIFO 0x00 #define REG_MRXFIFO 0x04 #define REG_SMSTA 0x14 #define REG_IMASK 0x18 #define REG_CTL 0x1c #define REG_REV 0x28 /* Register defs */ #define MTXFIFO_READ 0x00000400 #define MTXFIFO_STOP 0x00000200 #define MTXFIFO_START 0x00000100 #define MTXFIFO_DATA_M 0x000000ff #define MRXFIFO_EMPTY 0x00000100 #define MRXFIFO_DATA_M 0x000000ff #define SMSTA_XEN 0x08000000 #define SMSTA_MTN 0x00200000 #define CTL_MRR 0x00000400 #define CTL_MTR 0x00000200 #define CTL_EN 0x00000800 #define CTL_CLK_M 0x000000ff static inline void reg_write(struct pasemi_smbus *smbus, int reg, int val) { dev_dbg(smbus->dev, "smbus write reg %x val %08x\n", reg, val); iowrite32(val, smbus->ioaddr + reg); } static inline int reg_read(struct pasemi_smbus *smbus, int reg) { int ret; ret = ioread32(smbus->ioaddr + reg); dev_dbg(smbus->dev, "smbus read reg %x val %08x\n", reg, ret); return ret; } #define TXFIFO_WR(smbus, reg) reg_write((smbus), REG_MTXFIFO, (reg)) #define RXFIFO_RD(smbus) reg_read((smbus), REG_MRXFIFO) static void pasemi_reset(struct pasemi_smbus *smbus) { u32 val = (CTL_MTR | CTL_MRR | (smbus->clk_div & CTL_CLK_M)); if (smbus->hw_rev >= 6) val |= CTL_EN; reg_write(smbus, REG_CTL, val); reinit_completion(&smbus->irq_completion); } static void pasemi_smb_clear(struct pasemi_smbus *smbus) { unsigned int status; status = reg_read(smbus, REG_SMSTA); reg_write(smbus, REG_SMSTA, status); } static int pasemi_smb_waitready(struct pasemi_smbus *smbus) { int timeout = 100; unsigned int status; if (smbus->use_irq) { reinit_completion(&smbus->irq_completion); reg_write(smbus, REG_IMASK, SMSTA_XEN | SMSTA_MTN); wait_for_completion_timeout(&smbus->irq_completion, msecs_to_jiffies(100)); reg_write(smbus, REG_IMASK, 0); status = reg_read(smbus, REG_SMSTA); } else { status = reg_read(smbus, REG_SMSTA); while (!(status & SMSTA_XEN) && timeout--) { msleep(1); status = reg_read(smbus, REG_SMSTA); } } /* Got NACK? */ if (status & SMSTA_MTN) return -ENXIO; if (timeout < 0) { dev_warn(smbus->dev, "Timeout, status 0x%08x\n", status); reg_write(smbus, REG_SMSTA, status); return -ETIME; } /* Clear XEN */ reg_write(smbus, REG_SMSTA, SMSTA_XEN); return 0; } static int pasemi_i2c_xfer_msg(struct i2c_adapter *adapter, struct i2c_msg *msg, int stop) { struct pasemi_smbus *smbus = adapter->algo_data; int read, i, err; u32 rd; read = msg->flags & I2C_M_RD ? 1 : 0; TXFIFO_WR(smbus, MTXFIFO_START | i2c_8bit_addr_from_msg(msg)); if (read) { TXFIFO_WR(smbus, msg->len | MTXFIFO_READ | (stop ? MTXFIFO_STOP : 0)); err = pasemi_smb_waitready(smbus); if (err) goto reset_out; for (i = 0; i < msg->len; i++) { rd = RXFIFO_RD(smbus); if (rd & MRXFIFO_EMPTY) { err = -ENODATA; goto reset_out; } msg->buf[i] = rd & MRXFIFO_DATA_M; } } else { for (i = 0; i < msg->len - 1; i++) TXFIFO_WR(smbus, msg->buf[i]); TXFIFO_WR(smbus, msg->buf[msg->len-1] | (stop ? 
MTXFIFO_STOP : 0)); if (stop) { err = pasemi_smb_waitready(smbus); if (err) goto reset_out; } } return 0; reset_out: pasemi_reset(smbus); return err; } static int pasemi_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) { struct pasemi_smbus *smbus = adapter->algo_data; int ret, i; pasemi_smb_clear(smbus); ret = 0; for (i = 0; i < num && !ret; i++) ret = pasemi_i2c_xfer_msg(adapter, &msgs[i], (i == (num - 1))); return ret ? ret : num; } static int pasemi_smb_xfer(struct i2c_adapter *adapter, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { struct pasemi_smbus *smbus = adapter->algo_data; unsigned int rd; int read_flag, err; int len = 0, i; /* All our ops take 8-bit shifted addresses */ addr <<= 1; read_flag = read_write == I2C_SMBUS_READ; pasemi_smb_clear(smbus); switch (size) { case I2C_SMBUS_QUICK: TXFIFO_WR(smbus, addr | read_flag | MTXFIFO_START | MTXFIFO_STOP); break; case I2C_SMBUS_BYTE: TXFIFO_WR(smbus, addr | read_flag | MTXFIFO_START); if (read_write) TXFIFO_WR(smbus, 1 | MTXFIFO_STOP | MTXFIFO_READ); else TXFIFO_WR(smbus, MTXFIFO_STOP | command); break; case I2C_SMBUS_BYTE_DATA: TXFIFO_WR(smbus, addr | MTXFIFO_START); TXFIFO_WR(smbus, command); if (read_write) { TXFIFO_WR(smbus, addr | I2C_SMBUS_READ | MTXFIFO_START); TXFIFO_WR(smbus, 1 | MTXFIFO_READ | MTXFIFO_STOP); } else { TXFIFO_WR(smbus, MTXFIFO_STOP | data->byte); } break; case I2C_SMBUS_WORD_DATA: TXFIFO_WR(smbus, addr | MTXFIFO_START); TXFIFO_WR(smbus, command); if (read_write) { TXFIFO_WR(smbus, addr | I2C_SMBUS_READ | MTXFIFO_START); TXFIFO_WR(smbus, 2 | MTXFIFO_READ | MTXFIFO_STOP); } else { TXFIFO_WR(smbus, data->word & MTXFIFO_DATA_M); TXFIFO_WR(smbus, MTXFIFO_STOP | (data->word >> 8)); } break; case I2C_SMBUS_BLOCK_DATA: TXFIFO_WR(smbus, addr | MTXFIFO_START); TXFIFO_WR(smbus, command); if (read_write) { TXFIFO_WR(smbus, addr | I2C_SMBUS_READ | MTXFIFO_START); TXFIFO_WR(smbus, 1 | MTXFIFO_READ); rd = RXFIFO_RD(smbus); len = min_t(u8, (rd & MRXFIFO_DATA_M), I2C_SMBUS_BLOCK_MAX); TXFIFO_WR(smbus, len | MTXFIFO_READ | MTXFIFO_STOP); } else { len = min_t(u8, data->block[0], I2C_SMBUS_BLOCK_MAX); TXFIFO_WR(smbus, len); for (i = 1; i < len; i++) TXFIFO_WR(smbus, data->block[i]); TXFIFO_WR(smbus, data->block[len] | MTXFIFO_STOP); } break; case I2C_SMBUS_PROC_CALL: read_write = I2C_SMBUS_READ; TXFIFO_WR(smbus, addr | MTXFIFO_START); TXFIFO_WR(smbus, command); TXFIFO_WR(smbus, data->word & MTXFIFO_DATA_M); TXFIFO_WR(smbus, (data->word >> 8) & MTXFIFO_DATA_M); TXFIFO_WR(smbus, addr | I2C_SMBUS_READ | MTXFIFO_START); TXFIFO_WR(smbus, 2 | MTXFIFO_STOP | MTXFIFO_READ); break; case I2C_SMBUS_BLOCK_PROC_CALL: len = min_t(u8, data->block[0], I2C_SMBUS_BLOCK_MAX - 1); read_write = I2C_SMBUS_READ; TXFIFO_WR(smbus, addr | MTXFIFO_START); TXFIFO_WR(smbus, command); TXFIFO_WR(smbus, len); for (i = 1; i <= len; i++) TXFIFO_WR(smbus, data->block[i]); TXFIFO_WR(smbus, addr | I2C_SMBUS_READ); TXFIFO_WR(smbus, MTXFIFO_READ | 1); rd = RXFIFO_RD(smbus); len = min_t(u8, (rd & MRXFIFO_DATA_M), I2C_SMBUS_BLOCK_MAX - len); TXFIFO_WR(smbus, len | MTXFIFO_READ | MTXFIFO_STOP); break; default: dev_warn(&adapter->dev, "Unsupported transaction %d\n", size); return -EINVAL; } err = pasemi_smb_waitready(smbus); if (err) goto reset_out; if (read_write == I2C_SMBUS_WRITE) return 0; switch (size) { case I2C_SMBUS_BYTE: case I2C_SMBUS_BYTE_DATA: rd = RXFIFO_RD(smbus); if (rd & MRXFIFO_EMPTY) { err = -ENODATA; goto reset_out; } data->byte = rd & MRXFIFO_DATA_M; break; case 
I2C_SMBUS_WORD_DATA: case I2C_SMBUS_PROC_CALL: rd = RXFIFO_RD(smbus); if (rd & MRXFIFO_EMPTY) { err = -ENODATA; goto reset_out; } data->word = rd & MRXFIFO_DATA_M; rd = RXFIFO_RD(smbus); if (rd & MRXFIFO_EMPTY) { err = -ENODATA; goto reset_out; } data->word |= (rd & MRXFIFO_DATA_M) << 8; break; case I2C_SMBUS_BLOCK_DATA: case I2C_SMBUS_BLOCK_PROC_CALL: data->block[0] = len; for (i = 1; i <= len; i ++) { rd = RXFIFO_RD(smbus); if (rd & MRXFIFO_EMPTY) { err = -ENODATA; goto reset_out; } data->block[i] = rd & MRXFIFO_DATA_M; } break; } return 0; reset_out: pasemi_reset(smbus); return err; } static u32 pasemi_smb_func(struct i2c_adapter *adapter) { return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_SMBUS_PROC_CALL | I2C_FUNC_SMBUS_BLOCK_PROC_CALL | I2C_FUNC_I2C; } static const struct i2c_algorithm smbus_algorithm = { .master_xfer = pasemi_i2c_xfer, .smbus_xfer = pasemi_smb_xfer, .functionality = pasemi_smb_func, }; int pasemi_i2c_common_probe(struct pasemi_smbus *smbus) { int error; smbus->adapter.owner = THIS_MODULE; snprintf(smbus->adapter.name, sizeof(smbus->adapter.name), "PA Semi SMBus adapter (%s)", dev_name(smbus->dev)); smbus->adapter.algo = &smbus_algorithm; smbus->adapter.algo_data = smbus; /* set up the sysfs linkage to our parent device */ smbus->adapter.dev.parent = smbus->dev; smbus->use_irq = 0; init_completion(&smbus->irq_completion); if (smbus->hw_rev != PASEMI_HW_REV_PCI) smbus->hw_rev = reg_read(smbus, REG_REV); reg_write(smbus, REG_IMASK, 0); pasemi_reset(smbus); error = devm_i2c_add_adapter(smbus->dev, &smbus->adapter); if (error) return error; return 0; } irqreturn_t pasemi_irq_handler(int irq, void *dev_id) { struct pasemi_smbus *smbus = dev_id; reg_write(smbus, REG_IMASK, 0); complete(&smbus->irq_completion); return IRQ_HANDLED; }
linux-master
drivers/i2c/busses/i2c-pasemi-core.c
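To make the FIFO command encoding in pasemi_smb_xfer() concrete, here is a standalone sketch (not driver code) of the words pushed to the TX FIFO for an SMBus byte-data read; the slave address 0x50 and command 0x10 are invented.

/* Standalone sketch: TX FIFO words for an SMBus byte-data read, mirroring
 * the I2C_SMBUS_BYTE_DATA read path above (addresses are pre-shifted and
 * I2C_SMBUS_READ == 1 sets the R/W bit). */
#include <stdint.h>
#include <stdio.h>

#define MTXFIFO_READ  0x00000400
#define MTXFIFO_STOP  0x00000200
#define MTXFIFO_START 0x00000100

int main(void)
{
	uint32_t addr = 0x50 << 1;	/* 8-bit shifted address (assumed slave) */
	uint32_t command = 0x10;	/* invented register */
	uint32_t words[] = {
		MTXFIFO_START | addr,			/* START, write address */
		command,				/* register pointer */
		MTXFIFO_START | addr | 1,		/* repeated START, read address */
		MTXFIFO_READ | MTXFIFO_STOP | 1,	/* read 1 byte, then STOP */
	};

	for (unsigned int i = 0; i < sizeof(words) / sizeof(words[0]); i++)
		printf("TXFIFO word %u: 0x%05x\n", i, (unsigned int)words[i]);
	return 0;
}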
// SPDX-License-Identifier: GPL-2.0-only /* * i2c-xiic.c * Copyright (c) 2002-2007 Xilinx Inc. * Copyright (c) 2009-2010 Intel Corporation * * This code was implemented by Mocean Laboratories AB when porting linux * to the automotive development board Russellville. The copyright holder * as seen in the header is Intel corporation. * Mocean Laboratories forked off the GNU/Linux platform work into a * separate company called Pelagicore AB, which committed the code to the * kernel. */ /* Supports: * Xilinx IIC */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/completion.h> #include <linux/platform_data/i2c-xiic.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/of.h> #include <linux/clk.h> #include <linux/pm_runtime.h> #define DRIVER_NAME "xiic-i2c" #define DYNAMIC_MODE_READ_BROKEN_BIT BIT(0) #define SMBUS_BLOCK_READ_MIN_LEN 3 enum xilinx_i2c_state { STATE_DONE, STATE_ERROR, STATE_START }; enum xiic_endian { LITTLE, BIG }; enum i2c_scl_freq { REG_VALUES_100KHZ = 0, REG_VALUES_400KHZ = 1, REG_VALUES_1MHZ = 2 }; /** * struct xiic_i2c - Internal representation of the XIIC I2C bus * @dev: Pointer to device structure * @base: Memory base of the HW registers * @completion: Completion for callers * @adap: Kernel adapter representation * @tx_msg: Messages from above to be sent * @lock: Mutual exclusion * @tx_pos: Current pos in TX message * @nmsgs: Number of messages in tx_msg * @rx_msg: Current RX message * @rx_pos: Position within current RX message * @endianness: big/little-endian byte order * @clk: Pointer to AXI4-lite input clock * @state: See STATE_ * @singlemaster: Indicates bus is single master * @dynamic: Mode of controller * @prev_msg_tx: Previous message is Tx * @quirks: To hold platform specific bug info * @smbus_block_read: Flag to handle block read * @input_clk: Input clock to I2C controller * @i2c_clk: I2C SCL frequency */ struct xiic_i2c { struct device *dev; void __iomem *base; struct completion completion; struct i2c_adapter adap; struct i2c_msg *tx_msg; struct mutex lock; unsigned int tx_pos; unsigned int nmsgs; struct i2c_msg *rx_msg; int rx_pos; enum xiic_endian endianness; struct clk *clk; enum xilinx_i2c_state state; bool singlemaster; bool dynamic; bool prev_msg_tx; u32 quirks; bool smbus_block_read; unsigned long input_clk; unsigned int i2c_clk; }; struct xiic_version_data { u32 quirks; }; /** * struct timing_regs - AXI I2C timing registers that depend on I2C spec * @tsusta: setup time for a repeated START condition * @tsusto: setup time for a STOP condition * @thdsta: hold time for a repeated START condition * @tsudat: setup time for data * @tbuf: bus free time between STOP and START */ struct timing_regs { unsigned int tsusta; unsigned int tsusto; unsigned int thdsta; unsigned int tsudat; unsigned int tbuf; }; /* Reg values in ns derived from I2C spec and AXI I2C PG for different frequencies */ static const struct timing_regs timing_reg_values[] = { { 5700, 5000, 4300, 550, 5000 }, /* Reg values for 100KHz */ { 900, 900, 900, 400, 1600 }, /* Reg values for 400KHz */ { 380, 380, 380, 170, 620 }, /* Reg values for 1MHz */ }; #define XIIC_MSB_OFFSET 0 #define XIIC_REG_OFFSET (0x100 + XIIC_MSB_OFFSET) /* * Register offsets in bytes from RegisterBase. 
Three is added to the * base offset to access LSB (IBM style) of the word */ #define XIIC_CR_REG_OFFSET (0x00 + XIIC_REG_OFFSET) /* Control Register */ #define XIIC_SR_REG_OFFSET (0x04 + XIIC_REG_OFFSET) /* Status Register */ #define XIIC_DTR_REG_OFFSET (0x08 + XIIC_REG_OFFSET) /* Data Tx Register */ #define XIIC_DRR_REG_OFFSET (0x0C + XIIC_REG_OFFSET) /* Data Rx Register */ #define XIIC_ADR_REG_OFFSET (0x10 + XIIC_REG_OFFSET) /* Address Register */ #define XIIC_TFO_REG_OFFSET (0x14 + XIIC_REG_OFFSET) /* Tx FIFO Occupancy */ #define XIIC_RFO_REG_OFFSET (0x18 + XIIC_REG_OFFSET) /* Rx FIFO Occupancy */ #define XIIC_TBA_REG_OFFSET (0x1C + XIIC_REG_OFFSET) /* 10 Bit Address reg */ #define XIIC_RFD_REG_OFFSET (0x20 + XIIC_REG_OFFSET) /* Rx FIFO Depth reg */ #define XIIC_GPO_REG_OFFSET (0x24 + XIIC_REG_OFFSET) /* Output Register */ /* * Timing register offsets from RegisterBase. These are used only for * setting i2c clock frequency for the line. */ #define XIIC_TSUSTA_REG_OFFSET (0x28 + XIIC_REG_OFFSET) /* TSUSTA Register */ #define XIIC_TSUSTO_REG_OFFSET (0x2C + XIIC_REG_OFFSET) /* TSUSTO Register */ #define XIIC_THDSTA_REG_OFFSET (0x30 + XIIC_REG_OFFSET) /* THDSTA Register */ #define XIIC_TSUDAT_REG_OFFSET (0x34 + XIIC_REG_OFFSET) /* TSUDAT Register */ #define XIIC_TBUF_REG_OFFSET (0x38 + XIIC_REG_OFFSET) /* TBUF Register */ #define XIIC_THIGH_REG_OFFSET (0x3C + XIIC_REG_OFFSET) /* THIGH Register */ #define XIIC_TLOW_REG_OFFSET (0x40 + XIIC_REG_OFFSET) /* TLOW Register */ #define XIIC_THDDAT_REG_OFFSET (0x44 + XIIC_REG_OFFSET) /* THDDAT Register */ /* Control Register masks */ #define XIIC_CR_ENABLE_DEVICE_MASK 0x01 /* Device enable = 1 */ #define XIIC_CR_TX_FIFO_RESET_MASK 0x02 /* Transmit FIFO reset=1 */ #define XIIC_CR_MSMS_MASK 0x04 /* Master starts Txing=1 */ #define XIIC_CR_DIR_IS_TX_MASK 0x08 /* Dir of tx. Txing=1 */ #define XIIC_CR_NO_ACK_MASK 0x10 /* Tx Ack. NO ack = 1 */ #define XIIC_CR_REPEATED_START_MASK 0x20 /* Repeated start = 1 */ #define XIIC_CR_GENERAL_CALL_MASK 0x40 /* Gen Call enabled = 1 */ /* Status Register masks */ #define XIIC_SR_GEN_CALL_MASK 0x01 /* 1=a mstr issued a GC */ #define XIIC_SR_ADDR_AS_SLAVE_MASK 0x02 /* 1=when addr as slave */ #define XIIC_SR_BUS_BUSY_MASK 0x04 /* 1 = bus is busy */ #define XIIC_SR_MSTR_RDING_SLAVE_MASK 0x08 /* 1=Dir: mstr <-- slave */ #define XIIC_SR_TX_FIFO_FULL_MASK 0x10 /* 1 = Tx FIFO full */ #define XIIC_SR_RX_FIFO_FULL_MASK 0x20 /* 1 = Rx FIFO full */ #define XIIC_SR_RX_FIFO_EMPTY_MASK 0x40 /* 1 = Rx FIFO empty */ #define XIIC_SR_TX_FIFO_EMPTY_MASK 0x80 /* 1 = Tx FIFO empty */ /* Interrupt Status Register masks Interrupt occurs when... 
*/ #define XIIC_INTR_ARB_LOST_MASK 0x01 /* 1 = arbitration lost */ #define XIIC_INTR_TX_ERROR_MASK 0x02 /* 1=Tx error/msg complete */ #define XIIC_INTR_TX_EMPTY_MASK 0x04 /* 1 = Tx FIFO/reg empty */ #define XIIC_INTR_RX_FULL_MASK 0x08 /* 1=Rx FIFO/reg=OCY level */ #define XIIC_INTR_BNB_MASK 0x10 /* 1 = Bus not busy */ #define XIIC_INTR_AAS_MASK 0x20 /* 1 = when addr as slave */ #define XIIC_INTR_NAAS_MASK 0x40 /* 1 = not addr as slave */ #define XIIC_INTR_TX_HALF_MASK 0x80 /* 1 = TX FIFO half empty */ /* The following constants specify the depth of the FIFOs */ #define IIC_RX_FIFO_DEPTH 16 /* Rx fifo capacity */ #define IIC_TX_FIFO_DEPTH 16 /* Tx fifo capacity */ /* The following constants specify groups of interrupts that are typically * enabled or disables at the same time */ #define XIIC_TX_INTERRUPTS \ (XIIC_INTR_TX_ERROR_MASK | XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK) #define XIIC_TX_RX_INTERRUPTS (XIIC_INTR_RX_FULL_MASK | XIIC_TX_INTERRUPTS) /* * Tx Fifo upper bit masks. */ #define XIIC_TX_DYN_START_MASK 0x0100 /* 1 = Set dynamic start */ #define XIIC_TX_DYN_STOP_MASK 0x0200 /* 1 = Set dynamic stop */ /* Dynamic mode constants */ #define MAX_READ_LENGTH_DYNAMIC 255 /* Max length for dynamic read */ /* * The following constants define the register offsets for the Interrupt * registers. There are some holes in the memory map for reserved addresses * to allow other registers to be added and still match the memory map of the * interrupt controller registers */ #define XIIC_DGIER_OFFSET 0x1C /* Device Global Interrupt Enable Register */ #define XIIC_IISR_OFFSET 0x20 /* Interrupt Status Register */ #define XIIC_IIER_OFFSET 0x28 /* Interrupt Enable Register */ #define XIIC_RESETR_OFFSET 0x40 /* Reset Register */ #define XIIC_RESET_MASK 0xAUL #define XIIC_PM_TIMEOUT 1000 /* ms */ /* timeout waiting for the controller to respond */ #define XIIC_I2C_TIMEOUT (msecs_to_jiffies(1000)) /* timeout waiting for the controller finish transfers */ #define XIIC_XFER_TIMEOUT (msecs_to_jiffies(10000)) /* * The following constant is used for the device global interrupt enable * register, to enable all interrupts for the device, this is the only bit * in the register */ #define XIIC_GINTR_ENABLE_MASK 0x80000000UL #define xiic_tx_space(i2c) ((i2c)->tx_msg->len - (i2c)->tx_pos) #define xiic_rx_space(i2c) ((i2c)->rx_msg->len - (i2c)->rx_pos) static int xiic_start_xfer(struct xiic_i2c *i2c, struct i2c_msg *msgs, int num); static void __xiic_start_xfer(struct xiic_i2c *i2c); /* * For the register read and write functions, a little-endian and big-endian * version are necessary. Endianness is detected during the probe function. * Only the least significant byte [doublet] of the register are ever * accessed. This requires an offset of 3 [2] from the base address for * big-endian systems. 
*/ static inline void xiic_setreg8(struct xiic_i2c *i2c, int reg, u8 value) { if (i2c->endianness == LITTLE) iowrite8(value, i2c->base + reg); else iowrite8(value, i2c->base + reg + 3); } static inline u8 xiic_getreg8(struct xiic_i2c *i2c, int reg) { u8 ret; if (i2c->endianness == LITTLE) ret = ioread8(i2c->base + reg); else ret = ioread8(i2c->base + reg + 3); return ret; } static inline void xiic_setreg16(struct xiic_i2c *i2c, int reg, u16 value) { if (i2c->endianness == LITTLE) iowrite16(value, i2c->base + reg); else iowrite16be(value, i2c->base + reg + 2); } static inline void xiic_setreg32(struct xiic_i2c *i2c, int reg, int value) { if (i2c->endianness == LITTLE) iowrite32(value, i2c->base + reg); else iowrite32be(value, i2c->base + reg); } static inline int xiic_getreg32(struct xiic_i2c *i2c, int reg) { u32 ret; if (i2c->endianness == LITTLE) ret = ioread32(i2c->base + reg); else ret = ioread32be(i2c->base + reg); return ret; } static inline void xiic_irq_dis(struct xiic_i2c *i2c, u32 mask) { u32 ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET); xiic_setreg32(i2c, XIIC_IIER_OFFSET, ier & ~mask); } static inline void xiic_irq_en(struct xiic_i2c *i2c, u32 mask) { u32 ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET); xiic_setreg32(i2c, XIIC_IIER_OFFSET, ier | mask); } static inline void xiic_irq_clr(struct xiic_i2c *i2c, u32 mask) { u32 isr = xiic_getreg32(i2c, XIIC_IISR_OFFSET); xiic_setreg32(i2c, XIIC_IISR_OFFSET, isr & mask); } static inline void xiic_irq_clr_en(struct xiic_i2c *i2c, u32 mask) { xiic_irq_clr(i2c, mask); xiic_irq_en(i2c, mask); } static int xiic_clear_rx_fifo(struct xiic_i2c *i2c) { u8 sr; unsigned long timeout; timeout = jiffies + XIIC_I2C_TIMEOUT; for (sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET); !(sr & XIIC_SR_RX_FIFO_EMPTY_MASK); sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET)) { xiic_getreg8(i2c, XIIC_DRR_REG_OFFSET); if (time_after(jiffies, timeout)) { dev_err(i2c->dev, "Failed to clear rx fifo\n"); return -ETIMEDOUT; } } return 0; } static int xiic_wait_tx_empty(struct xiic_i2c *i2c) { u8 isr; unsigned long timeout; timeout = jiffies + XIIC_I2C_TIMEOUT; for (isr = xiic_getreg32(i2c, XIIC_IISR_OFFSET); !(isr & XIIC_INTR_TX_EMPTY_MASK); isr = xiic_getreg32(i2c, XIIC_IISR_OFFSET)) { if (time_after(jiffies, timeout)) { dev_err(i2c->dev, "Timeout waiting at Tx empty\n"); return -ETIMEDOUT; } } return 0; } /** * xiic_setclk - Sets the configured clock rate * @i2c: Pointer to the xiic device structure * * The timing register values are calculated according to the input clock * frequency and configured scl frequency. For details, please refer the * AXI I2C PG and NXP I2C Spec. * Supported frequencies are 100KHz, 400KHz and 1MHz. * * Return: 0 on success (Supported frequency selected or not configurable in SW) * -EINVAL on failure (scl frequency not supported or THIGH is 0) */ static int xiic_setclk(struct xiic_i2c *i2c) { unsigned int clk_in_mhz; unsigned int index = 0; u32 reg_val; dev_dbg(i2c->adap.dev.parent, "%s entry, i2c->input_clk: %ld, i2c->i2c_clk: %d\n", __func__, i2c->input_clk, i2c->i2c_clk); /* If not specified in DT, do not configure in SW. 
Rely only on Vivado design */ if (!i2c->i2c_clk || !i2c->input_clk) return 0; clk_in_mhz = DIV_ROUND_UP(i2c->input_clk, 1000000); switch (i2c->i2c_clk) { case I2C_MAX_FAST_MODE_PLUS_FREQ: index = REG_VALUES_1MHZ; break; case I2C_MAX_FAST_MODE_FREQ: index = REG_VALUES_400KHZ; break; case I2C_MAX_STANDARD_MODE_FREQ: index = REG_VALUES_100KHZ; break; default: dev_warn(i2c->adap.dev.parent, "Unsupported scl frequency\n"); return -EINVAL; } /* * Value to be stored in a register is the number of clock cycles required * for the time duration. So the time is divided by the input clock time * period to get the number of clock cycles required. Refer Xilinx AXI I2C * PG document and I2C specification for further details. */ /* THIGH - Depends on SCL clock frequency(i2c_clk) as below */ reg_val = (DIV_ROUND_UP(i2c->input_clk, 2 * i2c->i2c_clk)) - 7; if (reg_val == 0) return -EINVAL; xiic_setreg32(i2c, XIIC_THIGH_REG_OFFSET, reg_val - 1); /* TLOW - Value same as THIGH */ xiic_setreg32(i2c, XIIC_TLOW_REG_OFFSET, reg_val - 1); /* TSUSTA */ reg_val = (timing_reg_values[index].tsusta * clk_in_mhz) / 1000; xiic_setreg32(i2c, XIIC_TSUSTA_REG_OFFSET, reg_val - 1); /* TSUSTO */ reg_val = (timing_reg_values[index].tsusto * clk_in_mhz) / 1000; xiic_setreg32(i2c, XIIC_TSUSTO_REG_OFFSET, reg_val - 1); /* THDSTA */ reg_val = (timing_reg_values[index].thdsta * clk_in_mhz) / 1000; xiic_setreg32(i2c, XIIC_THDSTA_REG_OFFSET, reg_val - 1); /* TSUDAT */ reg_val = (timing_reg_values[index].tsudat * clk_in_mhz) / 1000; xiic_setreg32(i2c, XIIC_TSUDAT_REG_OFFSET, reg_val - 1); /* TBUF */ reg_val = (timing_reg_values[index].tbuf * clk_in_mhz) / 1000; xiic_setreg32(i2c, XIIC_TBUF_REG_OFFSET, reg_val - 1); /* THDDAT */ xiic_setreg32(i2c, XIIC_THDDAT_REG_OFFSET, 1); return 0; } static int xiic_reinit(struct xiic_i2c *i2c) { int ret; xiic_setreg32(i2c, XIIC_RESETR_OFFSET, XIIC_RESET_MASK); ret = xiic_setclk(i2c); if (ret) return ret; /* Set receive Fifo depth to maximum (zero based). */ xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, IIC_RX_FIFO_DEPTH - 1); /* Reset Tx Fifo. */ xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, XIIC_CR_TX_FIFO_RESET_MASK); /* Enable IIC Device, remove Tx Fifo reset & disable general call. */ xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, XIIC_CR_ENABLE_DEVICE_MASK); /* make sure RX fifo is empty */ ret = xiic_clear_rx_fifo(i2c); if (ret) return ret; /* Enable interrupts */ xiic_setreg32(i2c, XIIC_DGIER_OFFSET, XIIC_GINTR_ENABLE_MASK); xiic_irq_clr_en(i2c, XIIC_INTR_ARB_LOST_MASK); return 0; } static void xiic_deinit(struct xiic_i2c *i2c) { u8 cr; xiic_setreg32(i2c, XIIC_RESETR_OFFSET, XIIC_RESET_MASK); /* Disable IIC Device. 
*/ cr = xiic_getreg8(i2c, XIIC_CR_REG_OFFSET); xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, cr & ~XIIC_CR_ENABLE_DEVICE_MASK); } static void xiic_smbus_block_read_setup(struct xiic_i2c *i2c) { u8 rxmsg_len, rfd_set = 0; /* * Clear the I2C_M_RECV_LEN flag to avoid setting * message length again */ i2c->rx_msg->flags &= ~I2C_M_RECV_LEN; /* Set smbus_block_read flag to identify in isr */ i2c->smbus_block_read = true; /* Read byte from rx fifo and set message length */ rxmsg_len = xiic_getreg8(i2c, XIIC_DRR_REG_OFFSET); i2c->rx_msg->buf[i2c->rx_pos++] = rxmsg_len; /* Check if received length is valid */ if (rxmsg_len <= I2C_SMBUS_BLOCK_MAX) { /* Set Receive fifo depth */ if (rxmsg_len > IIC_RX_FIFO_DEPTH) { /* * When Rx msg len greater than or equal to Rx fifo capacity * Receive fifo depth should set to Rx fifo capacity minus 1 */ rfd_set = IIC_RX_FIFO_DEPTH - 1; i2c->rx_msg->len = rxmsg_len + 1; } else if ((rxmsg_len == 1) || (rxmsg_len == 0)) { /* * Minimum of 3 bytes required to exit cleanly. 1 byte * already received, Second byte is being received. Have * to set NACK in read_rx before receiving the last byte */ rfd_set = 0; i2c->rx_msg->len = SMBUS_BLOCK_READ_MIN_LEN; } else { /* * When Rx msg len less than Rx fifo capacity * Receive fifo depth should set to Rx msg len minus 2 */ rfd_set = rxmsg_len - 2; i2c->rx_msg->len = rxmsg_len + 1; } xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rfd_set); return; } /* Invalid message length, trigger STATE_ERROR with tx_msg_len in ISR */ i2c->tx_msg->len = 3; i2c->smbus_block_read = false; dev_err(i2c->adap.dev.parent, "smbus_block_read Invalid msg length\n"); } static void xiic_read_rx(struct xiic_i2c *i2c) { u8 bytes_in_fifo, cr = 0, bytes_to_read = 0; u32 bytes_rem = 0; int i; bytes_in_fifo = xiic_getreg8(i2c, XIIC_RFO_REG_OFFSET) + 1; dev_dbg(i2c->adap.dev.parent, "%s entry, bytes in fifo: %d, rem: %d, SR: 0x%x, CR: 0x%x\n", __func__, bytes_in_fifo, xiic_rx_space(i2c), xiic_getreg8(i2c, XIIC_SR_REG_OFFSET), xiic_getreg8(i2c, XIIC_CR_REG_OFFSET)); if (bytes_in_fifo > xiic_rx_space(i2c)) bytes_in_fifo = xiic_rx_space(i2c); bytes_to_read = bytes_in_fifo; if (!i2c->dynamic) { bytes_rem = xiic_rx_space(i2c) - bytes_in_fifo; /* Set msg length if smbus_block_read */ if (i2c->rx_msg->flags & I2C_M_RECV_LEN) { xiic_smbus_block_read_setup(i2c); return; } if (bytes_rem > IIC_RX_FIFO_DEPTH) { bytes_to_read = bytes_in_fifo; } else if (bytes_rem > 1) { bytes_to_read = bytes_rem - 1; } else if (bytes_rem == 1) { bytes_to_read = 1; /* Set NACK in CR to indicate slave transmitter */ cr = xiic_getreg8(i2c, XIIC_CR_REG_OFFSET); xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, cr | XIIC_CR_NO_ACK_MASK); } else if (bytes_rem == 0) { bytes_to_read = bytes_in_fifo; /* Generate stop on the bus if it is last message */ if (i2c->nmsgs == 1) { cr = xiic_getreg8(i2c, XIIC_CR_REG_OFFSET); xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, cr & ~XIIC_CR_MSMS_MASK); } /* Make TXACK=0, clean up for next transaction */ cr = xiic_getreg8(i2c, XIIC_CR_REG_OFFSET); xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, cr & ~XIIC_CR_NO_ACK_MASK); } } /* Read the fifo */ for (i = 0; i < bytes_to_read; i++) { i2c->rx_msg->buf[i2c->rx_pos++] = xiic_getreg8(i2c, XIIC_DRR_REG_OFFSET); } if (i2c->dynamic) { u8 bytes; /* Receive remaining bytes if less than fifo depth */ bytes = min_t(u8, xiic_rx_space(i2c), IIC_RX_FIFO_DEPTH); bytes--; xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, bytes); } } static int xiic_tx_fifo_space(struct xiic_i2c *i2c) { /* return the actual space left in the FIFO */ return IIC_TX_FIFO_DEPTH - xiic_getreg8(i2c, 
XIIC_TFO_REG_OFFSET) - 1;
}

static void xiic_fill_tx_fifo(struct xiic_i2c *i2c)
{
	u8 fifo_space = xiic_tx_fifo_space(i2c);
	int len = xiic_tx_space(i2c);

	len = (len > fifo_space) ? fifo_space : len;

	dev_dbg(i2c->adap.dev.parent, "%s entry, len: %d, fifo space: %d\n",
		__func__, len, fifo_space);

	while (len--) {
		u16 data = i2c->tx_msg->buf[i2c->tx_pos++];

		if (!xiic_tx_space(i2c) && i2c->nmsgs == 1) {
			/* last message in transfer -> STOP */
			if (i2c->dynamic) {
				data |= XIIC_TX_DYN_STOP_MASK;
			} else {
				u8 cr;
				int status;

				/* Wait till FIFO is empty so STOP is sent last */
				status = xiic_wait_tx_empty(i2c);
				if (status)
					return;

				/* Write to CR to stop */
				cr = xiic_getreg8(i2c, XIIC_CR_REG_OFFSET);
				xiic_setreg8(i2c, XIIC_CR_REG_OFFSET,
					     cr & ~XIIC_CR_MSMS_MASK);
			}
			dev_dbg(i2c->adap.dev.parent, "%s TX STOP\n", __func__);
		}
		xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data);
	}
}

static void xiic_wakeup(struct xiic_i2c *i2c, enum xilinx_i2c_state code)
{
	i2c->tx_msg = NULL;
	i2c->rx_msg = NULL;
	i2c->nmsgs = 0;
	i2c->state = code;
	complete(&i2c->completion);
}

static irqreturn_t xiic_process(int irq, void *dev_id)
{
	struct xiic_i2c *i2c = dev_id;
	u32 pend, isr, ier;
	u32 clr = 0;
	int xfer_more = 0;
	int wakeup_req = 0;
	enum xilinx_i2c_state wakeup_code = STATE_DONE;
	int ret;

	/* Get the interrupt Status from the IPIF. There is no clearing of
	 * interrupts in the IPIF. Interrupts must be cleared at the source.
	 * To find which interrupts are pending; AND interrupts pending with
	 * interrupts masked.
	 */
	mutex_lock(&i2c->lock);
	isr = xiic_getreg32(i2c, XIIC_IISR_OFFSET);
	ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET);
	pend = isr & ier;

	dev_dbg(i2c->adap.dev.parent, "%s: IER: 0x%x, ISR: 0x%x, pend: 0x%x\n",
		__func__, ier, isr, pend);
	dev_dbg(i2c->adap.dev.parent, "%s: SR: 0x%x, msg: %p, nmsgs: %d\n",
		__func__, xiic_getreg8(i2c, XIIC_SR_REG_OFFSET),
		i2c->tx_msg, i2c->nmsgs);
	dev_dbg(i2c->adap.dev.parent, "%s, ISR: 0x%x, CR: 0x%x\n",
		__func__, xiic_getreg32(i2c, XIIC_IISR_OFFSET),
		xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));

	/* Service requesting interrupt */
	if ((pend & XIIC_INTR_ARB_LOST_MASK) ||
	    ((pend & XIIC_INTR_TX_ERROR_MASK) &&
	     !(pend & XIIC_INTR_RX_FULL_MASK))) {
		/* bus arbitration lost, or...
		 * Transmit error _OR_ RX completed
		 * if this happens when RX_FULL is not set
		 * this is probably a TX error
		 */
		dev_dbg(i2c->adap.dev.parent, "%s error\n", __func__);

		/* dynamic mode seems to suffer from problems if we just flush
		 * the fifos and the next message is a TX with len 0 (only addr);
		 * reset the IP instead of just flushing the fifos
		 */
		ret = xiic_reinit(i2c);
		if (ret < 0)
			dev_dbg(i2c->adap.dev.parent, "reinit failed\n");

		if (i2c->rx_msg) {
			wakeup_req = 1;
			wakeup_code = STATE_ERROR;
		}
		if (i2c->tx_msg) {
			wakeup_req = 1;
			wakeup_code = STATE_ERROR;
		}
		/* don't try to handle other events */
		goto out;
	}

	if (pend & XIIC_INTR_RX_FULL_MASK) {
		/* Receive register/FIFO is full */
		clr |= XIIC_INTR_RX_FULL_MASK;
		if (!i2c->rx_msg) {
			dev_dbg(i2c->adap.dev.parent,
				"%s unexpected RX IRQ\n", __func__);
			xiic_clear_rx_fifo(i2c);
			goto out;
		}

		xiic_read_rx(i2c);
		if (xiic_rx_space(i2c) == 0) {
			/* this is the last part of the message */
			i2c->rx_msg = NULL;

			/* also clear TX error if there (RX complete) */
			clr |= (isr & XIIC_INTR_TX_ERROR_MASK);

			dev_dbg(i2c->adap.dev.parent,
				"%s end of message, nmsgs: %d\n",
				__func__, i2c->nmsgs);

			/* send next message if this wasn't the last,
			 * otherwise the transfer will be finalised when
			 * receiving the bus not busy interrupt
			 */
			if (i2c->nmsgs > 1) {
				i2c->nmsgs--;
				i2c->tx_msg++;
				dev_dbg(i2c->adap.dev.parent,
					"%s will start next...\n", __func__);
				xfer_more = 1;
			}
		}
	}

	if (pend & (XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK)) {
		/* Transmit register/FIFO is empty or ½ empty */
		clr |= (pend &
			(XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK));

		if (!i2c->tx_msg) {
			dev_dbg(i2c->adap.dev.parent,
				"%s unexpected TX IRQ\n", __func__);
			goto out;
		}

		xiic_fill_tx_fifo(i2c);

		/* current message sent and there is space in the fifo */
		if (!xiic_tx_space(i2c) && xiic_tx_fifo_space(i2c) >= 2) {
			dev_dbg(i2c->adap.dev.parent,
				"%s end of message sent, nmsgs: %d\n",
				__func__, i2c->nmsgs);
			if (i2c->nmsgs > 1) {
				i2c->nmsgs--;
				i2c->tx_msg++;
				xfer_more = 1;
			} else {
				xiic_irq_dis(i2c, XIIC_INTR_TX_HALF_MASK);
				dev_dbg(i2c->adap.dev.parent,
					"%s Got TX IRQ but no more to do...\n",
					__func__);
			}
		} else if (!xiic_tx_space(i2c) && (i2c->nmsgs == 1))
			/* current frame is sent and is last,
			 * make sure to disable tx half
			 */
			xiic_irq_dis(i2c, XIIC_INTR_TX_HALF_MASK);
	}

	if (pend & XIIC_INTR_BNB_MASK) {
		/* IIC bus has transitioned to not busy */
		clr |= XIIC_INTR_BNB_MASK;

		/* The bus is not busy, disable BusNotBusy interrupt */
		xiic_irq_dis(i2c, XIIC_INTR_BNB_MASK);

		if (i2c->tx_msg && i2c->smbus_block_read) {
			i2c->smbus_block_read = false;
			/* Set requested message len=1 to indicate STATE_DONE */
			i2c->tx_msg->len = 1;
		}

		if (!i2c->tx_msg)
			goto out;

		wakeup_req = 1;

		if (i2c->nmsgs == 1 && !i2c->rx_msg &&
		    xiic_tx_space(i2c) == 0)
			wakeup_code = STATE_DONE;
		else
			wakeup_code = STATE_ERROR;
	}

out:
	dev_dbg(i2c->adap.dev.parent, "%s clr: 0x%x\n", __func__, clr);

	xiic_setreg32(i2c, XIIC_IISR_OFFSET, clr);
	if (xfer_more)
		__xiic_start_xfer(i2c);
	if (wakeup_req)
		xiic_wakeup(i2c, wakeup_code);

	WARN_ON(xfer_more && wakeup_req);

	mutex_unlock(&i2c->lock);
	return IRQ_HANDLED;
}

static int xiic_bus_busy(struct xiic_i2c *i2c)
{
	u8 sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET);

	return (sr & XIIC_SR_BUS_BUSY_MASK) ? -EBUSY : 0;
}

static int xiic_busy(struct xiic_i2c *i2c)
{
	int tries = 3;
	int err;

	if (i2c->tx_msg || i2c->rx_msg)
		return -EBUSY;

	/* In single master mode bus can only be busy, when in use by this
	 * driver.
If the register indicates bus being busy for some reason we * should ignore it, since bus will never be released and i2c will be * stuck forever. */ if (i2c->singlemaster) { return 0; } /* for instance if previous transfer was terminated due to TX error * it might be that the bus is on it's way to become available * give it at most 3 ms to wake */ err = xiic_bus_busy(i2c); while (err && tries--) { msleep(1); err = xiic_bus_busy(i2c); } return err; } static void xiic_start_recv(struct xiic_i2c *i2c) { u16 rx_watermark; u8 cr = 0, rfd_set = 0; struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg; dev_dbg(i2c->adap.dev.parent, "%s entry, ISR: 0x%x, CR: 0x%x\n", __func__, xiic_getreg32(i2c, XIIC_IISR_OFFSET), xiic_getreg8(i2c, XIIC_CR_REG_OFFSET)); /* Disable Tx interrupts */ xiic_irq_dis(i2c, XIIC_INTR_TX_HALF_MASK | XIIC_INTR_TX_EMPTY_MASK); if (i2c->dynamic) { u8 bytes; u16 val; /* Clear and enable Rx full interrupt. */ xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK | XIIC_INTR_TX_ERROR_MASK); /* * We want to get all but last byte, because the TX_ERROR IRQ * is used to indicate error ACK on the address, and * negative ack on the last received byte, so to not mix * them receive all but last. * In the case where there is only one byte to receive * we can check if ERROR and RX full is set at the same time */ rx_watermark = msg->len; bytes = min_t(u8, rx_watermark, IIC_RX_FIFO_DEPTH); if (rx_watermark > 0) bytes--; xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, bytes); /* write the address */ xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, i2c_8bit_addr_from_msg(msg) | XIIC_TX_DYN_START_MASK); /* If last message, include dynamic stop bit with length */ val = (i2c->nmsgs == 1) ? XIIC_TX_DYN_STOP_MASK : 0; val |= msg->len; xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, val); xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK); } else { /* * If previous message is Tx, make sure that Tx FIFO is empty * before starting a new transfer as the repeated start in * standard mode can corrupt the transaction if there are * still bytes to be transmitted in FIFO */ if (i2c->prev_msg_tx) { int status; status = xiic_wait_tx_empty(i2c); if (status) return; } cr = xiic_getreg8(i2c, XIIC_CR_REG_OFFSET); /* Set Receive fifo depth */ rx_watermark = msg->len; if (rx_watermark > IIC_RX_FIFO_DEPTH) { rfd_set = IIC_RX_FIFO_DEPTH - 1; } else if (rx_watermark == 1) { rfd_set = rx_watermark - 1; /* Set No_ACK, except for smbus_block_read */ if (!(i2c->rx_msg->flags & I2C_M_RECV_LEN)) { /* Handle single byte transfer separately */ cr |= XIIC_CR_NO_ACK_MASK; } } else if (rx_watermark == 0) { rfd_set = rx_watermark; } else { rfd_set = rx_watermark - 2; } /* Check if RSTA should be set */ if (cr & XIIC_CR_MSMS_MASK) { /* Already a master, RSTA should be set */ xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, (cr | XIIC_CR_REPEATED_START_MASK) & ~(XIIC_CR_DIR_IS_TX_MASK)); } xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rfd_set); /* Clear and enable Rx full and transmit complete interrupts */ xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK | XIIC_INTR_TX_ERROR_MASK); /* Write the address */ xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, i2c_8bit_addr_from_msg(msg)); /* Write to Control Register,to start transaction in Rx mode */ if ((cr & XIIC_CR_MSMS_MASK) == 0) { xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, (cr | XIIC_CR_MSMS_MASK) & ~(XIIC_CR_DIR_IS_TX_MASK)); } dev_dbg(i2c->adap.dev.parent, "%s end, ISR: 0x%x, CR: 0x%x\n", __func__, xiic_getreg32(i2c, XIIC_IISR_OFFSET), xiic_getreg8(i2c, XIIC_CR_REG_OFFSET)); } if (i2c->nmsgs == 1) /* very last, enable bus not busy as well */ xiic_irq_clr_en(i2c, 
XIIC_INTR_BNB_MASK);

	/* the message is transmitted */
	i2c->tx_pos = msg->len;

	/* Enable interrupts */
	xiic_setreg32(i2c, XIIC_DGIER_OFFSET, XIIC_GINTR_ENABLE_MASK);
	i2c->prev_msg_tx = false;
}

static void xiic_start_send(struct xiic_i2c *i2c)
{
	u8 cr = 0;
	u16 data;
	struct i2c_msg *msg = i2c->tx_msg;

	dev_dbg(i2c->adap.dev.parent, "%s entry, msg: %p, len: %d\n",
		__func__, msg, msg->len);
	dev_dbg(i2c->adap.dev.parent, "%s entry, ISR: 0x%x, CR: 0x%x\n",
		__func__, xiic_getreg32(i2c, XIIC_IISR_OFFSET),
		xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));

	if (i2c->dynamic) {
		/* write the address */
		data = i2c_8bit_addr_from_msg(msg) | XIIC_TX_DYN_START_MASK;

		if (i2c->nmsgs == 1 && msg->len == 0)
			/* no data and last message -> add STOP */
			data |= XIIC_TX_DYN_STOP_MASK;

		xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data);

		/* Clear any pending Tx empty, Tx Error and then enable them */
		xiic_irq_clr_en(i2c, XIIC_INTR_TX_EMPTY_MASK |
				XIIC_INTR_TX_ERROR_MASK |
				XIIC_INTR_BNB_MASK |
				((i2c->nmsgs > 1 || xiic_tx_space(i2c)) ?
					XIIC_INTR_TX_HALF_MASK : 0));

		xiic_fill_tx_fifo(i2c);
	} else {
		/*
		 * If previous message is Tx, make sure that Tx FIFO is empty
		 * before starting a new transfer as the repeated start in
		 * standard mode can corrupt the transaction if there are
		 * still bytes to be transmitted in FIFO
		 */
		if (i2c->prev_msg_tx) {
			int status;

			status = xiic_wait_tx_empty(i2c);
			if (status)
				return;
		}

		/* Check if RSTA should be set */
		cr = xiic_getreg8(i2c, XIIC_CR_REG_OFFSET);
		if (cr & XIIC_CR_MSMS_MASK) {
			/* Already a master, RSTA should be set */
			xiic_setreg8(i2c, XIIC_CR_REG_OFFSET,
				     (cr | XIIC_CR_REPEATED_START_MASK |
				      XIIC_CR_DIR_IS_TX_MASK) &
				     ~(XIIC_CR_NO_ACK_MASK));
		}

		/* Write address to FIFO */
		data = i2c_8bit_addr_from_msg(msg);
		xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data);

		/* Fill fifo */
		xiic_fill_tx_fifo(i2c);

		if ((cr & XIIC_CR_MSMS_MASK) == 0) {
			/* Start Tx by writing to CR */
			cr = xiic_getreg8(i2c, XIIC_CR_REG_OFFSET);
			xiic_setreg8(i2c, XIIC_CR_REG_OFFSET,
				     cr | XIIC_CR_MSMS_MASK |
				     XIIC_CR_DIR_IS_TX_MASK);
		}

		/* Clear any pending Tx empty, Tx Error and then enable them */
		xiic_irq_clr_en(i2c, XIIC_INTR_TX_EMPTY_MASK |
				XIIC_INTR_TX_ERROR_MASK |
				XIIC_INTR_BNB_MASK);
	}
	i2c->prev_msg_tx = true;
}

static void __xiic_start_xfer(struct xiic_i2c *i2c)
{
	int fifo_space = xiic_tx_fifo_space(i2c);

	dev_dbg(i2c->adap.dev.parent, "%s entry, msg: %p, fifo space: %d\n",
		__func__, i2c->tx_msg, fifo_space);

	if (!i2c->tx_msg)
		return;

	i2c->rx_pos = 0;
	i2c->tx_pos = 0;
	i2c->state = STATE_START;
	if (i2c->tx_msg->flags & I2C_M_RD) {
		/* we don't dare put several reads in the FIFO */
		xiic_start_recv(i2c);
	} else {
		xiic_start_send(i2c);
	}
}

static int xiic_start_xfer(struct xiic_i2c *i2c, struct i2c_msg *msgs, int num)
{
	bool broken_read, max_read_len, smbus_blk_read;
	int ret, count;

	mutex_lock(&i2c->lock);

	ret = xiic_busy(i2c);
	if (ret)
		goto out;

	i2c->tx_msg = msgs;
	i2c->rx_msg = NULL;
	i2c->nmsgs = num;
	init_completion(&i2c->completion);

	/* Decide standard mode or Dynamic mode */
	i2c->dynamic = true;

	/* Initialize prev message type */
	i2c->prev_msg_tx = false;

	/*
	 * Scan through the messages and use dynamic mode unless one of the
	 * three conditions below holds; standard mode is needed even if only
	 * a single message in the transfer triggers a condition:
	 * - a read, when dynamic-mode reads are broken for delayed reads
	 *   in the xlnx,axi-iic-2.0 / xlnx,xps-iic-2.00.a IP versions,
	 * - a read longer than 255 bytes,
	 * - an smbus_block_read transaction.
*/ for (count = 0; count < i2c->nmsgs; count++) { broken_read = (i2c->quirks & DYNAMIC_MODE_READ_BROKEN_BIT) && (i2c->tx_msg[count].flags & I2C_M_RD); max_read_len = (i2c->tx_msg[count].flags & I2C_M_RD) && (i2c->tx_msg[count].len > MAX_READ_LENGTH_DYNAMIC); smbus_blk_read = (i2c->tx_msg[count].flags & I2C_M_RECV_LEN); if (broken_read || max_read_len || smbus_blk_read) { i2c->dynamic = false; break; } } ret = xiic_reinit(i2c); if (!ret) __xiic_start_xfer(i2c); out: mutex_unlock(&i2c->lock); return ret; } static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct xiic_i2c *i2c = i2c_get_adapdata(adap); int err; dev_dbg(adap->dev.parent, "%s entry SR: 0x%x\n", __func__, xiic_getreg8(i2c, XIIC_SR_REG_OFFSET)); err = pm_runtime_resume_and_get(i2c->dev); if (err < 0) return err; err = xiic_start_xfer(i2c, msgs, num); if (err < 0) { dev_err(adap->dev.parent, "Error xiic_start_xfer\n"); goto out; } err = wait_for_completion_timeout(&i2c->completion, XIIC_XFER_TIMEOUT); mutex_lock(&i2c->lock); if (err == 0) { /* Timeout */ i2c->tx_msg = NULL; i2c->rx_msg = NULL; i2c->nmsgs = 0; err = -ETIMEDOUT; } else { err = (i2c->state == STATE_DONE) ? num : -EIO; } mutex_unlock(&i2c->lock); out: pm_runtime_mark_last_busy(i2c->dev); pm_runtime_put_autosuspend(i2c->dev); return err; } static u32 xiic_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SMBUS_BLOCK_DATA; } static const struct i2c_algorithm xiic_algorithm = { .master_xfer = xiic_xfer, .functionality = xiic_func, }; static const struct i2c_adapter xiic_adapter = { .owner = THIS_MODULE, .class = I2C_CLASS_DEPRECATED, .algo = &xiic_algorithm, }; #if defined(CONFIG_OF) static const struct xiic_version_data xiic_2_00 = { .quirks = DYNAMIC_MODE_READ_BROKEN_BIT, }; static const struct of_device_id xiic_of_match[] = { { .compatible = "xlnx,xps-iic-2.00.a", .data = &xiic_2_00 }, { .compatible = "xlnx,axi-iic-2.1", }, {}, }; MODULE_DEVICE_TABLE(of, xiic_of_match); #endif static int xiic_i2c_probe(struct platform_device *pdev) { struct xiic_i2c *i2c; struct xiic_i2c_platform_data *pdata; const struct of_device_id *match; struct resource *res; int ret, irq; u8 i; u32 sr; i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL); if (!i2c) return -ENOMEM; match = of_match_node(xiic_of_match, pdev->dev.of_node); if (match && match->data) { const struct xiic_version_data *data = match->data; i2c->quirks = data->quirks; } i2c->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(i2c->base)) return PTR_ERR(i2c->base); irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; pdata = dev_get_platdata(&pdev->dev); /* hook up driver to tree */ platform_set_drvdata(pdev, i2c); i2c->adap = xiic_adapter; i2c_set_adapdata(&i2c->adap, i2c); i2c->adap.dev.parent = &pdev->dev; i2c->adap.dev.of_node = pdev->dev.of_node; snprintf(i2c->adap.name, sizeof(i2c->adap.name), DRIVER_NAME " %s", pdev->name); mutex_init(&i2c->lock); i2c->clk = devm_clk_get_enabled(&pdev->dev, NULL); if (IS_ERR(i2c->clk)) return dev_err_probe(&pdev->dev, PTR_ERR(i2c->clk), "failed to enable input clock.\n"); i2c->dev = &pdev->dev; pm_runtime_set_autosuspend_delay(i2c->dev, XIIC_PM_TIMEOUT); pm_runtime_use_autosuspend(i2c->dev); pm_runtime_set_active(i2c->dev); pm_runtime_enable(i2c->dev); /* SCL frequency configuration */ i2c->input_clk = clk_get_rate(i2c->clk); ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency", &i2c->i2c_clk); /* If clock-frequency not specified in DT, do not configure in SW */ if (ret || 
i2c->i2c_clk > I2C_MAX_FAST_MODE_PLUS_FREQ) i2c->i2c_clk = 0; ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, xiic_process, IRQF_ONESHOT, pdev->name, i2c); if (ret < 0) { dev_err(&pdev->dev, "Cannot claim IRQ\n"); goto err_pm_disable; } i2c->singlemaster = of_property_read_bool(pdev->dev.of_node, "single-master"); /* * Detect endianness * Try to reset the TX FIFO. Then check the EMPTY flag. If it is not * set, assume that the endianness was wrong and swap. */ i2c->endianness = LITTLE; xiic_setreg32(i2c, XIIC_CR_REG_OFFSET, XIIC_CR_TX_FIFO_RESET_MASK); /* Reset is cleared in xiic_reinit */ sr = xiic_getreg32(i2c, XIIC_SR_REG_OFFSET); if (!(sr & XIIC_SR_TX_FIFO_EMPTY_MASK)) i2c->endianness = BIG; ret = xiic_reinit(i2c); if (ret < 0) { dev_err(&pdev->dev, "Cannot xiic_reinit\n"); goto err_pm_disable; } /* add i2c adapter to i2c tree */ ret = i2c_add_adapter(&i2c->adap); if (ret) { xiic_deinit(i2c); goto err_pm_disable; } if (pdata) { /* add in known devices to the bus */ for (i = 0; i < pdata->num_devices; i++) i2c_new_client_device(&i2c->adap, pdata->devices + i); } dev_dbg(&pdev->dev, "mmio %08lx irq %d scl clock frequency %d\n", (unsigned long)res->start, irq, i2c->i2c_clk); return 0; err_pm_disable: pm_runtime_set_suspended(&pdev->dev); pm_runtime_disable(&pdev->dev); return ret; } static void xiic_i2c_remove(struct platform_device *pdev) { struct xiic_i2c *i2c = platform_get_drvdata(pdev); int ret; /* remove adapter & data */ i2c_del_adapter(&i2c->adap); ret = pm_runtime_get_sync(i2c->dev); if (ret < 0) dev_warn(&pdev->dev, "Failed to activate device for removal (%pe)\n", ERR_PTR(ret)); else xiic_deinit(i2c); pm_runtime_put_sync(i2c->dev); pm_runtime_disable(&pdev->dev); pm_runtime_set_suspended(&pdev->dev); pm_runtime_dont_use_autosuspend(&pdev->dev); } static int __maybe_unused xiic_i2c_runtime_suspend(struct device *dev) { struct xiic_i2c *i2c = dev_get_drvdata(dev); clk_disable(i2c->clk); return 0; } static int __maybe_unused xiic_i2c_runtime_resume(struct device *dev) { struct xiic_i2c *i2c = dev_get_drvdata(dev); int ret; ret = clk_enable(i2c->clk); if (ret) { dev_err(dev, "Cannot enable clock.\n"); return ret; } return 0; } static const struct dev_pm_ops xiic_dev_pm_ops = { SET_RUNTIME_PM_OPS(xiic_i2c_runtime_suspend, xiic_i2c_runtime_resume, NULL) }; static struct platform_driver xiic_i2c_driver = { .probe = xiic_i2c_probe, .remove_new = xiic_i2c_remove, .driver = { .name = DRIVER_NAME, .of_match_table = of_match_ptr(xiic_of_match), .pm = &xiic_dev_pm_ops, }, }; module_platform_driver(xiic_i2c_driver); MODULE_ALIAS("platform:" DRIVER_NAME); MODULE_AUTHOR("[email protected]"); MODULE_DESCRIPTION("Xilinx I2C bus driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/i2c/busses/i2c-xiic.c
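To make the timing arithmetic in xiic_setclk() concrete, here is a standalone sketch (not driver code) with an assumed 100 MHz AXI input clock and a 400 kHz SCL target; the 900 ns figure is the TSUSTA entry from the 400KHz row of timing_reg_values.

/* Standalone sketch of the xiic_setclk() math (assumptions: input_clk =
 * 100 MHz, i2c_clk = 400 kHz). Registers hold cycle counts minus one. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long input_clk = 100000000;	/* assumed AXI clock */
	unsigned long i2c_clk = 400000;		/* target SCL */
	unsigned long clk_in_mhz = DIV_ROUND_UP(input_clk, 1000000);
	unsigned long tsusta_ns = 900;		/* 400KHz row of timing_reg_values */

	/* THIGH/TLOW: half SCL period in input clocks, minus the IP's fixed
	 * 7-cycle overhead */
	unsigned long thigh = DIV_ROUND_UP(input_clk, 2 * i2c_clk) - 7;
	/* TSUSTA: spec time in ns converted to input clock cycles */
	unsigned long tsusta = (tsusta_ns * clk_in_mhz) / 1000;

	printf("THIGH  = %lu, register gets %lu\n", thigh, thigh - 1);	/* 118 -> 117 */
	printf("TSUSTA = %lu, register gets %lu\n", tsusta, tsusta - 1);	/* 90 -> 89 */
	return 0;
}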
// SPDX-License-Identifier: GPL-2.0-only /* * drivers/i2c/busses/i2c-tegra-bpmp.c * * Copyright (c) 2016 NVIDIA Corporation. All rights reserved. * * Author: Shardar Shariff Md <[email protected]> */ #include <linux/err.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <soc/tegra/bpmp-abi.h> #include <soc/tegra/bpmp.h> /* * Serialized I2C message header size is 6 bytes and includes address, flags * and length */ #define SERIALI2C_HDR_SIZE 6 struct tegra_bpmp_i2c { struct i2c_adapter adapter; struct device *dev; struct tegra_bpmp *bpmp; unsigned int bus; }; /* * Linux flags are translated to BPMP defined I2C flags that are used in BPMP * firmware I2C driver to avoid any issues in future if Linux I2C flags are * changed. */ static void tegra_bpmp_xlate_flags(u16 flags, u16 *out) { if (flags & I2C_M_TEN) *out |= SERIALI2C_TEN; if (flags & I2C_M_RD) *out |= SERIALI2C_RD; if (flags & I2C_M_STOP) *out |= SERIALI2C_STOP; if (flags & I2C_M_NOSTART) *out |= SERIALI2C_NOSTART; if (flags & I2C_M_REV_DIR_ADDR) *out |= SERIALI2C_REV_DIR_ADDR; if (flags & I2C_M_IGNORE_NAK) *out |= SERIALI2C_IGNORE_NAK; if (flags & I2C_M_NO_RD_ACK) *out |= SERIALI2C_NO_RD_ACK; if (flags & I2C_M_RECV_LEN) *out |= SERIALI2C_RECV_LEN; } /* * The serialized I2C format is simply the following: * [addr little-endian][flags little-endian][len little-endian][data if write] * [addr little-endian][flags little-endian][len little-endian][data if write] * ... * * The flags are translated from Linux kernel representation to seriali2c * representation. Any undefined flag being set causes an error. * * The data is there only for writes. Reads have the data transferred in the * other direction, and thus data is not present. * * See deserialize_i2c documentation for the data format in the other direction. */ static void tegra_bpmp_serialize_i2c_msg(struct tegra_bpmp_i2c *i2c, struct mrq_i2c_request *request, struct i2c_msg *msgs, unsigned int num) { char *buf = request->xfer.data_buf; unsigned int i, j, pos = 0; for (i = 0; i < num; i++) { struct i2c_msg *msg = &msgs[i]; u16 flags = 0; tegra_bpmp_xlate_flags(msg->flags, &flags); buf[pos++] = msg->addr & 0xff; buf[pos++] = (msg->addr & 0xff00) >> 8; buf[pos++] = flags & 0xff; buf[pos++] = (flags & 0xff00) >> 8; buf[pos++] = msg->len & 0xff; buf[pos++] = (msg->len & 0xff00) >> 8; if ((flags & SERIALI2C_RD) == 0) { for (j = 0; j < msg->len; j++) buf[pos++] = msg->buf[j]; } } request->xfer.data_size = pos; } /* * The data in the BPMP -> CPU direction is composed of sequential blocks for * those messages that have I2C_M_RD. So, for example, if you have: * * - !I2C_M_RD, len == 5, data == a0 01 02 03 04 * - !I2C_M_RD, len == 1, data == a0 * - I2C_M_RD, len == 2, data == [uninitialized buffer 1] * - !I2C_M_RD, len == 1, data == a2 * - I2C_M_RD, len == 2, data == [uninitialized buffer 2] * * ...then the data in the BPMP -> CPU direction would be 4 bytes total, and * would contain 2 bytes that will go to uninitialized buffer 1, and 2 bytes * that will go to uninitialized buffer 2. 
*/ static int tegra_bpmp_i2c_deserialize(struct tegra_bpmp_i2c *i2c, struct mrq_i2c_response *response, struct i2c_msg *msgs, unsigned int num) { size_t size = response->xfer.data_size, len = 0, pos = 0; char *buf = response->xfer.data_buf; unsigned int i; for (i = 0; i < num; i++) if (msgs[i].flags & I2C_M_RD) len += msgs[i].len; if (len != size) return -EINVAL; for (i = 0; i < num; i++) { if (msgs[i].flags & I2C_M_RD) { memcpy(msgs[i].buf, buf + pos, msgs[i].len); pos += msgs[i].len; } } return 0; } static int tegra_bpmp_i2c_msg_len_check(struct i2c_msg *msgs, unsigned int num) { size_t tx_len = 0, rx_len = 0; unsigned int i; for (i = 0; i < num; i++) if (!(msgs[i].flags & I2C_M_RD)) tx_len += SERIALI2C_HDR_SIZE + msgs[i].len; if (tx_len > TEGRA_I2C_IPC_MAX_IN_BUF_SIZE) return -EINVAL; for (i = 0; i < num; i++) if ((msgs[i].flags & I2C_M_RD)) rx_len += msgs[i].len; if (rx_len > TEGRA_I2C_IPC_MAX_OUT_BUF_SIZE) return -EINVAL; return 0; } static int tegra_bpmp_i2c_msg_xfer(struct tegra_bpmp_i2c *i2c, struct mrq_i2c_request *request, struct mrq_i2c_response *response, bool atomic) { struct tegra_bpmp_message msg; int err; request->cmd = CMD_I2C_XFER; request->xfer.bus_id = i2c->bus; memset(&msg, 0, sizeof(msg)); msg.mrq = MRQ_I2C; msg.tx.data = request; msg.tx.size = sizeof(*request); msg.rx.data = response; msg.rx.size = sizeof(*response); if (atomic) err = tegra_bpmp_transfer_atomic(i2c->bpmp, &msg); else err = tegra_bpmp_transfer(i2c->bpmp, &msg); if (err < 0) { dev_err(i2c->dev, "failed to transfer message: %d\n", err); return err; } if (msg.rx.ret != 0) { if (msg.rx.ret == -BPMP_EAGAIN) { dev_dbg(i2c->dev, "arbitration lost\n"); return -EAGAIN; } if (msg.rx.ret == -BPMP_ETIMEDOUT) { dev_dbg(i2c->dev, "timeout\n"); return -ETIMEDOUT; } if (msg.rx.ret == -BPMP_ENXIO) { dev_dbg(i2c->dev, "NAK\n"); return -ENXIO; } dev_err(i2c->dev, "transaction failed: %d\n", msg.rx.ret); return -EIO; } return 0; } static int tegra_bpmp_i2c_xfer_common(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num, bool atomic) { struct tegra_bpmp_i2c *i2c = i2c_get_adapdata(adapter); struct mrq_i2c_response response; struct mrq_i2c_request request; int err; err = tegra_bpmp_i2c_msg_len_check(msgs, num); if (err < 0) { dev_err(i2c->dev, "unsupported message length\n"); return err; } memset(&request, 0, sizeof(request)); memset(&response, 0, sizeof(response)); tegra_bpmp_serialize_i2c_msg(i2c, &request, msgs, num); err = tegra_bpmp_i2c_msg_xfer(i2c, &request, &response, atomic); if (err < 0) { dev_err(i2c->dev, "failed to transfer message: %d\n", err); return err; } err = tegra_bpmp_i2c_deserialize(i2c, &response, msgs, num); if (err < 0) { dev_err(i2c->dev, "failed to deserialize message: %d\n", err); return err; } return num; } static int tegra_bpmp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) { return tegra_bpmp_i2c_xfer_common(adapter, msgs, num, false); } static int tegra_bpmp_i2c_xfer_atomic(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) { return tegra_bpmp_i2c_xfer_common(adapter, msgs, num, true); } static u32 tegra_bpmp_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR | I2C_FUNC_PROTOCOL_MANGLING | I2C_FUNC_NOSTART; } static const struct i2c_algorithm tegra_bpmp_i2c_algo = { .master_xfer = tegra_bpmp_i2c_xfer, .master_xfer_atomic = tegra_bpmp_i2c_xfer_atomic, .functionality = tegra_bpmp_i2c_func, }; static int tegra_bpmp_i2c_probe(struct platform_device *pdev) { struct tegra_bpmp_i2c *i2c; u32 value; int err; 
i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL); if (!i2c) return -ENOMEM; i2c->dev = &pdev->dev; i2c->bpmp = dev_get_drvdata(pdev->dev.parent); if (!i2c->bpmp) return -ENODEV; err = of_property_read_u32(pdev->dev.of_node, "nvidia,bpmp-bus-id", &value); if (err < 0) return err; i2c->bus = value; i2c_set_adapdata(&i2c->adapter, i2c); i2c->adapter.owner = THIS_MODULE; strscpy(i2c->adapter.name, "Tegra BPMP I2C adapter", sizeof(i2c->adapter.name)); i2c->adapter.algo = &tegra_bpmp_i2c_algo; i2c->adapter.dev.parent = &pdev->dev; i2c->adapter.dev.of_node = pdev->dev.of_node; platform_set_drvdata(pdev, i2c); return i2c_add_adapter(&i2c->adapter); } static void tegra_bpmp_i2c_remove(struct platform_device *pdev) { struct tegra_bpmp_i2c *i2c = platform_get_drvdata(pdev); i2c_del_adapter(&i2c->adapter); } static const struct of_device_id tegra_bpmp_i2c_of_match[] = { { .compatible = "nvidia,tegra186-bpmp-i2c", }, { } }; MODULE_DEVICE_TABLE(of, tegra_bpmp_i2c_of_match); static struct platform_driver tegra_bpmp_i2c_driver = { .driver = { .name = "tegra-bpmp-i2c", .of_match_table = tegra_bpmp_i2c_of_match, }, .probe = tegra_bpmp_i2c_probe, .remove_new = tegra_bpmp_i2c_remove, }; module_platform_driver(tegra_bpmp_i2c_driver); MODULE_DESCRIPTION("NVIDIA Tegra BPMP I2C bus controller driver"); MODULE_AUTHOR("Shardar Shariff Md <[email protected]>"); MODULE_AUTHOR("Juha-Matti Tilli"); MODULE_LICENSE("GPL v2");
drivers/i2c/busses/i2c-tegra-bpmp.c
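The serialization comment in i2c-tegra-bpmp.c documents the wire format exactly: per message, a 6-byte little-endian header (addr, flags, len) followed by payload for writes only. This standalone sketch packs one message that way; struct msg and MSG_RD are simplified stand-ins for struct i2c_msg and SERIALI2C_RD, not the kernel types.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MSG_RD 0x0001	/* stand-in for the read flag */

struct msg { uint16_t addr, flags, len; const uint8_t *buf; };

/* Pack one message per the 6-byte header layout described above:
 * [addr LE][flags LE][len LE][data only for writes]. */
static size_t pack_msg(uint8_t *out, const struct msg *m)
{
	size_t pos = 0;

	out[pos++] = m->addr & 0xff;  out[pos++] = m->addr >> 8;
	out[pos++] = m->flags & 0xff; out[pos++] = m->flags >> 8;
	out[pos++] = m->len & 0xff;   out[pos++] = m->len >> 8;
	if (!(m->flags & MSG_RD)) {	/* reads carry no payload */
		memcpy(out + pos, m->buf, m->len);
		pos += m->len;
	}
	return pos;
}

int main(void)
{
	const uint8_t data[] = { 0xa0, 0x01 };
	struct msg m = { .addr = 0x50, .flags = 0, .len = 2, .buf = data };
	uint8_t buf[64];
	size_t i, n = pack_msg(buf, &m);

	for (i = 0; i < n; i++)
		printf("%02x ", buf[i]);
	printf("\n");	/* prints: 50 00 00 00 02 00 a0 01 */
	return 0;
}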
// SPDX-License-Identifier: GPL-2.0-only /* * i2c-exynos5.c - Samsung Exynos5 I2C Controller Driver * * Copyright (C) 2013 Samsung Electronics Co., Ltd. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/i2c.h> #include <linux/time.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/of.h> #include <linux/spinlock.h> /* * HSI2C controller from Samsung supports 2 modes of operation * 1. Auto mode: Where in master automatically controls the whole transaction * 2. Manual mode: Software controls the transaction by issuing commands * START, READ, WRITE, STOP, RESTART in I2C_MANUAL_CMD register. * * Operation mode can be selected by setting AUTO_MODE bit in I2C_CONF register * * Special bits are available for both modes of operation to set commands * and for checking transfer status */ /* Register Map */ #define HSI2C_CTL 0x00 #define HSI2C_FIFO_CTL 0x04 #define HSI2C_TRAILIG_CTL 0x08 #define HSI2C_CLK_CTL 0x0C #define HSI2C_CLK_SLOT 0x10 #define HSI2C_INT_ENABLE 0x20 #define HSI2C_INT_STATUS 0x24 #define HSI2C_ERR_STATUS 0x2C #define HSI2C_FIFO_STATUS 0x30 #define HSI2C_TX_DATA 0x34 #define HSI2C_RX_DATA 0x38 #define HSI2C_CONF 0x40 #define HSI2C_AUTO_CONF 0x44 #define HSI2C_TIMEOUT 0x48 #define HSI2C_MANUAL_CMD 0x4C #define HSI2C_TRANS_STATUS 0x50 #define HSI2C_TIMING_HS1 0x54 #define HSI2C_TIMING_HS2 0x58 #define HSI2C_TIMING_HS3 0x5C #define HSI2C_TIMING_FS1 0x60 #define HSI2C_TIMING_FS2 0x64 #define HSI2C_TIMING_FS3 0x68 #define HSI2C_TIMING_SLA 0x6C #define HSI2C_ADDR 0x70 /* I2C_CTL Register bits */ #define HSI2C_FUNC_MODE_I2C (1u << 0) #define HSI2C_MASTER (1u << 3) #define HSI2C_RXCHON (1u << 6) #define HSI2C_TXCHON (1u << 7) #define HSI2C_SW_RST (1u << 31) /* I2C_FIFO_CTL Register bits */ #define HSI2C_RXFIFO_EN (1u << 0) #define HSI2C_TXFIFO_EN (1u << 1) #define HSI2C_RXFIFO_TRIGGER_LEVEL(x) ((x) << 4) #define HSI2C_TXFIFO_TRIGGER_LEVEL(x) ((x) << 16) /* I2C_TRAILING_CTL Register bits */ #define HSI2C_TRAILING_COUNT (0xf) /* I2C_INT_EN Register bits */ #define HSI2C_INT_TX_ALMOSTEMPTY_EN (1u << 0) #define HSI2C_INT_RX_ALMOSTFULL_EN (1u << 1) #define HSI2C_INT_TRAILING_EN (1u << 6) /* I2C_INT_STAT Register bits */ #define HSI2C_INT_TX_ALMOSTEMPTY (1u << 0) #define HSI2C_INT_RX_ALMOSTFULL (1u << 1) #define HSI2C_INT_TX_UNDERRUN (1u << 2) #define HSI2C_INT_TX_OVERRUN (1u << 3) #define HSI2C_INT_RX_UNDERRUN (1u << 4) #define HSI2C_INT_RX_OVERRUN (1u << 5) #define HSI2C_INT_TRAILING (1u << 6) #define HSI2C_INT_I2C (1u << 9) #define HSI2C_INT_TRANS_DONE (1u << 7) #define HSI2C_INT_TRANS_ABORT (1u << 8) #define HSI2C_INT_NO_DEV_ACK (1u << 9) #define HSI2C_INT_NO_DEV (1u << 10) #define HSI2C_INT_TIMEOUT (1u << 11) #define HSI2C_INT_I2C_TRANS (HSI2C_INT_TRANS_DONE | \ HSI2C_INT_TRANS_ABORT | \ HSI2C_INT_NO_DEV_ACK | \ HSI2C_INT_NO_DEV | \ HSI2C_INT_TIMEOUT) /* I2C_FIFO_STAT Register bits */ #define HSI2C_RX_FIFO_EMPTY (1u << 24) #define HSI2C_RX_FIFO_FULL (1u << 23) #define HSI2C_RX_FIFO_LVL(x) ((x >> 16) & 0x7f) #define HSI2C_TX_FIFO_EMPTY (1u << 8) #define HSI2C_TX_FIFO_FULL (1u << 7) #define HSI2C_TX_FIFO_LVL(x) ((x >> 0) & 0x7f) /* I2C_CONF Register bits */ #define HSI2C_AUTO_MODE (1u << 31) #define HSI2C_10BIT_ADDR_MODE (1u << 30) #define HSI2C_HS_MODE (1u << 29) /* I2C_AUTO_CONF Register bits */ #define HSI2C_READ_WRITE (1u << 16) #define HSI2C_STOP_AFTER_TRANS (1u << 17) #define HSI2C_MASTER_RUN 
(1u << 31) /* I2C_TIMEOUT Register bits */ #define HSI2C_TIMEOUT_EN (1u << 31) #define HSI2C_TIMEOUT_MASK 0xff /* I2C_MANUAL_CMD register bits */ #define HSI2C_CMD_READ_DATA (1u << 4) #define HSI2C_CMD_SEND_STOP (1u << 2) /* I2C_TRANS_STATUS register bits */ #define HSI2C_MASTER_BUSY (1u << 17) #define HSI2C_SLAVE_BUSY (1u << 16) /* I2C_TRANS_STATUS register bits for Exynos5 variant */ #define HSI2C_TIMEOUT_AUTO (1u << 4) #define HSI2C_NO_DEV (1u << 3) #define HSI2C_NO_DEV_ACK (1u << 2) #define HSI2C_TRANS_ABORT (1u << 1) #define HSI2C_TRANS_DONE (1u << 0) /* I2C_TRANS_STATUS register bits for Exynos7 variant */ #define HSI2C_MASTER_ST_MASK 0xf #define HSI2C_MASTER_ST_IDLE 0x0 #define HSI2C_MASTER_ST_START 0x1 #define HSI2C_MASTER_ST_RESTART 0x2 #define HSI2C_MASTER_ST_STOP 0x3 #define HSI2C_MASTER_ST_MASTER_ID 0x4 #define HSI2C_MASTER_ST_ADDR0 0x5 #define HSI2C_MASTER_ST_ADDR1 0x6 #define HSI2C_MASTER_ST_ADDR2 0x7 #define HSI2C_MASTER_ST_ADDR_SR 0x8 #define HSI2C_MASTER_ST_READ 0x9 #define HSI2C_MASTER_ST_WRITE 0xa #define HSI2C_MASTER_ST_NO_ACK 0xb #define HSI2C_MASTER_ST_LOSE 0xc #define HSI2C_MASTER_ST_WAIT 0xd #define HSI2C_MASTER_ST_WAIT_CMD 0xe /* I2C_ADDR register bits */ #define HSI2C_SLV_ADDR_SLV(x) ((x & 0x3ff) << 0) #define HSI2C_SLV_ADDR_MAS(x) ((x & 0x3ff) << 10) #define HSI2C_MASTER_ID(x) ((x & 0xff) << 24) #define MASTER_ID(x) ((x & 0x7) + 0x08) #define EXYNOS5_I2C_TIMEOUT (msecs_to_jiffies(100)) enum i2c_type_exynos { I2C_TYPE_EXYNOS5, I2C_TYPE_EXYNOS7, I2C_TYPE_EXYNOSAUTOV9, }; struct exynos5_i2c { struct i2c_adapter adap; struct i2c_msg *msg; struct completion msg_complete; unsigned int msg_ptr; unsigned int irq; void __iomem *regs; struct clk *clk; /* operating clock */ struct clk *pclk; /* bus clock */ struct device *dev; int state; spinlock_t lock; /* IRQ synchronization */ /* * Since the TRANS_DONE bit is cleared on read, and we may read it * either during an IRQ or after a transaction, keep track of its * state here. */ int trans_done; /* Controller operating frequency */ unsigned int op_clock; /* Version of HS-I2C Hardware */ const struct exynos_hsi2c_variant *variant; }; /** * struct exynos_hsi2c_variant - platform specific HSI2C driver data * @fifo_depth: the fifo depth supported by the HSI2C module * @hw: the hardware variant of Exynos I2C controller * * Specifies platform specific configuration of HSI2C module. * Note: A structure for driver specific platform data is used for future * expansion of its usage. 
*/ struct exynos_hsi2c_variant { unsigned int fifo_depth; enum i2c_type_exynos hw; }; static const struct exynos_hsi2c_variant exynos5250_hsi2c_data = { .fifo_depth = 64, .hw = I2C_TYPE_EXYNOS5, }; static const struct exynos_hsi2c_variant exynos5260_hsi2c_data = { .fifo_depth = 16, .hw = I2C_TYPE_EXYNOS5, }; static const struct exynos_hsi2c_variant exynos7_hsi2c_data = { .fifo_depth = 16, .hw = I2C_TYPE_EXYNOS7, }; static const struct exynos_hsi2c_variant exynosautov9_hsi2c_data = { .fifo_depth = 64, .hw = I2C_TYPE_EXYNOSAUTOV9, }; static const struct of_device_id exynos5_i2c_match[] = { { .compatible = "samsung,exynos5-hsi2c", .data = &exynos5250_hsi2c_data }, { .compatible = "samsung,exynos5250-hsi2c", .data = &exynos5250_hsi2c_data }, { .compatible = "samsung,exynos5260-hsi2c", .data = &exynos5260_hsi2c_data }, { .compatible = "samsung,exynos7-hsi2c", .data = &exynos7_hsi2c_data }, { .compatible = "samsung,exynosautov9-hsi2c", .data = &exynosautov9_hsi2c_data }, {}, }; MODULE_DEVICE_TABLE(of, exynos5_i2c_match); static void exynos5_i2c_clr_pend_irq(struct exynos5_i2c *i2c) { writel(readl(i2c->regs + HSI2C_INT_STATUS), i2c->regs + HSI2C_INT_STATUS); } /* * exynos5_i2c_set_timing: updates the registers with appropriate * timing values calculated * * Timing values for operation are calculated against either 100kHz * or 1MHz controller operating frequency. * * Returns 0 on success, -EINVAL if the cycle length cannot * be calculated. */ static int exynos5_i2c_set_timing(struct exynos5_i2c *i2c, bool hs_timings) { u32 i2c_timing_s1; u32 i2c_timing_s2; u32 i2c_timing_s3; u32 i2c_timing_sla; unsigned int t_start_su, t_start_hd; unsigned int t_stop_su; unsigned int t_data_su, t_data_hd; unsigned int t_scl_l, t_scl_h; unsigned int t_sr_release; unsigned int t_ftl_cycle; unsigned int clkin = clk_get_rate(i2c->clk); unsigned int op_clk = hs_timings ? i2c->op_clock : (i2c->op_clock >= I2C_MAX_FAST_MODE_PLUS_FREQ) ? I2C_MAX_STANDARD_MODE_FREQ : i2c->op_clock; int div, clk_cycle, temp; /* * In case of HSI2C controllers in ExynosAutoV9: * * FSCL = IPCLK / ((CLK_DIV + 1) * 16) * T_SCL_LOW = IPCLK * (CLK_DIV + 1) * (N + M) * [N : number of 0's in the TSCL_H_HS] * [M : number of 0's in the TSCL_L_HS] * T_SCL_HIGH = IPCLK * (CLK_DIV + 1) * (N + M) * [N : number of 1's in the TSCL_H_HS] * [M : number of 1's in the TSCL_L_HS] * * Result of (N + M) is always 8. * In general case, we don't need to control timing_s1 and timing_s2. */ if (i2c->variant->hw == I2C_TYPE_EXYNOSAUTOV9) { div = ((clkin / (16 * i2c->op_clock)) - 1); i2c_timing_s3 = div << 16; if (hs_timings) writel(i2c_timing_s3, i2c->regs + HSI2C_TIMING_HS3); else writel(i2c_timing_s3, i2c->regs + HSI2C_TIMING_FS3); return 0; } /* * In case of HSI2C controller in Exynos5 series * FPCLK / FI2C = * (CLK_DIV + 1) * (TSCLK_L + TSCLK_H + 2) + 8 + 2 * FLT_CYCLE * * In case of HSI2C controllers in Exynos7 series * FPCLK / FI2C = * (CLK_DIV + 1) * (TSCLK_L + TSCLK_H + 2) + 8 + FLT_CYCLE * * clk_cycle := TSCLK_L + TSCLK_H * temp := (CLK_DIV + 1) * (clk_cycle + 2) * * Constraints: 4 <= temp, 0 <= CLK_DIV < 256, 2 <= clk_cycle <= 510 * */ t_ftl_cycle = (readl(i2c->regs + HSI2C_CONF) >> 16) & 0x7; temp = clkin / op_clk - 8 - t_ftl_cycle; if (i2c->variant->hw != I2C_TYPE_EXYNOS7) temp -= t_ftl_cycle; div = temp / 512; clk_cycle = temp / (div + 1) - 2; if (temp < 4 || div >= 256 || clk_cycle < 2) { dev_err(i2c->dev, "%s clock set-up failed\n", hs_timings ? 
"HS" : "FS"); return -EINVAL; } t_scl_l = clk_cycle / 2; t_scl_h = clk_cycle / 2; t_start_su = t_scl_l; t_start_hd = t_scl_l; t_stop_su = t_scl_l; t_data_su = t_scl_l / 2; t_data_hd = t_scl_l / 2; t_sr_release = clk_cycle; i2c_timing_s1 = t_start_su << 24 | t_start_hd << 16 | t_stop_su << 8; i2c_timing_s2 = t_data_su << 24 | t_scl_l << 8 | t_scl_h << 0; i2c_timing_s3 = div << 16 | t_sr_release << 0; i2c_timing_sla = t_data_hd << 0; dev_dbg(i2c->dev, "tSTART_SU: %X, tSTART_HD: %X, tSTOP_SU: %X\n", t_start_su, t_start_hd, t_stop_su); dev_dbg(i2c->dev, "tDATA_SU: %X, tSCL_L: %X, tSCL_H: %X\n", t_data_su, t_scl_l, t_scl_h); dev_dbg(i2c->dev, "nClkDiv: %X, tSR_RELEASE: %X\n", div, t_sr_release); dev_dbg(i2c->dev, "tDATA_HD: %X\n", t_data_hd); if (hs_timings) { writel(i2c_timing_s1, i2c->regs + HSI2C_TIMING_HS1); writel(i2c_timing_s2, i2c->regs + HSI2C_TIMING_HS2); writel(i2c_timing_s3, i2c->regs + HSI2C_TIMING_HS3); } else { writel(i2c_timing_s1, i2c->regs + HSI2C_TIMING_FS1); writel(i2c_timing_s2, i2c->regs + HSI2C_TIMING_FS2); writel(i2c_timing_s3, i2c->regs + HSI2C_TIMING_FS3); } writel(i2c_timing_sla, i2c->regs + HSI2C_TIMING_SLA); return 0; } static int exynos5_hsi2c_clock_setup(struct exynos5_i2c *i2c) { /* always set Fast Speed timings */ int ret = exynos5_i2c_set_timing(i2c, false); if (ret < 0 || i2c->op_clock < I2C_MAX_FAST_MODE_PLUS_FREQ) return ret; return exynos5_i2c_set_timing(i2c, true); } /* * exynos5_i2c_init: configures the controller for I2C functionality * Programs I2C controller for Master mode operation */ static void exynos5_i2c_init(struct exynos5_i2c *i2c) { u32 i2c_conf = readl(i2c->regs + HSI2C_CONF); u32 i2c_timeout = readl(i2c->regs + HSI2C_TIMEOUT); /* Clear to disable Timeout */ i2c_timeout &= ~HSI2C_TIMEOUT_EN; writel(i2c_timeout, i2c->regs + HSI2C_TIMEOUT); writel((HSI2C_FUNC_MODE_I2C | HSI2C_MASTER), i2c->regs + HSI2C_CTL); writel(HSI2C_TRAILING_COUNT, i2c->regs + HSI2C_TRAILIG_CTL); if (i2c->op_clock >= I2C_MAX_FAST_MODE_PLUS_FREQ) { writel(HSI2C_MASTER_ID(MASTER_ID(i2c->adap.nr)), i2c->regs + HSI2C_ADDR); i2c_conf |= HSI2C_HS_MODE; } writel(i2c_conf | HSI2C_AUTO_MODE, i2c->regs + HSI2C_CONF); } static void exynos5_i2c_reset(struct exynos5_i2c *i2c) { u32 i2c_ctl; /* Set and clear the bit for reset */ i2c_ctl = readl(i2c->regs + HSI2C_CTL); i2c_ctl |= HSI2C_SW_RST; writel(i2c_ctl, i2c->regs + HSI2C_CTL); i2c_ctl = readl(i2c->regs + HSI2C_CTL); i2c_ctl &= ~HSI2C_SW_RST; writel(i2c_ctl, i2c->regs + HSI2C_CTL); /* We don't expect calculations to fail during the run */ exynos5_hsi2c_clock_setup(i2c); /* Initialize the configure registers */ exynos5_i2c_init(i2c); } /* * exynos5_i2c_irq: top level IRQ servicing routine * * INT_STATUS registers gives the interrupt details. Further, * FIFO_STATUS or TRANS_STATUS registers are to be check for detailed * state of the bus. 
*/ static irqreturn_t exynos5_i2c_irq(int irqno, void *dev_id) { struct exynos5_i2c *i2c = dev_id; u32 fifo_level, int_status, fifo_status, trans_status; unsigned char byte; int len = 0; i2c->state = -EINVAL; spin_lock(&i2c->lock); int_status = readl(i2c->regs + HSI2C_INT_STATUS); writel(int_status, i2c->regs + HSI2C_INT_STATUS); /* handle interrupt related to the transfer status */ switch (i2c->variant->hw) { case I2C_TYPE_EXYNOSAUTOV9: fallthrough; case I2C_TYPE_EXYNOS7: if (int_status & HSI2C_INT_TRANS_DONE) { i2c->trans_done = 1; i2c->state = 0; } else if (int_status & HSI2C_INT_TRANS_ABORT) { dev_dbg(i2c->dev, "Deal with arbitration lose\n"); i2c->state = -EAGAIN; goto stop; } else if (int_status & HSI2C_INT_NO_DEV_ACK) { dev_dbg(i2c->dev, "No ACK from device\n"); i2c->state = -ENXIO; goto stop; } else if (int_status & HSI2C_INT_NO_DEV) { dev_dbg(i2c->dev, "No device\n"); i2c->state = -ENXIO; goto stop; } else if (int_status & HSI2C_INT_TIMEOUT) { dev_dbg(i2c->dev, "Accessing device timed out\n"); i2c->state = -ETIMEDOUT; goto stop; } break; case I2C_TYPE_EXYNOS5: if (!(int_status & HSI2C_INT_I2C)) break; trans_status = readl(i2c->regs + HSI2C_TRANS_STATUS); if (trans_status & HSI2C_NO_DEV_ACK) { dev_dbg(i2c->dev, "No ACK from device\n"); i2c->state = -ENXIO; goto stop; } else if (trans_status & HSI2C_NO_DEV) { dev_dbg(i2c->dev, "No device\n"); i2c->state = -ENXIO; goto stop; } else if (trans_status & HSI2C_TRANS_ABORT) { dev_dbg(i2c->dev, "Deal with arbitration lose\n"); i2c->state = -EAGAIN; goto stop; } else if (trans_status & HSI2C_TIMEOUT_AUTO) { dev_dbg(i2c->dev, "Accessing device timed out\n"); i2c->state = -ETIMEDOUT; goto stop; } else if (trans_status & HSI2C_TRANS_DONE) { i2c->trans_done = 1; i2c->state = 0; } break; } if ((i2c->msg->flags & I2C_M_RD) && (int_status & (HSI2C_INT_TRAILING | HSI2C_INT_RX_ALMOSTFULL))) { fifo_status = readl(i2c->regs + HSI2C_FIFO_STATUS); fifo_level = HSI2C_RX_FIFO_LVL(fifo_status); len = min(fifo_level, i2c->msg->len - i2c->msg_ptr); while (len > 0) { byte = (unsigned char) readl(i2c->regs + HSI2C_RX_DATA); i2c->msg->buf[i2c->msg_ptr++] = byte; len--; } i2c->state = 0; } else if (int_status & HSI2C_INT_TX_ALMOSTEMPTY) { fifo_status = readl(i2c->regs + HSI2C_FIFO_STATUS); fifo_level = HSI2C_TX_FIFO_LVL(fifo_status); len = i2c->variant->fifo_depth - fifo_level; if (len > (i2c->msg->len - i2c->msg_ptr)) { u32 int_en = readl(i2c->regs + HSI2C_INT_ENABLE); int_en &= ~HSI2C_INT_TX_ALMOSTEMPTY_EN; writel(int_en, i2c->regs + HSI2C_INT_ENABLE); len = i2c->msg->len - i2c->msg_ptr; } while (len > 0) { byte = i2c->msg->buf[i2c->msg_ptr++]; writel(byte, i2c->regs + HSI2C_TX_DATA); len--; } i2c->state = 0; } stop: if ((i2c->trans_done && (i2c->msg->len == i2c->msg_ptr)) || (i2c->state < 0)) { writel(0, i2c->regs + HSI2C_INT_ENABLE); exynos5_i2c_clr_pend_irq(i2c); complete(&i2c->msg_complete); } spin_unlock(&i2c->lock); return IRQ_HANDLED; } /* * exynos5_i2c_wait_bus_idle * * Wait for the bus to go idle, indicated by the MASTER_BUSY bit being * cleared. 
* * Returns -EBUSY if the bus cannot be brought to idle */ static int exynos5_i2c_wait_bus_idle(struct exynos5_i2c *i2c) { unsigned long stop_time; u32 trans_status; /* wait for 100 milliseconds for the bus to be idle */ stop_time = jiffies + msecs_to_jiffies(100) + 1; do { trans_status = readl(i2c->regs + HSI2C_TRANS_STATUS); if (!(trans_status & HSI2C_MASTER_BUSY)) return 0; usleep_range(50, 200); } while (time_before(jiffies, stop_time)); return -EBUSY; } static void exynos5_i2c_bus_recover(struct exynos5_i2c *i2c) { u32 val; val = readl(i2c->regs + HSI2C_CTL) | HSI2C_RXCHON; writel(val, i2c->regs + HSI2C_CTL); val = readl(i2c->regs + HSI2C_CONF) & ~HSI2C_AUTO_MODE; writel(val, i2c->regs + HSI2C_CONF); /* * Specification says master should send nine clock pulses. It can be * emulated by sending manual read command (nine pulses for read eight * bits + one pulse for NACK). */ writel(HSI2C_CMD_READ_DATA, i2c->regs + HSI2C_MANUAL_CMD); exynos5_i2c_wait_bus_idle(i2c); writel(HSI2C_CMD_SEND_STOP, i2c->regs + HSI2C_MANUAL_CMD); exynos5_i2c_wait_bus_idle(i2c); val = readl(i2c->regs + HSI2C_CTL) & ~HSI2C_RXCHON; writel(val, i2c->regs + HSI2C_CTL); val = readl(i2c->regs + HSI2C_CONF) | HSI2C_AUTO_MODE; writel(val, i2c->regs + HSI2C_CONF); } static void exynos5_i2c_bus_check(struct exynos5_i2c *i2c) { unsigned long timeout; if (i2c->variant->hw == I2C_TYPE_EXYNOS5) return; /* * HSI2C_MASTER_ST_LOSE state (in Exynos7 and ExynosAutoV9 variants) * before a transaction indicates that the bus is stuck (SDA is low). * In such a case bus recovery can be performed. */ timeout = jiffies + msecs_to_jiffies(100); for (;;) { u32 st = readl(i2c->regs + HSI2C_TRANS_STATUS); if ((st & HSI2C_MASTER_ST_MASK) != HSI2C_MASTER_ST_LOSE) return; if (time_is_before_jiffies(timeout)) return; exynos5_i2c_bus_recover(i2c); } } /* * exynos5_i2c_message_start: Configures the bus and starts the xfer * i2c: struct exynos5_i2c pointer for the current bus * stop: Enables stop after transfer if set. Set for the last transfer * in the list of messages. * * Configures the bus for read/write function * Sets chip address to talk to, message length to be sent. * Enables appropriate interrupts and sends start xfer command. */ static void exynos5_i2c_message_start(struct exynos5_i2c *i2c, int stop) { u32 i2c_ctl; u32 int_en = 0; u32 i2c_auto_conf = 0; u32 i2c_addr = 0; u32 fifo_ctl; unsigned long flags; unsigned short trig_lvl; if (i2c->variant->hw == I2C_TYPE_EXYNOS5) int_en |= HSI2C_INT_I2C; else int_en |= HSI2C_INT_I2C_TRANS; i2c_ctl = readl(i2c->regs + HSI2C_CTL); i2c_ctl &= ~(HSI2C_TXCHON | HSI2C_RXCHON); fifo_ctl = HSI2C_RXFIFO_EN | HSI2C_TXFIFO_EN; if (i2c->msg->flags & I2C_M_RD) { i2c_ctl |= HSI2C_RXCHON; i2c_auto_conf |= HSI2C_READ_WRITE; trig_lvl = (i2c->msg->len > i2c->variant->fifo_depth) ? (i2c->variant->fifo_depth * 3 / 4) : i2c->msg->len; fifo_ctl |= HSI2C_RXFIFO_TRIGGER_LEVEL(trig_lvl); int_en |= (HSI2C_INT_RX_ALMOSTFULL_EN | HSI2C_INT_TRAILING_EN); } else { i2c_ctl |= HSI2C_TXCHON; trig_lvl = (i2c->msg->len > i2c->variant->fifo_depth) ?
(i2c->variant->fifo_depth * 1 / 4) : i2c->msg->len; fifo_ctl |= HSI2C_TXFIFO_TRIGGER_LEVEL(trig_lvl); int_en |= HSI2C_INT_TX_ALMOSTEMPTY_EN; } i2c_addr = HSI2C_SLV_ADDR_MAS(i2c->msg->addr); if (i2c->op_clock >= I2C_MAX_FAST_MODE_PLUS_FREQ) i2c_addr |= HSI2C_MASTER_ID(MASTER_ID(i2c->adap.nr)); writel(i2c_addr, i2c->regs + HSI2C_ADDR); writel(fifo_ctl, i2c->regs + HSI2C_FIFO_CTL); writel(i2c_ctl, i2c->regs + HSI2C_CTL); exynos5_i2c_bus_check(i2c); /* * Enable interrupts before starting the transfer so that we don't * miss any INT_I2C interrupts. */ spin_lock_irqsave(&i2c->lock, flags); writel(int_en, i2c->regs + HSI2C_INT_ENABLE); if (stop == 1) i2c_auto_conf |= HSI2C_STOP_AFTER_TRANS; i2c_auto_conf |= i2c->msg->len; i2c_auto_conf |= HSI2C_MASTER_RUN; writel(i2c_auto_conf, i2c->regs + HSI2C_AUTO_CONF); spin_unlock_irqrestore(&i2c->lock, flags); } static int exynos5_i2c_xfer_msg(struct exynos5_i2c *i2c, struct i2c_msg *msgs, int stop) { unsigned long timeout; int ret; i2c->msg = msgs; i2c->msg_ptr = 0; i2c->trans_done = 0; reinit_completion(&i2c->msg_complete); exynos5_i2c_message_start(i2c, stop); timeout = wait_for_completion_timeout(&i2c->msg_complete, EXYNOS5_I2C_TIMEOUT); if (timeout == 0) ret = -ETIMEDOUT; else ret = i2c->state; /* * If this is the last message to be transfered (stop == 1) * Then check if the bus can be brought back to idle. */ if (ret == 0 && stop) ret = exynos5_i2c_wait_bus_idle(i2c); if (ret < 0) { exynos5_i2c_reset(i2c); if (ret == -ETIMEDOUT) dev_warn(i2c->dev, "%s timeout\n", (msgs->flags & I2C_M_RD) ? "rx" : "tx"); } /* Return the state as in interrupt routine */ return ret; } static int exynos5_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct exynos5_i2c *i2c = adap->algo_data; int i, ret; ret = clk_enable(i2c->pclk); if (ret) return ret; ret = clk_enable(i2c->clk); if (ret) goto err_pclk; for (i = 0; i < num; ++i) { ret = exynos5_i2c_xfer_msg(i2c, msgs + i, i + 1 == num); if (ret) break; } clk_disable(i2c->clk); err_pclk: clk_disable(i2c->pclk); return ret ?: num; } static u32 exynos5_i2c_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK); } static const struct i2c_algorithm exynos5_i2c_algorithm = { .master_xfer = exynos5_i2c_xfer, .functionality = exynos5_i2c_func, }; static int exynos5_i2c_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct exynos5_i2c *i2c; int ret; i2c = devm_kzalloc(&pdev->dev, sizeof(struct exynos5_i2c), GFP_KERNEL); if (!i2c) return -ENOMEM; if (of_property_read_u32(np, "clock-frequency", &i2c->op_clock)) i2c->op_clock = I2C_MAX_STANDARD_MODE_FREQ; strscpy(i2c->adap.name, "exynos5-i2c", sizeof(i2c->adap.name)); i2c->adap.owner = THIS_MODULE; i2c->adap.algo = &exynos5_i2c_algorithm; i2c->adap.retries = 3; i2c->dev = &pdev->dev; i2c->clk = devm_clk_get(&pdev->dev, "hsi2c"); if (IS_ERR(i2c->clk)) { dev_err(&pdev->dev, "cannot get clock\n"); return -ENOENT; } i2c->pclk = devm_clk_get_optional(&pdev->dev, "hsi2c_pclk"); if (IS_ERR(i2c->pclk)) { return dev_err_probe(&pdev->dev, PTR_ERR(i2c->pclk), "cannot get pclk"); } ret = clk_prepare_enable(i2c->pclk); if (ret) return ret; ret = clk_prepare_enable(i2c->clk); if (ret) goto err_pclk; i2c->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(i2c->regs)) { ret = PTR_ERR(i2c->regs); goto err_clk; } i2c->adap.dev.of_node = np; i2c->adap.algo_data = i2c; i2c->adap.dev.parent = &pdev->dev; /* Clear pending interrupts from u-boot or misc causes */ exynos5_i2c_clr_pend_irq(i2c); 
spin_lock_init(&i2c->lock); init_completion(&i2c->msg_complete); i2c->irq = ret = platform_get_irq(pdev, 0); if (ret < 0) goto err_clk; ret = devm_request_irq(&pdev->dev, i2c->irq, exynos5_i2c_irq, IRQF_NO_SUSPEND, dev_name(&pdev->dev), i2c); if (ret != 0) { dev_err(&pdev->dev, "cannot request HS-I2C IRQ %d\n", i2c->irq); goto err_clk; } i2c->variant = of_device_get_match_data(&pdev->dev); ret = exynos5_hsi2c_clock_setup(i2c); if (ret) goto err_clk; exynos5_i2c_reset(i2c); ret = i2c_add_adapter(&i2c->adap); if (ret < 0) goto err_clk; platform_set_drvdata(pdev, i2c); clk_disable(i2c->clk); clk_disable(i2c->pclk); return 0; err_clk: clk_disable_unprepare(i2c->clk); err_pclk: clk_disable_unprepare(i2c->pclk); return ret; } static void exynos5_i2c_remove(struct platform_device *pdev) { struct exynos5_i2c *i2c = platform_get_drvdata(pdev); i2c_del_adapter(&i2c->adap); clk_unprepare(i2c->clk); clk_unprepare(i2c->pclk); } static int exynos5_i2c_suspend_noirq(struct device *dev) { struct exynos5_i2c *i2c = dev_get_drvdata(dev); i2c_mark_adapter_suspended(&i2c->adap); clk_unprepare(i2c->clk); clk_unprepare(i2c->pclk); return 0; } static int exynos5_i2c_resume_noirq(struct device *dev) { struct exynos5_i2c *i2c = dev_get_drvdata(dev); int ret = 0; ret = clk_prepare_enable(i2c->pclk); if (ret) return ret; ret = clk_prepare_enable(i2c->clk); if (ret) goto err_pclk; ret = exynos5_hsi2c_clock_setup(i2c); if (ret) goto err_clk; exynos5_i2c_init(i2c); clk_disable(i2c->clk); clk_disable(i2c->pclk); i2c_mark_adapter_resumed(&i2c->adap); return 0; err_clk: clk_disable_unprepare(i2c->clk); err_pclk: clk_disable_unprepare(i2c->pclk); return ret; } static const struct dev_pm_ops exynos5_i2c_dev_pm_ops = { NOIRQ_SYSTEM_SLEEP_PM_OPS(exynos5_i2c_suspend_noirq, exynos5_i2c_resume_noirq) }; static struct platform_driver exynos5_i2c_driver = { .probe = exynos5_i2c_probe, .remove_new = exynos5_i2c_remove, .driver = { .name = "exynos5-hsi2c", .pm = pm_sleep_ptr(&exynos5_i2c_dev_pm_ops), .of_match_table = exynos5_i2c_match, }, }; module_platform_driver(exynos5_i2c_driver); MODULE_DESCRIPTION("Exynos5 HS-I2C Bus driver"); MODULE_AUTHOR("Naveen Krishna Chatradhi <[email protected]>"); MODULE_AUTHOR("Taekgyun Ko <[email protected]>"); MODULE_LICENSE("GPL v2");
drivers/i2c/busses/i2c-exynos5.c
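The timing comment in exynos5_i2c_set_timing() reduces to solving temp = clkin/op_clk - 8 - n*FLT_CYCLE = (CLK_DIV + 1) * (clk_cycle + 2) under the stated constraints, where n is 2 on the Exynos5 variant and 1 on Exynos7. Below is a small host-side sketch of that arithmetic; the 66 MHz input clock is an assumed example value (the driver reads the real FLT_CYCLE from HSI2C_CONF and the clock rate from clk_get_rate()).

#include <stdio.h>

/* Exynos5-style divider math: temp = clkin/op_clk - 8 - 2*flt_cycle,
 * temp = (div + 1) * (clk_cycle + 2), with 4 <= temp, 0 <= div < 256,
 * 2 <= clk_cycle <= 510. Returns 0 on success, -1 if unsatisfiable. */
static int hsi2c_calc(unsigned clkin, unsigned op_clk, unsigned flt_cycle,
		      int *div_out, int *cycle_out)
{
	int temp = (int)(clkin / op_clk) - 8 - 2 * (int)flt_cycle;
	int div = temp / 512;			/* smallest div keeping clk_cycle <= 510 */
	int clk_cycle = temp / (div + 1) - 2;

	if (temp < 4 || div >= 256 || clk_cycle < 2)
		return -1;
	*div_out = div;
	*cycle_out = clk_cycle;
	return 0;
}

int main(void)
{
	int div, cycle;

	/* assumed 66 MHz input clock, 100 kHz bus, FLT_CYCLE = 2 */
	if (!hsi2c_calc(66000000, 100000, 2, &div, &cycle))
		printf("CLK_DIV=%d clk_cycle=%d\n", div, cycle);	/* CLK_DIV=1 clk_cycle=322 */
	return 0;
}

The driver then splits clk_cycle evenly between SCL low and high and derives all setup/hold fields from those halves.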
// SPDX-License-Identifier: GPL-2.0-only /* * driver for the i2c-tiny-usb adapter - 1.0 * http://www.harbaum.org/till/i2c_tiny_usb * * Copyright (C) 2006-2007 Till Harbaum ([email protected]) */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/types.h> /* include interfaces to usb layer */ #include <linux/usb.h> /* include interface to i2c layer */ #include <linux/i2c.h> /* commands via USB, must match command ids in the firmware */ #define CMD_ECHO 0 #define CMD_GET_FUNC 1 #define CMD_SET_DELAY 2 #define CMD_GET_STATUS 3 #define CMD_I2C_IO 4 #define CMD_I2C_IO_BEGIN (1<<0) #define CMD_I2C_IO_END (1<<1) /* i2c bit delay, default is 10us -> 100kHz max (in practice, due to additional delays in the i2c bitbanging code this results in a i2c clock of about 50kHz) */ static unsigned short delay = 10; module_param(delay, ushort, 0); MODULE_PARM_DESC(delay, "bit delay in microseconds " "(default is 10us for 100kHz max)"); static int usb_read(struct i2c_adapter *adapter, int cmd, int value, int index, void *data, int len); static int usb_write(struct i2c_adapter *adapter, int cmd, int value, int index, void *data, int len); /* ----- begin of i2c layer ---------------------------------------------- */ #define STATUS_IDLE 0 #define STATUS_ADDRESS_ACK 1 #define STATUS_ADDRESS_NAK 2 static int usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) { unsigned char *pstatus; struct i2c_msg *pmsg; int i, ret; dev_dbg(&adapter->dev, "master xfer %d messages:\n", num); pstatus = kmalloc(sizeof(*pstatus), GFP_KERNEL); if (!pstatus) return -ENOMEM; for (i = 0 ; i < num ; i++) { int cmd = CMD_I2C_IO; if (i == 0) cmd |= CMD_I2C_IO_BEGIN; if (i == num-1) cmd |= CMD_I2C_IO_END; pmsg = &msgs[i]; dev_dbg(&adapter->dev, " %d: %s (flags %d) %d bytes to 0x%02x\n", i, pmsg->flags & I2C_M_RD ? 
"read" : "write", pmsg->flags, pmsg->len, pmsg->addr); /* and directly send the message */ if (pmsg->flags & I2C_M_RD) { /* read data */ if (usb_read(adapter, cmd, pmsg->flags, pmsg->addr, pmsg->buf, pmsg->len) != pmsg->len) { dev_err(&adapter->dev, "failure reading data\n"); ret = -EIO; goto out; } } else { /* write data */ if (usb_write(adapter, cmd, pmsg->flags, pmsg->addr, pmsg->buf, pmsg->len) != pmsg->len) { dev_err(&adapter->dev, "failure writing data\n"); ret = -EIO; goto out; } } /* read status */ if (usb_read(adapter, CMD_GET_STATUS, 0, 0, pstatus, 1) != 1) { dev_err(&adapter->dev, "failure reading status\n"); ret = -EIO; goto out; } dev_dbg(&adapter->dev, " status = %d\n", *pstatus); if (*pstatus == STATUS_ADDRESS_NAK) { ret = -ENXIO; goto out; } } ret = i; out: kfree(pstatus); return ret; } static u32 usb_func(struct i2c_adapter *adapter) { __le32 *pfunc; u32 ret; pfunc = kmalloc(sizeof(*pfunc), GFP_KERNEL); /* get functionality from adapter */ if (!pfunc || usb_read(adapter, CMD_GET_FUNC, 0, 0, pfunc, sizeof(*pfunc)) != sizeof(*pfunc)) { dev_err(&adapter->dev, "failure reading functionality\n"); ret = 0; goto out; } ret = le32_to_cpup(pfunc); out: kfree(pfunc); return ret; } /* This is the actual algorithm we define */ static const struct i2c_algorithm usb_algorithm = { .master_xfer = usb_xfer, .functionality = usb_func, }; /* ----- end of i2c layer ------------------------------------------------ */ /* ----- begin of usb layer ---------------------------------------------- */ /* * Initially the usb i2c interface uses a vid/pid pair donated by * Future Technology Devices International Ltd., later a pair was * bought from EZPrototypes */ static const struct usb_device_id i2c_tiny_usb_table[] = { { USB_DEVICE(0x0403, 0xc631) }, /* FTDI */ { USB_DEVICE(0x1c40, 0x0534) }, /* EZPrototypes */ { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, i2c_tiny_usb_table); /* Structure to hold all of our device specific stuff */ struct i2c_tiny_usb { struct usb_device *usb_dev; /* the usb device for this device */ struct usb_interface *interface; /* the interface for this device */ struct i2c_adapter adapter; /* i2c related things */ }; static int usb_read(struct i2c_adapter *adapter, int cmd, int value, int index, void *data, int len) { struct i2c_tiny_usb *dev = (struct i2c_tiny_usb *)adapter->algo_data; void *dmadata = kmalloc(len, GFP_KERNEL); int ret; if (!dmadata) return -ENOMEM; /* do control transfer */ ret = usb_control_msg(dev->usb_dev, usb_rcvctrlpipe(dev->usb_dev, 0), cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_IN, value, index, dmadata, len, 2000); memcpy(data, dmadata, len); kfree(dmadata); return ret; } static int usb_write(struct i2c_adapter *adapter, int cmd, int value, int index, void *data, int len) { struct i2c_tiny_usb *dev = (struct i2c_tiny_usb *)adapter->algo_data; void *dmadata = kmemdup(data, len, GFP_KERNEL); int ret; if (!dmadata) return -ENOMEM; /* do control transfer */ ret = usb_control_msg(dev->usb_dev, usb_sndctrlpipe(dev->usb_dev, 0), cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE, value, index, dmadata, len, 2000); kfree(dmadata); return ret; } static void i2c_tiny_usb_free(struct i2c_tiny_usb *dev) { usb_put_dev(dev->usb_dev); kfree(dev); } static int i2c_tiny_usb_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct i2c_tiny_usb *dev; int retval = -ENOMEM; u16 version; if (interface->intf_assoc && interface->intf_assoc->bFunctionClass != USB_CLASS_VENDOR_SPEC) return -ENODEV; dev_dbg(&interface->dev, "probing usb 
device\n"); /* allocate memory for our device state and initialize it */ dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) goto error; dev->usb_dev = usb_get_dev(interface_to_usbdev(interface)); dev->interface = interface; /* save our data pointer in this interface device */ usb_set_intfdata(interface, dev); version = le16_to_cpu(dev->usb_dev->descriptor.bcdDevice); dev_info(&interface->dev, "version %x.%02x found at bus %03d address %03d\n", version >> 8, version & 0xff, dev->usb_dev->bus->busnum, dev->usb_dev->devnum); /* setup i2c adapter description */ dev->adapter.owner = THIS_MODULE; dev->adapter.class = I2C_CLASS_HWMON; dev->adapter.algo = &usb_algorithm; dev->adapter.algo_data = dev; snprintf(dev->adapter.name, sizeof(dev->adapter.name), "i2c-tiny-usb at bus %03d device %03d", dev->usb_dev->bus->busnum, dev->usb_dev->devnum); if (usb_write(&dev->adapter, CMD_SET_DELAY, delay, 0, NULL, 0) != 0) { dev_err(&dev->adapter.dev, "failure setting delay to %dus\n", delay); retval = -EIO; goto error; } dev->adapter.dev.parent = &dev->interface->dev; /* and finally attach to i2c layer */ i2c_add_adapter(&dev->adapter); /* inform user about successful attachment to i2c layer */ dev_info(&dev->adapter.dev, "connected i2c-tiny-usb device\n"); return 0; error: if (dev) i2c_tiny_usb_free(dev); return retval; } static void i2c_tiny_usb_disconnect(struct usb_interface *interface) { struct i2c_tiny_usb *dev = usb_get_intfdata(interface); i2c_del_adapter(&dev->adapter); usb_set_intfdata(interface, NULL); i2c_tiny_usb_free(dev); dev_dbg(&interface->dev, "disconnected\n"); } static struct usb_driver i2c_tiny_usb_driver = { .name = "i2c-tiny-usb", .probe = i2c_tiny_usb_probe, .disconnect = i2c_tiny_usb_disconnect, .id_table = i2c_tiny_usb_table, }; module_usb_driver(i2c_tiny_usb_driver); /* ----- end of usb layer ------------------------------------------------ */ MODULE_AUTHOR("Till Harbaum <[email protected]>"); MODULE_DESCRIPTION("i2c-tiny-usb driver v1.0"); MODULE_LICENSE("GPL");
drivers/i2c/busses/i2c-tiny-usb.c
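Because i2c-tiny-usb is driven entirely by vendor control requests (the command in bRequest, the message flags in wValue, the I2C address in wIndex), the same transactions can be issued from user space. A hedged libusb-1.0 sketch that reads the functionality dword via CMD_GET_FUNC, using the EZPrototypes VID/PID from the id table above; error handling is deliberately minimal, and the adapter must not be bound to the kernel driver while this runs.

#include <stdint.h>
#include <stdio.h>
#include <libusb-1.0/libusb.h>

#define CMD_GET_FUNC 1	/* matches the firmware command id above */

int main(void)
{
	libusb_context *ctx;
	libusb_device_handle *h;
	uint8_t func[4];
	int n;

	if (libusb_init(&ctx))
		return 1;
	h = libusb_open_device_with_vid_pid(ctx, 0x1c40, 0x0534);
	if (h) {
		/* IN vendor request to the interface, like usb_read() above */
		n = libusb_control_transfer(h,
			LIBUSB_ENDPOINT_IN | LIBUSB_REQUEST_TYPE_VENDOR |
			LIBUSB_RECIPIENT_INTERFACE,
			CMD_GET_FUNC, 0, 0, func, sizeof(func), 2000);
		if (n == (int)sizeof(func))
			printf("functionality: %02x %02x %02x %02x (little-endian dword)\n",
			       func[0], func[1], func[2], func[3]);
		libusb_close(h);
	}
	libusb_exit(ctx);
	return 0;
}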
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2013 STMicroelectronics * * I2C master mode controller driver, used in STMicroelectronics devices. * * Author: Maxime Coquelin <[email protected]> */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of.h> #include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> /* SSC registers */ #define SSC_BRG 0x000 #define SSC_TBUF 0x004 #define SSC_RBUF 0x008 #define SSC_CTL 0x00C #define SSC_IEN 0x010 #define SSC_STA 0x014 #define SSC_I2C 0x018 #define SSC_SLAD 0x01C #define SSC_REP_START_HOLD 0x020 #define SSC_START_HOLD 0x024 #define SSC_REP_START_SETUP 0x028 #define SSC_DATA_SETUP 0x02C #define SSC_STOP_SETUP 0x030 #define SSC_BUS_FREE 0x034 #define SSC_TX_FSTAT 0x038 #define SSC_RX_FSTAT 0x03C #define SSC_PRE_SCALER_BRG 0x040 #define SSC_CLR 0x080 #define SSC_NOISE_SUPP_WIDTH 0x100 #define SSC_PRSCALER 0x104 #define SSC_NOISE_SUPP_WIDTH_DATAOUT 0x108 #define SSC_PRSCALER_DATAOUT 0x10c /* SSC Control */ #define SSC_CTL_DATA_WIDTH_9 0x8 #define SSC_CTL_DATA_WIDTH_MSK 0xf #define SSC_CTL_BM 0xf #define SSC_CTL_HB BIT(4) #define SSC_CTL_PH BIT(5) #define SSC_CTL_PO BIT(6) #define SSC_CTL_SR BIT(7) #define SSC_CTL_MS BIT(8) #define SSC_CTL_EN BIT(9) #define SSC_CTL_LPB BIT(10) #define SSC_CTL_EN_TX_FIFO BIT(11) #define SSC_CTL_EN_RX_FIFO BIT(12) #define SSC_CTL_EN_CLST_RX BIT(13) /* SSC Interrupt Enable */ #define SSC_IEN_RIEN BIT(0) #define SSC_IEN_TIEN BIT(1) #define SSC_IEN_TEEN BIT(2) #define SSC_IEN_REEN BIT(3) #define SSC_IEN_PEEN BIT(4) #define SSC_IEN_AASEN BIT(6) #define SSC_IEN_STOPEN BIT(7) #define SSC_IEN_ARBLEN BIT(8) #define SSC_IEN_NACKEN BIT(10) #define SSC_IEN_REPSTRTEN BIT(11) #define SSC_IEN_TX_FIFO_HALF BIT(12) #define SSC_IEN_RX_FIFO_HALF_FULL BIT(14) /* SSC Status */ #define SSC_STA_RIR BIT(0) #define SSC_STA_TIR BIT(1) #define SSC_STA_TE BIT(2) #define SSC_STA_RE BIT(3) #define SSC_STA_PE BIT(4) #define SSC_STA_CLST BIT(5) #define SSC_STA_AAS BIT(6) #define SSC_STA_STOP BIT(7) #define SSC_STA_ARBL BIT(8) #define SSC_STA_BUSY BIT(9) #define SSC_STA_NACK BIT(10) #define SSC_STA_REPSTRT BIT(11) #define SSC_STA_TX_FIFO_HALF BIT(12) #define SSC_STA_TX_FIFO_FULL BIT(13) #define SSC_STA_RX_FIFO_HALF BIT(14) /* SSC I2C Control */ #define SSC_I2C_I2CM BIT(0) #define SSC_I2C_STRTG BIT(1) #define SSC_I2C_STOPG BIT(2) #define SSC_I2C_ACKG BIT(3) #define SSC_I2C_AD10 BIT(4) #define SSC_I2C_TXENB BIT(5) #define SSC_I2C_REPSTRTG BIT(11) #define SSC_I2C_SLAVE_DISABLE BIT(12) /* SSC Tx FIFO Status */ #define SSC_TX_FSTAT_STATUS 0x07 /* SSC Rx FIFO Status */ #define SSC_RX_FSTAT_STATUS 0x07 /* SSC Clear bit operation */ #define SSC_CLR_SSCAAS BIT(6) #define SSC_CLR_SSCSTOP BIT(7) #define SSC_CLR_SSCARBL BIT(8) #define SSC_CLR_NACK BIT(10) #define SSC_CLR_REPSTRT BIT(11) /* SSC Clock Prescaler */ #define SSC_PRSC_VALUE 0x0f #define SSC_TXFIFO_SIZE 0x8 #define SSC_RXFIFO_SIZE 0x8 enum st_i2c_mode { I2C_MODE_STANDARD, I2C_MODE_FAST, I2C_MODE_END, }; /** * struct st_i2c_timings - per-Mode tuning parameters * @rate: I2C bus rate * @rep_start_hold: I2C repeated start hold time requirement * @rep_start_setup: I2C repeated start set up time requirement * @start_hold: I2C start hold time requirement * @data_setup_time: I2C data set up time requirement * @stop_setup_time: I2C stop set up time requirement * @bus_free_time: I2C bus free time 
requirement * @sda_pulse_min_limit: I2C SDA pulse minimum width limit */ struct st_i2c_timings { u32 rate; u32 rep_start_hold; u32 rep_start_setup; u32 start_hold; u32 data_setup_time; u32 stop_setup_time; u32 bus_free_time; u32 sda_pulse_min_limit; }; /** * struct st_i2c_client - client specific data * @addr: 8-bit slave addr, including r/w bit * @count: number of bytes to be transferred * @xfered: number of bytes already transferred * @buf: data buffer * @result: result of the transfer * @stop: last I2C msg to be sent, i.e. STOP to be generated */ struct st_i2c_client { u8 addr; u32 count; u32 xfered; u8 *buf; int result; bool stop; }; /** * struct st_i2c_dev - private data of the controller * @adap: I2C adapter for this controller * @dev: device for this controller * @base: virtual memory area * @complete: completion of I2C message * @irq: interrupt line for the controller * @clk: hw ssc block clock * @mode: I2C mode of the controller. Standard or Fast only supported * @scl_min_width_us: SCL line minimum pulse width in us * @sda_min_width_us: SDA line minimum pulse width in us * @client: I2C transfer information * @busy: I2C transfer on-going */ struct st_i2c_dev { struct i2c_adapter adap; struct device *dev; void __iomem *base; struct completion complete; int irq; struct clk *clk; int mode; u32 scl_min_width_us; u32 sda_min_width_us; struct st_i2c_client client; bool busy; }; static inline void st_i2c_set_bits(void __iomem *reg, u32 mask) { writel_relaxed(readl_relaxed(reg) | mask, reg); } static inline void st_i2c_clr_bits(void __iomem *reg, u32 mask) { writel_relaxed(readl_relaxed(reg) & ~mask, reg); } /* * From I2C Specifications v0.5. * * All the values below have +10% margin added to be * compatible with some out-of-spec devices, * like HDMI link of the Toshiba 19AV600 TV. */ static struct st_i2c_timings i2c_timings[] = { [I2C_MODE_STANDARD] = { .rate = I2C_MAX_STANDARD_MODE_FREQ, .rep_start_hold = 4400, .rep_start_setup = 5170, .start_hold = 4400, .data_setup_time = 275, .stop_setup_time = 4400, .bus_free_time = 5170, }, [I2C_MODE_FAST] = { .rate = I2C_MAX_FAST_MODE_FREQ, .rep_start_hold = 660, .rep_start_setup = 660, .start_hold = 660, .data_setup_time = 110, .stop_setup_time = 660, .bus_free_time = 1430, }, }; static void st_i2c_flush_rx_fifo(struct st_i2c_dev *i2c_dev) { int count, i; /* * Counter only counts up to 7 but fifo size is 8... * When fifo is full, counter is 0 and RIR bit of status register is * set */ if (readl_relaxed(i2c_dev->base + SSC_STA) & SSC_STA_RIR) count = SSC_RXFIFO_SIZE; else count = readl_relaxed(i2c_dev->base + SSC_RX_FSTAT) & SSC_RX_FSTAT_STATUS; for (i = 0; i < count; i++) readl_relaxed(i2c_dev->base + SSC_RBUF); } static void st_i2c_soft_reset(struct st_i2c_dev *i2c_dev) { /* * FIFO needs to be emptied before resetting the IP, * else the controller raises a BUSY error.
*/ st_i2c_flush_rx_fifo(i2c_dev); st_i2c_set_bits(i2c_dev->base + SSC_CTL, SSC_CTL_SR); st_i2c_clr_bits(i2c_dev->base + SSC_CTL, SSC_CTL_SR); } /** * st_i2c_hw_config() - Prepare SSC block, calculate and apply tuning timings * @i2c_dev: Controller's private data */ static void st_i2c_hw_config(struct st_i2c_dev *i2c_dev) { unsigned long rate; u32 val, ns_per_clk; struct st_i2c_timings *t = &i2c_timings[i2c_dev->mode]; st_i2c_soft_reset(i2c_dev); val = SSC_CLR_REPSTRT | SSC_CLR_NACK | SSC_CLR_SSCARBL | SSC_CLR_SSCAAS | SSC_CLR_SSCSTOP; writel_relaxed(val, i2c_dev->base + SSC_CLR); /* SSC Control register setup */ val = SSC_CTL_PO | SSC_CTL_PH | SSC_CTL_HB | SSC_CTL_DATA_WIDTH_9; writel_relaxed(val, i2c_dev->base + SSC_CTL); rate = clk_get_rate(i2c_dev->clk); ns_per_clk = 1000000000 / rate; /* Baudrate */ val = rate / (2 * t->rate); writel_relaxed(val, i2c_dev->base + SSC_BRG); /* Pre-scaler baudrate */ writel_relaxed(1, i2c_dev->base + SSC_PRE_SCALER_BRG); /* Enable I2C mode */ writel_relaxed(SSC_I2C_I2CM, i2c_dev->base + SSC_I2C); /* Repeated start hold time */ val = t->rep_start_hold / ns_per_clk; writel_relaxed(val, i2c_dev->base + SSC_REP_START_HOLD); /* Repeated start set up time */ val = t->rep_start_setup / ns_per_clk; writel_relaxed(val, i2c_dev->base + SSC_REP_START_SETUP); /* Start hold time */ val = t->start_hold / ns_per_clk; writel_relaxed(val, i2c_dev->base + SSC_START_HOLD); /* Data set up time */ val = t->data_setup_time / ns_per_clk; writel_relaxed(val, i2c_dev->base + SSC_DATA_SETUP); /* Stop set up time */ val = t->stop_setup_time / ns_per_clk; writel_relaxed(val, i2c_dev->base + SSC_STOP_SETUP); /* Bus free time */ val = t->bus_free_time / ns_per_clk; writel_relaxed(val, i2c_dev->base + SSC_BUS_FREE); /* Prescalers set up */ val = rate / 10000000; writel_relaxed(val, i2c_dev->base + SSC_PRSCALER); writel_relaxed(val, i2c_dev->base + SSC_PRSCALER_DATAOUT); /* Noise suppression width */ val = i2c_dev->scl_min_width_us * rate / 100000000; writel_relaxed(val, i2c_dev->base + SSC_NOISE_SUPP_WIDTH); /* Noise suppression max output data delay width */ val = i2c_dev->sda_min_width_us * rate / 100000000; writel_relaxed(val, i2c_dev->base + SSC_NOISE_SUPP_WIDTH_DATAOUT); } static int st_i2c_recover_bus(struct i2c_adapter *i2c_adap) { struct st_i2c_dev *i2c_dev = i2c_get_adapdata(i2c_adap); u32 ctl; dev_dbg(i2c_dev->dev, "Trying to recover bus\n"); /* * The SSP IP is dual-role SPI/I2C; to generate 9 clock pulses * we switch to SPI mode, 9-bit words, and write a 0. This * has been validated with an oscilloscope and is easier * than switching to GPIO mode.
*/ /* Disable interrupts */ writel_relaxed(0, i2c_dev->base + SSC_IEN); st_i2c_hw_config(i2c_dev); ctl = SSC_CTL_EN | SSC_CTL_MS | SSC_CTL_EN_RX_FIFO | SSC_CTL_EN_TX_FIFO; st_i2c_set_bits(i2c_dev->base + SSC_CTL, ctl); st_i2c_clr_bits(i2c_dev->base + SSC_I2C, SSC_I2C_I2CM); usleep_range(8000, 10000); writel_relaxed(0, i2c_dev->base + SSC_TBUF); usleep_range(2000, 4000); st_i2c_set_bits(i2c_dev->base + SSC_I2C, SSC_I2C_I2CM); return 0; } static int st_i2c_wait_free_bus(struct st_i2c_dev *i2c_dev) { u32 sta; int i, ret; for (i = 0; i < 10; i++) { sta = readl_relaxed(i2c_dev->base + SSC_STA); if (!(sta & SSC_STA_BUSY)) return 0; usleep_range(2000, 4000); } dev_err(i2c_dev->dev, "bus not free (status = 0x%08x)\n", sta); ret = i2c_recover_bus(&i2c_dev->adap); if (ret) { dev_err(i2c_dev->dev, "Failed to recover the bus (%d)\n", ret); return ret; } return -EBUSY; } /** * st_i2c_write_tx_fifo() - Write a byte in the Tx FIFO * @i2c_dev: Controller's private data * @byte: Data to write in the Tx FIFO */ static inline void st_i2c_write_tx_fifo(struct st_i2c_dev *i2c_dev, u8 byte) { u16 tbuf = byte << 1; writel_relaxed(tbuf | 1, i2c_dev->base + SSC_TBUF); } /** * st_i2c_wr_fill_tx_fifo() - Fill the Tx FIFO in write mode * @i2c_dev: Controller's private data * * This function fills the Tx FIFO from the I2C transfer buffer when * in write mode. */ static void st_i2c_wr_fill_tx_fifo(struct st_i2c_dev *i2c_dev) { struct st_i2c_client *c = &i2c_dev->client; u32 tx_fstat, sta; int i; sta = readl_relaxed(i2c_dev->base + SSC_STA); if (sta & SSC_STA_TX_FIFO_FULL) return; tx_fstat = readl_relaxed(i2c_dev->base + SSC_TX_FSTAT); tx_fstat &= SSC_TX_FSTAT_STATUS; if (c->count < (SSC_TXFIFO_SIZE - tx_fstat)) i = c->count; else i = SSC_TXFIFO_SIZE - tx_fstat; for (; i > 0; i--, c->count--, c->buf++) st_i2c_write_tx_fifo(i2c_dev, *c->buf); } /** * st_i2c_rd_fill_tx_fifo() - Fill the Tx FIFO in read mode * @i2c_dev: Controller's private data * @max: Maximum amount of data to fill into the Tx FIFO * * This function fills the Tx FIFO with a fixed pattern when * in read mode to trigger the clock.
*/ static void st_i2c_rd_fill_tx_fifo(struct st_i2c_dev *i2c_dev, int max) { struct st_i2c_client *c = &i2c_dev->client; u32 tx_fstat, sta; int i; sta = readl_relaxed(i2c_dev->base + SSC_STA); if (sta & SSC_STA_TX_FIFO_FULL) return; tx_fstat = readl_relaxed(i2c_dev->base + SSC_TX_FSTAT); tx_fstat &= SSC_TX_FSTAT_STATUS; if (max < (SSC_TXFIFO_SIZE - tx_fstat)) i = max; else i = SSC_TXFIFO_SIZE - tx_fstat; for (; i > 0; i--, c->xfered++) st_i2c_write_tx_fifo(i2c_dev, 0xff); } static void st_i2c_read_rx_fifo(struct st_i2c_dev *i2c_dev) { struct st_i2c_client *c = &i2c_dev->client; u32 i, sta; u16 rbuf; sta = readl_relaxed(i2c_dev->base + SSC_STA); if (sta & SSC_STA_RIR) { i = SSC_RXFIFO_SIZE; } else { i = readl_relaxed(i2c_dev->base + SSC_RX_FSTAT); i &= SSC_RX_FSTAT_STATUS; } for (; (i > 0) && (c->count > 0); i--, c->count--) { rbuf = readl_relaxed(i2c_dev->base + SSC_RBUF) >> 1; *c->buf++ = (u8)rbuf & 0xff; } if (i) { dev_err(i2c_dev->dev, "Unexpected %d bytes in rx fifo\n", i); st_i2c_flush_rx_fifo(i2c_dev); } } /** * st_i2c_terminate_xfer() - Send either STOP or REPSTART condition * @i2c_dev: Controller's private data */ static void st_i2c_terminate_xfer(struct st_i2c_dev *i2c_dev) { struct st_i2c_client *c = &i2c_dev->client; st_i2c_clr_bits(i2c_dev->base + SSC_IEN, SSC_IEN_TEEN); st_i2c_clr_bits(i2c_dev->base + SSC_I2C, SSC_I2C_STRTG); if (c->stop) { st_i2c_set_bits(i2c_dev->base + SSC_IEN, SSC_IEN_STOPEN); st_i2c_set_bits(i2c_dev->base + SSC_I2C, SSC_I2C_STOPG); } else { st_i2c_set_bits(i2c_dev->base + SSC_IEN, SSC_IEN_REPSTRTEN); st_i2c_set_bits(i2c_dev->base + SSC_I2C, SSC_I2C_REPSTRTG); } } /** * st_i2c_handle_write() - Handle FIFO empty interrupt in case of write * @i2c_dev: Controller's private data */ static void st_i2c_handle_write(struct st_i2c_dev *i2c_dev) { struct st_i2c_client *c = &i2c_dev->client; st_i2c_flush_rx_fifo(i2c_dev); if (!c->count) /* End of xfer, send stop or repstart */ st_i2c_terminate_xfer(i2c_dev); else st_i2c_wr_fill_tx_fifo(i2c_dev); } /** * st_i2c_handle_read() - Handle FIFO empty interrupt in case of read * @i2c_dev: Controller's private data */ static void st_i2c_handle_read(struct st_i2c_dev *i2c_dev) { struct st_i2c_client *c = &i2c_dev->client; u32 ien; /* Trash the address read back */ if (!c->xfered) { readl_relaxed(i2c_dev->base + SSC_RBUF); st_i2c_clr_bits(i2c_dev->base + SSC_I2C, SSC_I2C_TXENB); } else { st_i2c_read_rx_fifo(i2c_dev); } if (!c->count) { /* End of xfer, send stop or repstart */ st_i2c_terminate_xfer(i2c_dev); } else if (c->count == 1) { /* Penultimate byte to xfer, disable ACK gen. 
*/ st_i2c_clr_bits(i2c_dev->base + SSC_I2C, SSC_I2C_ACKG); /* Last received byte is to be handled by NACK interrupt */ ien = SSC_IEN_NACKEN | SSC_IEN_ARBLEN; writel_relaxed(ien, i2c_dev->base + SSC_IEN); st_i2c_rd_fill_tx_fifo(i2c_dev, c->count); } else { st_i2c_rd_fill_tx_fifo(i2c_dev, c->count - 1); } } /** * st_i2c_isr_thread() - Interrupt routine * @irq: interrupt number * @data: Controller's private data */ static irqreturn_t st_i2c_isr_thread(int irq, void *data) { struct st_i2c_dev *i2c_dev = data; struct st_i2c_client *c = &i2c_dev->client; u32 sta, ien; int it; ien = readl_relaxed(i2c_dev->base + SSC_IEN); sta = readl_relaxed(i2c_dev->base + SSC_STA); /* Use __fls() to check error bits first */ it = __fls(sta & ien); if (it < 0) { dev_dbg(i2c_dev->dev, "spurious it (sta=0x%04x, ien=0x%04x)\n", sta, ien); return IRQ_NONE; } switch (1 << it) { case SSC_STA_TE: if (c->addr & I2C_M_RD) st_i2c_handle_read(i2c_dev); else st_i2c_handle_write(i2c_dev); break; case SSC_STA_STOP: case SSC_STA_REPSTRT: writel_relaxed(0, i2c_dev->base + SSC_IEN); complete(&i2c_dev->complete); break; case SSC_STA_NACK: writel_relaxed(SSC_CLR_NACK, i2c_dev->base + SSC_CLR); /* Last received byte handled by NACK interrupt */ if ((c->addr & I2C_M_RD) && (c->count == 1) && (c->xfered)) { st_i2c_handle_read(i2c_dev); break; } it = SSC_IEN_STOPEN | SSC_IEN_ARBLEN; writel_relaxed(it, i2c_dev->base + SSC_IEN); st_i2c_set_bits(i2c_dev->base + SSC_I2C, SSC_I2C_STOPG); c->result = -EIO; break; case SSC_STA_ARBL: writel_relaxed(SSC_CLR_SSCARBL, i2c_dev->base + SSC_CLR); it = SSC_IEN_STOPEN | SSC_IEN_ARBLEN; writel_relaxed(it, i2c_dev->base + SSC_IEN); st_i2c_set_bits(i2c_dev->base + SSC_I2C, SSC_I2C_STOPG); c->result = -EAGAIN; break; default: dev_err(i2c_dev->dev, "it %d unhandled (sta=0x%04x)\n", it, sta); } /* * Read IEN register to ensure interrupt mask write is effective * before re-enabling interrupt at GIC level, and thus avoid spurious * interrupts. 
*/ readl(i2c_dev->base + SSC_IEN); return IRQ_HANDLED; } /** * st_i2c_xfer_msg() - Transfer a single I2C message * @i2c_dev: Controller's private data * @msg: I2C message to transfer * @is_first: first message of the sequence * @is_last: last message of the sequence */ static int st_i2c_xfer_msg(struct st_i2c_dev *i2c_dev, struct i2c_msg *msg, bool is_first, bool is_last) { struct st_i2c_client *c = &i2c_dev->client; u32 ctl, i2c, it; unsigned long timeout; int ret; c->addr = i2c_8bit_addr_from_msg(msg); c->buf = msg->buf; c->count = msg->len; c->xfered = 0; c->result = 0; c->stop = is_last; reinit_completion(&i2c_dev->complete); ctl = SSC_CTL_EN | SSC_CTL_MS | SSC_CTL_EN_RX_FIFO | SSC_CTL_EN_TX_FIFO; st_i2c_set_bits(i2c_dev->base + SSC_CTL, ctl); i2c = SSC_I2C_TXENB; if (c->addr & I2C_M_RD) i2c |= SSC_I2C_ACKG; st_i2c_set_bits(i2c_dev->base + SSC_I2C, i2c); /* Write slave address */ st_i2c_write_tx_fifo(i2c_dev, c->addr); /* Pre-fill Tx fifo with data in case of write */ if (!(c->addr & I2C_M_RD)) st_i2c_wr_fill_tx_fifo(i2c_dev); it = SSC_IEN_NACKEN | SSC_IEN_TEEN | SSC_IEN_ARBLEN; writel_relaxed(it, i2c_dev->base + SSC_IEN); if (is_first) { ret = st_i2c_wait_free_bus(i2c_dev); if (ret) return ret; st_i2c_set_bits(i2c_dev->base + SSC_I2C, SSC_I2C_STRTG); } timeout = wait_for_completion_timeout(&i2c_dev->complete, i2c_dev->adap.timeout); ret = c->result; if (!timeout) { dev_err(i2c_dev->dev, "Write to slave 0x%x timed out\n", c->addr); ret = -ETIMEDOUT; } i2c = SSC_I2C_STOPG | SSC_I2C_REPSTRTG; st_i2c_clr_bits(i2c_dev->base + SSC_I2C, i2c); writel_relaxed(SSC_CLR_SSCSTOP | SSC_CLR_REPSTRT, i2c_dev->base + SSC_CLR); return ret; } /** * st_i2c_xfer() - Transfer a single I2C message * @i2c_adap: Adapter pointer to the controller * @msgs: Pointer to data to be written. * @num: Number of messages to be executed */ static int st_i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msgs[], int num) { struct st_i2c_dev *i2c_dev = i2c_get_adapdata(i2c_adap); int ret, i; i2c_dev->busy = true; ret = clk_prepare_enable(i2c_dev->clk); if (ret) { dev_err(i2c_dev->dev, "Failed to prepare_enable clock\n"); return ret; } pinctrl_pm_select_default_state(i2c_dev->dev); st_i2c_hw_config(i2c_dev); for (i = 0; (i < num) && !ret; i++) ret = st_i2c_xfer_msg(i2c_dev, &msgs[i], i == 0, i == num - 1); pinctrl_pm_select_idle_state(i2c_dev->dev); clk_disable_unprepare(i2c_dev->clk); i2c_dev->busy = false; return (ret < 0) ? 
ret : i; } static int st_i2c_suspend(struct device *dev) { struct st_i2c_dev *i2c_dev = dev_get_drvdata(dev); if (i2c_dev->busy) return -EBUSY; pinctrl_pm_select_sleep_state(dev); return 0; } static int st_i2c_resume(struct device *dev) { pinctrl_pm_select_default_state(dev); /* Go in idle state if available */ pinctrl_pm_select_idle_state(dev); return 0; } static DEFINE_SIMPLE_DEV_PM_OPS(st_i2c_pm, st_i2c_suspend, st_i2c_resume); static u32 st_i2c_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static const struct i2c_algorithm st_i2c_algo = { .master_xfer = st_i2c_xfer, .functionality = st_i2c_func, }; static struct i2c_bus_recovery_info st_i2c_recovery_info = { .recover_bus = st_i2c_recover_bus, }; static int st_i2c_of_get_deglitch(struct device_node *np, struct st_i2c_dev *i2c_dev) { int ret; ret = of_property_read_u32(np, "st,i2c-min-scl-pulse-width-us", &i2c_dev->scl_min_width_us); if ((ret == -ENODATA) || (ret == -EOVERFLOW)) { dev_err(i2c_dev->dev, "st,i2c-min-scl-pulse-width-us invalid\n"); return ret; } ret = of_property_read_u32(np, "st,i2c-min-sda-pulse-width-us", &i2c_dev->sda_min_width_us); if ((ret == -ENODATA) || (ret == -EOVERFLOW)) { dev_err(i2c_dev->dev, "st,i2c-min-sda-pulse-width-us invalid\n"); return ret; } return 0; } static int st_i2c_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct st_i2c_dev *i2c_dev; struct resource *res; u32 clk_rate; struct i2c_adapter *adap; int ret; i2c_dev = devm_kzalloc(&pdev->dev, sizeof(*i2c_dev), GFP_KERNEL); if (!i2c_dev) return -ENOMEM; i2c_dev->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(i2c_dev->base)) return PTR_ERR(i2c_dev->base); i2c_dev->irq = irq_of_parse_and_map(np, 0); if (!i2c_dev->irq) { dev_err(&pdev->dev, "IRQ missing or invalid\n"); return -EINVAL; } i2c_dev->clk = of_clk_get_by_name(np, "ssc"); if (IS_ERR(i2c_dev->clk)) { dev_err(&pdev->dev, "Unable to request clock\n"); return PTR_ERR(i2c_dev->clk); } i2c_dev->mode = I2C_MODE_STANDARD; ret = of_property_read_u32(np, "clock-frequency", &clk_rate); if (!ret && (clk_rate == I2C_MAX_FAST_MODE_FREQ)) i2c_dev->mode = I2C_MODE_FAST; i2c_dev->dev = &pdev->dev; ret = devm_request_threaded_irq(&pdev->dev, i2c_dev->irq, NULL, st_i2c_isr_thread, IRQF_ONESHOT, pdev->name, i2c_dev); if (ret) { dev_err(&pdev->dev, "Failed to request irq %i\n", i2c_dev->irq); return ret; } pinctrl_pm_select_default_state(i2c_dev->dev); /* In case idle state available, select it */ pinctrl_pm_select_idle_state(i2c_dev->dev); ret = st_i2c_of_get_deglitch(np, i2c_dev); if (ret) return ret; adap = &i2c_dev->adap; i2c_set_adapdata(adap, i2c_dev); snprintf(adap->name, sizeof(adap->name), "ST I2C(%pa)", &res->start); adap->owner = THIS_MODULE; adap->timeout = 2 * HZ; adap->retries = 0; adap->algo = &st_i2c_algo; adap->bus_recovery_info = &st_i2c_recovery_info; adap->dev.parent = &pdev->dev; adap->dev.of_node = pdev->dev.of_node; init_completion(&i2c_dev->complete); ret = i2c_add_adapter(adap); if (ret) return ret; platform_set_drvdata(pdev, i2c_dev); dev_info(i2c_dev->dev, "%s initialized\n", adap->name); return 0; } static void st_i2c_remove(struct platform_device *pdev) { struct st_i2c_dev *i2c_dev = platform_get_drvdata(pdev); i2c_del_adapter(&i2c_dev->adap); } static const struct of_device_id st_i2c_match[] = { { .compatible = "st,comms-ssc-i2c", }, { .compatible = "st,comms-ssc4-i2c", }, {}, }; MODULE_DEVICE_TABLE(of, st_i2c_match); static struct platform_driver st_i2c_driver = { .driver = { .name = 
"st-i2c", .of_match_table = st_i2c_match, .pm = pm_sleep_ptr(&st_i2c_pm), }, .probe = st_i2c_probe, .remove_new = st_i2c_remove, }; module_platform_driver(st_i2c_driver); MODULE_AUTHOR("Maxime Coquelin <[email protected]>"); MODULE_DESCRIPTION("STMicroelectronics I2C driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/i2c/busses/i2c-st.c
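
The st_i2c_xfer() loop above is what gives st_i2c_xfer_msg() its is_first/is_last semantics: only the first message of a batch waits for a free bus and generates a START, and only the last one lets the hardware generate the STOP. A minimal consumer-side sketch of that path, using only the generic i2c_transfer() API; the function name, the register offset and the client handle are hypothetical, not part of the driver:

#include <linux/errno.h>
#include <linux/i2c.h>

/*
 * Hedged sketch: a register read as a write-then-read pair. On this
 * adapter, msgs[0] is handled as "first" (free-bus wait plus START) and
 * msgs[1] as "last" (STOP generated when it completes).
 */
static int example_st_i2c_read_reg(struct i2c_client *client, u8 reg, u8 *val)
{
	struct i2c_msg msgs[] = {
		{
			.addr	= client->addr,	/* 7-bit slave address */
			.flags	= 0,		/* write: register offset */
			.len	= 1,
			.buf	= &reg,
		},
		{
			.addr	= client->addr,
			.flags	= I2C_M_RD,	/* read one byte back */
			.len	= 1,
			.buf	= val,
		},
	};
	int ret;

	ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
	if (ret < 0)
		return ret;

	/* i2c_transfer() returns the number of messages transferred */
	return ret == ARRAY_SIZE(msgs) ? 0 : -EIO;
}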
// SPDX-License-Identifier: GPL-2.0-only /* * Bitbanging I2C bus driver using the GPIO API * * Copyright (C) 2007 Atmel Corporation */ #include <linux/completion.h> #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/i2c-algo-bit.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/platform_data/i2c-gpio.h> #include <linux/platform_device.h> #include <linux/property.h> #include <linux/slab.h> struct i2c_gpio_private_data { struct gpio_desc *sda; struct gpio_desc *scl; struct i2c_adapter adap; struct i2c_algo_bit_data bit_data; struct i2c_gpio_platform_data pdata; #ifdef CONFIG_I2C_GPIO_FAULT_INJECTOR struct dentry *debug_dir; /* these must be protected by bus lock */ struct completion scl_irq_completion; u64 scl_irq_data; #endif }; /* * Toggle SDA by changing the output value of the pin. This is only * valid for pins configured as open drain (i.e. setting the value * high effectively turns off the output driver.) */ static void i2c_gpio_setsda_val(void *data, int state) { struct i2c_gpio_private_data *priv = data; gpiod_set_value_cansleep(priv->sda, state); } /* * Toggle SCL by changing the output value of the pin. This is used * for pins that are configured as open drain and for output-only * pins. The latter case will break the i2c protocol, but it will * often work in practice. */ static void i2c_gpio_setscl_val(void *data, int state) { struct i2c_gpio_private_data *priv = data; gpiod_set_value_cansleep(priv->scl, state); } static int i2c_gpio_getsda(void *data) { struct i2c_gpio_private_data *priv = data; return gpiod_get_value_cansleep(priv->sda); } static int i2c_gpio_getscl(void *data) { struct i2c_gpio_private_data *priv = data; return gpiod_get_value_cansleep(priv->scl); } #ifdef CONFIG_I2C_GPIO_FAULT_INJECTOR static struct dentry *i2c_gpio_debug_dir; #define setsda(bd, val) ((bd)->setsda((bd)->data, val)) #define setscl(bd, val) ((bd)->setscl((bd)->data, val)) #define getsda(bd) ((bd)->getsda((bd)->data)) #define getscl(bd) ((bd)->getscl((bd)->data)) #define WIRE_ATTRIBUTE(wire) \ static int fops_##wire##_get(void *data, u64 *val) \ { \ struct i2c_gpio_private_data *priv = data; \ \ i2c_lock_bus(&priv->adap, I2C_LOCK_ROOT_ADAPTER); \ *val = get##wire(&priv->bit_data); \ i2c_unlock_bus(&priv->adap, I2C_LOCK_ROOT_ADAPTER); \ return 0; \ } \ static int fops_##wire##_set(void *data, u64 val) \ { \ struct i2c_gpio_private_data *priv = data; \ \ i2c_lock_bus(&priv->adap, I2C_LOCK_ROOT_ADAPTER); \ set##wire(&priv->bit_data, val); \ i2c_unlock_bus(&priv->adap, I2C_LOCK_ROOT_ADAPTER); \ return 0; \ } \ DEFINE_DEBUGFS_ATTRIBUTE(fops_##wire, fops_##wire##_get, fops_##wire##_set, "%llu\n") WIRE_ATTRIBUTE(scl); WIRE_ATTRIBUTE(sda); static void i2c_gpio_incomplete_transfer(struct i2c_gpio_private_data *priv, u32 pattern, u8 pattern_size) { struct i2c_algo_bit_data *bit_data = &priv->bit_data; int i; i2c_lock_bus(&priv->adap, I2C_LOCK_ROOT_ADAPTER); /* START condition */ setsda(bit_data, 0); udelay(bit_data->udelay); /* Send pattern, request ACK, don't send STOP */ for (i = pattern_size - 1; i >= 0; i--) { setscl(bit_data, 0); udelay(bit_data->udelay / 2); setsda(bit_data, (pattern >> i) & 1); udelay((bit_data->udelay + 1) / 2); setscl(bit_data, 1); udelay(bit_data->udelay); } i2c_unlock_bus(&priv->adap, I2C_LOCK_ROOT_ADAPTER); } static int fops_incomplete_addr_phase_set(void *data, u64 addr) { struct i2c_gpio_private_data *priv = data; u32 pattern; if (addr > 0x7f) return 
-EINVAL; /* ADDR (7 bit) + RD (1 bit) + Client ACK, keep SDA hi (1 bit) */ pattern = (addr << 2) | 3; i2c_gpio_incomplete_transfer(priv, pattern, 9); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(fops_incomplete_addr_phase, NULL, fops_incomplete_addr_phase_set, "%llu\n"); static int fops_incomplete_write_byte_set(void *data, u64 addr) { struct i2c_gpio_private_data *priv = data; u32 pattern; if (addr > 0x7f) return -EINVAL; /* ADDR (7 bit) + WR (1 bit) + Client ACK (1 bit) */ pattern = (addr << 2) | 1; /* 0x00 (8 bit) + Client ACK, keep SDA hi (1 bit) */ pattern = (pattern << 9) | 1; i2c_gpio_incomplete_transfer(priv, pattern, 18); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(fops_incomplete_write_byte, NULL, fops_incomplete_write_byte_set, "%llu\n"); static int i2c_gpio_fi_act_on_scl_irq(struct i2c_gpio_private_data *priv, irqreturn_t handler(int, void*)) { int ret, irq = gpiod_to_irq(priv->scl); if (irq < 0) return irq; i2c_lock_bus(&priv->adap, I2C_LOCK_ROOT_ADAPTER); ret = gpiod_direction_input(priv->scl); if (ret) goto unlock; reinit_completion(&priv->scl_irq_completion); ret = request_irq(irq, handler, IRQF_TRIGGER_FALLING, "i2c_gpio_fault_injector_scl_irq", priv); if (ret) goto output; wait_for_completion_interruptible(&priv->scl_irq_completion); free_irq(irq, priv); output: ret = gpiod_direction_output(priv->scl, 1) ?: ret; unlock: i2c_unlock_bus(&priv->adap, I2C_LOCK_ROOT_ADAPTER); return ret; } static irqreturn_t lose_arbitration_irq(int irq, void *dev_id) { struct i2c_gpio_private_data *priv = dev_id; setsda(&priv->bit_data, 0); udelay(priv->scl_irq_data); setsda(&priv->bit_data, 1); complete(&priv->scl_irq_completion); return IRQ_HANDLED; } static int fops_lose_arbitration_set(void *data, u64 duration) { struct i2c_gpio_private_data *priv = data; if (duration > 100 * 1000) return -EINVAL; priv->scl_irq_data = duration; /* * Interrupt on falling SCL. This ensures that the master under test has * really started the transfer. Interrupt on falling SDA did only * exercise 'bus busy' detection on some HW but not 'arbitration lost'. * Note that the interrupt latency may cause the first bits to be * transmitted correctly. */ return i2c_gpio_fi_act_on_scl_irq(priv, lose_arbitration_irq); } DEFINE_DEBUGFS_ATTRIBUTE(fops_lose_arbitration, NULL, fops_lose_arbitration_set, "%llu\n"); static irqreturn_t inject_panic_irq(int irq, void *dev_id) { struct i2c_gpio_private_data *priv = dev_id; udelay(priv->scl_irq_data); panic("I2C fault injector induced panic"); return IRQ_HANDLED; } static int fops_inject_panic_set(void *data, u64 duration) { struct i2c_gpio_private_data *priv = data; if (duration > 100 * 1000) return -EINVAL; priv->scl_irq_data = duration; /* * Interrupt on falling SCL. This ensures that the master under test has * really started the transfer. */ return i2c_gpio_fi_act_on_scl_irq(priv, inject_panic_irq); } DEFINE_DEBUGFS_ATTRIBUTE(fops_inject_panic, NULL, fops_inject_panic_set, "%llu\n"); static void i2c_gpio_fault_injector_init(struct platform_device *pdev) { struct i2c_gpio_private_data *priv = platform_get_drvdata(pdev); /* * If there will be a debugfs-dir per i2c adapter somewhen, put the * 'fault-injector' dir there. Until then, we have a global dir with * all adapters as subdirs. 
*/ if (!i2c_gpio_debug_dir) { i2c_gpio_debug_dir = debugfs_create_dir("i2c-fault-injector", NULL); if (!i2c_gpio_debug_dir) return; } priv->debug_dir = debugfs_create_dir(pdev->name, i2c_gpio_debug_dir); if (!priv->debug_dir) return; init_completion(&priv->scl_irq_completion); debugfs_create_file_unsafe("incomplete_address_phase", 0200, priv->debug_dir, priv, &fops_incomplete_addr_phase); debugfs_create_file_unsafe("incomplete_write_byte", 0200, priv->debug_dir, priv, &fops_incomplete_write_byte); if (priv->bit_data.getscl) { debugfs_create_file_unsafe("inject_panic", 0200, priv->debug_dir, priv, &fops_inject_panic); debugfs_create_file_unsafe("lose_arbitration", 0200, priv->debug_dir, priv, &fops_lose_arbitration); } debugfs_create_file_unsafe("scl", 0600, priv->debug_dir, priv, &fops_scl); debugfs_create_file_unsafe("sda", 0600, priv->debug_dir, priv, &fops_sda); } static void i2c_gpio_fault_injector_exit(struct platform_device *pdev) { struct i2c_gpio_private_data *priv = platform_get_drvdata(pdev); debugfs_remove_recursive(priv->debug_dir); } #else static inline void i2c_gpio_fault_injector_init(struct platform_device *pdev) {} static inline void i2c_gpio_fault_injector_exit(struct platform_device *pdev) {} #endif /* CONFIG_I2C_GPIO_FAULT_INJECTOR*/ /* Get i2c-gpio properties from DT or ACPI table */ static void i2c_gpio_get_properties(struct device *dev, struct i2c_gpio_platform_data *pdata) { u32 reg; device_property_read_u32(dev, "i2c-gpio,delay-us", &pdata->udelay); if (!device_property_read_u32(dev, "i2c-gpio,timeout-ms", &reg)) pdata->timeout = msecs_to_jiffies(reg); pdata->sda_is_open_drain = device_property_read_bool(dev, "i2c-gpio,sda-open-drain"); pdata->scl_is_open_drain = device_property_read_bool(dev, "i2c-gpio,scl-open-drain"); pdata->scl_is_output_only = device_property_read_bool(dev, "i2c-gpio,scl-output-only"); pdata->sda_is_output_only = device_property_read_bool(dev, "i2c-gpio,sda-output-only"); pdata->sda_has_no_pullup = device_property_read_bool(dev, "i2c-gpio,sda-has-no-pullup"); pdata->scl_has_no_pullup = device_property_read_bool(dev, "i2c-gpio,scl-has-no-pullup"); } static struct gpio_desc *i2c_gpio_get_desc(struct device *dev, const char *con_id, unsigned int index, enum gpiod_flags gflags) { struct gpio_desc *retdesc; int ret; retdesc = devm_gpiod_get(dev, con_id, gflags); if (!IS_ERR(retdesc)) { dev_dbg(dev, "got GPIO from name %s\n", con_id); return retdesc; } retdesc = devm_gpiod_get_index(dev, NULL, index, gflags); if (!IS_ERR(retdesc)) { dev_dbg(dev, "got GPIO from index %u\n", index); return retdesc; } ret = PTR_ERR(retdesc); /* FIXME: hack in the old code, is this really necessary? 
*/ if (ret == -EINVAL) retdesc = ERR_PTR(-EPROBE_DEFER); /* This happens if the GPIO driver is not yet probed, let's defer */ if (ret == -ENOENT) retdesc = ERR_PTR(-EPROBE_DEFER); if (PTR_ERR(retdesc) != -EPROBE_DEFER) dev_err(dev, "error trying to get descriptor: %d\n", ret); return retdesc; } static int i2c_gpio_probe(struct platform_device *pdev) { struct i2c_gpio_private_data *priv; struct i2c_gpio_platform_data *pdata; struct i2c_algo_bit_data *bit_data; struct i2c_adapter *adap; struct device *dev = &pdev->dev; struct fwnode_handle *fwnode = dev_fwnode(dev); enum gpiod_flags gflags; int ret; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; adap = &priv->adap; bit_data = &priv->bit_data; pdata = &priv->pdata; if (fwnode) { i2c_gpio_get_properties(dev, pdata); } else { /* * If all platform data settings are zero it is OK * to not provide any platform data from the board. */ if (dev_get_platdata(dev)) memcpy(pdata, dev_get_platdata(dev), sizeof(*pdata)); } /* * First get the GPIO pins; if it fails, we'll defer the probe. * If the SCL/SDA lines are marked "open drain" by platform data or * device tree then this means that something outside of our control is * marking these lines to be handled as open drain, and we should just * handle them as we handle any other output. Else we enforce open * drain as this is required for an I2C bus. */ if (pdata->sda_is_open_drain || pdata->sda_has_no_pullup) gflags = GPIOD_OUT_HIGH; else gflags = GPIOD_OUT_HIGH_OPEN_DRAIN; priv->sda = i2c_gpio_get_desc(dev, "sda", 0, gflags); if (IS_ERR(priv->sda)) return PTR_ERR(priv->sda); if (pdata->scl_is_open_drain || pdata->scl_has_no_pullup) gflags = GPIOD_OUT_HIGH; else gflags = GPIOD_OUT_HIGH_OPEN_DRAIN; priv->scl = i2c_gpio_get_desc(dev, "scl", 1, gflags); if (IS_ERR(priv->scl)) return PTR_ERR(priv->scl); if (gpiod_cansleep(priv->sda) || gpiod_cansleep(priv->scl)) dev_warn(dev, "Slow GPIO pins might wreak havoc into I2C/SMBus bus timing"); else bit_data->can_do_atomic = true; bit_data->setsda = i2c_gpio_setsda_val; bit_data->setscl = i2c_gpio_setscl_val; if (!pdata->scl_is_output_only) bit_data->getscl = i2c_gpio_getscl; if (!pdata->sda_is_output_only) bit_data->getsda = i2c_gpio_getsda; if (pdata->udelay) bit_data->udelay = pdata->udelay; else if (pdata->scl_is_output_only) bit_data->udelay = 50; /* 10 kHz */ else bit_data->udelay = 5; /* 100 kHz */ if (pdata->timeout) bit_data->timeout = pdata->timeout; else bit_data->timeout = HZ / 10; /* 100 ms */ bit_data->data = priv; adap->owner = THIS_MODULE; if (fwnode) strscpy(adap->name, dev_name(dev), sizeof(adap->name)); else snprintf(adap->name, sizeof(adap->name), "i2c-gpio%d", pdev->id); adap->algo_data = bit_data; adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD; adap->dev.parent = dev; device_set_node(&adap->dev, fwnode); adap->nr = pdev->id; ret = i2c_bit_add_numbered_bus(adap); if (ret) return ret; platform_set_drvdata(pdev, priv); /* * FIXME: using global GPIO numbers is not helpful. If/when we * get accessors to get the actual name of the GPIO line, * from the descriptor, then provide that instead. */ dev_info(dev, "using lines %u (SDA) and %u (SCL%s)\n", desc_to_gpio(priv->sda), desc_to_gpio(priv->scl), pdata->scl_is_output_only ? 
", no clock stretching" : ""); i2c_gpio_fault_injector_init(pdev); return 0; } static void i2c_gpio_remove(struct platform_device *pdev) { struct i2c_gpio_private_data *priv; struct i2c_adapter *adap; i2c_gpio_fault_injector_exit(pdev); priv = platform_get_drvdata(pdev); adap = &priv->adap; i2c_del_adapter(adap); } static const struct of_device_id i2c_gpio_dt_ids[] = { { .compatible = "i2c-gpio", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, i2c_gpio_dt_ids); static const struct acpi_device_id i2c_gpio_acpi_match[] = { { "LOON0005" }, /* LoongArch */ { } }; MODULE_DEVICE_TABLE(acpi, i2c_gpio_acpi_match); static struct platform_driver i2c_gpio_driver = { .driver = { .name = "i2c-gpio", .of_match_table = i2c_gpio_dt_ids, .acpi_match_table = i2c_gpio_acpi_match, }, .probe = i2c_gpio_probe, .remove_new = i2c_gpio_remove, }; static int __init i2c_gpio_init(void) { int ret; ret = platform_driver_register(&i2c_gpio_driver); if (ret) printk(KERN_ERR "i2c-gpio: probe failed: %d\n", ret); return ret; } subsys_initcall(i2c_gpio_init); static void __exit i2c_gpio_exit(void) { platform_driver_unregister(&i2c_gpio_driver); } module_exit(i2c_gpio_exit); MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); MODULE_DESCRIPTION("Platform-independent bitbanging I2C driver"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:i2c-gpio");
linux-master
drivers/i2c/busses/i2c-gpio.c
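
When i2c-gpio is instantiated without DT or ACPI, i2c_gpio_probe() above copies its configuration from dev_get_platdata() and i2c_gpio_get_desc() resolves the pins through the "sda"/"scl" connection IDs. A hedged board-file sketch of that wiring; the chip label "gpiochip0", the pin numbers 22/23 and the device id 0 are invented for illustration:

#include <linux/gpio/machine.h>
#include <linux/platform_data/i2c-gpio.h>
#include <linux/platform_device.h>

/* dev_id must match "<name>.<id>" of the platform device below */
static struct gpiod_lookup_table example_i2c_gpiod_table = {
	.dev_id = "i2c-gpio.0",
	.table = {
		/* plain outputs; the driver requests open drain itself */
		GPIO_LOOKUP("gpiochip0", 22, "sda", GPIO_ACTIVE_HIGH),
		GPIO_LOOKUP("gpiochip0", 23, "scl", GPIO_ACTIVE_HIGH),
		{ }
	},
};

static struct i2c_gpio_platform_data example_i2c_pdata = {
	.udelay = 5,	/* ~100 kHz, same as the driver's default */
};

static struct platform_device example_i2c_device = {
	.name = "i2c-gpio",
	.id = 0,
	.dev = {
		.platform_data = &example_i2c_pdata,
	},
};

static int __init example_i2c_register(void)
{
	gpiod_add_lookup_table(&example_i2c_gpiod_table);
	return platform_device_register(&example_i2c_device);
}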
// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (c) 2001,2002 Christer Weinigel <[email protected]> National Semiconductor SCx200 ACCESS.bus support Also supports the AMD CS5535 and AMD CS5536 Based on i2c-keywest.c which is: Copyright (c) 2001 Benjamin Herrenschmidt <[email protected]> Copyright (c) 2000 Philip Edelbrock <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/scx200.h> MODULE_AUTHOR("Christer Weinigel <[email protected]>"); MODULE_DESCRIPTION("NatSemi SCx200 ACCESS.bus Driver"); MODULE_ALIAS("platform:cs5535-smb"); MODULE_LICENSE("GPL"); #define MAX_DEVICES 4 static int base[MAX_DEVICES] = { 0x820, 0x840 }; module_param_hw_array(base, int, ioport, NULL, 0); MODULE_PARM_DESC(base, "Base addresses for the ACCESS.bus controllers"); #define POLL_TIMEOUT (HZ/5) enum scx200_acb_state { state_idle, state_address, state_command, state_repeat_start, state_quick, state_read, state_write, }; static const char *scx200_acb_state_name[] = { "idle", "address", "command", "repeat_start", "quick", "read", "write", }; /* Physical interface */ struct scx200_acb_iface { struct scx200_acb_iface *next; struct i2c_adapter adapter; unsigned base; struct mutex mutex; /* State machine data */ enum scx200_acb_state state; int result; u8 address_byte; u8 command; u8 *ptr; char needs_reset; unsigned len; }; /* Register Definitions */ #define ACBSDA (iface->base + 0) #define ACBST (iface->base + 1) #define ACBST_SDAST 0x40 /* SDA Status */ #define ACBST_BER 0x20 #define ACBST_NEGACK 0x10 /* Negative Acknowledge */ #define ACBST_STASTR 0x08 /* Stall After Start */ #define ACBST_MASTER 0x02 #define ACBCST (iface->base + 2) #define ACBCST_BB 0x02 #define ACBCTL1 (iface->base + 3) #define ACBCTL1_STASTRE 0x80 #define ACBCTL1_NMINTE 0x40 #define ACBCTL1_ACK 0x10 #define ACBCTL1_STOP 0x02 #define ACBCTL1_START 0x01 #define ACBADDR (iface->base + 4) #define ACBCTL2 (iface->base + 5) #define ACBCTL2_ENABLE 0x01 /************************************************************************/ static void scx200_acb_machine(struct scx200_acb_iface *iface, u8 status) { const char *errmsg; dev_dbg(&iface->adapter.dev, "state %s, status = 0x%02x\n", scx200_acb_state_name[iface->state], status); if (status & ACBST_BER) { errmsg = "bus error"; goto error; } if (!(status & ACBST_MASTER)) { errmsg = "not master"; goto error; } if (status & ACBST_NEGACK) { dev_dbg(&iface->adapter.dev, "negative ack in state %s\n", scx200_acb_state_name[iface->state]); iface->state = state_idle; iface->result = -ENXIO; outb(inb(ACBCTL1) | ACBCTL1_STOP, ACBCTL1); outb(ACBST_STASTR | ACBST_NEGACK, ACBST); /* Reset the status register */ outb(0, ACBST); return; } switch (iface->state) { case state_idle: dev_warn(&iface->adapter.dev, "interrupt in idle state\n"); break; case state_address: /* Do a pointer write first */ outb(iface->address_byte & ~1, ACBSDA); iface->state = state_command; break; case state_command: outb(iface->command, ACBSDA); if (iface->address_byte & 1) iface->state = state_repeat_start; else iface->state = state_write; break; case state_repeat_start: outb(inb(ACBCTL1) | ACBCTL1_START, ACBCTL1); fallthrough; case state_quick: if (iface->address_byte & 1) { if (iface->len == 1) outb(inb(ACBCTL1) | ACBCTL1_ACK, ACBCTL1); 
else outb(inb(ACBCTL1) & ~ACBCTL1_ACK, ACBCTL1); outb(iface->address_byte, ACBSDA); iface->state = state_read; } else { outb(iface->address_byte, ACBSDA); iface->state = state_write; } break; case state_read: /* Set ACK if _next_ byte will be the last one */ if (iface->len == 2) outb(inb(ACBCTL1) | ACBCTL1_ACK, ACBCTL1); else outb(inb(ACBCTL1) & ~ACBCTL1_ACK, ACBCTL1); if (iface->len == 1) { iface->result = 0; iface->state = state_idle; outb(inb(ACBCTL1) | ACBCTL1_STOP, ACBCTL1); } *iface->ptr++ = inb(ACBSDA); --iface->len; break; case state_write: if (iface->len == 0) { iface->result = 0; iface->state = state_idle; outb(inb(ACBCTL1) | ACBCTL1_STOP, ACBCTL1); break; } outb(*iface->ptr++, ACBSDA); --iface->len; break; } return; error: dev_err(&iface->adapter.dev, "%s in state %s (addr=0x%02x, len=%d, status=0x%02x)\n", errmsg, scx200_acb_state_name[iface->state], iface->address_byte, iface->len, status); iface->state = state_idle; iface->result = -EIO; iface->needs_reset = 1; } static void scx200_acb_poll(struct scx200_acb_iface *iface) { u8 status; unsigned long timeout; timeout = jiffies + POLL_TIMEOUT; while (1) { status = inb(ACBST); /* Reset the status register to avoid the hang */ outb(0, ACBST); if ((status & (ACBST_SDAST|ACBST_BER|ACBST_NEGACK)) != 0) { scx200_acb_machine(iface, status); return; } if (time_after(jiffies, timeout)) break; cpu_relax(); cond_resched(); } dev_err(&iface->adapter.dev, "timeout in state %s\n", scx200_acb_state_name[iface->state]); iface->state = state_idle; iface->result = -EIO; iface->needs_reset = 1; } static void scx200_acb_reset(struct scx200_acb_iface *iface) { /* Disable the ACCESS.bus device and Configure the SCL frequency: 16 clock cycles */ outb(0x70, ACBCTL2); /* Polling mode */ outb(0, ACBCTL1); /* Disable slave address */ outb(0, ACBADDR); /* Enable the ACCESS.bus device */ outb(inb(ACBCTL2) | ACBCTL2_ENABLE, ACBCTL2); /* Free STALL after START */ outb(inb(ACBCTL1) & ~(ACBCTL1_STASTRE | ACBCTL1_NMINTE), ACBCTL1); /* Send a STOP */ outb(inb(ACBCTL1) | ACBCTL1_STOP, ACBCTL1); /* Clear BER, NEGACK and STASTR bits */ outb(ACBST_BER | ACBST_NEGACK | ACBST_STASTR, ACBST); /* Clear BB bit */ outb(inb(ACBCST) | ACBCST_BB, ACBCST); } static s32 scx200_acb_smbus_xfer(struct i2c_adapter *adapter, u16 address, unsigned short flags, char rw, u8 command, int size, union i2c_smbus_data *data) { struct scx200_acb_iface *iface = i2c_get_adapdata(adapter); int len; u8 *buffer; u16 cur_word; int rc; switch (size) { case I2C_SMBUS_QUICK: len = 0; buffer = NULL; break; case I2C_SMBUS_BYTE: len = 1; buffer = rw ? 
&data->byte : &command; break; case I2C_SMBUS_BYTE_DATA: len = 1; buffer = &data->byte; break; case I2C_SMBUS_WORD_DATA: len = 2; cur_word = cpu_to_le16(data->word); buffer = (u8 *)&cur_word; break; case I2C_SMBUS_I2C_BLOCK_DATA: len = data->block[0]; if (len == 0 || len > I2C_SMBUS_BLOCK_MAX) return -EINVAL; buffer = &data->block[1]; break; default: return -EINVAL; } dev_dbg(&adapter->dev, "size=%d, address=0x%x, command=0x%x, len=%d, read=%d\n", size, address, command, len, rw); if (!len && rw == I2C_SMBUS_READ) { dev_dbg(&adapter->dev, "zero length read\n"); return -EINVAL; } mutex_lock(&iface->mutex); iface->address_byte = (address << 1) | rw; iface->command = command; iface->ptr = buffer; iface->len = len; iface->result = -EINVAL; iface->needs_reset = 0; outb(inb(ACBCTL1) | ACBCTL1_START, ACBCTL1); if (size == I2C_SMBUS_QUICK || size == I2C_SMBUS_BYTE) iface->state = state_quick; else iface->state = state_address; while (iface->state != state_idle) scx200_acb_poll(iface); if (iface->needs_reset) scx200_acb_reset(iface); rc = iface->result; mutex_unlock(&iface->mutex); if (rc == 0 && size == I2C_SMBUS_WORD_DATA && rw == I2C_SMBUS_READ) data->word = le16_to_cpu(cur_word); #ifdef DEBUG dev_dbg(&adapter->dev, "transfer done, result: %d", rc); if (buffer) { int i; printk(" data:"); for (i = 0; i < len; ++i) printk(" %02x", buffer[i]); } printk("\n"); #endif return rc; } static u32 scx200_acb_func(struct i2c_adapter *adapter) { return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_I2C_BLOCK; } /* For now, we only handle combined mode (smbus) */ static const struct i2c_algorithm scx200_acb_algorithm = { .smbus_xfer = scx200_acb_smbus_xfer, .functionality = scx200_acb_func, }; static struct scx200_acb_iface *scx200_acb_list; static DEFINE_MUTEX(scx200_acb_list_mutex); static int scx200_acb_probe(struct scx200_acb_iface *iface) { u8 val; /* Disable the ACCESS.bus device and Configure the SCL frequency: 16 clock cycles */ outb(0x70, ACBCTL2); if (inb(ACBCTL2) != 0x70) { pr_debug("ACBCTL2 readback failed\n"); return -ENXIO; } outb(inb(ACBCTL1) | ACBCTL1_NMINTE, ACBCTL1); val = inb(ACBCTL1); if (val) { pr_debug("disabled, but ACBCTL1=0x%02x\n", val); return -ENXIO; } outb(inb(ACBCTL2) | ACBCTL2_ENABLE, ACBCTL2); outb(inb(ACBCTL1) | ACBCTL1_NMINTE, ACBCTL1); val = inb(ACBCTL1); if ((val & ACBCTL1_NMINTE) != ACBCTL1_NMINTE) { pr_debug("enabled, but NMINTE won't be set, ACBCTL1=0x%02x\n", val); return -ENXIO; } return 0; } static struct scx200_acb_iface *scx200_create_iface(const char *text, struct device *dev, int index) { struct scx200_acb_iface *iface; struct i2c_adapter *adapter; iface = kzalloc(sizeof(*iface), GFP_KERNEL); if (!iface) return NULL; adapter = &iface->adapter; i2c_set_adapdata(adapter, iface); snprintf(adapter->name, sizeof(adapter->name), "%s ACB%d", text, index); adapter->owner = THIS_MODULE; adapter->algo = &scx200_acb_algorithm; adapter->class = I2C_CLASS_HWMON | I2C_CLASS_SPD; adapter->dev.parent = dev; mutex_init(&iface->mutex); return iface; } static int scx200_acb_create(struct scx200_acb_iface *iface) { struct i2c_adapter *adapter; int rc; adapter = &iface->adapter; rc = scx200_acb_probe(iface); if (rc) { pr_warn("probe failed\n"); return rc; } scx200_acb_reset(iface); if (i2c_add_adapter(adapter) < 0) { pr_err("failed to register\n"); return -ENODEV; } if (!adapter->dev.parent) { /* If there's no dev, we're tracking (ISA) ifaces manually */ mutex_lock(&scx200_acb_list_mutex); iface->next = 
scx200_acb_list; scx200_acb_list = iface; mutex_unlock(&scx200_acb_list_mutex); } return 0; } static struct scx200_acb_iface *scx200_create_dev(const char *text, unsigned long base, int index, struct device *dev) { struct scx200_acb_iface *iface; int rc; iface = scx200_create_iface(text, dev, index); if (iface == NULL) return NULL; if (!request_region(base, 8, iface->adapter.name)) { pr_err("can't allocate io 0x%lx-0x%lx\n", base, base + 8 - 1); goto errout_free; } iface->base = base; rc = scx200_acb_create(iface); if (rc == 0) return iface; release_region(base, 8); errout_free: kfree(iface); return NULL; } static int scx200_probe(struct platform_device *pdev) { struct scx200_acb_iface *iface; struct resource *res; res = platform_get_resource(pdev, IORESOURCE_IO, 0); if (!res) { dev_err(&pdev->dev, "can't fetch device resource info\n"); return -ENODEV; } iface = scx200_create_dev("CS5535", res->start, 0, &pdev->dev); if (!iface) return -EIO; dev_info(&pdev->dev, "SCx200 device '%s' registered\n", iface->adapter.name); platform_set_drvdata(pdev, iface); return 0; } static void scx200_cleanup_iface(struct scx200_acb_iface *iface) { i2c_del_adapter(&iface->adapter); release_region(iface->base, 8); kfree(iface); } static void scx200_remove(struct platform_device *pdev) { struct scx200_acb_iface *iface; iface = platform_get_drvdata(pdev); scx200_cleanup_iface(iface); } static struct platform_driver scx200_pci_driver = { .driver = { .name = "cs5535-smb", }, .probe = scx200_probe, .remove_new = scx200_remove, }; static const struct pci_device_id scx200_isa[] = { { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SCx200_BRIDGE) }, { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SC1100_BRIDGE) }, { 0, } }; static __init void scx200_scan_isa(void) { int i; if (!pci_dev_present(scx200_isa)) return; for (i = 0; i < MAX_DEVICES; ++i) { if (base[i] == 0) continue; /* XXX: should we care about failures? */ scx200_create_dev("SCx200", base[i], i, NULL); } } static int __init scx200_acb_init(void) { pr_debug("NatSemi SCx200 ACCESS.bus Driver\n"); /* First scan for ISA-based devices */ scx200_scan_isa(); /* XXX: should we care about errors? */ /* If at least one bus was created, init must succeed */ if (scx200_acb_list) return 0; /* No ISA devices; register the platform driver for PCI-based devices */ return platform_driver_register(&scx200_pci_driver); } static void __exit scx200_acb_cleanup(void) { struct scx200_acb_iface *iface; platform_driver_unregister(&scx200_pci_driver); mutex_lock(&scx200_acb_list_mutex); while ((iface = scx200_acb_list) != NULL) { scx200_acb_list = iface->next; mutex_unlock(&scx200_acb_list_mutex); scx200_cleanup_iface(iface); mutex_lock(&scx200_acb_list_mutex); } mutex_unlock(&scx200_acb_list_mutex); } module_init(scx200_acb_init); module_exit(scx200_acb_cleanup);
linux-master
drivers/i2c/busses/scx200_acb.c
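
The CS5535/SCx200 transfer path above is fully polled: scx200_acb_smbus_xfer() loads the address and protocol, kicks the controller with ACBCTL1_START, and scx200_acb_poll() then feeds status bytes to scx200_acb_machine() until it reaches state_idle. None of that is visible to a client; a hedged sketch with an invented client binding and register offset:

#include <linux/i2c.h>

/*
 * Hedged sketch: a 16-bit SMBus register read. Internally this walks
 * state_address -> state_command -> state_repeat_start -> state_read
 * in scx200_acb_machine(); a NAK surfaces as -ENXIO (ACBST_NEGACK).
 */
static int example_scx200_read_word(struct i2c_client *client, u16 *val)
{
	s32 word = i2c_smbus_read_word_data(client, 0x02); /* 0x02: invented */

	if (word < 0)
		return word;

	*val = word;
	return 0;
}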
// SPDX-License-Identifier: GPL-2.0-only /* * SMBus 2.0 driver for AMD-8111 IO-Hub. * * Copyright (c) 2002 Vojtech Pavlik */ #include <linux/module.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/stddef.h> #include <linux/ioport.h> #include <linux/i2c.h> #include <linux/delay.h> #include <linux/acpi.h> #include <linux/slab.h> #include <linux/io.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR ("Vojtech Pavlik <[email protected]>"); MODULE_DESCRIPTION("AMD8111 SMBus 2.0 driver"); struct amd_smbus { struct pci_dev *dev; struct i2c_adapter adapter; int base; int size; }; static struct pci_driver amd8111_driver; /* * AMD PCI control registers definitions. */ #define AMD_PCI_MISC 0x48 #define AMD_PCI_MISC_SCI 0x04 /* deliver SCI */ #define AMD_PCI_MISC_INT 0x02 /* deliver PCI IRQ */ #define AMD_PCI_MISC_SPEEDUP 0x01 /* 16x clock speedup */ /* * ACPI 2.0 chapter 13 PCI interface definitions. */ #define AMD_EC_DATA 0x00 /* data register */ #define AMD_EC_SC 0x04 /* status of controller */ #define AMD_EC_CMD 0x04 /* command register */ #define AMD_EC_ICR 0x08 /* interrupt control register */ #define AMD_EC_SC_SMI 0x04 /* smi event pending */ #define AMD_EC_SC_SCI 0x02 /* sci event pending */ #define AMD_EC_SC_BURST 0x01 /* burst mode enabled */ #define AMD_EC_SC_CMD 0x08 /* byte in data reg is command */ #define AMD_EC_SC_IBF 0x02 /* data ready for embedded controller */ #define AMD_EC_SC_OBF 0x01 /* data ready for host */ #define AMD_EC_CMD_RD 0x80 /* read EC */ #define AMD_EC_CMD_WR 0x81 /* write EC */ #define AMD_EC_CMD_BE 0x82 /* enable burst mode */ #define AMD_EC_CMD_BD 0x83 /* disable burst mode */ #define AMD_EC_CMD_QR 0x84 /* query EC */ /* * ACPI 2.0 chapter 13 access of registers of the EC */ static int amd_ec_wait_write(struct amd_smbus *smbus) { int timeout = 500; while ((inb(smbus->base + AMD_EC_SC) & AMD_EC_SC_IBF) && --timeout) udelay(1); if (!timeout) { dev_warn(&smbus->dev->dev, "Timeout while waiting for IBF to clear\n"); return -ETIMEDOUT; } return 0; } static int amd_ec_wait_read(struct amd_smbus *smbus) { int timeout = 500; while ((~inb(smbus->base + AMD_EC_SC) & AMD_EC_SC_OBF) && --timeout) udelay(1); if (!timeout) { dev_warn(&smbus->dev->dev, "Timeout while waiting for OBF to set\n"); return -ETIMEDOUT; } return 0; } static int amd_ec_read(struct amd_smbus *smbus, unsigned char address, unsigned char *data) { int status; status = amd_ec_wait_write(smbus); if (status) return status; outb(AMD_EC_CMD_RD, smbus->base + AMD_EC_CMD); status = amd_ec_wait_write(smbus); if (status) return status; outb(address, smbus->base + AMD_EC_DATA); status = amd_ec_wait_read(smbus); if (status) return status; *data = inb(smbus->base + AMD_EC_DATA); return 0; } static int amd_ec_write(struct amd_smbus *smbus, unsigned char address, unsigned char data) { int status; status = amd_ec_wait_write(smbus); if (status) return status; outb(AMD_EC_CMD_WR, smbus->base + AMD_EC_CMD); status = amd_ec_wait_write(smbus); if (status) return status; outb(address, smbus->base + AMD_EC_DATA); status = amd_ec_wait_write(smbus); if (status) return status; outb(data, smbus->base + AMD_EC_DATA); return 0; } /* * ACPI 2.0 chapter 13 SMBus 2.0 EC register model */ #define AMD_SMB_PRTCL 0x00 /* protocol, PEC */ #define AMD_SMB_STS 0x01 /* status */ #define AMD_SMB_ADDR 0x02 /* address */ #define AMD_SMB_CMD 0x03 /* command */ #define AMD_SMB_DATA 0x04 /* 32 data registers */ #define AMD_SMB_BCNT 0x24 /* number of data bytes */ #define AMD_SMB_ALRM_A 0x25 /* alarm address */ #define AMD_SMB_ALRM_D 0x26 /* 2 
bytes alarm data */ #define AMD_SMB_STS_DONE 0x80 #define AMD_SMB_STS_ALRM 0x40 #define AMD_SMB_STS_RES 0x20 #define AMD_SMB_STS_STATUS 0x1f #define AMD_SMB_STATUS_OK 0x00 #define AMD_SMB_STATUS_FAIL 0x07 #define AMD_SMB_STATUS_DNAK 0x10 #define AMD_SMB_STATUS_DERR 0x11 #define AMD_SMB_STATUS_CMD_DENY 0x12 #define AMD_SMB_STATUS_UNKNOWN 0x13 #define AMD_SMB_STATUS_ACC_DENY 0x17 #define AMD_SMB_STATUS_TIMEOUT 0x18 #define AMD_SMB_STATUS_NOTSUP 0x19 #define AMD_SMB_STATUS_BUSY 0x1A #define AMD_SMB_STATUS_PEC 0x1F #define AMD_SMB_PRTCL_WRITE 0x00 #define AMD_SMB_PRTCL_READ 0x01 #define AMD_SMB_PRTCL_QUICK 0x02 #define AMD_SMB_PRTCL_BYTE 0x04 #define AMD_SMB_PRTCL_BYTE_DATA 0x06 #define AMD_SMB_PRTCL_WORD_DATA 0x08 #define AMD_SMB_PRTCL_BLOCK_DATA 0x0a #define AMD_SMB_PRTCL_PROC_CALL 0x0c #define AMD_SMB_PRTCL_BLOCK_PROC_CALL 0x0d #define AMD_SMB_PRTCL_I2C_BLOCK_DATA 0x4a #define AMD_SMB_PRTCL_PEC 0x80 static s32 amd8111_access(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { struct amd_smbus *smbus = adap->algo_data; unsigned char protocol, len, pec, temp[2]; int i, status; protocol = (read_write == I2C_SMBUS_READ) ? AMD_SMB_PRTCL_READ : AMD_SMB_PRTCL_WRITE; pec = (flags & I2C_CLIENT_PEC) ? AMD_SMB_PRTCL_PEC : 0; switch (size) { case I2C_SMBUS_QUICK: protocol |= AMD_SMB_PRTCL_QUICK; read_write = I2C_SMBUS_WRITE; break; case I2C_SMBUS_BYTE: if (read_write == I2C_SMBUS_WRITE) { status = amd_ec_write(smbus, AMD_SMB_CMD, command); if (status) return status; } protocol |= AMD_SMB_PRTCL_BYTE; break; case I2C_SMBUS_BYTE_DATA: status = amd_ec_write(smbus, AMD_SMB_CMD, command); if (status) return status; if (read_write == I2C_SMBUS_WRITE) { status = amd_ec_write(smbus, AMD_SMB_DATA, data->byte); if (status) return status; } protocol |= AMD_SMB_PRTCL_BYTE_DATA; break; case I2C_SMBUS_WORD_DATA: status = amd_ec_write(smbus, AMD_SMB_CMD, command); if (status) return status; if (read_write == I2C_SMBUS_WRITE) { status = amd_ec_write(smbus, AMD_SMB_DATA, data->word & 0xff); if (status) return status; status = amd_ec_write(smbus, AMD_SMB_DATA + 1, data->word >> 8); if (status) return status; } protocol |= AMD_SMB_PRTCL_WORD_DATA | pec; break; case I2C_SMBUS_BLOCK_DATA: status = amd_ec_write(smbus, AMD_SMB_CMD, command); if (status) return status; if (read_write == I2C_SMBUS_WRITE) { len = min_t(u8, data->block[0], I2C_SMBUS_BLOCK_MAX); status = amd_ec_write(smbus, AMD_SMB_BCNT, len); if (status) return status; for (i = 0; i < len; i++) { status = amd_ec_write(smbus, AMD_SMB_DATA + i, data->block[i + 1]); if (status) return status; } } protocol |= AMD_SMB_PRTCL_BLOCK_DATA | pec; break; case I2C_SMBUS_I2C_BLOCK_DATA: len = min_t(u8, data->block[0], I2C_SMBUS_BLOCK_MAX); status = amd_ec_write(smbus, AMD_SMB_CMD, command); if (status) return status; status = amd_ec_write(smbus, AMD_SMB_BCNT, len); if (status) return status; if (read_write == I2C_SMBUS_WRITE) for (i = 0; i < len; i++) { status = amd_ec_write(smbus, AMD_SMB_DATA + i, data->block[i + 1]); if (status) return status; } protocol |= AMD_SMB_PRTCL_I2C_BLOCK_DATA; break; case I2C_SMBUS_PROC_CALL: status = amd_ec_write(smbus, AMD_SMB_CMD, command); if (status) return status; status = amd_ec_write(smbus, AMD_SMB_DATA, data->word & 0xff); if (status) return status; status = amd_ec_write(smbus, AMD_SMB_DATA + 1, data->word >> 8); if (status) return status; protocol = AMD_SMB_PRTCL_PROC_CALL | pec; read_write = I2C_SMBUS_READ; break; case I2C_SMBUS_BLOCK_PROC_CALL: len = min_t(u8, 
data->block[0], I2C_SMBUS_BLOCK_MAX - 1); status = amd_ec_write(smbus, AMD_SMB_CMD, command); if (status) return status; status = amd_ec_write(smbus, AMD_SMB_BCNT, len); if (status) return status; for (i = 0; i < len; i++) { status = amd_ec_write(smbus, AMD_SMB_DATA + i, data->block[i + 1]); if (status) return status; } protocol = AMD_SMB_PRTCL_BLOCK_PROC_CALL | pec; read_write = I2C_SMBUS_READ; break; default: dev_warn(&adap->dev, "Unsupported transaction %d\n", size); return -EOPNOTSUPP; } status = amd_ec_write(smbus, AMD_SMB_ADDR, addr << 1); if (status) return status; status = amd_ec_write(smbus, AMD_SMB_PRTCL, protocol); if (status) return status; status = amd_ec_read(smbus, AMD_SMB_STS, temp + 0); if (status) return status; if (~temp[0] & AMD_SMB_STS_DONE) { udelay(500); status = amd_ec_read(smbus, AMD_SMB_STS, temp + 0); if (status) return status; } if (~temp[0] & AMD_SMB_STS_DONE) { msleep(1); status = amd_ec_read(smbus, AMD_SMB_STS, temp + 0); if (status) return status; } if ((~temp[0] & AMD_SMB_STS_DONE) || (temp[0] & AMD_SMB_STS_STATUS)) return -EIO; if (read_write == I2C_SMBUS_WRITE) return 0; switch (size) { case I2C_SMBUS_BYTE: case I2C_SMBUS_BYTE_DATA: status = amd_ec_read(smbus, AMD_SMB_DATA, &data->byte); if (status) return status; break; case I2C_SMBUS_WORD_DATA: case I2C_SMBUS_PROC_CALL: status = amd_ec_read(smbus, AMD_SMB_DATA, temp + 0); if (status) return status; status = amd_ec_read(smbus, AMD_SMB_DATA + 1, temp + 1); if (status) return status; data->word = (temp[1] << 8) | temp[0]; break; case I2C_SMBUS_BLOCK_DATA: case I2C_SMBUS_BLOCK_PROC_CALL: status = amd_ec_read(smbus, AMD_SMB_BCNT, &len); if (status) return status; len = min_t(u8, len, I2C_SMBUS_BLOCK_MAX); fallthrough; case I2C_SMBUS_I2C_BLOCK_DATA: for (i = 0; i < len; i++) { status = amd_ec_read(smbus, AMD_SMB_DATA + i, data->block + i + 1); if (status) return status; } data->block[0] = len; break; } return 0; } static u32 amd8111_func(struct i2c_adapter *adapter) { return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_SMBUS_PROC_CALL | I2C_FUNC_SMBUS_BLOCK_PROC_CALL | I2C_FUNC_SMBUS_I2C_BLOCK | I2C_FUNC_SMBUS_PEC; } static const struct i2c_algorithm smbus_algorithm = { .smbus_xfer = amd8111_access, .functionality = amd8111_func, }; static const struct pci_device_id amd8111_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS2) }, { 0, } }; MODULE_DEVICE_TABLE (pci, amd8111_ids); static int amd8111_probe(struct pci_dev *dev, const struct pci_device_id *id) { struct amd_smbus *smbus; int error; if (!(pci_resource_flags(dev, 0) & IORESOURCE_IO)) return -ENODEV; smbus = kzalloc(sizeof(struct amd_smbus), GFP_KERNEL); if (!smbus) return -ENOMEM; smbus->dev = dev; smbus->base = pci_resource_start(dev, 0); smbus->size = pci_resource_len(dev, 0); error = acpi_check_resource_conflict(&dev->resource[0]); if (error) { error = -ENODEV; goto out_kfree; } if (!request_region(smbus->base, smbus->size, amd8111_driver.name)) { error = -EBUSY; goto out_kfree; } smbus->adapter.owner = THIS_MODULE; snprintf(smbus->adapter.name, sizeof(smbus->adapter.name), "SMBus2 AMD8111 adapter at %04x", smbus->base); smbus->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD; smbus->adapter.algo = &smbus_algorithm; smbus->adapter.algo_data = smbus; /* set up the sysfs linkage to our parent device */ smbus->adapter.dev.parent = &dev->dev; pci_write_config_dword(smbus->dev, AMD_PCI_MISC, 0); error = i2c_add_adapter(&smbus->adapter); if 
(error) goto out_release_region; pci_set_drvdata(dev, smbus); return 0; out_release_region: release_region(smbus->base, smbus->size); out_kfree: kfree(smbus); return error; } static void amd8111_remove(struct pci_dev *dev) { struct amd_smbus *smbus = pci_get_drvdata(dev); i2c_del_adapter(&smbus->adapter); release_region(smbus->base, smbus->size); kfree(smbus); } static struct pci_driver amd8111_driver = { .name = "amd8111_smbus2", .id_table = amd8111_ids, .probe = amd8111_probe, .remove = amd8111_remove, }; module_pci_driver(amd8111_driver);
linux-master
drivers/i2c/busses/i2c-amd8111.c
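
amd8111_access() above stages a transfer entirely through EC registers: the command byte goes into AMD_SMB_CMD, the length into AMD_SMB_BCNT, the payload into the 32-byte AMD_SMB_DATA window, and writing AMD_SMB_PRTCL starts it; completion is then polled via AMD_SMB_STS_DONE. A hedged consumer sketch for the I2C-block path; the client binding and command byte are invented:

#include <linux/errno.h>
#include <linux/i2c.h>

/*
 * Hedged sketch: a 16-byte I2C block read, which selects
 * AMD_SMB_PRTCL_I2C_BLOCK_DATA and reads the bytes back from the
 * AMD_SMB_DATA window once AMD_SMB_STS_DONE is set.
 */
static int example_amd8111_read_block(struct i2c_client *client, u8 *buf)
{
	int ret = i2c_smbus_read_i2c_block_data(client, 0x00, 16, buf);

	if (ret < 0)
		return ret;

	return ret == 16 ? 0 : -EIO;	/* ret is the byte count read */
}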
// SPDX-License-Identifier: GPL-2.0-only /* * i2c_adap_pxa.c * * I2C adapter for the PXA I2C bus access. * * Copyright (C) 2002 Intrinsyc Software Inc. * Copyright (C) 2004-2005 Deep Blue Solutions Ltd. * * History: * Apr 2002: Initial version [CS] * Jun 2002: Properly separated algo/adap [FB] * Jan 2003: Fixed several bugs concerning interrupt handling [Kai-Uwe Bloem] * Jan 2003: added limited signal handling [Kai-Uwe Bloem] * Sep 2004: Major rework to ensure efficient bus handling [RMK] * Dec 2004: Added support for PXA27x and slave device probing [Liam Girdwood] * Feb 2005: Rework slave mode handling [RMK] */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/gpio/consumer.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> #include <linux/platform_data/i2c-pxa.h> #include <linux/slab.h> /* I2C register field definitions */ #define IBMR_SDAS (1 << 0) #define IBMR_SCLS (1 << 1) #define ICR_START (1 << 0) /* start bit */ #define ICR_STOP (1 << 1) /* stop bit */ #define ICR_ACKNAK (1 << 2) /* send ACK(0) or NAK(1) */ #define ICR_TB (1 << 3) /* transfer byte bit */ #define ICR_MA (1 << 4) /* master abort */ #define ICR_SCLE (1 << 5) /* master clock enable */ #define ICR_IUE (1 << 6) /* unit enable */ #define ICR_GCD (1 << 7) /* general call disable */ #define ICR_ITEIE (1 << 8) /* enable tx interrupts */ #define ICR_IRFIE (1 << 9) /* enable rx interrupts */ #define ICR_BEIE (1 << 10) /* enable bus error ints */ #define ICR_SSDIE (1 << 11) /* slave STOP detected int enable */ #define ICR_ALDIE (1 << 12) /* enable arbitration interrupt */ #define ICR_SADIE (1 << 13) /* slave address detected int enable */ #define ICR_UR (1 << 14) /* unit reset */ #define ICR_FM (1 << 15) /* fast mode */ #define ICR_HS (1 << 16) /* High Speed mode */ #define ICR_A3700_FM (1 << 16) /* fast mode for armada-3700 */ #define ICR_A3700_HS (1 << 17) /* high speed mode for armada-3700 */ #define ICR_GPIOEN (1 << 19) /* enable GPIO mode for SCL in HS */ #define ISR_RWM (1 << 0) /* read/write mode */ #define ISR_ACKNAK (1 << 1) /* ack/nak status */ #define ISR_UB (1 << 2) /* unit busy */ #define ISR_IBB (1 << 3) /* bus busy */ #define ISR_SSD (1 << 4) /* slave stop detected */ #define ISR_ALD (1 << 5) /* arbitration loss detected */ #define ISR_ITE (1 << 6) /* tx buffer empty */ #define ISR_IRF (1 << 7) /* rx buffer full */ #define ISR_GCAD (1 << 8) /* general call address detected */ #define ISR_SAD (1 << 9) /* slave address detected */ #define ISR_BED (1 << 10) /* bus error no ACK/NAK */ #define ILCR_SLV_SHIFT 0 #define ILCR_SLV_MASK (0x1FF << ILCR_SLV_SHIFT) #define ILCR_FLV_SHIFT 9 #define ILCR_FLV_MASK (0x1FF << ILCR_FLV_SHIFT) #define ILCR_HLVL_SHIFT 18 #define ILCR_HLVL_MASK (0x1FF << ILCR_HLVL_SHIFT) #define ILCR_HLVH_SHIFT 27 #define ILCR_HLVH_MASK (0x1F << ILCR_HLVH_SHIFT) #define IWCR_CNT_SHIFT 0 #define IWCR_CNT_MASK (0x1F << IWCR_CNT_SHIFT) #define IWCR_HS_CNT1_SHIFT 5 #define IWCR_HS_CNT1_MASK (0x1F << IWCR_HS_CNT1_SHIFT) #define IWCR_HS_CNT2_SHIFT 10 #define IWCR_HS_CNT2_MASK (0x1F << IWCR_HS_CNT2_SHIFT) /* need a longer timeout if we're dealing with the fact we may well be * looking at a multi-master environment */ #define DEF_TIMEOUT 32 #define NO_SLAVE (-ENXIO) #define BUS_ERROR (-EREMOTEIO) #define XFER_NAKED 
(-ECONNREFUSED) #define I2C_RETRY (-2000) /* an error has occurred retry transmit */ /* ICR initialize bit values * * 15 FM 0 (100 kHz operation) * 14 UR 0 (No unit reset) * 13 SADIE 0 (Disables the unit from interrupting on slave addresses * matching its slave address) * 12 ALDIE 0 (Disables the unit from interrupt when it loses arbitration * in master mode) * 11 SSDIE 0 (Disables interrupts from a slave stop detected, in slave mode) * 10 BEIE 1 (Enable interrupts from detected bus errors, no ACK sent) * 9 IRFIE 1 (Enable interrupts from full buffer received) * 8 ITEIE 1 (Enables the I2C unit to interrupt when transmit buffer empty) * 7 GCD 1 (Disables i2c unit response to general call messages as a slave) * 6 IUE 0 (Disable unit until we change settings) * 5 SCLE 1 (Enables the i2c clock output for master mode (drives SCL) * 4 MA 0 (Only send stop with the ICR stop bit) * 3 TB 0 (We are not transmitting a byte initially) * 2 ACKNAK 0 (Send an ACK after the unit receives a byte) * 1 STOP 0 (Do not send a STOP) * 0 START 0 (Do not send a START) */ #define I2C_ICR_INIT (ICR_BEIE | ICR_IRFIE | ICR_ITEIE | ICR_GCD | ICR_SCLE) /* I2C status register init values * * 10 BED 1 (Clear bus error detected) * 9 SAD 1 (Clear slave address detected) * 7 IRF 1 (Clear IDBR Receive Full) * 6 ITE 1 (Clear IDBR Transmit Empty) * 5 ALD 1 (Clear Arbitration Loss Detected) * 4 SSD 1 (Clear Slave Stop Detected) */ #define I2C_ISR_INIT 0x7FF /* status register init */ struct pxa_reg_layout { u32 ibmr; u32 idbr; u32 icr; u32 isr; u32 isar; u32 ilcr; u32 iwcr; u32 fm; u32 hs; }; enum pxa_i2c_types { REGS_PXA2XX, REGS_PXA3XX, REGS_CE4100, REGS_PXA910, REGS_A3700, }; /* I2C register layout definitions */ static struct pxa_reg_layout pxa_reg_layout[] = { [REGS_PXA2XX] = { .ibmr = 0x00, .idbr = 0x08, .icr = 0x10, .isr = 0x18, .isar = 0x20, .fm = ICR_FM, .hs = ICR_HS, }, [REGS_PXA3XX] = { .ibmr = 0x00, .idbr = 0x04, .icr = 0x08, .isr = 0x0c, .isar = 0x10, .fm = ICR_FM, .hs = ICR_HS, }, [REGS_CE4100] = { .ibmr = 0x14, .idbr = 0x0c, .icr = 0x00, .isr = 0x04, /* no isar register */ .fm = ICR_FM, .hs = ICR_HS, }, [REGS_PXA910] = { .ibmr = 0x00, .idbr = 0x08, .icr = 0x10, .isr = 0x18, .isar = 0x20, .ilcr = 0x28, .iwcr = 0x30, .fm = ICR_FM, .hs = ICR_HS, }, [REGS_A3700] = { .ibmr = 0x00, .idbr = 0x04, .icr = 0x08, .isr = 0x0c, .isar = 0x10, .fm = ICR_A3700_FM, .hs = ICR_A3700_HS, }, }; static const struct of_device_id i2c_pxa_dt_ids[] = { { .compatible = "mrvl,pxa-i2c", .data = (void *)REGS_PXA2XX }, { .compatible = "mrvl,pwri2c", .data = (void *)REGS_PXA3XX }, { .compatible = "mrvl,mmp-twsi", .data = (void *)REGS_PXA910 }, { .compatible = "marvell,armada-3700-i2c", .data = (void *)REGS_A3700 }, {} }; MODULE_DEVICE_TABLE(of, i2c_pxa_dt_ids); static const struct platform_device_id i2c_pxa_id_table[] = { { "pxa2xx-i2c", REGS_PXA2XX }, { "pxa3xx-pwri2c", REGS_PXA3XX }, { "ce4100-i2c", REGS_CE4100 }, { "pxa910-i2c", REGS_PXA910 }, { "armada-3700-i2c", REGS_A3700 }, { }, }; MODULE_DEVICE_TABLE(platform, i2c_pxa_id_table); struct pxa_i2c { spinlock_t lock; wait_queue_head_t wait; struct i2c_msg *msg; unsigned int msg_num; unsigned int msg_idx; unsigned int msg_ptr; unsigned int slave_addr; unsigned int req_slave_addr; struct i2c_adapter adap; struct clk *clk; #ifdef CONFIG_I2C_PXA_SLAVE struct i2c_client *slave; #endif unsigned int irqlogidx; u32 isrlog[32]; u32 icrlog[32]; void __iomem *reg_base; void __iomem *reg_ibmr; void __iomem *reg_idbr; void __iomem *reg_icr; void __iomem *reg_isr; void __iomem *reg_isar; void __iomem 
*reg_ilcr; void __iomem *reg_iwcr; unsigned long iobase; unsigned long iosize; int irq; unsigned int use_pio :1; unsigned int fast_mode :1; unsigned int high_mode:1; unsigned char master_code; unsigned long rate; bool highmode_enter; u32 fm_mask; u32 hs_mask; struct i2c_bus_recovery_info recovery; }; #define _IBMR(i2c) ((i2c)->reg_ibmr) #define _IDBR(i2c) ((i2c)->reg_idbr) #define _ICR(i2c) ((i2c)->reg_icr) #define _ISR(i2c) ((i2c)->reg_isr) #define _ISAR(i2c) ((i2c)->reg_isar) #define _ILCR(i2c) ((i2c)->reg_ilcr) #define _IWCR(i2c) ((i2c)->reg_iwcr) /* * I2C Slave mode address */ #define I2C_PXA_SLAVE_ADDR 0x1 #ifdef DEBUG struct bits { u32 mask; const char *set; const char *unset; }; #define PXA_BIT(m, s, u) { .mask = m, .set = s, .unset = u } static inline void decode_bits(const char *prefix, const struct bits *bits, int num, u32 val) { printk("%s %08x:", prefix, val); while (num--) { const char *str = val & bits->mask ? bits->set : bits->unset; if (str) pr_cont(" %s", str); bits++; } pr_cont("\n"); } static const struct bits isr_bits[] = { PXA_BIT(ISR_RWM, "RX", "TX"), PXA_BIT(ISR_ACKNAK, "NAK", "ACK"), PXA_BIT(ISR_UB, "Bsy", "Rdy"), PXA_BIT(ISR_IBB, "BusBsy", "BusRdy"), PXA_BIT(ISR_SSD, "SlaveStop", NULL), PXA_BIT(ISR_ALD, "ALD", NULL), PXA_BIT(ISR_ITE, "TxEmpty", NULL), PXA_BIT(ISR_IRF, "RxFull", NULL), PXA_BIT(ISR_GCAD, "GenCall", NULL), PXA_BIT(ISR_SAD, "SlaveAddr", NULL), PXA_BIT(ISR_BED, "BusErr", NULL), }; static void decode_ISR(unsigned int val) { decode_bits(KERN_DEBUG "ISR", isr_bits, ARRAY_SIZE(isr_bits), val); } static const struct bits icr_bits[] = { PXA_BIT(ICR_START, "START", NULL), PXA_BIT(ICR_STOP, "STOP", NULL), PXA_BIT(ICR_ACKNAK, "ACKNAK", NULL), PXA_BIT(ICR_TB, "TB", NULL), PXA_BIT(ICR_MA, "MA", NULL), PXA_BIT(ICR_SCLE, "SCLE", "scle"), PXA_BIT(ICR_IUE, "IUE", "iue"), PXA_BIT(ICR_GCD, "GCD", NULL), PXA_BIT(ICR_ITEIE, "ITEIE", NULL), PXA_BIT(ICR_IRFIE, "IRFIE", NULL), PXA_BIT(ICR_BEIE, "BEIE", NULL), PXA_BIT(ICR_SSDIE, "SSDIE", NULL), PXA_BIT(ICR_ALDIE, "ALDIE", NULL), PXA_BIT(ICR_SADIE, "SADIE", NULL), PXA_BIT(ICR_UR, "UR", "ur"), }; #ifdef CONFIG_I2C_PXA_SLAVE static void decode_ICR(unsigned int val) { decode_bits(KERN_DEBUG "ICR", icr_bits, ARRAY_SIZE(icr_bits), val); } #endif static unsigned int i2c_debug = DEBUG; static void i2c_pxa_show_state(struct pxa_i2c *i2c, int lno, const char *fname) { dev_dbg(&i2c->adap.dev, "state:%s:%d: ISR=%08x, ICR=%08x, IBMR=%02x\n", fname, lno, readl(_ISR(i2c)), readl(_ICR(i2c)), readl(_IBMR(i2c))); } #define show_state(i2c) i2c_pxa_show_state(i2c, __LINE__, __func__) static void i2c_pxa_scream_blue_murder(struct pxa_i2c *i2c, const char *why) { unsigned int i; struct device *dev = &i2c->adap.dev; dev_err(dev, "slave_0x%x error: %s\n", i2c->req_slave_addr >> 1, why); dev_err(dev, "msg_num: %d msg_idx: %d msg_ptr: %d\n", i2c->msg_num, i2c->msg_idx, i2c->msg_ptr); dev_err(dev, "IBMR: %08x IDBR: %08x ICR: %08x ISR: %08x\n", readl(_IBMR(i2c)), readl(_IDBR(i2c)), readl(_ICR(i2c)), readl(_ISR(i2c))); dev_err(dev, "log:"); for (i = 0; i < i2c->irqlogidx; i++) pr_cont(" [%03x:%05x]", i2c->isrlog[i], i2c->icrlog[i]); pr_cont("\n"); } #else /* ifdef DEBUG */ #define i2c_debug 0 #define show_state(i2c) do { } while (0) #define decode_ISR(val) do { } while (0) #define decode_ICR(val) do { } while (0) #define i2c_pxa_scream_blue_murder(i2c, why) do { } while (0) #endif /* ifdef DEBUG / else */ static void i2c_pxa_master_complete(struct pxa_i2c *i2c, int ret); static inline int i2c_pxa_is_slavemode(struct pxa_i2c *i2c) { return 
!(readl(_ICR(i2c)) & ICR_SCLE);
}

static void i2c_pxa_abort(struct pxa_i2c *i2c)
{
	int i = 250;

	if (i2c_pxa_is_slavemode(i2c)) {
		dev_dbg(&i2c->adap.dev, "%s: called in slave mode\n", __func__);
		return;
	}

	while ((i > 0) && (readl(_IBMR(i2c)) & IBMR_SDAS) == 0) {
		unsigned long icr = readl(_ICR(i2c));

		icr &= ~ICR_START;
		icr |= ICR_ACKNAK | ICR_STOP | ICR_TB;

		writel(icr, _ICR(i2c));

		show_state(i2c);

		mdelay(1);
		i--;
	}

	writel(readl(_ICR(i2c)) & ~(ICR_MA | ICR_START | ICR_STOP),
	       _ICR(i2c));
}

static int i2c_pxa_wait_bus_not_busy(struct pxa_i2c *i2c)
{
	int timeout = DEF_TIMEOUT;
	u32 isr;

	while (1) {
		isr = readl(_ISR(i2c));
		if (!(isr & (ISR_IBB | ISR_UB)))
			return 0;

		if (isr & ISR_SAD)
			timeout += 4;

		if (!timeout--)
			break;

		msleep(2);
		show_state(i2c);
	}

	show_state(i2c);

	return I2C_RETRY;
}

static int i2c_pxa_wait_master(struct pxa_i2c *i2c)
{
	unsigned long timeout = jiffies + HZ*4;

	while (time_before(jiffies, timeout)) {
		if (i2c_debug > 1)
			dev_dbg(&i2c->adap.dev, "%s: %ld: ISR=%08x, ICR=%08x, IBMR=%02x\n",
				__func__, (long)jiffies, readl(_ISR(i2c)),
				readl(_ICR(i2c)), readl(_IBMR(i2c)));

		if (readl(_ISR(i2c)) & ISR_SAD) {
			if (i2c_debug > 0)
				dev_dbg(&i2c->adap.dev, "%s: Slave detected\n",
					__func__);
			goto out;
		}

		/* wait for unit and bus being not busy, and we also do a
		 * quick check of the i2c lines themselves to ensure they've
		 * gone high...
		 */
		if ((readl(_ISR(i2c)) & (ISR_UB | ISR_IBB)) == 0 &&
		    readl(_IBMR(i2c)) == (IBMR_SCLS | IBMR_SDAS)) {
			if (i2c_debug > 0)
				dev_dbg(&i2c->adap.dev, "%s: done\n", __func__);
			return 1;
		}

		msleep(1);
	}

	if (i2c_debug > 0)
		dev_dbg(&i2c->adap.dev, "%s: did not free\n", __func__);
out:
	return 0;
}

static int i2c_pxa_set_master(struct pxa_i2c *i2c)
{
	if (i2c_debug)
		dev_dbg(&i2c->adap.dev, "setting to bus master\n");

	if ((readl(_ISR(i2c)) & (ISR_UB | ISR_IBB)) != 0) {
		dev_dbg(&i2c->adap.dev, "%s: unit is busy\n", __func__);
		if (!i2c_pxa_wait_master(i2c)) {
			dev_dbg(&i2c->adap.dev, "%s: error: unit busy\n",
				__func__);
			return I2C_RETRY;
		}
	}

	writel(readl(_ICR(i2c)) | ICR_SCLE, _ICR(i2c));
	return 0;
}

#ifdef CONFIG_I2C_PXA_SLAVE
static int i2c_pxa_wait_slave(struct pxa_i2c *i2c)
{
	unsigned long timeout = jiffies + HZ*1;

	/* wait for stop */
	show_state(i2c);

	while (time_before(jiffies, timeout)) {
		if (i2c_debug > 1)
			dev_dbg(&i2c->adap.dev, "%s: %ld: ISR=%08x, ICR=%08x, IBMR=%02x\n",
				__func__, (long)jiffies, readl(_ISR(i2c)),
				readl(_ICR(i2c)), readl(_IBMR(i2c)));

		if ((readl(_ISR(i2c)) & (ISR_UB|ISR_IBB)) == 0 ||
		    (readl(_ISR(i2c)) & ISR_SAD) != 0 ||
		    (readl(_ICR(i2c)) & ICR_SCLE) == 0) {
			if (i2c_debug > 1)
				dev_dbg(&i2c->adap.dev, "%s: done\n", __func__);
			return 1;
		}

		msleep(1);
	}

	if (i2c_debug > 0)
		dev_dbg(&i2c->adap.dev, "%s: did not free\n", __func__);

	return 0;
}

/*
 * clear the hold on the bus, and take care of anything else
 * that has been configured
 */
static void i2c_pxa_set_slave(struct pxa_i2c *i2c, int errcode)
{
	show_state(i2c);

	if (errcode < 0) {
		udelay(100);   /* simple delay */
	} else {
		/* we need to wait for the stop condition to end */

		/* if we were in stop, then clear... */
		if (readl(_ICR(i2c)) & ICR_STOP) {
			udelay(100);
			writel(readl(_ICR(i2c)) & ~ICR_STOP, _ICR(i2c));
		}

		if (!i2c_pxa_wait_slave(i2c)) {
			dev_err(&i2c->adap.dev, "%s: wait timed out\n",
				__func__);
			return;
		}
	}

	writel(readl(_ICR(i2c)) & ~(ICR_STOP|ICR_ACKNAK|ICR_MA), _ICR(i2c));
	writel(readl(_ICR(i2c)) & ~ICR_SCLE, _ICR(i2c));

	if (i2c_debug) {
		dev_dbg(&i2c->adap.dev, "ICR now %08x, ISR %08x\n",
			readl(_ICR(i2c)), readl(_ISR(i2c)));
		decode_ICR(readl(_ICR(i2c)));
	}
}
#else
#define i2c_pxa_set_slave(i2c, err)	do { } while (0)
#endif

static void i2c_pxa_do_reset(struct pxa_i2c *i2c)
{
	/* reset according to 9.8 */
	writel(ICR_UR, _ICR(i2c));
	writel(I2C_ISR_INIT, _ISR(i2c));
	writel(readl(_ICR(i2c)) & ~ICR_UR, _ICR(i2c));

	if (i2c->reg_isar && IS_ENABLED(CONFIG_I2C_PXA_SLAVE))
		writel(i2c->slave_addr, _ISAR(i2c));

	/* set control register values */
	writel(I2C_ICR_INIT | (i2c->fast_mode ? i2c->fm_mask : 0), _ICR(i2c));
	writel(readl(_ICR(i2c)) | (i2c->high_mode ? i2c->hs_mask : 0), _ICR(i2c));

#ifdef CONFIG_I2C_PXA_SLAVE
	dev_info(&i2c->adap.dev, "Enabling slave mode\n");
	writel(readl(_ICR(i2c)) | ICR_SADIE | ICR_ALDIE | ICR_SSDIE, _ICR(i2c));
#endif

	i2c_pxa_set_slave(i2c, 0);
}

static void i2c_pxa_enable(struct pxa_i2c *i2c)
{
	/* enable unit */
	writel(readl(_ICR(i2c)) | ICR_IUE, _ICR(i2c));
	udelay(100);
}

static void i2c_pxa_reset(struct pxa_i2c *i2c)
{
	pr_debug("Resetting I2C Controller Unit\n");

	/* abort any transfer currently under way */
	i2c_pxa_abort(i2c);
	i2c_pxa_do_reset(i2c);
	i2c_pxa_enable(i2c);
}

#ifdef CONFIG_I2C_PXA_SLAVE
/*
 * PXA I2C Slave mode
 */

static void i2c_pxa_slave_txempty(struct pxa_i2c *i2c, u32 isr)
{
	if (isr & ISR_BED) {
		/* what should we do here? */
	} else {
		u8 byte = 0;

		if (i2c->slave != NULL)
			i2c_slave_event(i2c->slave, I2C_SLAVE_READ_PROCESSED,
					&byte);

		writel(byte, _IDBR(i2c));
		writel(readl(_ICR(i2c)) | ICR_TB, _ICR(i2c));   /* allow next byte */
	}
}

static void i2c_pxa_slave_rxfull(struct pxa_i2c *i2c, u32 isr)
{
	u8 byte = readl(_IDBR(i2c));

	if (i2c->slave != NULL)
		i2c_slave_event(i2c->slave, I2C_SLAVE_WRITE_RECEIVED, &byte);

	writel(readl(_ICR(i2c)) | ICR_TB, _ICR(i2c));
}

static void i2c_pxa_slave_start(struct pxa_i2c *i2c, u32 isr)
{
	int timeout;

	if (i2c_debug > 0)
		dev_dbg(&i2c->adap.dev, "SAD, mode is slave-%cx\n",
			(isr & ISR_RWM) ? 'r' : 't');

	if (i2c->slave != NULL) {
		if (isr & ISR_RWM) {
			u8 byte = 0;

			i2c_slave_event(i2c->slave, I2C_SLAVE_READ_REQUESTED,
					&byte);
			writel(byte, _IDBR(i2c));
		} else {
			i2c_slave_event(i2c->slave, I2C_SLAVE_WRITE_REQUESTED,
					NULL);
		}
	}

	/*
	 * slave could interrupt in the middle of us generating a
	 * start condition... if this happens, we'd better back off
	 * and stop holding the poor thing up
	 */
	writel(readl(_ICR(i2c)) & ~(ICR_START|ICR_STOP), _ICR(i2c));
	writel(readl(_ICR(i2c)) | ICR_TB, _ICR(i2c));

	timeout = 0x10000;

	while (1) {
		if ((readl(_IBMR(i2c)) & IBMR_SCLS) == IBMR_SCLS)
			break;

		timeout--;

		if (timeout <= 0) {
			dev_err(&i2c->adap.dev, "timeout waiting for SCL high\n");
			break;
		}
	}

	writel(readl(_ICR(i2c)) & ~ICR_SCLE, _ICR(i2c));
}

static void i2c_pxa_slave_stop(struct pxa_i2c *i2c)
{
	if (i2c_debug > 2)
		dev_dbg(&i2c->adap.dev, "ISR: SSD (Slave Stop)\n");

	if (i2c->slave != NULL)
		i2c_slave_event(i2c->slave, I2C_SLAVE_STOP, NULL);

	if (i2c_debug > 2)
		dev_dbg(&i2c->adap.dev, "ISR: SSD (Slave Stop) acked\n");

	/*
	 * If we have a master-mode message waiting,
	 * kick it off now that the slave has completed.
*/ if (i2c->msg) i2c_pxa_master_complete(i2c, I2C_RETRY); } static int i2c_pxa_slave_reg(struct i2c_client *slave) { struct pxa_i2c *i2c = slave->adapter->algo_data; if (i2c->slave) return -EBUSY; if (!i2c->reg_isar) return -EAFNOSUPPORT; i2c->slave = slave; i2c->slave_addr = slave->addr; writel(i2c->slave_addr, _ISAR(i2c)); return 0; } static int i2c_pxa_slave_unreg(struct i2c_client *slave) { struct pxa_i2c *i2c = slave->adapter->algo_data; WARN_ON(!i2c->slave); i2c->slave_addr = I2C_PXA_SLAVE_ADDR; writel(i2c->slave_addr, _ISAR(i2c)); i2c->slave = NULL; return 0; } #else static void i2c_pxa_slave_txempty(struct pxa_i2c *i2c, u32 isr) { if (isr & ISR_BED) { /* what should we do here? */ } else { writel(0, _IDBR(i2c)); writel(readl(_ICR(i2c)) | ICR_TB, _ICR(i2c)); } } static void i2c_pxa_slave_rxfull(struct pxa_i2c *i2c, u32 isr) { writel(readl(_ICR(i2c)) | ICR_TB | ICR_ACKNAK, _ICR(i2c)); } static void i2c_pxa_slave_start(struct pxa_i2c *i2c, u32 isr) { int timeout; /* * slave could interrupt in the middle of us generating a * start condition... if this happens, we'd better back off * and stop holding the poor thing up */ writel(readl(_ICR(i2c)) & ~(ICR_START|ICR_STOP), _ICR(i2c)); writel(readl(_ICR(i2c)) | ICR_TB | ICR_ACKNAK, _ICR(i2c)); timeout = 0x10000; while (1) { if ((readl(_IBMR(i2c)) & IBMR_SCLS) == IBMR_SCLS) break; timeout--; if (timeout <= 0) { dev_err(&i2c->adap.dev, "timeout waiting for SCL high\n"); break; } } writel(readl(_ICR(i2c)) & ~ICR_SCLE, _ICR(i2c)); } static void i2c_pxa_slave_stop(struct pxa_i2c *i2c) { if (i2c->msg) i2c_pxa_master_complete(i2c, I2C_RETRY); } #endif /* * PXA I2C Master mode */ static inline void i2c_pxa_start_message(struct pxa_i2c *i2c) { u32 icr; /* * Step 1: target slave address into IDBR */ i2c->req_slave_addr = i2c_8bit_addr_from_msg(i2c->msg); writel(i2c->req_slave_addr, _IDBR(i2c)); /* * Step 2: initiate the write. */ icr = readl(_ICR(i2c)) & ~(ICR_STOP | ICR_ALDIE); writel(icr | ICR_START | ICR_TB, _ICR(i2c)); } static inline void i2c_pxa_stop_message(struct pxa_i2c *i2c) { u32 icr; /* Clear the START, STOP, ACK, TB and MA flags */ icr = readl(_ICR(i2c)); icr &= ~(ICR_START | ICR_STOP | ICR_ACKNAK | ICR_TB | ICR_MA); writel(icr, _ICR(i2c)); } /* * PXA I2C send master code * 1. Load master code to IDBR and send it. * Note for HS mode, set ICR [GPIOEN]. * 2. Wait until win arbitration. */ static int i2c_pxa_send_mastercode(struct pxa_i2c *i2c) { u32 icr; long timeout; spin_lock_irq(&i2c->lock); i2c->highmode_enter = true; writel(i2c->master_code, _IDBR(i2c)); icr = readl(_ICR(i2c)) & ~(ICR_STOP | ICR_ALDIE); icr |= ICR_GPIOEN | ICR_START | ICR_TB | ICR_ITEIE; writel(icr, _ICR(i2c)); spin_unlock_irq(&i2c->lock); timeout = wait_event_timeout(i2c->wait, i2c->highmode_enter == false, HZ * 1); i2c->highmode_enter = false; return (timeout == 0) ? I2C_RETRY : 0; } /* * i2c_pxa_master_complete - complete the message and wake up. */ static void i2c_pxa_master_complete(struct pxa_i2c *i2c, int ret) { i2c->msg_ptr = 0; i2c->msg = NULL; i2c->msg_idx ++; i2c->msg_num = 0; if (ret) i2c->msg_idx = ret; if (!i2c->use_pio) wake_up(&i2c->wait); } static void i2c_pxa_irq_txempty(struct pxa_i2c *i2c, u32 isr) { u32 icr = readl(_ICR(i2c)) & ~(ICR_START|ICR_STOP|ICR_ACKNAK|ICR_TB); again: /* * If ISR_ALD is set, we lost arbitration. */ if (isr & ISR_ALD) { /* * Do we need to do anything here? The PXA docs * are vague about what happens. */ i2c_pxa_scream_blue_murder(i2c, "ALD set"); /* * We ignore this error. 
We seem to see spurious ALDs * for seemingly no reason. If we handle them as I think * they should, we end up causing an I2C error, which * is painful for some systems. */ return; /* ignore */ } if ((isr & ISR_BED) && (!((i2c->msg->flags & I2C_M_IGNORE_NAK) && (isr & ISR_ACKNAK)))) { int ret = BUS_ERROR; /* * I2C bus error - either the device NAK'd us, or * something more serious happened. If we were NAK'd * on the initial address phase, we can retry. */ if (isr & ISR_ACKNAK) { if (i2c->msg_ptr == 0 && i2c->msg_idx == 0) ret = NO_SLAVE; else ret = XFER_NAKED; } i2c_pxa_master_complete(i2c, ret); } else if (isr & ISR_RWM) { /* * Read mode. We have just sent the address byte, and * now we must initiate the transfer. */ if (i2c->msg_ptr == i2c->msg->len - 1 && i2c->msg_idx == i2c->msg_num - 1) icr |= ICR_STOP | ICR_ACKNAK; icr |= ICR_ALDIE | ICR_TB; } else if (i2c->msg_ptr < i2c->msg->len) { /* * Write mode. Write the next data byte. */ writel(i2c->msg->buf[i2c->msg_ptr++], _IDBR(i2c)); icr |= ICR_ALDIE | ICR_TB; /* * If this is the last byte of the last message or last byte * of any message with I2C_M_STOP (e.g. SCCB), send a STOP. */ if ((i2c->msg_ptr == i2c->msg->len) && ((i2c->msg->flags & I2C_M_STOP) || (i2c->msg_idx == i2c->msg_num - 1))) icr |= ICR_STOP; } else if (i2c->msg_idx < i2c->msg_num - 1) { /* * Next segment of the message. */ i2c->msg_ptr = 0; i2c->msg_idx ++; i2c->msg++; /* * If we aren't doing a repeated start and address, * go back and try to send the next byte. Note that * we do not support switching the R/W direction here. */ if (i2c->msg->flags & I2C_M_NOSTART) goto again; /* * Write the next address. */ i2c->req_slave_addr = i2c_8bit_addr_from_msg(i2c->msg); writel(i2c->req_slave_addr, _IDBR(i2c)); /* * And trigger a repeated start, and send the byte. */ icr &= ~ICR_ALDIE; icr |= ICR_START | ICR_TB; } else { if (i2c->msg->len == 0) icr |= ICR_MA; i2c_pxa_master_complete(i2c, 0); } i2c->icrlog[i2c->irqlogidx-1] = icr; writel(icr, _ICR(i2c)); show_state(i2c); } static void i2c_pxa_irq_rxfull(struct pxa_i2c *i2c, u32 isr) { u32 icr = readl(_ICR(i2c)) & ~(ICR_START|ICR_STOP|ICR_ACKNAK|ICR_TB); /* * Read the byte. */ i2c->msg->buf[i2c->msg_ptr++] = readl(_IDBR(i2c)); if (i2c->msg_ptr < i2c->msg->len) { /* * If this is the last byte of the last * message, send a STOP. */ if (i2c->msg_ptr == i2c->msg->len - 1) icr |= ICR_STOP | ICR_ACKNAK; icr |= ICR_ALDIE | ICR_TB; } else { i2c_pxa_master_complete(i2c, 0); } i2c->icrlog[i2c->irqlogidx-1] = icr; writel(icr, _ICR(i2c)); } #define VALID_INT_SOURCE (ISR_SSD | ISR_ALD | ISR_ITE | ISR_IRF | \ ISR_SAD | ISR_BED) static irqreturn_t i2c_pxa_handler(int this_irq, void *dev_id) { struct pxa_i2c *i2c = dev_id; u32 isr = readl(_ISR(i2c)); if (!(isr & VALID_INT_SOURCE)) return IRQ_NONE; if (i2c_debug > 2 && 0) { dev_dbg(&i2c->adap.dev, "%s: ISR=%08x, ICR=%08x, IBMR=%02x\n", __func__, isr, readl(_ICR(i2c)), readl(_IBMR(i2c))); decode_ISR(isr); } if (i2c->irqlogidx < ARRAY_SIZE(i2c->isrlog)) i2c->isrlog[i2c->irqlogidx++] = isr; show_state(i2c); /* * Always clear all pending IRQs. 
*/ writel(isr & VALID_INT_SOURCE, _ISR(i2c)); if (isr & ISR_SAD) i2c_pxa_slave_start(i2c, isr); if (isr & ISR_SSD) i2c_pxa_slave_stop(i2c); if (i2c_pxa_is_slavemode(i2c)) { if (isr & ISR_ITE) i2c_pxa_slave_txempty(i2c, isr); if (isr & ISR_IRF) i2c_pxa_slave_rxfull(i2c, isr); } else if (i2c->msg && (!i2c->highmode_enter)) { if (isr & ISR_ITE) i2c_pxa_irq_txempty(i2c, isr); if (isr & ISR_IRF) i2c_pxa_irq_rxfull(i2c, isr); } else if ((isr & ISR_ITE) && i2c->highmode_enter) { i2c->highmode_enter = false; wake_up(&i2c->wait); } else { i2c_pxa_scream_blue_murder(i2c, "spurious irq"); } return IRQ_HANDLED; } /* * We are protected by the adapter bus mutex. */ static int i2c_pxa_do_xfer(struct pxa_i2c *i2c, struct i2c_msg *msg, int num) { long timeout; int ret; /* * Wait for the bus to become free. */ ret = i2c_pxa_wait_bus_not_busy(i2c); if (ret) { dev_err(&i2c->adap.dev, "i2c_pxa: timeout waiting for bus free\n"); i2c_recover_bus(&i2c->adap); goto out; } /* * Set master mode. */ ret = i2c_pxa_set_master(i2c); if (ret) { dev_err(&i2c->adap.dev, "i2c_pxa_set_master: error %d\n", ret); goto out; } if (i2c->high_mode) { ret = i2c_pxa_send_mastercode(i2c); if (ret) { dev_err(&i2c->adap.dev, "i2c_pxa_send_mastercode timeout\n"); goto out; } } spin_lock_irq(&i2c->lock); i2c->msg = msg; i2c->msg_num = num; i2c->msg_idx = 0; i2c->msg_ptr = 0; i2c->irqlogidx = 0; i2c_pxa_start_message(i2c); spin_unlock_irq(&i2c->lock); /* * The rest of the processing occurs in the interrupt handler. */ timeout = wait_event_timeout(i2c->wait, i2c->msg_num == 0, HZ * 5); i2c_pxa_stop_message(i2c); /* * We place the return code in i2c->msg_idx. */ ret = i2c->msg_idx; if (!timeout && i2c->msg_num) { i2c_pxa_scream_blue_murder(i2c, "timeout with active message"); i2c_recover_bus(&i2c->adap); ret = I2C_RETRY; } out: return ret; } static int i2c_pxa_internal_xfer(struct pxa_i2c *i2c, struct i2c_msg *msgs, int num, int (*xfer)(struct pxa_i2c *, struct i2c_msg *, int num)) { int ret, i; for (i = 0; ; ) { ret = xfer(i2c, msgs, num); if (ret != I2C_RETRY && ret != NO_SLAVE) goto out; if (++i >= i2c->adap.retries) break; if (i2c_debug) dev_dbg(&i2c->adap.dev, "Retrying transmission\n"); udelay(100); } if (ret != NO_SLAVE) i2c_pxa_scream_blue_murder(i2c, "exhausted retries"); ret = -EREMOTEIO; out: i2c_pxa_set_slave(i2c, ret); return ret; } static int i2c_pxa_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) { struct pxa_i2c *i2c = adap->algo_data; return i2c_pxa_internal_xfer(i2c, msgs, num, i2c_pxa_do_xfer); } static u32 i2c_pxa_functionality(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING | I2C_FUNC_NOSTART; } static const struct i2c_algorithm i2c_pxa_algorithm = { .master_xfer = i2c_pxa_xfer, .functionality = i2c_pxa_functionality, #ifdef CONFIG_I2C_PXA_SLAVE .reg_slave = i2c_pxa_slave_reg, .unreg_slave = i2c_pxa_slave_unreg, #endif }; /* Non-interrupt mode support */ static int i2c_pxa_pio_set_master(struct pxa_i2c *i2c) { /* make timeout the same as for interrupt based functions */ long timeout = 2 * DEF_TIMEOUT; /* * Wait for the bus to become free. */ while (timeout-- && readl(_ISR(i2c)) & (ISR_IBB | ISR_UB)) udelay(1000); if (timeout < 0) { show_state(i2c); dev_err(&i2c->adap.dev, "i2c_pxa: timeout waiting for bus free (set_master)\n"); return I2C_RETRY; } /* * Set master mode. 
*/ writel(readl(_ICR(i2c)) | ICR_SCLE, _ICR(i2c)); return 0; } static int i2c_pxa_do_pio_xfer(struct pxa_i2c *i2c, struct i2c_msg *msg, int num) { unsigned long timeout = 500000; /* 5 seconds */ int ret = 0; ret = i2c_pxa_pio_set_master(i2c); if (ret) goto out; i2c->msg = msg; i2c->msg_num = num; i2c->msg_idx = 0; i2c->msg_ptr = 0; i2c->irqlogidx = 0; i2c_pxa_start_message(i2c); while (i2c->msg_num > 0 && --timeout) { i2c_pxa_handler(0, i2c); udelay(10); } i2c_pxa_stop_message(i2c); /* * We place the return code in i2c->msg_idx. */ ret = i2c->msg_idx; out: if (timeout == 0) { i2c_pxa_scream_blue_murder(i2c, "timeout (do_pio_xfer)"); ret = I2C_RETRY; } return ret; } static int i2c_pxa_pio_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) { struct pxa_i2c *i2c = adap->algo_data; /* If the I2C controller is disabled we need to reset it (probably due to a suspend/resume destroying state). We do this here as we can then avoid worrying about resuming the controller before its users. */ if (!(readl(_ICR(i2c)) & ICR_IUE)) i2c_pxa_reset(i2c); return i2c_pxa_internal_xfer(i2c, msgs, num, i2c_pxa_do_pio_xfer); } static const struct i2c_algorithm i2c_pxa_pio_algorithm = { .master_xfer = i2c_pxa_pio_xfer, .functionality = i2c_pxa_functionality, #ifdef CONFIG_I2C_PXA_SLAVE .reg_slave = i2c_pxa_slave_reg, .unreg_slave = i2c_pxa_slave_unreg, #endif }; static int i2c_pxa_probe_dt(struct platform_device *pdev, struct pxa_i2c *i2c, enum pxa_i2c_types *i2c_types) { struct device_node *np = pdev->dev.of_node; const struct of_device_id *of_id = of_match_device(i2c_pxa_dt_ids, &pdev->dev); if (!of_id) return 1; /* For device tree we always use the dynamic or alias-assigned ID */ i2c->adap.nr = -1; i2c->use_pio = of_property_read_bool(np, "mrvl,i2c-polling"); i2c->fast_mode = of_property_read_bool(np, "mrvl,i2c-fast-mode"); *i2c_types = (enum pxa_i2c_types)(of_id->data); return 0; } static int i2c_pxa_probe_pdata(struct platform_device *pdev, struct pxa_i2c *i2c, enum pxa_i2c_types *i2c_types) { struct i2c_pxa_platform_data *plat = dev_get_platdata(&pdev->dev); const struct platform_device_id *id = platform_get_device_id(pdev); *i2c_types = id->driver_data; if (plat) { i2c->use_pio = plat->use_pio; i2c->fast_mode = plat->fast_mode; i2c->high_mode = plat->high_mode; i2c->master_code = plat->master_code; if (!i2c->master_code) i2c->master_code = 0xe; i2c->rate = plat->rate; } return 0; } static void i2c_pxa_prepare_recovery(struct i2c_adapter *adap) { struct pxa_i2c *i2c = adap->algo_data; u32 ibmr = readl(_IBMR(i2c)); /* * Program the GPIOs to reflect the current I2C bus state while * we transition to recovery; this avoids glitching the bus. */ gpiod_set_value(i2c->recovery.scl_gpiod, ibmr & IBMR_SCLS); gpiod_set_value(i2c->recovery.sda_gpiod, ibmr & IBMR_SDAS); } static void i2c_pxa_unprepare_recovery(struct i2c_adapter *adap) { struct pxa_i2c *i2c = adap->algo_data; struct i2c_bus_recovery_info *bri = adap->bus_recovery_info; u32 isr; /* * The bus should now be free. Clear up the I2C controller before * handing control of the bus back to avoid the bus changing state. 
*/ isr = readl(_ISR(i2c)); if (isr & (ISR_UB | ISR_IBB)) { dev_dbg(&i2c->adap.dev, "recovery: resetting controller, ISR=0x%08x\n", isr); i2c_pxa_do_reset(i2c); } WARN_ON(pinctrl_select_state(bri->pinctrl, bri->pins_default)); dev_dbg(&i2c->adap.dev, "recovery: IBMR 0x%08x ISR 0x%08x\n", readl(_IBMR(i2c)), readl(_ISR(i2c))); i2c_pxa_enable(i2c); } static int i2c_pxa_init_recovery(struct pxa_i2c *i2c) { struct i2c_bus_recovery_info *bri = &i2c->recovery; struct device *dev = i2c->adap.dev.parent; /* * When slave mode is enabled, we are not the only master on the bus. * Bus recovery can only be performed when we are the master, which * we can't be certain of. Therefore, when slave mode is enabled, do * not configure bus recovery. */ if (IS_ENABLED(CONFIG_I2C_PXA_SLAVE)) return 0; bri->pinctrl = devm_pinctrl_get(dev); if (PTR_ERR(bri->pinctrl) == -ENODEV) { bri->pinctrl = NULL; return 0; } if (IS_ERR(bri->pinctrl)) return PTR_ERR(bri->pinctrl); bri->prepare_recovery = i2c_pxa_prepare_recovery; bri->unprepare_recovery = i2c_pxa_unprepare_recovery; i2c->adap.bus_recovery_info = bri; return 0; } static int i2c_pxa_probe(struct platform_device *dev) { struct i2c_pxa_platform_data *plat = dev_get_platdata(&dev->dev); enum pxa_i2c_types i2c_type; struct pxa_i2c *i2c; struct resource *res; int ret, irq; i2c = devm_kzalloc(&dev->dev, sizeof(struct pxa_i2c), GFP_KERNEL); if (!i2c) return -ENOMEM; /* Default adapter num to device id; i2c_pxa_probe_dt can override. */ i2c->adap.nr = dev->id; i2c->adap.owner = THIS_MODULE; i2c->adap.retries = 5; i2c->adap.algo_data = i2c; i2c->adap.dev.parent = &dev->dev; #ifdef CONFIG_OF i2c->adap.dev.of_node = dev->dev.of_node; #endif i2c->reg_base = devm_platform_get_and_ioremap_resource(dev, 0, &res); if (IS_ERR(i2c->reg_base)) return PTR_ERR(i2c->reg_base); irq = platform_get_irq(dev, 0); if (irq < 0) return irq; ret = i2c_pxa_init_recovery(i2c); if (ret) return ret; ret = i2c_pxa_probe_dt(dev, i2c, &i2c_type); if (ret > 0) ret = i2c_pxa_probe_pdata(dev, i2c, &i2c_type); if (ret < 0) return ret; spin_lock_init(&i2c->lock); init_waitqueue_head(&i2c->wait); strscpy(i2c->adap.name, "pxa_i2c-i2c", sizeof(i2c->adap.name)); i2c->clk = devm_clk_get(&dev->dev, NULL); if (IS_ERR(i2c->clk)) return dev_err_probe(&dev->dev, PTR_ERR(i2c->clk), "failed to get the clk\n"); i2c->reg_ibmr = i2c->reg_base + pxa_reg_layout[i2c_type].ibmr; i2c->reg_idbr = i2c->reg_base + pxa_reg_layout[i2c_type].idbr; i2c->reg_icr = i2c->reg_base + pxa_reg_layout[i2c_type].icr; i2c->reg_isr = i2c->reg_base + pxa_reg_layout[i2c_type].isr; i2c->fm_mask = pxa_reg_layout[i2c_type].fm; i2c->hs_mask = pxa_reg_layout[i2c_type].hs; if (i2c_type != REGS_CE4100) i2c->reg_isar = i2c->reg_base + pxa_reg_layout[i2c_type].isar; if (i2c_type == REGS_PXA910) { i2c->reg_ilcr = i2c->reg_base + pxa_reg_layout[i2c_type].ilcr; i2c->reg_iwcr = i2c->reg_base + pxa_reg_layout[i2c_type].iwcr; } i2c->iobase = res->start; i2c->iosize = resource_size(res); i2c->irq = irq; i2c->slave_addr = I2C_PXA_SLAVE_ADDR; i2c->highmode_enter = false; if (plat) { i2c->adap.class = plat->class; } if (i2c->high_mode) { if (i2c->rate) { clk_set_rate(i2c->clk, i2c->rate); pr_info("i2c: <%s> set rate to %ld\n", i2c->adap.name, clk_get_rate(i2c->clk)); } else pr_warn("i2c: <%s> clock rate not set\n", i2c->adap.name); } clk_prepare_enable(i2c->clk); if (i2c->use_pio) { i2c->adap.algo = &i2c_pxa_pio_algorithm; } else { i2c->adap.algo = &i2c_pxa_algorithm; ret = devm_request_irq(&dev->dev, irq, i2c_pxa_handler, IRQF_SHARED | IRQF_NO_SUSPEND, 
dev_name(&dev->dev), i2c); if (ret) { dev_err(&dev->dev, "failed to request irq: %d\n", ret); goto ereqirq; } } i2c_pxa_reset(i2c); ret = i2c_add_numbered_adapter(&i2c->adap); if (ret < 0) goto ereqirq; platform_set_drvdata(dev, i2c); #ifdef CONFIG_I2C_PXA_SLAVE dev_info(&i2c->adap.dev, " PXA I2C adapter, slave address %d\n", i2c->slave_addr); #else dev_info(&i2c->adap.dev, " PXA I2C adapter\n"); #endif return 0; ereqirq: clk_disable_unprepare(i2c->clk); return ret; } static void i2c_pxa_remove(struct platform_device *dev) { struct pxa_i2c *i2c = platform_get_drvdata(dev); i2c_del_adapter(&i2c->adap); clk_disable_unprepare(i2c->clk); } static int i2c_pxa_suspend_noirq(struct device *dev) { struct pxa_i2c *i2c = dev_get_drvdata(dev); clk_disable(i2c->clk); return 0; } static int i2c_pxa_resume_noirq(struct device *dev) { struct pxa_i2c *i2c = dev_get_drvdata(dev); clk_enable(i2c->clk); i2c_pxa_reset(i2c); return 0; } static const struct dev_pm_ops i2c_pxa_dev_pm_ops = { .suspend_noirq = i2c_pxa_suspend_noirq, .resume_noirq = i2c_pxa_resume_noirq, }; static struct platform_driver i2c_pxa_driver = { .probe = i2c_pxa_probe, .remove_new = i2c_pxa_remove, .driver = { .name = "pxa2xx-i2c", .pm = pm_sleep_ptr(&i2c_pxa_dev_pm_ops), .of_match_table = i2c_pxa_dt_ids, }, .id_table = i2c_pxa_id_table, }; static int __init i2c_adap_pxa_init(void) { return platform_driver_register(&i2c_pxa_driver); } static void __exit i2c_adap_pxa_exit(void) { platform_driver_unregister(&i2c_pxa_driver); } MODULE_LICENSE("GPL"); subsys_initcall(i2c_adap_pxa_init); module_exit(i2c_adap_pxa_exit);
linux-master
drivers/i2c/busses/i2c-pxa.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/i2c.h> #include <linux/pci.h> #include <linux/psp-platform-access.h> #include <linux/psp.h> #include <linux/workqueue.h> #include "i2c-designware-core.h" #define PSP_I2C_RESERVATION_TIME_MS 100 #define PSP_I2C_REQ_RETRY_CNT 400 #define PSP_I2C_REQ_RETRY_DELAY_US (25 * USEC_PER_MSEC) #define PSP_I2C_REQ_STS_OK 0x0 #define PSP_I2C_REQ_STS_BUS_BUSY 0x1 #define PSP_I2C_REQ_STS_INV_PARAM 0x3 enum psp_i2c_req_type { PSP_I2C_REQ_ACQUIRE, PSP_I2C_REQ_RELEASE, PSP_I2C_REQ_MAX }; struct psp_i2c_req { struct psp_req_buffer_hdr hdr; enum psp_i2c_req_type type; }; static DEFINE_MUTEX(psp_i2c_access_mutex); static unsigned long psp_i2c_sem_acquired; static u32 psp_i2c_access_count; static bool psp_i2c_mbox_fail; static struct device *psp_i2c_dev; static int (*_psp_send_i2c_req)(struct psp_i2c_req *req); /* Helper to verify status returned by PSP */ static int check_i2c_req_sts(struct psp_i2c_req *req) { u32 status; /* Status field in command-response buffer is updated by PSP */ status = READ_ONCE(req->hdr.status); switch (status) { case PSP_I2C_REQ_STS_OK: return 0; case PSP_I2C_REQ_STS_BUS_BUSY: return -EBUSY; case PSP_I2C_REQ_STS_INV_PARAM: default: return -EIO; } } /* * Errors in x86-PSP i2c-arbitration protocol may occur at two levels: * 1. mailbox communication - PSP is not operational or some IO errors with * basic communication had happened. * 2. i2c-requests - PSP refuses to grant i2c arbitration to x86 for too long. * * In order to distinguish between these in error handling code all mailbox * communication errors on the first level (from CCP symbols) will be passed * up and if -EIO is returned the second level will be checked. */ static int psp_send_i2c_req_cezanne(struct psp_i2c_req *req) { int ret; ret = psp_send_platform_access_msg(PSP_I2C_REQ_BUS_CMD, (struct psp_request *)req); if (ret == -EIO) return check_i2c_req_sts(req); return ret; } static int psp_send_i2c_req_doorbell(struct psp_i2c_req *req) { int ret; ret = psp_ring_platform_doorbell(req->type, &req->hdr.status); if (ret == -EIO) return check_i2c_req_sts(req); return ret; } static int psp_send_i2c_req(enum psp_i2c_req_type i2c_req_type) { struct psp_i2c_req *req; unsigned long start; int status, ret; /* Allocate command-response buffer */ req = kzalloc(sizeof(*req), GFP_KERNEL); if (!req) return -ENOMEM; req->hdr.payload_size = sizeof(*req); req->type = i2c_req_type; start = jiffies; ret = read_poll_timeout(_psp_send_i2c_req, status, (status != -EBUSY), PSP_I2C_REQ_RETRY_DELAY_US, PSP_I2C_REQ_RETRY_CNT * PSP_I2C_REQ_RETRY_DELAY_US, 0, req); if (ret) { dev_err(psp_i2c_dev, "Timed out waiting for PSP to %s I2C bus\n", (i2c_req_type == PSP_I2C_REQ_ACQUIRE) ? "release" : "acquire"); goto cleanup; } ret = status; if (ret) { dev_err(psp_i2c_dev, "PSP communication error\n"); goto cleanup; } dev_dbg(psp_i2c_dev, "Request accepted by PSP after %ums\n", jiffies_to_msecs(jiffies - start)); cleanup: if (ret) { dev_err(psp_i2c_dev, "Assume i2c bus is for exclusive host usage\n"); psp_i2c_mbox_fail = true; } kfree(req); return ret; } static void release_bus(void) { int status; if (!psp_i2c_sem_acquired) return; status = psp_send_i2c_req(PSP_I2C_REQ_RELEASE); if (status) return; dev_dbg(psp_i2c_dev, "PSP semaphore held for %ums\n", jiffies_to_msecs(jiffies - psp_i2c_sem_acquired)); psp_i2c_sem_acquired = 0; } static void psp_release_i2c_bus_deferred(struct work_struct *work) { mutex_lock(&psp_i2c_access_mutex); /* * If there is any pending transaction, cannot release the bus here. 
* psp_release_i2c_bus will take care of this later. */ if (psp_i2c_access_count) goto cleanup; release_bus(); cleanup: mutex_unlock(&psp_i2c_access_mutex); } static DECLARE_DELAYED_WORK(release_queue, psp_release_i2c_bus_deferred); static int psp_acquire_i2c_bus(void) { int status; mutex_lock(&psp_i2c_access_mutex); /* Return early if mailbox malfunctioned */ if (psp_i2c_mbox_fail) goto cleanup; psp_i2c_access_count++; /* * No need to request bus arbitration once we are inside semaphore * reservation period. */ if (psp_i2c_sem_acquired) goto cleanup; status = psp_send_i2c_req(PSP_I2C_REQ_ACQUIRE); if (status) goto cleanup; psp_i2c_sem_acquired = jiffies; schedule_delayed_work(&release_queue, msecs_to_jiffies(PSP_I2C_RESERVATION_TIME_MS)); /* * In case of errors with the PSP arbitrator the psp_i2c_mbox_fail variable * is set above. As a consequence consecutive calls to acquire will bypass * communication with PSP. In any case the i2c bus is granted to the caller, * thus always return success. */ cleanup: mutex_unlock(&psp_i2c_access_mutex); return 0; } static void psp_release_i2c_bus(void) { mutex_lock(&psp_i2c_access_mutex); /* Return early if the mailbox malfunctioned */ if (psp_i2c_mbox_fail) goto cleanup; /* * If we are the last owner of the PSP semaphore, we need to release * arbitration via the mailbox. */ psp_i2c_access_count--; if (psp_i2c_access_count) goto cleanup; /* * Send a release command to PSP if the semaphore reservation timeout * elapsed but x86 still owns the controller. */ if (!delayed_work_pending(&release_queue)) release_bus(); cleanup: mutex_unlock(&psp_i2c_access_mutex); } /* * Locking methods are based on the default implementation from * drivers/i2c/i2c-core-base.c, but with psp acquire and release operations * added. With this in place we can ensure that i2c clients on the bus shared * with psp are able to lock HW access to the bus for an arbitrary number of * operations - that is e.g. write-wait-read. */ static void i2c_adapter_dw_psp_lock_bus(struct i2c_adapter *adapter, unsigned int flags) { psp_acquire_i2c_bus(); rt_mutex_lock_nested(&adapter->bus_lock, i2c_adapter_depth(adapter)); } static int i2c_adapter_dw_psp_trylock_bus(struct i2c_adapter *adapter, unsigned int flags) { int ret; ret = rt_mutex_trylock(&adapter->bus_lock); if (ret) return ret; psp_acquire_i2c_bus(); return ret; } static void i2c_adapter_dw_psp_unlock_bus(struct i2c_adapter *adapter, unsigned int flags) { psp_release_i2c_bus(); rt_mutex_unlock(&adapter->bus_lock); } static const struct i2c_lock_operations i2c_dw_psp_lock_ops = { .lock_bus = i2c_adapter_dw_psp_lock_bus, .trylock_bus = i2c_adapter_dw_psp_trylock_bus, .unlock_bus = i2c_adapter_dw_psp_unlock_bus, }; int i2c_dw_amdpsp_probe_lock_support(struct dw_i2c_dev *dev) { struct pci_dev *rdev; if (!IS_REACHABLE(CONFIG_CRYPTO_DEV_CCP_DD)) return -ENODEV; if (!dev) return -ENODEV; if (!(dev->flags & ARBITRATION_SEMAPHORE)) return -ENODEV; /* Allow only one instance of the driver to bind */ if (psp_i2c_dev) return -EEXIST; /* Cezanne uses platform mailbox, Mendocino and later use doorbell */ rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0)); if (rdev->device == 0x1630) _psp_send_i2c_req = psp_send_i2c_req_cezanne; else _psp_send_i2c_req = psp_send_i2c_req_doorbell; pci_dev_put(rdev); if (psp_check_platform_access_status()) return -EPROBE_DEFER; psp_i2c_dev = dev->dev; dev_info(psp_i2c_dev, "I2C bus managed by AMD PSP\n"); /* * Install global locking callbacks for adapter as well as internal i2c * controller locks.
*/ dev->adapter.lock_ops = &i2c_dw_psp_lock_ops; dev->acquire_lock = psp_acquire_i2c_bus; dev->release_lock = psp_release_i2c_bus; return 0; }
linux-master
drivers/i2c/busses/i2c-designware-amdpsp.c
/* * Copyright (C) 2017 Spreadtrum Communications Inc. * * SPDX-License-Identifier: (GPL-2.0+ OR MIT) */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/io.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #define I2C_CTL 0x00 #define I2C_ADDR_CFG 0x04 #define I2C_COUNT 0x08 #define I2C_RX 0x0c #define I2C_TX 0x10 #define I2C_STATUS 0x14 #define I2C_HSMODE_CFG 0x18 #define I2C_VERSION 0x1c #define ADDR_DVD0 0x20 #define ADDR_DVD1 0x24 #define ADDR_STA0_DVD 0x28 #define ADDR_RST 0x2c /* I2C_CTL */ #define STP_EN BIT(20) #define FIFO_AF_LVL_MASK GENMASK(19, 16) #define FIFO_AF_LVL 16 #define FIFO_AE_LVL_MASK GENMASK(15, 12) #define FIFO_AE_LVL 12 #define I2C_DMA_EN BIT(11) #define FULL_INTEN BIT(10) #define EMPTY_INTEN BIT(9) #define I2C_DVD_OPT BIT(8) #define I2C_OUT_OPT BIT(7) #define I2C_TRIM_OPT BIT(6) #define I2C_HS_MODE BIT(4) #define I2C_MODE BIT(3) #define I2C_EN BIT(2) #define I2C_INT_EN BIT(1) #define I2C_START BIT(0) /* I2C_STATUS */ #define SDA_IN BIT(21) #define SCL_IN BIT(20) #define FIFO_FULL BIT(4) #define FIFO_EMPTY BIT(3) #define I2C_INT BIT(2) #define I2C_RX_ACK BIT(1) #define I2C_BUSY BIT(0) /* ADDR_RST */ #define I2C_RST BIT(0) #define I2C_FIFO_DEEP 12 #define I2C_FIFO_FULL_THLD 15 #define I2C_FIFO_EMPTY_THLD 4 #define I2C_DATA_STEP 8 #define I2C_ADDR_DVD0_CALC(high, low) \ ((((high) & GENMASK(15, 0)) << 16) | ((low) & GENMASK(15, 0))) #define I2C_ADDR_DVD1_CALC(high, low) \ (((high) & GENMASK(31, 16)) | (((low) & GENMASK(31, 16)) >> 16)) /* timeout (ms) for pm runtime autosuspend */ #define SPRD_I2C_PM_TIMEOUT 1000 /* timeout (ms) for transfer message */ #define I2C_XFER_TIMEOUT 1000 /* SPRD i2c data structure */ struct sprd_i2c { struct i2c_adapter adap; struct device *dev; void __iomem *base; struct i2c_msg *msg; struct clk *clk; u32 src_clk; u32 bus_freq; struct completion complete; u8 *buf; u32 count; int irq; int err; }; static void sprd_i2c_set_count(struct sprd_i2c *i2c_dev, u32 count) { writel(count, i2c_dev->base + I2C_COUNT); } static void sprd_i2c_send_stop(struct sprd_i2c *i2c_dev, int stop) { u32 tmp = readl(i2c_dev->base + I2C_CTL); if (stop) writel(tmp & ~STP_EN, i2c_dev->base + I2C_CTL); else writel(tmp | STP_EN, i2c_dev->base + I2C_CTL); } static void sprd_i2c_clear_start(struct sprd_i2c *i2c_dev) { u32 tmp = readl(i2c_dev->base + I2C_CTL); writel(tmp & ~I2C_START, i2c_dev->base + I2C_CTL); } static void sprd_i2c_clear_ack(struct sprd_i2c *i2c_dev) { u32 tmp = readl(i2c_dev->base + I2C_STATUS); writel(tmp & ~I2C_RX_ACK, i2c_dev->base + I2C_STATUS); } static void sprd_i2c_clear_irq(struct sprd_i2c *i2c_dev) { u32 tmp = readl(i2c_dev->base + I2C_STATUS); writel(tmp & ~I2C_INT, i2c_dev->base + I2C_STATUS); } static void sprd_i2c_reset_fifo(struct sprd_i2c *i2c_dev) { writel(I2C_RST, i2c_dev->base + ADDR_RST); } static void sprd_i2c_set_devaddr(struct sprd_i2c *i2c_dev, struct i2c_msg *m) { writel(m->addr << 1, i2c_dev->base + I2C_ADDR_CFG); } static void sprd_i2c_write_bytes(struct sprd_i2c *i2c_dev, u8 *buf, u32 len) { u32 i; for (i = 0; i < len; i++) writeb(buf[i], i2c_dev->base + I2C_TX); } static void sprd_i2c_read_bytes(struct sprd_i2c *i2c_dev, u8 *buf, u32 len) { u32 i; for (i = 0; i < len; i++) buf[i] = readb(i2c_dev->base + I2C_RX); } static void sprd_i2c_set_full_thld(struct sprd_i2c *i2c_dev, u32 full_thld) { u32 tmp = readl(i2c_dev->base + 
I2C_CTL); tmp &= ~FIFO_AF_LVL_MASK; tmp |= full_thld << FIFO_AF_LVL; writel(tmp, i2c_dev->base + I2C_CTL); }; static void sprd_i2c_set_empty_thld(struct sprd_i2c *i2c_dev, u32 empty_thld) { u32 tmp = readl(i2c_dev->base + I2C_CTL); tmp &= ~FIFO_AE_LVL_MASK; tmp |= empty_thld << FIFO_AE_LVL; writel(tmp, i2c_dev->base + I2C_CTL); }; static void sprd_i2c_set_fifo_full_int(struct sprd_i2c *i2c_dev, int enable) { u32 tmp = readl(i2c_dev->base + I2C_CTL); if (enable) tmp |= FULL_INTEN; else tmp &= ~FULL_INTEN; writel(tmp, i2c_dev->base + I2C_CTL); }; static void sprd_i2c_set_fifo_empty_int(struct sprd_i2c *i2c_dev, int enable) { u32 tmp = readl(i2c_dev->base + I2C_CTL); if (enable) tmp |= EMPTY_INTEN; else tmp &= ~EMPTY_INTEN; writel(tmp, i2c_dev->base + I2C_CTL); }; static void sprd_i2c_opt_start(struct sprd_i2c *i2c_dev) { u32 tmp = readl(i2c_dev->base + I2C_CTL); writel(tmp | I2C_START, i2c_dev->base + I2C_CTL); } static void sprd_i2c_opt_mode(struct sprd_i2c *i2c_dev, int rw) { u32 cmd = readl(i2c_dev->base + I2C_CTL) & ~I2C_MODE; writel(cmd | rw << 3, i2c_dev->base + I2C_CTL); } static void sprd_i2c_data_transfer(struct sprd_i2c *i2c_dev) { u32 i2c_count = i2c_dev->count; u32 need_tran = i2c_count <= I2C_FIFO_DEEP ? i2c_count : I2C_FIFO_DEEP; struct i2c_msg *msg = i2c_dev->msg; if (msg->flags & I2C_M_RD) { sprd_i2c_read_bytes(i2c_dev, i2c_dev->buf, I2C_FIFO_FULL_THLD); i2c_dev->count -= I2C_FIFO_FULL_THLD; i2c_dev->buf += I2C_FIFO_FULL_THLD; /* * If the read data count is larger than rx fifo full threshold, * we should enable the rx fifo full interrupt to read data * again. */ if (i2c_dev->count >= I2C_FIFO_FULL_THLD) sprd_i2c_set_fifo_full_int(i2c_dev, 1); } else { sprd_i2c_write_bytes(i2c_dev, i2c_dev->buf, need_tran); i2c_dev->buf += need_tran; i2c_dev->count -= need_tran; /* * If the write data count is larger than the tx fifo depth, which * means we cannot write all data at once, then we should * enable the tx fifo empty interrupt to write again. */ if (i2c_count > I2C_FIFO_DEEP) sprd_i2c_set_fifo_empty_int(i2c_dev, 1); } } static int sprd_i2c_handle_msg(struct i2c_adapter *i2c_adap, struct i2c_msg *msg, bool is_last_msg) { struct sprd_i2c *i2c_dev = i2c_adap->algo_data; unsigned long time_left; i2c_dev->msg = msg; i2c_dev->buf = msg->buf; i2c_dev->count = msg->len; reinit_completion(&i2c_dev->complete); sprd_i2c_reset_fifo(i2c_dev); sprd_i2c_set_devaddr(i2c_dev, msg); sprd_i2c_set_count(i2c_dev, msg->len); if (msg->flags & I2C_M_RD) { sprd_i2c_opt_mode(i2c_dev, 1); sprd_i2c_send_stop(i2c_dev, 1); } else { sprd_i2c_opt_mode(i2c_dev, 0); sprd_i2c_send_stop(i2c_dev, !!is_last_msg); } /* * We should enable the rx fifo full interrupt to read data once the * fifo fills up. */ if (msg->flags & I2C_M_RD) sprd_i2c_set_fifo_full_int(i2c_dev, 1); else sprd_i2c_data_transfer(i2c_dev); sprd_i2c_opt_start(i2c_dev); time_left = wait_for_completion_timeout(&i2c_dev->complete, msecs_to_jiffies(I2C_XFER_TIMEOUT)); if (!time_left) return -ETIMEDOUT; return i2c_dev->err; } static int sprd_i2c_master_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, int num) { struct sprd_i2c *i2c_dev = i2c_adap->algo_data; int im, ret; ret = pm_runtime_resume_and_get(i2c_dev->dev); if (ret < 0) return ret; for (im = 0; im < num - 1; im++) { ret = sprd_i2c_handle_msg(i2c_adap, &msgs[im], 0); if (ret) goto err_msg; } ret = sprd_i2c_handle_msg(i2c_adap, &msgs[im++], 1); err_msg: pm_runtime_mark_last_busy(i2c_dev->dev); pm_runtime_put_autosuspend(i2c_dev->dev); return ret < 0 ?
ret : im; } static u32 sprd_i2c_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static const struct i2c_algorithm sprd_i2c_algo = { .master_xfer = sprd_i2c_master_xfer, .functionality = sprd_i2c_func, }; static void sprd_i2c_set_clk(struct sprd_i2c *i2c_dev, u32 freq) { u32 apb_clk = i2c_dev->src_clk; /* * From I2C databook, the prescale calculation formula: * prescale = freq_i2c / (4 * freq_scl) - 1; */ u32 i2c_dvd = apb_clk / (4 * freq) - 1; /* * From I2C databook, the high period of SCL clock is recommended as * 40% (2/5), and the low period of SCL clock is recommended as 60% * (3/5), then the formula should be: * high = (prescale * 2 * 2) / 5 * low = (prescale * 2 * 3) / 5 */ u32 high = ((i2c_dvd << 1) * 2) / 5; u32 low = ((i2c_dvd << 1) * 3) / 5; u32 div0 = I2C_ADDR_DVD0_CALC(high, low); u32 div1 = I2C_ADDR_DVD1_CALC(high, low); writel(div0, i2c_dev->base + ADDR_DVD0); writel(div1, i2c_dev->base + ADDR_DVD1); /* Start hold timing = hold time(us) * source clock */ if (freq == I2C_MAX_FAST_MODE_FREQ) writel((6 * apb_clk) / 10000000, i2c_dev->base + ADDR_STA0_DVD); else if (freq == I2C_MAX_STANDARD_MODE_FREQ) writel((4 * apb_clk) / 1000000, i2c_dev->base + ADDR_STA0_DVD); } static void sprd_i2c_enable(struct sprd_i2c *i2c_dev) { u32 tmp = I2C_DVD_OPT; writel(tmp, i2c_dev->base + I2C_CTL); sprd_i2c_set_full_thld(i2c_dev, I2C_FIFO_FULL_THLD); sprd_i2c_set_empty_thld(i2c_dev, I2C_FIFO_EMPTY_THLD); sprd_i2c_set_clk(i2c_dev, i2c_dev->bus_freq); sprd_i2c_reset_fifo(i2c_dev); sprd_i2c_clear_irq(i2c_dev); tmp = readl(i2c_dev->base + I2C_CTL); writel(tmp | I2C_EN | I2C_INT_EN, i2c_dev->base + I2C_CTL); } static irqreturn_t sprd_i2c_isr_thread(int irq, void *dev_id) { struct sprd_i2c *i2c_dev = dev_id; struct i2c_msg *msg = i2c_dev->msg; bool ack = !(readl(i2c_dev->base + I2C_STATUS) & I2C_RX_ACK); u32 i2c_tran; if (msg->flags & I2C_M_RD) i2c_tran = i2c_dev->count >= I2C_FIFO_FULL_THLD; else i2c_tran = i2c_dev->count; /* * If we got one ACK from slave when writing data, and we did not * finish this transmission (i2c_tran is not zero), then we should * continue to write data. * * For reading data, ack is always true, if i2c_tran is not 0 which * means we still need to continue to read data from the slave. */ if (i2c_tran && ack) { sprd_i2c_data_transfer(i2c_dev); return IRQ_HANDLED; } i2c_dev->err = 0; /* * If we did not get one ACK from slave when writing data, we should * return -EIO to notify users. */ if (!ack) i2c_dev->err = -EIO; else if (msg->flags & I2C_M_RD && i2c_dev->count) sprd_i2c_read_bytes(i2c_dev, i2c_dev->buf, i2c_dev->count); /* Transmission is done, so clear the ack and start bits */ sprd_i2c_clear_ack(i2c_dev); sprd_i2c_clear_start(i2c_dev); complete(&i2c_dev->complete); return IRQ_HANDLED; } static irqreturn_t sprd_i2c_isr(int irq, void *dev_id) { struct sprd_i2c *i2c_dev = dev_id; struct i2c_msg *msg = i2c_dev->msg; bool ack = !(readl(i2c_dev->base + I2C_STATUS) & I2C_RX_ACK); u32 i2c_tran; if (msg->flags & I2C_M_RD) i2c_tran = i2c_dev->count >= I2C_FIFO_FULL_THLD; else i2c_tran = i2c_dev->count; /* * If we did not get one ACK from slave when writing data, then we * should finish this transmission since we got some errors. * * When writing data, if i2c_tran == 0 which means we have written * all data, then we can finish this transmission. * * When reading data, if count < rx fifo full threshold, which * means we can read all data at once, then we can finish this * transmission too.
*/ if (!i2c_tran || !ack) { sprd_i2c_clear_start(i2c_dev); sprd_i2c_clear_irq(i2c_dev); } sprd_i2c_set_fifo_empty_int(i2c_dev, 0); sprd_i2c_set_fifo_full_int(i2c_dev, 0); return IRQ_WAKE_THREAD; } static int sprd_i2c_clk_init(struct sprd_i2c *i2c_dev) { struct clk *clk_i2c, *clk_parent; clk_i2c = devm_clk_get(i2c_dev->dev, "i2c"); if (IS_ERR(clk_i2c)) { dev_warn(i2c_dev->dev, "i2c%d can't get the i2c clock\n", i2c_dev->adap.nr); clk_i2c = NULL; } clk_parent = devm_clk_get(i2c_dev->dev, "source"); if (IS_ERR(clk_parent)) { dev_warn(i2c_dev->dev, "i2c%d can't get the source clock\n", i2c_dev->adap.nr); clk_parent = NULL; } if (clk_set_parent(clk_i2c, clk_parent)) i2c_dev->src_clk = clk_get_rate(clk_i2c); else i2c_dev->src_clk = 26000000; dev_dbg(i2c_dev->dev, "i2c%d set source clock is %d\n", i2c_dev->adap.nr, i2c_dev->src_clk); i2c_dev->clk = devm_clk_get(i2c_dev->dev, "enable"); if (IS_ERR(i2c_dev->clk)) { dev_err(i2c_dev->dev, "i2c%d can't get the enable clock\n", i2c_dev->adap.nr); return PTR_ERR(i2c_dev->clk); } return 0; } static int sprd_i2c_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct sprd_i2c *i2c_dev; u32 prop; int ret; pdev->id = of_alias_get_id(dev->of_node, "i2c"); i2c_dev = devm_kzalloc(dev, sizeof(struct sprd_i2c), GFP_KERNEL); if (!i2c_dev) return -ENOMEM; i2c_dev->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(i2c_dev->base)) return PTR_ERR(i2c_dev->base); i2c_dev->irq = platform_get_irq(pdev, 0); if (i2c_dev->irq < 0) return i2c_dev->irq; i2c_set_adapdata(&i2c_dev->adap, i2c_dev); init_completion(&i2c_dev->complete); snprintf(i2c_dev->adap.name, sizeof(i2c_dev->adap.name), "%s", "sprd-i2c"); i2c_dev->bus_freq = I2C_MAX_STANDARD_MODE_FREQ; i2c_dev->adap.owner = THIS_MODULE; i2c_dev->dev = dev; i2c_dev->adap.retries = 3; i2c_dev->adap.algo = &sprd_i2c_algo; i2c_dev->adap.algo_data = i2c_dev; i2c_dev->adap.dev.parent = dev; i2c_dev->adap.nr = pdev->id; i2c_dev->adap.dev.of_node = dev->of_node; if (!of_property_read_u32(dev->of_node, "clock-frequency", &prop)) i2c_dev->bus_freq = prop; /* We only support 100k and 400k now, otherwise will return error. 
*/ if (i2c_dev->bus_freq != I2C_MAX_STANDARD_MODE_FREQ && i2c_dev->bus_freq != I2C_MAX_FAST_MODE_FREQ) return -EINVAL; ret = sprd_i2c_clk_init(i2c_dev); if (ret) return ret; platform_set_drvdata(pdev, i2c_dev); ret = clk_prepare_enable(i2c_dev->clk); if (ret) return ret; sprd_i2c_enable(i2c_dev); pm_runtime_set_autosuspend_delay(i2c_dev->dev, SPRD_I2C_PM_TIMEOUT); pm_runtime_use_autosuspend(i2c_dev->dev); pm_runtime_set_active(i2c_dev->dev); pm_runtime_enable(i2c_dev->dev); ret = pm_runtime_get_sync(i2c_dev->dev); if (ret < 0) goto err_rpm_put; ret = devm_request_threaded_irq(dev, i2c_dev->irq, sprd_i2c_isr, sprd_i2c_isr_thread, IRQF_NO_SUSPEND | IRQF_ONESHOT, pdev->name, i2c_dev); if (ret) { dev_err(&pdev->dev, "failed to request irq %d\n", i2c_dev->irq); goto err_rpm_put; } ret = i2c_add_numbered_adapter(&i2c_dev->adap); if (ret) { dev_err(&pdev->dev, "add adapter failed\n"); goto err_rpm_put; } pm_runtime_mark_last_busy(i2c_dev->dev); pm_runtime_put_autosuspend(i2c_dev->dev); return 0; err_rpm_put: pm_runtime_put_noidle(i2c_dev->dev); pm_runtime_disable(i2c_dev->dev); clk_disable_unprepare(i2c_dev->clk); return ret; } static int sprd_i2c_remove(struct platform_device *pdev) { struct sprd_i2c *i2c_dev = platform_get_drvdata(pdev); int ret; ret = pm_runtime_get_sync(i2c_dev->dev); if (ret < 0) dev_err(&pdev->dev, "Failed to resume device (%pe)\n", ERR_PTR(ret)); i2c_del_adapter(&i2c_dev->adap); if (ret >= 0) clk_disable_unprepare(i2c_dev->clk); pm_runtime_put_noidle(i2c_dev->dev); pm_runtime_disable(i2c_dev->dev); return 0; } static int __maybe_unused sprd_i2c_suspend_noirq(struct device *dev) { struct sprd_i2c *i2c_dev = dev_get_drvdata(dev); i2c_mark_adapter_suspended(&i2c_dev->adap); return pm_runtime_force_suspend(dev); } static int __maybe_unused sprd_i2c_resume_noirq(struct device *dev) { struct sprd_i2c *i2c_dev = dev_get_drvdata(dev); i2c_mark_adapter_resumed(&i2c_dev->adap); return pm_runtime_force_resume(dev); } static int __maybe_unused sprd_i2c_runtime_suspend(struct device *dev) { struct sprd_i2c *i2c_dev = dev_get_drvdata(dev); clk_disable_unprepare(i2c_dev->clk); return 0; } static int __maybe_unused sprd_i2c_runtime_resume(struct device *dev) { struct sprd_i2c *i2c_dev = dev_get_drvdata(dev); int ret; ret = clk_prepare_enable(i2c_dev->clk); if (ret) return ret; sprd_i2c_enable(i2c_dev); return 0; } static const struct dev_pm_ops sprd_i2c_pm_ops = { SET_RUNTIME_PM_OPS(sprd_i2c_runtime_suspend, sprd_i2c_runtime_resume, NULL) SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sprd_i2c_suspend_noirq, sprd_i2c_resume_noirq) }; static const struct of_device_id sprd_i2c_of_match[] = { { .compatible = "sprd,sc9860-i2c", }, {}, }; MODULE_DEVICE_TABLE(of, sprd_i2c_of_match); static struct platform_driver sprd_i2c_driver = { .probe = sprd_i2c_probe, .remove = sprd_i2c_remove, .driver = { .name = "sprd-i2c", .of_match_table = sprd_i2c_of_match, .pm = &sprd_i2c_pm_ops, }, }; module_platform_driver(sprd_i2c_driver); MODULE_DESCRIPTION("Spreadtrum I2C master controller driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/i2c/busses/i2c-sprd.c
// SPDX-License-Identifier: GPL-2.0-only /* * Loongson-2K/Loongson LS7A I2C master mode driver * * Copyright (C) 2013 Loongson Technology Corporation Limited. * Copyright (C) 2014-2017 Lemote, Inc. * Copyright (C) 2018-2022 Loongson Technology Corporation Limited. * * Originally written by liushaozong * Rewritten for mainline by Binbin Zhou <[email protected]> */ #include <linux/bits.h> #include <linux/completion.h> #include <linux/device.h> #include <linux/iopoll.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pm_runtime.h> #include <linux/platform_device.h> #include <linux/property.h> #include <linux/units.h> /* I2C Registers */ #define I2C_LS2X_PRER 0x0 /* Freq Division Register(16 bits) */ #define I2C_LS2X_CTR 0x2 /* Control Register */ #define I2C_LS2X_TXR 0x3 /* Transport Data Register */ #define I2C_LS2X_RXR 0x3 /* Receive Data Register */ #define I2C_LS2X_CR 0x4 /* Command Control Register */ #define I2C_LS2X_SR 0x4 /* State Register */ /* Command Control Register Bit */ #define LS2X_CR_START BIT(7) /* Start signal */ #define LS2X_CR_STOP BIT(6) /* Stop signal */ #define LS2X_CR_READ BIT(5) /* Read signal */ #define LS2X_CR_WRITE BIT(4) /* Write signal */ #define LS2X_CR_ACK BIT(3) /* Response signal */ #define LS2X_CR_IACK BIT(0) /* Interrupt response signal */ /* State Register Bit */ #define LS2X_SR_NOACK BIT(7) /* Receive NACK */ #define LS2X_SR_BUSY BIT(6) /* Bus busy state */ #define LS2X_SR_AL BIT(5) /* Arbitration lost */ #define LS2X_SR_TIP BIT(1) /* Transmission state */ #define LS2X_SR_IF BIT(0) /* Interrupt flag */ /* Control Register Bit */ #define LS2X_CTR_EN BIT(7) /* 0: I2c frequency setting 1: Normal */ #define LS2X_CTR_IEN BIT(6) /* Enable i2c interrupt */ #define LS2X_CTR_MST BIT(5) /* 0: Slave mode 1: Master mode */ #define CTR_FREQ_MASK GENMASK(7, 6) #define CTR_READY_MASK GENMASK(7, 5) /* The PCLK frequency from LPB */ #define LS2X_I2C_PCLK_FREQ (50 * HZ_PER_MHZ) /* The default bus frequency, which is an empirical value */ #define LS2X_I2C_FREQ_STD (33 * HZ_PER_KHZ) struct ls2x_i2c_priv { struct i2c_adapter adapter; void __iomem *base; struct i2c_timings i2c_t; struct completion cmd_complete; }; /* * Interrupt service routine. * This gets called whenever an I2C interrupt occurs. */ static irqreturn_t ls2x_i2c_isr(int this_irq, void *dev_id) { struct ls2x_i2c_priv *priv = dev_id; if (!(readb(priv->base + I2C_LS2X_SR) & LS2X_SR_IF)) return IRQ_NONE; writeb(LS2X_CR_IACK, priv->base + I2C_LS2X_CR); complete(&priv->cmd_complete); return IRQ_HANDLED; } /* * The ls2x i2c controller supports standard mode and fast mode, so the * maximum bus frequency is '400kHz'. * The bus frequency is set to the empirical value of '33KHz' by default, * but it can also be taken from ACPI or FDT for compatibility with more * devices. */ static void ls2x_i2c_adjust_bus_speed(struct ls2x_i2c_priv *priv) { struct i2c_timings *t = &priv->i2c_t; struct device *dev = priv->adapter.dev.parent; u32 acpi_speed = i2c_acpi_find_bus_speed(dev); i2c_parse_fw_timings(dev, t, false); if (acpi_speed || t->bus_freq_hz) t->bus_freq_hz = max(t->bus_freq_hz, acpi_speed); else t->bus_freq_hz = LS2X_I2C_FREQ_STD; /* Calculate and set i2c frequency. */ writew(LS2X_I2C_PCLK_FREQ / (5 * t->bus_freq_hz) - 1, priv->base + I2C_LS2X_PRER); } static void ls2x_i2c_init(struct ls2x_i2c_priv *priv) { /* Set i2c frequency setting mode and disable interrupts. 
*/ writeb(readb(priv->base + I2C_LS2X_CTR) & ~CTR_FREQ_MASK, priv->base + I2C_LS2X_CTR); ls2x_i2c_adjust_bus_speed(priv); /* Set i2c normal operating mode and enable interrupts. */ writeb(readb(priv->base + I2C_LS2X_CTR) | CTR_READY_MASK, priv->base + I2C_LS2X_CTR); } static int ls2x_i2c_xfer_byte(struct ls2x_i2c_priv *priv, u8 txdata, u8 *rxdatap) { u8 rxdata; unsigned long time_left; writeb(txdata, priv->base + I2C_LS2X_CR); time_left = wait_for_completion_timeout(&priv->cmd_complete, priv->adapter.timeout); if (!time_left) return -ETIMEDOUT; rxdata = readb(priv->base + I2C_LS2X_SR); if (rxdatap) *rxdatap = rxdata; return 0; } static int ls2x_i2c_send_byte(struct ls2x_i2c_priv *priv, u8 txdata) { int ret; u8 rxdata; ret = ls2x_i2c_xfer_byte(priv, txdata, &rxdata); if (ret) return ret; if (rxdata & LS2X_SR_AL) return -EAGAIN; if (rxdata & LS2X_SR_NOACK) return -ENXIO; return 0; } static int ls2x_i2c_stop(struct ls2x_i2c_priv *priv) { u8 value; writeb(LS2X_CR_STOP, priv->base + I2C_LS2X_CR); return readb_poll_timeout(priv->base + I2C_LS2X_SR, value, !(value & LS2X_SR_BUSY), 100, jiffies_to_usecs(priv->adapter.timeout)); } static int ls2x_i2c_start(struct ls2x_i2c_priv *priv, struct i2c_msg *msgs) { reinit_completion(&priv->cmd_complete); writeb(i2c_8bit_addr_from_msg(msgs), priv->base + I2C_LS2X_TXR); return ls2x_i2c_send_byte(priv, LS2X_CR_START | LS2X_CR_WRITE); } static int ls2x_i2c_rx(struct ls2x_i2c_priv *priv, struct i2c_msg *msg) { int ret; u8 rxdata, *buf = msg->buf; u16 len = msg->len; /* Contains steps to send start condition and address. */ ret = ls2x_i2c_start(priv, msg); if (ret) return ret; while (len--) { ret = ls2x_i2c_xfer_byte(priv, LS2X_CR_READ | (len ? 0 : LS2X_CR_ACK), &rxdata); if (ret) return ret; *buf++ = readb(priv->base + I2C_LS2X_RXR); } return 0; } static int ls2x_i2c_tx(struct ls2x_i2c_priv *priv, struct i2c_msg *msg) { int ret; u8 *buf = msg->buf; u16 len = msg->len; /* Contains steps to send start condition and address. */ ret = ls2x_i2c_start(priv, msg); if (ret) return ret; while (len--) { writeb(*buf++, priv->base + I2C_LS2X_TXR); ret = ls2x_i2c_send_byte(priv, LS2X_CR_WRITE); if (ret) return ret; } return 0; } static int ls2x_i2c_xfer_one(struct ls2x_i2c_priv *priv, struct i2c_msg *msg, bool stop) { int ret; if (msg->flags & I2C_M_RD) ret = ls2x_i2c_rx(priv, msg); else ret = ls2x_i2c_tx(priv, msg); if (ret < 0) { /* Fatal error. Needs reinit. */ if (ret == -ETIMEDOUT) ls2x_i2c_init(priv); return ret; } if (stop) { /* Failed to issue STOP. Needs reinit.
*/ ret = ls2x_i2c_stop(priv); if (ret) ls2x_i2c_init(priv); } return ret; } static int ls2x_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { int ret; struct i2c_msg *msg, *emsg = msgs + num; struct ls2x_i2c_priv *priv = i2c_get_adapdata(adap); for (msg = msgs; msg < emsg; msg++) { ret = ls2x_i2c_xfer_one(priv, msg, msg == emsg - 1); if (ret) return ret; } return num; } static unsigned int ls2x_i2c_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static const struct i2c_algorithm ls2x_i2c_algo = { .master_xfer = ls2x_i2c_master_xfer, .functionality = ls2x_i2c_func, }; static int ls2x_i2c_probe(struct platform_device *pdev) { int ret, irq; struct i2c_adapter *adap; struct ls2x_i2c_priv *priv; struct device *dev = &pdev->dev; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; /* Map hardware registers */ priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; /* Add the i2c adapter */ adap = &priv->adapter; adap->retries = 5; adap->nr = pdev->id; adap->dev.parent = dev; adap->owner = THIS_MODULE; adap->algo = &ls2x_i2c_algo; adap->timeout = msecs_to_jiffies(100); device_set_node(&adap->dev, dev_fwnode(dev)); i2c_set_adapdata(adap, priv); strscpy(adap->name, pdev->name, sizeof(adap->name)); init_completion(&priv->cmd_complete); platform_set_drvdata(pdev, priv); ls2x_i2c_init(priv); ret = devm_request_irq(dev, irq, ls2x_i2c_isr, IRQF_SHARED, "ls2x-i2c", priv); if (ret < 0) return dev_err_probe(dev, ret, "Unable to request irq %d\n", irq); return devm_i2c_add_adapter(dev, adap); } static int ls2x_i2c_suspend(struct device *dev) { struct ls2x_i2c_priv *priv = dev_get_drvdata(dev); /* Disable interrupts */ writeb(readb(priv->base + I2C_LS2X_CTR) & ~LS2X_CTR_IEN, priv->base + I2C_LS2X_CTR); return 0; } static int ls2x_i2c_resume(struct device *dev) { ls2x_i2c_init(dev_get_drvdata(dev)); return 0; } static DEFINE_RUNTIME_DEV_PM_OPS(ls2x_i2c_pm_ops, ls2x_i2c_suspend, ls2x_i2c_resume, NULL); static const struct of_device_id ls2x_i2c_id_table[] = { { .compatible = "loongson,ls2k-i2c" }, { .compatible = "loongson,ls7a-i2c" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, ls2x_i2c_id_table); static const struct acpi_device_id ls2x_i2c_acpi_match[] = { { "LOON0004" }, /* Loongson LS7A */ { } }; MODULE_DEVICE_TABLE(acpi, ls2x_i2c_acpi_match); static struct platform_driver ls2x_i2c_driver = { .probe = ls2x_i2c_probe, .driver = { .name = "ls2x-i2c", .pm = pm_sleep_ptr(&ls2x_i2c_pm_ops), .of_match_table = ls2x_i2c_id_table, .acpi_match_table = ls2x_i2c_acpi_match, }, }; module_platform_driver(ls2x_i2c_driver); MODULE_DESCRIPTION("Loongson LS2X I2C Bus driver"); MODULE_AUTHOR("Loongson Technology Corporation Limited"); MODULE_LICENSE("GPL");
linux-master
drivers/i2c/busses/i2c-ls2x.c
// SPDX-License-Identifier: GPL-2.0-only // Copyright (C) 2014 Broadcom Corporation #include <linux/clk.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/sched.h> #include <linux/slab.h> #define N_DATA_REGS 8 /* * PER_I2C/BSC count register mask depends on 1 byte/4 byte data register * size. Cable modem and DSL SoCs with Peripheral i2c cores use 1 byte per * data register whereas STB SoCs use 4 byte per data register transfer, * account for this difference in total count per transaction and mask to * use. */ #define BSC_CNT_REG1_MASK(nb) (nb == 1 ? GENMASK(3, 0) : GENMASK(5, 0)) #define BSC_CNT_REG1_SHIFT 0 /* BSC CTL register field definitions */ #define BSC_CTL_REG_DTF_MASK 0x00000003 #define BSC_CTL_REG_SCL_SEL_MASK 0x00000030 #define BSC_CTL_REG_SCL_SEL_SHIFT 4 #define BSC_CTL_REG_INT_EN_MASK 0x00000040 #define BSC_CTL_REG_INT_EN_SHIFT 6 #define BSC_CTL_REG_DIV_CLK_MASK 0x00000080 /* BSC_IIC_ENABLE r/w enable and interrupt field definitions */ #define BSC_IIC_EN_RESTART_MASK 0x00000040 #define BSC_IIC_EN_NOSTART_MASK 0x00000020 #define BSC_IIC_EN_NOSTOP_MASK 0x00000010 #define BSC_IIC_EN_NOACK_MASK 0x00000004 #define BSC_IIC_EN_INTRP_MASK 0x00000002 #define BSC_IIC_EN_ENABLE_MASK 0x00000001 /* BSC_CTLHI control register field definitions */ #define BSC_CTLHI_REG_INPUT_SWITCHING_LEVEL_MASK 0x00000080 #define BSC_CTLHI_REG_DATAREG_SIZE_MASK 0x00000040 #define BSC_CTLHI_REG_IGNORE_ACK_MASK 0x00000002 #define BSC_CTLHI_REG_WAIT_DIS_MASK 0x00000001 #define I2C_TIMEOUT 100 /* msecs */ /* Condition mask used for non combined transfer */ #define COND_RESTART BSC_IIC_EN_RESTART_MASK #define COND_NOSTART BSC_IIC_EN_NOSTART_MASK #define COND_NOSTOP BSC_IIC_EN_NOSTOP_MASK #define COND_START_STOP (COND_RESTART | COND_NOSTART | COND_NOSTOP) /* BSC data transfer direction */ #define DTF_WR_MASK 0x00000000 #define DTF_RD_MASK 0x00000001 /* BSC data transfer direction combined format */ #define DTF_RD_WR_MASK 0x00000002 #define DTF_WR_RD_MASK 0x00000003 #define INT_ENABLE true #define INT_DISABLE false /* BSC block register map structure to cache fields to be written */ struct bsc_regs { u32 chip_address; /* slave address */ u32 data_in[N_DATA_REGS]; /* tx data buffer*/ u32 cnt_reg; /* rx/tx data length */ u32 ctl_reg; /* control register */ u32 iic_enable; /* xfer enable and status */ u32 data_out[N_DATA_REGS]; /* rx data buffer */ u32 ctlhi_reg; /* more control fields */ u32 scl_param; /* reserved */ }; struct bsc_clk_param { u32 hz; u32 scl_mask; u32 div_mask; }; enum bsc_xfer_cmd { CMD_WR, CMD_RD, CMD_WR_NOACK, CMD_RD_NOACK, }; static char const *cmd_string[] = { [CMD_WR] = "WR", [CMD_RD] = "RD", [CMD_WR_NOACK] = "WR NOACK", [CMD_RD_NOACK] = "RD NOACK", }; enum bus_speeds { SPD_375K, SPD_390K, SPD_187K, SPD_200K, SPD_93K, SPD_97K, SPD_46K, SPD_50K }; static const struct bsc_clk_param bsc_clk[] = { [SPD_375K] = { .hz = 375000, .scl_mask = SPD_375K << BSC_CTL_REG_SCL_SEL_SHIFT, .div_mask = 0 }, [SPD_390K] = { .hz = 390000, .scl_mask = SPD_390K << BSC_CTL_REG_SCL_SEL_SHIFT, .div_mask = 0 }, [SPD_187K] = { .hz = 187500, .scl_mask = SPD_187K << BSC_CTL_REG_SCL_SEL_SHIFT, .div_mask = 0 }, [SPD_200K] = { .hz = 200000, .scl_mask = SPD_200K << BSC_CTL_REG_SCL_SEL_SHIFT, .div_mask = 0 }, [SPD_93K] = { .hz = 93750, .scl_mask = SPD_375K << BSC_CTL_REG_SCL_SEL_SHIFT, .div_mask = BSC_CTL_REG_DIV_CLK_MASK }, [SPD_97K] = { .hz = 97500, 
.scl_mask = SPD_390K << BSC_CTL_REG_SCL_SEL_SHIFT, .div_mask = BSC_CTL_REG_DIV_CLK_MASK }, [SPD_46K] = { .hz = 46875, .scl_mask = SPD_187K << BSC_CTL_REG_SCL_SEL_SHIFT, .div_mask = BSC_CTL_REG_DIV_CLK_MASK }, [SPD_50K] = { .hz = 50000, .scl_mask = SPD_200K << BSC_CTL_REG_SCL_SEL_SHIFT, .div_mask = BSC_CTL_REG_DIV_CLK_MASK } }; struct brcmstb_i2c_dev { struct device *device; void __iomem *base; int irq; struct bsc_regs *bsc_regmap; struct i2c_adapter adapter; struct completion done; u32 clk_freq_hz; int data_regsz; }; /* register accessors for both be and le cpu arch */ #ifdef CONFIG_CPU_BIG_ENDIAN #define __bsc_readl(_reg) ioread32be(_reg) #define __bsc_writel(_val, _reg) iowrite32be(_val, _reg) #else #define __bsc_readl(_reg) ioread32(_reg) #define __bsc_writel(_val, _reg) iowrite32(_val, _reg) #endif #define bsc_readl(_dev, _reg) \ __bsc_readl(_dev->base + offsetof(struct bsc_regs, _reg)) #define bsc_writel(_dev, _val, _reg) \ __bsc_writel(_val, _dev->base + offsetof(struct bsc_regs, _reg)) static inline int brcmstb_i2c_get_xfersz(struct brcmstb_i2c_dev *dev) { return (N_DATA_REGS * dev->data_regsz); } static inline int brcmstb_i2c_get_data_regsz(struct brcmstb_i2c_dev *dev) { return dev->data_regsz; } static void brcmstb_i2c_enable_disable_irq(struct brcmstb_i2c_dev *dev, bool int_en) { if (int_en) /* Enable BSC CTL interrupt line */ dev->bsc_regmap->ctl_reg |= BSC_CTL_REG_INT_EN_MASK; else /* Disable BSC CTL interrupt line */ dev->bsc_regmap->ctl_reg &= ~BSC_CTL_REG_INT_EN_MASK; barrier(); bsc_writel(dev, dev->bsc_regmap->ctl_reg, ctl_reg); } static irqreturn_t brcmstb_i2c_isr(int irq, void *devid) { struct brcmstb_i2c_dev *dev = devid; u32 status_bsc_ctl = bsc_readl(dev, ctl_reg); u32 status_iic_intrp = bsc_readl(dev, iic_enable); dev_dbg(dev->device, "isr CTL_REG %x IIC_EN %x\n", status_bsc_ctl, status_iic_intrp); if (!(status_bsc_ctl & BSC_CTL_REG_INT_EN_MASK)) return IRQ_NONE; brcmstb_i2c_enable_disable_irq(dev, INT_DISABLE); complete(&dev->done); dev_dbg(dev->device, "isr handled"); return IRQ_HANDLED; } /* Wait for device to be ready */ static int brcmstb_i2c_wait_if_busy(struct brcmstb_i2c_dev *dev) { unsigned long timeout = jiffies + msecs_to_jiffies(I2C_TIMEOUT); while ((bsc_readl(dev, iic_enable) & BSC_IIC_EN_INTRP_MASK)) { if (time_after(jiffies, timeout)) return -ETIMEDOUT; cpu_relax(); } return 0; } /* i2c xfer completion function, handles both irq and polling mode */ static int brcmstb_i2c_wait_for_completion(struct brcmstb_i2c_dev *dev) { int ret = 0; unsigned long timeout = msecs_to_jiffies(I2C_TIMEOUT); if (dev->irq >= 0) { if (!wait_for_completion_timeout(&dev->done, timeout)) ret = -ETIMEDOUT; } else { /* we are in polling mode */ u32 bsc_intrp; unsigned long time_left = jiffies + timeout; do { bsc_intrp = bsc_readl(dev, iic_enable) & BSC_IIC_EN_INTRP_MASK; if (time_after(jiffies, time_left)) { ret = -ETIMEDOUT; break; } cpu_relax(); } while (!bsc_intrp); } if (dev->irq < 0 || ret == -ETIMEDOUT) brcmstb_i2c_enable_disable_irq(dev, INT_DISABLE); return ret; } /* Set xfer START/STOP conditions for subsequent transfer */ static void brcmstb_set_i2c_start_stop(struct brcmstb_i2c_dev *dev, u32 cond_flag) { u32 regval = dev->bsc_regmap->iic_enable; dev->bsc_regmap->iic_enable = (regval & ~COND_START_STOP) | cond_flag; } /* Send I2C request check completion */ static int brcmstb_send_i2c_cmd(struct brcmstb_i2c_dev *dev, enum bsc_xfer_cmd cmd) { int rc = 0; struct bsc_regs *pi2creg = dev->bsc_regmap; /* Make sure the hardware is ready */ rc = brcmstb_i2c_wait_if_busy(dev); 
if (rc < 0) return rc; /* only if we are in interrupt mode */ if (dev->irq >= 0) reinit_completion(&dev->done); /* enable BSC CTL interrupt line */ brcmstb_i2c_enable_disable_irq(dev, INT_ENABLE); /* initiate transfer by setting iic_enable */ pi2creg->iic_enable |= BSC_IIC_EN_ENABLE_MASK; bsc_writel(dev, pi2creg->iic_enable, iic_enable); /* Wait for transaction to finish or timeout */ rc = brcmstb_i2c_wait_for_completion(dev); if (rc) { dev_dbg(dev->device, "intr timeout for cmd %s\n", cmd_string[cmd]); goto cmd_out; } if ((cmd == CMD_RD || cmd == CMD_WR) && bsc_readl(dev, iic_enable) & BSC_IIC_EN_NOACK_MASK) { rc = -EREMOTEIO; dev_dbg(dev->device, "controller received NOACK intr for %s\n", cmd_string[cmd]); } cmd_out: bsc_writel(dev, 0, cnt_reg); bsc_writel(dev, 0, iic_enable); return rc; } /* Actual data transfer through the BSC master */ static int brcmstb_i2c_xfer_bsc_data(struct brcmstb_i2c_dev *dev, u8 *buf, unsigned int len, struct i2c_msg *pmsg) { int cnt, byte, i, rc; enum bsc_xfer_cmd cmd; u32 ctl_reg; struct bsc_regs *pi2creg = dev->bsc_regmap; int no_ack = pmsg->flags & I2C_M_IGNORE_NAK; int data_regsz = brcmstb_i2c_get_data_regsz(dev); /* see if the transaction needs to check NACK conditions */ if (no_ack) { cmd = (pmsg->flags & I2C_M_RD) ? CMD_RD_NOACK : CMD_WR_NOACK; pi2creg->ctlhi_reg |= BSC_CTLHI_REG_IGNORE_ACK_MASK; } else { cmd = (pmsg->flags & I2C_M_RD) ? CMD_RD : CMD_WR; pi2creg->ctlhi_reg &= ~BSC_CTLHI_REG_IGNORE_ACK_MASK; } bsc_writel(dev, pi2creg->ctlhi_reg, ctlhi_reg); /* set data transfer direction */ ctl_reg = pi2creg->ctl_reg & ~BSC_CTL_REG_DTF_MASK; if (cmd == CMD_WR || cmd == CMD_WR_NOACK) pi2creg->ctl_reg = ctl_reg | DTF_WR_MASK; else pi2creg->ctl_reg = ctl_reg | DTF_RD_MASK; /* set the read/write length */ bsc_writel(dev, BSC_CNT_REG1_MASK(data_regsz) & (len << BSC_CNT_REG1_SHIFT), cnt_reg); /* Write data into data_in register */ if (cmd == CMD_WR || cmd == CMD_WR_NOACK) { for (cnt = 0, i = 0; cnt < len; cnt += data_regsz, i++) { u32 word = 0; for (byte = 0; byte < data_regsz; byte++) { word >>= BITS_PER_BYTE; if ((cnt + byte) < len) word |= buf[cnt + byte] << (BITS_PER_BYTE * (data_regsz - 1)); } bsc_writel(dev, word, data_in[i]); } } /* Initiate xfer, the function will return on completion */ rc = brcmstb_send_i2c_cmd(dev, cmd); if (rc != 0) { dev_dbg(dev->device, "%s failure", cmd_string[cmd]); return rc; } /* Read data from data_out register */ if (cmd == CMD_RD || cmd == CMD_RD_NOACK) { for (cnt = 0, i = 0; cnt < len; cnt += data_regsz, i++) { u32 data = bsc_readl(dev, data_out[i]); for (byte = 0; byte < data_regsz && (byte + cnt) < len; byte++) { buf[cnt + byte] = data & 0xff; data >>= BITS_PER_BYTE; } } } return 0; } /* Write a single byte of data to the i2c bus */ static int brcmstb_i2c_write_data_byte(struct brcmstb_i2c_dev *dev, u8 *buf, unsigned int nak_expected) { enum bsc_xfer_cmd cmd = nak_expected ? 
CMD_WR : CMD_WR_NOACK; bsc_writel(dev, 1, cnt_reg); bsc_writel(dev, *buf, data_in); return brcmstb_send_i2c_cmd(dev, cmd); } /* Send i2c address */ static int brcmstb_i2c_do_addr(struct brcmstb_i2c_dev *dev, struct i2c_msg *msg) { unsigned char addr; if (msg->flags & I2C_M_TEN) { /* First byte is 11110XX0 where XX is upper 2 bits */ addr = 0xF0 | ((msg->addr & 0x300) >> 7); bsc_writel(dev, addr, chip_address); /* Second byte is the remaining 8 bits */ addr = msg->addr & 0xFF; if (brcmstb_i2c_write_data_byte(dev, &addr, 0) < 0) return -EREMOTEIO; if (msg->flags & I2C_M_RD) { /* For read, send restart without stop condition */ brcmstb_set_i2c_start_stop(dev, COND_RESTART | COND_NOSTOP); /* Then re-send the first byte with the read bit set */ addr = 0xF0 | ((msg->addr & 0x300) >> 7) | 0x01; if (brcmstb_i2c_write_data_byte(dev, &addr, 0) < 0) return -EREMOTEIO; } } else { addr = i2c_8bit_addr_from_msg(msg); bsc_writel(dev, addr, chip_address); } return 0; } /* Master transfer function */ static int brcmstb_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg msgs[], int num) { struct brcmstb_i2c_dev *dev = i2c_get_adapdata(adapter); struct i2c_msg *pmsg; int rc = 0; int i; int bytes_to_xfer; u8 *tmp_buf; int len = 0; int xfersz = brcmstb_i2c_get_xfersz(dev); u32 cond, cond_per_msg; /* Loop through all messages */ for (i = 0; i < num; i++) { pmsg = &msgs[i]; len = pmsg->len; tmp_buf = pmsg->buf; dev_dbg(dev->device, "msg# %d/%d flg %x buf %x len %d\n", i, num - 1, pmsg->flags, pmsg->buf ? pmsg->buf[0] : '0', pmsg->len); if (i < (num - 1) && (msgs[i + 1].flags & I2C_M_NOSTART)) cond = ~COND_START_STOP; else cond = COND_RESTART | COND_NOSTOP; brcmstb_set_i2c_start_stop(dev, cond); /* Send slave address */ if (!(pmsg->flags & I2C_M_NOSTART)) { rc = brcmstb_i2c_do_addr(dev, pmsg); if (rc < 0) { dev_dbg(dev->device, "NACK for addr %2.2x msg#%d rc = %d\n", pmsg->addr, i, rc); goto out; } } cond_per_msg = cond; /* Perform data transfer */ while (len) { bytes_to_xfer = min(len, xfersz); if (len <= xfersz) { if (i == (num - 1)) cond_per_msg = cond_per_msg & ~(COND_RESTART | COND_NOSTOP); else cond_per_msg = cond; } else { cond_per_msg = (cond_per_msg & ~COND_RESTART) | COND_NOSTOP; } brcmstb_set_i2c_start_stop(dev, cond_per_msg); rc = brcmstb_i2c_xfer_bsc_data(dev, tmp_buf, bytes_to_xfer, pmsg); if (rc < 0) goto out; len -= bytes_to_xfer; tmp_buf += bytes_to_xfer; cond_per_msg = COND_NOSTART | COND_NOSTOP; } } rc = num; out: return rc; } static u32 brcmstb_i2c_functionality(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR | I2C_FUNC_NOSTART | I2C_FUNC_PROTOCOL_MANGLING; } static const struct i2c_algorithm brcmstb_i2c_algo = { .master_xfer = brcmstb_i2c_xfer, .functionality = brcmstb_i2c_functionality, }; static void brcmstb_i2c_set_bus_speed(struct brcmstb_i2c_dev *dev) { int i = 0, num_speeds = ARRAY_SIZE(bsc_clk); u32 clk_freq_hz = dev->clk_freq_hz; for (i = 0; i < num_speeds; i++) { if (bsc_clk[i].hz == clk_freq_hz) { dev->bsc_regmap->ctl_reg &= ~(BSC_CTL_REG_SCL_SEL_MASK | BSC_CTL_REG_DIV_CLK_MASK); dev->bsc_regmap->ctl_reg |= (bsc_clk[i].scl_mask | bsc_clk[i].div_mask); bsc_writel(dev, dev->bsc_regmap->ctl_reg, ctl_reg); break; } } /* in case we did not find a valid speed */ if (i == num_speeds) { i = (bsc_readl(dev, ctl_reg) & BSC_CTL_REG_SCL_SEL_MASK) >> BSC_CTL_REG_SCL_SEL_SHIFT; dev_warn(dev->device, "leaving current clock-frequency @ %dHz\n", bsc_clk[i].hz); } } static void brcmstb_i2c_set_bsc_reg_defaults(struct brcmstb_i2c_dev *dev) { if
(brcmstb_i2c_get_data_regsz(dev) == sizeof(u32)) /* set 4 byte data in/out xfers */ dev->bsc_regmap->ctlhi_reg = BSC_CTLHI_REG_DATAREG_SIZE_MASK; else dev->bsc_regmap->ctlhi_reg &= ~BSC_CTLHI_REG_DATAREG_SIZE_MASK; bsc_writel(dev, dev->bsc_regmap->ctlhi_reg, ctlhi_reg); /* set bus speed */ brcmstb_i2c_set_bus_speed(dev); } #define AUTOI2C_CTRL0 0x26c #define AUTOI2C_CTRL0_RELEASE_BSC BIT(1) static int bcm2711_release_bsc(struct brcmstb_i2c_dev *dev) { struct platform_device *pdev = to_platform_device(dev->device); void __iomem *autoi2c; /* Map hardware registers */ autoi2c = devm_platform_ioremap_resource_byname(pdev, "auto-i2c"); if (IS_ERR(autoi2c)) return PTR_ERR(autoi2c); writel(AUTOI2C_CTRL0_RELEASE_BSC, autoi2c + AUTOI2C_CTRL0); devm_iounmap(&pdev->dev, autoi2c); /* We need to reset the controller after the release */ dev->bsc_regmap->iic_enable = 0; bsc_writel(dev, dev->bsc_regmap->iic_enable, iic_enable); return 0; } static int brcmstb_i2c_probe(struct platform_device *pdev) { struct brcmstb_i2c_dev *dev; struct i2c_adapter *adap; const char *int_name; int rc; /* Allocate memory for private data structure */ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; dev->bsc_regmap = devm_kzalloc(&pdev->dev, sizeof(*dev->bsc_regmap), GFP_KERNEL); if (!dev->bsc_regmap) return -ENOMEM; platform_set_drvdata(pdev, dev); dev->device = &pdev->dev; init_completion(&dev->done); /* Map hardware registers */ dev->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(dev->base)) return PTR_ERR(dev->base); if (of_device_is_compatible(dev->device->of_node, "brcm,bcm2711-hdmi-i2c")) { rc = bcm2711_release_bsc(dev); if (rc) return rc; } rc = of_property_read_string(dev->device->of_node, "interrupt-names", &int_name); if (rc < 0) int_name = NULL; /* Get the interrupt number */ dev->irq = platform_get_irq_optional(pdev, 0); /* disable the bsc interrupt line */ brcmstb_i2c_enable_disable_irq(dev, INT_DISABLE); /* register the ISR handler */ if (dev->irq >= 0) { rc = devm_request_irq(&pdev->dev, dev->irq, brcmstb_i2c_isr, IRQF_SHARED, int_name ? int_name : pdev->name, dev); if (rc) { dev_dbg(dev->device, "falling back to polling mode"); dev->irq = -1; } } if (of_property_read_u32(dev->device->of_node, "clock-frequency", &dev->clk_freq_hz)) { dev_warn(dev->device, "setting clock-frequency@%dHz\n", bsc_clk[0].hz); dev->clk_freq_hz = bsc_clk[0].hz; } /* set the data in/out register size for compatible SoCs */ if (of_device_is_compatible(dev->device->of_node, "brcm,brcmper-i2c")) dev->data_regsz = sizeof(u8); else dev->data_regsz = sizeof(u32); brcmstb_i2c_set_bsc_reg_defaults(dev); /* Add the i2c adapter */ adap = &dev->adapter; i2c_set_adapdata(adap, dev); adap->owner = THIS_MODULE; strscpy(adap->name, dev_name(&pdev->dev), sizeof(adap->name)); adap->algo = &brcmstb_i2c_algo; adap->dev.parent = &pdev->dev; adap->dev.of_node = pdev->dev.of_node; rc = i2c_add_adapter(adap); if (rc) return rc; dev_info(dev->device, "%s@%dhz registered in %s mode\n", int_name ? int_name : " ", dev->clk_freq_hz, (dev->irq >= 0) ? 
"interrupt" : "polling"); return 0; } static void brcmstb_i2c_remove(struct platform_device *pdev) { struct brcmstb_i2c_dev *dev = platform_get_drvdata(pdev); i2c_del_adapter(&dev->adapter); } static int brcmstb_i2c_suspend(struct device *dev) { struct brcmstb_i2c_dev *i2c_dev = dev_get_drvdata(dev); i2c_mark_adapter_suspended(&i2c_dev->adapter); return 0; } static int brcmstb_i2c_resume(struct device *dev) { struct brcmstb_i2c_dev *i2c_dev = dev_get_drvdata(dev); brcmstb_i2c_set_bsc_reg_defaults(i2c_dev); i2c_mark_adapter_resumed(&i2c_dev->adapter); return 0; } static DEFINE_SIMPLE_DEV_PM_OPS(brcmstb_i2c_pm, brcmstb_i2c_suspend, brcmstb_i2c_resume); static const struct of_device_id brcmstb_i2c_of_match[] = { {.compatible = "brcm,brcmstb-i2c"}, {.compatible = "brcm,brcmper-i2c"}, {.compatible = "brcm,bcm2711-hdmi-i2c"}, {}, }; MODULE_DEVICE_TABLE(of, brcmstb_i2c_of_match); static struct platform_driver brcmstb_i2c_driver = { .driver = { .name = "brcmstb-i2c", .of_match_table = brcmstb_i2c_of_match, .pm = pm_sleep_ptr(&brcmstb_i2c_pm), }, .probe = brcmstb_i2c_probe, .remove_new = brcmstb_i2c_remove, }; module_platform_driver(brcmstb_i2c_driver); MODULE_AUTHOR("Kamal Dasu <[email protected]>"); MODULE_DESCRIPTION("Broadcom Settop I2C Driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/i2c/busses/i2c-brcmstb.c
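The word packing in brcmstb_i2c_xfer_bsc_data() above is easy to get wrong, so here is a minimal standalone sketch of the same little-endian scheme: outgoing bytes are shifted into a 32-bit register word low-byte-first, and received words are unpacked from the low byte upward. This is an illustrative userspace program, not driver code; the pack_word()/unpack_word() helper names are hypothetical.

/*
 * Userspace round-trip test of the BSC data_in/data_out byte packing.
 * Assumes 4-byte data registers, as brcmstb_i2c_get_data_regsz() would
 * report for the non-"brcm,brcmper-i2c" compatibles.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BITS_PER_BYTE 8

/* Pack up to data_regsz bytes of buf[cnt..] into one register word. */
static uint32_t pack_word(const uint8_t *buf, unsigned int cnt,
			  unsigned int len, unsigned int data_regsz)
{
	uint32_t word = 0;
	unsigned int byte;

	for (byte = 0; byte < data_regsz; byte++) {
		word >>= BITS_PER_BYTE;
		if ((cnt + byte) < len)
			word |= (uint32_t)buf[cnt + byte] <<
				(BITS_PER_BYTE * (data_regsz - 1));
	}
	return word;
}

/* Unpack one register word back into buf[cnt..], low byte first. */
static void unpack_word(uint32_t data, uint8_t *buf, unsigned int cnt,
			unsigned int len, unsigned int data_regsz)
{
	unsigned int byte;

	for (byte = 0; byte < data_regsz && (byte + cnt) < len; byte++) {
		buf[cnt + byte] = data & 0xff;
		data >>= BITS_PER_BYTE;
	}
}

int main(void)
{
	const uint8_t tx[5] = { 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint8_t rx[5] = { 0 };
	unsigned int data_regsz = sizeof(uint32_t);	/* 4-byte data regs */
	unsigned int cnt;

	for (cnt = 0; cnt < sizeof(tx); cnt += data_regsz) {
		/* A real transfer would write 'word' to data_in[i] ... */
		uint32_t word = pack_word(tx, cnt, sizeof(tx), data_regsz);

		/* ... and read it back from data_out[i]. */
		unpack_word(word, rx, cnt, sizeof(rx), data_regsz);
	}

	assert(memcmp(tx, rx, sizeof(tx)) == 0);
	printf("5 bytes round-trip through 4-byte words OK\n");
	return 0;
}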
// SPDX-License-Identifier: GPL-2.0-only /* * i2c-versatile.c * * Copyright (C) 2006 ARM Ltd. * written by Russell King, Deep Blue Solutions Ltd. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/io.h> #define I2C_CONTROL 0x00 #define I2C_CONTROLS 0x00 #define I2C_CONTROLC 0x04 #define SCL (1 << 0) #define SDA (1 << 1) struct i2c_versatile { struct i2c_adapter adap; struct i2c_algo_bit_data algo; void __iomem *base; }; static void i2c_versatile_setsda(void *data, int state) { struct i2c_versatile *i2c = data; writel(SDA, i2c->base + (state ? I2C_CONTROLS : I2C_CONTROLC)); } static void i2c_versatile_setscl(void *data, int state) { struct i2c_versatile *i2c = data; writel(SCL, i2c->base + (state ? I2C_CONTROLS : I2C_CONTROLC)); } static int i2c_versatile_getsda(void *data) { struct i2c_versatile *i2c = data; return !!(readl(i2c->base + I2C_CONTROL) & SDA); } static int i2c_versatile_getscl(void *data) { struct i2c_versatile *i2c = data; return !!(readl(i2c->base + I2C_CONTROL) & SCL); } static const struct i2c_algo_bit_data i2c_versatile_algo = { .setsda = i2c_versatile_setsda, .setscl = i2c_versatile_setscl, .getsda = i2c_versatile_getsda, .getscl = i2c_versatile_getscl, .udelay = 30, .timeout = HZ, }; static int i2c_versatile_probe(struct platform_device *dev) { struct i2c_versatile *i2c; int ret; i2c = devm_kzalloc(&dev->dev, sizeof(struct i2c_versatile), GFP_KERNEL); if (!i2c) return -ENOMEM; i2c->base = devm_platform_get_and_ioremap_resource(dev, 0, NULL); if (IS_ERR(i2c->base)) return PTR_ERR(i2c->base); writel(SCL | SDA, i2c->base + I2C_CONTROLS); i2c->adap.owner = THIS_MODULE; strscpy(i2c->adap.name, "Versatile I2C adapter", sizeof(i2c->adap.name)); i2c->adap.algo_data = &i2c->algo; i2c->adap.dev.parent = &dev->dev; i2c->adap.dev.of_node = dev->dev.of_node; i2c->algo = i2c_versatile_algo; i2c->algo.data = i2c; i2c->adap.nr = dev->id; ret = i2c_bit_add_numbered_bus(&i2c->adap); if (ret < 0) return ret; platform_set_drvdata(dev, i2c); return 0; } static void i2c_versatile_remove(struct platform_device *dev) { struct i2c_versatile *i2c = platform_get_drvdata(dev); i2c_del_adapter(&i2c->adap); } static const struct of_device_id i2c_versatile_match[] = { { .compatible = "arm,versatile-i2c", }, {}, }; MODULE_DEVICE_TABLE(of, i2c_versatile_match); static struct platform_driver i2c_versatile_driver = { .probe = i2c_versatile_probe, .remove_new = i2c_versatile_remove, .driver = { .name = "versatile-i2c", .of_match_table = i2c_versatile_match, }, }; static int __init i2c_versatile_init(void) { return platform_driver_register(&i2c_versatile_driver); } static void __exit i2c_versatile_exit(void) { platform_driver_unregister(&i2c_versatile_driver); } subsys_initcall(i2c_versatile_init); module_exit(i2c_versatile_exit); MODULE_DESCRIPTION("ARM Versatile I2C bus driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:versatile-i2c");
linux-master
drivers/i2c/busses/i2c-versatile.c
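i2c-versatile above is a pure bit-bang driver: the hardware exposes SCL and SDA only through a write-one-to-set register (I2C_CONTROLS) and a write-one-to-clear register (I2C_CONTROLC), and i2c-algo-bit supplies the protocol. Below is a minimal userspace sketch of that set/clear idiom, assuming a fake in-memory register; struct fake_regs and its helpers are hypothetical stand-ins for the ioremap()ed window.

#include <stdint.h>
#include <stdio.h>

#define SCL (1u << 0)
#define SDA (1u << 1)

struct fake_regs {
	uint32_t control;	/* current line state, as I2C_CONTROL reads */
};

/* Write-one-to-set, like a write to I2C_CONTROLS */
static void reg_set(struct fake_regs *r, uint32_t bits)
{
	r->control |= bits;
}

/* Write-one-to-clear, like a write to I2C_CONTROLC */
static void reg_clear(struct fake_regs *r, uint32_t bits)
{
	r->control &= ~bits;
}

/* Mirrors i2c_versatile_setsda()/setscl(): state picks SET or CLEAR */
static void set_line(struct fake_regs *r, uint32_t line, int state)
{
	if (state)
		reg_set(r, line);
	else
		reg_clear(r, line);
}

int main(void)
{
	struct fake_regs r = { .control = SCL | SDA }; /* idle: both high */

	/* I2C START condition: SDA falls while SCL stays high */
	set_line(&r, SDA, 0);
	printf("after START: SCL=%u SDA=%u\n",
	       !!(r.control & SCL), !!(r.control & SDA));

	/* Clock out one '1' bit: SCL low, SDA high, SCL high again */
	set_line(&r, SCL, 0);
	set_line(&r, SDA, 1);
	set_line(&r, SCL, 1);
	printf("after bit:   SCL=%u SDA=%u\n",
	       !!(r.control & SCL), !!(r.control & SDA));
	return 0;
}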
/* * P2WI (Push-Pull Two Wire Interface) bus driver. * * Author: Boris BREZILLON <[email protected]> * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. * * The P2WI controller looks like an SMBus controller which only supports byte * data transfers. But it differs from standard SMBus protocol on several * aspects: * - it supports only one slave device, and thus drops the address field * - it adds a parity bit every 8 bits of data * - only one read access is required to read a byte (instead of a write * followed by a read access in standard SMBus protocol) * - there's no Ack bit after each byte transfer * * This means this bus cannot be used to interface with standard SMBus * devices (the only known device to support this interface is the AXP221 * PMIC). * */ #include <linux/clk.h> #include <linux/i2c.h> #include <linux/io.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/reset.h> /* P2WI registers */ #define P2WI_CTRL 0x0 #define P2WI_CCR 0x4 #define P2WI_INTE 0x8 #define P2WI_INTS 0xc #define P2WI_DADDR0 0x10 #define P2WI_DADDR1 0x14 #define P2WI_DLEN 0x18 #define P2WI_DATA0 0x1c #define P2WI_DATA1 0x20 #define P2WI_LCR 0x24 #define P2WI_PMCR 0x28 /* CTRL fields */ #define P2WI_CTRL_START_TRANS BIT(7) #define P2WI_CTRL_ABORT_TRANS BIT(6) #define P2WI_CTRL_GLOBAL_INT_ENB BIT(1) #define P2WI_CTRL_SOFT_RST BIT(0) /* CLK CTRL fields */ #define P2WI_CCR_SDA_OUT_DELAY(v) (((v) & 0x7) << 8) #define P2WI_CCR_MAX_CLK_DIV 0xff #define P2WI_CCR_CLK_DIV(v) ((v) & P2WI_CCR_MAX_CLK_DIV) /* STATUS fields */ #define P2WI_INTS_TRANS_ERR_ID(v) (((v) >> 8) & 0xff) #define P2WI_INTS_LOAD_BSY BIT(2) #define P2WI_INTS_TRANS_ERR BIT(1) #define P2WI_INTS_TRANS_OVER BIT(0) /* DATA LENGTH fields */ #define P2WI_DLEN_READ BIT(4) #define P2WI_DLEN_DATA_LENGTH(v) ((v - 1) & 0x7) /* LINE CTRL fields */ #define P2WI_LCR_SCL_STATE BIT(5) #define P2WI_LCR_SDA_STATE BIT(4) #define P2WI_LCR_SCL_CTL BIT(3) #define P2WI_LCR_SCL_CTL_EN BIT(2) #define P2WI_LCR_SDA_CTL BIT(1) #define P2WI_LCR_SDA_CTL_EN BIT(0) /* PMU MODE CTRL fields */ #define P2WI_PMCR_PMU_INIT_SEND BIT(31) #define P2WI_PMCR_PMU_INIT_DATA(v) (((v) & 0xff) << 16) #define P2WI_PMCR_PMU_MODE_REG(v) (((v) & 0xff) << 8) #define P2WI_PMCR_PMU_DEV_ADDR(v) ((v) & 0xff) #define P2WI_MAX_FREQ 6000000 struct p2wi { struct i2c_adapter adapter; struct completion complete; unsigned int status; void __iomem *regs; struct clk *clk; struct reset_control *rstc; int slave_addr; }; static irqreturn_t p2wi_interrupt(int irq, void *dev_id) { struct p2wi *p2wi = dev_id; unsigned long status; status = readl(p2wi->regs + P2WI_INTS); p2wi->status = status; /* Clear interrupts */ status &= (P2WI_INTS_LOAD_BSY | P2WI_INTS_TRANS_ERR | P2WI_INTS_TRANS_OVER); writel(status, p2wi->regs + P2WI_INTS); complete(&p2wi->complete); return IRQ_HANDLED; } static u32 p2wi_functionality(struct i2c_adapter *adap) { return I2C_FUNC_SMBUS_BYTE_DATA; } static int p2wi_smbus_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { struct p2wi *p2wi = i2c_get_adapdata(adap); unsigned long dlen = P2WI_DLEN_DATA_LENGTH(1); if (p2wi->slave_addr >= 0 && addr != p2wi->slave_addr) { dev_err(&adap->dev, "invalid P2WI address\n"); return -EINVAL; } if (!data) return -EINVAL; writel(command, p2wi->regs + P2WI_DADDR0); if (read_write
== I2C_SMBUS_READ) dlen |= P2WI_DLEN_READ; else writel(data->byte, p2wi->regs + P2WI_DATA0); writel(dlen, p2wi->regs + P2WI_DLEN); if (readl(p2wi->regs + P2WI_CTRL) & P2WI_CTRL_START_TRANS) { dev_err(&adap->dev, "P2WI bus busy\n"); return -EBUSY; } reinit_completion(&p2wi->complete); writel(P2WI_INTS_LOAD_BSY | P2WI_INTS_TRANS_ERR | P2WI_INTS_TRANS_OVER, p2wi->regs + P2WI_INTE); writel(P2WI_CTRL_START_TRANS | P2WI_CTRL_GLOBAL_INT_ENB, p2wi->regs + P2WI_CTRL); wait_for_completion(&p2wi->complete); if (p2wi->status & P2WI_INTS_LOAD_BSY) { dev_err(&adap->dev, "P2WI bus busy\n"); return -EBUSY; } if (p2wi->status & P2WI_INTS_TRANS_ERR) { dev_err(&adap->dev, "P2WI bus xfer error\n"); return -ENXIO; } if (read_write == I2C_SMBUS_READ) data->byte = readl(p2wi->regs + P2WI_DATA0); return 0; } static const struct i2c_algorithm p2wi_algo = { .smbus_xfer = p2wi_smbus_xfer, .functionality = p2wi_functionality, }; static const struct of_device_id p2wi_of_match_table[] = { { .compatible = "allwinner,sun6i-a31-p2wi" }, {} }; MODULE_DEVICE_TABLE(of, p2wi_of_match_table); static int p2wi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct device_node *childnp; unsigned long parent_clk_freq; u32 clk_freq = I2C_MAX_STANDARD_MODE_FREQ; struct p2wi *p2wi; u32 slave_addr; int clk_div; int irq; int ret; of_property_read_u32(np, "clock-frequency", &clk_freq); if (clk_freq > P2WI_MAX_FREQ) { dev_err(dev, "required clock-frequency (%u Hz) is too high (max = 6MHz)", clk_freq); return -EINVAL; } if (of_get_child_count(np) > 1) { dev_err(dev, "P2WI only supports one slave device\n"); return -EINVAL; } p2wi = devm_kzalloc(dev, sizeof(struct p2wi), GFP_KERNEL); if (!p2wi) return -ENOMEM; p2wi->slave_addr = -1; /* * Authorize a p2wi node without any children to be able to use an * i2c-dev from userspace. * In this case the slave_addr is set to -1 and won't be checked when * launching a P2WI transfer.
*/ childnp = of_get_next_available_child(np, NULL); if (childnp) { ret = of_property_read_u32(childnp, "reg", &slave_addr); if (ret) { dev_err(dev, "invalid slave address on node %pOF\n", childnp); return -EINVAL; } p2wi->slave_addr = slave_addr; } p2wi->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(p2wi->regs)) return PTR_ERR(p2wi->regs); strscpy(p2wi->adapter.name, pdev->name, sizeof(p2wi->adapter.name)); irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; p2wi->clk = devm_clk_get_enabled(dev, NULL); if (IS_ERR(p2wi->clk)) { ret = PTR_ERR(p2wi->clk); dev_err(dev, "failed to enable clk: %d\n", ret); return ret; } parent_clk_freq = clk_get_rate(p2wi->clk); p2wi->rstc = devm_reset_control_get_exclusive(dev, NULL); if (IS_ERR(p2wi->rstc)) { dev_err(dev, "failed to retrieve reset controller: %pe\n", p2wi->rstc); return PTR_ERR(p2wi->rstc); } ret = reset_control_deassert(p2wi->rstc); if (ret) { dev_err(dev, "failed to deassert reset line: %d\n", ret); return ret; } init_completion(&p2wi->complete); p2wi->adapter.dev.parent = dev; p2wi->adapter.algo = &p2wi_algo; p2wi->adapter.owner = THIS_MODULE; p2wi->adapter.dev.of_node = pdev->dev.of_node; platform_set_drvdata(pdev, p2wi); i2c_set_adapdata(&p2wi->adapter, p2wi); ret = devm_request_irq(dev, irq, p2wi_interrupt, 0, pdev->name, p2wi); if (ret) { dev_err(dev, "can't register interrupt handler irq%d: %d\n", irq, ret); goto err_reset_assert; } writel(P2WI_CTRL_SOFT_RST, p2wi->regs + P2WI_CTRL); clk_div = parent_clk_freq / clk_freq; if (!clk_div) { dev_warn(dev, "clock-frequency is too high, setting it to %lu Hz\n", parent_clk_freq); clk_div = 1; } else if (clk_div > P2WI_CCR_MAX_CLK_DIV) { dev_warn(dev, "clock-frequency is too low, setting it to %lu Hz\n", parent_clk_freq / P2WI_CCR_MAX_CLK_DIV); clk_div = P2WI_CCR_MAX_CLK_DIV; } writel(P2WI_CCR_SDA_OUT_DELAY(1) | P2WI_CCR_CLK_DIV(clk_div), p2wi->regs + P2WI_CCR); ret = i2c_add_adapter(&p2wi->adapter); if (!ret) return 0; err_reset_assert: reset_control_assert(p2wi->rstc); return ret; } static void p2wi_remove(struct platform_device *dev) { struct p2wi *p2wi = platform_get_drvdata(dev); reset_control_assert(p2wi->rstc); i2c_del_adapter(&p2wi->adapter); } static struct platform_driver p2wi_driver = { .probe = p2wi_probe, .remove_new = p2wi_remove, .driver = { .name = "i2c-sunxi-p2wi", .of_match_table = p2wi_of_match_table, }, }; module_platform_driver(p2wi_driver); MODULE_AUTHOR("Boris BREZILLON <[email protected]>"); MODULE_DESCRIPTION("Allwinner P2WI driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/i2c/busses/i2c-sun6i-p2wi.c
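p2wi_probe() above derives the bus clock divider from the parent clock rate and the requested clock-frequency, clamping it to the 8-bit CCR field before merging it with the SDA output delay. The standalone sketch below reproduces that computation under the same register layout; p2wi_ccr_value() and the 24 MHz parent rate are assumptions for illustration only.

#include <stdint.h>
#include <stdio.h>

#define P2WI_CCR_SDA_OUT_DELAY(v)	(((v) & 0x7) << 8)
#define P2WI_CCR_MAX_CLK_DIV		0xff
#define P2WI_CCR_CLK_DIV(v)		((v) & P2WI_CCR_MAX_CLK_DIV)

/* Same divider math as the probe: parent rate over bus rate, clamped. */
static uint32_t p2wi_ccr_value(unsigned long parent_hz, uint32_t bus_hz)
{
	unsigned long clk_div = parent_hz / bus_hz;

	if (!clk_div)		/* requested rate above parent: run at parent */
		clk_div = 1;
	else if (clk_div > P2WI_CCR_MAX_CLK_DIV)
		clk_div = P2WI_CCR_MAX_CLK_DIV;

	return P2WI_CCR_SDA_OUT_DELAY(1) | P2WI_CCR_CLK_DIV(clk_div);
}

int main(void)
{
	/* e.g. a 24 MHz parent clock and the 100 kHz standard-mode default */
	unsigned long parent_hz = 24000000;
	uint32_t bus_hz = 100000;
	uint32_t ccr = p2wi_ccr_value(parent_hz, bus_hz);
	unsigned int div = ccr & P2WI_CCR_MAX_CLK_DIV;

	printf("div=%u -> actual bus rate ~%lu Hz, CCR=0x%03x\n",
	       div, parent_hz / div, (unsigned int)ccr);
	return 0;
}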
// SPDX-License-Identifier: GPL-2.0-only /* * i2c_pca_platform.c * * Platform driver for the PCA9564 I2C controller. * * Copyright (C) 2008 Pengutronix * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/jiffies.h> #include <linux/errno.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/i2c-algo-pca.h> #include <linux/platform_data/i2c-pca-platform.h> #include <linux/gpio/consumer.h> #include <linux/io.h> #include <linux/of.h> #include <asm/irq.h> struct i2c_pca_pf_data { void __iomem *reg_base; int irq; /* if 0, use polling */ struct gpio_desc *gpio; wait_queue_head_t wait; struct i2c_adapter adap; struct i2c_algo_pca_data algo_data; }; /* Read/Write functions for different register alignments */ static int i2c_pca_pf_readbyte8(void *pd, int reg) { struct i2c_pca_pf_data *i2c = pd; return ioread8(i2c->reg_base + reg); } static int i2c_pca_pf_readbyte16(void *pd, int reg) { struct i2c_pca_pf_data *i2c = pd; return ioread8(i2c->reg_base + reg * 2); } static int i2c_pca_pf_readbyte32(void *pd, int reg) { struct i2c_pca_pf_data *i2c = pd; return ioread8(i2c->reg_base + reg * 4); } static void i2c_pca_pf_writebyte8(void *pd, int reg, int val) { struct i2c_pca_pf_data *i2c = pd; iowrite8(val, i2c->reg_base + reg); } static void i2c_pca_pf_writebyte16(void *pd, int reg, int val) { struct i2c_pca_pf_data *i2c = pd; iowrite8(val, i2c->reg_base + reg * 2); } static void i2c_pca_pf_writebyte32(void *pd, int reg, int val) { struct i2c_pca_pf_data *i2c = pd; iowrite8(val, i2c->reg_base + reg * 4); } static int i2c_pca_pf_waitforcompletion(void *pd) { struct i2c_pca_pf_data *i2c = pd; unsigned long timeout; long ret; if (i2c->irq) { ret = wait_event_timeout(i2c->wait, i2c->algo_data.read_byte(i2c, I2C_PCA_CON) & I2C_PCA_CON_SI, i2c->adap.timeout); } else { /* Do polling */ timeout = jiffies + i2c->adap.timeout; do { ret = time_before(jiffies, timeout); if (i2c->algo_data.read_byte(i2c, I2C_PCA_CON) & I2C_PCA_CON_SI) break; udelay(100); } while (ret); } return ret > 0; } static void i2c_pca_pf_dummyreset(void *pd) { struct i2c_pca_pf_data *i2c = pd; dev_warn(&i2c->adap.dev, "No reset-pin found. Chip may get stuck!\n"); } static void i2c_pca_pf_resetchip(void *pd) { struct i2c_pca_pf_data *i2c = pd; gpiod_set_value(i2c->gpio, 1); ndelay(100); gpiod_set_value(i2c->gpio, 0); } static irqreturn_t i2c_pca_pf_handler(int this_irq, void *dev_id) { struct i2c_pca_pf_data *i2c = dev_id; if ((i2c->algo_data.read_byte(i2c, I2C_PCA_CON) & I2C_PCA_CON_SI) == 0) return IRQ_NONE; wake_up(&i2c->wait); return IRQ_HANDLED; } static int i2c_pca_pf_probe(struct platform_device *pdev) { struct i2c_pca_pf_data *i2c; struct resource *res; struct i2c_pca9564_pf_platform_data *platform_data = dev_get_platdata(&pdev->dev); struct device_node *np = pdev->dev.of_node; int ret = 0; int irq; irq = platform_get_irq_optional(pdev, 0); /* If irq is 0, we do polling. 
*/ if (irq < 0) irq = 0; i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL); if (!i2c) return -ENOMEM; i2c->reg_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(i2c->reg_base)) return PTR_ERR(i2c->reg_base); init_waitqueue_head(&i2c->wait); i2c->irq = irq; i2c->adap.nr = pdev->id; i2c->adap.owner = THIS_MODULE; snprintf(i2c->adap.name, sizeof(i2c->adap.name), "PCA9564/PCA9665 at 0x%08lx", (unsigned long) res->start); i2c->adap.algo_data = &i2c->algo_data; i2c->adap.dev.parent = &pdev->dev; i2c->adap.dev.of_node = np; i2c->gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(i2c->gpio)) return PTR_ERR(i2c->gpio); i2c->adap.timeout = HZ; ret = device_property_read_u32(&pdev->dev, "clock-frequency", &i2c->algo_data.i2c_clock); if (ret) i2c->algo_data.i2c_clock = 59000; if (platform_data) { i2c->adap.timeout = platform_data->timeout; i2c->algo_data.i2c_clock = platform_data->i2c_clock_speed; } i2c->algo_data.data = i2c; i2c->algo_data.wait_for_completion = i2c_pca_pf_waitforcompletion; if (i2c->gpio) i2c->algo_data.reset_chip = i2c_pca_pf_resetchip; else i2c->algo_data.reset_chip = i2c_pca_pf_dummyreset; switch (res->flags & IORESOURCE_MEM_TYPE_MASK) { case IORESOURCE_MEM_32BIT: i2c->algo_data.write_byte = i2c_pca_pf_writebyte32; i2c->algo_data.read_byte = i2c_pca_pf_readbyte32; break; case IORESOURCE_MEM_16BIT: i2c->algo_data.write_byte = i2c_pca_pf_writebyte16; i2c->algo_data.read_byte = i2c_pca_pf_readbyte16; break; case IORESOURCE_MEM_8BIT: default: i2c->algo_data.write_byte = i2c_pca_pf_writebyte8; i2c->algo_data.read_byte = i2c_pca_pf_readbyte8; break; } if (irq) { ret = devm_request_irq(&pdev->dev, irq, i2c_pca_pf_handler, IRQF_TRIGGER_FALLING, pdev->name, i2c); if (ret) return ret; } ret = i2c_pca_add_numbered_bus(&i2c->adap); if (ret) return ret; platform_set_drvdata(pdev, i2c); dev_info(&pdev->dev, "registered.\n"); return 0; } static void i2c_pca_pf_remove(struct platform_device *pdev) { struct i2c_pca_pf_data *i2c = platform_get_drvdata(pdev); i2c_del_adapter(&i2c->adap); } #ifdef CONFIG_OF static const struct of_device_id i2c_pca_of_match_table[] = { { .compatible = "nxp,pca9564" }, { .compatible = "nxp,pca9665" }, {}, }; MODULE_DEVICE_TABLE(of, i2c_pca_of_match_table); #endif static struct platform_driver i2c_pca_pf_driver = { .probe = i2c_pca_pf_probe, .remove_new = i2c_pca_pf_remove, .driver = { .name = "i2c-pca-platform", .of_match_table = of_match_ptr(i2c_pca_of_match_table), }, }; module_platform_driver(i2c_pca_pf_driver); MODULE_AUTHOR("Wolfram Sang <[email protected]>"); MODULE_DESCRIPTION("I2C-PCA9564/PCA9665 platform driver"); MODULE_LICENSE("GPL");
linux-master
drivers/i2c/busses/i2c-pca-platform.c
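The readbyte8/16/32 accessor trio above exists because the 8-bit PCA9564/PCA9665 can sit on a wider bus, where register N lands at byte offset N, N*2 or N*4; the probe selects the matching pair once from the resource's IORESOURCE_MEM_TYPE_MASK flags. Here is a hedged standalone sketch of that stride pattern, with a plain array standing in for the ioremap()ed window; struct regio and the demo values are hypothetical.

#include <stdint.h>
#include <stdio.h>

struct regio {
	uint8_t *base;		/* stands in for the ioremap()ed region */
	unsigned int stride;	/* 1, 2 or 4 bytes between registers */
};

static uint8_t reg_read(struct regio *io, int reg)
{
	return io->base[reg * io->stride];
}

static void reg_write(struct regio *io, int reg, uint8_t val)
{
	io->base[reg * io->stride] = val;
}

int main(void)
{
	uint8_t bus8[8] = { 0 }, bus32[32] = { 0 };
	struct regio io8 = { .base = bus8, .stride = 1 };	/* 8-bit bus */
	struct regio io32 = { .base = bus32, .stride = 4 };	/* 32-bit bus */

	/* Register 2 lands at byte 2 on an 8-bit bus, byte 8 on a 32-bit bus */
	reg_write(&io8, 2, 0xaa);
	reg_write(&io32, 2, 0xbb);

	printf("8-bit bus:  reg2 @ offset 2 = 0x%02x\n", reg_read(&io8, 2));
	printf("32-bit bus: reg2 @ offset 8 = 0x%02x (bus32[8]=0x%02x)\n",
	       reg_read(&io32, 2), bus32[8]);
	return 0;
}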
// SPDX-License-Identifier: GPL-2.0 /* * Driver for STMicroelectronics STM32F7 I2C controller * * This I2C controller is described in the STM32F75xxx and STM32F74xxx Soc * reference manual. * Please see below a link to the documentation: * http://www.st.com/resource/en/reference_manual/dm00124865.pdf * * Copyright (C) M'boumba Cedric Madianga 2017 * Copyright (C) STMicroelectronics 2017 * Author: M'boumba Cedric Madianga <[email protected]> * * This driver is based on i2c-stm32f4.c * */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/i2c.h> #include <linux/i2c-smbus.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/pinctrl/consumer.h> #include <linux/pm_runtime.h> #include <linux/pm_wakeirq.h> #include <linux/regmap.h> #include <linux/reset.h> #include <linux/slab.h> #include "i2c-stm32.h" /* STM32F7 I2C registers */ #define STM32F7_I2C_CR1 0x00 #define STM32F7_I2C_CR2 0x04 #define STM32F7_I2C_OAR1 0x08 #define STM32F7_I2C_OAR2 0x0C #define STM32F7_I2C_PECR 0x20 #define STM32F7_I2C_TIMINGR 0x10 #define STM32F7_I2C_ISR 0x18 #define STM32F7_I2C_ICR 0x1C #define STM32F7_I2C_RXDR 0x24 #define STM32F7_I2C_TXDR 0x28 /* STM32F7 I2C control 1 */ #define STM32F7_I2C_CR1_PECEN BIT(23) #define STM32F7_I2C_CR1_ALERTEN BIT(22) #define STM32F7_I2C_CR1_SMBHEN BIT(20) #define STM32F7_I2C_CR1_WUPEN BIT(18) #define STM32F7_I2C_CR1_SBC BIT(16) #define STM32F7_I2C_CR1_RXDMAEN BIT(15) #define STM32F7_I2C_CR1_TXDMAEN BIT(14) #define STM32F7_I2C_CR1_ANFOFF BIT(12) #define STM32F7_I2C_CR1_DNF_MASK GENMASK(11, 8) #define STM32F7_I2C_CR1_DNF(n) (((n) & 0xf) << 8) #define STM32F7_I2C_CR1_ERRIE BIT(7) #define STM32F7_I2C_CR1_TCIE BIT(6) #define STM32F7_I2C_CR1_STOPIE BIT(5) #define STM32F7_I2C_CR1_NACKIE BIT(4) #define STM32F7_I2C_CR1_ADDRIE BIT(3) #define STM32F7_I2C_CR1_RXIE BIT(2) #define STM32F7_I2C_CR1_TXIE BIT(1) #define STM32F7_I2C_CR1_PE BIT(0) #define STM32F7_I2C_ALL_IRQ_MASK (STM32F7_I2C_CR1_ERRIE \ | STM32F7_I2C_CR1_TCIE \ | STM32F7_I2C_CR1_STOPIE \ | STM32F7_I2C_CR1_NACKIE \ | STM32F7_I2C_CR1_RXIE \ | STM32F7_I2C_CR1_TXIE) #define STM32F7_I2C_XFER_IRQ_MASK (STM32F7_I2C_CR1_TCIE \ | STM32F7_I2C_CR1_STOPIE \ | STM32F7_I2C_CR1_NACKIE \ | STM32F7_I2C_CR1_RXIE \ | STM32F7_I2C_CR1_TXIE) /* STM32F7 I2C control 2 */ #define STM32F7_I2C_CR2_PECBYTE BIT(26) #define STM32F7_I2C_CR2_RELOAD BIT(24) #define STM32F7_I2C_CR2_NBYTES_MASK GENMASK(23, 16) #define STM32F7_I2C_CR2_NBYTES(n) (((n) & 0xff) << 16) #define STM32F7_I2C_CR2_NACK BIT(15) #define STM32F7_I2C_CR2_STOP BIT(14) #define STM32F7_I2C_CR2_START BIT(13) #define STM32F7_I2C_CR2_HEAD10R BIT(12) #define STM32F7_I2C_CR2_ADD10 BIT(11) #define STM32F7_I2C_CR2_RD_WRN BIT(10) #define STM32F7_I2C_CR2_SADD10_MASK GENMASK(9, 0) #define STM32F7_I2C_CR2_SADD10(n) (((n) & \ STM32F7_I2C_CR2_SADD10_MASK)) #define STM32F7_I2C_CR2_SADD7_MASK GENMASK(7, 1) #define STM32F7_I2C_CR2_SADD7(n) (((n) & 0x7f) << 1) /* STM32F7 I2C Own Address 1 */ #define STM32F7_I2C_OAR1_OA1EN BIT(15) #define STM32F7_I2C_OAR1_OA1MODE BIT(10) #define STM32F7_I2C_OAR1_OA1_10_MASK GENMASK(9, 0) #define STM32F7_I2C_OAR1_OA1_10(n) (((n) & \ STM32F7_I2C_OAR1_OA1_10_MASK)) #define STM32F7_I2C_OAR1_OA1_7_MASK GENMASK(7, 1) #define STM32F7_I2C_OAR1_OA1_7(n) (((n) & 0x7f) << 1) #define STM32F7_I2C_OAR1_MASK (STM32F7_I2C_OAR1_OA1_7_MASK \ | 
STM32F7_I2C_OAR1_OA1_10_MASK \ | STM32F7_I2C_OAR1_OA1EN \ | STM32F7_I2C_OAR1_OA1MODE) /* STM32F7 I2C Own Address 2 */ #define STM32F7_I2C_OAR2_OA2EN BIT(15) #define STM32F7_I2C_OAR2_OA2MSK_MASK GENMASK(10, 8) #define STM32F7_I2C_OAR2_OA2MSK(n) (((n) & 0x7) << 8) #define STM32F7_I2C_OAR2_OA2_7_MASK GENMASK(7, 1) #define STM32F7_I2C_OAR2_OA2_7(n) (((n) & 0x7f) << 1) #define STM32F7_I2C_OAR2_MASK (STM32F7_I2C_OAR2_OA2MSK_MASK \ | STM32F7_I2C_OAR2_OA2_7_MASK \ | STM32F7_I2C_OAR2_OA2EN) /* STM32F7 I2C Interrupt Status */ #define STM32F7_I2C_ISR_ADDCODE_MASK GENMASK(23, 17) #define STM32F7_I2C_ISR_ADDCODE_GET(n) \ (((n) & STM32F7_I2C_ISR_ADDCODE_MASK) >> 17) #define STM32F7_I2C_ISR_DIR BIT(16) #define STM32F7_I2C_ISR_BUSY BIT(15) #define STM32F7_I2C_ISR_ALERT BIT(13) #define STM32F7_I2C_ISR_PECERR BIT(11) #define STM32F7_I2C_ISR_ARLO BIT(9) #define STM32F7_I2C_ISR_BERR BIT(8) #define STM32F7_I2C_ISR_TCR BIT(7) #define STM32F7_I2C_ISR_TC BIT(6) #define STM32F7_I2C_ISR_STOPF BIT(5) #define STM32F7_I2C_ISR_NACKF BIT(4) #define STM32F7_I2C_ISR_ADDR BIT(3) #define STM32F7_I2C_ISR_RXNE BIT(2) #define STM32F7_I2C_ISR_TXIS BIT(1) #define STM32F7_I2C_ISR_TXE BIT(0) /* STM32F7 I2C Interrupt Clear */ #define STM32F7_I2C_ICR_ALERTCF BIT(13) #define STM32F7_I2C_ICR_PECCF BIT(11) #define STM32F7_I2C_ICR_ARLOCF BIT(9) #define STM32F7_I2C_ICR_BERRCF BIT(8) #define STM32F7_I2C_ICR_STOPCF BIT(5) #define STM32F7_I2C_ICR_NACKCF BIT(4) #define STM32F7_I2C_ICR_ADDRCF BIT(3) /* STM32F7 I2C Timing */ #define STM32F7_I2C_TIMINGR_PRESC(n) (((n) & 0xf) << 28) #define STM32F7_I2C_TIMINGR_SCLDEL(n) (((n) & 0xf) << 20) #define STM32F7_I2C_TIMINGR_SDADEL(n) (((n) & 0xf) << 16) #define STM32F7_I2C_TIMINGR_SCLH(n) (((n) & 0xff) << 8) #define STM32F7_I2C_TIMINGR_SCLL(n) ((n) & 0xff) #define STM32F7_I2C_MAX_LEN 0xff #define STM32F7_I2C_DMA_LEN_MIN 0x16 enum { STM32F7_SLAVE_HOSTNOTIFY, STM32F7_SLAVE_7_10_BITS_ADDR, STM32F7_SLAVE_7_BITS_ADDR, STM32F7_I2C_MAX_SLAVE }; #define STM32F7_I2C_DNF_DEFAULT 0 #define STM32F7_I2C_DNF_MAX 15 #define STM32F7_I2C_ANALOG_FILTER_DELAY_MIN 50 /* ns */ #define STM32F7_I2C_ANALOG_FILTER_DELAY_MAX 260 /* ns */ #define STM32F7_I2C_RISE_TIME_DEFAULT 25 /* ns */ #define STM32F7_I2C_FALL_TIME_DEFAULT 10 /* ns */ #define STM32F7_PRESC_MAX BIT(4) #define STM32F7_SCLDEL_MAX BIT(4) #define STM32F7_SDADEL_MAX BIT(4) #define STM32F7_SCLH_MAX BIT(8) #define STM32F7_SCLL_MAX BIT(8) #define STM32F7_AUTOSUSPEND_DELAY (HZ / 100) /** * struct stm32f7_i2c_regs - i2c f7 registers backup * @cr1: Control register 1 * @cr2: Control register 2 * @oar1: Own address 1 register * @oar2: Own address 2 register * @tmgr: Timing register */ struct stm32f7_i2c_regs { u32 cr1; u32 cr2; u32 oar1; u32 oar2; u32 tmgr; }; /** * struct stm32f7_i2c_spec - private i2c specification timing * @rate: I2C bus speed (Hz) * @fall_max: Max fall time of both SDA and SCL signals (ns) * @rise_max: Max rise time of both SDA and SCL signals (ns) * @hddat_min: Min data hold time (ns) * @vddat_max: Max data valid time (ns) * @sudat_min: Min data setup time (ns) * @l_min: Min low period of the SCL clock (ns) * @h_min: Min high period of the SCL clock (ns) */ struct stm32f7_i2c_spec { u32 rate; u32 fall_max; u32 rise_max; u32 hddat_min; u32 vddat_max; u32 sudat_min; u32 l_min; u32 h_min; }; /** * struct stm32f7_i2c_setup - private I2C timing setup parameters * @speed_freq: I2C speed frequency (Hz) * @clock_src: I2C clock source frequency (Hz) * @rise_time: Rise time (ns) * @fall_time: Fall time (ns) * @fmp_clr_offset: Fast Mode Plus clear register 
offset from set register */ struct stm32f7_i2c_setup { u32 speed_freq; u32 clock_src; u32 rise_time; u32 fall_time; u32 fmp_clr_offset; }; /** * struct stm32f7_i2c_timings - private I2C output parameters * @node: List entry * @presc: Prescaler value * @scldel: Data setup time * @sdadel: Data hold time * @sclh: SCL high period (master mode) * @scll: SCL low period (master mode) */ struct stm32f7_i2c_timings { struct list_head node; u8 presc; u8 scldel; u8 sdadel; u8 sclh; u8 scll; }; /** * struct stm32f7_i2c_msg - client specific data * @addr: 8-bit or 10-bit slave addr, including r/w bit * @count: number of bytes to be transferred * @buf: data buffer * @result: result of the transfer * @stop: last I2C msg to be sent, i.e. STOP to be generated * @smbus: boolean to know if the I2C IP is used in SMBus mode * @size: type of SMBus protocol * @read_write: direction of SMBus protocol * SMBus block read and SMBus block write - block read process call protocols * @smbus_buf: buffer to be used for SMBus protocol transfer. It will * contain a maximum of 32 bytes of data + byte command + byte count + PEC * This buffer has to be 32-bit aligned to be compliant with memory address * register in DMA mode. */ struct stm32f7_i2c_msg { u16 addr; u32 count; u8 *buf; int result; bool stop; bool smbus; int size; char read_write; u8 smbus_buf[I2C_SMBUS_BLOCK_MAX + 3] __aligned(4); }; /** * struct stm32f7_i2c_alert - SMBus alert specific data * @setup: platform data for the smbus_alert i2c client * @ara: I2C slave device used to respond to the SMBus Alert with Alert * Response Address */ struct stm32f7_i2c_alert { struct i2c_smbus_alert_setup setup; struct i2c_client *ara; }; /** * struct stm32f7_i2c_dev - private data of the controller * @adap: I2C adapter for this controller * @dev: device for this controller * @base: virtual memory area * @complete: completion of I2C message * @clk: hw i2c clock * @bus_rate: I2C clock frequency of the controller * @msg: Pointer to data to be written * @msg_num: number of I2C messages to be executed * @msg_id: message identifier * @f7_msg: customized i2c msg for driver usage * @setup: I2C timing input setup * @timing: I2C computed timings * @slave: list of slave devices registered on the I2C bus * @slave_running: slave device currently used * @backup_regs: backup of i2c controller registers (for suspend/resume) * @slave_dir: transfer direction for the current slave device * @master_mode: boolean to know in which mode the I2C is running (master or * slave) * @dma: dma data * @use_dma: boolean to know if dma is used in the current transfer * @regmap: holds SYSCFG phandle for Fast Mode Plus bits * @fmp_sreg: register address for setting Fast Mode Plus bits * @fmp_creg: register address for clearing Fast Mode Plus bits * @fmp_mask: mask for Fast Mode Plus bits in set register * @wakeup_src: boolean to know if the device is a wakeup source * @smbus_mode: states that the controller is configured in SMBus mode * @host_notify_client: SMBus host-notify client * @analog_filter: boolean to indicate enabling of the analog filter * @dnf_dt: value of digital filter requested via dt * @dnf: value of digital filter to apply * @alert: SMBus alert specific data */ struct stm32f7_i2c_dev { struct i2c_adapter adap; struct device *dev; void __iomem *base; struct completion complete; struct clk *clk; unsigned int bus_rate; struct i2c_msg *msg; unsigned int msg_num; unsigned int msg_id; struct stm32f7_i2c_msg f7_msg; struct stm32f7_i2c_setup setup; struct stm32f7_i2c_timings timing; struct
i2c_client *slave[STM32F7_I2C_MAX_SLAVE]; struct i2c_client *slave_running; struct stm32f7_i2c_regs backup_regs; u32 slave_dir; bool master_mode; struct stm32_i2c_dma *dma; bool use_dma; struct regmap *regmap; u32 fmp_sreg; u32 fmp_creg; u32 fmp_mask; bool wakeup_src; bool smbus_mode; struct i2c_client *host_notify_client; bool analog_filter; u32 dnf_dt; u32 dnf; struct stm32f7_i2c_alert *alert; }; /* * All these values are coming from I2C Specification, Version 6.0, 4th of * April 2014. * * Table10. Characteristics of the SDA and SCL bus lines for Standard, Fast, * and Fast-mode Plus I2C-bus devices */ static struct stm32f7_i2c_spec stm32f7_i2c_specs[] = { { .rate = I2C_MAX_STANDARD_MODE_FREQ, .fall_max = 300, .rise_max = 1000, .hddat_min = 0, .vddat_max = 3450, .sudat_min = 250, .l_min = 4700, .h_min = 4000, }, { .rate = I2C_MAX_FAST_MODE_FREQ, .fall_max = 300, .rise_max = 300, .hddat_min = 0, .vddat_max = 900, .sudat_min = 100, .l_min = 1300, .h_min = 600, }, { .rate = I2C_MAX_FAST_MODE_PLUS_FREQ, .fall_max = 100, .rise_max = 120, .hddat_min = 0, .vddat_max = 450, .sudat_min = 50, .l_min = 500, .h_min = 260, }, }; static const struct stm32f7_i2c_setup stm32f7_setup = { .rise_time = STM32F7_I2C_RISE_TIME_DEFAULT, .fall_time = STM32F7_I2C_FALL_TIME_DEFAULT, }; static const struct stm32f7_i2c_setup stm32mp15_setup = { .rise_time = STM32F7_I2C_RISE_TIME_DEFAULT, .fall_time = STM32F7_I2C_FALL_TIME_DEFAULT, .fmp_clr_offset = 0x40, }; static const struct stm32f7_i2c_setup stm32mp13_setup = { .rise_time = STM32F7_I2C_RISE_TIME_DEFAULT, .fall_time = STM32F7_I2C_FALL_TIME_DEFAULT, .fmp_clr_offset = 0x4, }; static inline void stm32f7_i2c_set_bits(void __iomem *reg, u32 mask) { writel_relaxed(readl_relaxed(reg) | mask, reg); } static inline void stm32f7_i2c_clr_bits(void __iomem *reg, u32 mask) { writel_relaxed(readl_relaxed(reg) & ~mask, reg); } static void stm32f7_i2c_disable_irq(struct stm32f7_i2c_dev *i2c_dev, u32 mask) { stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1, mask); } static struct stm32f7_i2c_spec *stm32f7_get_specs(u32 rate) { int i; for (i = 0; i < ARRAY_SIZE(stm32f7_i2c_specs); i++) if (rate <= stm32f7_i2c_specs[i].rate) return &stm32f7_i2c_specs[i]; return ERR_PTR(-EINVAL); } #define RATE_MIN(rate) ((rate) * 8 / 10) static int stm32f7_i2c_compute_timing(struct stm32f7_i2c_dev *i2c_dev, struct stm32f7_i2c_setup *setup, struct stm32f7_i2c_timings *output) { struct stm32f7_i2c_spec *specs; u32 p_prev = STM32F7_PRESC_MAX; u32 i2cclk = DIV_ROUND_CLOSEST(NSEC_PER_SEC, setup->clock_src); u32 i2cbus = DIV_ROUND_CLOSEST(NSEC_PER_SEC, setup->speed_freq); u32 clk_error_prev = i2cbus; u32 tsync; u32 af_delay_min, af_delay_max; u32 dnf_delay; u32 clk_min, clk_max; int sdadel_min, sdadel_max; int scldel_min; struct stm32f7_i2c_timings *v, *_v, *s; struct list_head solutions; u16 p, l, a, h; int ret = 0; specs = stm32f7_get_specs(setup->speed_freq); if (specs == ERR_PTR(-EINVAL)) { dev_err(i2c_dev->dev, "speed out of bound {%d}\n", setup->speed_freq); return -EINVAL; } if ((setup->rise_time > specs->rise_max) || (setup->fall_time > specs->fall_max)) { dev_err(i2c_dev->dev, "timings out of bound Rise{%d>%d}/Fall{%d>%d}\n", setup->rise_time, specs->rise_max, setup->fall_time, specs->fall_max); return -EINVAL; } i2c_dev->dnf = DIV_ROUND_CLOSEST(i2c_dev->dnf_dt, i2cclk); if (i2c_dev->dnf > STM32F7_I2C_DNF_MAX) { dev_err(i2c_dev->dev, "DNF out of bound %d/%d\n", i2c_dev->dnf * i2cclk, STM32F7_I2C_DNF_MAX * i2cclk); return -EINVAL; } /* Analog and Digital Filters */ af_delay_min = 
(i2c_dev->analog_filter ? STM32F7_I2C_ANALOG_FILTER_DELAY_MIN : 0); af_delay_max = (i2c_dev->analog_filter ? STM32F7_I2C_ANALOG_FILTER_DELAY_MAX : 0); dnf_delay = i2c_dev->dnf * i2cclk; sdadel_min = specs->hddat_min + setup->fall_time - af_delay_min - (i2c_dev->dnf + 3) * i2cclk; sdadel_max = specs->vddat_max - setup->rise_time - af_delay_max - (i2c_dev->dnf + 4) * i2cclk; scldel_min = setup->rise_time + specs->sudat_min; if (sdadel_min < 0) sdadel_min = 0; if (sdadel_max < 0) sdadel_max = 0; dev_dbg(i2c_dev->dev, "SDADEL(min/max): %i/%i, SCLDEL(Min): %i\n", sdadel_min, sdadel_max, scldel_min); INIT_LIST_HEAD(&solutions); /* Compute possible values for PRESC, SCLDEL and SDADEL */ for (p = 0; p < STM32F7_PRESC_MAX; p++) { for (l = 0; l < STM32F7_SCLDEL_MAX; l++) { u32 scldel = (l + 1) * (p + 1) * i2cclk; if (scldel < scldel_min) continue; for (a = 0; a < STM32F7_SDADEL_MAX; a++) { u32 sdadel = (a * (p + 1) + 1) * i2cclk; if (((sdadel >= sdadel_min) && (sdadel <= sdadel_max)) && (p != p_prev)) { v = kmalloc(sizeof(*v), GFP_KERNEL); if (!v) { ret = -ENOMEM; goto exit; } v->presc = p; v->scldel = l; v->sdadel = a; p_prev = p; list_add_tail(&v->node, &solutions); break; } } if (p_prev == p) break; } } if (list_empty(&solutions)) { dev_err(i2c_dev->dev, "no Prescaler solution\n"); ret = -EPERM; goto exit; } tsync = af_delay_min + dnf_delay + (2 * i2cclk); s = NULL; clk_max = NSEC_PER_SEC / RATE_MIN(setup->speed_freq); clk_min = NSEC_PER_SEC / setup->speed_freq; /* * Among Prescaler possibilities discovered above figures out SCL Low * and High Period. Provided: * - SCL Low Period has to be higher than SCL Clock Low Period * defined by I2C Specification. I2C Clock has to be lower than * (SCL Low Period - Analog/Digital filters) / 4. * - SCL High Period has to be lower than SCL Clock High Period * defined by I2C Specification * - I2C Clock has to be lower than SCL High Period */ list_for_each_entry(v, &solutions, node) { u32 prescaler = (v->presc + 1) * i2cclk; for (l = 0; l < STM32F7_SCLL_MAX; l++) { u32 tscl_l = (l + 1) * prescaler + tsync; if ((tscl_l < specs->l_min) || (i2cclk >= ((tscl_l - af_delay_min - dnf_delay) / 4))) { continue; } for (h = 0; h < STM32F7_SCLH_MAX; h++) { u32 tscl_h = (h + 1) * prescaler + tsync; u32 tscl = tscl_l + tscl_h + setup->rise_time + setup->fall_time; if ((tscl >= clk_min) && (tscl <= clk_max) && (tscl_h >= specs->h_min) && (i2cclk < tscl_h)) { int clk_error = tscl - i2cbus; if (clk_error < 0) clk_error = -clk_error; if (clk_error < clk_error_prev) { clk_error_prev = clk_error; v->scll = l; v->sclh = h; s = v; } } } } } if (!s) { dev_err(i2c_dev->dev, "no solution at all\n"); ret = -EPERM; goto exit; } output->presc = s->presc; output->scldel = s->scldel; output->sdadel = s->sdadel; output->scll = s->scll; output->sclh = s->sclh; dev_dbg(i2c_dev->dev, "Presc: %i, scldel: %i, sdadel: %i, scll: %i, sclh: %i\n", output->presc, output->scldel, output->sdadel, output->scll, output->sclh); exit: /* Release list and memory */ list_for_each_entry_safe(v, _v, &solutions, node) { list_del(&v->node); kfree(v); } return ret; } static u32 stm32f7_get_lower_rate(u32 rate) { int i = ARRAY_SIZE(stm32f7_i2c_specs); while (--i) if (stm32f7_i2c_specs[i].rate < rate) break; return stm32f7_i2c_specs[i].rate; } static int stm32f7_i2c_setup_timing(struct stm32f7_i2c_dev *i2c_dev, struct stm32f7_i2c_setup *setup) { struct i2c_timings timings, *t = &timings; int ret = 0; t->bus_freq_hz = I2C_MAX_STANDARD_MODE_FREQ; t->scl_rise_ns = i2c_dev->setup.rise_time; t->scl_fall_ns = 
i2c_dev->setup.fall_time; i2c_parse_fw_timings(i2c_dev->dev, t, false); if (t->bus_freq_hz > I2C_MAX_FAST_MODE_PLUS_FREQ) { dev_err(i2c_dev->dev, "Invalid bus speed (%i>%i)\n", t->bus_freq_hz, I2C_MAX_FAST_MODE_PLUS_FREQ); return -EINVAL; } setup->speed_freq = t->bus_freq_hz; i2c_dev->setup.rise_time = t->scl_rise_ns; i2c_dev->setup.fall_time = t->scl_fall_ns; i2c_dev->dnf_dt = t->digital_filter_width_ns; setup->clock_src = clk_get_rate(i2c_dev->clk); if (!setup->clock_src) { dev_err(i2c_dev->dev, "clock rate is 0\n"); return -EINVAL; } if (!of_property_read_bool(i2c_dev->dev->of_node, "i2c-digital-filter")) i2c_dev->dnf_dt = STM32F7_I2C_DNF_DEFAULT; do { ret = stm32f7_i2c_compute_timing(i2c_dev, setup, &i2c_dev->timing); if (ret) { dev_err(i2c_dev->dev, "failed to compute I2C timings.\n"); if (setup->speed_freq <= I2C_MAX_STANDARD_MODE_FREQ) break; setup->speed_freq = stm32f7_get_lower_rate(setup->speed_freq); dev_warn(i2c_dev->dev, "downgrade I2C Speed Freq to (%i)\n", setup->speed_freq); } } while (ret); if (ret) { dev_err(i2c_dev->dev, "Impossible to compute I2C timings.\n"); return ret; } i2c_dev->analog_filter = of_property_read_bool(i2c_dev->dev->of_node, "i2c-analog-filter"); dev_dbg(i2c_dev->dev, "I2C Speed(%i), Clk Source(%i)\n", setup->speed_freq, setup->clock_src); dev_dbg(i2c_dev->dev, "I2C Rise(%i) and Fall(%i) Time\n", setup->rise_time, setup->fall_time); dev_dbg(i2c_dev->dev, "I2C Analog Filter(%s), DNF(%i)\n", (i2c_dev->analog_filter ? "On" : "Off"), i2c_dev->dnf); i2c_dev->bus_rate = setup->speed_freq; return 0; } static void stm32f7_i2c_disable_dma_req(struct stm32f7_i2c_dev *i2c_dev) { void __iomem *base = i2c_dev->base; u32 mask = STM32F7_I2C_CR1_RXDMAEN | STM32F7_I2C_CR1_TXDMAEN; stm32f7_i2c_clr_bits(base + STM32F7_I2C_CR1, mask); } static void stm32f7_i2c_dma_callback(void *arg) { struct stm32f7_i2c_dev *i2c_dev = (struct stm32f7_i2c_dev *)arg; struct stm32_i2c_dma *dma = i2c_dev->dma; struct device *dev = dma->chan_using->device->dev; stm32f7_i2c_disable_dma_req(i2c_dev); dma_unmap_single(dev, dma->dma_buf, dma->dma_len, dma->dma_data_dir); complete(&dma->dma_complete); } static void stm32f7_i2c_hw_config(struct stm32f7_i2c_dev *i2c_dev) { struct stm32f7_i2c_timings *t = &i2c_dev->timing; u32 timing = 0; /* Timing settings */ timing |= STM32F7_I2C_TIMINGR_PRESC(t->presc); timing |= STM32F7_I2C_TIMINGR_SCLDEL(t->scldel); timing |= STM32F7_I2C_TIMINGR_SDADEL(t->sdadel); timing |= STM32F7_I2C_TIMINGR_SCLH(t->sclh); timing |= STM32F7_I2C_TIMINGR_SCLL(t->scll); writel_relaxed(timing, i2c_dev->base + STM32F7_I2C_TIMINGR); /* Configure the Analog Filter */ if (i2c_dev->analog_filter) stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1, STM32F7_I2C_CR1_ANFOFF); else stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1, STM32F7_I2C_CR1_ANFOFF); /* Program the Digital Filter */ stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1, STM32F7_I2C_CR1_DNF_MASK); stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1, STM32F7_I2C_CR1_DNF(i2c_dev->dnf)); stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1, STM32F7_I2C_CR1_PE); } static void stm32f7_i2c_write_tx_data(struct stm32f7_i2c_dev *i2c_dev) { struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg; void __iomem *base = i2c_dev->base; if (f7_msg->count) { writeb_relaxed(*f7_msg->buf++, base + STM32F7_I2C_TXDR); f7_msg->count--; } } static void stm32f7_i2c_read_rx_data(struct stm32f7_i2c_dev *i2c_dev) { struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg; void __iomem *base = i2c_dev->base; if (f7_msg->count) { *f7_msg->buf++ = 
readb_relaxed(base + STM32F7_I2C_RXDR); f7_msg->count--; } else { /* Flush RX buffer as no data is expected */ readb_relaxed(base + STM32F7_I2C_RXDR); } } static void stm32f7_i2c_reload(struct stm32f7_i2c_dev *i2c_dev) { struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg; u32 cr2; if (i2c_dev->use_dma) f7_msg->count -= STM32F7_I2C_MAX_LEN; cr2 = readl_relaxed(i2c_dev->base + STM32F7_I2C_CR2); cr2 &= ~STM32F7_I2C_CR2_NBYTES_MASK; if (f7_msg->count > STM32F7_I2C_MAX_LEN) { cr2 |= STM32F7_I2C_CR2_NBYTES(STM32F7_I2C_MAX_LEN); } else { cr2 &= ~STM32F7_I2C_CR2_RELOAD; cr2 |= STM32F7_I2C_CR2_NBYTES(f7_msg->count); } writel_relaxed(cr2, i2c_dev->base + STM32F7_I2C_CR2); } static void stm32f7_i2c_smbus_reload(struct stm32f7_i2c_dev *i2c_dev) { struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg; u32 cr2; u8 *val; /* * For I2C_SMBUS_BLOCK_DATA && I2C_SMBUS_BLOCK_PROC_CALL, the first * data byte received informs us how many bytes will follow. */ stm32f7_i2c_read_rx_data(i2c_dev); /* * Update NBYTES with the value read to continue the transfer */ val = f7_msg->buf - sizeof(u8); f7_msg->count = *val; cr2 = readl_relaxed(i2c_dev->base + STM32F7_I2C_CR2); cr2 &= ~(STM32F7_I2C_CR2_NBYTES_MASK | STM32F7_I2C_CR2_RELOAD); cr2 |= STM32F7_I2C_CR2_NBYTES(f7_msg->count); writel_relaxed(cr2, i2c_dev->base + STM32F7_I2C_CR2); } static void stm32f7_i2c_release_bus(struct i2c_adapter *i2c_adap) { struct stm32f7_i2c_dev *i2c_dev = i2c_get_adapdata(i2c_adap); stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1, STM32F7_I2C_CR1_PE); stm32f7_i2c_hw_config(i2c_dev); } static int stm32f7_i2c_wait_free_bus(struct stm32f7_i2c_dev *i2c_dev) { u32 status; int ret; ret = readl_relaxed_poll_timeout(i2c_dev->base + STM32F7_I2C_ISR, status, !(status & STM32F7_I2C_ISR_BUSY), 10, 1000); if (!ret) return 0; stm32f7_i2c_release_bus(&i2c_dev->adap); return -EBUSY; } static void stm32f7_i2c_xfer_msg(struct stm32f7_i2c_dev *i2c_dev, struct i2c_msg *msg) { struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg; void __iomem *base = i2c_dev->base; u32 cr1, cr2; int ret; f7_msg->addr = msg->addr; f7_msg->buf = msg->buf; f7_msg->count = msg->len; f7_msg->result = 0; f7_msg->stop = (i2c_dev->msg_id >= i2c_dev->msg_num - 1); reinit_completion(&i2c_dev->complete); cr1 = readl_relaxed(base + STM32F7_I2C_CR1); cr2 = readl_relaxed(base + STM32F7_I2C_CR2); /* Set transfer direction */ cr2 &= ~STM32F7_I2C_CR2_RD_WRN; if (msg->flags & I2C_M_RD) cr2 |= STM32F7_I2C_CR2_RD_WRN; /* Set slave address */ cr2 &= ~(STM32F7_I2C_CR2_HEAD10R | STM32F7_I2C_CR2_ADD10); if (msg->flags & I2C_M_TEN) { cr2 &= ~STM32F7_I2C_CR2_SADD10_MASK; cr2 |= STM32F7_I2C_CR2_SADD10(f7_msg->addr); cr2 |= STM32F7_I2C_CR2_ADD10; } else { cr2 &= ~STM32F7_I2C_CR2_SADD7_MASK; cr2 |= STM32F7_I2C_CR2_SADD7(f7_msg->addr); } /* Set nb bytes to transfer and reload if needed */ cr2 &= ~(STM32F7_I2C_CR2_NBYTES_MASK | STM32F7_I2C_CR2_RELOAD); if (f7_msg->count > STM32F7_I2C_MAX_LEN) { cr2 |= STM32F7_I2C_CR2_NBYTES(STM32F7_I2C_MAX_LEN); cr2 |= STM32F7_I2C_CR2_RELOAD; } else { cr2 |= STM32F7_I2C_CR2_NBYTES(f7_msg->count); } /* Enable NACK, STOP, error and transfer complete interrupts */ cr1 |= STM32F7_I2C_CR1_ERRIE | STM32F7_I2C_CR1_TCIE | STM32F7_I2C_CR1_STOPIE | STM32F7_I2C_CR1_NACKIE; /* Clear DMA req and TX/RX interrupt */ cr1 &= ~(STM32F7_I2C_CR1_RXIE | STM32F7_I2C_CR1_TXIE | STM32F7_I2C_CR1_RXDMAEN | STM32F7_I2C_CR1_TXDMAEN); /* Configure DMA or enable RX/TX interrupt */ i2c_dev->use_dma = false; if (i2c_dev->dma && f7_msg->count >= STM32F7_I2C_DMA_LEN_MIN) { ret = stm32_i2c_prep_dma_xfer(i2c_dev->dev,
i2c_dev->dma, msg->flags & I2C_M_RD, f7_msg->count, f7_msg->buf, stm32f7_i2c_dma_callback, i2c_dev); if (!ret) i2c_dev->use_dma = true; else dev_warn(i2c_dev->dev, "can't use DMA\n"); } if (!i2c_dev->use_dma) { if (msg->flags & I2C_M_RD) cr1 |= STM32F7_I2C_CR1_RXIE; else cr1 |= STM32F7_I2C_CR1_TXIE; } else { if (msg->flags & I2C_M_RD) cr1 |= STM32F7_I2C_CR1_RXDMAEN; else cr1 |= STM32F7_I2C_CR1_TXDMAEN; } /* Configure Start/Repeated Start */ cr2 |= STM32F7_I2C_CR2_START; i2c_dev->master_mode = true; /* Write configuration registers */ writel_relaxed(cr1, base + STM32F7_I2C_CR1); writel_relaxed(cr2, base + STM32F7_I2C_CR2); } static int stm32f7_i2c_smbus_xfer_msg(struct stm32f7_i2c_dev *i2c_dev, unsigned short flags, u8 command, union i2c_smbus_data *data) { struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg; struct device *dev = i2c_dev->dev; void __iomem *base = i2c_dev->base; u32 cr1, cr2; int i, ret; f7_msg->result = 0; reinit_completion(&i2c_dev->complete); cr2 = readl_relaxed(base + STM32F7_I2C_CR2); cr1 = readl_relaxed(base + STM32F7_I2C_CR1); /* Set transfer direction */ cr2 &= ~STM32F7_I2C_CR2_RD_WRN; if (f7_msg->read_write) cr2 |= STM32F7_I2C_CR2_RD_WRN; /* Set slave address */ cr2 &= ~(STM32F7_I2C_CR2_ADD10 | STM32F7_I2C_CR2_SADD7_MASK); cr2 |= STM32F7_I2C_CR2_SADD7(f7_msg->addr); f7_msg->smbus_buf[0] = command; switch (f7_msg->size) { case I2C_SMBUS_QUICK: f7_msg->stop = true; f7_msg->count = 0; break; case I2C_SMBUS_BYTE: f7_msg->stop = true; f7_msg->count = 1; break; case I2C_SMBUS_BYTE_DATA: if (f7_msg->read_write) { f7_msg->stop = false; f7_msg->count = 1; cr2 &= ~STM32F7_I2C_CR2_RD_WRN; } else { f7_msg->stop = true; f7_msg->count = 2; f7_msg->smbus_buf[1] = data->byte; } break; case I2C_SMBUS_WORD_DATA: if (f7_msg->read_write) { f7_msg->stop = false; f7_msg->count = 1; cr2 &= ~STM32F7_I2C_CR2_RD_WRN; } else { f7_msg->stop = true; f7_msg->count = 3; f7_msg->smbus_buf[1] = data->word & 0xff; f7_msg->smbus_buf[2] = data->word >> 8; } break; case I2C_SMBUS_BLOCK_DATA: if (f7_msg->read_write) { f7_msg->stop = false; f7_msg->count = 1; cr2 &= ~STM32F7_I2C_CR2_RD_WRN; } else { f7_msg->stop = true; if (data->block[0] > I2C_SMBUS_BLOCK_MAX || !data->block[0]) { dev_err(dev, "Invalid block write size %d\n", data->block[0]); return -EINVAL; } f7_msg->count = data->block[0] + 2; for (i = 1; i < f7_msg->count; i++) f7_msg->smbus_buf[i] = data->block[i - 1]; } break; case I2C_SMBUS_PROC_CALL: f7_msg->stop = false; f7_msg->count = 3; f7_msg->smbus_buf[1] = data->word & 0xff; f7_msg->smbus_buf[2] = data->word >> 8; cr2 &= ~STM32F7_I2C_CR2_RD_WRN; f7_msg->read_write = I2C_SMBUS_READ; break; case I2C_SMBUS_BLOCK_PROC_CALL: f7_msg->stop = false; if (data->block[0] > I2C_SMBUS_BLOCK_MAX - 1) { dev_err(dev, "Invalid block write size %d\n", data->block[0]); return -EINVAL; } f7_msg->count = data->block[0] + 2; for (i = 1; i < f7_msg->count; i++) f7_msg->smbus_buf[i] = data->block[i - 1]; cr2 &= ~STM32F7_I2C_CR2_RD_WRN; f7_msg->read_write = I2C_SMBUS_READ; break; case I2C_SMBUS_I2C_BLOCK_DATA: /* Rely on emulated i2c transfer (through master_xfer) */ return -EOPNOTSUPP; default: dev_err(dev, "Unsupported smbus protocol %d\n", f7_msg->size); return -EOPNOTSUPP; } f7_msg->buf = f7_msg->smbus_buf; /* Configure PEC */ if ((flags & I2C_CLIENT_PEC) && f7_msg->size != I2C_SMBUS_QUICK) { cr1 |= STM32F7_I2C_CR1_PECEN; cr2 |= STM32F7_I2C_CR2_PECBYTE; if (!f7_msg->read_write) f7_msg->count++; } else { cr1 &= ~STM32F7_I2C_CR1_PECEN; cr2 &= ~STM32F7_I2C_CR2_PECBYTE; } /* Set number of bytes to be transferred
*/ cr2 &= ~(STM32F7_I2C_CR2_NBYTES_MASK | STM32F7_I2C_CR2_RELOAD); cr2 |= STM32F7_I2C_CR2_NBYTES(f7_msg->count); /* Enable NACK, STOP, error and transfer complete interrupts */ cr1 |= STM32F7_I2C_CR1_ERRIE | STM32F7_I2C_CR1_TCIE | STM32F7_I2C_CR1_STOPIE | STM32F7_I2C_CR1_NACKIE; /* Clear DMA req and TX/RX interrupt */ cr1 &= ~(STM32F7_I2C_CR1_RXIE | STM32F7_I2C_CR1_TXIE | STM32F7_I2C_CR1_RXDMAEN | STM32F7_I2C_CR1_TXDMAEN); /* Configure DMA or enable RX/TX interrupt */ i2c_dev->use_dma = false; if (i2c_dev->dma && f7_msg->count >= STM32F7_I2C_DMA_LEN_MIN) { ret = stm32_i2c_prep_dma_xfer(i2c_dev->dev, i2c_dev->dma, cr2 & STM32F7_I2C_CR2_RD_WRN, f7_msg->count, f7_msg->buf, stm32f7_i2c_dma_callback, i2c_dev); if (!ret) i2c_dev->use_dma = true; else dev_warn(i2c_dev->dev, "can't use DMA\n"); } if (!i2c_dev->use_dma) { if (cr2 & STM32F7_I2C_CR2_RD_WRN) cr1 |= STM32F7_I2C_CR1_RXIE; else cr1 |= STM32F7_I2C_CR1_TXIE; } else { if (cr2 & STM32F7_I2C_CR2_RD_WRN) cr1 |= STM32F7_I2C_CR1_RXDMAEN; else cr1 |= STM32F7_I2C_CR1_TXDMAEN; } /* Set Start bit */ cr2 |= STM32F7_I2C_CR2_START; i2c_dev->master_mode = true; /* Write configuration registers */ writel_relaxed(cr1, base + STM32F7_I2C_CR1); writel_relaxed(cr2, base + STM32F7_I2C_CR2); return 0; } static void stm32f7_i2c_smbus_rep_start(struct stm32f7_i2c_dev *i2c_dev) { struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg; void __iomem *base = i2c_dev->base; u32 cr1, cr2; int ret; cr2 = readl_relaxed(base + STM32F7_I2C_CR2); cr1 = readl_relaxed(base + STM32F7_I2C_CR1); /* Set transfer direction */ cr2 |= STM32F7_I2C_CR2_RD_WRN; switch (f7_msg->size) { case I2C_SMBUS_BYTE_DATA: f7_msg->count = 1; break; case I2C_SMBUS_WORD_DATA: case I2C_SMBUS_PROC_CALL: f7_msg->count = 2; break; case I2C_SMBUS_BLOCK_DATA: case I2C_SMBUS_BLOCK_PROC_CALL: f7_msg->count = 1; cr2 |= STM32F7_I2C_CR2_RELOAD; break; } f7_msg->buf = f7_msg->smbus_buf; f7_msg->stop = true; /* Add one byte for PEC if needed */ if (cr1 & STM32F7_I2C_CR1_PECEN) f7_msg->count++; /* Set number of bytes to be transferred */ cr2 &= ~(STM32F7_I2C_CR2_NBYTES_MASK); cr2 |= STM32F7_I2C_CR2_NBYTES(f7_msg->count); /* * Configure RX/TX interrupt: */ cr1 &= ~(STM32F7_I2C_CR1_RXIE | STM32F7_I2C_CR1_TXIE); cr1 |= STM32F7_I2C_CR1_RXIE; /* * Configure DMA or enable RX/TX interrupt: * For I2C_SMBUS_BLOCK_DATA and I2C_SMBUS_BLOCK_PROC_CALL we don't use * dma as we don't know in advance how much data will be received */ cr1 &= ~(STM32F7_I2C_CR1_RXIE | STM32F7_I2C_CR1_TXIE | STM32F7_I2C_CR1_RXDMAEN | STM32F7_I2C_CR1_TXDMAEN); i2c_dev->use_dma = false; if (i2c_dev->dma && f7_msg->count >= STM32F7_I2C_DMA_LEN_MIN && f7_msg->size != I2C_SMBUS_BLOCK_DATA && f7_msg->size != I2C_SMBUS_BLOCK_PROC_CALL) { ret = stm32_i2c_prep_dma_xfer(i2c_dev->dev, i2c_dev->dma, cr2 & STM32F7_I2C_CR2_RD_WRN, f7_msg->count, f7_msg->buf, stm32f7_i2c_dma_callback, i2c_dev); if (!ret) i2c_dev->use_dma = true; else dev_warn(i2c_dev->dev, "can't use DMA\n"); } if (!i2c_dev->use_dma) cr1 |= STM32F7_I2C_CR1_RXIE; else cr1 |= STM32F7_I2C_CR1_RXDMAEN; /* Configure Repeated Start */ cr2 |= STM32F7_I2C_CR2_START; /* Write configuration registers */ writel_relaxed(cr1, base + STM32F7_I2C_CR1); writel_relaxed(cr2, base + STM32F7_I2C_CR2); } static int stm32f7_i2c_smbus_check_pec(struct stm32f7_i2c_dev *i2c_dev) { struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg; u8 count, internal_pec, received_pec; internal_pec = readl_relaxed(i2c_dev->base + STM32F7_I2C_PECR); switch (f7_msg->size) { case I2C_SMBUS_BYTE: case I2C_SMBUS_BYTE_DATA: received_pec =
f7_msg->smbus_buf[1]; break; case I2C_SMBUS_WORD_DATA: case I2C_SMBUS_PROC_CALL: received_pec = f7_msg->smbus_buf[2]; break; case I2C_SMBUS_BLOCK_DATA: case I2C_SMBUS_BLOCK_PROC_CALL: count = f7_msg->smbus_buf[0]; received_pec = f7_msg->smbus_buf[count]; break; default: dev_err(i2c_dev->dev, "Unsupported smbus protocol for PEC\n"); return -EINVAL; } if (internal_pec != received_pec) { dev_err(i2c_dev->dev, "Bad PEC 0x%02x vs. 0x%02x\n", internal_pec, received_pec); return -EBADMSG; } return 0; } static bool stm32f7_i2c_is_addr_match(struct i2c_client *slave, u32 addcode) { u32 addr; if (!slave) return false; if (slave->flags & I2C_CLIENT_TEN) { /* * For 10-bit addr, addcode = 11110XY with * X = Bit 9 of slave address * Y = Bit 8 of slave address */ addr = slave->addr >> 8; addr |= 0x78; if (addr == addcode) return true; } else { addr = slave->addr & 0x7f; if (addr == addcode) return true; } return false; } static void stm32f7_i2c_slave_start(struct stm32f7_i2c_dev *i2c_dev) { struct i2c_client *slave = i2c_dev->slave_running; void __iomem *base = i2c_dev->base; u32 mask; u8 value = 0; if (i2c_dev->slave_dir) { /* Notify i2c slave that a new read transfer is starting */ i2c_slave_event(slave, I2C_SLAVE_READ_REQUESTED, &value); /* * Disable slave TX config in case of I2C combined message * (I2C Write followed by I2C Read) */ mask = STM32F7_I2C_CR2_RELOAD; stm32f7_i2c_clr_bits(base + STM32F7_I2C_CR2, mask); mask = STM32F7_I2C_CR1_SBC | STM32F7_I2C_CR1_RXIE | STM32F7_I2C_CR1_TCIE; stm32f7_i2c_clr_bits(base + STM32F7_I2C_CR1, mask); /* Enable TX empty, STOP, NACK interrupts */ mask = STM32F7_I2C_CR1_STOPIE | STM32F7_I2C_CR1_NACKIE | STM32F7_I2C_CR1_TXIE; stm32f7_i2c_set_bits(base + STM32F7_I2C_CR1, mask); /* Write 1st data byte */ writel_relaxed(value, base + STM32F7_I2C_TXDR); } else { /* Notify i2c slave that a new write transfer is starting */ i2c_slave_event(slave, I2C_SLAVE_WRITE_REQUESTED, &value); /* Set reload mode to be able to ACK/NACK each received byte */ mask = STM32F7_I2C_CR2_RELOAD; stm32f7_i2c_set_bits(base + STM32F7_I2C_CR2, mask); /* * Set STOP, NACK, RX empty and transfer complete interrupts. * Set Slave Byte Control to be able to ACK/NACK each data * byte received */ mask = STM32F7_I2C_CR1_STOPIE | STM32F7_I2C_CR1_NACKIE | STM32F7_I2C_CR1_SBC | STM32F7_I2C_CR1_RXIE | STM32F7_I2C_CR1_TCIE; stm32f7_i2c_set_bits(base + STM32F7_I2C_CR1, mask); } } static void stm32f7_i2c_slave_addr(struct stm32f7_i2c_dev *i2c_dev) { void __iomem *base = i2c_dev->base; u32 isr, addcode, dir, mask; int i; isr = readl_relaxed(i2c_dev->base + STM32F7_I2C_ISR); addcode = STM32F7_I2C_ISR_ADDCODE_GET(isr); dir = isr & STM32F7_I2C_ISR_DIR; for (i = 0; i < STM32F7_I2C_MAX_SLAVE; i++) { if (stm32f7_i2c_is_addr_match(i2c_dev->slave[i], addcode)) { i2c_dev->slave_running = i2c_dev->slave[i]; i2c_dev->slave_dir = dir; /* Start I2C slave processing */ stm32f7_i2c_slave_start(i2c_dev); /* Clear ADDR flag */ mask = STM32F7_I2C_ICR_ADDRCF; writel_relaxed(mask, base + STM32F7_I2C_ICR); break; } } } static int stm32f7_i2c_get_slave_id(struct stm32f7_i2c_dev *i2c_dev, struct i2c_client *slave, int *id) { int i; for (i = 0; i < STM32F7_I2C_MAX_SLAVE; i++) { if (i2c_dev->slave[i] == slave) { *id = i; return 0; } } dev_err(i2c_dev->dev, "Slave 0x%x not registered\n", slave->addr); return -ENODEV; } static int stm32f7_i2c_get_free_slave_id(struct stm32f7_i2c_dev *i2c_dev, struct i2c_client *slave, int *id) { struct device *dev = i2c_dev->dev; int i; /* * slave[STM32F7_SLAVE_HOSTNOTIFY] supports only SMBus Host address
(0x8) * slave[STM32F7_SLAVE_7_10_BITS_ADDR] supports 7-bit and 10-bit slave address * slave[STM32F7_SLAVE_7_BITS_ADDR] supports 7-bit slave address only */ if (i2c_dev->smbus_mode && (slave->addr == 0x08)) { if (i2c_dev->slave[STM32F7_SLAVE_HOSTNOTIFY]) goto fail; *id = STM32F7_SLAVE_HOSTNOTIFY; return 0; } for (i = STM32F7_I2C_MAX_SLAVE - 1; i > STM32F7_SLAVE_HOSTNOTIFY; i--) { if ((i == STM32F7_SLAVE_7_BITS_ADDR) && (slave->flags & I2C_CLIENT_TEN)) continue; if (!i2c_dev->slave[i]) { *id = i; return 0; } } fail: dev_err(dev, "Slave 0x%x could not be registered\n", slave->addr); return -EINVAL; } static bool stm32f7_i2c_is_slave_registered(struct stm32f7_i2c_dev *i2c_dev) { int i; for (i = 0; i < STM32F7_I2C_MAX_SLAVE; i++) { if (i2c_dev->slave[i]) return true; } return false; } static bool stm32f7_i2c_is_slave_busy(struct stm32f7_i2c_dev *i2c_dev) { int i, busy; busy = 0; for (i = 0; i < STM32F7_I2C_MAX_SLAVE; i++) { if (i2c_dev->slave[i]) busy++; } return i == busy; } static irqreturn_t stm32f7_i2c_slave_isr_event(struct stm32f7_i2c_dev *i2c_dev) { void __iomem *base = i2c_dev->base; u32 cr2, status, mask; u8 val; int ret; status = readl_relaxed(i2c_dev->base + STM32F7_I2C_ISR); /* Slave transmitter mode */ if (status & STM32F7_I2C_ISR_TXIS) { i2c_slave_event(i2c_dev->slave_running, I2C_SLAVE_READ_PROCESSED, &val); /* Write data byte */ writel_relaxed(val, base + STM32F7_I2C_TXDR); } /* Transfer Complete Reload for Slave receiver mode */ if (status & STM32F7_I2C_ISR_TCR || status & STM32F7_I2C_ISR_RXNE) { /* * Read data byte then set NBYTES to receive next byte or NACK * the current received byte */ val = readb_relaxed(i2c_dev->base + STM32F7_I2C_RXDR); ret = i2c_slave_event(i2c_dev->slave_running, I2C_SLAVE_WRITE_RECEIVED, &val); if (!ret) { cr2 = readl_relaxed(i2c_dev->base + STM32F7_I2C_CR2); cr2 |= STM32F7_I2C_CR2_NBYTES(1); writel_relaxed(cr2, i2c_dev->base + STM32F7_I2C_CR2); } else { mask = STM32F7_I2C_CR2_NACK; stm32f7_i2c_set_bits(base + STM32F7_I2C_CR2, mask); } } /* NACK received */ if (status & STM32F7_I2C_ISR_NACKF) { dev_dbg(i2c_dev->dev, "<%s>: Receive NACK\n", __func__); writel_relaxed(STM32F7_I2C_ICR_NACKCF, base + STM32F7_I2C_ICR); } /* STOP received */ if (status & STM32F7_I2C_ISR_STOPF) { /* Disable interrupts */ stm32f7_i2c_disable_irq(i2c_dev, STM32F7_I2C_XFER_IRQ_MASK); if (i2c_dev->slave_dir) { /* * Flush TX buffer in order to not use the byte in * TXDR for the next transfer */ mask = STM32F7_I2C_ISR_TXE; stm32f7_i2c_set_bits(base + STM32F7_I2C_ISR, mask); } /* Clear STOP flag */ writel_relaxed(STM32F7_I2C_ICR_STOPCF, base + STM32F7_I2C_ICR); /* Notify i2c slave that a STOP flag has been detected */ i2c_slave_event(i2c_dev->slave_running, I2C_SLAVE_STOP, &val); i2c_dev->slave_running = NULL; } /* Address match received */ if (status & STM32F7_I2C_ISR_ADDR) stm32f7_i2c_slave_addr(i2c_dev); return IRQ_HANDLED; } static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data) { struct stm32f7_i2c_dev *i2c_dev = data; struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg; struct stm32_i2c_dma *dma = i2c_dev->dma; void __iomem *base = i2c_dev->base; u32 status, mask; int ret = IRQ_HANDLED; /* Check if the interrupt is for a slave device */ if (!i2c_dev->master_mode) { ret = stm32f7_i2c_slave_isr_event(i2c_dev); return ret; } status = readl_relaxed(i2c_dev->base + STM32F7_I2C_ISR); /* Tx empty */ if (status & STM32F7_I2C_ISR_TXIS) stm32f7_i2c_write_tx_data(i2c_dev); /* RX not empty */ if (status & STM32F7_I2C_ISR_RXNE) stm32f7_i2c_read_rx_data(i2c_dev); /* NACK received */
if (status & STM32F7_I2C_ISR_NACKF) { dev_dbg(i2c_dev->dev, "<%s>: Received NACK (addr %x)\n", __func__, f7_msg->addr); writel_relaxed(STM32F7_I2C_ICR_NACKCF, base + STM32F7_I2C_ICR); if (i2c_dev->use_dma) { stm32f7_i2c_disable_dma_req(i2c_dev); dmaengine_terminate_async(dma->chan_using); } f7_msg->result = -ENXIO; } /* STOP detection flag */ if (status & STM32F7_I2C_ISR_STOPF) { /* Disable interrupts */ if (stm32f7_i2c_is_slave_registered(i2c_dev)) mask = STM32F7_I2C_XFER_IRQ_MASK; else mask = STM32F7_I2C_ALL_IRQ_MASK; stm32f7_i2c_disable_irq(i2c_dev, mask); /* Clear STOP flag */ writel_relaxed(STM32F7_I2C_ICR_STOPCF, base + STM32F7_I2C_ICR); if (i2c_dev->use_dma && !f7_msg->result) { ret = IRQ_WAKE_THREAD; } else { i2c_dev->master_mode = false; complete(&i2c_dev->complete); } } /* Transfer complete */ if (status & STM32F7_I2C_ISR_TC) { if (f7_msg->stop) { mask = STM32F7_I2C_CR2_STOP; stm32f7_i2c_set_bits(base + STM32F7_I2C_CR2, mask); } else if (i2c_dev->use_dma && !f7_msg->result) { ret = IRQ_WAKE_THREAD; } else if (f7_msg->smbus) { stm32f7_i2c_smbus_rep_start(i2c_dev); } else { i2c_dev->msg_id++; i2c_dev->msg++; stm32f7_i2c_xfer_msg(i2c_dev, i2c_dev->msg); } } if (status & STM32F7_I2C_ISR_TCR) { if (f7_msg->smbus) stm32f7_i2c_smbus_reload(i2c_dev); else stm32f7_i2c_reload(i2c_dev); } return ret; } static irqreturn_t stm32f7_i2c_isr_event_thread(int irq, void *data) { struct stm32f7_i2c_dev *i2c_dev = data; struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg; struct stm32_i2c_dma *dma = i2c_dev->dma; u32 status; int ret; /* * Wait for dma transfer completion before sending the next message or * notifying the end of xfer to the client */ ret = wait_for_completion_timeout(&i2c_dev->dma->dma_complete, HZ); if (!ret) { dev_dbg(i2c_dev->dev, "<%s>: Timed out\n", __func__); stm32f7_i2c_disable_dma_req(i2c_dev); dmaengine_terminate_async(dma->chan_using); f7_msg->result = -ETIMEDOUT; } status = readl_relaxed(i2c_dev->base + STM32F7_I2C_ISR); if (status & STM32F7_I2C_ISR_TC) { if (f7_msg->smbus) { stm32f7_i2c_smbus_rep_start(i2c_dev); } else { i2c_dev->msg_id++; i2c_dev->msg++; stm32f7_i2c_xfer_msg(i2c_dev, i2c_dev->msg); } } else { i2c_dev->master_mode = false; complete(&i2c_dev->complete); } return IRQ_HANDLED; } static irqreturn_t stm32f7_i2c_isr_error(int irq, void *data) { struct stm32f7_i2c_dev *i2c_dev = data; struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg; void __iomem *base = i2c_dev->base; struct device *dev = i2c_dev->dev; struct stm32_i2c_dma *dma = i2c_dev->dma; u32 status; status = readl_relaxed(i2c_dev->base + STM32F7_I2C_ISR); /* Bus error */ if (status & STM32F7_I2C_ISR_BERR) { dev_err(dev, "<%s>: Bus error accessing addr 0x%x\n", __func__, f7_msg->addr); writel_relaxed(STM32F7_I2C_ICR_BERRCF, base + STM32F7_I2C_ICR); stm32f7_i2c_release_bus(&i2c_dev->adap); f7_msg->result = -EIO; } /* Arbitration loss */ if (status & STM32F7_I2C_ISR_ARLO) { dev_dbg(dev, "<%s>: Arbitration loss accessing addr 0x%x\n", __func__, f7_msg->addr); writel_relaxed(STM32F7_I2C_ICR_ARLOCF, base + STM32F7_I2C_ICR); f7_msg->result = -EAGAIN; } if (status & STM32F7_I2C_ISR_PECERR) { dev_err(dev, "<%s>: PEC error in reception accessing addr 0x%x\n", __func__, f7_msg->addr); writel_relaxed(STM32F7_I2C_ICR_PECCF, base + STM32F7_I2C_ICR); f7_msg->result = -EINVAL; } if (status & STM32F7_I2C_ISR_ALERT) { dev_dbg(dev, "<%s>: SMBus alert received\n", __func__); writel_relaxed(STM32F7_I2C_ICR_ALERTCF, base + STM32F7_I2C_ICR); i2c_handle_smbus_alert(i2c_dev->alert->ara); return IRQ_HANDLED; } if (!i2c_dev->slave_running)
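	/*
	 * Only a failed master transfer masks the transfer interrupts here;
	 * while a slave transfer is running, and for any registered slave,
	 * the address-match and error interrupts must stay armed.
	 */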
{ u32 mask; /* Disable interrupts */ if (stm32f7_i2c_is_slave_registered(i2c_dev)) mask = STM32F7_I2C_XFER_IRQ_MASK; else mask = STM32F7_I2C_ALL_IRQ_MASK; stm32f7_i2c_disable_irq(i2c_dev, mask); } /* Disable dma */ if (i2c_dev->use_dma) { stm32f7_i2c_disable_dma_req(i2c_dev); dmaengine_terminate_async(dma->chan_using); } i2c_dev->master_mode = false; complete(&i2c_dev->complete); return IRQ_HANDLED; } static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msgs[], int num) { struct stm32f7_i2c_dev *i2c_dev = i2c_get_adapdata(i2c_adap); struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg; struct stm32_i2c_dma *dma = i2c_dev->dma; unsigned long time_left; int ret; i2c_dev->msg = msgs; i2c_dev->msg_num = num; i2c_dev->msg_id = 0; f7_msg->smbus = false; ret = pm_runtime_resume_and_get(i2c_dev->dev); if (ret < 0) return ret; ret = stm32f7_i2c_wait_free_bus(i2c_dev); if (ret) goto pm_free; stm32f7_i2c_xfer_msg(i2c_dev, msgs); time_left = wait_for_completion_timeout(&i2c_dev->complete, i2c_dev->adap.timeout); ret = f7_msg->result; if (ret) { if (i2c_dev->use_dma) dmaengine_synchronize(dma->chan_using); /* * It is possible that some unsent data have already been * written into TXDR. To avoid sending old data in a * further transfer, flush TXDR in case of any error */ writel_relaxed(STM32F7_I2C_ISR_TXE, i2c_dev->base + STM32F7_I2C_ISR); goto pm_free; } if (!time_left) { dev_dbg(i2c_dev->dev, "Access to slave 0x%x timed out\n", i2c_dev->msg->addr); if (i2c_dev->use_dma) dmaengine_terminate_sync(dma->chan_using); stm32f7_i2c_wait_free_bus(i2c_dev); ret = -ETIMEDOUT; } pm_free: pm_runtime_mark_last_busy(i2c_dev->dev); pm_runtime_put_autosuspend(i2c_dev->dev); return (ret < 0) ? ret : num; } static int stm32f7_i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { struct stm32f7_i2c_dev *i2c_dev = i2c_get_adapdata(adapter); struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg; struct stm32_i2c_dma *dma = i2c_dev->dma; struct device *dev = i2c_dev->dev; unsigned long timeout; int i, ret; f7_msg->addr = addr; f7_msg->size = size; f7_msg->read_write = read_write; f7_msg->smbus = true; ret = pm_runtime_resume_and_get(dev); if (ret < 0) return ret; ret = stm32f7_i2c_wait_free_bus(i2c_dev); if (ret) goto pm_free; ret = stm32f7_i2c_smbus_xfer_msg(i2c_dev, flags, command, data); if (ret) goto pm_free; timeout = wait_for_completion_timeout(&i2c_dev->complete, i2c_dev->adap.timeout); ret = f7_msg->result; if (ret) { if (i2c_dev->use_dma) dmaengine_synchronize(dma->chan_using); /* * It is possible that some unsent data have already been * written into TXDR. 
To avoid sending old data in a * further transfer, flush TXDR in case of any error */ writel_relaxed(STM32F7_I2C_ISR_TXE, i2c_dev->base + STM32F7_I2C_ISR); goto pm_free; } if (!timeout) { dev_dbg(dev, "Access to slave 0x%x timed out\n", f7_msg->addr); if (i2c_dev->use_dma) dmaengine_terminate_sync(dma->chan_using); stm32f7_i2c_wait_free_bus(i2c_dev); ret = -ETIMEDOUT; goto pm_free; } /* Check PEC */ if ((flags & I2C_CLIENT_PEC) && size != I2C_SMBUS_QUICK && read_write) { ret = stm32f7_i2c_smbus_check_pec(i2c_dev); if (ret) goto pm_free; } if (read_write && size != I2C_SMBUS_QUICK) { switch (size) { case I2C_SMBUS_BYTE: case I2C_SMBUS_BYTE_DATA: data->byte = f7_msg->smbus_buf[0]; break; case I2C_SMBUS_WORD_DATA: case I2C_SMBUS_PROC_CALL: data->word = f7_msg->smbus_buf[0] | (f7_msg->smbus_buf[1] << 8); break; case I2C_SMBUS_BLOCK_DATA: case I2C_SMBUS_BLOCK_PROC_CALL: for (i = 0; i <= f7_msg->smbus_buf[0]; i++) data->block[i] = f7_msg->smbus_buf[i]; break; default: dev_err(dev, "Unsupported smbus transaction\n"); ret = -EINVAL; } } pm_free: pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); return ret; } static void stm32f7_i2c_enable_wakeup(struct stm32f7_i2c_dev *i2c_dev, bool enable) { void __iomem *base = i2c_dev->base; u32 mask = STM32F7_I2C_CR1_WUPEN; if (!i2c_dev->wakeup_src) return; if (enable) { device_set_wakeup_enable(i2c_dev->dev, true); stm32f7_i2c_set_bits(base + STM32F7_I2C_CR1, mask); } else { device_set_wakeup_enable(i2c_dev->dev, false); stm32f7_i2c_clr_bits(base + STM32F7_I2C_CR1, mask); } } static int stm32f7_i2c_reg_slave(struct i2c_client *slave) { struct stm32f7_i2c_dev *i2c_dev = i2c_get_adapdata(slave->adapter); void __iomem *base = i2c_dev->base; struct device *dev = i2c_dev->dev; u32 oar1, oar2, mask; int id, ret; if (slave->flags & I2C_CLIENT_PEC) { dev_err(dev, "SMBus PEC not supported in slave mode\n"); return -EINVAL; } if (stm32f7_i2c_is_slave_busy(i2c_dev)) { dev_err(dev, "Too many slaves registered\n"); return -EBUSY; } ret = stm32f7_i2c_get_free_slave_id(i2c_dev, slave, &id); if (ret) return ret; ret = pm_runtime_resume_and_get(dev); if (ret < 0) return ret; if (!stm32f7_i2c_is_slave_registered(i2c_dev)) stm32f7_i2c_enable_wakeup(i2c_dev, true); switch (id) { case 0: /* Slave SMBus Host */ i2c_dev->slave[id] = slave; break; case 1: /* Configure Own Address 1 */ oar1 = readl_relaxed(i2c_dev->base + STM32F7_I2C_OAR1); oar1 &= ~STM32F7_I2C_OAR1_MASK; if (slave->flags & I2C_CLIENT_TEN) { oar1 |= STM32F7_I2C_OAR1_OA1_10(slave->addr); oar1 |= STM32F7_I2C_OAR1_OA1MODE; } else { oar1 |= STM32F7_I2C_OAR1_OA1_7(slave->addr); } oar1 |= STM32F7_I2C_OAR1_OA1EN; i2c_dev->slave[id] = slave; writel_relaxed(oar1, i2c_dev->base + STM32F7_I2C_OAR1); break; case 2: /* Configure Own Address 2 */ oar2 = readl_relaxed(i2c_dev->base + STM32F7_I2C_OAR2); oar2 &= ~STM32F7_I2C_OAR2_MASK; if (slave->flags & I2C_CLIENT_TEN) { ret = -EOPNOTSUPP; goto pm_free; } oar2 |= STM32F7_I2C_OAR2_OA2_7(slave->addr); oar2 |= STM32F7_I2C_OAR2_OA2EN; i2c_dev->slave[id] = slave; writel_relaxed(oar2, i2c_dev->base + STM32F7_I2C_OAR2); break; default: dev_err(dev, "I2C slave id not supported\n"); ret = -ENODEV; goto pm_free; } /* Enable ACK */ stm32f7_i2c_clr_bits(base + STM32F7_I2C_CR2, STM32F7_I2C_CR2_NACK); /* Enable Address match interrupt, error interrupt and enable I2C */ mask = STM32F7_I2C_CR1_ADDRIE | STM32F7_I2C_CR1_ERRIE | STM32F7_I2C_CR1_PE; stm32f7_i2c_set_bits(base + STM32F7_I2C_CR1, mask); ret = 0; pm_free: if (!stm32f7_i2c_is_slave_registered(i2c_dev))
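		/*
		 * Wakeup was enabled above before the slave slot was filled;
		 * if registration failed and no slave ended up recorded, roll
		 * the wakeup source back here.
		 */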
stm32f7_i2c_enable_wakeup(i2c_dev, false); pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); return ret; } static int stm32f7_i2c_unreg_slave(struct i2c_client *slave) { struct stm32f7_i2c_dev *i2c_dev = i2c_get_adapdata(slave->adapter); void __iomem *base = i2c_dev->base; u32 mask; int id, ret; ret = stm32f7_i2c_get_slave_id(i2c_dev, slave, &id); if (ret) return ret; WARN_ON(!i2c_dev->slave[id]); ret = pm_runtime_resume_and_get(i2c_dev->dev); if (ret < 0) return ret; if (id == 1) { mask = STM32F7_I2C_OAR1_OA1EN; stm32f7_i2c_clr_bits(base + STM32F7_I2C_OAR1, mask); } else if (id == 2) { mask = STM32F7_I2C_OAR2_OA2EN; stm32f7_i2c_clr_bits(base + STM32F7_I2C_OAR2, mask); } i2c_dev->slave[id] = NULL; if (!stm32f7_i2c_is_slave_registered(i2c_dev)) { stm32f7_i2c_disable_irq(i2c_dev, STM32F7_I2C_ALL_IRQ_MASK); stm32f7_i2c_enable_wakeup(i2c_dev, false); } pm_runtime_mark_last_busy(i2c_dev->dev); pm_runtime_put_autosuspend(i2c_dev->dev); return 0; } static int stm32f7_i2c_write_fm_plus_bits(struct stm32f7_i2c_dev *i2c_dev, bool enable) { int ret; if (i2c_dev->bus_rate <= I2C_MAX_FAST_MODE_FREQ || IS_ERR_OR_NULL(i2c_dev->regmap)) /* Optional */ return 0; if (i2c_dev->fmp_sreg == i2c_dev->fmp_creg) ret = regmap_update_bits(i2c_dev->regmap, i2c_dev->fmp_sreg, i2c_dev->fmp_mask, enable ? i2c_dev->fmp_mask : 0); else ret = regmap_write(i2c_dev->regmap, enable ? i2c_dev->fmp_sreg : i2c_dev->fmp_creg, i2c_dev->fmp_mask); return ret; } static int stm32f7_i2c_setup_fm_plus_bits(struct platform_device *pdev, struct stm32f7_i2c_dev *i2c_dev) { struct device_node *np = pdev->dev.of_node; int ret; i2c_dev->regmap = syscon_regmap_lookup_by_phandle(np, "st,syscfg-fmp"); if (IS_ERR(i2c_dev->regmap)) /* Optional */ return 0; ret = of_property_read_u32_index(np, "st,syscfg-fmp", 1, &i2c_dev->fmp_sreg); if (ret) return ret; i2c_dev->fmp_creg = i2c_dev->fmp_sreg + i2c_dev->setup.fmp_clr_offset; return of_property_read_u32_index(np, "st,syscfg-fmp", 2, &i2c_dev->fmp_mask); } static int stm32f7_i2c_enable_smbus_host(struct stm32f7_i2c_dev *i2c_dev) { struct i2c_adapter *adap = &i2c_dev->adap; void __iomem *base = i2c_dev->base; struct i2c_client *client; client = i2c_new_slave_host_notify_device(adap); if (IS_ERR(client)) return PTR_ERR(client); i2c_dev->host_notify_client = client; /* Enable SMBus Host address */ stm32f7_i2c_set_bits(base + STM32F7_I2C_CR1, STM32F7_I2C_CR1_SMBHEN); return 0; } static void stm32f7_i2c_disable_smbus_host(struct stm32f7_i2c_dev *i2c_dev) { void __iomem *base = i2c_dev->base; if (i2c_dev->host_notify_client) { /* Disable SMBus Host address */ stm32f7_i2c_clr_bits(base + STM32F7_I2C_CR1, STM32F7_I2C_CR1_SMBHEN); i2c_free_slave_host_notify_device(i2c_dev->host_notify_client); } } static int stm32f7_i2c_enable_smbus_alert(struct stm32f7_i2c_dev *i2c_dev) { struct stm32f7_i2c_alert *alert; struct i2c_adapter *adap = &i2c_dev->adap; struct device *dev = i2c_dev->dev; void __iomem *base = i2c_dev->base; alert = devm_kzalloc(dev, sizeof(*alert), GFP_KERNEL); if (!alert) return -ENOMEM; alert->ara = i2c_new_smbus_alert_device(adap, &alert->setup); if (IS_ERR(alert->ara)) return PTR_ERR(alert->ara); i2c_dev->alert = alert; /* Enable SMBus Alert */ stm32f7_i2c_set_bits(base + STM32F7_I2C_CR1, STM32F7_I2C_CR1_ALERTEN); return 0; } static void stm32f7_i2c_disable_smbus_alert(struct stm32f7_i2c_dev *i2c_dev) { struct stm32f7_i2c_alert *alert = i2c_dev->alert; void __iomem *base = i2c_dev->base; if (alert) { /* Disable SMBus Alert */ stm32f7_i2c_clr_bits(base + STM32F7_I2C_CR1, 
STM32F7_I2C_CR1_ALERTEN); i2c_unregister_device(alert->ara); } } static u32 stm32f7_i2c_func(struct i2c_adapter *adap) { struct stm32f7_i2c_dev *i2c_dev = i2c_get_adapdata(adap); u32 func = I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR | I2C_FUNC_SLAVE | I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_SMBUS_BLOCK_PROC_CALL | I2C_FUNC_SMBUS_PROC_CALL | I2C_FUNC_SMBUS_PEC | I2C_FUNC_SMBUS_I2C_BLOCK; if (i2c_dev->smbus_mode) func |= I2C_FUNC_SMBUS_HOST_NOTIFY; return func; } static const struct i2c_algorithm stm32f7_i2c_algo = { .master_xfer = stm32f7_i2c_xfer, .smbus_xfer = stm32f7_i2c_smbus_xfer, .functionality = stm32f7_i2c_func, .reg_slave = stm32f7_i2c_reg_slave, .unreg_slave = stm32f7_i2c_unreg_slave, }; static int stm32f7_i2c_probe(struct platform_device *pdev) { struct stm32f7_i2c_dev *i2c_dev; const struct stm32f7_i2c_setup *setup; struct resource *res; struct i2c_adapter *adap; struct reset_control *rst; dma_addr_t phy_addr; int irq_error, irq_event, ret; i2c_dev = devm_kzalloc(&pdev->dev, sizeof(*i2c_dev), GFP_KERNEL); if (!i2c_dev) return -ENOMEM; i2c_dev->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(i2c_dev->base)) return PTR_ERR(i2c_dev->base); phy_addr = (dma_addr_t)res->start; irq_event = platform_get_irq(pdev, 0); if (irq_event < 0) return irq_event; irq_error = platform_get_irq(pdev, 1); if (irq_error < 0) return irq_error; i2c_dev->wakeup_src = of_property_read_bool(pdev->dev.of_node, "wakeup-source"); i2c_dev->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(i2c_dev->clk)) return dev_err_probe(&pdev->dev, PTR_ERR(i2c_dev->clk), "Failed to get controller clock\n"); ret = clk_prepare_enable(i2c_dev->clk); if (ret) { dev_err(&pdev->dev, "Failed to prepare_enable clock\n"); return ret; } rst = devm_reset_control_get(&pdev->dev, NULL); if (IS_ERR(rst)) { ret = dev_err_probe(&pdev->dev, PTR_ERR(rst), "Error: Missing reset ctrl\n"); goto clk_free; } reset_control_assert(rst); udelay(2); reset_control_deassert(rst); i2c_dev->dev = &pdev->dev; ret = devm_request_threaded_irq(&pdev->dev, irq_event, stm32f7_i2c_isr_event, stm32f7_i2c_isr_event_thread, IRQF_ONESHOT, pdev->name, i2c_dev); if (ret) { dev_err(&pdev->dev, "Failed to request irq event %i\n", irq_event); goto clk_free; } ret = devm_request_irq(&pdev->dev, irq_error, stm32f7_i2c_isr_error, 0, pdev->name, i2c_dev); if (ret) { dev_err(&pdev->dev, "Failed to request irq error %i\n", irq_error); goto clk_free; } setup = of_device_get_match_data(&pdev->dev); if (!setup) { dev_err(&pdev->dev, "Can't get device data\n"); ret = -ENODEV; goto clk_free; } i2c_dev->setup = *setup; ret = stm32f7_i2c_setup_timing(i2c_dev, &i2c_dev->setup); if (ret) goto clk_free; /* Setup Fast mode plus if necessary */ if (i2c_dev->bus_rate > I2C_MAX_FAST_MODE_FREQ) { ret = stm32f7_i2c_setup_fm_plus_bits(pdev, i2c_dev); if (ret) goto clk_free; ret = stm32f7_i2c_write_fm_plus_bits(i2c_dev, true); if (ret) goto clk_free; } adap = &i2c_dev->adap; i2c_set_adapdata(adap, i2c_dev); snprintf(adap->name, sizeof(adap->name), "STM32F7 I2C(%pa)", &res->start); adap->owner = THIS_MODULE; adap->timeout = 2 * HZ; adap->retries = 3; adap->algo = &stm32f7_i2c_algo; adap->dev.parent = &pdev->dev; adap->dev.of_node = pdev->dev.of_node; init_completion(&i2c_dev->complete); /* Init DMA config if supported */ i2c_dev->dma = stm32_i2c_dma_request(i2c_dev->dev, phy_addr, STM32F7_I2C_TXDR, STM32F7_I2C_RXDR); if (IS_ERR(i2c_dev->dma)) { ret = PTR_ERR(i2c_dev->dma); /* DMA 
support is optional, only report other errors */ if (ret != -ENODEV) goto fmp_clear; dev_dbg(i2c_dev->dev, "No DMA option: fallback using interrupts\n"); i2c_dev->dma = NULL; } if (i2c_dev->wakeup_src) { device_set_wakeup_capable(i2c_dev->dev, true); ret = dev_pm_set_wake_irq(i2c_dev->dev, irq_event); if (ret) { dev_err(i2c_dev->dev, "Failed to set wake up irq\n"); goto clr_wakeup_capable; } } platform_set_drvdata(pdev, i2c_dev); pm_runtime_set_autosuspend_delay(i2c_dev->dev, STM32F7_AUTOSUSPEND_DELAY); pm_runtime_use_autosuspend(i2c_dev->dev); pm_runtime_set_active(i2c_dev->dev); pm_runtime_enable(i2c_dev->dev); pm_runtime_get_noresume(&pdev->dev); stm32f7_i2c_hw_config(i2c_dev); i2c_dev->smbus_mode = of_property_read_bool(pdev->dev.of_node, "smbus"); ret = i2c_add_adapter(adap); if (ret) goto pm_disable; if (i2c_dev->smbus_mode) { ret = stm32f7_i2c_enable_smbus_host(i2c_dev); if (ret) { dev_err(i2c_dev->dev, "failed to enable SMBus Host-Notify protocol (%d)\n", ret); goto i2c_adapter_remove; } } if (of_property_read_bool(pdev->dev.of_node, "smbus-alert")) { ret = stm32f7_i2c_enable_smbus_alert(i2c_dev); if (ret) { dev_err(i2c_dev->dev, "failed to enable SMBus alert protocol (%d)\n", ret); goto i2c_disable_smbus_host; } } dev_info(i2c_dev->dev, "STM32F7 I2C-%d bus adapter\n", adap->nr); pm_runtime_mark_last_busy(i2c_dev->dev); pm_runtime_put_autosuspend(i2c_dev->dev); return 0; i2c_disable_smbus_host: stm32f7_i2c_disable_smbus_host(i2c_dev); i2c_adapter_remove: i2c_del_adapter(adap); pm_disable: pm_runtime_put_noidle(i2c_dev->dev); pm_runtime_disable(i2c_dev->dev); pm_runtime_set_suspended(i2c_dev->dev); pm_runtime_dont_use_autosuspend(i2c_dev->dev); if (i2c_dev->wakeup_src) dev_pm_clear_wake_irq(i2c_dev->dev); clr_wakeup_capable: if (i2c_dev->wakeup_src) device_set_wakeup_capable(i2c_dev->dev, false); if (i2c_dev->dma) { stm32_i2c_dma_free(i2c_dev->dma); i2c_dev->dma = NULL; } fmp_clear: stm32f7_i2c_write_fm_plus_bits(i2c_dev, false); clk_free: clk_disable_unprepare(i2c_dev->clk); return ret; } static void stm32f7_i2c_remove(struct platform_device *pdev) { struct stm32f7_i2c_dev *i2c_dev = platform_get_drvdata(pdev); stm32f7_i2c_disable_smbus_alert(i2c_dev); stm32f7_i2c_disable_smbus_host(i2c_dev); i2c_del_adapter(&i2c_dev->adap); pm_runtime_get_sync(i2c_dev->dev); if (i2c_dev->wakeup_src) { dev_pm_clear_wake_irq(i2c_dev->dev); /* * enforce that wakeup is disabled and that the device * is marked as non wakeup capable */ device_init_wakeup(i2c_dev->dev, false); } pm_runtime_put_noidle(i2c_dev->dev); pm_runtime_disable(i2c_dev->dev); pm_runtime_set_suspended(i2c_dev->dev); pm_runtime_dont_use_autosuspend(i2c_dev->dev); if (i2c_dev->dma) { stm32_i2c_dma_free(i2c_dev->dma); i2c_dev->dma = NULL; } stm32f7_i2c_write_fm_plus_bits(i2c_dev, false); clk_disable_unprepare(i2c_dev->clk); } static int __maybe_unused stm32f7_i2c_runtime_suspend(struct device *dev) { struct stm32f7_i2c_dev *i2c_dev = dev_get_drvdata(dev); if (!stm32f7_i2c_is_slave_registered(i2c_dev)) clk_disable_unprepare(i2c_dev->clk); return 0; } static int __maybe_unused stm32f7_i2c_runtime_resume(struct device *dev) { struct stm32f7_i2c_dev *i2c_dev = dev_get_drvdata(dev); int ret; if (!stm32f7_i2c_is_slave_registered(i2c_dev)) { ret = clk_prepare_enable(i2c_dev->clk); if (ret) { dev_err(dev, "failed to prepare_enable clock\n"); return ret; } } return 0; } static int __maybe_unused stm32f7_i2c_regs_backup(struct stm32f7_i2c_dev *i2c_dev) { int ret; struct stm32f7_i2c_regs *backup_regs = &i2c_dev->backup_regs; ret = 
pm_runtime_resume_and_get(i2c_dev->dev); if (ret < 0) return ret; backup_regs->cr1 = readl_relaxed(i2c_dev->base + STM32F7_I2C_CR1); backup_regs->cr2 = readl_relaxed(i2c_dev->base + STM32F7_I2C_CR2); backup_regs->oar1 = readl_relaxed(i2c_dev->base + STM32F7_I2C_OAR1); backup_regs->oar2 = readl_relaxed(i2c_dev->base + STM32F7_I2C_OAR2); backup_regs->tmgr = readl_relaxed(i2c_dev->base + STM32F7_I2C_TIMINGR); stm32f7_i2c_write_fm_plus_bits(i2c_dev, false); pm_runtime_put_sync(i2c_dev->dev); return ret; } static int __maybe_unused stm32f7_i2c_regs_restore(struct stm32f7_i2c_dev *i2c_dev) { u32 cr1; int ret; struct stm32f7_i2c_regs *backup_regs = &i2c_dev->backup_regs; ret = pm_runtime_resume_and_get(i2c_dev->dev); if (ret < 0) return ret; cr1 = readl_relaxed(i2c_dev->base + STM32F7_I2C_CR1); if (cr1 & STM32F7_I2C_CR1_PE) stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1, STM32F7_I2C_CR1_PE); writel_relaxed(backup_regs->tmgr, i2c_dev->base + STM32F7_I2C_TIMINGR); writel_relaxed(backup_regs->cr1 & ~STM32F7_I2C_CR1_PE, i2c_dev->base + STM32F7_I2C_CR1); if (backup_regs->cr1 & STM32F7_I2C_CR1_PE) stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1, STM32F7_I2C_CR1_PE); writel_relaxed(backup_regs->cr2, i2c_dev->base + STM32F7_I2C_CR2); writel_relaxed(backup_regs->oar1, i2c_dev->base + STM32F7_I2C_OAR1); writel_relaxed(backup_regs->oar2, i2c_dev->base + STM32F7_I2C_OAR2); stm32f7_i2c_write_fm_plus_bits(i2c_dev, true); pm_runtime_put_sync(i2c_dev->dev); return ret; } static int __maybe_unused stm32f7_i2c_suspend(struct device *dev) { struct stm32f7_i2c_dev *i2c_dev = dev_get_drvdata(dev); int ret; i2c_mark_adapter_suspended(&i2c_dev->adap); if (!device_may_wakeup(dev) && !device_wakeup_path(dev)) { ret = stm32f7_i2c_regs_backup(i2c_dev); if (ret < 0) { i2c_mark_adapter_resumed(&i2c_dev->adap); return ret; } pinctrl_pm_select_sleep_state(dev); pm_runtime_force_suspend(dev); } return 0; } static int __maybe_unused stm32f7_i2c_resume(struct device *dev) { struct stm32f7_i2c_dev *i2c_dev = dev_get_drvdata(dev); int ret; if (!device_may_wakeup(dev) && !device_wakeup_path(dev)) { ret = pm_runtime_force_resume(dev); if (ret < 0) return ret; pinctrl_pm_select_default_state(dev); ret = stm32f7_i2c_regs_restore(i2c_dev); if (ret < 0) return ret; } i2c_mark_adapter_resumed(&i2c_dev->adap); return 0; } static const struct dev_pm_ops stm32f7_i2c_pm_ops = { SET_RUNTIME_PM_OPS(stm32f7_i2c_runtime_suspend, stm32f7_i2c_runtime_resume, NULL) SET_SYSTEM_SLEEP_PM_OPS(stm32f7_i2c_suspend, stm32f7_i2c_resume) }; static const struct of_device_id stm32f7_i2c_match[] = { { .compatible = "st,stm32f7-i2c", .data = &stm32f7_setup}, { .compatible = "st,stm32mp15-i2c", .data = &stm32mp15_setup}, { .compatible = "st,stm32mp13-i2c", .data = &stm32mp13_setup}, {}, }; MODULE_DEVICE_TABLE(of, stm32f7_i2c_match); static struct platform_driver stm32f7_i2c_driver = { .driver = { .name = "stm32f7-i2c", .of_match_table = stm32f7_i2c_match, .pm = &stm32f7_i2c_pm_ops, }, .probe = stm32f7_i2c_probe, .remove_new = stm32f7_i2c_remove, }; module_platform_driver(stm32f7_i2c_driver); MODULE_AUTHOR("M'boumba Cedric Madianga <[email protected]>"); MODULE_DESCRIPTION("STMicroelectronics STM32F7 I2C driver"); MODULE_LICENSE("GPL v2");
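/*
 * Illustrative usage sketch (not part of the driver): a two-message
 * register read submitted through i2c-dev lands in stm32f7_i2c_xfer()
 * above, and the second message is chained from the TC-without-STOP path
 * of the event ISR, i.e. via a repeated start. The "/dev/i2c-1" node, the
 * 0x50 chip address and register 0x00 below are assumptions made for the
 * sake of the example.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *	#include <linux/i2c.h>
 *	#include <linux/i2c-dev.h>
 *
 *	int main(void)
 *	{
 *		uint8_t reg = 0x00, val = 0;
 *		struct i2c_msg msgs[2] = {
 *			// write the register index, then read one byte back
 *			{ .addr = 0x50, .flags = 0, .len = 1, .buf = &reg },
 *			{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = &val },
 *		};
 *		struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 2 };
 *		int fd = open("/dev/i2c-1", O_RDWR);
 *
 *		if (fd < 0 || ioctl(fd, I2C_RDWR, &xfer) < 0) {
 *			perror("i2c transfer");
 *			return 1;
 *		}
 *		printf("reg 0x%02x = 0x%02x\n", reg, val);
 *		close(fd);
 *		return 0;
 *	}
 */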
linux-master
drivers/i2c/busses/i2c-stm32f7.c
// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (c) 1998 - 2002 Frodo Looijaard <[email protected]>, Philip Edelbrock <[email protected]>, and Mark D. Studebaker <[email protected]> Copyright (C) 2007 - 2014 Jean Delvare <[email protected]> Copyright (C) 2010 Intel Corporation, David Woodhouse <[email protected]> */ /* * Supports the following Intel I/O Controller Hubs (ICH): * * I/O Block I2C * region SMBus Block proc. block * Chip name PCI ID size PEC buffer call read * --------------------------------------------------------------------------- * 82801AA (ICH) 0x2413 16 no no no no * 82801AB (ICH0) 0x2423 16 no no no no * 82801BA (ICH2) 0x2443 16 no no no no * 82801CA (ICH3) 0x2483 32 soft no no no * 82801DB (ICH4) 0x24c3 32 hard yes no no * 82801E (ICH5) 0x24d3 32 hard yes yes yes * 6300ESB 0x25a4 32 hard yes yes yes * 82801F (ICH6) 0x266a 32 hard yes yes yes * 6310ESB/6320ESB 0x269b 32 hard yes yes yes * 82801G (ICH7) 0x27da 32 hard yes yes yes * 82801H (ICH8) 0x283e 32 hard yes yes yes * 82801I (ICH9) 0x2930 32 hard yes yes yes * EP80579 (Tolapai) 0x5032 32 hard yes yes yes * ICH10 0x3a30 32 hard yes yes yes * ICH10 0x3a60 32 hard yes yes yes * 5/3400 Series (PCH) 0x3b30 32 hard yes yes yes * 6 Series (PCH) 0x1c22 32 hard yes yes yes * Patsburg (PCH) 0x1d22 32 hard yes yes yes * Patsburg (PCH) IDF 0x1d70 32 hard yes yes yes * Patsburg (PCH) IDF 0x1d71 32 hard yes yes yes * Patsburg (PCH) IDF 0x1d72 32 hard yes yes yes * DH89xxCC (PCH) 0x2330 32 hard yes yes yes * Panther Point (PCH) 0x1e22 32 hard yes yes yes * Lynx Point (PCH) 0x8c22 32 hard yes yes yes * Lynx Point-LP (PCH) 0x9c22 32 hard yes yes yes * Avoton (SOC) 0x1f3c 32 hard yes yes yes * Wellsburg (PCH) 0x8d22 32 hard yes yes yes * Wellsburg (PCH) MS 0x8d7d 32 hard yes yes yes * Wellsburg (PCH) MS 0x8d7e 32 hard yes yes yes * Wellsburg (PCH) MS 0x8d7f 32 hard yes yes yes * Coleto Creek (PCH) 0x23b0 32 hard yes yes yes * Wildcat Point (PCH) 0x8ca2 32 hard yes yes yes * Wildcat Point-LP (PCH) 0x9ca2 32 hard yes yes yes * BayTrail (SOC) 0x0f12 32 hard yes yes yes * Braswell (SOC) 0x2292 32 hard yes yes yes * Sunrise Point-H (PCH) 0xa123 32 hard yes yes yes * Sunrise Point-LP (PCH) 0x9d23 32 hard yes yes yes * DNV (SOC) 0x19df 32 hard yes yes yes * Emmitsburg (PCH) 0x1bc9 32 hard yes yes yes * Broxton (SOC) 0x5ad4 32 hard yes yes yes * Lewisburg (PCH) 0xa1a3 32 hard yes yes yes * Lewisburg Supersku (PCH) 0xa223 32 hard yes yes yes * Kaby Lake PCH-H (PCH) 0xa2a3 32 hard yes yes yes * Gemini Lake (SOC) 0x31d4 32 hard yes yes yes * Cannon Lake-H (PCH) 0xa323 32 hard yes yes yes * Cannon Lake-LP (PCH) 0x9da3 32 hard yes yes yes * Cedar Fork (PCH) 0x18df 32 hard yes yes yes * Ice Lake-LP (PCH) 0x34a3 32 hard yes yes yes * Ice Lake-N (PCH) 0x38a3 32 hard yes yes yes * Comet Lake (PCH) 0x02a3 32 hard yes yes yes * Comet Lake-H (PCH) 0x06a3 32 hard yes yes yes * Elkhart Lake (PCH) 0x4b23 32 hard yes yes yes * Tiger Lake-LP (PCH) 0xa0a3 32 hard yes yes yes * Tiger Lake-H (PCH) 0x43a3 32 hard yes yes yes * Jasper Lake (SOC) 0x4da3 32 hard yes yes yes * Comet Lake-V (PCH) 0xa3a3 32 hard yes yes yes * Alder Lake-S (PCH) 0x7aa3 32 hard yes yes yes * Alder Lake-P (PCH) 0x51a3 32 hard yes yes yes * Alder Lake-M (PCH) 0x54a3 32 hard yes yes yes * Raptor Lake-S (PCH) 0x7a23 32 hard yes yes yes * Meteor Lake-P (SOC) 0x7e22 32 hard yes yes yes * Meteor Lake SoC-S (SOC) 0xae22 32 hard yes yes yes * Meteor Lake PCH-S (PCH) 0x7f23 32 hard yes yes yes * * Features supported by this driver: * Software PEC no * Hardware PEC yes * Block buffer 
yes * Block process call transaction yes * I2C block read transaction yes (doesn't use the block buffer) * Slave mode no * SMBus Host Notify yes * Interrupt processing yes * * See the file Documentation/i2c/busses/i2c-i801.rst for details. */ #define DRV_NAME "i801_smbus" #include <linux/interrupt.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/stddef.h> #include <linux/delay.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/i2c-smbus.h> #include <linux/acpi.h> #include <linux/io.h> #include <linux/dmi.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/completion.h> #include <linux/err.h> #include <linux/platform_device.h> #include <linux/platform_data/itco_wdt.h> #include <linux/platform_data/x86/p2sb.h> #include <linux/pm_runtime.h> #include <linux/mutex.h> #if IS_ENABLED(CONFIG_I2C_MUX_GPIO) && defined CONFIG_DMI #include <linux/gpio/machine.h> #include <linux/platform_data/i2c-mux-gpio.h> #endif /* I801 SMBus address offsets */ #define SMBHSTSTS(p) (0 + (p)->smba) #define SMBHSTCNT(p) (2 + (p)->smba) #define SMBHSTCMD(p) (3 + (p)->smba) #define SMBHSTADD(p) (4 + (p)->smba) #define SMBHSTDAT0(p) (5 + (p)->smba) #define SMBHSTDAT1(p) (6 + (p)->smba) #define SMBBLKDAT(p) (7 + (p)->smba) #define SMBPEC(p) (8 + (p)->smba) /* ICH3 and later */ #define SMBAUXSTS(p) (12 + (p)->smba) /* ICH4 and later */ #define SMBAUXCTL(p) (13 + (p)->smba) /* ICH4 and later */ #define SMBSLVSTS(p) (16 + (p)->smba) /* ICH3 and later */ #define SMBSLVCMD(p) (17 + (p)->smba) /* ICH3 and later */ #define SMBNTFDADD(p) (20 + (p)->smba) /* ICH3 and later */ /* PCI Address Constants */ #define SMBBAR 4 #define SMBHSTCFG 0x040 #define TCOBASE 0x050 #define TCOCTL 0x054 #define SBREG_SMBCTRL 0xc6000c #define SBREG_SMBCTRL_DNV 0xcf000c /* Host configuration bits for SMBHSTCFG */ #define SMBHSTCFG_HST_EN BIT(0) #define SMBHSTCFG_SMB_SMI_EN BIT(1) #define SMBHSTCFG_I2C_EN BIT(2) #define SMBHSTCFG_SPD_WD BIT(4) /* TCO configuration bits for TCOCTL */ #define TCOCTL_EN BIT(8) /* Auxiliary status register bits, ICH4+ only */ #define SMBAUXSTS_CRCE BIT(0) #define SMBAUXSTS_STCO BIT(1) /* Auxiliary control register bits, ICH4+ only */ #define SMBAUXCTL_CRC BIT(0) #define SMBAUXCTL_E32B BIT(1) /* I801 command constants */ #define I801_QUICK 0x00 #define I801_BYTE 0x04 #define I801_BYTE_DATA 0x08 #define I801_WORD_DATA 0x0C #define I801_PROC_CALL 0x10 #define I801_BLOCK_DATA 0x14 #define I801_I2C_BLOCK_DATA 0x18 /* ICH5 and later */ #define I801_BLOCK_PROC_CALL 0x1C /* I801 Host Control register bits */ #define SMBHSTCNT_INTREN BIT(0) #define SMBHSTCNT_KILL BIT(1) #define SMBHSTCNT_LAST_BYTE BIT(5) #define SMBHSTCNT_START BIT(6) #define SMBHSTCNT_PEC_EN BIT(7) /* ICH3 and later */ /* I801 Hosts Status register bits */ #define SMBHSTSTS_BYTE_DONE BIT(7) #define SMBHSTSTS_INUSE_STS BIT(6) #define SMBHSTSTS_SMBALERT_STS BIT(5) #define SMBHSTSTS_FAILED BIT(4) #define SMBHSTSTS_BUS_ERR BIT(3) #define SMBHSTSTS_DEV_ERR BIT(2) #define SMBHSTSTS_INTR BIT(1) #define SMBHSTSTS_HOST_BUSY BIT(0) /* Host Notify Status register bits */ #define SMBSLVSTS_HST_NTFY_STS BIT(0) /* Host Notify Command register bits */ #define SMBSLVCMD_SMBALERT_DISABLE BIT(2) #define SMBSLVCMD_HST_NTFY_INTREN BIT(0) #define STATUS_ERROR_FLAGS (SMBHSTSTS_FAILED | SMBHSTSTS_BUS_ERR | \ SMBHSTSTS_DEV_ERR) #define STATUS_FLAGS (SMBHSTSTS_BYTE_DONE | SMBHSTSTS_INTR | \ STATUS_ERROR_FLAGS) /* Older devices have their ID defined in <linux/pci_ids.h> */ #define 
PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS 0x02a3 #define PCI_DEVICE_ID_INTEL_COMETLAKE_H_SMBUS 0x06a3 #define PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS 0x0f12 #define PCI_DEVICE_ID_INTEL_CDF_SMBUS 0x18df #define PCI_DEVICE_ID_INTEL_DNV_SMBUS 0x19df #define PCI_DEVICE_ID_INTEL_EBG_SMBUS 0x1bc9 #define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22 #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS 0x1d22 /* Patsburg also has three 'Integrated Device Function' SMBus controllers */ #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF0 0x1d70 #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1 0x1d71 #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2 0x1d72 #define PCI_DEVICE_ID_INTEL_PANTHERPOINT_SMBUS 0x1e22 #define PCI_DEVICE_ID_INTEL_AVOTON_SMBUS 0x1f3c #define PCI_DEVICE_ID_INTEL_BRASWELL_SMBUS 0x2292 #define PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS 0x2330 #define PCI_DEVICE_ID_INTEL_COLETOCREEK_SMBUS 0x23b0 #define PCI_DEVICE_ID_INTEL_GEMINILAKE_SMBUS 0x31d4 #define PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS 0x34a3 #define PCI_DEVICE_ID_INTEL_ICELAKE_N_SMBUS 0x38a3 #define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS 0x3b30 #define PCI_DEVICE_ID_INTEL_TIGERLAKE_H_SMBUS 0x43a3 #define PCI_DEVICE_ID_INTEL_ELKHART_LAKE_SMBUS 0x4b23 #define PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS 0x4da3 #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS 0x51a3 #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_M_SMBUS 0x54a3 #define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS 0x5ad4 #define PCI_DEVICE_ID_INTEL_RAPTOR_LAKE_S_SMBUS 0x7a23 #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS 0x7aa3 #define PCI_DEVICE_ID_INTEL_METEOR_LAKE_P_SMBUS 0x7e22 #define PCI_DEVICE_ID_INTEL_METEOR_LAKE_PCH_S_SMBUS 0x7f23 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS 0x8c22 #define PCI_DEVICE_ID_INTEL_WILDCATPOINT_SMBUS 0x8ca2 #define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS 0x8d22 #define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS0 0x8d7d #define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1 0x8d7e #define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2 0x8d7f #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS 0x9c22 #define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_SMBUS 0x9ca2 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS 0x9d23 #define PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS 0x9da3 #define PCI_DEVICE_ID_INTEL_TIGERLAKE_LP_SMBUS 0xa0a3 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS 0xa123 #define PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS 0xa1a3 #define PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS 0xa223 #define PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS 0xa2a3 #define PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS 0xa323 #define PCI_DEVICE_ID_INTEL_COMETLAKE_V_SMBUS 0xa3a3 #define PCI_DEVICE_ID_INTEL_METEOR_LAKE_SOC_S_SMBUS 0xae22 struct i801_mux_config { char *gpio_chip; unsigned values[3]; int n_values; unsigned classes[3]; unsigned gpios[2]; /* Relative to gpio_chip->base */ int n_gpios; }; struct i801_priv { struct i2c_adapter adapter; unsigned long smba; unsigned char original_hstcfg; unsigned char original_hstcnt; unsigned char original_slvcmd; struct pci_dev *pci_dev; unsigned int features; /* isr processing */ struct completion done; u8 status; /* Command state used by isr for byte-by-byte block transactions */ u8 cmd; bool is_read; int count; int len; u8 *data; #if IS_ENABLED(CONFIG_I2C_MUX_GPIO) && defined CONFIG_DMI const struct i801_mux_config *mux_drvdata; struct platform_device *mux_pdev; struct gpiod_lookup_table *lookup; #endif struct platform_device *tco_pdev; /* * If set to true the host controller registers are reserved for * ACPI AML use. Protected by acpi_lock. 
*/ bool acpi_reserved; struct mutex acpi_lock; }; #define FEATURE_SMBUS_PEC BIT(0) #define FEATURE_BLOCK_BUFFER BIT(1) #define FEATURE_BLOCK_PROC BIT(2) #define FEATURE_I2C_BLOCK_READ BIT(3) #define FEATURE_IRQ BIT(4) #define FEATURE_HOST_NOTIFY BIT(5) /* Not really a feature, but it's convenient to handle it as such */ #define FEATURE_IDF BIT(15) #define FEATURE_TCO_SPT BIT(16) #define FEATURE_TCO_CNL BIT(17) static const char *i801_feature_names[] = { "SMBus PEC", "Block buffer", "Block process call", "I2C block read", "Interrupt", "SMBus Host Notify", }; static unsigned int disable_features; module_param(disable_features, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(disable_features, "Disable selected driver features:\n" "\t\t 0x01 disable SMBus PEC\n" "\t\t 0x02 disable the block buffer\n" "\t\t 0x08 disable the I2C block read functionality\n" "\t\t 0x10 don't use interrupts\n" "\t\t 0x20 disable SMBus Host Notify "); /* Make sure the SMBus host is ready to start transmitting. Return 0 if it is, -EBUSY if it is not. */ static int i801_check_pre(struct i801_priv *priv) { int status; status = inb_p(SMBHSTSTS(priv)); if (status & SMBHSTSTS_HOST_BUSY) { pci_err(priv->pci_dev, "SMBus is busy, can't use it!\n"); return -EBUSY; } status &= STATUS_FLAGS; if (status) { pci_dbg(priv->pci_dev, "Clearing status flags (%02x)\n", status); outb_p(status, SMBHSTSTS(priv)); } /* * Clear CRC status if needed. * During normal operation, i801_check_post() takes care * of it after every operation. We do it here only in case * the hardware was already in this state when the driver * started. */ if (priv->features & FEATURE_SMBUS_PEC) { status = inb_p(SMBAUXSTS(priv)) & SMBAUXSTS_CRCE; if (status) { pci_dbg(priv->pci_dev, "Clearing aux status flags (%02x)\n", status); outb_p(status, SMBAUXSTS(priv)); } } return 0; } static int i801_check_post(struct i801_priv *priv, int status) { int result = 0; /* * If the SMBus is still busy, we give up */ if (unlikely(status < 0)) { dev_err(&priv->pci_dev->dev, "Transaction timeout\n"); /* try to stop the current command */ dev_dbg(&priv->pci_dev->dev, "Terminating the current operation\n"); outb_p(SMBHSTCNT_KILL, SMBHSTCNT(priv)); usleep_range(1000, 2000); outb_p(0, SMBHSTCNT(priv)); /* Check if it worked */ status = inb_p(SMBHSTSTS(priv)); if ((status & SMBHSTSTS_HOST_BUSY) || !(status & SMBHSTSTS_FAILED)) dev_err(&priv->pci_dev->dev, "Failed terminating the transaction\n"); return -ETIMEDOUT; } if (status & SMBHSTSTS_FAILED) { result = -EIO; dev_err(&priv->pci_dev->dev, "Transaction failed\n"); } if (status & SMBHSTSTS_DEV_ERR) { /* * This may be a PEC error, check and clear it. * * AUXSTS is handled differently from HSTSTS. * For HSTSTS, i801_isr() or i801_wait_intr() * has already cleared the error bits in hardware, * and we are passed a copy of the original value * in "status". * For AUXSTS, the hardware register is left * for us to handle here. * This is asymmetric, slightly iffy, but safe, * since all this code is serialized and the CRCE * bit is harmless as long as it's cleared before * the next operation. 
*/ if ((priv->features & FEATURE_SMBUS_PEC) && (inb_p(SMBAUXSTS(priv)) & SMBAUXSTS_CRCE)) { outb_p(SMBAUXSTS_CRCE, SMBAUXSTS(priv)); result = -EBADMSG; dev_dbg(&priv->pci_dev->dev, "PEC error\n"); } else { result = -ENXIO; dev_dbg(&priv->pci_dev->dev, "No response\n"); } } if (status & SMBHSTSTS_BUS_ERR) { result = -EAGAIN; dev_dbg(&priv->pci_dev->dev, "Lost arbitration\n"); } return result; } /* Wait for BUSY being cleared and either INTR or an error flag being set */ static int i801_wait_intr(struct i801_priv *priv) { unsigned long timeout = jiffies + priv->adapter.timeout; int status, busy; do { usleep_range(250, 500); status = inb_p(SMBHSTSTS(priv)); busy = status & SMBHSTSTS_HOST_BUSY; status &= STATUS_ERROR_FLAGS | SMBHSTSTS_INTR; if (!busy && status) return status & STATUS_ERROR_FLAGS; } while (time_is_after_eq_jiffies(timeout)); return -ETIMEDOUT; } /* Wait for either BYTE_DONE or an error flag being set */ static int i801_wait_byte_done(struct i801_priv *priv) { unsigned long timeout = jiffies + priv->adapter.timeout; int status; do { usleep_range(250, 500); status = inb_p(SMBHSTSTS(priv)); if (status & (STATUS_ERROR_FLAGS | SMBHSTSTS_BYTE_DONE)) return status & STATUS_ERROR_FLAGS; } while (time_is_after_eq_jiffies(timeout)); return -ETIMEDOUT; } static int i801_transaction(struct i801_priv *priv, int xact) { unsigned long result; const struct i2c_adapter *adap = &priv->adapter; if (priv->features & FEATURE_IRQ) { reinit_completion(&priv->done); outb_p(xact | SMBHSTCNT_INTREN | SMBHSTCNT_START, SMBHSTCNT(priv)); result = wait_for_completion_timeout(&priv->done, adap->timeout); return result ? priv->status : -ETIMEDOUT; } outb_p(xact | SMBHSTCNT_START, SMBHSTCNT(priv)); return i801_wait_intr(priv); } static int i801_block_transaction_by_block(struct i801_priv *priv, union i2c_smbus_data *data, char read_write, int command) { int i, len, status, xact; switch (command) { case I2C_SMBUS_BLOCK_PROC_CALL: xact = I801_BLOCK_PROC_CALL; break; case I2C_SMBUS_BLOCK_DATA: xact = I801_BLOCK_DATA; break; default: return -EOPNOTSUPP; } /* Set block buffer mode */ outb_p(inb_p(SMBAUXCTL(priv)) | SMBAUXCTL_E32B, SMBAUXCTL(priv)); inb_p(SMBHSTCNT(priv)); /* reset the data buffer index */ if (read_write == I2C_SMBUS_WRITE) { len = data->block[0]; outb_p(len, SMBHSTDAT0(priv)); for (i = 0; i < len; i++) outb_p(data->block[i+1], SMBBLKDAT(priv)); } status = i801_transaction(priv, xact); if (status) goto out; if (read_write == I2C_SMBUS_READ || command == I2C_SMBUS_BLOCK_PROC_CALL) { len = inb_p(SMBHSTDAT0(priv)); if (len < 1 || len > I2C_SMBUS_BLOCK_MAX) { status = -EPROTO; goto out; } data->block[0] = len; for (i = 0; i < len; i++) data->block[i + 1] = inb_p(SMBBLKDAT(priv)); } out: outb_p(inb_p(SMBAUXCTL(priv)) & ~SMBAUXCTL_E32B, SMBAUXCTL(priv)); return status; } static void i801_isr_byte_done(struct i801_priv *priv) { if (priv->is_read) { /* For SMBus block reads, length is received with first byte */ if (((priv->cmd & 0x1c) == I801_BLOCK_DATA) && (priv->count == 0)) { priv->len = inb_p(SMBHSTDAT0(priv)); if (priv->len < 1 || priv->len > I2C_SMBUS_BLOCK_MAX) { dev_err(&priv->pci_dev->dev, "Illegal SMBus block read size %d\n", priv->len); /* FIXME: Recover */ priv->len = I2C_SMBUS_BLOCK_MAX; } priv->data[-1] = priv->len; } /* Read next byte */ if (priv->count < priv->len) priv->data[priv->count++] = inb(SMBBLKDAT(priv)); else dev_dbg(&priv->pci_dev->dev, "Discarding extra byte on block read\n"); /* Set LAST_BYTE for last byte of read transaction */ if (priv->count == priv->len - 1) 
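		/*
		 * The controller must see LAST_BYTE before it fetches the
		 * final byte so that it can NACK the slave and finish the
		 * transaction; the polled loop in
		 * i801_block_transaction_byte_by_byte() does the same thing.
		 */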
outb_p(priv->cmd | SMBHSTCNT_LAST_BYTE, SMBHSTCNT(priv)); } else if (priv->count < priv->len - 1) { /* Write next byte, except for IRQ after last byte */ outb_p(priv->data[++priv->count], SMBBLKDAT(priv)); } } static irqreturn_t i801_host_notify_isr(struct i801_priv *priv) { unsigned short addr; addr = inb_p(SMBNTFDADD(priv)) >> 1; /* * With the tested platforms, reading SMBNTFDDAT (22 + (p)->smba) * always returns 0. Our current implementation doesn't provide * data, so we just ignore it. */ i2c_handle_smbus_host_notify(&priv->adapter, addr); /* clear Host Notify bit and return */ outb_p(SMBSLVSTS_HST_NTFY_STS, SMBSLVSTS(priv)); return IRQ_HANDLED; } /* * There are three kinds of interrupts: * * 1) i801 signals transaction completion with one of these interrupts: * INTR - Success * DEV_ERR - Invalid command, NAK or communication timeout * BUS_ERR - SMI# transaction collision * FAILED - transaction was canceled due to a KILL request * When any of these occur, update ->status and signal completion. * * 2) For byte-by-byte (I2C read/write) transactions, one BYTE_DONE interrupt * occurs for each byte of a byte-by-byte to prepare the next byte. * * 3) Host Notify interrupts */ static irqreturn_t i801_isr(int irq, void *dev_id) { struct i801_priv *priv = dev_id; u16 pcists; u8 status; /* Confirm this is our interrupt */ pci_read_config_word(priv->pci_dev, PCI_STATUS, &pcists); if (!(pcists & PCI_STATUS_INTERRUPT)) return IRQ_NONE; if (priv->features & FEATURE_HOST_NOTIFY) { status = inb_p(SMBSLVSTS(priv)); if (status & SMBSLVSTS_HST_NTFY_STS) return i801_host_notify_isr(priv); } status = inb_p(SMBHSTSTS(priv)); if ((status & (SMBHSTSTS_BYTE_DONE | STATUS_ERROR_FLAGS)) == SMBHSTSTS_BYTE_DONE) i801_isr_byte_done(priv); /* * Clear IRQ sources: SMB_ALERT status is set after signal assertion * independently of the interrupt generation being blocked or not * so clear it always when the status is set. */ status &= STATUS_FLAGS | SMBHSTSTS_SMBALERT_STS; outb_p(status, SMBHSTSTS(priv)); status &= STATUS_ERROR_FLAGS | SMBHSTSTS_INTR; if (status) { priv->status = status & STATUS_ERROR_FLAGS; complete(&priv->done); } return IRQ_HANDLED; } /* * For "byte-by-byte" block transactions: * I2C write uses cmd=I801_BLOCK_DATA, I2C_EN=1 * I2C read uses cmd=I801_I2C_BLOCK_DATA */ static int i801_block_transaction_byte_by_byte(struct i801_priv *priv, union i2c_smbus_data *data, char read_write, int command) { int i, len; int smbcmd; int status; unsigned long result; const struct i2c_adapter *adap = &priv->adapter; if (command == I2C_SMBUS_BLOCK_PROC_CALL) return -EOPNOTSUPP; len = data->block[0]; if (read_write == I2C_SMBUS_WRITE) { outb_p(len, SMBHSTDAT0(priv)); outb_p(data->block[1], SMBBLKDAT(priv)); } if (command == I2C_SMBUS_I2C_BLOCK_DATA && read_write == I2C_SMBUS_READ) smbcmd = I801_I2C_BLOCK_DATA; else smbcmd = I801_BLOCK_DATA; if (priv->features & FEATURE_IRQ) { priv->is_read = (read_write == I2C_SMBUS_READ); if (len == 1 && priv->is_read) smbcmd |= SMBHSTCNT_LAST_BYTE; priv->cmd = smbcmd | SMBHSTCNT_INTREN; priv->len = len; priv->count = 0; priv->data = &data->block[1]; reinit_completion(&priv->done); outb_p(priv->cmd | SMBHSTCNT_START, SMBHSTCNT(priv)); result = wait_for_completion_timeout(&priv->done, adap->timeout); return result ? 
priv->status : -ETIMEDOUT; } for (i = 1; i <= len; i++) { if (i == len && read_write == I2C_SMBUS_READ) smbcmd |= SMBHSTCNT_LAST_BYTE; outb_p(smbcmd, SMBHSTCNT(priv)); if (i == 1) outb_p(inb(SMBHSTCNT(priv)) | SMBHSTCNT_START, SMBHSTCNT(priv)); status = i801_wait_byte_done(priv); if (status) return status; if (i == 1 && read_write == I2C_SMBUS_READ && command != I2C_SMBUS_I2C_BLOCK_DATA) { len = inb_p(SMBHSTDAT0(priv)); if (len < 1 || len > I2C_SMBUS_BLOCK_MAX) { dev_err(&priv->pci_dev->dev, "Illegal SMBus block read size %d\n", len); /* Recover */ while (inb_p(SMBHSTSTS(priv)) & SMBHSTSTS_HOST_BUSY) outb_p(SMBHSTSTS_BYTE_DONE, SMBHSTSTS(priv)); outb_p(SMBHSTSTS_INTR, SMBHSTSTS(priv)); return -EPROTO; } data->block[0] = len; } /* Retrieve/store value in SMBBLKDAT */ if (read_write == I2C_SMBUS_READ) data->block[i] = inb_p(SMBBLKDAT(priv)); if (read_write == I2C_SMBUS_WRITE && i+1 <= len) outb_p(data->block[i+1], SMBBLKDAT(priv)); /* signals SMBBLKDAT ready */ outb_p(SMBHSTSTS_BYTE_DONE, SMBHSTSTS(priv)); } return i801_wait_intr(priv); } static void i801_set_hstadd(struct i801_priv *priv, u8 addr, char read_write) { outb_p((addr << 1) | (read_write & 0x01), SMBHSTADD(priv)); } /* Single value transaction function */ static int i801_simple_transaction(struct i801_priv *priv, union i2c_smbus_data *data, u8 addr, u8 hstcmd, char read_write, int command) { int xact, ret; switch (command) { case I2C_SMBUS_QUICK: i801_set_hstadd(priv, addr, read_write); xact = I801_QUICK; break; case I2C_SMBUS_BYTE: i801_set_hstadd(priv, addr, read_write); if (read_write == I2C_SMBUS_WRITE) outb_p(hstcmd, SMBHSTCMD(priv)); xact = I801_BYTE; break; case I2C_SMBUS_BYTE_DATA: i801_set_hstadd(priv, addr, read_write); if (read_write == I2C_SMBUS_WRITE) outb_p(data->byte, SMBHSTDAT0(priv)); outb_p(hstcmd, SMBHSTCMD(priv)); xact = I801_BYTE_DATA; break; case I2C_SMBUS_WORD_DATA: i801_set_hstadd(priv, addr, read_write); if (read_write == I2C_SMBUS_WRITE) { outb_p(data->word & 0xff, SMBHSTDAT0(priv)); outb_p((data->word & 0xff00) >> 8, SMBHSTDAT1(priv)); } outb_p(hstcmd, SMBHSTCMD(priv)); xact = I801_WORD_DATA; break; case I2C_SMBUS_PROC_CALL: i801_set_hstadd(priv, addr, I2C_SMBUS_WRITE); outb_p(data->word & 0xff, SMBHSTDAT0(priv)); outb_p((data->word & 0xff00) >> 8, SMBHSTDAT1(priv)); outb_p(hstcmd, SMBHSTCMD(priv)); read_write = I2C_SMBUS_READ; xact = I801_PROC_CALL; break; default: pci_err(priv->pci_dev, "Unsupported transaction %d\n", command); return -EOPNOTSUPP; } ret = i801_transaction(priv, xact); if (ret || read_write == I2C_SMBUS_WRITE) return ret; switch (command) { case I2C_SMBUS_BYTE: case I2C_SMBUS_BYTE_DATA: data->byte = inb_p(SMBHSTDAT0(priv)); break; case I2C_SMBUS_WORD_DATA: case I2C_SMBUS_PROC_CALL: data->word = inb_p(SMBHSTDAT0(priv)) + (inb_p(SMBHSTDAT1(priv)) << 8); break; } return 0; } /* Block transaction function */ static int i801_block_transaction(struct i801_priv *priv, union i2c_smbus_data *data, u8 addr, u8 hstcmd, char read_write, int command) { int result = 0; unsigned char hostc; if (read_write == I2C_SMBUS_READ && command == I2C_SMBUS_BLOCK_DATA) data->block[0] = I2C_SMBUS_BLOCK_MAX; else if (data->block[0] < 1 || data->block[0] > I2C_SMBUS_BLOCK_MAX) return -EPROTO; switch (command) { case I2C_SMBUS_BLOCK_DATA: i801_set_hstadd(priv, addr, read_write); outb_p(hstcmd, SMBHSTCMD(priv)); break; case I2C_SMBUS_I2C_BLOCK_DATA: /* * NB: page 240 of ICH5 datasheet shows that the R/#W * bit should be cleared here, even when reading. 
* However if SPD Write Disable is set (Lynx Point and later), * the read will fail if we don't set the R/#W bit. */ i801_set_hstadd(priv, addr, priv->original_hstcfg & SMBHSTCFG_SPD_WD ? read_write : I2C_SMBUS_WRITE); if (read_write == I2C_SMBUS_READ) { /* NB: page 240 of ICH5 datasheet also shows * that DATA1 is the cmd field when reading */ outb_p(hstcmd, SMBHSTDAT1(priv)); } else outb_p(hstcmd, SMBHSTCMD(priv)); if (read_write == I2C_SMBUS_WRITE) { /* set I2C_EN bit in configuration register */ pci_read_config_byte(priv->pci_dev, SMBHSTCFG, &hostc); pci_write_config_byte(priv->pci_dev, SMBHSTCFG, hostc | SMBHSTCFG_I2C_EN); } else if (!(priv->features & FEATURE_I2C_BLOCK_READ)) { dev_err(&priv->pci_dev->dev, "I2C block read is unsupported!\n"); return -EOPNOTSUPP; } break; case I2C_SMBUS_BLOCK_PROC_CALL: /* Needs to be flagged as write transaction */ i801_set_hstadd(priv, addr, I2C_SMBUS_WRITE); outb_p(hstcmd, SMBHSTCMD(priv)); break; } /* Experience has shown that the block buffer can only be used for SMBus (not I2C) block transactions, even though the datasheet doesn't mention this limitation. */ if ((priv->features & FEATURE_BLOCK_BUFFER) && command != I2C_SMBUS_I2C_BLOCK_DATA) result = i801_block_transaction_by_block(priv, data, read_write, command); else result = i801_block_transaction_byte_by_byte(priv, data, read_write, command); if (command == I2C_SMBUS_I2C_BLOCK_DATA && read_write == I2C_SMBUS_WRITE) { /* restore saved configuration register value */ pci_write_config_byte(priv->pci_dev, SMBHSTCFG, hostc); } return result; } /* Return negative errno on error. */ static s32 i801_access(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { int hwpec, ret; struct i801_priv *priv = i2c_get_adapdata(adap); mutex_lock(&priv->acpi_lock); if (priv->acpi_reserved) { mutex_unlock(&priv->acpi_lock); return -EBUSY; } pm_runtime_get_sync(&priv->pci_dev->dev); ret = i801_check_pre(priv); if (ret) goto out; hwpec = (priv->features & FEATURE_SMBUS_PEC) && (flags & I2C_CLIENT_PEC) && size != I2C_SMBUS_QUICK && size != I2C_SMBUS_I2C_BLOCK_DATA; if (hwpec) /* enable/disable hardware PEC */ outb_p(inb_p(SMBAUXCTL(priv)) | SMBAUXCTL_CRC, SMBAUXCTL(priv)); else outb_p(inb_p(SMBAUXCTL(priv)) & (~SMBAUXCTL_CRC), SMBAUXCTL(priv)); if (size == I2C_SMBUS_BLOCK_DATA || size == I2C_SMBUS_I2C_BLOCK_DATA || size == I2C_SMBUS_BLOCK_PROC_CALL) ret = i801_block_transaction(priv, data, addr, command, read_write, size); else ret = i801_simple_transaction(priv, data, addr, command, read_write, size); ret = i801_check_post(priv, ret); /* Some BIOSes don't like it when PEC is enabled at reboot or resume * time, so we forcibly disable it after every transaction. */ if (hwpec) outb_p(inb_p(SMBAUXCTL(priv)) & ~SMBAUXCTL_CRC, SMBAUXCTL(priv)); out: /* * Unlock the SMBus device for use by BIOS/ACPI, * and clear status flags if not done already. */ outb_p(SMBHSTSTS_INUSE_STS | STATUS_FLAGS, SMBHSTSTS(priv)); pm_runtime_mark_last_busy(&priv->pci_dev->dev); pm_runtime_put_autosuspend(&priv->pci_dev->dev); mutex_unlock(&priv->acpi_lock); return ret; } static u32 i801_func(struct i2c_adapter *adapter) { struct i801_priv *priv = i2c_get_adapdata(adapter); return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_PROC_CALL | I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_SMBUS_WRITE_I2C_BLOCK | ((priv->features & FEATURE_SMBUS_PEC) ? I2C_FUNC_SMBUS_PEC : 0) | ((priv->features & FEATURE_BLOCK_PROC) ? 
I2C_FUNC_SMBUS_BLOCK_PROC_CALL : 0) | ((priv->features & FEATURE_I2C_BLOCK_READ) ? I2C_FUNC_SMBUS_READ_I2C_BLOCK : 0) | ((priv->features & FEATURE_HOST_NOTIFY) ? I2C_FUNC_SMBUS_HOST_NOTIFY : 0); } static void i801_enable_host_notify(struct i2c_adapter *adapter) { struct i801_priv *priv = i2c_get_adapdata(adapter); if (!(priv->features & FEATURE_HOST_NOTIFY)) return; /* * Enable host notify interrupt and block the generation of interrupt * from the SMB_ALERT signal because the driver does not support * SMBus Alert. */ outb_p(SMBSLVCMD_HST_NTFY_INTREN | SMBSLVCMD_SMBALERT_DISABLE | priv->original_slvcmd, SMBSLVCMD(priv)); /* clear Host Notify bit to allow a new notification */ outb_p(SMBSLVSTS_HST_NTFY_STS, SMBSLVSTS(priv)); } static void i801_disable_host_notify(struct i801_priv *priv) { if (!(priv->features & FEATURE_HOST_NOTIFY)) return; outb_p(priv->original_slvcmd, SMBSLVCMD(priv)); } static const struct i2c_algorithm smbus_algorithm = { .smbus_xfer = i801_access, .functionality = i801_func, }; #define FEATURES_ICH5 (FEATURE_BLOCK_PROC | FEATURE_I2C_BLOCK_READ | \ FEATURE_IRQ | FEATURE_SMBUS_PEC | \ FEATURE_BLOCK_BUFFER | FEATURE_HOST_NOTIFY) #define FEATURES_ICH4 (FEATURE_SMBUS_PEC | FEATURE_BLOCK_BUFFER | \ FEATURE_HOST_NOTIFY) static const struct pci_device_id i801_ids[] = { { PCI_DEVICE_DATA(INTEL, 82801AA_3, 0) }, { PCI_DEVICE_DATA(INTEL, 82801AB_3, 0) }, { PCI_DEVICE_DATA(INTEL, 82801BA_2, 0) }, { PCI_DEVICE_DATA(INTEL, 82801CA_3, FEATURE_HOST_NOTIFY) }, { PCI_DEVICE_DATA(INTEL, 82801DB_3, FEATURES_ICH4) }, { PCI_DEVICE_DATA(INTEL, 82801EB_3, FEATURES_ICH5) }, { PCI_DEVICE_DATA(INTEL, ESB_4, FEATURES_ICH5) }, { PCI_DEVICE_DATA(INTEL, ICH6_16, FEATURES_ICH5) }, { PCI_DEVICE_DATA(INTEL, ICH7_17, FEATURES_ICH5) }, { PCI_DEVICE_DATA(INTEL, ESB2_17, FEATURES_ICH5) }, { PCI_DEVICE_DATA(INTEL, ICH8_5, FEATURES_ICH5) }, { PCI_DEVICE_DATA(INTEL, ICH9_6, FEATURES_ICH5) }, { PCI_DEVICE_DATA(INTEL, EP80579_1, FEATURES_ICH5) }, { PCI_DEVICE_DATA(INTEL, ICH10_4, FEATURES_ICH5) }, { PCI_DEVICE_DATA(INTEL, ICH10_5, FEATURES_ICH5) }, { PCI_DEVICE_DATA(INTEL, 5_3400_SERIES_SMBUS, FEATURES_ICH5) }, { PCI_DEVICE_DATA(INTEL, COUGARPOINT_SMBUS, FEATURES_ICH5) }, { PCI_DEVICE_DATA(INTEL, PATSBURG_SMBUS, FEATURES_ICH5) }, { PCI_DEVICE_DATA(INTEL, PATSBURG_SMBUS_IDF0, FEATURES_ICH5 | FEATURE_IDF) }, { PCI_DEVICE_DATA(INTEL, PATSBURG_SMBUS_IDF1, FEATURES_ICH5 | FEATURE_IDF) }, { PCI_DEVICE_DATA(INTEL, PATSBURG_SMBUS_IDF2, FEATURES_ICH5 | FEATURE_IDF) }, { PCI_DEVICE_DATA(INTEL, DH89XXCC_SMBUS, FEATURES_ICH5) }, { PCI_DEVICE_DATA(INTEL, PANTHERPOINT_SMBUS, FEATURES_ICH5) }, { PCI_DEVICE_DATA(INTEL, LYNXPOINT_SMBUS, FEATURES_ICH5) }, { PCI_DEVICE_DATA(INTEL, LYNXPOINT_LP_SMBUS, FEATURES_ICH5) }, { PCI_DEVICE_DATA(INTEL, AVOTON_SMBUS, FEATURES_ICH5) }, { PCI_DEVICE_DATA(INTEL, WELLSBURG_SMBUS, FEATURES_ICH5) }, { PCI_DEVICE_DATA(INTEL, WELLSBURG_SMBUS_MS0, FEATURES_ICH5 | FEATURE_IDF) }, { PCI_DEVICE_DATA(INTEL, WELLSBURG_SMBUS_MS1, FEATURES_ICH5 | FEATURE_IDF) }, { PCI_DEVICE_DATA(INTEL, WELLSBURG_SMBUS_MS2, FEATURES_ICH5 | FEATURE_IDF) }, { PCI_DEVICE_DATA(INTEL, COLETOCREEK_SMBUS, FEATURES_ICH5) }, { PCI_DEVICE_DATA(INTEL, GEMINILAKE_SMBUS, FEATURES_ICH5) }, { PCI_DEVICE_DATA(INTEL, WILDCATPOINT_SMBUS, FEATURES_ICH5) }, { PCI_DEVICE_DATA(INTEL, WILDCATPOINT_LP_SMBUS, FEATURES_ICH5) }, { PCI_DEVICE_DATA(INTEL, BAYTRAIL_SMBUS, FEATURES_ICH5) }, { PCI_DEVICE_DATA(INTEL, BRASWELL_SMBUS, FEATURES_ICH5) }, { PCI_DEVICE_DATA(INTEL, SUNRISEPOINT_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_SPT) }, { PCI_DEVICE_DATA(INTEL, 
SUNRISEPOINT_LP_SMBUS, FEATURES_ICH5 | FEATURE_TCO_SPT) }, { PCI_DEVICE_DATA(INTEL, CDF_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, { PCI_DEVICE_DATA(INTEL, DNV_SMBUS, FEATURES_ICH5 | FEATURE_TCO_SPT) }, { PCI_DEVICE_DATA(INTEL, EBG_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, { PCI_DEVICE_DATA(INTEL, BROXTON_SMBUS, FEATURES_ICH5) }, { PCI_DEVICE_DATA(INTEL, LEWISBURG_SMBUS, FEATURES_ICH5 | FEATURE_TCO_SPT) }, { PCI_DEVICE_DATA(INTEL, LEWISBURG_SSKU_SMBUS, FEATURES_ICH5 | FEATURE_TCO_SPT) }, { PCI_DEVICE_DATA(INTEL, KABYLAKE_PCH_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_SPT) }, { PCI_DEVICE_DATA(INTEL, CANNONLAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, { PCI_DEVICE_DATA(INTEL, CANNONLAKE_LP_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, { PCI_DEVICE_DATA(INTEL, ICELAKE_LP_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, { PCI_DEVICE_DATA(INTEL, ICELAKE_N_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, { PCI_DEVICE_DATA(INTEL, COMETLAKE_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, { PCI_DEVICE_DATA(INTEL, COMETLAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, { PCI_DEVICE_DATA(INTEL, COMETLAKE_V_SMBUS, FEATURES_ICH5 | FEATURE_TCO_SPT) }, { PCI_DEVICE_DATA(INTEL, ELKHART_LAKE_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, { PCI_DEVICE_DATA(INTEL, TIGERLAKE_LP_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, { PCI_DEVICE_DATA(INTEL, TIGERLAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, { PCI_DEVICE_DATA(INTEL, JASPER_LAKE_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, { PCI_DEVICE_DATA(INTEL, ALDER_LAKE_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, { PCI_DEVICE_DATA(INTEL, ALDER_LAKE_P_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, { PCI_DEVICE_DATA(INTEL, ALDER_LAKE_M_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, { PCI_DEVICE_DATA(INTEL, RAPTOR_LAKE_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, { PCI_DEVICE_DATA(INTEL, METEOR_LAKE_P_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, { PCI_DEVICE_DATA(INTEL, METEOR_LAKE_SOC_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, { PCI_DEVICE_DATA(INTEL, METEOR_LAKE_PCH_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, { 0, } }; MODULE_DEVICE_TABLE(pci, i801_ids); #if defined CONFIG_X86 && defined CONFIG_DMI static unsigned char apanel_addr; /* Scan the system ROM for the signature "FJKEYINF" */ static __init const void __iomem *bios_signature(const void __iomem *bios) { ssize_t offset; const unsigned char signature[] = "FJKEYINF"; for (offset = 0; offset < 0x10000; offset += 0x10) { if (check_signature(bios + offset, signature, sizeof(signature)-1)) return bios + offset; } return NULL; } static void __init input_apanel_init(void) { void __iomem *bios; const void __iomem *p; bios = ioremap(0xF0000, 0x10000); /* Can't fail */ p = bios_signature(bios); if (p) { /* just use the first address */ apanel_addr = readb(p + 8 + 3) >> 1; } iounmap(bios); } struct dmi_onboard_device_info { const char *name; u8 type; unsigned short i2c_addr; const char *i2c_type; }; static const struct dmi_onboard_device_info dmi_devices[] = { { "Syleus", DMI_DEV_TYPE_OTHER, 0x73, "fscsyl" }, { "Hermes", DMI_DEV_TYPE_OTHER, 0x73, "fscher" }, { "Hades", DMI_DEV_TYPE_OTHER, 0x73, "fschds" }, }; static void dmi_check_onboard_device(u8 type, const char *name, struct i2c_adapter *adap) { int i; struct i2c_board_info info; for (i = 0; i < ARRAY_SIZE(dmi_devices); i++) { /* & ~0x80, ignore enabled/disabled bit */ if ((type & ~0x80) != dmi_devices[i].type) continue; if (strcasecmp(name, dmi_devices[i].name)) continue; memset(&info, 0, sizeof(struct i2c_board_info)); info.addr = dmi_devices[i].i2c_addr; strscpy(info.type, 
dmi_devices[i].i2c_type, I2C_NAME_SIZE); i2c_new_client_device(adap, &info); break; } } /* We use our own function to check for onboard devices instead of dmi_find_device() as some buggy BIOS's have the devices we are interested in marked as disabled */ static void dmi_check_onboard_devices(const struct dmi_header *dm, void *adap) { int i, count; if (dm->type != 10) return; count = (dm->length - sizeof(struct dmi_header)) / 2; for (i = 0; i < count; i++) { const u8 *d = (char *)(dm + 1) + (i * 2); const char *name = ((char *) dm) + dm->length; u8 type = d[0]; u8 s = d[1]; if (!s) continue; s--; while (s > 0 && name[0]) { name += strlen(name) + 1; s--; } if (name[0] == 0) /* Bogus string reference */ continue; dmi_check_onboard_device(type, name, adap); } } /* NOTE: Keep this list in sync with drivers/platform/x86/dell-smo8800.c */ static const char *const acpi_smo8800_ids[] = { "SMO8800", "SMO8801", "SMO8810", "SMO8811", "SMO8820", "SMO8821", "SMO8830", "SMO8831", }; static acpi_status check_acpi_smo88xx_device(acpi_handle obj_handle, u32 nesting_level, void *context, void **return_value) { struct acpi_device_info *info; acpi_status status; char *hid; int i; status = acpi_get_object_info(obj_handle, &info); if (ACPI_FAILURE(status)) return AE_OK; if (!(info->valid & ACPI_VALID_HID)) goto smo88xx_not_found; hid = info->hardware_id.string; if (!hid) goto smo88xx_not_found; i = match_string(acpi_smo8800_ids, ARRAY_SIZE(acpi_smo8800_ids), hid); if (i < 0) goto smo88xx_not_found; kfree(info); *return_value = NULL; return AE_CTRL_TERMINATE; smo88xx_not_found: kfree(info); return AE_OK; } static bool is_dell_system_with_lis3lv02d(void) { void *err = ERR_PTR(-ENOENT); if (!dmi_match(DMI_SYS_VENDOR, "Dell Inc.")) return false; /* * Check that ACPI device SMO88xx is present and is functioning. * Function acpi_get_devices() already filters all ACPI devices * which are not present or are not functioning. * ACPI device SMO88xx represents our ST microelectronics lis3lv02d * accelerometer but unfortunately ACPI does not provide any other * information (like I2C address). */ acpi_get_devices(NULL, check_acpi_smo88xx_device, NULL, &err); return !IS_ERR(err); } /* * Accelerometer's I2C address is not specified in DMI nor ACPI, * so it is needed to define mapping table based on DMI product names. */ static const struct { const char *dmi_product_name; unsigned short i2c_addr; } dell_lis3lv02d_devices[] = { /* * Dell platform team told us that these Latitude devices have * ST microelectronics accelerometer at I2C address 0x29. */ { "Latitude E5250", 0x29 }, { "Latitude E5450", 0x29 }, { "Latitude E5550", 0x29 }, { "Latitude E6440", 0x29 }, { "Latitude E6440 ATG", 0x29 }, { "Latitude E6540", 0x29 }, /* * Additional individual entries were added after verification. 
*/ { "Latitude 5480", 0x29 }, { "Vostro V131", 0x1d }, { "Vostro 5568", 0x29 }, }; static void register_dell_lis3lv02d_i2c_device(struct i801_priv *priv) { struct i2c_board_info info; const char *dmi_product_name; int i; dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME); for (i = 0; i < ARRAY_SIZE(dell_lis3lv02d_devices); ++i) { if (strcmp(dmi_product_name, dell_lis3lv02d_devices[i].dmi_product_name) == 0) break; } if (i == ARRAY_SIZE(dell_lis3lv02d_devices)) { dev_warn(&priv->pci_dev->dev, "Accelerometer lis3lv02d is present on SMBus but its" " address is unknown, skipping registration\n"); return; } memset(&info, 0, sizeof(struct i2c_board_info)); info.addr = dell_lis3lv02d_devices[i].i2c_addr; strscpy(info.type, "lis3lv02d", I2C_NAME_SIZE); i2c_new_client_device(&priv->adapter, &info); } /* Register optional slaves */ static void i801_probe_optional_slaves(struct i801_priv *priv) { /* Only register slaves on main SMBus channel */ if (priv->features & FEATURE_IDF) return; if (apanel_addr) { struct i2c_board_info info = { .addr = apanel_addr, .type = "fujitsu_apanel", }; i2c_new_client_device(&priv->adapter, &info); } if (dmi_name_in_vendors("FUJITSU")) dmi_walk(dmi_check_onboard_devices, &priv->adapter); if (is_dell_system_with_lis3lv02d()) register_dell_lis3lv02d_i2c_device(priv); /* Instantiate SPD EEPROMs unless the SMBus is multiplexed */ #if IS_ENABLED(CONFIG_I2C_MUX_GPIO) if (!priv->mux_drvdata) #endif i2c_register_spd(&priv->adapter); } #else static void __init input_apanel_init(void) {} static void i801_probe_optional_slaves(struct i801_priv *priv) {} #endif /* CONFIG_X86 && CONFIG_DMI */ #if IS_ENABLED(CONFIG_I2C_MUX_GPIO) && defined CONFIG_DMI static struct i801_mux_config i801_mux_config_asus_z8_d12 = { .gpio_chip = "gpio_ich", .values = { 0x02, 0x03 }, .n_values = 2, .classes = { I2C_CLASS_SPD, I2C_CLASS_SPD }, .gpios = { 52, 53 }, .n_gpios = 2, }; static struct i801_mux_config i801_mux_config_asus_z8_d18 = { .gpio_chip = "gpio_ich", .values = { 0x02, 0x03, 0x01 }, .n_values = 3, .classes = { I2C_CLASS_SPD, I2C_CLASS_SPD, I2C_CLASS_SPD }, .gpios = { 52, 53 }, .n_gpios = 2, }; static const struct dmi_system_id mux_dmi_table[] = { { .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), DMI_MATCH(DMI_BOARD_NAME, "Z8NA-D6(C)"), }, .driver_data = &i801_mux_config_asus_z8_d12, }, { .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), DMI_MATCH(DMI_BOARD_NAME, "Z8P(N)E-D12(X)"), }, .driver_data = &i801_mux_config_asus_z8_d12, }, { .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), DMI_MATCH(DMI_BOARD_NAME, "Z8NH-D12"), }, .driver_data = &i801_mux_config_asus_z8_d12, }, { .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), DMI_MATCH(DMI_BOARD_NAME, "Z8PH-D12/IFB"), }, .driver_data = &i801_mux_config_asus_z8_d12, }, { .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), DMI_MATCH(DMI_BOARD_NAME, "Z8NR-D12"), }, .driver_data = &i801_mux_config_asus_z8_d12, }, { .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), DMI_MATCH(DMI_BOARD_NAME, "Z8P(N)H-D12"), }, .driver_data = &i801_mux_config_asus_z8_d12, }, { .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), DMI_MATCH(DMI_BOARD_NAME, "Z8PG-D18"), }, .driver_data = &i801_mux_config_asus_z8_d18, }, { .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), DMI_MATCH(DMI_BOARD_NAME, "Z8PE-D18"), }, .driver_data = &i801_mux_config_asus_z8_d18, }, { .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), 
DMI_MATCH(DMI_BOARD_NAME, "Z8PS-D12"), }, .driver_data = &i801_mux_config_asus_z8_d12, }, { } }; /* Setup multiplexing if needed */ static void i801_add_mux(struct i801_priv *priv) { struct device *dev = &priv->adapter.dev; const struct i801_mux_config *mux_config; struct i2c_mux_gpio_platform_data gpio_data; struct gpiod_lookup_table *lookup; int i; if (!priv->mux_drvdata) return; mux_config = priv->mux_drvdata; /* Prepare the platform data */ memset(&gpio_data, 0, sizeof(struct i2c_mux_gpio_platform_data)); gpio_data.parent = priv->adapter.nr; gpio_data.values = mux_config->values; gpio_data.n_values = mux_config->n_values; gpio_data.classes = mux_config->classes; gpio_data.idle = I2C_MUX_GPIO_NO_IDLE; /* Register GPIO descriptor lookup table */ lookup = devm_kzalloc(dev, struct_size(lookup, table, mux_config->n_gpios + 1), GFP_KERNEL); if (!lookup) return; lookup->dev_id = "i2c-mux-gpio"; for (i = 0; i < mux_config->n_gpios; i++) lookup->table[i] = GPIO_LOOKUP(mux_config->gpio_chip, mux_config->gpios[i], "mux", 0); gpiod_add_lookup_table(lookup); priv->lookup = lookup; /* * Register the mux device, we use PLATFORM_DEVID_NONE here * because since we are referring to the GPIO chip by name we are * anyways in deep trouble if there is more than one of these * devices, and there should likely only be one platform controller * hub. */ priv->mux_pdev = platform_device_register_data(dev, "i2c-mux-gpio", PLATFORM_DEVID_NONE, &gpio_data, sizeof(struct i2c_mux_gpio_platform_data)); if (IS_ERR(priv->mux_pdev)) { gpiod_remove_lookup_table(lookup); dev_err(dev, "Failed to register i2c-mux-gpio device\n"); } } static void i801_del_mux(struct i801_priv *priv) { platform_device_unregister(priv->mux_pdev); gpiod_remove_lookup_table(priv->lookup); } static unsigned int i801_get_adapter_class(struct i801_priv *priv) { const struct dmi_system_id *id; const struct i801_mux_config *mux_config; unsigned int class = I2C_CLASS_HWMON | I2C_CLASS_SPD; int i; id = dmi_first_match(mux_dmi_table); if (id) { /* Remove branch classes from trunk */ mux_config = id->driver_data; for (i = 0; i < mux_config->n_values; i++) class &= ~mux_config->classes[i]; /* Remember for later */ priv->mux_drvdata = mux_config; } return class; } #else static inline void i801_add_mux(struct i801_priv *priv) { } static inline void i801_del_mux(struct i801_priv *priv) { } static inline unsigned int i801_get_adapter_class(struct i801_priv *priv) { return I2C_CLASS_HWMON | I2C_CLASS_SPD; } #endif static struct platform_device * i801_add_tco_spt(struct i801_priv *priv, struct pci_dev *pci_dev, struct resource *tco_res) { static const struct itco_wdt_platform_data pldata = { .name = "Intel PCH", .version = 4, }; struct resource *res; int ret; /* * We must access the NO_REBOOT bit over the Primary to Sideband * (P2SB) bridge. 
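* (The P2SB device is typically hidden by firmware, which is why the
* BAR is looked up through the p2sb_bar() helper below rather than read
* from config space directly.)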
*/ res = &tco_res[1]; ret = p2sb_bar(pci_dev->bus, 0, res); if (ret) return ERR_PTR(ret); if (pci_dev->device == PCI_DEVICE_ID_INTEL_DNV_SMBUS) res->start += SBREG_SMBCTRL_DNV; else res->start += SBREG_SMBCTRL; res->end = res->start + 3; return platform_device_register_resndata(&pci_dev->dev, "iTCO_wdt", -1, tco_res, 2, &pldata, sizeof(pldata)); } static struct platform_device * i801_add_tco_cnl(struct i801_priv *priv, struct pci_dev *pci_dev, struct resource *tco_res) { static const struct itco_wdt_platform_data pldata = { .name = "Intel PCH", .version = 6, }; return platform_device_register_resndata(&pci_dev->dev, "iTCO_wdt", -1, tco_res, 1, &pldata, sizeof(pldata)); } static void i801_add_tco(struct i801_priv *priv) { struct pci_dev *pci_dev = priv->pci_dev; struct resource tco_res[2], *res; u32 tco_base, tco_ctl; /* If we have ACPI based watchdog use that instead */ if (acpi_has_watchdog()) return; if (!(priv->features & (FEATURE_TCO_SPT | FEATURE_TCO_CNL))) return; pci_read_config_dword(pci_dev, TCOBASE, &tco_base); pci_read_config_dword(pci_dev, TCOCTL, &tco_ctl); if (!(tco_ctl & TCOCTL_EN)) return; memset(tco_res, 0, sizeof(tco_res)); /* * Always populate the main iTCO IO resource here. The second entry * for NO_REBOOT MMIO is filled by the SPT specific function. */ res = &tco_res[0]; res->start = tco_base & ~1; res->end = res->start + 32 - 1; res->flags = IORESOURCE_IO; if (priv->features & FEATURE_TCO_CNL) priv->tco_pdev = i801_add_tco_cnl(priv, pci_dev, tco_res); else priv->tco_pdev = i801_add_tco_spt(priv, pci_dev, tco_res); if (IS_ERR(priv->tco_pdev)) dev_warn(&pci_dev->dev, "failed to create iTCO device\n"); } #ifdef CONFIG_ACPI static bool i801_acpi_is_smbus_ioport(const struct i801_priv *priv, acpi_physical_address address) { return address >= priv->smba && address <= pci_resource_end(priv->pci_dev, SMBBAR); } static acpi_status i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits, u64 *value, void *handler_context, void *region_context) { struct i801_priv *priv = handler_context; struct pci_dev *pdev = priv->pci_dev; acpi_status status; /* * Once BIOS AML code touches the OpRegion we warn and inhibit any * further access from the driver itself. This device is now owned * by the system firmware. */ mutex_lock(&priv->acpi_lock); if (!priv->acpi_reserved && i801_acpi_is_smbus_ioport(priv, address)) { priv->acpi_reserved = true; dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n"); dev_warn(&pdev->dev, "Driver SMBus register access inhibited\n"); /* * BIOS is accessing the host controller so prevent it from * suspending automatically from now on. 
*/ pm_runtime_get_sync(&pdev->dev); } if ((function & ACPI_IO_MASK) == ACPI_READ) status = acpi_os_read_port(address, (u32 *)value, bits); else status = acpi_os_write_port(address, (u32)*value, bits); mutex_unlock(&priv->acpi_lock); return status; } static int i801_acpi_probe(struct i801_priv *priv) { acpi_handle ah = ACPI_HANDLE(&priv->pci_dev->dev); acpi_status status; status = acpi_install_address_space_handler(ah, ACPI_ADR_SPACE_SYSTEM_IO, i801_acpi_io_handler, NULL, priv); if (ACPI_SUCCESS(status)) return 0; return acpi_check_resource_conflict(&priv->pci_dev->resource[SMBBAR]); } static void i801_acpi_remove(struct i801_priv *priv) { acpi_handle ah = ACPI_HANDLE(&priv->pci_dev->dev); acpi_remove_address_space_handler(ah, ACPI_ADR_SPACE_SYSTEM_IO, i801_acpi_io_handler); } #else static inline int i801_acpi_probe(struct i801_priv *priv) { return 0; } static inline void i801_acpi_remove(struct i801_priv *priv) { } #endif static void i801_setup_hstcfg(struct i801_priv *priv) { unsigned char hstcfg = priv->original_hstcfg; hstcfg &= ~SMBHSTCFG_I2C_EN; /* SMBus timing */ hstcfg |= SMBHSTCFG_HST_EN; pci_write_config_byte(priv->pci_dev, SMBHSTCFG, hstcfg); } static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) { int err, i; struct i801_priv *priv; priv = devm_kzalloc(&dev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; i2c_set_adapdata(&priv->adapter, priv); priv->adapter.owner = THIS_MODULE; priv->adapter.class = i801_get_adapter_class(priv); priv->adapter.algo = &smbus_algorithm; priv->adapter.dev.parent = &dev->dev; ACPI_COMPANION_SET(&priv->adapter.dev, ACPI_COMPANION(&dev->dev)); priv->adapter.retries = 3; mutex_init(&priv->acpi_lock); priv->pci_dev = dev; priv->features = id->driver_data; /* Disable features on user request */ for (i = 0; i < ARRAY_SIZE(i801_feature_names); i++) { if (priv->features & disable_features & (1 << i)) dev_notice(&dev->dev, "%s disabled by user\n", i801_feature_names[i]); } priv->features &= ~disable_features; /* The block process call uses block buffer mode */ if (!(priv->features & FEATURE_BLOCK_BUFFER)) priv->features &= ~FEATURE_BLOCK_PROC; err = pcim_enable_device(dev); if (err) { dev_err(&dev->dev, "Failed to enable SMBus PCI device (%d)\n", err); return err; } pcim_pin_device(dev); /* Determine the address of the SMBus area */ priv->smba = pci_resource_start(dev, SMBBAR); if (!priv->smba) { dev_err(&dev->dev, "SMBus base address uninitialized, upgrade BIOS\n"); return -ENODEV; } if (i801_acpi_probe(priv)) return -ENODEV; err = pcim_iomap_regions(dev, 1 << SMBBAR, DRV_NAME); if (err) { dev_err(&dev->dev, "Failed to request SMBus region 0x%lx-0x%Lx\n", priv->smba, (unsigned long long)pci_resource_end(dev, SMBBAR)); i801_acpi_remove(priv); return err; } pci_read_config_byte(priv->pci_dev, SMBHSTCFG, &priv->original_hstcfg); i801_setup_hstcfg(priv); if (!(priv->original_hstcfg & SMBHSTCFG_HST_EN)) dev_info(&dev->dev, "Enabling SMBus device\n"); if (priv->original_hstcfg & SMBHSTCFG_SMB_SMI_EN) { dev_dbg(&dev->dev, "SMBus using interrupt SMI#\n"); /* Disable SMBus interrupt feature if SMBus using SMI# */ priv->features &= ~FEATURE_IRQ; } if (priv->original_hstcfg & SMBHSTCFG_SPD_WD) dev_info(&dev->dev, "SPD Write Disable is set\n"); /* Clear special mode bits */ if (priv->features & (FEATURE_SMBUS_PEC | FEATURE_BLOCK_BUFFER)) outb_p(inb_p(SMBAUXCTL(priv)) & ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), SMBAUXCTL(priv)); /* Default timeout in interrupt mode: 200 ms */ priv->adapter.timeout = HZ / 5; if (dev->irq == IRQ_NOTCONNECTED) 
priv->features &= ~FEATURE_IRQ; if (priv->features & FEATURE_IRQ) { u16 pcists; /* Complain if an interrupt is already pending */ pci_read_config_word(priv->pci_dev, PCI_STATUS, &pcists); if (pcists & PCI_STATUS_INTERRUPT) dev_warn(&dev->dev, "An interrupt is pending!\n"); } if (priv->features & FEATURE_IRQ) { init_completion(&priv->done); err = devm_request_irq(&dev->dev, dev->irq, i801_isr, IRQF_SHARED, DRV_NAME, priv); if (err) { dev_err(&dev->dev, "Failed to allocate irq %d: %d\n", dev->irq, err); priv->features &= ~FEATURE_IRQ; } } dev_info(&dev->dev, "SMBus using %s\n", priv->features & FEATURE_IRQ ? "PCI interrupt" : "polling"); /* Host notification uses an interrupt */ if (!(priv->features & FEATURE_IRQ)) priv->features &= ~FEATURE_HOST_NOTIFY; /* Remember original Interrupt and Host Notify settings */ priv->original_hstcnt = inb_p(SMBHSTCNT(priv)) & ~SMBHSTCNT_KILL; if (priv->features & FEATURE_HOST_NOTIFY) priv->original_slvcmd = inb_p(SMBSLVCMD(priv)); i801_add_tco(priv); snprintf(priv->adapter.name, sizeof(priv->adapter.name), "SMBus I801 adapter at %04lx", priv->smba); err = i2c_add_adapter(&priv->adapter); if (err) { platform_device_unregister(priv->tco_pdev); i801_acpi_remove(priv); return err; } i801_enable_host_notify(&priv->adapter); i801_probe_optional_slaves(priv); /* We ignore errors - multiplexing is optional */ i801_add_mux(priv); pci_set_drvdata(dev, priv); dev_pm_set_driver_flags(&dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE); pm_runtime_set_autosuspend_delay(&dev->dev, 1000); pm_runtime_use_autosuspend(&dev->dev); pm_runtime_put_autosuspend(&dev->dev); pm_runtime_allow(&dev->dev); return 0; } static void i801_remove(struct pci_dev *dev) { struct i801_priv *priv = pci_get_drvdata(dev); outb_p(priv->original_hstcnt, SMBHSTCNT(priv)); i801_disable_host_notify(priv); i801_del_mux(priv); i2c_del_adapter(&priv->adapter); i801_acpi_remove(priv); pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg); platform_device_unregister(priv->tco_pdev); /* if acpi_reserved is set then usage_count is incremented already */ if (!priv->acpi_reserved) pm_runtime_get_noresume(&dev->dev); /* * do not call pci_disable_device(dev) since it can cause hard hangs on * some systems during power-off (eg. Fujitsu-Siemens Lifebook E8010) */ } static void i801_shutdown(struct pci_dev *dev) { struct i801_priv *priv = pci_get_drvdata(dev); /* Restore config registers to avoid hard hang on some systems */ outb_p(priv->original_hstcnt, SMBHSTCNT(priv)); i801_disable_host_notify(priv); pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg); } static int i801_suspend(struct device *dev) { struct i801_priv *priv = dev_get_drvdata(dev); outb_p(priv->original_hstcnt, SMBHSTCNT(priv)); pci_write_config_byte(priv->pci_dev, SMBHSTCFG, priv->original_hstcfg); return 0; } static int i801_resume(struct device *dev) { struct i801_priv *priv = dev_get_drvdata(dev); i801_setup_hstcfg(priv); i801_enable_host_notify(&priv->adapter); return 0; } static DEFINE_SIMPLE_DEV_PM_OPS(i801_pm_ops, i801_suspend, i801_resume); static struct pci_driver i801_driver = { .name = DRV_NAME, .id_table = i801_ids, .probe = i801_probe, .remove = i801_remove, .shutdown = i801_shutdown, .driver = { .pm = pm_sleep_ptr(&i801_pm_ops), .probe_type = PROBE_PREFER_ASYNCHRONOUS, }, }; static int __init i2c_i801_init(void) { if (dmi_name_in_vendors("FUJITSU")) input_apanel_init(); return pci_register_driver(&i801_driver); } static void __exit i2c_i801_exit(void) { pci_unregister_driver(&i801_driver); } MODULE_AUTHOR("Mark D. 
Studebaker <[email protected]>"); MODULE_AUTHOR("Jean Delvare <[email protected]>"); MODULE_DESCRIPTION("I801 SMBus driver"); MODULE_LICENSE("GPL"); module_init(i2c_i801_init); module_exit(i2c_i801_exit);
linux-master
drivers/i2c/busses/i2c-i801.c
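/*
 * A minimal, host-buildable sketch of the feature-flag pattern the i801
 * driver uses above: capability bits live in each PCI table entry's
 * driver_data, get masked by the user's disable_features knob, and
 * dependent features are dropped together (the block process call needs
 * the block buffer). The FEAT_* values here are illustrative only, not
 * the driver's real bit assignments.
 */
#include <stdio.h>
#include <stdint.h>

#define FEAT_SMBUS_PEC		(1u << 0)
#define FEAT_BLOCK_BUFFER	(1u << 1)
#define FEAT_BLOCK_PROC		(1u << 2)
#define FEAT_IRQ		(1u << 3)

static uint32_t effective_features(uint32_t id_data, uint32_t disabled)
{
	uint32_t f = id_data & ~disabled;

	/* mirrors the dependency check done in i801_probe() */
	if (!(f & FEAT_BLOCK_BUFFER))
		f &= ~FEAT_BLOCK_PROC;
	return f;
}

int main(void)
{
	/* an ICH5-class part with the block buffer disabled by the user */
	uint32_t f = effective_features(FEAT_SMBUS_PEC | FEAT_BLOCK_BUFFER |
					FEAT_BLOCK_PROC | FEAT_IRQ,
					FEAT_BLOCK_BUFFER);

	printf("features %#x, block proc %s\n", f,
	       (f & FEAT_BLOCK_PROC) ? "on" : "off");
	return 0;
}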
// SPDX-License-Identifier: GPL-2.0-only /* * I2C bus driver for Amlogic Meson SoCs * * Copyright (C) 2014 Beniamino Galvani <[email protected]> */ #include <linux/bitfield.h> #include <linux/clk.h> #include <linux/completion.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/types.h> /* Meson I2C register map */ #define REG_CTRL 0x00 #define REG_SLAVE_ADDR 0x04 #define REG_TOK_LIST0 0x08 #define REG_TOK_LIST1 0x0c #define REG_TOK_WDATA0 0x10 #define REG_TOK_WDATA1 0x14 #define REG_TOK_RDATA0 0x18 #define REG_TOK_RDATA1 0x1c /* Control register fields */ #define REG_CTRL_START BIT(0) #define REG_CTRL_ACK_IGNORE BIT(1) #define REG_CTRL_STATUS BIT(2) #define REG_CTRL_ERROR BIT(3) #define REG_CTRL_CLKDIV_SHIFT 12 #define REG_CTRL_CLKDIV_MASK GENMASK(21, REG_CTRL_CLKDIV_SHIFT) #define REG_CTRL_CLKDIVEXT_SHIFT 28 #define REG_CTRL_CLKDIVEXT_MASK GENMASK(29, REG_CTRL_CLKDIVEXT_SHIFT) #define REG_SLV_ADDR_MASK GENMASK(7, 0) #define REG_SLV_SDA_FILTER_MASK GENMASK(10, 8) #define REG_SLV_SCL_FILTER_MASK GENMASK(13, 11) #define REG_SLV_SCL_LOW_SHIFT 16 #define REG_SLV_SCL_LOW_MASK GENMASK(27, REG_SLV_SCL_LOW_SHIFT) #define REG_SLV_SCL_LOW_EN BIT(28) #define I2C_TIMEOUT_MS 500 #define FILTER_DELAY 15 enum { TOKEN_END = 0, TOKEN_START, TOKEN_SLAVE_ADDR_WRITE, TOKEN_SLAVE_ADDR_READ, TOKEN_DATA, TOKEN_DATA_LAST, TOKEN_STOP, }; enum { STATE_IDLE, STATE_READ, STATE_WRITE, }; /** * struct meson_i2c - Meson I2C device private data * * @adap: I2C adapter instance * @dev: Pointer to device structure * @regs: Base address of the device memory mapped registers * @clk: Pointer to clock structure * @msg: Pointer to the current I2C message * @state: Current state in the driver state machine * @last: Flag set for the last message in the transfer * @count: Number of bytes to be sent/received in current transfer * @pos: Current position in the send/receive buffer * @error: Flag set when an error is received * @lock: To avoid race conditions between irq handler and xfer code * @done: Completion used to wait for transfer termination * @tokens: Sequence of tokens to be written to the device * @num_tokens: Number of tokens * @data: Pointer to the controller's platform data */ struct meson_i2c { struct i2c_adapter adap; struct device *dev; void __iomem *regs; struct clk *clk; struct i2c_msg *msg; int state; bool last; int count; int pos; int error; spinlock_t lock; struct completion done; u32 tokens[2]; int num_tokens; const struct meson_i2c_data *data; }; struct meson_i2c_data { void (*set_clk_div)(struct meson_i2c *i2c, unsigned int freq); }; static void meson_i2c_set_mask(struct meson_i2c *i2c, int reg, u32 mask, u32 val) { u32 data; data = readl(i2c->regs + reg); data &= ~mask; data |= val & mask; writel(data, i2c->regs + reg); } static void meson_i2c_reset_tokens(struct meson_i2c *i2c) { i2c->tokens[0] = 0; i2c->tokens[1] = 0; i2c->num_tokens = 0; } static void meson_i2c_add_token(struct meson_i2c *i2c, int token) { if (i2c->num_tokens < 8) i2c->tokens[0] |= (token & 0xf) << (i2c->num_tokens * 4); else i2c->tokens[1] |= (token & 0xf) << ((i2c->num_tokens % 8) * 4); i2c->num_tokens++; } static void meson_gxbb_axg_i2c_set_clk_div(struct meson_i2c *i2c, unsigned int freq) { unsigned long clk_rate = clk_get_rate(i2c->clk); unsigned int div_h, div_l; /* According to I2C-BUS Spec 2.1, in FAST-MODE, the minimum LOW period is 1.3uS, and * minimum HIGH 
is least 0.6us. * For 400000 freq, the period is 2.5us. To keep within the specs, give 40% of period to * HIGH and 60% to LOW. This means HIGH at 1.0us and LOW 1.5us. * The same applies for Fast-mode plus, where LOW is 0.5us and HIGH is 0.26us. * Duty = H/(H + L) = 2/5 */ if (freq <= I2C_MAX_STANDARD_MODE_FREQ) { div_h = DIV_ROUND_UP(clk_rate, freq); div_l = DIV_ROUND_UP(div_h, 4); div_h = DIV_ROUND_UP(div_h, 2) - FILTER_DELAY; } else { div_h = DIV_ROUND_UP(clk_rate * 2, freq * 5) - FILTER_DELAY; div_l = DIV_ROUND_UP(clk_rate * 3, freq * 5 * 2); } /* clock divider has 12 bits */ if (div_h > GENMASK(11, 0)) { dev_err(i2c->dev, "requested bus frequency too low\n"); div_h = GENMASK(11, 0); } if (div_l > GENMASK(11, 0)) { dev_err(i2c->dev, "requested bus frequency too low\n"); div_l = GENMASK(11, 0); } meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_CLKDIV_MASK, FIELD_PREP(REG_CTRL_CLKDIV_MASK, div_h & GENMASK(9, 0))); meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_CLKDIVEXT_MASK, FIELD_PREP(REG_CTRL_CLKDIVEXT_MASK, div_h >> 10)); /* set SCL low delay */ meson_i2c_set_mask(i2c, REG_SLAVE_ADDR, REG_SLV_SCL_LOW_MASK, FIELD_PREP(REG_SLV_SCL_LOW_MASK, div_l)); /* Enable HIGH/LOW mode */ meson_i2c_set_mask(i2c, REG_SLAVE_ADDR, REG_SLV_SCL_LOW_EN, REG_SLV_SCL_LOW_EN); dev_dbg(i2c->dev, "%s: clk %lu, freq %u, divh %u, divl %u\n", __func__, clk_rate, freq, div_h, div_l); } static void meson6_i2c_set_clk_div(struct meson_i2c *i2c, unsigned int freq) { unsigned long clk_rate = clk_get_rate(i2c->clk); unsigned int div; div = DIV_ROUND_UP(clk_rate, freq); div -= FILTER_DELAY; div = DIV_ROUND_UP(div, 4); /* clock divider has 12 bits */ if (div > GENMASK(11, 0)) { dev_err(i2c->dev, "requested bus frequency too low\n"); div = GENMASK(11, 0); } meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_CLKDIV_MASK, FIELD_PREP(REG_CTRL_CLKDIV_MASK, div & GENMASK(9, 0))); meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_CLKDIVEXT_MASK, FIELD_PREP(REG_CTRL_CLKDIVEXT_MASK, div >> 10)); /* Disable HIGH/LOW mode */ meson_i2c_set_mask(i2c, REG_SLAVE_ADDR, REG_SLV_SCL_LOW_EN, 0); dev_dbg(i2c->dev, "%s: clk %lu, freq %u, div %u\n", __func__, clk_rate, freq, div); } static void meson_i2c_get_data(struct meson_i2c *i2c, char *buf, int len) { u32 rdata0, rdata1; int i; rdata0 = readl(i2c->regs + REG_TOK_RDATA0); rdata1 = readl(i2c->regs + REG_TOK_RDATA1); dev_dbg(i2c->dev, "%s: data %08x %08x len %d\n", __func__, rdata0, rdata1, len); for (i = 0; i < min(4, len); i++) *buf++ = (rdata0 >> i * 8) & 0xff; for (i = 4; i < min(8, len); i++) *buf++ = (rdata1 >> (i - 4) * 8) & 0xff; } static void meson_i2c_put_data(struct meson_i2c *i2c, char *buf, int len) { u32 wdata0 = 0, wdata1 = 0; int i; for (i = 0; i < min(4, len); i++) wdata0 |= *buf++ << (i * 8); for (i = 4; i < min(8, len); i++) wdata1 |= *buf++ << ((i - 4) * 8); writel(wdata0, i2c->regs + REG_TOK_WDATA0); writel(wdata1, i2c->regs + REG_TOK_WDATA1); dev_dbg(i2c->dev, "%s: data %08x %08x len %d\n", __func__, wdata0, wdata1, len); } static void meson_i2c_prepare_xfer(struct meson_i2c *i2c) { bool write = !(i2c->msg->flags & I2C_M_RD); int i; i2c->count = min(i2c->msg->len - i2c->pos, 8); for (i = 0; i < i2c->count - 1; i++) meson_i2c_add_token(i2c, TOKEN_DATA); if (i2c->count) { if (write || i2c->pos + i2c->count < i2c->msg->len) meson_i2c_add_token(i2c, TOKEN_DATA); else meson_i2c_add_token(i2c, TOKEN_DATA_LAST); } if (write) meson_i2c_put_data(i2c, i2c->msg->buf + i2c->pos, i2c->count); if (i2c->last && i2c->pos + i2c->count >= i2c->msg->len) meson_i2c_add_token(i2c, TOKEN_STOP); 
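/* Each token is 4 bits wide: meson_i2c_add_token() packs the first
 * eight into tokens[0] and any remainder into tokens[1], matching the
 * two 32-bit TOK_LIST registers written below. */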
writel(i2c->tokens[0], i2c->regs + REG_TOK_LIST0); writel(i2c->tokens[1], i2c->regs + REG_TOK_LIST1); } static void meson_i2c_transfer_complete(struct meson_i2c *i2c, u32 ctrl) { if (ctrl & REG_CTRL_ERROR) { /* * The bit is set when the IGNORE_NAK bit is cleared * and the device didn't respond. In this case, the * I2C controller automatically generates a STOP * condition. */ dev_dbg(i2c->dev, "error bit set\n"); i2c->error = -ENXIO; i2c->state = STATE_IDLE; } else { if (i2c->state == STATE_READ && i2c->count) meson_i2c_get_data(i2c, i2c->msg->buf + i2c->pos, i2c->count); i2c->pos += i2c->count; if (i2c->pos >= i2c->msg->len) i2c->state = STATE_IDLE; } } static irqreturn_t meson_i2c_irq(int irqno, void *dev_id) { struct meson_i2c *i2c = dev_id; unsigned int ctrl; spin_lock(&i2c->lock); meson_i2c_reset_tokens(i2c); meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_START, 0); ctrl = readl(i2c->regs + REG_CTRL); dev_dbg(i2c->dev, "irq: state %d, pos %d, count %d, ctrl %08x\n", i2c->state, i2c->pos, i2c->count, ctrl); if (i2c->state == STATE_IDLE) { spin_unlock(&i2c->lock); return IRQ_NONE; } meson_i2c_transfer_complete(i2c, ctrl); if (i2c->state == STATE_IDLE) { complete(&i2c->done); goto out; } /* Restart the processing */ meson_i2c_prepare_xfer(i2c); meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_START, REG_CTRL_START); out: spin_unlock(&i2c->lock); return IRQ_HANDLED; } static void meson_i2c_do_start(struct meson_i2c *i2c, struct i2c_msg *msg) { int token; token = (msg->flags & I2C_M_RD) ? TOKEN_SLAVE_ADDR_READ : TOKEN_SLAVE_ADDR_WRITE; meson_i2c_set_mask(i2c, REG_SLAVE_ADDR, REG_SLV_ADDR_MASK, FIELD_PREP(REG_SLV_ADDR_MASK, msg->addr << 1)); meson_i2c_add_token(i2c, TOKEN_START); meson_i2c_add_token(i2c, token); } static int meson_i2c_xfer_msg(struct meson_i2c *i2c, struct i2c_msg *msg, int last, bool atomic) { unsigned long time_left, flags; int ret = 0; u32 ctrl; i2c->msg = msg; i2c->last = last; i2c->pos = 0; i2c->count = 0; i2c->error = 0; meson_i2c_reset_tokens(i2c); flags = (msg->flags & I2C_M_IGNORE_NAK) ? REG_CTRL_ACK_IGNORE : 0; meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_ACK_IGNORE, flags); if (!(msg->flags & I2C_M_NOSTART)) meson_i2c_do_start(i2c, msg); i2c->state = (msg->flags & I2C_M_RD) ? 
STATE_READ : STATE_WRITE; meson_i2c_prepare_xfer(i2c); if (!atomic) reinit_completion(&i2c->done); /* Start the transfer */ meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_START, REG_CTRL_START); if (atomic) { ret = readl_poll_timeout_atomic(i2c->regs + REG_CTRL, ctrl, !(ctrl & REG_CTRL_STATUS), 10, I2C_TIMEOUT_MS * 1000); } else { time_left = msecs_to_jiffies(I2C_TIMEOUT_MS); time_left = wait_for_completion_timeout(&i2c->done, time_left); if (!time_left) ret = -ETIMEDOUT; } /* * Protect access to i2c struct and registers from interrupt * handlers triggered by a transfer terminated after the * timeout period */ spin_lock_irqsave(&i2c->lock, flags); if (atomic && !ret) meson_i2c_transfer_complete(i2c, ctrl); /* Abort any active operation */ meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_START, 0); if (ret) i2c->state = STATE_IDLE; if (i2c->error) ret = i2c->error; spin_unlock_irqrestore(&i2c->lock, flags); return ret; } static int meson_i2c_xfer_messages(struct i2c_adapter *adap, struct i2c_msg *msgs, int num, bool atomic) { struct meson_i2c *i2c = adap->algo_data; int i, ret = 0; for (i = 0; i < num; i++) { ret = meson_i2c_xfer_msg(i2c, msgs + i, i == num - 1, atomic); if (ret) break; } return ret ?: i; } static int meson_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { return meson_i2c_xfer_messages(adap, msgs, num, false); } static int meson_i2c_xfer_atomic(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { return meson_i2c_xfer_messages(adap, msgs, num, true); } static u32 meson_i2c_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static const struct i2c_algorithm meson_i2c_algorithm = { .master_xfer = meson_i2c_xfer, .master_xfer_atomic = meson_i2c_xfer_atomic, .functionality = meson_i2c_func, }; static int meson_i2c_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct meson_i2c *i2c; struct i2c_timings timings; int irq, ret = 0; i2c = devm_kzalloc(&pdev->dev, sizeof(struct meson_i2c), GFP_KERNEL); if (!i2c) return -ENOMEM; i2c_parse_fw_timings(&pdev->dev, &timings, true); i2c->dev = &pdev->dev; platform_set_drvdata(pdev, i2c); spin_lock_init(&i2c->lock); init_completion(&i2c->done); i2c->data = (const struct meson_i2c_data *) of_device_get_match_data(&pdev->dev); i2c->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(i2c->clk)) { dev_err(&pdev->dev, "can't get device clock\n"); return PTR_ERR(i2c->clk); } i2c->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(i2c->regs)) return PTR_ERR(i2c->regs); irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; ret = devm_request_irq(&pdev->dev, irq, meson_i2c_irq, 0, NULL, i2c); if (ret < 0) { dev_err(&pdev->dev, "can't request IRQ\n"); return ret; } ret = clk_prepare_enable(i2c->clk); if (ret < 0) { dev_err(&pdev->dev, "can't prepare clock\n"); return ret; } strscpy(i2c->adap.name, "Meson I2C adapter", sizeof(i2c->adap.name)); i2c->adap.owner = THIS_MODULE; i2c->adap.algo = &meson_i2c_algorithm; i2c->adap.dev.parent = &pdev->dev; i2c->adap.dev.of_node = np; i2c->adap.algo_data = i2c; /* * A transfer is triggered when START bit changes from 0 to 1. 
* Ensure that the bit is set to 0 after probe */ meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_START, 0); /* Disable filtering */ meson_i2c_set_mask(i2c, REG_SLAVE_ADDR, REG_SLV_SDA_FILTER_MASK | REG_SLV_SCL_FILTER_MASK, 0); if (!i2c->data->set_clk_div) { clk_disable_unprepare(i2c->clk); return -EINVAL; } i2c->data->set_clk_div(i2c, timings.bus_freq_hz); ret = i2c_add_adapter(&i2c->adap); if (ret < 0) { clk_disable_unprepare(i2c->clk); return ret; } return 0; } static void meson_i2c_remove(struct platform_device *pdev) { struct meson_i2c *i2c = platform_get_drvdata(pdev); i2c_del_adapter(&i2c->adap); clk_disable_unprepare(i2c->clk); } static const struct meson_i2c_data i2c_meson6_data = { .set_clk_div = meson6_i2c_set_clk_div, }; static const struct meson_i2c_data i2c_gxbb_data = { .set_clk_div = meson_gxbb_axg_i2c_set_clk_div, }; static const struct meson_i2c_data i2c_axg_data = { .set_clk_div = meson_gxbb_axg_i2c_set_clk_div, }; static const struct of_device_id meson_i2c_match[] = { { .compatible = "amlogic,meson6-i2c", .data = &i2c_meson6_data }, { .compatible = "amlogic,meson-gxbb-i2c", .data = &i2c_gxbb_data }, { .compatible = "amlogic,meson-axg-i2c", .data = &i2c_axg_data }, {}, }; MODULE_DEVICE_TABLE(of, meson_i2c_match); static struct platform_driver meson_i2c_driver = { .probe = meson_i2c_probe, .remove_new = meson_i2c_remove, .driver = { .name = "meson-i2c", .of_match_table = meson_i2c_match, }, }; module_platform_driver(meson_i2c_driver); MODULE_DESCRIPTION("Amlogic Meson I2C Bus driver"); MODULE_AUTHOR("Beniamino Galvani <[email protected]>"); MODULE_LICENSE("GPL v2");
linux-master
drivers/i2c/busses/i2c-meson.c
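/*
 * A host-buildable sketch of the divider math in
 * meson_gxbb_axg_i2c_set_clk_div() above, handy for checking div_h and
 * div_l on paper. The 166 MHz input clock is only an assumed example
 * rate, not a claim about real SoC clocking.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)		(((n) + (d) - 1) / (d))
#define FILTER_DELAY			15
#define I2C_MAX_STANDARD_MODE_FREQ	100000

static void calc(unsigned long clk, unsigned long freq)
{
	unsigned long div_h, div_l;

	if (freq <= I2C_MAX_STANDARD_MODE_FREQ) {
		div_h = DIV_ROUND_UP(clk, freq);
		div_l = DIV_ROUND_UP(div_h, 4);
		div_h = DIV_ROUND_UP(div_h, 2) - FILTER_DELAY;
	} else {
		/* 40% HIGH / 60% LOW split for fast mode and above */
		div_h = DIV_ROUND_UP(clk * 2, freq * 5) - FILTER_DELAY;
		div_l = DIV_ROUND_UP(clk * 3, freq * 5 * 2);
	}
	printf("clk %lu, freq %lu -> div_h %lu, div_l %lu\n",
	       clk, freq, div_h, div_l);
}

int main(void)
{
	calc(166666667UL, 100000);	/* standard mode */
	calc(166666667UL, 400000);	/* fast mode */
	return 0;
}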
/* * I2C bus driver for the SH7760 I2C Interfaces. * * (c) 2005-2008 MSC Vertriebsges.m.b.H, Manuel Lauss <[email protected]> * * licensed under the terms outlined in the file COPYING. * */ #include <linux/completion.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/module.h> #include <asm/clock.h> #include <asm/i2c-sh7760.h> /* register offsets */ #define I2CSCR 0x0 /* slave ctrl */ #define I2CMCR 0x4 /* master ctrl */ #define I2CSSR 0x8 /* slave status */ #define I2CMSR 0xC /* master status */ #define I2CSIER 0x10 /* slave irq enable */ #define I2CMIER 0x14 /* master irq enable */ #define I2CCCR 0x18 /* clock dividers */ #define I2CSAR 0x1c /* slave address */ #define I2CMAR 0x20 /* master address */ #define I2CRXTX 0x24 /* data port */ #define I2CFCR 0x28 /* fifo control */ #define I2CFSR 0x2C /* fifo status */ #define I2CFIER 0x30 /* fifo irq enable */ #define I2CRFDR 0x34 /* rx fifo count */ #define I2CTFDR 0x38 /* tx fifo count */ #define REGSIZE 0x3C #define MCR_MDBS 0x80 /* non-fifo mode switch */ #define MCR_FSCL 0x40 /* override SCL pin */ #define MCR_FSDA 0x20 /* override SDA pin */ #define MCR_OBPC 0x10 /* override pins */ #define MCR_MIE 0x08 /* master if enable */ #define MCR_TSBE 0x04 #define MCR_FSB 0x02 /* force stop bit */ #define MCR_ESG 0x01 /* en startbit gen. */ #define MSR_MNR 0x40 /* nack received */ #define MSR_MAL 0x20 /* arbitration lost */ #define MSR_MST 0x10 /* sent a stop */ #define MSR_MDE 0x08 #define MSR_MDT 0x04 #define MSR_MDR 0x02 #define MSR_MAT 0x01 /* slave addr xfer done */ #define MIE_MNRE 0x40 /* nack irq en */ #define MIE_MALE 0x20 /* arblos irq en */ #define MIE_MSTE 0x10 /* stop irq en */ #define MIE_MDEE 0x08 #define MIE_MDTE 0x04 #define MIE_MDRE 0x02 #define MIE_MATE 0x01 /* address sent irq en */ #define FCR_RFRST 0x02 /* reset rx fifo */ #define FCR_TFRST 0x01 /* reset tx fifo */ #define FSR_TEND 0x04 /* last byte sent */ #define FSR_RDF 0x02 /* rx fifo trigger */ #define FSR_TDFE 0x01 /* tx fifo empty */ #define FIER_TEIE 0x04 /* tx fifo empty irq en */ #define FIER_RXIE 0x02 /* rx fifo trig irq en */ #define FIER_TXIE 0x01 /* tx fifo trig irq en */ #define FIFO_SIZE 16 struct cami2c { void __iomem *iobase; struct i2c_adapter adap; /* message processing */ struct i2c_msg *msg; #define IDF_SEND 1 #define IDF_RECV 2 #define IDF_STOP 4 int flags; #define IDS_DONE 1 #define IDS_ARBLOST 2 #define IDS_NACK 4 int status; struct completion xfer_done; int irq; struct resource *ioarea; }; static inline void OUT32(struct cami2c *cam, int reg, unsigned long val) { __raw_writel(val, (unsigned long)cam->iobase + reg); } static inline unsigned long IN32(struct cami2c *cam, int reg) { return __raw_readl((unsigned long)cam->iobase + reg); } static irqreturn_t sh7760_i2c_irq(int irq, void *ptr) { struct cami2c *id = ptr; struct i2c_msg *msg = id->msg; char *data = msg->buf; unsigned long msr, fsr, fier, len; msr = IN32(id, I2CMSR); fsr = IN32(id, I2CFSR); /* arbitration lost */ if (msr & MSR_MAL) { OUT32(id, I2CMCR, 0); OUT32(id, I2CSCR, 0); OUT32(id, I2CSAR, 0); id->status |= IDS_DONE | IDS_ARBLOST; goto out; } if (msr & MSR_MNR) { /* NACK handling is very screwed up. After receiving a * NAK IRQ one has to wait a bit before writing to any * registers, or the ctl will lock up. After that delay * do a normal i2c stop. 
Then wait at least 1 ms before * attempting another transfer or ctl will stop working */ udelay(100); /* wait or risk ctl hang */ OUT32(id, I2CFCR, FCR_RFRST | FCR_TFRST); OUT32(id, I2CMCR, MCR_MIE | MCR_FSB); OUT32(id, I2CFIER, 0); OUT32(id, I2CMIER, MIE_MSTE); OUT32(id, I2CSCR, 0); OUT32(id, I2CSAR, 0); id->status |= IDS_NACK; msr &= ~MSR_MAT; fsr = 0; /* In some cases the MST bit is also set. */ } /* i2c-stop was sent */ if (msr & MSR_MST) { id->status |= IDS_DONE; goto out; } /* i2c slave addr was sent; set to "normal" operation */ if (msr & MSR_MAT) OUT32(id, I2CMCR, MCR_MIE); fier = IN32(id, I2CFIER); if (fsr & FSR_RDF) { len = IN32(id, I2CRFDR); if (msg->len <= len) { if (id->flags & IDF_STOP) { OUT32(id, I2CMCR, MCR_MIE | MCR_FSB); OUT32(id, I2CFIER, 0); /* manual says: wait >= 0.5 SCL times */ udelay(5); /* next int should be MST */ } else { id->status |= IDS_DONE; /* keep the RDF bit: ctrl holds SCL low * until the setup for the next i2c_msg * clears this bit. */ fsr &= ~FSR_RDF; } } while (msg->len && len) { *data++ = IN32(id, I2CRXTX); msg->len--; len--; } if (msg->len) { len = (msg->len >= FIFO_SIZE) ? FIFO_SIZE - 1 : msg->len - 1; OUT32(id, I2CFCR, FCR_TFRST | ((len & 0xf) << 4)); } } else if (id->flags & IDF_SEND) { if ((fsr & FSR_TEND) && (msg->len < 1)) { if (id->flags & IDF_STOP) { OUT32(id, I2CMCR, MCR_MIE | MCR_FSB); } else { id->status |= IDS_DONE; /* keep the TEND bit: ctl holds SCL low * until the setup for the next i2c_msg * clears this bit. */ fsr &= ~FSR_TEND; } } if (fsr & FSR_TDFE) { while (msg->len && (IN32(id, I2CTFDR) < FIFO_SIZE)) { OUT32(id, I2CRXTX, *data++); msg->len--; } if (msg->len < 1) { fier &= ~FIER_TXIE; OUT32(id, I2CFIER, fier); } else { len = (msg->len >= FIFO_SIZE) ? 2 : 0; OUT32(id, I2CFCR, FCR_RFRST | ((len & 3) << 2)); } } } out: if (id->status & IDS_DONE) { OUT32(id, I2CMIER, 0); OUT32(id, I2CFIER, 0); id->msg = NULL; complete(&id->xfer_done); } /* clear status flags and ctrl resumes work */ OUT32(id, I2CMSR, ~msr); OUT32(id, I2CFSR, ~fsr); OUT32(id, I2CSSR, 0); return IRQ_HANDLED; } /* prepare and start a master receive operation */ static void sh7760_i2c_mrecv(struct cami2c *id) { int len; id->flags |= IDF_RECV; /* set the slave addr reg; otherwise rcv wont work! */ OUT32(id, I2CSAR, 0xfe); OUT32(id, I2CMAR, (id->msg->addr << 1) | 1); /* adjust rx fifo trigger */ if (id->msg->len >= FIFO_SIZE) len = FIFO_SIZE - 1; /* trigger at fifo full */ else len = id->msg->len - 1; /* trigger before all received */ OUT32(id, I2CFCR, FCR_RFRST | FCR_TFRST); OUT32(id, I2CFCR, FCR_TFRST | ((len & 0xF) << 4)); OUT32(id, I2CMSR, 0); OUT32(id, I2CMCR, MCR_MIE | MCR_ESG); OUT32(id, I2CMIER, MIE_MNRE | MIE_MALE | MIE_MSTE | MIE_MATE); OUT32(id, I2CFIER, FIER_RXIE); } /* prepare and start a master send operation */ static void sh7760_i2c_msend(struct cami2c *id) { int len; id->flags |= IDF_SEND; /* set the slave addr reg; otherwise xmit wont work! 
*/ OUT32(id, I2CSAR, 0xfe); OUT32(id, I2CMAR, (id->msg->addr << 1) | 0); /* adjust tx fifo trigger */ if (id->msg->len >= FIFO_SIZE) len = 2; /* trig: 2 bytes left in TX fifo */ else len = 0; /* trig: 8 bytes left in TX fifo */ OUT32(id, I2CFCR, FCR_RFRST | FCR_TFRST); OUT32(id, I2CFCR, FCR_RFRST | ((len & 3) << 2)); while (id->msg->len && IN32(id, I2CTFDR) < FIFO_SIZE) { OUT32(id, I2CRXTX, *(id->msg->buf)); (id->msg->len)--; (id->msg->buf)++; } OUT32(id, I2CMSR, 0); OUT32(id, I2CMCR, MCR_MIE | MCR_ESG); OUT32(id, I2CFSR, 0); OUT32(id, I2CMIER, MIE_MNRE | MIE_MALE | MIE_MSTE | MIE_MATE); OUT32(id, I2CFIER, FIER_TEIE | (id->msg->len ? FIER_TXIE : 0)); } static inline int sh7760_i2c_busy_check(struct cami2c *id) { return (IN32(id, I2CMCR) & MCR_FSDA); } static int sh7760_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct cami2c *id = adap->algo_data; int i, retr; if (sh7760_i2c_busy_check(id)) { dev_err(&adap->dev, "sh7760-i2c%d: bus busy!\n", adap->nr); return -EBUSY; } i = 0; while (i < num) { retr = adap->retries; retry: id->flags = ((i == (num-1)) ? IDF_STOP : 0); id->status = 0; id->msg = msgs; init_completion(&id->xfer_done); if (msgs->flags & I2C_M_RD) sh7760_i2c_mrecv(id); else sh7760_i2c_msend(id); wait_for_completion(&id->xfer_done); if (id->status == 0) { num = -EIO; break; } if (id->status & IDS_NACK) { /* wait a bit or i2c module stops working */ mdelay(1); num = -EREMOTEIO; break; } if (id->status & IDS_ARBLOST) { if (retr--) { mdelay(2); goto retry; } num = -EREMOTEIO; break; } msgs++; i++; } id->msg = NULL; id->flags = 0; id->status = 0; OUT32(id, I2CMCR, 0); OUT32(id, I2CMSR, 0); OUT32(id, I2CMIER, 0); OUT32(id, I2CFIER, 0); /* reset slave module registers too: master mode enables slave * module for receive ops (ack, data). Without this reset, * eternal bus activity might be reported after NACK / ARBLOST. */ OUT32(id, I2CSCR, 0); OUT32(id, I2CSAR, 0); OUT32(id, I2CSSR, 0); return num; } static u32 sh7760_i2c_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK); } static const struct i2c_algorithm sh7760_i2c_algo = { .master_xfer = sh7760_i2c_master_xfer, .functionality = sh7760_i2c_func, }; /* calculate CCR register setting for a desired scl clock. SCL clock is * derived from I2C module clock (iclk) which in turn is derived from * peripheral module clock (mclk, usually around 33MHz): * iclk = mclk/(CDF + 1). iclk must be < 20MHz. * scl = iclk/(SCGD*8 + 20). 
*/ static int calc_CCR(unsigned long scl_hz) { struct clk *mclk; unsigned long mck, m1, dff, odff, iclk; signed char cdf, cdfm; int scgd, scgdm, scgds; mclk = clk_get(NULL, "peripheral_clk"); if (IS_ERR(mclk)) { return PTR_ERR(mclk); } else { mck = mclk->rate; clk_put(mclk); } odff = scl_hz; scgdm = cdfm = m1 = 0; for (cdf = 3; cdf >= 0; cdf--) { iclk = mck / (1 + cdf); if (iclk >= 20000000) continue; scgds = ((iclk / scl_hz) - 20) >> 3; for (scgd = scgds; (scgd < 63) && scgd <= scgds + 1; scgd++) { m1 = iclk / (20 + (scgd << 3)); dff = abs(scl_hz - m1); if (dff < odff) { odff = dff; cdfm = cdf; scgdm = scgd; } } } /* fail if more than 25% off of requested SCL */ if (odff > (scl_hz >> 2)) return -EINVAL; /* create a CCR register value */ return ((scgdm << 2) | cdfm); } static int sh7760_i2c_probe(struct platform_device *pdev) { struct sh7760_i2c_platdata *pd; struct resource *res; struct cami2c *id; int ret; pd = dev_get_platdata(&pdev->dev); if (!pd) { dev_err(&pdev->dev, "no platform_data!\n"); ret = -ENODEV; goto out0; } id = kzalloc(sizeof(*id), GFP_KERNEL); if (!id) { ret = -ENOMEM; goto out0; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "no mmio resources\n"); ret = -ENODEV; goto out1; } id->ioarea = request_mem_region(res->start, REGSIZE, pdev->name); if (!id->ioarea) { dev_err(&pdev->dev, "mmio already reserved\n"); ret = -EBUSY; goto out1; } id->iobase = ioremap(res->start, REGSIZE); if (!id->iobase) { dev_err(&pdev->dev, "cannot ioremap\n"); ret = -ENODEV; goto out2; } ret = platform_get_irq(pdev, 0); if (ret < 0) goto out3; id->irq = ret; id->adap.nr = pdev->id; id->adap.algo = &sh7760_i2c_algo; id->adap.class = I2C_CLASS_HWMON | I2C_CLASS_SPD; id->adap.retries = 3; id->adap.algo_data = id; id->adap.dev.parent = &pdev->dev; snprintf(id->adap.name, sizeof(id->adap.name), "SH7760 I2C at %08lx", (unsigned long)res->start); OUT32(id, I2CMCR, 0); OUT32(id, I2CMSR, 0); OUT32(id, I2CMIER, 0); OUT32(id, I2CMAR, 0); OUT32(id, I2CSIER, 0); OUT32(id, I2CSAR, 0); OUT32(id, I2CSCR, 0); OUT32(id, I2CSSR, 0); OUT32(id, I2CFIER, 0); OUT32(id, I2CFCR, FCR_RFRST | FCR_TFRST); OUT32(id, I2CFSR, 0); ret = calc_CCR(pd->speed_khz * 1000); if (ret < 0) { dev_err(&pdev->dev, "invalid SCL clock: %dkHz\n", pd->speed_khz); goto out3; } OUT32(id, I2CCCR, ret); if (request_irq(id->irq, sh7760_i2c_irq, 0, SH7760_I2C_DEVNAME, id)) { dev_err(&pdev->dev, "cannot get irq %d\n", id->irq); ret = -EBUSY; goto out3; } ret = i2c_add_numbered_adapter(&id->adap); if (ret < 0) goto out4; platform_set_drvdata(pdev, id); dev_info(&pdev->dev, "%d kHz mmio %08x irq %d\n", pd->speed_khz, res->start, id->irq); return 0; out4: free_irq(id->irq, id); out3: iounmap(id->iobase); out2: release_resource(id->ioarea); kfree(id->ioarea); out1: kfree(id); out0: return ret; } static void sh7760_i2c_remove(struct platform_device *pdev) { struct cami2c *id = platform_get_drvdata(pdev); i2c_del_adapter(&id->adap); free_irq(id->irq, id); iounmap(id->iobase); release_resource(id->ioarea); kfree(id->ioarea); kfree(id); } static struct platform_driver sh7760_i2c_drv = { .driver = { .name = SH7760_I2C_DEVNAME, }, .probe = sh7760_i2c_probe, .remove_new = sh7760_i2c_remove, }; module_platform_driver(sh7760_i2c_drv); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SH7760 I2C bus driver"); MODULE_AUTHOR("Manuel Lauss <[email protected]>");
linux-master
drivers/i2c/busses/i2c-sh7760.c
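/*
 * A host-buildable re-run of the calc_CCR() search above, so the CDF/SCGD
 * choice can be sanity-checked: iclk = mclk/(CDF+1) must stay below
 * 20 MHz, and SCL = iclk/(SCGD*8 + 20). The 33.33 MHz peripheral clock is
 * an assumed example value, and the driver's 25%-off rejection test is
 * omitted for brevity.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned long mck = 33333333UL, scl_hz = 100000;
	unsigned long iclk, m1, dff, odff = scl_hz;
	int cdf, scgd, scgds, cdfm = 0, scgdm = 0;

	for (cdf = 3; cdf >= 0; cdf--) {
		iclk = mck / (1 + cdf);
		if (iclk >= 20000000)	/* iclk must stay below 20 MHz */
			continue;
		scgds = ((iclk / scl_hz) - 20) >> 3;
		for (scgd = scgds; scgd < 63 && scgd <= scgds + 1; scgd++) {
			m1 = iclk / (20 + (scgd << 3));
			dff = labs((long)(scl_hz - m1));
			if (dff < odff) {
				odff = dff;
				cdfm = cdf;
				scgdm = scgd;
			}
		}
	}
	printf("CDF=%d SCGD=%d -> SCL=%lu Hz, CCR=0x%02x\n",
	       cdfm, scgdm, mck / (1 + cdfm) / (20 + (scgdm << 3)),
	       (scgdm << 2) | cdfm);
	return 0;
}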
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2012 FUJITSU SEMICONDUCTOR LIMITED */ #include <linux/acpi.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/spinlock.h> #define WAIT_PCLK(n, rate) \ ndelay(DIV_ROUND_UP(DIV_ROUND_UP(1000000000, rate), n) + 10) /* I2C register address definitions */ #define SYNQUACER_I2C_REG_BSR (0x00 << 2) // Bus Status #define SYNQUACER_I2C_REG_BCR (0x01 << 2) // Bus Control #define SYNQUACER_I2C_REG_CCR (0x02 << 2) // Clock Control #define SYNQUACER_I2C_REG_ADR (0x03 << 2) // Address #define SYNQUACER_I2C_REG_DAR (0x04 << 2) // Data #define SYNQUACER_I2C_REG_CSR (0x05 << 2) // Expansion CS #define SYNQUACER_I2C_REG_FSR (0x06 << 2) // Bus Clock Freq #define SYNQUACER_I2C_REG_BC2R (0x07 << 2) // Bus Control 2 /* I2C register bit definitions */ #define SYNQUACER_I2C_BSR_FBT BIT(0) // First Byte Transfer #define SYNQUACER_I2C_BSR_GCA BIT(1) // General Call Address #define SYNQUACER_I2C_BSR_AAS BIT(2) // Address as Slave #define SYNQUACER_I2C_BSR_TRX BIT(3) // Transfer/Receive #define SYNQUACER_I2C_BSR_LRB BIT(4) // Last Received Bit #define SYNQUACER_I2C_BSR_AL BIT(5) // Arbitration Lost #define SYNQUACER_I2C_BSR_RSC BIT(6) // Repeated Start Cond. #define SYNQUACER_I2C_BSR_BB BIT(7) // Bus Busy #define SYNQUACER_I2C_BCR_INT BIT(0) // Interrupt #define SYNQUACER_I2C_BCR_INTE BIT(1) // Interrupt Enable #define SYNQUACER_I2C_BCR_GCAA BIT(2) // Gen. Call Access Ack. #define SYNQUACER_I2C_BCR_ACK BIT(3) // Acknowledge #define SYNQUACER_I2C_BCR_MSS BIT(4) // Master Slave Select #define SYNQUACER_I2C_BCR_SCC BIT(5) // Start Condition Cont. #define SYNQUACER_I2C_BCR_BEIE BIT(6) // Bus Error Int Enable #define SYNQUACER_I2C_BCR_BER BIT(7) // Bus Error #define SYNQUACER_I2C_CCR_CS_MASK (0x1f) // CCR Clock Period Sel. #define SYNQUACER_I2C_CCR_EN BIT(5) // Enable #define SYNQUACER_I2C_CCR_FM BIT(6) // Speed Mode Select #define SYNQUACER_I2C_CSR_CS_MASK (0x3f) // CSR Clock Period Sel. 
#define SYNQUACER_I2C_BC2R_SCLL BIT(0) // SCL Low Drive #define SYNQUACER_I2C_BC2R_SDAL BIT(1) // SDA Low Drive #define SYNQUACER_I2C_BC2R_SCLS BIT(4) // SCL Status #define SYNQUACER_I2C_BC2R_SDAS BIT(5) // SDA Status /* PCLK frequency */ #define SYNQUACER_I2C_BUS_CLK_FR(rate) (((rate) / 20000000) + 1) /* STANDARD MODE frequency */ #define SYNQUACER_I2C_CLK_MASTER_STD(rate) \ DIV_ROUND_UP(DIV_ROUND_UP((rate), I2C_MAX_STANDARD_MODE_FREQ) - 2, 2) /* FAST MODE frequency */ #define SYNQUACER_I2C_CLK_MASTER_FAST(rate) \ DIV_ROUND_UP((DIV_ROUND_UP((rate), I2C_MAX_FAST_MODE_FREQ) - 2) * 2, 3) /* (clkrate <= 18000000) */ /* calculate the value of CS bits in CCR register on standard mode */ #define SYNQUACER_I2C_CCR_CS_STD_MAX_18M(rate) \ ((SYNQUACER_I2C_CLK_MASTER_STD(rate) - 65) \ & SYNQUACER_I2C_CCR_CS_MASK) /* calculate the value of CS bits in CSR register on standard mode */ #define SYNQUACER_I2C_CSR_CS_STD_MAX_18M(rate) 0x00 /* calculate the value of CS bits in CCR register on fast mode */ #define SYNQUACER_I2C_CCR_CS_FAST_MAX_18M(rate) \ ((SYNQUACER_I2C_CLK_MASTER_FAST(rate) - 1) \ & SYNQUACER_I2C_CCR_CS_MASK) /* calculate the value of CS bits in CSR register on fast mode */ #define SYNQUACER_I2C_CSR_CS_FAST_MAX_18M(rate) 0x00 /* (clkrate > 18000000) */ /* calculate the value of CS bits in CCR register on standard mode */ #define SYNQUACER_I2C_CCR_CS_STD_MIN_18M(rate) \ ((SYNQUACER_I2C_CLK_MASTER_STD(rate) - 1) \ & SYNQUACER_I2C_CCR_CS_MASK) /* calculate the value of CS bits in CSR register on standard mode */ #define SYNQUACER_I2C_CSR_CS_STD_MIN_18M(rate) \ (((SYNQUACER_I2C_CLK_MASTER_STD(rate) - 1) >> 5) \ & SYNQUACER_I2C_CSR_CS_MASK) /* calculate the value of CS bits in CCR register on fast mode */ #define SYNQUACER_I2C_CCR_CS_FAST_MIN_18M(rate) \ ((SYNQUACER_I2C_CLK_MASTER_FAST(rate) - 1) \ & SYNQUACER_I2C_CCR_CS_MASK) /* calculate the value of CS bits in CSR register on fast mode */ #define SYNQUACER_I2C_CSR_CS_FAST_MIN_18M(rate) \ (((SYNQUACER_I2C_CLK_MASTER_FAST(rate) - 1) >> 5) \ & SYNQUACER_I2C_CSR_CS_MASK) /* min I2C clock frequency 14M */ #define SYNQUACER_I2C_MIN_CLK_RATE (14 * 1000000) /* max I2C clock frequency 200M */ #define SYNQUACER_I2C_MAX_CLK_RATE (200 * 1000000) /* I2C clock frequency 18M */ #define SYNQUACER_I2C_CLK_RATE_18M (18 * 1000000) #define SYNQUACER_I2C_SPEED_FM 400 // Fast Mode #define SYNQUACER_I2C_SPEED_SM 100 // Standard Mode enum i2c_state { STATE_IDLE, STATE_START, STATE_READ, STATE_WRITE }; struct synquacer_i2c { struct completion completion; struct i2c_msg *msg; u32 msg_num; u32 msg_idx; u32 msg_ptr; int irq; struct device *dev; void __iomem *base; struct clk *pclk; u32 pclkrate; u32 speed_khz; u32 timeout_ms; enum i2c_state state; struct i2c_adapter adapter; }; static inline int is_lastmsg(struct synquacer_i2c *i2c) { return i2c->msg_idx >= (i2c->msg_num - 1); } static inline int is_msglast(struct synquacer_i2c *i2c) { return i2c->msg_ptr == (i2c->msg->len - 1); } static inline int is_msgend(struct synquacer_i2c *i2c) { return i2c->msg_ptr >= i2c->msg->len; } static inline unsigned long calc_timeout_ms(struct synquacer_i2c *i2c, struct i2c_msg *msgs, int num) { unsigned long bit_count = 0; int i; for (i = 0; i < num; i++, msgs++) bit_count += msgs->len; return DIV_ROUND_UP((bit_count * 9 + num * 10) * 3, 200) + 10; } static void synquacer_i2c_stop(struct synquacer_i2c *i2c, int ret) { /* * clear IRQ (INT=0, BER=0) * set Stop Condition (MSS=0) * Interrupt Disable */ writeb(0, i2c->base + SYNQUACER_I2C_REG_BCR); i2c->state = STATE_IDLE; i2c->msg_ptr = 0; 
i2c->msg = NULL; i2c->msg_idx++; i2c->msg_num = 0; if (ret) i2c->msg_idx = ret; complete(&i2c->completion); } static void synquacer_i2c_hw_init(struct synquacer_i2c *i2c) { unsigned char ccr_cs, csr_cs; u32 rt = i2c->pclkrate; /* Set own Address */ writeb(0, i2c->base + SYNQUACER_I2C_REG_ADR); /* Set PCLK frequency */ writeb(SYNQUACER_I2C_BUS_CLK_FR(i2c->pclkrate), i2c->base + SYNQUACER_I2C_REG_FSR); switch (i2c->speed_khz) { case SYNQUACER_I2C_SPEED_FM: if (i2c->pclkrate <= SYNQUACER_I2C_CLK_RATE_18M) { ccr_cs = SYNQUACER_I2C_CCR_CS_FAST_MAX_18M(rt); csr_cs = SYNQUACER_I2C_CSR_CS_FAST_MAX_18M(rt); } else { ccr_cs = SYNQUACER_I2C_CCR_CS_FAST_MIN_18M(rt); csr_cs = SYNQUACER_I2C_CSR_CS_FAST_MIN_18M(rt); } /* Set Clock and enable, Set fast mode */ writeb(ccr_cs | SYNQUACER_I2C_CCR_FM | SYNQUACER_I2C_CCR_EN, i2c->base + SYNQUACER_I2C_REG_CCR); writeb(csr_cs, i2c->base + SYNQUACER_I2C_REG_CSR); break; case SYNQUACER_I2C_SPEED_SM: if (i2c->pclkrate <= SYNQUACER_I2C_CLK_RATE_18M) { ccr_cs = SYNQUACER_I2C_CCR_CS_STD_MAX_18M(rt); csr_cs = SYNQUACER_I2C_CSR_CS_STD_MAX_18M(rt); } else { ccr_cs = SYNQUACER_I2C_CCR_CS_STD_MIN_18M(rt); csr_cs = SYNQUACER_I2C_CSR_CS_STD_MIN_18M(rt); } /* Set Clock and enable, Set standard mode */ writeb(ccr_cs | SYNQUACER_I2C_CCR_EN, i2c->base + SYNQUACER_I2C_REG_CCR); writeb(csr_cs, i2c->base + SYNQUACER_I2C_REG_CSR); break; default: WARN_ON(1); } /* clear IRQ (INT=0, BER=0), Interrupt Disable */ writeb(0, i2c->base + SYNQUACER_I2C_REG_BCR); writeb(0, i2c->base + SYNQUACER_I2C_REG_BC2R); } static void synquacer_i2c_hw_reset(struct synquacer_i2c *i2c) { /* Disable clock */ writeb(0, i2c->base + SYNQUACER_I2C_REG_CCR); writeb(0, i2c->base + SYNQUACER_I2C_REG_CSR); WAIT_PCLK(100, i2c->pclkrate); } static int synquacer_i2c_master_start(struct synquacer_i2c *i2c, struct i2c_msg *pmsg) { unsigned char bsr, bcr; writeb(i2c_8bit_addr_from_msg(pmsg), i2c->base + SYNQUACER_I2C_REG_DAR); dev_dbg(i2c->dev, "slave:0x%02x\n", pmsg->addr); /* Generate Start Condition */ bsr = readb(i2c->base + SYNQUACER_I2C_REG_BSR); bcr = readb(i2c->base + SYNQUACER_I2C_REG_BCR); dev_dbg(i2c->dev, "bsr:0x%02x, bcr:0x%02x\n", bsr, bcr); if ((bsr & SYNQUACER_I2C_BSR_BB) && !(bcr & SYNQUACER_I2C_BCR_MSS)) { dev_dbg(i2c->dev, "bus is busy"); return -EBUSY; } if (bsr & SYNQUACER_I2C_BSR_BB) { /* Bus is busy */ dev_dbg(i2c->dev, "Continuous Start"); writeb(bcr | SYNQUACER_I2C_BCR_SCC, i2c->base + SYNQUACER_I2C_REG_BCR); } else { if (bcr & SYNQUACER_I2C_BCR_MSS) { dev_dbg(i2c->dev, "not in master mode"); return -EAGAIN; } dev_dbg(i2c->dev, "Start Condition"); /* Start Condition + Enable Interrupts */ writeb(bcr | SYNQUACER_I2C_BCR_MSS | SYNQUACER_I2C_BCR_INTE | SYNQUACER_I2C_BCR_BEIE, i2c->base + SYNQUACER_I2C_REG_BCR); } WAIT_PCLK(10, i2c->pclkrate); /* get BSR & BCR registers */ bsr = readb(i2c->base + SYNQUACER_I2C_REG_BSR); bcr = readb(i2c->base + SYNQUACER_I2C_REG_BCR); dev_dbg(i2c->dev, "bsr:0x%02x, bcr:0x%02x\n", bsr, bcr); if ((bsr & SYNQUACER_I2C_BSR_AL) || !(bcr & SYNQUACER_I2C_BCR_MSS)) { dev_dbg(i2c->dev, "arbitration lost\n"); return -EAGAIN; } return 0; } static int synquacer_i2c_doxfer(struct synquacer_i2c *i2c, struct i2c_msg *msgs, int num) { unsigned char bsr; unsigned long timeout; int ret; synquacer_i2c_hw_init(i2c); bsr = readb(i2c->base + SYNQUACER_I2C_REG_BSR); if (bsr & SYNQUACER_I2C_BSR_BB) { dev_err(i2c->dev, "cannot get bus (bus busy)\n"); return -EBUSY; } reinit_completion(&i2c->completion); i2c->msg = msgs; i2c->msg_num = num; i2c->msg_ptr = 0; i2c->msg_idx = 0; i2c->state = 
STATE_START; ret = synquacer_i2c_master_start(i2c, i2c->msg); if (ret < 0) { dev_dbg(i2c->dev, "Address failed: (%d)\n", ret); return ret; } timeout = wait_for_completion_timeout(&i2c->completion, msecs_to_jiffies(i2c->timeout_ms)); if (timeout == 0) { dev_dbg(i2c->dev, "timeout\n"); return -EAGAIN; } ret = i2c->msg_idx; if (ret != num) { dev_dbg(i2c->dev, "incomplete xfer (%d)\n", ret); return -EAGAIN; } /* wait 2 clock periods to ensure the stop has been through the bus */ udelay(DIV_ROUND_UP(2 * 1000, i2c->speed_khz)); return ret; } static irqreturn_t synquacer_i2c_isr(int irq, void *dev_id) { struct synquacer_i2c *i2c = dev_id; unsigned char byte; unsigned char bsr, bcr; int ret; bcr = readb(i2c->base + SYNQUACER_I2C_REG_BCR); bsr = readb(i2c->base + SYNQUACER_I2C_REG_BSR); dev_dbg(i2c->dev, "bsr:0x%02x, bcr:0x%02x\n", bsr, bcr); if (bcr & SYNQUACER_I2C_BCR_BER) { dev_err(i2c->dev, "bus error\n"); synquacer_i2c_stop(i2c, -EAGAIN); goto out; } if ((bsr & SYNQUACER_I2C_BSR_AL) || !(bcr & SYNQUACER_I2C_BCR_MSS)) { dev_dbg(i2c->dev, "arbitration lost\n"); synquacer_i2c_stop(i2c, -EAGAIN); goto out; } switch (i2c->state) { case STATE_START: if (bsr & SYNQUACER_I2C_BSR_LRB) { dev_dbg(i2c->dev, "ack was not received\n"); synquacer_i2c_stop(i2c, -EAGAIN); goto out; } if (i2c->msg->flags & I2C_M_RD) i2c->state = STATE_READ; else i2c->state = STATE_WRITE; if (is_lastmsg(i2c) && i2c->msg->len == 0) { synquacer_i2c_stop(i2c, 0); goto out; } if (i2c->state == STATE_READ) goto prepare_read; fallthrough; case STATE_WRITE: if (bsr & SYNQUACER_I2C_BSR_LRB) { dev_dbg(i2c->dev, "WRITE: No Ack\n"); synquacer_i2c_stop(i2c, -EAGAIN); goto out; } if (!is_msgend(i2c)) { writeb(i2c->msg->buf[i2c->msg_ptr++], i2c->base + SYNQUACER_I2C_REG_DAR); /* clear IRQ, and continue */ writeb(SYNQUACER_I2C_BCR_BEIE | SYNQUACER_I2C_BCR_MSS | SYNQUACER_I2C_BCR_INTE, i2c->base + SYNQUACER_I2C_REG_BCR); break; } if (is_lastmsg(i2c)) { synquacer_i2c_stop(i2c, 0); break; } dev_dbg(i2c->dev, "WRITE: Next Message\n"); i2c->msg_ptr = 0; i2c->msg_idx++; i2c->msg++; /* send the new start */ ret = synquacer_i2c_master_start(i2c, i2c->msg); if (ret < 0) { dev_dbg(i2c->dev, "restart error (%d)\n", ret); synquacer_i2c_stop(i2c, -EAGAIN); break; } i2c->state = STATE_START; break; case STATE_READ: byte = readb(i2c->base + SYNQUACER_I2C_REG_DAR); if (!(bsr & SYNQUACER_I2C_BSR_FBT)) /* data */ i2c->msg->buf[i2c->msg_ptr++] = byte; else /* address */ dev_dbg(i2c->dev, "address:0x%02x. 
ignore it.\n", byte); prepare_read: if (is_msglast(i2c)) { writeb(SYNQUACER_I2C_BCR_MSS | SYNQUACER_I2C_BCR_BEIE | SYNQUACER_I2C_BCR_INTE, i2c->base + SYNQUACER_I2C_REG_BCR); break; } if (!is_msgend(i2c)) { writeb(SYNQUACER_I2C_BCR_MSS | SYNQUACER_I2C_BCR_BEIE | SYNQUACER_I2C_BCR_INTE | SYNQUACER_I2C_BCR_ACK, i2c->base + SYNQUACER_I2C_REG_BCR); break; } if (is_lastmsg(i2c)) { /* last message, send stop and complete */ dev_dbg(i2c->dev, "READ: Send Stop\n"); synquacer_i2c_stop(i2c, 0); break; } dev_dbg(i2c->dev, "READ: Next Transfer\n"); i2c->msg_ptr = 0; i2c->msg_idx++; i2c->msg++; ret = synquacer_i2c_master_start(i2c, i2c->msg); if (ret < 0) { dev_dbg(i2c->dev, "restart error (%d)\n", ret); synquacer_i2c_stop(i2c, -EAGAIN); } else { i2c->state = STATE_START; } break; default: dev_err(i2c->dev, "called in err STATE (%d)\n", i2c->state); break; } out: WAIT_PCLK(10, i2c->pclkrate); return IRQ_HANDLED; } static int synquacer_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct synquacer_i2c *i2c; int retry; int ret; i2c = i2c_get_adapdata(adap); i2c->timeout_ms = calc_timeout_ms(i2c, msgs, num); dev_dbg(i2c->dev, "calculated timeout %d ms\n", i2c->timeout_ms); for (retry = 0; retry <= adap->retries; retry++) { ret = synquacer_i2c_doxfer(i2c, msgs, num); if (ret != -EAGAIN) return ret; dev_dbg(i2c->dev, "Retrying transmission (%d)\n", retry); synquacer_i2c_hw_reset(i2c); } return -EIO; } static u32 synquacer_i2c_functionality(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static const struct i2c_algorithm synquacer_i2c_algo = { .master_xfer = synquacer_i2c_xfer, .functionality = synquacer_i2c_functionality, }; static const struct i2c_adapter synquacer_i2c_ops = { .owner = THIS_MODULE, .name = "synquacer_i2c-adapter", .algo = &synquacer_i2c_algo, .retries = 5, }; static int synquacer_i2c_probe(struct platform_device *pdev) { struct synquacer_i2c *i2c; u32 bus_speed; int ret; i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL); if (!i2c) return -ENOMEM; bus_speed = i2c_acpi_find_bus_speed(&pdev->dev); if (!bus_speed) device_property_read_u32(&pdev->dev, "clock-frequency", &bus_speed); device_property_read_u32(&pdev->dev, "socionext,pclk-rate", &i2c->pclkrate); i2c->pclk = devm_clk_get(&pdev->dev, "pclk"); if (PTR_ERR(i2c->pclk) == -EPROBE_DEFER) return -EPROBE_DEFER; if (!IS_ERR_OR_NULL(i2c->pclk)) { dev_dbg(&pdev->dev, "clock source %p\n", i2c->pclk); ret = clk_prepare_enable(i2c->pclk); if (ret) return dev_err_probe(&pdev->dev, ret, "failed to enable clock\n"); i2c->pclkrate = clk_get_rate(i2c->pclk); } if (i2c->pclkrate < SYNQUACER_I2C_MIN_CLK_RATE || i2c->pclkrate > SYNQUACER_I2C_MAX_CLK_RATE) return dev_err_probe(&pdev->dev, -EINVAL, "PCLK missing or out of range (%d)\n", i2c->pclkrate); i2c->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(i2c->base)) return PTR_ERR(i2c->base); i2c->irq = platform_get_irq(pdev, 0); if (i2c->irq < 0) return i2c->irq; ret = devm_request_irq(&pdev->dev, i2c->irq, synquacer_i2c_isr, 0, dev_name(&pdev->dev), i2c); if (ret < 0) return dev_err_probe(&pdev->dev, ret, "cannot claim IRQ %d\n", i2c->irq); i2c->state = STATE_IDLE; i2c->dev = &pdev->dev; i2c->adapter = synquacer_i2c_ops; i2c_set_adapdata(&i2c->adapter, i2c); i2c->adapter.dev.parent = &pdev->dev; i2c->adapter.dev.of_node = pdev->dev.of_node; ACPI_COMPANION_SET(&i2c->adapter.dev, ACPI_COMPANION(&pdev->dev)); i2c->adapter.nr = pdev->id; init_completion(&i2c->completion); if (bus_speed < I2C_MAX_FAST_MODE_FREQ) i2c->speed_khz = 
SYNQUACER_I2C_SPEED_SM; else i2c->speed_khz = SYNQUACER_I2C_SPEED_FM; synquacer_i2c_hw_init(i2c); ret = i2c_add_numbered_adapter(&i2c->adapter); if (ret) return dev_err_probe(&pdev->dev, ret, "failed to add bus to i2c core\n"); platform_set_drvdata(pdev, i2c); dev_info(&pdev->dev, "%s: synquacer_i2c adapter\n", dev_name(&i2c->adapter.dev)); return 0; } static void synquacer_i2c_remove(struct platform_device *pdev) { struct synquacer_i2c *i2c = platform_get_drvdata(pdev); i2c_del_adapter(&i2c->adapter); if (!IS_ERR(i2c->pclk)) clk_disable_unprepare(i2c->pclk); } static const struct of_device_id synquacer_i2c_dt_ids[] __maybe_unused = { { .compatible = "socionext,synquacer-i2c" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, synquacer_i2c_dt_ids); #ifdef CONFIG_ACPI static const struct acpi_device_id synquacer_i2c_acpi_ids[] = { { "SCX0003" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(acpi, synquacer_i2c_acpi_ids); #endif static struct platform_driver synquacer_i2c_driver = { .probe = synquacer_i2c_probe, .remove_new = synquacer_i2c_remove, .driver = { .name = "synquacer_i2c", .of_match_table = of_match_ptr(synquacer_i2c_dt_ids), .acpi_match_table = ACPI_PTR(synquacer_i2c_acpi_ids), }, }; module_platform_driver(synquacer_i2c_driver); MODULE_AUTHOR("Fujitsu Semiconductor Ltd"); MODULE_DESCRIPTION("Socionext SynQuacer I2C Driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/i2c/busses/i2c-synquacer.c
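Editor's note: the synquacer_i2c_xfer() entry point above is reached whenever user space issues a combined transfer against this adapter. As a hedged, standalone sketch (not part of the driver), the program below exercises such an adapter through the standard /dev/i2c-N character device; the bus number /dev/i2c-0 and the 0x50 slave address are made-up examples.

/* Build with: cc -o i2c-demo i2c-demo.c */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

int main(void)
{
	int fd = open("/dev/i2c-0", O_RDWR);	/* hypothetical bus number */
	unsigned char reg = 0x00, val;
	struct i2c_msg msgs[2] = {
		/* write one register-address byte ... */
		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &reg },
		/* ... then a repeated-start read of one data byte */
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = &val },
	};
	struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 2 };

	if (fd < 0)
		return 1;
	/* The kernel routes this to the adapter's master_xfer(), i.e.
	 * synquacer_i2c_xfer() with num == 2 for the driver above. */
	if (ioctl(fd, I2C_RDWR, &xfer) < 0) {
		perror("I2C_RDWR");
		close(fd);
		return 1;
	}
	printf("reg 0x%02x = 0x%02x\n", reg, val);
	close(fd);
	return 0;
}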
// SPDX-License-Identifier: GPL-2.0-or-later /* i2c Support for Apple SMU Controller Copyright (c) 2005 Benjamin Herrenschmidt, IBM Corp. <[email protected]> */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/i2c.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/of_irq.h> #include <asm/pmac_low_i2c.h> MODULE_AUTHOR("Benjamin Herrenschmidt <[email protected]>"); MODULE_DESCRIPTION("I2C driver for Apple PowerMac"); MODULE_LICENSE("GPL"); /* * SMBUS-type transfer entrypoint */ static s32 i2c_powermac_smbus_xfer( struct i2c_adapter* adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data* data) { struct pmac_i2c_bus *bus = i2c_get_adapdata(adap); int rc = 0; int read = (read_write == I2C_SMBUS_READ); int addrdir = (addr << 1) | read; int mode, subsize, len; u32 subaddr; u8 *buf; u8 local[2]; if (size == I2C_SMBUS_QUICK || size == I2C_SMBUS_BYTE) { mode = pmac_i2c_mode_std; subsize = 0; subaddr = 0; } else { mode = read ? pmac_i2c_mode_combined : pmac_i2c_mode_stdsub; subsize = 1; subaddr = command; } switch (size) { case I2C_SMBUS_QUICK: buf = NULL; len = 0; break; case I2C_SMBUS_BYTE: case I2C_SMBUS_BYTE_DATA: buf = &data->byte; len = 1; break; case I2C_SMBUS_WORD_DATA: if (!read) { local[0] = data->word & 0xff; local[1] = (data->word >> 8) & 0xff; } buf = local; len = 2; break; /* Note that these are broken vs. the expected smbus API where * on reads, the length is actually returned from the function, * but I think the current API makes no sense and I don't want * any driver that I haven't verified for correctness to go * anywhere near a pmac i2c bus anyway ... */ case I2C_SMBUS_BLOCK_DATA: buf = data->block; len = data->block[0] + 1; break; case I2C_SMBUS_I2C_BLOCK_DATA: buf = &data->block[1]; len = data->block[0]; break; default: return -EINVAL; } rc = pmac_i2c_open(bus, 0); if (rc) { dev_err(&adap->dev, "Failed to open I2C, err %d\n", rc); return rc; } rc = pmac_i2c_setmode(bus, mode); if (rc) { dev_err(&adap->dev, "Failed to set I2C mode %d, err %d\n", mode, rc); goto bail; } rc = pmac_i2c_xfer(bus, addrdir, subsize, subaddr, buf, len); if (rc) { if (rc == -ENXIO) dev_dbg(&adap->dev, "I2C transfer at 0x%02x failed, size %d, " "err %d\n", addrdir >> 1, size, rc); else dev_err(&adap->dev, "I2C transfer at 0x%02x failed, size %d, " "err %d\n", addrdir >> 1, size, rc); goto bail; } if (size == I2C_SMBUS_WORD_DATA && read) { data->word = ((u16)local[1]) << 8; data->word |= local[0]; } bail: pmac_i2c_close(bus); return rc; } /* * Generic i2c master transfer entrypoint. This driver only support single * messages (for "lame i2c" transfers). Anything else should use the smbus * entry point */ static int i2c_powermac_master_xfer( struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct pmac_i2c_bus *bus = i2c_get_adapdata(adap); int rc = 0; int addrdir; if (msgs->flags & I2C_M_TEN) return -EINVAL; addrdir = i2c_8bit_addr_from_msg(msgs); rc = pmac_i2c_open(bus, 0); if (rc) { dev_err(&adap->dev, "Failed to open I2C, err %d\n", rc); return rc; } rc = pmac_i2c_setmode(bus, pmac_i2c_mode_std); if (rc) { dev_err(&adap->dev, "Failed to set I2C mode %d, err %d\n", pmac_i2c_mode_std, rc); goto bail; } rc = pmac_i2c_xfer(bus, addrdir, 0, 0, msgs->buf, msgs->len); if (rc < 0) { if (rc == -ENXIO) dev_dbg(&adap->dev, "I2C %s 0x%02x failed, err %d\n", addrdir & 1 ? "read from" : "write to", addrdir >> 1, rc); else dev_err(&adap->dev, "I2C %s 0x%02x failed, err %d\n", addrdir & 1 ? 
"read from" : "write to", addrdir >> 1, rc); } bail: pmac_i2c_close(bus); return rc < 0 ? rc : 1; } static u32 i2c_powermac_func(struct i2c_adapter * adapter) { return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_I2C; } /* For now, we only handle smbus */ static const struct i2c_algorithm i2c_powermac_algorithm = { .smbus_xfer = i2c_powermac_smbus_xfer, .master_xfer = i2c_powermac_master_xfer, .functionality = i2c_powermac_func, }; static const struct i2c_adapter_quirks i2c_powermac_quirks = { .max_num_msgs = 1, }; static void i2c_powermac_remove(struct platform_device *dev) { struct i2c_adapter *adapter = platform_get_drvdata(dev); i2c_del_adapter(adapter); memset(adapter, 0, sizeof(*adapter)); } static u32 i2c_powermac_get_addr(struct i2c_adapter *adap, struct pmac_i2c_bus *bus, struct device_node *node) { u32 prop; int ret; /* First check for valid "reg" */ ret = of_property_read_u32(node, "reg", &prop); if (ret == 0) return (prop & 0xff) >> 1; /* Then check old-style "i2c-address" */ ret = of_property_read_u32(node, "i2c-address", &prop); if (ret == 0) return (prop & 0xff) >> 1; /* Now handle some devices with missing "reg" properties */ if (of_node_name_eq(node, "cereal")) return 0x60; else if (of_node_name_eq(node, "deq")) return 0x34; dev_warn(&adap->dev, "No i2c address for %pOF\n", node); return 0xffffffff; } static void i2c_powermac_create_one(struct i2c_adapter *adap, const char *type, u32 addr) { struct i2c_board_info info = {}; struct i2c_client *newdev; strncpy(info.type, type, sizeof(info.type)); info.addr = addr; newdev = i2c_new_client_device(adap, &info); if (IS_ERR(newdev)) dev_err(&adap->dev, "i2c-powermac: Failure to register missing %s\n", type); } static void i2c_powermac_add_missing(struct i2c_adapter *adap, struct pmac_i2c_bus *bus, bool found_onyx) { struct device_node *busnode = pmac_i2c_get_bus_node(bus); int rc; /* Check for the onyx audio codec */ #define ONYX_REG_CONTROL 67 if (of_device_is_compatible(busnode, "k2-i2c") && !found_onyx) { union i2c_smbus_data data; rc = i2c_smbus_xfer(adap, 0x46, 0, I2C_SMBUS_READ, ONYX_REG_CONTROL, I2C_SMBUS_BYTE_DATA, &data); if (rc >= 0) i2c_powermac_create_one(adap, "MAC,pcm3052", 0x46); rc = i2c_smbus_xfer(adap, 0x47, 0, I2C_SMBUS_READ, ONYX_REG_CONTROL, I2C_SMBUS_BYTE_DATA, &data); if (rc >= 0) i2c_powermac_create_one(adap, "MAC,pcm3052", 0x47); } } static bool i2c_powermac_get_type(struct i2c_adapter *adap, struct device_node *node, u32 addr, char *type, int type_size) { char tmp[16]; /* * Note: we do _NOT_ want the standard i2c drivers to match with any of * our powermac stuff unless they have been specifically modified to * handle it on a case by case basis. For example, for thermal control, * things like lm75 etc... 
shall match with their corresponding * windfarm drivers, _NOT_ the generic ones, so we force a prefix of * 'MAC', onto the modalias to make that happen */ /* First try proper modalias */ if (of_alias_from_compatible(node, tmp, sizeof(tmp)) >= 0) { snprintf(type, type_size, "MAC,%s", tmp); return true; } /* Now look for known workarounds */ if (of_node_name_eq(node, "deq")) { /* Apple uses address 0x34 for TAS3001 and 0x35 for TAS3004 */ if (addr == 0x34) { snprintf(type, type_size, "MAC,tas3001"); return true; } else if (addr == 0x35) { snprintf(type, type_size, "MAC,tas3004"); return true; } } dev_err(&adap->dev, "i2c-powermac: modalias failure on %pOF\n", node); return false; } static void i2c_powermac_register_devices(struct i2c_adapter *adap, struct pmac_i2c_bus *bus) { struct i2c_client *newdev; struct device_node *node; bool found_onyx = false; /* * In some cases we end up with the via-pmu node itself, in this * case we skip this function completely as the device-tree will * not contain anything useful. */ if (of_node_name_eq(adap->dev.of_node, "via-pmu")) return; for_each_child_of_node(adap->dev.of_node, node) { struct i2c_board_info info = {}; u32 addr; /* Get address & channel */ addr = i2c_powermac_get_addr(adap, bus, node); if (addr == 0xffffffff) continue; /* Multibus setup, check channel */ if (!pmac_i2c_match_adapter(node, adap)) continue; dev_dbg(&adap->dev, "i2c-powermac: register %pOF\n", node); /* * Keep track of some device existence to handle * workarounds later. */ if (of_device_is_compatible(node, "pcm3052")) found_onyx = true; /* Make up a modalias */ if (!i2c_powermac_get_type(adap, node, addr, info.type, sizeof(info.type))) { continue; } /* Fill out the rest of the info structure */ info.addr = addr; info.irq = irq_of_parse_and_map(node, 0); info.of_node = of_node_get(node); newdev = i2c_new_client_device(adap, &info); if (IS_ERR(newdev)) { dev_err(&adap->dev, "i2c-powermac: Failure to register" " %pOF\n", node); of_node_put(node); /* We do not dispose of the interrupt mapping on * purpose. It's not necessary (interrupt cannot be * re-used) and somebody else might have grabbed it * via direct DT lookup so let's not bother */ continue; } } /* Additional workarounds */ i2c_powermac_add_missing(adap, bus, found_onyx); } static int i2c_powermac_probe(struct platform_device *dev) { struct pmac_i2c_bus *bus = dev_get_platdata(&dev->dev); struct device_node *parent; struct i2c_adapter *adapter; int rc; if (bus == NULL) return -EINVAL; adapter = pmac_i2c_get_adapter(bus); /* Ok, now we need to make up a name for the interface that will * match what we used to do in the past, that is basically the * controller's parent device node for keywest. 
PMU didn't have a * naming convention and SMU has a different one */ switch(pmac_i2c_get_type(bus)) { case pmac_i2c_bus_keywest: parent = of_get_parent(pmac_i2c_get_controller(bus)); if (parent == NULL) return -EINVAL; snprintf(adapter->name, sizeof(adapter->name), "%pOFn %d", parent, pmac_i2c_get_channel(bus)); of_node_put(parent); break; case pmac_i2c_bus_pmu: snprintf(adapter->name, sizeof(adapter->name), "pmu %d", pmac_i2c_get_channel(bus)); break; case pmac_i2c_bus_smu: /* This is not what we used to do but I'm fixing drivers at * the same time as this change */ snprintf(adapter->name, sizeof(adapter->name), "smu %d", pmac_i2c_get_channel(bus)); break; default: return -EINVAL; } platform_set_drvdata(dev, adapter); adapter->algo = &i2c_powermac_algorithm; adapter->quirks = &i2c_powermac_quirks; i2c_set_adapdata(adapter, bus); adapter->dev.parent = &dev->dev; /* Clear of_node to skip automatic registration of i2c child nodes */ adapter->dev.of_node = NULL; rc = i2c_add_adapter(adapter); if (rc) { printk(KERN_ERR "i2c-powermac: Adapter %s registration " "failed\n", adapter->name); memset(adapter, 0, sizeof(*adapter)); return rc; } printk(KERN_INFO "PowerMac i2c bus %s registered\n", adapter->name); /* Use custom child registration due to Apple device-tree funkyness */ adapter->dev.of_node = dev->dev.of_node; i2c_powermac_register_devices(adapter, bus); return 0; } static struct platform_driver i2c_powermac_driver = { .probe = i2c_powermac_probe, .remove_new = i2c_powermac_remove, .driver = { .name = "i2c-powermac", .bus = &platform_bus_type, }, }; module_platform_driver(i2c_powermac_driver); MODULE_ALIAS("platform:i2c-powermac");
linux-master
drivers/i2c/busses/i2c-powermac.c
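Editor's note: the I2C_SMBUS_WORD_DATA paths above split data->word into local[0]/local[1] on writes and reassemble them in the same order on reads, because SMBus sends the low byte of a word first. The following standalone snippet (illustration only, not driver code) checks that byte-order handling.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t word = 0xBEEF;
	uint8_t wire[2];

	/* write path: low byte travels first on the wire */
	wire[0] = word & 0xff;
	wire[1] = (word >> 8) & 0xff;

	/* read path: reassemble in the same order */
	uint16_t back = ((uint16_t)wire[1] << 8) | wire[0];

	assert(wire[0] == 0xEF && wire[1] == 0xBE);
	assert(back == word);
	return 0;
}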
// SPDX-License-Identifier: GPL-2.0 /* * i2c Support for Atmel's AT91 Two-Wire Interface (TWI) * * Copyright (C) 2011 Weinmann Medical GmbH * Author: Nikolaus Voss <[email protected]> * * Evolved from original work by: * Copyright (C) 2004 Rick Bronson * Converted to 2.6 by Andrew Victor <[email protected]> * * Borrowed heavily from original work by: * Copyright (C) 2000 Philip Edelbrock <[email protected]> */ #include <linux/clk.h> #include <linux/err.h> #include <linux/i2c.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/pinctrl/consumer.h> #include "i2c-at91.h" unsigned at91_twi_read(struct at91_twi_dev *dev, unsigned reg) { return readl_relaxed(dev->base + reg); } void at91_twi_write(struct at91_twi_dev *dev, unsigned reg, unsigned val) { writel_relaxed(val, dev->base + reg); } void at91_disable_twi_interrupts(struct at91_twi_dev *dev) { at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_INT_MASK); } void at91_twi_irq_save(struct at91_twi_dev *dev) { dev->imr = at91_twi_read(dev, AT91_TWI_IMR) & AT91_TWI_INT_MASK; at91_disable_twi_interrupts(dev); } void at91_twi_irq_restore(struct at91_twi_dev *dev) { at91_twi_write(dev, AT91_TWI_IER, dev->imr); } void at91_init_twi_bus(struct at91_twi_dev *dev) { at91_disable_twi_interrupts(dev); at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_SWRST); if (dev->slave_detected) at91_init_twi_bus_slave(dev); else at91_init_twi_bus_master(dev); } static struct at91_twi_pdata at91rm9200_config = { .clk_max_div = 5, .clk_offset = 3, .has_unre_flag = true, }; static struct at91_twi_pdata at91sam9261_config = { .clk_max_div = 5, .clk_offset = 4, }; static struct at91_twi_pdata at91sam9260_config = { .clk_max_div = 7, .clk_offset = 4, }; static struct at91_twi_pdata at91sam9g20_config = { .clk_max_div = 7, .clk_offset = 4, }; static struct at91_twi_pdata at91sam9g10_config = { .clk_max_div = 7, .clk_offset = 4, }; static const struct platform_device_id at91_twi_devtypes[] = { { .name = "i2c-at91rm9200", .driver_data = (unsigned long) &at91rm9200_config, }, { .name = "i2c-at91sam9261", .driver_data = (unsigned long) &at91sam9261_config, }, { .name = "i2c-at91sam9260", .driver_data = (unsigned long) &at91sam9260_config, }, { .name = "i2c-at91sam9g20", .driver_data = (unsigned long) &at91sam9g20_config, }, { .name = "i2c-at91sam9g10", .driver_data = (unsigned long) &at91sam9g10_config, }, { /* sentinel */ } }; #if defined(CONFIG_OF) static struct at91_twi_pdata at91sam9x5_config = { .clk_max_div = 7, .clk_offset = 4, }; static struct at91_twi_pdata sama5d4_config = { .clk_max_div = 7, .clk_offset = 4, .has_hold_field = true, .has_dig_filtr = true, }; static struct at91_twi_pdata sama5d2_config = { .clk_max_div = 7, .clk_offset = 3, .has_unre_flag = true, .has_alt_cmd = true, .has_hold_field = true, .has_dig_filtr = true, .has_adv_dig_filtr = true, .has_ana_filtr = true, .has_clear_cmd = false, /* due to errata, CLEAR cmd is not working */ }; static struct at91_twi_pdata sam9x60_config = { .clk_max_div = 7, .clk_offset = 3, .has_unre_flag = true, .has_alt_cmd = true, .has_hold_field = true, .has_dig_filtr = true, .has_adv_dig_filtr = true, .has_ana_filtr = true, .has_clear_cmd = true, }; static const struct of_device_id atmel_twi_dt_ids[] = { { .compatible = "atmel,at91rm9200-i2c", .data = &at91rm9200_config, }, { .compatible = "atmel,at91sam9260-i2c", .data = &at91sam9260_config, }, { .compatible = "atmel,at91sam9261-i2c", .data = &at91sam9261_config, }, { .compatible = 
"atmel,at91sam9g20-i2c", .data = &at91sam9g20_config, }, { .compatible = "atmel,at91sam9g10-i2c", .data = &at91sam9g10_config, }, { .compatible = "atmel,at91sam9x5-i2c", .data = &at91sam9x5_config, }, { .compatible = "atmel,sama5d4-i2c", .data = &sama5d4_config, }, { .compatible = "atmel,sama5d2-i2c", .data = &sama5d2_config, }, { .compatible = "microchip,sam9x60-i2c", .data = &sam9x60_config, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, atmel_twi_dt_ids); #endif static struct at91_twi_pdata *at91_twi_get_driver_data( struct platform_device *pdev) { if (pdev->dev.of_node) { const struct of_device_id *match; match = of_match_node(atmel_twi_dt_ids, pdev->dev.of_node); if (!match) return NULL; return (struct at91_twi_pdata *)match->data; } return (struct at91_twi_pdata *) platform_get_device_id(pdev)->driver_data; } static int at91_twi_probe(struct platform_device *pdev) { struct at91_twi_dev *dev; struct resource *mem; int rc; u32 phy_addr; dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; dev->dev = &pdev->dev; dev->base = devm_platform_get_and_ioremap_resource(pdev, 0, &mem); if (IS_ERR(dev->base)) return PTR_ERR(dev->base); phy_addr = mem->start; dev->pdata = at91_twi_get_driver_data(pdev); if (!dev->pdata) return -ENODEV; dev->irq = platform_get_irq(pdev, 0); if (dev->irq < 0) return dev->irq; platform_set_drvdata(pdev, dev); dev->clk = devm_clk_get(dev->dev, NULL); if (IS_ERR(dev->clk)) return dev_err_probe(dev->dev, PTR_ERR(dev->clk), "no clock defined\n"); clk_prepare_enable(dev->clk); snprintf(dev->adapter.name, sizeof(dev->adapter.name), "AT91"); i2c_set_adapdata(&dev->adapter, dev); dev->adapter.owner = THIS_MODULE; dev->adapter.class = I2C_CLASS_DEPRECATED; dev->adapter.dev.parent = dev->dev; dev->adapter.nr = pdev->id; dev->adapter.timeout = AT91_I2C_TIMEOUT; dev->adapter.dev.of_node = pdev->dev.of_node; dev->slave_detected = i2c_detect_slave_mode(&pdev->dev); if (dev->slave_detected) rc = at91_twi_probe_slave(pdev, phy_addr, dev); else rc = at91_twi_probe_master(pdev, phy_addr, dev); if (rc) return rc; at91_init_twi_bus(dev); pm_runtime_set_autosuspend_delay(dev->dev, AUTOSUSPEND_TIMEOUT); pm_runtime_use_autosuspend(dev->dev); pm_runtime_set_active(dev->dev); pm_runtime_enable(dev->dev); rc = i2c_add_numbered_adapter(&dev->adapter); if (rc) { clk_disable_unprepare(dev->clk); pm_runtime_disable(dev->dev); pm_runtime_set_suspended(dev->dev); return rc; } dev_info(dev->dev, "AT91 i2c bus driver (hw version: %#x).\n", at91_twi_read(dev, AT91_TWI_VER)); return 0; } static void at91_twi_remove(struct platform_device *pdev) { struct at91_twi_dev *dev = platform_get_drvdata(pdev); i2c_del_adapter(&dev->adapter); clk_disable_unprepare(dev->clk); pm_runtime_disable(dev->dev); pm_runtime_set_suspended(dev->dev); } static int __maybe_unused at91_twi_runtime_suspend(struct device *dev) { struct at91_twi_dev *twi_dev = dev_get_drvdata(dev); clk_disable_unprepare(twi_dev->clk); pinctrl_pm_select_sleep_state(dev); return 0; } static int __maybe_unused at91_twi_runtime_resume(struct device *dev) { struct at91_twi_dev *twi_dev = dev_get_drvdata(dev); pinctrl_pm_select_default_state(dev); return clk_prepare_enable(twi_dev->clk); } static int __maybe_unused at91_twi_suspend_noirq(struct device *dev) { if (!pm_runtime_status_suspended(dev)) at91_twi_runtime_suspend(dev); return 0; } static int __maybe_unused at91_twi_resume_noirq(struct device *dev) { struct at91_twi_dev *twi_dev = dev_get_drvdata(dev); int ret; if (!pm_runtime_status_suspended(dev)) { ret = 
at91_twi_runtime_resume(dev); if (ret) return ret; } pm_runtime_mark_last_busy(dev); pm_request_autosuspend(dev); at91_init_twi_bus(twi_dev); return 0; } static const struct dev_pm_ops __maybe_unused at91_twi_pm = { .suspend_noirq = at91_twi_suspend_noirq, .resume_noirq = at91_twi_resume_noirq, .runtime_suspend = at91_twi_runtime_suspend, .runtime_resume = at91_twi_runtime_resume, }; static struct platform_driver at91_twi_driver = { .probe = at91_twi_probe, .remove_new = at91_twi_remove, .id_table = at91_twi_devtypes, .driver = { .name = "at91_i2c", .of_match_table = of_match_ptr(atmel_twi_dt_ids), .pm = pm_ptr(&at91_twi_pm), }, }; static int __init at91_twi_init(void) { return platform_driver_register(&at91_twi_driver); } static void __exit at91_twi_exit(void) { platform_driver_unregister(&at91_twi_driver); } subsys_initcall(at91_twi_init); module_exit(at91_twi_exit); MODULE_AUTHOR("Nikolaus Voss <[email protected]>"); MODULE_DESCRIPTION("I2C (TWI) driver for Atmel AT91"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:at91_i2c");
linux-master
drivers/i2c/busses/i2c-at91-core.c
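Editor's note: the per-SoC clk_max_div/clk_offset fields above feed the TWI clock waveform generator, whose half period is roughly (cdiv << ckdiv) + offset peripheral clock cycles. The sketch below mirrors that divider search in spirit; it is a simplified approximation under those assumptions, not the driver's actual at91_calc_twi_clock() (which lives in i2c-at91-master.c).

#include <stdio.h>

struct twi_cfg { unsigned clk_max_div; unsigned clk_offset; };

static void calc_div(const struct twi_cfg *c, unsigned long pclk_hz,
		     unsigned long bus_hz, unsigned *ckdiv, unsigned *cdiv)
{
	/* half period in peripheral clock cycles, minus the fixed offset */
	unsigned long div = pclk_hz / (2 * bus_hz);

	div = div > c->clk_offset ? div - c->clk_offset : 0;

	*ckdiv = 0;
	while (div > 255 && *ckdiv < c->clk_max_div) {	/* cdiv is 8 bits wide */
		div >>= 1;
		(*ckdiv)++;
	}
	*cdiv = div > 255 ? 255 : (unsigned)div;
}

int main(void)
{
	struct twi_cfg sama5d4 = { .clk_max_div = 7, .clk_offset = 4 };
	unsigned ckdiv, cdiv;

	/* hypothetical 83 MHz peripheral clock, 100 kHz bus */
	calc_div(&sama5d4, 83000000, 100000, &ckdiv, &cdiv);
	printf("ckdiv=%u cdiv=%u\n", ckdiv, cdiv);	/* ckdiv=1 cdiv=205 */
	return 0;
}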
// SPDX-License-Identifier: GPL-2.0-only /* Copyright (C) 2022 Hewlett-Packard Enterprise Development Company, L.P. */ #include <linux/err.h> #include <linux/io.h> #include <linux/i2c.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/mfd/syscon.h> #define GXP_MAX_I2C_ENGINE 10 static const char * const gxp_i2c_name[] = { "gxp-i2c0", "gxp-i2c1", "gxp-i2c2", "gxp-i2c3", "gxp-i2c4", "gxp-i2c5", "gxp-i2c6", "gxp-i2c7", "gxp-i2c8", "gxp-i2c9" }; /* GXP I2C Global interrupt status/enable register*/ #define GXP_I2CINTSTAT 0x00 #define GXP_I2CINTEN 0x04 /* GXP I2C registers */ #define GXP_I2CSTAT 0x00 #define MASK_STOP_EVENT 0x20 #define MASK_ACK 0x08 #define MASK_RW 0x04 #define GXP_I2CEVTERR 0x01 #define MASK_SLAVE_CMD_EVENT 0x01 #define MASK_SLAVE_DATA_EVENT 0x02 #define MASK_MASTER_EVENT 0x10 #define GXP_I2CSNPDAT 0x02 #define GXP_I2CMCMD 0x04 #define GXP_I2CSCMD 0x06 #define GXP_I2CSNPAA 0x09 #define GXP_I2CADVFEAT 0x0A #define GXP_I2COWNADR 0x0B #define GXP_I2CFREQDIV 0x0C #define GXP_I2CFLTFAIR 0x0D #define GXP_I2CTMOEDG 0x0E #define GXP_I2CCYCTIM 0x0F /* I2CSCMD Bits */ #define SNOOP_EVT_CLR 0x80 #define SLAVE_EVT_CLR 0x40 #define SNOOP_EVT_MASK 0x20 #define SLAVE_EVT_MASK 0x10 #define SLAVE_ACK_ENAB 0x08 #define SLAVE_EVT_STALL 0x01 /* I2CMCMD Bits */ #define MASTER_EVT_CLR 0x80 #define MASTER_ACK_ENAB 0x08 #define RW_CMD 0x04 #define STOP_CMD 0x02 #define START_CMD 0x01 /* I2CTMOEDG value */ #define GXP_DATA_EDGE_RST_CTRL 0x0a /* 30ns */ /* I2CFLTFAIR Bits */ #define FILTER_CNT 0x30 #define FAIRNESS_CNT 0x02 enum { GXP_I2C_IDLE = 0, GXP_I2C_ADDR_PHASE, GXP_I2C_RDATA_PHASE, GXP_I2C_WDATA_PHASE, GXP_I2C_ADDR_NACK, GXP_I2C_DATA_NACK, GXP_I2C_ERROR, GXP_I2C_COMP }; struct gxp_i2c_drvdata { struct device *dev; void __iomem *base; struct i2c_timings t; u32 engine; int irq; struct completion completion; struct i2c_adapter adapter; struct i2c_msg *curr_msg; int msgs_remaining; int msgs_num; u8 *buf; size_t buf_remaining; unsigned char state; struct i2c_client *slave; unsigned char stopped; }; static struct regmap *i2cg_map; static void gxp_i2c_start(struct gxp_i2c_drvdata *drvdata) { u16 value; drvdata->buf = drvdata->curr_msg->buf; drvdata->buf_remaining = drvdata->curr_msg->len; /* Note: Address in struct i2c_msg is 7 bits */ value = drvdata->curr_msg->addr << 9; /* Read or Write */ value |= drvdata->curr_msg->flags & I2C_M_RD ? 
RW_CMD | START_CMD : START_CMD; drvdata->state = GXP_I2C_ADDR_PHASE; writew(value, drvdata->base + GXP_I2CMCMD); } static int gxp_i2c_master_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) { int ret; struct gxp_i2c_drvdata *drvdata = i2c_get_adapdata(adapter); unsigned long time_left; drvdata->msgs_remaining = num; drvdata->curr_msg = msgs; drvdata->msgs_num = num; reinit_completion(&drvdata->completion); gxp_i2c_start(drvdata); time_left = wait_for_completion_timeout(&drvdata->completion, adapter->timeout); ret = num - drvdata->msgs_remaining; if (time_left == 0) return -ETIMEDOUT; if (drvdata->state == GXP_I2C_ADDR_NACK) return -ENXIO; if (drvdata->state == GXP_I2C_DATA_NACK) return -EIO; return ret; } static u32 gxp_i2c_func(struct i2c_adapter *adap) { if (IS_ENABLED(CONFIG_I2C_SLAVE)) return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SLAVE; return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } #if IS_ENABLED(CONFIG_I2C_SLAVE) static int gxp_i2c_reg_slave(struct i2c_client *slave) { struct gxp_i2c_drvdata *drvdata = i2c_get_adapdata(slave->adapter); if (drvdata->slave) return -EBUSY; if (slave->flags & I2C_CLIENT_TEN) return -EAFNOSUPPORT; drvdata->slave = slave; writeb(slave->addr << 1, drvdata->base + GXP_I2COWNADR); writeb(SLAVE_EVT_CLR | SNOOP_EVT_MASK | SLAVE_ACK_ENAB | SLAVE_EVT_STALL, drvdata->base + GXP_I2CSCMD); return 0; } static int gxp_i2c_unreg_slave(struct i2c_client *slave) { struct gxp_i2c_drvdata *drvdata = i2c_get_adapdata(slave->adapter); WARN_ON(!drvdata->slave); writeb(0x00, drvdata->base + GXP_I2COWNADR); writeb(SNOOP_EVT_CLR | SLAVE_EVT_CLR | SNOOP_EVT_MASK | SLAVE_EVT_MASK, drvdata->base + GXP_I2CSCMD); drvdata->slave = NULL; return 0; } #endif static const struct i2c_algorithm gxp_i2c_algo = { .master_xfer = gxp_i2c_master_xfer, .functionality = gxp_i2c_func, #if IS_ENABLED(CONFIG_I2C_SLAVE) .reg_slave = gxp_i2c_reg_slave, .unreg_slave = gxp_i2c_unreg_slave, #endif }; static void gxp_i2c_stop(struct gxp_i2c_drvdata *drvdata) { /* Clear event and send stop */ writeb(MASTER_EVT_CLR | STOP_CMD, drvdata->base + GXP_I2CMCMD); complete(&drvdata->completion); } static void gxp_i2c_restart(struct gxp_i2c_drvdata *drvdata) { u16 value; drvdata->buf = drvdata->curr_msg->buf; drvdata->buf_remaining = drvdata->curr_msg->len; value = drvdata->curr_msg->addr << 9; if (drvdata->curr_msg->flags & I2C_M_RD) { /* Read and clear master event */ value |= MASTER_EVT_CLR | RW_CMD | START_CMD; } else { /* Write and clear master event */ value |= MASTER_EVT_CLR | START_CMD; } drvdata->state = GXP_I2C_ADDR_PHASE; writew(value, drvdata->base + GXP_I2CMCMD); } static void gxp_i2c_chk_addr_ack(struct gxp_i2c_drvdata *drvdata) { u16 value; value = readb(drvdata->base + GXP_I2CSTAT); if (!(value & MASK_ACK)) { /* Got no ack, stop */ drvdata->state = GXP_I2C_ADDR_NACK; gxp_i2c_stop(drvdata); return; } if (drvdata->curr_msg->flags & I2C_M_RD) { /* Start to read data from slave */ if (drvdata->buf_remaining == 0) { /* No more data to read, stop */ drvdata->msgs_remaining--; drvdata->state = GXP_I2C_COMP; gxp_i2c_stop(drvdata); return; } drvdata->state = GXP_I2C_RDATA_PHASE; if (drvdata->buf_remaining == 1) { /* The last data, do not ack */ writeb(MASTER_EVT_CLR | RW_CMD, drvdata->base + GXP_I2CMCMD); } else { /* Read data and ack it */ writeb(MASTER_EVT_CLR | MASTER_ACK_ENAB | RW_CMD, drvdata->base + GXP_I2CMCMD); } } else { /* Start to write first data to slave */ if (drvdata->buf_remaining == 0) { /* No more data to write, stop */ drvdata->msgs_remaining--; drvdata->state = 
GXP_I2C_COMP; gxp_i2c_stop(drvdata); return; } value = *drvdata->buf; value = value << 8; /* Clear master event */ value |= MASTER_EVT_CLR; drvdata->buf++; drvdata->buf_remaining--; drvdata->state = GXP_I2C_WDATA_PHASE; writew(value, drvdata->base + GXP_I2CMCMD); } } static void gxp_i2c_ack_data(struct gxp_i2c_drvdata *drvdata) { u8 value; /* Store the data returned */ value = readb(drvdata->base + GXP_I2CSNPDAT); *drvdata->buf = value; drvdata->buf++; drvdata->buf_remaining--; if (drvdata->buf_remaining == 0) { /* No more data, this message is completed. */ drvdata->msgs_remaining--; if (drvdata->msgs_remaining == 0) { /* No more messages, stop */ drvdata->state = GXP_I2C_COMP; gxp_i2c_stop(drvdata); return; } /* Move to next message and start transfer */ drvdata->curr_msg++; gxp_i2c_restart(drvdata); return; } /* Ack the slave to make it send next byte */ drvdata->state = GXP_I2C_RDATA_PHASE; if (drvdata->buf_remaining == 1) { /* The last data, do not ack */ writeb(MASTER_EVT_CLR | RW_CMD, drvdata->base + GXP_I2CMCMD); } else { /* Read data and ack it */ writeb(MASTER_EVT_CLR | MASTER_ACK_ENAB | RW_CMD, drvdata->base + GXP_I2CMCMD); } } static void gxp_i2c_chk_data_ack(struct gxp_i2c_drvdata *drvdata) { u16 value; value = readb(drvdata->base + GXP_I2CSTAT); if (!(value & MASK_ACK)) { /* Received No ack, stop */ drvdata->state = GXP_I2C_DATA_NACK; gxp_i2c_stop(drvdata); return; } /* Got ack, check if there is more data to write */ if (drvdata->buf_remaining == 0) { /* No more data, this message is completed */ drvdata->msgs_remaining--; if (drvdata->msgs_remaining == 0) { /* No more messages, stop */ drvdata->state = GXP_I2C_COMP; gxp_i2c_stop(drvdata); return; } /* Move to next message and start transfer */ drvdata->curr_msg++; gxp_i2c_restart(drvdata); return; } /* Write data to slave */ value = *drvdata->buf; value = value << 8; /* Clear master event */ value |= MASTER_EVT_CLR; drvdata->buf++; drvdata->buf_remaining--; drvdata->state = GXP_I2C_WDATA_PHASE; writew(value, drvdata->base + GXP_I2CMCMD); } static bool gxp_i2c_slave_irq_handler(struct gxp_i2c_drvdata *drvdata) { u8 value; u8 buf; int ret; value = readb(drvdata->base + GXP_I2CEVTERR); /* Received start or stop event */ if (value & MASK_SLAVE_CMD_EVENT) { value = readb(drvdata->base + GXP_I2CSTAT); /* Master sent stop */ if (value & MASK_STOP_EVENT) { if (drvdata->stopped == 0) i2c_slave_event(drvdata->slave, I2C_SLAVE_STOP, &buf); writeb(SLAVE_EVT_CLR | SNOOP_EVT_MASK | SLAVE_ACK_ENAB | SLAVE_EVT_STALL, drvdata->base + GXP_I2CSCMD); drvdata->stopped = 1; } else { /* Master sent start and wants to read */ drvdata->stopped = 0; if (value & MASK_RW) { i2c_slave_event(drvdata->slave, I2C_SLAVE_READ_REQUESTED, &buf); value = buf << 8 | (SLAVE_EVT_CLR | SNOOP_EVT_MASK | SLAVE_EVT_STALL); writew(value, drvdata->base + GXP_I2CSCMD); } else { /* Master wants to write to us */ ret = i2c_slave_event(drvdata->slave, I2C_SLAVE_WRITE_REQUESTED, &buf); if (!ret) { /* Ack next byte from master */ writeb(SLAVE_EVT_CLR | SNOOP_EVT_MASK | SLAVE_ACK_ENAB | SLAVE_EVT_STALL, drvdata->base + GXP_I2CSCMD); } else { /* Nack next byte from master */ writeb(SLAVE_EVT_CLR | SNOOP_EVT_MASK | SLAVE_EVT_STALL, drvdata->base + GXP_I2CSCMD); } } } } else if (value & MASK_SLAVE_DATA_EVENT) { value = readb(drvdata->base + GXP_I2CSTAT); /* Master wants to read */ if (value & MASK_RW) { /* Master wants another byte */ if (value & MASK_ACK) { i2c_slave_event(drvdata->slave, I2C_SLAVE_READ_PROCESSED, &buf); value = buf << 8 | (SLAVE_EVT_CLR | SNOOP_EVT_MASK | 
SLAVE_EVT_STALL); writew(value, drvdata->base + GXP_I2CSCMD); } else { /* No more bytes needed */ writew(SLAVE_EVT_CLR | SNOOP_EVT_MASK | SLAVE_ACK_ENAB | SLAVE_EVT_STALL, drvdata->base + GXP_I2CSCMD); } } else { /* Master wants to write to us */ value = readb(drvdata->base + GXP_I2CSNPDAT); buf = (uint8_t)value; ret = i2c_slave_event(drvdata->slave, I2C_SLAVE_WRITE_RECEIVED, &buf); if (!ret) { /* Ack next byte from master */ writeb(SLAVE_EVT_CLR | SNOOP_EVT_MASK | SLAVE_ACK_ENAB | SLAVE_EVT_STALL, drvdata->base + GXP_I2CSCMD); } else { /* Nack next byte from master */ writeb(SLAVE_EVT_CLR | SNOOP_EVT_MASK | SLAVE_EVT_STALL, drvdata->base + GXP_I2CSCMD); } } } else { return false; } return true; } static irqreturn_t gxp_i2c_irq_handler(int irq, void *_drvdata) { struct gxp_i2c_drvdata *drvdata = (struct gxp_i2c_drvdata *)_drvdata; u32 value; /* Check if the interrupt is for the current engine */ regmap_read(i2cg_map, GXP_I2CINTSTAT, &value); if (!(value & BIT(drvdata->engine))) return IRQ_NONE; value = readb(drvdata->base + GXP_I2CEVTERR); /* Error */ if (value & ~(MASK_MASTER_EVENT | MASK_SLAVE_CMD_EVENT | MASK_SLAVE_DATA_EVENT)) { /* Clear all events */ writeb(0x00, drvdata->base + GXP_I2CEVTERR); drvdata->state = GXP_I2C_ERROR; gxp_i2c_stop(drvdata); return IRQ_HANDLED; } if (IS_ENABLED(CONFIG_I2C_SLAVE)) { /* Slave mode */ if (value & (MASK_SLAVE_CMD_EVENT | MASK_SLAVE_DATA_EVENT)) { if (gxp_i2c_slave_irq_handler(drvdata)) return IRQ_HANDLED; return IRQ_NONE; } } /* Master mode */ switch (drvdata->state) { case GXP_I2C_ADDR_PHASE: gxp_i2c_chk_addr_ack(drvdata); break; case GXP_I2C_RDATA_PHASE: gxp_i2c_ack_data(drvdata); break; case GXP_I2C_WDATA_PHASE: gxp_i2c_chk_data_ack(drvdata); break; } return IRQ_HANDLED; } static void gxp_i2c_init(struct gxp_i2c_drvdata *drvdata) { drvdata->state = GXP_I2C_IDLE; writeb(2000000 / drvdata->t.bus_freq_hz, drvdata->base + GXP_I2CFREQDIV); writeb(FILTER_CNT | FAIRNESS_CNT, drvdata->base + GXP_I2CFLTFAIR); writeb(GXP_DATA_EDGE_RST_CTRL, drvdata->base + GXP_I2CTMOEDG); writeb(0x00, drvdata->base + GXP_I2CCYCTIM); writeb(0x00, drvdata->base + GXP_I2CSNPAA); writeb(0x00, drvdata->base + GXP_I2CADVFEAT); writeb(SNOOP_EVT_CLR | SLAVE_EVT_CLR | SNOOP_EVT_MASK | SLAVE_EVT_MASK, drvdata->base + GXP_I2CSCMD); writeb(MASTER_EVT_CLR, drvdata->base + GXP_I2CMCMD); writeb(0x00, drvdata->base + GXP_I2CEVTERR); writeb(0x00, drvdata->base + GXP_I2COWNADR); } static int gxp_i2c_probe(struct platform_device *pdev) { struct gxp_i2c_drvdata *drvdata; int rc; struct i2c_adapter *adapter; if (!i2cg_map) { i2cg_map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "hpe,sysreg"); if (IS_ERR(i2cg_map)) { return dev_err_probe(&pdev->dev, PTR_ERR(i2cg_map), "failed to map i2cg_handle\n"); } /* Disable interrupt */ regmap_update_bits(i2cg_map, GXP_I2CINTEN, 0x00000FFF, 0); } drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL); if (!drvdata) return -ENOMEM; platform_set_drvdata(pdev, drvdata); drvdata->dev = &pdev->dev; init_completion(&drvdata->completion); drvdata->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(drvdata->base)) return PTR_ERR(drvdata->base); /* Use physical memory address to determine which I2C engine this is. 
*/ drvdata->engine = ((size_t)drvdata->base & 0xf00) >> 8; if (drvdata->engine >= GXP_MAX_I2C_ENGINE) { return dev_err_probe(&pdev->dev, -EINVAL, "i2c engine%u is unsupported\n", drvdata->engine); } rc = platform_get_irq(pdev, 0); if (rc < 0) return rc; drvdata->irq = rc; rc = devm_request_irq(&pdev->dev, drvdata->irq, gxp_i2c_irq_handler, IRQF_SHARED, gxp_i2c_name[drvdata->engine], drvdata); if (rc < 0) return dev_err_probe(&pdev->dev, rc, "irq request failed\n"); i2c_parse_fw_timings(&pdev->dev, &drvdata->t, true); gxp_i2c_init(drvdata); /* Enable interrupt */ regmap_update_bits(i2cg_map, GXP_I2CINTEN, BIT(drvdata->engine), BIT(drvdata->engine)); adapter = &drvdata->adapter; i2c_set_adapdata(adapter, drvdata); adapter->owner = THIS_MODULE; strscpy(adapter->name, "HPE GXP I2C adapter", sizeof(adapter->name)); adapter->algo = &gxp_i2c_algo; adapter->dev.parent = &pdev->dev; adapter->dev.of_node = pdev->dev.of_node; rc = i2c_add_adapter(adapter); if (rc) return dev_err_probe(&pdev->dev, rc, "i2c add adapter failed\n"); return 0; } static void gxp_i2c_remove(struct platform_device *pdev) { struct gxp_i2c_drvdata *drvdata = platform_get_drvdata(pdev); /* Disable interrupt */ regmap_update_bits(i2cg_map, GXP_I2CINTEN, BIT(drvdata->engine), 0); i2c_del_adapter(&drvdata->adapter); } static const struct of_device_id gxp_i2c_of_match[] = { { .compatible = "hpe,gxp-i2c" }, {}, }; MODULE_DEVICE_TABLE(of, gxp_i2c_of_match); static struct platform_driver gxp_i2c_driver = { .probe = gxp_i2c_probe, .remove_new = gxp_i2c_remove, .driver = { .name = "gxp-i2c", .of_match_table = gxp_i2c_of_match, }, }; module_platform_driver(gxp_i2c_driver); MODULE_AUTHOR("Nick Hawkins <[email protected]>"); MODULE_DESCRIPTION("HPE GXP I2C bus driver"); MODULE_LICENSE("GPL");
linux-master
drivers/i2c/busses/i2c-gxp.c
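Editor's note: gxp_i2c_init() above programs GXP_I2CFREQDIV with 2000000 / bus_freq_hz, which suggests a 2 MHz reference clock feeding the divider (an inference from the code, not from HPE documentation). The standalone check below shows the resulting register values for the two common bus speeds.

#include <stdio.h>

int main(void)
{
	unsigned long freqs[] = { 100000, 400000 };	/* standard and fast mode */

	for (int i = 0; i < 2; i++)
		printf("bus %lu Hz -> I2CFREQDIV %lu\n",
		       freqs[i], 2000000UL / freqs[i]);
	return 0;	/* prints 20 for 100 kHz, 5 for 400 kHz */
}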
// SPDX-License-Identifier: GPL-2.0-or-later /* * drivers/i2c/busses/i2c-ibm_iic.c * * Support for the IIC peripheral on IBM PPC 4xx * * Copyright (c) 2003, 2004 Zultys Technologies. * Eugene Surovegin <[email protected]> or <[email protected]> * * Copyright (c) 2008 PIKA Technologies * Sean MacLennan <[email protected]> * * Based on original work by * Ian DaSilva <[email protected]> * Armin Kuster <[email protected]> * Matt Porter <[email protected]> * * Copyright 2000-2003 MontaVista Software Inc. * * Original driver version was highly leveraged from i2c-elektor.c * * Copyright 1995-97 Simon G. Vogl * 1998-99 Hans Berglund * * With some changes from Kyösti Mälkki <[email protected]> * and even Frodo Looijaard <[email protected]> */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/sched/signal.h> #include <asm/irq.h> #include <linux/io.h> #include <linux/i2c.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/platform_device.h> #include "i2c-ibm_iic.h" #define DRIVER_VERSION "2.2" MODULE_DESCRIPTION("IBM IIC driver v" DRIVER_VERSION); MODULE_LICENSE("GPL"); static bool iic_force_poll; module_param(iic_force_poll, bool, 0); MODULE_PARM_DESC(iic_force_poll, "Force polling mode"); static bool iic_force_fast; module_param(iic_force_fast, bool, 0); MODULE_PARM_DESC(iic_force_fast, "Force fast mode (400 kHz)"); #define DBG_LEVEL 0 #ifdef DBG #undef DBG #endif #ifdef DBG2 #undef DBG2 #endif #if DBG_LEVEL > 0 # define DBG(f,x...) printk(KERN_DEBUG "ibm-iic" f, ##x) #else # define DBG(f,x...) ((void)0) #endif #if DBG_LEVEL > 1 # define DBG2(f,x...) DBG(f, ##x) #else # define DBG2(f,x...) ((void)0) #endif #if DBG_LEVEL > 2 static void dump_iic_regs(const char* header, struct ibm_iic_private* dev) { volatile struct iic_regs __iomem *iic = dev->vaddr; printk(KERN_DEBUG "ibm-iic%d: %s\n", dev->idx, header); printk(KERN_DEBUG " cntl = 0x%02x, mdcntl = 0x%02x\n" " sts = 0x%02x, extsts = 0x%02x\n" " clkdiv = 0x%02x, xfrcnt = 0x%02x\n" " xtcntlss = 0x%02x, directcntl = 0x%02x\n", in_8(&iic->cntl), in_8(&iic->mdcntl), in_8(&iic->sts), in_8(&iic->extsts), in_8(&iic->clkdiv), in_8(&iic->xfrcnt), in_8(&iic->xtcntlss), in_8(&iic->directcntl)); } # define DUMP_REGS(h,dev) dump_iic_regs((h),(dev)) #else # define DUMP_REGS(h,dev) ((void)0) #endif /* Bus timings (in ns) for bit-banging */ static struct ibm_iic_timings { unsigned int hd_sta; unsigned int su_sto; unsigned int low; unsigned int high; unsigned int buf; } timings [] = { /* Standard mode (100 KHz) */ { .hd_sta = 4000, .su_sto = 4000, .low = 4700, .high = 4000, .buf = 4700, }, /* Fast mode (400 KHz) */ { .hd_sta = 600, .su_sto = 600, .low = 1300, .high = 600, .buf = 1300, }}; /* Enable/disable interrupt generation */ static inline void iic_interrupt_mode(struct ibm_iic_private* dev, int enable) { out_8(&dev->vaddr->intmsk, enable ? INTRMSK_EIMTC : 0); } /* * Initialize IIC interface. 
*/ static void iic_dev_init(struct ibm_iic_private* dev) { volatile struct iic_regs __iomem *iic = dev->vaddr; DBG("%d: init\n", dev->idx); /* Clear master address */ out_8(&iic->lmadr, 0); out_8(&iic->hmadr, 0); /* Clear slave address */ out_8(&iic->lsadr, 0); out_8(&iic->hsadr, 0); /* Clear status & extended status */ out_8(&iic->sts, STS_SCMP | STS_IRQA); out_8(&iic->extsts, EXTSTS_IRQP | EXTSTS_IRQD | EXTSTS_LA | EXTSTS_ICT | EXTSTS_XFRA); /* Set clock divider */ out_8(&iic->clkdiv, dev->clckdiv); /* Clear transfer count */ out_8(&iic->xfrcnt, 0); /* Clear extended control and status */ out_8(&iic->xtcntlss, XTCNTLSS_SRC | XTCNTLSS_SRS | XTCNTLSS_SWC | XTCNTLSS_SWS); /* Clear control register */ out_8(&iic->cntl, 0); /* Enable interrupts if possible */ iic_interrupt_mode(dev, dev->irq >= 0); /* Set mode control */ out_8(&iic->mdcntl, MDCNTL_FMDB | MDCNTL_EINT | MDCNTL_EUBS | (dev->fast_mode ? MDCNTL_FSM : 0)); DUMP_REGS("iic_init", dev); } /* * Reset IIC interface */ static void iic_dev_reset(struct ibm_iic_private* dev) { volatile struct iic_regs __iomem *iic = dev->vaddr; int i; u8 dc; DBG("%d: soft reset\n", dev->idx); DUMP_REGS("reset", dev); /* Place chip in the reset state */ out_8(&iic->xtcntlss, XTCNTLSS_SRST); /* Check if bus is free */ dc = in_8(&iic->directcntl); if (!DIRCTNL_FREE(dc)){ DBG("%d: trying to regain bus control\n", dev->idx); /* Try to set bus free state */ out_8(&iic->directcntl, DIRCNTL_SDAC | DIRCNTL_SCC); /* Wait until we regain bus control */ for (i = 0; i < 100; ++i){ dc = in_8(&iic->directcntl); if (DIRCTNL_FREE(dc)) break; /* Toggle SCL line */ dc ^= DIRCNTL_SCC; out_8(&iic->directcntl, dc); udelay(10); dc ^= DIRCNTL_SCC; out_8(&iic->directcntl, dc); /* be nice */ cond_resched(); } } /* Remove reset */ out_8(&iic->xtcntlss, 0); /* Reinitialize interface */ iic_dev_init(dev); } /* * Do 0-length transaction using bit-banging through IIC_DIRECTCNTL register. */ /* Wait for SCL and/or SDA to be high */ static int iic_dc_wait(volatile struct iic_regs __iomem *iic, u8 mask) { unsigned long x = jiffies + HZ / 28 + 2; while ((in_8(&iic->directcntl) & mask) != mask){ if (unlikely(time_after(jiffies, x))) return -1; cond_resched(); } return 0; } static int iic_smbus_quick(struct ibm_iic_private* dev, const struct i2c_msg* p) { volatile struct iic_regs __iomem *iic = dev->vaddr; const struct ibm_iic_timings *t = &timings[dev->fast_mode ? 1 : 0]; u8 mask, v, sda; int i, res; /* Only 7-bit addresses are supported */ if (unlikely(p->flags & I2C_M_TEN)){ DBG("%d: smbus_quick - 10 bit addresses are not supported\n", dev->idx); return -EINVAL; } DBG("%d: smbus_quick(0x%02x)\n", dev->idx, p->addr); /* Reset IIC interface */ out_8(&iic->xtcntlss, XTCNTLSS_SRST); /* Wait for bus to become free */ out_8(&iic->directcntl, DIRCNTL_SDAC | DIRCNTL_SCC); if (unlikely(iic_dc_wait(iic, DIRCNTL_MSDA | DIRCNTL_MSC))) goto err; ndelay(t->buf); /* START */ out_8(&iic->directcntl, DIRCNTL_SCC); sda = 0; ndelay(t->hd_sta); /* Send address */ v = i2c_8bit_addr_from_msg(p); for (i = 0, mask = 0x80; i < 8; ++i, mask >>= 1){ out_8(&iic->directcntl, sda); ndelay(t->low / 2); sda = (v & mask) ? 
DIRCNTL_SDAC : 0; out_8(&iic->directcntl, sda); ndelay(t->low / 2); out_8(&iic->directcntl, DIRCNTL_SCC | sda); if (unlikely(iic_dc_wait(iic, DIRCNTL_MSC))) goto err; ndelay(t->high); } /* ACK */ out_8(&iic->directcntl, sda); ndelay(t->low / 2); out_8(&iic->directcntl, DIRCNTL_SDAC); ndelay(t->low / 2); out_8(&iic->directcntl, DIRCNTL_SDAC | DIRCNTL_SCC); if (unlikely(iic_dc_wait(iic, DIRCNTL_MSC))) goto err; res = (in_8(&iic->directcntl) & DIRCNTL_MSDA) ? -EREMOTEIO : 1; ndelay(t->high); /* STOP */ out_8(&iic->directcntl, 0); ndelay(t->low); out_8(&iic->directcntl, DIRCNTL_SCC); if (unlikely(iic_dc_wait(iic, DIRCNTL_MSC))) goto err; ndelay(t->su_sto); out_8(&iic->directcntl, DIRCNTL_SDAC | DIRCNTL_SCC); ndelay(t->buf); DBG("%d: smbus_quick -> %s\n", dev->idx, res < 0 ? "NACK" : "ACK"); out: /* Remove reset */ out_8(&iic->xtcntlss, 0); /* Reinitialize interface */ iic_dev_init(dev); return res; err: DBG("%d: smbus_quick - bus is stuck\n", dev->idx); res = -EREMOTEIO; goto out; } /* * IIC interrupt handler */ static irqreturn_t iic_handler(int irq, void *dev_id) { struct ibm_iic_private* dev = (struct ibm_iic_private*)dev_id; volatile struct iic_regs __iomem *iic = dev->vaddr; DBG2("%d: irq handler, STS = 0x%02x, EXTSTS = 0x%02x\n", dev->idx, in_8(&iic->sts), in_8(&iic->extsts)); /* Acknowledge IRQ and wakeup iic_wait_for_tc */ out_8(&iic->sts, STS_IRQA | STS_SCMP); wake_up_interruptible(&dev->wq); return IRQ_HANDLED; } /* * Get master transfer result and clear errors if any. * Returns the number of actually transferred bytes or error (<0) */ static int iic_xfer_result(struct ibm_iic_private* dev) { volatile struct iic_regs __iomem *iic = dev->vaddr; if (unlikely(in_8(&iic->sts) & STS_ERR)){ DBG("%d: xfer error, EXTSTS = 0x%02x\n", dev->idx, in_8(&iic->extsts)); /* Clear errors and possible pending IRQs */ out_8(&iic->extsts, EXTSTS_IRQP | EXTSTS_IRQD | EXTSTS_LA | EXTSTS_ICT | EXTSTS_XFRA); /* Flush master data buffer */ out_8(&iic->mdcntl, in_8(&iic->mdcntl) | MDCNTL_FMDB); /* Is bus free? * If error happened during combined xfer * IIC interface is usually stuck in some strange * state, the only way out - soft reset. */ if ((in_8(&iic->extsts) & EXTSTS_BCS_MASK) != EXTSTS_BCS_FREE){ DBG("%d: bus is stuck, resetting\n", dev->idx); iic_dev_reset(dev); } return -EREMOTEIO; } else return in_8(&iic->xfrcnt) & XFRCNT_MTC_MASK; } /* * Try to abort active transfer. */ static void iic_abort_xfer(struct ibm_iic_private* dev) { volatile struct iic_regs __iomem *iic = dev->vaddr; unsigned long x; DBG("%d: iic_abort_xfer\n", dev->idx); out_8(&iic->cntl, CNTL_HMT); /* * Wait for the abort command to complete. * It's not worth optimizing, just poll (timeout >= 1 tick) */ x = jiffies + 2; while ((in_8(&iic->extsts) & EXTSTS_BCS_MASK) != EXTSTS_BCS_FREE){ if (time_after(jiffies, x)){ DBG("%d: abort timeout, resetting...\n", dev->idx); iic_dev_reset(dev); return; } schedule(); } /* Just to clear errors */ iic_xfer_result(dev); } /* * Wait for master transfer to complete. * It puts the current process to sleep until we get an interrupt or the timeout expires. 
* Returns the number of transferred bytes or error (<0) */ static int iic_wait_for_tc(struct ibm_iic_private* dev){ volatile struct iic_regs __iomem *iic = dev->vaddr; int ret = 0; if (dev->irq >= 0){ /* Interrupt mode */ ret = wait_event_interruptible_timeout(dev->wq, !(in_8(&iic->sts) & STS_PT), dev->adap.timeout); if (unlikely(ret < 0)) DBG("%d: wait interrupted\n", dev->idx); else if (unlikely(in_8(&iic->sts) & STS_PT)){ DBG("%d: wait timeout\n", dev->idx); ret = -ETIMEDOUT; } } else { /* Polling mode */ unsigned long x = jiffies + dev->adap.timeout; while (in_8(&iic->sts) & STS_PT){ if (unlikely(time_after(jiffies, x))){ DBG("%d: poll timeout\n", dev->idx); ret = -ETIMEDOUT; break; } if (signal_pending(current)){ DBG("%d: poll interrupted\n", dev->idx); ret = -ERESTARTSYS; break; } schedule(); } } if (unlikely(ret < 0)) iic_abort_xfer(dev); else ret = iic_xfer_result(dev); DBG2("%d: iic_wait_for_tc -> %d\n", dev->idx, ret); return ret; } /* * Low level master transfer routine */ static int iic_xfer_bytes(struct ibm_iic_private* dev, struct i2c_msg* pm, int combined_xfer) { volatile struct iic_regs __iomem *iic = dev->vaddr; char* buf = pm->buf; int i, j, loops, ret = 0; int len = pm->len; u8 cntl = (in_8(&iic->cntl) & CNTL_AMD) | CNTL_PT; if (pm->flags & I2C_M_RD) cntl |= CNTL_RW; loops = (len + 3) / 4; for (i = 0; i < loops; ++i, len -= 4){ int count = len > 4 ? 4 : len; u8 cmd = cntl | ((count - 1) << CNTL_TCT_SHIFT); if (!(cntl & CNTL_RW)) for (j = 0; j < count; ++j) out_8((void __iomem *)&iic->mdbuf, *buf++); if (i < loops - 1) cmd |= CNTL_CHT; else if (combined_xfer) cmd |= CNTL_RPST; DBG2("%d: xfer_bytes, %d, CNTL = 0x%02x\n", dev->idx, count, cmd); /* Start transfer */ out_8(&iic->cntl, cmd); /* Wait for completion */ ret = iic_wait_for_tc(dev); if (unlikely(ret < 0)) break; else if (unlikely(ret != count)){ DBG("%d: xfer_bytes, requested %d, transferred %d\n", dev->idx, count, ret); /* If it's not a last part of xfer, abort it */ if (combined_xfer || (i < loops - 1)) iic_abort_xfer(dev); ret = -EREMOTEIO; break; } if (cntl & CNTL_RW) for (j = 0; j < count; ++j) *buf++ = in_8((void __iomem *)&iic->mdbuf); } return ret > 0 ? 0 : ret; } /* * Set target slave address for master transfer */ static inline void iic_address(struct ibm_iic_private* dev, struct i2c_msg* msg) { volatile struct iic_regs __iomem *iic = dev->vaddr; u16 addr = msg->addr; DBG2("%d: iic_address, 0x%03x (%d-bit)\n", dev->idx, addr, msg->flags & I2C_M_TEN ? 10 : 7); if (msg->flags & I2C_M_TEN){ out_8(&iic->cntl, CNTL_AMD); out_8(&iic->lmadr, addr); out_8(&iic->hmadr, 0xf0 | ((addr >> 7) & 0x06)); } else { out_8(&iic->cntl, 0); out_8(&iic->lmadr, addr << 1); } } static inline int iic_invalid_address(const struct i2c_msg* p) { return (p->addr > 0x3ff) || (!(p->flags & I2C_M_TEN) && (p->addr > 0x7f)); } static inline int iic_address_neq(const struct i2c_msg* p1, const struct i2c_msg* p2) { return (p1->addr != p2->addr) || ((p1->flags & I2C_M_TEN) != (p2->flags & I2C_M_TEN)); } /* * Generic master transfer entrypoint. * Returns the number of processed messages or error (<0) */ static int iic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct ibm_iic_private* dev = (struct ibm_iic_private*)(i2c_get_adapdata(adap)); volatile struct iic_regs __iomem *iic = dev->vaddr; int i, ret = 0; DBG2("%d: iic_xfer, %d msg(s)\n", dev->idx, num); /* Check the sanity of the passed messages. * Uhh, generic i2c layer is more suitable place for such code... 
*/ if (unlikely(iic_invalid_address(&msgs[0]))){ DBG("%d: invalid address 0x%03x (%d-bit)\n", dev->idx, msgs[0].addr, msgs[0].flags & I2C_M_TEN ? 10 : 7); return -EINVAL; } for (i = 0; i < num; ++i){ if (unlikely(msgs[i].len <= 0)){ if (num == 1 && !msgs[0].len){ /* Special case for I2C_SMBUS_QUICK emulation. * IBM IIC doesn't support 0-length transactions * so we have to emulate them using bit-banging. */ return iic_smbus_quick(dev, &msgs[0]); } DBG("%d: invalid len %d in msg[%d]\n", dev->idx, msgs[i].len, i); return -EINVAL; } if (unlikely(iic_address_neq(&msgs[0], &msgs[i]))){ DBG("%d: invalid addr in msg[%d]\n", dev->idx, i); return -EINVAL; } } /* Check bus state */ if (unlikely((in_8(&iic->extsts) & EXTSTS_BCS_MASK) != EXTSTS_BCS_FREE)){ DBG("%d: iic_xfer, bus is not free\n", dev->idx); /* Usually it means something serious has happened. * We *cannot* have unfinished previous transfer * so it doesn't make any sense to try to stop it. * Probably we were not able to recover from the * previous error. * The only *reasonable* thing I can think of here * is soft reset. --ebs */ iic_dev_reset(dev); if ((in_8(&iic->extsts) & EXTSTS_BCS_MASK) != EXTSTS_BCS_FREE){ DBG("%d: iic_xfer, bus is still not free\n", dev->idx); return -EREMOTEIO; } } else { /* Flush master data buffer (just in case) */ out_8(&iic->mdcntl, in_8(&iic->mdcntl) | MDCNTL_FMDB); } /* Load slave address */ iic_address(dev, &msgs[0]); /* Do real transfer */ for (i = 0; i < num && !ret; ++i) ret = iic_xfer_bytes(dev, &msgs[i], i < num - 1); return ret < 0 ? ret : num; } static u32 iic_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR; } static const struct i2c_algorithm iic_algo = { .master_xfer = iic_xfer, .functionality = iic_func }; /* * Calculates IICx_CLCKDIV value for a specific OPB clock frequency */ static inline u8 iic_clckdiv(unsigned int opb) { /* Compatibility kludge, should go away after all cards * are fixed to fill correct value for opbfreq. * Previous driver version used hardcoded divider value 4, * it corresponds to OPB frequency from the range (40, 50] MHz */ if (!opb){ printk(KERN_WARNING "ibm-iic: using compatibility value for OPB freq," " fix your board specific setup\n"); opb = 50000000; } /* Convert to MHz */ opb /= 1000000; if (opb < 20 || opb > 150){ printk(KERN_WARNING "ibm-iic: invalid OPB clock frequency %u MHz\n", opb); opb = opb < 20 ? 20 : 150; } return (u8)((opb + 9) / 10 - 1); } static int iic_request_irq(struct platform_device *ofdev, struct ibm_iic_private *dev) { struct device_node *np = ofdev->dev.of_node; int irq; if (iic_force_poll) return 0; irq = irq_of_parse_and_map(np, 0); if (!irq) { dev_err(&ofdev->dev, "irq_of_parse_and_map failed\n"); return 0; } /* Disable interrupts until we finish initialization, assumes * level-sensitive IRQ setup... 
*/ iic_interrupt_mode(dev, 0); if (request_irq(irq, iic_handler, 0, "IBM IIC", dev)) { dev_err(&ofdev->dev, "request_irq %d failed\n", irq); /* Fallback to the polling mode */ return 0; } return irq; } /* * Register single IIC interface */ static int iic_probe(struct platform_device *ofdev) { struct device_node *np = ofdev->dev.of_node; struct ibm_iic_private *dev; struct i2c_adapter *adap; const u32 *freq; int ret; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; platform_set_drvdata(ofdev, dev); dev->vaddr = of_iomap(np, 0); if (dev->vaddr == NULL) { dev_err(&ofdev->dev, "failed to iomap device\n"); ret = -ENXIO; goto error_cleanup; } init_waitqueue_head(&dev->wq); dev->irq = iic_request_irq(ofdev, dev); if (!dev->irq) dev_warn(&ofdev->dev, "using polling mode\n"); /* Board specific settings */ if (iic_force_fast || of_get_property(np, "fast-mode", NULL)) dev->fast_mode = 1; freq = of_get_property(np, "clock-frequency", NULL); if (freq == NULL) { freq = of_get_property(np->parent, "clock-frequency", NULL); if (freq == NULL) { dev_err(&ofdev->dev, "Unable to get bus frequency\n"); ret = -EINVAL; goto error_cleanup; } } dev->clckdiv = iic_clckdiv(*freq); dev_dbg(&ofdev->dev, "clckdiv = %d\n", dev->clckdiv); /* Initialize IIC interface */ iic_dev_init(dev); /* Register it with i2c layer */ adap = &dev->adap; adap->dev.parent = &ofdev->dev; adap->dev.of_node = of_node_get(np); strscpy(adap->name, "IBM IIC", sizeof(adap->name)); i2c_set_adapdata(adap, dev); adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD; adap->algo = &iic_algo; adap->timeout = HZ; ret = i2c_add_adapter(adap); if (ret < 0) goto error_cleanup; dev_info(&ofdev->dev, "using %s mode\n", dev->fast_mode ? "fast (400 kHz)" : "standard (100 kHz)"); return 0; error_cleanup: if (dev->irq) { iic_interrupt_mode(dev, 0); free_irq(dev->irq, dev); } if (dev->vaddr) iounmap(dev->vaddr); kfree(dev); return ret; } /* * Cleanup initialized IIC interface */ static void iic_remove(struct platform_device *ofdev) { struct ibm_iic_private *dev = platform_get_drvdata(ofdev); i2c_del_adapter(&dev->adap); if (dev->irq) { iic_interrupt_mode(dev, 0); free_irq(dev->irq, dev); } iounmap(dev->vaddr); kfree(dev); } static const struct of_device_id ibm_iic_match[] = { { .compatible = "ibm,iic", }, {} }; MODULE_DEVICE_TABLE(of, ibm_iic_match); static struct platform_driver ibm_iic_driver = { .driver = { .name = "ibm-iic", .of_match_table = ibm_iic_match, }, .probe = iic_probe, .remove_new = iic_remove, }; module_platform_driver(ibm_iic_driver);
linux-master
drivers/i2c/busses/i2c-ibm_iic.c
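Editor's note: iic_clckdiv() above clamps the OPB frequency to 20..150 MHz and then computes (opb + 9) / 10 - 1, a round-up divide by 10 minus one, so each divider value covers a 10 MHz band; (40, 50] MHz maps to 4, matching the compatibility default mentioned in the driver comment. The standalone check below reproduces just that arithmetic.

#include <stdio.h>

static unsigned char clckdiv(unsigned int opb_mhz)
{
	if (opb_mhz < 20)
		opb_mhz = 20;
	if (opb_mhz > 150)
		opb_mhz = 150;
	return (unsigned char)((opb_mhz + 9) / 10 - 1);
}

int main(void)
{
	printf("%u %u %u %u\n", clckdiv(20), clckdiv(41),
	       clckdiv(50), clckdiv(150));
	/* prints: 1 4 4 14 */
	return 0;
}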
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2005 Simtec Electronics * Ben Dooks <[email protected]> * * Simtec Generic I2C Controller */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> struct simtec_i2c_data { struct resource *ioarea; void __iomem *reg; struct i2c_adapter adap; struct i2c_algo_bit_data bit; }; #define CMD_SET_SDA (1<<2) #define CMD_SET_SCL (1<<3) #define STATE_SDA (1<<0) #define STATE_SCL (1<<1) /* i2c bit-bus functions */ static void simtec_i2c_setsda(void *pw, int state) { struct simtec_i2c_data *pd = pw; writeb(CMD_SET_SDA | (state ? STATE_SDA : 0), pd->reg); } static void simtec_i2c_setscl(void *pw, int state) { struct simtec_i2c_data *pd = pw; writeb(CMD_SET_SCL | (state ? STATE_SCL : 0), pd->reg); } static int simtec_i2c_getsda(void *pw) { struct simtec_i2c_data *pd = pw; return readb(pd->reg) & STATE_SDA ? 1 : 0; } static int simtec_i2c_getscl(void *pw) { struct simtec_i2c_data *pd = pw; return readb(pd->reg) & STATE_SCL ? 1 : 0; } /* device registration */ static int simtec_i2c_probe(struct platform_device *dev) { struct simtec_i2c_data *pd; struct resource *res; int size; int ret; pd = kzalloc(sizeof(struct simtec_i2c_data), GFP_KERNEL); if (pd == NULL) return -ENOMEM; platform_set_drvdata(dev, pd); res = platform_get_resource(dev, IORESOURCE_MEM, 0); if (res == NULL) { dev_err(&dev->dev, "cannot find IO resource\n"); ret = -ENOENT; goto err; } size = resource_size(res); pd->ioarea = request_mem_region(res->start, size, dev->name); if (pd->ioarea == NULL) { dev_err(&dev->dev, "cannot request IO\n"); ret = -ENXIO; goto err; } pd->reg = ioremap(res->start, size); if (pd->reg == NULL) { dev_err(&dev->dev, "cannot map IO\n"); ret = -ENXIO; goto err_res; } /* setup the private data */ pd->adap.owner = THIS_MODULE; pd->adap.algo_data = &pd->bit; pd->adap.dev.parent = &dev->dev; strscpy(pd->adap.name, "Simtec I2C", sizeof(pd->adap.name)); pd->bit.data = pd; pd->bit.setsda = simtec_i2c_setsda; pd->bit.setscl = simtec_i2c_setscl; pd->bit.getsda = simtec_i2c_getsda; pd->bit.getscl = simtec_i2c_getscl; pd->bit.timeout = HZ; pd->bit.udelay = 20; ret = i2c_bit_add_bus(&pd->adap); if (ret) goto err_all; return 0; err_all: iounmap(pd->reg); err_res: release_mem_region(pd->ioarea->start, size); err: kfree(pd); return ret; } static void simtec_i2c_remove(struct platform_device *dev) { struct simtec_i2c_data *pd = platform_get_drvdata(dev); i2c_del_adapter(&pd->adap); iounmap(pd->reg); release_mem_region(pd->ioarea->start, resource_size(pd->ioarea)); kfree(pd); } /* device driver */ static struct platform_driver simtec_i2c_driver = { .driver = { .name = "simtec-i2c", }, .probe = simtec_i2c_probe, .remove_new = simtec_i2c_remove, }; module_platform_driver(simtec_i2c_driver); MODULE_DESCRIPTION("Simtec Generic I2C Bus driver"); MODULE_AUTHOR("Ben Dooks <[email protected]>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:simtec-i2c");
linux-master
drivers/i2c/busses/i2c-simtec.c
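The Simtec controller above exposes the whole bus through one byte-wide register: each write selects a line with a CMD_SET_* bit and drives its level with a STATE_* bit, and the i2c-algo-bit core handles the actual protocol. With bit.udelay = 20 each half clock is held for roughly 20 us, so the bus should run at about 25 kHz. A standalone illustration of the register encoding (the demo main() is not part of the driver):

/*
 * Standalone sketch of the Simtec command-register encoding used by
 * simtec_i2c_setsda()/simtec_i2c_setscl(): one write selects the line
 * and its level. The defines mirror those in the driver above.
 */
#include <stdio.h>

#define CMD_SET_SDA (1 << 2)
#define CMD_SET_SCL (1 << 3)
#define STATE_SDA   (1 << 0)
#define STATE_SCL   (1 << 1)

static unsigned char set_sda(int state)
{
	return CMD_SET_SDA | (state ? STATE_SDA : 0);
}

static unsigned char set_scl(int state)
{
	return CMD_SET_SCL | (state ? STATE_SCL : 0);
}

int main(void)
{
	printf("SDA high: 0x%02x, SCL low: 0x%02x\n", set_sda(1), set_scl(0));
	return 0;
}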
// SPDX-License-Identifier: GPL-2.0-or-later /* * TI DAVINCI I2C adapter driver. * * Copyright (C) 2006 Texas Instruments. * Copyright (C) 2007 MontaVista Software Inc. * * Updated by Vinod & Sudhakar Feb 2005 * * ---------------------------------------------------------------------------- * * ---------------------------------------------------------------------------- */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/clk.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/cpufreq.h> #include <linux/gpio/consumer.h> #include <linux/of.h> #include <linux/platform_data/i2c-davinci.h> #include <linux/pm_runtime.h> /* ----- global defines ----------------------------------------------- */ #define DAVINCI_I2C_TIMEOUT (1*HZ) #define DAVINCI_I2C_MAX_TRIES 2 #define DAVINCI_I2C_OWN_ADDRESS 0x08 #define I2C_DAVINCI_INTR_ALL (DAVINCI_I2C_IMR_SCD | \ DAVINCI_I2C_IMR_ARDY | \ DAVINCI_I2C_IMR_NACK | \ DAVINCI_I2C_IMR_AL) #define DAVINCI_I2C_OAR_REG 0x00 #define DAVINCI_I2C_IMR_REG 0x04 #define DAVINCI_I2C_STR_REG 0x08 #define DAVINCI_I2C_CLKL_REG 0x0c #define DAVINCI_I2C_CLKH_REG 0x10 #define DAVINCI_I2C_CNT_REG 0x14 #define DAVINCI_I2C_DRR_REG 0x18 #define DAVINCI_I2C_SAR_REG 0x1c #define DAVINCI_I2C_DXR_REG 0x20 #define DAVINCI_I2C_MDR_REG 0x24 #define DAVINCI_I2C_IVR_REG 0x28 #define DAVINCI_I2C_EMDR_REG 0x2c #define DAVINCI_I2C_PSC_REG 0x30 #define DAVINCI_I2C_FUNC_REG 0x48 #define DAVINCI_I2C_DIR_REG 0x4c #define DAVINCI_I2C_DIN_REG 0x50 #define DAVINCI_I2C_DOUT_REG 0x54 #define DAVINCI_I2C_DSET_REG 0x58 #define DAVINCI_I2C_DCLR_REG 0x5c #define DAVINCI_I2C_IVR_AAS 0x07 #define DAVINCI_I2C_IVR_SCD 0x06 #define DAVINCI_I2C_IVR_XRDY 0x05 #define DAVINCI_I2C_IVR_RDR 0x04 #define DAVINCI_I2C_IVR_ARDY 0x03 #define DAVINCI_I2C_IVR_NACK 0x02 #define DAVINCI_I2C_IVR_AL 0x01 #define DAVINCI_I2C_STR_BB BIT(12) #define DAVINCI_I2C_STR_RSFULL BIT(11) #define DAVINCI_I2C_STR_SCD BIT(5) #define DAVINCI_I2C_STR_ARDY BIT(2) #define DAVINCI_I2C_STR_NACK BIT(1) #define DAVINCI_I2C_STR_AL BIT(0) #define DAVINCI_I2C_MDR_NACK BIT(15) #define DAVINCI_I2C_MDR_STT BIT(13) #define DAVINCI_I2C_MDR_STP BIT(11) #define DAVINCI_I2C_MDR_MST BIT(10) #define DAVINCI_I2C_MDR_TRX BIT(9) #define DAVINCI_I2C_MDR_XA BIT(8) #define DAVINCI_I2C_MDR_RM BIT(7) #define DAVINCI_I2C_MDR_IRS BIT(5) #define DAVINCI_I2C_IMR_AAS BIT(6) #define DAVINCI_I2C_IMR_SCD BIT(5) #define DAVINCI_I2C_IMR_XRDY BIT(4) #define DAVINCI_I2C_IMR_RRDY BIT(3) #define DAVINCI_I2C_IMR_ARDY BIT(2) #define DAVINCI_I2C_IMR_NACK BIT(1) #define DAVINCI_I2C_IMR_AL BIT(0) /* set SDA and SCL as GPIO */ #define DAVINCI_I2C_FUNC_PFUNC0 BIT(0) /* set SCL as output when used as GPIO*/ #define DAVINCI_I2C_DIR_PDIR0 BIT(0) /* set SDA as output when used as GPIO*/ #define DAVINCI_I2C_DIR_PDIR1 BIT(1) /* read SCL GPIO level */ #define DAVINCI_I2C_DIN_PDIN0 BIT(0) /* read SDA GPIO level */ #define DAVINCI_I2C_DIN_PDIN1 BIT(1) /*set the SCL GPIO high */ #define DAVINCI_I2C_DSET_PDSET0 BIT(0) /*set the SDA GPIO high */ #define DAVINCI_I2C_DSET_PDSET1 BIT(1) /* set the SCL GPIO low */ #define DAVINCI_I2C_DCLR_PDCLR0 BIT(0) /* set the SDA GPIO low */ #define DAVINCI_I2C_DCLR_PDCLR1 BIT(1) /* timeout for pm runtime autosuspend */ #define DAVINCI_I2C_PM_TIMEOUT 1000 /* ms */ struct davinci_i2c_dev { struct device *dev; void __iomem *base; struct completion cmd_complete; 
struct clk *clk; int cmd_err; u8 *buf; size_t buf_len; int irq; int stop; u8 terminate; struct i2c_adapter adapter; #ifdef CONFIG_CPU_FREQ struct notifier_block freq_transition; #endif struct davinci_i2c_platform_data *pdata; }; /* default platform data to use if not supplied in the platform_device */ static struct davinci_i2c_platform_data davinci_i2c_platform_data_default = { .bus_freq = 100, .bus_delay = 0, }; static inline void davinci_i2c_write_reg(struct davinci_i2c_dev *i2c_dev, int reg, u16 val) { writew_relaxed(val, i2c_dev->base + reg); } static inline u16 davinci_i2c_read_reg(struct davinci_i2c_dev *i2c_dev, int reg) { return readw_relaxed(i2c_dev->base + reg); } static inline void davinci_i2c_reset_ctrl(struct davinci_i2c_dev *i2c_dev, int val) { u16 w; w = davinci_i2c_read_reg(i2c_dev, DAVINCI_I2C_MDR_REG); if (!val) /* put I2C into reset */ w &= ~DAVINCI_I2C_MDR_IRS; else /* take I2C out of reset */ w |= DAVINCI_I2C_MDR_IRS; davinci_i2c_write_reg(i2c_dev, DAVINCI_I2C_MDR_REG, w); } static void i2c_davinci_calc_clk_dividers(struct davinci_i2c_dev *dev) { struct davinci_i2c_platform_data *pdata = dev->pdata; u16 psc; u32 clk; u32 d; u32 clkh; u32 clkl; u32 input_clock = clk_get_rate(dev->clk); struct device_node *of_node = dev->dev->of_node; /* NOTE: I2C Clock divider programming info * As per I2C specs the following formulas provide prescaler * and low/high divider values * input clk --> PSC Div -----------> ICCL/H Div --> output clock * module clk * * output clk = module clk / (PSC + 1) [ (ICCL + d) + (ICCH + d) ] * * Thus, * (ICCL + ICCH) = clk = (input clk / ((psc +1) * output clk)) - 2d; * * where if PSC == 0, d = 7, * if PSC == 1, d = 6 * if PSC > 1 , d = 5 * * Note: * d is always 6 on Keystone I2C controller */ /* * Both Davinci and current Keystone User Guides recommend a value * between 7MHz and 12MHz. In reality 7MHz module clock doesn't * always produce enough margin between SDA and SCL transitions. * Measurements show that the higher the module clock is, the * bigger is the margin, providing more reliable communication. * So we better target for 12MHz. */ psc = (input_clock / 12000000) - 1; if ((input_clock / (psc + 1)) > 12000000) psc++; /* better to run under spec than over */ d = (psc >= 2) ? 5 : 7 - psc; if (of_node && of_device_is_compatible(of_node, "ti,keystone-i2c")) d = 6; clk = ((input_clock / (psc + 1)) / (pdata->bus_freq * 1000)); /* Avoid driving the bus too fast because of rounding errors above */ if (input_clock / (psc + 1) / clk > pdata->bus_freq * 1000) clk++; /* * According to I2C-BUS Spec 2.1, in FAST-MODE LOW period should be at * least 1.3uS, which is not the case with 50% duty cycle. Driving HIGH * to LOW ratio as 1 to 2 is more safe. */ if (pdata->bus_freq > 100) clkl = (clk << 1) / 3; else clkl = (clk >> 1); /* * It's not always possible to have 1 to 2 ratio when d=7, so fall back * to minimal possible clkh in this case. * * Note: * CLKH is not allowed to be 0, in this case I2C clock is not generated * at all */ if (clk > clkl + d) { clkh = clk - clkl - d; clkl -= d; } else { clkh = 1; clkl = clk - (d << 1); } davinci_i2c_write_reg(dev, DAVINCI_I2C_PSC_REG, psc); davinci_i2c_write_reg(dev, DAVINCI_I2C_CLKH_REG, clkh); davinci_i2c_write_reg(dev, DAVINCI_I2C_CLKL_REG, clkl); dev_dbg(dev->dev, "input_clock = %d, CLK = %d\n", input_clock, clk); } /* * This function configures I2C and brings I2C out of reset. * This function is called during I2C init function. This function * also gets called if I2C encounters any errors. 
*/ static int i2c_davinci_init(struct davinci_i2c_dev *dev) { struct davinci_i2c_platform_data *pdata = dev->pdata; /* put I2C into reset */ davinci_i2c_reset_ctrl(dev, 0); /* compute clock dividers */ i2c_davinci_calc_clk_dividers(dev); /* Respond at reserved "SMBus Host" slave address" (and zero); * we seem to have no option to not respond... */ davinci_i2c_write_reg(dev, DAVINCI_I2C_OAR_REG, DAVINCI_I2C_OWN_ADDRESS); dev_dbg(dev->dev, "PSC = %d\n", davinci_i2c_read_reg(dev, DAVINCI_I2C_PSC_REG)); dev_dbg(dev->dev, "CLKL = %d\n", davinci_i2c_read_reg(dev, DAVINCI_I2C_CLKL_REG)); dev_dbg(dev->dev, "CLKH = %d\n", davinci_i2c_read_reg(dev, DAVINCI_I2C_CLKH_REG)); dev_dbg(dev->dev, "bus_freq = %dkHz, bus_delay = %d\n", pdata->bus_freq, pdata->bus_delay); /* Take the I2C module out of reset: */ davinci_i2c_reset_ctrl(dev, 1); /* Enable interrupts */ davinci_i2c_write_reg(dev, DAVINCI_I2C_IMR_REG, I2C_DAVINCI_INTR_ALL); return 0; } /* * This routine does i2c bus recovery by using i2c_generic_scl_recovery * which is provided by I2C Bus recovery infrastructure. */ static void davinci_i2c_prepare_recovery(struct i2c_adapter *adap) { struct davinci_i2c_dev *dev = i2c_get_adapdata(adap); /* Disable interrupts */ davinci_i2c_write_reg(dev, DAVINCI_I2C_IMR_REG, 0); /* put I2C into reset */ davinci_i2c_reset_ctrl(dev, 0); } static void davinci_i2c_unprepare_recovery(struct i2c_adapter *adap) { struct davinci_i2c_dev *dev = i2c_get_adapdata(adap); i2c_davinci_init(dev); } static struct i2c_bus_recovery_info davinci_i2c_gpio_recovery_info = { .recover_bus = i2c_generic_scl_recovery, .prepare_recovery = davinci_i2c_prepare_recovery, .unprepare_recovery = davinci_i2c_unprepare_recovery, }; static void davinci_i2c_set_scl(struct i2c_adapter *adap, int val) { struct davinci_i2c_dev *dev = i2c_get_adapdata(adap); if (val) davinci_i2c_write_reg(dev, DAVINCI_I2C_DSET_REG, DAVINCI_I2C_DSET_PDSET0); else davinci_i2c_write_reg(dev, DAVINCI_I2C_DCLR_REG, DAVINCI_I2C_DCLR_PDCLR0); } static int davinci_i2c_get_scl(struct i2c_adapter *adap) { struct davinci_i2c_dev *dev = i2c_get_adapdata(adap); int val; /* read the state of SCL */ val = davinci_i2c_read_reg(dev, DAVINCI_I2C_DIN_REG); return val & DAVINCI_I2C_DIN_PDIN0; } static int davinci_i2c_get_sda(struct i2c_adapter *adap) { struct davinci_i2c_dev *dev = i2c_get_adapdata(adap); int val; /* read the state of SDA */ val = davinci_i2c_read_reg(dev, DAVINCI_I2C_DIN_REG); return val & DAVINCI_I2C_DIN_PDIN1; } static void davinci_i2c_scl_prepare_recovery(struct i2c_adapter *adap) { struct davinci_i2c_dev *dev = i2c_get_adapdata(adap); davinci_i2c_prepare_recovery(adap); /* SCL output, SDA input */ davinci_i2c_write_reg(dev, DAVINCI_I2C_DIR_REG, DAVINCI_I2C_DIR_PDIR0); /* change to GPIO mode */ davinci_i2c_write_reg(dev, DAVINCI_I2C_FUNC_REG, DAVINCI_I2C_FUNC_PFUNC0); } static void davinci_i2c_scl_unprepare_recovery(struct i2c_adapter *adap) { struct davinci_i2c_dev *dev = i2c_get_adapdata(adap); /* change back to I2C mode */ davinci_i2c_write_reg(dev, DAVINCI_I2C_FUNC_REG, 0); davinci_i2c_unprepare_recovery(adap); } static struct i2c_bus_recovery_info davinci_i2c_scl_recovery_info = { .recover_bus = i2c_generic_scl_recovery, .set_scl = davinci_i2c_set_scl, .get_scl = davinci_i2c_get_scl, .get_sda = davinci_i2c_get_sda, .prepare_recovery = davinci_i2c_scl_prepare_recovery, .unprepare_recovery = davinci_i2c_scl_unprepare_recovery, }; /* * Waiting for bus not busy */ static int i2c_davinci_wait_bus_not_busy(struct davinci_i2c_dev *dev) { unsigned long timeout = jiffies + 
dev->adapter.timeout; do { if (!(davinci_i2c_read_reg(dev, DAVINCI_I2C_STR_REG) & DAVINCI_I2C_STR_BB)) return 0; schedule_timeout_uninterruptible(1); } while (time_before_eq(jiffies, timeout)); dev_warn(dev->dev, "timeout waiting for bus ready\n"); i2c_recover_bus(&dev->adapter); /* * if bus is still "busy" here, it's most probably a HW problem like * short-circuit */ if (davinci_i2c_read_reg(dev, DAVINCI_I2C_STR_REG) & DAVINCI_I2C_STR_BB) return -EIO; return 0; } /* * Low level master read/write transaction. This function is called * from i2c_davinci_xfer. */ static int i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop) { struct davinci_i2c_dev *dev = i2c_get_adapdata(adap); struct davinci_i2c_platform_data *pdata = dev->pdata; u32 flag; u16 w; unsigned long time_left; if (msg->addr == DAVINCI_I2C_OWN_ADDRESS) { dev_warn(dev->dev, "transfer to own address aborted\n"); return -EADDRNOTAVAIL; } /* Introduce a delay, required for some boards (e.g Davinci EVM) */ if (pdata->bus_delay) udelay(pdata->bus_delay); /* set the slave address */ davinci_i2c_write_reg(dev, DAVINCI_I2C_SAR_REG, msg->addr); dev->buf = msg->buf; dev->buf_len = msg->len; dev->stop = stop; davinci_i2c_write_reg(dev, DAVINCI_I2C_CNT_REG, dev->buf_len); reinit_completion(&dev->cmd_complete); dev->cmd_err = 0; /* Take I2C out of reset and configure it as master */ flag = DAVINCI_I2C_MDR_IRS | DAVINCI_I2C_MDR_MST; /* if the slave address is ten bit address, enable XA bit */ if (msg->flags & I2C_M_TEN) flag |= DAVINCI_I2C_MDR_XA; if (!(msg->flags & I2C_M_RD)) flag |= DAVINCI_I2C_MDR_TRX; if (msg->len == 0) flag |= DAVINCI_I2C_MDR_RM; /* Enable receive or transmit interrupts */ w = davinci_i2c_read_reg(dev, DAVINCI_I2C_IMR_REG); if (msg->flags & I2C_M_RD) w |= DAVINCI_I2C_IMR_RRDY; else w |= DAVINCI_I2C_IMR_XRDY; davinci_i2c_write_reg(dev, DAVINCI_I2C_IMR_REG, w); dev->terminate = 0; /* * Write mode register first as needed for correct behaviour * on OMAP-L138, but don't set STT yet to avoid a race with XRDY * occurring before we have loaded DXR */ davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag); /* * First byte should be set here, not after interrupt, * because transmit-data-ready interrupt can come before * NACK-interrupt during sending of previous message and * ICDXR may have wrong data * It also saves us one interrupt, slightly faster */ if ((!(msg->flags & I2C_M_RD)) && dev->buf_len) { davinci_i2c_write_reg(dev, DAVINCI_I2C_DXR_REG, *dev->buf++); dev->buf_len--; } /* Set STT to begin transmit now DXR is loaded */ flag |= DAVINCI_I2C_MDR_STT; if (stop && msg->len != 0) flag |= DAVINCI_I2C_MDR_STP; davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag); time_left = wait_for_completion_timeout(&dev->cmd_complete, dev->adapter.timeout); if (!time_left) { dev_err(dev->dev, "controller timed out\n"); i2c_recover_bus(adap); dev->buf_len = 0; return -ETIMEDOUT; } if (dev->buf_len) { /* This should be 0 if all bytes were transferred * or dev->cmd_err denotes an error. 
*/ dev_err(dev->dev, "abnormal termination buf_len=%zu\n", dev->buf_len); dev->terminate = 1; wmb(); dev->buf_len = 0; return -EREMOTEIO; } /* no error */ if (likely(!dev->cmd_err)) return msg->len; /* We have an error */ if (dev->cmd_err & DAVINCI_I2C_STR_AL) { i2c_davinci_init(dev); return -EIO; } if (dev->cmd_err & DAVINCI_I2C_STR_NACK) { if (msg->flags & I2C_M_IGNORE_NAK) return msg->len; w = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG); w |= DAVINCI_I2C_MDR_STP; davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w); return -EREMOTEIO; } return -EIO; } /* * Prepare controller for a transaction and call i2c_davinci_xfer_msg */ static int i2c_davinci_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) { struct davinci_i2c_dev *dev = i2c_get_adapdata(adap); int i; int ret; dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num); ret = pm_runtime_resume_and_get(dev->dev); if (ret < 0) { dev_err(dev->dev, "Failed to runtime_get device: %d\n", ret); return ret; } ret = i2c_davinci_wait_bus_not_busy(dev); if (ret < 0) { dev_warn(dev->dev, "timeout waiting for bus ready\n"); goto out; } for (i = 0; i < num; i++) { ret = i2c_davinci_xfer_msg(adap, &msgs[i], (i == (num - 1))); dev_dbg(dev->dev, "%s [%d/%d] ret: %d\n", __func__, i + 1, num, ret); if (ret < 0) goto out; } ret = num; out: pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return ret; } static u32 i2c_davinci_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static void terminate_read(struct davinci_i2c_dev *dev) { u16 w = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG); w |= DAVINCI_I2C_MDR_NACK; davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w); /* Throw away data */ davinci_i2c_read_reg(dev, DAVINCI_I2C_DRR_REG); if (!dev->terminate) dev_err(dev->dev, "RDR IRQ while no data requested\n"); } static void terminate_write(struct davinci_i2c_dev *dev) { u16 w = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG); w |= DAVINCI_I2C_MDR_RM | DAVINCI_I2C_MDR_STP; davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w); if (!dev->terminate) dev_dbg(dev->dev, "TDR IRQ while no data to send\n"); } /* * Interrupt service routine. This gets called whenever an I2C interrupt * occurs. 
*/ static irqreturn_t i2c_davinci_isr(int this_irq, void *dev_id) { struct davinci_i2c_dev *dev = dev_id; u32 stat; int count = 0; u16 w; if (pm_runtime_suspended(dev->dev)) return IRQ_NONE; while ((stat = davinci_i2c_read_reg(dev, DAVINCI_I2C_IVR_REG))) { dev_dbg(dev->dev, "%s: stat=0x%x\n", __func__, stat); if (count++ == 100) { dev_warn(dev->dev, "Too much work in one IRQ\n"); break; } switch (stat) { case DAVINCI_I2C_IVR_AL: /* Arbitration lost, must retry */ dev->cmd_err |= DAVINCI_I2C_STR_AL; dev->buf_len = 0; complete(&dev->cmd_complete); break; case DAVINCI_I2C_IVR_NACK: dev->cmd_err |= DAVINCI_I2C_STR_NACK; dev->buf_len = 0; complete(&dev->cmd_complete); break; case DAVINCI_I2C_IVR_ARDY: davinci_i2c_write_reg(dev, DAVINCI_I2C_STR_REG, DAVINCI_I2C_STR_ARDY); if (((dev->buf_len == 0) && (dev->stop != 0)) || (dev->cmd_err & DAVINCI_I2C_STR_NACK)) { w = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG); w |= DAVINCI_I2C_MDR_STP; davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w); } complete(&dev->cmd_complete); break; case DAVINCI_I2C_IVR_RDR: if (dev->buf_len) { *dev->buf++ = davinci_i2c_read_reg(dev, DAVINCI_I2C_DRR_REG); dev->buf_len--; if (dev->buf_len) continue; davinci_i2c_write_reg(dev, DAVINCI_I2C_STR_REG, DAVINCI_I2C_IMR_RRDY); } else { /* signal can terminate transfer */ terminate_read(dev); } break; case DAVINCI_I2C_IVR_XRDY: if (dev->buf_len) { davinci_i2c_write_reg(dev, DAVINCI_I2C_DXR_REG, *dev->buf++); dev->buf_len--; if (dev->buf_len) continue; w = davinci_i2c_read_reg(dev, DAVINCI_I2C_IMR_REG); w &= ~DAVINCI_I2C_IMR_XRDY; davinci_i2c_write_reg(dev, DAVINCI_I2C_IMR_REG, w); } else { /* signal can terminate transfer */ terminate_write(dev); } break; case DAVINCI_I2C_IVR_SCD: davinci_i2c_write_reg(dev, DAVINCI_I2C_STR_REG, DAVINCI_I2C_STR_SCD); complete(&dev->cmd_complete); break; case DAVINCI_I2C_IVR_AAS: dev_dbg(dev->dev, "Address as slave interrupt\n"); break; default: dev_warn(dev->dev, "Unrecognized irq stat %d\n", stat); break; } } return count ? 
IRQ_HANDLED : IRQ_NONE; } #ifdef CONFIG_CPU_FREQ static int i2c_davinci_cpufreq_transition(struct notifier_block *nb, unsigned long val, void *data) { struct davinci_i2c_dev *dev; dev = container_of(nb, struct davinci_i2c_dev, freq_transition); i2c_lock_bus(&dev->adapter, I2C_LOCK_ROOT_ADAPTER); if (val == CPUFREQ_PRECHANGE) { davinci_i2c_reset_ctrl(dev, 0); } else if (val == CPUFREQ_POSTCHANGE) { i2c_davinci_calc_clk_dividers(dev); davinci_i2c_reset_ctrl(dev, 1); } i2c_unlock_bus(&dev->adapter, I2C_LOCK_ROOT_ADAPTER); return 0; } static inline int i2c_davinci_cpufreq_register(struct davinci_i2c_dev *dev) { dev->freq_transition.notifier_call = i2c_davinci_cpufreq_transition; return cpufreq_register_notifier(&dev->freq_transition, CPUFREQ_TRANSITION_NOTIFIER); } static inline void i2c_davinci_cpufreq_deregister(struct davinci_i2c_dev *dev) { cpufreq_unregister_notifier(&dev->freq_transition, CPUFREQ_TRANSITION_NOTIFIER); } #else static inline int i2c_davinci_cpufreq_register(struct davinci_i2c_dev *dev) { return 0; } static inline void i2c_davinci_cpufreq_deregister(struct davinci_i2c_dev *dev) { } #endif static const struct i2c_algorithm i2c_davinci_algo = { .master_xfer = i2c_davinci_xfer, .functionality = i2c_davinci_func, }; static const struct of_device_id davinci_i2c_of_match[] = { {.compatible = "ti,davinci-i2c", }, {.compatible = "ti,keystone-i2c", }, {}, }; MODULE_DEVICE_TABLE(of, davinci_i2c_of_match); static int davinci_i2c_probe(struct platform_device *pdev) { struct davinci_i2c_dev *dev; struct i2c_adapter *adap; struct i2c_bus_recovery_info *rinfo; int r, irq; irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; init_completion(&dev->cmd_complete); dev->dev = &pdev->dev; dev->irq = irq; dev->pdata = dev_get_platdata(&pdev->dev); platform_set_drvdata(pdev, dev); if (!dev->pdata && pdev->dev.of_node) { u32 prop; dev->pdata = devm_kzalloc(&pdev->dev, sizeof(struct davinci_i2c_platform_data), GFP_KERNEL); if (!dev->pdata) return -ENOMEM; memcpy(dev->pdata, &davinci_i2c_platform_data_default, sizeof(struct davinci_i2c_platform_data)); if (!of_property_read_u32(pdev->dev.of_node, "clock-frequency", &prop)) dev->pdata->bus_freq = prop / 1000; dev->pdata->has_pfunc = of_property_read_bool(pdev->dev.of_node, "ti,has-pfunc"); } else if (!dev->pdata) { dev->pdata = &davinci_i2c_platform_data_default; } dev->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(dev->clk)) return PTR_ERR(dev->clk); dev->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(dev->base)) { return PTR_ERR(dev->base); } pm_runtime_set_autosuspend_delay(dev->dev, DAVINCI_I2C_PM_TIMEOUT); pm_runtime_use_autosuspend(dev->dev); pm_runtime_enable(dev->dev); r = pm_runtime_resume_and_get(dev->dev); if (r < 0) { dev_err(dev->dev, "failed to runtime_get device: %d\n", r); goto err_pm; } i2c_davinci_init(dev); r = devm_request_irq(&pdev->dev, dev->irq, i2c_davinci_isr, 0, pdev->name, dev); if (r) { dev_err(&pdev->dev, "failure requesting irq %i\n", dev->irq); goto err_unuse_clocks; } r = i2c_davinci_cpufreq_register(dev); if (r) { dev_err(&pdev->dev, "failed to register cpufreq\n"); goto err_unuse_clocks; } adap = &dev->adapter; i2c_set_adapdata(adap, dev); adap->owner = THIS_MODULE; adap->class = I2C_CLASS_DEPRECATED; strscpy(adap->name, "DaVinci I2C adapter", sizeof(adap->name)); adap->algo = &i2c_davinci_algo; adap->dev.parent = &pdev->dev; adap->timeout = DAVINCI_I2C_TIMEOUT; adap->dev.of_node = pdev->dev.of_node; if 
(dev->pdata->has_pfunc) adap->bus_recovery_info = &davinci_i2c_scl_recovery_info; else if (dev->pdata->gpio_recovery) { rinfo = &davinci_i2c_gpio_recovery_info; adap->bus_recovery_info = rinfo; rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl", GPIOD_OUT_HIGH_OPEN_DRAIN); if (IS_ERR(rinfo->scl_gpiod)) { r = PTR_ERR(rinfo->scl_gpiod); goto err_unuse_clocks; } rinfo->sda_gpiod = devm_gpiod_get(&pdev->dev, "sda", GPIOD_IN); if (IS_ERR(rinfo->sda_gpiod)) { r = PTR_ERR(rinfo->sda_gpiod); goto err_unuse_clocks; } } adap->nr = pdev->id; r = i2c_add_numbered_adapter(adap); if (r) goto err_unuse_clocks; pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return 0; err_unuse_clocks: pm_runtime_dont_use_autosuspend(dev->dev); pm_runtime_put_sync(dev->dev); err_pm: pm_runtime_disable(dev->dev); return r; } static void davinci_i2c_remove(struct platform_device *pdev) { struct davinci_i2c_dev *dev = platform_get_drvdata(pdev); int ret; i2c_davinci_cpufreq_deregister(dev); i2c_del_adapter(&dev->adapter); ret = pm_runtime_get_sync(&pdev->dev); if (ret < 0) dev_err(&pdev->dev, "Failed to resume device\n"); else davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, 0); pm_runtime_dont_use_autosuspend(dev->dev); pm_runtime_put_sync(dev->dev); pm_runtime_disable(dev->dev); } static int davinci_i2c_suspend(struct device *dev) { struct davinci_i2c_dev *i2c_dev = dev_get_drvdata(dev); /* put I2C into reset */ davinci_i2c_reset_ctrl(i2c_dev, 0); return 0; } static int davinci_i2c_resume(struct device *dev) { struct davinci_i2c_dev *i2c_dev = dev_get_drvdata(dev); /* take I2C out of reset */ davinci_i2c_reset_ctrl(i2c_dev, 1); return 0; } static const struct dev_pm_ops davinci_i2c_pm = { .suspend = davinci_i2c_suspend, .resume = davinci_i2c_resume, NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) }; static const struct platform_device_id davinci_i2c_driver_ids[] = { { .name = "i2c_davinci", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(platform, davinci_i2c_driver_ids); static struct platform_driver davinci_i2c_driver = { .probe = davinci_i2c_probe, .remove_new = davinci_i2c_remove, .id_table = davinci_i2c_driver_ids, .driver = { .name = "i2c_davinci", .pm = pm_sleep_ptr(&davinci_i2c_pm), .of_match_table = davinci_i2c_of_match, }, }; /* I2C may be needed to bring up other drivers */ static int __init davinci_i2c_init_driver(void) { return platform_driver_register(&davinci_i2c_driver); } subsys_initcall(davinci_i2c_init_driver); static void __exit davinci_i2c_exit_driver(void) { platform_driver_unregister(&davinci_i2c_driver); } module_exit(davinci_i2c_exit_driver); MODULE_AUTHOR("Texas Instruments India"); MODULE_DESCRIPTION("TI DaVinci I2C bus adapter"); MODULE_LICENSE("GPL");
linux-master
drivers/i2c/busses/i2c-davinci.c
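The long comment in i2c_davinci_calc_clk_dividers() boils down to output clk = module clk / ((PSC + 1) * ((CLKH + d) + (CLKL + d))), with d = 7, 6 or 5 depending on PSC (always 6 on Keystone). A standalone sketch that simply evaluates this formula; the 24 MHz input, PSC = 1 (hence d = 6) and the divider values are made-up illustrative numbers, chosen so the result lands on 100 kHz:

/*
 * Standalone check of the divider formula documented in
 * i2c_davinci_calc_clk_dividers(); values below are illustrative,
 * not taken from real hardware.
 */
#include <stdio.h>

static unsigned int davinci_scl_hz(unsigned int input_hz, unsigned int psc,
				   unsigned int d, unsigned int clkh,
				   unsigned int clkl)
{
	/* output clk = input clk / ((PSC + 1) * ((CLKH + d) + (CLKL + d))) */
	return input_hz / ((psc + 1) * ((clkh + d) + (clkl + d)));
}

int main(void)
{
	/* 24 MHz module clock, PSC = 1 -> 12 MHz prescaled, d = 6 */
	printf("SCL = %u Hz\n", davinci_scl_hz(24000000, 1, 6, 54, 54));
	return 0;
}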
// SPDX-License-Identifier: GPL-2.0 /* * Nvidia GPU I2C controller Driver * * Copyright (C) 2018 NVIDIA Corporation. All rights reserved. * Author: Ajay Gupta <[email protected]> */ #include <linux/delay.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/iopoll.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/pm.h> #include <linux/pm_runtime.h> #include <linux/power_supply.h> #include <asm/unaligned.h> #include "i2c-ccgx-ucsi.h" /* I2C definitions */ #define I2C_MST_CNTL 0x00 #define I2C_MST_CNTL_GEN_START BIT(0) #define I2C_MST_CNTL_GEN_STOP BIT(1) #define I2C_MST_CNTL_CMD_READ (1 << 2) #define I2C_MST_CNTL_CMD_WRITE (2 << 2) #define I2C_MST_CNTL_BURST_SIZE_SHIFT 6 #define I2C_MST_CNTL_GEN_NACK BIT(28) #define I2C_MST_CNTL_STATUS GENMASK(30, 29) #define I2C_MST_CNTL_STATUS_OKAY (0 << 29) #define I2C_MST_CNTL_STATUS_NO_ACK (1 << 29) #define I2C_MST_CNTL_STATUS_TIMEOUT (2 << 29) #define I2C_MST_CNTL_STATUS_BUS_BUSY (3 << 29) #define I2C_MST_CNTL_CYCLE_TRIGGER BIT(31) #define I2C_MST_ADDR 0x04 #define I2C_MST_I2C0_TIMING 0x08 #define I2C_MST_I2C0_TIMING_SCL_PERIOD_100KHZ 0x10e #define I2C_MST_I2C0_TIMING_TIMEOUT_CLK_CNT 16 #define I2C_MST_I2C0_TIMING_TIMEOUT_CLK_CNT_MAX 255 #define I2C_MST_I2C0_TIMING_TIMEOUT_CHECK BIT(24) #define I2C_MST_DATA 0x0c #define I2C_MST_HYBRID_PADCTL 0x20 #define I2C_MST_HYBRID_PADCTL_MODE_I2C BIT(0) #define I2C_MST_HYBRID_PADCTL_I2C_SCL_INPUT_RCV BIT(14) #define I2C_MST_HYBRID_PADCTL_I2C_SDA_INPUT_RCV BIT(15) struct gpu_i2c_dev { struct device *dev; void __iomem *regs; struct i2c_adapter adapter; struct i2c_board_info *gpu_ccgx_ucsi; struct i2c_client *ccgx_client; }; static void gpu_enable_i2c_bus(struct gpu_i2c_dev *i2cd) { u32 val; /* enable I2C */ val = readl(i2cd->regs + I2C_MST_HYBRID_PADCTL); val |= I2C_MST_HYBRID_PADCTL_MODE_I2C | I2C_MST_HYBRID_PADCTL_I2C_SCL_INPUT_RCV | I2C_MST_HYBRID_PADCTL_I2C_SDA_INPUT_RCV; writel(val, i2cd->regs + I2C_MST_HYBRID_PADCTL); /* enable 100KHZ mode */ val = I2C_MST_I2C0_TIMING_SCL_PERIOD_100KHZ; val |= (I2C_MST_I2C0_TIMING_TIMEOUT_CLK_CNT_MAX << I2C_MST_I2C0_TIMING_TIMEOUT_CLK_CNT); val |= I2C_MST_I2C0_TIMING_TIMEOUT_CHECK; writel(val, i2cd->regs + I2C_MST_I2C0_TIMING); } static int gpu_i2c_check_status(struct gpu_i2c_dev *i2cd) { u32 val; int ret; ret = readl_poll_timeout(i2cd->regs + I2C_MST_CNTL, val, !(val & I2C_MST_CNTL_CYCLE_TRIGGER) || (val & I2C_MST_CNTL_STATUS) != I2C_MST_CNTL_STATUS_BUS_BUSY, 500, 1000 * USEC_PER_MSEC); if (ret) { dev_err(i2cd->dev, "i2c timeout error %x\n", val); return -ETIMEDOUT; } val = readl(i2cd->regs + I2C_MST_CNTL); switch (val & I2C_MST_CNTL_STATUS) { case I2C_MST_CNTL_STATUS_OKAY: return 0; case I2C_MST_CNTL_STATUS_NO_ACK: return -ENXIO; case I2C_MST_CNTL_STATUS_TIMEOUT: return -ETIMEDOUT; default: return 0; } } static int gpu_i2c_read(struct gpu_i2c_dev *i2cd, u8 *data, u16 len) { int status; u32 val; val = I2C_MST_CNTL_GEN_START | I2C_MST_CNTL_CMD_READ | (len << I2C_MST_CNTL_BURST_SIZE_SHIFT) | I2C_MST_CNTL_CYCLE_TRIGGER | I2C_MST_CNTL_GEN_NACK; writel(val, i2cd->regs + I2C_MST_CNTL); status = gpu_i2c_check_status(i2cd); if (status < 0) return status; val = readl(i2cd->regs + I2C_MST_DATA); switch (len) { case 1: data[0] = val; break; case 2: put_unaligned_be16(val, data); break; case 3: put_unaligned_be24(val, data); break; case 4: put_unaligned_be32(val, data); break; default: break; } return status; } static int gpu_i2c_start(struct gpu_i2c_dev *i2cd) { writel(I2C_MST_CNTL_GEN_START, i2cd->regs + 
I2C_MST_CNTL); return gpu_i2c_check_status(i2cd); } static int gpu_i2c_stop(struct gpu_i2c_dev *i2cd) { writel(I2C_MST_CNTL_GEN_STOP, i2cd->regs + I2C_MST_CNTL); return gpu_i2c_check_status(i2cd); } static int gpu_i2c_write(struct gpu_i2c_dev *i2cd, u8 data) { u32 val; writel(data, i2cd->regs + I2C_MST_DATA); val = I2C_MST_CNTL_CMD_WRITE | (1 << I2C_MST_CNTL_BURST_SIZE_SHIFT); writel(val, i2cd->regs + I2C_MST_CNTL); return gpu_i2c_check_status(i2cd); } static int gpu_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct gpu_i2c_dev *i2cd = i2c_get_adapdata(adap); int status, status2; bool send_stop = true; int i, j; /* * The controller supports maximum 4 byte read due to known * limitation of sending STOP after every read. */ pm_runtime_get_sync(i2cd->dev); for (i = 0; i < num; i++) { if (msgs[i].flags & I2C_M_RD) { /* program client address before starting read */ writel(msgs[i].addr, i2cd->regs + I2C_MST_ADDR); /* gpu_i2c_read has implicit start */ status = gpu_i2c_read(i2cd, msgs[i].buf, msgs[i].len); if (status < 0) goto exit; } else { u8 addr = i2c_8bit_addr_from_msg(msgs + i); status = gpu_i2c_start(i2cd); if (status < 0) { if (i == 0) send_stop = false; goto exit; } status = gpu_i2c_write(i2cd, addr); if (status < 0) goto exit; for (j = 0; j < msgs[i].len; j++) { status = gpu_i2c_write(i2cd, msgs[i].buf[j]); if (status < 0) goto exit; } } } send_stop = false; status = gpu_i2c_stop(i2cd); if (status < 0) goto exit; status = i; exit: if (send_stop) { status2 = gpu_i2c_stop(i2cd); if (status2 < 0) dev_err(i2cd->dev, "i2c stop failed %d\n", status2); } pm_runtime_mark_last_busy(i2cd->dev); pm_runtime_put_autosuspend(i2cd->dev); return status; } static const struct i2c_adapter_quirks gpu_i2c_quirks = { .max_read_len = 4, .max_comb_2nd_msg_len = 4, .flags = I2C_AQ_COMB_WRITE_THEN_READ, }; static u32 gpu_i2c_functionality(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static const struct i2c_algorithm gpu_i2c_algorithm = { .master_xfer = gpu_i2c_master_xfer, .functionality = gpu_i2c_functionality, }; /* * This driver is for Nvidia GPU cards with USB Type-C interface. * We want to identify the cards using vendor ID and class code only * to avoid dependency of adding product id for any new card which * requires this driver. * Currently there is no class code defined for UCSI device over PCI * so using UNKNOWN class for now and it will be updated when UCSI * over PCI gets a class code. * There is no other NVIDIA cards with UNKNOWN class code. Even if the * driver gets loaded for an undesired card then eventually i2c_read() * (initiated from UCSI i2c_client) will timeout or UCSI commands will * timeout. 
*/ #define PCI_CLASS_SERIAL_UNKNOWN 0x0c80 static const struct pci_device_id gpu_i2c_ids[] = { { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_SERIAL_UNKNOWN << 8, 0xffffff00}, { } }; MODULE_DEVICE_TABLE(pci, gpu_i2c_ids); static const struct property_entry ccgx_props[] = { /* Use FW built for NVIDIA GPU only */ PROPERTY_ENTRY_STRING("firmware-name", "nvidia,gpu"), /* USB-C doesn't power the system */ PROPERTY_ENTRY_U8("scope", POWER_SUPPLY_SCOPE_DEVICE), { } }; static const struct software_node ccgx_node = { .properties = ccgx_props, }; static int gpu_i2c_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct device *dev = &pdev->dev; struct gpu_i2c_dev *i2cd; int status; i2cd = devm_kzalloc(dev, sizeof(*i2cd), GFP_KERNEL); if (!i2cd) return -ENOMEM; i2cd->dev = dev; dev_set_drvdata(dev, i2cd); status = pcim_enable_device(pdev); if (status < 0) return dev_err_probe(dev, status, "pcim_enable_device failed\n"); pci_set_master(pdev); i2cd->regs = pcim_iomap(pdev, 0, 0); if (!i2cd->regs) return dev_err_probe(dev, -ENOMEM, "pcim_iomap failed\n"); status = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI); if (status < 0) return dev_err_probe(dev, status, "pci_alloc_irq_vectors err\n"); gpu_enable_i2c_bus(i2cd); i2c_set_adapdata(&i2cd->adapter, i2cd); i2cd->adapter.owner = THIS_MODULE; strscpy(i2cd->adapter.name, "NVIDIA GPU I2C adapter", sizeof(i2cd->adapter.name)); i2cd->adapter.algo = &gpu_i2c_algorithm; i2cd->adapter.quirks = &gpu_i2c_quirks; i2cd->adapter.dev.parent = dev; status = i2c_add_adapter(&i2cd->adapter); if (status < 0) goto free_irq_vectors; i2cd->ccgx_client = i2c_new_ccgx_ucsi(&i2cd->adapter, pdev->irq, &ccgx_node); if (IS_ERR(i2cd->ccgx_client)) { status = dev_err_probe(dev, PTR_ERR(i2cd->ccgx_client), "register UCSI failed\n"); goto del_adapter; } pm_runtime_set_autosuspend_delay(dev, 3000); pm_runtime_use_autosuspend(dev); pm_runtime_put_autosuspend(dev); pm_runtime_allow(dev); return 0; del_adapter: i2c_del_adapter(&i2cd->adapter); free_irq_vectors: pci_free_irq_vectors(pdev); return status; } static void gpu_i2c_remove(struct pci_dev *pdev) { struct gpu_i2c_dev *i2cd = pci_get_drvdata(pdev); pm_runtime_get_noresume(i2cd->dev); i2c_del_adapter(&i2cd->adapter); pci_free_irq_vectors(pdev); } #define gpu_i2c_suspend NULL static __maybe_unused int gpu_i2c_resume(struct device *dev) { struct gpu_i2c_dev *i2cd = dev_get_drvdata(dev); gpu_enable_i2c_bus(i2cd); /* * Runtime resume ccgx client so that it can see for any * connector change event. Old ccg firmware has known * issue of not triggering interrupt when a device is * connected to runtime resume the controller. */ pm_request_resume(&i2cd->ccgx_client->dev); return 0; } static UNIVERSAL_DEV_PM_OPS(gpu_i2c_driver_pm, gpu_i2c_suspend, gpu_i2c_resume, NULL); static struct pci_driver gpu_i2c_driver = { .name = "nvidia-gpu", .id_table = gpu_i2c_ids, .probe = gpu_i2c_probe, .remove = gpu_i2c_remove, .driver = { .pm = &gpu_i2c_driver_pm, }, }; module_pci_driver(gpu_i2c_driver); MODULE_AUTHOR("Ajay Gupta <[email protected]>"); MODULE_DESCRIPTION("Nvidia GPU I2C controller Driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/i2c/busses/i2c-nvidia-gpu.c
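gpu_i2c_read() above fetches up to four bytes from the single 32-bit I2C_MST_DATA register and spreads them big-endian into the message buffer with put_unaligned_be16/24/32(); this 4-byte ceiling is also why gpu_i2c_quirks caps max_read_len at 4. A portable sketch of the same unpacking (unpack_be is an illustrative name, not a kernel helper):

/*
 * Sketch of the byte unpacking in gpu_i2c_read(): the controller packs a
 * read of 'len' bytes (len <= 4) big-endian into one 32-bit data word,
 * so buf[0] is the most significant of the bytes actually read.
 */
#include <stdint.h>
#include <stddef.h>

static void unpack_be(uint32_t word, uint8_t *buf, size_t len)
{
	for (size_t i = 0; i < len; i++)
		buf[i] = (uint8_t)(word >> (8 * (len - 1 - i)));
}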
// SPDX-License-Identifier: GPL-2.0-or-later /* * i2c-amd756-s4882.c - i2c-amd756 extras for the Tyan S4882 motherboard * * Copyright (C) 2004, 2008 Jean Delvare <[email protected]> */ /* * We select the channels by sending commands to the Philips * PCA9556 chip at I2C address 0x18. The main adapter is used for * the non-multiplexed part of the bus, and 4 virtual adapters * are defined for the multiplexed addresses: 0x50-0x53 (memory * module EEPROM) located on channels 1-4, and 0x4c (LM63) * located on multiplexed channels 0 and 5-7. We define one * virtual adapter per CPU, which corresponds to two multiplexed * channels: * CPU0: virtual adapter 1, channels 1 and 0 * CPU1: virtual adapter 2, channels 2 and 5 * CPU2: virtual adapter 3, channels 3 and 6 * CPU3: virtual adapter 4, channels 4 and 7 */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/mutex.h> extern struct i2c_adapter amd756_smbus; static struct i2c_adapter *s4882_adapter; static struct i2c_algorithm *s4882_algo; /* Wrapper access functions for multiplexed SMBus */ static DEFINE_MUTEX(amd756_lock); static s32 amd756_access_virt0(struct i2c_adapter * adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data * data) { int error; /* We exclude the multiplexed addresses */ if (addr == 0x4c || (addr & 0xfc) == 0x50 || (addr & 0xfc) == 0x30 || addr == 0x18) return -ENXIO; mutex_lock(&amd756_lock); error = amd756_smbus.algo->smbus_xfer(adap, addr, flags, read_write, command, size, data); mutex_unlock(&amd756_lock); return error; } /* We remember the last used channels combination so as to only switch channels when it is really needed. This greatly reduces the SMBus overhead, but also assumes that nobody will be writing to the PCA9556 in our back. 
*/ static u8 last_channels; static inline s32 amd756_access_channel(struct i2c_adapter * adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data * data, u8 channels) { int error; /* We exclude the non-multiplexed addresses */ if (addr != 0x4c && (addr & 0xfc) != 0x50 && (addr & 0xfc) != 0x30) return -ENXIO; mutex_lock(&amd756_lock); if (last_channels != channels) { union i2c_smbus_data mplxdata; mplxdata.byte = channels; error = amd756_smbus.algo->smbus_xfer(adap, 0x18, 0, I2C_SMBUS_WRITE, 0x01, I2C_SMBUS_BYTE_DATA, &mplxdata); if (error) goto UNLOCK; last_channels = channels; } error = amd756_smbus.algo->smbus_xfer(adap, addr, flags, read_write, command, size, data); UNLOCK: mutex_unlock(&amd756_lock); return error; } static s32 amd756_access_virt1(struct i2c_adapter * adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data * data) { /* CPU0: channels 1 and 0 enabled */ return amd756_access_channel(adap, addr, flags, read_write, command, size, data, 0x03); } static s32 amd756_access_virt2(struct i2c_adapter * adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data * data) { /* CPU1: channels 2 and 5 enabled */ return amd756_access_channel(adap, addr, flags, read_write, command, size, data, 0x24); } static s32 amd756_access_virt3(struct i2c_adapter * adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data * data) { /* CPU2: channels 3 and 6 enabled */ return amd756_access_channel(adap, addr, flags, read_write, command, size, data, 0x48); } static s32 amd756_access_virt4(struct i2c_adapter * adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data * data) { /* CPU3: channels 4 and 7 enabled */ return amd756_access_channel(adap, addr, flags, read_write, command, size, data, 0x90); } static int __init amd756_s4882_init(void) { int i, error; union i2c_smbus_data ioconfig; if (!amd756_smbus.dev.parent) return -ENODEV; /* Configure the PCA9556 multiplexer */ ioconfig.byte = 0x00; /* All I/O to output mode */ error = i2c_smbus_xfer(&amd756_smbus, 0x18, 0, I2C_SMBUS_WRITE, 0x03, I2C_SMBUS_BYTE_DATA, &ioconfig); if (error) { dev_err(&amd756_smbus.dev, "PCA9556 configuration failed\n"); error = -EIO; goto ERROR0; } /* Unregister physical bus */ i2c_del_adapter(&amd756_smbus); printk(KERN_INFO "Enabling SMBus multiplexing for Tyan S4882\n"); /* Define the 5 virtual adapters and algorithms structures */ if (!(s4882_adapter = kcalloc(5, sizeof(struct i2c_adapter), GFP_KERNEL))) { error = -ENOMEM; goto ERROR1; } if (!(s4882_algo = kcalloc(5, sizeof(struct i2c_algorithm), GFP_KERNEL))) { error = -ENOMEM; goto ERROR2; } /* Fill in the new structures */ s4882_algo[0] = *(amd756_smbus.algo); s4882_algo[0].smbus_xfer = amd756_access_virt0; s4882_adapter[0] = amd756_smbus; s4882_adapter[0].algo = s4882_algo; s4882_adapter[0].dev.parent = amd756_smbus.dev.parent; for (i = 1; i < 5; i++) { s4882_algo[i] = *(amd756_smbus.algo); s4882_adapter[i] = amd756_smbus; snprintf(s4882_adapter[i].name, sizeof(s4882_adapter[i].name), "SMBus 8111 adapter (CPU%d)", i-1); s4882_adapter[i].algo = s4882_algo+i; s4882_adapter[i].dev.parent = amd756_smbus.dev.parent; } s4882_algo[1].smbus_xfer = amd756_access_virt1; s4882_algo[2].smbus_xfer = amd756_access_virt2; s4882_algo[3].smbus_xfer = amd756_access_virt3; s4882_algo[4].smbus_xfer = amd756_access_virt4; /* Register virtual adapters */ for (i = 0; i < 5; i++) { error = 
i2c_add_adapter(s4882_adapter+i); if (error) { printk(KERN_ERR "i2c-amd756-s4882: " "Virtual adapter %d registration " "failed, module not inserted\n", i); for (i--; i >= 0; i--) i2c_del_adapter(s4882_adapter+i); goto ERROR3; } } return 0; ERROR3: kfree(s4882_algo); s4882_algo = NULL; ERROR2: kfree(s4882_adapter); s4882_adapter = NULL; ERROR1: /* Restore physical bus */ i2c_add_adapter(&amd756_smbus); ERROR0: return error; } static void __exit amd756_s4882_exit(void) { if (s4882_adapter) { int i; for (i = 0; i < 5; i++) i2c_del_adapter(s4882_adapter+i); kfree(s4882_adapter); s4882_adapter = NULL; } kfree(s4882_algo); s4882_algo = NULL; /* Restore physical bus */ if (i2c_add_adapter(&amd756_smbus)) printk(KERN_ERR "i2c-amd756-s4882: " "Physical bus restoration failed\n"); } MODULE_AUTHOR("Jean Delvare <[email protected]>"); MODULE_DESCRIPTION("S4882 SMBus multiplexing"); MODULE_LICENSE("GPL"); module_init(amd756_s4882_init); module_exit(amd756_s4882_exit);
linux-master
drivers/i2c/busses/i2c-amd756-s4882.c
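The four virtual adapters above differ only in the channel mask handed to amd756_access_channel(): bit n of the byte written to the PCA9556 output register opens multiplexed channel n, and the constants 0x03/0x24/0x48/0x90 follow directly from the CPU-to-channel table in the header comment (CPU0 gets channels 1 and 0, CPU1 channels 2 and 5, and so on). A small check program deriving them (the helper name is illustrative):

/*
 * Sketch deriving the PCA9556 masks used above: CPU n drives memory
 * channel n + 1 plus sensor channel (n ? n + 4 : 0). Demo only.
 */
#include <stdio.h>

static unsigned char s4882_channels(int cpu)
{
	unsigned char mem  = 1u << (cpu + 1);
	unsigned char lm63 = cpu ? 1u << (cpu + 4) : 1u << 0;

	return mem | lm63;
}

int main(void)
{
	for (int cpu = 0; cpu < 4; cpu++)
		printf("CPU%d: 0x%02x\n", cpu, s4882_channels(cpu));
	return 0;	/* prints 0x03, 0x24, 0x48, 0x90 */
}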
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) 2000 Frodo Looijaard <[email protected]>, * Philip Edelbrock <[email protected]>, * Mark D. Studebaker <[email protected]>, * Dan Eaton <[email protected]> and * Stephen Rousset <[email protected]> */ /* This is the driver for the SMB Host controller on Acer Labs Inc. (ALI) M1535 South Bridge. The M1535 is a South bridge for portable systems. It is very similar to the M15x3 South bridges also produced by Acer Labs Inc. Some of the registers within the part have moved and some have been redefined slightly. Additionally, the sequencing of the SMBus transactions has been modified to be more consistent with the sequencing recommended by the manufacturer and observed through testing. These changes are reflected in this driver and can be identified by comparing this driver to the i2c-ali15x3 driver. For an overview of these chips see http://www.acerlabs.com The SMB controller is part of the 7101 device, which is an ACPI-compliant Power Management Unit (PMU). The whole 7101 device has to be enabled for the SMB to work. You can't just enable the SMB alone. The SMB and the ACPI have separate I/O spaces. We make sure that the SMB is enabled. We leave the ACPI alone. This driver controls the SMB Host only. This driver does not use interrupts. */ /* Note: we assume there can only be one ALI1535, with one SMBus interface */ #include <linux/module.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/stddef.h> #include <linux/delay.h> #include <linux/ioport.h> #include <linux/i2c.h> #include <linux/acpi.h> #include <linux/io.h> /* ALI1535 SMBus address offsets */ #define SMBHSTSTS (0 + ali1535_smba) #define SMBHSTTYP (1 + ali1535_smba) #define SMBHSTPORT (2 + ali1535_smba) #define SMBHSTCMD (7 + ali1535_smba) #define SMBHSTADD (3 + ali1535_smba) #define SMBHSTDAT0 (4 + ali1535_smba) #define SMBHSTDAT1 (5 + ali1535_smba) #define SMBBLKDAT (6 + ali1535_smba) /* PCI Address Constants */ #define SMBCOM 0x004 #define SMBREV 0x008 #define SMBCFG 0x0D1 #define SMBBA 0x0E2 #define SMBHSTCFG 0x0F0 #define SMBCLK 0x0F2 /* Other settings */ #define MAX_TIMEOUT 500 /* times 1/100 sec */ #define ALI1535_SMB_IOSIZE 32 #define ALI1535_SMB_DEFAULTBASE 0x8040 /* ALI1535 address lock bits */ #define ALI1535_LOCK 0x06 /* dwe */ /* ALI1535 command constants */ #define ALI1535_QUICK 0x00 #define ALI1535_BYTE 0x10 #define ALI1535_BYTE_DATA 0x20 #define ALI1535_WORD_DATA 0x30 #define ALI1535_BLOCK_DATA 0x40 #define ALI1535_I2C_READ 0x60 #define ALI1535_DEV10B_EN 0x80 /* Enable 10-bit addressing in */ /* I2C read */ #define ALI1535_T_OUT 0x08 /* Time-out Command (write) */ #define ALI1535_A_HIGH_BIT9 0x08 /* Bit 9 of 10-bit address in */ /* Alert-Response-Address */ /* (read) */ #define ALI1535_KILL 0x04 /* Kill Command (write) */ #define ALI1535_A_HIGH_BIT8 0x04 /* Bit 8 of 10-bit address in */ /* Alert-Response-Address */ /* (read) */ #define ALI1535_D_HI_MASK 0x03 /* Mask for isolating bits 9-8 */ /* of 10-bit address in I2C */ /* Read Command */ /* ALI1535 status register bits */ #define ALI1535_STS_IDLE 0x04 #define ALI1535_STS_BUSY 0x08 /* host busy */ #define ALI1535_STS_DONE 0x10 /* transaction complete */ #define ALI1535_STS_DEV 0x20 /* device error */ #define ALI1535_STS_BUSERR 0x40 /* bus error */ #define ALI1535_STS_FAIL 0x80 /* failed bus transaction */ #define ALI1535_STS_ERR 0xE0 /* all the bad error bits */ #define ALI1535_BLOCK_CLR 0x04 /* reset block data index */ /* ALI1535 device address register bits */ #define ALI1535_RD_ADDR 
0x01 /* Read/Write Bit in Device */ /* Address field */ /* -> Write = 0 */ /* -> Read = 1 */ #define ALI1535_SMBIO_EN 0x04 /* SMB I/O Space enable */ static struct pci_driver ali1535_driver; static unsigned long ali1535_smba; static unsigned short ali1535_offset; /* Detect whether a ALI1535 can be found, and initialize it, where necessary. Note the differences between kernels with the old PCI BIOS interface and newer kernels with the real PCI interface. In compat.h some things are defined to make the transition easier. */ static int ali1535_setup(struct pci_dev *dev) { int retval; unsigned char temp; /* Check the following things: - SMB I/O address is initialized - Device is enabled - We can use the addresses */ retval = pci_enable_device(dev); if (retval) { dev_err(&dev->dev, "ALI1535_smb can't enable device\n"); goto exit; } /* Determine the address of the SMBus area */ pci_read_config_word(dev, SMBBA, &ali1535_offset); dev_dbg(&dev->dev, "ALI1535_smb is at offset 0x%04x\n", ali1535_offset); ali1535_offset &= (0xffff & ~(ALI1535_SMB_IOSIZE - 1)); if (ali1535_offset == 0) { dev_warn(&dev->dev, "ALI1535_smb region uninitialized - upgrade BIOS?\n"); retval = -ENODEV; goto exit; } if (pci_resource_flags(dev, 0) & IORESOURCE_IO) ali1535_smba = pci_resource_start(dev, 0) + ali1535_offset; else ali1535_smba = ali1535_offset; retval = acpi_check_region(ali1535_smba, ALI1535_SMB_IOSIZE, ali1535_driver.name); if (retval) goto exit; if (!request_region(ali1535_smba, ALI1535_SMB_IOSIZE, ali1535_driver.name)) { dev_err(&dev->dev, "ALI1535_smb region 0x%lx already in use!\n", ali1535_smba); retval = -EBUSY; goto exit; } /* check if whole device is enabled */ pci_read_config_byte(dev, SMBCFG, &temp); if ((temp & ALI1535_SMBIO_EN) == 0) { dev_err(&dev->dev, "SMB device not enabled - upgrade BIOS?\n"); retval = -ENODEV; goto exit_free; } /* Is SMB Host controller enabled? */ pci_read_config_byte(dev, SMBHSTCFG, &temp); if ((temp & 1) == 0) { dev_err(&dev->dev, "SMBus controller not enabled - upgrade BIOS?\n"); retval = -ENODEV; goto exit_free; } /* set SMB clock to 74KHz as recommended in data sheet */ pci_write_config_byte(dev, SMBCLK, 0x20); /* The interrupt routing for SMB is set up in register 0x77 in the 1533 ISA Bridge device, NOT in the 7101 device. Don't bother with finding the 1533 device and reading the register. if ((....... & 0x0F) == 1) dev_dbg(&dev->dev, "ALI1535 using Interrupt 9 for SMBus.\n"); */ pci_read_config_byte(dev, SMBREV, &temp); dev_dbg(&dev->dev, "SMBREV = 0x%X\n", temp); dev_dbg(&dev->dev, "ALI1535_smba = 0x%lx\n", ali1535_smba); return 0; exit_free: release_region(ali1535_smba, ALI1535_SMB_IOSIZE); exit: return retval; } static int ali1535_transaction(struct i2c_adapter *adap) { int temp; int result = 0; int timeout = 0; dev_dbg(&adap->dev, "Transaction (pre): STS=%02x, TYP=%02x, " "CMD=%02x, ADD=%02x, DAT0=%02x, DAT1=%02x\n", inb_p(SMBHSTSTS), inb_p(SMBHSTTYP), inb_p(SMBHSTCMD), inb_p(SMBHSTADD), inb_p(SMBHSTDAT0), inb_p(SMBHSTDAT1)); /* get status */ temp = inb_p(SMBHSTSTS); /* Make sure the SMBus host is ready to start transmitting */ /* Check the busy bit first */ if (temp & ALI1535_STS_BUSY) { /* If the host controller is still busy, it may have timed out * in the previous transaction, resulting in a "SMBus Timeout" * printk. I've tried the following to reset a stuck busy bit. * 1. Reset the controller with an KILL command. (this * doesn't seem to clear the controller if an external * device is hung) * 2. 
Reset the controller and the other SMBus devices with a * T_OUT command. (this clears the host busy bit if an * external device is hung, but it comes back upon a new * access to a device) * 3. Disable and reenable the controller in SMBHSTCFG. Worst * case, nothing seems to work except power reset. */ /* Try resetting entire SMB bus, including other devices - This * may not work either - it clears the BUSY bit but then the * BUSY bit may come back on when you try and use the chip * again. If that's the case you are stuck. */ dev_info(&adap->dev, "Resetting entire SMB Bus to clear busy condition (%02x)\n", temp); outb_p(ALI1535_T_OUT, SMBHSTTYP); temp = inb_p(SMBHSTSTS); } /* now check the error bits and the busy bit */ if (temp & (ALI1535_STS_ERR | ALI1535_STS_BUSY)) { /* do a clear-on-write */ outb_p(0xFF, SMBHSTSTS); temp = inb_p(SMBHSTSTS); if (temp & (ALI1535_STS_ERR | ALI1535_STS_BUSY)) { /* This is probably going to be correctable only by a * power reset as one of the bits now appears to be * stuck */ /* This may be a bus or device with electrical problems. */ dev_err(&adap->dev, "SMBus reset failed! (0x%02x) - controller or " "device on bus is probably hung\n", temp); return -EBUSY; } } else { /* check and clear done bit */ if (temp & ALI1535_STS_DONE) outb_p(temp, SMBHSTSTS); } /* start the transaction by writing anything to the start register */ outb_p(0xFF, SMBHSTPORT); /* We will always wait for a fraction of a second! */ timeout = 0; do { usleep_range(1000, 2000); temp = inb_p(SMBHSTSTS); } while (((temp & ALI1535_STS_BUSY) && !(temp & ALI1535_STS_IDLE)) && (timeout++ < MAX_TIMEOUT)); /* If the SMBus is still busy, we give up */ if (timeout > MAX_TIMEOUT) { result = -ETIMEDOUT; dev_err(&adap->dev, "SMBus Timeout!\n"); } if (temp & ALI1535_STS_FAIL) { result = -EIO; dev_dbg(&adap->dev, "Error: Failed bus transaction\n"); } /* Unfortunately the ALI SMB controller maps "no response" and "bus * collision" into a single bit. No response is the usual case so don't * do a printk. This means that bus collisions go unreported. */ if (temp & ALI1535_STS_BUSERR) { result = -ENXIO; dev_dbg(&adap->dev, "Error: no response or bus collision ADD=%02x\n", inb_p(SMBHSTADD)); } /* haven't ever seen this */ if (temp & ALI1535_STS_DEV) { result = -EIO; dev_err(&adap->dev, "Error: device error\n"); } /* check to see if the "command complete" indication is set */ if (!(temp & ALI1535_STS_DONE)) { result = -ETIMEDOUT; dev_err(&adap->dev, "Error: command never completed\n"); } dev_dbg(&adap->dev, "Transaction (post): STS=%02x, TYP=%02x, " "CMD=%02x, ADD=%02x, DAT0=%02x, DAT1=%02x\n", inb_p(SMBHSTSTS), inb_p(SMBHSTTYP), inb_p(SMBHSTCMD), inb_p(SMBHSTADD), inb_p(SMBHSTDAT0), inb_p(SMBHSTDAT1)); /* take consequent actions for error conditions */ if (!(temp & ALI1535_STS_DONE)) { /* issue "kill" to reset host controller */ outb_p(ALI1535_KILL, SMBHSTTYP); outb_p(0xFF, SMBHSTSTS); } else if (temp & ALI1535_STS_ERR) { /* issue "timeout" to reset all devices on bus */ outb_p(ALI1535_T_OUT, SMBHSTTYP); outb_p(0xFF, SMBHSTSTS); } return result; } /* Return negative errno on error. 
*/ static s32 ali1535_access(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { int i, len; int temp; int timeout; s32 result = 0; /* make sure SMBus is idle */ temp = inb_p(SMBHSTSTS); for (timeout = 0; (timeout < MAX_TIMEOUT) && !(temp & ALI1535_STS_IDLE); timeout++) { usleep_range(1000, 2000); temp = inb_p(SMBHSTSTS); } if (timeout >= MAX_TIMEOUT) dev_warn(&adap->dev, "Idle wait Timeout! STS=0x%02x\n", temp); /* clear status register (clear-on-write) */ outb_p(0xFF, SMBHSTSTS); switch (size) { case I2C_SMBUS_QUICK: outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMBHSTADD); size = ALI1535_QUICK; outb_p(size, SMBHSTTYP); /* output command */ break; case I2C_SMBUS_BYTE: outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMBHSTADD); size = ALI1535_BYTE; outb_p(size, SMBHSTTYP); /* output command */ if (read_write == I2C_SMBUS_WRITE) outb_p(command, SMBHSTCMD); break; case I2C_SMBUS_BYTE_DATA: outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMBHSTADD); size = ALI1535_BYTE_DATA; outb_p(size, SMBHSTTYP); /* output command */ outb_p(command, SMBHSTCMD); if (read_write == I2C_SMBUS_WRITE) outb_p(data->byte, SMBHSTDAT0); break; case I2C_SMBUS_WORD_DATA: outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMBHSTADD); size = ALI1535_WORD_DATA; outb_p(size, SMBHSTTYP); /* output command */ outb_p(command, SMBHSTCMD); if (read_write == I2C_SMBUS_WRITE) { outb_p(data->word & 0xff, SMBHSTDAT0); outb_p((data->word & 0xff00) >> 8, SMBHSTDAT1); } break; case I2C_SMBUS_BLOCK_DATA: outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMBHSTADD); size = ALI1535_BLOCK_DATA; outb_p(size, SMBHSTTYP); /* output command */ outb_p(command, SMBHSTCMD); if (read_write == I2C_SMBUS_WRITE) { len = data->block[0]; if (len < 0) { len = 0; data->block[0] = len; } if (len > 32) { len = 32; data->block[0] = len; } outb_p(len, SMBHSTDAT0); /* Reset SMBBLKDAT */ outb_p(inb_p(SMBHSTTYP) | ALI1535_BLOCK_CLR, SMBHSTTYP); for (i = 1; i <= len; i++) outb_p(data->block[i], SMBBLKDAT); } break; default: dev_warn(&adap->dev, "Unsupported transaction %d\n", size); result = -EOPNOTSUPP; goto EXIT; } result = ali1535_transaction(adap); if (result) goto EXIT; if ((read_write == I2C_SMBUS_WRITE) || (size == ALI1535_QUICK)) { result = 0; goto EXIT; } switch (size) { case ALI1535_BYTE: /* Result put in SMBHSTDAT0 */ data->byte = inb_p(SMBHSTDAT0); break; case ALI1535_BYTE_DATA: data->byte = inb_p(SMBHSTDAT0); break; case ALI1535_WORD_DATA: data->word = inb_p(SMBHSTDAT0) + (inb_p(SMBHSTDAT1) << 8); break; case ALI1535_BLOCK_DATA: len = inb_p(SMBHSTDAT0); if (len > 32) len = 32; data->block[0] = len; /* Reset SMBBLKDAT */ outb_p(inb_p(SMBHSTTYP) | ALI1535_BLOCK_CLR, SMBHSTTYP); for (i = 1; i <= data->block[0]; i++) { data->block[i] = inb_p(SMBBLKDAT); dev_dbg(&adap->dev, "Blk: len=%d, i=%d, data=%02x\n", len, i, data->block[i]); } break; } EXIT: return result; } static u32 ali1535_func(struct i2c_adapter *adapter) { return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA; } static const struct i2c_algorithm smbus_algorithm = { .smbus_xfer = ali1535_access, .functionality = ali1535_func, }; static struct i2c_adapter ali1535_adapter = { .owner = THIS_MODULE, .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, .algo = &smbus_algorithm, }; static const struct pci_device_id ali1535_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) }, { }, }; MODULE_DEVICE_TABLE(pci, ali1535_ids); static 
int ali1535_probe(struct pci_dev *dev, const struct pci_device_id *id) { if (ali1535_setup(dev)) { dev_warn(&dev->dev, "ALI1535 not detected, module not inserted.\n"); return -ENODEV; } /* set up the sysfs linkage to our parent device */ ali1535_adapter.dev.parent = &dev->dev; snprintf(ali1535_adapter.name, sizeof(ali1535_adapter.name), "SMBus ALI1535 adapter at %04x", ali1535_offset); return i2c_add_adapter(&ali1535_adapter); } static void ali1535_remove(struct pci_dev *dev) { i2c_del_adapter(&ali1535_adapter); release_region(ali1535_smba, ALI1535_SMB_IOSIZE); /* * do not call pci_disable_device(dev) since it can cause hard hangs on * some systems during power-off */ } static struct pci_driver ali1535_driver = { .name = "ali1535_smbus", .id_table = ali1535_ids, .probe = ali1535_probe, .remove = ali1535_remove, }; module_pci_driver(ali1535_driver); MODULE_AUTHOR("Frodo Looijaard <[email protected]>"); MODULE_AUTHOR("Philip Edelbrock <[email protected]>"); MODULE_AUTHOR("Mark D. Studebaker <[email protected]>"); MODULE_AUTHOR("Dan Eaton <[email protected]>"); MODULE_DESCRIPTION("ALI1535 SMBus driver"); MODULE_LICENSE("GPL");
linux-master
drivers/i2c/busses/i2c-ali1535.c
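ali1535_access() builds the SMBHSTADD byte the standard SMBus way: the 7-bit slave address goes in bits 7:1 and the read/write flag in bit 0 (matching the ALI1535_RD_ADDR define), so a read of a device at 0x48 writes 0x91. A one-line sketch of that encoding (the helper is illustrative):

/*
 * Sketch of the SMBHSTADD encoding in ali1535_access(): 7-bit address
 * in bits 7:1, read (1) / write (0) flag in bit 0.
 */
#include <stdint.h>

static uint8_t smb_addr_byte(uint8_t addr7, int is_read)
{
	return (uint8_t)(((addr7 & 0x7f) << 1) | (is_read ? 1 : 0));
}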
// SPDX-License-Identifier: GPL-2.0 /* * Renesas RIIC driver * * Copyright (C) 2013 Wolfram Sang <[email protected]> * Copyright (C) 2013 Renesas Solutions Corp. */ /* * This i2c core has a lot of interrupts, namely 8. We use their chaining as * some kind of state machine. * * 1) The main xfer routine kicks off a transmission by putting the start bit * (or repeated start) on the bus and enabling the transmit interrupt (TIE) * since we need to send the slave address + RW bit in every case. * * 2) TIE sends slave address + RW bit and selects how to continue. * * 3a) Write case: We keep utilizing TIE as long as we have data to send. If we * are done, we switch over to the transmission done interrupt (TEIE) and mark * the message as completed (includes sending STOP) there. * * 3b) Read case: We switch over to receive interrupt (RIE). One dummy read is * needed to start clocking, then we keep receiving until we are done. Note * that we use the RDRFS mode all the time, i.e. we ACK/NACK every byte by * writing to the ACKBT bit. I tried using the RDRFS mode only at the end of a * message to create the final NACK as sketched in the datasheet. This caused * some subtle races (when byte n was processed and byte n+1 was already * waiting), though, and I started with the safe approach. * * 4) If we got a NACK somewhere, we flag the error and stop the transmission * via NAKIE. * * Also check the comments in the interrupt routines for some gory details. */ #include <linux/clk.h> #include <linux/completion.h> #include <linux/err.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/reset.h> #define RIIC_ICCR1 0x00 #define RIIC_ICCR2 0x04 #define RIIC_ICMR1 0x08 #define RIIC_ICMR3 0x10 #define RIIC_ICSER 0x18 #define RIIC_ICIER 0x1c #define RIIC_ICSR2 0x24 #define RIIC_ICBRL 0x34 #define RIIC_ICBRH 0x38 #define RIIC_ICDRT 0x3c #define RIIC_ICDRR 0x40 #define ICCR1_ICE 0x80 #define ICCR1_IICRST 0x40 #define ICCR1_SOWP 0x10 #define ICCR2_BBSY 0x80 #define ICCR2_SP 0x08 #define ICCR2_RS 0x04 #define ICCR2_ST 0x02 #define ICMR1_CKS_MASK 0x70 #define ICMR1_BCWP 0x08 #define ICMR1_CKS(_x) ((((_x) << 4) & ICMR1_CKS_MASK) | ICMR1_BCWP) #define ICMR3_RDRFS 0x20 #define ICMR3_ACKWP 0x10 #define ICMR3_ACKBT 0x08 #define ICIER_TIE 0x80 #define ICIER_TEIE 0x40 #define ICIER_RIE 0x20 #define ICIER_NAKIE 0x10 #define ICIER_SPIE 0x08 #define ICSR2_NACKF 0x10 #define ICBR_RESERVED 0xe0 /* Should be 1 on writes */ #define RIIC_INIT_MSG -1 struct riic_dev { void __iomem *base; u8 *buf; struct i2c_msg *msg; int bytes_left; int err; int is_last; struct completion msg_done; struct i2c_adapter adapter; struct clk *clk; }; struct riic_irq_desc { int res_num; irq_handler_t isr; char *name; }; static inline void riic_clear_set_bit(struct riic_dev *riic, u8 clear, u8 set, u8 reg) { writeb((readb(riic->base + reg) & ~clear) | set, riic->base + reg); } static int riic_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) { struct riic_dev *riic = i2c_get_adapdata(adap); unsigned long time_left; int i; u8 start_bit; pm_runtime_get_sync(adap->dev.parent); if (readb(riic->base + RIIC_ICCR2) & ICCR2_BBSY) { riic->err = -EBUSY; goto out; } reinit_completion(&riic->msg_done); riic->err = 0; writeb(0, riic->base + RIIC_ICSR2); for (i = 0, start_bit = ICCR2_ST; i < num; i++) { riic->bytes_left = RIIC_INIT_MSG; riic->buf = msgs[i].buf; riic->msg = &msgs[i]; riic->is_last = (i == num - 
1); writeb(ICIER_NAKIE | ICIER_TIE, riic->base + RIIC_ICIER); writeb(start_bit, riic->base + RIIC_ICCR2); time_left = wait_for_completion_timeout(&riic->msg_done, riic->adapter.timeout); if (time_left == 0) riic->err = -ETIMEDOUT; if (riic->err) break; start_bit = ICCR2_RS; } out: pm_runtime_put(adap->dev.parent); return riic->err ?: num; } static irqreturn_t riic_tdre_isr(int irq, void *data) { struct riic_dev *riic = data; u8 val; if (!riic->bytes_left) return IRQ_NONE; if (riic->bytes_left == RIIC_INIT_MSG) { if (riic->msg->flags & I2C_M_RD) /* On read, switch over to receive interrupt */ riic_clear_set_bit(riic, ICIER_TIE, ICIER_RIE, RIIC_ICIER); else /* On write, initialize length */ riic->bytes_left = riic->msg->len; val = i2c_8bit_addr_from_msg(riic->msg); } else { val = *riic->buf; riic->buf++; riic->bytes_left--; } /* * Switch to transmission ended interrupt when done. Do check here * after bytes_left was initialized to support SMBUS_QUICK (new msg has * 0 length then) */ if (riic->bytes_left == 0) riic_clear_set_bit(riic, ICIER_TIE, ICIER_TEIE, RIIC_ICIER); /* * This acks the TIE interrupt. We get another TIE immediately if our * value could be moved to the shadow shift register right away. So * this must be after updates to ICIER (where we want to disable TIE)! */ writeb(val, riic->base + RIIC_ICDRT); return IRQ_HANDLED; } static irqreturn_t riic_tend_isr(int irq, void *data) { struct riic_dev *riic = data; if (readb(riic->base + RIIC_ICSR2) & ICSR2_NACKF) { /* We got a NACKIE */ readb(riic->base + RIIC_ICDRR); /* dummy read */ riic_clear_set_bit(riic, ICSR2_NACKF, 0, RIIC_ICSR2); riic->err = -ENXIO; } else if (riic->bytes_left) { return IRQ_NONE; } if (riic->is_last || riic->err) { riic_clear_set_bit(riic, ICIER_TEIE, ICIER_SPIE, RIIC_ICIER); writeb(ICCR2_SP, riic->base + RIIC_ICCR2); } else { /* Transfer is complete, but do not send STOP */ riic_clear_set_bit(riic, ICIER_TEIE, 0, RIIC_ICIER); complete(&riic->msg_done); } return IRQ_HANDLED; } static irqreturn_t riic_rdrf_isr(int irq, void *data) { struct riic_dev *riic = data; if (!riic->bytes_left) return IRQ_NONE; if (riic->bytes_left == RIIC_INIT_MSG) { riic->bytes_left = riic->msg->len; readb(riic->base + RIIC_ICDRR); /* dummy read */ return IRQ_HANDLED; } if (riic->bytes_left == 1) { /* STOP must come before we set ACKBT! */ if (riic->is_last) { riic_clear_set_bit(riic, 0, ICIER_SPIE, RIIC_ICIER); writeb(ICCR2_SP, riic->base + RIIC_ICCR2); } riic_clear_set_bit(riic, 0, ICMR3_ACKBT, RIIC_ICMR3); } else { riic_clear_set_bit(riic, ICMR3_ACKBT, 0, RIIC_ICMR3); } /* Reading acks the RIE interrupt */ *riic->buf = readb(riic->base + RIIC_ICDRR); riic->buf++; riic->bytes_left--; return IRQ_HANDLED; } static irqreturn_t riic_stop_isr(int irq, void *data) { struct riic_dev *riic = data; /* read back registers to confirm writes have fully propagated */ writeb(0, riic->base + RIIC_ICSR2); readb(riic->base + RIIC_ICSR2); writeb(0, riic->base + RIIC_ICIER); readb(riic->base + RIIC_ICIER); complete(&riic->msg_done); return IRQ_HANDLED; } static u32 riic_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static const struct i2c_algorithm riic_algo = { .master_xfer = riic_xfer, .functionality = riic_func, }; static int riic_init_hw(struct riic_dev *riic, struct i2c_timings *t) { int ret = 0; unsigned long rate; int total_ticks, cks, brl, brh; pm_runtime_get_sync(riic->adapter.dev.parent); if (t->bus_freq_hz > I2C_MAX_FAST_MODE_FREQ) { dev_err(&riic->adapter.dev, "unsupported bus speed (%dHz). 
%d max\n", t->bus_freq_hz, I2C_MAX_FAST_MODE_FREQ); ret = -EINVAL; goto out; } rate = clk_get_rate(riic->clk); /* * Assume the default register settings: * FER.SCLE = 1 (SCL sync circuit enabled, adds 2 or 3 cycles) * FER.NFE = 1 (noise circuit enabled) * MR3.NF = 0 (1 cycle of noise filtered out) * * Freq (CKS=000) = (I2CCLK + tr + tf)/ (BRH + 3 + 1) + (BRL + 3 + 1) * Freq (CKS!=000) = (I2CCLK + tr + tf)/ (BRH + 2 + 1) + (BRL + 2 + 1) */ /* * Determine reference clock rate. We must be able to get the desired * frequency with only 62 clock ticks max (31 high, 31 low). * Aim for a duty of 60% LOW, 40% HIGH. */ total_ticks = DIV_ROUND_UP(rate, t->bus_freq_hz); for (cks = 0; cks < 7; cks++) { /* * 60% low time must be less than BRL + 2 + 1 * BRL max register value is 0x1F. */ brl = ((total_ticks * 6) / 10); if (brl <= (0x1F + 3)) break; total_ticks /= 2; rate /= 2; } if (brl > (0x1F + 3)) { dev_err(&riic->adapter.dev, "invalid speed (%lu). Too slow.\n", (unsigned long)t->bus_freq_hz); ret = -EINVAL; goto out; } brh = total_ticks - brl; /* Remove automatic clock ticks for sync circuit and NF */ if (cks == 0) { brl -= 4; brh -= 4; } else { brl -= 3; brh -= 3; } /* * Remove clock ticks for rise and fall times. Convert ns to clock * ticks. */ brl -= t->scl_fall_ns / (1000000000 / rate); brh -= t->scl_rise_ns / (1000000000 / rate); /* Adjust for min register values for when SCLE=1 and NFE=1 */ if (brl < 1) brl = 1; if (brh < 1) brh = 1; pr_debug("i2c-riic: freq=%lu, duty=%d, fall=%lu, rise=%lu, cks=%d, brl=%d, brh=%d\n", rate / total_ticks, ((brl + 3) * 100) / (brl + brh + 6), t->scl_fall_ns / (1000000000 / rate), t->scl_rise_ns / (1000000000 / rate), cks, brl, brh); /* Changing the order of accessing IICRST and ICE may break things! */ writeb(ICCR1_IICRST | ICCR1_SOWP, riic->base + RIIC_ICCR1); riic_clear_set_bit(riic, 0, ICCR1_ICE, RIIC_ICCR1); writeb(ICMR1_CKS(cks), riic->base + RIIC_ICMR1); writeb(brh | ICBR_RESERVED, riic->base + RIIC_ICBRH); writeb(brl | ICBR_RESERVED, riic->base + RIIC_ICBRL); writeb(0, riic->base + RIIC_ICSER); writeb(ICMR3_ACKWP | ICMR3_RDRFS, riic->base + RIIC_ICMR3); riic_clear_set_bit(riic, ICCR1_IICRST, 0, RIIC_ICCR1); out: pm_runtime_put(riic->adapter.dev.parent); return ret; } static struct riic_irq_desc riic_irqs[] = { { .res_num = 0, .isr = riic_tend_isr, .name = "riic-tend" }, { .res_num = 1, .isr = riic_rdrf_isr, .name = "riic-rdrf" }, { .res_num = 2, .isr = riic_tdre_isr, .name = "riic-tdre" }, { .res_num = 3, .isr = riic_stop_isr, .name = "riic-stop" }, { .res_num = 5, .isr = riic_tend_isr, .name = "riic-nack" }, }; static void riic_reset_control_assert(void *data) { reset_control_assert(data); } static int riic_i2c_probe(struct platform_device *pdev) { struct riic_dev *riic; struct i2c_adapter *adap; struct i2c_timings i2c_t; struct reset_control *rstc; int i, ret; riic = devm_kzalloc(&pdev->dev, sizeof(*riic), GFP_KERNEL); if (!riic) return -ENOMEM; riic->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(riic->base)) return PTR_ERR(riic->base); riic->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(riic->clk)) { dev_err(&pdev->dev, "missing controller clock"); return PTR_ERR(riic->clk); } rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL); if (IS_ERR(rstc)) return dev_err_probe(&pdev->dev, PTR_ERR(rstc), "Error: missing reset ctrl\n"); ret = reset_control_deassert(rstc); if (ret) return ret; ret = devm_add_action_or_reset(&pdev->dev, riic_reset_control_assert, rstc); if (ret) return ret; for (i = 0; i < ARRAY_SIZE(riic_irqs); i++) { 
ret = platform_get_irq(pdev, riic_irqs[i].res_num); if (ret < 0) return ret; ret = devm_request_irq(&pdev->dev, ret, riic_irqs[i].isr, 0, riic_irqs[i].name, riic); if (ret) { dev_err(&pdev->dev, "failed to request irq %s\n", riic_irqs[i].name); return ret; } } adap = &riic->adapter; i2c_set_adapdata(adap, riic); strscpy(adap->name, "Renesas RIIC adapter", sizeof(adap->name)); adap->owner = THIS_MODULE; adap->algo = &riic_algo; adap->dev.parent = &pdev->dev; adap->dev.of_node = pdev->dev.of_node; init_completion(&riic->msg_done); i2c_parse_fw_timings(&pdev->dev, &i2c_t, true); pm_runtime_enable(&pdev->dev); ret = riic_init_hw(riic, &i2c_t); if (ret) goto out; ret = i2c_add_adapter(adap); if (ret) goto out; platform_set_drvdata(pdev, riic); dev_info(&pdev->dev, "registered with %dHz bus speed\n", i2c_t.bus_freq_hz); return 0; out: pm_runtime_disable(&pdev->dev); return ret; } static void riic_i2c_remove(struct platform_device *pdev) { struct riic_dev *riic = platform_get_drvdata(pdev); pm_runtime_get_sync(&pdev->dev); writeb(0, riic->base + RIIC_ICIER); pm_runtime_put(&pdev->dev); i2c_del_adapter(&riic->adapter); pm_runtime_disable(&pdev->dev); } static const struct of_device_id riic_i2c_dt_ids[] = { { .compatible = "renesas,riic-rz", }, { /* Sentinel */ }, }; static struct platform_driver riic_i2c_driver = { .probe = riic_i2c_probe, .remove_new = riic_i2c_remove, .driver = { .name = "i2c-riic", .of_match_table = riic_i2c_dt_ids, }, }; module_platform_driver(riic_i2c_driver); MODULE_DESCRIPTION("Renesas RIIC adapter"); MODULE_AUTHOR("Wolfram Sang <[email protected]>"); MODULE_LICENSE("GPL v2"); MODULE_DEVICE_TABLE(of, riic_i2c_dt_ids);
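/*
 * Illustrative sketch, not part of the driver: the interrupt chaining from
 * the header comment above, reduced to a user-space state machine. The enum,
 * the trace strings and the byte count are invented for the demo; the handoff
 * order (TIE, then RIE for reads or TEIE for writes, then SPIE around STOP)
 * is the one the ISRs implement.
 */
#include <stdbool.h>
#include <stdio.h>

enum riic_irq { TIE, RIE, TEIE, SPIE };

static const char *const irq_name[] = { "TIE", "RIE", "TEIE", "SPIE" };

static void trace(enum riic_irq irq, const char *what)
{
	printf("%-4s %s\n", irq_name[irq], what);
}

int main(void)
{
	bool is_read = false;	/* flip to walk the read path instead */
	int bytes_left = 2;	/* pretend message length */

	trace(TIE, "send slave address + R/W bit");
	if (is_read) {
		trace(RIE, "dummy read starts SCL clocking");
		while (bytes_left--)
			trace(RIE, "store byte, ACK/NACK via ACKBT");
	} else {
		while (bytes_left--)
			trace(TIE, "queue next data byte");
		trace(TEIE, "last byte fully shifted out");
	}
	trace(SPIE, "STOP seen on the bus -> complete(msg_done)");
	return 0;
}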
linux-master
drivers/i2c/busses/i2c-riic.c
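The 60/40 duty split and the CKS halving loop in riic_init_hw() are easier to follow with concrete numbers. Below is a hedged user-space sketch of that arithmetic: the 33.325 MHz functional clock is an assumed example value, and the rise/fall-time correction is noted but left out. It also restates what the frequency comment in the driver is getting at: the SCL period in reference-clock ticks is (BRH + const) + (BRL + const) plus the rise and fall times, where const is 4 for CKS=0 and 3 otherwise.

/* Sketch of the ICBRL/ICBRH selection in riic_init_hw(), with assumed inputs. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long rate = 33325000;	/* assumed I2C functional clock, Hz */
	unsigned long bus = 400000;	/* Fast-mode target */
	long total_ticks = DIV_ROUND_UP(rate, bus);
	long brl, brh;
	int cks;

	/* Halve the reference clock until 60% LOW time fits BRL (max 0x1F + 3) */
	for (cks = 0; cks < 7; cks++) {
		brl = (total_ticks * 6) / 10;
		if (brl <= 0x1F + 3)
			break;
		total_ticks /= 2;
		rate /= 2;
	}
	brh = total_ticks - brl;

	/* SCL sync circuit + noise filter consume 4 ticks at CKS=0, else 3 */
	brl -= (cks == 0) ? 4 : 3;
	brh -= (cks == 0) ? 4 : 3;

	/* riic_init_hw() additionally subtracts SCL rise/fall time ticks here */
	printf("cks=%d brl=%ld brh=%ld -> SCL about %lu Hz\n",
	       cks, brl, brh, rate / (unsigned long)total_ticks);
	return 0;
}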
// SPDX-License-Identifier: GPL-2.0-or-later /* i2c Support for the Apple `Hydra' Mac I/O Copyright (c) 1999-2004 Geert Uytterhoeven <[email protected]> Based on i2c Support for Via Technologies 82C586B South Bridge Copyright (c) 1998, 1999 Kyösti Mälkki <[email protected]> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/types.h> #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> #include <linux/io.h> #include <asm/hydra.h> #define HYDRA_CPD_PD0 0x00000001 /* CachePD lines */ #define HYDRA_CPD_PD1 0x00000002 #define HYDRA_CPD_PD2 0x00000004 #define HYDRA_CPD_PD3 0x00000008 #define HYDRA_SCLK HYDRA_CPD_PD0 #define HYDRA_SDAT HYDRA_CPD_PD1 #define HYDRA_SCLK_OE 0x00000010 #define HYDRA_SDAT_OE 0x00000020 static inline void pdregw(void *data, u32 val) { struct Hydra *hydra = (struct Hydra *)data; writel(val, &hydra->CachePD); } static inline u32 pdregr(void *data) { struct Hydra *hydra = (struct Hydra *)data; return readl(&hydra->CachePD); } static void hydra_bit_setscl(void *data, int state) { u32 val = pdregr(data); if (state) val &= ~HYDRA_SCLK_OE; else { val &= ~HYDRA_SCLK; val |= HYDRA_SCLK_OE; } pdregw(data, val); } static void hydra_bit_setsda(void *data, int state) { u32 val = pdregr(data); if (state) val &= ~HYDRA_SDAT_OE; else { val &= ~HYDRA_SDAT; val |= HYDRA_SDAT_OE; } pdregw(data, val); } static int hydra_bit_getscl(void *data) { return (pdregr(data) & HYDRA_SCLK) != 0; } static int hydra_bit_getsda(void *data) { return (pdregr(data) & HYDRA_SDAT) != 0; } /* ------------------------------------------------------------------------ */ static struct i2c_algo_bit_data hydra_bit_data = { .setsda = hydra_bit_setsda, .setscl = hydra_bit_setscl, .getsda = hydra_bit_getsda, .getscl = hydra_bit_getscl, .udelay = 5, .timeout = HZ }; static struct i2c_adapter hydra_adap = { .owner = THIS_MODULE, .name = "Hydra i2c", .algo_data = &hydra_bit_data, }; static const struct pci_device_id hydra_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_HYDRA) }, { 0, } }; MODULE_DEVICE_TABLE (pci, hydra_ids); static int hydra_probe(struct pci_dev *dev, const struct pci_device_id *id) { unsigned long base = pci_resource_start(dev, 0); int res; if (!request_mem_region(base+offsetof(struct Hydra, CachePD), 4, hydra_adap.name)) return -EBUSY; hydra_bit_data.data = pci_ioremap_bar(dev, 0); if (hydra_bit_data.data == NULL) { release_mem_region(base+offsetof(struct Hydra, CachePD), 4); return -ENODEV; } pdregw(hydra_bit_data.data, 0); /* clear SCLK_OE and SDAT_OE */ hydra_adap.dev.parent = &dev->dev; res = i2c_bit_add_bus(&hydra_adap); if (res < 0) { iounmap(hydra_bit_data.data); release_mem_region(base+offsetof(struct Hydra, CachePD), 4); return res; } return 0; } static void hydra_remove(struct pci_dev *dev) { pdregw(hydra_bit_data.data, 0); /* clear SCLK_OE and SDAT_OE */ i2c_del_adapter(&hydra_adap); iounmap(hydra_bit_data.data); release_mem_region(pci_resource_start(dev, 0)+ offsetof(struct Hydra, CachePD), 4); } static struct pci_driver hydra_driver = { .name = "hydra_smbus", .id_table = hydra_ids, .probe = hydra_probe, .remove = hydra_remove, }; module_pci_driver(hydra_driver); MODULE_AUTHOR("Geert Uytterhoeven <[email protected]>"); MODULE_DESCRIPTION("i2c for Apple Hydra Mac I/O"); MODULE_LICENSE("GPL");
linux-master
drivers/i2c/busses/i2c-hydra.c
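hydra_bit_setscl() and hydra_bit_setsda() never drive a line high: they emulate open-drain outputs by toggling the output-enable bits, so a released line is pulled to VCC externally and a slave can still stretch SCL. A hedged stand-alone sketch of the same trick, with invented DATA/OE bit masks standing in for the CachePD layout:

#include <stdint.h>
#include <stdio.h>

#define LINE_DATA 0x01	/* hypothetical data bit */
#define LINE_OE   0x10	/* hypothetical output-enable bit */

static uint32_t reg;	/* stands in for the memory-mapped CachePD register */

static void line_set(int state)
{
	if (state) {
		reg &= ~LINE_OE;	/* release: external pull-up makes it high */
	} else {
		reg &= ~LINE_DATA;	/* preload 0 ... */
		reg |= LINE_OE;		/* ... and actively drive the 0 */
	}
}

int main(void)
{
	line_set(0);
	printf("driving low: reg=0x%02x\n", (unsigned)reg);
	line_set(1);
	/* on real hardware a read-back would now sample the wire level */
	printf("released:    reg=0x%02x\n", (unsigned)reg);
	return 0;
}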
// SPDX-License-Identifier: GPL-2.0 /* * HiSilicon I2C Controller Driver for Kunpeng SoC * * Copyright (c) 2021 HiSilicon Technologies Co., Ltd. */ #include <linux/bits.h> #include <linux/bitfield.h> #include <linux/clk.h> #include <linux/completion.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> #include <linux/mod_devicetable.h> #include <linux/platform_device.h> #include <linux/property.h> #include <linux/units.h> #define HISI_I2C_FRAME_CTRL 0x0000 #define HISI_I2C_FRAME_CTRL_SPEED_MODE GENMASK(1, 0) #define HISI_I2C_FRAME_CTRL_ADDR_TEN BIT(2) #define HISI_I2C_SLV_ADDR 0x0004 #define HISI_I2C_SLV_ADDR_VAL GENMASK(9, 0) #define HISI_I2C_SLV_ADDR_GC_S_MODE BIT(10) #define HISI_I2C_SLV_ADDR_GC_S_EN BIT(11) #define HISI_I2C_CMD_TXDATA 0x0008 #define HISI_I2C_CMD_TXDATA_DATA GENMASK(7, 0) #define HISI_I2C_CMD_TXDATA_RW BIT(8) #define HISI_I2C_CMD_TXDATA_P_EN BIT(9) #define HISI_I2C_CMD_TXDATA_SR_EN BIT(10) #define HISI_I2C_RXDATA 0x000c #define HISI_I2C_RXDATA_DATA GENMASK(7, 0) #define HISI_I2C_SS_SCL_HCNT 0x0010 #define HISI_I2C_SS_SCL_LCNT 0x0014 #define HISI_I2C_FS_SCL_HCNT 0x0018 #define HISI_I2C_FS_SCL_LCNT 0x001c #define HISI_I2C_HS_SCL_HCNT 0x0020 #define HISI_I2C_HS_SCL_LCNT 0x0024 #define HISI_I2C_FIFO_CTRL 0x0028 #define HISI_I2C_FIFO_RX_CLR BIT(0) #define HISI_I2C_FIFO_TX_CLR BIT(1) #define HISI_I2C_FIFO_RX_AF_THRESH GENMASK(7, 2) #define HISI_I2C_FIFO_TX_AE_THRESH GENMASK(13, 8) #define HISI_I2C_FIFO_STATE 0x002c #define HISI_I2C_FIFO_STATE_RX_RERR BIT(0) #define HISI_I2C_FIFO_STATE_RX_WERR BIT(1) #define HISI_I2C_FIFO_STATE_RX_EMPTY BIT(3) #define HISI_I2C_FIFO_STATE_TX_RERR BIT(6) #define HISI_I2C_FIFO_STATE_TX_WERR BIT(7) #define HISI_I2C_FIFO_STATE_TX_FULL BIT(11) #define HISI_I2C_SDA_HOLD 0x0030 #define HISI_I2C_SDA_HOLD_TX GENMASK(15, 0) #define HISI_I2C_SDA_HOLD_RX GENMASK(23, 16) #define HISI_I2C_FS_SPK_LEN 0x0038 #define HISI_I2C_FS_SPK_LEN_CNT GENMASK(7, 0) #define HISI_I2C_HS_SPK_LEN 0x003c #define HISI_I2C_HS_SPK_LEN_CNT GENMASK(7, 0) #define HISI_I2C_INT_MSTAT 0x0044 #define HISI_I2C_INT_CLR 0x0048 #define HISI_I2C_INT_MASK 0x004C #define HISI_I2C_TRANS_STATE 0x0050 #define HISI_I2C_TRANS_ERR 0x0054 #define HISI_I2C_VERSION 0x0058 #define HISI_I2C_INT_ALL GENMASK(4, 0) #define HISI_I2C_INT_TRANS_CPLT BIT(0) #define HISI_I2C_INT_TRANS_ERR BIT(1) #define HISI_I2C_INT_FIFO_ERR BIT(2) #define HISI_I2C_INT_RX_FULL BIT(3) #define HISI_I2C_INT_TX_EMPTY BIT(4) #define HISI_I2C_INT_ERR \ (HISI_I2C_INT_TRANS_ERR | HISI_I2C_INT_FIFO_ERR) #define HISI_I2C_STD_SPEED_MODE 0 #define HISI_I2C_FAST_SPEED_MODE 1 #define HISI_I2C_HIGH_SPEED_MODE 2 #define HISI_I2C_TX_FIFO_DEPTH 64 #define HISI_I2C_RX_FIFO_DEPTH 64 #define HISI_I2C_TX_F_AE_THRESH 1 #define HISI_I2C_RX_F_AF_THRESH 60 #define NSEC_TO_CYCLES(ns, clk_rate_khz) \ DIV_ROUND_UP_ULL((clk_rate_khz) * (ns), NSEC_PER_MSEC) struct hisi_i2c_controller { struct i2c_adapter adapter; void __iomem *iobase; struct device *dev; struct clk *clk; int irq; /* Intermediates for recording the transfer process */ struct completion *completion; struct i2c_msg *msgs; int msg_num; int msg_tx_idx; int buf_tx_idx; int msg_rx_idx; int buf_rx_idx; u16 tar_addr; u32 xfer_err; /* I2C bus configuration */ struct i2c_timings t; u32 clk_rate_khz; u32 spk_len; }; static void hisi_i2c_enable_int(struct hisi_i2c_controller *ctlr, u32 mask) { writel_relaxed(mask, ctlr->iobase + HISI_I2C_INT_MASK); } static void hisi_i2c_disable_int(struct hisi_i2c_controller *ctlr, u32 mask) { writel_relaxed((~mask) & 
HISI_I2C_INT_ALL, ctlr->iobase + HISI_I2C_INT_MASK); } static void hisi_i2c_clear_int(struct hisi_i2c_controller *ctlr, u32 mask) { writel_relaxed(mask, ctlr->iobase + HISI_I2C_INT_CLR); } static void hisi_i2c_handle_errors(struct hisi_i2c_controller *ctlr) { u32 int_err = ctlr->xfer_err, reg; if (int_err & HISI_I2C_INT_FIFO_ERR) { reg = readl(ctlr->iobase + HISI_I2C_FIFO_STATE); if (reg & HISI_I2C_FIFO_STATE_RX_RERR) dev_err(ctlr->dev, "rx fifo error read\n"); if (reg & HISI_I2C_FIFO_STATE_RX_WERR) dev_err(ctlr->dev, "rx fifo error write\n"); if (reg & HISI_I2C_FIFO_STATE_TX_RERR) dev_err(ctlr->dev, "tx fifo error read\n"); if (reg & HISI_I2C_FIFO_STATE_TX_WERR) dev_err(ctlr->dev, "tx fifo error write\n"); } } static int hisi_i2c_start_xfer(struct hisi_i2c_controller *ctlr) { struct i2c_msg *msg = ctlr->msgs; u32 reg; reg = readl(ctlr->iobase + HISI_I2C_FRAME_CTRL); reg &= ~HISI_I2C_FRAME_CTRL_ADDR_TEN; if (msg->flags & I2C_M_TEN) reg |= HISI_I2C_FRAME_CTRL_ADDR_TEN; writel(reg, ctlr->iobase + HISI_I2C_FRAME_CTRL); reg = readl(ctlr->iobase + HISI_I2C_SLV_ADDR); reg &= ~HISI_I2C_SLV_ADDR_VAL; reg |= FIELD_PREP(HISI_I2C_SLV_ADDR_VAL, msg->addr); writel(reg, ctlr->iobase + HISI_I2C_SLV_ADDR); reg = readl(ctlr->iobase + HISI_I2C_FIFO_CTRL); reg |= HISI_I2C_FIFO_RX_CLR | HISI_I2C_FIFO_TX_CLR; writel(reg, ctlr->iobase + HISI_I2C_FIFO_CTRL); reg &= ~(HISI_I2C_FIFO_RX_CLR | HISI_I2C_FIFO_TX_CLR); writel(reg, ctlr->iobase + HISI_I2C_FIFO_CTRL); hisi_i2c_clear_int(ctlr, HISI_I2C_INT_ALL); hisi_i2c_enable_int(ctlr, HISI_I2C_INT_ALL); return 0; } static void hisi_i2c_reset_xfer(struct hisi_i2c_controller *ctlr) { ctlr->msg_num = 0; ctlr->xfer_err = 0; ctlr->msg_tx_idx = 0; ctlr->msg_rx_idx = 0; ctlr->buf_tx_idx = 0; ctlr->buf_rx_idx = 0; } /* * Initialize the transfer information and start the I2C bus transfer. * We only configure the transfer and do some pre/post works here, and * wait for the transfer done. The major transfer process is performed * in the IRQ handler. 
*/ static int hisi_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct hisi_i2c_controller *ctlr = i2c_get_adapdata(adap); DECLARE_COMPLETION_ONSTACK(done); int ret = num; hisi_i2c_reset_xfer(ctlr); ctlr->completion = &done; ctlr->msg_num = num; ctlr->msgs = msgs; hisi_i2c_start_xfer(ctlr); if (!wait_for_completion_timeout(ctlr->completion, adap->timeout)) { hisi_i2c_disable_int(ctlr, HISI_I2C_INT_ALL); synchronize_irq(ctlr->irq); i2c_recover_bus(&ctlr->adapter); dev_err(ctlr->dev, "bus transfer timeout\n"); ret = -EIO; } if (ctlr->xfer_err) { hisi_i2c_handle_errors(ctlr); ret = -EIO; } hisi_i2c_reset_xfer(ctlr); ctlr->completion = NULL; return ret; } static u32 hisi_i2c_functionality(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR | I2C_FUNC_SMBUS_EMUL; } static const struct i2c_algorithm hisi_i2c_algo = { .master_xfer = hisi_i2c_master_xfer, .functionality = hisi_i2c_functionality, }; static int hisi_i2c_read_rx_fifo(struct hisi_i2c_controller *ctlr) { struct i2c_msg *cur_msg; u32 fifo_state; while (ctlr->msg_rx_idx < ctlr->msg_num) { cur_msg = ctlr->msgs + ctlr->msg_rx_idx; if (!(cur_msg->flags & I2C_M_RD)) { ctlr->msg_rx_idx++; continue; } fifo_state = readl(ctlr->iobase + HISI_I2C_FIFO_STATE); while (!(fifo_state & HISI_I2C_FIFO_STATE_RX_EMPTY) && ctlr->buf_rx_idx < cur_msg->len) { cur_msg->buf[ctlr->buf_rx_idx++] = readl(ctlr->iobase + HISI_I2C_RXDATA); fifo_state = readl(ctlr->iobase + HISI_I2C_FIFO_STATE); } if (ctlr->buf_rx_idx == cur_msg->len) { ctlr->buf_rx_idx = 0; ctlr->msg_rx_idx++; } if (fifo_state & HISI_I2C_FIFO_STATE_RX_EMPTY) break; } return 0; } static void hisi_i2c_xfer_msg(struct hisi_i2c_controller *ctlr) { int max_write = HISI_I2C_TX_FIFO_DEPTH; bool need_restart = false, last_msg; struct i2c_msg *cur_msg; u32 cmd, fifo_state; while (ctlr->msg_tx_idx < ctlr->msg_num) { cur_msg = ctlr->msgs + ctlr->msg_tx_idx; last_msg = (ctlr->msg_tx_idx == ctlr->msg_num - 1); /* Signal the SR bit when we start transferring a new message */ if (ctlr->msg_tx_idx && !ctlr->buf_tx_idx) need_restart = true; fifo_state = readl(ctlr->iobase + HISI_I2C_FIFO_STATE); while (!(fifo_state & HISI_I2C_FIFO_STATE_TX_FULL) && ctlr->buf_tx_idx < cur_msg->len && max_write) { cmd = 0; if (need_restart) { cmd |= HISI_I2C_CMD_TXDATA_SR_EN; need_restart = false; } /* Signal the STOP bit at the last frame of the last message */ if (ctlr->buf_tx_idx == cur_msg->len - 1 && last_msg) cmd |= HISI_I2C_CMD_TXDATA_P_EN; if (cur_msg->flags & I2C_M_RD) cmd |= HISI_I2C_CMD_TXDATA_RW; else cmd |= FIELD_PREP(HISI_I2C_CMD_TXDATA_DATA, cur_msg->buf[ctlr->buf_tx_idx]); writel(cmd, ctlr->iobase + HISI_I2C_CMD_TXDATA); ctlr->buf_tx_idx++; max_write--; fifo_state = readl(ctlr->iobase + HISI_I2C_FIFO_STATE); } /* Update the transfer index after per message transfer is done. */ if (ctlr->buf_tx_idx == cur_msg->len) { ctlr->buf_tx_idx = 0; ctlr->msg_tx_idx++; } if ((fifo_state & HISI_I2C_FIFO_STATE_TX_FULL) || max_write == 0) break; } /* * Disable the TX_EMPTY interrupt after finishing all the messages to * avoid overwhelming the CPU. */ if (ctlr->msg_tx_idx == ctlr->msg_num) hisi_i2c_disable_int(ctlr, HISI_I2C_INT_TX_EMPTY); } static irqreturn_t hisi_i2c_irq(int irq, void *context) { struct hisi_i2c_controller *ctlr = context; u32 int_stat; /* * Don't handle the interrupt if cltr->completion is NULL. We may * reach here because the interrupt is spurious or the transfer is * started by another port (e.g. firmware) rather than us. 
*/ if (!ctlr->completion) return IRQ_NONE; int_stat = readl(ctlr->iobase + HISI_I2C_INT_MSTAT); hisi_i2c_clear_int(ctlr, int_stat); if (!(int_stat & HISI_I2C_INT_ALL)) return IRQ_NONE; if (int_stat & HISI_I2C_INT_TX_EMPTY) hisi_i2c_xfer_msg(ctlr); if (int_stat & HISI_I2C_INT_ERR) { ctlr->xfer_err = int_stat; goto out; } /* Drain the rx fifo before finish the transfer */ if (int_stat & (HISI_I2C_INT_TRANS_CPLT | HISI_I2C_INT_RX_FULL)) hisi_i2c_read_rx_fifo(ctlr); out: /* * Only use TRANS_CPLT to indicate the completion. On error cases we'll * get two interrupts, INT_ERR first then TRANS_CPLT. */ if (int_stat & HISI_I2C_INT_TRANS_CPLT) { hisi_i2c_disable_int(ctlr, HISI_I2C_INT_ALL); hisi_i2c_clear_int(ctlr, HISI_I2C_INT_ALL); complete(ctlr->completion); } return IRQ_HANDLED; } /* * Helper function for calculating and configuring the HIGH and LOW * periods of SCL clock. The caller will pass the ratio of the * counts (divide / divisor) according to the target speed mode, * and the target registers. */ static void hisi_i2c_set_scl(struct hisi_i2c_controller *ctlr, u32 divide, u32 divisor, u32 reg_hcnt, u32 reg_lcnt) { u32 total_cnt, t_scl_hcnt, t_scl_lcnt, scl_fall_cnt, scl_rise_cnt; u32 scl_hcnt, scl_lcnt; /* Total SCL clock cycles per speed period */ total_cnt = DIV_ROUND_UP_ULL(ctlr->clk_rate_khz * HZ_PER_KHZ, ctlr->t.bus_freq_hz); /* Total HIGH level SCL clock cycles including edges */ t_scl_hcnt = DIV_ROUND_UP_ULL(total_cnt * divide, divisor); /* Total LOW level SCL clock cycles including edges */ t_scl_lcnt = total_cnt - t_scl_hcnt; /* Fall edge SCL clock cycles */ scl_fall_cnt = NSEC_TO_CYCLES(ctlr->t.scl_fall_ns, ctlr->clk_rate_khz); /* Rise edge SCL clock cycles */ scl_rise_cnt = NSEC_TO_CYCLES(ctlr->t.scl_rise_ns, ctlr->clk_rate_khz); /* Calculated HIGH and LOW periods of SCL clock */ scl_hcnt = t_scl_hcnt - ctlr->spk_len - 7 - scl_fall_cnt; scl_lcnt = t_scl_lcnt - 1 - scl_rise_cnt; writel(scl_hcnt, ctlr->iobase + reg_hcnt); writel(scl_lcnt, ctlr->iobase + reg_lcnt); } static void hisi_i2c_configure_bus(struct hisi_i2c_controller *ctlr) { u32 reg, sda_hold_cnt, speed_mode; i2c_parse_fw_timings(ctlr->dev, &ctlr->t, true); ctlr->spk_len = NSEC_TO_CYCLES(ctlr->t.digital_filter_width_ns, ctlr->clk_rate_khz); switch (ctlr->t.bus_freq_hz) { case I2C_MAX_FAST_MODE_FREQ: speed_mode = HISI_I2C_FAST_SPEED_MODE; hisi_i2c_set_scl(ctlr, 26, 76, HISI_I2C_FS_SCL_HCNT, HISI_I2C_FS_SCL_LCNT); break; case I2C_MAX_HIGH_SPEED_MODE_FREQ: speed_mode = HISI_I2C_HIGH_SPEED_MODE; hisi_i2c_set_scl(ctlr, 6, 22, HISI_I2C_HS_SCL_HCNT, HISI_I2C_HS_SCL_LCNT); break; case I2C_MAX_STANDARD_MODE_FREQ: default: speed_mode = HISI_I2C_STD_SPEED_MODE; /* For default condition force the bus speed to standard mode. 
*/ ctlr->t.bus_freq_hz = I2C_MAX_STANDARD_MODE_FREQ; hisi_i2c_set_scl(ctlr, 40, 87, HISI_I2C_SS_SCL_HCNT, HISI_I2C_SS_SCL_LCNT); break; } reg = readl(ctlr->iobase + HISI_I2C_FRAME_CTRL); reg &= ~HISI_I2C_FRAME_CTRL_SPEED_MODE; reg |= FIELD_PREP(HISI_I2C_FRAME_CTRL_SPEED_MODE, speed_mode); writel(reg, ctlr->iobase + HISI_I2C_FRAME_CTRL); sda_hold_cnt = NSEC_TO_CYCLES(ctlr->t.sda_hold_ns, ctlr->clk_rate_khz); reg = FIELD_PREP(HISI_I2C_SDA_HOLD_TX, sda_hold_cnt); writel(reg, ctlr->iobase + HISI_I2C_SDA_HOLD); writel(ctlr->spk_len, ctlr->iobase + HISI_I2C_FS_SPK_LEN); reg = FIELD_PREP(HISI_I2C_FIFO_RX_AF_THRESH, HISI_I2C_RX_F_AF_THRESH); reg |= FIELD_PREP(HISI_I2C_FIFO_TX_AE_THRESH, HISI_I2C_TX_F_AE_THRESH); writel(reg, ctlr->iobase + HISI_I2C_FIFO_CTRL); } static int hisi_i2c_probe(struct platform_device *pdev) { struct hisi_i2c_controller *ctlr; struct device *dev = &pdev->dev; struct i2c_adapter *adapter; u64 clk_rate_hz; u32 hw_version; int ret; ctlr = devm_kzalloc(dev, sizeof(*ctlr), GFP_KERNEL); if (!ctlr) return -ENOMEM; ctlr->iobase = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ctlr->iobase)) return PTR_ERR(ctlr->iobase); ctlr->irq = platform_get_irq(pdev, 0); if (ctlr->irq < 0) return ctlr->irq; ctlr->dev = dev; hisi_i2c_disable_int(ctlr, HISI_I2C_INT_ALL); ret = devm_request_irq(dev, ctlr->irq, hisi_i2c_irq, 0, "hisi-i2c", ctlr); if (ret) return dev_err_probe(dev, ret, "failed to request irq handler\n"); ctlr->clk = devm_clk_get_optional_enabled(&pdev->dev, NULL); if (IS_ERR_OR_NULL(ctlr->clk)) { ret = device_property_read_u64(dev, "clk_rate", &clk_rate_hz); if (ret) return dev_err_probe(dev, ret, "failed to get clock frequency\n"); } else { clk_rate_hz = clk_get_rate(ctlr->clk); } ctlr->clk_rate_khz = DIV_ROUND_UP_ULL(clk_rate_hz, HZ_PER_KHZ); hisi_i2c_configure_bus(ctlr); adapter = &ctlr->adapter; snprintf(adapter->name, sizeof(adapter->name), "HiSilicon I2C Controller %s", dev_name(dev)); adapter->owner = THIS_MODULE; adapter->algo = &hisi_i2c_algo; adapter->dev.parent = dev; i2c_set_adapdata(adapter, ctlr); ret = devm_i2c_add_adapter(dev, adapter); if (ret) return ret; hw_version = readl(ctlr->iobase + HISI_I2C_VERSION); dev_info(ctlr->dev, "speed mode is %s. hw version 0x%x\n", i2c_freq_mode_string(ctlr->t.bus_freq_hz), hw_version); return 0; } static const struct acpi_device_id hisi_i2c_acpi_ids[] = { { "HISI03D1", 0 }, { } }; MODULE_DEVICE_TABLE(acpi, hisi_i2c_acpi_ids); static const struct of_device_id hisi_i2c_dts_ids[] = { { .compatible = "hisilicon,ascend910-i2c", }, { } }; MODULE_DEVICE_TABLE(of, hisi_i2c_dts_ids); static struct platform_driver hisi_i2c_driver = { .probe = hisi_i2c_probe, .driver = { .name = "hisi-i2c", .acpi_match_table = hisi_i2c_acpi_ids, .of_match_table = hisi_i2c_dts_ids, }, }; module_platform_driver(hisi_i2c_driver); MODULE_AUTHOR("Yicong Yang <[email protected]>"); MODULE_DESCRIPTION("HiSilicon I2C Controller Driver"); MODULE_LICENSE("GPL");
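/*
 * Illustrative sketch only: how hisi_i2c_xfer_msg() above assembles the
 * per-byte command word pushed into the TX FIFO. The masks mirror the
 * HISI_I2C_CMD_TXDATA_* definitions; FIELD_PREP is replaced by plain shifts
 * so this compiles in user space.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CMD_DATA_MASK	0xffu		/* GENMASK(7, 0) */
#define CMD_RW		(1u << 8)	/* read frame */
#define CMD_P_EN	(1u << 9)	/* STOP after this frame */
#define CMD_SR_EN	(1u << 10)	/* repeated START before this frame */

static uint32_t tx_cmd(uint8_t data, bool rd, bool new_msg, bool last_frame)
{
	uint32_t cmd = 0;

	if (new_msg)
		cmd |= CMD_SR_EN;	/* SR between messages of one transfer */
	if (last_frame)
		cmd |= CMD_P_EN;	/* only on the last frame of the last msg */
	if (rd)
		cmd |= CMD_RW;		/* read: the data field is unused */
	else
		cmd |= data & CMD_DATA_MASK;
	return cmd;
}

int main(void)
{
	printf("write 0xA5, final frame: 0x%03x\n", tx_cmd(0xA5, false, false, true));
	printf("read after Sr:           0x%03x\n", tx_cmd(0, true, true, false));
	return 0;
}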
linux-master
drivers/i2c/busses/i2c-hisi.c
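hisi_i2c_set_scl() spreads one SCL period worth of controller clock cycles between the HIGH and LOW counters using a fixed per-mode ratio, then subtracts the spike-filter and edge cycles. A hedged numeric walk-through follows; the 250 MHz controller clock and the filter/rise/fall times are assumptions for the example, while the 26/76 Fast-mode ratio and the -7/-1 corrections are the driver's:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define NSEC_TO_CYCLES(ns, khz) \
	((unsigned)DIV_ROUND_UP((unsigned long long)(khz) * (ns), 1000000ULL))

int main(void)
{
	unsigned clk_khz = 250000;			/* assumed controller clock */
	unsigned bus_hz = 400000;			/* Fast mode */
	unsigned spk = NSEC_TO_CYCLES(50, clk_khz);	/* assumed filter width */
	unsigned fall = NSEC_TO_CYCLES(20, clk_khz);	/* assumed t_f */
	unsigned rise = NSEC_TO_CYCLES(120, clk_khz);	/* assumed t_r */

	/* whole SCL period in controller clock cycles */
	unsigned total = (unsigned)DIV_ROUND_UP((unsigned long long)clk_khz * 1000,
						bus_hz);
	/* in Fast mode the HIGH phase gets 26/76 of the period */
	unsigned t_high = (unsigned)DIV_ROUND_UP(total * 26ULL, 76);
	unsigned t_low = total - t_high;

	printf("total=%u HCNT=%u LCNT=%u\n",
	       total, t_high - spk - 7 - fall, t_low - 1 - rise);
	return 0;
}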
// SPDX-License-Identifier: GPL-2.0-only /* * I2C bus driver for Kontron COM modules * * Copyright (c) 2010-2013 Kontron Europe GmbH * Author: Michael Brunner <[email protected]> * * The driver is based on the i2c-ocores driver by Peter Korsgaard. */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/i2c.h> #include <linux/delay.h> #include <linux/mfd/kempld.h> #define KEMPLD_I2C_PRELOW 0x0b #define KEMPLD_I2C_PREHIGH 0x0c #define KEMPLD_I2C_DATA 0x0e #define KEMPLD_I2C_CTRL 0x0d #define I2C_CTRL_IEN 0x40 #define I2C_CTRL_EN 0x80 #define KEMPLD_I2C_STAT 0x0f #define I2C_STAT_IF 0x01 #define I2C_STAT_TIP 0x02 #define I2C_STAT_ARBLOST 0x20 #define I2C_STAT_BUSY 0x40 #define I2C_STAT_NACK 0x80 #define KEMPLD_I2C_CMD 0x0f #define I2C_CMD_START 0x91 #define I2C_CMD_STOP 0x41 #define I2C_CMD_READ 0x21 #define I2C_CMD_WRITE 0x11 #define I2C_CMD_READ_ACK 0x21 #define I2C_CMD_READ_NACK 0x29 #define I2C_CMD_IACK 0x01 #define KEMPLD_I2C_FREQ_MAX 2700 /* 2.7 mHz */ #define KEMPLD_I2C_FREQ_STD 100 /* 100 kHz */ enum { STATE_DONE = 0, STATE_INIT, STATE_ADDR, STATE_ADDR10, STATE_START, STATE_WRITE, STATE_READ, STATE_ERROR, }; struct kempld_i2c_data { struct device *dev; struct kempld_device_data *pld; struct i2c_adapter adap; struct i2c_msg *msg; int pos; int nmsgs; int state; bool was_active; }; static unsigned int bus_frequency = KEMPLD_I2C_FREQ_STD; module_param(bus_frequency, uint, 0); MODULE_PARM_DESC(bus_frequency, "Set I2C bus frequency in kHz (default=" __MODULE_STRING(KEMPLD_I2C_FREQ_STD)")"); static int i2c_bus = -1; module_param(i2c_bus, int, 0); MODULE_PARM_DESC(i2c_bus, "Set I2C bus number (default=-1 for dynamic assignment)"); static bool i2c_gpio_mux; module_param(i2c_gpio_mux, bool, 0); MODULE_PARM_DESC(i2c_gpio_mux, "Enable I2C port on GPIO out (default=false)"); /* * kempld_get_mutex must be called prior to calling this function. */ static int kempld_i2c_process(struct kempld_i2c_data *i2c) { struct kempld_device_data *pld = i2c->pld; u8 stat = kempld_read8(pld, KEMPLD_I2C_STAT); struct i2c_msg *msg = i2c->msg; u8 addr; /* Ready? */ if (stat & I2C_STAT_TIP) return -EBUSY; if (i2c->state == STATE_DONE || i2c->state == STATE_ERROR) { /* Stop has been sent */ kempld_write8(pld, KEMPLD_I2C_CMD, I2C_CMD_IACK); if (i2c->state == STATE_ERROR) return -EIO; return 0; } /* Error? */ if (stat & I2C_STAT_ARBLOST) { i2c->state = STATE_ERROR; kempld_write8(pld, KEMPLD_I2C_CMD, I2C_CMD_STOP); return -EAGAIN; } if (i2c->state == STATE_INIT) { if (stat & I2C_STAT_BUSY) return -EBUSY; i2c->state = STATE_ADDR; } if (i2c->state == STATE_ADDR) { /* 10 bit address? */ if (i2c->msg->flags & I2C_M_TEN) { addr = 0xf0 | ((i2c->msg->addr >> 7) & 0x6); /* Set read bit if necessary */ addr |= (i2c->msg->flags & I2C_M_RD) ? 1 : 0; i2c->state = STATE_ADDR10; } else { addr = i2c_8bit_addr_from_msg(i2c->msg); i2c->state = STATE_START; } kempld_write8(pld, KEMPLD_I2C_DATA, addr); kempld_write8(pld, KEMPLD_I2C_CMD, I2C_CMD_START); return 0; } /* Second part of 10 bit addressing */ if (i2c->state == STATE_ADDR10) { kempld_write8(pld, KEMPLD_I2C_DATA, i2c->msg->addr & 0xff); kempld_write8(pld, KEMPLD_I2C_CMD, I2C_CMD_WRITE); i2c->state = STATE_START; return 0; } if (i2c->state == STATE_START || i2c->state == STATE_WRITE) { i2c->state = (msg->flags & I2C_M_RD) ? 
STATE_READ : STATE_WRITE; if (stat & I2C_STAT_NACK) { i2c->state = STATE_ERROR; kempld_write8(pld, KEMPLD_I2C_CMD, I2C_CMD_STOP); return -ENXIO; } } else { msg->buf[i2c->pos++] = kempld_read8(pld, KEMPLD_I2C_DATA); } if (i2c->pos >= msg->len) { i2c->nmsgs--; i2c->msg++; i2c->pos = 0; msg = i2c->msg; if (i2c->nmsgs) { if (!(msg->flags & I2C_M_NOSTART)) { i2c->state = STATE_ADDR; return 0; } else { i2c->state = (msg->flags & I2C_M_RD) ? STATE_READ : STATE_WRITE; } } else { i2c->state = STATE_DONE; kempld_write8(pld, KEMPLD_I2C_CMD, I2C_CMD_STOP); return 0; } } if (i2c->state == STATE_READ) { kempld_write8(pld, KEMPLD_I2C_CMD, i2c->pos == (msg->len - 1) ? I2C_CMD_READ_NACK : I2C_CMD_READ_ACK); } else { kempld_write8(pld, KEMPLD_I2C_DATA, msg->buf[i2c->pos++]); kempld_write8(pld, KEMPLD_I2C_CMD, I2C_CMD_WRITE); } return 0; } static int kempld_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct kempld_i2c_data *i2c = i2c_get_adapdata(adap); struct kempld_device_data *pld = i2c->pld; unsigned long timeout = jiffies + HZ; int ret; i2c->msg = msgs; i2c->pos = 0; i2c->nmsgs = num; i2c->state = STATE_INIT; /* Handle the transfer */ while (time_before(jiffies, timeout)) { kempld_get_mutex(pld); ret = kempld_i2c_process(i2c); kempld_release_mutex(pld); if (i2c->state == STATE_DONE || i2c->state == STATE_ERROR) return (i2c->state == STATE_DONE) ? num : ret; if (ret == 0) timeout = jiffies + HZ; usleep_range(5, 15); } i2c->state = STATE_ERROR; return -ETIMEDOUT; } /* * kempld_get_mutex must be called prior to calling this function. */ static void kempld_i2c_device_init(struct kempld_i2c_data *i2c) { struct kempld_device_data *pld = i2c->pld; u16 prescale_corr; long prescale; u8 ctrl; u8 stat; u8 cfg; /* Make sure the device is disabled */ ctrl = kempld_read8(pld, KEMPLD_I2C_CTRL); ctrl &= ~(I2C_CTRL_EN | I2C_CTRL_IEN); kempld_write8(pld, KEMPLD_I2C_CTRL, ctrl); if (bus_frequency > KEMPLD_I2C_FREQ_MAX) bus_frequency = KEMPLD_I2C_FREQ_MAX; if (pld->info.spec_major == 1) prescale = pld->pld_clock / (bus_frequency * 5) - 1000; else prescale = pld->pld_clock / (bus_frequency * 4) - 3000; if (prescale < 0) prescale = 0; /* Round to the best matching value */ prescale_corr = prescale / 1000; if (prescale % 1000 >= 500) prescale_corr++; kempld_write8(pld, KEMPLD_I2C_PRELOW, prescale_corr & 0xff); kempld_write8(pld, KEMPLD_I2C_PREHIGH, prescale_corr >> 8); /* Activate I2C bus output on GPIO pins */ cfg = kempld_read8(pld, KEMPLD_CFG); if (i2c_gpio_mux) cfg |= KEMPLD_CFG_GPIO_I2C_MUX; else cfg &= ~KEMPLD_CFG_GPIO_I2C_MUX; kempld_write8(pld, KEMPLD_CFG, cfg); /* Enable the device */ kempld_write8(pld, KEMPLD_I2C_CMD, I2C_CMD_IACK); ctrl |= I2C_CTRL_EN; kempld_write8(pld, KEMPLD_I2C_CTRL, ctrl); stat = kempld_read8(pld, KEMPLD_I2C_STAT); if (stat & I2C_STAT_BUSY) kempld_write8(pld, KEMPLD_I2C_CMD, I2C_CMD_STOP); } static u32 kempld_i2c_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR | I2C_FUNC_SMBUS_EMUL; } static const struct i2c_algorithm kempld_i2c_algorithm = { .master_xfer = kempld_i2c_xfer, .functionality = kempld_i2c_func, }; static const struct i2c_adapter kempld_i2c_adapter = { .owner = THIS_MODULE, .name = "i2c-kempld", .class = I2C_CLASS_HWMON | I2C_CLASS_SPD | I2C_CLASS_DEPRECATED, .algo = &kempld_i2c_algorithm, }; static int kempld_i2c_probe(struct platform_device *pdev) { struct kempld_device_data *pld = dev_get_drvdata(pdev->dev.parent); struct kempld_i2c_data *i2c; int ret; u8 ctrl; i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL); if (!i2c) 
return -ENOMEM; i2c->pld = pld; i2c->dev = &pdev->dev; i2c->adap = kempld_i2c_adapter; i2c->adap.dev.parent = i2c->dev; ACPI_COMPANION_SET(&i2c->adap.dev, ACPI_COMPANION(&pdev->dev)); i2c_set_adapdata(&i2c->adap, i2c); platform_set_drvdata(pdev, i2c); kempld_get_mutex(pld); ctrl = kempld_read8(pld, KEMPLD_I2C_CTRL); if (ctrl & I2C_CTRL_EN) i2c->was_active = true; kempld_i2c_device_init(i2c); kempld_release_mutex(pld); /* Add I2C adapter to I2C tree */ if (i2c_bus >= -1) i2c->adap.nr = i2c_bus; ret = i2c_add_numbered_adapter(&i2c->adap); if (ret) return ret; dev_info(i2c->dev, "I2C bus initialized at %dkHz\n", bus_frequency); return 0; } static void kempld_i2c_remove(struct platform_device *pdev) { struct kempld_i2c_data *i2c = platform_get_drvdata(pdev); struct kempld_device_data *pld = i2c->pld; u8 ctrl; kempld_get_mutex(pld); /* * Disable I2C logic if it was not activated before the * driver loaded */ if (!i2c->was_active) { ctrl = kempld_read8(pld, KEMPLD_I2C_CTRL); ctrl &= ~I2C_CTRL_EN; kempld_write8(pld, KEMPLD_I2C_CTRL, ctrl); } kempld_release_mutex(pld); i2c_del_adapter(&i2c->adap); } static int kempld_i2c_suspend(struct device *dev) { struct kempld_i2c_data *i2c = dev_get_drvdata(dev); struct kempld_device_data *pld = i2c->pld; u8 ctrl; kempld_get_mutex(pld); ctrl = kempld_read8(pld, KEMPLD_I2C_CTRL); ctrl &= ~I2C_CTRL_EN; kempld_write8(pld, KEMPLD_I2C_CTRL, ctrl); kempld_release_mutex(pld); return 0; } static int kempld_i2c_resume(struct device *dev) { struct kempld_i2c_data *i2c = dev_get_drvdata(dev); struct kempld_device_data *pld = i2c->pld; kempld_get_mutex(pld); kempld_i2c_device_init(i2c); kempld_release_mutex(pld); return 0; } static DEFINE_SIMPLE_DEV_PM_OPS(kempld_i2c_pm_ops, kempld_i2c_suspend, kempld_i2c_resume); static struct platform_driver kempld_i2c_driver = { .driver = { .name = "kempld-i2c", .pm = pm_sleep_ptr(&kempld_i2c_pm_ops), }, .probe = kempld_i2c_probe, .remove_new = kempld_i2c_remove, }; module_platform_driver(kempld_i2c_driver); MODULE_DESCRIPTION("KEM PLD I2C Driver"); MODULE_AUTHOR("Michael Brunner <[email protected]>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:kempld_i2c");
linux-master
drivers/i2c/busses/i2c-kempld.c
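kempld_i2c_device_init() computes the OpenCores-style prescaler in fixed point: bus_frequency is in kHz while the PLD clock is in Hz, so the quotient comes out as the prescaler scaled by 1000, and the /1000 and %1000 steps round it to the nearest register value (KEMPLD_I2C_FREQ_MAX of 2700 kHz is 2.7 MHz). A hedged sketch, assuming a 33.333333 MHz PLD clock and spec revision 2:

#include <stdio.h>

int main(void)
{
	long pld_clock = 33333333;	/* assumed PLD clock, Hz */
	long bus_khz = 100;		/* module parameter, kHz */
	int spec_major = 2;		/* assumed PLD spec revision */
	long prescale;
	unsigned short corr;

	if (spec_major == 1)
		prescale = pld_clock / (bus_khz * 5) - 1000;
	else
		prescale = pld_clock / (bus_khz * 4) - 3000;
	if (prescale < 0)
		prescale = 0;

	/* value is the prescaler x1000; round to the nearest integer */
	corr = prescale / 1000;
	if (prescale % 1000 >= 500)
		corr++;

	printf("prescaler=%u -> PRELOW=0x%02x PREHIGH=0x%02x\n",
	       corr, corr & 0xff, corr >> 8);
	return 0;
}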
// SPDX-License-Identifier: GPL-2.0-only /* * Driver for the Diolan u2c-12 USB-I2C adapter * * Copyright (c) 2010-2011 Ericsson AB * * Derived from: * i2c-tiny-usb.c * Copyright (C) 2006-2007 Till Harbaum ([email protected]) */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/module.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/i2c.h> #define DRIVER_NAME "i2c-diolan-u2c" #define USB_VENDOR_ID_DIOLAN 0x0abf #define USB_DEVICE_ID_DIOLAN_U2C 0x3370 /* commands via USB, must match command ids in the firmware */ #define CMD_I2C_READ 0x01 #define CMD_I2C_WRITE 0x02 #define CMD_I2C_SCAN 0x03 /* Returns list of detected devices */ #define CMD_I2C_RELEASE_SDA 0x04 #define CMD_I2C_RELEASE_SCL 0x05 #define CMD_I2C_DROP_SDA 0x06 #define CMD_I2C_DROP_SCL 0x07 #define CMD_I2C_READ_SDA 0x08 #define CMD_I2C_READ_SCL 0x09 #define CMD_GET_FW_VERSION 0x0a #define CMD_GET_SERIAL 0x0b #define CMD_I2C_START 0x0c #define CMD_I2C_STOP 0x0d #define CMD_I2C_REPEATED_START 0x0e #define CMD_I2C_PUT_BYTE 0x0f #define CMD_I2C_GET_BYTE 0x10 #define CMD_I2C_PUT_ACK 0x11 #define CMD_I2C_GET_ACK 0x12 #define CMD_I2C_PUT_BYTE_ACK 0x13 #define CMD_I2C_GET_BYTE_ACK 0x14 #define CMD_I2C_SET_SPEED 0x1b #define CMD_I2C_GET_SPEED 0x1c #define CMD_I2C_SET_CLK_SYNC 0x24 #define CMD_I2C_GET_CLK_SYNC 0x25 #define CMD_I2C_SET_CLK_SYNC_TO 0x26 #define CMD_I2C_GET_CLK_SYNC_TO 0x27 #define RESP_OK 0x00 #define RESP_FAILED 0x01 #define RESP_BAD_MEMADDR 0x04 #define RESP_DATA_ERR 0x05 #define RESP_NOT_IMPLEMENTED 0x06 #define RESP_NACK 0x07 #define RESP_TIMEOUT 0x09 #define U2C_I2C_SPEED_FAST 0 /* 400 kHz */ #define U2C_I2C_SPEED_STD 1 /* 100 kHz */ #define U2C_I2C_SPEED_2KHZ 242 /* 2 kHz, minimum speed */ #define U2C_I2C_SPEED(f) ((DIV_ROUND_UP(1000000, (f)) - 10) / 2 + 1) #define U2C_I2C_FREQ(s) (1000000 / (2 * (s - 1) + 10)) #define DIOLAN_USB_TIMEOUT 100 /* in ms */ #define DIOLAN_SYNC_TIMEOUT 20 /* in ms */ #define DIOLAN_OUTBUF_LEN 128 #define DIOLAN_FLUSH_LEN (DIOLAN_OUTBUF_LEN - 4) #define DIOLAN_INBUF_LEN 256 /* Maximum supported receive length */ /* Structure to hold all of our device specific stuff */ struct i2c_diolan_u2c { u8 obuffer[DIOLAN_OUTBUF_LEN]; /* output buffer */ u8 ibuffer[DIOLAN_INBUF_LEN]; /* input buffer */ int ep_in, ep_out; /* Endpoints */ struct usb_device *usb_dev; /* the usb device for this device */ struct usb_interface *interface;/* the interface for this device */ struct i2c_adapter adapter; /* i2c related things */ int olen; /* Output buffer length */ int ocount; /* Number of enqueued messages */ }; static uint frequency = I2C_MAX_STANDARD_MODE_FREQ; /* I2C clock frequency in Hz */ module_param(frequency, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz"); /* usb layer */ /* Send command to device, and get response. */ static int diolan_usb_transfer(struct i2c_diolan_u2c *dev) { int ret = 0; int actual; int i; if (!dev->olen || !dev->ocount) return -EINVAL; ret = usb_bulk_msg(dev->usb_dev, usb_sndbulkpipe(dev->usb_dev, dev->ep_out), dev->obuffer, dev->olen, &actual, DIOLAN_USB_TIMEOUT); if (!ret) { for (i = 0; i < dev->ocount; i++) { int tmpret; tmpret = usb_bulk_msg(dev->usb_dev, usb_rcvbulkpipe(dev->usb_dev, dev->ep_in), dev->ibuffer, sizeof(dev->ibuffer), &actual, DIOLAN_USB_TIMEOUT); /* * Stop command processing if a previous command * returned an error. * Note that we still need to retrieve all messages. 
*/ if (ret < 0) continue; ret = tmpret; if (ret == 0 && actual > 0) { switch (dev->ibuffer[actual - 1]) { case RESP_NACK: /* * Return ENXIO if NACK was received as * response to the address phase, * EIO otherwise */ ret = i == 1 ? -ENXIO : -EIO; break; case RESP_TIMEOUT: ret = -ETIMEDOUT; break; case RESP_OK: /* strip off return code */ ret = actual - 1; break; default: ret = -EIO; break; } } } } dev->olen = 0; dev->ocount = 0; return ret; } static int diolan_write_cmd(struct i2c_diolan_u2c *dev, bool flush) { if (flush || dev->olen >= DIOLAN_FLUSH_LEN) return diolan_usb_transfer(dev); return 0; } /* Send command (no data) */ static int diolan_usb_cmd(struct i2c_diolan_u2c *dev, u8 command, bool flush) { dev->obuffer[dev->olen++] = command; dev->ocount++; return diolan_write_cmd(dev, flush); } /* Send command with one byte of data */ static int diolan_usb_cmd_data(struct i2c_diolan_u2c *dev, u8 command, u8 data, bool flush) { dev->obuffer[dev->olen++] = command; dev->obuffer[dev->olen++] = data; dev->ocount++; return diolan_write_cmd(dev, flush); } /* Send command with two bytes of data */ static int diolan_usb_cmd_data2(struct i2c_diolan_u2c *dev, u8 command, u8 d1, u8 d2, bool flush) { dev->obuffer[dev->olen++] = command; dev->obuffer[dev->olen++] = d1; dev->obuffer[dev->olen++] = d2; dev->ocount++; return diolan_write_cmd(dev, flush); } /* * Flush input queue. * If we don't do this at startup and the controller has queued up * messages which were not retrieved, it will stop responding * at some point. */ static void diolan_flush_input(struct i2c_diolan_u2c *dev) { int i; for (i = 0; i < 10; i++) { int actual = 0; int ret; ret = usb_bulk_msg(dev->usb_dev, usb_rcvbulkpipe(dev->usb_dev, dev->ep_in), dev->ibuffer, sizeof(dev->ibuffer), &actual, DIOLAN_USB_TIMEOUT); if (ret < 0 || actual == 0) break; } if (i == 10) dev_err(&dev->interface->dev, "Failed to flush input buffer\n"); } static int diolan_i2c_start(struct i2c_diolan_u2c *dev) { return diolan_usb_cmd(dev, CMD_I2C_START, false); } static int diolan_i2c_repeated_start(struct i2c_diolan_u2c *dev) { return diolan_usb_cmd(dev, CMD_I2C_REPEATED_START, false); } static int diolan_i2c_stop(struct i2c_diolan_u2c *dev) { return diolan_usb_cmd(dev, CMD_I2C_STOP, true); } static int diolan_i2c_get_byte_ack(struct i2c_diolan_u2c *dev, bool ack, u8 *byte) { int ret; ret = diolan_usb_cmd_data(dev, CMD_I2C_GET_BYTE_ACK, ack, true); if (ret > 0) *byte = dev->ibuffer[0]; else if (ret == 0) ret = -EIO; return ret; } static int diolan_i2c_put_byte_ack(struct i2c_diolan_u2c *dev, u8 byte) { return diolan_usb_cmd_data(dev, CMD_I2C_PUT_BYTE_ACK, byte, false); } static int diolan_set_speed(struct i2c_diolan_u2c *dev, u8 speed) { return diolan_usb_cmd_data(dev, CMD_I2C_SET_SPEED, speed, true); } /* Enable or disable clock synchronization (stretching) */ static int diolan_set_clock_synch(struct i2c_diolan_u2c *dev, bool enable) { return diolan_usb_cmd_data(dev, CMD_I2C_SET_CLK_SYNC, enable, true); } /* Set clock synchronization timeout in ms */ static int diolan_set_clock_synch_timeout(struct i2c_diolan_u2c *dev, int ms) { int to_val = ms * 10; return diolan_usb_cmd_data2(dev, CMD_I2C_SET_CLK_SYNC_TO, to_val & 0xff, (to_val >> 8) & 0xff, true); } static void diolan_fw_version(struct i2c_diolan_u2c *dev) { int ret; ret = diolan_usb_cmd(dev, CMD_GET_FW_VERSION, true); if (ret >= 2) dev_info(&dev->interface->dev, "Diolan U2C firmware version %u.%u\n", (unsigned int)dev->ibuffer[0], (unsigned int)dev->ibuffer[1]); } static void diolan_get_serial(struct 
i2c_diolan_u2c *dev) { int ret; u32 serial; ret = diolan_usb_cmd(dev, CMD_GET_SERIAL, true); if (ret >= 4) { serial = le32_to_cpu(*(u32 *)dev->ibuffer); dev_info(&dev->interface->dev, "Diolan U2C serial number %u\n", serial); } } static int diolan_init(struct i2c_diolan_u2c *dev) { int speed, ret; if (frequency >= 2 * I2C_MAX_STANDARD_MODE_FREQ) { speed = U2C_I2C_SPEED_FAST; frequency = I2C_MAX_FAST_MODE_FREQ; } else if (frequency >= I2C_MAX_STANDARD_MODE_FREQ || frequency == 0) { speed = U2C_I2C_SPEED_STD; frequency = I2C_MAX_STANDARD_MODE_FREQ; } else { speed = U2C_I2C_SPEED(frequency); if (speed > U2C_I2C_SPEED_2KHZ) speed = U2C_I2C_SPEED_2KHZ; frequency = U2C_I2C_FREQ(speed); } dev_info(&dev->interface->dev, "Diolan U2C at USB bus %03d address %03d speed %d Hz\n", dev->usb_dev->bus->busnum, dev->usb_dev->devnum, frequency); diolan_flush_input(dev); diolan_fw_version(dev); diolan_get_serial(dev); /* Set I2C speed */ ret = diolan_set_speed(dev, speed); if (ret < 0) return ret; /* Configure I2C clock synchronization */ ret = diolan_set_clock_synch(dev, speed != U2C_I2C_SPEED_FAST); if (ret < 0) return ret; if (speed != U2C_I2C_SPEED_FAST) ret = diolan_set_clock_synch_timeout(dev, DIOLAN_SYNC_TIMEOUT); return ret; } /* i2c layer */ static int diolan_usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) { struct i2c_diolan_u2c *dev = i2c_get_adapdata(adapter); struct i2c_msg *pmsg; int i, j; int ret, sret; ret = diolan_i2c_start(dev); if (ret < 0) return ret; for (i = 0; i < num; i++) { pmsg = &msgs[i]; if (i) { ret = diolan_i2c_repeated_start(dev); if (ret < 0) goto abort; } ret = diolan_i2c_put_byte_ack(dev, i2c_8bit_addr_from_msg(pmsg)); if (ret < 0) goto abort; if (pmsg->flags & I2C_M_RD) { for (j = 0; j < pmsg->len; j++) { u8 byte; bool ack = j < pmsg->len - 1; /* * Don't send NACK if this is the first byte * of a SMBUS_BLOCK message. */ if (j == 0 && (pmsg->flags & I2C_M_RECV_LEN)) ack = true; ret = diolan_i2c_get_byte_ack(dev, ack, &byte); if (ret < 0) goto abort; /* * Adjust count if first received byte is length */ if (j == 0 && (pmsg->flags & I2C_M_RECV_LEN)) { if (byte == 0 || byte > I2C_SMBUS_BLOCK_MAX) { ret = -EPROTO; goto abort; } pmsg->len += byte; } pmsg->buf[j] = byte; } } else { for (j = 0; j < pmsg->len; j++) { ret = diolan_i2c_put_byte_ack(dev, pmsg->buf[j]); if (ret < 0) goto abort; } } } ret = num; abort: sret = diolan_i2c_stop(dev); if (sret < 0 && ret >= 0) ret = sret; return ret; } /* * Return list of supported functionality. 
*/ static u32 diolan_usb_func(struct i2c_adapter *a) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SMBUS_READ_BLOCK_DATA | I2C_FUNC_SMBUS_BLOCK_PROC_CALL; } static const struct i2c_algorithm diolan_usb_algorithm = { .master_xfer = diolan_usb_xfer, .functionality = diolan_usb_func, }; /* device layer */ static const struct usb_device_id diolan_u2c_table[] = { { USB_DEVICE(USB_VENDOR_ID_DIOLAN, USB_DEVICE_ID_DIOLAN_U2C) }, { } }; MODULE_DEVICE_TABLE(usb, diolan_u2c_table); static void diolan_u2c_free(struct i2c_diolan_u2c *dev) { usb_put_dev(dev->usb_dev); kfree(dev); } static int diolan_u2c_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_host_interface *hostif = interface->cur_altsetting; struct i2c_diolan_u2c *dev; int ret; if (hostif->desc.bInterfaceNumber != 0 || hostif->desc.bNumEndpoints < 2) return -ENODEV; /* allocate memory for our device state and initialize it */ dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (dev == NULL) { ret = -ENOMEM; goto error; } dev->ep_out = hostif->endpoint[0].desc.bEndpointAddress; dev->ep_in = hostif->endpoint[1].desc.bEndpointAddress; dev->usb_dev = usb_get_dev(interface_to_usbdev(interface)); dev->interface = interface; /* save our data pointer in this interface device */ usb_set_intfdata(interface, dev); /* setup i2c adapter description */ dev->adapter.owner = THIS_MODULE; dev->adapter.class = I2C_CLASS_HWMON; dev->adapter.algo = &diolan_usb_algorithm; i2c_set_adapdata(&dev->adapter, dev); snprintf(dev->adapter.name, sizeof(dev->adapter.name), DRIVER_NAME " at bus %03d device %03d", dev->usb_dev->bus->busnum, dev->usb_dev->devnum); dev->adapter.dev.parent = &dev->interface->dev; /* initialize diolan i2c interface */ ret = diolan_init(dev); if (ret < 0) { dev_err(&interface->dev, "failed to initialize adapter\n"); goto error_free; } /* and finally attach to i2c layer */ ret = i2c_add_adapter(&dev->adapter); if (ret < 0) goto error_free; dev_dbg(&interface->dev, "connected " DRIVER_NAME "\n"); return 0; error_free: usb_set_intfdata(interface, NULL); diolan_u2c_free(dev); error: return ret; } static void diolan_u2c_disconnect(struct usb_interface *interface) { struct i2c_diolan_u2c *dev = usb_get_intfdata(interface); i2c_del_adapter(&dev->adapter); usb_set_intfdata(interface, NULL); diolan_u2c_free(dev); dev_dbg(&interface->dev, "disconnected\n"); } static struct usb_driver diolan_u2c_driver = { .name = DRIVER_NAME, .probe = diolan_u2c_probe, .disconnect = diolan_u2c_disconnect, .id_table = diolan_u2c_table, }; module_usb_driver(diolan_u2c_driver); MODULE_AUTHOR("Guenter Roeck <[email protected]>"); MODULE_DESCRIPTION(DRIVER_NAME " driver"); MODULE_LICENSE("GPL");
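/*
 * Illustrative sketch only: the write-combining done by diolan_write_cmd()
 * above. Commands accumulate in an output buffer and go out in a single USB
 * bulk transfer either when the caller needs a response (flush) or when the
 * buffer nears its limit. Sizes mirror DIOLAN_OUTBUF_LEN/DIOLAN_FLUSH_LEN;
 * usb_ship() is a made-up stand-in for usb_bulk_msg().
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define OUTBUF_LEN 128
#define FLUSH_LEN (OUTBUF_LEN - 4)	/* leave room for a worst-case command */

static unsigned char obuf[OUTBUF_LEN];
static int olen, ocount;

static void usb_ship(void)	/* stand-in for the bulk-out transfer */
{
	printf("bulk out: %d bytes, %d queued commands\n", olen, ocount);
	olen = 0;
	ocount = 0;
}

static void queue_cmd(const unsigned char *cmd, int len, bool flush)
{
	memcpy(obuf + olen, cmd, len);
	olen += len;
	ocount++;
	if (flush || olen >= FLUSH_LEN)
		usb_ship();
}

int main(void)
{
	unsigned char start = 0x0c, stop = 0x0d;	/* CMD_I2C_START/STOP */

	queue_cmd(&start, 1, false);	/* queued, not sent yet */
	queue_cmd(&stop, 1, true);	/* flush: both go out together */
	return 0;
}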
linux-master
drivers/i2c/busses/i2c-diolan-u2c.c
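The Diolan speed code s encodes an SCL period of 2*(s-1)+10 microseconds (1 MHz ticks), which makes U2C_I2C_SPEED() and U2C_I2C_FREQ() near-inverses of each other. A small demonstration reusing the driver's macros verbatim:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define U2C_I2C_SPEED(f) ((DIV_ROUND_UP(1000000, (f)) - 10) / 2 + 1)
#define U2C_I2C_FREQ(s)  (1000000 / (2 * ((s) - 1) + 10))

int main(void)
{
	unsigned req[] = { 100000, 50000, 10000, 2000 };
	unsigned i;

	for (i = 0; i < sizeof(req) / sizeof(req[0]); i++) {
		unsigned s = U2C_I2C_SPEED(req[i]);

		printf("want %6u Hz -> code %3u -> actual %6u Hz\n",
		       req[i], s, U2C_I2C_FREQ(s));
	}
	return 0;
}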
// SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. #include <linux/acpi.h> #include <linux/clk.h> #include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/dma/qcom-gpi-dma.h> #include <linux/err.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/soc/qcom/geni-se.h> #include <linux/spinlock.h> #define SE_I2C_TX_TRANS_LEN 0x26c #define SE_I2C_RX_TRANS_LEN 0x270 #define SE_I2C_SCL_COUNTERS 0x278 #define SE_I2C_ERR (M_CMD_OVERRUN_EN | M_ILLEGAL_CMD_EN | M_CMD_FAILURE_EN |\ M_GP_IRQ_1_EN | M_GP_IRQ_3_EN | M_GP_IRQ_4_EN) #define SE_I2C_ABORT BIT(1) /* M_CMD OP codes for I2C */ #define I2C_WRITE 0x1 #define I2C_READ 0x2 #define I2C_WRITE_READ 0x3 #define I2C_ADDR_ONLY 0x4 #define I2C_BUS_CLEAR 0x6 #define I2C_STOP_ON_BUS 0x7 /* M_CMD params for I2C */ #define PRE_CMD_DELAY BIT(0) #define TIMESTAMP_BEFORE BIT(1) #define STOP_STRETCH BIT(2) #define TIMESTAMP_AFTER BIT(3) #define POST_COMMAND_DELAY BIT(4) #define IGNORE_ADD_NACK BIT(6) #define READ_FINISHED_WITH_ACK BIT(7) #define BYPASS_ADDR_PHASE BIT(8) #define SLV_ADDR_MSK GENMASK(15, 9) #define SLV_ADDR_SHFT 9 /* I2C SCL COUNTER fields */ #define HIGH_COUNTER_MSK GENMASK(29, 20) #define HIGH_COUNTER_SHFT 20 #define LOW_COUNTER_MSK GENMASK(19, 10) #define LOW_COUNTER_SHFT 10 #define CYCLE_COUNTER_MSK GENMASK(9, 0) #define I2C_PACK_TX BIT(0) #define I2C_PACK_RX BIT(1) enum geni_i2c_err_code { GP_IRQ0, NACK, GP_IRQ2, BUS_PROTO, ARB_LOST, GP_IRQ5, GENI_OVERRUN, GENI_ILLEGAL_CMD, GENI_ABORT_DONE, GENI_TIMEOUT, }; #define DM_I2C_CB_ERR ((BIT(NACK) | BIT(BUS_PROTO) | BIT(ARB_LOST)) \ << 5) #define I2C_AUTO_SUSPEND_DELAY 250 #define KHZ(freq) (1000 * freq) #define PACKING_BYTES_PW 4 #define ABORT_TIMEOUT HZ #define XFER_TIMEOUT HZ #define RST_TIMEOUT HZ struct geni_i2c_dev { struct geni_se se; u32 tx_wm; int irq; int err; struct i2c_adapter adap; struct completion done; struct i2c_msg *cur; int cur_wr; int cur_rd; spinlock_t lock; struct clk *core_clk; u32 clk_freq_out; const struct geni_i2c_clk_fld *clk_fld; int suspended; void *dma_buf; size_t xfer_len; dma_addr_t dma_addr; struct dma_chan *tx_c; struct dma_chan *rx_c; bool gpi_mode; bool abort_done; }; struct geni_i2c_desc { bool has_core_clk; char *icc_ddr; bool no_dma_support; unsigned int tx_fifo_depth; }; struct geni_i2c_err_log { int err; const char *msg; }; static const struct geni_i2c_err_log gi2c_log[] = { [GP_IRQ0] = {-EIO, "Unknown I2C err GP_IRQ0"}, [NACK] = {-ENXIO, "NACK: slv unresponsive, check its power/reset-ln"}, [GP_IRQ2] = {-EIO, "Unknown I2C err GP IRQ2"}, [BUS_PROTO] = {-EPROTO, "Bus proto err, noisy/unexpected start/stop"}, [ARB_LOST] = {-EAGAIN, "Bus arbitration lost, clock line undriveable"}, [GP_IRQ5] = {-EIO, "Unknown I2C err GP IRQ5"}, [GENI_OVERRUN] = {-EIO, "Cmd overrun, check GENI cmd-state machine"}, [GENI_ILLEGAL_CMD] = {-EIO, "Illegal cmd, check GENI cmd-state machine"}, [GENI_ABORT_DONE] = {-ETIMEDOUT, "Abort after timeout successful"}, [GENI_TIMEOUT] = {-ETIMEDOUT, "I2C TXN timed out"}, }; struct geni_i2c_clk_fld { u32 clk_freq_out; u8 clk_div; u8 t_high_cnt; u8 t_low_cnt; u8 t_cycle_cnt; }; /* * Hardware uses the underlying formula to calculate time periods of * SCL clock cycle. Firmware uses some additional cycles excluded from the * below formula and it is confirmed that the time periods are within * specification limits. 
* * time of high period of SCL: t_high = (t_high_cnt * clk_div) / source_clock * time of low period of SCL: t_low = (t_low_cnt * clk_div) / source_clock * time of full period of SCL: t_cycle = (t_cycle_cnt * clk_div) / source_clock * clk_freq_out = t / t_cycle * source_clock = 19.2 MHz */ static const struct geni_i2c_clk_fld geni_i2c_clk_map[] = { {KHZ(100), 7, 10, 11, 26}, {KHZ(400), 2, 5, 12, 24}, {KHZ(1000), 1, 3, 9, 18}, }; static int geni_i2c_clk_map_idx(struct geni_i2c_dev *gi2c) { int i; const struct geni_i2c_clk_fld *itr = geni_i2c_clk_map; for (i = 0; i < ARRAY_SIZE(geni_i2c_clk_map); i++, itr++) { if (itr->clk_freq_out == gi2c->clk_freq_out) { gi2c->clk_fld = itr; return 0; } } return -EINVAL; } static void qcom_geni_i2c_conf(struct geni_i2c_dev *gi2c) { const struct geni_i2c_clk_fld *itr = gi2c->clk_fld; u32 val; writel_relaxed(0, gi2c->se.base + SE_GENI_CLK_SEL); val = (itr->clk_div << CLK_DIV_SHFT) | SER_CLK_EN; writel_relaxed(val, gi2c->se.base + GENI_SER_M_CLK_CFG); val = itr->t_high_cnt << HIGH_COUNTER_SHFT; val |= itr->t_low_cnt << LOW_COUNTER_SHFT; val |= itr->t_cycle_cnt; writel_relaxed(val, gi2c->se.base + SE_I2C_SCL_COUNTERS); } static void geni_i2c_err_misc(struct geni_i2c_dev *gi2c) { u32 m_cmd = readl_relaxed(gi2c->se.base + SE_GENI_M_CMD0); u32 m_stat = readl_relaxed(gi2c->se.base + SE_GENI_M_IRQ_STATUS); u32 geni_s = readl_relaxed(gi2c->se.base + SE_GENI_STATUS); u32 geni_ios = readl_relaxed(gi2c->se.base + SE_GENI_IOS); u32 dma = readl_relaxed(gi2c->se.base + SE_GENI_DMA_MODE_EN); u32 rx_st, tx_st; if (dma) { rx_st = readl_relaxed(gi2c->se.base + SE_DMA_RX_IRQ_STAT); tx_st = readl_relaxed(gi2c->se.base + SE_DMA_TX_IRQ_STAT); } else { rx_st = readl_relaxed(gi2c->se.base + SE_GENI_RX_FIFO_STATUS); tx_st = readl_relaxed(gi2c->se.base + SE_GENI_TX_FIFO_STATUS); } dev_dbg(gi2c->se.dev, "DMA:%d tx_stat:0x%x, rx_stat:0x%x, irq-stat:0x%x\n", dma, tx_st, rx_st, m_stat); dev_dbg(gi2c->se.dev, "m_cmd:0x%x, geni_status:0x%x, geni_ios:0x%x\n", m_cmd, geni_s, geni_ios); } static void geni_i2c_err(struct geni_i2c_dev *gi2c, int err) { if (!gi2c->err) gi2c->err = gi2c_log[err].err; if (gi2c->cur) dev_dbg(gi2c->se.dev, "len:%d, slv-addr:0x%x, RD/WR:%d\n", gi2c->cur->len, gi2c->cur->addr, gi2c->cur->flags); switch (err) { case GENI_ABORT_DONE: gi2c->abort_done = true; break; case NACK: case GENI_TIMEOUT: dev_dbg(gi2c->se.dev, "%s\n", gi2c_log[err].msg); break; default: dev_err(gi2c->se.dev, "%s\n", gi2c_log[err].msg); geni_i2c_err_misc(gi2c); break; } } static irqreturn_t geni_i2c_irq(int irq, void *dev) { struct geni_i2c_dev *gi2c = dev; void __iomem *base = gi2c->se.base; int j, p; u32 m_stat; u32 rx_st; u32 dm_tx_st; u32 dm_rx_st; u32 dma; u32 val; struct i2c_msg *cur; spin_lock(&gi2c->lock); m_stat = readl_relaxed(base + SE_GENI_M_IRQ_STATUS); rx_st = readl_relaxed(base + SE_GENI_RX_FIFO_STATUS); dm_tx_st = readl_relaxed(base + SE_DMA_TX_IRQ_STAT); dm_rx_st = readl_relaxed(base + SE_DMA_RX_IRQ_STAT); dma = readl_relaxed(base + SE_GENI_DMA_MODE_EN); cur = gi2c->cur; if (!cur || m_stat & (M_CMD_FAILURE_EN | M_CMD_ABORT_EN) || dm_rx_st & (DM_I2C_CB_ERR)) { if (m_stat & M_GP_IRQ_1_EN) geni_i2c_err(gi2c, NACK); if (m_stat & M_GP_IRQ_3_EN) geni_i2c_err(gi2c, BUS_PROTO); if (m_stat & M_GP_IRQ_4_EN) geni_i2c_err(gi2c, ARB_LOST); if (m_stat & M_CMD_OVERRUN_EN) geni_i2c_err(gi2c, GENI_OVERRUN); if (m_stat & M_ILLEGAL_CMD_EN) geni_i2c_err(gi2c, GENI_ILLEGAL_CMD); if (m_stat & M_CMD_ABORT_EN) geni_i2c_err(gi2c, GENI_ABORT_DONE); if (m_stat & M_GP_IRQ_0_EN) geni_i2c_err(gi2c, GP_IRQ0); /* 
Disable the TX Watermark interrupt to stop TX */ if (!dma) writel_relaxed(0, base + SE_GENI_TX_WATERMARK_REG); } else if (dma) { dev_dbg(gi2c->se.dev, "i2c dma tx:0x%x, dma rx:0x%x\n", dm_tx_st, dm_rx_st); } else if (cur->flags & I2C_M_RD && m_stat & (M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN)) { u32 rxcnt = rx_st & RX_FIFO_WC_MSK; for (j = 0; j < rxcnt; j++) { p = 0; val = readl_relaxed(base + SE_GENI_RX_FIFOn); while (gi2c->cur_rd < cur->len && p < sizeof(val)) { cur->buf[gi2c->cur_rd++] = val & 0xff; val >>= 8; p++; } if (gi2c->cur_rd == cur->len) break; } } else if (!(cur->flags & I2C_M_RD) && m_stat & M_TX_FIFO_WATERMARK_EN) { for (j = 0; j < gi2c->tx_wm; j++) { u32 temp; val = 0; p = 0; while (gi2c->cur_wr < cur->len && p < sizeof(val)) { temp = cur->buf[gi2c->cur_wr++]; val |= temp << (p * 8); p++; } writel_relaxed(val, base + SE_GENI_TX_FIFOn); /* TX Complete, Disable the TX Watermark interrupt */ if (gi2c->cur_wr == cur->len) { writel_relaxed(0, base + SE_GENI_TX_WATERMARK_REG); break; } } } if (m_stat) writel_relaxed(m_stat, base + SE_GENI_M_IRQ_CLEAR); if (dma && dm_tx_st) writel_relaxed(dm_tx_st, base + SE_DMA_TX_IRQ_CLR); if (dma && dm_rx_st) writel_relaxed(dm_rx_st, base + SE_DMA_RX_IRQ_CLR); /* if this is err with done-bit not set, handle that through timeout. */ if (m_stat & M_CMD_DONE_EN || m_stat & M_CMD_ABORT_EN || dm_tx_st & TX_DMA_DONE || dm_tx_st & TX_RESET_DONE || dm_rx_st & RX_DMA_DONE || dm_rx_st & RX_RESET_DONE) complete(&gi2c->done); spin_unlock(&gi2c->lock); return IRQ_HANDLED; } static void geni_i2c_abort_xfer(struct geni_i2c_dev *gi2c) { unsigned long time_left = ABORT_TIMEOUT; unsigned long flags; spin_lock_irqsave(&gi2c->lock, flags); geni_i2c_err(gi2c, GENI_TIMEOUT); gi2c->cur = NULL; gi2c->abort_done = false; geni_se_abort_m_cmd(&gi2c->se); spin_unlock_irqrestore(&gi2c->lock, flags); do { time_left = wait_for_completion_timeout(&gi2c->done, time_left); } while (!gi2c->abort_done && time_left); if (!time_left) dev_err(gi2c->se.dev, "Timeout abort_m_cmd\n"); } static void geni_i2c_rx_fsm_rst(struct geni_i2c_dev *gi2c) { u32 val; unsigned long time_left = RST_TIMEOUT; writel_relaxed(1, gi2c->se.base + SE_DMA_RX_FSM_RST); do { time_left = wait_for_completion_timeout(&gi2c->done, time_left); val = readl_relaxed(gi2c->se.base + SE_DMA_RX_IRQ_STAT); } while (!(val & RX_RESET_DONE) && time_left); if (!(val & RX_RESET_DONE)) dev_err(gi2c->se.dev, "Timeout resetting RX_FSM\n"); } static void geni_i2c_tx_fsm_rst(struct geni_i2c_dev *gi2c) { u32 val; unsigned long time_left = RST_TIMEOUT; writel_relaxed(1, gi2c->se.base + SE_DMA_TX_FSM_RST); do { time_left = wait_for_completion_timeout(&gi2c->done, time_left); val = readl_relaxed(gi2c->se.base + SE_DMA_TX_IRQ_STAT); } while (!(val & TX_RESET_DONE) && time_left); if (!(val & TX_RESET_DONE)) dev_err(gi2c->se.dev, "Timeout resetting TX_FSM\n"); } static void geni_i2c_rx_msg_cleanup(struct geni_i2c_dev *gi2c, struct i2c_msg *cur) { gi2c->cur_rd = 0; if (gi2c->dma_buf) { if (gi2c->err) geni_i2c_rx_fsm_rst(gi2c); geni_se_rx_dma_unprep(&gi2c->se, gi2c->dma_addr, gi2c->xfer_len); i2c_put_dma_safe_msg_buf(gi2c->dma_buf, cur, !gi2c->err); } } static void geni_i2c_tx_msg_cleanup(struct geni_i2c_dev *gi2c, struct i2c_msg *cur) { gi2c->cur_wr = 0; if (gi2c->dma_buf) { if (gi2c->err) geni_i2c_tx_fsm_rst(gi2c); geni_se_tx_dma_unprep(&gi2c->se, gi2c->dma_addr, gi2c->xfer_len); i2c_put_dma_safe_msg_buf(gi2c->dma_buf, cur, !gi2c->err); } } static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg, u32 m_param) { 
dma_addr_t rx_dma = 0; unsigned long time_left; void *dma_buf; struct geni_se *se = &gi2c->se; size_t len = msg->len; struct i2c_msg *cur; dma_buf = i2c_get_dma_safe_msg_buf(msg, 32); if (dma_buf) geni_se_select_mode(se, GENI_SE_DMA); else geni_se_select_mode(se, GENI_SE_FIFO); writel_relaxed(len, se->base + SE_I2C_RX_TRANS_LEN); geni_se_setup_m_cmd(se, I2C_READ, m_param); if (dma_buf && geni_se_rx_dma_prep(se, dma_buf, len, &rx_dma)) { geni_se_select_mode(se, GENI_SE_FIFO); i2c_put_dma_safe_msg_buf(dma_buf, msg, false); dma_buf = NULL; } else { gi2c->xfer_len = len; gi2c->dma_addr = rx_dma; gi2c->dma_buf = dma_buf; } cur = gi2c->cur; time_left = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT); if (!time_left) geni_i2c_abort_xfer(gi2c); geni_i2c_rx_msg_cleanup(gi2c, cur); return gi2c->err; } static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg, u32 m_param) { dma_addr_t tx_dma = 0; unsigned long time_left; void *dma_buf; struct geni_se *se = &gi2c->se; size_t len = msg->len; struct i2c_msg *cur; dma_buf = i2c_get_dma_safe_msg_buf(msg, 32); if (dma_buf) geni_se_select_mode(se, GENI_SE_DMA); else geni_se_select_mode(se, GENI_SE_FIFO); writel_relaxed(len, se->base + SE_I2C_TX_TRANS_LEN); geni_se_setup_m_cmd(se, I2C_WRITE, m_param); if (dma_buf && geni_se_tx_dma_prep(se, dma_buf, len, &tx_dma)) { geni_se_select_mode(se, GENI_SE_FIFO); i2c_put_dma_safe_msg_buf(dma_buf, msg, false); dma_buf = NULL; } else { gi2c->xfer_len = len; gi2c->dma_addr = tx_dma; gi2c->dma_buf = dma_buf; } if (!dma_buf) /* Get FIFO IRQ */ writel_relaxed(1, se->base + SE_GENI_TX_WATERMARK_REG); cur = gi2c->cur; time_left = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT); if (!time_left) geni_i2c_abort_xfer(gi2c); geni_i2c_tx_msg_cleanup(gi2c, cur); return gi2c->err; } static void i2c_gpi_cb_result(void *cb, const struct dmaengine_result *result) { struct geni_i2c_dev *gi2c = cb; if (result->result != DMA_TRANS_NOERROR) { dev_err(gi2c->se.dev, "DMA txn failed:%d\n", result->result); gi2c->err = -EIO; } else if (result->residue) { dev_dbg(gi2c->se.dev, "DMA xfer has pending: %d\n", result->residue); } complete(&gi2c->done); } static void geni_i2c_gpi_unmap(struct geni_i2c_dev *gi2c, struct i2c_msg *msg, void *tx_buf, dma_addr_t tx_addr, void *rx_buf, dma_addr_t rx_addr) { if (tx_buf) { dma_unmap_single(gi2c->se.dev->parent, tx_addr, msg->len, DMA_TO_DEVICE); i2c_put_dma_safe_msg_buf(tx_buf, msg, !gi2c->err); } if (rx_buf) { dma_unmap_single(gi2c->se.dev->parent, rx_addr, msg->len, DMA_FROM_DEVICE); i2c_put_dma_safe_msg_buf(rx_buf, msg, !gi2c->err); } } static int geni_i2c_gpi(struct geni_i2c_dev *gi2c, struct i2c_msg *msg, struct dma_slave_config *config, dma_addr_t *dma_addr_p, void **buf, unsigned int op, struct dma_chan *dma_chan) { struct gpi_i2c_config *peripheral; unsigned int flags; void *dma_buf; dma_addr_t addr; enum dma_data_direction map_dirn; enum dma_transfer_direction dma_dirn; struct dma_async_tx_descriptor *desc; int ret; peripheral = config->peripheral_config; dma_buf = i2c_get_dma_safe_msg_buf(msg, 1); if (!dma_buf) return -ENOMEM; if (op == I2C_WRITE) map_dirn = DMA_TO_DEVICE; else map_dirn = DMA_FROM_DEVICE; addr = dma_map_single(gi2c->se.dev->parent, dma_buf, msg->len, map_dirn); if (dma_mapping_error(gi2c->se.dev->parent, addr)) { i2c_put_dma_safe_msg_buf(dma_buf, msg, false); return -ENOMEM; } /* set the length as message for rx txn */ peripheral->rx_len = msg->len; peripheral->op = op; ret = dmaengine_slave_config(dma_chan, config); if (ret) { 
dev_err(gi2c->se.dev, "dma config error: %d for op:%d\n", ret, op); goto err_config; } peripheral->set_config = 0; peripheral->multi_msg = true; flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK; if (op == I2C_WRITE) dma_dirn = DMA_MEM_TO_DEV; else dma_dirn = DMA_DEV_TO_MEM; desc = dmaengine_prep_slave_single(dma_chan, addr, msg->len, dma_dirn, flags); if (!desc) { dev_err(gi2c->se.dev, "prep_slave_sg failed\n"); ret = -EIO; goto err_config; } desc->callback_result = i2c_gpi_cb_result; desc->callback_param = gi2c; dmaengine_submit(desc); *buf = dma_buf; *dma_addr_p = addr; return 0; err_config: dma_unmap_single(gi2c->se.dev->parent, addr, msg->len, map_dirn); i2c_put_dma_safe_msg_buf(dma_buf, msg, false); return ret; } static int geni_i2c_gpi_xfer(struct geni_i2c_dev *gi2c, struct i2c_msg msgs[], int num) { struct dma_slave_config config = {}; struct gpi_i2c_config peripheral = {}; int i, ret = 0, timeout; dma_addr_t tx_addr, rx_addr; void *tx_buf = NULL, *rx_buf = NULL; const struct geni_i2c_clk_fld *itr = gi2c->clk_fld; config.peripheral_config = &peripheral; config.peripheral_size = sizeof(peripheral); peripheral.pack_enable = I2C_PACK_TX | I2C_PACK_RX; peripheral.cycle_count = itr->t_cycle_cnt; peripheral.high_count = itr->t_high_cnt; peripheral.low_count = itr->t_low_cnt; peripheral.clk_div = itr->clk_div; peripheral.set_config = 1; peripheral.multi_msg = false; for (i = 0; i < num; i++) { gi2c->cur = &msgs[i]; gi2c->err = 0; dev_dbg(gi2c->se.dev, "msg[%d].len:%d\n", i, gi2c->cur->len); peripheral.stretch = 0; if (i < num - 1) peripheral.stretch = 1; peripheral.addr = msgs[i].addr; if (msgs[i].flags & I2C_M_RD) { ret = geni_i2c_gpi(gi2c, &msgs[i], &config, &rx_addr, &rx_buf, I2C_READ, gi2c->rx_c); if (ret) goto err; } ret = geni_i2c_gpi(gi2c, &msgs[i], &config, &tx_addr, &tx_buf, I2C_WRITE, gi2c->tx_c); if (ret) goto err; if (msgs[i].flags & I2C_M_RD) dma_async_issue_pending(gi2c->rx_c); dma_async_issue_pending(gi2c->tx_c); timeout = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT); if (!timeout) { dev_err(gi2c->se.dev, "I2C timeout gpi flags:%d addr:0x%x\n", gi2c->cur->flags, gi2c->cur->addr); gi2c->err = -ETIMEDOUT; } if (gi2c->err) { ret = gi2c->err; goto err; } geni_i2c_gpi_unmap(gi2c, &msgs[i], tx_buf, tx_addr, rx_buf, rx_addr); } return num; err: dev_err(gi2c->se.dev, "GPI transfer failed: %d\n", ret); dmaengine_terminate_sync(gi2c->rx_c); dmaengine_terminate_sync(gi2c->tx_c); geni_i2c_gpi_unmap(gi2c, &msgs[i], tx_buf, tx_addr, rx_buf, rx_addr); return ret; } static int geni_i2c_fifo_xfer(struct geni_i2c_dev *gi2c, struct i2c_msg msgs[], int num) { int i, ret = 0; for (i = 0; i < num; i++) { u32 m_param = i < (num - 1) ? 
STOP_STRETCH : 0; m_param |= ((msgs[i].addr << SLV_ADDR_SHFT) & SLV_ADDR_MSK); gi2c->cur = &msgs[i]; if (msgs[i].flags & I2C_M_RD) ret = geni_i2c_rx_one_msg(gi2c, &msgs[i], m_param); else ret = geni_i2c_tx_one_msg(gi2c, &msgs[i], m_param); if (ret) return ret; } return num; } static int geni_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) { struct geni_i2c_dev *gi2c = i2c_get_adapdata(adap); int ret; gi2c->err = 0; reinit_completion(&gi2c->done); ret = pm_runtime_get_sync(gi2c->se.dev); if (ret < 0) { dev_err(gi2c->se.dev, "error turning SE resources:%d\n", ret); pm_runtime_put_noidle(gi2c->se.dev); /* Set device in suspended since resume failed */ pm_runtime_set_suspended(gi2c->se.dev); return ret; } qcom_geni_i2c_conf(gi2c); if (gi2c->gpi_mode) ret = geni_i2c_gpi_xfer(gi2c, msgs, num); else ret = geni_i2c_fifo_xfer(gi2c, msgs, num); pm_runtime_mark_last_busy(gi2c->se.dev); pm_runtime_put_autosuspend(gi2c->se.dev); gi2c->cur = NULL; gi2c->err = 0; return ret; } static u32 geni_i2c_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK); } static const struct i2c_algorithm geni_i2c_algo = { .master_xfer = geni_i2c_xfer, .functionality = geni_i2c_func, }; #ifdef CONFIG_ACPI static const struct acpi_device_id geni_i2c_acpi_match[] = { { "QCOM0220"}, { }, }; MODULE_DEVICE_TABLE(acpi, geni_i2c_acpi_match); #endif static void release_gpi_dma(struct geni_i2c_dev *gi2c) { if (gi2c->rx_c) dma_release_channel(gi2c->rx_c); if (gi2c->tx_c) dma_release_channel(gi2c->tx_c); } static int setup_gpi_dma(struct geni_i2c_dev *gi2c) { int ret; geni_se_select_mode(&gi2c->se, GENI_GPI_DMA); gi2c->tx_c = dma_request_chan(gi2c->se.dev, "tx"); if (IS_ERR(gi2c->tx_c)) { ret = dev_err_probe(gi2c->se.dev, PTR_ERR(gi2c->tx_c), "Failed to get tx DMA ch\n"); goto err_tx; } gi2c->rx_c = dma_request_chan(gi2c->se.dev, "rx"); if (IS_ERR(gi2c->rx_c)) { ret = dev_err_probe(gi2c->se.dev, PTR_ERR(gi2c->rx_c), "Failed to get rx DMA ch\n"); goto err_rx; } dev_dbg(gi2c->se.dev, "Grabbed GPI dma channels\n"); return 0; err_rx: dma_release_channel(gi2c->tx_c); err_tx: return ret; } static int geni_i2c_probe(struct platform_device *pdev) { struct geni_i2c_dev *gi2c; u32 proto, tx_depth, fifo_disable; int ret; struct device *dev = &pdev->dev; const struct geni_i2c_desc *desc = NULL; gi2c = devm_kzalloc(dev, sizeof(*gi2c), GFP_KERNEL); if (!gi2c) return -ENOMEM; gi2c->se.dev = dev; gi2c->se.wrapper = dev_get_drvdata(dev->parent); gi2c->se.base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(gi2c->se.base)) return PTR_ERR(gi2c->se.base); desc = device_get_match_data(&pdev->dev); if (desc && desc->has_core_clk) { gi2c->core_clk = devm_clk_get(dev, "core"); if (IS_ERR(gi2c->core_clk)) return PTR_ERR(gi2c->core_clk); } gi2c->se.clk = devm_clk_get(dev, "se"); if (IS_ERR(gi2c->se.clk) && !has_acpi_companion(dev)) return PTR_ERR(gi2c->se.clk); ret = device_property_read_u32(dev, "clock-frequency", &gi2c->clk_freq_out); if (ret) { dev_info(dev, "Bus frequency not specified, default to 100kHz.\n"); gi2c->clk_freq_out = KHZ(100); } if (has_acpi_companion(dev)) ACPI_COMPANION_SET(&gi2c->adap.dev, ACPI_COMPANION(dev)); gi2c->irq = platform_get_irq(pdev, 0); if (gi2c->irq < 0) return gi2c->irq; ret = geni_i2c_clk_map_idx(gi2c); if (ret) { dev_err(dev, "Invalid clk frequency %d Hz: %d\n", gi2c->clk_freq_out, ret); return ret; } gi2c->adap.algo = &geni_i2c_algo; init_completion(&gi2c->done); spin_lock_init(&gi2c->lock); platform_set_drvdata(pdev, gi2c); ret = 
devm_request_irq(dev, gi2c->irq, geni_i2c_irq, 0, dev_name(dev), gi2c); if (ret) { dev_err(dev, "Request_irq failed:%d: err:%d\n", gi2c->irq, ret); return ret; } /* Disable the interrupt so that the system can enter low-power mode */ disable_irq(gi2c->irq); i2c_set_adapdata(&gi2c->adap, gi2c); gi2c->adap.dev.parent = dev; gi2c->adap.dev.of_node = dev->of_node; strscpy(gi2c->adap.name, "Geni-I2C", sizeof(gi2c->adap.name)); ret = geni_icc_get(&gi2c->se, desc ? desc->icc_ddr : "qup-memory"); if (ret) return ret; /* * Set the bus quota for core and cpu to a reasonable value for * register access. * Set quota for DDR based on bus speed. */ gi2c->se.icc_paths[GENI_TO_CORE].avg_bw = GENI_DEFAULT_BW; gi2c->se.icc_paths[CPU_TO_GENI].avg_bw = GENI_DEFAULT_BW; if (!desc || desc->icc_ddr) gi2c->se.icc_paths[GENI_TO_DDR].avg_bw = Bps_to_icc(gi2c->clk_freq_out); ret = geni_icc_set_bw(&gi2c->se); if (ret) return ret; ret = clk_prepare_enable(gi2c->core_clk); if (ret) return ret; ret = geni_se_resources_on(&gi2c->se); if (ret) { dev_err(dev, "Error turning on resources %d\n", ret); return ret; } proto = geni_se_read_proto(&gi2c->se); if (proto != GENI_SE_I2C) { dev_err(dev, "Invalid proto %d\n", proto); geni_se_resources_off(&gi2c->se); clk_disable_unprepare(gi2c->core_clk); return -ENXIO; } if (desc && desc->no_dma_support) fifo_disable = false; else fifo_disable = readl_relaxed(gi2c->se.base + GENI_IF_DISABLE_RO) & FIFO_IF_DISABLE; if (fifo_disable) { /* FIFO is disabled, so we can only use GPI DMA */ gi2c->gpi_mode = true; ret = setup_gpi_dma(gi2c); if (ret) return dev_err_probe(dev, ret, "Failed to setup GPI DMA mode\n"); dev_dbg(dev, "Using GPI DMA mode for I2C\n"); } else { gi2c->gpi_mode = false; tx_depth = geni_se_get_tx_fifo_depth(&gi2c->se); /* I2C Master Hub Serial Elements doesn't have the HW_PARAM_0 register */ if (!tx_depth && desc) tx_depth = desc->tx_fifo_depth; if (!tx_depth) { dev_err(dev, "Invalid TX FIFO depth\n"); return -EINVAL; } gi2c->tx_wm = tx_depth - 1; geni_se_init(&gi2c->se, gi2c->tx_wm, tx_depth); geni_se_config_packing(&gi2c->se, BITS_PER_BYTE, PACKING_BYTES_PW, true, true, true); dev_dbg(dev, "i2c fifo/se-dma mode. 
fifo depth:%d\n", tx_depth); } clk_disable_unprepare(gi2c->core_clk); ret = geni_se_resources_off(&gi2c->se); if (ret) { dev_err(dev, "Error turning off resources %d\n", ret); goto err_dma; } ret = geni_icc_disable(&gi2c->se); if (ret) goto err_dma; gi2c->suspended = 1; pm_runtime_set_suspended(gi2c->se.dev); pm_runtime_set_autosuspend_delay(gi2c->se.dev, I2C_AUTO_SUSPEND_DELAY); pm_runtime_use_autosuspend(gi2c->se.dev); pm_runtime_enable(gi2c->se.dev); ret = i2c_add_adapter(&gi2c->adap); if (ret) { dev_err(dev, "Error adding i2c adapter %d\n", ret); pm_runtime_disable(gi2c->se.dev); goto err_dma; } dev_dbg(dev, "Geni-I2C adaptor successfully added\n"); return 0; err_dma: release_gpi_dma(gi2c); return ret; } static void geni_i2c_remove(struct platform_device *pdev) { struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev); i2c_del_adapter(&gi2c->adap); release_gpi_dma(gi2c); pm_runtime_disable(gi2c->se.dev); } static void geni_i2c_shutdown(struct platform_device *pdev) { struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev); /* Make client i2c transfers start failing */ i2c_mark_adapter_suspended(&gi2c->adap); } static int __maybe_unused geni_i2c_runtime_suspend(struct device *dev) { int ret; struct geni_i2c_dev *gi2c = dev_get_drvdata(dev); disable_irq(gi2c->irq); ret = geni_se_resources_off(&gi2c->se); if (ret) { enable_irq(gi2c->irq); return ret; } else { gi2c->suspended = 1; } clk_disable_unprepare(gi2c->core_clk); return geni_icc_disable(&gi2c->se); } static int __maybe_unused geni_i2c_runtime_resume(struct device *dev) { int ret; struct geni_i2c_dev *gi2c = dev_get_drvdata(dev); ret = geni_icc_enable(&gi2c->se); if (ret) return ret; ret = clk_prepare_enable(gi2c->core_clk); if (ret) return ret; ret = geni_se_resources_on(&gi2c->se); if (ret) return ret; enable_irq(gi2c->irq); gi2c->suspended = 0; return 0; } static int __maybe_unused geni_i2c_suspend_noirq(struct device *dev) { struct geni_i2c_dev *gi2c = dev_get_drvdata(dev); i2c_mark_adapter_suspended(&gi2c->adap); if (!gi2c->suspended) { geni_i2c_runtime_suspend(dev); pm_runtime_disable(dev); pm_runtime_set_suspended(dev); pm_runtime_enable(dev); } return 0; } static int __maybe_unused geni_i2c_resume_noirq(struct device *dev) { struct geni_i2c_dev *gi2c = dev_get_drvdata(dev); i2c_mark_adapter_resumed(&gi2c->adap); return 0; } static const struct dev_pm_ops geni_i2c_pm_ops = { SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(geni_i2c_suspend_noirq, geni_i2c_resume_noirq) SET_RUNTIME_PM_OPS(geni_i2c_runtime_suspend, geni_i2c_runtime_resume, NULL) }; static const struct geni_i2c_desc i2c_master_hub = { .has_core_clk = true, .icc_ddr = NULL, .no_dma_support = true, .tx_fifo_depth = 16, }; static const struct of_device_id geni_i2c_dt_match[] = { { .compatible = "qcom,geni-i2c" }, { .compatible = "qcom,geni-i2c-master-hub", .data = &i2c_master_hub }, {} }; MODULE_DEVICE_TABLE(of, geni_i2c_dt_match); static struct platform_driver geni_i2c_driver = { .probe = geni_i2c_probe, .remove_new = geni_i2c_remove, .shutdown = geni_i2c_shutdown, .driver = { .name = "geni_i2c", .pm = &geni_i2c_pm_ops, .of_match_table = geni_i2c_dt_match, .acpi_match_table = ACPI_PTR(geni_i2c_acpi_match), }, }; module_platform_driver(geni_i2c_driver); MODULE_DESCRIPTION("I2C Controller Driver for GENI based QUP cores"); MODULE_LICENSE("GPL v2");
linux-master
drivers/i2c/busses/i2c-qcom-geni.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2014 MediaTek Inc. * Author: Xudong Chen <[email protected]> */ #include <linux/clk.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/scatterlist.h> #include <linux/sched.h> #include <linux/slab.h> #define I2C_RS_TRANSFER (1 << 4) #define I2C_ARB_LOST (1 << 3) #define I2C_HS_NACKERR (1 << 2) #define I2C_ACKERR (1 << 1) #define I2C_TRANSAC_COMP (1 << 0) #define I2C_TRANSAC_START (1 << 0) #define I2C_RS_MUL_CNFG (1 << 15) #define I2C_RS_MUL_TRIG (1 << 14) #define I2C_DCM_DISABLE 0x0000 #define I2C_IO_CONFIG_OPEN_DRAIN 0x0003 #define I2C_IO_CONFIG_PUSH_PULL 0x0000 #define I2C_SOFT_RST 0x0001 #define I2C_HANDSHAKE_RST 0x0020 #define I2C_FIFO_ADDR_CLR 0x0001 #define I2C_DELAY_LEN 0x0002 #define I2C_ST_START_CON 0x8001 #define I2C_FS_START_CON 0x1800 #define I2C_TIME_CLR_VALUE 0x0000 #define I2C_TIME_DEFAULT_VALUE 0x0003 #define I2C_WRRD_TRANAC_VALUE 0x0002 #define I2C_RD_TRANAC_VALUE 0x0001 #define I2C_SCL_MIS_COMP_VALUE 0x0000 #define I2C_CHN_CLR_FLAG 0x0000 #define I2C_RELIABILITY 0x0010 #define I2C_DMAACK_ENABLE 0x0008 #define I2C_DMA_CON_TX 0x0000 #define I2C_DMA_CON_RX 0x0001 #define I2C_DMA_ASYNC_MODE 0x0004 #define I2C_DMA_SKIP_CONFIG 0x0010 #define I2C_DMA_DIR_CHANGE 0x0200 #define I2C_DMA_START_EN 0x0001 #define I2C_DMA_INT_FLAG_NONE 0x0000 #define I2C_DMA_CLR_FLAG 0x0000 #define I2C_DMA_WARM_RST 0x0001 #define I2C_DMA_HARD_RST 0x0002 #define I2C_DMA_HANDSHAKE_RST 0x0004 #define MAX_SAMPLE_CNT_DIV 8 #define MAX_STEP_CNT_DIV 64 #define MAX_CLOCK_DIV_8BITS 256 #define MAX_CLOCK_DIV_5BITS 32 #define MAX_HS_STEP_CNT_DIV 8 #define I2C_STANDARD_MODE_BUFFER (1000 / 3) #define I2C_FAST_MODE_BUFFER (300 / 3) #define I2C_FAST_MODE_PLUS_BUFFER (20 / 3) #define I2C_CONTROL_RS (0x1 << 1) #define I2C_CONTROL_DMA_EN (0x1 << 2) #define I2C_CONTROL_CLK_EXT_EN (0x1 << 3) #define I2C_CONTROL_DIR_CHANGE (0x1 << 4) #define I2C_CONTROL_ACKERR_DET_EN (0x1 << 5) #define I2C_CONTROL_TRANSFER_LEN_CHANGE (0x1 << 6) #define I2C_CONTROL_DMAACK_EN (0x1 << 8) #define I2C_CONTROL_ASYNC_MODE (0x1 << 9) #define I2C_CONTROL_WRAPPER (0x1 << 0) #define I2C_DRV_NAME "i2c-mt65xx" /** * enum i2c_mt65xx_clks - Clocks enumeration for MT65XX I2C * * @I2C_MT65XX_CLK_MAIN: main clock for i2c bus * @I2C_MT65XX_CLK_DMA: DMA clock for i2c via DMA * @I2C_MT65XX_CLK_PMIC: PMIC clock for i2c from PMIC * @I2C_MT65XX_CLK_ARB: Arbitrator clock for i2c * @I2C_MT65XX_CLK_MAX: Number of supported clocks */ enum i2c_mt65xx_clks { I2C_MT65XX_CLK_MAIN = 0, I2C_MT65XX_CLK_DMA, I2C_MT65XX_CLK_PMIC, I2C_MT65XX_CLK_ARB, I2C_MT65XX_CLK_MAX }; static const char * const i2c_mt65xx_clk_ids[I2C_MT65XX_CLK_MAX] = { "main", "dma", "pmic", "arb" }; enum DMA_REGS_OFFSET { OFFSET_INT_FLAG = 0x0, OFFSET_INT_EN = 0x04, OFFSET_EN = 0x08, OFFSET_RST = 0x0c, OFFSET_CON = 0x18, OFFSET_TX_MEM_ADDR = 0x1c, OFFSET_RX_MEM_ADDR = 0x20, OFFSET_TX_LEN = 0x24, OFFSET_RX_LEN = 0x28, OFFSET_TX_4G_MODE = 0x54, OFFSET_RX_4G_MODE = 0x58, }; enum i2c_trans_st_rs { I2C_TRANS_STOP = 0, I2C_TRANS_REPEATED_START, }; enum mtk_trans_op { I2C_MASTER_WR = 1, I2C_MASTER_RD, I2C_MASTER_WRRD, }; enum I2C_REGS_OFFSET { OFFSET_DATA_PORT, 
OFFSET_SLAVE_ADDR, OFFSET_INTR_MASK, OFFSET_INTR_STAT, OFFSET_CONTROL, OFFSET_TRANSFER_LEN, OFFSET_TRANSAC_LEN, OFFSET_DELAY_LEN, OFFSET_TIMING, OFFSET_START, OFFSET_EXT_CONF, OFFSET_FIFO_STAT, OFFSET_FIFO_THRESH, OFFSET_FIFO_ADDR_CLR, OFFSET_IO_CONFIG, OFFSET_RSV_DEBUG, OFFSET_HS, OFFSET_SOFTRESET, OFFSET_DCM_EN, OFFSET_MULTI_DMA, OFFSET_PATH_DIR, OFFSET_DEBUGSTAT, OFFSET_DEBUGCTRL, OFFSET_TRANSFER_LEN_AUX, OFFSET_CLOCK_DIV, OFFSET_LTIMING, OFFSET_SCL_HIGH_LOW_RATIO, OFFSET_HS_SCL_HIGH_LOW_RATIO, OFFSET_SCL_MIS_COMP_POINT, OFFSET_STA_STO_AC_TIMING, OFFSET_HS_STA_STO_AC_TIMING, OFFSET_SDA_TIMING, }; static const u16 mt_i2c_regs_v1[] = { [OFFSET_DATA_PORT] = 0x0, [OFFSET_SLAVE_ADDR] = 0x4, [OFFSET_INTR_MASK] = 0x8, [OFFSET_INTR_STAT] = 0xc, [OFFSET_CONTROL] = 0x10, [OFFSET_TRANSFER_LEN] = 0x14, [OFFSET_TRANSAC_LEN] = 0x18, [OFFSET_DELAY_LEN] = 0x1c, [OFFSET_TIMING] = 0x20, [OFFSET_START] = 0x24, [OFFSET_EXT_CONF] = 0x28, [OFFSET_FIFO_STAT] = 0x30, [OFFSET_FIFO_THRESH] = 0x34, [OFFSET_FIFO_ADDR_CLR] = 0x38, [OFFSET_IO_CONFIG] = 0x40, [OFFSET_RSV_DEBUG] = 0x44, [OFFSET_HS] = 0x48, [OFFSET_SOFTRESET] = 0x50, [OFFSET_DCM_EN] = 0x54, [OFFSET_PATH_DIR] = 0x60, [OFFSET_DEBUGSTAT] = 0x64, [OFFSET_DEBUGCTRL] = 0x68, [OFFSET_TRANSFER_LEN_AUX] = 0x6c, [OFFSET_CLOCK_DIV] = 0x70, [OFFSET_SCL_HIGH_LOW_RATIO] = 0x74, [OFFSET_HS_SCL_HIGH_LOW_RATIO] = 0x78, [OFFSET_SCL_MIS_COMP_POINT] = 0x7C, [OFFSET_STA_STO_AC_TIMING] = 0x80, [OFFSET_HS_STA_STO_AC_TIMING] = 0x84, [OFFSET_SDA_TIMING] = 0x88, }; static const u16 mt_i2c_regs_v2[] = { [OFFSET_DATA_PORT] = 0x0, [OFFSET_SLAVE_ADDR] = 0x4, [OFFSET_INTR_MASK] = 0x8, [OFFSET_INTR_STAT] = 0xc, [OFFSET_CONTROL] = 0x10, [OFFSET_TRANSFER_LEN] = 0x14, [OFFSET_TRANSAC_LEN] = 0x18, [OFFSET_DELAY_LEN] = 0x1c, [OFFSET_TIMING] = 0x20, [OFFSET_START] = 0x24, [OFFSET_EXT_CONF] = 0x28, [OFFSET_LTIMING] = 0x2c, [OFFSET_HS] = 0x30, [OFFSET_IO_CONFIG] = 0x34, [OFFSET_FIFO_ADDR_CLR] = 0x38, [OFFSET_SDA_TIMING] = 0x3c, [OFFSET_TRANSFER_LEN_AUX] = 0x44, [OFFSET_CLOCK_DIV] = 0x48, [OFFSET_SOFTRESET] = 0x50, [OFFSET_MULTI_DMA] = 0x8c, [OFFSET_SCL_MIS_COMP_POINT] = 0x90, [OFFSET_DEBUGSTAT] = 0xe4, [OFFSET_DEBUGCTRL] = 0xe8, [OFFSET_FIFO_STAT] = 0xf4, [OFFSET_FIFO_THRESH] = 0xf8, [OFFSET_DCM_EN] = 0xf88, }; static const u16 mt_i2c_regs_v3[] = { [OFFSET_DATA_PORT] = 0x0, [OFFSET_INTR_MASK] = 0x8, [OFFSET_INTR_STAT] = 0xc, [OFFSET_CONTROL] = 0x10, [OFFSET_TRANSFER_LEN] = 0x14, [OFFSET_TRANSAC_LEN] = 0x18, [OFFSET_DELAY_LEN] = 0x1c, [OFFSET_TIMING] = 0x20, [OFFSET_START] = 0x24, [OFFSET_EXT_CONF] = 0x28, [OFFSET_LTIMING] = 0x2c, [OFFSET_HS] = 0x30, [OFFSET_IO_CONFIG] = 0x34, [OFFSET_FIFO_ADDR_CLR] = 0x38, [OFFSET_SDA_TIMING] = 0x3c, [OFFSET_TRANSFER_LEN_AUX] = 0x44, [OFFSET_CLOCK_DIV] = 0x48, [OFFSET_SOFTRESET] = 0x50, [OFFSET_MULTI_DMA] = 0x8c, [OFFSET_SCL_MIS_COMP_POINT] = 0x90, [OFFSET_SLAVE_ADDR] = 0x94, [OFFSET_DEBUGSTAT] = 0xe4, [OFFSET_DEBUGCTRL] = 0xe8, [OFFSET_FIFO_STAT] = 0xf4, [OFFSET_FIFO_THRESH] = 0xf8, [OFFSET_DCM_EN] = 0xf88, }; struct mtk_i2c_compatible { const struct i2c_adapter_quirks *quirks; const u16 *regs; unsigned char pmic_i2c: 1; unsigned char dcm: 1; unsigned char auto_restart: 1; unsigned char aux_len_reg: 1; unsigned char timing_adjust: 1; unsigned char dma_sync: 1; unsigned char ltiming_adjust: 1; unsigned char apdma_sync: 1; unsigned char max_dma_support; }; struct mtk_i2c_ac_timing { u16 htiming; u16 ltiming; u16 hs; u16 ext; u16 inter_clk_div; u16 scl_hl_ratio; u16 hs_scl_hl_ratio; u16 sta_stop; u16 hs_sta_stop; u16 sda_timing; }; struct mtk_i2c { struct 
i2c_adapter adap; /* i2c host adapter */ struct device *dev; struct completion msg_complete; struct i2c_timings timing_info; /* set in i2c probe */ void __iomem *base; /* i2c base addr */ void __iomem *pdmabase; /* dma base address*/ struct clk_bulk_data clocks[I2C_MT65XX_CLK_MAX]; /* clocks for i2c */ bool have_pmic; /* can use i2c pins from PMIC */ bool use_push_pull; /* IO config push-pull mode */ u16 irq_stat; /* interrupt status */ unsigned int clk_src_div; unsigned int speed_hz; /* The speed in transfer */ enum mtk_trans_op op; u16 timing_reg; u16 high_speed_reg; u16 ltiming_reg; unsigned char auto_restart; bool ignore_restart_irq; struct mtk_i2c_ac_timing ac_timing; const struct mtk_i2c_compatible *dev_comp; }; /** * struct i2c_spec_values: * @min_low_ns: min LOW period of the SCL clock * @min_su_sta_ns: min set-up time for a repeated START condition * @max_hd_dat_ns: max data hold time * @min_su_dat_ns: min data set-up time */ struct i2c_spec_values { unsigned int min_low_ns; unsigned int min_su_sta_ns; unsigned int max_hd_dat_ns; unsigned int min_su_dat_ns; }; static const struct i2c_spec_values standard_mode_spec = { .min_low_ns = 4700 + I2C_STANDARD_MODE_BUFFER, .min_su_sta_ns = 4700 + I2C_STANDARD_MODE_BUFFER, .max_hd_dat_ns = 3450 - I2C_STANDARD_MODE_BUFFER, .min_su_dat_ns = 250 + I2C_STANDARD_MODE_BUFFER, }; static const struct i2c_spec_values fast_mode_spec = { .min_low_ns = 1300 + I2C_FAST_MODE_BUFFER, .min_su_sta_ns = 600 + I2C_FAST_MODE_BUFFER, .max_hd_dat_ns = 900 - I2C_FAST_MODE_BUFFER, .min_su_dat_ns = 100 + I2C_FAST_MODE_BUFFER, }; static const struct i2c_spec_values fast_mode_plus_spec = { .min_low_ns = 500 + I2C_FAST_MODE_PLUS_BUFFER, .min_su_sta_ns = 260 + I2C_FAST_MODE_PLUS_BUFFER, .max_hd_dat_ns = 400 - I2C_FAST_MODE_PLUS_BUFFER, .min_su_dat_ns = 50 + I2C_FAST_MODE_PLUS_BUFFER, }; static const struct i2c_adapter_quirks mt6577_i2c_quirks = { .flags = I2C_AQ_COMB_WRITE_THEN_READ, .max_num_msgs = 1, .max_write_len = 255, .max_read_len = 255, .max_comb_1st_msg_len = 255, .max_comb_2nd_msg_len = 31, }; static const struct i2c_adapter_quirks mt7622_i2c_quirks = { .max_num_msgs = 255, }; static const struct i2c_adapter_quirks mt8183_i2c_quirks = { .flags = I2C_AQ_NO_ZERO_LEN, }; static const struct mtk_i2c_compatible mt2712_compat = { .regs = mt_i2c_regs_v1, .pmic_i2c = 0, .dcm = 1, .auto_restart = 1, .aux_len_reg = 1, .timing_adjust = 1, .dma_sync = 0, .ltiming_adjust = 0, .apdma_sync = 0, .max_dma_support = 33, }; static const struct mtk_i2c_compatible mt6577_compat = { .quirks = &mt6577_i2c_quirks, .regs = mt_i2c_regs_v1, .pmic_i2c = 0, .dcm = 1, .auto_restart = 0, .aux_len_reg = 0, .timing_adjust = 0, .dma_sync = 0, .ltiming_adjust = 0, .apdma_sync = 0, .max_dma_support = 32, }; static const struct mtk_i2c_compatible mt6589_compat = { .quirks = &mt6577_i2c_quirks, .regs = mt_i2c_regs_v1, .pmic_i2c = 1, .dcm = 0, .auto_restart = 0, .aux_len_reg = 0, .timing_adjust = 0, .dma_sync = 0, .ltiming_adjust = 0, .apdma_sync = 0, .max_dma_support = 32, }; static const struct mtk_i2c_compatible mt7622_compat = { .quirks = &mt7622_i2c_quirks, .regs = mt_i2c_regs_v1, .pmic_i2c = 0, .dcm = 1, .auto_restart = 1, .aux_len_reg = 1, .timing_adjust = 0, .dma_sync = 0, .ltiming_adjust = 0, .apdma_sync = 0, .max_dma_support = 32, }; static const struct mtk_i2c_compatible mt8168_compat = { .regs = mt_i2c_regs_v1, .pmic_i2c = 0, .dcm = 1, .auto_restart = 1, .aux_len_reg = 1, .timing_adjust = 1, .dma_sync = 1, .ltiming_adjust = 0, .apdma_sync = 0, .max_dma_support = 33, }; static const 
struct mtk_i2c_compatible mt7981_compat = { .regs = mt_i2c_regs_v3, .pmic_i2c = 0, .dcm = 0, .auto_restart = 1, .aux_len_reg = 1, .timing_adjust = 1, .dma_sync = 1, .ltiming_adjust = 1, .max_dma_support = 33 }; static const struct mtk_i2c_compatible mt7986_compat = { .quirks = &mt7622_i2c_quirks, .regs = mt_i2c_regs_v1, .pmic_i2c = 0, .dcm = 1, .auto_restart = 1, .aux_len_reg = 1, .timing_adjust = 0, .dma_sync = 1, .ltiming_adjust = 0, .max_dma_support = 32, }; static const struct mtk_i2c_compatible mt8173_compat = { .regs = mt_i2c_regs_v1, .pmic_i2c = 0, .dcm = 1, .auto_restart = 1, .aux_len_reg = 1, .timing_adjust = 0, .dma_sync = 0, .ltiming_adjust = 0, .apdma_sync = 0, .max_dma_support = 33, }; static const struct mtk_i2c_compatible mt8183_compat = { .quirks = &mt8183_i2c_quirks, .regs = mt_i2c_regs_v2, .pmic_i2c = 0, .dcm = 0, .auto_restart = 1, .aux_len_reg = 1, .timing_adjust = 1, .dma_sync = 1, .ltiming_adjust = 1, .apdma_sync = 0, .max_dma_support = 33, }; static const struct mtk_i2c_compatible mt8186_compat = { .regs = mt_i2c_regs_v2, .pmic_i2c = 0, .dcm = 0, .auto_restart = 1, .aux_len_reg = 1, .timing_adjust = 1, .dma_sync = 0, .ltiming_adjust = 1, .apdma_sync = 0, .max_dma_support = 36, }; static const struct mtk_i2c_compatible mt8188_compat = { .regs = mt_i2c_regs_v3, .pmic_i2c = 0, .dcm = 0, .auto_restart = 1, .aux_len_reg = 1, .timing_adjust = 1, .dma_sync = 0, .ltiming_adjust = 1, .apdma_sync = 1, .max_dma_support = 36, }; static const struct mtk_i2c_compatible mt8192_compat = { .quirks = &mt8183_i2c_quirks, .regs = mt_i2c_regs_v2, .pmic_i2c = 0, .dcm = 0, .auto_restart = 1, .aux_len_reg = 1, .timing_adjust = 1, .dma_sync = 1, .ltiming_adjust = 1, .apdma_sync = 1, .max_dma_support = 36, }; static const struct of_device_id mtk_i2c_of_match[] = { { .compatible = "mediatek,mt2712-i2c", .data = &mt2712_compat }, { .compatible = "mediatek,mt6577-i2c", .data = &mt6577_compat }, { .compatible = "mediatek,mt6589-i2c", .data = &mt6589_compat }, { .compatible = "mediatek,mt7622-i2c", .data = &mt7622_compat }, { .compatible = "mediatek,mt7981-i2c", .data = &mt7981_compat }, { .compatible = "mediatek,mt7986-i2c", .data = &mt7986_compat }, { .compatible = "mediatek,mt8168-i2c", .data = &mt8168_compat }, { .compatible = "mediatek,mt8173-i2c", .data = &mt8173_compat }, { .compatible = "mediatek,mt8183-i2c", .data = &mt8183_compat }, { .compatible = "mediatek,mt8186-i2c", .data = &mt8186_compat }, { .compatible = "mediatek,mt8188-i2c", .data = &mt8188_compat }, { .compatible = "mediatek,mt8192-i2c", .data = &mt8192_compat }, {} }; MODULE_DEVICE_TABLE(of, mtk_i2c_of_match); static u16 mtk_i2c_readw(struct mtk_i2c *i2c, enum I2C_REGS_OFFSET reg) { return readw(i2c->base + i2c->dev_comp->regs[reg]); } static void mtk_i2c_writew(struct mtk_i2c *i2c, u16 val, enum I2C_REGS_OFFSET reg) { writew(val, i2c->base + i2c->dev_comp->regs[reg]); } static void mtk_i2c_init_hw(struct mtk_i2c *i2c) { u16 control_reg; u16 intr_stat_reg; u16 ext_conf_val; mtk_i2c_writew(i2c, I2C_CHN_CLR_FLAG, OFFSET_START); intr_stat_reg = mtk_i2c_readw(i2c, OFFSET_INTR_STAT); mtk_i2c_writew(i2c, intr_stat_reg, OFFSET_INTR_STAT); if (i2c->dev_comp->apdma_sync) { writel(I2C_DMA_WARM_RST, i2c->pdmabase + OFFSET_RST); udelay(10); writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST); udelay(10); writel(I2C_DMA_HANDSHAKE_RST | I2C_DMA_HARD_RST, i2c->pdmabase + OFFSET_RST); mtk_i2c_writew(i2c, I2C_HANDSHAKE_RST | I2C_SOFT_RST, OFFSET_SOFTRESET); udelay(10); writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST); mtk_i2c_writew(i2c, 
I2C_CHN_CLR_FLAG, OFFSET_SOFTRESET); } else { writel(I2C_DMA_HARD_RST, i2c->pdmabase + OFFSET_RST); udelay(50); writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST); mtk_i2c_writew(i2c, I2C_SOFT_RST, OFFSET_SOFTRESET); } /* Set ioconfig */ if (i2c->use_push_pull) mtk_i2c_writew(i2c, I2C_IO_CONFIG_PUSH_PULL, OFFSET_IO_CONFIG); else mtk_i2c_writew(i2c, I2C_IO_CONFIG_OPEN_DRAIN, OFFSET_IO_CONFIG); if (i2c->dev_comp->dcm) mtk_i2c_writew(i2c, I2C_DCM_DISABLE, OFFSET_DCM_EN); mtk_i2c_writew(i2c, i2c->timing_reg, OFFSET_TIMING); mtk_i2c_writew(i2c, i2c->high_speed_reg, OFFSET_HS); if (i2c->dev_comp->ltiming_adjust) mtk_i2c_writew(i2c, i2c->ltiming_reg, OFFSET_LTIMING); if (i2c->speed_hz <= I2C_MAX_STANDARD_MODE_FREQ) ext_conf_val = I2C_ST_START_CON; else ext_conf_val = I2C_FS_START_CON; if (i2c->dev_comp->timing_adjust) { ext_conf_val = i2c->ac_timing.ext; mtk_i2c_writew(i2c, i2c->ac_timing.inter_clk_div, OFFSET_CLOCK_DIV); mtk_i2c_writew(i2c, I2C_SCL_MIS_COMP_VALUE, OFFSET_SCL_MIS_COMP_POINT); mtk_i2c_writew(i2c, i2c->ac_timing.sda_timing, OFFSET_SDA_TIMING); if (i2c->dev_comp->ltiming_adjust) { mtk_i2c_writew(i2c, i2c->ac_timing.htiming, OFFSET_TIMING); mtk_i2c_writew(i2c, i2c->ac_timing.hs, OFFSET_HS); mtk_i2c_writew(i2c, i2c->ac_timing.ltiming, OFFSET_LTIMING); } else { mtk_i2c_writew(i2c, i2c->ac_timing.scl_hl_ratio, OFFSET_SCL_HIGH_LOW_RATIO); mtk_i2c_writew(i2c, i2c->ac_timing.hs_scl_hl_ratio, OFFSET_HS_SCL_HIGH_LOW_RATIO); mtk_i2c_writew(i2c, i2c->ac_timing.sta_stop, OFFSET_STA_STO_AC_TIMING); mtk_i2c_writew(i2c, i2c->ac_timing.hs_sta_stop, OFFSET_HS_STA_STO_AC_TIMING); } } mtk_i2c_writew(i2c, ext_conf_val, OFFSET_EXT_CONF); /* If use i2c pin from PMIC mt6397 side, need set PATH_DIR first */ if (i2c->have_pmic) mtk_i2c_writew(i2c, I2C_CONTROL_WRAPPER, OFFSET_PATH_DIR); control_reg = I2C_CONTROL_ACKERR_DET_EN | I2C_CONTROL_CLK_EXT_EN | I2C_CONTROL_DMA_EN; if (i2c->dev_comp->dma_sync) control_reg |= I2C_CONTROL_DMAACK_EN | I2C_CONTROL_ASYNC_MODE; mtk_i2c_writew(i2c, control_reg, OFFSET_CONTROL); mtk_i2c_writew(i2c, I2C_DELAY_LEN, OFFSET_DELAY_LEN); } static const struct i2c_spec_values *mtk_i2c_get_spec(unsigned int speed) { if (speed <= I2C_MAX_STANDARD_MODE_FREQ) return &standard_mode_spec; else if (speed <= I2C_MAX_FAST_MODE_FREQ) return &fast_mode_spec; else return &fast_mode_plus_spec; } static int mtk_i2c_max_step_cnt(unsigned int target_speed) { if (target_speed > I2C_MAX_FAST_MODE_PLUS_FREQ) return MAX_HS_STEP_CNT_DIV; else return MAX_STEP_CNT_DIV; } static int mtk_i2c_get_clk_div_restri(struct mtk_i2c *i2c, unsigned int sample_cnt) { int clk_div_restri = 0; if (i2c->dev_comp->ltiming_adjust == 0) return 0; if (sample_cnt == 1) { if (i2c->ac_timing.inter_clk_div == 0) clk_div_restri = 0; else clk_div_restri = 1; } else { if (i2c->ac_timing.inter_clk_div == 0) clk_div_restri = -1; else if (i2c->ac_timing.inter_clk_div == 1) clk_div_restri = 0; else clk_div_restri = 1; } return clk_div_restri; } /* * Check and Calculate i2c ac-timing * * Hardware design: * sample_ns = (1000000000 * (sample_cnt + 1)) / clk_src * xxx_cnt_div = spec->min_xxx_ns / sample_ns * * Sample_ns is rounded down for xxx_cnt_div would be greater * than the smallest spec. * The sda_timing is chosen as the middle value between * the largest and smallest. 
*/ static int mtk_i2c_check_ac_timing(struct mtk_i2c *i2c, unsigned int clk_src, unsigned int check_speed, unsigned int step_cnt, unsigned int sample_cnt) { const struct i2c_spec_values *spec; unsigned int su_sta_cnt, low_cnt, high_cnt, max_step_cnt; unsigned int sda_max, sda_min, clk_ns, max_sta_cnt = 0x3f; unsigned int sample_ns = div_u64(1000000000ULL * (sample_cnt + 1), clk_src); if (!i2c->dev_comp->timing_adjust) return 0; if (i2c->dev_comp->ltiming_adjust) max_sta_cnt = 0x100; spec = mtk_i2c_get_spec(check_speed); if (i2c->dev_comp->ltiming_adjust) clk_ns = 1000000000 / clk_src; else clk_ns = sample_ns / 2; su_sta_cnt = DIV_ROUND_UP(spec->min_su_sta_ns + i2c->timing_info.scl_int_delay_ns, clk_ns); if (su_sta_cnt > max_sta_cnt) return -1; low_cnt = DIV_ROUND_UP(spec->min_low_ns, sample_ns); max_step_cnt = mtk_i2c_max_step_cnt(check_speed); if ((2 * step_cnt) > low_cnt && low_cnt < max_step_cnt) { if (low_cnt > step_cnt) { high_cnt = 2 * step_cnt - low_cnt; } else { high_cnt = step_cnt; low_cnt = step_cnt; } } else { return -2; } sda_max = spec->max_hd_dat_ns / sample_ns; if (sda_max > low_cnt) sda_max = 0; sda_min = DIV_ROUND_UP(spec->min_su_dat_ns, sample_ns); if (sda_min < low_cnt) sda_min = 0; if (sda_min > sda_max) return -3; if (check_speed > I2C_MAX_FAST_MODE_PLUS_FREQ) { if (i2c->dev_comp->ltiming_adjust) { i2c->ac_timing.hs = I2C_TIME_DEFAULT_VALUE | (sample_cnt << 12) | (high_cnt << 8); i2c->ac_timing.ltiming &= ~GENMASK(15, 9); i2c->ac_timing.ltiming |= (sample_cnt << 12) | (low_cnt << 9); i2c->ac_timing.ext &= ~GENMASK(7, 1); i2c->ac_timing.ext |= (su_sta_cnt << 1) | (1 << 0); } else { i2c->ac_timing.hs_scl_hl_ratio = (1 << 12) | (high_cnt << 6) | low_cnt; i2c->ac_timing.hs_sta_stop = (su_sta_cnt << 8) | su_sta_cnt; } i2c->ac_timing.sda_timing &= ~GENMASK(11, 6); i2c->ac_timing.sda_timing |= (1 << 12) | ((sda_max + sda_min) / 2) << 6; } else { if (i2c->dev_comp->ltiming_adjust) { i2c->ac_timing.htiming = (sample_cnt << 8) | (high_cnt); i2c->ac_timing.ltiming = (sample_cnt << 6) | (low_cnt); i2c->ac_timing.ext = (su_sta_cnt << 8) | (1 << 0); } else { i2c->ac_timing.scl_hl_ratio = (1 << 12) | (high_cnt << 6) | low_cnt; i2c->ac_timing.sta_stop = (su_sta_cnt << 8) | su_sta_cnt; } i2c->ac_timing.sda_timing = (1 << 12) | (sda_max + sda_min) / 2; } return 0; } /* * Calculate i2c port speed * * Hardware design: * i2c_bus_freq = parent_clk / (clock_div * 2 * sample_cnt * step_cnt) * clock_div: fixed in hardware, but may be various in different SoCs * * The calculation want to pick the highest bus frequency that is still * less than or equal to i2c->speed_hz. 
The calculation tries to get
 * suitable sample_cnt and step_cnt values.
 */
static int mtk_i2c_calculate_speed(struct mtk_i2c *i2c, unsigned int clk_src,
				   unsigned int target_speed,
				   unsigned int *timing_step_cnt,
				   unsigned int *timing_sample_cnt)
{
	unsigned int step_cnt;
	unsigned int sample_cnt;
	unsigned int max_step_cnt;
	unsigned int base_sample_cnt = MAX_SAMPLE_CNT_DIV;
	unsigned int base_step_cnt;
	unsigned int opt_div;
	unsigned int best_mul;
	unsigned int cnt_mul;
	int ret = -EINVAL;
	int clk_div_restri = 0;

	if (target_speed > I2C_MAX_HIGH_SPEED_MODE_FREQ)
		target_speed = I2C_MAX_HIGH_SPEED_MODE_FREQ;

	max_step_cnt = mtk_i2c_max_step_cnt(target_speed);
	base_step_cnt = max_step_cnt;

	/* Find the best combination */
	opt_div = DIV_ROUND_UP(clk_src >> 1, target_speed);
	best_mul = MAX_SAMPLE_CNT_DIV * max_step_cnt;

	/* Search for the best pair (sample_cnt, step_cnt) with
	 * 0 < sample_cnt <= MAX_SAMPLE_CNT_DIV
	 * 0 < step_cnt <= max_step_cnt
	 * sample_cnt * step_cnt >= opt_div
	 * optimizing for sample_cnt * step_cnt being minimal
	 */
	for (sample_cnt = 1; sample_cnt <= MAX_SAMPLE_CNT_DIV; sample_cnt++) {
		clk_div_restri = mtk_i2c_get_clk_div_restri(i2c, sample_cnt);
		step_cnt = DIV_ROUND_UP(opt_div + clk_div_restri, sample_cnt);
		cnt_mul = step_cnt * sample_cnt;
		if (step_cnt > max_step_cnt)
			continue;

		if (cnt_mul < best_mul) {
			ret = mtk_i2c_check_ac_timing(i2c, clk_src,
						      target_speed, step_cnt - 1,
						      sample_cnt - 1);
			if (ret)
				continue;

			best_mul = cnt_mul;
			base_sample_cnt = sample_cnt;
			base_step_cnt = step_cnt;
			if (best_mul == (opt_div + clk_div_restri))
				break;
		}
	}

	if (ret)
		return -EINVAL;

	sample_cnt = base_sample_cnt;
	step_cnt = base_step_cnt;

	if ((clk_src / (2 * (sample_cnt * step_cnt - clk_div_restri))) >
	    target_speed) {
		/* In this case, the hardware can't support such a
		 * low i2c_bus_freq
		 */
		dev_dbg(i2c->dev, "Unsupported speed (%uhz)\n", target_speed);
		return -EINVAL;
	}

	*timing_step_cnt = step_cnt - 1;
	*timing_sample_cnt = sample_cnt - 1;

	return 0;
}

static int mtk_i2c_set_speed(struct mtk_i2c *i2c, unsigned int parent_clk)
{
	unsigned int clk_src;
	unsigned int step_cnt;
	unsigned int sample_cnt;
	unsigned int l_step_cnt;
	unsigned int l_sample_cnt;
	unsigned int target_speed;
	unsigned int clk_div;
	unsigned int max_clk_div;
	int ret;

	target_speed = i2c->speed_hz;
	parent_clk /= i2c->clk_src_div;

	if (i2c->dev_comp->timing_adjust && i2c->dev_comp->ltiming_adjust)
		max_clk_div = MAX_CLOCK_DIV_5BITS;
	else if (i2c->dev_comp->timing_adjust)
		max_clk_div = MAX_CLOCK_DIV_8BITS;
	else
		max_clk_div = 1;

	for (clk_div = 1; clk_div <= max_clk_div; clk_div++) {
		clk_src = parent_clk / clk_div;
		i2c->ac_timing.inter_clk_div = clk_div - 1;

		if (target_speed > I2C_MAX_FAST_MODE_PLUS_FREQ) {
			/* Set master code speed register */
			ret = mtk_i2c_calculate_speed(i2c, clk_src,
						      I2C_MAX_FAST_MODE_FREQ,
						      &l_step_cnt,
						      &l_sample_cnt);
			if (ret < 0)
				continue;

			i2c->timing_reg = (l_sample_cnt << 8) | l_step_cnt;

			/* Set the high speed mode register */
			ret = mtk_i2c_calculate_speed(i2c, clk_src,
						      target_speed, &step_cnt,
						      &sample_cnt);
			if (ret < 0)
				continue;

			i2c->high_speed_reg = I2C_TIME_DEFAULT_VALUE |
					      (sample_cnt << 12) |
					      (step_cnt << 8);

			if (i2c->dev_comp->ltiming_adjust)
				i2c->ltiming_reg = (l_sample_cnt << 6) |
						   l_step_cnt |
						   (sample_cnt << 12) |
						   (step_cnt << 9);
		} else {
			ret = mtk_i2c_calculate_speed(i2c, clk_src,
						      target_speed, &l_step_cnt,
						      &l_sample_cnt);
			if (ret < 0)
				continue;

			i2c->timing_reg = (l_sample_cnt << 8) | l_step_cnt;

			/* Disable the high speed transaction */
			i2c->high_speed_reg = I2C_TIME_CLR_VALUE;

			if (i2c->dev_comp->ltiming_adjust)
				i2c->ltiming_reg = (l_sample_cnt << 6) |
l_step_cnt; } break; } return 0; } static void i2c_dump_register(struct mtk_i2c *i2c) { dev_dbg(i2c->dev, "SLAVE_ADDR: 0x%x, INTR_MASK: 0x%x\n", mtk_i2c_readw(i2c, OFFSET_SLAVE_ADDR), mtk_i2c_readw(i2c, OFFSET_INTR_MASK)); dev_dbg(i2c->dev, "INTR_STAT: 0x%x, CONTROL: 0x%x\n", mtk_i2c_readw(i2c, OFFSET_INTR_STAT), mtk_i2c_readw(i2c, OFFSET_CONTROL)); dev_dbg(i2c->dev, "TRANSFER_LEN: 0x%x, TRANSAC_LEN: 0x%x\n", mtk_i2c_readw(i2c, OFFSET_TRANSFER_LEN), mtk_i2c_readw(i2c, OFFSET_TRANSAC_LEN)); dev_dbg(i2c->dev, "DELAY_LEN: 0x%x, HTIMING: 0x%x\n", mtk_i2c_readw(i2c, OFFSET_DELAY_LEN), mtk_i2c_readw(i2c, OFFSET_TIMING)); dev_dbg(i2c->dev, "START: 0x%x, EXT_CONF: 0x%x\n", mtk_i2c_readw(i2c, OFFSET_START), mtk_i2c_readw(i2c, OFFSET_EXT_CONF)); dev_dbg(i2c->dev, "HS: 0x%x, IO_CONFIG: 0x%x\n", mtk_i2c_readw(i2c, OFFSET_HS), mtk_i2c_readw(i2c, OFFSET_IO_CONFIG)); dev_dbg(i2c->dev, "DCM_EN: 0x%x, TRANSFER_LEN_AUX: 0x%x\n", mtk_i2c_readw(i2c, OFFSET_DCM_EN), mtk_i2c_readw(i2c, OFFSET_TRANSFER_LEN_AUX)); dev_dbg(i2c->dev, "CLOCK_DIV: 0x%x, FIFO_STAT: 0x%x\n", mtk_i2c_readw(i2c, OFFSET_CLOCK_DIV), mtk_i2c_readw(i2c, OFFSET_FIFO_STAT)); dev_dbg(i2c->dev, "DEBUGCTRL : 0x%x, DEBUGSTAT: 0x%x\n", mtk_i2c_readw(i2c, OFFSET_DEBUGCTRL), mtk_i2c_readw(i2c, OFFSET_DEBUGSTAT)); if (i2c->dev_comp->regs == mt_i2c_regs_v2) { dev_dbg(i2c->dev, "LTIMING: 0x%x, MULTI_DMA: 0x%x\n", mtk_i2c_readw(i2c, OFFSET_LTIMING), mtk_i2c_readw(i2c, OFFSET_MULTI_DMA)); } dev_dbg(i2c->dev, "\nDMA_INT_FLAG: 0x%x, DMA_INT_EN: 0x%x\n", readl(i2c->pdmabase + OFFSET_INT_FLAG), readl(i2c->pdmabase + OFFSET_INT_EN)); dev_dbg(i2c->dev, "DMA_EN: 0x%x, DMA_CON: 0x%x\n", readl(i2c->pdmabase + OFFSET_EN), readl(i2c->pdmabase + OFFSET_CON)); dev_dbg(i2c->dev, "DMA_TX_MEM_ADDR: 0x%x, DMA_RX_MEM_ADDR: 0x%x\n", readl(i2c->pdmabase + OFFSET_TX_MEM_ADDR), readl(i2c->pdmabase + OFFSET_RX_MEM_ADDR)); dev_dbg(i2c->dev, "DMA_TX_LEN: 0x%x, DMA_RX_LEN: 0x%x\n", readl(i2c->pdmabase + OFFSET_TX_LEN), readl(i2c->pdmabase + OFFSET_RX_LEN)); dev_dbg(i2c->dev, "DMA_TX_4G_MODE: 0x%x, DMA_RX_4G_MODE: 0x%x", readl(i2c->pdmabase + OFFSET_TX_4G_MODE), readl(i2c->pdmabase + OFFSET_RX_4G_MODE)); } static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs, int num, int left_num) { u16 addr_reg; u16 start_reg; u16 control_reg; u16 restart_flag = 0; u16 dma_sync = 0; u32 reg_4g_mode; u32 reg_dma_reset; u8 *dma_rd_buf = NULL; u8 *dma_wr_buf = NULL; dma_addr_t rpaddr = 0; dma_addr_t wpaddr = 0; int ret; i2c->irq_stat = 0; if (i2c->auto_restart) restart_flag = I2C_RS_TRANSFER; reinit_completion(&i2c->msg_complete); if (i2c->dev_comp->apdma_sync && i2c->op != I2C_MASTER_WRRD && num > 1) { mtk_i2c_writew(i2c, 0x00, OFFSET_DEBUGCTRL); writel(I2C_DMA_HANDSHAKE_RST | I2C_DMA_WARM_RST, i2c->pdmabase + OFFSET_RST); ret = readw_poll_timeout(i2c->pdmabase + OFFSET_RST, reg_dma_reset, !(reg_dma_reset & I2C_DMA_WARM_RST), 0, 100); if (ret) { dev_err(i2c->dev, "DMA warm reset timeout\n"); return -ETIMEDOUT; } writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST); mtk_i2c_writew(i2c, I2C_HANDSHAKE_RST, OFFSET_SOFTRESET); mtk_i2c_writew(i2c, I2C_CHN_CLR_FLAG, OFFSET_SOFTRESET); mtk_i2c_writew(i2c, I2C_RELIABILITY | I2C_DMAACK_ENABLE, OFFSET_DEBUGCTRL); } control_reg = mtk_i2c_readw(i2c, OFFSET_CONTROL) & ~(I2C_CONTROL_DIR_CHANGE | I2C_CONTROL_RS); if ((i2c->speed_hz > I2C_MAX_FAST_MODE_PLUS_FREQ) || (left_num >= 1)) control_reg |= I2C_CONTROL_RS; if (i2c->op == I2C_MASTER_WRRD) control_reg |= I2C_CONTROL_DIR_CHANGE | I2C_CONTROL_RS; mtk_i2c_writew(i2c, control_reg, 
OFFSET_CONTROL); addr_reg = i2c_8bit_addr_from_msg(msgs); mtk_i2c_writew(i2c, addr_reg, OFFSET_SLAVE_ADDR); /* Clear interrupt status */ mtk_i2c_writew(i2c, restart_flag | I2C_HS_NACKERR | I2C_ACKERR | I2C_ARB_LOST | I2C_TRANSAC_COMP, OFFSET_INTR_STAT); mtk_i2c_writew(i2c, I2C_FIFO_ADDR_CLR, OFFSET_FIFO_ADDR_CLR); /* Enable interrupt */ mtk_i2c_writew(i2c, restart_flag | I2C_HS_NACKERR | I2C_ACKERR | I2C_ARB_LOST | I2C_TRANSAC_COMP, OFFSET_INTR_MASK); /* Set transfer and transaction len */ if (i2c->op == I2C_MASTER_WRRD) { if (i2c->dev_comp->aux_len_reg) { mtk_i2c_writew(i2c, msgs->len, OFFSET_TRANSFER_LEN); mtk_i2c_writew(i2c, (msgs + 1)->len, OFFSET_TRANSFER_LEN_AUX); } else { mtk_i2c_writew(i2c, msgs->len | ((msgs + 1)->len) << 8, OFFSET_TRANSFER_LEN); } mtk_i2c_writew(i2c, I2C_WRRD_TRANAC_VALUE, OFFSET_TRANSAC_LEN); } else { mtk_i2c_writew(i2c, msgs->len, OFFSET_TRANSFER_LEN); mtk_i2c_writew(i2c, num, OFFSET_TRANSAC_LEN); } if (i2c->dev_comp->apdma_sync) { dma_sync = I2C_DMA_SKIP_CONFIG | I2C_DMA_ASYNC_MODE; if (i2c->op == I2C_MASTER_WRRD) dma_sync |= I2C_DMA_DIR_CHANGE; } /* Prepare buffer data to start transfer */ if (i2c->op == I2C_MASTER_RD) { writel(I2C_DMA_INT_FLAG_NONE, i2c->pdmabase + OFFSET_INT_FLAG); writel(I2C_DMA_CON_RX | dma_sync, i2c->pdmabase + OFFSET_CON); dma_rd_buf = i2c_get_dma_safe_msg_buf(msgs, 1); if (!dma_rd_buf) return -ENOMEM; rpaddr = dma_map_single(i2c->dev, dma_rd_buf, msgs->len, DMA_FROM_DEVICE); if (dma_mapping_error(i2c->dev, rpaddr)) { i2c_put_dma_safe_msg_buf(dma_rd_buf, msgs, false); return -ENOMEM; } if (i2c->dev_comp->max_dma_support > 32) { reg_4g_mode = upper_32_bits(rpaddr); writel(reg_4g_mode, i2c->pdmabase + OFFSET_RX_4G_MODE); } writel((u32)rpaddr, i2c->pdmabase + OFFSET_RX_MEM_ADDR); writel(msgs->len, i2c->pdmabase + OFFSET_RX_LEN); } else if (i2c->op == I2C_MASTER_WR) { writel(I2C_DMA_INT_FLAG_NONE, i2c->pdmabase + OFFSET_INT_FLAG); writel(I2C_DMA_CON_TX | dma_sync, i2c->pdmabase + OFFSET_CON); dma_wr_buf = i2c_get_dma_safe_msg_buf(msgs, 1); if (!dma_wr_buf) return -ENOMEM; wpaddr = dma_map_single(i2c->dev, dma_wr_buf, msgs->len, DMA_TO_DEVICE); if (dma_mapping_error(i2c->dev, wpaddr)) { i2c_put_dma_safe_msg_buf(dma_wr_buf, msgs, false); return -ENOMEM; } if (i2c->dev_comp->max_dma_support > 32) { reg_4g_mode = upper_32_bits(wpaddr); writel(reg_4g_mode, i2c->pdmabase + OFFSET_TX_4G_MODE); } writel((u32)wpaddr, i2c->pdmabase + OFFSET_TX_MEM_ADDR); writel(msgs->len, i2c->pdmabase + OFFSET_TX_LEN); } else { writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_INT_FLAG); writel(I2C_DMA_CLR_FLAG | dma_sync, i2c->pdmabase + OFFSET_CON); dma_wr_buf = i2c_get_dma_safe_msg_buf(msgs, 1); if (!dma_wr_buf) return -ENOMEM; wpaddr = dma_map_single(i2c->dev, dma_wr_buf, msgs->len, DMA_TO_DEVICE); if (dma_mapping_error(i2c->dev, wpaddr)) { i2c_put_dma_safe_msg_buf(dma_wr_buf, msgs, false); return -ENOMEM; } dma_rd_buf = i2c_get_dma_safe_msg_buf((msgs + 1), 1); if (!dma_rd_buf) { dma_unmap_single(i2c->dev, wpaddr, msgs->len, DMA_TO_DEVICE); i2c_put_dma_safe_msg_buf(dma_wr_buf, msgs, false); return -ENOMEM; } rpaddr = dma_map_single(i2c->dev, dma_rd_buf, (msgs + 1)->len, DMA_FROM_DEVICE); if (dma_mapping_error(i2c->dev, rpaddr)) { dma_unmap_single(i2c->dev, wpaddr, msgs->len, DMA_TO_DEVICE); i2c_put_dma_safe_msg_buf(dma_wr_buf, msgs, false); i2c_put_dma_safe_msg_buf(dma_rd_buf, (msgs + 1), false); return -ENOMEM; } if (i2c->dev_comp->max_dma_support > 32) { reg_4g_mode = upper_32_bits(wpaddr); writel(reg_4g_mode, i2c->pdmabase + OFFSET_TX_4G_MODE); reg_4g_mode = 
upper_32_bits(rpaddr);
			writel(reg_4g_mode, i2c->pdmabase + OFFSET_RX_4G_MODE);
		}

		writel((u32)wpaddr, i2c->pdmabase + OFFSET_TX_MEM_ADDR);
		writel((u32)rpaddr, i2c->pdmabase + OFFSET_RX_MEM_ADDR);
		writel(msgs->len, i2c->pdmabase + OFFSET_TX_LEN);
		writel((msgs + 1)->len, i2c->pdmabase + OFFSET_RX_LEN);
	}

	writel(I2C_DMA_START_EN, i2c->pdmabase + OFFSET_EN);

	if (!i2c->auto_restart) {
		start_reg = I2C_TRANSAC_START;
	} else {
		start_reg = I2C_TRANSAC_START | I2C_RS_MUL_TRIG;
		if (left_num >= 1)
			start_reg |= I2C_RS_MUL_CNFG;
	}
	mtk_i2c_writew(i2c, start_reg, OFFSET_START);

	ret = wait_for_completion_timeout(&i2c->msg_complete,
					  i2c->adap.timeout);

	/* Clear interrupt mask */
	mtk_i2c_writew(i2c, ~(restart_flag | I2C_HS_NACKERR | I2C_ACKERR |
			    I2C_ARB_LOST | I2C_TRANSAC_COMP), OFFSET_INTR_MASK);

	if (i2c->op == I2C_MASTER_WR) {
		dma_unmap_single(i2c->dev, wpaddr,
				 msgs->len, DMA_TO_DEVICE);
		i2c_put_dma_safe_msg_buf(dma_wr_buf, msgs, true);
	} else if (i2c->op == I2C_MASTER_RD) {
		dma_unmap_single(i2c->dev, rpaddr,
				 msgs->len, DMA_FROM_DEVICE);
		i2c_put_dma_safe_msg_buf(dma_rd_buf, msgs, true);
	} else {
		dma_unmap_single(i2c->dev, wpaddr, msgs->len,
				 DMA_TO_DEVICE);
		dma_unmap_single(i2c->dev, rpaddr, (msgs + 1)->len,
				 DMA_FROM_DEVICE);
		i2c_put_dma_safe_msg_buf(dma_wr_buf, msgs, true);
		i2c_put_dma_safe_msg_buf(dma_rd_buf, (msgs + 1), true);
	}

	if (ret == 0) {
		dev_dbg(i2c->dev, "addr: %x, transfer timeout\n", msgs->addr);
		i2c_dump_register(i2c);
		mtk_i2c_init_hw(i2c);
		return -ETIMEDOUT;
	}

	if (i2c->irq_stat & (I2C_HS_NACKERR | I2C_ACKERR)) {
		dev_dbg(i2c->dev, "addr: %x, transfer ACK error\n", msgs->addr);
		mtk_i2c_init_hw(i2c);
		return -ENXIO;
	}

	return 0;
}

static int mtk_i2c_transfer(struct i2c_adapter *adap,
			    struct i2c_msg msgs[], int num)
{
	int ret;
	int left_num = num;
	struct mtk_i2c *i2c = i2c_get_adapdata(adap);

	ret = clk_bulk_enable(I2C_MT65XX_CLK_MAX, i2c->clocks);
	if (ret)
		return ret;

	i2c->auto_restart = i2c->dev_comp->auto_restart;

	/* check if we can skip the repeated start and use WRRD mode instead */
	if (i2c->auto_restart && num == 2) {
		if (!(msgs[0].flags & I2C_M_RD) && (msgs[1].flags & I2C_M_RD) &&
		    msgs[0].addr == msgs[1].addr) {
			i2c->auto_restart = 0;
		}
	}

	if (i2c->auto_restart && num >= 2 &&
	    i2c->speed_hz > I2C_MAX_FAST_MODE_PLUS_FREQ)
		/* ignore the first restart irq after the master code,
		 * otherwise the first transfer will be discarded.
		 */
		i2c->ignore_restart_irq = true;
	else
		i2c->ignore_restart_irq = false;

	while (left_num--) {
		if (!msgs->buf) {
			dev_dbg(i2c->dev, "data buffer is NULL.\n");
			ret = -EINVAL;
			goto err_exit;
		}

		if (msgs->flags & I2C_M_RD)
			i2c->op = I2C_MASTER_RD;
		else
			i2c->op = I2C_MASTER_WR;

		if (!i2c->auto_restart) {
			if (num > 1) {
				/* combine two messages into one transaction */
				i2c->op = I2C_MASTER_WRRD;
				left_num--;
			}
		}

		/* always use DMA mode. */
		ret = mtk_i2c_do_transfer(i2c, msgs, num, left_num);
		if (ret < 0)
			goto err_exit;

		msgs++;
	}
	/* the return value is the number of executed messages */
	ret = num;

err_exit:
	clk_bulk_disable(I2C_MT65XX_CLK_MAX, i2c->clocks);
	return ret;
}

static irqreturn_t mtk_i2c_irq(int irqno, void *dev_id)
{
	struct mtk_i2c *i2c = dev_id;
	u16 restart_flag = 0;
	u16 intr_stat;

	if (i2c->auto_restart)
		restart_flag = I2C_RS_TRANSFER;

	intr_stat = mtk_i2c_readw(i2c, OFFSET_INTR_STAT);
	mtk_i2c_writew(i2c, intr_stat, OFFSET_INTR_STAT);

	/*
	 * When an ACK error occurs, the i2c controller generates two
	 * interrupts: first the ACK error interrupt, then the complete
	 * interrupt. i2c->irq_stat needs to accumulate both interrupt values.
*/ i2c->irq_stat |= intr_stat; if (i2c->ignore_restart_irq && (i2c->irq_stat & restart_flag)) { i2c->ignore_restart_irq = false; i2c->irq_stat = 0; mtk_i2c_writew(i2c, I2C_RS_MUL_CNFG | I2C_RS_MUL_TRIG | I2C_TRANSAC_START, OFFSET_START); } else { if (i2c->irq_stat & (I2C_TRANSAC_COMP | restart_flag)) complete(&i2c->msg_complete); } return IRQ_HANDLED; } static u32 mtk_i2c_functionality(struct i2c_adapter *adap) { if (i2c_check_quirks(adap, I2C_AQ_NO_ZERO_LEN)) return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK); else return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static const struct i2c_algorithm mtk_i2c_algorithm = { .master_xfer = mtk_i2c_transfer, .functionality = mtk_i2c_functionality, }; static int mtk_i2c_parse_dt(struct device_node *np, struct mtk_i2c *i2c) { int ret; ret = of_property_read_u32(np, "clock-frequency", &i2c->speed_hz); if (ret < 0) i2c->speed_hz = I2C_MAX_STANDARD_MODE_FREQ; ret = of_property_read_u32(np, "clock-div", &i2c->clk_src_div); if (ret < 0) return ret; if (i2c->clk_src_div == 0) return -EINVAL; i2c->have_pmic = of_property_read_bool(np, "mediatek,have-pmic"); i2c->use_push_pull = of_property_read_bool(np, "mediatek,use-push-pull"); i2c_parse_fw_timings(i2c->dev, &i2c->timing_info, true); return 0; } static int mtk_i2c_probe(struct platform_device *pdev) { int ret = 0; struct mtk_i2c *i2c; int i, irq, speed_clk; i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL); if (!i2c) return -ENOMEM; i2c->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); if (IS_ERR(i2c->base)) return PTR_ERR(i2c->base); i2c->pdmabase = devm_platform_get_and_ioremap_resource(pdev, 1, NULL); if (IS_ERR(i2c->pdmabase)) return PTR_ERR(i2c->pdmabase); irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; init_completion(&i2c->msg_complete); i2c->dev_comp = of_device_get_match_data(&pdev->dev); i2c->adap.dev.of_node = pdev->dev.of_node; i2c->dev = &pdev->dev; i2c->adap.dev.parent = &pdev->dev; i2c->adap.owner = THIS_MODULE; i2c->adap.algo = &mtk_i2c_algorithm; i2c->adap.quirks = i2c->dev_comp->quirks; i2c->adap.timeout = 2 * HZ; i2c->adap.retries = 1; i2c->adap.bus_regulator = devm_regulator_get_optional(&pdev->dev, "vbus"); if (IS_ERR(i2c->adap.bus_regulator)) { if (PTR_ERR(i2c->adap.bus_regulator) == -ENODEV) i2c->adap.bus_regulator = NULL; else return PTR_ERR(i2c->adap.bus_regulator); } ret = mtk_i2c_parse_dt(pdev->dev.of_node, i2c); if (ret) return -EINVAL; if (i2c->have_pmic && !i2c->dev_comp->pmic_i2c) return -EINVAL; /* Fill in clk-bulk IDs */ for (i = 0; i < I2C_MT65XX_CLK_MAX; i++) i2c->clocks[i].id = i2c_mt65xx_clk_ids[i]; /* Get clocks one by one, some may be optional */ i2c->clocks[I2C_MT65XX_CLK_MAIN].clk = devm_clk_get(&pdev->dev, "main"); if (IS_ERR(i2c->clocks[I2C_MT65XX_CLK_MAIN].clk)) { dev_err(&pdev->dev, "cannot get main clock\n"); return PTR_ERR(i2c->clocks[I2C_MT65XX_CLK_MAIN].clk); } i2c->clocks[I2C_MT65XX_CLK_DMA].clk = devm_clk_get(&pdev->dev, "dma"); if (IS_ERR(i2c->clocks[I2C_MT65XX_CLK_DMA].clk)) { dev_err(&pdev->dev, "cannot get dma clock\n"); return PTR_ERR(i2c->clocks[I2C_MT65XX_CLK_DMA].clk); } i2c->clocks[I2C_MT65XX_CLK_ARB].clk = devm_clk_get_optional(&pdev->dev, "arb"); if (IS_ERR(i2c->clocks[I2C_MT65XX_CLK_ARB].clk)) return PTR_ERR(i2c->clocks[I2C_MT65XX_CLK_ARB].clk); if (i2c->have_pmic) { i2c->clocks[I2C_MT65XX_CLK_PMIC].clk = devm_clk_get(&pdev->dev, "pmic"); if (IS_ERR(i2c->clocks[I2C_MT65XX_CLK_PMIC].clk)) { dev_err(&pdev->dev, "cannot get pmic clock\n"); return PTR_ERR(i2c->clocks[I2C_MT65XX_CLK_PMIC].clk); } 
speed_clk = I2C_MT65XX_CLK_PMIC;
	} else {
		i2c->clocks[I2C_MT65XX_CLK_PMIC].clk = NULL;
		speed_clk = I2C_MT65XX_CLK_MAIN;
	}

	strscpy(i2c->adap.name, I2C_DRV_NAME, sizeof(i2c->adap.name));

	ret = mtk_i2c_set_speed(i2c, clk_get_rate(i2c->clocks[speed_clk].clk));
	if (ret) {
		dev_err(&pdev->dev, "Failed to set the speed.\n");
		return -EINVAL;
	}

	if (i2c->dev_comp->max_dma_support > 32) {
		ret = dma_set_mask(&pdev->dev,
				DMA_BIT_MASK(i2c->dev_comp->max_dma_support));
		if (ret) {
			dev_err(&pdev->dev, "dma_set_mask returned an error.\n");
			return ret;
		}
	}

	ret = clk_bulk_prepare_enable(I2C_MT65XX_CLK_MAX, i2c->clocks);
	if (ret) {
		dev_err(&pdev->dev, "clock enable failed!\n");
		return ret;
	}

	mtk_i2c_init_hw(i2c);

	clk_bulk_disable(I2C_MT65XX_CLK_MAX, i2c->clocks);

	ret = devm_request_irq(&pdev->dev, irq, mtk_i2c_irq,
			       IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE,
			       dev_name(&pdev->dev), i2c);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to request I2C IRQ %d\n", irq);
		goto err_bulk_unprepare;
	}

	i2c_set_adapdata(&i2c->adap, i2c);

	ret = i2c_add_adapter(&i2c->adap);
	if (ret)
		goto err_bulk_unprepare;

	platform_set_drvdata(pdev, i2c);

	return 0;

err_bulk_unprepare:
	clk_bulk_unprepare(I2C_MT65XX_CLK_MAX, i2c->clocks);

	return ret;
}

static void mtk_i2c_remove(struct platform_device *pdev)
{
	struct mtk_i2c *i2c = platform_get_drvdata(pdev);

	i2c_del_adapter(&i2c->adap);

	clk_bulk_unprepare(I2C_MT65XX_CLK_MAX, i2c->clocks);
}

static int mtk_i2c_suspend_noirq(struct device *dev)
{
	struct mtk_i2c *i2c = dev_get_drvdata(dev);

	i2c_mark_adapter_suspended(&i2c->adap);
	clk_bulk_unprepare(I2C_MT65XX_CLK_MAX, i2c->clocks);

	return 0;
}

static int mtk_i2c_resume_noirq(struct device *dev)
{
	int ret;
	struct mtk_i2c *i2c = dev_get_drvdata(dev);

	ret = clk_bulk_prepare_enable(I2C_MT65XX_CLK_MAX, i2c->clocks);
	if (ret) {
		dev_err(dev, "clock enable failed!\n");
		return ret;
	}

	mtk_i2c_init_hw(i2c);

	clk_bulk_disable(I2C_MT65XX_CLK_MAX, i2c->clocks);

	i2c_mark_adapter_resumed(&i2c->adap);

	return 0;
}

static const struct dev_pm_ops mtk_i2c_pm = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_i2c_suspend_noirq,
				  mtk_i2c_resume_noirq)
};

static struct platform_driver mtk_i2c_driver = {
	.probe = mtk_i2c_probe,
	.remove_new = mtk_i2c_remove,
	.driver = {
		.name = I2C_DRV_NAME,
		.pm = pm_sleep_ptr(&mtk_i2c_pm),
		.of_match_table = mtk_i2c_of_match,
	},
};

module_platform_driver(mtk_i2c_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MediaTek I2C Bus Driver");
MODULE_AUTHOR("Xudong Chen <[email protected]>");
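The (sample_cnt, step_cnt) search in mtk_i2c_calculate_speed() above is easy to model in isolation. A minimal standalone sketch (editor's illustration, not driver code: the 62.4 MHz post-divider clock and 400 kHz target are assumed values, and the ltiming clk_div_restri correction and AC-timing check are omitted):

#include <stdio.h>

int main(void)
{
	const unsigned int clk_src = 62400000;	/* assumed clock after clock-div */
	const unsigned int target = 400000;	/* Fast-mode target speed */
	const unsigned int max_step = 64;	/* MAX_STEP_CNT_DIV below high-speed */
	/* mirrors opt_div = DIV_ROUND_UP(clk_src >> 1, target_speed) */
	unsigned int opt_div = (clk_src / 2 + target - 1) / target;
	unsigned int sample, best_sample = 0, best_step = 0, best_mul = ~0u;

	/* smallest sample_cnt * step_cnt with clk_src / (2 * product) <= target */
	for (sample = 1; sample <= 8; sample++) {	/* MAX_SAMPLE_CNT_DIV */
		unsigned int step = (opt_div + sample - 1) / sample;

		if (step <= max_step && step * sample < best_mul) {
			best_mul = step * sample;
			best_sample = sample;
			best_step = step;
		}
	}
	/* the driver programs (sample_cnt - 1) and (step_cnt - 1) into TIMING;
	 * with the values assumed here: sample_cnt 2, step_cnt 39 -> 400000 Hz */
	printf("sample_cnt %u, step_cnt %u -> %u Hz\n", best_sample, best_step,
	       clk_src / (2 * best_sample * best_step));
	return 0;
}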
linux-master
drivers/i2c/busses/i2c-mt65xx.c
// SPDX-License-Identifier: GPL-2.0-or-later /* linux/drivers/i2c/busses/i2c-s3c2410.c * * Copyright (C) 2004,2005,2009 Simtec Electronics * Ben Dooks <[email protected]> * * S3C2410 I2C Controller */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/time.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/clk.h> #include <linux/cpufreq.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/of.h> #include <linux/gpio/consumer.h> #include <linux/pinctrl/consumer.h> #include <linux/mfd/syscon.h> #include <linux/regmap.h> #include <asm/irq.h> #include <linux/platform_data/i2c-s3c2410.h> /* see s3c2410x user guide, v1.1, section 9 (p447) for more info */ #define S3C2410_IICCON 0x00 #define S3C2410_IICSTAT 0x04 #define S3C2410_IICADD 0x08 #define S3C2410_IICDS 0x0C #define S3C2440_IICLC 0x10 #define S3C2410_IICCON_ACKEN (1 << 7) #define S3C2410_IICCON_TXDIV_16 (0 << 6) #define S3C2410_IICCON_TXDIV_512 (1 << 6) #define S3C2410_IICCON_IRQEN (1 << 5) #define S3C2410_IICCON_IRQPEND (1 << 4) #define S3C2410_IICCON_SCALE(x) ((x) & 0xf) #define S3C2410_IICCON_SCALEMASK (0xf) #define S3C2410_IICSTAT_MASTER_RX (2 << 6) #define S3C2410_IICSTAT_MASTER_TX (3 << 6) #define S3C2410_IICSTAT_SLAVE_RX (0 << 6) #define S3C2410_IICSTAT_SLAVE_TX (1 << 6) #define S3C2410_IICSTAT_MODEMASK (3 << 6) #define S3C2410_IICSTAT_START (1 << 5) #define S3C2410_IICSTAT_BUSBUSY (1 << 5) #define S3C2410_IICSTAT_TXRXEN (1 << 4) #define S3C2410_IICSTAT_ARBITR (1 << 3) #define S3C2410_IICSTAT_ASSLAVE (1 << 2) #define S3C2410_IICSTAT_ADDR0 (1 << 1) #define S3C2410_IICSTAT_LASTBIT (1 << 0) #define S3C2410_IICLC_SDA_DELAY0 (0 << 0) #define S3C2410_IICLC_SDA_DELAY5 (1 << 0) #define S3C2410_IICLC_SDA_DELAY10 (2 << 0) #define S3C2410_IICLC_SDA_DELAY15 (3 << 0) #define S3C2410_IICLC_SDA_DELAY_MASK (3 << 0) #define S3C2410_IICLC_FILTER_ON (1 << 2) /* Treat S3C2410 as baseline hardware, anything else is supported via quirks */ #define QUIRK_S3C2440 (1 << 0) #define QUIRK_HDMIPHY (1 << 1) #define QUIRK_NO_GPIO (1 << 2) #define QUIRK_POLL (1 << 3) /* Max time to wait for bus to become idle after a xfer (in us) */ #define S3C2410_IDLE_TIMEOUT 5000 /* Exynos5 Sysreg offset */ #define EXYNOS5_SYS_I2C_CFG 0x0234 /* i2c controller state */ enum s3c24xx_i2c_state { STATE_IDLE, STATE_START, STATE_READ, STATE_WRITE, STATE_STOP }; struct s3c24xx_i2c { wait_queue_head_t wait; kernel_ulong_t quirks; struct i2c_msg *msg; unsigned int msg_num; unsigned int msg_idx; unsigned int msg_ptr; unsigned int tx_setup; unsigned int irq; enum s3c24xx_i2c_state state; unsigned long clkrate; void __iomem *regs; struct clk *clk; struct device *dev; struct i2c_adapter adap; struct s3c2410_platform_i2c *pdata; struct gpio_desc *gpios[2]; struct pinctrl *pctrl; struct regmap *sysreg; unsigned int sys_i2c_cfg; }; static const struct platform_device_id s3c24xx_driver_ids[] = { { .name = "s3c2410-i2c", .driver_data = 0, }, { .name = "s3c2440-i2c", .driver_data = QUIRK_S3C2440, }, { .name = "s3c2440-hdmiphy-i2c", .driver_data = QUIRK_S3C2440 | QUIRK_HDMIPHY | QUIRK_NO_GPIO, }, { }, }; MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids); static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat); #ifdef CONFIG_OF static const struct of_device_id s3c24xx_i2c_match[] = { { .compatible = "samsung,s3c2410-i2c", .data = (void *)0 }, { .compatible = 
"samsung,s3c2440-i2c", .data = (void *)QUIRK_S3C2440 }, { .compatible = "samsung,s3c2440-hdmiphy-i2c", .data = (void *)(QUIRK_S3C2440 | QUIRK_HDMIPHY | QUIRK_NO_GPIO) }, { .compatible = "samsung,exynos5-sata-phy-i2c", .data = (void *)(QUIRK_S3C2440 | QUIRK_POLL | QUIRK_NO_GPIO) }, {}, }; MODULE_DEVICE_TABLE(of, s3c24xx_i2c_match); #endif /* * Get controller type either from device tree or platform device variant. */ static inline kernel_ulong_t s3c24xx_get_device_quirks(struct platform_device *pdev) { if (pdev->dev.of_node) return (kernel_ulong_t)of_device_get_match_data(&pdev->dev); return platform_get_device_id(pdev)->driver_data; } /* * Complete the message and wake up the caller, using the given return code, * or zero to mean ok. */ static inline void s3c24xx_i2c_master_complete(struct s3c24xx_i2c *i2c, int ret) { dev_dbg(i2c->dev, "master_complete %d\n", ret); i2c->msg_ptr = 0; i2c->msg = NULL; i2c->msg_idx++; i2c->msg_num = 0; if (ret) i2c->msg_idx = ret; if (!(i2c->quirks & QUIRK_POLL)) wake_up(&i2c->wait); } static inline void s3c24xx_i2c_disable_ack(struct s3c24xx_i2c *i2c) { unsigned long tmp; tmp = readl(i2c->regs + S3C2410_IICCON); writel(tmp & ~S3C2410_IICCON_ACKEN, i2c->regs + S3C2410_IICCON); } static inline void s3c24xx_i2c_enable_ack(struct s3c24xx_i2c *i2c) { unsigned long tmp; tmp = readl(i2c->regs + S3C2410_IICCON); writel(tmp | S3C2410_IICCON_ACKEN, i2c->regs + S3C2410_IICCON); } /* irq enable/disable functions */ static inline void s3c24xx_i2c_disable_irq(struct s3c24xx_i2c *i2c) { unsigned long tmp; tmp = readl(i2c->regs + S3C2410_IICCON); writel(tmp & ~S3C2410_IICCON_IRQEN, i2c->regs + S3C2410_IICCON); } static inline void s3c24xx_i2c_enable_irq(struct s3c24xx_i2c *i2c) { unsigned long tmp; tmp = readl(i2c->regs + S3C2410_IICCON); writel(tmp | S3C2410_IICCON_IRQEN, i2c->regs + S3C2410_IICCON); } static bool is_ack(struct s3c24xx_i2c *i2c) { int tries; for (tries = 50; tries; --tries) { if (readl(i2c->regs + S3C2410_IICCON) & S3C2410_IICCON_IRQPEND) { if (!(readl(i2c->regs + S3C2410_IICSTAT) & S3C2410_IICSTAT_LASTBIT)) return true; } usleep_range(1000, 2000); } dev_err(i2c->dev, "ack was not received\n"); return false; } /* * put the start of a message onto the bus */ static void s3c24xx_i2c_message_start(struct s3c24xx_i2c *i2c, struct i2c_msg *msg) { unsigned int addr = (msg->addr & 0x7f) << 1; unsigned long stat; unsigned long iiccon; stat = 0; stat |= S3C2410_IICSTAT_TXRXEN; if (msg->flags & I2C_M_RD) { stat |= S3C2410_IICSTAT_MASTER_RX; addr |= 1; } else stat |= S3C2410_IICSTAT_MASTER_TX; if (msg->flags & I2C_M_REV_DIR_ADDR) addr ^= 1; /* todo - check for whether ack wanted or not */ s3c24xx_i2c_enable_ack(i2c); iiccon = readl(i2c->regs + S3C2410_IICCON); writel(stat, i2c->regs + S3C2410_IICSTAT); dev_dbg(i2c->dev, "START: %08lx to IICSTAT, %02x to DS\n", stat, addr); writeb(addr, i2c->regs + S3C2410_IICDS); /* * delay here to ensure the data byte has gotten onto the bus * before the transaction is started */ ndelay(i2c->tx_setup); dev_dbg(i2c->dev, "iiccon, %08lx\n", iiccon); writel(iiccon, i2c->regs + S3C2410_IICCON); stat |= S3C2410_IICSTAT_START; writel(stat, i2c->regs + S3C2410_IICSTAT); if (i2c->quirks & QUIRK_POLL) { while ((i2c->msg_num != 0) && is_ack(i2c)) { i2c_s3c_irq_nextbyte(i2c, stat); stat = readl(i2c->regs + S3C2410_IICSTAT); if (stat & S3C2410_IICSTAT_ARBITR) dev_err(i2c->dev, "deal with arbitration loss\n"); } } } static inline void s3c24xx_i2c_stop(struct s3c24xx_i2c *i2c, int ret) { unsigned long iicstat = readl(i2c->regs + S3C2410_IICSTAT); 
dev_dbg(i2c->dev, "STOP\n"); /* * The datasheet says that the STOP sequence should be: * 1) I2CSTAT.5 = 0 - Clear BUSY (or 'generate STOP') * 2) I2CCON.4 = 0 - Clear IRQPEND * 3) Wait until the stop condition takes effect. * 4*) I2CSTAT.4 = 0 - Clear TXRXEN * * Where, step "4*" is only for buses with the "HDMIPHY" quirk. * * However, after much experimentation, it appears that: * a) normal buses automatically clear BUSY and transition from * Master->Slave when they complete generating a STOP condition. * Therefore, step (3) can be done in doxfer() by polling I2CCON.4 * after starting the STOP generation here. * b) HDMIPHY bus does neither, so there is no way to do step 3. * There is no indication when this bus has finished generating * STOP. * * In fact, we have found that as soon as the IRQPEND bit is cleared in * step 2, the HDMIPHY bus generates the STOP condition, and then * immediately starts transferring another data byte, even though the * bus is supposedly stopped. This is presumably because the bus is * still in "Master" mode, and its BUSY bit is still set. * * To avoid these extra post-STOP transactions on HDMI phy devices, we * just disable Serial Output on the bus (I2CSTAT.4 = 0) directly, * instead of first generating a proper STOP condition. This should * float SDA & SCK terminating the transfer. Subsequent transfers * start with a proper START condition, and proceed normally. * * The HDMIPHY bus is an internal bus that always has exactly two * devices, the host as Master and the HDMIPHY device as the slave. * Skipping the STOP condition has been tested on this bus and works. */ if (i2c->quirks & QUIRK_HDMIPHY) { /* Stop driving the I2C pins */ iicstat &= ~S3C2410_IICSTAT_TXRXEN; } else { /* stop the transfer */ iicstat &= ~S3C2410_IICSTAT_START; } writel(iicstat, i2c->regs + S3C2410_IICSTAT); i2c->state = STATE_STOP; s3c24xx_i2c_master_complete(i2c, ret); s3c24xx_i2c_disable_irq(i2c); } /* * helper functions to determine the current state in the set of * messages we are sending */ /* * returns TRUE if the current message is the last in the set */ static inline int is_lastmsg(struct s3c24xx_i2c *i2c) { return i2c->msg_idx >= (i2c->msg_num - 1); } /* * returns TRUE if we this is the last byte in the current message */ static inline int is_msglast(struct s3c24xx_i2c *i2c) { /* * msg->len is always 1 for the first byte of smbus block read. * Actual length will be read from slave. More bytes will be * read according to the length then. */ if (i2c->msg->flags & I2C_M_RECV_LEN && i2c->msg->len == 1) return 0; return i2c->msg_ptr == i2c->msg->len-1; } /* * returns TRUE if we reached the end of the current message */ static inline int is_msgend(struct s3c24xx_i2c *i2c) { return i2c->msg_ptr >= i2c->msg->len; } /* * process an interrupt and work out what to do */ static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat) { unsigned long tmp; unsigned char byte; int ret = 0; switch (i2c->state) { case STATE_IDLE: dev_err(i2c->dev, "%s: called in STATE_IDLE\n", __func__); goto out; case STATE_STOP: dev_err(i2c->dev, "%s: called in STATE_STOP\n", __func__); s3c24xx_i2c_disable_irq(i2c); goto out_ack; case STATE_START: /* * last thing we did was send a start condition on the * bus, or started a new i2c message */ if (iicstat & S3C2410_IICSTAT_LASTBIT && !(i2c->msg->flags & I2C_M_IGNORE_NAK)) { /* ack was not received... 
*/ dev_dbg(i2c->dev, "ack was not received\n"); s3c24xx_i2c_stop(i2c, -ENXIO); goto out_ack; } if (i2c->msg->flags & I2C_M_RD) i2c->state = STATE_READ; else i2c->state = STATE_WRITE; /* * Terminate the transfer if there is nothing to do * as this is used by the i2c probe to find devices. */ if (is_lastmsg(i2c) && i2c->msg->len == 0) { s3c24xx_i2c_stop(i2c, 0); goto out_ack; } if (i2c->state == STATE_READ) goto prepare_read; /* * fall through to the write state, as we will need to * send a byte as well */ fallthrough; case STATE_WRITE: /* * we are writing data to the device... check for the * end of the message, and if so, work out what to do */ if (!(i2c->msg->flags & I2C_M_IGNORE_NAK)) { if (iicstat & S3C2410_IICSTAT_LASTBIT) { dev_dbg(i2c->dev, "WRITE: No Ack\n"); s3c24xx_i2c_stop(i2c, -ECONNREFUSED); goto out_ack; } } retry_write: if (!is_msgend(i2c)) { byte = i2c->msg->buf[i2c->msg_ptr++]; writeb(byte, i2c->regs + S3C2410_IICDS); /* * delay after writing the byte to allow the * data setup time on the bus, as writing the * data to the register causes the first bit * to appear on SDA, and SCL will change as * soon as the interrupt is acknowledged */ ndelay(i2c->tx_setup); } else if (!is_lastmsg(i2c)) { /* we need to go to the next i2c message */ dev_dbg(i2c->dev, "WRITE: Next Message\n"); i2c->msg_ptr = 0; i2c->msg_idx++; i2c->msg++; /* check to see if we need to do another message */ if (i2c->msg->flags & I2C_M_NOSTART) { if (i2c->msg->flags & I2C_M_RD) { /* * cannot do this, the controller * forces us to send a new START * when we change direction */ dev_dbg(i2c->dev, "missing START before write->read\n"); s3c24xx_i2c_stop(i2c, -EINVAL); break; } goto retry_write; } else { /* send the new start */ s3c24xx_i2c_message_start(i2c, i2c->msg); i2c->state = STATE_START; } } else { /* send stop */ s3c24xx_i2c_stop(i2c, 0); } break; case STATE_READ: /* * we have a byte of data in the data register, do * something with it, and then work out whether we are * going to do any more read/write */ byte = readb(i2c->regs + S3C2410_IICDS); i2c->msg->buf[i2c->msg_ptr++] = byte; /* Add actual length to read for smbus block read */ if (i2c->msg->flags & I2C_M_RECV_LEN && i2c->msg->len == 1) i2c->msg->len += byte; prepare_read: if (is_msglast(i2c)) { /* last byte of buffer */ if (is_lastmsg(i2c)) s3c24xx_i2c_disable_ack(i2c); } else if (is_msgend(i2c)) { /* * ok, we've read the entire buffer, see if there * is anything else we need to do */ if (is_lastmsg(i2c)) { /* last message, send stop and complete */ dev_dbg(i2c->dev, "READ: Send Stop\n"); s3c24xx_i2c_stop(i2c, 0); } else { /* go to the next transfer */ dev_dbg(i2c->dev, "READ: Next Transfer\n"); i2c->msg_ptr = 0; i2c->msg_idx++; i2c->msg++; } } break; } /* acknowlegde the IRQ and get back on with the work */ out_ack: tmp = readl(i2c->regs + S3C2410_IICCON); tmp &= ~S3C2410_IICCON_IRQPEND; writel(tmp, i2c->regs + S3C2410_IICCON); out: return ret; } /* * top level IRQ servicing routine */ static irqreturn_t s3c24xx_i2c_irq(int irqno, void *dev_id) { struct s3c24xx_i2c *i2c = dev_id; unsigned long status; unsigned long tmp; status = readl(i2c->regs + S3C2410_IICSTAT); if (status & S3C2410_IICSTAT_ARBITR) { /* deal with arbitration loss */ dev_err(i2c->dev, "deal with arbitration loss\n"); } if (i2c->state == STATE_IDLE) { dev_dbg(i2c->dev, "IRQ: error i2c->state == IDLE\n"); tmp = readl(i2c->regs + S3C2410_IICCON); tmp &= ~S3C2410_IICCON_IRQPEND; writel(tmp, i2c->regs + S3C2410_IICCON); goto out; } /* * pretty much this leaves us with the fact that 
we've * transmitted or received whatever byte we last sent */ i2c_s3c_irq_nextbyte(i2c, status); out: return IRQ_HANDLED; } /* * Disable the bus so that we won't get any interrupts from now on, or try * to drive any lines. This is the default state when we don't have * anything to send/receive. * * If there is an event on the bus, or we have a pre-existing event at * kernel boot time, we may not notice the event and the I2C controller * will lock the bus with the I2C clock line low indefinitely. */ static inline void s3c24xx_i2c_disable_bus(struct s3c24xx_i2c *i2c) { unsigned long tmp; /* Stop driving the I2C pins */ tmp = readl(i2c->regs + S3C2410_IICSTAT); tmp &= ~S3C2410_IICSTAT_TXRXEN; writel(tmp, i2c->regs + S3C2410_IICSTAT); /* We don't expect any interrupts now, and don't want to send acks */ tmp = readl(i2c->regs + S3C2410_IICCON); tmp &= ~(S3C2410_IICCON_IRQEN | S3C2410_IICCON_IRQPEND | S3C2410_IICCON_ACKEN); writel(tmp, i2c->regs + S3C2410_IICCON); } /* * get the i2c bus for a master transaction */ static int s3c24xx_i2c_set_master(struct s3c24xx_i2c *i2c) { unsigned long iicstat; int timeout = 400; while (timeout-- > 0) { iicstat = readl(i2c->regs + S3C2410_IICSTAT); if (!(iicstat & S3C2410_IICSTAT_BUSBUSY)) return 0; msleep(1); } return -ETIMEDOUT; } /* * wait for the i2c bus to become idle. */ static void s3c24xx_i2c_wait_idle(struct s3c24xx_i2c *i2c) { unsigned long iicstat; ktime_t start, now; unsigned long delay; int spins; /* ensure the stop has been through the bus */ dev_dbg(i2c->dev, "waiting for bus idle\n"); start = now = ktime_get(); /* * Most of the time, the bus is already idle within a few usec of the * end of a transaction. However, really slow i2c devices can stretch * the clock, delaying STOP generation. * * On slower SoCs this typically happens within a very small number of * instructions so busy wait briefly to avoid scheduling overhead. */ spins = 3; iicstat = readl(i2c->regs + S3C2410_IICSTAT); while ((iicstat & S3C2410_IICSTAT_START) && --spins) { cpu_relax(); iicstat = readl(i2c->regs + S3C2410_IICSTAT); } /* * If we do get an appreciable delay, then as a compromise between idle * detection latency for the normal, fast case, and system load in the * slow device case, use an exponential back off in the polling loop, * up to 1/10th of the total timeout, then continue to poll at a * constant rate up to the timeout. 
*/ delay = 1; while ((iicstat & S3C2410_IICSTAT_START) && ktime_us_delta(now, start) < S3C2410_IDLE_TIMEOUT) { usleep_range(delay, 2 * delay); if (delay < S3C2410_IDLE_TIMEOUT / 10) delay <<= 1; now = ktime_get(); iicstat = readl(i2c->regs + S3C2410_IICSTAT); } if (iicstat & S3C2410_IICSTAT_START) dev_warn(i2c->dev, "timeout waiting for bus idle\n"); } /* * this starts an i2c transfer */ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c, struct i2c_msg *msgs, int num) { unsigned long timeout; int ret; ret = s3c24xx_i2c_set_master(i2c); if (ret != 0) { dev_err(i2c->dev, "cannot get bus (error %d)\n", ret); ret = -EAGAIN; goto out; } i2c->msg = msgs; i2c->msg_num = num; i2c->msg_ptr = 0; i2c->msg_idx = 0; i2c->state = STATE_START; s3c24xx_i2c_enable_irq(i2c); s3c24xx_i2c_message_start(i2c, msgs); if (i2c->quirks & QUIRK_POLL) { ret = i2c->msg_idx; if (ret != num) dev_dbg(i2c->dev, "incomplete xfer (%d)\n", ret); goto out; } timeout = wait_event_timeout(i2c->wait, i2c->msg_num == 0, HZ * 5); ret = i2c->msg_idx; /* * Having these next two as dev_err() makes life very * noisy when doing an i2cdetect */ if (timeout == 0) dev_dbg(i2c->dev, "timeout\n"); else if (ret != num) dev_dbg(i2c->dev, "incomplete xfer (%d)\n", ret); /* For QUIRK_HDMIPHY, bus is already disabled */ if (i2c->quirks & QUIRK_HDMIPHY) goto out; s3c24xx_i2c_wait_idle(i2c); s3c24xx_i2c_disable_bus(i2c); out: i2c->state = STATE_IDLE; return ret; } /* * first port of call from the i2c bus code when a message needs * transferring across the i2c bus. */ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct s3c24xx_i2c *i2c = (struct s3c24xx_i2c *)adap->algo_data; int retry; int ret; ret = clk_enable(i2c->clk); if (ret) return ret; for (retry = 0; retry < adap->retries; retry++) { ret = s3c24xx_i2c_doxfer(i2c, msgs, num); if (ret != -EAGAIN) { clk_disable(i2c->clk); return ret; } dev_dbg(i2c->dev, "Retrying transmission (%d)\n", retry); udelay(100); } clk_disable(i2c->clk); return -EREMOTEIO; } /* declare our i2c functionality */ static u32 s3c24xx_i2c_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL_ALL | I2C_FUNC_NOSTART | I2C_FUNC_PROTOCOL_MANGLING; } /* i2c bus registration info */ static const struct i2c_algorithm s3c24xx_i2c_algorithm = { .master_xfer = s3c24xx_i2c_xfer, .functionality = s3c24xx_i2c_func, }; /* * return the divisor settings for a given frequency */ static int s3c24xx_i2c_calcdivisor(unsigned long clkin, unsigned int wanted, unsigned int *div1, unsigned int *divs) { unsigned int calc_divs = clkin / wanted; unsigned int calc_div1; if (calc_divs > (16*16)) calc_div1 = 512; else calc_div1 = 16; calc_divs += calc_div1-1; calc_divs /= calc_div1; if (calc_divs == 0) calc_divs = 1; if (calc_divs > 17) calc_divs = 17; *divs = calc_divs; *div1 = calc_div1; return clkin / (calc_divs * calc_div1); } /* * work out a divisor for the user requested frequency setting, * either by the requested frequency, or scanning the acceptable * range of frequencies until something is found */ static int s3c24xx_i2c_clockrate(struct s3c24xx_i2c *i2c, unsigned int *got) { struct s3c2410_platform_i2c *pdata = i2c->pdata; unsigned long clkin = clk_get_rate(i2c->clk); unsigned int divs, div1; unsigned long target_frequency; u32 iiccon; int freq; i2c->clkrate = clkin; clkin /= 1000; /* clkin now in KHz */ dev_dbg(i2c->dev, "pdata desired frequency %lu\n", pdata->frequency); target_frequency = pdata->frequency ?: I2C_MAX_STANDARD_MODE_FREQ; target_frequency /= 1000; /* Target 
frequency now in KHz */ freq = s3c24xx_i2c_calcdivisor(clkin, target_frequency, &div1, &divs); if (freq > target_frequency) { dev_err(i2c->dev, "Unable to achieve desired frequency %luKHz." \ " Lowest achievable %dKHz\n", target_frequency, freq); return -EINVAL; } *got = freq; iiccon = readl(i2c->regs + S3C2410_IICCON); iiccon &= ~(S3C2410_IICCON_SCALEMASK | S3C2410_IICCON_TXDIV_512); iiccon |= (divs-1); if (div1 == 512) iiccon |= S3C2410_IICCON_TXDIV_512; if (i2c->quirks & QUIRK_POLL) iiccon |= S3C2410_IICCON_SCALE(2); writel(iiccon, i2c->regs + S3C2410_IICCON); if (i2c->quirks & QUIRK_S3C2440) { unsigned long sda_delay; if (pdata->sda_delay) { sda_delay = clkin * pdata->sda_delay; sda_delay = DIV_ROUND_UP(sda_delay, 1000000); sda_delay = DIV_ROUND_UP(sda_delay, 5); if (sda_delay > 3) sda_delay = 3; sda_delay |= S3C2410_IICLC_FILTER_ON; } else sda_delay = 0; dev_dbg(i2c->dev, "IICLC=%08lx\n", sda_delay); writel(sda_delay, i2c->regs + S3C2440_IICLC); } return 0; } #ifdef CONFIG_OF static int s3c24xx_i2c_parse_dt_gpio(struct s3c24xx_i2c *i2c) { int i; if (i2c->quirks & QUIRK_NO_GPIO) return 0; for (i = 0; i < 2; i++) { i2c->gpios[i] = devm_gpiod_get_index(i2c->dev, NULL, i, GPIOD_ASIS); if (IS_ERR(i2c->gpios[i])) { dev_err(i2c->dev, "i2c gpio invalid at index %d\n", i); return -EINVAL; } } return 0; } #else static int s3c24xx_i2c_parse_dt_gpio(struct s3c24xx_i2c *i2c) { return 0; } #endif /* * initialise the controller, set the IO lines and frequency */ static int s3c24xx_i2c_init(struct s3c24xx_i2c *i2c) { struct s3c2410_platform_i2c *pdata; unsigned int freq; /* get the platform data */ pdata = i2c->pdata; /* write slave address */ writeb(pdata->slave_addr, i2c->regs + S3C2410_IICADD); dev_info(i2c->dev, "slave address 0x%02x\n", pdata->slave_addr); writel(0, i2c->regs + S3C2410_IICCON); writel(0, i2c->regs + S3C2410_IICSTAT); /* we need to work out the divisors for the clock... */ if (s3c24xx_i2c_clockrate(i2c, &freq) != 0) { dev_err(i2c->dev, "cannot meet bus frequency required\n"); return -EINVAL; } /* todo - check that the i2c lines aren't being dragged anywhere */ dev_info(i2c->dev, "bus frequency set to %d KHz\n", freq); dev_dbg(i2c->dev, "S3C2410_IICCON=0x%02x\n", readl(i2c->regs + S3C2410_IICCON)); return 0; } #ifdef CONFIG_OF /* * Parse the device tree node and retrieve the platform data. */ static void s3c24xx_i2c_parse_dt(struct device_node *np, struct s3c24xx_i2c *i2c) { struct s3c2410_platform_i2c *pdata = i2c->pdata; int id; if (!np) return; pdata->bus_num = -1; /* i2c bus number is dynamically assigned */ of_property_read_u32(np, "samsung,i2c-sda-delay", &pdata->sda_delay); of_property_read_u32(np, "samsung,i2c-slave-addr", &pdata->slave_addr); of_property_read_u32(np, "samsung,i2c-max-bus-freq", (u32 *)&pdata->frequency); /* * Exynos5's legacy i2c controller and new high speed i2c * controller have muxed interrupt sources. By default the * interrupts for 4-channel HS-I2C controller are enabled. * If nodes for the first four channels of legacy i2c controller * are available then re-configure the interrupts via the * system register. 
id = of_alias_get_id(np, "i2c"); i2c->sysreg = syscon_regmap_lookup_by_phandle(np, "samsung,sysreg-phandle"); if (IS_ERR(i2c->sysreg)) return; regmap_update_bits(i2c->sysreg, EXYNOS5_SYS_I2C_CFG, BIT(id), 0); } #else static void s3c24xx_i2c_parse_dt(struct device_node *np, struct s3c24xx_i2c *i2c) { } #endif static int s3c24xx_i2c_probe(struct platform_device *pdev) { struct s3c24xx_i2c *i2c; struct s3c2410_platform_i2c *pdata = NULL; struct resource *res; int ret; if (!pdev->dev.of_node) { pdata = dev_get_platdata(&pdev->dev); if (!pdata) { dev_err(&pdev->dev, "no platform data\n"); return -EINVAL; } } i2c = devm_kzalloc(&pdev->dev, sizeof(struct s3c24xx_i2c), GFP_KERNEL); if (!i2c) return -ENOMEM; i2c->pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); if (!i2c->pdata) return -ENOMEM; i2c->quirks = s3c24xx_get_device_quirks(pdev); i2c->sysreg = ERR_PTR(-ENOENT); if (pdata) memcpy(i2c->pdata, pdata, sizeof(*pdata)); else s3c24xx_i2c_parse_dt(pdev->dev.of_node, i2c); strscpy(i2c->adap.name, "s3c2410-i2c", sizeof(i2c->adap.name)); i2c->adap.owner = THIS_MODULE; i2c->adap.algo = &s3c24xx_i2c_algorithm; i2c->adap.retries = 2; i2c->adap.class = I2C_CLASS_DEPRECATED; i2c->tx_setup = 50; init_waitqueue_head(&i2c->wait); /* find the clock and enable it */ i2c->dev = &pdev->dev; i2c->clk = devm_clk_get(&pdev->dev, "i2c"); if (IS_ERR(i2c->clk)) { dev_err(&pdev->dev, "cannot get clock\n"); return -ENOENT; } dev_dbg(&pdev->dev, "clock source %p\n", i2c->clk); /* map the registers */ i2c->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(i2c->regs)) return PTR_ERR(i2c->regs); dev_dbg(&pdev->dev, "registers %p (%p)\n", i2c->regs, res); /* setup info block for the i2c core */ i2c->adap.algo_data = i2c; i2c->adap.dev.parent = &pdev->dev; i2c->pctrl = devm_pinctrl_get_select_default(i2c->dev); /* initialise the i2c gpio lines */ if (i2c->pdata->cfg_gpio) i2c->pdata->cfg_gpio(to_platform_device(i2c->dev)); else if (IS_ERR(i2c->pctrl) && s3c24xx_i2c_parse_dt_gpio(i2c)) return -EINVAL; /* initialise the i2c controller */ ret = clk_prepare_enable(i2c->clk); if (ret) { dev_err(&pdev->dev, "I2C clock enable failed\n"); return ret; } ret = s3c24xx_i2c_init(i2c); clk_disable(i2c->clk); if (ret != 0) { dev_err(&pdev->dev, "I2C controller init failed\n"); clk_unprepare(i2c->clk); return ret; } /* * find the IRQ for this unit (note, this relies on the init call to * ensure no current IRQs pending) */ if (!(i2c->quirks & QUIRK_POLL)) { i2c->irq = ret = platform_get_irq(pdev, 0); if (ret < 0) { clk_unprepare(i2c->clk); return ret; } ret = devm_request_irq(&pdev->dev, i2c->irq, s3c24xx_i2c_irq, 0, dev_name(&pdev->dev), i2c); if (ret != 0) { dev_err(&pdev->dev, "cannot claim IRQ %d\n", i2c->irq); clk_unprepare(i2c->clk); return ret; } } /* * Note, previous versions of the driver used i2c_add_adapter() * to add the bus at any number. We now pass the bus number via * the platform data, so if unset it will now default to always * being bus 0. 
*/ i2c->adap.nr = i2c->pdata->bus_num; i2c->adap.dev.of_node = pdev->dev.of_node; platform_set_drvdata(pdev, i2c); pm_runtime_enable(&pdev->dev); ret = i2c_add_numbered_adapter(&i2c->adap); if (ret < 0) { pm_runtime_disable(&pdev->dev); clk_unprepare(i2c->clk); return ret; } dev_info(&pdev->dev, "%s: S3C I2C adapter\n", dev_name(&i2c->adap.dev)); return 0; } static void s3c24xx_i2c_remove(struct platform_device *pdev) { struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev); clk_unprepare(i2c->clk); pm_runtime_disable(&pdev->dev); i2c_del_adapter(&i2c->adap); } static int s3c24xx_i2c_suspend_noirq(struct device *dev) { struct s3c24xx_i2c *i2c = dev_get_drvdata(dev); i2c_mark_adapter_suspended(&i2c->adap); if (!IS_ERR(i2c->sysreg)) regmap_read(i2c->sysreg, EXYNOS5_SYS_I2C_CFG, &i2c->sys_i2c_cfg); return 0; } static int s3c24xx_i2c_resume_noirq(struct device *dev) { struct s3c24xx_i2c *i2c = dev_get_drvdata(dev); int ret; if (!IS_ERR(i2c->sysreg)) regmap_write(i2c->sysreg, EXYNOS5_SYS_I2C_CFG, i2c->sys_i2c_cfg); ret = clk_enable(i2c->clk); if (ret) return ret; s3c24xx_i2c_init(i2c); clk_disable(i2c->clk); i2c_mark_adapter_resumed(&i2c->adap); return 0; } static const struct dev_pm_ops s3c24xx_i2c_dev_pm_ops = { NOIRQ_SYSTEM_SLEEP_PM_OPS(s3c24xx_i2c_suspend_noirq, s3c24xx_i2c_resume_noirq) }; static struct platform_driver s3c24xx_i2c_driver = { .probe = s3c24xx_i2c_probe, .remove_new = s3c24xx_i2c_remove, .id_table = s3c24xx_driver_ids, .driver = { .name = "s3c-i2c", .pm = pm_sleep_ptr(&s3c24xx_i2c_dev_pm_ops), .of_match_table = of_match_ptr(s3c24xx_i2c_match), }, }; static int __init i2c_adap_s3c_init(void) { return platform_driver_register(&s3c24xx_i2c_driver); } subsys_initcall(i2c_adap_s3c_init); static void __exit i2c_adap_s3c_exit(void) { platform_driver_unregister(&s3c24xx_i2c_driver); } module_exit(i2c_adap_s3c_exit); MODULE_DESCRIPTION("S3C24XX I2C Bus driver"); MODULE_AUTHOR("Ben Dooks <[email protected]>"); MODULE_LICENSE("GPL");
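/*
 * Editorial sketch (not part of the driver above): the divisor search in
 * s3c24xx_i2c_calcdivisor() restated as a standalone, compilable program so
 * the resulting bus rate can be checked on a desk. It assumes the same
 * hardware model as the driver: IICCON prescales PCLK by 16 or 512 and then
 * by a SCALE value of 1..17; clkin and wanted are both in kHz, as in the
 * driver. The 66 MHz PCLK in main() is an assumed example value.
 */
#include <stdio.h>

static int calcdivisor(unsigned long clkin, unsigned int wanted,
		       unsigned int *div1, unsigned int *divs)
{
	unsigned int calc_divs = clkin / wanted;
	unsigned int calc_div1 = (calc_divs > 16 * 16) ? 512 : 16;

	/* round up, then clamp to the 1..17 range the SCALE field allows */
	calc_divs += calc_div1 - 1;
	calc_divs /= calc_div1;
	if (calc_divs == 0)
		calc_divs = 1;
	if (calc_divs > 17)
		calc_divs = 17;
	*divs = calc_divs;
	*div1 = calc_div1;
	return clkin / (calc_divs * calc_div1);
}

int main(void)
{
	unsigned int div1, divs;
	/* e.g. a 66 MHz PCLK and a 100 kHz target */
	int got = calcdivisor(66000, 100, &div1, &divs);

	/* prints: div1=512 divs=2 -> 64 kHz (never above the target) */
	printf("div1=%u divs=%u -> %d kHz\n", div1, divs, got);
	return 0;
}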
linux-master
drivers/i2c/busses/i2c-s3c2410.c
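/*
 * Editorial worked example (not part of either driver): the S3C2440 SDA
 * hold delay programmed into IICLC by s3c24xx_i2c_clockrate() above,
 * restated as plain arithmetic. Inputs follow the driver: clkin in kHz,
 * pdata->sda_delay in ns; the values 66000 and 100 are assumptions for
 * illustration. For clkin = 66000 and sda_delay = 100:
 *
 *   DIV_ROUND_UP(66000 * 100, 1000000) = 7    peripheral clock ticks
 *   DIV_ROUND_UP(7, 5)                 = 2    units of five clocks
 *   2 | S3C2410_IICLC_FILTER_ON        = 0x6  value written to IICLC
 *
 * The field saturates at 3, i.e. a maximum delay of 15 peripheral clocks.
 */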
// SPDX-License-Identifier: GPL-2.0 /* * i2c Support for Atmel's AT91 Two-Wire Interface (TWI) * * Copyright (C) 2011 Weinmann Medical GmbH * Author: Nikolaus Voss <[email protected]> * * Evolved from original work by: * Copyright (C) 2004 Rick Bronson * Converted to 2.6 by Andrew Victor <[email protected]> * * Borrowed heavily from original work by: * Copyright (C) 2000 Philip Edelbrock <[email protected]> */ #include <linux/clk.h> #include <linux/completion.h> #include <linux/dma-mapping.h> #include <linux/dmaengine.h> #include <linux/err.h> #include <linux/gpio/consumer.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/of.h> #include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include "i2c-at91.h" void at91_init_twi_bus_master(struct at91_twi_dev *dev) { struct at91_twi_pdata *pdata = dev->pdata; u32 filtr = 0; /* FIFO should be enabled immediately after the software reset */ if (dev->fifo_size) at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_FIFOEN); at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_MSEN); at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_SVDIS); at91_twi_write(dev, AT91_TWI_CWGR, dev->twi_cwgr_reg); /* enable digital filter */ if (pdata->has_dig_filtr && dev->enable_dig_filt) filtr |= AT91_TWI_FILTR_FILT; /* enable advanced digital filter */ if (pdata->has_adv_dig_filtr && dev->enable_dig_filt) filtr |= AT91_TWI_FILTR_FILT | (AT91_TWI_FILTR_THRES(dev->filter_width) & AT91_TWI_FILTR_THRES_MASK); /* enable analog filter */ if (pdata->has_ana_filtr && dev->enable_ana_filt) filtr |= AT91_TWI_FILTR_PADFEN; if (filtr) at91_twi_write(dev, AT91_TWI_FILTR, filtr); } /* * Calculate symmetric clock as stated in datasheet: * twi_clk = F_MAIN / (2 * (cdiv * (1 << ckdiv) + offset)) */ static void at91_calc_twi_clock(struct at91_twi_dev *dev) { int ckdiv, cdiv, div, hold = 0, filter_width = 0; struct at91_twi_pdata *pdata = dev->pdata; int offset = pdata->clk_offset; int max_ckdiv = pdata->clk_max_div; struct i2c_timings timings, *t = &timings; i2c_parse_fw_timings(dev->dev, t, true); div = max(0, (int)DIV_ROUND_UP(clk_get_rate(dev->clk), 2 * t->bus_freq_hz) - offset); ckdiv = fls(div >> 8); cdiv = div >> ckdiv; if (ckdiv > max_ckdiv) { dev_warn(dev->dev, "%d exceeds ckdiv max value which is %d.\n", ckdiv, max_ckdiv); ckdiv = max_ckdiv; cdiv = 255; } if (pdata->has_hold_field) { /* * hold time = HOLD + 3 x T_peripheral_clock * Use clk rate in kHz to prevent overflows when computing * hold. 
*/ hold = DIV_ROUND_UP(t->sda_hold_ns * (clk_get_rate(dev->clk) / 1000), 1000000); hold -= 3; if (hold < 0) hold = 0; if (hold > AT91_TWI_CWGR_HOLD_MAX) { dev_warn(dev->dev, "HOLD field set to its maximum value (%d instead of %d)\n", AT91_TWI_CWGR_HOLD_MAX, hold); hold = AT91_TWI_CWGR_HOLD_MAX; } } if (pdata->has_adv_dig_filtr) { /* * filter width = 0 to AT91_TWI_FILTR_THRES_MAX * peripheral clocks */ filter_width = DIV_ROUND_UP(t->digital_filter_width_ns * (clk_get_rate(dev->clk) / 1000), 1000000); if (filter_width > AT91_TWI_FILTR_THRES_MAX) { dev_warn(dev->dev, "Filter threshold set to its maximum value (%d instead of %d)\n", AT91_TWI_FILTR_THRES_MAX, filter_width); filter_width = AT91_TWI_FILTR_THRES_MAX; } } dev->twi_cwgr_reg = (ckdiv << 16) | (cdiv << 8) | cdiv | AT91_TWI_CWGR_HOLD(hold); dev->filter_width = filter_width; dev_dbg(dev->dev, "cdiv %d ckdiv %d hold %d (%d ns), filter_width %d (%d ns)\n", cdiv, ckdiv, hold, t->sda_hold_ns, filter_width, t->digital_filter_width_ns); } static void at91_twi_dma_cleanup(struct at91_twi_dev *dev) { struct at91_twi_dma *dma = &dev->dma; at91_twi_irq_save(dev); if (dma->xfer_in_progress) { if (dma->direction == DMA_FROM_DEVICE) dmaengine_terminate_sync(dma->chan_rx); else dmaengine_terminate_sync(dma->chan_tx); dma->xfer_in_progress = false; } if (dma->buf_mapped) { dma_unmap_single(dev->dev, sg_dma_address(&dma->sg[0]), dev->buf_len, dma->direction); dma->buf_mapped = false; } at91_twi_irq_restore(dev); } static void at91_twi_write_next_byte(struct at91_twi_dev *dev) { if (!dev->buf_len) return; /* 8bit write works with and without FIFO */ writeb_relaxed(*dev->buf, dev->base + AT91_TWI_THR); /* send stop when last byte has been written */ if (--dev->buf_len == 0) { if (!dev->use_alt_cmd) at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP); at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_TXRDY); } dev_dbg(dev->dev, "wrote 0x%x, to go %zu\n", *dev->buf, dev->buf_len); ++dev->buf; } static void at91_twi_write_data_dma_callback(void *data) { struct at91_twi_dev *dev = (struct at91_twi_dev *)data; dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg[0]), dev->buf_len, DMA_TO_DEVICE); /* * When this callback is called, THR/TX FIFO is likely not to be empty * yet. So we have to wait for TXCOMP or NACK bits to be set into the * Status Register to be sure that the STOP bit has been sent and the * transfer is completed. The NACK interrupt has already been enabled, * we just have to enable TXCOMP one. 
*/ at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP); if (!dev->use_alt_cmd) at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP); } static void at91_twi_write_data_dma(struct at91_twi_dev *dev) { dma_addr_t dma_addr; struct dma_async_tx_descriptor *txdesc; struct at91_twi_dma *dma = &dev->dma; struct dma_chan *chan_tx = dma->chan_tx; unsigned int sg_len = 1; if (!dev->buf_len) return; dma->direction = DMA_TO_DEVICE; at91_twi_irq_save(dev); dma_addr = dma_map_single(dev->dev, dev->buf, dev->buf_len, DMA_TO_DEVICE); if (dma_mapping_error(dev->dev, dma_addr)) { dev_err(dev->dev, "dma map failed\n"); return; } dma->buf_mapped = true; at91_twi_irq_restore(dev); if (dev->fifo_size) { size_t part1_len, part2_len; struct scatterlist *sg; unsigned fifo_mr; sg_len = 0; part1_len = dev->buf_len & ~0x3; if (part1_len) { sg = &dma->sg[sg_len++]; sg_dma_len(sg) = part1_len; sg_dma_address(sg) = dma_addr; } part2_len = dev->buf_len & 0x3; if (part2_len) { sg = &dma->sg[sg_len++]; sg_dma_len(sg) = part2_len; sg_dma_address(sg) = dma_addr + part1_len; } /* * DMA controller is triggered when at least 4 data can be * written into the TX FIFO */ fifo_mr = at91_twi_read(dev, AT91_TWI_FMR); fifo_mr &= ~AT91_TWI_FMR_TXRDYM_MASK; fifo_mr |= AT91_TWI_FMR_TXRDYM(AT91_TWI_FOUR_DATA); at91_twi_write(dev, AT91_TWI_FMR, fifo_mr); } else { sg_dma_len(&dma->sg[0]) = dev->buf_len; sg_dma_address(&dma->sg[0]) = dma_addr; } txdesc = dmaengine_prep_slave_sg(chan_tx, dma->sg, sg_len, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!txdesc) { dev_err(dev->dev, "dma prep slave sg failed\n"); goto error; } txdesc->callback = at91_twi_write_data_dma_callback; txdesc->callback_param = dev; dma->xfer_in_progress = true; dmaengine_submit(txdesc); dma_async_issue_pending(chan_tx); return; error: at91_twi_dma_cleanup(dev); } static void at91_twi_read_next_byte(struct at91_twi_dev *dev) { /* * If we are in this case, it means there is garbage data in RHR, so * delete them. 
if (!dev->buf_len) { at91_twi_read(dev, AT91_TWI_RHR); return; } /* 8bit read works with and without FIFO */ *dev->buf = readb_relaxed(dev->base + AT91_TWI_RHR); --dev->buf_len; /* return if aborting, we only needed to read RHR to clear RXRDY */ if (dev->recv_len_abort) return; /* handle I2C_SMBUS_BLOCK_DATA */ if (unlikely(dev->msg->flags & I2C_M_RECV_LEN)) { /* ensure length byte is a valid value */ if (*dev->buf <= I2C_SMBUS_BLOCK_MAX && *dev->buf > 0) { dev->msg->flags &= ~I2C_M_RECV_LEN; dev->buf_len += *dev->buf; dev->msg->len = dev->buf_len + 1; dev_dbg(dev->dev, "received block length %zu\n", dev->buf_len); } else { /* abort and send the stop by reading one more byte */ dev->recv_len_abort = true; dev->buf_len = 1; } } /* send stop if second but last byte has been read */ if (!dev->use_alt_cmd && dev->buf_len == 1) at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP); dev_dbg(dev->dev, "read 0x%x, to go %zu\n", *dev->buf, dev->buf_len); ++dev->buf; } static void at91_twi_read_data_dma_callback(void *data) { struct at91_twi_dev *dev = (struct at91_twi_dev *)data; unsigned ier = AT91_TWI_TXCOMP; dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg[0]), dev->buf_len, DMA_FROM_DEVICE); if (!dev->use_alt_cmd) { /* The last two bytes have to be read without using dma */ dev->buf += dev->buf_len - 2; dev->buf_len = 2; ier |= AT91_TWI_RXRDY; } at91_twi_write(dev, AT91_TWI_IER, ier); } static void at91_twi_read_data_dma(struct at91_twi_dev *dev) { dma_addr_t dma_addr; struct dma_async_tx_descriptor *rxdesc; struct at91_twi_dma *dma = &dev->dma; struct dma_chan *chan_rx = dma->chan_rx; size_t buf_len; buf_len = (dev->use_alt_cmd) ? dev->buf_len : dev->buf_len - 2; dma->direction = DMA_FROM_DEVICE; /* Keep in mind that we won't use dma to read the last two bytes */ at91_twi_irq_save(dev); dma_addr = dma_map_single(dev->dev, dev->buf, buf_len, DMA_FROM_DEVICE); if (dma_mapping_error(dev->dev, dma_addr)) { dev_err(dev->dev, "dma map failed\n"); return; } dma->buf_mapped = true; at91_twi_irq_restore(dev); if (dev->fifo_size && IS_ALIGNED(buf_len, 4)) { unsigned fifo_mr; /* * DMA controller is triggered when at least 4 data can be * read from the RX FIFO */ fifo_mr = at91_twi_read(dev, AT91_TWI_FMR); fifo_mr &= ~AT91_TWI_FMR_RXRDYM_MASK; fifo_mr |= AT91_TWI_FMR_RXRDYM(AT91_TWI_FOUR_DATA); at91_twi_write(dev, AT91_TWI_FMR, fifo_mr); } sg_dma_len(&dma->sg[0]) = buf_len; sg_dma_address(&dma->sg[0]) = dma_addr; rxdesc = dmaengine_prep_slave_sg(chan_rx, dma->sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!rxdesc) { dev_err(dev->dev, "dma prep slave sg failed\n"); goto error; } rxdesc->callback = at91_twi_read_data_dma_callback; rxdesc->callback_param = dev; dma->xfer_in_progress = true; dmaengine_submit(rxdesc); dma_async_issue_pending(dma->chan_rx); return; error: at91_twi_dma_cleanup(dev); } static irqreturn_t atmel_twi_interrupt(int irq, void *dev_id) { struct at91_twi_dev *dev = dev_id; const unsigned status = at91_twi_read(dev, AT91_TWI_SR); const unsigned irqstatus = status & at91_twi_read(dev, AT91_TWI_IMR); if (!irqstatus) return IRQ_NONE; /* * In reception, the behavior of the twi device (before sama5d2) is * weird. There is some magic about the RXRDY flag! When a data byte has * been almost received, the reception of a new one is anticipated if * there is no stop command to send. That is the reason why we ask for * sending the stop command not on the last data but on the second last * one. 
* * Unfortunately, we could still have the RXRDY flag set even if the * transfer is done and we have read the last data. It might happen * when the i2c slave device sends too quickly data after receiving the * ack from the master. The data has been almost received before having * the order to send stop. In this case, sending the stop command could * cause a RXRDY interrupt with a TXCOMP one. It is better to manage * the RXRDY interrupt first in order to not keep garbage data in the * Receive Holding Register for the next transfer. */ if (irqstatus & AT91_TWI_RXRDY) { /* * Read all available bytes at once by polling RXRDY usable w/ * and w/o FIFO. With FIFO enabled we could also read RXFL and * avoid polling RXRDY. */ do { at91_twi_read_next_byte(dev); } while (at91_twi_read(dev, AT91_TWI_SR) & AT91_TWI_RXRDY); } /* * When a NACK condition is detected, the I2C controller sets the NACK, * TXCOMP and TXRDY bits all together in the Status Register (SR). * * 1 - Handling NACK errors with CPU write transfer. * * In such case, we should not write the next byte into the Transmit * Holding Register (THR) otherwise the I2C controller would start a new * transfer and the I2C slave is likely to reply by another NACK. * * 2 - Handling NACK errors with DMA write transfer. * * By setting the TXRDY bit in the SR, the I2C controller also triggers * the DMA controller to write the next data into the THR. Then the * result depends on the hardware version of the I2C controller. * * 2a - Without support of the Alternative Command mode. * * This is the worst case: the DMA controller is triggered to write the * next data into the THR, hence starting a new transfer: the I2C slave * is likely to reply by another NACK. * Concurrently, this interrupt handler is likely to be called to manage * the first NACK before the I2C controller detects the second NACK and * sets once again the NACK bit into the SR. * When handling the first NACK, this interrupt handler disables the I2C * controller interruptions, especially the NACK interrupt. * Hence, the NACK bit is pending into the SR. This is why we should * read the SR to clear all pending interrupts at the beginning of * at91_do_twi_transfer() before actually starting a new transfer. * * 2b - With support of the Alternative Command mode. * * When a NACK condition is detected, the I2C controller also locks the * THR (and sets the LOCK bit in the SR): even though the DMA controller * is triggered by the TXRDY bit to write the next data into the THR, * this data actually won't go on the I2C bus hence a second NACK is not * generated. */ if (irqstatus & (AT91_TWI_TXCOMP | AT91_TWI_NACK)) { at91_disable_twi_interrupts(dev); complete(&dev->cmd_complete); } else if (irqstatus & AT91_TWI_TXRDY) { at91_twi_write_next_byte(dev); } /* catch error flags */ dev->transfer_status |= status; return IRQ_HANDLED; } static int at91_do_twi_transfer(struct at91_twi_dev *dev) { int ret; unsigned long time_left; bool has_unre_flag = dev->pdata->has_unre_flag; bool has_alt_cmd = dev->pdata->has_alt_cmd; /* * WARNING: the TXCOMP bit in the Status Register is NOT a clear on * read flag but shows the state of the transmission at the time the * Status Register is read. According to the programmer datasheet, * TXCOMP is set when both holding register and internal shifter are * empty and STOP condition has been sent. * Consequently, we should enable NACK interrupt rather than TXCOMP to * detect transmission failure. * Indeed let's take the case of an i2c write command using DMA. 
* Whenever the slave doesn't acknowledge a byte, the LOCK, NACK and * TXCOMP bits are set together into the Status Register. * LOCK is a clear on write bit, which is set to prevent the DMA * controller from sending new data on the i2c bus after a NACK * condition has happened. Once locked, this i2c peripheral stops * triggering the DMA controller for new data but it is more than * likely that a new DMA transaction is already in progress, writing * into the Transmit Holding Register. Since the peripheral is locked, * these new data won't be sent to the i2c bus but they will remain * in the Transmit Holding Register, so TXCOMP bit is cleared. * Then when the interrupt handler is called, the Status Register is * read: the TXCOMP bit is clear but NACK bit is still set. The driver * manages the error properly, without waiting for timeout. * This case can be reproduced easily when writing into an at24 eeprom. * * Besides, the TXCOMP bit is already set before the i2c transaction * has been started. For read transactions, this bit is cleared when * writing the START bit into the Control Register. So the * corresponding interrupt can safely be enabled just after. * However for write transactions managed by the CPU, we first write * into THR, so TXCOMP is cleared. Then we can safely enable TXCOMP * interrupt. If TXCOMP interrupt were enabled before writing into THR, * the interrupt handler would be called immediately and the i2c command * would be reported as completed. * Also when a write transaction is managed by the DMA controller, * enabling the TXCOMP interrupt in this function may lead to a race * condition since we don't know whether the TXCOMP interrupt is enabled * before or after the DMA has started to write into THR. So the TXCOMP * interrupt is enabled later by at91_twi_write_data_dma_callback(). * Immediately after in that DMA callback, if the alternative command * mode is not used, we still need to send the STOP condition manually * by writing the corresponding bit into the Control Register. */ dev_dbg(dev->dev, "transfer: %s %zu bytes.\n", (dev->msg->flags & I2C_M_RD) ? "read" : "write", dev->buf_len); reinit_completion(&dev->cmd_complete); dev->transfer_status = 0; /* Clear pending interrupts, such as NACK. */ at91_twi_read(dev, AT91_TWI_SR); if (dev->fifo_size) { unsigned fifo_mr = at91_twi_read(dev, AT91_TWI_FMR); /* Reset FIFO mode register */ fifo_mr &= ~(AT91_TWI_FMR_TXRDYM_MASK | AT91_TWI_FMR_RXRDYM_MASK); fifo_mr |= AT91_TWI_FMR_TXRDYM(AT91_TWI_ONE_DATA); fifo_mr |= AT91_TWI_FMR_RXRDYM(AT91_TWI_ONE_DATA); at91_twi_write(dev, AT91_TWI_FMR, fifo_mr); /* Flush FIFOs */ at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_THRCLR | AT91_TWI_RHRCLR); } if (!dev->buf_len) { at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_QUICK); at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP); } else if (dev->msg->flags & I2C_M_RD) { unsigned start_flags = AT91_TWI_START; /* if only one byte is to be read, immediately stop transfer */ if (!dev->use_alt_cmd && dev->buf_len <= 1 && !(dev->msg->flags & I2C_M_RECV_LEN)) start_flags |= AT91_TWI_STOP; at91_twi_write(dev, AT91_TWI_CR, start_flags); /* * When using dma without alternative command mode, the last * byte has to be read manually in order to not send the stop * command too late and then to receive extra data. * In practice, there are some issues if you use the dma to * read n-1 bytes because of latency. * Reading n-2 bytes with dma and the two last ones manually * seems to be the best solution. 
*/ if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) { at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK); at91_twi_read_data_dma(dev); } else { at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP | AT91_TWI_NACK | AT91_TWI_RXRDY); } } else { if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) { at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK); at91_twi_write_data_dma(dev); } else { at91_twi_write_next_byte(dev); at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP | AT91_TWI_NACK | (dev->buf_len ? AT91_TWI_TXRDY : 0)); } } time_left = wait_for_completion_timeout(&dev->cmd_complete, dev->adapter.timeout); if (time_left == 0) { dev->transfer_status |= at91_twi_read(dev, AT91_TWI_SR); dev_err(dev->dev, "controller timed out\n"); at91_init_twi_bus(dev); ret = -ETIMEDOUT; goto error; } if (dev->transfer_status & AT91_TWI_NACK) { dev_dbg(dev->dev, "received nack\n"); ret = -EREMOTEIO; goto error; } if (dev->transfer_status & AT91_TWI_OVRE) { dev_err(dev->dev, "overrun while reading\n"); ret = -EIO; goto error; } if (has_unre_flag && dev->transfer_status & AT91_TWI_UNRE) { dev_err(dev->dev, "underrun while writing\n"); ret = -EIO; goto error; } if ((has_alt_cmd || dev->fifo_size) && (dev->transfer_status & AT91_TWI_LOCK)) { dev_err(dev->dev, "tx locked\n"); ret = -EIO; goto error; } if (dev->recv_len_abort) { dev_err(dev->dev, "invalid smbus block length recvd\n"); ret = -EPROTO; goto error; } dev_dbg(dev->dev, "transfer complete\n"); return 0; error: /* first stop DMA transfer if still in progress */ at91_twi_dma_cleanup(dev); /* then flush THR/FIFO and unlock TX if locked */ if ((has_alt_cmd || dev->fifo_size) && (dev->transfer_status & AT91_TWI_LOCK)) { dev_dbg(dev->dev, "unlock tx\n"); at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_THRCLR | AT91_TWI_LOCKCLR); } /* * some faulty I2C slave devices might hold SDA down; * we can send a bus clear command, hoping that the pins will be * released */ i2c_recover_bus(&dev->adapter); return ret; } static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num) { struct at91_twi_dev *dev = i2c_get_adapdata(adap); int ret; unsigned int_addr_flag = 0; struct i2c_msg *m_start = msg; bool is_read; u8 *dma_buf = NULL; dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num); ret = pm_runtime_get_sync(dev->dev); if (ret < 0) goto out; if (num == 2) { int internal_address = 0; int i; /* 1st msg is put into the internal address, start with 2nd */ m_start = &msg[1]; for (i = 0; i < msg->len; ++i) { const unsigned addr = msg->buf[msg->len - 1 - i]; internal_address |= addr << (8 * i); int_addr_flag += AT91_TWI_IADRSZ_1; } at91_twi_write(dev, AT91_TWI_IADR, internal_address); } dev->use_alt_cmd = false; is_read = (m_start->flags & I2C_M_RD); if (dev->pdata->has_alt_cmd) { if (m_start->len > 0 && m_start->len < AT91_I2C_MAX_ALT_CMD_DATA_SIZE) { at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMEN); at91_twi_write(dev, AT91_TWI_ACR, AT91_TWI_ACR_DATAL(m_start->len) | ((is_read) ? AT91_TWI_ACR_DIR : 0)); dev->use_alt_cmd = true; } else { at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMDIS); } } at91_twi_write(dev, AT91_TWI_MMR, (m_start->addr << 16) | int_addr_flag | ((!dev->use_alt_cmd && is_read) ? 
AT91_TWI_MREAD : 0)); dev->buf_len = m_start->len; dev->buf = m_start->buf; dev->msg = m_start; dev->recv_len_abort = false; if (dev->use_dma) { dma_buf = i2c_get_dma_safe_msg_buf(m_start, 1); if (!dma_buf) { ret = -ENOMEM; goto out; } dev->buf = dma_buf; } ret = at91_do_twi_transfer(dev); i2c_put_dma_safe_msg_buf(dma_buf, m_start, !ret); ret = (ret < 0) ? ret : num; out: pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return ret; } /* * The hardware can handle at most two messages concatenated by a * repeated start via its internal address feature. */ static const struct i2c_adapter_quirks at91_twi_quirks = { .flags = I2C_AQ_COMB | I2C_AQ_COMB_WRITE_FIRST | I2C_AQ_COMB_SAME_ADDR, .max_comb_1st_msg_len = 3, }; static u32 at91_twi_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SMBUS_READ_BLOCK_DATA; } static const struct i2c_algorithm at91_twi_algorithm = { .master_xfer = at91_twi_xfer, .functionality = at91_twi_func, }; static int at91_twi_configure_dma(struct at91_twi_dev *dev, u32 phy_addr) { int ret = 0; struct dma_slave_config slave_config; struct at91_twi_dma *dma = &dev->dma; enum dma_slave_buswidth addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; /* * The actual width of the access will be chosen in * dmaengine_prep_slave_sg(): * for each buffer in the scatter-gather list, if its size is aligned * to addr_width then addr_width accesses will be performed to transfer * the buffer. On the other hand, if the buffer size is not aligned to * addr_width then the buffer is transferred using single byte accesses. * Please refer to the Atmel eXtended DMA controller driver. * When FIFOs are used, the TXRDYM threshold can always be set to * trigger the XDMAC when at least 4 data can be written into the TX * FIFO, even if single byte accesses are performed. * However the RXRDYM threshold must be set to fit the access width, * deduced from buffer length, so the XDMAC is triggered properly to * read data from the RX FIFO. 
*/ if (dev->fifo_size) addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; memset(&slave_config, 0, sizeof(slave_config)); slave_config.src_addr = (dma_addr_t)phy_addr + AT91_TWI_RHR; slave_config.src_addr_width = addr_width; slave_config.src_maxburst = 1; slave_config.dst_addr = (dma_addr_t)phy_addr + AT91_TWI_THR; slave_config.dst_addr_width = addr_width; slave_config.dst_maxburst = 1; slave_config.device_fc = false; dma->chan_tx = dma_request_chan(dev->dev, "tx"); if (IS_ERR(dma->chan_tx)) { ret = PTR_ERR(dma->chan_tx); dma->chan_tx = NULL; goto error; } dma->chan_rx = dma_request_chan(dev->dev, "rx"); if (IS_ERR(dma->chan_rx)) { ret = PTR_ERR(dma->chan_rx); dma->chan_rx = NULL; goto error; } slave_config.direction = DMA_MEM_TO_DEV; if (dmaengine_slave_config(dma->chan_tx, &slave_config)) { dev_err(dev->dev, "failed to configure tx channel\n"); ret = -EINVAL; goto error; } slave_config.direction = DMA_DEV_TO_MEM; if (dmaengine_slave_config(dma->chan_rx, &slave_config)) { dev_err(dev->dev, "failed to configure rx channel\n"); ret = -EINVAL; goto error; } sg_init_table(dma->sg, 2); dma->buf_mapped = false; dma->xfer_in_progress = false; dev->use_dma = true; dev_info(dev->dev, "using %s (tx) and %s (rx) for DMA transfers\n", dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx)); return ret; error: if (ret != -EPROBE_DEFER) dev_info(dev->dev, "can't get DMA channel, continue without DMA support\n"); if (dma->chan_rx) dma_release_channel(dma->chan_rx); if (dma->chan_tx) dma_release_channel(dma->chan_tx); return ret; } static int at91_init_twi_recovery_gpio(struct platform_device *pdev, struct at91_twi_dev *dev) { struct i2c_bus_recovery_info *rinfo = &dev->rinfo; rinfo->pinctrl = devm_pinctrl_get(&pdev->dev); if (!rinfo->pinctrl) { dev_info(dev->dev, "pinctrl unavailable, bus recovery not supported\n"); return 0; } if (IS_ERR(rinfo->pinctrl)) { dev_info(dev->dev, "can't get pinctrl, bus recovery not supported\n"); return PTR_ERR(rinfo->pinctrl); } dev->adapter.bus_recovery_info = rinfo; return 0; } static int at91_twi_recover_bus_cmd(struct i2c_adapter *adap) { struct at91_twi_dev *dev = i2c_get_adapdata(adap); dev->transfer_status |= at91_twi_read(dev, AT91_TWI_SR); if (!(dev->transfer_status & AT91_TWI_SDA)) { dev_dbg(dev->dev, "SDA is down; sending bus clear command\n"); if (dev->use_alt_cmd) { unsigned int acr; acr = at91_twi_read(dev, AT91_TWI_ACR); acr &= ~AT91_TWI_ACR_DATAL_MASK; at91_twi_write(dev, AT91_TWI_ACR, acr); } at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_CLEAR); } return 0; } static int at91_init_twi_recovery_info(struct platform_device *pdev, struct at91_twi_dev *dev) { struct i2c_bus_recovery_info *rinfo = &dev->rinfo; bool has_clear_cmd = dev->pdata->has_clear_cmd; if (!has_clear_cmd) return at91_init_twi_recovery_gpio(pdev, dev); rinfo->recover_bus = at91_twi_recover_bus_cmd; dev->adapter.bus_recovery_info = rinfo; return 0; } int at91_twi_probe_master(struct platform_device *pdev, u32 phy_addr, struct at91_twi_dev *dev) { int rc; init_completion(&dev->cmd_complete); rc = devm_request_irq(&pdev->dev, dev->irq, atmel_twi_interrupt, 0, dev_name(dev->dev), dev); if (rc) { dev_err(dev->dev, "Cannot get irq %d: %d\n", dev->irq, rc); return rc; } if (dev->dev->of_node) { rc = at91_twi_configure_dma(dev, phy_addr); if (rc == -EPROBE_DEFER) return rc; } if (!of_property_read_u32(pdev->dev.of_node, "atmel,fifo-size", &dev->fifo_size)) { dev_info(dev->dev, "Using FIFO (%u data)\n", dev->fifo_size); } dev->enable_dig_filt = of_property_read_bool(pdev->dev.of_node, "i2c-digital-filter"); 
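/*
 * Editorial note (illustration): the "atmel,fifo-size" lookup above, the
 * i2c-digital-filter read just before this note and the i2c-analog-filter
 * read just after it are all driven by the controller's devicetree node.
 * A hypothetical node exercising all three might look like:
 *
 *	i2c0: i2c@f8014000 {
 *		compatible = "atmel,sama5d2-i2c";
 *		atmel,fifo-size = <16>;
 *		i2c-digital-filter;
 *		i2c-analog-filter;
 *	};
 *
 * The filter properties are the generic I2C bindings; the unit address
 * and label here are made up for the example.
 */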
dev->enable_ana_filt = of_property_read_bool(pdev->dev.of_node, "i2c-analog-filter"); at91_calc_twi_clock(dev); rc = at91_init_twi_recovery_info(pdev, dev); if (rc == -EPROBE_DEFER) return rc; dev->adapter.algo = &at91_twi_algorithm; dev->adapter.quirks = &at91_twi_quirks; return 0; }
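/*
 * Editorial sketch (not part of the driver above): at91_calc_twi_clock()'s
 * divider search restated as a standalone program, following the datasheet
 * formula quoted in the driver: twi_clk = F_MAIN / (2 * (cdiv * (1 << ckdiv)
 * + offset)). The 66 MHz peripheral clock and clk_offset of 4 are assumed
 * values for illustration only.
 */
#include <stdio.h>

/* fls(): position of the most significant set bit, 0 when none */
static int fls_u32(unsigned int v)
{
	int n = 0;

	while (v) {
		n++;
		v >>= 1;
	}
	return n;
}

int main(void)
{
	unsigned long f_main = 66000000;	/* peripheral clock (assumed) */
	unsigned long bus_hz = 400000;		/* desired bus rate */
	int offset = 4;				/* pdata->clk_offset (assumed) */
	int div = (int)((f_main + 2 * bus_hz - 1) / (2 * bus_hz)) - offset;
	int ckdiv, cdiv;

	if (div < 0)
		div = 0;
	/* cdiv must fit in 8 bits, so ckdiv absorbs the high bits */
	ckdiv = fls_u32((unsigned int)div >> 8);
	cdiv = div >> ckdiv;
	/* prints: ckdiv=0 cdiv=79 -> 397590 Hz, just under 400 kHz */
	printf("ckdiv=%d cdiv=%d -> %lu Hz\n", ckdiv, cdiv,
	       f_main / (2 * (cdiv * (1UL << ckdiv) + offset)));
	return 0;
}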
linux-master
drivers/i2c/busses/i2c-at91-master.c
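/*
 * Editorial sketch (not part of the kernel sources above): a minimal
 * userspace consumer of the adapters these drivers register, using the
 * standard i2c-dev interface. The bus number in the device path and the
 * 0x50 client address are assumptions for illustration. The two-message
 * write-then-read maps onto the combined-transfer support both drivers
 * expose (on the AT91, via its internal address feature under the
 * at91_twi_quirks rules).
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

int main(void)
{
	unsigned char reg = 0x00, val = 0;
	struct i2c_msg msgs[2] = {
		/* write one register-address byte ... */
		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &reg },
		/* ... then a repeated-start read of one byte */
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = &val },
	};
	struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 2 };
	int fd = open("/dev/i2c-0", O_RDWR);

	if (fd < 0 || ioctl(fd, I2C_RDWR, &xfer) < 0) {
		perror("i2c");
		return 1;
	}
	printf("reg 0x%02x = 0x%02x\n", reg, val);
	close(fd);
	return 0;
}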