// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Cadence SPI controller driver (host and target mode)
*
* Copyright (C) 2008 - 2014 Xilinx, Inc.
*
* based on Blackfin On-Chip SPI Driver (spi_bfin5xx.c)
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
/* Name of this driver */
#define CDNS_SPI_NAME "cdns-spi"
/* Register offset definitions */
#define CDNS_SPI_CR 0x00 /* Configuration Register, RW */
#define CDNS_SPI_ISR 0x04 /* Interrupt Status Register, RO */
#define CDNS_SPI_IER 0x08 /* Interrupt Enable Register, WO */
#define CDNS_SPI_IDR 0x0c /* Interrupt Disable Register, WO */
#define CDNS_SPI_IMR 0x10 /* Interrupt Enabled Mask Register, RO */
#define CDNS_SPI_ER 0x14 /* Enable/Disable Register, RW */
#define CDNS_SPI_DR 0x18 /* Delay Register, RW */
#define CDNS_SPI_TXD 0x1C /* Data Transmit Register, WO */
#define CDNS_SPI_RXD 0x20 /* Data Receive Register, RO */
#define CDNS_SPI_SICR 0x24 /* Slave Idle Count Register, RW */
#define CDNS_SPI_THLD 0x28 /* Transmit FIFO Watermark Register,RW */
#define SPI_AUTOSUSPEND_TIMEOUT 3000
/*
* SPI Configuration Register bit Masks
*
* This register contains various control bits that affect the operation
* of the SPI controller
*/
#define CDNS_SPI_CR_MANSTRT 0x00010000 /* Manual TX Start */
#define CDNS_SPI_CR_CPHA 0x00000004 /* Clock Phase Control */
#define CDNS_SPI_CR_CPOL 0x00000002 /* Clock Polarity Control */
#define CDNS_SPI_CR_SSCTRL 0x00003C00 /* Slave Select Mask */
#define CDNS_SPI_CR_PERI_SEL 0x00000200 /* Peripheral Select Decode */
#define CDNS_SPI_CR_BAUD_DIV 0x00000038 /* Baud Rate Divisor Mask */
#define CDNS_SPI_CR_MSTREN 0x00000001 /* Master Enable Mask */
#define CDNS_SPI_CR_MANSTRTEN 0x00008000 /* Manual TX Enable Mask */
#define CDNS_SPI_CR_SSFORCE 0x00004000 /* Manual SS Enable Mask */
#define CDNS_SPI_CR_BAUD_DIV_4 0x00000008 /* Default Baud Div Mask */
#define CDNS_SPI_CR_DEFAULT (CDNS_SPI_CR_MSTREN | \
CDNS_SPI_CR_SSCTRL | \
CDNS_SPI_CR_SSFORCE | \
CDNS_SPI_CR_BAUD_DIV_4)
/*
* SPI Configuration Register - Baud rate and target select
*
* These are the values used in the calculation of baud rate divisor and
* setting the target select.
*/
#define CDNS_SPI_BAUD_DIV_MAX 7 /* Baud rate divisor maximum */
#define CDNS_SPI_BAUD_DIV_MIN 1 /* Baud rate divisor minimum */
#define CDNS_SPI_BAUD_DIV_SHIFT 3 /* Baud rate divisor shift in CR */
#define CDNS_SPI_SS_SHIFT 10 /* Slave Select field shift in CR */
#define CDNS_SPI_SS0 0x1 /* Slave Select zero */
#define CDNS_SPI_NOSS 0xF /* No Slave select */
/*
* SPI Interrupt Registers bit Masks
*
* All the four interrupt registers (Status/Mask/Enable/Disable) have the same
* bit definitions.
*/
#define CDNS_SPI_IXR_TXOW 0x00000004 /* SPI TX FIFO Overwater */
#define CDNS_SPI_IXR_MODF 0x00000002 /* SPI Mode Fault */
#define CDNS_SPI_IXR_RXNEMTY 0x00000010 /* SPI RX FIFO Not Empty */
#define CDNS_SPI_IXR_DEFAULT (CDNS_SPI_IXR_TXOW | \
CDNS_SPI_IXR_MODF)
#define CDNS_SPI_IXR_TXFULL 0x00000008 /* SPI TX Full */
#define CDNS_SPI_IXR_ALL 0x0000007F /* SPI all interrupts */
/*
* SPI Enable Register bit Masks
*
* This register is used to enable or disable the SPI controller
*/
#define CDNS_SPI_ER_ENABLE 0x00000001 /* SPI Enable Bit Mask */
#define CDNS_SPI_ER_DISABLE 0x0 /* SPI Disable Bit Mask */
/* Default number of chip select lines */
#define CDNS_SPI_DEFAULT_NUM_CS 4
/**
* struct cdns_spi - Cadence SPI driver instance data
* @regs: Virtual address of the SPI controller registers
* @ref_clk: Pointer to the peripheral clock
* @pclk: Pointer to the APB clock
* @clk_rate: Reference clock frequency, taken from @ref_clk
* @speed_hz: Current SPI bus clock speed in Hz
* @txbuf: Pointer to the TX buffer
* @rxbuf: Pointer to the RX buffer
* @tx_bytes: Number of bytes left to transfer
* @rx_bytes: Number of bytes requested
* @dev_busy: Device busy flag
* @is_decoded_cs: Flag for decoder property set or not
* @tx_fifo_depth: Depth of the TX FIFO
*/
struct cdns_spi {
void __iomem *regs;
struct clk *ref_clk;
struct clk *pclk;
unsigned int clk_rate;
u32 speed_hz;
const u8 *txbuf;
u8 *rxbuf;
int tx_bytes;
int rx_bytes;
u8 dev_busy;
u32 is_decoded_cs;
unsigned int tx_fifo_depth;
};
/* Macros for the SPI controller read/write */
static inline u32 cdns_spi_read(struct cdns_spi *xspi, u32 offset)
{
return readl_relaxed(xspi->regs + offset);
}
static inline void cdns_spi_write(struct cdns_spi *xspi, u32 offset, u32 val)
{
writel_relaxed(val, xspi->regs + offset);
}
/**
* cdns_spi_init_hw - Initialize the hardware and configure the SPI controller
* @xspi: Pointer to the cdns_spi structure
* @is_target: Flag to indicate target or host mode
* On reset the SPI controller is configured to target or host mode.
* In host mode the baud rate divisor is set to 4, the threshold for the TX FIFO
* not-full interrupt is set to 1 and the word size is set to 8 bits.
*
* This function initializes the SPI controller to disable and clear all the
* interrupts, enable manual target select and manual start, deselect all the
* chip select lines, and enable the SPI controller.
*/
static void cdns_spi_init_hw(struct cdns_spi *xspi, bool is_target)
{
u32 ctrl_reg = 0;
if (!is_target)
ctrl_reg |= CDNS_SPI_CR_DEFAULT;
if (xspi->is_decoded_cs)
ctrl_reg |= CDNS_SPI_CR_PERI_SEL;
cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_DISABLE);
cdns_spi_write(xspi, CDNS_SPI_IDR, CDNS_SPI_IXR_ALL);
/* Clear the RX FIFO */
while (cdns_spi_read(xspi, CDNS_SPI_ISR) & CDNS_SPI_IXR_RXNEMTY)
cdns_spi_read(xspi, CDNS_SPI_RXD);
cdns_spi_write(xspi, CDNS_SPI_ISR, CDNS_SPI_IXR_ALL);
cdns_spi_write(xspi, CDNS_SPI_CR, ctrl_reg);
cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_ENABLE);
}
/**
* cdns_spi_chipselect - Select or deselect the chip select line
* @spi: Pointer to the spi_device structure
* @is_high: Select (0) or deselect (1) the chip select line
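*
* When an external decoder is in use (is_decoded_cs) the raw chip-select
* number is written into the SSCTRL field; otherwise the field holds an
* active-low one-hot mask, so only the bit for the selected line is cleared.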
*/
static void cdns_spi_chipselect(struct spi_device *spi, bool is_high)
{
struct cdns_spi *xspi = spi_controller_get_devdata(spi->controller);
u32 ctrl_reg;
ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR);
if (is_high) {
/* Deselect the target */
ctrl_reg |= CDNS_SPI_CR_SSCTRL;
} else {
/* Select the target */
ctrl_reg &= ~CDNS_SPI_CR_SSCTRL;
if (!(xspi->is_decoded_cs))
ctrl_reg |= ((~(CDNS_SPI_SS0 << spi_get_chipselect(spi, 0))) <<
CDNS_SPI_SS_SHIFT) &
CDNS_SPI_CR_SSCTRL;
else
ctrl_reg |= (spi_get_chipselect(spi, 0) << CDNS_SPI_SS_SHIFT) &
CDNS_SPI_CR_SSCTRL;
}
cdns_spi_write(xspi, CDNS_SPI_CR, ctrl_reg);
}
/**
* cdns_spi_config_clock_mode - Sets clock polarity and phase
* @spi: Pointer to the spi_device structure
*
* Sets the requested clock polarity and phase.
*/
static void cdns_spi_config_clock_mode(struct spi_device *spi)
{
struct cdns_spi *xspi = spi_controller_get_devdata(spi->controller);
u32 ctrl_reg, new_ctrl_reg;
new_ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR);
ctrl_reg = new_ctrl_reg;
/* Set the SPI clock phase and clock polarity */
new_ctrl_reg &= ~(CDNS_SPI_CR_CPHA | CDNS_SPI_CR_CPOL);
if (spi->mode & SPI_CPHA)
new_ctrl_reg |= CDNS_SPI_CR_CPHA;
if (spi->mode & SPI_CPOL)
new_ctrl_reg |= CDNS_SPI_CR_CPOL;
if (new_ctrl_reg != ctrl_reg) {
/*
* Just writing the CR register does not seem to apply the clock
* setting changes. This is problematic when changing the clock
* polarity as it will cause the SPI target to see spurious clock
* transitions. To work around the issue, toggle the ER register.
*/
cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_DISABLE);
cdns_spi_write(xspi, CDNS_SPI_CR, new_ctrl_reg);
cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_ENABLE);
}
}
/**
* cdns_spi_config_clock_freq - Sets clock frequency
* @spi: Pointer to the spi_device structure
* @transfer: Pointer to the spi_transfer structure which provides
* information about next transfer setup parameters
*
* Sets the requested clock frequency.
* Note: If the requested frequency cannot be matched exactly with the
* available prescaler values, the driver picks the closest frequency below
* the requested one for the transfer. If the requested frequency is above or
* below the range supported by the SPI controller, the driver uses the
* highest or lowest frequency the controller supports.
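*
* Worked example (illustrative only, assuming a 166 MHz reference clock):
* a request for 10 MHz steps the divisor field up from 1 until
* ref_clk / (2 << div) no longer exceeds the request, which happens at
* div = 4 (divide by 32), giving an actual bus clock of about 5.19 MHz.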
*/
static void cdns_spi_config_clock_freq(struct spi_device *spi,
struct spi_transfer *transfer)
{
struct cdns_spi *xspi = spi_controller_get_devdata(spi->controller);
u32 ctrl_reg, baud_rate_val;
unsigned long frequency;
frequency = xspi->clk_rate;
ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR);
/* Set the clock frequency */
if (xspi->speed_hz != transfer->speed_hz) {
/* first valid value is 1 */
baud_rate_val = CDNS_SPI_BAUD_DIV_MIN;
while ((baud_rate_val < CDNS_SPI_BAUD_DIV_MAX) &&
(frequency / (2 << baud_rate_val)) > transfer->speed_hz)
baud_rate_val++;
ctrl_reg &= ~CDNS_SPI_CR_BAUD_DIV;
ctrl_reg |= baud_rate_val << CDNS_SPI_BAUD_DIV_SHIFT;
xspi->speed_hz = frequency / (2 << baud_rate_val);
}
cdns_spi_write(xspi, CDNS_SPI_CR, ctrl_reg);
}
/**
* cdns_spi_setup_transfer - Configure SPI controller for specified transfer
* @spi: Pointer to the spi_device structure
* @transfer: Pointer to the spi_transfer structure which provides
* information about next transfer setup parameters
*
* Sets the operational mode of SPI controller for the next SPI transfer and
* sets the requested clock frequency.
*
* Return: Always 0
*/
static int cdns_spi_setup_transfer(struct spi_device *spi,
struct spi_transfer *transfer)
{
struct cdns_spi *xspi = spi_controller_get_devdata(spi->controller);
cdns_spi_config_clock_freq(spi, transfer);
dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u clock speed\n",
__func__, spi->mode, spi->bits_per_word,
xspi->speed_hz);
return 0;
}
/**
* cdns_spi_process_fifo - Fills the TX FIFO and drains the RX FIFO
* @xspi: Pointer to the cdns_spi structure
* @ntx: Number of bytes to pack into the TX FIFO
* @nrx: Number of bytes to drain from the RX FIFO
*/
static void cdns_spi_process_fifo(struct cdns_spi *xspi, int ntx, int nrx)
{
ntx = clamp(ntx, 0, xspi->tx_bytes);
nrx = clamp(nrx, 0, xspi->rx_bytes);
xspi->tx_bytes -= ntx;
xspi->rx_bytes -= nrx;
while (ntx || nrx) {
if (ntx) {
if (xspi->txbuf)
cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++);
else
cdns_spi_write(xspi, CDNS_SPI_TXD, 0);
ntx--;
}
if (nrx) {
u8 data = cdns_spi_read(xspi, CDNS_SPI_RXD);
if (xspi->rxbuf)
*xspi->rxbuf++ = data;
nrx--;
}
}
}
/**
* cdns_spi_irq - Interrupt service routine of the SPI controller
* @irq: IRQ number
* @dev_id: Pointer to the xspi structure
*
* This function handles TX empty and Mode Fault interrupts only.
* On TX empty interrupt this function reads the received data from RX FIFO and
* fills the TX FIFO if there is any data remaining to be transferred.
* On a Mode Fault interrupt this function marks the transfer as completed;
* the SPI subsystem will identify the error because the number of bytes still
* to be transferred is non-zero.
*
* Return: IRQ_HANDLED when handled; IRQ_NONE otherwise.
*/
static irqreturn_t cdns_spi_irq(int irq, void *dev_id)
{
struct spi_controller *ctlr = dev_id;
struct cdns_spi *xspi = spi_controller_get_devdata(ctlr);
irqreturn_t status;
u32 intr_status;
status = IRQ_NONE;
intr_status = cdns_spi_read(xspi, CDNS_SPI_ISR);
cdns_spi_write(xspi, CDNS_SPI_ISR, intr_status);
if (intr_status & CDNS_SPI_IXR_MODF) {
/* Mark the transfer as completed; the SPI subsystem will
* identify the error because the number of remaining bytes to be
* transferred is non-zero.
*/
cdns_spi_write(xspi, CDNS_SPI_IDR, CDNS_SPI_IXR_DEFAULT);
spi_finalize_current_transfer(ctlr);
status = IRQ_HANDLED;
} else if (intr_status & CDNS_SPI_IXR_TXOW) {
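/* The TX FIFO dropped below the watermark. rx_bytes - tx_bytes is the
* number of bytes already pushed into the TX FIFO but not yet drained
* from the RX FIFO; with a threshold above one, some of those bytes may
* still be in flight, so the threshold is subtracted before draining.
*/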
int threshold = cdns_spi_read(xspi, CDNS_SPI_THLD);
int trans_cnt = xspi->rx_bytes - xspi->tx_bytes;
if (threshold > 1)
trans_cnt -= threshold;
/* Set the threshold to one if the number of pending bytes
* is less than half the FIFO depth
*/
if (xspi->tx_bytes < xspi->tx_fifo_depth >> 1)
cdns_spi_write(xspi, CDNS_SPI_THLD, 1);
if (xspi->tx_bytes) {
cdns_spi_process_fifo(xspi, trans_cnt, trans_cnt);
} else {
/* Fixed delay due to controller limitation with
* RX_NEMPTY incorrect status
* Xilinx AR:65885 contains more details
*/
udelay(10);
cdns_spi_process_fifo(xspi, 0, trans_cnt);
cdns_spi_write(xspi, CDNS_SPI_IDR,
CDNS_SPI_IXR_DEFAULT);
spi_finalize_current_transfer(ctlr);
}
status = IRQ_HANDLED;
}
return status;
}
static int cdns_prepare_message(struct spi_controller *ctlr,
struct spi_message *msg)
{
if (!spi_controller_is_target(ctlr))
cdns_spi_config_clock_mode(msg->spi);
return 0;
}
/**
* cdns_transfer_one - Initiates the SPI transfer
* @ctlr: Pointer to spi_controller structure
* @spi: Pointer to the spi_device structure
* @transfer: Pointer to the spi_transfer structure which provides
* information about next transfer parameters
*
* In host mode this function fills the TX FIFO, starts the SPI transfer and
* returns a positive transfer count so that the core will wait for completion.
* In target mode it fills the TX FIFO and waits for the transfer trigger.
*
* Return: Number of bytes transferred in the last transfer
*/
static int cdns_transfer_one(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *transfer)
{
struct cdns_spi *xspi = spi_controller_get_devdata(ctlr);
xspi->txbuf = transfer->tx_buf;
xspi->rxbuf = transfer->rx_buf;
xspi->tx_bytes = transfer->len;
xspi->rx_bytes = transfer->len;
if (!spi_controller_is_target(ctlr)) {
cdns_spi_setup_transfer(spi, transfer);
} else {
/* Set TX empty threshold to half of FIFO depth
* only if TX bytes are more than FIFO depth.
*/
if (xspi->tx_bytes > xspi->tx_fifo_depth)
cdns_spi_write(xspi, CDNS_SPI_THLD, xspi->tx_fifo_depth >> 1);
}
/* If the controller is still busy, writes may be dropped and the
* controller can be left in an inconsistent state, so wait briefly
* (about one byte time) before filling the FIFO.
*/
if (cdns_spi_read(xspi, CDNS_SPI_ISR) & CDNS_SPI_IXR_TXFULL)
udelay(10);
cdns_spi_process_fifo(xspi, xspi->tx_fifo_depth, 0);
spi_transfer_delay_exec(transfer);
cdns_spi_write(xspi, CDNS_SPI_IER, CDNS_SPI_IXR_DEFAULT);
return transfer->len;
}
/**
* cdns_prepare_transfer_hardware - Prepares hardware for transfer.
* @ctlr: Pointer to the spi_controller structure which provides
* information about the controller.
*
* This function enables the SPI host controller.
*
* Return: 0 always
*/
static int cdns_prepare_transfer_hardware(struct spi_controller *ctlr)
{
struct cdns_spi *xspi = spi_controller_get_devdata(ctlr);
cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_ENABLE);
return 0;
}
/**
* cdns_unprepare_transfer_hardware - Relaxes hardware after transfer
* @ctlr: Pointer to the spi_controller structure which provides
* information about the controller.
*
* This function disables the SPI host controller when no target is selected
* and flushes any data still pending in the FIFO.
*
* Return: 0 always
*/
static int cdns_unprepare_transfer_hardware(struct spi_controller *ctlr)
{
struct cdns_spi *xspi = spi_controller_get_devdata(ctlr);
u32 ctrl_reg;
unsigned int cnt = xspi->tx_fifo_depth;
if (spi_controller_is_target(ctlr)) {
while (cnt--)
cdns_spi_read(xspi, CDNS_SPI_RXD);
}
/* Disable the SPI if target is deselected */
ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR);
ctrl_reg = (ctrl_reg & CDNS_SPI_CR_SSCTRL) >> CDNS_SPI_SS_SHIFT;
if (ctrl_reg == CDNS_SPI_NOSS || spi_controller_is_target(ctlr))
cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_DISABLE);
/* Reset to default */
cdns_spi_write(xspi, CDNS_SPI_THLD, 0x1);
return 0;
}
/**
* cdns_spi_detect_fifo_depth - Detect the FIFO depth of the hardware
* @xspi: Pointer to the cdns_spi structure
*
* The depth of the TX FIFO is a synthesis configuration parameter of the SPI
* IP. The FIFO threshold register is sized so that its maximum value can be the
* FIFO size - 1. This is used to detect the size of the FIFO.
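*
* For example, on a controller synthesized with a 128-entry TX FIFO the
* 0xffff write below reads back as 127, so tx_fifo_depth is detected as 128.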
*/
static void cdns_spi_detect_fifo_depth(struct cdns_spi *xspi)
{
/* The MSBs will get truncated giving us the size of the FIFO */
cdns_spi_write(xspi, CDNS_SPI_THLD, 0xffff);
xspi->tx_fifo_depth = cdns_spi_read(xspi, CDNS_SPI_THLD) + 1;
/* Reset to default */
cdns_spi_write(xspi, CDNS_SPI_THLD, 0x1);
}
/**
* cdns_target_abort - Abort target transfer
* @ctlr: Pointer to the spi_controller structure
*
* This function aborts the target transfer when a transfer timeout occurs.
*
* Return: 0 always
*/
static int cdns_target_abort(struct spi_controller *ctlr)
{
struct cdns_spi *xspi = spi_controller_get_devdata(ctlr);
u32 intr_status;
intr_status = cdns_spi_read(xspi, CDNS_SPI_ISR);
cdns_spi_write(xspi, CDNS_SPI_ISR, intr_status);
cdns_spi_write(xspi, CDNS_SPI_IDR, (CDNS_SPI_IXR_MODF | CDNS_SPI_IXR_RXNEMTY));
spi_finalize_current_transfer(ctlr);
return 0;
}
/**
* cdns_spi_probe - Probe method for the SPI driver
* @pdev: Pointer to the platform_device structure
*
* This function initializes the driver data structures and the hardware.
*
* Return: 0 on success and error value on error
*/
static int cdns_spi_probe(struct platform_device *pdev)
{
int ret = 0, irq;
struct spi_controller *ctlr;
struct cdns_spi *xspi;
u32 num_cs;
bool target;
target = of_property_read_bool(pdev->dev.of_node, "spi-slave");
if (target)
ctlr = spi_alloc_target(&pdev->dev, sizeof(*xspi));
else
ctlr = spi_alloc_host(&pdev->dev, sizeof(*xspi));
if (!ctlr)
return -ENOMEM;
xspi = spi_controller_get_devdata(ctlr);
ctlr->dev.of_node = pdev->dev.of_node;
platform_set_drvdata(pdev, ctlr);
xspi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xspi->regs)) {
ret = PTR_ERR(xspi->regs);
goto remove_ctlr;
}
xspi->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(xspi->pclk)) {
dev_err(&pdev->dev, "pclk clock not found.\n");
ret = PTR_ERR(xspi->pclk);
goto remove_ctlr;
}
ret = clk_prepare_enable(xspi->pclk);
if (ret) {
dev_err(&pdev->dev, "Unable to enable APB clock.\n");
goto remove_ctlr;
}
if (!spi_controller_is_target(ctlr)) {
xspi->ref_clk = devm_clk_get(&pdev->dev, "ref_clk");
if (IS_ERR(xspi->ref_clk)) {
dev_err(&pdev->dev, "ref_clk clock not found.\n");
ret = PTR_ERR(xspi->ref_clk);
goto clk_dis_apb;
}
ret = clk_prepare_enable(xspi->ref_clk);
if (ret) {
dev_err(&pdev->dev, "Unable to enable device clock.\n");
goto clk_dis_apb;
}
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
ret = of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);
if (ret < 0)
ctlr->num_chipselect = CDNS_SPI_DEFAULT_NUM_CS;
else
ctlr->num_chipselect = num_cs;
ret = of_property_read_u32(pdev->dev.of_node, "is-decoded-cs",
&xspi->is_decoded_cs);
if (ret < 0)
xspi->is_decoded_cs = 0;
}
cdns_spi_detect_fifo_depth(xspi);
/* SPI controller initializations */
cdns_spi_init_hw(xspi, spi_controller_is_target(ctlr));
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
goto clk_dis_all;
}
ret = devm_request_irq(&pdev->dev, irq, cdns_spi_irq,
0, pdev->name, ctlr);
if (ret != 0) {
ret = -ENXIO;
dev_err(&pdev->dev, "request_irq failed\n");
goto clk_dis_all;
}
ctlr->use_gpio_descriptors = true;
ctlr->prepare_transfer_hardware = cdns_prepare_transfer_hardware;
ctlr->prepare_message = cdns_prepare_message;
ctlr->transfer_one = cdns_transfer_one;
ctlr->unprepare_transfer_hardware = cdns_unprepare_transfer_hardware;
ctlr->mode_bits = SPI_CPOL | SPI_CPHA;
ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
if (!spi_controller_is_target(ctlr)) {
ctlr->mode_bits |= SPI_CS_HIGH;
ctlr->set_cs = cdns_spi_chipselect;
ctlr->auto_runtime_pm = true;
xspi->clk_rate = clk_get_rate(xspi->ref_clk);
/* Set to default valid value */
ctlr->max_speed_hz = xspi->clk_rate / 4;
xspi->speed_hz = ctlr->max_speed_hz;
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
} else {
ctlr->mode_bits |= SPI_NO_CS;
ctlr->target_abort = cdns_target_abort;
}
ret = spi_register_controller(ctlr);
if (ret) {
dev_err(&pdev->dev, "spi_register_controller failed\n");
goto clk_dis_all;
}
return ret;
clk_dis_all:
if (!spi_controller_is_target(ctlr)) {
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_disable(&pdev->dev);
clk_disable_unprepare(xspi->ref_clk);
}
clk_dis_apb:
clk_disable_unprepare(xspi->pclk);
remove_ctlr:
spi_controller_put(ctlr);
return ret;
}
/**
* cdns_spi_remove - Remove method for the SPI driver
* @pdev: Pointer to the platform_device structure
*
* This function is called if a device is physically removed from the system or
* if the driver module is being unloaded. It frees all resources allocated to
* the device.
*/
static void cdns_spi_remove(struct platform_device *pdev)
{
struct spi_controller *ctlr = platform_get_drvdata(pdev);
struct cdns_spi *xspi = spi_controller_get_devdata(ctlr);
cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_DISABLE);
clk_disable_unprepare(xspi->ref_clk);
clk_disable_unprepare(xspi->pclk);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_disable(&pdev->dev);
spi_unregister_controller(ctlr);
}
/**
* cdns_spi_suspend - Suspend method for the SPI driver
* @dev: Address of the platform_device structure
*
* This function disables the SPI controller and
* changes the driver state to "suspend"
*
* Return: 0 on success and error value on error
*/
static int __maybe_unused cdns_spi_suspend(struct device *dev)
{
struct spi_controller *ctlr = dev_get_drvdata(dev);
return spi_controller_suspend(ctlr);
}
/**
* cdns_spi_resume - Resume method for the SPI driver
* @dev: Address of the platform_device structure
*
* This function changes the driver state to "ready"
*
* Return: 0 on success and error value on error
*/
static int __maybe_unused cdns_spi_resume(struct device *dev)
{
struct spi_controller *ctlr = dev_get_drvdata(dev);
struct cdns_spi *xspi = spi_controller_get_devdata(ctlr);
cdns_spi_init_hw(xspi, spi_controller_is_target(ctlr));
return spi_controller_resume(ctlr);
}
/**
* cdns_spi_runtime_resume - Runtime resume method for the SPI driver
* @dev: Address of the platform_device structure
*
* This function enables the clocks
*
* Return: 0 on success and error value on error
*/
static int __maybe_unused cdns_spi_runtime_resume(struct device *dev)
{
struct spi_controller *ctlr = dev_get_drvdata(dev);
struct cdns_spi *xspi = spi_controller_get_devdata(ctlr);
int ret;
ret = clk_prepare_enable(xspi->pclk);
if (ret) {
dev_err(dev, "Cannot enable APB clock.\n");
return ret;
}
ret = clk_prepare_enable(xspi->ref_clk);
if (ret) {
dev_err(dev, "Cannot enable device clock.\n");
clk_disable_unprepare(xspi->pclk);
return ret;
}
return 0;
}
/**
* cdns_spi_runtime_suspend - Runtime suspend method for the SPI driver
* @dev: Address of the platform_device structure
*
* This function disables the clocks
*
* Return: Always 0
*/
static int __maybe_unused cdns_spi_runtime_suspend(struct device *dev)
{
struct spi_controller *ctlr = dev_get_drvdata(dev);
struct cdns_spi *xspi = spi_controller_get_devdata(ctlr);
clk_disable_unprepare(xspi->ref_clk);
clk_disable_unprepare(xspi->pclk);
return 0;
}
static const struct dev_pm_ops cdns_spi_dev_pm_ops = {
SET_RUNTIME_PM_OPS(cdns_spi_runtime_suspend,
cdns_spi_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(cdns_spi_suspend, cdns_spi_resume)
};
static const struct of_device_id cdns_spi_of_match[] = {
{ .compatible = "xlnx,zynq-spi-r1p6" },
{ .compatible = "cdns,spi-r1p6" },
{ /* end of table */ }
};
MODULE_DEVICE_TABLE(of, cdns_spi_of_match);
/* cdns_spi_driver - This structure defines the SPI subsystem platform driver */
static struct platform_driver cdns_spi_driver = {
.probe = cdns_spi_probe,
.remove_new = cdns_spi_remove,
.driver = {
.name = CDNS_SPI_NAME,
.of_match_table = cdns_spi_of_match,
.pm = &cdns_spi_dev_pm_ops,
},
};
module_platform_driver(cdns_spi_driver);
MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Cadence SPI driver");
MODULE_LICENSE("GPL");
/* End of file: drivers/spi/spi-cadence.c (linux-master) */
// SPDX-License-Identifier: GPL-2.0+
// Loongson SPI Support
// Copyright (C) 2023 Loongson Technology Corporation Limited
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spi/spi.h>
#include "spi-loongson.h"
static inline void loongson_spi_write_reg(struct loongson_spi *spi, unsigned char reg,
unsigned char data)
{
writeb(data, spi->base + reg);
}
static inline char loongson_spi_read_reg(struct loongson_spi *spi, unsigned char reg)
{
return readb(spi->base + reg);
}
static void loongson_spi_set_cs(struct spi_device *spi, bool en)
{
int cs;
unsigned char mask = (BIT(4) | BIT(0)) << spi_get_chipselect(spi, 0);
unsigned char val = en ? mask : (BIT(0) << spi_get_chipselect(spi, 0));
struct loongson_spi *loongson_spi = spi_controller_get_devdata(spi->controller);
cs = loongson_spi_read_reg(loongson_spi, LOONGSON_SPI_SFCS_REG) & ~mask;
loongson_spi_write_reg(loongson_spi, LOONGSON_SPI_SFCS_REG, val | cs);
}
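/*
* Program the SPI clock divider: the requested rate is turned into a divider
* clamped to 2..4096, rdiv[] maps fls(div - 1) to a 4-bit divider code, and
* that code is split between SPCR[1:0] (low bits) and SPER[1:0] (high bits).
*/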
static void loongson_spi_set_clk(struct loongson_spi *loongson_spi, unsigned int hz)
{
unsigned char val;
unsigned int div, div_tmp;
static const char rdiv[12] = {0, 1, 4, 2, 3, 5, 6, 7, 8, 9, 10, 11};
div = clamp_val(DIV_ROUND_UP_ULL(loongson_spi->clk_rate, hz), 2, 4096);
div_tmp = rdiv[fls(div - 1)];
loongson_spi->spcr = (div_tmp & GENMASK(1, 0)) >> 0;
loongson_spi->sper = (div_tmp & GENMASK(3, 2)) >> 2;
val = loongson_spi_read_reg(loongson_spi, LOONGSON_SPI_SPCR_REG);
val &= ~GENMASK(1, 0);
loongson_spi_write_reg(loongson_spi, LOONGSON_SPI_SPCR_REG, val |
loongson_spi->spcr);
val = loongson_spi_read_reg(loongson_spi, LOONGSON_SPI_SPER_REG);
val &= ~GENMASK(1, 0);
loongson_spi_write_reg(loongson_spi, LOONGSON_SPI_SPER_REG, val |
loongson_spi->sper);
loongson_spi->hz = hz;
}
static void loongson_spi_set_mode(struct loongson_spi *loongson_spi,
struct spi_device *spi)
{
unsigned char val;
val = loongson_spi_read_reg(loongson_spi, LOONGSON_SPI_SPCR_REG);
val &= ~(LOONGSON_SPI_SPCR_CPOL | LOONGSON_SPI_SPCR_CPHA);
if (spi->mode & SPI_CPOL)
val |= LOONGSON_SPI_SPCR_CPOL;
if (spi->mode & SPI_CPHA)
val |= LOONGSON_SPI_SPCR_CPHA;
loongson_spi_write_reg(loongson_spi, LOONGSON_SPI_SPCR_REG, val);
loongson_spi->mode |= spi->mode;
}
static int loongson_spi_update_state(struct loongson_spi *loongson_spi,
struct spi_device *spi, struct spi_transfer *t)
{
if (t && loongson_spi->hz != t->speed_hz)
loongson_spi_set_clk(loongson_spi, t->speed_hz);
if ((spi->mode ^ loongson_spi->mode) & SPI_MODE_X_MASK)
loongson_spi_set_mode(loongson_spi, spi);
return 0;
}
static int loongson_spi_setup(struct spi_device *spi)
{
struct loongson_spi *loongson_spi;
loongson_spi = spi_controller_get_devdata(spi->controller);
if (spi->bits_per_word % 8)
return -EINVAL;
if (spi_get_chipselect(spi, 0) >= spi->controller->num_chipselect)
return -EINVAL;
loongson_spi->hz = 0;
loongson_spi_set_cs(spi, true);
return 0;
}
static int loongson_spi_write_read_8bit(struct spi_device *spi, const u8 **tx_buf,
u8 **rx_buf, unsigned int num)
{
int ret;
struct loongson_spi *loongson_spi = spi_controller_get_devdata(spi->controller);
if (tx_buf && *tx_buf)
loongson_spi_write_reg(loongson_spi, LOONGSON_SPI_FIFO_REG, *((*tx_buf)++));
else
loongson_spi_write_reg(loongson_spi, LOONGSON_SPI_FIFO_REG, 0);
ret = readb_poll_timeout(loongson_spi->base + LOONGSON_SPI_SPSR_REG,
loongson_spi->spsr, (loongson_spi->spsr &
LOONGSON_SPI_SPSR_RFEMPTY) != LOONGSON_SPI_SPSR_RFEMPTY,
1, USEC_PER_MSEC);
if (rx_buf && *rx_buf)
*(*rx_buf)++ = loongson_spi_read_reg(loongson_spi, LOONGSON_SPI_FIFO_REG);
else
loongson_spi_read_reg(loongson_spi, LOONGSON_SPI_FIFO_REG);
return ret;
}
static int loongson_spi_write_read(struct spi_device *spi, struct spi_transfer *xfer)
{
int ret;
unsigned int count;
const u8 *tx = xfer->tx_buf;
u8 *rx = xfer->rx_buf;
count = xfer->len;
do {
ret = loongson_spi_write_read_8bit(spi, &tx, &rx, count);
if (ret)
break;
} while (--count);
return ret;
}
static int loongson_spi_prepare_message(struct spi_controller *ctlr, struct spi_message *m)
{
struct loongson_spi *loongson_spi = spi_controller_get_devdata(ctlr);
loongson_spi->para = loongson_spi_read_reg(loongson_spi, LOONGSON_SPI_PARA_REG);
loongson_spi_write_reg(loongson_spi, LOONGSON_SPI_PARA_REG, loongson_spi->para &
~LOONGSON_SPI_PARA_MEM_EN);
return 0;
}
static int loongson_spi_transfer_one(struct spi_controller *ctrl, struct spi_device *spi,
struct spi_transfer *xfer)
{
struct loongson_spi *loongson_spi = spi_controller_get_devdata(spi->controller);
loongson_spi_update_state(loongson_spi, spi, xfer);
if (xfer->len)
return loongson_spi_write_read(spi, xfer);
return 0;
}
static int loongson_spi_unprepare_message(struct spi_controller *ctrl, struct spi_message *m)
{
struct loongson_spi *loongson_spi = spi_controller_get_devdata(ctrl);
loongson_spi_write_reg(loongson_spi, LOONGSON_SPI_PARA_REG, loongson_spi->para);
return 0;
}
static void loongson_spi_reginit(struct loongson_spi *loongson_spi_dev)
{
unsigned char val;
val = loongson_spi_read_reg(loongson_spi_dev, LOONGSON_SPI_SPCR_REG);
val &= ~LOONGSON_SPI_SPCR_SPE;
loongson_spi_write_reg(loongson_spi_dev, LOONGSON_SPI_SPCR_REG, val);
loongson_spi_write_reg(loongson_spi_dev, LOONGSON_SPI_SPSR_REG,
(LOONGSON_SPI_SPSR_SPIF | LOONGSON_SPI_SPSR_WCOL));
val = loongson_spi_read_reg(loongson_spi_dev, LOONGSON_SPI_SPCR_REG);
val |= LOONGSON_SPI_SPCR_SPE;
loongson_spi_write_reg(loongson_spi_dev, LOONGSON_SPI_SPCR_REG, val);
}
int loongson_spi_init_controller(struct device *dev, void __iomem *regs)
{
struct spi_controller *controller;
struct loongson_spi *spi;
struct clk *clk;
controller = devm_spi_alloc_host(dev, sizeof(struct loongson_spi));
if (controller == NULL)
return -ENOMEM;
controller->mode_bits = SPI_MODE_X_MASK | SPI_CS_HIGH;
controller->setup = loongson_spi_setup;
controller->prepare_message = loongson_spi_prepare_message;
controller->transfer_one = loongson_spi_transfer_one;
controller->unprepare_message = loongson_spi_unprepare_message;
controller->set_cs = loongson_spi_set_cs;
controller->num_chipselect = 4;
device_set_node(&controller->dev, dev_fwnode(dev));
dev_set_drvdata(dev, controller);
spi = spi_controller_get_devdata(controller);
spi->base = regs;
spi->controller = controller;
clk = devm_clk_get_optional(dev, NULL);
if (IS_ERR(clk))
return dev_err_probe(dev, PTR_ERR(clk), "unable to get clock\n");
spi->clk_rate = clk_get_rate(clk);
loongson_spi_reginit(spi);
spi->mode = 0;
return devm_spi_register_controller(dev, controller);
}
EXPORT_SYMBOL_NS_GPL(loongson_spi_init_controller, SPI_LOONGSON_CORE);
static int __maybe_unused loongson_spi_suspend(struct device *dev)
{
struct loongson_spi *loongson_spi;
struct spi_controller *controller;
controller = dev_get_drvdata(dev);
spi_controller_suspend(controller);
loongson_spi = spi_controller_get_devdata(controller);
loongson_spi->spcr = loongson_spi_read_reg(loongson_spi, LOONGSON_SPI_SPCR_REG);
loongson_spi->sper = loongson_spi_read_reg(loongson_spi, LOONGSON_SPI_SPER_REG);
loongson_spi->spsr = loongson_spi_read_reg(loongson_spi, LOONGSON_SPI_SPSR_REG);
loongson_spi->para = loongson_spi_read_reg(loongson_spi, LOONGSON_SPI_PARA_REG);
loongson_spi->sfcs = loongson_spi_read_reg(loongson_spi, LOONGSON_SPI_SFCS_REG);
loongson_spi->timi = loongson_spi_read_reg(loongson_spi, LOONGSON_SPI_TIMI_REG);
return 0;
}
static int __maybe_unused loongson_spi_resume(struct device *dev)
{
struct loongson_spi *loongson_spi;
struct spi_controller *controller;
controller = dev_get_drvdata(dev);
loongson_spi = spi_controller_get_devdata(controller);
loongson_spi_write_reg(loongson_spi, LOONGSON_SPI_SPCR_REG, loongson_spi->spcr);
loongson_spi_write_reg(loongson_spi, LOONGSON_SPI_SPER_REG, loongson_spi->sper);
loongson_spi_write_reg(loongson_spi, LOONGSON_SPI_SPSR_REG, loongson_spi->spsr);
loongson_spi_write_reg(loongson_spi, LOONGSON_SPI_PARA_REG, loongson_spi->para);
loongson_spi_write_reg(loongson_spi, LOONGSON_SPI_SFCS_REG, loongson_spi->sfcs);
loongson_spi_write_reg(loongson_spi, LOONGSON_SPI_TIMI_REG, loongson_spi->timi);
spi_controller_resume(controller);
return 0;
}
const struct dev_pm_ops loongson_spi_dev_pm_ops = {
.suspend = loongson_spi_suspend,
.resume = loongson_spi_resume,
};
EXPORT_SYMBOL_NS_GPL(loongson_spi_dev_pm_ops, SPI_LOONGSON_CORE);
MODULE_DESCRIPTION("Loongson SPI core driver");
MODULE_LICENSE("GPL");
/* End of file: drivers/spi/spi-loongson-core.c (linux-master) */
// SPDX-License-Identifier: GPL-2.0+
//
// Freescale MXS SPI master driver
//
// Copyright 2012 DENX Software Engineering, GmbH.
// Copyright 2012 Freescale Semiconductor, Inc.
// Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
//
// Rework and transition to new API by:
// Marek Vasut <[email protected]>
//
// Based on previous attempt by:
// Fabio Estevam <[email protected]>
//
// Based on code from U-Boot bootloader by:
// Marek Vasut <[email protected]>
//
// Based on spi-stmp.c, which is:
// Author: Dmitry Pervushin <[email protected]>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/highmem.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/completion.h>
#include <linux/pinctrl/consumer.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/module.h>
#include <linux/stmp_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/mxs-spi.h>
#include <trace/events/spi.h>
#define DRIVER_NAME "mxs-spi"
/* Use a 10 s timeout for very long transfers; it should suffice. */
#define SSP_TIMEOUT 10000
#define SG_MAXLEN 0xff00
/*
* Flags for txrx functions. More efficient than using an argument register for
* each one.
*/
#define TXRX_WRITE (1<<0) /* This is a write */
#define TXRX_DEASSERT_CS (1<<1) /* De-assert CS at end of txrx */
struct mxs_spi {
struct mxs_ssp ssp;
struct completion c;
unsigned int sck; /* Rate requested (vs actual) */
};
static int mxs_spi_setup_transfer(struct spi_device *dev,
const struct spi_transfer *t)
{
struct mxs_spi *spi = spi_master_get_devdata(dev->master);
struct mxs_ssp *ssp = &spi->ssp;
const unsigned int hz = min(dev->max_speed_hz, t->speed_hz);
if (hz == 0) {
dev_err(&dev->dev, "SPI clock rate of zero not allowed\n");
return -EINVAL;
}
if (hz != spi->sck) {
mxs_ssp_set_clk_rate(ssp, hz);
/*
* Save requested rate, hz, rather than the actual rate,
* ssp->clk_rate. Otherwise we would set the rate every transfer
* when the actual rate is not quite the same as requested rate.
*/
spi->sck = hz;
/*
* Perhaps we should return an error if the actual clock is
* nowhere close to what was requested?
*/
}
writel(BM_SSP_CTRL0_LOCK_CS,
ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
writel(BF_SSP_CTRL1_SSP_MODE(BV_SSP_CTRL1_SSP_MODE__SPI) |
BF_SSP_CTRL1_WORD_LENGTH(BV_SSP_CTRL1_WORD_LENGTH__EIGHT_BITS) |
((dev->mode & SPI_CPOL) ? BM_SSP_CTRL1_POLARITY : 0) |
((dev->mode & SPI_CPHA) ? BM_SSP_CTRL1_PHASE : 0),
ssp->base + HW_SSP_CTRL1(ssp));
writel(0x0, ssp->base + HW_SSP_CMD0);
writel(0x0, ssp->base + HW_SSP_CMD1);
return 0;
}
static u32 mxs_spi_cs_to_reg(unsigned cs)
{
u32 select = 0;
/*
* i.MX28 Datasheet: 17.10.1: HW_SSP_CTRL0
*
* The bits BM_SSP_CTRL0_WAIT_FOR_CMD and BM_SSP_CTRL0_WAIT_FOR_IRQ
* in HW_SSP_CTRL0 register do have multiple usage, please refer to
* the datasheet for further details. In SPI mode, they are used to
* toggle the chip-select lines (nCS pins).
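*
* For example: cs 0 leaves both bits clear, cs 1 sets WAIT_FOR_CMD,
* cs 2 sets WAIT_FOR_IRQ and cs 3 sets both bits.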
*/
if (cs & 1)
select |= BM_SSP_CTRL0_WAIT_FOR_CMD;
if (cs & 2)
select |= BM_SSP_CTRL0_WAIT_FOR_IRQ;
return select;
}
static int mxs_ssp_wait(struct mxs_spi *spi, int offset, int mask, bool set)
{
const unsigned long timeout = jiffies + msecs_to_jiffies(SSP_TIMEOUT);
struct mxs_ssp *ssp = &spi->ssp;
u32 reg;
do {
reg = readl_relaxed(ssp->base + offset);
if (!set)
reg = ~reg;
reg &= mask;
if (reg == mask)
return 0;
} while (time_before(jiffies, timeout));
return -ETIMEDOUT;
}
static void mxs_ssp_dma_irq_callback(void *param)
{
struct mxs_spi *spi = param;
complete(&spi->c);
}
static irqreturn_t mxs_ssp_irq_handler(int irq, void *dev_id)
{
struct mxs_ssp *ssp = dev_id;
dev_err(ssp->dev, "%s[%i] CTRL1=%08x STATUS=%08x\n",
__func__, __LINE__,
readl(ssp->base + HW_SSP_CTRL1(ssp)),
readl(ssp->base + HW_SSP_STATUS(ssp)));
return IRQ_HANDLED;
}
static int mxs_spi_txrx_dma(struct mxs_spi *spi,
unsigned char *buf, int len,
unsigned int flags)
{
struct mxs_ssp *ssp = &spi->ssp;
struct dma_async_tx_descriptor *desc = NULL;
const bool vmalloced_buf = is_vmalloc_addr(buf);
const int desc_len = vmalloced_buf ? PAGE_SIZE : SG_MAXLEN;
const int sgs = DIV_ROUND_UP(len, desc_len);
int sg_count;
int min, ret;
u32 ctrl0;
struct page *vm_page;
struct {
u32 pio[4];
struct scatterlist sg;
} *dma_xfer;
if (!len)
return -EINVAL;
dma_xfer = kcalloc(sgs, sizeof(*dma_xfer), GFP_KERNEL);
if (!dma_xfer)
return -ENOMEM;
reinit_completion(&spi->c);
/* Chip select was already programmed into CTRL0 */
ctrl0 = readl(ssp->base + HW_SSP_CTRL0);
ctrl0 &= ~(BM_SSP_CTRL0_XFER_COUNT | BM_SSP_CTRL0_IGNORE_CRC |
BM_SSP_CTRL0_READ);
ctrl0 |= BM_SSP_CTRL0_DATA_XFER;
if (!(flags & TXRX_WRITE))
ctrl0 |= BM_SSP_CTRL0_READ;
/* Queue the DMA data transfer. */
for (sg_count = 0; sg_count < sgs; sg_count++) {
/* Prepare the transfer descriptor. */
min = min(len, desc_len);
/*
* De-assert CS on last segment if flag is set (i.e., no more
* transfers will follow)
*/
if ((sg_count + 1 == sgs) && (flags & TXRX_DEASSERT_CS))
ctrl0 |= BM_SSP_CTRL0_IGNORE_CRC;
if (ssp->devid == IMX23_SSP) {
ctrl0 &= ~BM_SSP_CTRL0_XFER_COUNT;
ctrl0 |= min;
}
dma_xfer[sg_count].pio[0] = ctrl0;
dma_xfer[sg_count].pio[3] = min;
if (vmalloced_buf) {
vm_page = vmalloc_to_page(buf);
if (!vm_page) {
ret = -ENOMEM;
goto err_vmalloc;
}
sg_init_table(&dma_xfer[sg_count].sg, 1);
sg_set_page(&dma_xfer[sg_count].sg, vm_page,
min, offset_in_page(buf));
} else {
sg_init_one(&dma_xfer[sg_count].sg, buf, min);
}
ret = dma_map_sg(ssp->dev, &dma_xfer[sg_count].sg, 1,
(flags & TXRX_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
len -= min;
buf += min;
/* Queue the PIO register write transfer. */
desc = dmaengine_prep_slave_sg(ssp->dmach,
(struct scatterlist *)dma_xfer[sg_count].pio,
(ssp->devid == IMX23_SSP) ? 1 : 4,
DMA_TRANS_NONE,
sg_count ? DMA_PREP_INTERRUPT : 0);
if (!desc) {
dev_err(ssp->dev,
"Failed to get PIO reg. write descriptor.\n");
ret = -EINVAL;
goto err_mapped;
}
desc = dmaengine_prep_slave_sg(ssp->dmach,
&dma_xfer[sg_count].sg, 1,
(flags & TXRX_WRITE) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
dev_err(ssp->dev,
"Failed to get DMA data write descriptor.\n");
ret = -EINVAL;
goto err_mapped;
}
}
/*
* The last descriptor must have this callback,
* to finish the DMA transaction.
*/
desc->callback = mxs_ssp_dma_irq_callback;
desc->callback_param = spi;
/* Start the transfer. */
dmaengine_submit(desc);
dma_async_issue_pending(ssp->dmach);
if (!wait_for_completion_timeout(&spi->c,
msecs_to_jiffies(SSP_TIMEOUT))) {
dev_err(ssp->dev, "DMA transfer timeout\n");
ret = -ETIMEDOUT;
dmaengine_terminate_all(ssp->dmach);
goto err_vmalloc;
}
ret = 0;
err_vmalloc:
while (--sg_count >= 0) {
err_mapped:
dma_unmap_sg(ssp->dev, &dma_xfer[sg_count].sg, 1,
(flags & TXRX_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
}
kfree(dma_xfer);
return ret;
}
static int mxs_spi_txrx_pio(struct mxs_spi *spi,
unsigned char *buf, int len,
unsigned int flags)
{
struct mxs_ssp *ssp = &spi->ssp;
writel(BM_SSP_CTRL0_IGNORE_CRC,
ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
while (len--) {
if (len == 0 && (flags & TXRX_DEASSERT_CS))
writel(BM_SSP_CTRL0_IGNORE_CRC,
ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
if (ssp->devid == IMX23_SSP) {
writel(BM_SSP_CTRL0_XFER_COUNT,
ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
writel(1,
ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
} else {
writel(1, ssp->base + HW_SSP_XFER_SIZE);
}
if (flags & TXRX_WRITE)
writel(BM_SSP_CTRL0_READ,
ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
else
writel(BM_SSP_CTRL0_READ,
ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
writel(BM_SSP_CTRL0_RUN,
ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
if (mxs_ssp_wait(spi, HW_SSP_CTRL0, BM_SSP_CTRL0_RUN, 1))
return -ETIMEDOUT;
if (flags & TXRX_WRITE)
writel(*buf, ssp->base + HW_SSP_DATA(ssp));
writel(BM_SSP_CTRL0_DATA_XFER,
ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
if (!(flags & TXRX_WRITE)) {
if (mxs_ssp_wait(spi, HW_SSP_STATUS(ssp),
BM_SSP_STATUS_FIFO_EMPTY, 0))
return -ETIMEDOUT;
*buf = (readl(ssp->base + HW_SSP_DATA(ssp)) & 0xff);
}
if (mxs_ssp_wait(spi, HW_SSP_CTRL0, BM_SSP_CTRL0_RUN, 0))
return -ETIMEDOUT;
buf++;
}
if (len <= 0)
return 0;
return -ETIMEDOUT;
}
static int mxs_spi_transfer_one(struct spi_master *master,
struct spi_message *m)
{
struct mxs_spi *spi = spi_master_get_devdata(master);
struct mxs_ssp *ssp = &spi->ssp;
struct spi_transfer *t;
unsigned int flag;
int status = 0;
/* Program CS register bits here, it will be used for all transfers. */
writel(BM_SSP_CTRL0_WAIT_FOR_CMD | BM_SSP_CTRL0_WAIT_FOR_IRQ,
ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
writel(mxs_spi_cs_to_reg(spi_get_chipselect(m->spi, 0)),
ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
list_for_each_entry(t, &m->transfers, transfer_list) {
trace_spi_transfer_start(m, t);
status = mxs_spi_setup_transfer(m->spi, t);
if (status)
break;
/* De-assert on last transfer, inverted by cs_change flag */
flag = (&t->transfer_list == m->transfers.prev) ^ t->cs_change ?
TXRX_DEASSERT_CS : 0;
/*
* Small blocks can be transferred via PIO.
* Measured by empiric means:
*
* dd if=/dev/mtdblock0 of=/dev/null bs=1024k count=1
*
* DMA only: 2.164808 seconds, 473.0KB/s
* Combined: 1.676276 seconds, 610.9KB/s
*/
if (t->len < 32) {
writel(BM_SSP_CTRL1_DMA_ENABLE,
ssp->base + HW_SSP_CTRL1(ssp) +
STMP_OFFSET_REG_CLR);
if (t->tx_buf)
status = mxs_spi_txrx_pio(spi,
(void *)t->tx_buf,
t->len, flag | TXRX_WRITE);
if (t->rx_buf)
status = mxs_spi_txrx_pio(spi,
t->rx_buf, t->len,
flag);
} else {
writel(BM_SSP_CTRL1_DMA_ENABLE,
ssp->base + HW_SSP_CTRL1(ssp) +
STMP_OFFSET_REG_SET);
if (t->tx_buf)
status = mxs_spi_txrx_dma(spi,
(void *)t->tx_buf, t->len,
flag | TXRX_WRITE);
if (t->rx_buf)
status = mxs_spi_txrx_dma(spi,
t->rx_buf, t->len,
flag);
}
trace_spi_transfer_stop(m, t);
if (status) {
stmp_reset_block(ssp->base);
break;
}
m->actual_length += t->len;
}
m->status = status;
spi_finalize_current_message(master);
return status;
}
static int mxs_spi_runtime_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct mxs_spi *spi = spi_master_get_devdata(master);
struct mxs_ssp *ssp = &spi->ssp;
int ret;
clk_disable_unprepare(ssp->clk);
ret = pinctrl_pm_select_idle_state(dev);
if (ret) {
int ret2 = clk_prepare_enable(ssp->clk);
if (ret2)
dev_warn(dev, "Failed to reenable clock after failing pinctrl request (pinctrl: %d, clk: %d)\n",
ret, ret2);
}
return ret;
}
static int mxs_spi_runtime_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct mxs_spi *spi = spi_master_get_devdata(master);
struct mxs_ssp *ssp = &spi->ssp;
int ret;
ret = pinctrl_pm_select_default_state(dev);
if (ret)
return ret;
ret = clk_prepare_enable(ssp->clk);
if (ret)
pinctrl_pm_select_idle_state(dev);
return ret;
}
static int __maybe_unused mxs_spi_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
int ret;
ret = spi_master_suspend(master);
if (ret)
return ret;
if (!pm_runtime_suspended(dev))
return mxs_spi_runtime_suspend(dev);
else
return 0;
}
static int __maybe_unused mxs_spi_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
int ret;
if (!pm_runtime_suspended(dev))
ret = mxs_spi_runtime_resume(dev);
else
ret = 0;
if (ret)
return ret;
ret = spi_master_resume(master);
if (ret < 0 && !pm_runtime_suspended(dev))
mxs_spi_runtime_suspend(dev);
return ret;
}
static const struct dev_pm_ops mxs_spi_pm = {
SET_RUNTIME_PM_OPS(mxs_spi_runtime_suspend,
mxs_spi_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(mxs_spi_suspend, mxs_spi_resume)
};
static const struct of_device_id mxs_spi_dt_ids[] = {
{ .compatible = "fsl,imx23-spi", .data = (void *) IMX23_SSP, },
{ .compatible = "fsl,imx28-spi", .data = (void *) IMX28_SSP, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxs_spi_dt_ids);
static int mxs_spi_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id =
of_match_device(mxs_spi_dt_ids, &pdev->dev);
struct device_node *np = pdev->dev.of_node;
struct spi_master *master;
struct mxs_spi *spi;
struct mxs_ssp *ssp;
struct clk *clk;
void __iomem *base;
int devid, clk_freq;
int ret = 0, irq_err;
/*
* Default clock speed for the SPI core. 160MHz seems to
* work reasonably well with most SPI flashes, so use this
* as a default. Override with "clock-frequency" DT prop.
*/
const int clk_freq_default = 160000000;
irq_err = platform_get_irq(pdev, 0);
if (irq_err < 0)
return irq_err;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(clk))
return PTR_ERR(clk);
devid = (enum mxs_ssp_id) of_id->data;
ret = of_property_read_u32(np, "clock-frequency",
&clk_freq);
if (ret)
clk_freq = clk_freq_default;
master = spi_alloc_master(&pdev->dev, sizeof(*spi));
if (!master)
return -ENOMEM;
platform_set_drvdata(pdev, master);
master->transfer_one_message = mxs_spi_transfer_one;
master->bits_per_word_mask = SPI_BPW_MASK(8);
master->mode_bits = SPI_CPOL | SPI_CPHA;
master->num_chipselect = 3;
master->dev.of_node = np;
master->flags = SPI_CONTROLLER_HALF_DUPLEX;
master->auto_runtime_pm = true;
spi = spi_master_get_devdata(master);
ssp = &spi->ssp;
ssp->dev = &pdev->dev;
ssp->clk = clk;
ssp->base = base;
ssp->devid = devid;
init_completion(&spi->c);
ret = devm_request_irq(&pdev->dev, irq_err, mxs_ssp_irq_handler, 0,
dev_name(&pdev->dev), ssp);
if (ret)
goto out_master_free;
ssp->dmach = dma_request_chan(&pdev->dev, "rx-tx");
if (IS_ERR(ssp->dmach)) {
dev_err(ssp->dev, "Failed to request DMA\n");
ret = PTR_ERR(ssp->dmach);
goto out_master_free;
}
pm_runtime_enable(ssp->dev);
if (!pm_runtime_enabled(ssp->dev)) {
ret = mxs_spi_runtime_resume(ssp->dev);
if (ret < 0) {
dev_err(ssp->dev, "runtime resume failed\n");
goto out_dma_release;
}
}
ret = pm_runtime_resume_and_get(ssp->dev);
if (ret < 0) {
dev_err(ssp->dev, "runtime_get_sync failed\n");
goto out_pm_runtime_disable;
}
clk_set_rate(ssp->clk, clk_freq);
ret = stmp_reset_block(ssp->base);
if (ret)
goto out_pm_runtime_put;
ret = devm_spi_register_master(&pdev->dev, master);
if (ret) {
dev_err(&pdev->dev, "Cannot register SPI master, %d\n", ret);
goto out_pm_runtime_put;
}
pm_runtime_put(ssp->dev);
return 0;
out_pm_runtime_put:
pm_runtime_put(ssp->dev);
out_pm_runtime_disable:
pm_runtime_disable(ssp->dev);
out_dma_release:
dma_release_channel(ssp->dmach);
out_master_free:
spi_master_put(master);
return ret;
}
static void mxs_spi_remove(struct platform_device *pdev)
{
struct spi_master *master;
struct mxs_spi *spi;
struct mxs_ssp *ssp;
master = platform_get_drvdata(pdev);
spi = spi_master_get_devdata(master);
ssp = &spi->ssp;
pm_runtime_disable(&pdev->dev);
if (!pm_runtime_status_suspended(&pdev->dev))
mxs_spi_runtime_suspend(&pdev->dev);
dma_release_channel(ssp->dmach);
}
static struct platform_driver mxs_spi_driver = {
.probe = mxs_spi_probe,
.remove_new = mxs_spi_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = mxs_spi_dt_ids,
.pm = &mxs_spi_pm,
},
};
module_platform_driver(mxs_spi_driver);
MODULE_AUTHOR("Marek Vasut <[email protected]>");
MODULE_DESCRIPTION("MXS SPI master driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxs-spi");
/* End of file: drivers/spi/spi-mxs.c (linux-master) */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Xtensa xtfpga SPI controller driver
*
* Copyright (c) 2014 Cadence Design Systems Inc.
*/
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#define XTFPGA_SPI_NAME "xtfpga_spi"
#define XTFPGA_SPI_START 0x0
#define XTFPGA_SPI_BUSY 0x4
#define XTFPGA_SPI_DATA 0x8
#define BUSY_WAIT_US 100
struct xtfpga_spi {
struct spi_bitbang bitbang;
void __iomem *regs;
u32 data;
unsigned data_sz;
};
static inline void xtfpga_spi_write32(const struct xtfpga_spi *spi,
unsigned addr, u32 val)
{
__raw_writel(val, spi->regs + addr);
}
static inline unsigned int xtfpga_spi_read32(const struct xtfpga_spi *spi,
unsigned addr)
{
return __raw_readl(spi->regs + addr);
}
static inline void xtfpga_spi_wait_busy(struct xtfpga_spi *xspi)
{
unsigned i;
for (i = 0; xtfpga_spi_read32(xspi, XTFPGA_SPI_BUSY) &&
i < BUSY_WAIT_US; ++i)
udelay(1);
WARN_ON_ONCE(i == BUSY_WAIT_US);
}
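/*
* Bits are accumulated in a software shift register and pushed to the
* hardware as 16-bit words once at least 16 bits have been collected. The
* controller is transmit-only, so the returned receive word is always 0.
*/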
static u32 xtfpga_spi_txrx_word(struct spi_device *spi, unsigned nsecs,
u32 v, u8 bits, unsigned flags)
{
struct xtfpga_spi *xspi = spi_master_get_devdata(spi->master);
xspi->data = (xspi->data << bits) | (v & GENMASK(bits - 1, 0));
xspi->data_sz += bits;
if (xspi->data_sz >= 16) {
xtfpga_spi_write32(xspi, XTFPGA_SPI_DATA,
xspi->data >> (xspi->data_sz - 16));
xspi->data_sz -= 16;
xtfpga_spi_write32(xspi, XTFPGA_SPI_START, 1);
xtfpga_spi_wait_busy(xspi);
xtfpga_spi_write32(xspi, XTFPGA_SPI_START, 0);
}
return 0;
}
static void xtfpga_spi_chipselect(struct spi_device *spi, int is_on)
{
struct xtfpga_spi *xspi = spi_master_get_devdata(spi->master);
WARN_ON(xspi->data_sz != 0);
xspi->data_sz = 0;
}
static int xtfpga_spi_probe(struct platform_device *pdev)
{
struct xtfpga_spi *xspi;
int ret;
struct spi_master *master;
master = devm_spi_alloc_master(&pdev->dev, sizeof(struct xtfpga_spi));
if (!master)
return -ENOMEM;
master->flags = SPI_CONTROLLER_NO_RX;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16);
master->bus_num = pdev->dev.id;
master->dev.of_node = pdev->dev.of_node;
xspi = spi_master_get_devdata(master);
xspi->bitbang.master = master;
xspi->bitbang.chipselect = xtfpga_spi_chipselect;
xspi->bitbang.txrx_word[SPI_MODE_0] = xtfpga_spi_txrx_word;
xspi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xspi->regs))
return PTR_ERR(xspi->regs);
xtfpga_spi_write32(xspi, XTFPGA_SPI_START, 0);
usleep_range(1000, 2000);
if (xtfpga_spi_read32(xspi, XTFPGA_SPI_BUSY)) {
dev_err(&pdev->dev, "Device stuck in busy state\n");
return -EBUSY;
}
ret = spi_bitbang_start(&xspi->bitbang);
if (ret < 0) {
dev_err(&pdev->dev, "spi_bitbang_start failed\n");
return ret;
}
platform_set_drvdata(pdev, master);
return 0;
}
static void xtfpga_spi_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct xtfpga_spi *xspi = spi_master_get_devdata(master);
spi_bitbang_stop(&xspi->bitbang);
spi_master_put(master);
}
MODULE_ALIAS("platform:" XTFPGA_SPI_NAME);
#ifdef CONFIG_OF
static const struct of_device_id xtfpga_spi_of_match[] = {
{ .compatible = "cdns,xtfpga-spi", },
{}
};
MODULE_DEVICE_TABLE(of, xtfpga_spi_of_match);
#endif
static struct platform_driver xtfpga_spi_driver = {
.probe = xtfpga_spi_probe,
.remove_new = xtfpga_spi_remove,
.driver = {
.name = XTFPGA_SPI_NAME,
.of_match_table = of_match_ptr(xtfpga_spi_of_match),
},
};
module_platform_driver(xtfpga_spi_driver);
MODULE_AUTHOR("Max Filippov <[email protected]>");
MODULE_DESCRIPTION("xtensa xtfpga SPI driver");
MODULE_LICENSE("GPL");
/* End of file: drivers/spi/spi-xtensa-xtfpga.c (linux-master) */
// SPDX-License-Identifier: GPL-2.0
// PCI1xxxx SPI driver
// Copyright (C) 2022 Microchip Technology Inc.
// Authors: Tharun Kumar P <[email protected]>
// Kumaravel Thiagarajan <[email protected]>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
#define DRV_NAME "spi-pci1xxxx"
#define SYS_FREQ_DEFAULT (62500000)
#define PCI1XXXX_SPI_MAX_CLOCK_HZ (30000000)
#define PCI1XXXX_SPI_CLK_20MHZ (20000000)
#define PCI1XXXX_SPI_CLK_15MHZ (15000000)
#define PCI1XXXX_SPI_CLK_12MHZ (12000000)
#define PCI1XXXX_SPI_CLK_10MHZ (10000000)
#define PCI1XXXX_SPI_MIN_CLOCK_HZ (2000000)
#define PCI1XXXX_SPI_BUFFER_SIZE (320)
#define SPI_MST_CTL_DEVSEL_MASK (GENMASK(27, 25))
#define SPI_MST_CTL_CMD_LEN_MASK (GENMASK(16, 8))
#define SPI_MST_CTL_SPEED_MASK (GENMASK(7, 5))
#define SPI_MSI_VECTOR_SEL_MASK (GENMASK(4, 4))
#define SPI_MST_CTL_FORCE_CE (BIT(4))
#define SPI_MST_CTL_MODE_SEL (BIT(2))
#define SPI_MST_CTL_GO (BIT(0))
#define SPI_MST1_ADDR_BASE (0x800)
/* x refers to SPI Host Controller HW instance id in the below macros - 0 or 1 */
#define SPI_MST_CMD_BUF_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x00)
#define SPI_MST_RSP_BUF_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x200)
#define SPI_MST_CTL_REG_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x400)
#define SPI_MST_EVENT_REG_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x420)
#define SPI_MST_EVENT_MASK_REG_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x424)
#define SPI_MST_PAD_CTL_REG_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x460)
#define SPIALERT_MST_DB_REG_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x464)
#define SPIALERT_MST_VAL_REG_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x468)
#define SPI_PCI_CTRL_REG_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x480)
#define PCI1XXXX_IRQ_FLAGS (IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE)
#define SPI_MAX_DATA_LEN 320
#define PCI1XXXX_SPI_TIMEOUT (msecs_to_jiffies(100))
#define SPI_INTR BIT(8)
#define SPI_FORCE_CE BIT(4)
#define SPI_CHIP_SEL_COUNT 7
#define VENDOR_ID_MCHP 0x1055
#define SPI_SUSPEND_CONFIG 0x101
#define SPI_RESUME_CONFIG 0x203
struct pci1xxxx_spi_internal {
u8 hw_inst;
bool spi_xfer_in_progress;
int irq;
struct completion spi_xfer_done;
struct spi_controller *spi_host;
struct pci1xxxx_spi *parent;
struct {
unsigned int dev_sel : 3;
unsigned int msi_vector_sel : 1;
} prev_val;
};
struct pci1xxxx_spi {
struct pci_dev *dev;
u8 total_hw_instances;
void __iomem *reg_base;
struct pci1xxxx_spi_internal *spi_int[];
};
static const struct pci_device_id pci1xxxx_spi_pci_id_table[] = {
{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa004, PCI_ANY_ID, 0x0001), 0, 0, 0x02},
{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa004, PCI_ANY_ID, 0x0002), 0, 0, 0x01},
{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa004, PCI_ANY_ID, 0x0003), 0, 0, 0x11},
{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa004, PCI_ANY_ID, PCI_ANY_ID), 0, 0, 0x01},
{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa014, PCI_ANY_ID, 0x0001), 0, 0, 0x02},
{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa014, PCI_ANY_ID, 0x0002), 0, 0, 0x01},
{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa014, PCI_ANY_ID, 0x0003), 0, 0, 0x11},
{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa014, PCI_ANY_ID, PCI_ANY_ID), 0, 0, 0x01},
{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa024, PCI_ANY_ID, 0x0001), 0, 0, 0x02},
{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa024, PCI_ANY_ID, 0x0002), 0, 0, 0x01},
{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa024, PCI_ANY_ID, 0x0003), 0, 0, 0x11},
{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa024, PCI_ANY_ID, PCI_ANY_ID), 0, 0, 0x01},
{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa034, PCI_ANY_ID, 0x0001), 0, 0, 0x02},
{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa034, PCI_ANY_ID, 0x0002), 0, 0, 0x01},
{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa034, PCI_ANY_ID, 0x0003), 0, 0, 0x11},
{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa034, PCI_ANY_ID, PCI_ANY_ID), 0, 0, 0x01},
{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa044, PCI_ANY_ID, 0x0001), 0, 0, 0x02},
{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa044, PCI_ANY_ID, 0x0002), 0, 0, 0x01},
{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa044, PCI_ANY_ID, 0x0003), 0, 0, 0x11},
{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa044, PCI_ANY_ID, PCI_ANY_ID), 0, 0, 0x01},
{ 0, }
};
MODULE_DEVICE_TABLE(pci, pci1xxxx_spi_pci_id_table);
static void pci1xxxx_spi_set_cs(struct spi_device *spi, bool enable)
{
struct pci1xxxx_spi_internal *p = spi_controller_get_devdata(spi->controller);
struct pci1xxxx_spi *par = p->parent;
u32 regval;
/* Set the DEV_SEL bits of the SPI_MST_CTL_REG */
regval = readl(par->reg_base + SPI_MST_CTL_REG_OFFSET(p->hw_inst));
if (!enable) {
regval |= SPI_FORCE_CE;
regval &= ~SPI_MST_CTL_DEVSEL_MASK;
regval |= (spi_get_chipselect(spi, 0) << 25);
} else {
regval &= ~SPI_FORCE_CE;
}
writel(regval, par->reg_base + SPI_MST_CTL_REG_OFFSET(p->hw_inst));
}
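/*
* Map the requested SCK rate onto the 3-bit clock-divider code that goes
* into SPI_MST_CTL_SPEED_MASK; rates outside the supported 2-30 MHz range
* fall back to code 2, the setting used for rates of 30 MHz and above.
*/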
static u8 pci1xxxx_get_clock_div(u32 hz)
{
u8 val = 0;
if (hz >= PCI1XXXX_SPI_MAX_CLOCK_HZ)
val = 2;
else if ((hz < PCI1XXXX_SPI_MAX_CLOCK_HZ) && (hz >= PCI1XXXX_SPI_CLK_20MHZ))
val = 3;
else if ((hz < PCI1XXXX_SPI_CLK_20MHZ) && (hz >= PCI1XXXX_SPI_CLK_15MHZ))
val = 4;
else if ((hz < PCI1XXXX_SPI_CLK_15MHZ) && (hz >= PCI1XXXX_SPI_CLK_12MHZ))
val = 5;
else if ((hz < PCI1XXXX_SPI_CLK_12MHZ) && (hz >= PCI1XXXX_SPI_CLK_10MHZ))
val = 6;
else if ((hz < PCI1XXXX_SPI_CLK_10MHZ) && (hz >= PCI1XXXX_SPI_MIN_CLOCK_HZ))
val = 7;
else
val = 2;
return val;
}
static int pci1xxxx_spi_transfer_one(struct spi_controller *spi_ctlr,
struct spi_device *spi, struct spi_transfer *xfer)
{
struct pci1xxxx_spi_internal *p = spi_controller_get_devdata(spi_ctlr);
int mode, len, loop_iter, transfer_len;
struct pci1xxxx_spi *par = p->parent;
unsigned long bytes_transfered;
unsigned long bytes_recvd;
unsigned long loop_count;
u8 *rx_buf, result;
const u8 *tx_buf;
u32 regval;
u8 clkdiv;
p->spi_xfer_in_progress = true;
mode = spi->mode;
clkdiv = pci1xxxx_get_clock_div(xfer->speed_hz);
tx_buf = xfer->tx_buf;
rx_buf = xfer->rx_buf;
transfer_len = xfer->len;
regval = readl(par->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
writel(regval, par->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
if (tx_buf) {
bytes_transfered = 0;
bytes_recvd = 0;
loop_count = transfer_len / SPI_MAX_DATA_LEN;
if (transfer_len % SPI_MAX_DATA_LEN != 0)
loop_count += 1;
for (loop_iter = 0; loop_iter < loop_count; loop_iter++) {
len = SPI_MAX_DATA_LEN;
if ((transfer_len % SPI_MAX_DATA_LEN != 0) &&
(loop_iter == loop_count - 1))
len = transfer_len % SPI_MAX_DATA_LEN;
reinit_completion(&p->spi_xfer_done);
memcpy_toio(par->reg_base + SPI_MST_CMD_BUF_OFFSET(p->hw_inst),
&tx_buf[bytes_transfered], len);
bytes_transfered += len;
regval = readl(par->reg_base +
SPI_MST_CTL_REG_OFFSET(p->hw_inst));
regval &= ~(SPI_MST_CTL_MODE_SEL | SPI_MST_CTL_CMD_LEN_MASK |
SPI_MST_CTL_SPEED_MASK);
if (mode == SPI_MODE_3)
regval |= SPI_MST_CTL_MODE_SEL;
else
regval &= ~SPI_MST_CTL_MODE_SEL;
regval |= (clkdiv << 5);
regval &= ~SPI_MST_CTL_CMD_LEN_MASK;
regval |= (len << 8);
writel(regval, par->reg_base +
SPI_MST_CTL_REG_OFFSET(p->hw_inst));
regval = readl(par->reg_base +
SPI_MST_CTL_REG_OFFSET(p->hw_inst));
regval |= SPI_MST_CTL_GO;
writel(regval, par->reg_base +
SPI_MST_CTL_REG_OFFSET(p->hw_inst));
/* Wait for DMA_TERM interrupt */
result = wait_for_completion_timeout(&p->spi_xfer_done,
PCI1XXXX_SPI_TIMEOUT);
			if (!result) {
				p->spi_xfer_in_progress = false;
				return -ETIMEDOUT;
			}
if (rx_buf) {
memcpy_fromio(&rx_buf[bytes_recvd], par->reg_base +
SPI_MST_RSP_BUF_OFFSET(p->hw_inst), len);
bytes_recvd += len;
}
}
}
p->spi_xfer_in_progress = false;
return 0;
}
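/*
 * Transfers longer than SPI_MAX_DATA_LEN are split into SPI_MAX_DATA_LEN
 * sized chunks above; each chunk is copied into the command buffer, started
 * with SPI_MST_CTL_GO and completed from the interrupt handler before the
 * next chunk is queued. Note that rx data is only read back when a tx buffer
 * is supplied, which matches the SPI_CONTROLLER_MUST_TX flag set in probe.
 */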
static irqreturn_t pci1xxxx_spi_isr(int irq, void *dev)
{
struct pci1xxxx_spi_internal *p = dev;
irqreturn_t spi_int_fired = IRQ_NONE;
u32 regval;
/* Clear the SPI GO_BIT Interrupt */
regval = readl(p->parent->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
if (regval & SPI_INTR) {
		/* Signal transfer completion */
complete(&p->spi_xfer_done);
spi_int_fired = IRQ_HANDLED;
}
writel(regval, p->parent->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
return spi_int_fired;
}
static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
u8 hw_inst_cnt, iter, start, only_sec_inst;
struct pci1xxxx_spi_internal *spi_sub_ptr;
struct device *dev = &pdev->dev;
struct pci1xxxx_spi *spi_bus;
struct spi_controller *spi_host;
u32 regval;
int ret;
hw_inst_cnt = ent->driver_data & 0x0f;
start = (ent->driver_data & 0xf0) >> 4;
if (start == 1)
only_sec_inst = 1;
else
only_sec_inst = 0;
spi_bus = devm_kzalloc(&pdev->dev,
struct_size(spi_bus, spi_int, hw_inst_cnt),
GFP_KERNEL);
if (!spi_bus)
return -ENOMEM;
spi_bus->dev = pdev;
spi_bus->total_hw_instances = hw_inst_cnt;
pci_set_master(pdev);
for (iter = 0; iter < hw_inst_cnt; iter++) {
		spi_bus->spi_int[iter] = devm_kzalloc(&pdev->dev,
						      sizeof(struct pci1xxxx_spi_internal),
						      GFP_KERNEL);
		if (!spi_bus->spi_int[iter])
			return -ENOMEM;
		spi_sub_ptr = spi_bus->spi_int[iter];
spi_sub_ptr->spi_host = devm_spi_alloc_host(dev, sizeof(struct spi_controller));
if (!spi_sub_ptr->spi_host)
return -ENOMEM;
spi_sub_ptr->parent = spi_bus;
spi_sub_ptr->spi_xfer_in_progress = false;
if (!iter) {
ret = pcim_enable_device(pdev);
if (ret)
return -ENOMEM;
ret = pci_request_regions(pdev, DRV_NAME);
if (ret)
return -ENOMEM;
spi_bus->reg_base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
if (!spi_bus->reg_base) {
ret = -EINVAL;
goto error;
}
ret = pci_alloc_irq_vectors(pdev, hw_inst_cnt, hw_inst_cnt,
PCI_IRQ_ALL_TYPES);
if (ret < 0) {
dev_err(&pdev->dev, "Error allocating MSI vectors\n");
goto error;
}
init_completion(&spi_sub_ptr->spi_xfer_done);
/* Initialize Interrupts - SPI_INT */
regval = readl(spi_bus->reg_base +
SPI_MST_EVENT_MASK_REG_OFFSET(spi_sub_ptr->hw_inst));
regval &= ~SPI_INTR;
writel(regval, spi_bus->reg_base +
SPI_MST_EVENT_MASK_REG_OFFSET(spi_sub_ptr->hw_inst));
spi_sub_ptr->irq = pci_irq_vector(pdev, 0);
ret = devm_request_irq(&pdev->dev, spi_sub_ptr->irq,
pci1xxxx_spi_isr, PCI1XXXX_IRQ_FLAGS,
pci_name(pdev), spi_sub_ptr);
if (ret < 0) {
dev_err(&pdev->dev, "Unable to request irq : %d",
spi_sub_ptr->irq);
ret = -ENODEV;
goto error;
}
/* This register is only applicable for 1st instance */
regval = readl(spi_bus->reg_base + SPI_PCI_CTRL_REG_OFFSET(0));
if (!only_sec_inst)
regval |= (BIT(4));
else
regval &= ~(BIT(4));
writel(regval, spi_bus->reg_base + SPI_PCI_CTRL_REG_OFFSET(0));
}
spi_sub_ptr->hw_inst = start++;
if (iter == 1) {
init_completion(&spi_sub_ptr->spi_xfer_done);
/* Initialize Interrupts - SPI_INT */
regval = readl(spi_bus->reg_base +
SPI_MST_EVENT_MASK_REG_OFFSET(spi_sub_ptr->hw_inst));
regval &= ~SPI_INTR;
writel(regval, spi_bus->reg_base +
SPI_MST_EVENT_MASK_REG_OFFSET(spi_sub_ptr->hw_inst));
spi_sub_ptr->irq = pci_irq_vector(pdev, iter);
ret = devm_request_irq(&pdev->dev, spi_sub_ptr->irq,
pci1xxxx_spi_isr, PCI1XXXX_IRQ_FLAGS,
pci_name(pdev), spi_sub_ptr);
if (ret < 0) {
dev_err(&pdev->dev, "Unable to request irq : %d",
spi_sub_ptr->irq);
ret = -ENODEV;
goto error;
}
}
spi_host = spi_sub_ptr->spi_host;
spi_host->num_chipselect = SPI_CHIP_SEL_COUNT;
spi_host->mode_bits = SPI_MODE_0 | SPI_MODE_3 | SPI_RX_DUAL |
SPI_TX_DUAL | SPI_LOOP;
spi_host->transfer_one = pci1xxxx_spi_transfer_one;
spi_host->set_cs = pci1xxxx_spi_set_cs;
spi_host->bits_per_word_mask = SPI_BPW_MASK(8);
spi_host->max_speed_hz = PCI1XXXX_SPI_MAX_CLOCK_HZ;
spi_host->min_speed_hz = PCI1XXXX_SPI_MIN_CLOCK_HZ;
spi_host->flags = SPI_CONTROLLER_MUST_TX;
spi_controller_set_devdata(spi_host, spi_sub_ptr);
ret = devm_spi_register_controller(dev, spi_host);
if (ret)
goto error;
}
pci_set_drvdata(pdev, spi_bus);
return 0;
error:
pci_release_regions(pdev);
return ret;
}
static void store_restore_config(struct pci1xxxx_spi *spi_ptr,
struct pci1xxxx_spi_internal *spi_sub_ptr,
u8 inst, bool store)
{
u32 regval;
if (store) {
regval = readl(spi_ptr->reg_base +
SPI_MST_CTL_REG_OFFSET(spi_sub_ptr->hw_inst));
regval &= SPI_MST_CTL_DEVSEL_MASK;
spi_sub_ptr->prev_val.dev_sel = (regval >> 25) & 7;
regval = readl(spi_ptr->reg_base +
SPI_PCI_CTRL_REG_OFFSET(spi_sub_ptr->hw_inst));
regval &= SPI_MSI_VECTOR_SEL_MASK;
spi_sub_ptr->prev_val.msi_vector_sel = (regval >> 4) & 1;
} else {
regval = readl(spi_ptr->reg_base + SPI_MST_CTL_REG_OFFSET(inst));
regval &= ~SPI_MST_CTL_DEVSEL_MASK;
regval |= (spi_sub_ptr->prev_val.dev_sel << 25);
writel(regval,
spi_ptr->reg_base + SPI_MST_CTL_REG_OFFSET(inst));
writel((spi_sub_ptr->prev_val.msi_vector_sel << 4),
spi_ptr->reg_base + SPI_PCI_CTRL_REG_OFFSET(inst));
}
}
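/*
 * Field layout assumed by the helper above, as implied by its shifts and
 * masks: the device-select value sits at bits [27:25] of SPI_MST_CTL_REG
 * (a 3-bit field at shift 25) and the MSI vector select at bit 4 of
 * SPI_PCI_CTRL_REG.
 */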
static int pci1xxxx_spi_resume(struct device *dev)
{
struct pci1xxxx_spi *spi_ptr = dev_get_drvdata(dev);
struct pci1xxxx_spi_internal *spi_sub_ptr;
u32 regval = SPI_RESUME_CONFIG;
u8 iter;
for (iter = 0; iter < spi_ptr->total_hw_instances; iter++) {
spi_sub_ptr = spi_ptr->spi_int[iter];
spi_controller_resume(spi_sub_ptr->spi_host);
writel(regval, spi_ptr->reg_base +
SPI_MST_EVENT_MASK_REG_OFFSET(iter));
/* Restore config at resume */
store_restore_config(spi_ptr, spi_sub_ptr, iter, 0);
}
return 0;
}
static int pci1xxxx_spi_suspend(struct device *dev)
{
struct pci1xxxx_spi *spi_ptr = dev_get_drvdata(dev);
struct pci1xxxx_spi_internal *spi_sub_ptr;
u32 reg1 = SPI_SUSPEND_CONFIG;
u8 iter;
for (iter = 0; iter < spi_ptr->total_hw_instances; iter++) {
spi_sub_ptr = spi_ptr->spi_int[iter];
while (spi_sub_ptr->spi_xfer_in_progress)
msleep(20);
/* Store existing config before suspend */
store_restore_config(spi_ptr, spi_sub_ptr, iter, 1);
spi_controller_suspend(spi_sub_ptr->spi_host);
writel(reg1, spi_ptr->reg_base +
SPI_MST_EVENT_MASK_REG_OFFSET(iter));
}
return 0;
}
static DEFINE_SIMPLE_DEV_PM_OPS(spi_pm_ops, pci1xxxx_spi_suspend,
pci1xxxx_spi_resume);
static struct pci_driver pci1xxxx_spi_driver = {
.name = DRV_NAME,
.id_table = pci1xxxx_spi_pci_id_table,
.probe = pci1xxxx_spi_probe,
.driver = {
.pm = pm_sleep_ptr(&spi_pm_ops),
},
};
module_pci_driver(pci1xxxx_spi_driver);
MODULE_DESCRIPTION("Microchip Technology Inc. pci1xxxx SPI bus driver");
MODULE_AUTHOR("Tharun Kumar P<[email protected]>");
MODULE_AUTHOR("Kumaravel Thiagarajan<[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/spi/spi-pci1xxxx.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* SPI driver for Nvidia's Tegra20/Tegra30 SLINK Controller.
*
* Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/reset.h>
#include <linux/spi/spi.h>
#include <soc/tegra/common.h>
#define SLINK_COMMAND 0x000
#define SLINK_BIT_LENGTH(x) (((x) & 0x1f) << 0)
#define SLINK_WORD_SIZE(x) (((x) & 0x1f) << 5)
#define SLINK_BOTH_EN (1 << 10)
#define SLINK_CS_SW (1 << 11)
#define SLINK_CS_VALUE (1 << 12)
#define SLINK_CS_POLARITY (1 << 13)
#define SLINK_IDLE_SDA_DRIVE_LOW (0 << 16)
#define SLINK_IDLE_SDA_DRIVE_HIGH (1 << 16)
#define SLINK_IDLE_SDA_PULL_LOW (2 << 16)
#define SLINK_IDLE_SDA_PULL_HIGH (3 << 16)
#define SLINK_IDLE_SDA_MASK (3 << 16)
#define SLINK_CS_POLARITY1 (1 << 20)
#define SLINK_CK_SDA (1 << 21)
#define SLINK_CS_POLARITY2 (1 << 22)
#define SLINK_CS_POLARITY3 (1 << 23)
#define SLINK_IDLE_SCLK_DRIVE_LOW (0 << 24)
#define SLINK_IDLE_SCLK_DRIVE_HIGH (1 << 24)
#define SLINK_IDLE_SCLK_PULL_LOW (2 << 24)
#define SLINK_IDLE_SCLK_PULL_HIGH (3 << 24)
#define SLINK_IDLE_SCLK_MASK (3 << 24)
#define SLINK_M_S (1 << 28)
#define SLINK_WAIT (1 << 29)
#define SLINK_GO (1 << 30)
#define SLINK_ENB (1 << 31)
#define SLINK_MODES (SLINK_IDLE_SCLK_MASK | SLINK_CK_SDA)
#define SLINK_COMMAND2 0x004
#define SLINK_LSBFE (1 << 0)
#define SLINK_SSOE (1 << 1)
#define SLINK_SPIE (1 << 4)
#define SLINK_BIDIROE (1 << 6)
#define SLINK_MODFEN (1 << 7)
#define SLINK_INT_SIZE(x) (((x) & 0x1f) << 8)
#define SLINK_CS_ACTIVE_BETWEEN (1 << 17)
#define SLINK_SS_EN_CS(x) (((x) & 0x3) << 18)
#define SLINK_SS_SETUP(x) (((x) & 0x3) << 20)
#define SLINK_FIFO_REFILLS_0 (0 << 22)
#define SLINK_FIFO_REFILLS_1 (1 << 22)
#define SLINK_FIFO_REFILLS_2 (2 << 22)
#define SLINK_FIFO_REFILLS_3 (3 << 22)
#define SLINK_FIFO_REFILLS_MASK (3 << 22)
#define SLINK_WAIT_PACK_INT(x) (((x) & 0x7) << 26)
#define SLINK_SPC0 (1 << 29)
#define SLINK_TXEN (1 << 30)
#define SLINK_RXEN (1 << 31)
#define SLINK_STATUS 0x008
#define SLINK_COUNT(val) (((val) >> 0) & 0x1f)
#define SLINK_WORD(val) (((val) >> 5) & 0x1f)
#define SLINK_BLK_CNT(val) (((val) >> 0) & 0xffff)
#define SLINK_MODF (1 << 16)
#define SLINK_RX_UNF (1 << 18)
#define SLINK_TX_OVF (1 << 19)
#define SLINK_TX_FULL (1 << 20)
#define SLINK_TX_EMPTY (1 << 21)
#define SLINK_RX_FULL (1 << 22)
#define SLINK_RX_EMPTY (1 << 23)
#define SLINK_TX_UNF (1 << 24)
#define SLINK_RX_OVF (1 << 25)
#define SLINK_TX_FLUSH (1 << 26)
#define SLINK_RX_FLUSH (1 << 27)
#define SLINK_SCLK (1 << 28)
#define SLINK_ERR (1 << 29)
#define SLINK_RDY (1 << 30)
#define SLINK_BSY (1 << 31)
#define SLINK_FIFO_ERROR (SLINK_TX_OVF | SLINK_RX_UNF | \
SLINK_TX_UNF | SLINK_RX_OVF)
#define SLINK_FIFO_EMPTY (SLINK_TX_EMPTY | SLINK_RX_EMPTY)
#define SLINK_MAS_DATA 0x010
#define SLINK_SLAVE_DATA 0x014
#define SLINK_DMA_CTL 0x018
#define SLINK_DMA_BLOCK_SIZE(x) (((x) & 0xffff) << 0)
#define SLINK_TX_TRIG_1 (0 << 16)
#define SLINK_TX_TRIG_4 (1 << 16)
#define SLINK_TX_TRIG_8 (2 << 16)
#define SLINK_TX_TRIG_16 (3 << 16)
#define SLINK_TX_TRIG_MASK (3 << 16)
#define SLINK_RX_TRIG_1 (0 << 18)
#define SLINK_RX_TRIG_4 (1 << 18)
#define SLINK_RX_TRIG_8 (2 << 18)
#define SLINK_RX_TRIG_16 (3 << 18)
#define SLINK_RX_TRIG_MASK (3 << 18)
#define SLINK_PACKED (1 << 20)
#define SLINK_PACK_SIZE_4 (0 << 21)
#define SLINK_PACK_SIZE_8 (1 << 21)
#define SLINK_PACK_SIZE_16 (2 << 21)
#define SLINK_PACK_SIZE_32 (3 << 21)
#define SLINK_PACK_SIZE_MASK (3 << 21)
#define SLINK_IE_TXC (1 << 26)
#define SLINK_IE_RXC (1 << 27)
#define SLINK_DMA_EN (1 << 31)
#define SLINK_STATUS2 0x01c
#define SLINK_TX_FIFO_EMPTY_COUNT(val) (((val) & 0x3f) >> 0)
#define SLINK_RX_FIFO_FULL_COUNT(val) (((val) & 0x3f0000) >> 16)
#define SLINK_SS_HOLD_TIME(val) (((val) & 0xF) << 6)
#define SLINK_TX_FIFO 0x100
#define SLINK_RX_FIFO 0x180
#define DATA_DIR_TX (1 << 0)
#define DATA_DIR_RX (1 << 1)
#define SLINK_DMA_TIMEOUT (msecs_to_jiffies(1000))
#define DEFAULT_SPI_DMA_BUF_LEN (16*1024)
#define TX_FIFO_EMPTY_COUNT_MAX SLINK_TX_FIFO_EMPTY_COUNT(0x20)
#define RX_FIFO_FULL_COUNT_ZERO SLINK_RX_FIFO_FULL_COUNT(0)
#define SLINK_STATUS2_RESET \
(TX_FIFO_EMPTY_COUNT_MAX | RX_FIFO_FULL_COUNT_ZERO << 16)
#define MAX_CHIP_SELECT 4
#define SLINK_FIFO_DEPTH 32
struct tegra_slink_chip_data {
bool cs_hold_time;
};
struct tegra_slink_data {
struct device *dev;
struct spi_master *master;
const struct tegra_slink_chip_data *chip_data;
spinlock_t lock;
struct clk *clk;
struct reset_control *rst;
void __iomem *base;
phys_addr_t phys;
unsigned irq;
u32 cur_speed;
struct spi_device *cur_spi;
unsigned cur_pos;
unsigned cur_len;
unsigned words_per_32bit;
unsigned bytes_per_word;
unsigned curr_dma_words;
unsigned cur_direction;
unsigned cur_rx_pos;
unsigned cur_tx_pos;
unsigned dma_buf_size;
unsigned max_buf_size;
bool is_curr_dma_xfer;
struct completion rx_dma_complete;
struct completion tx_dma_complete;
u32 tx_status;
u32 rx_status;
u32 status_reg;
bool is_packed;
u32 packed_size;
u32 command_reg;
u32 command2_reg;
u32 dma_control_reg;
u32 def_command_reg;
u32 def_command2_reg;
struct completion xfer_completion;
struct spi_transfer *curr_xfer;
struct dma_chan *rx_dma_chan;
u32 *rx_dma_buf;
dma_addr_t rx_dma_phys;
struct dma_async_tx_descriptor *rx_dma_desc;
struct dma_chan *tx_dma_chan;
u32 *tx_dma_buf;
dma_addr_t tx_dma_phys;
struct dma_async_tx_descriptor *tx_dma_desc;
};
static inline u32 tegra_slink_readl(struct tegra_slink_data *tspi,
unsigned long reg)
{
return readl(tspi->base + reg);
}
static inline void tegra_slink_writel(struct tegra_slink_data *tspi,
u32 val, unsigned long reg)
{
writel(val, tspi->base + reg);
/* Read back register to make sure that register writes completed */
if (reg != SLINK_TX_FIFO)
readl(tspi->base + SLINK_MAS_DATA);
}
static void tegra_slink_clear_status(struct tegra_slink_data *tspi)
{
u32 val_write;
tegra_slink_readl(tspi, SLINK_STATUS);
/* Write 1 to clear status register */
val_write = SLINK_RDY | SLINK_FIFO_ERROR;
tegra_slink_writel(tspi, val_write, SLINK_STATUS);
}
static u32 tegra_slink_get_packed_size(struct tegra_slink_data *tspi,
struct spi_transfer *t)
{
switch (tspi->bytes_per_word) {
case 0:
return SLINK_PACK_SIZE_4;
case 1:
return SLINK_PACK_SIZE_8;
case 2:
return SLINK_PACK_SIZE_16;
case 4:
return SLINK_PACK_SIZE_32;
default:
return 0;
}
}
static unsigned tegra_slink_calculate_curr_xfer_param(
struct spi_device *spi, struct tegra_slink_data *tspi,
struct spi_transfer *t)
{
unsigned remain_len = t->len - tspi->cur_pos;
unsigned max_word;
unsigned bits_per_word;
unsigned max_len;
unsigned total_fifo_words;
bits_per_word = t->bits_per_word;
tspi->bytes_per_word = DIV_ROUND_UP(bits_per_word, 8);
if (bits_per_word == 8 || bits_per_word == 16) {
tspi->is_packed = true;
tspi->words_per_32bit = 32/bits_per_word;
} else {
tspi->is_packed = false;
tspi->words_per_32bit = 1;
}
tspi->packed_size = tegra_slink_get_packed_size(tspi, t);
if (tspi->is_packed) {
max_len = min(remain_len, tspi->max_buf_size);
tspi->curr_dma_words = max_len/tspi->bytes_per_word;
total_fifo_words = max_len/4;
} else {
max_word = (remain_len - 1) / tspi->bytes_per_word + 1;
max_word = min(max_word, tspi->max_buf_size/4);
tspi->curr_dma_words = max_word;
total_fifo_words = max_word;
}
return total_fifo_words;
}
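/*
 * Example of the sizing above: with 8-bit words, packed mode is used and
 * four words share each 32-bit FIFO entry (words_per_32bit = 32 / 8), so
 * total_fifo_words is len / 4; with, say, 24-bit words, unpacked mode is
 * used and every word occupies a full FIFO entry.
 */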
static unsigned tegra_slink_fill_tx_fifo_from_client_txbuf(
struct tegra_slink_data *tspi, struct spi_transfer *t)
{
unsigned nbytes;
unsigned tx_empty_count;
u32 fifo_status;
unsigned max_n_32bit;
unsigned i, count;
unsigned int written_words;
unsigned fifo_words_left;
u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
fifo_status = tegra_slink_readl(tspi, SLINK_STATUS2);
tx_empty_count = SLINK_TX_FIFO_EMPTY_COUNT(fifo_status);
if (tspi->is_packed) {
fifo_words_left = tx_empty_count * tspi->words_per_32bit;
written_words = min(fifo_words_left, tspi->curr_dma_words);
nbytes = written_words * tspi->bytes_per_word;
max_n_32bit = DIV_ROUND_UP(nbytes, 4);
for (count = 0; count < max_n_32bit; count++) {
u32 x = 0;
for (i = 0; (i < 4) && nbytes; i++, nbytes--)
x |= (u32)(*tx_buf++) << (i * 8);
tegra_slink_writel(tspi, x, SLINK_TX_FIFO);
}
} else {
max_n_32bit = min(tspi->curr_dma_words, tx_empty_count);
written_words = max_n_32bit;
nbytes = written_words * tspi->bytes_per_word;
for (count = 0; count < max_n_32bit; count++) {
u32 x = 0;
for (i = 0; nbytes && (i < tspi->bytes_per_word);
i++, nbytes--)
x |= (u32)(*tx_buf++) << (i * 8);
tegra_slink_writel(tspi, x, SLINK_TX_FIFO);
}
}
tspi->cur_tx_pos += written_words * tspi->bytes_per_word;
return written_words;
}
static unsigned int tegra_slink_read_rx_fifo_to_client_rxbuf(
struct tegra_slink_data *tspi, struct spi_transfer *t)
{
unsigned rx_full_count;
u32 fifo_status;
unsigned i, count;
unsigned int read_words = 0;
unsigned len;
u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_rx_pos;
fifo_status = tegra_slink_readl(tspi, SLINK_STATUS2);
rx_full_count = SLINK_RX_FIFO_FULL_COUNT(fifo_status);
if (tspi->is_packed) {
len = tspi->curr_dma_words * tspi->bytes_per_word;
for (count = 0; count < rx_full_count; count++) {
u32 x = tegra_slink_readl(tspi, SLINK_RX_FIFO);
for (i = 0; len && (i < 4); i++, len--)
*rx_buf++ = (x >> i*8) & 0xFF;
}
tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
read_words += tspi->curr_dma_words;
} else {
for (count = 0; count < rx_full_count; count++) {
u32 x = tegra_slink_readl(tspi, SLINK_RX_FIFO);
for (i = 0; (i < tspi->bytes_per_word); i++)
*rx_buf++ = (x >> (i*8)) & 0xFF;
}
tspi->cur_rx_pos += rx_full_count * tspi->bytes_per_word;
read_words += rx_full_count;
}
return read_words;
}
static void tegra_slink_copy_client_txbuf_to_spi_txbuf(
struct tegra_slink_data *tspi, struct spi_transfer *t)
{
	/* Make the DMA buffer readable by the CPU */
dma_sync_single_for_cpu(tspi->dev, tspi->tx_dma_phys,
tspi->dma_buf_size, DMA_TO_DEVICE);
if (tspi->is_packed) {
unsigned len = tspi->curr_dma_words * tspi->bytes_per_word;
memcpy(tspi->tx_dma_buf, t->tx_buf + tspi->cur_pos, len);
} else {
unsigned int i;
unsigned int count;
u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
for (count = 0; count < tspi->curr_dma_words; count++) {
u32 x = 0;
for (i = 0; consume && (i < tspi->bytes_per_word);
i++, consume--)
x |= (u32)(*tx_buf++) << (i * 8);
tspi->tx_dma_buf[count] = x;
}
}
tspi->cur_tx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
	/* Make the DMA buffer readable by the DMA engine */
dma_sync_single_for_device(tspi->dev, tspi->tx_dma_phys,
tspi->dma_buf_size, DMA_TO_DEVICE);
}
static void tegra_slink_copy_spi_rxbuf_to_client_rxbuf(
struct tegra_slink_data *tspi, struct spi_transfer *t)
{
unsigned len;
	/* Make the DMA buffer readable by the CPU */
dma_sync_single_for_cpu(tspi->dev, tspi->rx_dma_phys,
tspi->dma_buf_size, DMA_FROM_DEVICE);
if (tspi->is_packed) {
len = tspi->curr_dma_words * tspi->bytes_per_word;
memcpy(t->rx_buf + tspi->cur_rx_pos, tspi->rx_dma_buf, len);
} else {
unsigned int i;
unsigned int count;
unsigned char *rx_buf = t->rx_buf + tspi->cur_rx_pos;
u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
for (count = 0; count < tspi->curr_dma_words; count++) {
u32 x = tspi->rx_dma_buf[count] & rx_mask;
for (i = 0; (i < tspi->bytes_per_word); i++)
*rx_buf++ = (x >> (i*8)) & 0xFF;
}
}
tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
	/* Make the DMA buffer readable by the DMA engine */
dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
tspi->dma_buf_size, DMA_FROM_DEVICE);
}
static void tegra_slink_dma_complete(void *args)
{
struct completion *dma_complete = args;
complete(dma_complete);
}
static int tegra_slink_start_tx_dma(struct tegra_slink_data *tspi, int len)
{
reinit_completion(&tspi->tx_dma_complete);
tspi->tx_dma_desc = dmaengine_prep_slave_single(tspi->tx_dma_chan,
tspi->tx_dma_phys, len, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!tspi->tx_dma_desc) {
dev_err(tspi->dev, "Not able to get desc for Tx\n");
return -EIO;
}
tspi->tx_dma_desc->callback = tegra_slink_dma_complete;
tspi->tx_dma_desc->callback_param = &tspi->tx_dma_complete;
dmaengine_submit(tspi->tx_dma_desc);
dma_async_issue_pending(tspi->tx_dma_chan);
return 0;
}
static int tegra_slink_start_rx_dma(struct tegra_slink_data *tspi, int len)
{
reinit_completion(&tspi->rx_dma_complete);
tspi->rx_dma_desc = dmaengine_prep_slave_single(tspi->rx_dma_chan,
tspi->rx_dma_phys, len, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!tspi->rx_dma_desc) {
dev_err(tspi->dev, "Not able to get desc for Rx\n");
return -EIO;
}
tspi->rx_dma_desc->callback = tegra_slink_dma_complete;
tspi->rx_dma_desc->callback_param = &tspi->rx_dma_complete;
dmaengine_submit(tspi->rx_dma_desc);
dma_async_issue_pending(tspi->rx_dma_chan);
return 0;
}
static int tegra_slink_start_dma_based_transfer(
struct tegra_slink_data *tspi, struct spi_transfer *t)
{
u32 val;
unsigned int len;
int ret = 0;
u32 status;
/* Make sure that Rx and Tx fifo are empty */
status = tegra_slink_readl(tspi, SLINK_STATUS);
if ((status & SLINK_FIFO_EMPTY) != SLINK_FIFO_EMPTY) {
dev_err(tspi->dev, "Rx/Tx fifo are not empty status 0x%08x\n",
(unsigned)status);
return -EIO;
}
val = SLINK_DMA_BLOCK_SIZE(tspi->curr_dma_words - 1);
val |= tspi->packed_size;
if (tspi->is_packed)
len = DIV_ROUND_UP(tspi->curr_dma_words * tspi->bytes_per_word,
4) * 4;
else
len = tspi->curr_dma_words * 4;
/* Set attention level based on length of transfer */
if (len & 0xF)
val |= SLINK_TX_TRIG_1 | SLINK_RX_TRIG_1;
else if (((len) >> 4) & 0x1)
val |= SLINK_TX_TRIG_4 | SLINK_RX_TRIG_4;
else
val |= SLINK_TX_TRIG_8 | SLINK_RX_TRIG_8;
if (tspi->cur_direction & DATA_DIR_TX)
val |= SLINK_IE_TXC;
if (tspi->cur_direction & DATA_DIR_RX)
val |= SLINK_IE_RXC;
tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
tspi->dma_control_reg = val;
if (tspi->cur_direction & DATA_DIR_TX) {
tegra_slink_copy_client_txbuf_to_spi_txbuf(tspi, t);
wmb();
ret = tegra_slink_start_tx_dma(tspi, len);
if (ret < 0) {
dev_err(tspi->dev,
"Starting tx dma failed, err %d\n", ret);
return ret;
}
		/* Wait for the TX FIFO to fill before starting SLINK */
status = tegra_slink_readl(tspi, SLINK_STATUS);
while (!(status & SLINK_TX_FULL))
status = tegra_slink_readl(tspi, SLINK_STATUS);
}
if (tspi->cur_direction & DATA_DIR_RX) {
		/* Make the DMA buffer readable by the DMA engine */
dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
tspi->dma_buf_size, DMA_FROM_DEVICE);
ret = tegra_slink_start_rx_dma(tspi, len);
if (ret < 0) {
dev_err(tspi->dev,
"Starting rx dma failed, err %d\n", ret);
if (tspi->cur_direction & DATA_DIR_TX)
dmaengine_terminate_all(tspi->tx_dma_chan);
return ret;
}
}
tspi->is_curr_dma_xfer = true;
if (tspi->is_packed) {
val |= SLINK_PACKED;
tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
		/* HW needs a small delay after setting packed mode */
udelay(1);
}
tspi->dma_control_reg = val;
val |= SLINK_DMA_EN;
tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
return ret;
}
static int tegra_slink_start_cpu_based_transfer(
struct tegra_slink_data *tspi, struct spi_transfer *t)
{
u32 val;
unsigned cur_words;
val = tspi->packed_size;
if (tspi->cur_direction & DATA_DIR_TX)
val |= SLINK_IE_TXC;
if (tspi->cur_direction & DATA_DIR_RX)
val |= SLINK_IE_RXC;
tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
tspi->dma_control_reg = val;
if (tspi->cur_direction & DATA_DIR_TX)
cur_words = tegra_slink_fill_tx_fifo_from_client_txbuf(tspi, t);
else
cur_words = tspi->curr_dma_words;
val |= SLINK_DMA_BLOCK_SIZE(cur_words - 1);
tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
tspi->dma_control_reg = val;
tspi->is_curr_dma_xfer = false;
if (tspi->is_packed) {
val |= SLINK_PACKED;
tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
udelay(1);
wmb();
}
tspi->dma_control_reg = val;
val |= SLINK_DMA_EN;
tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
return 0;
}
static int tegra_slink_init_dma_param(struct tegra_slink_data *tspi,
bool dma_to_memory)
{
struct dma_chan *dma_chan;
u32 *dma_buf;
dma_addr_t dma_phys;
int ret;
struct dma_slave_config dma_sconfig;
dma_chan = dma_request_chan(tspi->dev, dma_to_memory ? "rx" : "tx");
if (IS_ERR(dma_chan))
return dev_err_probe(tspi->dev, PTR_ERR(dma_chan),
"Dma channel is not available\n");
dma_buf = dma_alloc_coherent(tspi->dev, tspi->dma_buf_size,
&dma_phys, GFP_KERNEL);
if (!dma_buf) {
dev_err(tspi->dev, " Not able to allocate the dma buffer\n");
dma_release_channel(dma_chan);
return -ENOMEM;
}
if (dma_to_memory) {
dma_sconfig.src_addr = tspi->phys + SLINK_RX_FIFO;
dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
dma_sconfig.src_maxburst = 0;
} else {
dma_sconfig.dst_addr = tspi->phys + SLINK_TX_FIFO;
dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
dma_sconfig.dst_maxburst = 0;
}
ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
if (ret)
goto scrub;
if (dma_to_memory) {
tspi->rx_dma_chan = dma_chan;
tspi->rx_dma_buf = dma_buf;
tspi->rx_dma_phys = dma_phys;
} else {
tspi->tx_dma_chan = dma_chan;
tspi->tx_dma_buf = dma_buf;
tspi->tx_dma_phys = dma_phys;
}
return 0;
scrub:
dma_free_coherent(tspi->dev, tspi->dma_buf_size, dma_buf, dma_phys);
dma_release_channel(dma_chan);
return ret;
}
static void tegra_slink_deinit_dma_param(struct tegra_slink_data *tspi,
bool dma_to_memory)
{
u32 *dma_buf;
dma_addr_t dma_phys;
struct dma_chan *dma_chan;
if (dma_to_memory) {
dma_buf = tspi->rx_dma_buf;
dma_chan = tspi->rx_dma_chan;
dma_phys = tspi->rx_dma_phys;
tspi->rx_dma_chan = NULL;
tspi->rx_dma_buf = NULL;
} else {
dma_buf = tspi->tx_dma_buf;
dma_chan = tspi->tx_dma_chan;
dma_phys = tspi->tx_dma_phys;
tspi->tx_dma_buf = NULL;
tspi->tx_dma_chan = NULL;
}
if (!dma_chan)
return;
dma_free_coherent(tspi->dev, tspi->dma_buf_size, dma_buf, dma_phys);
dma_release_channel(dma_chan);
}
static int tegra_slink_start_transfer_one(struct spi_device *spi,
struct spi_transfer *t)
{
struct tegra_slink_data *tspi = spi_master_get_devdata(spi->master);
u32 speed;
u8 bits_per_word;
unsigned total_fifo_words;
int ret;
u32 command;
u32 command2;
bits_per_word = t->bits_per_word;
speed = t->speed_hz;
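	/*
	 * The controller clock is assumed to run at four times the SPI bus
	 * rate, hence the OPP rate request of speed * 4 below.
	 */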
if (speed != tspi->cur_speed) {
dev_pm_opp_set_rate(tspi->dev, speed * 4);
tspi->cur_speed = speed;
}
tspi->cur_spi = spi;
tspi->cur_pos = 0;
tspi->cur_rx_pos = 0;
tspi->cur_tx_pos = 0;
tspi->curr_xfer = t;
total_fifo_words = tegra_slink_calculate_curr_xfer_param(spi, tspi, t);
command = tspi->command_reg;
command &= ~SLINK_BIT_LENGTH(~0);
command |= SLINK_BIT_LENGTH(bits_per_word - 1);
command2 = tspi->command2_reg;
command2 &= ~(SLINK_RXEN | SLINK_TXEN);
tspi->cur_direction = 0;
if (t->rx_buf) {
command2 |= SLINK_RXEN;
tspi->cur_direction |= DATA_DIR_RX;
}
if (t->tx_buf) {
command2 |= SLINK_TXEN;
tspi->cur_direction |= DATA_DIR_TX;
}
/*
	 * Writing to the command2 register before the command register prevents
	 * a spike on chip_select line 0. This selects the chip_select line
* before changing the chip_select value.
*/
tegra_slink_writel(tspi, command2, SLINK_COMMAND2);
tspi->command2_reg = command2;
tegra_slink_writel(tspi, command, SLINK_COMMAND);
tspi->command_reg = command;
if (total_fifo_words > SLINK_FIFO_DEPTH)
ret = tegra_slink_start_dma_based_transfer(tspi, t);
else
ret = tegra_slink_start_cpu_based_transfer(tspi, t);
return ret;
}
static int tegra_slink_setup(struct spi_device *spi)
{
static const u32 cs_pol_bit[MAX_CHIP_SELECT] = {
SLINK_CS_POLARITY,
SLINK_CS_POLARITY1,
SLINK_CS_POLARITY2,
SLINK_CS_POLARITY3,
};
struct tegra_slink_data *tspi = spi_master_get_devdata(spi->master);
u32 val;
unsigned long flags;
int ret;
dev_dbg(&spi->dev, "setup %d bpw, %scpol, %scpha, %dHz\n",
spi->bits_per_word,
spi->mode & SPI_CPOL ? "" : "~",
spi->mode & SPI_CPHA ? "" : "~",
spi->max_speed_hz);
ret = pm_runtime_resume_and_get(tspi->dev);
if (ret < 0) {
dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret);
return ret;
}
spin_lock_irqsave(&tspi->lock, flags);
val = tspi->def_command_reg;
if (spi->mode & SPI_CS_HIGH)
val |= cs_pol_bit[spi_get_chipselect(spi, 0)];
else
val &= ~cs_pol_bit[spi_get_chipselect(spi, 0)];
tspi->def_command_reg = val;
tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
spin_unlock_irqrestore(&tspi->lock, flags);
pm_runtime_put(tspi->dev);
return 0;
}
static int tegra_slink_prepare_message(struct spi_master *master,
struct spi_message *msg)
{
struct tegra_slink_data *tspi = spi_master_get_devdata(master);
struct spi_device *spi = msg->spi;
tegra_slink_clear_status(tspi);
tspi->command_reg = tspi->def_command_reg;
tspi->command_reg |= SLINK_CS_SW | SLINK_CS_VALUE;
tspi->command2_reg = tspi->def_command2_reg;
tspi->command2_reg |= SLINK_SS_EN_CS(spi_get_chipselect(spi, 0));
tspi->command_reg &= ~SLINK_MODES;
if (spi->mode & SPI_CPHA)
tspi->command_reg |= SLINK_CK_SDA;
if (spi->mode & SPI_CPOL)
tspi->command_reg |= SLINK_IDLE_SCLK_DRIVE_HIGH;
else
tspi->command_reg |= SLINK_IDLE_SCLK_DRIVE_LOW;
return 0;
}
static int tegra_slink_transfer_one(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct tegra_slink_data *tspi = spi_master_get_devdata(master);
int ret;
reinit_completion(&tspi->xfer_completion);
ret = tegra_slink_start_transfer_one(spi, xfer);
if (ret < 0) {
dev_err(tspi->dev,
"spi can not start transfer, err %d\n", ret);
return ret;
}
ret = wait_for_completion_timeout(&tspi->xfer_completion,
SLINK_DMA_TIMEOUT);
if (WARN_ON(ret == 0)) {
dev_err(tspi->dev,
"spi transfer timeout, err %d\n", ret);
return -EIO;
}
if (tspi->tx_status)
return tspi->tx_status;
if (tspi->rx_status)
return tspi->rx_status;
return 0;
}
static int tegra_slink_unprepare_message(struct spi_master *master,
struct spi_message *msg)
{
struct tegra_slink_data *tspi = spi_master_get_devdata(master);
tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2);
return 0;
}
static irqreturn_t handle_cpu_based_xfer(struct tegra_slink_data *tspi)
{
struct spi_transfer *t = tspi->curr_xfer;
unsigned long flags;
spin_lock_irqsave(&tspi->lock, flags);
if (tspi->tx_status || tspi->rx_status ||
(tspi->status_reg & SLINK_BSY)) {
dev_err(tspi->dev,
"CpuXfer ERROR bit set 0x%x\n", tspi->status_reg);
dev_err(tspi->dev,
"CpuXfer 0x%08x:0x%08x:0x%08x\n", tspi->command_reg,
tspi->command2_reg, tspi->dma_control_reg);
reset_control_assert(tspi->rst);
udelay(2);
reset_control_deassert(tspi->rst);
complete(&tspi->xfer_completion);
goto exit;
}
if (tspi->cur_direction & DATA_DIR_RX)
tegra_slink_read_rx_fifo_to_client_rxbuf(tspi, t);
if (tspi->cur_direction & DATA_DIR_TX)
tspi->cur_pos = tspi->cur_tx_pos;
else
tspi->cur_pos = tspi->cur_rx_pos;
if (tspi->cur_pos == t->len) {
complete(&tspi->xfer_completion);
goto exit;
}
tegra_slink_calculate_curr_xfer_param(tspi->cur_spi, tspi, t);
tegra_slink_start_cpu_based_transfer(tspi, t);
exit:
spin_unlock_irqrestore(&tspi->lock, flags);
return IRQ_HANDLED;
}
static irqreturn_t handle_dma_based_xfer(struct tegra_slink_data *tspi)
{
struct spi_transfer *t = tspi->curr_xfer;
long wait_status;
int err = 0;
unsigned total_fifo_words;
unsigned long flags;
/* Abort dmas if any error */
if (tspi->cur_direction & DATA_DIR_TX) {
if (tspi->tx_status) {
dmaengine_terminate_all(tspi->tx_dma_chan);
err += 1;
} else {
wait_status = wait_for_completion_interruptible_timeout(
&tspi->tx_dma_complete, SLINK_DMA_TIMEOUT);
if (wait_status <= 0) {
dmaengine_terminate_all(tspi->tx_dma_chan);
dev_err(tspi->dev, "TxDma Xfer failed\n");
err += 1;
}
}
}
if (tspi->cur_direction & DATA_DIR_RX) {
if (tspi->rx_status) {
dmaengine_terminate_all(tspi->rx_dma_chan);
err += 2;
} else {
wait_status = wait_for_completion_interruptible_timeout(
&tspi->rx_dma_complete, SLINK_DMA_TIMEOUT);
if (wait_status <= 0) {
dmaengine_terminate_all(tspi->rx_dma_chan);
dev_err(tspi->dev, "RxDma Xfer failed\n");
err += 2;
}
}
}
spin_lock_irqsave(&tspi->lock, flags);
if (err) {
dev_err(tspi->dev,
"DmaXfer: ERROR bit set 0x%x\n", tspi->status_reg);
dev_err(tspi->dev,
"DmaXfer 0x%08x:0x%08x:0x%08x\n", tspi->command_reg,
tspi->command2_reg, tspi->dma_control_reg);
reset_control_assert(tspi->rst);
udelay(2);
		reset_control_deassert(tspi->rst);
complete(&tspi->xfer_completion);
spin_unlock_irqrestore(&tspi->lock, flags);
return IRQ_HANDLED;
}
if (tspi->cur_direction & DATA_DIR_RX)
tegra_slink_copy_spi_rxbuf_to_client_rxbuf(tspi, t);
if (tspi->cur_direction & DATA_DIR_TX)
tspi->cur_pos = tspi->cur_tx_pos;
else
tspi->cur_pos = tspi->cur_rx_pos;
if (tspi->cur_pos == t->len) {
complete(&tspi->xfer_completion);
goto exit;
}
/* Continue transfer in current message */
total_fifo_words = tegra_slink_calculate_curr_xfer_param(tspi->cur_spi,
tspi, t);
if (total_fifo_words > SLINK_FIFO_DEPTH)
err = tegra_slink_start_dma_based_transfer(tspi, t);
else
err = tegra_slink_start_cpu_based_transfer(tspi, t);
exit:
spin_unlock_irqrestore(&tspi->lock, flags);
return IRQ_HANDLED;
}
static irqreturn_t tegra_slink_isr_thread(int irq, void *context_data)
{
struct tegra_slink_data *tspi = context_data;
if (!tspi->is_curr_dma_xfer)
return handle_cpu_based_xfer(tspi);
return handle_dma_based_xfer(tspi);
}
static irqreturn_t tegra_slink_isr(int irq, void *context_data)
{
struct tegra_slink_data *tspi = context_data;
tspi->status_reg = tegra_slink_readl(tspi, SLINK_STATUS);
if (tspi->cur_direction & DATA_DIR_TX)
tspi->tx_status = tspi->status_reg &
(SLINK_TX_OVF | SLINK_TX_UNF);
if (tspi->cur_direction & DATA_DIR_RX)
tspi->rx_status = tspi->status_reg &
(SLINK_RX_OVF | SLINK_RX_UNF);
tegra_slink_clear_status(tspi);
return IRQ_WAKE_THREAD;
}
static const struct tegra_slink_chip_data tegra30_spi_cdata = {
.cs_hold_time = true,
};
static const struct tegra_slink_chip_data tegra20_spi_cdata = {
.cs_hold_time = false,
};
static const struct of_device_id tegra_slink_of_match[] = {
{ .compatible = "nvidia,tegra30-slink", .data = &tegra30_spi_cdata, },
{ .compatible = "nvidia,tegra20-slink", .data = &tegra20_spi_cdata, },
{}
};
MODULE_DEVICE_TABLE(of, tegra_slink_of_match);
static int tegra_slink_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct tegra_slink_data *tspi;
struct resource *r;
int ret, spi_irq;
const struct tegra_slink_chip_data *cdata = NULL;
cdata = of_device_get_match_data(&pdev->dev);
master = spi_alloc_master(&pdev->dev, sizeof(*tspi));
if (!master) {
dev_err(&pdev->dev, "master allocation failed\n");
return -ENOMEM;
}
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->setup = tegra_slink_setup;
master->prepare_message = tegra_slink_prepare_message;
master->transfer_one = tegra_slink_transfer_one;
master->unprepare_message = tegra_slink_unprepare_message;
master->auto_runtime_pm = true;
master->num_chipselect = MAX_CHIP_SELECT;
platform_set_drvdata(pdev, master);
tspi = spi_master_get_devdata(master);
tspi->master = master;
tspi->dev = &pdev->dev;
tspi->chip_data = cdata;
spin_lock_init(&tspi->lock);
if (of_property_read_u32(tspi->dev->of_node, "spi-max-frequency",
&master->max_speed_hz))
master->max_speed_hz = 25000000; /* 25MHz */
tspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
if (IS_ERR(tspi->base)) {
ret = PTR_ERR(tspi->base);
goto exit_free_master;
}
tspi->phys = r->start;
/* disabled clock may cause interrupt storm upon request */
tspi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(tspi->clk)) {
ret = PTR_ERR(tspi->clk);
dev_err(&pdev->dev, "Can not get clock %d\n", ret);
goto exit_free_master;
}
tspi->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi");
if (IS_ERR(tspi->rst)) {
dev_err(&pdev->dev, "can not get reset\n");
ret = PTR_ERR(tspi->rst);
goto exit_free_master;
}
ret = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
if (ret)
goto exit_free_master;
tspi->max_buf_size = SLINK_FIFO_DEPTH << 2;
tspi->dma_buf_size = DEFAULT_SPI_DMA_BUF_LEN;
ret = tegra_slink_init_dma_param(tspi, true);
if (ret < 0)
goto exit_free_master;
ret = tegra_slink_init_dma_param(tspi, false);
if (ret < 0)
goto exit_rx_dma_free;
tspi->max_buf_size = tspi->dma_buf_size;
init_completion(&tspi->tx_dma_complete);
init_completion(&tspi->rx_dma_complete);
init_completion(&tspi->xfer_completion);
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret) {
dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret);
goto exit_pm_disable;
}
reset_control_assert(tspi->rst);
udelay(2);
reset_control_deassert(tspi->rst);
spi_irq = platform_get_irq(pdev, 0);
tspi->irq = spi_irq;
ret = request_threaded_irq(tspi->irq, tegra_slink_isr,
tegra_slink_isr_thread, IRQF_ONESHOT,
dev_name(&pdev->dev), tspi);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
tspi->irq);
goto exit_pm_put;
}
tspi->def_command_reg = SLINK_M_S;
tspi->def_command2_reg = SLINK_CS_ACTIVE_BETWEEN;
tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2);
master->dev.of_node = pdev->dev.of_node;
ret = spi_register_master(master);
if (ret < 0) {
dev_err(&pdev->dev, "can not register to master err %d\n", ret);
goto exit_free_irq;
}
pm_runtime_put(&pdev->dev);
return ret;
exit_free_irq:
free_irq(spi_irq, tspi);
exit_pm_put:
pm_runtime_put(&pdev->dev);
exit_pm_disable:
pm_runtime_force_suspend(&pdev->dev);
tegra_slink_deinit_dma_param(tspi, false);
exit_rx_dma_free:
tegra_slink_deinit_dma_param(tspi, true);
exit_free_master:
spi_master_put(master);
return ret;
}
static void tegra_slink_remove(struct platform_device *pdev)
{
struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
struct tegra_slink_data *tspi = spi_master_get_devdata(master);
spi_unregister_master(master);
free_irq(tspi->irq, tspi);
pm_runtime_force_suspend(&pdev->dev);
if (tspi->tx_dma_chan)
tegra_slink_deinit_dma_param(tspi, false);
if (tspi->rx_dma_chan)
tegra_slink_deinit_dma_param(tspi, true);
spi_master_put(master);
}
#ifdef CONFIG_PM_SLEEP
static int tegra_slink_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
return spi_master_suspend(master);
}
static int tegra_slink_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct tegra_slink_data *tspi = spi_master_get_devdata(master);
int ret;
ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
dev_err(dev, "pm runtime failed, e = %d\n", ret);
return ret;
}
tegra_slink_writel(tspi, tspi->command_reg, SLINK_COMMAND);
tegra_slink_writel(tspi, tspi->command2_reg, SLINK_COMMAND2);
pm_runtime_put(dev);
return spi_master_resume(master);
}
#endif
static int __maybe_unused tegra_slink_runtime_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct tegra_slink_data *tspi = spi_master_get_devdata(master);
	/* Flush all writes which are in the PPSB queue by reading back */
tegra_slink_readl(tspi, SLINK_MAS_DATA);
clk_disable_unprepare(tspi->clk);
return 0;
}
static int __maybe_unused tegra_slink_runtime_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct tegra_slink_data *tspi = spi_master_get_devdata(master);
int ret;
ret = clk_prepare_enable(tspi->clk);
if (ret < 0) {
dev_err(tspi->dev, "clk_prepare failed: %d\n", ret);
return ret;
}
return 0;
}
static const struct dev_pm_ops slink_pm_ops = {
SET_RUNTIME_PM_OPS(tegra_slink_runtime_suspend,
tegra_slink_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(tegra_slink_suspend, tegra_slink_resume)
};
static struct platform_driver tegra_slink_driver = {
.driver = {
.name = "spi-tegra-slink",
.pm = &slink_pm_ops,
.of_match_table = tegra_slink_of_match,
},
.probe = tegra_slink_probe,
.remove_new = tegra_slink_remove,
};
module_platform_driver(tegra_slink_driver);
MODULE_ALIAS("platform:spi-tegra-slink");
MODULE_DESCRIPTION("NVIDIA Tegra20/Tegra30 SLINK Controller Driver");
MODULE_AUTHOR("Laxman Dewangan <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/spi/spi-tegra20-slink.c |
// SPDX-License-Identifier: GPL-2.0-only
//
// HiSilicon SPI NOR V3XX Flash Controller Driver for hi16xx chipsets
//
// Copyright (c) 2019 HiSilicon Technologies Co., Ltd.
// Author: John Garry <[email protected]>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/dmi.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#define HISI_SFC_V3XX_VERSION (0x1f8)
#define HISI_SFC_V3XX_GLB_CFG (0x100)
#define HISI_SFC_V3XX_GLB_CFG_CS0_ADDR_MODE BIT(2)
#define HISI_SFC_V3XX_RAW_INT_STAT (0x120)
#define HISI_SFC_V3XX_INT_STAT (0x124)
#define HISI_SFC_V3XX_INT_MASK (0x128)
#define HISI_SFC_V3XX_INT_CLR (0x12c)
#define HISI_SFC_V3XX_CMD_CFG (0x300)
#define HISI_SFC_V3XX_CMD_CFG_DATA_CNT_OFF 9
#define HISI_SFC_V3XX_CMD_CFG_RW_MSK BIT(8)
#define HISI_SFC_V3XX_CMD_CFG_DATA_EN_MSK BIT(7)
#define HISI_SFC_V3XX_CMD_CFG_DUMMY_CNT_OFF 4
#define HISI_SFC_V3XX_CMD_CFG_ADDR_EN_MSK BIT(3)
#define HISI_SFC_V3XX_CMD_CFG_CS_SEL_OFF 1
#define HISI_SFC_V3XX_CMD_CFG_START_MSK BIT(0)
#define HISI_SFC_V3XX_CMD_INS (0x308)
#define HISI_SFC_V3XX_CMD_ADDR (0x30c)
#define HISI_SFC_V3XX_CMD_DATABUF0 (0x400)
/* Common definition of interrupt bit masks */
#define HISI_SFC_V3XX_INT_MASK_ALL (0x1ff) /* all the masks */
#define HISI_SFC_V3XX_INT_MASK_CPLT BIT(0) /* command execution complete */
#define HISI_SFC_V3XX_INT_MASK_PP_ERR BIT(2) /* page program error */
#define HISI_SFC_V3XX_INT_MASK_IACCES BIT(5) /* error visiting inaccessible/
* protected address
*/
/* IO Mode definition in HISI_SFC_V3XX_CMD_CFG */
#define HISI_SFC_V3XX_STD (0 << 17)
#define HISI_SFC_V3XX_DIDO (1 << 17)
#define HISI_SFC_V3XX_DIO (2 << 17)
#define HISI_SFC_V3XX_FULL_DIO (3 << 17)
#define HISI_SFC_V3XX_QIQO (5 << 17)
#define HISI_SFC_V3XX_QIO (6 << 17)
#define HISI_SFC_V3XX_FULL_QIO (7 << 17)
/*
* The IO modes lookup table. hisi_sfc_v3xx_io_modes[(z - 1) / 2][y / 2][x / 2]
* stands for x-y-z mode, as described in SFDP terminology. -EIO indicates
* an invalid mode.
*/
static const int hisi_sfc_v3xx_io_modes[2][3][3] = {
{
{ HISI_SFC_V3XX_DIDO, HISI_SFC_V3XX_DIDO, HISI_SFC_V3XX_DIDO },
{ HISI_SFC_V3XX_DIO, HISI_SFC_V3XX_FULL_DIO, -EIO },
{ -EIO, -EIO, -EIO },
},
{
{ HISI_SFC_V3XX_QIQO, HISI_SFC_V3XX_QIQO, HISI_SFC_V3XX_QIQO },
{ -EIO, -EIO, -EIO },
{ HISI_SFC_V3XX_QIO, -EIO, HISI_SFC_V3XX_FULL_QIO },
},
};
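/*
 * Worked example of the lookup above: a 1-1-4 (cmd-addr-data) operation maps
 * to hisi_sfc_v3xx_io_modes[(4 - 1) / 2][1 / 2][1 / 2] = [1][0][0], i.e.
 * HISI_SFC_V3XX_QIQO, while a 4-4-4 operation maps to [1][2][2], i.e.
 * HISI_SFC_V3XX_FULL_QIO.
 */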
struct hisi_sfc_v3xx_host {
struct device *dev;
void __iomem *regbase;
int max_cmd_dword;
struct completion *completion;
u8 address_mode;
int irq;
};
static void hisi_sfc_v3xx_disable_int(struct hisi_sfc_v3xx_host *host)
{
writel(0, host->regbase + HISI_SFC_V3XX_INT_MASK);
}
static void hisi_sfc_v3xx_enable_int(struct hisi_sfc_v3xx_host *host)
{
writel(HISI_SFC_V3XX_INT_MASK_ALL, host->regbase + HISI_SFC_V3XX_INT_MASK);
}
static void hisi_sfc_v3xx_clear_int(struct hisi_sfc_v3xx_host *host)
{
writel(HISI_SFC_V3XX_INT_MASK_ALL, host->regbase + HISI_SFC_V3XX_INT_CLR);
}
/*
 * The raw interrupt status register indicates whether an error occurred
 * during the operation. Check it, and clear the interrupts so the next
 * check starts from a clean state.
*/
static int hisi_sfc_v3xx_handle_completion(struct hisi_sfc_v3xx_host *host)
{
u32 reg;
reg = readl(host->regbase + HISI_SFC_V3XX_RAW_INT_STAT);
hisi_sfc_v3xx_clear_int(host);
if (reg & HISI_SFC_V3XX_INT_MASK_IACCES) {
dev_err(host->dev, "fail to access protected address\n");
return -EIO;
}
if (reg & HISI_SFC_V3XX_INT_MASK_PP_ERR) {
dev_err(host->dev, "page program operation failed\n");
return -EIO;
}
/*
	 * The other bits of the interrupt registers are not currently
	 * used and should not be triggered by this driver. If one does
	 * occur, regard it as an unsupported error here.
*/
if (!(reg & HISI_SFC_V3XX_INT_MASK_CPLT)) {
dev_err(host->dev, "unsupported error occurred, status=0x%x\n", reg);
return -EIO;
}
return 0;
}
#define HISI_SFC_V3XX_WAIT_TIMEOUT_US 1000000
#define HISI_SFC_V3XX_WAIT_POLL_INTERVAL_US 10
static int hisi_sfc_v3xx_wait_cmd_idle(struct hisi_sfc_v3xx_host *host)
{
u32 reg;
return readl_poll_timeout(host->regbase + HISI_SFC_V3XX_CMD_CFG, reg,
!(reg & HISI_SFC_V3XX_CMD_CFG_START_MSK),
HISI_SFC_V3XX_WAIT_POLL_INTERVAL_US,
HISI_SFC_V3XX_WAIT_TIMEOUT_US);
}
static int hisi_sfc_v3xx_adjust_op_size(struct spi_mem *mem,
struct spi_mem_op *op)
{
struct spi_device *spi = mem->spi;
struct hisi_sfc_v3xx_host *host;
uintptr_t addr = (uintptr_t)op->data.buf.in;
int max_byte_count;
host = spi_controller_get_devdata(spi->controller);
max_byte_count = host->max_cmd_dword * 4;
if (!IS_ALIGNED(addr, 4) && op->data.nbytes >= 4)
op->data.nbytes = 4 - (addr % 4);
else if (op->data.nbytes > max_byte_count)
op->data.nbytes = max_byte_count;
return 0;
}
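/*
 * The clipping above keeps each transaction either 4-byte aligned at its
 * start or short enough to restore alignment; see the note above
 * hisi_sfc_v3xx_read_databuf() for why alignment matters for the DATABUF
 * accesses.
 */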
/*
* The controller only supports Standard SPI mode, Dual mode and
 * Quad mode. Sanitize the op a second time here to avoid OOB access.
*/
static bool hisi_sfc_v3xx_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
struct spi_device *spi = mem->spi;
struct hisi_sfc_v3xx_host *host;
host = spi_controller_get_devdata(spi->controller);
if (op->data.buswidth > 4 || op->dummy.buswidth > 4 ||
op->addr.buswidth > 4 || op->cmd.buswidth > 4)
return false;
if (op->addr.nbytes != host->address_mode && op->addr.nbytes)
return false;
return spi_mem_default_supports_op(mem, op);
}
/*
 * memcpy_{to,from}io doesn't guarantee 32b accesses - which we require for the
 * DATABUF registers - so use __io{read,write}32_copy when possible. For
* trailing bytes, copy them byte-by-byte from the DATABUF register, as we
* can't clobber outside the source/dest buffer.
*
* For efficient data read/write, we try to put any start 32b unaligned data
* into a separate transaction in hisi_sfc_v3xx_adjust_op_size().
*/
static void hisi_sfc_v3xx_read_databuf(struct hisi_sfc_v3xx_host *host,
u8 *to, unsigned int len)
{
void __iomem *from;
int i;
from = host->regbase + HISI_SFC_V3XX_CMD_DATABUF0;
if (IS_ALIGNED((uintptr_t)to, 4)) {
int words = len / 4;
__ioread32_copy(to, from, words);
len -= words * 4;
if (len) {
u32 val;
to += words * 4;
from += words * 4;
val = __raw_readl(from);
for (i = 0; i < len; i++, val >>= 8, to++)
*to = (u8)val;
}
} else {
for (i = 0; i < DIV_ROUND_UP(len, 4); i++, from += 4) {
u32 val = __raw_readl(from);
int j;
for (j = 0; j < 4 && (j + (i * 4) < len);
to++, val >>= 8, j++)
*to = (u8)val;
}
}
}
static void hisi_sfc_v3xx_write_databuf(struct hisi_sfc_v3xx_host *host,
const u8 *from, unsigned int len)
{
void __iomem *to;
int i;
to = host->regbase + HISI_SFC_V3XX_CMD_DATABUF0;
if (IS_ALIGNED((uintptr_t)from, 4)) {
int words = len / 4;
__iowrite32_copy(to, from, words);
len -= words * 4;
if (len) {
u32 val = 0;
to += words * 4;
from += words * 4;
for (i = 0; i < len; i++, from++)
val |= *from << i * 8;
__raw_writel(val, to);
}
} else {
for (i = 0; i < DIV_ROUND_UP(len, 4); i++, to += 4) {
u32 val = 0;
int j;
for (j = 0; j < 4 && (j + (i * 4) < len);
from++, j++)
val |= *from << j * 8;
__raw_writel(val, to);
}
}
}
static int hisi_sfc_v3xx_start_bus(struct hisi_sfc_v3xx_host *host,
const struct spi_mem_op *op,
u8 chip_select)
{
int len = op->data.nbytes, buswidth_mode;
u32 config = 0;
if (op->addr.nbytes)
config |= HISI_SFC_V3XX_CMD_CFG_ADDR_EN_MSK;
if (op->data.buswidth == 0 || op->data.buswidth == 1) {
buswidth_mode = HISI_SFC_V3XX_STD;
} else {
int data_idx, addr_idx, cmd_idx;
data_idx = (op->data.buswidth - 1) / 2;
addr_idx = op->addr.buswidth / 2;
cmd_idx = op->cmd.buswidth / 2;
buswidth_mode = hisi_sfc_v3xx_io_modes[data_idx][addr_idx][cmd_idx];
}
if (buswidth_mode < 0)
return buswidth_mode;
config |= buswidth_mode;
if (op->data.dir != SPI_MEM_NO_DATA) {
config |= (len - 1) << HISI_SFC_V3XX_CMD_CFG_DATA_CNT_OFF;
config |= HISI_SFC_V3XX_CMD_CFG_DATA_EN_MSK;
}
if (op->data.dir == SPI_MEM_DATA_IN)
config |= HISI_SFC_V3XX_CMD_CFG_RW_MSK;
config |= op->dummy.nbytes << HISI_SFC_V3XX_CMD_CFG_DUMMY_CNT_OFF |
chip_select << HISI_SFC_V3XX_CMD_CFG_CS_SEL_OFF |
HISI_SFC_V3XX_CMD_CFG_START_MSK;
writel(op->addr.val, host->regbase + HISI_SFC_V3XX_CMD_ADDR);
writel(op->cmd.opcode, host->regbase + HISI_SFC_V3XX_CMD_INS);
writel(config, host->regbase + HISI_SFC_V3XX_CMD_CFG);
return 0;
}
static int hisi_sfc_v3xx_generic_exec_op(struct hisi_sfc_v3xx_host *host,
const struct spi_mem_op *op,
u8 chip_select)
{
DECLARE_COMPLETION_ONSTACK(done);
int ret;
if (host->irq) {
host->completion = &done;
hisi_sfc_v3xx_enable_int(host);
}
if (op->data.dir == SPI_MEM_DATA_OUT)
hisi_sfc_v3xx_write_databuf(host, op->data.buf.out, op->data.nbytes);
ret = hisi_sfc_v3xx_start_bus(host, op, chip_select);
if (ret)
return ret;
if (host->irq) {
ret = wait_for_completion_timeout(host->completion,
usecs_to_jiffies(HISI_SFC_V3XX_WAIT_TIMEOUT_US));
if (!ret)
ret = -ETIMEDOUT;
else
ret = 0;
hisi_sfc_v3xx_disable_int(host);
synchronize_irq(host->irq);
host->completion = NULL;
} else {
ret = hisi_sfc_v3xx_wait_cmd_idle(host);
}
if (hisi_sfc_v3xx_handle_completion(host) || ret)
return -EIO;
if (op->data.dir == SPI_MEM_DATA_IN)
hisi_sfc_v3xx_read_databuf(host, op->data.buf.in, op->data.nbytes);
return 0;
}
static int hisi_sfc_v3xx_exec_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
struct hisi_sfc_v3xx_host *host;
struct spi_device *spi = mem->spi;
u8 chip_select = spi_get_chipselect(spi, 0);
host = spi_controller_get_devdata(spi->controller);
return hisi_sfc_v3xx_generic_exec_op(host, op, chip_select);
}
static const struct spi_controller_mem_ops hisi_sfc_v3xx_mem_ops = {
.adjust_op_size = hisi_sfc_v3xx_adjust_op_size,
.supports_op = hisi_sfc_v3xx_supports_op,
.exec_op = hisi_sfc_v3xx_exec_op,
};
static irqreturn_t hisi_sfc_v3xx_isr(int irq, void *data)
{
struct hisi_sfc_v3xx_host *host = data;
hisi_sfc_v3xx_disable_int(host);
complete(host->completion);
return IRQ_HANDLED;
}
static int hisi_sfc_v3xx_buswidth_override_bits;
/*
 * ACPI FW does not currently allow us to set the device buswidth, so quirk it
* depending on the board.
*/
static int __init hisi_sfc_v3xx_dmi_quirk(const struct dmi_system_id *d)
{
hisi_sfc_v3xx_buswidth_override_bits = SPI_RX_QUAD | SPI_TX_QUAD;
return 0;
}
static const struct dmi_system_id hisi_sfc_v3xx_dmi_quirk_table[] = {
{
.callback = hisi_sfc_v3xx_dmi_quirk,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Huawei"),
DMI_MATCH(DMI_PRODUCT_NAME, "D06"),
},
},
{
.callback = hisi_sfc_v3xx_dmi_quirk,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Huawei"),
DMI_MATCH(DMI_PRODUCT_NAME, "TaiShan 2280 V2"),
},
},
{
.callback = hisi_sfc_v3xx_dmi_quirk,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Huawei"),
DMI_MATCH(DMI_PRODUCT_NAME, "TaiShan 200 (Model 2280)"),
},
},
{}
};
static int hisi_sfc_v3xx_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct hisi_sfc_v3xx_host *host;
struct spi_controller *ctlr;
u32 version, glb_config;
int ret;
ctlr = spi_alloc_host(&pdev->dev, sizeof(*host));
if (!ctlr)
return -ENOMEM;
ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD |
SPI_TX_DUAL | SPI_TX_QUAD;
ctlr->buswidth_override_bits = hisi_sfc_v3xx_buswidth_override_bits;
host = spi_controller_get_devdata(ctlr);
host->dev = dev;
platform_set_drvdata(pdev, host);
host->regbase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->regbase)) {
ret = PTR_ERR(host->regbase);
goto err_put_host;
}
host->irq = platform_get_irq_optional(pdev, 0);
if (host->irq == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
goto err_put_host;
}
hisi_sfc_v3xx_disable_int(host);
if (host->irq > 0) {
ret = devm_request_irq(dev, host->irq, hisi_sfc_v3xx_isr, 0,
"hisi-sfc-v3xx", host);
if (ret) {
dev_err(dev, "failed to request irq%d, ret = %d\n", host->irq, ret);
host->irq = 0;
}
} else {
host->irq = 0;
}
ctlr->bus_num = -1;
ctlr->num_chipselect = 1;
ctlr->mem_ops = &hisi_sfc_v3xx_mem_ops;
/*
	 * The address mode of the controller is either 3 or 4 bytes,
* which is indicated by the address mode bit in
* the global config register. The register is read only
* for the OS driver.
*/
glb_config = readl(host->regbase + HISI_SFC_V3XX_GLB_CFG);
if (glb_config & HISI_SFC_V3XX_GLB_CFG_CS0_ADDR_MODE)
host->address_mode = 4;
else
host->address_mode = 3;
version = readl(host->regbase + HISI_SFC_V3XX_VERSION);
if (version >= 0x351)
host->max_cmd_dword = 64;
else
host->max_cmd_dword = 16;
ret = devm_spi_register_controller(dev, ctlr);
if (ret)
goto err_put_host;
dev_info(&pdev->dev, "hw version 0x%x, %s mode.\n",
version, host->irq ? "irq" : "polling");
return 0;
err_put_host:
spi_controller_put(ctlr);
return ret;
}
static const struct acpi_device_id hisi_sfc_v3xx_acpi_ids[] = {
{"HISI0341", 0},
{}
};
MODULE_DEVICE_TABLE(acpi, hisi_sfc_v3xx_acpi_ids);
static struct platform_driver hisi_sfc_v3xx_spi_driver = {
.driver = {
.name = "hisi-sfc-v3xx",
.acpi_match_table = hisi_sfc_v3xx_acpi_ids,
},
.probe = hisi_sfc_v3xx_probe,
};
static int __init hisi_sfc_v3xx_spi_init(void)
{
dmi_check_system(hisi_sfc_v3xx_dmi_quirk_table);
return platform_driver_register(&hisi_sfc_v3xx_spi_driver);
}
static void __exit hisi_sfc_v3xx_spi_exit(void)
{
platform_driver_unregister(&hisi_sfc_v3xx_spi_driver);
}
module_init(hisi_sfc_v3xx_spi_init);
module_exit(hisi_sfc_v3xx_spi_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <[email protected]>");
MODULE_DESCRIPTION("HiSilicon SPI NOR V3XX Flash Controller Driver for hi16xx chipsets");
| linux-master | drivers/spi/spi-hisi-sfc-v3xx.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Xilinx SPI controller driver (master mode only)
*
* Author: MontaVista Software, Inc.
* [email protected]
*
* Copyright (c) 2010 Secret Lab Technologies, Ltd.
* Copyright (c) 2009 Intel Corporation
* 2002-2007 (c) MontaVista Software, Inc.
*/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/spi/xilinx_spi.h>
#include <linux/io.h>
#define XILINX_SPI_MAX_CS 32
#define XILINX_SPI_NAME "xilinx_spi"
/* Register definitions as per "OPB Serial Peripheral Interface (SPI) (v1.00e)
* Product Specification", DS464
*/
#define XSPI_CR_OFFSET 0x60 /* Control Register */
#define XSPI_CR_LOOP 0x01
#define XSPI_CR_ENABLE 0x02
#define XSPI_CR_MASTER_MODE 0x04
#define XSPI_CR_CPOL 0x08
#define XSPI_CR_CPHA 0x10
#define XSPI_CR_MODE_MASK (XSPI_CR_CPHA | XSPI_CR_CPOL | \
XSPI_CR_LSB_FIRST | XSPI_CR_LOOP)
#define XSPI_CR_TXFIFO_RESET 0x20
#define XSPI_CR_RXFIFO_RESET 0x40
#define XSPI_CR_MANUAL_SSELECT 0x80
#define XSPI_CR_TRANS_INHIBIT 0x100
#define XSPI_CR_LSB_FIRST 0x200
#define XSPI_SR_OFFSET 0x64 /* Status Register */
#define XSPI_SR_RX_EMPTY_MASK 0x01 /* Receive FIFO is empty */
#define XSPI_SR_RX_FULL_MASK 0x02 /* Receive FIFO is full */
#define XSPI_SR_TX_EMPTY_MASK 0x04 /* Transmit FIFO is empty */
#define XSPI_SR_TX_FULL_MASK 0x08 /* Transmit FIFO is full */
#define XSPI_SR_MODE_FAULT_MASK 0x10 /* Mode fault error */
#define XSPI_TXD_OFFSET 0x68 /* Data Transmit Register */
#define XSPI_RXD_OFFSET 0x6c /* Data Receive Register */
#define XSPI_SSR_OFFSET 0x70 /* 32-bit Slave Select Register */
/* Register definitions as per "OPB IPIF (v3.01c) Product Specification", DS414
* IPIF registers are 32 bit
*/
#define XIPIF_V123B_DGIER_OFFSET 0x1c /* IPIF global int enable reg */
#define XIPIF_V123B_GINTR_ENABLE 0x80000000
#define XIPIF_V123B_IISR_OFFSET 0x20 /* IPIF interrupt status reg */
#define XIPIF_V123B_IIER_OFFSET 0x28 /* IPIF interrupt enable reg */
#define XSPI_INTR_MODE_FAULT 0x01 /* Mode fault error */
#define XSPI_INTR_SLAVE_MODE_FAULT 0x02 /* Selected as slave while
* disabled */
#define XSPI_INTR_TX_EMPTY 0x04 /* TxFIFO is empty */
#define XSPI_INTR_TX_UNDERRUN 0x08 /* TxFIFO was underrun */
#define XSPI_INTR_RX_FULL 0x10 /* RxFIFO is full */
#define XSPI_INTR_RX_OVERRUN 0x20 /* RxFIFO was overrun */
#define XSPI_INTR_TX_HALF_EMPTY 0x40 /* TxFIFO is half empty */
#define XIPIF_V123B_RESETR_OFFSET 0x40 /* IPIF reset register */
#define XIPIF_V123B_RESET_MASK 0x0a /* the value to write */
struct xilinx_spi {
/* bitbang has to be first */
struct spi_bitbang bitbang;
struct completion done;
void __iomem *regs; /* virt. address of the control registers */
int irq;
bool force_irq; /* force irq to setup master inhibit */
	u8 *rx_ptr;		/* pointer in the Rx buffer */
	const u8 *tx_ptr;	/* pointer in the Tx buffer */
u8 bytes_per_word;
int buffer_size; /* buffer size in words */
u32 cs_inactive; /* Level of the CS pins when inactive*/
unsigned int (*read_fn)(void __iomem *);
void (*write_fn)(u32, void __iomem *);
};
static void xspi_write32(u32 val, void __iomem *addr)
{
iowrite32(val, addr);
}
static unsigned int xspi_read32(void __iomem *addr)
{
return ioread32(addr);
}
static void xspi_write32_be(u32 val, void __iomem *addr)
{
iowrite32be(val, addr);
}
static unsigned int xspi_read32_be(void __iomem *addr)
{
return ioread32be(addr);
}
static void xilinx_spi_tx(struct xilinx_spi *xspi)
{
u32 data = 0;
if (!xspi->tx_ptr) {
xspi->write_fn(0, xspi->regs + XSPI_TXD_OFFSET);
return;
}
switch (xspi->bytes_per_word) {
case 1:
data = *(u8 *)(xspi->tx_ptr);
break;
case 2:
data = *(u16 *)(xspi->tx_ptr);
break;
case 4:
data = *(u32 *)(xspi->tx_ptr);
break;
}
xspi->write_fn(data, xspi->regs + XSPI_TXD_OFFSET);
xspi->tx_ptr += xspi->bytes_per_word;
}
static void xilinx_spi_rx(struct xilinx_spi *xspi)
{
u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET);
if (!xspi->rx_ptr)
return;
switch (xspi->bytes_per_word) {
case 1:
*(u8 *)(xspi->rx_ptr) = data;
break;
case 2:
*(u16 *)(xspi->rx_ptr) = data;
break;
case 4:
*(u32 *)(xspi->rx_ptr) = data;
break;
}
xspi->rx_ptr += xspi->bytes_per_word;
}
static void xspi_init_hw(struct xilinx_spi *xspi)
{
void __iomem *regs_base = xspi->regs;
/* Reset the SPI device */
xspi->write_fn(XIPIF_V123B_RESET_MASK,
regs_base + XIPIF_V123B_RESETR_OFFSET);
/* Enable the transmit empty interrupt, which we use to determine
* progress on the transmission.
*/
xspi->write_fn(XSPI_INTR_TX_EMPTY,
regs_base + XIPIF_V123B_IIER_OFFSET);
/* Disable the global IPIF interrupt */
xspi->write_fn(0, regs_base + XIPIF_V123B_DGIER_OFFSET);
/* Deselect the slave on the SPI bus */
xspi->write_fn(0xffff, regs_base + XSPI_SSR_OFFSET);
/* Disable the transmitter, enable Manual Slave Select Assertion,
* put SPI controller into master mode, and enable it */
xspi->write_fn(XSPI_CR_MANUAL_SSELECT | XSPI_CR_MASTER_MODE |
XSPI_CR_ENABLE | XSPI_CR_TXFIFO_RESET | XSPI_CR_RXFIFO_RESET,
regs_base + XSPI_CR_OFFSET);
}
static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
{
struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
u16 cr;
u32 cs;
if (is_on == BITBANG_CS_INACTIVE) {
/* Deselect the slave on the SPI bus */
xspi->write_fn(xspi->cs_inactive, xspi->regs + XSPI_SSR_OFFSET);
return;
}
/* Set the SPI clock phase and polarity */
cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) & ~XSPI_CR_MODE_MASK;
if (spi->mode & SPI_CPHA)
cr |= XSPI_CR_CPHA;
if (spi->mode & SPI_CPOL)
cr |= XSPI_CR_CPOL;
if (spi->mode & SPI_LSB_FIRST)
cr |= XSPI_CR_LSB_FIRST;
if (spi->mode & SPI_LOOP)
cr |= XSPI_CR_LOOP;
xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
/* We do not check spi->max_speed_hz here as the SPI clock
* frequency is not software programmable (the IP block design
* parameter)
*/
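	/* Toggle only the addressed chip select bit: with the default
	 * active-low CS (bit set in cs_inactive) the XOR below drives it
	 * low, and with SPI_CS_HIGH (bit cleared by setup_transfer) it
	 * drives it high.
	 */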
cs = xspi->cs_inactive;
cs ^= BIT(spi_get_chipselect(spi, 0));
/* Activate the chip select */
xspi->write_fn(cs, xspi->regs + XSPI_SSR_OFFSET);
}
/* spi_bitbang requires custom setup_transfer() to be defined if there is a
* custom txrx_bufs().
*/
static int xilinx_spi_setup_transfer(struct spi_device *spi,
struct spi_transfer *t)
{
struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
if (spi->mode & SPI_CS_HIGH)
xspi->cs_inactive &= ~BIT(spi_get_chipselect(spi, 0));
else
xspi->cs_inactive |= BIT(spi_get_chipselect(spi, 0));
return 0;
}
static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
{
struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
int remaining_words; /* the number of words left to transfer */
bool use_irq = false;
u16 cr = 0;
/* We get here with transmitter inhibited */
xspi->tx_ptr = t->tx_buf;
xspi->rx_ptr = t->rx_buf;
remaining_words = t->len / xspi->bytes_per_word;
if (xspi->irq >= 0 &&
(xspi->force_irq || remaining_words > xspi->buffer_size)) {
u32 isr;
use_irq = true;
		/* Inhibit irq to avoid spurious irqs on tx_empty */
cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET);
xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
xspi->regs + XSPI_CR_OFFSET);
/* ACK old irqs (if any) */
isr = xspi->read_fn(xspi->regs + XIPIF_V123B_IISR_OFFSET);
if (isr)
xspi->write_fn(isr,
xspi->regs + XIPIF_V123B_IISR_OFFSET);
/* Enable the global IPIF interrupt */
xspi->write_fn(XIPIF_V123B_GINTR_ENABLE,
xspi->regs + XIPIF_V123B_DGIER_OFFSET);
reinit_completion(&xspi->done);
}
while (remaining_words) {
int n_words, tx_words, rx_words;
u32 sr;
int stalled;
n_words = min(remaining_words, xspi->buffer_size);
tx_words = n_words;
while (tx_words--)
xilinx_spi_tx(xspi);
/* Start the transfer by not inhibiting the transmitter any
* longer
*/
if (use_irq) {
xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
wait_for_completion(&xspi->done);
/* A transmit has just completed. Process received data
* and check for more data to transmit. Always inhibit
* the transmitter while the Isr refills the transmit
* register/FIFO, or make sure it is stopped if we're
* done.
*/
xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
xspi->regs + XSPI_CR_OFFSET);
sr = XSPI_SR_TX_EMPTY_MASK;
} else
sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
/* Read out all the data from the Rx FIFO */
rx_words = n_words;
stalled = 10;
while (rx_words) {
if (rx_words == n_words && !(stalled--) &&
!(sr & XSPI_SR_TX_EMPTY_MASK) &&
(sr & XSPI_SR_RX_EMPTY_MASK)) {
dev_err(&spi->dev,
"Detected stall. Check C_SPI_MODE and C_SPI_MEMORY\n");
xspi_init_hw(xspi);
return -EIO;
}
if ((sr & XSPI_SR_TX_EMPTY_MASK) && (rx_words > 1)) {
xilinx_spi_rx(xspi);
rx_words--;
continue;
}
sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
if (!(sr & XSPI_SR_RX_EMPTY_MASK)) {
xilinx_spi_rx(xspi);
rx_words--;
}
}
remaining_words -= n_words;
}
if (use_irq) {
xspi->write_fn(0, xspi->regs + XIPIF_V123B_DGIER_OFFSET);
xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
}
return t->len;
}
/* This driver supports single master mode only. Hence Tx FIFO Empty
 * is the only interrupt we care about.
 * Receive FIFO Overrun, Transmit FIFO Underrun, Mode Fault, and Slave Mode
 * Fault are not expected to happen.
 */
static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
{
struct xilinx_spi *xspi = dev_id;
u32 ipif_isr;
/* Get the IPIF interrupts, and clear them immediately */
ipif_isr = xspi->read_fn(xspi->regs + XIPIF_V123B_IISR_OFFSET);
xspi->write_fn(ipif_isr, xspi->regs + XIPIF_V123B_IISR_OFFSET);
if (ipif_isr & XSPI_INTR_TX_EMPTY) { /* Transmission completed */
complete(&xspi->done);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
static int xilinx_spi_find_buffer_size(struct xilinx_spi *xspi)
{
u8 sr;
int n_words = 0;
/*
* Before the buffer_size detection we reset the core
* to make sure we start with a clean state.
*/
xspi->write_fn(XIPIF_V123B_RESET_MASK,
xspi->regs + XIPIF_V123B_RESETR_OFFSET);
/* Fill the Tx FIFO with as many words as possible */
do {
xspi->write_fn(0, xspi->regs + XSPI_TXD_OFFSET);
sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
n_words++;
} while (!(sr & XSPI_SR_TX_FULL_MASK));
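	/* n_words now reflects the TX FIFO depth in words; a common AXI Quad
	 * SPI configuration yields 16 here, though the exact figure depends
	 * on how the core was generated (the driver only relies on the
	 * measured value).
	 */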
return n_words;
}
static const struct of_device_id xilinx_spi_of_match[] = {
{ .compatible = "xlnx,axi-quad-spi-1.00.a", },
{ .compatible = "xlnx,xps-spi-2.00.a", },
{ .compatible = "xlnx,xps-spi-2.00.b", },
{}
};
MODULE_DEVICE_TABLE(of, xilinx_spi_of_match);
static int xilinx_spi_probe(struct platform_device *pdev)
{
struct xilinx_spi *xspi;
struct xspi_platform_data *pdata;
struct resource *res;
int ret, num_cs = 0, bits_per_word;
struct spi_master *master;
bool force_irq = false;
u32 tmp;
u8 i;
pdata = dev_get_platdata(&pdev->dev);
if (pdata) {
num_cs = pdata->num_chipselect;
bits_per_word = pdata->bits_per_word;
force_irq = pdata->force_irq;
} else {
of_property_read_u32(pdev->dev.of_node, "xlnx,num-ss-bits",
&num_cs);
ret = of_property_read_u32(pdev->dev.of_node,
"xlnx,num-transfer-bits",
&bits_per_word);
if (ret)
bits_per_word = 8;
}
if (!num_cs) {
dev_err(&pdev->dev,
"Missing slave select configuration data\n");
return -EINVAL;
}
if (num_cs > XILINX_SPI_MAX_CS) {
dev_err(&pdev->dev, "Invalid number of spi slaves\n");
return -EINVAL;
}
master = devm_spi_alloc_master(&pdev->dev, sizeof(struct xilinx_spi));
if (!master)
return -ENODEV;
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_LOOP |
SPI_CS_HIGH;
xspi = spi_master_get_devdata(master);
xspi->cs_inactive = 0xffffffff;
xspi->bitbang.master = master;
xspi->bitbang.chipselect = xilinx_spi_chipselect;
xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer;
xspi->bitbang.txrx_bufs = xilinx_spi_txrx_bufs;
init_completion(&xspi->done);
xspi->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(xspi->regs))
return PTR_ERR(xspi->regs);
master->bus_num = pdev->id;
master->num_chipselect = num_cs;
master->dev.of_node = pdev->dev.of_node;
	/*
	 * Detect the endianness of the IP via the loop bit in CR. Detection
	 * must be done before a reset is issued, because writing an
	 * incorrect reset value generates an error interrupt.
	 * Set up the little-endian helper functions first, write the loop
	 * bit with them, and check whether the bit reads back as expected.
	 */
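	/* As an illustration (not an exhaustive description of the bus
	 * behaviour): on a big-endian register interface the LOOP bit
	 * written via iowrite32() lands in the wrong byte lane, the
	 * read-back check below fails, and the big-endian accessors are
	 * selected instead.
	 */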
xspi->read_fn = xspi_read32;
xspi->write_fn = xspi_write32;
xspi->write_fn(XSPI_CR_LOOP, xspi->regs + XSPI_CR_OFFSET);
tmp = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET);
tmp &= XSPI_CR_LOOP;
if (tmp != XSPI_CR_LOOP) {
xspi->read_fn = xspi_read32_be;
xspi->write_fn = xspi_write32_be;
}
master->bits_per_word_mask = SPI_BPW_MASK(bits_per_word);
xspi->bytes_per_word = bits_per_word / 8;
xspi->buffer_size = xilinx_spi_find_buffer_size(xspi);
xspi->irq = platform_get_irq(pdev, 0);
if (xspi->irq < 0 && xspi->irq != -ENXIO) {
return xspi->irq;
} else if (xspi->irq >= 0) {
/* Register for SPI Interrupt */
ret = devm_request_irq(&pdev->dev, xspi->irq, xilinx_spi_irq, 0,
dev_name(&pdev->dev), xspi);
if (ret)
return ret;
xspi->force_irq = force_irq;
}
/* SPI controller initializations */
xspi_init_hw(xspi);
ret = spi_bitbang_start(&xspi->bitbang);
if (ret) {
dev_err(&pdev->dev, "spi_bitbang_start FAILED\n");
return ret;
}
dev_info(&pdev->dev, "at %pR, irq=%d\n", res, xspi->irq);
if (pdata) {
for (i = 0; i < pdata->num_devices; i++)
spi_new_device(master, pdata->devices + i);
}
platform_set_drvdata(pdev, master);
return 0;
}
static void xilinx_spi_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct xilinx_spi *xspi = spi_master_get_devdata(master);
void __iomem *regs_base = xspi->regs;
spi_bitbang_stop(&xspi->bitbang);
/* Disable all the interrupts just in case */
xspi->write_fn(0, regs_base + XIPIF_V123B_IIER_OFFSET);
/* Disable the global IPIF interrupt */
xspi->write_fn(0, regs_base + XIPIF_V123B_DGIER_OFFSET);
spi_master_put(xspi->bitbang.master);
}
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:" XILINX_SPI_NAME);
static struct platform_driver xilinx_spi_driver = {
.probe = xilinx_spi_probe,
.remove_new = xilinx_spi_remove,
.driver = {
.name = XILINX_SPI_NAME,
.of_match_table = xilinx_spi_of_match,
},
};
module_platform_driver(xilinx_spi_driver);
MODULE_AUTHOR("MontaVista Software, Inc. <[email protected]>");
MODULE_DESCRIPTION("Xilinx SPI driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-xilinx.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015 MediaTek Inc.
* Author: Leilk Liu <[email protected]>
*/
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/platform_data/spi-mt65xx.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/dma-mapping.h>
#define SPI_CFG0_REG 0x0000
#define SPI_CFG1_REG 0x0004
#define SPI_TX_SRC_REG 0x0008
#define SPI_RX_DST_REG 0x000c
#define SPI_TX_DATA_REG 0x0010
#define SPI_RX_DATA_REG 0x0014
#define SPI_CMD_REG 0x0018
#define SPI_STATUS0_REG 0x001c
#define SPI_PAD_SEL_REG 0x0024
#define SPI_CFG2_REG 0x0028
#define SPI_TX_SRC_REG_64 0x002c
#define SPI_RX_DST_REG_64 0x0030
#define SPI_CFG3_IPM_REG 0x0040
#define SPI_CFG0_SCK_HIGH_OFFSET 0
#define SPI_CFG0_SCK_LOW_OFFSET 8
#define SPI_CFG0_CS_HOLD_OFFSET 16
#define SPI_CFG0_CS_SETUP_OFFSET 24
#define SPI_ADJUST_CFG0_CS_HOLD_OFFSET 0
#define SPI_ADJUST_CFG0_CS_SETUP_OFFSET 16
#define SPI_CFG1_CS_IDLE_OFFSET 0
#define SPI_CFG1_PACKET_LOOP_OFFSET 8
#define SPI_CFG1_PACKET_LENGTH_OFFSET 16
#define SPI_CFG1_GET_TICK_DLY_OFFSET 29
#define SPI_CFG1_GET_TICK_DLY_OFFSET_V1 30
#define SPI_CFG1_GET_TICK_DLY_MASK 0xe0000000
#define SPI_CFG1_GET_TICK_DLY_MASK_V1 0xc0000000
#define SPI_CFG1_CS_IDLE_MASK 0xff
#define SPI_CFG1_PACKET_LOOP_MASK 0xff00
#define SPI_CFG1_PACKET_LENGTH_MASK 0x3ff0000
#define SPI_CFG1_IPM_PACKET_LENGTH_MASK GENMASK(31, 16)
#define SPI_CFG2_SCK_HIGH_OFFSET 0
#define SPI_CFG2_SCK_LOW_OFFSET 16
#define SPI_CMD_ACT BIT(0)
#define SPI_CMD_RESUME BIT(1)
#define SPI_CMD_RST BIT(2)
#define SPI_CMD_PAUSE_EN BIT(4)
#define SPI_CMD_DEASSERT BIT(5)
#define SPI_CMD_SAMPLE_SEL BIT(6)
#define SPI_CMD_CS_POL BIT(7)
#define SPI_CMD_CPHA BIT(8)
#define SPI_CMD_CPOL BIT(9)
#define SPI_CMD_RX_DMA BIT(10)
#define SPI_CMD_TX_DMA BIT(11)
#define SPI_CMD_TXMSBF BIT(12)
#define SPI_CMD_RXMSBF BIT(13)
#define SPI_CMD_RX_ENDIAN BIT(14)
#define SPI_CMD_TX_ENDIAN BIT(15)
#define SPI_CMD_FINISH_IE BIT(16)
#define SPI_CMD_PAUSE_IE BIT(17)
#define SPI_CMD_IPM_NONIDLE_MODE BIT(19)
#define SPI_CMD_IPM_SPIM_LOOP BIT(21)
#define SPI_CMD_IPM_GET_TICKDLY_OFFSET 22
#define SPI_CMD_IPM_GET_TICKDLY_MASK GENMASK(24, 22)
#define PIN_MODE_CFG(x) ((x) / 2)
#define SPI_CFG3_IPM_HALF_DUPLEX_DIR BIT(2)
#define SPI_CFG3_IPM_HALF_DUPLEX_EN BIT(3)
#define SPI_CFG3_IPM_XMODE_EN BIT(4)
#define SPI_CFG3_IPM_NODATA_FLAG BIT(5)
#define SPI_CFG3_IPM_CMD_BYTELEN_OFFSET 8
#define SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET 12
#define SPI_CFG3_IPM_CMD_PIN_MODE_MASK GENMASK(1, 0)
#define SPI_CFG3_IPM_CMD_BYTELEN_MASK GENMASK(11, 8)
#define SPI_CFG3_IPM_ADDR_BYTELEN_MASK GENMASK(15, 12)
#define MT8173_SPI_MAX_PAD_SEL 3
#define MTK_SPI_PAUSE_INT_STATUS 0x2
#define MTK_SPI_MAX_FIFO_SIZE 32U
#define MTK_SPI_PACKET_SIZE 1024
#define MTK_SPI_IPM_PACKET_SIZE SZ_64K
#define MTK_SPI_IPM_PACKET_LOOP SZ_256
#define MTK_SPI_IDLE 0
#define MTK_SPI_PAUSED 1
#define MTK_SPI_32BITS_MASK (0xffffffff)
#define DMA_ADDR_EXT_BITS (36)
#define DMA_ADDR_DEF_BITS (32)
/**
* struct mtk_spi_compatible - device data structure
* @need_pad_sel: Enable pad (pins) selection in SPI controller
* @must_tx: Must explicitly send dummy TX bytes to do RX only transfer
* @enhance_timing: Enable adjusting cfg register to enhance time accuracy
* @dma_ext: DMA address extension supported
* @no_need_unprepare: Don't unprepare the SPI clk during runtime
* @ipm_design: Adjust/extend registers to support IPM design IP features
*/
struct mtk_spi_compatible {
bool need_pad_sel;
bool must_tx;
bool enhance_timing;
bool dma_ext;
bool no_need_unprepare;
bool ipm_design;
};
/**
* struct mtk_spi - SPI driver instance
* @base: Start address of the SPI controller registers
* @state: SPI controller state
* @pad_num: Number of pad_sel entries
* @pad_sel: Groups of pins to select
* @parent_clk: Parent of sel_clk
* @sel_clk: SPI master mux clock
* @spi_clk: Peripheral clock
* @spi_hclk: AHB bus clock
* @cur_transfer: Currently processed SPI transfer
* @xfer_len: Number of bytes to transfer
* @num_xfered: Number of transferred bytes
* @tx_sgl: TX transfer scatterlist
* @rx_sgl: RX transfer scatterlist
* @tx_sgl_len: Size of TX DMA transfer
* @rx_sgl_len: Size of RX DMA transfer
* @dev_comp: Device data structure
* @spi_clk_hz: Current SPI clock in Hz
* @spimem_done: SPI-MEM operation completion
* @use_spimem: Enables SPI-MEM
* @dev: Device pointer
* @tx_dma: DMA start for SPI-MEM TX
* @rx_dma: DMA start for SPI-MEM RX
*/
struct mtk_spi {
void __iomem *base;
u32 state;
int pad_num;
u32 *pad_sel;
struct clk *parent_clk, *sel_clk, *spi_clk, *spi_hclk;
struct spi_transfer *cur_transfer;
u32 xfer_len;
u32 num_xfered;
struct scatterlist *tx_sgl, *rx_sgl;
u32 tx_sgl_len, rx_sgl_len;
const struct mtk_spi_compatible *dev_comp;
u32 spi_clk_hz;
struct completion spimem_done;
bool use_spimem;
struct device *dev;
dma_addr_t tx_dma;
dma_addr_t rx_dma;
};
static const struct mtk_spi_compatible mtk_common_compat;
static const struct mtk_spi_compatible mt2712_compat = {
.must_tx = true,
};
static const struct mtk_spi_compatible mtk_ipm_compat = {
.enhance_timing = true,
.dma_ext = true,
.ipm_design = true,
};
static const struct mtk_spi_compatible mt6765_compat = {
.need_pad_sel = true,
.must_tx = true,
.enhance_timing = true,
.dma_ext = true,
};
static const struct mtk_spi_compatible mt7622_compat = {
.must_tx = true,
.enhance_timing = true,
};
static const struct mtk_spi_compatible mt8173_compat = {
.need_pad_sel = true,
.must_tx = true,
};
static const struct mtk_spi_compatible mt8183_compat = {
.need_pad_sel = true,
.must_tx = true,
.enhance_timing = true,
};
static const struct mtk_spi_compatible mt6893_compat = {
.need_pad_sel = true,
.must_tx = true,
.enhance_timing = true,
.dma_ext = true,
.no_need_unprepare = true,
};
/*
* A piece of default chip info unless the platform
* supplies it.
*/
static const struct mtk_chip_config mtk_default_chip_info = {
.sample_sel = 0,
.tick_delay = 0,
};
static const struct of_device_id mtk_spi_of_match[] = {
{ .compatible = "mediatek,spi-ipm",
.data = (void *)&mtk_ipm_compat,
},
{ .compatible = "mediatek,mt2701-spi",
.data = (void *)&mtk_common_compat,
},
{ .compatible = "mediatek,mt2712-spi",
.data = (void *)&mt2712_compat,
},
{ .compatible = "mediatek,mt6589-spi",
.data = (void *)&mtk_common_compat,
},
{ .compatible = "mediatek,mt6765-spi",
.data = (void *)&mt6765_compat,
},
{ .compatible = "mediatek,mt7622-spi",
.data = (void *)&mt7622_compat,
},
{ .compatible = "mediatek,mt7629-spi",
.data = (void *)&mt7622_compat,
},
{ .compatible = "mediatek,mt8135-spi",
.data = (void *)&mtk_common_compat,
},
{ .compatible = "mediatek,mt8173-spi",
.data = (void *)&mt8173_compat,
},
{ .compatible = "mediatek,mt8183-spi",
.data = (void *)&mt8183_compat,
},
{ .compatible = "mediatek,mt8192-spi",
.data = (void *)&mt6765_compat,
},
{ .compatible = "mediatek,mt6893-spi",
.data = (void *)&mt6893_compat,
},
{}
};
MODULE_DEVICE_TABLE(of, mtk_spi_of_match);
static void mtk_spi_reset(struct mtk_spi *mdata)
{
u32 reg_val;
/* set the software reset bit in SPI_CMD_REG. */
reg_val = readl(mdata->base + SPI_CMD_REG);
reg_val |= SPI_CMD_RST;
writel(reg_val, mdata->base + SPI_CMD_REG);
reg_val = readl(mdata->base + SPI_CMD_REG);
reg_val &= ~SPI_CMD_RST;
writel(reg_val, mdata->base + SPI_CMD_REG);
}
static int mtk_spi_set_hw_cs_timing(struct spi_device *spi)
{
struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
struct spi_delay *cs_setup = &spi->cs_setup;
struct spi_delay *cs_hold = &spi->cs_hold;
struct spi_delay *cs_inactive = &spi->cs_inactive;
u32 setup, hold, inactive;
u32 reg_val;
int delay;
delay = spi_delay_to_ns(cs_setup, NULL);
if (delay < 0)
return delay;
setup = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;
delay = spi_delay_to_ns(cs_hold, NULL);
if (delay < 0)
return delay;
hold = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;
delay = spi_delay_to_ns(cs_inactive, NULL);
if (delay < 0)
return delay;
inactive = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;
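	/*
	 * The delays above are converted from ns to SPI clock ticks. As an
	 * illustrative example: with a 100 MHz spi_clk and a 50 ns cs_setup
	 * delay, setup = (50 * 100) / 1000 = 5 ticks.
	 */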
if (hold || setup) {
reg_val = readl(mdata->base + SPI_CFG0_REG);
if (mdata->dev_comp->enhance_timing) {
if (hold) {
hold = min_t(u32, hold, 0x10000);
reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
reg_val |= (((hold - 1) & 0xffff)
<< SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
}
if (setup) {
setup = min_t(u32, setup, 0x10000);
reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
reg_val |= (((setup - 1) & 0xffff)
<< SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
}
} else {
if (hold) {
hold = min_t(u32, hold, 0x100);
reg_val &= ~(0xff << SPI_CFG0_CS_HOLD_OFFSET);
reg_val |= (((hold - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
}
if (setup) {
setup = min_t(u32, setup, 0x100);
reg_val &= ~(0xff << SPI_CFG0_CS_SETUP_OFFSET);
reg_val |= (((setup - 1) & 0xff)
<< SPI_CFG0_CS_SETUP_OFFSET);
}
}
writel(reg_val, mdata->base + SPI_CFG0_REG);
}
if (inactive) {
inactive = min_t(u32, inactive, 0x100);
reg_val = readl(mdata->base + SPI_CFG1_REG);
reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
reg_val |= (((inactive - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET);
writel(reg_val, mdata->base + SPI_CFG1_REG);
}
return 0;
}
static int mtk_spi_hw_init(struct spi_master *master,
struct spi_device *spi)
{
u16 cpha, cpol;
u32 reg_val;
struct mtk_chip_config *chip_config = spi->controller_data;
struct mtk_spi *mdata = spi_master_get_devdata(master);
cpha = spi->mode & SPI_CPHA ? 1 : 0;
cpol = spi->mode & SPI_CPOL ? 1 : 0;
reg_val = readl(mdata->base + SPI_CMD_REG);
if (mdata->dev_comp->ipm_design) {
/* SPI transfer without idle time until packet length done */
reg_val |= SPI_CMD_IPM_NONIDLE_MODE;
if (spi->mode & SPI_LOOP)
reg_val |= SPI_CMD_IPM_SPIM_LOOP;
else
reg_val &= ~SPI_CMD_IPM_SPIM_LOOP;
}
if (cpha)
reg_val |= SPI_CMD_CPHA;
else
reg_val &= ~SPI_CMD_CPHA;
if (cpol)
reg_val |= SPI_CMD_CPOL;
else
reg_val &= ~SPI_CMD_CPOL;
/* set the mlsbx and mlsbtx */
if (spi->mode & SPI_LSB_FIRST) {
reg_val &= ~SPI_CMD_TXMSBF;
reg_val &= ~SPI_CMD_RXMSBF;
} else {
reg_val |= SPI_CMD_TXMSBF;
reg_val |= SPI_CMD_RXMSBF;
}
/* set the tx/rx endian */
#ifdef __LITTLE_ENDIAN
reg_val &= ~SPI_CMD_TX_ENDIAN;
reg_val &= ~SPI_CMD_RX_ENDIAN;
#else
reg_val |= SPI_CMD_TX_ENDIAN;
reg_val |= SPI_CMD_RX_ENDIAN;
#endif
if (mdata->dev_comp->enhance_timing) {
/* set CS polarity */
if (spi->mode & SPI_CS_HIGH)
reg_val |= SPI_CMD_CS_POL;
else
reg_val &= ~SPI_CMD_CS_POL;
if (chip_config->sample_sel)
reg_val |= SPI_CMD_SAMPLE_SEL;
else
reg_val &= ~SPI_CMD_SAMPLE_SEL;
}
/* set finish and pause interrupt always enable */
reg_val |= SPI_CMD_FINISH_IE | SPI_CMD_PAUSE_IE;
/* disable dma mode */
reg_val &= ~(SPI_CMD_TX_DMA | SPI_CMD_RX_DMA);
/* disable deassert mode */
reg_val &= ~SPI_CMD_DEASSERT;
writel(reg_val, mdata->base + SPI_CMD_REG);
/* pad select */
if (mdata->dev_comp->need_pad_sel)
writel(mdata->pad_sel[spi_get_chipselect(spi, 0)],
mdata->base + SPI_PAD_SEL_REG);
/* tick delay */
if (mdata->dev_comp->enhance_timing) {
if (mdata->dev_comp->ipm_design) {
reg_val = readl(mdata->base + SPI_CMD_REG);
reg_val &= ~SPI_CMD_IPM_GET_TICKDLY_MASK;
reg_val |= ((chip_config->tick_delay & 0x7)
<< SPI_CMD_IPM_GET_TICKDLY_OFFSET);
writel(reg_val, mdata->base + SPI_CMD_REG);
} else {
reg_val = readl(mdata->base + SPI_CFG1_REG);
reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK;
reg_val |= ((chip_config->tick_delay & 0x7)
<< SPI_CFG1_GET_TICK_DLY_OFFSET);
writel(reg_val, mdata->base + SPI_CFG1_REG);
}
} else {
reg_val = readl(mdata->base + SPI_CFG1_REG);
reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK_V1;
reg_val |= ((chip_config->tick_delay & 0x3)
<< SPI_CFG1_GET_TICK_DLY_OFFSET_V1);
writel(reg_val, mdata->base + SPI_CFG1_REG);
}
/* set hw cs timing */
mtk_spi_set_hw_cs_timing(spi);
return 0;
}
static int mtk_spi_prepare_message(struct spi_master *master,
struct spi_message *msg)
{
return mtk_spi_hw_init(master, msg->spi);
}
static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
{
u32 reg_val;
struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
if (spi->mode & SPI_CS_HIGH)
enable = !enable;
reg_val = readl(mdata->base + SPI_CMD_REG);
if (!enable) {
reg_val |= SPI_CMD_PAUSE_EN;
writel(reg_val, mdata->base + SPI_CMD_REG);
} else {
reg_val &= ~SPI_CMD_PAUSE_EN;
writel(reg_val, mdata->base + SPI_CMD_REG);
mdata->state = MTK_SPI_IDLE;
mtk_spi_reset(mdata);
}
}
static void mtk_spi_prepare_transfer(struct spi_master *master,
u32 speed_hz)
{
u32 div, sck_time, reg_val;
struct mtk_spi *mdata = spi_master_get_devdata(master);
if (speed_hz < mdata->spi_clk_hz / 2)
div = DIV_ROUND_UP(mdata->spi_clk_hz, speed_hz);
else
div = 1;
sck_time = (div + 1) / 2;
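	/*
	 * Illustrative example: with spi_clk_hz = 100 MHz and speed_hz =
	 * 10 MHz, div = 10 and sck_time = 5, so SCK spends 5 ticks high and
	 * 5 ticks low per period, which gives the requested 10 MHz.
	 */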
if (mdata->dev_comp->enhance_timing) {
reg_val = readl(mdata->base + SPI_CFG2_REG);
reg_val &= ~(0xffff << SPI_CFG2_SCK_HIGH_OFFSET);
reg_val |= (((sck_time - 1) & 0xffff)
<< SPI_CFG2_SCK_HIGH_OFFSET);
reg_val &= ~(0xffff << SPI_CFG2_SCK_LOW_OFFSET);
reg_val |= (((sck_time - 1) & 0xffff)
<< SPI_CFG2_SCK_LOW_OFFSET);
writel(reg_val, mdata->base + SPI_CFG2_REG);
} else {
reg_val = readl(mdata->base + SPI_CFG0_REG);
reg_val &= ~(0xff << SPI_CFG0_SCK_HIGH_OFFSET);
reg_val |= (((sck_time - 1) & 0xff)
<< SPI_CFG0_SCK_HIGH_OFFSET);
reg_val &= ~(0xff << SPI_CFG0_SCK_LOW_OFFSET);
reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET);
writel(reg_val, mdata->base + SPI_CFG0_REG);
}
}
static void mtk_spi_setup_packet(struct spi_master *master)
{
u32 packet_size, packet_loop, reg_val;
struct mtk_spi *mdata = spi_master_get_devdata(master);
if (mdata->dev_comp->ipm_design)
packet_size = min_t(u32,
mdata->xfer_len,
MTK_SPI_IPM_PACKET_SIZE);
else
packet_size = min_t(u32,
mdata->xfer_len,
MTK_SPI_PACKET_SIZE);
packet_loop = mdata->xfer_len / packet_size;
reg_val = readl(mdata->base + SPI_CFG1_REG);
if (mdata->dev_comp->ipm_design)
reg_val &= ~SPI_CFG1_IPM_PACKET_LENGTH_MASK;
else
reg_val &= ~SPI_CFG1_PACKET_LENGTH_MASK;
reg_val |= (packet_size - 1) << SPI_CFG1_PACKET_LENGTH_OFFSET;
reg_val &= ~SPI_CFG1_PACKET_LOOP_MASK;
reg_val |= (packet_loop - 1) << SPI_CFG1_PACKET_LOOP_OFFSET;
writel(reg_val, mdata->base + SPI_CFG1_REG);
}
static void mtk_spi_enable_transfer(struct spi_master *master)
{
u32 cmd;
struct mtk_spi *mdata = spi_master_get_devdata(master);
cmd = readl(mdata->base + SPI_CMD_REG);
if (mdata->state == MTK_SPI_IDLE)
cmd |= SPI_CMD_ACT;
else
cmd |= SPI_CMD_RESUME;
writel(cmd, mdata->base + SPI_CMD_REG);
}
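/*
 * The controller transfers xfer_len as packet_size * packet_loop, so any
 * remainder beyond a whole number of maximum-size packets is deferred to a
 * later round. Illustrative example (non-IPM, 1024-byte packets):
 * xfer_len = 2500 gives mult_delta = 2500 % 1024 = 452, i.e. 2048 bytes are
 * transferred now and 452 bytes are left for the next round.
 */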
static int mtk_spi_get_mult_delta(struct mtk_spi *mdata, u32 xfer_len)
{
u32 mult_delta = 0;
if (mdata->dev_comp->ipm_design) {
if (xfer_len > MTK_SPI_IPM_PACKET_SIZE)
mult_delta = xfer_len % MTK_SPI_IPM_PACKET_SIZE;
} else {
if (xfer_len > MTK_SPI_PACKET_SIZE)
mult_delta = xfer_len % MTK_SPI_PACKET_SIZE;
}
return mult_delta;
}
static void mtk_spi_update_mdata_len(struct spi_master *master)
{
int mult_delta;
struct mtk_spi *mdata = spi_master_get_devdata(master);
if (mdata->tx_sgl_len && mdata->rx_sgl_len) {
if (mdata->tx_sgl_len > mdata->rx_sgl_len) {
mult_delta = mtk_spi_get_mult_delta(mdata, mdata->rx_sgl_len);
mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
mdata->rx_sgl_len = mult_delta;
mdata->tx_sgl_len -= mdata->xfer_len;
} else {
mult_delta = mtk_spi_get_mult_delta(mdata, mdata->tx_sgl_len);
mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
mdata->tx_sgl_len = mult_delta;
mdata->rx_sgl_len -= mdata->xfer_len;
}
} else if (mdata->tx_sgl_len) {
mult_delta = mtk_spi_get_mult_delta(mdata, mdata->tx_sgl_len);
mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
mdata->tx_sgl_len = mult_delta;
} else if (mdata->rx_sgl_len) {
mult_delta = mtk_spi_get_mult_delta(mdata, mdata->rx_sgl_len);
mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
mdata->rx_sgl_len = mult_delta;
}
}
static void mtk_spi_setup_dma_addr(struct spi_master *master,
struct spi_transfer *xfer)
{
struct mtk_spi *mdata = spi_master_get_devdata(master);
if (mdata->tx_sgl) {
writel((u32)(xfer->tx_dma & MTK_SPI_32BITS_MASK),
mdata->base + SPI_TX_SRC_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
if (mdata->dev_comp->dma_ext)
writel((u32)(xfer->tx_dma >> 32),
mdata->base + SPI_TX_SRC_REG_64);
#endif
}
if (mdata->rx_sgl) {
writel((u32)(xfer->rx_dma & MTK_SPI_32BITS_MASK),
mdata->base + SPI_RX_DST_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
if (mdata->dev_comp->dma_ext)
writel((u32)(xfer->rx_dma >> 32),
mdata->base + SPI_RX_DST_REG_64);
#endif
}
}
static int mtk_spi_fifo_transfer(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *xfer)
{
int cnt, remainder;
u32 reg_val;
struct mtk_spi *mdata = spi_master_get_devdata(master);
mdata->cur_transfer = xfer;
mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, xfer->len);
mdata->num_xfered = 0;
mtk_spi_prepare_transfer(master, xfer->speed_hz);
mtk_spi_setup_packet(master);
if (xfer->tx_buf) {
cnt = xfer->len / 4;
iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt);
remainder = xfer->len % 4;
if (remainder > 0) {
reg_val = 0;
			memcpy(&reg_val, xfer->tx_buf + (cnt * 4), remainder);
writel(reg_val, mdata->base + SPI_TX_DATA_REG);
}
}
mtk_spi_enable_transfer(master);
return 1;
}
static int mtk_spi_dma_transfer(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *xfer)
{
int cmd;
struct mtk_spi *mdata = spi_master_get_devdata(master);
mdata->tx_sgl = NULL;
mdata->rx_sgl = NULL;
mdata->tx_sgl_len = 0;
mdata->rx_sgl_len = 0;
mdata->cur_transfer = xfer;
mdata->num_xfered = 0;
mtk_spi_prepare_transfer(master, xfer->speed_hz);
cmd = readl(mdata->base + SPI_CMD_REG);
if (xfer->tx_buf)
cmd |= SPI_CMD_TX_DMA;
if (xfer->rx_buf)
cmd |= SPI_CMD_RX_DMA;
writel(cmd, mdata->base + SPI_CMD_REG);
if (xfer->tx_buf)
mdata->tx_sgl = xfer->tx_sg.sgl;
if (xfer->rx_buf)
mdata->rx_sgl = xfer->rx_sg.sgl;
if (mdata->tx_sgl) {
xfer->tx_dma = sg_dma_address(mdata->tx_sgl);
mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
}
if (mdata->rx_sgl) {
xfer->rx_dma = sg_dma_address(mdata->rx_sgl);
mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
}
mtk_spi_update_mdata_len(master);
mtk_spi_setup_packet(master);
mtk_spi_setup_dma_addr(master, xfer);
mtk_spi_enable_transfer(master);
return 1;
}
static int mtk_spi_transfer_one(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
u32 reg_val = 0;
/* prepare xfer direction and duplex mode */
if (mdata->dev_comp->ipm_design) {
if (!xfer->tx_buf || !xfer->rx_buf) {
reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
if (xfer->rx_buf)
reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
}
writel(reg_val, mdata->base + SPI_CFG3_IPM_REG);
}
if (master->can_dma(master, spi, xfer))
return mtk_spi_dma_transfer(master, spi, xfer);
else
return mtk_spi_fifo_transfer(master, spi, xfer);
}
static bool mtk_spi_can_dma(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *xfer)
{
/* Buffers for DMA transactions must be 4-byte aligned */
return (xfer->len > MTK_SPI_MAX_FIFO_SIZE &&
(unsigned long)xfer->tx_buf % 4 == 0 &&
(unsigned long)xfer->rx_buf % 4 == 0);
}
static int mtk_spi_setup(struct spi_device *spi)
{
struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
if (!spi->controller_data)
spi->controller_data = (void *)&mtk_default_chip_info;
if (mdata->dev_comp->need_pad_sel && spi_get_csgpiod(spi, 0))
/* CS de-asserted, gpiolib will handle inversion */
gpiod_direction_output(spi_get_csgpiod(spi, 0), 0);
return 0;
}
static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
{
u32 cmd, reg_val, cnt, remainder, len;
struct spi_master *master = dev_id;
struct mtk_spi *mdata = spi_master_get_devdata(master);
struct spi_transfer *trans = mdata->cur_transfer;
reg_val = readl(mdata->base + SPI_STATUS0_REG);
if (reg_val & MTK_SPI_PAUSE_INT_STATUS)
mdata->state = MTK_SPI_PAUSED;
else
mdata->state = MTK_SPI_IDLE;
/* SPI-MEM ops */
if (mdata->use_spimem) {
complete(&mdata->spimem_done);
return IRQ_HANDLED;
}
if (!master->can_dma(master, NULL, trans)) {
if (trans->rx_buf) {
cnt = mdata->xfer_len / 4;
ioread32_rep(mdata->base + SPI_RX_DATA_REG,
trans->rx_buf + mdata->num_xfered, cnt);
remainder = mdata->xfer_len % 4;
if (remainder > 0) {
reg_val = readl(mdata->base + SPI_RX_DATA_REG);
memcpy(trans->rx_buf +
mdata->num_xfered +
(cnt * 4),
				       &reg_val,
remainder);
}
}
mdata->num_xfered += mdata->xfer_len;
if (mdata->num_xfered == trans->len) {
spi_finalize_current_transfer(master);
return IRQ_HANDLED;
}
len = trans->len - mdata->num_xfered;
mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, len);
mtk_spi_setup_packet(master);
cnt = mdata->xfer_len / 4;
iowrite32_rep(mdata->base + SPI_TX_DATA_REG,
trans->tx_buf + mdata->num_xfered, cnt);
remainder = mdata->xfer_len % 4;
if (remainder > 0) {
reg_val = 0;
			memcpy(&reg_val,
trans->tx_buf + (cnt * 4) + mdata->num_xfered,
remainder);
writel(reg_val, mdata->base + SPI_TX_DATA_REG);
}
mtk_spi_enable_transfer(master);
return IRQ_HANDLED;
}
if (mdata->tx_sgl)
trans->tx_dma += mdata->xfer_len;
if (mdata->rx_sgl)
trans->rx_dma += mdata->xfer_len;
if (mdata->tx_sgl && (mdata->tx_sgl_len == 0)) {
mdata->tx_sgl = sg_next(mdata->tx_sgl);
if (mdata->tx_sgl) {
trans->tx_dma = sg_dma_address(mdata->tx_sgl);
mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
}
}
if (mdata->rx_sgl && (mdata->rx_sgl_len == 0)) {
mdata->rx_sgl = sg_next(mdata->rx_sgl);
if (mdata->rx_sgl) {
trans->rx_dma = sg_dma_address(mdata->rx_sgl);
mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
}
}
if (!mdata->tx_sgl && !mdata->rx_sgl) {
/* spi disable dma */
cmd = readl(mdata->base + SPI_CMD_REG);
cmd &= ~SPI_CMD_TX_DMA;
cmd &= ~SPI_CMD_RX_DMA;
writel(cmd, mdata->base + SPI_CMD_REG);
spi_finalize_current_transfer(master);
return IRQ_HANDLED;
}
mtk_spi_update_mdata_len(master);
mtk_spi_setup_packet(master);
mtk_spi_setup_dma_addr(master, trans);
mtk_spi_enable_transfer(master);
return IRQ_HANDLED;
}
static int mtk_spi_mem_adjust_op_size(struct spi_mem *mem,
struct spi_mem_op *op)
{
int opcode_len;
if (op->data.dir != SPI_MEM_NO_DATA) {
opcode_len = 1 + op->addr.nbytes + op->dummy.nbytes;
if (opcode_len + op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
op->data.nbytes = MTK_SPI_IPM_PACKET_SIZE - opcode_len;
/* force data buffer dma-aligned. */
op->data.nbytes -= op->data.nbytes % 4;
}
}
return 0;
}
static bool mtk_spi_mem_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
if (!spi_mem_default_supports_op(mem, op))
return false;
if (op->addr.nbytes && op->dummy.nbytes &&
op->addr.buswidth != op->dummy.buswidth)
return false;
if (op->addr.nbytes + op->dummy.nbytes > 16)
return false;
if (op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
if (op->data.nbytes / MTK_SPI_IPM_PACKET_SIZE >
MTK_SPI_IPM_PACKET_LOOP ||
op->data.nbytes % MTK_SPI_IPM_PACKET_SIZE != 0)
return false;
}
return true;
}
static void mtk_spi_mem_setup_dma_xfer(struct spi_master *master,
const struct spi_mem_op *op)
{
struct mtk_spi *mdata = spi_master_get_devdata(master);
writel((u32)(mdata->tx_dma & MTK_SPI_32BITS_MASK),
mdata->base + SPI_TX_SRC_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
if (mdata->dev_comp->dma_ext)
writel((u32)(mdata->tx_dma >> 32),
mdata->base + SPI_TX_SRC_REG_64);
#endif
if (op->data.dir == SPI_MEM_DATA_IN) {
writel((u32)(mdata->rx_dma & MTK_SPI_32BITS_MASK),
mdata->base + SPI_RX_DST_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
if (mdata->dev_comp->dma_ext)
writel((u32)(mdata->rx_dma >> 32),
mdata->base + SPI_RX_DST_REG_64);
#endif
}
}
static int mtk_spi_transfer_wait(struct spi_mem *mem,
const struct spi_mem_op *op)
{
struct mtk_spi *mdata = spi_master_get_devdata(mem->spi->master);
	/*
	 * For each byte we wait for 8 cycles of the SPI clock. The speed is
	 * given in Hz and we want milliseconds, hence the factor of
	 * 8 * 1000.
	 */
u64 ms = 8000LL;
if (op->data.dir == SPI_MEM_NO_DATA)
		ms *= 32; /* avoid a zero timeout for short transfers */
else
ms *= op->data.nbytes;
ms = div_u64(ms, mem->spi->max_speed_hz);
ms += ms + 1000; /* 1s tolerance */
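	/*
	 * Illustrative example: a 4096-byte read at 1 MHz gives
	 * 8000 * 4096 / 1000000 = 32 ms, which is then doubled and padded
	 * with the 1 s tolerance for a total timeout of 1064 ms.
	 */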
if (ms > UINT_MAX)
ms = UINT_MAX;
if (!wait_for_completion_timeout(&mdata->spimem_done,
msecs_to_jiffies(ms))) {
dev_err(mdata->dev, "spi-mem transfer timeout\n");
return -ETIMEDOUT;
}
return 0;
}
static int mtk_spi_mem_exec_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
struct mtk_spi *mdata = spi_master_get_devdata(mem->spi->master);
u32 reg_val, nio, tx_size;
char *tx_tmp_buf, *rx_tmp_buf;
int ret = 0;
mdata->use_spimem = true;
reinit_completion(&mdata->spimem_done);
mtk_spi_reset(mdata);
mtk_spi_hw_init(mem->spi->master, mem->spi);
mtk_spi_prepare_transfer(mem->spi->master, mem->spi->max_speed_hz);
reg_val = readl(mdata->base + SPI_CFG3_IPM_REG);
/* opcode byte len */
reg_val &= ~SPI_CFG3_IPM_CMD_BYTELEN_MASK;
reg_val |= 1 << SPI_CFG3_IPM_CMD_BYTELEN_OFFSET;
/* addr & dummy byte len */
reg_val &= ~SPI_CFG3_IPM_ADDR_BYTELEN_MASK;
if (op->addr.nbytes || op->dummy.nbytes)
reg_val |= (op->addr.nbytes + op->dummy.nbytes) <<
SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET;
/* data byte len */
if (op->data.dir == SPI_MEM_NO_DATA) {
reg_val |= SPI_CFG3_IPM_NODATA_FLAG;
writel(0, mdata->base + SPI_CFG1_REG);
} else {
reg_val &= ~SPI_CFG3_IPM_NODATA_FLAG;
mdata->xfer_len = op->data.nbytes;
mtk_spi_setup_packet(mem->spi->master);
}
if (op->addr.nbytes || op->dummy.nbytes) {
if (op->addr.buswidth == 1 || op->dummy.buswidth == 1)
reg_val |= SPI_CFG3_IPM_XMODE_EN;
else
reg_val &= ~SPI_CFG3_IPM_XMODE_EN;
}
if (op->addr.buswidth == 2 ||
op->dummy.buswidth == 2 ||
op->data.buswidth == 2)
nio = 2;
else if (op->addr.buswidth == 4 ||
op->dummy.buswidth == 4 ||
op->data.buswidth == 4)
nio = 4;
else
nio = 1;
reg_val &= ~SPI_CFG3_IPM_CMD_PIN_MODE_MASK;
reg_val |= PIN_MODE_CFG(nio);
reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
if (op->data.dir == SPI_MEM_DATA_IN)
reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
else
reg_val &= ~SPI_CFG3_IPM_HALF_DUPLEX_DIR;
writel(reg_val, mdata->base + SPI_CFG3_IPM_REG);
tx_size = 1 + op->addr.nbytes + op->dummy.nbytes;
if (op->data.dir == SPI_MEM_DATA_OUT)
tx_size += op->data.nbytes;
tx_size = max_t(u32, tx_size, 32);
tx_tmp_buf = kzalloc(tx_size, GFP_KERNEL | GFP_DMA);
if (!tx_tmp_buf) {
mdata->use_spimem = false;
return -ENOMEM;
}
tx_tmp_buf[0] = op->cmd.opcode;
if (op->addr.nbytes) {
int i;
for (i = 0; i < op->addr.nbytes; i++)
tx_tmp_buf[i + 1] = op->addr.val >>
(8 * (op->addr.nbytes - i - 1));
}
if (op->dummy.nbytes)
memset(tx_tmp_buf + op->addr.nbytes + 1,
0xff,
op->dummy.nbytes);
if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
memcpy(tx_tmp_buf + op->dummy.nbytes + op->addr.nbytes + 1,
op->data.buf.out,
op->data.nbytes);
mdata->tx_dma = dma_map_single(mdata->dev, tx_tmp_buf,
tx_size, DMA_TO_DEVICE);
if (dma_mapping_error(mdata->dev, mdata->tx_dma)) {
ret = -ENOMEM;
goto err_exit;
}
if (op->data.dir == SPI_MEM_DATA_IN) {
if (!IS_ALIGNED((size_t)op->data.buf.in, 4)) {
rx_tmp_buf = kzalloc(op->data.nbytes,
GFP_KERNEL | GFP_DMA);
if (!rx_tmp_buf) {
ret = -ENOMEM;
goto unmap_tx_dma;
}
} else {
rx_tmp_buf = op->data.buf.in;
}
mdata->rx_dma = dma_map_single(mdata->dev,
rx_tmp_buf,
op->data.nbytes,
DMA_FROM_DEVICE);
if (dma_mapping_error(mdata->dev, mdata->rx_dma)) {
ret = -ENOMEM;
goto kfree_rx_tmp_buf;
}
}
reg_val = readl(mdata->base + SPI_CMD_REG);
reg_val |= SPI_CMD_TX_DMA;
if (op->data.dir == SPI_MEM_DATA_IN)
reg_val |= SPI_CMD_RX_DMA;
writel(reg_val, mdata->base + SPI_CMD_REG);
mtk_spi_mem_setup_dma_xfer(mem->spi->master, op);
mtk_spi_enable_transfer(mem->spi->master);
/* Wait for the interrupt. */
ret = mtk_spi_transfer_wait(mem, op);
if (ret)
goto unmap_rx_dma;
/* spi disable dma */
reg_val = readl(mdata->base + SPI_CMD_REG);
reg_val &= ~SPI_CMD_TX_DMA;
if (op->data.dir == SPI_MEM_DATA_IN)
reg_val &= ~SPI_CMD_RX_DMA;
writel(reg_val, mdata->base + SPI_CMD_REG);
unmap_rx_dma:
if (op->data.dir == SPI_MEM_DATA_IN) {
dma_unmap_single(mdata->dev, mdata->rx_dma,
op->data.nbytes, DMA_FROM_DEVICE);
if (!IS_ALIGNED((size_t)op->data.buf.in, 4))
memcpy(op->data.buf.in, rx_tmp_buf, op->data.nbytes);
}
kfree_rx_tmp_buf:
if (op->data.dir == SPI_MEM_DATA_IN &&
!IS_ALIGNED((size_t)op->data.buf.in, 4))
kfree(rx_tmp_buf);
unmap_tx_dma:
dma_unmap_single(mdata->dev, mdata->tx_dma,
tx_size, DMA_TO_DEVICE);
err_exit:
kfree(tx_tmp_buf);
mdata->use_spimem = false;
return ret;
}
static const struct spi_controller_mem_ops mtk_spi_mem_ops = {
.adjust_op_size = mtk_spi_mem_adjust_op_size,
.supports_op = mtk_spi_mem_supports_op,
.exec_op = mtk_spi_mem_exec_op,
};
static int mtk_spi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct spi_master *master;
struct mtk_spi *mdata;
int i, irq, ret, addr_bits;
master = devm_spi_alloc_master(dev, sizeof(*mdata));
if (!master)
return dev_err_probe(dev, -ENOMEM, "failed to alloc spi master\n");
master->auto_runtime_pm = true;
master->dev.of_node = dev->of_node;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
master->set_cs = mtk_spi_set_cs;
master->prepare_message = mtk_spi_prepare_message;
master->transfer_one = mtk_spi_transfer_one;
master->can_dma = mtk_spi_can_dma;
master->setup = mtk_spi_setup;
master->set_cs_timing = mtk_spi_set_hw_cs_timing;
master->use_gpio_descriptors = true;
mdata = spi_master_get_devdata(master);
mdata->dev_comp = device_get_match_data(dev);
if (mdata->dev_comp->enhance_timing)
master->mode_bits |= SPI_CS_HIGH;
if (mdata->dev_comp->must_tx)
master->flags = SPI_CONTROLLER_MUST_TX;
if (mdata->dev_comp->ipm_design)
master->mode_bits |= SPI_LOOP | SPI_RX_DUAL | SPI_TX_DUAL |
SPI_RX_QUAD | SPI_TX_QUAD;
if (mdata->dev_comp->ipm_design) {
mdata->dev = dev;
master->mem_ops = &mtk_spi_mem_ops;
init_completion(&mdata->spimem_done);
}
if (mdata->dev_comp->need_pad_sel) {
mdata->pad_num = of_property_count_u32_elems(dev->of_node,
"mediatek,pad-select");
if (mdata->pad_num < 0)
return dev_err_probe(dev, -EINVAL,
"No 'mediatek,pad-select' property\n");
mdata->pad_sel = devm_kmalloc_array(dev, mdata->pad_num,
sizeof(u32), GFP_KERNEL);
if (!mdata->pad_sel)
return -ENOMEM;
for (i = 0; i < mdata->pad_num; i++) {
of_property_read_u32_index(dev->of_node,
"mediatek,pad-select",
i, &mdata->pad_sel[i]);
if (mdata->pad_sel[i] > MT8173_SPI_MAX_PAD_SEL)
return dev_err_probe(dev, -EINVAL,
"wrong pad-sel[%d]: %u\n",
i, mdata->pad_sel[i]);
}
}
platform_set_drvdata(pdev, master);
mdata->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mdata->base))
return PTR_ERR(mdata->base);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
if (!dev->dma_mask)
dev->dma_mask = &dev->coherent_dma_mask;
if (mdata->dev_comp->ipm_design)
dma_set_max_seg_size(dev, SZ_16M);
else
dma_set_max_seg_size(dev, SZ_256K);
mdata->parent_clk = devm_clk_get(dev, "parent-clk");
if (IS_ERR(mdata->parent_clk))
return dev_err_probe(dev, PTR_ERR(mdata->parent_clk),
"failed to get parent-clk\n");
mdata->sel_clk = devm_clk_get(dev, "sel-clk");
if (IS_ERR(mdata->sel_clk))
return dev_err_probe(dev, PTR_ERR(mdata->sel_clk), "failed to get sel-clk\n");
mdata->spi_clk = devm_clk_get(dev, "spi-clk");
if (IS_ERR(mdata->spi_clk))
return dev_err_probe(dev, PTR_ERR(mdata->spi_clk), "failed to get spi-clk\n");
mdata->spi_hclk = devm_clk_get_optional(dev, "hclk");
if (IS_ERR(mdata->spi_hclk))
return dev_err_probe(dev, PTR_ERR(mdata->spi_hclk), "failed to get hclk\n");
ret = clk_set_parent(mdata->sel_clk, mdata->parent_clk);
if (ret < 0)
return dev_err_probe(dev, ret, "failed to clk_set_parent\n");
ret = clk_prepare_enable(mdata->spi_hclk);
if (ret < 0)
return dev_err_probe(dev, ret, "failed to enable hclk\n");
ret = clk_prepare_enable(mdata->spi_clk);
if (ret < 0) {
clk_disable_unprepare(mdata->spi_hclk);
return dev_err_probe(dev, ret, "failed to enable spi_clk\n");
}
mdata->spi_clk_hz = clk_get_rate(mdata->spi_clk);
if (mdata->dev_comp->no_need_unprepare) {
clk_disable(mdata->spi_clk);
clk_disable(mdata->spi_hclk);
} else {
clk_disable_unprepare(mdata->spi_clk);
clk_disable_unprepare(mdata->spi_hclk);
}
if (mdata->dev_comp->need_pad_sel) {
if (mdata->pad_num != master->num_chipselect)
return dev_err_probe(dev, -EINVAL,
"pad_num does not match num_chipselect(%d != %d)\n",
mdata->pad_num, master->num_chipselect);
if (!master->cs_gpiods && master->num_chipselect > 1)
return dev_err_probe(dev, -EINVAL,
"cs_gpios not specified and num_chipselect > 1\n");
}
if (mdata->dev_comp->dma_ext)
addr_bits = DMA_ADDR_EXT_BITS;
else
addr_bits = DMA_ADDR_DEF_BITS;
ret = dma_set_mask(dev, DMA_BIT_MASK(addr_bits));
if (ret)
dev_notice(dev, "SPI dma_set_mask(%d) failed, ret:%d\n",
addr_bits, ret);
ret = devm_request_irq(dev, irq, mtk_spi_interrupt,
IRQF_TRIGGER_NONE, dev_name(dev), master);
if (ret)
return dev_err_probe(dev, ret, "failed to register irq\n");
pm_runtime_enable(dev);
ret = devm_spi_register_master(dev, master);
if (ret) {
pm_runtime_disable(dev);
return dev_err_probe(dev, ret, "failed to register master\n");
}
return 0;
}
static void mtk_spi_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct mtk_spi *mdata = spi_master_get_devdata(master);
int ret;
if (mdata->use_spimem && !completion_done(&mdata->spimem_done))
complete(&mdata->spimem_done);
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) {
dev_warn(&pdev->dev, "Failed to resume hardware (%pe)\n", ERR_PTR(ret));
} else {
/*
* If pm runtime resume failed, clks are disabled and
* unprepared. So don't access the hardware and skip clk
* unpreparing.
*/
mtk_spi_reset(mdata);
if (mdata->dev_comp->no_need_unprepare) {
clk_unprepare(mdata->spi_clk);
clk_unprepare(mdata->spi_hclk);
}
}
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
}
#ifdef CONFIG_PM_SLEEP
static int mtk_spi_suspend(struct device *dev)
{
int ret;
struct spi_master *master = dev_get_drvdata(dev);
struct mtk_spi *mdata = spi_master_get_devdata(master);
ret = spi_master_suspend(master);
if (ret)
return ret;
if (!pm_runtime_suspended(dev)) {
clk_disable_unprepare(mdata->spi_clk);
clk_disable_unprepare(mdata->spi_hclk);
}
return 0;
}
static int mtk_spi_resume(struct device *dev)
{
int ret;
struct spi_master *master = dev_get_drvdata(dev);
struct mtk_spi *mdata = spi_master_get_devdata(master);
if (!pm_runtime_suspended(dev)) {
ret = clk_prepare_enable(mdata->spi_clk);
if (ret < 0) {
dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
return ret;
}
ret = clk_prepare_enable(mdata->spi_hclk);
if (ret < 0) {
dev_err(dev, "failed to enable spi_hclk (%d)\n", ret);
clk_disable_unprepare(mdata->spi_clk);
return ret;
}
}
ret = spi_master_resume(master);
if (ret < 0) {
clk_disable_unprepare(mdata->spi_clk);
clk_disable_unprepare(mdata->spi_hclk);
}
return ret;
}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
static int mtk_spi_runtime_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct mtk_spi *mdata = spi_master_get_devdata(master);
if (mdata->dev_comp->no_need_unprepare) {
clk_disable(mdata->spi_clk);
clk_disable(mdata->spi_hclk);
} else {
clk_disable_unprepare(mdata->spi_clk);
clk_disable_unprepare(mdata->spi_hclk);
}
return 0;
}
static int mtk_spi_runtime_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct mtk_spi *mdata = spi_master_get_devdata(master);
int ret;
if (mdata->dev_comp->no_need_unprepare) {
ret = clk_enable(mdata->spi_clk);
if (ret < 0) {
dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
return ret;
}
ret = clk_enable(mdata->spi_hclk);
if (ret < 0) {
dev_err(dev, "failed to enable spi_hclk (%d)\n", ret);
clk_disable(mdata->spi_clk);
return ret;
}
} else {
ret = clk_prepare_enable(mdata->spi_clk);
if (ret < 0) {
dev_err(dev, "failed to prepare_enable spi_clk (%d)\n", ret);
return ret;
}
ret = clk_prepare_enable(mdata->spi_hclk);
if (ret < 0) {
dev_err(dev, "failed to prepare_enable spi_hclk (%d)\n", ret);
clk_disable_unprepare(mdata->spi_clk);
return ret;
}
}
return 0;
}
#endif /* CONFIG_PM */
static const struct dev_pm_ops mtk_spi_pm = {
SET_SYSTEM_SLEEP_PM_OPS(mtk_spi_suspend, mtk_spi_resume)
SET_RUNTIME_PM_OPS(mtk_spi_runtime_suspend,
mtk_spi_runtime_resume, NULL)
};
static struct platform_driver mtk_spi_driver = {
.driver = {
.name = "mtk-spi",
.pm = &mtk_spi_pm,
.of_match_table = mtk_spi_of_match,
},
.probe = mtk_spi_probe,
.remove_new = mtk_spi_remove,
};
module_platform_driver(mtk_spi_driver);
MODULE_DESCRIPTION("MTK SPI Controller driver");
MODULE_AUTHOR("Leilk Liu <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:mtk-spi");
| linux-master | drivers/spi/spi-mt65xx.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* PXA2xx SPI DMA engine support.
*
* Copyright (C) 2013, 2021 Intel Corporation
* Author: Mika Westerberg <[email protected]>
*/
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/spi/pxa2xx_spi.h>
#include <linux/spi/spi.h>
#include "spi-pxa2xx.h"
static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
bool error)
{
struct spi_message *msg = drv_data->controller->cur_msg;
/*
	 * It is possible that one CPU is handling the ROR interrupt while
	 * another just gets the DMA completion. Calling pump_transfers()
	 * twice for the same transfer leads to problems, so we prevent
	 * concurrent calls by using dma_running.
*/
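	/*
	 * dma_running is set to 1 when the transfer is started (see
	 * pxa2xx_spi_dma_start()), so whichever path (the DMA completion
	 * callback or the ROR interrupt) decrements it to zero performs the
	 * cleanup below.
	 */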
if (atomic_dec_and_test(&drv_data->dma_running)) {
/*
* If the other CPU is still handling the ROR interrupt we
* might not know about the error yet. So we re-check the
* ROR bit here before we clear the status register.
*/
if (!error)
error = read_SSSR_bits(drv_data, drv_data->mask_sr) & SSSR_ROR;
/* Clear status & disable interrupts */
clear_SSCR1_bits(drv_data, drv_data->dma_cr1);
write_SSSR_CS(drv_data, drv_data->clear_sr);
if (!pxa25x_ssp_comp(drv_data))
pxa2xx_spi_write(drv_data, SSTO, 0);
if (error) {
/* In case we got an error we disable the SSP now */
pxa_ssp_disable(drv_data->ssp);
msg->status = -EIO;
}
spi_finalize_current_transfer(drv_data->controller);
}
}
static void pxa2xx_spi_dma_callback(void *data)
{
pxa2xx_spi_dma_transfer_complete(data, false);
}
static struct dma_async_tx_descriptor *
pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data,
enum dma_transfer_direction dir,
struct spi_transfer *xfer)
{
struct chip_data *chip =
spi_get_ctldata(drv_data->controller->cur_msg->spi);
enum dma_slave_buswidth width;
struct dma_slave_config cfg;
struct dma_chan *chan;
struct sg_table *sgt;
int ret;
switch (drv_data->n_bytes) {
case 1:
width = DMA_SLAVE_BUSWIDTH_1_BYTE;
break;
case 2:
width = DMA_SLAVE_BUSWIDTH_2_BYTES;
break;
default:
width = DMA_SLAVE_BUSWIDTH_4_BYTES;
break;
}
memset(&cfg, 0, sizeof(cfg));
cfg.direction = dir;
if (dir == DMA_MEM_TO_DEV) {
cfg.dst_addr = drv_data->ssp->phys_base + SSDR;
cfg.dst_addr_width = width;
cfg.dst_maxburst = chip->dma_burst_size;
sgt = &xfer->tx_sg;
chan = drv_data->controller->dma_tx;
} else {
cfg.src_addr = drv_data->ssp->phys_base + SSDR;
cfg.src_addr_width = width;
cfg.src_maxburst = chip->dma_burst_size;
sgt = &xfer->rx_sg;
chan = drv_data->controller->dma_rx;
}
ret = dmaengine_slave_config(chan, &cfg);
if (ret) {
dev_warn(drv_data->ssp->dev, "DMA slave config failed\n");
return NULL;
}
return dmaengine_prep_slave_sg(chan, sgt->sgl, sgt->nents, dir,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}
irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
{
u32 status;
status = read_SSSR_bits(drv_data, drv_data->mask_sr);
if (status & SSSR_ROR) {
dev_err(drv_data->ssp->dev, "FIFO overrun\n");
dmaengine_terminate_async(drv_data->controller->dma_rx);
dmaengine_terminate_async(drv_data->controller->dma_tx);
pxa2xx_spi_dma_transfer_complete(drv_data, true);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
int pxa2xx_spi_dma_prepare(struct driver_data *drv_data,
struct spi_transfer *xfer)
{
struct dma_async_tx_descriptor *tx_desc, *rx_desc;
int err;
tx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_MEM_TO_DEV, xfer);
if (!tx_desc) {
dev_err(drv_data->ssp->dev, "failed to get DMA TX descriptor\n");
err = -EBUSY;
goto err_tx;
}
rx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_DEV_TO_MEM, xfer);
if (!rx_desc) {
dev_err(drv_data->ssp->dev, "failed to get DMA RX descriptor\n");
err = -EBUSY;
goto err_rx;
}
/* We are ready when RX completes */
rx_desc->callback = pxa2xx_spi_dma_callback;
rx_desc->callback_param = drv_data;
dmaengine_submit(rx_desc);
dmaengine_submit(tx_desc);
return 0;
err_rx:
dmaengine_terminate_async(drv_data->controller->dma_tx);
err_tx:
return err;
}
void pxa2xx_spi_dma_start(struct driver_data *drv_data)
{
dma_async_issue_pending(drv_data->controller->dma_rx);
dma_async_issue_pending(drv_data->controller->dma_tx);
atomic_set(&drv_data->dma_running, 1);
}
void pxa2xx_spi_dma_stop(struct driver_data *drv_data)
{
atomic_set(&drv_data->dma_running, 0);
dmaengine_terminate_sync(drv_data->controller->dma_rx);
dmaengine_terminate_sync(drv_data->controller->dma_tx);
}
int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
{
struct pxa2xx_spi_controller *pdata = drv_data->controller_info;
struct spi_controller *controller = drv_data->controller;
struct device *dev = drv_data->ssp->dev;
dma_cap_mask_t mask;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
controller->dma_tx = dma_request_slave_channel_compat(mask,
pdata->dma_filter, pdata->tx_param, dev, "tx");
if (!controller->dma_tx)
return -ENODEV;
controller->dma_rx = dma_request_slave_channel_compat(mask,
pdata->dma_filter, pdata->rx_param, dev, "rx");
if (!controller->dma_rx) {
dma_release_channel(controller->dma_tx);
controller->dma_tx = NULL;
return -ENODEV;
}
return 0;
}
void pxa2xx_spi_dma_release(struct driver_data *drv_data)
{
struct spi_controller *controller = drv_data->controller;
if (controller->dma_rx) {
dmaengine_terminate_sync(controller->dma_rx);
dma_release_channel(controller->dma_rx);
controller->dma_rx = NULL;
}
if (controller->dma_tx) {
dmaengine_terminate_sync(controller->dma_tx);
dma_release_channel(controller->dma_tx);
controller->dma_tx = NULL;
}
}
int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
struct spi_device *spi,
u8 bits_per_word, u32 *burst_code,
u32 *threshold)
{
struct pxa2xx_spi_chip *chip_info = spi->controller_data;
struct driver_data *drv_data = spi_controller_get_devdata(spi->controller);
u32 dma_burst_size = drv_data->controller_info->dma_burst_size;
/*
* If the DMA burst size is given in chip_info we use that,
* otherwise we use the default. Also we use the default FIFO
* thresholds for now.
*/
*burst_code = chip_info ? chip_info->dma_burst_size : dma_burst_size;
*threshold = SSCR1_RxTresh(RX_THRESH_DFLT)
| SSCR1_TxTresh(TX_THRESH_DFLT);
return 0;
}
| linux-master | drivers/spi/spi-pxa2xx-dma.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ASPEED FMC/SPI Memory Controller Driver
*
* Copyright (c) 2015-2022, IBM Corporation.
* Copyright (c) 2020, ASPEED Corporation.
*/
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#define DEVICE_NAME "spi-aspeed-smc"
/* Type setting Register */
#define CONFIG_REG 0x0
#define CONFIG_TYPE_SPI 0x2
/* CE Control Register */
#define CE_CTRL_REG 0x4
/* CEx Control Register */
#define CE0_CTRL_REG 0x10
#define CTRL_IO_MODE_MASK GENMASK(30, 28)
#define CTRL_IO_SINGLE_DATA 0x0
#define CTRL_IO_DUAL_DATA BIT(29)
#define CTRL_IO_QUAD_DATA BIT(30)
#define CTRL_COMMAND_SHIFT 16
#define CTRL_IO_ADDRESS_4B BIT(13) /* AST2400 SPI only */
#define CTRL_IO_DUMMY_SET(dummy) \
(((((dummy) >> 2) & 0x1) << 14) | (((dummy) & 0x3) << 6))
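/*
 * For example, CTRL_IO_DUMMY_SET(5) sets bit 14 (dummy cycle count bit 2)
 * and bits 7:6 to 0b01 (dummy cycle count bits 1:0).
 */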
#define CTRL_FREQ_SEL_SHIFT 8
#define CTRL_FREQ_SEL_MASK GENMASK(11, CTRL_FREQ_SEL_SHIFT)
#define CTRL_CE_STOP_ACTIVE BIT(2)
#define CTRL_IO_MODE_CMD_MASK GENMASK(1, 0)
#define CTRL_IO_MODE_NORMAL 0x0
#define CTRL_IO_MODE_READ 0x1
#define CTRL_IO_MODE_WRITE 0x2
#define CTRL_IO_MODE_USER 0x3
#define CTRL_IO_CMD_MASK 0xf0ff40c3
/* CEx Address Decoding Range Register */
#define CE0_SEGMENT_ADDR_REG 0x30
/* CEx Read timing compensation register */
#define CE0_TIMING_COMPENSATION_REG 0x94
enum aspeed_spi_ctl_reg_value {
ASPEED_SPI_BASE,
ASPEED_SPI_READ,
ASPEED_SPI_WRITE,
ASPEED_SPI_MAX,
};
struct aspeed_spi;
struct aspeed_spi_chip {
struct aspeed_spi *aspi;
u32 cs;
void __iomem *ctl;
void __iomem *ahb_base;
u32 ahb_window_size;
u32 ctl_val[ASPEED_SPI_MAX];
u32 clk_freq;
};
struct aspeed_spi_data {
u32 ctl0;
u32 max_cs;
bool hastype;
u32 mode_bits;
u32 we0;
u32 timing;
u32 hclk_mask;
u32 hdiv_max;
u32 (*segment_start)(struct aspeed_spi *aspi, u32 reg);
u32 (*segment_end)(struct aspeed_spi *aspi, u32 reg);
u32 (*segment_reg)(struct aspeed_spi *aspi, u32 start, u32 end);
int (*calibrate)(struct aspeed_spi_chip *chip, u32 hdiv,
const u8 *golden_buf, u8 *test_buf);
};
#define ASPEED_SPI_MAX_NUM_CS 5
struct aspeed_spi {
const struct aspeed_spi_data *data;
void __iomem *regs;
void __iomem *ahb_base;
u32 ahb_base_phy;
u32 ahb_window_size;
struct device *dev;
struct clk *clk;
u32 clk_freq;
struct aspeed_spi_chip chips[ASPEED_SPI_MAX_NUM_CS];
};
static u32 aspeed_spi_get_io_mode(const struct spi_mem_op *op)
{
switch (op->data.buswidth) {
case 1:
return CTRL_IO_SINGLE_DATA;
case 2:
return CTRL_IO_DUAL_DATA;
case 4:
return CTRL_IO_QUAD_DATA;
default:
return CTRL_IO_SINGLE_DATA;
}
}
static void aspeed_spi_set_io_mode(struct aspeed_spi_chip *chip, u32 io_mode)
{
u32 ctl;
if (io_mode > 0) {
ctl = readl(chip->ctl) & ~CTRL_IO_MODE_MASK;
ctl |= io_mode;
writel(ctl, chip->ctl);
}
}
static void aspeed_spi_start_user(struct aspeed_spi_chip *chip)
{
u32 ctl = chip->ctl_val[ASPEED_SPI_BASE];
ctl |= CTRL_IO_MODE_USER | CTRL_CE_STOP_ACTIVE;
writel(ctl, chip->ctl);
ctl &= ~CTRL_CE_STOP_ACTIVE;
writel(ctl, chip->ctl);
}
static void aspeed_spi_stop_user(struct aspeed_spi_chip *chip)
{
u32 ctl = chip->ctl_val[ASPEED_SPI_READ] |
CTRL_IO_MODE_USER | CTRL_CE_STOP_ACTIVE;
writel(ctl, chip->ctl);
/* Restore defaults */
writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl);
}
static int aspeed_spi_read_from_ahb(void *buf, void __iomem *src, size_t len)
{
size_t offset = 0;
if (IS_ALIGNED((uintptr_t)src, sizeof(uintptr_t)) &&
IS_ALIGNED((uintptr_t)buf, sizeof(uintptr_t))) {
ioread32_rep(src, buf, len >> 2);
offset = len & ~0x3;
len -= offset;
}
ioread8_rep(src, (u8 *)buf + offset, len);
return 0;
}
static int aspeed_spi_write_to_ahb(void __iomem *dst, const void *buf, size_t len)
{
size_t offset = 0;
if (IS_ALIGNED((uintptr_t)dst, sizeof(uintptr_t)) &&
IS_ALIGNED((uintptr_t)buf, sizeof(uintptr_t))) {
iowrite32_rep(dst, buf, len >> 2);
offset = len & ~0x3;
len -= offset;
}
iowrite8_rep(dst, (const u8 *)buf + offset, len);
return 0;
}
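/*
 * Command/address helper used below: with 3-byte addressing the opcode
 * is packed into the top byte of a single big-endian word together
 * with the 24-bit address; with 4-byte addressing the opcode byte is
 * written first, followed by the big-endian 32-bit address.
 */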
static int aspeed_spi_send_cmd_addr(struct aspeed_spi_chip *chip, u8 addr_nbytes,
u64 offset, u32 opcode)
{
__be32 temp;
u32 cmdaddr;
switch (addr_nbytes) {
case 3:
cmdaddr = offset & 0xFFFFFF;
cmdaddr |= opcode << 24;
temp = cpu_to_be32(cmdaddr);
aspeed_spi_write_to_ahb(chip->ahb_base, &temp, 4);
break;
case 4:
temp = cpu_to_be32(offset);
aspeed_spi_write_to_ahb(chip->ahb_base, &opcode, 1);
aspeed_spi_write_to_ahb(chip->ahb_base, &temp, 4);
break;
default:
WARN_ONCE(1, "Unexpected address width %u", addr_nbytes);
return -EOPNOTSUPP;
}
return 0;
}
static int aspeed_spi_read_reg(struct aspeed_spi_chip *chip,
const struct spi_mem_op *op)
{
aspeed_spi_start_user(chip);
aspeed_spi_write_to_ahb(chip->ahb_base, &op->cmd.opcode, 1);
aspeed_spi_read_from_ahb(op->data.buf.in,
chip->ahb_base, op->data.nbytes);
aspeed_spi_stop_user(chip);
return 0;
}
static int aspeed_spi_write_reg(struct aspeed_spi_chip *chip,
const struct spi_mem_op *op)
{
aspeed_spi_start_user(chip);
aspeed_spi_write_to_ahb(chip->ahb_base, &op->cmd.opcode, 1);
aspeed_spi_write_to_ahb(chip->ahb_base, op->data.buf.out,
op->data.nbytes);
aspeed_spi_stop_user(chip);
return 0;
}
static ssize_t aspeed_spi_read_user(struct aspeed_spi_chip *chip,
const struct spi_mem_op *op,
u64 offset, size_t len, void *buf)
{
int io_mode = aspeed_spi_get_io_mode(op);
u8 dummy = 0xFF;
int i;
int ret;
aspeed_spi_start_user(chip);
ret = aspeed_spi_send_cmd_addr(chip, op->addr.nbytes, offset, op->cmd.opcode);
if (ret < 0)
return ret;
if (op->dummy.buswidth && op->dummy.nbytes) {
for (i = 0; i < op->dummy.nbytes / op->dummy.buswidth; i++)
aspeed_spi_write_to_ahb(chip->ahb_base, &dummy, sizeof(dummy));
}
aspeed_spi_set_io_mode(chip, io_mode);
aspeed_spi_read_from_ahb(buf, chip->ahb_base, len);
aspeed_spi_stop_user(chip);
return 0;
}
static ssize_t aspeed_spi_write_user(struct aspeed_spi_chip *chip,
const struct spi_mem_op *op)
{
int ret;
aspeed_spi_start_user(chip);
ret = aspeed_spi_send_cmd_addr(chip, op->addr.nbytes, op->addr.val, op->cmd.opcode);
if (ret < 0)
return ret;
aspeed_spi_write_to_ahb(chip->ahb_base, op->data.buf.out, op->data.nbytes);
aspeed_spi_stop_user(chip);
return 0;
}
/* support for 1-1-1, 1-1-2 or 1-1-4 */
static bool aspeed_spi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
if (op->cmd.buswidth > 1)
return false;
if (op->addr.nbytes != 0) {
if (op->addr.buswidth > 1)
return false;
if (op->addr.nbytes < 3 || op->addr.nbytes > 4)
return false;
}
if (op->dummy.nbytes != 0) {
if (op->dummy.buswidth > 1 || op->dummy.nbytes > 7)
return false;
}
if (op->data.nbytes != 0 && op->data.buswidth > 4)
return false;
return spi_mem_default_supports_op(mem, op);
}
static const struct aspeed_spi_data ast2400_spi_data;
static int do_aspeed_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
struct aspeed_spi *aspi = spi_controller_get_devdata(mem->spi->controller);
struct aspeed_spi_chip *chip = &aspi->chips[spi_get_chipselect(mem->spi, 0)];
u32 addr_mode, addr_mode_backup;
u32 ctl_val;
int ret = 0;
dev_dbg(aspi->dev,
"CE%d %s OP %#x mode:%d.%d.%d.%d naddr:%#x ndummies:%#x len:%#x",
chip->cs, op->data.dir == SPI_MEM_DATA_IN ? "read" : "write",
op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
op->dummy.buswidth, op->data.buswidth,
op->addr.nbytes, op->dummy.nbytes, op->data.nbytes);
addr_mode = readl(aspi->regs + CE_CTRL_REG);
addr_mode_backup = addr_mode;
ctl_val = chip->ctl_val[ASPEED_SPI_BASE];
ctl_val &= ~CTRL_IO_CMD_MASK;
ctl_val |= op->cmd.opcode << CTRL_COMMAND_SHIFT;
/* 4BYTE address mode */
if (op->addr.nbytes) {
if (op->addr.nbytes == 4)
addr_mode |= (0x11 << chip->cs);
else
addr_mode &= ~(0x11 << chip->cs);
if (op->addr.nbytes == 4 && chip->aspi->data == &ast2400_spi_data)
ctl_val |= CTRL_IO_ADDRESS_4B;
}
if (op->dummy.nbytes)
ctl_val |= CTRL_IO_DUMMY_SET(op->dummy.nbytes / op->dummy.buswidth);
if (op->data.nbytes)
ctl_val |= aspeed_spi_get_io_mode(op);
if (op->data.dir == SPI_MEM_DATA_OUT)
ctl_val |= CTRL_IO_MODE_WRITE;
else
ctl_val |= CTRL_IO_MODE_READ;
if (addr_mode != addr_mode_backup)
writel(addr_mode, aspi->regs + CE_CTRL_REG);
writel(ctl_val, chip->ctl);
if (op->data.dir == SPI_MEM_DATA_IN) {
if (!op->addr.nbytes)
ret = aspeed_spi_read_reg(chip, op);
else
ret = aspeed_spi_read_user(chip, op, op->addr.val,
op->data.nbytes, op->data.buf.in);
} else {
if (!op->addr.nbytes)
ret = aspeed_spi_write_reg(chip, op);
else
ret = aspeed_spi_write_user(chip, op);
}
/* Restore defaults */
if (addr_mode != addr_mode_backup)
writel(addr_mode_backup, aspi->regs + CE_CTRL_REG);
writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl);
return ret;
}
static int aspeed_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
int ret;
ret = do_aspeed_spi_exec_op(mem, op);
if (ret)
dev_err(&mem->spi->dev, "operation failed: %d\n", ret);
return ret;
}
static const char *aspeed_spi_get_name(struct spi_mem *mem)
{
struct aspeed_spi *aspi = spi_controller_get_devdata(mem->spi->controller);
struct device *dev = aspi->dev;
return devm_kasprintf(dev, GFP_KERNEL, "%s.%d", dev_name(dev),
spi_get_chipselect(mem->spi, 0));
}
struct aspeed_spi_window {
u32 cs;
u32 offset;
u32 size;
};
static void aspeed_spi_get_windows(struct aspeed_spi *aspi,
struct aspeed_spi_window windows[ASPEED_SPI_MAX_NUM_CS])
{
const struct aspeed_spi_data *data = aspi->data;
u32 reg_val;
u32 cs;
for (cs = 0; cs < aspi->data->max_cs; cs++) {
reg_val = readl(aspi->regs + CE0_SEGMENT_ADDR_REG + cs * 4);
windows[cs].cs = cs;
windows[cs].size = data->segment_end(aspi, reg_val) -
data->segment_start(aspi, reg_val);
windows[cs].offset = data->segment_start(aspi, reg_val) - aspi->ahb_base_phy;
dev_vdbg(aspi->dev, "CE%d offset=0x%.8x size=0x%x\n", cs,
windows[cs].offset, windows[cs].size);
}
}
/*
 * On the AST2600, some CE windows are closed by default at reset, but
 * U-Boot is expected to open all of them.
 */
static int aspeed_spi_chip_set_default_window(struct aspeed_spi_chip *chip)
{
struct aspeed_spi *aspi = chip->aspi;
struct aspeed_spi_window windows[ASPEED_SPI_MAX_NUM_CS] = { 0 };
struct aspeed_spi_window *win = &windows[chip->cs];
/* No segment registers for the AST2400 SPI controller */
if (aspi->data == &ast2400_spi_data) {
win->offset = 0;
win->size = aspi->ahb_window_size;
} else {
aspeed_spi_get_windows(aspi, windows);
}
chip->ahb_base = aspi->ahb_base + win->offset;
chip->ahb_window_size = win->size;
dev_dbg(aspi->dev, "CE%d default window [ 0x%.8x - 0x%.8x ] %dMB",
chip->cs, aspi->ahb_base_phy + win->offset,
aspi->ahb_base_phy + win->offset + win->size - 1,
win->size >> 20);
return chip->ahb_window_size ? 0 : -1;
}
static int aspeed_spi_set_window(struct aspeed_spi *aspi,
const struct aspeed_spi_window *win)
{
u32 start = aspi->ahb_base_phy + win->offset;
u32 end = start + win->size;
void __iomem *seg_reg = aspi->regs + CE0_SEGMENT_ADDR_REG + win->cs * 4;
u32 seg_val_backup = readl(seg_reg);
u32 seg_val = aspi->data->segment_reg(aspi, start, end);
if (seg_val == seg_val_backup)
return 0;
writel(seg_val, seg_reg);
/*
 * Restore the initial value if something goes wrong, or else we
 * could lose access to the chip.
 */
if (seg_val != readl(seg_reg)) {
dev_err(aspi->dev, "CE%d invalid window [ 0x%.8x - 0x%.8x ] %dMB",
win->cs, start, end - 1, win->size >> 20);
writel(seg_val_backup, seg_reg);
return -EIO;
}
if (win->size)
dev_dbg(aspi->dev, "CE%d new window [ 0x%.8x - 0x%.8x ] %dMB",
win->cs, start, end - 1, win->size >> 20);
else
dev_dbg(aspi->dev, "CE%d window closed", win->cs);
return 0;
}
/*
 * Still to be done when possible:
 * - Align mappings on the flash size (we don't have that information)
 * - ioremap each window; not strictly necessary since the overall
 *   window is correct.
 */
static const struct aspeed_spi_data ast2500_spi_data;
static const struct aspeed_spi_data ast2600_spi_data;
static const struct aspeed_spi_data ast2600_fmc_data;
static int aspeed_spi_chip_adjust_window(struct aspeed_spi_chip *chip,
u32 local_offset, u32 size)
{
struct aspeed_spi *aspi = chip->aspi;
struct aspeed_spi_window windows[ASPEED_SPI_MAX_NUM_CS] = { 0 };
struct aspeed_spi_window *win = &windows[chip->cs];
int ret;
/* No segment registers for the AST2400 SPI controller */
if (aspi->data == &ast2400_spi_data)
return 0;
/*
 * Due to a HW issue on the AST2500 SPI controller, the CE0
 * window size must be smaller than the 128MB maximum.
 */
if (aspi->data == &ast2500_spi_data && chip->cs == 0 && size == SZ_128M) {
size = 120 << 20;
dev_info(aspi->dev, "CE%d window resized to %dMB (AST2500 HW quirk)",
chip->cs, size >> 20);
}
/*
 * The decoding range of the AST2600 SPI controller must be at
 * least 2MB.
 */
if ((aspi->data == &ast2600_spi_data || aspi->data == &ast2600_fmc_data) &&
size < SZ_2M) {
size = SZ_2M;
dev_info(aspi->dev, "CE%d window resized to %dMB (AST2600 Decoding)",
chip->cs, size >> 20);
}
aspeed_spi_get_windows(aspi, windows);
/* Adjust this chip window */
win->offset += local_offset;
win->size = size;
if (win->offset + win->size > aspi->ahb_window_size) {
win->size = aspi->ahb_window_size - win->offset;
dev_warn(aspi->dev, "CE%d window resized to %dMB", chip->cs, win->size >> 20);
}
ret = aspeed_spi_set_window(aspi, win);
if (ret)
return ret;
/* Update chip mapping info */
chip->ahb_base = aspi->ahb_base + win->offset;
chip->ahb_window_size = win->size;
/*
* Also adjust next chip window to make sure that it does not
* overlap with the current window.
*/
if (chip->cs < aspi->data->max_cs - 1) {
struct aspeed_spi_window *next = &windows[chip->cs + 1];
/* Change offset and size to keep the same end address */
if ((next->offset + next->size) > (win->offset + win->size))
next->size = (next->offset + next->size) - (win->offset + win->size);
else
next->size = 0;
next->offset = win->offset + win->size;
aspeed_spi_set_window(aspi, next);
}
return 0;
}
static int aspeed_spi_do_calibration(struct aspeed_spi_chip *chip);
static int aspeed_spi_dirmap_create(struct spi_mem_dirmap_desc *desc)
{
struct aspeed_spi *aspi = spi_controller_get_devdata(desc->mem->spi->controller);
struct aspeed_spi_chip *chip = &aspi->chips[spi_get_chipselect(desc->mem->spi, 0)];
struct spi_mem_op *op = &desc->info.op_tmpl;
u32 ctl_val;
int ret = 0;
dev_dbg(aspi->dev,
"CE%d %s dirmap [ 0x%.8llx - 0x%.8llx ] OP %#x mode:%d.%d.%d.%d naddr:%#x ndummies:%#x\n",
chip->cs, op->data.dir == SPI_MEM_DATA_IN ? "read" : "write",
desc->info.offset, desc->info.offset + desc->info.length,
op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
op->dummy.buswidth, op->data.buswidth,
op->addr.nbytes, op->dummy.nbytes);
chip->clk_freq = desc->mem->spi->max_speed_hz;
/* Only for reads */
if (op->data.dir != SPI_MEM_DATA_IN)
return -EOPNOTSUPP;
aspeed_spi_chip_adjust_window(chip, desc->info.offset, desc->info.length);
if (desc->info.length > chip->ahb_window_size)
dev_warn(aspi->dev, "CE%d window (%dMB) too small for mapping",
chip->cs, chip->ahb_window_size >> 20);
/* Define the default IO read settings */
ctl_val = readl(chip->ctl) & ~CTRL_IO_CMD_MASK;
ctl_val |= aspeed_spi_get_io_mode(op) |
op->cmd.opcode << CTRL_COMMAND_SHIFT |
CTRL_IO_MODE_READ;
if (op->dummy.nbytes)
ctl_val |= CTRL_IO_DUMMY_SET(op->dummy.nbytes / op->dummy.buswidth);
/* Tune 4BYTE address mode */
if (op->addr.nbytes) {
u32 addr_mode = readl(aspi->regs + CE_CTRL_REG);
if (op->addr.nbytes == 4)
addr_mode |= (0x11 << chip->cs);
else
addr_mode &= ~(0x11 << chip->cs);
writel(addr_mode, aspi->regs + CE_CTRL_REG);
/* AST2400 SPI controller sets 4BYTE address mode in
* CE0 Control Register
*/
if (op->addr.nbytes == 4 && chip->aspi->data == &ast2400_spi_data)
ctl_val |= CTRL_IO_ADDRESS_4B;
}
/* READ mode is the controller default setting */
chip->ctl_val[ASPEED_SPI_READ] = ctl_val;
writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl);
ret = aspeed_spi_do_calibration(chip);
dev_info(aspi->dev, "CE%d read buswidth:%d [0x%08x]\n",
chip->cs, op->data.buswidth, chip->ctl_val[ASPEED_SPI_READ]);
return ret;
}
static ssize_t aspeed_spi_dirmap_read(struct spi_mem_dirmap_desc *desc,
u64 offset, size_t len, void *buf)
{
struct aspeed_spi *aspi = spi_controller_get_devdata(desc->mem->spi->controller);
struct aspeed_spi_chip *chip = &aspi->chips[spi_get_chipselect(desc->mem->spi, 0)];
/* Switch to USER command mode if mapping window is too small */
if (chip->ahb_window_size < offset + len) {
int ret;
ret = aspeed_spi_read_user(chip, &desc->info.op_tmpl, offset, len, buf);
if (ret < 0)
return ret;
} else {
memcpy_fromio(buf, chip->ahb_base + offset, len);
}
return len;
}
static const struct spi_controller_mem_ops aspeed_spi_mem_ops = {
.supports_op = aspeed_spi_supports_op,
.exec_op = aspeed_spi_exec_op,
.get_name = aspeed_spi_get_name,
.dirmap_create = aspeed_spi_dirmap_create,
.dirmap_read = aspeed_spi_dirmap_read,
};
static void aspeed_spi_chip_set_type(struct aspeed_spi *aspi, unsigned int cs, int type)
{
u32 reg;
reg = readl(aspi->regs + CONFIG_REG);
reg &= ~(0x3 << (cs * 2));
reg |= type << (cs * 2);
writel(reg, aspi->regs + CONFIG_REG);
}
static void aspeed_spi_chip_enable(struct aspeed_spi *aspi, unsigned int cs, bool enable)
{
u32 we_bit = BIT(aspi->data->we0 + cs);
u32 reg = readl(aspi->regs + CONFIG_REG);
if (enable)
reg |= we_bit;
else
reg &= ~we_bit;
writel(reg, aspi->regs + CONFIG_REG);
}
static int aspeed_spi_setup(struct spi_device *spi)
{
struct aspeed_spi *aspi = spi_controller_get_devdata(spi->controller);
const struct aspeed_spi_data *data = aspi->data;
unsigned int cs = spi_get_chipselect(spi, 0);
struct aspeed_spi_chip *chip = &aspi->chips[cs];
chip->aspi = aspi;
chip->cs = cs;
chip->ctl = aspi->regs + data->ctl0 + cs * 4;
/* The driver only supports SPI type flash */
if (data->hastype)
aspeed_spi_chip_set_type(aspi, cs, CONFIG_TYPE_SPI);
if (aspeed_spi_chip_set_default_window(chip) < 0) {
dev_warn(aspi->dev, "CE%d window invalid", cs);
return -EINVAL;
}
aspeed_spi_chip_enable(aspi, cs, true);
chip->ctl_val[ASPEED_SPI_BASE] = CTRL_CE_STOP_ACTIVE | CTRL_IO_MODE_USER;
dev_dbg(aspi->dev, "CE%d setup done\n", cs);
return 0;
}
static void aspeed_spi_cleanup(struct spi_device *spi)
{
struct aspeed_spi *aspi = spi_controller_get_devdata(spi->controller);
unsigned int cs = spi_get_chipselect(spi, 0);
aspeed_spi_chip_enable(aspi, cs, false);
dev_dbg(aspi->dev, "CE%d cleanup done\n", cs);
}
static void aspeed_spi_enable(struct aspeed_spi *aspi, bool enable)
{
int cs;
for (cs = 0; cs < aspi->data->max_cs; cs++)
aspeed_spi_chip_enable(aspi, cs, enable);
}
static int aspeed_spi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct aspeed_spi_data *data;
struct spi_controller *ctlr;
struct aspeed_spi *aspi;
struct resource *res;
int ret;
data = of_device_get_match_data(&pdev->dev);
if (!data)
return -ENODEV;
ctlr = devm_spi_alloc_host(dev, sizeof(*aspi));
if (!ctlr)
return -ENOMEM;
aspi = spi_controller_get_devdata(ctlr);
platform_set_drvdata(pdev, aspi);
aspi->data = data;
aspi->dev = dev;
aspi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(aspi->regs))
return PTR_ERR(aspi->regs);
aspi->ahb_base = devm_platform_get_and_ioremap_resource(pdev, 1, &res);
if (IS_ERR(aspi->ahb_base)) {
dev_err(dev, "missing AHB mapping window\n");
return PTR_ERR(aspi->ahb_base);
}
aspi->ahb_window_size = resource_size(res);
aspi->ahb_base_phy = res->start;
aspi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(aspi->clk)) {
dev_err(dev, "missing clock\n");
return PTR_ERR(aspi->clk);
}
aspi->clk_freq = clk_get_rate(aspi->clk);
if (!aspi->clk_freq) {
dev_err(dev, "invalid clock\n");
return -EINVAL;
}
ret = clk_prepare_enable(aspi->clk);
if (ret) {
dev_err(dev, "can not enable the clock\n");
return ret;
}
/* IRQ is for DMA, which the driver doesn't support yet */
ctlr->mode_bits = SPI_RX_DUAL | SPI_TX_DUAL | data->mode_bits;
ctlr->bus_num = pdev->id;
ctlr->mem_ops = &aspeed_spi_mem_ops;
ctlr->setup = aspeed_spi_setup;
ctlr->cleanup = aspeed_spi_cleanup;
ctlr->num_chipselect = data->max_cs;
ctlr->dev.of_node = dev->of_node;
ret = devm_spi_register_controller(dev, ctlr);
if (ret) {
dev_err(&pdev->dev, "spi_register_controller failed\n");
goto disable_clk;
}
return 0;
disable_clk:
clk_disable_unprepare(aspi->clk);
return ret;
}
static void aspeed_spi_remove(struct platform_device *pdev)
{
struct aspeed_spi *aspi = platform_get_drvdata(pdev);
aspeed_spi_enable(aspi, false);
clk_disable_unprepare(aspi->clk);
}
/*
* AHB mappings
*/
/*
 * The Segment Registers of the AST2400 and AST2500 use an 8MB unit.
 * The address range is encoded with absolute addresses in the overall
 * mapping window.
 */
static u32 aspeed_spi_segment_start(struct aspeed_spi *aspi, u32 reg)
{
return ((reg >> 16) & 0xFF) << 23;
}
static u32 aspeed_spi_segment_end(struct aspeed_spi *aspi, u32 reg)
{
return ((reg >> 24) & 0xFF) << 23;
}
static u32 aspeed_spi_segment_reg(struct aspeed_spi *aspi, u32 start, u32 end)
{
return (((start >> 23) & 0xFF) << 16) | (((end >> 23) & 0xFF) << 24);
}
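/*
 * Worked example of the encoding above: a 32MB window from 0x20000000
 * to 0x22000000 gives ((0x20000000 >> 23) << 16) |
 * ((0x22000000 >> 23) << 24) = 0x44400000.
 */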
/*
* The Segment Registers of the AST2600 use a 1MB unit. The address
* range is encoded with offsets in the overall mapping window.
*/
#define AST2600_SEG_ADDR_MASK 0x0ff00000
static u32 aspeed_spi_segment_ast2600_start(struct aspeed_spi *aspi,
u32 reg)
{
u32 start_offset = (reg << 16) & AST2600_SEG_ADDR_MASK;
return aspi->ahb_base_phy + start_offset;
}
static u32 aspeed_spi_segment_ast2600_end(struct aspeed_spi *aspi,
u32 reg)
{
u32 end_offset = reg & AST2600_SEG_ADDR_MASK;
/* segment is disabled */
if (!end_offset)
return aspi->ahb_base_phy;
return aspi->ahb_base_phy + end_offset + 0x100000;
}
static u32 aspeed_spi_segment_ast2600_reg(struct aspeed_spi *aspi,
u32 start, u32 end)
{
/* disable zero size segments */
if (start == end)
return 0;
return ((start & AST2600_SEG_ADDR_MASK) >> 16) |
((end - 1) & AST2600_SEG_ADDR_MASK);
}
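/*
 * In the AST2600 encoding above, bits 27:20 of the window start
 * address are stored in register bits 11:4 and bits 27:20 of (end - 1)
 * are kept in bits 27:20; a register value of zero disables the
 * segment.
 */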
/*
* Read timing compensation sequences
*/
#define CALIBRATE_BUF_SIZE SZ_16K
static bool aspeed_spi_check_reads(struct aspeed_spi_chip *chip,
const u8 *golden_buf, u8 *test_buf)
{
int i;
for (i = 0; i < 10; i++) {
memcpy_fromio(test_buf, chip->ahb_base, CALIBRATE_BUF_SIZE);
if (memcmp(test_buf, golden_buf, CALIBRATE_BUF_SIZE) != 0) {
#if defined(VERBOSE_DEBUG)
print_hex_dump_bytes(DEVICE_NAME " fail: ", DUMP_PREFIX_NONE,
test_buf, 0x100);
#endif
return false;
}
}
return true;
}
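/*
 * The calibration below packs one 4-bit timing value per HCLK divider
 * into the shared register: FREAD_TPASS(i) puts the HCLK delay count
 * (i / 2) in the low bits and sets bit 3 for even i, which the debug
 * output reports as an extra 4ns data-input delay.
 */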
#define FREAD_TPASS(i) (((i) / 2) | (((i) & 1) ? 0 : 8))
/*
* The timing register is shared by all devices. Only update for CE0.
*/
static int aspeed_spi_calibrate(struct aspeed_spi_chip *chip, u32 hdiv,
const u8 *golden_buf, u8 *test_buf)
{
struct aspeed_spi *aspi = chip->aspi;
const struct aspeed_spi_data *data = aspi->data;
int i;
int good_pass = -1, pass_count = 0;
u32 shift = (hdiv - 1) << 2;
u32 mask = ~(0xfu << shift);
u32 fread_timing_val = 0;
/* Try HCLK delays 0..5, each with and without the extra DI delay,
 * and look for a stable run of passing settings.
 */
for (i = 0; i < 12; i++) {
bool pass;
if (chip->cs == 0) {
fread_timing_val &= mask;
fread_timing_val |= FREAD_TPASS(i) << shift;
writel(fread_timing_val, aspi->regs + data->timing);
}
pass = aspeed_spi_check_reads(chip, golden_buf, test_buf);
dev_dbg(aspi->dev,
" * [%08x] %d HCLK delay, %dns DI delay : %s",
fread_timing_val, i / 2, (i & 1) ? 0 : 4,
pass ? "PASS" : "FAIL");
if (pass) {
pass_count++;
if (pass_count == 3) {
good_pass = i - 1;
break;
}
} else {
pass_count = 0;
}
}
/* No good setting for this frequency */
if (good_pass < 0)
return -1;
/* We have at least one pass of margin, let's use first pass */
if (chip->cs == 0) {
fread_timing_val &= mask;
fread_timing_val |= FREAD_TPASS(good_pass) << shift;
writel(fread_timing_val, aspi->regs + data->timing);
}
dev_dbg(aspi->dev, " * -> good is pass %d [0x%08x]",
good_pass, fread_timing_val);
return 0;
}
static bool aspeed_spi_check_calib_data(const u8 *test_buf, u32 size)
{
const u32 *tb32 = (const u32 *)test_buf;
u32 i, cnt = 0;
/* Check that there are enough words that are neither all 0s
 * nor all 1s so the calibration can be considered valid.
 *
 * An arbitrary threshold of 64 words is used for now.
 */
size >>= 2;
for (i = 0; i < size; i++) {
if (tb32[i] != 0 && tb32[i] != 0xffffffff)
cnt++;
}
return cnt >= 64;
}
static const u32 aspeed_spi_hclk_divs[] = {
0xf, /* HCLK */
0x7, /* HCLK/2 */
0xe, /* HCLK/3 */
0x6, /* HCLK/4 */
0xd, /* HCLK/5 */
};
#define ASPEED_SPI_HCLK_DIV(i) \
(aspeed_spi_hclk_divs[(i) - 1] << CTRL_FREQ_SEL_SHIFT)
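/*
 * The table above maps an HCLK divider i (SCK = HCLK / i) to its 4-bit
 * register encoding; ASPEED_SPI_HCLK_DIV(i) shifts that encoding into
 * the frequency-select field of the CE control register.
 */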
static int aspeed_spi_do_calibration(struct aspeed_spi_chip *chip)
{
struct aspeed_spi *aspi = chip->aspi;
const struct aspeed_spi_data *data = aspi->data;
u32 ahb_freq = aspi->clk_freq;
u32 max_freq = chip->clk_freq;
u32 ctl_val;
u8 *golden_buf = NULL;
u8 *test_buf = NULL;
int i, rc, best_div = -1;
dev_dbg(aspi->dev, "calculate timing compensation - AHB freq: %d MHz",
ahb_freq / 1000000);
/*
 * Use a safe low frequency to read the calibration area and
 * capture the golden data.
 */
ctl_val = chip->ctl_val[ASPEED_SPI_READ] & data->hclk_mask;
writel(ctl_val, chip->ctl);
test_buf = kzalloc(CALIBRATE_BUF_SIZE * 2, GFP_KERNEL);
if (!test_buf)
return -ENOMEM;
golden_buf = test_buf + CALIBRATE_BUF_SIZE;
memcpy_fromio(golden_buf, chip->ahb_base, CALIBRATE_BUF_SIZE);
if (!aspeed_spi_check_calib_data(golden_buf, CALIBRATE_BUF_SIZE)) {
dev_info(aspi->dev, "Calibration area too uniform, using low speed");
goto no_calib;
}
#if defined(VERBOSE_DEBUG)
print_hex_dump_bytes(DEVICE_NAME " good: ", DUMP_PREFIX_NONE,
golden_buf, 0x100);
#endif
/* Now we iterate the HCLK dividers until we find our breaking point */
for (i = ARRAY_SIZE(aspeed_spi_hclk_divs); i > data->hdiv_max - 1; i--) {
u32 tv, freq;
freq = ahb_freq / i;
if (freq > max_freq)
continue;
/* Set the timing */
tv = chip->ctl_val[ASPEED_SPI_READ] | ASPEED_SPI_HCLK_DIV(i);
writel(tv, chip->ctl);
dev_dbg(aspi->dev, "Trying HCLK/%d [%08x] ...", i, tv);
rc = data->calibrate(chip, i, golden_buf, test_buf);
if (rc == 0)
best_div = i;
}
/* Nothing found? */
if (best_div < 0) {
dev_warn(aspi->dev, "No good frequency, using dumb slow");
} else {
dev_dbg(aspi->dev, "Found good read timings at HCLK/%d", best_div);
/* Record the freq */
for (i = 0; i < ASPEED_SPI_MAX; i++)
chip->ctl_val[i] = (chip->ctl_val[i] & data->hclk_mask) |
ASPEED_SPI_HCLK_DIV(best_div);
}
no_calib:
writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl);
kfree(test_buf);
return 0;
}
#define TIMING_DELAY_DI BIT(3)
#define TIMING_DELAY_HCYCLE_MAX 5
#define TIMING_REG_AST2600(chip) \
((chip)->aspi->regs + (chip)->aspi->data->timing + \
(chip)->cs * 4)
static int aspeed_spi_ast2600_calibrate(struct aspeed_spi_chip *chip, u32 hdiv,
const u8 *golden_buf, u8 *test_buf)
{
struct aspeed_spi *aspi = chip->aspi;
int hcycle;
u32 shift = (hdiv - 2) << 3;
u32 mask = ~(0xfu << shift);
u32 fread_timing_val = 0;
for (hcycle = 0; hcycle <= TIMING_DELAY_HCYCLE_MAX; hcycle++) {
int delay_ns;
bool pass = false;
fread_timing_val &= mask;
fread_timing_val |= hcycle << shift;
/* no DI input delay first */
writel(fread_timing_val, TIMING_REG_AST2600(chip));
pass = aspeed_spi_check_reads(chip, golden_buf, test_buf);
dev_dbg(aspi->dev,
" * [%08x] %d HCLK delay, DI delay none : %s",
fread_timing_val, hcycle, pass ? "PASS" : "FAIL");
if (pass)
return 0;
/* Add DI input delays */
fread_timing_val &= mask;
fread_timing_val |= (TIMING_DELAY_DI | hcycle) << shift;
for (delay_ns = 0; delay_ns < 0x10; delay_ns++) {
fread_timing_val &= ~(0xf << (4 + shift));
fread_timing_val |= delay_ns << (4 + shift);
writel(fread_timing_val, TIMING_REG_AST2600(chip));
pass = aspeed_spi_check_reads(chip, golden_buf, test_buf);
dev_dbg(aspi->dev,
" * [%08x] %d HCLK delay, DI delay %d.%dns : %s",
fread_timing_val, hcycle, (delay_ns + 1) / 2,
(delay_ns + 1) & 1 ? 5 : 0, pass ? "PASS" : "FAIL");
/*
* TODO: This is optimistic. We should look
* for a working interval and save the middle
* value in the read timing register.
*/
if (pass)
return 0;
}
}
/* No good setting for this frequency */
return -1;
}
/*
* Platform definitions
*/
static const struct aspeed_spi_data ast2400_fmc_data = {
.max_cs = 5,
.hastype = true,
.we0 = 16,
.ctl0 = CE0_CTRL_REG,
.timing = CE0_TIMING_COMPENSATION_REG,
.hclk_mask = 0xfffff0ff,
.hdiv_max = 1,
.calibrate = aspeed_spi_calibrate,
.segment_start = aspeed_spi_segment_start,
.segment_end = aspeed_spi_segment_end,
.segment_reg = aspeed_spi_segment_reg,
};
static const struct aspeed_spi_data ast2400_spi_data = {
.max_cs = 1,
.hastype = false,
.we0 = 0,
.ctl0 = 0x04,
.timing = 0x14,
.hclk_mask = 0xfffff0ff,
.hdiv_max = 1,
.calibrate = aspeed_spi_calibrate,
/* No segment registers */
};
static const struct aspeed_spi_data ast2500_fmc_data = {
.max_cs = 3,
.hastype = true,
.we0 = 16,
.ctl0 = CE0_CTRL_REG,
.timing = CE0_TIMING_COMPENSATION_REG,
.hclk_mask = 0xffffd0ff,
.hdiv_max = 1,
.calibrate = aspeed_spi_calibrate,
.segment_start = aspeed_spi_segment_start,
.segment_end = aspeed_spi_segment_end,
.segment_reg = aspeed_spi_segment_reg,
};
static const struct aspeed_spi_data ast2500_spi_data = {
.max_cs = 2,
.hastype = false,
.we0 = 16,
.ctl0 = CE0_CTRL_REG,
.timing = CE0_TIMING_COMPENSATION_REG,
.hclk_mask = 0xffffd0ff,
.hdiv_max = 1,
.calibrate = aspeed_spi_calibrate,
.segment_start = aspeed_spi_segment_start,
.segment_end = aspeed_spi_segment_end,
.segment_reg = aspeed_spi_segment_reg,
};
static const struct aspeed_spi_data ast2600_fmc_data = {
.max_cs = 3,
.hastype = false,
.mode_bits = SPI_RX_QUAD | SPI_TX_QUAD,
.we0 = 16,
.ctl0 = CE0_CTRL_REG,
.timing = CE0_TIMING_COMPENSATION_REG,
.hclk_mask = 0xf0fff0ff,
.hdiv_max = 2,
.calibrate = aspeed_spi_ast2600_calibrate,
.segment_start = aspeed_spi_segment_ast2600_start,
.segment_end = aspeed_spi_segment_ast2600_end,
.segment_reg = aspeed_spi_segment_ast2600_reg,
};
static const struct aspeed_spi_data ast2600_spi_data = {
.max_cs = 2,
.hastype = false,
.mode_bits = SPI_RX_QUAD | SPI_TX_QUAD,
.we0 = 16,
.ctl0 = CE0_CTRL_REG,
.timing = CE0_TIMING_COMPENSATION_REG,
.hclk_mask = 0xf0fff0ff,
.hdiv_max = 2,
.calibrate = aspeed_spi_ast2600_calibrate,
.segment_start = aspeed_spi_segment_ast2600_start,
.segment_end = aspeed_spi_segment_ast2600_end,
.segment_reg = aspeed_spi_segment_ast2600_reg,
};
static const struct of_device_id aspeed_spi_matches[] = {
{ .compatible = "aspeed,ast2400-fmc", .data = &ast2400_fmc_data },
{ .compatible = "aspeed,ast2400-spi", .data = &ast2400_spi_data },
{ .compatible = "aspeed,ast2500-fmc", .data = &ast2500_fmc_data },
{ .compatible = "aspeed,ast2500-spi", .data = &ast2500_spi_data },
{ .compatible = "aspeed,ast2600-fmc", .data = &ast2600_fmc_data },
{ .compatible = "aspeed,ast2600-spi", .data = &ast2600_spi_data },
{ }
};
MODULE_DEVICE_TABLE(of, aspeed_spi_matches);
static struct platform_driver aspeed_spi_driver = {
.probe = aspeed_spi_probe,
.remove_new = aspeed_spi_remove,
.driver = {
.name = DEVICE_NAME,
.of_match_table = aspeed_spi_matches,
}
};
module_platform_driver(aspeed_spi_driver);
MODULE_DESCRIPTION("ASPEED Static Memory Controller Driver");
MODULE_AUTHOR("Chin-Ting Kuo <[email protected]>");
MODULE_AUTHOR("Cedric Le Goater <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/spi/spi-aspeed-smc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
// Copyright (C) IBM Corporation 2020
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/fsi.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/spi/spi.h>
#define FSI_ENGID_SPI 0x23
#define FSI_MBOX_ROOT_CTRL_8 0x2860
#define FSI_MBOX_ROOT_CTRL_8_SPI_MUX 0xf0000000
#define FSI2SPI_DATA0 0x00
#define FSI2SPI_DATA1 0x04
#define FSI2SPI_CMD 0x08
#define FSI2SPI_CMD_WRITE BIT(31)
#define FSI2SPI_RESET 0x18
#define FSI2SPI_STATUS 0x1c
#define FSI2SPI_STATUS_ANY_ERROR BIT(31)
#define FSI2SPI_IRQ 0x20
#define SPI_FSI_BASE 0x70000
#define SPI_FSI_TIMEOUT_MS 1000
#define SPI_FSI_MAX_RX_SIZE 8
#define SPI_FSI_MAX_TX_SIZE 40
#define SPI_FSI_ERROR 0x0
#define SPI_FSI_COUNTER_CFG 0x1
#define SPI_FSI_CFG1 0x2
#define SPI_FSI_CLOCK_CFG 0x3
#define SPI_FSI_CLOCK_CFG_MM_ENABLE BIT_ULL(32)
#define SPI_FSI_CLOCK_CFG_ECC_DISABLE (BIT_ULL(35) | BIT_ULL(33))
#define SPI_FSI_CLOCK_CFG_RESET1 (BIT_ULL(36) | BIT_ULL(38))
#define SPI_FSI_CLOCK_CFG_RESET2 (BIT_ULL(37) | BIT_ULL(39))
#define SPI_FSI_CLOCK_CFG_MODE (BIT_ULL(41) | BIT_ULL(42))
#define SPI_FSI_CLOCK_CFG_SCK_RECV_DEL GENMASK_ULL(51, 44)
#define SPI_FSI_CLOCK_CFG_SCK_NO_DEL BIT_ULL(51)
#define SPI_FSI_CLOCK_CFG_SCK_DIV GENMASK_ULL(63, 52)
#define SPI_FSI_MMAP 0x4
#define SPI_FSI_DATA_TX 0x5
#define SPI_FSI_DATA_RX 0x6
#define SPI_FSI_SEQUENCE 0x7
#define SPI_FSI_SEQUENCE_STOP 0x00
#define SPI_FSI_SEQUENCE_SEL_SLAVE(x) (0x10 | ((x) & 0xf))
#define SPI_FSI_SEQUENCE_SHIFT_OUT(x) (0x30 | ((x) & 0xf))
#define SPI_FSI_SEQUENCE_SHIFT_IN(x) (0x40 | ((x) & 0xf))
#define SPI_FSI_SEQUENCE_COPY_DATA_TX 0xc0
#define SPI_FSI_SEQUENCE_BRANCH(x) (0xe0 | ((x) & 0xf))
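/*
 * A transfer is built as a string of these one-byte sequencer ops
 * packed MSB-first into the 64-bit sequence register: typically a
 * SPI_FSI_SEQUENCE_SEL_SLAVE() to select the device, one or more
 * SHIFT_OUT ops of up to 8 bytes each, an optional SHIFT_IN for the
 * response, and a final SEL_SLAVE(0) to release the device (see
 * fsi_spi_transfer_one_message()).
 */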
#define SPI_FSI_STATUS 0x8
#define SPI_FSI_STATUS_ERROR \
(GENMASK_ULL(31, 21) | GENMASK_ULL(15, 12))
#define SPI_FSI_STATUS_SEQ_STATE GENMASK_ULL(55, 48)
#define SPI_FSI_STATUS_SEQ_STATE_IDLE BIT_ULL(48)
#define SPI_FSI_STATUS_TDR_UNDERRUN BIT_ULL(57)
#define SPI_FSI_STATUS_TDR_OVERRUN BIT_ULL(58)
#define SPI_FSI_STATUS_TDR_FULL BIT_ULL(59)
#define SPI_FSI_STATUS_RDR_UNDERRUN BIT_ULL(61)
#define SPI_FSI_STATUS_RDR_OVERRUN BIT_ULL(62)
#define SPI_FSI_STATUS_RDR_FULL BIT_ULL(63)
#define SPI_FSI_STATUS_ANY_ERROR \
(SPI_FSI_STATUS_ERROR | \
SPI_FSI_STATUS_TDR_OVERRUN | SPI_FSI_STATUS_RDR_UNDERRUN | \
SPI_FSI_STATUS_RDR_OVERRUN)
#define SPI_FSI_PORT_CTRL 0x9
struct fsi2spi {
struct fsi_device *fsi; /* FSI2SPI CFAM engine device */
struct mutex lock; /* lock access to the device */
};
struct fsi_spi {
struct device *dev; /* SPI controller device */
struct fsi2spi *bridge; /* FSI2SPI device */
u32 base;
};
struct fsi_spi_sequence {
int bit;
u64 data;
};
static int fsi_spi_check_mux(struct fsi_device *fsi, struct device *dev)
{
int rc;
u32 root_ctrl_8;
__be32 root_ctrl_8_be;
rc = fsi_slave_read(fsi->slave, FSI_MBOX_ROOT_CTRL_8, &root_ctrl_8_be,
sizeof(root_ctrl_8_be));
if (rc)
return rc;
root_ctrl_8 = be32_to_cpu(root_ctrl_8_be);
dev_dbg(dev, "Root control register 8: %08x\n", root_ctrl_8);
if ((root_ctrl_8 & FSI_MBOX_ROOT_CTRL_8_SPI_MUX) ==
FSI_MBOX_ROOT_CTRL_8_SPI_MUX)
return 0;
return -ENOLINK;
}
static int fsi_spi_check_status(struct fsi_spi *ctx)
{
int rc;
u32 sts;
__be32 sts_be;
rc = fsi_device_read(ctx->bridge->fsi, FSI2SPI_STATUS, &sts_be,
sizeof(sts_be));
if (rc)
return rc;
sts = be32_to_cpu(sts_be);
if (sts & FSI2SPI_STATUS_ANY_ERROR) {
dev_err(ctx->dev, "Error with FSI2SPI interface: %08x.\n", sts);
return -EIO;
}
return 0;
}
static int fsi_spi_read_reg(struct fsi_spi *ctx, u32 offset, u64 *value)
{
int rc = 0;
__be32 cmd_be;
__be32 data_be;
u32 cmd = offset + ctx->base;
struct fsi2spi *bridge = ctx->bridge;
*value = 0ULL;
if (cmd & FSI2SPI_CMD_WRITE)
return -EINVAL;
rc = mutex_lock_interruptible(&bridge->lock);
if (rc)
return rc;
cmd_be = cpu_to_be32(cmd);
rc = fsi_device_write(bridge->fsi, FSI2SPI_CMD, &cmd_be,
sizeof(cmd_be));
if (rc)
goto unlock;
rc = fsi_spi_check_status(ctx);
if (rc)
goto unlock;
rc = fsi_device_read(bridge->fsi, FSI2SPI_DATA0, &data_be,
sizeof(data_be));
if (rc)
goto unlock;
*value |= (u64)be32_to_cpu(data_be) << 32;
rc = fsi_device_read(bridge->fsi, FSI2SPI_DATA1, &data_be,
sizeof(data_be));
if (rc)
goto unlock;
*value |= (u64)be32_to_cpu(data_be);
dev_dbg(ctx->dev, "Read %02x[%016llx].\n", offset, *value);
unlock:
mutex_unlock(&bridge->lock);
return rc;
}
static int fsi_spi_write_reg(struct fsi_spi *ctx, u32 offset, u64 value)
{
int rc = 0;
__be32 cmd_be;
__be32 data_be;
u32 cmd = offset + ctx->base;
struct fsi2spi *bridge = ctx->bridge;
if (cmd & FSI2SPI_CMD_WRITE)
return -EINVAL;
rc = mutex_lock_interruptible(&bridge->lock);
if (rc)
return rc;
dev_dbg(ctx->dev, "Write %02x[%016llx].\n", offset, value);
data_be = cpu_to_be32(upper_32_bits(value));
rc = fsi_device_write(bridge->fsi, FSI2SPI_DATA0, &data_be,
sizeof(data_be));
if (rc)
goto unlock;
data_be = cpu_to_be32(lower_32_bits(value));
rc = fsi_device_write(bridge->fsi, FSI2SPI_DATA1, &data_be,
sizeof(data_be));
if (rc)
goto unlock;
cmd_be = cpu_to_be32(cmd | FSI2SPI_CMD_WRITE);
rc = fsi_device_write(bridge->fsi, FSI2SPI_CMD, &cmd_be,
sizeof(cmd_be));
if (rc)
goto unlock;
rc = fsi_spi_check_status(ctx);
unlock:
mutex_unlock(&bridge->lock);
return rc;
}
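/*
 * The helpers below pack/unpack up to 8 bytes per access of the 64-bit
 * shift registers: fsi_spi_data_out() places tx[0] in the highest byte
 * of the u64 buffer (assuming a little-endian host), while
 * fsi_spi_data_in() extracts the received bytes from the low end of
 * the value, most significant byte first.
 */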
static int fsi_spi_data_in(u64 in, u8 *rx, int len)
{
int i;
int num_bytes = min(len, 8);
for (i = 0; i < num_bytes; ++i)
rx[i] = (u8)(in >> (8 * ((num_bytes - 1) - i)));
return num_bytes;
}
static int fsi_spi_data_out(u64 *out, const u8 *tx, int len)
{
int i;
int num_bytes = min(len, 8);
u8 *out_bytes = (u8 *)out;
/* Unused bytes of the tx data should be 0. */
*out = 0ULL;
for (i = 0; i < num_bytes; ++i)
out_bytes[8 - (i + 1)] = tx[i];
return num_bytes;
}
static int fsi_spi_reset(struct fsi_spi *ctx)
{
int rc;
dev_dbg(ctx->dev, "Resetting SPI controller.\n");
rc = fsi_spi_write_reg(ctx, SPI_FSI_CLOCK_CFG,
SPI_FSI_CLOCK_CFG_RESET1);
if (rc)
return rc;
rc = fsi_spi_write_reg(ctx, SPI_FSI_CLOCK_CFG,
SPI_FSI_CLOCK_CFG_RESET2);
if (rc)
return rc;
return fsi_spi_write_reg(ctx, SPI_FSI_STATUS, 0ULL);
}
static int fsi_spi_status(struct fsi_spi *ctx, u64 *status, const char *dir)
{
int rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS, status);
if (rc)
return rc;
if (*status & SPI_FSI_STATUS_ANY_ERROR) {
dev_err(ctx->dev, "%s error: %016llx\n", dir, *status);
rc = fsi_spi_reset(ctx);
if (rc)
return rc;
return -EREMOTEIO;
}
return 0;
}
static void fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val)
{
/*
 * Add the next instruction byte to the 8-byte sequence register,
 * then move the bit offset down so that the next instruction goes
 * into the following slot.
 */
seq->data |= (u64)val << seq->bit;
seq->bit -= 8;
}
static void fsi_spi_sequence_init(struct fsi_spi_sequence *seq)
{
seq->bit = 56;
seq->data = 0ULL;
}
static int fsi_spi_transfer_data(struct fsi_spi *ctx,
struct spi_transfer *transfer)
{
int loops;
int rc = 0;
unsigned long end;
u64 status = 0ULL;
if (transfer->tx_buf) {
int nb;
int sent = 0;
u64 out = 0ULL;
const u8 *tx = transfer->tx_buf;
while (transfer->len > sent) {
nb = fsi_spi_data_out(&out, &tx[sent],
(int)transfer->len - sent);
rc = fsi_spi_write_reg(ctx, SPI_FSI_DATA_TX, out);
if (rc)
return rc;
loops = 0;
end = jiffies + msecs_to_jiffies(SPI_FSI_TIMEOUT_MS);
do {
if (loops++ && time_after(jiffies, end))
return -ETIMEDOUT;
rc = fsi_spi_status(ctx, &status, "TX");
if (rc)
return rc;
} while (status & SPI_FSI_STATUS_TDR_FULL);
sent += nb;
}
} else if (transfer->rx_buf) {
int recv = 0;
u64 in = 0ULL;
u8 *rx = transfer->rx_buf;
while (transfer->len > recv) {
loops = 0;
end = jiffies + msecs_to_jiffies(SPI_FSI_TIMEOUT_MS);
do {
if (loops++ && time_after(jiffies, end))
return -ETIMEDOUT;
rc = fsi_spi_status(ctx, &status, "RX");
if (rc)
return rc;
} while (!(status & SPI_FSI_STATUS_RDR_FULL));
rc = fsi_spi_read_reg(ctx, SPI_FSI_DATA_RX, &in);
if (rc)
return rc;
recv += fsi_spi_data_in(in, &rx[recv],
(int)transfer->len - recv);
}
}
return 0;
}
static int fsi_spi_transfer_init(struct fsi_spi *ctx)
{
int loops = 0;
int rc;
bool reset = false;
unsigned long end;
u64 seq_state;
u64 clock_cfg = 0ULL;
u64 status = 0ULL;
u64 wanted_clock_cfg = SPI_FSI_CLOCK_CFG_ECC_DISABLE |
SPI_FSI_CLOCK_CFG_SCK_NO_DEL |
FIELD_PREP(SPI_FSI_CLOCK_CFG_SCK_DIV, 19);
end = jiffies + msecs_to_jiffies(SPI_FSI_TIMEOUT_MS);
do {
if (loops++ && time_after(jiffies, end))
return -ETIMEDOUT;
rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS, &status);
if (rc)
return rc;
seq_state = status & SPI_FSI_STATUS_SEQ_STATE;
if (status & (SPI_FSI_STATUS_ANY_ERROR |
SPI_FSI_STATUS_TDR_FULL |
SPI_FSI_STATUS_RDR_FULL)) {
if (reset) {
dev_err(ctx->dev,
"Initialization error: %08llx\n",
status);
return -EIO;
}
rc = fsi_spi_reset(ctx);
if (rc)
return rc;
reset = true;
continue;
}
} while (seq_state && (seq_state != SPI_FSI_STATUS_SEQ_STATE_IDLE));
rc = fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, 0ULL);
if (rc)
return rc;
rc = fsi_spi_read_reg(ctx, SPI_FSI_CLOCK_CFG, &clock_cfg);
if (rc)
return rc;
if ((clock_cfg & (SPI_FSI_CLOCK_CFG_MM_ENABLE |
SPI_FSI_CLOCK_CFG_ECC_DISABLE |
SPI_FSI_CLOCK_CFG_MODE |
SPI_FSI_CLOCK_CFG_SCK_RECV_DEL |
SPI_FSI_CLOCK_CFG_SCK_DIV)) != wanted_clock_cfg)
rc = fsi_spi_write_reg(ctx, SPI_FSI_CLOCK_CFG,
wanted_clock_cfg);
return rc;
}
static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
struct spi_message *mesg)
{
int rc;
u8 seq_slave = SPI_FSI_SEQUENCE_SEL_SLAVE(spi_get_chipselect(mesg->spi, 0) + 1);
unsigned int len;
struct spi_transfer *transfer;
struct fsi_spi *ctx = spi_controller_get_devdata(ctlr);
rc = fsi_spi_check_mux(ctx->bridge->fsi, ctx->dev);
if (rc)
goto error;
list_for_each_entry(transfer, &mesg->transfers, transfer_list) {
struct fsi_spi_sequence seq;
struct spi_transfer *next = NULL;
/* Sequencer must do shift out (tx) first. */
if (!transfer->tx_buf || transfer->len > SPI_FSI_MAX_TX_SIZE) {
rc = -EINVAL;
goto error;
}
dev_dbg(ctx->dev, "Start tx of %d bytes.\n", transfer->len);
rc = fsi_spi_transfer_init(ctx);
if (rc < 0)
goto error;
fsi_spi_sequence_init(&seq);
fsi_spi_sequence_add(&seq, seq_slave);
len = transfer->len;
while (len > 8) {
fsi_spi_sequence_add(&seq,
SPI_FSI_SEQUENCE_SHIFT_OUT(8));
len -= 8;
}
fsi_spi_sequence_add(&seq, SPI_FSI_SEQUENCE_SHIFT_OUT(len));
if (!list_is_last(&transfer->transfer_list,
&mesg->transfers)) {
next = list_next_entry(transfer, transfer_list);
/* Sequencer can only do shift in (rx) after tx. */
if (next->rx_buf) {
u8 shift;
if (next->len > SPI_FSI_MAX_RX_SIZE) {
rc = -EINVAL;
goto error;
}
dev_dbg(ctx->dev, "Sequence rx of %d bytes.\n",
next->len);
shift = SPI_FSI_SEQUENCE_SHIFT_IN(next->len);
fsi_spi_sequence_add(&seq, shift);
} else {
next = NULL;
}
}
fsi_spi_sequence_add(&seq, SPI_FSI_SEQUENCE_SEL_SLAVE(0));
rc = fsi_spi_write_reg(ctx, SPI_FSI_SEQUENCE, seq.data);
if (rc)
goto error;
rc = fsi_spi_transfer_data(ctx, transfer);
if (rc)
goto error;
if (next) {
rc = fsi_spi_transfer_data(ctx, next);
if (rc)
goto error;
transfer = next;
}
}
error:
mesg->status = rc;
spi_finalize_current_message(ctlr);
return rc;
}
static size_t fsi_spi_max_transfer_size(struct spi_device *spi)
{
return SPI_FSI_MAX_RX_SIZE;
}
static int fsi_spi_probe(struct device *dev)
{
int rc;
struct device_node *np;
int num_controllers_registered = 0;
struct fsi2spi *bridge;
struct fsi_device *fsi = to_fsi_dev(dev);
rc = fsi_spi_check_mux(fsi, dev);
if (rc)
return -ENODEV;
bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL);
if (!bridge)
return -ENOMEM;
bridge->fsi = fsi;
mutex_init(&bridge->lock);
for_each_available_child_of_node(dev->of_node, np) {
u32 base;
struct fsi_spi *ctx;
struct spi_controller *ctlr;
if (of_property_read_u32(np, "reg", &base))
continue;
ctlr = spi_alloc_host(dev, sizeof(*ctx));
if (!ctlr) {
of_node_put(np);
break;
}
ctlr->dev.of_node = np;
ctlr->num_chipselect = of_get_available_child_count(np) ?: 1;
ctlr->flags = SPI_CONTROLLER_HALF_DUPLEX;
ctlr->max_transfer_size = fsi_spi_max_transfer_size;
ctlr->transfer_one_message = fsi_spi_transfer_one_message;
ctx = spi_controller_get_devdata(ctlr);
ctx->dev = &ctlr->dev;
ctx->bridge = bridge;
ctx->base = base + SPI_FSI_BASE;
rc = devm_spi_register_controller(dev, ctlr);
if (rc)
spi_controller_put(ctlr);
else
num_controllers_registered++;
}
if (!num_controllers_registered)
return -ENODEV;
return 0;
}
static const struct fsi_device_id fsi_spi_ids[] = {
{ FSI_ENGID_SPI, FSI_VERSION_ANY },
{ }
};
MODULE_DEVICE_TABLE(fsi, fsi_spi_ids);
static struct fsi_driver fsi_spi_driver = {
.id_table = fsi_spi_ids,
.drv = {
.name = "spi-fsi",
.bus = &fsi_bus_type,
.probe = fsi_spi_probe,
},
};
module_fsi_driver(fsi_spi_driver);
MODULE_AUTHOR("Eddie James <[email protected]>");
MODULE_DESCRIPTION("FSI attached SPI controller");
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-fsi.c |
// SPDX-License-Identifier: GPL-2.0
//
// SPI controller driver for Qualcomm Atheros AR934x/QCA95xx SoCs
//
// Copyright (C) 2020 Chuanhong Guo <[email protected]>
//
// Based on spi-mt7621.c:
// Copyright (C) 2011 Sergiy <[email protected]>
// Copyright (C) 2011-2013 Gabor Juhos <[email protected]>
// Copyright (C) 2014-2015 Felix Fietkau <[email protected]>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#define DRIVER_NAME "spi-ar934x"
#define AR934X_SPI_REG_FS 0x00
#define AR934X_SPI_ENABLE BIT(0)
#define AR934X_SPI_REG_IOC 0x08
#define AR934X_SPI_IOC_INITVAL 0x70000
#define AR934X_SPI_REG_CTRL 0x04
#define AR934X_SPI_CLK_MASK GENMASK(5, 0)
#define AR934X_SPI_DATAOUT 0x10
#define AR934X_SPI_REG_SHIFT_CTRL 0x14
#define AR934X_SPI_SHIFT_EN BIT(31)
#define AR934X_SPI_SHIFT_CS(n) BIT(28 + (n))
#define AR934X_SPI_SHIFT_TERM 26
#define AR934X_SPI_SHIFT_VAL(cs, term, count) \
(AR934X_SPI_SHIFT_EN | AR934X_SPI_SHIFT_CS(cs) | \
(term) << AR934X_SPI_SHIFT_TERM | (count))
#define AR934X_SPI_DATAIN 0x18
struct ar934x_spi {
struct spi_controller *ctlr;
void __iomem *base;
struct clk *clk;
unsigned int clk_freq;
};
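/*
 * Divider sketch, assuming SCK = clk_freq / (2 * (div + 1)) as implied
 * by the checks in ar934x_spi_setup(): ar934x_spi_clk_div() computes
 * div = DIV_ROUND_UP(clk_freq, 2 * freq) - 1. For example, a 200MHz
 * bus clock with a 25MHz request gives div = 3 and an effective SCK of
 * exactly 25MHz.
 */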
static inline int ar934x_spi_clk_div(struct ar934x_spi *sp, unsigned int freq)
{
int div = DIV_ROUND_UP(sp->clk_freq, freq * 2) - 1;
if (div < 0)
return 0;
else if (div > AR934X_SPI_CLK_MASK)
return -EINVAL;
else
return div;
}
static int ar934x_spi_setup(struct spi_device *spi)
{
struct ar934x_spi *sp = spi_controller_get_devdata(spi->controller);
if ((spi->max_speed_hz == 0) ||
(spi->max_speed_hz > (sp->clk_freq / 2))) {
spi->max_speed_hz = sp->clk_freq / 2;
} else if (spi->max_speed_hz < (sp->clk_freq / 128)) {
dev_err(&spi->dev, "spi clock is too low\n");
return -EINVAL;
}
return 0;
}
static int ar934x_spi_transfer_one_message(struct spi_controller *ctlr,
struct spi_message *m)
{
struct ar934x_spi *sp = spi_controller_get_devdata(ctlr);
struct spi_transfer *t = NULL;
struct spi_device *spi = m->spi;
unsigned long trx_done, trx_cur;
int stat = 0;
u8 bpw, term = 0;
int div, i;
u32 reg;
const u8 *tx_buf;
u8 *buf;
m->actual_length = 0;
list_for_each_entry(t, &m->transfers, transfer_list) {
if (t->bits_per_word >= 8 && t->bits_per_word < 32)
bpw = t->bits_per_word >> 3;
else
bpw = 4;
if (t->speed_hz)
div = ar934x_spi_clk_div(sp, t->speed_hz);
else
div = ar934x_spi_clk_div(sp, spi->max_speed_hz);
if (div < 0) {
stat = -EIO;
goto msg_done;
}
reg = ioread32(sp->base + AR934X_SPI_REG_CTRL);
reg &= ~AR934X_SPI_CLK_MASK;
reg |= div;
iowrite32(reg, sp->base + AR934X_SPI_REG_CTRL);
iowrite32(0, sp->base + AR934X_SPI_DATAOUT);
for (trx_done = 0; trx_done < t->len; trx_done += bpw) {
trx_cur = t->len - trx_done;
if (trx_cur > bpw)
trx_cur = bpw;
else if (list_is_last(&t->transfer_list, &m->transfers))
term = 1;
if (t->tx_buf) {
tx_buf = t->tx_buf + trx_done;
reg = tx_buf[0];
for (i = 1; i < trx_cur; i++)
reg = reg << 8 | tx_buf[i];
iowrite32(reg, sp->base + AR934X_SPI_DATAOUT);
}
reg = AR934X_SPI_SHIFT_VAL(spi_get_chipselect(spi, 0), term,
trx_cur * 8);
iowrite32(reg, sp->base + AR934X_SPI_REG_SHIFT_CTRL);
stat = readl_poll_timeout(
sp->base + AR934X_SPI_REG_SHIFT_CTRL, reg,
!(reg & AR934X_SPI_SHIFT_EN), 0, 5);
if (stat < 0)
goto msg_done;
if (t->rx_buf) {
reg = ioread32(sp->base + AR934X_SPI_DATAIN);
buf = t->rx_buf + trx_done;
for (i = 0; i < trx_cur; i++) {
buf[trx_cur - i - 1] = reg & 0xff;
reg >>= 8;
}
}
spi_delay_exec(&t->word_delay, t);
}
m->actual_length += t->len;
spi_transfer_delay_exec(t);
}
msg_done:
m->status = stat;
spi_finalize_current_message(ctlr);
return 0;
}
static const struct of_device_id ar934x_spi_match[] = {
{ .compatible = "qca,ar934x-spi" },
{},
};
MODULE_DEVICE_TABLE(of, ar934x_spi_match);
static int ar934x_spi_probe(struct platform_device *pdev)
{
struct spi_controller *ctlr;
struct ar934x_spi *sp;
void __iomem *base;
struct clk *clk;
int ret;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "failed to get clock\n");
return PTR_ERR(clk);
}
ret = clk_prepare_enable(clk);
if (ret)
return ret;
ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(*sp));
if (!ctlr) {
dev_info(&pdev->dev, "failed to allocate spi controller\n");
ret = -ENOMEM;
goto err_clk_disable;
}
/* disable flash mapping and expose spi controller registers */
iowrite32(AR934X_SPI_ENABLE, base + AR934X_SPI_REG_FS);
/* restore pins to default state: CSn=1 DO=CLK=0 */
iowrite32(AR934X_SPI_IOC_INITVAL, base + AR934X_SPI_REG_IOC);
ctlr->mode_bits = SPI_LSB_FIRST;
ctlr->setup = ar934x_spi_setup;
ctlr->transfer_one_message = ar934x_spi_transfer_one_message;
ctlr->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(24) |
SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
ctlr->dev.of_node = pdev->dev.of_node;
ctlr->num_chipselect = 3;
dev_set_drvdata(&pdev->dev, ctlr);
sp = spi_controller_get_devdata(ctlr);
sp->base = base;
sp->clk = clk;
sp->clk_freq = clk_get_rate(clk);
sp->ctlr = ctlr;
ret = spi_register_controller(ctlr);
if (!ret)
return 0;
err_clk_disable:
clk_disable_unprepare(clk);
return ret;
}
static void ar934x_spi_remove(struct platform_device *pdev)
{
struct spi_controller *ctlr;
struct ar934x_spi *sp;
ctlr = dev_get_drvdata(&pdev->dev);
sp = spi_controller_get_devdata(ctlr);
spi_unregister_controller(ctlr);
clk_disable_unprepare(sp->clk);
}
static struct platform_driver ar934x_spi_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = ar934x_spi_match,
},
.probe = ar934x_spi_probe,
.remove_new = ar934x_spi_remove,
};
module_platform_driver(ar934x_spi_driver);
MODULE_DESCRIPTION("SPI controller driver for Qualcomm Atheros AR934x/QCA95xx");
MODULE_AUTHOR("Chuanhong Guo <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);
| linux-master | drivers/spi/spi-ar934x.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Freescale SPI controller driver.
*
* Maintainer: Kumar Gala
*
* Copyright (C) 2006 Polycom, Inc.
* Copyright 2010 Freescale Semiconductor, Inc.
*
* CPM SPI and QE buffer descriptors mode support:
* Copyright (c) 2009 MontaVista Software, Inc.
* Author: Anton Vorontsov <[email protected]>
*
* GRLIB support:
* Copyright (c) 2012 Aeroflex Gaisler AB.
* Author: Andreas Larsson <[email protected]>
*/
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/fsl_devices.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/types.h>
#ifdef CONFIG_FSL_SOC
#include <sysdev/fsl_soc.h>
#endif
/* Specific to the MPC8306/MPC8309 */
#define IMMR_SPI_CS_OFFSET 0x14c
#define SPI_BOOT_SEL_BIT 0x80000000
#include "spi-fsl-lib.h"
#include "spi-fsl-cpm.h"
#include "spi-fsl-spi.h"
#define TYPE_FSL 0
#define TYPE_GRLIB 1
struct fsl_spi_match_data {
int type;
};
static struct fsl_spi_match_data of_fsl_spi_fsl_config = {
.type = TYPE_FSL,
};
static struct fsl_spi_match_data of_fsl_spi_grlib_config = {
.type = TYPE_GRLIB,
};
static const struct of_device_id of_fsl_spi_match[] = {
{
.compatible = "fsl,spi",
.data = &of_fsl_spi_fsl_config,
},
{
.compatible = "aeroflexgaisler,spictrl",
.data = &of_fsl_spi_grlib_config,
},
{}
};
MODULE_DEVICE_TABLE(of, of_fsl_spi_match);
static int fsl_spi_get_type(struct device *dev)
{
const struct of_device_id *match;
if (dev->of_node) {
match = of_match_node(of_fsl_spi_match, dev->of_node);
if (match && match->data)
return ((struct fsl_spi_match_data *)match->data)->type;
}
return TYPE_FSL;
}
static void fsl_spi_change_mode(struct spi_device *spi)
{
struct mpc8xxx_spi *mspi = spi_controller_get_devdata(spi->controller);
struct spi_mpc8xxx_cs *cs = spi->controller_state;
struct fsl_spi_reg __iomem *reg_base = mspi->reg_base;
__be32 __iomem *mode = &reg_base->mode;
unsigned long flags;
if (cs->hw_mode == mpc8xxx_spi_read_reg(mode))
return;
/* Turn off IRQs locally to minimize time that SPI is disabled. */
local_irq_save(flags);
/* Turn off the SPI unit prior to changing the mode */
mpc8xxx_spi_write_reg(mode, cs->hw_mode & ~SPMODE_ENABLE);
/* When in CPM mode, we need to reinit tx and rx. */
if (mspi->flags & SPI_CPM_MODE) {
fsl_spi_cpm_reinit_txrx(mspi);
}
mpc8xxx_spi_write_reg(mode, cs->hw_mode);
local_irq_restore(flags);
}
static void fsl_spi_qe_cpu_set_shifts(u32 *rx_shift, u32 *tx_shift,
int bits_per_word, int msb_first)
{
*rx_shift = 0;
*tx_shift = 0;
if (msb_first) {
if (bits_per_word <= 8) {
*rx_shift = 16;
*tx_shift = 24;
} else if (bits_per_word <= 16) {
*rx_shift = 16;
*tx_shift = 16;
}
} else {
if (bits_per_word <= 8)
*rx_shift = 8;
}
}
static void fsl_spi_grlib_set_shifts(u32 *rx_shift, u32 *tx_shift,
int bits_per_word, int msb_first)
{
*rx_shift = 0;
*tx_shift = 0;
if (bits_per_word <= 16) {
if (msb_first) {
*rx_shift = 16; /* LSB in bit 16 */
*tx_shift = 32 - bits_per_word; /* MSB in bit 31 */
} else {
*rx_shift = 16 - bits_per_word; /* MSB in bit 15 */
}
}
}
static void mspi_apply_cpu_mode_quirks(struct spi_mpc8xxx_cs *cs,
struct spi_device *spi,
struct mpc8xxx_spi *mpc8xxx_spi,
int bits_per_word)
{
cs->rx_shift = 0;
cs->tx_shift = 0;
if (bits_per_word <= 8) {
cs->get_rx = mpc8xxx_spi_rx_buf_u8;
cs->get_tx = mpc8xxx_spi_tx_buf_u8;
} else if (bits_per_word <= 16) {
cs->get_rx = mpc8xxx_spi_rx_buf_u16;
cs->get_tx = mpc8xxx_spi_tx_buf_u16;
} else if (bits_per_word <= 32) {
cs->get_rx = mpc8xxx_spi_rx_buf_u32;
cs->get_tx = mpc8xxx_spi_tx_buf_u32;
}
if (mpc8xxx_spi->set_shifts)
mpc8xxx_spi->set_shifts(&cs->rx_shift, &cs->tx_shift,
bits_per_word,
!(spi->mode & SPI_LSB_FIRST));
mpc8xxx_spi->rx_shift = cs->rx_shift;
mpc8xxx_spi->tx_shift = cs->tx_shift;
mpc8xxx_spi->get_rx = cs->get_rx;
mpc8xxx_spi->get_tx = cs->get_tx;
}
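/*
 * Divider selection in fsl_spi_setup_transfer() below assumes the
 * usual SPMODE layout: the word length field is programmed as
 * bits_per_word - 1 (with 32 encoded as 0) and the SPI clock is
 * spibrg / (4 * (PM + 1)), or spibrg / (64 * (PM + 1)) when DIV16 is
 * set, so PM is chosen as the smallest prescaler that does not exceed
 * the requested speed.
 */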
static int fsl_spi_setup_transfer(struct spi_device *spi,
struct spi_transfer *t)
{
struct mpc8xxx_spi *mpc8xxx_spi;
int bits_per_word = 0;
u8 pm;
u32 hz = 0;
struct spi_mpc8xxx_cs *cs = spi->controller_state;
mpc8xxx_spi = spi_controller_get_devdata(spi->controller);
if (t) {
bits_per_word = t->bits_per_word;
hz = t->speed_hz;
}
/* spi_transfer level calls that work per-word */
if (!bits_per_word)
bits_per_word = spi->bits_per_word;
if (!hz)
hz = spi->max_speed_hz;
if (!(mpc8xxx_spi->flags & SPI_CPM_MODE))
mspi_apply_cpu_mode_quirks(cs, spi, mpc8xxx_spi, bits_per_word);
if (bits_per_word == 32)
bits_per_word = 0;
else
bits_per_word = bits_per_word - 1;
/* mask out bits we are going to set */
cs->hw_mode &= ~(SPMODE_LEN(0xF) | SPMODE_DIV16
| SPMODE_PM(0xF));
cs->hw_mode |= SPMODE_LEN(bits_per_word);
if ((mpc8xxx_spi->spibrg / hz) > 64) {
cs->hw_mode |= SPMODE_DIV16;
pm = (mpc8xxx_spi->spibrg - 1) / (hz * 64) + 1;
WARN_ONCE(pm > 16,
"%s: Requested speed is too low: %d Hz. Will use %d Hz instead.\n",
dev_name(&spi->dev), hz, mpc8xxx_spi->spibrg / 1024);
if (pm > 16)
pm = 16;
} else {
pm = (mpc8xxx_spi->spibrg - 1) / (hz * 4) + 1;
}
if (pm)
pm--;
cs->hw_mode |= SPMODE_PM(pm);
fsl_spi_change_mode(spi);
return 0;
}
static int fsl_spi_cpu_bufs(struct mpc8xxx_spi *mspi,
struct spi_transfer *t, unsigned int len)
{
u32 word;
struct fsl_spi_reg __iomem *reg_base = mspi->reg_base;
mspi->count = len;
/* enable rx ints */
mpc8xxx_spi_write_reg(&reg_base->mask, SPIM_NE);
/* transmit word */
word = mspi->get_tx(mspi);
mpc8xxx_spi_write_reg(&reg_base->transmit, word);
return 0;
}
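/*
 * fsl_spi_bufs() below converts the transfer length in bytes into a
 * word count before starting the transfer: len is halved once for
 * words wider than 8 bits and once more for words wider than 16 bits.
 */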
static int fsl_spi_bufs(struct spi_device *spi, struct spi_transfer *t,
bool is_dma_mapped)
{
struct mpc8xxx_spi *mpc8xxx_spi = spi_controller_get_devdata(spi->controller);
struct fsl_spi_reg __iomem *reg_base;
unsigned int len = t->len;
u8 bits_per_word;
int ret;
reg_base = mpc8xxx_spi->reg_base;
bits_per_word = spi->bits_per_word;
if (t->bits_per_word)
bits_per_word = t->bits_per_word;
if (bits_per_word > 8)
len /= 2;
if (bits_per_word > 16)
len /= 2;
mpc8xxx_spi->tx = t->tx_buf;
mpc8xxx_spi->rx = t->rx_buf;
reinit_completion(&mpc8xxx_spi->done);
if (mpc8xxx_spi->flags & SPI_CPM_MODE)
ret = fsl_spi_cpm_bufs(mpc8xxx_spi, t, is_dma_mapped);
else
ret = fsl_spi_cpu_bufs(mpc8xxx_spi, t, len);
if (ret)
return ret;
wait_for_completion(&mpc8xxx_spi->done);
/* disable rx ints */
mpc8xxx_spi_write_reg(&reg_base->mask, 0);
if (mpc8xxx_spi->flags & SPI_CPM_MODE)
fsl_spi_cpm_bufs_complete(mpc8xxx_spi);
return mpc8xxx_spi->count;
}
static int fsl_spi_prepare_message(struct spi_controller *ctlr,
struct spi_message *m)
{
struct mpc8xxx_spi *mpc8xxx_spi = spi_controller_get_devdata(ctlr);
struct spi_transfer *t;
struct spi_transfer *first;
first = list_first_entry(&m->transfers, struct spi_transfer,
transfer_list);
/*
* In CPU mode, optimize large byte transfers to use larger
* bits_per_word values to reduce number of interrupts taken.
*
* Some glitches can appear on the SPI clock when the mode changes.
* Check that there is no speed change during the transfer and set it up
* now to change the mode without having a chip-select asserted.
*/
list_for_each_entry(t, &m->transfers, transfer_list) {
if (t->speed_hz != first->speed_hz) {
dev_err(&m->spi->dev,
"speed_hz cannot change during message.\n");
return -EINVAL;
}
if (!(mpc8xxx_spi->flags & SPI_CPM_MODE)) {
if (t->len < 256 || t->bits_per_word != 8)
continue;
if ((t->len & 3) == 0)
t->bits_per_word = 32;
else if ((t->len & 1) == 0)
t->bits_per_word = 16;
} else {
/*
 * CPM/QE uses little endian for words > 8 bits, so transform
 * 16- and 32-bit words into 8-bit ones.
 * Unfortunately that doesn't work for LSB-first transfers, so
 * reject these for now.
 * Note: 32-bit words with LSB first work iff tfcr/rfcr is set
 * to CPMFCR_GBL.
 */
if (m->spi->mode & SPI_LSB_FIRST && t->bits_per_word > 8)
return -EINVAL;
if (t->bits_per_word == 16 || t->bits_per_word == 32)
t->bits_per_word = 8; /* pretend its 8 bits */
if (t->bits_per_word == 8 && t->len >= 256 &&
(mpc8xxx_spi->flags & SPI_CPM1))
t->bits_per_word = 16;
}
}
return fsl_spi_setup_transfer(m->spi, first);
}
static int fsl_spi_transfer_one(struct spi_controller *controller,
struct spi_device *spi,
struct spi_transfer *t)
{
int status;
status = fsl_spi_setup_transfer(spi, t);
if (status < 0)
return status;
if (t->len)
status = fsl_spi_bufs(spi, t, !!t->tx_dma || !!t->rx_dma);
if (status > 0)
return -EMSGSIZE;
return status;
}
static int fsl_spi_unprepare_message(struct spi_controller *controller,
struct spi_message *msg)
{
return fsl_spi_setup_transfer(msg->spi, NULL);
}
static int fsl_spi_setup(struct spi_device *spi)
{
struct mpc8xxx_spi *mpc8xxx_spi;
struct fsl_spi_reg __iomem *reg_base;
bool initial_setup = false;
int retval;
u32 hw_mode;
struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi);
if (!spi->max_speed_hz)
return -EINVAL;
if (!cs) {
cs = kzalloc(sizeof(*cs), GFP_KERNEL);
if (!cs)
return -ENOMEM;
spi_set_ctldata(spi, cs);
initial_setup = true;
}
mpc8xxx_spi = spi_controller_get_devdata(spi->controller);
reg_base = mpc8xxx_spi->reg_base;
hw_mode = cs->hw_mode; /* Save original settings */
cs->hw_mode = mpc8xxx_spi_read_reg(&reg_base->mode);
/* mask out bits we are going to set */
cs->hw_mode &= ~(SPMODE_CP_BEGIN_EDGECLK | SPMODE_CI_INACTIVEHIGH
| SPMODE_REV | SPMODE_LOOP);
if (spi->mode & SPI_CPHA)
cs->hw_mode |= SPMODE_CP_BEGIN_EDGECLK;
if (spi->mode & SPI_CPOL)
cs->hw_mode |= SPMODE_CI_INACTIVEHIGH;
if (!(spi->mode & SPI_LSB_FIRST))
cs->hw_mode |= SPMODE_REV;
if (spi->mode & SPI_LOOP)
cs->hw_mode |= SPMODE_LOOP;
retval = fsl_spi_setup_transfer(spi, NULL);
if (retval < 0) {
cs->hw_mode = hw_mode; /* Restore settings */
if (initial_setup)
kfree(cs);
return retval;
}
return 0;
}
static void fsl_spi_cleanup(struct spi_device *spi)
{
struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi);
kfree(cs);
spi_set_ctldata(spi, NULL);
}
static void fsl_spi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
{
struct fsl_spi_reg __iomem *reg_base = mspi->reg_base;
/* We need handle RX first */
if (events & SPIE_NE) {
u32 rx_data = mpc8xxx_spi_read_reg(&reg_base->receive);
if (mspi->rx)
mspi->get_rx(rx_data, mspi);
}
if ((events & SPIE_NF) == 0)
/* spin until TX is done */
while (((events =
mpc8xxx_spi_read_reg(&reg_base->event)) &
SPIE_NF) == 0)
cpu_relax();
/* Clear the events */
mpc8xxx_spi_write_reg(&reg_base->event, events);
mspi->count -= 1;
if (mspi->count) {
u32 word = mspi->get_tx(mspi);
mpc8xxx_spi_write_reg(&reg_base->transmit, word);
} else {
complete(&mspi->done);
}
}
static irqreturn_t fsl_spi_irq(s32 irq, void *context_data)
{
struct mpc8xxx_spi *mspi = context_data;
irqreturn_t ret = IRQ_NONE;
u32 events;
struct fsl_spi_reg __iomem *reg_base = mspi->reg_base;
/* Get interrupt events (tx/rx) */
events = mpc8xxx_spi_read_reg(&reg_base->event);
if (events)
ret = IRQ_HANDLED;
dev_dbg(mspi->dev, "%s: events %x\n", __func__, events);
if (mspi->flags & SPI_CPM_MODE)
fsl_spi_cpm_irq(mspi, events);
else
fsl_spi_cpu_irq(mspi, events);
return ret;
}
static void fsl_spi_grlib_cs_control(struct spi_device *spi, bool on)
{
struct mpc8xxx_spi *mpc8xxx_spi = spi_controller_get_devdata(spi->controller);
struct fsl_spi_reg __iomem *reg_base = mpc8xxx_spi->reg_base;
u32 slvsel;
u16 cs = spi_get_chipselect(spi, 0);
if (cs < mpc8xxx_spi->native_chipselects) {
slvsel = mpc8xxx_spi_read_reg(&reg_base->slvsel);
slvsel = on ? (slvsel | (1 << cs)) : (slvsel & ~(1 << cs));
mpc8xxx_spi_write_reg(&reg_base->slvsel, slvsel);
}
}
static void fsl_spi_grlib_probe(struct device *dev)
{
struct spi_controller *host = dev_get_drvdata(dev);
struct mpc8xxx_spi *mpc8xxx_spi = spi_controller_get_devdata(host);
struct fsl_spi_reg __iomem *reg_base = mpc8xxx_spi->reg_base;
int mbits;
u32 capabilities;
capabilities = mpc8xxx_spi_read_reg(&reg_base->cap);
mpc8xxx_spi->set_shifts = fsl_spi_grlib_set_shifts;
mbits = SPCAP_MAXWLEN(capabilities);
if (mbits)
mpc8xxx_spi->max_bits_per_word = mbits + 1;
mpc8xxx_spi->native_chipselects = 0;
if (SPCAP_SSEN(capabilities)) {
mpc8xxx_spi->native_chipselects = SPCAP_SSSZ(capabilities);
mpc8xxx_spi_write_reg(&reg_base->slvsel, 0xffffffff);
}
host->num_chipselect = mpc8xxx_spi->native_chipselects;
host->set_cs = fsl_spi_grlib_cs_control;
}
static void fsl_spi_cs_control(struct spi_device *spi, bool on)
{
struct device *dev = spi->dev.parent->parent;
struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
if (WARN_ON_ONCE(!pinfo->immr_spi_cs))
return;
iowrite32be(on ? 0 : SPI_BOOT_SEL_BIT, pinfo->immr_spi_cs);
}
static struct spi_controller *fsl_spi_probe(struct device *dev,
struct resource *mem, unsigned int irq)
{
struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
struct spi_controller *host;
struct mpc8xxx_spi *mpc8xxx_spi;
struct fsl_spi_reg __iomem *reg_base;
u32 regval;
int ret = 0;
host = spi_alloc_host(dev, sizeof(struct mpc8xxx_spi));
if (host == NULL) {
ret = -ENOMEM;
goto err;
}
dev_set_drvdata(dev, host);
mpc8xxx_spi_probe(dev, mem, irq);
host->setup = fsl_spi_setup;
host->cleanup = fsl_spi_cleanup;
host->prepare_message = fsl_spi_prepare_message;
host->transfer_one = fsl_spi_transfer_one;
host->unprepare_message = fsl_spi_unprepare_message;
host->use_gpio_descriptors = true;
host->set_cs = fsl_spi_cs_control;
mpc8xxx_spi = spi_controller_get_devdata(host);
mpc8xxx_spi->max_bits_per_word = 32;
mpc8xxx_spi->type = fsl_spi_get_type(dev);
ret = fsl_spi_cpm_init(mpc8xxx_spi);
if (ret)
goto err_cpm_init;
mpc8xxx_spi->reg_base = devm_ioremap_resource(dev, mem);
if (IS_ERR(mpc8xxx_spi->reg_base)) {
ret = PTR_ERR(mpc8xxx_spi->reg_base);
goto err_probe;
}
if (mpc8xxx_spi->type == TYPE_GRLIB)
fsl_spi_grlib_probe(dev);
if (mpc8xxx_spi->flags & SPI_CPM_MODE)
host->bits_per_word_mask =
(SPI_BPW_RANGE_MASK(4, 8) | SPI_BPW_MASK(16) | SPI_BPW_MASK(32));
else
host->bits_per_word_mask =
(SPI_BPW_RANGE_MASK(4, 16) | SPI_BPW_MASK(32));
host->bits_per_word_mask &=
SPI_BPW_RANGE_MASK(1, mpc8xxx_spi->max_bits_per_word);
if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE)
mpc8xxx_spi->set_shifts = fsl_spi_qe_cpu_set_shifts;
if (mpc8xxx_spi->set_shifts)
/* 8 bits per word and MSB first */
mpc8xxx_spi->set_shifts(&mpc8xxx_spi->rx_shift,
&mpc8xxx_spi->tx_shift, 8, 1);
/* Register for SPI Interrupt */
ret = devm_request_irq(dev, mpc8xxx_spi->irq, fsl_spi_irq,
0, "fsl_spi", mpc8xxx_spi);
if (ret != 0)
goto err_probe;
reg_base = mpc8xxx_spi->reg_base;
/* SPI controller initializations */
mpc8xxx_spi_write_reg(&reg_base->mode, 0);
mpc8xxx_spi_write_reg(&reg_base->mask, 0);
mpc8xxx_spi_write_reg(&reg_base->command, 0);
mpc8xxx_spi_write_reg(&reg_base->event, 0xffffffff);
/* Enable SPI interface */
regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE;
if (mpc8xxx_spi->max_bits_per_word < 8) {
regval &= ~SPMODE_LEN(0xF);
regval |= SPMODE_LEN(mpc8xxx_spi->max_bits_per_word - 1);
}
if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE)
regval |= SPMODE_OP;
mpc8xxx_spi_write_reg(&reg_base->mode, regval);
ret = devm_spi_register_controller(dev, host);
if (ret < 0)
goto err_probe;
dev_info(dev, "at 0x%p (irq = %d), %s mode\n", reg_base,
mpc8xxx_spi->irq, mpc8xxx_spi_strmode(mpc8xxx_spi->flags));
return host;
err_probe:
fsl_spi_cpm_free(mpc8xxx_spi);
err_cpm_init:
spi_controller_put(host);
err:
return ERR_PTR(ret);
}
static int of_fsl_spi_probe(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
struct device_node *np = ofdev->dev.of_node;
struct spi_controller *host;
struct resource mem;
int irq, type;
int ret;
bool spisel_boot = false;
#if IS_ENABLED(CONFIG_FSL_SOC)
struct mpc8xxx_spi_probe_info *pinfo = NULL;
#endif
ret = of_mpc8xxx_spi_probe(ofdev);
if (ret)
return ret;
type = fsl_spi_get_type(&ofdev->dev);
if (type == TYPE_FSL) {
struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
#if IS_ENABLED(CONFIG_FSL_SOC)
pinfo = to_of_pinfo(pdata);
spisel_boot = of_property_read_bool(np, "fsl,spisel_boot");
if (spisel_boot) {
pinfo->immr_spi_cs = ioremap(get_immrbase() + IMMR_SPI_CS_OFFSET, 4);
if (!pinfo->immr_spi_cs)
return -ENOMEM;
}
#endif
/*
* Handle the case where we have one hardwired (always selected)
* device on the first "chipselect". Else we let the core code
* handle any GPIOs or native chip selects and assign the
* appropriate callback for dealing with the CS lines. This isn't
* supported on the GRLIB variant.
*/
ret = gpiod_count(dev, "cs");
if (ret < 0)
ret = 0;
if (ret == 0 && !spisel_boot)
pdata->max_chipselect = 1;
else
pdata->max_chipselect = ret + spisel_boot;
}
ret = of_address_to_resource(np, 0, &mem);
if (ret)
goto unmap_out;
irq = platform_get_irq(ofdev, 0);
if (irq < 0) {
ret = irq;
goto unmap_out;
}
host = fsl_spi_probe(dev, &mem, irq);
return PTR_ERR_OR_ZERO(host);
unmap_out:
#if IS_ENABLED(CONFIG_FSL_SOC)
if (spisel_boot)
iounmap(pinfo->immr_spi_cs);
#endif
return ret;
}
static void of_fsl_spi_remove(struct platform_device *ofdev)
{
struct spi_controller *host = platform_get_drvdata(ofdev);
struct mpc8xxx_spi *mpc8xxx_spi = spi_controller_get_devdata(host);
fsl_spi_cpm_free(mpc8xxx_spi);
}
static struct platform_driver of_fsl_spi_driver = {
.driver = {
.name = "fsl_spi",
.of_match_table = of_fsl_spi_match,
},
.probe = of_fsl_spi_probe,
.remove_new = of_fsl_spi_remove,
};
#ifdef CONFIG_MPC832x_RDB
/*
* XXX XXX XXX
* This is a "legacy" platform driver; it was used by the MPC8323E-RDB boards
* only. The driver should go away soon, since newer MPC8323E-RDB device
* trees can work with the OpenFirmware driver. But for now we support old
* trees as well.
*/
static int plat_mpc8xxx_spi_probe(struct platform_device *pdev)
{
struct resource *mem;
int irq;
struct spi_controller *host;
if (!dev_get_platdata(&pdev->dev))
return -EINVAL;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem)
return -EINVAL;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
host = fsl_spi_probe(&pdev->dev, mem, irq);
return PTR_ERR_OR_ZERO(host);
}
static void plat_mpc8xxx_spi_remove(struct platform_device *pdev)
{
struct spi_controller *host = platform_get_drvdata(pdev);
struct mpc8xxx_spi *mpc8xxx_spi = spi_controller_get_devdata(host);
fsl_spi_cpm_free(mpc8xxx_spi);
}
MODULE_ALIAS("platform:mpc8xxx_spi");
static struct platform_driver mpc8xxx_spi_driver = {
.probe = plat_mpc8xxx_spi_probe,
.remove_new = plat_mpc8xxx_spi_remove,
.driver = {
.name = "mpc8xxx_spi",
},
};
static bool legacy_driver_failed;
static void __init legacy_driver_register(void)
{
legacy_driver_failed = platform_driver_register(&mpc8xxx_spi_driver);
}
static void __exit legacy_driver_unregister(void)
{
if (legacy_driver_failed)
return;
platform_driver_unregister(&mpc8xxx_spi_driver);
}
#else
static void __init legacy_driver_register(void) {}
static void __exit legacy_driver_unregister(void) {}
#endif /* CONFIG_MPC832x_RDB */
static int __init fsl_spi_init(void)
{
legacy_driver_register();
return platform_driver_register(&of_fsl_spi_driver);
}
module_init(fsl_spi_init);
static void __exit fsl_spi_exit(void)
{
platform_driver_unregister(&of_fsl_spi_driver);
legacy_driver_unregister();
}
module_exit(fsl_spi_exit);
MODULE_AUTHOR("Kumar Gala");
MODULE_DESCRIPTION("Simple Freescale SPI Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-fsl-spi.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* NXP FlexSPI(FSPI) controller driver.
*
* Copyright 2019-2020 NXP
* Copyright 2020 Puresoftware Ltd.
*
* FlexSPI is a flexible SPI host controller which supports two SPI
* channels and up to 4 external devices. Each channel supports
* Single/Dual/Quad/Octal mode data transfer (1/2/4/8 bidirectional
* data lines).
*
* The FlexSPI controller is driven by the LUT (Look-up Table) registers.
* LUT registers are a look-up table for sequences of instructions.
* A valid sequence consists of four LUT registers.
* Maximum 32 LUT sequences can be programmed simultaneously.
*
* LUTs are being created at run-time based on the commands passed
* from the spi-mem framework, thus using single LUT index.
*
* Software triggered Flash read/write access by IP Bus.
*
* Memory mapped read access by AHB Bus.
*
* Based on SPI MEM interface and spi-fsl-qspi.c driver.
*
* Author:
* Yogesh Narayan Gaur <[email protected]>
* Boris Brezillon <[email protected]>
* Frieder Schrempf <[email protected]>
*/
#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/regmap.h>
#include <linux/sizes.h>
#include <linux/sys_soc.h>
#include <linux/mfd/syscon.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
/*
* The driver only uses one single LUT entry, that is updated on
* each call of exec_op(). Index 0 is preset at boot with a basic
* read operation, so let's use the last entry (31).
*/
#define SEQID_LUT 31
/* Registers used by the driver */
#define FSPI_MCR0 0x00
#define FSPI_MCR0_AHB_TIMEOUT(x) ((x) << 24)
#define FSPI_MCR0_IP_TIMEOUT(x) ((x) << 16)
#define FSPI_MCR0_LEARN_EN BIT(15)
#define FSPI_MCR0_SCRFRUN_EN BIT(14)
#define FSPI_MCR0_OCTCOMB_EN BIT(13)
#define FSPI_MCR0_DOZE_EN BIT(12)
#define FSPI_MCR0_HSEN BIT(11)
#define FSPI_MCR0_SERCLKDIV BIT(8)
#define FSPI_MCR0_ATDF_EN BIT(7)
#define FSPI_MCR0_ARDF_EN BIT(6)
#define FSPI_MCR0_RXCLKSRC(x) ((x) << 4)
#define FSPI_MCR0_END_CFG(x) ((x) << 2)
#define FSPI_MCR0_MDIS BIT(1)
#define FSPI_MCR0_SWRST BIT(0)
#define FSPI_MCR1 0x04
#define FSPI_MCR1_SEQ_TIMEOUT(x) ((x) << 16)
#define FSPI_MCR1_AHB_TIMEOUT(x) (x)
#define FSPI_MCR2 0x08
#define FSPI_MCR2_IDLE_WAIT(x) ((x) << 24)
#define FSPI_MCR2_SAMEDEVICEEN BIT(15)
#define FSPI_MCR2_CLRLRPHS BIT(14)
#define FSPI_MCR2_ABRDATSZ BIT(8)
#define FSPI_MCR2_ABRLEARN BIT(7)
#define FSPI_MCR2_ABR_READ BIT(6)
#define FSPI_MCR2_ABRWRITE BIT(5)
#define FSPI_MCR2_ABRDUMMY BIT(4)
#define FSPI_MCR2_ABR_MODE BIT(3)
#define FSPI_MCR2_ABRCADDR BIT(2)
#define FSPI_MCR2_ABRRADDR BIT(1)
#define FSPI_MCR2_ABR_CMD BIT(0)
#define FSPI_AHBCR 0x0c
#define FSPI_AHBCR_RDADDROPT BIT(6)
#define FSPI_AHBCR_PREF_EN BIT(5)
#define FSPI_AHBCR_BUFF_EN BIT(4)
#define FSPI_AHBCR_CACH_EN BIT(3)
#define FSPI_AHBCR_CLRTXBUF BIT(2)
#define FSPI_AHBCR_CLRRXBUF BIT(1)
#define FSPI_AHBCR_PAR_EN BIT(0)
#define FSPI_INTEN 0x10
#define FSPI_INTEN_SCLKSBWR BIT(9)
#define FSPI_INTEN_SCLKSBRD BIT(8)
#define FSPI_INTEN_DATALRNFL BIT(7)
#define FSPI_INTEN_IPTXWE BIT(6)
#define FSPI_INTEN_IPRXWA BIT(5)
#define FSPI_INTEN_AHBCMDERR BIT(4)
#define FSPI_INTEN_IPCMDERR BIT(3)
#define FSPI_INTEN_AHBCMDGE BIT(2)
#define FSPI_INTEN_IPCMDGE BIT(1)
#define FSPI_INTEN_IPCMDDONE BIT(0)
#define FSPI_INTR 0x14
#define FSPI_INTR_SCLKSBWR BIT(9)
#define FSPI_INTR_SCLKSBRD BIT(8)
#define FSPI_INTR_DATALRNFL BIT(7)
#define FSPI_INTR_IPTXWE BIT(6)
#define FSPI_INTR_IPRXWA BIT(5)
#define FSPI_INTR_AHBCMDERR BIT(4)
#define FSPI_INTR_IPCMDERR BIT(3)
#define FSPI_INTR_AHBCMDGE BIT(2)
#define FSPI_INTR_IPCMDGE BIT(1)
#define FSPI_INTR_IPCMDDONE BIT(0)
#define FSPI_LUTKEY 0x18
#define FSPI_LUTKEY_VALUE 0x5AF05AF0
#define FSPI_LCKCR 0x1C
#define FSPI_LCKER_LOCK 0x1
#define FSPI_LCKER_UNLOCK 0x2
#define FSPI_BUFXCR_INVALID_MSTRID 0xE
#define FSPI_AHBRX_BUF0CR0 0x20
#define FSPI_AHBRX_BUF1CR0 0x24
#define FSPI_AHBRX_BUF2CR0 0x28
#define FSPI_AHBRX_BUF3CR0 0x2C
#define FSPI_AHBRX_BUF4CR0 0x30
#define FSPI_AHBRX_BUF5CR0 0x34
#define FSPI_AHBRX_BUF6CR0 0x38
#define FSPI_AHBRX_BUF7CR0 0x3C
#define FSPI_AHBRXBUF0CR7_PREF BIT(31)
#define FSPI_AHBRX_BUF0CR1 0x40
#define FSPI_AHBRX_BUF1CR1 0x44
#define FSPI_AHBRX_BUF2CR1 0x48
#define FSPI_AHBRX_BUF3CR1 0x4C
#define FSPI_AHBRX_BUF4CR1 0x50
#define FSPI_AHBRX_BUF5CR1 0x54
#define FSPI_AHBRX_BUF6CR1 0x58
#define FSPI_AHBRX_BUF7CR1 0x5C
#define FSPI_FLSHA1CR0 0x60
#define FSPI_FLSHA2CR0 0x64
#define FSPI_FLSHB1CR0 0x68
#define FSPI_FLSHB2CR0 0x6C
#define FSPI_FLSHXCR0_SZ_KB 10
#define FSPI_FLSHXCR0_SZ(x) ((x) >> FSPI_FLSHXCR0_SZ_KB)
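/*
 * Worked example (illustrative, not from the original source):
 * FSPI_FLSHXCR0_SZ() converts a byte count into KiB by shifting right
 * by FSPI_FLSHXCR0_SZ_KB (10), e.g. a 64 MiB window of 0x4000000 bytes
 * yields FSPI_FLSHXCR0_SZ(0x4000000) = 0x10000, i.e. 65536 KiB.
 */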
#define FSPI_FLSHA1CR1 0x70
#define FSPI_FLSHA2CR1 0x74
#define FSPI_FLSHB1CR1 0x78
#define FSPI_FLSHB2CR1 0x7C
#define FSPI_FLSHXCR1_CSINTR(x) ((x) << 16)
#define FSPI_FLSHXCR1_CAS(x) ((x) << 11)
#define FSPI_FLSHXCR1_WA BIT(10)
#define FSPI_FLSHXCR1_TCSH(x) ((x) << 5)
#define FSPI_FLSHXCR1_TCSS(x) (x)
#define FSPI_FLSHA1CR2 0x80
#define FSPI_FLSHA2CR2 0x84
#define FSPI_FLSHB1CR2 0x88
#define FSPI_FLSHB2CR2 0x8C
#define FSPI_FLSHXCR2_CLRINSP BIT(24)
#define FSPI_FLSHXCR2_AWRWAIT BIT(16)
#define FSPI_FLSHXCR2_AWRSEQN_SHIFT 13
#define FSPI_FLSHXCR2_AWRSEQI_SHIFT 8
#define FSPI_FLSHXCR2_ARDSEQN_SHIFT 5
#define FSPI_FLSHXCR2_ARDSEQI_SHIFT 0
#define FSPI_IPCR0 0xA0
#define FSPI_IPCR1 0xA4
#define FSPI_IPCR1_IPAREN BIT(31)
#define FSPI_IPCR1_SEQNUM_SHIFT 24
#define FSPI_IPCR1_SEQID_SHIFT 16
#define FSPI_IPCR1_IDATSZ(x) (x)
#define FSPI_IPCMD 0xB0
#define FSPI_IPCMD_TRG BIT(0)
#define FSPI_DLPR 0xB4
#define FSPI_IPRXFCR 0xB8
#define FSPI_IPRXFCR_CLR BIT(0)
#define FSPI_IPRXFCR_DMA_EN BIT(1)
#define FSPI_IPRXFCR_WMRK(x) ((x) << 2)
#define FSPI_IPTXFCR 0xBC
#define FSPI_IPTXFCR_CLR BIT(0)
#define FSPI_IPTXFCR_DMA_EN BIT(1)
#define FSPI_IPTXFCR_WMRK(x) ((x) << 2)
#define FSPI_DLLACR 0xC0
#define FSPI_DLLACR_OVRDEN BIT(8)
#define FSPI_DLLACR_SLVDLY(x) ((x) << 3)
#define FSPI_DLLACR_DLLRESET BIT(1)
#define FSPI_DLLACR_DLLEN BIT(0)
#define FSPI_DLLBCR 0xC4
#define FSPI_DLLBCR_OVRDEN BIT(8)
#define FSPI_DLLBCR_SLVDLY(x) ((x) << 3)
#define FSPI_DLLBCR_DLLRESET BIT(1)
#define FSPI_DLLBCR_DLLEN BIT(0)
#define FSPI_STS0 0xE0
#define FSPI_STS0_DLPHB(x) ((x) << 8)
#define FSPI_STS0_DLPHA(x) ((x) << 4)
#define FSPI_STS0_CMD_SRC(x) ((x) << 2)
#define FSPI_STS0_ARB_IDLE BIT(1)
#define FSPI_STS0_SEQ_IDLE BIT(0)
#define FSPI_STS1 0xE4
#define FSPI_STS1_IP_ERRCD(x) ((x) << 24)
#define FSPI_STS1_IP_ERRID(x) ((x) << 16)
#define FSPI_STS1_AHB_ERRCD(x) ((x) << 8)
#define FSPI_STS1_AHB_ERRID(x) (x)
#define FSPI_STS2 0xE8
#define FSPI_STS2_BREFLOCK BIT(17)
#define FSPI_STS2_BSLVLOCK BIT(16)
#define FSPI_STS2_AREFLOCK BIT(1)
#define FSPI_STS2_ASLVLOCK BIT(0)
#define FSPI_STS2_AB_LOCK (FSPI_STS2_BREFLOCK | \
FSPI_STS2_BSLVLOCK | \
FSPI_STS2_AREFLOCK | \
FSPI_STS2_ASLVLOCK)
#define FSPI_AHBSPNST 0xEC
#define FSPI_AHBSPNST_DATLFT(x) ((x) << 16)
#define FSPI_AHBSPNST_BUFID(x) ((x) << 1)
#define FSPI_AHBSPNST_ACTIVE BIT(0)
#define FSPI_IPRXFSTS 0xF0
#define FSPI_IPRXFSTS_RDCNTR(x) ((x) << 16)
#define FSPI_IPRXFSTS_FILL(x) (x)
#define FSPI_IPTXFSTS 0xF4
#define FSPI_IPTXFSTS_WRCNTR(x) ((x) << 16)
#define FSPI_IPTXFSTS_FILL(x) (x)
#define FSPI_RFDR 0x100
#define FSPI_TFDR 0x180
#define FSPI_LUT_BASE 0x200
#define FSPI_LUT_OFFSET (SEQID_LUT * 4 * 4)
#define FSPI_LUT_REG(idx) \
(FSPI_LUT_BASE + FSPI_LUT_OFFSET + (idx) * 4)
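/*
 * Worked example (illustrative, not from the original source): with
 * SEQID_LUT = 31, FSPI_LUT_OFFSET = 31 * 4 * 4 = 0x1F0, so the four
 * LUT words of the sequence live at FSPI_LUT_REG(0) = 0x3F0 through
 * FSPI_LUT_REG(3) = 0x3FC.
 */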
/* register map end */
/* Instruction set for the LUT register. */
#define LUT_STOP 0x00
#define LUT_CMD 0x01
#define LUT_ADDR 0x02
#define LUT_CADDR_SDR 0x03
#define LUT_MODE 0x04
#define LUT_MODE2 0x05
#define LUT_MODE4 0x06
#define LUT_MODE8 0x07
#define LUT_NXP_WRITE 0x08
#define LUT_NXP_READ 0x09
#define LUT_LEARN_SDR 0x0A
#define LUT_DATSZ_SDR 0x0B
#define LUT_DUMMY 0x0C
#define LUT_DUMMY_RWDS_SDR 0x0D
#define LUT_JMP_ON_CS 0x1F
#define LUT_CMD_DDR 0x21
#define LUT_ADDR_DDR 0x22
#define LUT_CADDR_DDR 0x23
#define LUT_MODE_DDR 0x24
#define LUT_MODE2_DDR 0x25
#define LUT_MODE4_DDR 0x26
#define LUT_MODE8_DDR 0x27
#define LUT_WRITE_DDR 0x28
#define LUT_READ_DDR 0x29
#define LUT_LEARN_DDR 0x2A
#define LUT_DATSZ_DDR 0x2B
#define LUT_DUMMY_DDR 0x2C
#define LUT_DUMMY_RWDS_DDR 0x2D
/*
* Calculate number of required PAD bits for LUT register.
*
* The pad stands for the number of IO lines [0:7].
* For example, the octal read needs eight IO lines,
* so you should use LUT_PAD(8). This macro
* returns 3, i.e. use eight (2^3) IO lines for read.
*/
#define LUT_PAD(x) (fls(x) - 1)
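/*
 * Worked examples (illustrative, not from the original source):
 * LUT_PAD(1) = fls(1) - 1 = 0, LUT_PAD(2) = 1, LUT_PAD(4) = 2 and
 * LUT_PAD(8) = 3, matching single/dual/quad/octal transfers.
 */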
/*
* Macro for constructing the LUT entries with the following
* register layout:
*
* ---------------------------------------------------
* | INSTR1 | PAD1 | OPRND1 | INSTR0 | PAD0 | OPRND0 |
* ---------------------------------------------------
*/
#define PAD_SHIFT 8
#define INSTR_SHIFT 10
#define OPRND_SHIFT 16
/* Macros for constructing the LUT register. */
#define LUT_DEF(idx, ins, pad, opr) \
((((ins) << INSTR_SHIFT) | ((pad) << PAD_SHIFT) | \
(opr)) << (((idx) % 2) * OPRND_SHIFT))
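/*
 * Worked example (illustrative, the opcode value is arbitrary):
 * LUT_DEF(0, LUT_CMD, LUT_PAD(1), 0x6B)
 *   = ((0x01 << 10) | (0 << 8) | 0x6B) << 0 = 0x0000046B,
 * while the same operands at idx = 1 land in the upper half-word:
 *   0x046B0000.
 */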
#define POLL_TOUT 5000
#define NXP_FSPI_MAX_CHIPSELECT 4
#define NXP_FSPI_MIN_IOMAP SZ_4M
#define DCFG_RCWSR1 0x100
#define SYS_PLL_RAT GENMASK(6, 2)
/* Access flash memory using IP bus only */
#define FSPI_QUIRK_USE_IP_ONLY BIT(0)
struct nxp_fspi_devtype_data {
unsigned int rxfifo;
unsigned int txfifo;
unsigned int ahb_buf_size;
unsigned int quirks;
bool little_endian;
};
static struct nxp_fspi_devtype_data lx2160a_data = {
.rxfifo = SZ_512, /* (64 * 64 bits) */
.txfifo = SZ_1K, /* (128 * 64 bits) */
.ahb_buf_size = SZ_2K, /* (256 * 64 bits) */
.quirks = 0,
.little_endian = true, /* little-endian */
};
static struct nxp_fspi_devtype_data imx8mm_data = {
.rxfifo = SZ_512, /* (64 * 64 bits) */
.txfifo = SZ_1K, /* (128 * 64 bits) */
.ahb_buf_size = SZ_2K, /* (256 * 64 bits) */
.quirks = 0,
.little_endian = true, /* little-endian */
};
static struct nxp_fspi_devtype_data imx8qxp_data = {
.rxfifo = SZ_512, /* (64 * 64 bits) */
.txfifo = SZ_1K, /* (128 * 64 bits) */
.ahb_buf_size = SZ_2K, /* (256 * 64 bits) */
.quirks = 0,
.little_endian = true, /* little-endian */
};
static struct nxp_fspi_devtype_data imx8dxl_data = {
.rxfifo = SZ_512, /* (64 * 64 bits) */
.txfifo = SZ_1K, /* (128 * 64 bits) */
.ahb_buf_size = SZ_2K, /* (256 * 64 bits) */
.quirks = FSPI_QUIRK_USE_IP_ONLY,
.little_endian = true, /* little-endian */
};
struct nxp_fspi {
void __iomem *iobase;
void __iomem *ahb_addr;
u32 memmap_phy;
u32 memmap_phy_size;
u32 memmap_start;
u32 memmap_len;
struct clk *clk, *clk_en;
struct device *dev;
struct completion c;
struct nxp_fspi_devtype_data *devtype_data;
struct mutex lock;
struct pm_qos_request pm_qos_req;
int selected;
};
static inline int needs_ip_only(struct nxp_fspi *f)
{
return f->devtype_data->quirks & FSPI_QUIRK_USE_IP_ONLY;
}
/*
* R/W functions for big- or little-endian registers:
* The FSPI controller's endianness is independent of
* the CPU core's endianness. So far, although the CPU
* core is little-endian the FSPI controller can use
* big-endian or little-endian.
*/
static void fspi_writel(struct nxp_fspi *f, u32 val, void __iomem *addr)
{
if (f->devtype_data->little_endian)
iowrite32(val, addr);
else
iowrite32be(val, addr);
}
static u32 fspi_readl(struct nxp_fspi *f, void __iomem *addr)
{
if (f->devtype_data->little_endian)
return ioread32(addr);
else
return ioread32be(addr);
}
static irqreturn_t nxp_fspi_irq_handler(int irq, void *dev_id)
{
struct nxp_fspi *f = dev_id;
u32 reg;
/* clear interrupt */
reg = fspi_readl(f, f->iobase + FSPI_INTR);
fspi_writel(f, FSPI_INTR_IPCMDDONE, f->iobase + FSPI_INTR);
if (reg & FSPI_INTR_IPCMDDONE)
complete(&f->c);
return IRQ_HANDLED;
}
static int nxp_fspi_check_buswidth(struct nxp_fspi *f, u8 width)
{
switch (width) {
case 1:
case 2:
case 4:
case 8:
return 0;
}
return -ENOTSUPP;
}
static bool nxp_fspi_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
struct nxp_fspi *f = spi_controller_get_devdata(mem->spi->master);
int ret;
ret = nxp_fspi_check_buswidth(f, op->cmd.buswidth);
if (op->addr.nbytes)
ret |= nxp_fspi_check_buswidth(f, op->addr.buswidth);
if (op->dummy.nbytes)
ret |= nxp_fspi_check_buswidth(f, op->dummy.buswidth);
if (op->data.nbytes)
ret |= nxp_fspi_check_buswidth(f, op->data.buswidth);
if (ret)
return false;
/*
* The number of address bytes should be equal to or less than 4 bytes.
*/
if (op->addr.nbytes > 4)
return false;
/*
* If the requested address value is greater than the controller-assigned
* memory mapped space, return an error as it doesn't fit in the range
* of the assigned address space.
*/
if (op->addr.val >= f->memmap_phy_size)
return false;
/* Max 64 dummy clock cycles supported */
if (op->dummy.buswidth &&
(op->dummy.nbytes * 8 / op->dummy.buswidth > 64))
return false;
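/*
 * Illustrative arithmetic (not from the original source): 8 dummy
 * bytes on a 4-bit bus amount to 8 * 8 / 4 = 16 clock cycles, well
 * within the 64-cycle limit checked above.
 */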
/* Max data length, check controller limits and alignment */
if (op->data.dir == SPI_MEM_DATA_IN &&
(op->data.nbytes > f->devtype_data->ahb_buf_size ||
(op->data.nbytes > f->devtype_data->rxfifo - 4 &&
!IS_ALIGNED(op->data.nbytes, 8))))
return false;
if (op->data.dir == SPI_MEM_DATA_OUT &&
op->data.nbytes > f->devtype_data->txfifo)
return false;
return spi_mem_default_supports_op(mem, op);
}
/* Instead of busy looping invoke readl_poll_timeout functionality. */
static int fspi_readl_poll_tout(struct nxp_fspi *f, void __iomem *base,
u32 mask, u32 delay_us,
u32 timeout_us, bool c)
{
u32 reg;
if (!f->devtype_data->little_endian)
mask = (u32)cpu_to_be32(mask);
if (c)
return readl_poll_timeout(base, reg, (reg & mask),
delay_us, timeout_us);
else
return readl_poll_timeout(base, reg, !(reg & mask),
delay_us, timeout_us);
}
/*
* If the slave device content is being changed by Write/Erase, the AHB
* buffer needs to be invalidated. This is achieved by resetting the
* controller after setting the MCR0[SWRESET] bit.
*/
static inline void nxp_fspi_invalid(struct nxp_fspi *f)
{
u32 reg;
int ret;
reg = fspi_readl(f, f->iobase + FSPI_MCR0);
fspi_writel(f, reg | FSPI_MCR0_SWRST, f->iobase + FSPI_MCR0);
/* w1c register, wait until clear */
ret = fspi_readl_poll_tout(f, f->iobase + FSPI_MCR0,
FSPI_MCR0_SWRST, 0, POLL_TOUT, false);
WARN_ON(ret);
}
static void nxp_fspi_prepare_lut(struct nxp_fspi *f,
const struct spi_mem_op *op)
{
void __iomem *base = f->iobase;
u32 lutval[4] = {};
int lutidx = 1, i;
/* cmd */
lutval[0] |= LUT_DEF(0, LUT_CMD, LUT_PAD(op->cmd.buswidth),
op->cmd.opcode);
/* addr bytes */
if (op->addr.nbytes) {
lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_ADDR,
LUT_PAD(op->addr.buswidth),
op->addr.nbytes * 8);
lutidx++;
}
/* dummy bytes, if needed */
if (op->dummy.nbytes) {
lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_DUMMY,
/*
* Due to a FlexSPI controller limitation, the number of PADs for the
* dummy phase needs to be programmed equal to the data buswidth.
*/
LUT_PAD(op->data.buswidth),
op->dummy.nbytes * 8 /
op->dummy.buswidth);
lutidx++;
}
/* read/write data bytes */
if (op->data.nbytes) {
lutval[lutidx / 2] |= LUT_DEF(lutidx,
op->data.dir == SPI_MEM_DATA_IN ?
LUT_NXP_READ : LUT_NXP_WRITE,
LUT_PAD(op->data.buswidth),
0);
lutidx++;
}
/* stop condition. */
lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_STOP, 0, 0);
/* unlock LUT */
fspi_writel(f, FSPI_LUTKEY_VALUE, f->iobase + FSPI_LUTKEY);
fspi_writel(f, FSPI_LCKER_UNLOCK, f->iobase + FSPI_LCKCR);
/* fill LUT */
for (i = 0; i < ARRAY_SIZE(lutval); i++)
fspi_writel(f, lutval[i], base + FSPI_LUT_REG(i));
dev_dbg(f->dev, "CMD[%x] lutval[0:%x \t 1:%x \t 2:%x \t 3:%x], size: 0x%08x\n",
op->cmd.opcode, lutval[0], lutval[1], lutval[2], lutval[3], op->data.nbytes);
/* lock LUT */
fspi_writel(f, FSPI_LUTKEY_VALUE, f->iobase + FSPI_LUTKEY);
fspi_writel(f, FSPI_LCKER_LOCK, f->iobase + FSPI_LCKCR);
}
static int nxp_fspi_clk_prep_enable(struct nxp_fspi *f)
{
int ret;
if (is_acpi_node(dev_fwnode(f->dev)))
return 0;
ret = clk_prepare_enable(f->clk_en);
if (ret)
return ret;
ret = clk_prepare_enable(f->clk);
if (ret) {
clk_disable_unprepare(f->clk_en);
return ret;
}
return 0;
}
static int nxp_fspi_clk_disable_unprep(struct nxp_fspi *f)
{
if (is_acpi_node(dev_fwnode(f->dev)))
return 0;
clk_disable_unprepare(f->clk);
clk_disable_unprepare(f->clk_en);
return 0;
}
static void nxp_fspi_dll_calibration(struct nxp_fspi *f)
{
int ret;
/* Reset the DLL, set the DLLRESET to 1 and then set to 0 */
fspi_writel(f, FSPI_DLLACR_DLLRESET, f->iobase + FSPI_DLLACR);
fspi_writel(f, FSPI_DLLBCR_DLLRESET, f->iobase + FSPI_DLLBCR);
fspi_writel(f, 0, f->iobase + FSPI_DLLACR);
fspi_writel(f, 0, f->iobase + FSPI_DLLBCR);
/*
* Enable the DLL calibration mode.
* The delay target for slave delay line is:
* (SLVDLYTARGET + 1) * 1/32 * clock cycle of the reference clock.
* When the clock rate > 100MHz, the recommended SLVDLYTARGET is 0xF,
* which means half a clock cycle of the reference clock.
*/
fspi_writel(f, FSPI_DLLACR_DLLEN | FSPI_DLLACR_SLVDLY(0xF),
f->iobase + FSPI_DLLACR);
fspi_writel(f, FSPI_DLLBCR_DLLEN | FSPI_DLLBCR_SLVDLY(0xF),
f->iobase + FSPI_DLLBCR);
/* Wait to get REF/SLV lock */
ret = fspi_readl_poll_tout(f, f->iobase + FSPI_STS2, FSPI_STS2_AB_LOCK,
0, POLL_TOUT, true);
if (ret)
dev_warn(f->dev, "DLL lock failed, please fix it!\n");
}
/*
* In FlexSPI controller, flash access is based on value of FSPI_FLSHXXCR0
* register and start base address of the slave device.
*
* (Higher address)
* -------- <-- FLSHB2CR0
* | B2 |
* | |
* B2 start address --> -------- <-- FLSHB1CR0
* | B1 |
* | |
* B1 start address --> -------- <-- FLSHA2CR0
* | A2 |
* | |
* A2 start address --> -------- <-- FLSHA1CR0
* | A1 |
* | |
* A1 start address --> -------- (Lower address)
*
*
* Start base address defines the starting address range for given CS and
* FSPI_FLSHXXCR0 defines the size of the slave device connected at given CS.
*
* But different targets have different combinations of chip selects;
* some targets only have a single CS or two CS covering the controller's
* full memory mapped space area.
* Thus, the implementation is kept independent of the size and number
* of the connected slave devices.
* Assign the controller memory mapped space size as the size of the
* connected slave device.
* Mark FLSHxxCR0 as zero initially and then assign value only to the selected
* chip-select Flash configuration register.
*
* For e.g. to access CS2 (B1), FLSHB1CR0 register would be equal to the
* memory mapped size of the controller.
* Value for rest of the CS FLSHxxCR0 register would be zero.
*
*/
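/*
 * Worked example (illustrative, not from the original source): with a
 * 256 MiB memory mapped window and chip select 1 (slot A2) requested,
 * nxp_fspi_select_mem() below writes FSPI_FLSHXCR0_SZ(0x10000000) =
 * 0x40000 KiB to FSPI_FLSHA2CR0 while FLSHA1CR0/FLSHB1CR0/FLSHB2CR0
 * stay zero.
 */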
static void nxp_fspi_select_mem(struct nxp_fspi *f, struct spi_device *spi)
{
unsigned long rate = spi->max_speed_hz;
int ret;
uint64_t size_kb;
/*
* Return if the previously selected slave device is the same as the
* currently requested slave device.
*/
if (f->selected == spi_get_chipselect(spi, 0))
return;
/* Reset FLSHxxCR0 registers */
fspi_writel(f, 0, f->iobase + FSPI_FLSHA1CR0);
fspi_writel(f, 0, f->iobase + FSPI_FLSHA2CR0);
fspi_writel(f, 0, f->iobase + FSPI_FLSHB1CR0);
fspi_writel(f, 0, f->iobase + FSPI_FLSHB2CR0);
/* Assign controller memory mapped space as size, KBytes, of flash. */
size_kb = FSPI_FLSHXCR0_SZ(f->memmap_phy_size);
fspi_writel(f, size_kb, f->iobase + FSPI_FLSHA1CR0 +
4 * spi_get_chipselect(spi, 0));
dev_dbg(f->dev, "Slave device [CS:%x] selected\n", spi_get_chipselect(spi, 0));
nxp_fspi_clk_disable_unprep(f);
ret = clk_set_rate(f->clk, rate);
if (ret)
return;
ret = nxp_fspi_clk_prep_enable(f);
if (ret)
return;
/*
* If clock rate > 100MHz, then switch from DLL override mode to
* DLL calibration mode.
*/
if (rate > 100000000)
nxp_fspi_dll_calibration(f);
f->selected = spi_get_chipselect(spi, 0);
}
static int nxp_fspi_read_ahb(struct nxp_fspi *f, const struct spi_mem_op *op)
{
u32 start = op->addr.val;
u32 len = op->data.nbytes;
/* if necessary, ioremap before AHB read */
if ((!f->ahb_addr) || start < f->memmap_start ||
start + len > f->memmap_start + f->memmap_len) {
if (f->ahb_addr)
iounmap(f->ahb_addr);
f->memmap_start = start;
f->memmap_len = len > NXP_FSPI_MIN_IOMAP ?
len : NXP_FSPI_MIN_IOMAP;
f->ahb_addr = ioremap_wc(f->memmap_phy + f->memmap_start,
f->memmap_len);
if (!f->ahb_addr) {
dev_err(f->dev, "failed to alloc memory\n");
return -ENOMEM;
}
}
/* Read out the data directly from the AHB buffer. */
memcpy_fromio(op->data.buf.in,
f->ahb_addr + start - f->memmap_start, len);
return 0;
}
static void nxp_fspi_fill_txfifo(struct nxp_fspi *f,
const struct spi_mem_op *op)
{
void __iomem *base = f->iobase;
int i, ret;
u8 *buf = (u8 *) op->data.buf.out;
/* clear the TX FIFO. */
fspi_writel(f, FSPI_IPTXFCR_CLR, base + FSPI_IPTXFCR);
/*
* The default watermark level is 8 bytes, hence in a single
* write request the controller can write at most 8 bytes of data.
*/
for (i = 0; i < ALIGN_DOWN(op->data.nbytes, 8); i += 8) {
/* Wait for TXFIFO empty */
ret = fspi_readl_poll_tout(f, f->iobase + FSPI_INTR,
FSPI_INTR_IPTXWE, 0,
POLL_TOUT, true);
WARN_ON(ret);
fspi_writel(f, *(u32 *) (buf + i), base + FSPI_TFDR);
fspi_writel(f, *(u32 *) (buf + i + 4), base + FSPI_TFDR + 4);
fspi_writel(f, FSPI_INTR_IPTXWE, base + FSPI_INTR);
}
if (i < op->data.nbytes) {
u32 data = 0;
int j;
/* Wait for TXFIFO empty */
ret = fspi_readl_poll_tout(f, f->iobase + FSPI_INTR,
FSPI_INTR_IPTXWE, 0,
POLL_TOUT, true);
WARN_ON(ret);
for (j = 0; j < ALIGN(op->data.nbytes - i, 4); j += 4) {
memcpy(&data, buf + i + j, 4);
fspi_writel(f, data, base + FSPI_TFDR + j);
}
fspi_writel(f, FSPI_INTR_IPTXWE, base + FSPI_INTR);
}
}
static void nxp_fspi_read_rxfifo(struct nxp_fspi *f,
const struct spi_mem_op *op)
{
void __iomem *base = f->iobase;
int i, ret;
int len = op->data.nbytes;
u8 *buf = (u8 *) op->data.buf.in;
/*
* The default watermark level is 8 bytes, hence in a single
* read request the controller can read at most 8 bytes of data.
*/
for (i = 0; i < ALIGN_DOWN(len, 8); i += 8) {
/* Wait for RXFIFO available */
ret = fspi_readl_poll_tout(f, f->iobase + FSPI_INTR,
FSPI_INTR_IPRXWA, 0,
POLL_TOUT, true);
WARN_ON(ret);
*(u32 *)(buf + i) = fspi_readl(f, base + FSPI_RFDR);
*(u32 *)(buf + i + 4) = fspi_readl(f, base + FSPI_RFDR + 4);
/* move the FIFO pointer */
fspi_writel(f, FSPI_INTR_IPRXWA, base + FSPI_INTR);
}
if (i < len) {
u32 tmp;
int size, j;
buf = op->data.buf.in + i;
/* Wait for RXFIFO available */
ret = fspi_readl_poll_tout(f, f->iobase + FSPI_INTR,
FSPI_INTR_IPRXWA, 0,
POLL_TOUT, true);
WARN_ON(ret);
len = op->data.nbytes - i;
for (j = 0; j < op->data.nbytes - i; j += 4) {
tmp = fspi_readl(f, base + FSPI_RFDR + j);
size = min(len, 4);
memcpy(buf + j, &tmp, size);
len -= size;
}
}
/* invalidate the RXFIFO */
fspi_writel(f, FSPI_IPRXFCR_CLR, base + FSPI_IPRXFCR);
/* move the FIFO pointer */
fspi_writel(f, FSPI_INTR_IPRXWA, base + FSPI_INTR);
}
static int nxp_fspi_do_op(struct nxp_fspi *f, const struct spi_mem_op *op)
{
void __iomem *base = f->iobase;
int seqnum = 0;
int err = 0;
u32 reg;
reg = fspi_readl(f, base + FSPI_IPRXFCR);
/* invalidate the RXFIFO first */
reg &= ~FSPI_IPRXFCR_DMA_EN;
reg = reg | FSPI_IPRXFCR_CLR;
fspi_writel(f, reg, base + FSPI_IPRXFCR);
init_completion(&f->c);
fspi_writel(f, op->addr.val, base + FSPI_IPCR0);
/*
* Always start the sequence at the same index since we update
* the LUT at each exec_op() call. Also specify the DATA
* length, since it has not been specified in the LUT.
*/
fspi_writel(f, op->data.nbytes |
(SEQID_LUT << FSPI_IPCR1_SEQID_SHIFT) |
(seqnum << FSPI_IPCR1_SEQNUM_SHIFT),
base + FSPI_IPCR1);
/* Trigger the LUT now. */
fspi_writel(f, FSPI_IPCMD_TRG, base + FSPI_IPCMD);
/* Wait for the interrupt. */
if (!wait_for_completion_timeout(&f->c, msecs_to_jiffies(1000)))
err = -ETIMEDOUT;
/* Invoke IP data read, if request is of data read. */
if (!err && op->data.nbytes && op->data.dir == SPI_MEM_DATA_IN)
nxp_fspi_read_rxfifo(f, op);
return err;
}
static int nxp_fspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
struct nxp_fspi *f = spi_controller_get_devdata(mem->spi->master);
int err = 0;
mutex_lock(&f->lock);
/* Wait for the controller to be ready. */
err = fspi_readl_poll_tout(f, f->iobase + FSPI_STS0,
FSPI_STS0_ARB_IDLE, 1, POLL_TOUT, true);
WARN_ON(err);
nxp_fspi_select_mem(f, mem->spi);
nxp_fspi_prepare_lut(f, op);
/*
* If we have large chunks of data, we read them through the AHB bus by
* accessing the mapped memory. In all other cases we use IP commands
* to access the flash. Reads via the AHB bus may be corrupted due to
* an erratum, so AHB reads are discarded in such cases.
*/
if (op->data.nbytes > (f->devtype_data->rxfifo - 4) &&
op->data.dir == SPI_MEM_DATA_IN &&
!needs_ip_only(f)) {
err = nxp_fspi_read_ahb(f, op);
} else {
if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
nxp_fspi_fill_txfifo(f, op);
err = nxp_fspi_do_op(f, op);
}
/* Invalidate the data in the AHB buffer. */
nxp_fspi_invalid(f);
mutex_unlock(&f->lock);
return err;
}
static int nxp_fspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
struct nxp_fspi *f = spi_controller_get_devdata(mem->spi->master);
if (op->data.dir == SPI_MEM_DATA_OUT) {
if (op->data.nbytes > f->devtype_data->txfifo)
op->data.nbytes = f->devtype_data->txfifo;
} else {
if (op->data.nbytes > f->devtype_data->ahb_buf_size)
op->data.nbytes = f->devtype_data->ahb_buf_size;
else if (op->data.nbytes > (f->devtype_data->rxfifo - 4))
op->data.nbytes = ALIGN_DOWN(op->data.nbytes, 8);
}
/* Limit data bytes to RX FIFO in case of IP read only */
if (op->data.dir == SPI_MEM_DATA_IN &&
needs_ip_only(f) &&
op->data.nbytes > f->devtype_data->rxfifo)
op->data.nbytes = f->devtype_data->rxfifo;
return 0;
}
static void erratum_err050568(struct nxp_fspi *f)
{
static const struct soc_device_attribute ls1028a_soc_attr[] = {
{ .family = "QorIQ LS1028A" },
{ /* sentinel */ }
};
struct regmap *map;
u32 val, sys_pll_ratio;
int ret;
/* Check for LS1028A family */
if (!soc_device_match(ls1028a_soc_attr)) {
dev_dbg(f->dev, "Errata applicable only for LS1028A\n");
return;
}
map = syscon_regmap_lookup_by_compatible("fsl,ls1028a-dcfg");
if (IS_ERR(map)) {
dev_err(f->dev, "No syscon regmap\n");
goto err;
}
ret = regmap_read(map, DCFG_RCWSR1, &val);
if (ret < 0)
goto err;
sys_pll_ratio = FIELD_GET(SYS_PLL_RAT, val);
dev_dbg(f->dev, "val: 0x%08x, sys_pll_ratio: %d\n", val, sys_pll_ratio);
/* Use IP bus only if platform clock is 300MHz */
if (sys_pll_ratio == 3)
f->devtype_data->quirks |= FSPI_QUIRK_USE_IP_ONLY;
return;
err:
dev_err(f->dev, "Errata cannot be executed. Read via IP bus may not work\n");
}
static int nxp_fspi_default_setup(struct nxp_fspi *f)
{
void __iomem *base = f->iobase;
int ret, i;
u32 reg;
/* disable and unprepare clock to avoid a glitch passing to the controller */
nxp_fspi_clk_disable_unprep(f);
/* Set the default frequency; we will change it later if necessary. */
ret = clk_set_rate(f->clk, 20000000);
if (ret)
return ret;
ret = nxp_fspi_clk_prep_enable(f);
if (ret)
return ret;
/*
* ERR050568: Flash access by FlexSPI AHB command may not work with
* platform frequency equal to 300 MHz on LS1028A.
* LS1028A reuses LX2160A compatible entry. Make errata applicable for
* Layerscape LS1028A platform.
*/
if (of_device_is_compatible(f->dev->of_node, "nxp,lx2160a-fspi"))
erratum_err050568(f);
/* Reset the module */
/* w1c register, wait until clear */
ret = fspi_readl_poll_tout(f, f->iobase + FSPI_MCR0,
FSPI_MCR0_SWRST, 0, POLL_TOUT, false);
WARN_ON(ret);
/* Disable the module */
fspi_writel(f, FSPI_MCR0_MDIS, base + FSPI_MCR0);
/*
* Config the DLL register to default value, enable the slave clock delay
* line delay cell override mode, and use 1 fixed delay cell in DLL delay
* chain; this is the suggested setting when the clock rate < 100MHz.
*/
fspi_writel(f, FSPI_DLLACR_OVRDEN, base + FSPI_DLLACR);
fspi_writel(f, FSPI_DLLBCR_OVRDEN, base + FSPI_DLLBCR);
/* enable module */
fspi_writel(f, FSPI_MCR0_AHB_TIMEOUT(0xFF) |
FSPI_MCR0_IP_TIMEOUT(0xFF) | (u32) FSPI_MCR0_OCTCOMB_EN,
base + FSPI_MCR0);
/*
* Disable same device enable bit and configure all slave devices
* independently.
*/
reg = fspi_readl(f, f->iobase + FSPI_MCR2);
reg = reg & ~(FSPI_MCR2_SAMEDEVICEEN);
fspi_writel(f, reg, base + FSPI_MCR2);
/* AHB configuration for access buffer 0~7. */
for (i = 0; i < 7; i++)
fspi_writel(f, 0, base + FSPI_AHBRX_BUF0CR0 + 4 * i);
/*
* Set ADATSZ with the maximum AHB buffer size to improve the read
* performance.
*/
fspi_writel(f, (f->devtype_data->ahb_buf_size / 8 |
FSPI_AHBRXBUF0CR7_PREF), base + FSPI_AHBRX_BUF7CR0);
/* prefetch and no start address alignment limitation */
fspi_writel(f, FSPI_AHBCR_PREF_EN | FSPI_AHBCR_RDADDROPT,
base + FSPI_AHBCR);
/* Reset the FLSHxCR1 registers. */
reg = FSPI_FLSHXCR1_TCSH(0x3) | FSPI_FLSHXCR1_TCSS(0x3);
fspi_writel(f, reg, base + FSPI_FLSHA1CR1);
fspi_writel(f, reg, base + FSPI_FLSHA2CR1);
fspi_writel(f, reg, base + FSPI_FLSHB1CR1);
fspi_writel(f, reg, base + FSPI_FLSHB2CR1);
/* AHB Read - Set lut sequence ID for all CS. */
fspi_writel(f, SEQID_LUT, base + FSPI_FLSHA1CR2);
fspi_writel(f, SEQID_LUT, base + FSPI_FLSHA2CR2);
fspi_writel(f, SEQID_LUT, base + FSPI_FLSHB1CR2);
fspi_writel(f, SEQID_LUT, base + FSPI_FLSHB2CR2);
f->selected = -1;
/* enable the interrupt */
fspi_writel(f, FSPI_INTEN_IPCMDDONE, base + FSPI_INTEN);
return 0;
}
static const char *nxp_fspi_get_name(struct spi_mem *mem)
{
struct nxp_fspi *f = spi_controller_get_devdata(mem->spi->master);
struct device *dev = &mem->spi->dev;
const char *name;
// Set custom name derived from the platform_device of the controller.
if (of_get_available_child_count(f->dev->of_node) == 1)
return dev_name(f->dev);
name = devm_kasprintf(dev, GFP_KERNEL,
"%s-%d", dev_name(f->dev),
spi_get_chipselect(mem->spi, 0));
if (!name) {
dev_err(dev, "failed to get memory for custom flash name\n");
return ERR_PTR(-ENOMEM);
}
return name;
}
static const struct spi_controller_mem_ops nxp_fspi_mem_ops = {
.adjust_op_size = nxp_fspi_adjust_op_size,
.supports_op = nxp_fspi_supports_op,
.exec_op = nxp_fspi_exec_op,
.get_name = nxp_fspi_get_name,
};
static int nxp_fspi_probe(struct platform_device *pdev)
{
struct spi_controller *ctlr;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct resource *res;
struct nxp_fspi *f;
int ret;
u32 reg;
ctlr = spi_alloc_master(&pdev->dev, sizeof(*f));
if (!ctlr)
return -ENOMEM;
ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL |
SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL;
f = spi_controller_get_devdata(ctlr);
f->dev = dev;
f->devtype_data = (struct nxp_fspi_devtype_data *)device_get_match_data(dev);
if (!f->devtype_data) {
ret = -ENODEV;
goto err_put_ctrl;
}
platform_set_drvdata(pdev, f);
/* find the resources - configuration register address space */
if (is_acpi_node(dev_fwnode(f->dev)))
f->iobase = devm_platform_ioremap_resource(pdev, 0);
else
f->iobase = devm_platform_ioremap_resource_byname(pdev, "fspi_base");
if (IS_ERR(f->iobase)) {
ret = PTR_ERR(f->iobase);
goto err_put_ctrl;
}
/* find the resources - controller memory mapped space */
if (is_acpi_node(dev_fwnode(f->dev)))
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
else
res = platform_get_resource_byname(pdev,
IORESOURCE_MEM, "fspi_mmap");
if (!res) {
ret = -ENODEV;
goto err_put_ctrl;
}
/* assign memory mapped starting address and mapped size. */
f->memmap_phy = res->start;
f->memmap_phy_size = resource_size(res);
/* find the clocks */
if (dev_of_node(&pdev->dev)) {
f->clk_en = devm_clk_get(dev, "fspi_en");
if (IS_ERR(f->clk_en)) {
ret = PTR_ERR(f->clk_en);
goto err_put_ctrl;
}
f->clk = devm_clk_get(dev, "fspi");
if (IS_ERR(f->clk)) {
ret = PTR_ERR(f->clk);
goto err_put_ctrl;
}
ret = nxp_fspi_clk_prep_enable(f);
if (ret) {
dev_err(dev, "can not enable the clock\n");
goto err_put_ctrl;
}
}
/* Clear potential interrupts */
reg = fspi_readl(f, f->iobase + FSPI_INTR);
if (reg)
fspi_writel(f, reg, f->iobase + FSPI_INTR);
/* find the irq */
ret = platform_get_irq(pdev, 0);
if (ret < 0)
goto err_disable_clk;
ret = devm_request_irq(dev, ret,
nxp_fspi_irq_handler, 0, pdev->name, f);
if (ret) {
dev_err(dev, "failed to request irq: %d\n", ret);
goto err_disable_clk;
}
mutex_init(&f->lock);
ctlr->bus_num = -1;
ctlr->num_chipselect = NXP_FSPI_MAX_CHIPSELECT;
ctlr->mem_ops = &nxp_fspi_mem_ops;
nxp_fspi_default_setup(f);
ctlr->dev.of_node = np;
ret = devm_spi_register_controller(&pdev->dev, ctlr);
if (ret)
goto err_destroy_mutex;
return 0;
err_destroy_mutex:
mutex_destroy(&f->lock);
err_disable_clk:
nxp_fspi_clk_disable_unprep(f);
err_put_ctrl:
spi_controller_put(ctlr);
dev_err(dev, "NXP FSPI probe failed\n");
return ret;
}
static void nxp_fspi_remove(struct platform_device *pdev)
{
struct nxp_fspi *f = platform_get_drvdata(pdev);
/* disable the hardware */
fspi_writel(f, FSPI_MCR0_MDIS, f->iobase + FSPI_MCR0);
nxp_fspi_clk_disable_unprep(f);
mutex_destroy(&f->lock);
if (f->ahb_addr)
iounmap(f->ahb_addr);
}
static int nxp_fspi_suspend(struct device *dev)
{
return 0;
}
static int nxp_fspi_resume(struct device *dev)
{
struct nxp_fspi *f = dev_get_drvdata(dev);
nxp_fspi_default_setup(f);
return 0;
}
static const struct of_device_id nxp_fspi_dt_ids[] = {
{ .compatible = "nxp,lx2160a-fspi", .data = (void *)&lx2160a_data, },
{ .compatible = "nxp,imx8mm-fspi", .data = (void *)&imx8mm_data, },
{ .compatible = "nxp,imx8mp-fspi", .data = (void *)&imx8mm_data, },
{ .compatible = "nxp,imx8qxp-fspi", .data = (void *)&imx8qxp_data, },
{ .compatible = "nxp,imx8dxl-fspi", .data = (void *)&imx8dxl_data, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, nxp_fspi_dt_ids);
#ifdef CONFIG_ACPI
static const struct acpi_device_id nxp_fspi_acpi_ids[] = {
{ "NXP0009", .driver_data = (kernel_ulong_t)&lx2160a_data, },
{}
};
MODULE_DEVICE_TABLE(acpi, nxp_fspi_acpi_ids);
#endif
static const struct dev_pm_ops nxp_fspi_pm_ops = {
.suspend = nxp_fspi_suspend,
.resume = nxp_fspi_resume,
};
static struct platform_driver nxp_fspi_driver = {
.driver = {
.name = "nxp-fspi",
.of_match_table = nxp_fspi_dt_ids,
.acpi_match_table = ACPI_PTR(nxp_fspi_acpi_ids),
.pm = &nxp_fspi_pm_ops,
},
.probe = nxp_fspi_probe,
.remove_new = nxp_fspi_remove,
};
module_platform_driver(nxp_fspi_driver);
MODULE_DESCRIPTION("NXP FSPI Controller Driver");
MODULE_AUTHOR("NXP Semiconductor");
MODULE_AUTHOR("Yogesh Narayan Gaur <[email protected]>");
MODULE_AUTHOR("Boris Brezillon <[email protected]>");
MODULE_AUTHOR("Frieder Schrempf <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/spi/spi-nxp-fspi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Broadcom BCMBCA High Speed SPI Controller driver
*
* Copyright 2000-2010 Broadcom Corporation
* Copyright 2012-2013 Jonas Gorski <[email protected]>
* Copyright 2019-2022 Broadcom Ltd
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/spi/spi.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/spi/spi-mem.h>
#include <linux/pm_runtime.h>
#define HSSPI_GLOBAL_CTRL_REG 0x0
#define GLOBAL_CTRL_CS_POLARITY_SHIFT 0
#define GLOBAL_CTRL_CS_POLARITY_MASK 0x000000ff
#define GLOBAL_CTRL_PLL_CLK_CTRL_SHIFT 8
#define GLOBAL_CTRL_PLL_CLK_CTRL_MASK 0x0000ff00
#define GLOBAL_CTRL_CLK_GATE_SSOFF BIT(16)
#define GLOBAL_CTRL_CLK_POLARITY BIT(17)
#define GLOBAL_CTRL_MOSI_IDLE BIT(18)
#define HSSPI_GLOBAL_EXT_TRIGGER_REG 0x4
#define HSSPI_INT_STATUS_REG 0x8
#define HSSPI_INT_STATUS_MASKED_REG 0xc
#define HSSPI_INT_MASK_REG 0x10
#define HSSPI_PINGx_CMD_DONE(i) BIT((i * 8) + 0)
#define HSSPI_PINGx_RX_OVER(i) BIT((i * 8) + 1)
#define HSSPI_PINGx_TX_UNDER(i) BIT((i * 8) + 2)
#define HSSPI_PINGx_POLL_TIMEOUT(i) BIT((i * 8) + 3)
#define HSSPI_PINGx_CTRL_INVAL(i) BIT((i * 8) + 4)
#define HSSPI_INT_CLEAR_ALL 0xff001f1f
#define HSSPI_PINGPONG_COMMAND_REG(x) (0x80 + (x) * 0x40)
#define PINGPONG_CMD_COMMAND_MASK 0xf
#define PINGPONG_COMMAND_NOOP 0
#define PINGPONG_COMMAND_START_NOW 1
#define PINGPONG_COMMAND_START_TRIGGER 2
#define PINGPONG_COMMAND_HALT 3
#define PINGPONG_COMMAND_FLUSH 4
#define PINGPONG_CMD_PROFILE_SHIFT 8
#define PINGPONG_CMD_SS_SHIFT 12
#define HSSPI_PINGPONG_STATUS_REG(x) (0x84 + (x) * 0x40)
#define HSSPI_PINGPONG_STATUS_SRC_BUSY BIT(1)
#define HSSPI_PROFILE_CLK_CTRL_REG(x) (0x100 + (x) * 0x20)
#define CLK_CTRL_FREQ_CTRL_MASK 0x0000ffff
#define CLK_CTRL_SPI_CLK_2X_SEL BIT(14)
#define CLK_CTRL_ACCUM_RST_ON_LOOP BIT(15)
#define CLK_CTRL_CLK_POLARITY BIT(16)
#define HSSPI_PROFILE_SIGNAL_CTRL_REG(x) (0x104 + (x) * 0x20)
#define SIGNAL_CTRL_LATCH_RISING BIT(12)
#define SIGNAL_CTRL_LAUNCH_RISING BIT(13)
#define SIGNAL_CTRL_ASYNC_INPUT_PATH BIT(16)
#define HSSPI_PROFILE_MODE_CTRL_REG(x) (0x108 + (x) * 0x20)
#define MODE_CTRL_MULTIDATA_RD_STRT_SHIFT 8
#define MODE_CTRL_MULTIDATA_WR_STRT_SHIFT 12
#define MODE_CTRL_MULTIDATA_RD_SIZE_SHIFT 16
#define MODE_CTRL_MULTIDATA_WR_SIZE_SHIFT 18
#define MODE_CTRL_MODE_3WIRE BIT(20)
#define MODE_CTRL_PREPENDBYTE_CNT_SHIFT 24
#define HSSPI_FIFO_REG(x) (0x200 + (x) * 0x200)
#define HSSPI_OP_MULTIBIT BIT(11)
#define HSSPI_OP_CODE_SHIFT 13
#define HSSPI_OP_SLEEP (0 << HSSPI_OP_CODE_SHIFT)
#define HSSPI_OP_READ_WRITE (1 << HSSPI_OP_CODE_SHIFT)
#define HSSPI_OP_WRITE (2 << HSSPI_OP_CODE_SHIFT)
#define HSSPI_OP_READ (3 << HSSPI_OP_CODE_SHIFT)
#define HSSPI_OP_SETIRQ (4 << HSSPI_OP_CODE_SHIFT)
#define HSSPI_BUFFER_LEN 512
#define HSSPI_OPCODE_LEN 2
#define HSSPI_MAX_PREPEND_LEN 15
#define HSSPI_MAX_SYNC_CLOCK 30000000
#define HSSPI_SPI_MAX_CS 8
#define HSSPI_BUS_NUM 1 /* 0 is legacy SPI */
#define HSSPI_POLL_STATUS_TIMEOUT_MS 100
#define HSSPI_WAIT_MODE_POLLING 0
#define HSSPI_WAIT_MODE_INTR 1
#define HSSPI_WAIT_MODE_MAX HSSPI_WAIT_MODE_INTR
#define SPIM_CTRL_CS_OVERRIDE_SEL_SHIFT 0
#define SPIM_CTRL_CS_OVERRIDE_SEL_MASK 0xff
#define SPIM_CTRL_CS_OVERRIDE_VAL_SHIFT 8
#define SPIM_CTRL_CS_OVERRIDE_VAL_MASK 0xff
struct bcmbca_hsspi {
struct completion done;
struct mutex bus_mutex;
struct mutex msg_mutex;
struct platform_device *pdev;
struct clk *clk;
struct clk *pll_clk;
void __iomem *regs;
void __iomem *spim_ctrl;
u8 __iomem *fifo;
u32 speed_hz;
u8 cs_polarity;
u32 wait_mode;
};
static ssize_t wait_mode_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct spi_controller *ctrl = dev_get_drvdata(dev);
struct bcmbca_hsspi *bs = spi_controller_get_devdata(ctrl);
return sprintf(buf, "%d\n", bs->wait_mode);
}
static ssize_t wait_mode_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct spi_controller *ctrl = dev_get_drvdata(dev);
struct bcmbca_hsspi *bs = spi_controller_get_devdata(ctrl);
u32 val;
if (kstrtou32(buf, 10, &val))
return -EINVAL;
if (val > HSSPI_WAIT_MODE_MAX) {
dev_warn(dev, "invalid wait mode %u\n", val);
return -EINVAL;
}
mutex_lock(&bs->msg_mutex);
bs->wait_mode = val;
/* clear interrupt status to avoid spurious int on next transfer */
if (val == HSSPI_WAIT_MODE_INTR)
__raw_writel(HSSPI_INT_CLEAR_ALL, bs->regs + HSSPI_INT_STATUS_REG);
mutex_unlock(&bs->msg_mutex);
return count;
}
static DEVICE_ATTR_RW(wait_mode);
static struct attribute *bcmbca_hsspi_attrs[] = {
&dev_attr_wait_mode.attr,
NULL,
};
static const struct attribute_group bcmbca_hsspi_group = {
.attrs = bcmbca_hsspi_attrs,
};
static void bcmbca_hsspi_set_cs(struct bcmbca_hsspi *bs, unsigned int cs,
bool active)
{
u32 reg;
/* No cs override needed for SS7 internal cs on pcm based voice dev */
if (cs == 7)
return;
mutex_lock(&bs->bus_mutex);
reg = __raw_readl(bs->spim_ctrl);
if (active)
reg |= BIT(cs + SPIM_CTRL_CS_OVERRIDE_SEL_SHIFT);
else
reg &= ~BIT(cs + SPIM_CTRL_CS_OVERRIDE_SEL_SHIFT);
__raw_writel(reg, bs->spim_ctrl);
mutex_unlock(&bs->bus_mutex);
}
static void bcmbca_hsspi_set_clk(struct bcmbca_hsspi *bs,
struct spi_device *spi, int hz)
{
unsigned int profile = spi_get_chipselect(spi, 0);
u32 reg;
reg = DIV_ROUND_UP(2048, DIV_ROUND_UP(bs->speed_hz, hz));
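/*
 * Illustrative numbers (not from the original source): with a 400 MHz
 * reference clock and a 25 MHz target, DIV_ROUND_UP(400 MHz, 25 MHz) = 16
 * and DIV_ROUND_UP(2048, 16) = 128 is the value programmed into the
 * profile clock control register below.
 */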
__raw_writel(CLK_CTRL_ACCUM_RST_ON_LOOP | reg,
bs->regs + HSSPI_PROFILE_CLK_CTRL_REG(profile));
reg = __raw_readl(bs->regs + HSSPI_PROFILE_SIGNAL_CTRL_REG(profile));
if (hz > HSSPI_MAX_SYNC_CLOCK)
reg |= SIGNAL_CTRL_ASYNC_INPUT_PATH;
else
reg &= ~SIGNAL_CTRL_ASYNC_INPUT_PATH;
__raw_writel(reg, bs->regs + HSSPI_PROFILE_SIGNAL_CTRL_REG(profile));
mutex_lock(&bs->bus_mutex);
/* setup clock polarity */
reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
reg &= ~GLOBAL_CTRL_CLK_POLARITY;
if (spi->mode & SPI_CPOL)
reg |= GLOBAL_CTRL_CLK_POLARITY;
__raw_writel(reg, bs->regs + HSSPI_GLOBAL_CTRL_REG);
mutex_unlock(&bs->bus_mutex);
}
static int bcmbca_hsspi_wait_cmd(struct bcmbca_hsspi *bs, unsigned int cs)
{
unsigned long limit;
u32 reg = 0;
int rc = 0;
if (bs->wait_mode == HSSPI_WAIT_MODE_INTR) {
if (wait_for_completion_timeout(&bs->done, HZ) == 0)
rc = 1;
} else {
limit = jiffies + msecs_to_jiffies(HSSPI_POLL_STATUS_TIMEOUT_MS);
while (!time_after(jiffies, limit)) {
reg = __raw_readl(bs->regs + HSSPI_PINGPONG_STATUS_REG(0));
if (reg & HSSPI_PINGPONG_STATUS_SRC_BUSY)
cpu_relax();
else
break;
}
if (reg & HSSPI_PINGPONG_STATUS_SRC_BUSY)
rc = 1;
}
if (rc)
dev_err(&bs->pdev->dev, "transfer timed out!\n");
return rc;
}
static int bcmbca_hsspi_do_txrx(struct spi_device *spi, struct spi_transfer *t,
struct spi_message *msg)
{
struct bcmbca_hsspi *bs = spi_controller_get_devdata(spi->controller);
unsigned int chip_select = spi_get_chipselect(spi, 0);
u16 opcode = 0, val;
int pending = t->len;
int step_size = HSSPI_BUFFER_LEN;
const u8 *tx = t->tx_buf;
u8 *rx = t->rx_buf;
u32 reg = 0, cs_act = 0;
bcmbca_hsspi_set_clk(bs, spi, t->speed_hz);
if (tx && rx)
opcode = HSSPI_OP_READ_WRITE;
else if (tx)
opcode = HSSPI_OP_WRITE;
else if (rx)
opcode = HSSPI_OP_READ;
if (opcode != HSSPI_OP_READ)
step_size -= HSSPI_OPCODE_LEN;
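/*
 * Descriptive note (not from the original source): the 2-byte opcode
 * shares the 512-byte ping-pong FIFO with the payload, so write and
 * read-write transfers move at most 510 bytes per step, while pure
 * reads can use the full 512 bytes.
 */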
if ((opcode == HSSPI_OP_READ && t->rx_nbits == SPI_NBITS_DUAL) ||
(opcode == HSSPI_OP_WRITE && t->tx_nbits == SPI_NBITS_DUAL)) {
opcode |= HSSPI_OP_MULTIBIT;
if (t->rx_nbits == SPI_NBITS_DUAL)
reg |= 1 << MODE_CTRL_MULTIDATA_RD_SIZE_SHIFT;
if (t->tx_nbits == SPI_NBITS_DUAL)
reg |= 1 << MODE_CTRL_MULTIDATA_WR_SIZE_SHIFT;
}
__raw_writel(reg | 0xff,
bs->regs + HSSPI_PROFILE_MODE_CTRL_REG(chip_select));
while (pending > 0) {
int curr_step = min_t(int, step_size, pending);
reinit_completion(&bs->done);
if (tx) {
memcpy_toio(bs->fifo + HSSPI_OPCODE_LEN, tx, curr_step);
tx += curr_step;
}
*(__be16 *)(&val) = cpu_to_be16(opcode | curr_step);
__raw_writew(val, bs->fifo);
/* enable interrupt */
if (bs->wait_mode == HSSPI_WAIT_MODE_INTR)
__raw_writel(HSSPI_PINGx_CMD_DONE(0),
bs->regs + HSSPI_INT_MASK_REG);
if (!cs_act) {
/* must assert the cs signal as close as possible to when the cmd starts */
bcmbca_hsspi_set_cs(bs, chip_select, true);
cs_act = 1;
}
reg = chip_select << PINGPONG_CMD_SS_SHIFT |
chip_select << PINGPONG_CMD_PROFILE_SHIFT |
PINGPONG_COMMAND_START_NOW;
__raw_writel(reg, bs->regs + HSSPI_PINGPONG_COMMAND_REG(0));
if (bcmbca_hsspi_wait_cmd(bs, spi_get_chipselect(spi, 0)))
return -ETIMEDOUT;
pending -= curr_step;
if (rx) {
memcpy_fromio(rx, bs->fifo, curr_step);
rx += curr_step;
}
}
return 0;
}
static int bcmbca_hsspi_setup(struct spi_device *spi)
{
struct bcmbca_hsspi *bs = spi_controller_get_devdata(spi->controller);
u32 reg;
reg = __raw_readl(bs->regs +
HSSPI_PROFILE_SIGNAL_CTRL_REG(spi_get_chipselect(spi, 0)));
reg &= ~(SIGNAL_CTRL_LAUNCH_RISING | SIGNAL_CTRL_LATCH_RISING);
if (spi->mode & SPI_CPHA)
reg |= SIGNAL_CTRL_LAUNCH_RISING;
else
reg |= SIGNAL_CTRL_LATCH_RISING;
__raw_writel(reg, bs->regs +
HSSPI_PROFILE_SIGNAL_CTRL_REG(spi_get_chipselect(spi, 0)));
mutex_lock(&bs->bus_mutex);
reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
if (spi->mode & SPI_CS_HIGH)
reg |= BIT(spi_get_chipselect(spi, 0));
else
reg &= ~BIT(spi_get_chipselect(spi, 0));
__raw_writel(reg, bs->regs + HSSPI_GLOBAL_CTRL_REG);
if (spi->mode & SPI_CS_HIGH)
bs->cs_polarity |= BIT(spi_get_chipselect(spi, 0));
else
bs->cs_polarity &= ~BIT(spi_get_chipselect(spi, 0));
reg = __raw_readl(bs->spim_ctrl);
reg &= ~BIT(spi_get_chipselect(spi, 0) + SPIM_CTRL_CS_OVERRIDE_VAL_SHIFT);
if (spi->mode & SPI_CS_HIGH)
reg |= BIT(spi_get_chipselect(spi, 0) + SPIM_CTRL_CS_OVERRIDE_VAL_SHIFT);
__raw_writel(reg, bs->spim_ctrl);
mutex_unlock(&bs->bus_mutex);
return 0;
}
static int bcmbca_hsspi_transfer_one(struct spi_controller *host,
struct spi_message *msg)
{
struct bcmbca_hsspi *bs = spi_controller_get_devdata(host);
struct spi_transfer *t;
struct spi_device *spi = msg->spi;
int status = -EINVAL;
bool keep_cs = false;
mutex_lock(&bs->msg_mutex);
list_for_each_entry(t, &msg->transfers, transfer_list) {
status = bcmbca_hsspi_do_txrx(spi, t, msg);
if (status)
break;
spi_transfer_delay_exec(t);
if (t->cs_change) {
if (list_is_last(&t->transfer_list, &msg->transfers)) {
keep_cs = true;
} else {
if (!t->cs_off)
bcmbca_hsspi_set_cs(bs, spi_get_chipselect(spi, 0), false);
spi_transfer_cs_change_delay_exec(msg, t);
if (!list_next_entry(t, transfer_list)->cs_off)
bcmbca_hsspi_set_cs(bs, spi_get_chipselect(spi, 0), true);
}
} else if (!list_is_last(&t->transfer_list, &msg->transfers) &&
t->cs_off != list_next_entry(t, transfer_list)->cs_off) {
bcmbca_hsspi_set_cs(bs, spi_get_chipselect(spi, 0), t->cs_off);
}
msg->actual_length += t->len;
}
mutex_unlock(&bs->msg_mutex);
if (status || !keep_cs)
bcmbca_hsspi_set_cs(bs, spi_get_chipselect(spi, 0), false);
msg->status = status;
spi_finalize_current_message(host);
return 0;
}
static irqreturn_t bcmbca_hsspi_interrupt(int irq, void *dev_id)
{
struct bcmbca_hsspi *bs = (struct bcmbca_hsspi *)dev_id;
if (__raw_readl(bs->regs + HSSPI_INT_STATUS_MASKED_REG) == 0)
return IRQ_NONE;
__raw_writel(HSSPI_INT_CLEAR_ALL, bs->regs + HSSPI_INT_STATUS_REG);
__raw_writel(0, bs->regs + HSSPI_INT_MASK_REG);
complete(&bs->done);
return IRQ_HANDLED;
}
static int bcmbca_hsspi_probe(struct platform_device *pdev)
{
struct spi_controller *host;
struct bcmbca_hsspi *bs;
struct resource *res_mem;
void __iomem *spim_ctrl;
void __iomem *regs;
struct device *dev = &pdev->dev;
struct clk *clk, *pll_clk = NULL;
int irq, ret;
u32 reg, rate, num_cs = HSSPI_SPI_MAX_CS;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
res_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hsspi");
if (!res_mem)
return -EINVAL;
regs = devm_ioremap_resource(dev, res_mem);
if (IS_ERR(regs))
return PTR_ERR(regs);
res_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "spim-ctrl");
if (!res_mem)
return -EINVAL;
spim_ctrl = devm_ioremap_resource(dev, res_mem);
if (IS_ERR(spim_ctrl))
return PTR_ERR(spim_ctrl);
clk = devm_clk_get(dev, "hsspi");
if (IS_ERR(clk))
return PTR_ERR(clk);
ret = clk_prepare_enable(clk);
if (ret)
return ret;
rate = clk_get_rate(clk);
if (!rate) {
pll_clk = devm_clk_get(dev, "pll");
if (IS_ERR(pll_clk)) {
ret = PTR_ERR(pll_clk);
goto out_disable_clk;
}
ret = clk_prepare_enable(pll_clk);
if (ret)
goto out_disable_clk;
rate = clk_get_rate(pll_clk);
if (!rate) {
ret = -EINVAL;
goto out_disable_pll_clk;
}
}
host = spi_alloc_host(&pdev->dev, sizeof(*bs));
if (!host) {
ret = -ENOMEM;
goto out_disable_pll_clk;
}
bs = spi_controller_get_devdata(host);
bs->pdev = pdev;
bs->clk = clk;
bs->pll_clk = pll_clk;
bs->regs = regs;
bs->spim_ctrl = spim_ctrl;
bs->speed_hz = rate;
bs->fifo = (u8 __iomem *) (bs->regs + HSSPI_FIFO_REG(0));
bs->wait_mode = HSSPI_WAIT_MODE_POLLING;
mutex_init(&bs->bus_mutex);
mutex_init(&bs->msg_mutex);
init_completion(&bs->done);
host->dev.of_node = dev->of_node;
if (!dev->of_node)
host->bus_num = HSSPI_BUS_NUM;
of_property_read_u32(dev->of_node, "num-cs", &num_cs);
if (num_cs > 8) {
dev_warn(dev, "unsupported number of cs (%i), reducing to 8\n",
num_cs);
num_cs = HSSPI_SPI_MAX_CS;
}
host->num_chipselect = num_cs;
host->setup = bcmbca_hsspi_setup;
host->transfer_one_message = bcmbca_hsspi_transfer_one;
host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH |
SPI_RX_DUAL | SPI_TX_DUAL;
host->bits_per_word_mask = SPI_BPW_MASK(8);
host->auto_runtime_pm = true;
platform_set_drvdata(pdev, host);
/* Initialize the hardware */
__raw_writel(0, bs->regs + HSSPI_INT_MASK_REG);
/* clean up any pending interrupts */
__raw_writel(HSSPI_INT_CLEAR_ALL, bs->regs + HSSPI_INT_STATUS_REG);
/* read out default CS polarities */
reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
bs->cs_polarity = reg & GLOBAL_CTRL_CS_POLARITY_MASK;
__raw_writel(reg | GLOBAL_CTRL_CLK_GATE_SSOFF,
bs->regs + HSSPI_GLOBAL_CTRL_REG);
if (irq > 0) {
ret = devm_request_irq(dev, irq, bcmbca_hsspi_interrupt, IRQF_SHARED,
pdev->name, bs);
if (ret)
goto out_put_host;
}
pm_runtime_enable(&pdev->dev);
ret = sysfs_create_group(&pdev->dev.kobj, &bcmbca_hsspi_group);
if (ret) {
dev_err(&pdev->dev, "couldn't register sysfs group\n");
goto out_pm_disable;
}
/* register and we are done */
ret = devm_spi_register_controller(dev, host);
if (ret)
goto out_sysgroup_disable;
dev_info(dev, "Broadcom BCMBCA High Speed SPI Controller driver");
return 0;
out_sysgroup_disable:
sysfs_remove_group(&pdev->dev.kobj, &bcmbca_hsspi_group);
out_pm_disable:
pm_runtime_disable(&pdev->dev);
out_put_host:
spi_controller_put(host);
out_disable_pll_clk:
clk_disable_unprepare(pll_clk);
out_disable_clk:
clk_disable_unprepare(clk);
return ret;
}
static void bcmbca_hsspi_remove(struct platform_device *pdev)
{
struct spi_controller *host = platform_get_drvdata(pdev);
struct bcmbca_hsspi *bs = spi_controller_get_devdata(host);
/* reset the hardware and block queue progress */
__raw_writel(0, bs->regs + HSSPI_INT_MASK_REG);
clk_disable_unprepare(bs->pll_clk);
clk_disable_unprepare(bs->clk);
sysfs_remove_group(&pdev->dev.kobj, &bcmbca_hsspi_group);
}
#ifdef CONFIG_PM_SLEEP
static int bcmbca_hsspi_suspend(struct device *dev)
{
struct spi_controller *host = dev_get_drvdata(dev);
struct bcmbca_hsspi *bs = spi_controller_get_devdata(host);
spi_controller_suspend(host);
clk_disable_unprepare(bs->pll_clk);
clk_disable_unprepare(bs->clk);
return 0;
}
static int bcmbca_hsspi_resume(struct device *dev)
{
struct spi_controller *host = dev_get_drvdata(dev);
struct bcmbca_hsspi *bs = spi_controller_get_devdata(host);
int ret;
ret = clk_prepare_enable(bs->clk);
if (ret)
return ret;
if (bs->pll_clk) {
ret = clk_prepare_enable(bs->pll_clk);
if (ret) {
clk_disable_unprepare(bs->clk);
return ret;
}
}
spi_controller_resume(host);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(bcmbca_hsspi_pm_ops, bcmbca_hsspi_suspend,
bcmbca_hsspi_resume);
static const struct of_device_id bcmbca_hsspi_of_match[] = {
{ .compatible = "brcm,bcmbca-hsspi-v1.1", },
{},
};
MODULE_DEVICE_TABLE(of, bcmbca_hsspi_of_match);
static struct platform_driver bcmbca_hsspi_driver = {
.driver = {
.name = "bcmbca-hsspi",
.pm = &bcmbca_hsspi_pm_ops,
.of_match_table = bcmbca_hsspi_of_match,
},
.probe = bcmbca_hsspi_probe,
.remove_new = bcmbca_hsspi_remove,
};
module_platform_driver(bcmbca_hsspi_driver);
MODULE_ALIAS("platform:bcmbca_hsspi");
MODULE_DESCRIPTION("Broadcom BCMBCA High Speed SPI Controller driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-bcmbca-hsspi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
* Copyright (C) 2013, 2021 Intel Corporation
*/
#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/spi/pxa2xx_spi.h>
#include <linux/spi/spi.h>
#include "spi-pxa2xx.h"
MODULE_AUTHOR("Stephen Street");
MODULE_DESCRIPTION("PXA2xx SSP SPI Controller");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa2xx-spi");
#define TIMOUT_DFLT 1000
/*
* For testing SSCR1 changes that require an SSP restart - basically
* everything except the service and interrupt enables. The PXA270
* developer manual says only SSCR1_SCFR, SSCR1_SPH and SSCR1_SPO need
* to be in this list, but the PXA255 developer manual lists all bits,
* without really meaning the service and interrupt enables.
*/
#define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \
| SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
| SSCR1_SFRMDIR | SSCR1_RWOT | SSCR1_TRAIL \
| SSCR1_IFS | SSCR1_STRF | SSCR1_EFWR \
| SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \
| SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
#define QUARK_X1000_SSCR1_CHANGE_MASK (QUARK_X1000_SSCR1_STRF \
| QUARK_X1000_SSCR1_EFWR \
| QUARK_X1000_SSCR1_RFT \
| QUARK_X1000_SSCR1_TFT \
| SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
#define CE4100_SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \
| SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
| SSCR1_SFRMDIR | SSCR1_RWOT | SSCR1_TRAIL \
| SSCR1_IFS | SSCR1_STRF | SSCR1_EFWR \
| CE4100_SSCR1_RFT | CE4100_SSCR1_TFT | SSCR1_MWDS \
| SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
#define LPSS_GENERAL_REG_RXTO_HOLDOFF_DISABLE BIT(24)
#define LPSS_CS_CONTROL_SW_MODE BIT(0)
#define LPSS_CS_CONTROL_CS_HIGH BIT(1)
#define LPSS_CAPS_CS_EN_SHIFT 9
#define LPSS_CAPS_CS_EN_MASK (0xf << LPSS_CAPS_CS_EN_SHIFT)
#define LPSS_PRIV_CLOCK_GATE 0x38
#define LPSS_PRIV_CLOCK_GATE_CLK_CTL_MASK 0x3
#define LPSS_PRIV_CLOCK_GATE_CLK_CTL_FORCE_ON 0x3
struct lpss_config {
/* LPSS offset from drv_data->ioaddr */
unsigned offset;
/* Register offsets from drv_data->lpss_base or -1 */
int reg_general;
int reg_ssp;
int reg_cs_ctrl;
int reg_capabilities;
/* FIFO thresholds */
u32 rx_threshold;
u32 tx_threshold_lo;
u32 tx_threshold_hi;
/* Chip select control */
unsigned cs_sel_shift;
unsigned cs_sel_mask;
unsigned cs_num;
/* Quirks */
unsigned cs_clk_stays_gated : 1;
};
/* Keep these sorted with enum pxa_ssp_type */
static const struct lpss_config lpss_platforms[] = {
{ /* LPSS_LPT_SSP */
.offset = 0x800,
.reg_general = 0x08,
.reg_ssp = 0x0c,
.reg_cs_ctrl = 0x18,
.reg_capabilities = -1,
.rx_threshold = 64,
.tx_threshold_lo = 160,
.tx_threshold_hi = 224,
},
{ /* LPSS_BYT_SSP */
.offset = 0x400,
.reg_general = 0x08,
.reg_ssp = 0x0c,
.reg_cs_ctrl = 0x18,
.reg_capabilities = -1,
.rx_threshold = 64,
.tx_threshold_lo = 160,
.tx_threshold_hi = 224,
},
{ /* LPSS_BSW_SSP */
.offset = 0x400,
.reg_general = 0x08,
.reg_ssp = 0x0c,
.reg_cs_ctrl = 0x18,
.reg_capabilities = -1,
.rx_threshold = 64,
.tx_threshold_lo = 160,
.tx_threshold_hi = 224,
.cs_sel_shift = 2,
.cs_sel_mask = 1 << 2,
.cs_num = 2,
},
{ /* LPSS_SPT_SSP */
.offset = 0x200,
.reg_general = -1,
.reg_ssp = 0x20,
.reg_cs_ctrl = 0x24,
.reg_capabilities = -1,
.rx_threshold = 1,
.tx_threshold_lo = 32,
.tx_threshold_hi = 56,
},
{ /* LPSS_BXT_SSP */
.offset = 0x200,
.reg_general = -1,
.reg_ssp = 0x20,
.reg_cs_ctrl = 0x24,
.reg_capabilities = 0xfc,
.rx_threshold = 1,
.tx_threshold_lo = 16,
.tx_threshold_hi = 48,
.cs_sel_shift = 8,
.cs_sel_mask = 3 << 8,
.cs_clk_stays_gated = true,
},
{ /* LPSS_CNL_SSP */
.offset = 0x200,
.reg_general = -1,
.reg_ssp = 0x20,
.reg_cs_ctrl = 0x24,
.reg_capabilities = 0xfc,
.rx_threshold = 1,
.tx_threshold_lo = 32,
.tx_threshold_hi = 56,
.cs_sel_shift = 8,
.cs_sel_mask = 3 << 8,
.cs_clk_stays_gated = true,
},
};
static inline const struct lpss_config
*lpss_get_config(const struct driver_data *drv_data)
{
return &lpss_platforms[drv_data->ssp_type - LPSS_LPT_SSP];
}
static bool is_lpss_ssp(const struct driver_data *drv_data)
{
switch (drv_data->ssp_type) {
case LPSS_LPT_SSP:
case LPSS_BYT_SSP:
case LPSS_BSW_SSP:
case LPSS_SPT_SSP:
case LPSS_BXT_SSP:
case LPSS_CNL_SSP:
return true;
default:
return false;
}
}
static bool is_quark_x1000_ssp(const struct driver_data *drv_data)
{
return drv_data->ssp_type == QUARK_X1000_SSP;
}
static bool is_mmp2_ssp(const struct driver_data *drv_data)
{
return drv_data->ssp_type == MMP2_SSP;
}
static bool is_mrfld_ssp(const struct driver_data *drv_data)
{
return drv_data->ssp_type == MRFLD_SSP;
}
static void pxa2xx_spi_update(const struct driver_data *drv_data, u32 reg, u32 mask, u32 value)
{
if ((pxa2xx_spi_read(drv_data, reg) & mask) != value)
pxa2xx_spi_write(drv_data, reg, value & mask);
}
static u32 pxa2xx_spi_get_ssrc1_change_mask(const struct driver_data *drv_data)
{
switch (drv_data->ssp_type) {
case QUARK_X1000_SSP:
return QUARK_X1000_SSCR1_CHANGE_MASK;
case CE4100_SSP:
return CE4100_SSCR1_CHANGE_MASK;
default:
return SSCR1_CHANGE_MASK;
}
}
static u32
pxa2xx_spi_get_rx_default_thre(const struct driver_data *drv_data)
{
switch (drv_data->ssp_type) {
case QUARK_X1000_SSP:
return RX_THRESH_QUARK_X1000_DFLT;
case CE4100_SSP:
return RX_THRESH_CE4100_DFLT;
default:
return RX_THRESH_DFLT;
}
}
static bool pxa2xx_spi_txfifo_full(const struct driver_data *drv_data)
{
u32 mask;
switch (drv_data->ssp_type) {
case QUARK_X1000_SSP:
mask = QUARK_X1000_SSSR_TFL_MASK;
break;
case CE4100_SSP:
mask = CE4100_SSSR_TFL_MASK;
break;
default:
mask = SSSR_TFL_MASK;
break;
}
return read_SSSR_bits(drv_data, mask) == mask;
}
static void pxa2xx_spi_clear_rx_thre(const struct driver_data *drv_data,
u32 *sccr1_reg)
{
u32 mask;
switch (drv_data->ssp_type) {
case QUARK_X1000_SSP:
mask = QUARK_X1000_SSCR1_RFT;
break;
case CE4100_SSP:
mask = CE4100_SSCR1_RFT;
break;
default:
mask = SSCR1_RFT;
break;
}
*sccr1_reg &= ~mask;
}
static void pxa2xx_spi_set_rx_thre(const struct driver_data *drv_data,
u32 *sccr1_reg, u32 threshold)
{
switch (drv_data->ssp_type) {
case QUARK_X1000_SSP:
*sccr1_reg |= QUARK_X1000_SSCR1_RxTresh(threshold);
break;
case CE4100_SSP:
*sccr1_reg |= CE4100_SSCR1_RxTresh(threshold);
break;
default:
*sccr1_reg |= SSCR1_RxTresh(threshold);
break;
}
}
static u32 pxa2xx_configure_sscr0(const struct driver_data *drv_data,
u32 clk_div, u8 bits)
{
switch (drv_data->ssp_type) {
case QUARK_X1000_SSP:
return clk_div
| QUARK_X1000_SSCR0_Motorola
| QUARK_X1000_SSCR0_DataSize(bits > 32 ? 8 : bits);
default:
return clk_div
| SSCR0_Motorola
| SSCR0_DataSize(bits > 16 ? bits - 16 : bits)
| (bits > 16 ? SSCR0_EDSS : 0);
}
}
/*
* Read and write LPSS SSP private registers. Caller must first check that
* is_lpss_ssp() returns true before these can be called.
*/
static u32 __lpss_ssp_read_priv(struct driver_data *drv_data, unsigned offset)
{
WARN_ON(!drv_data->lpss_base);
return readl(drv_data->lpss_base + offset);
}
static void __lpss_ssp_write_priv(struct driver_data *drv_data,
unsigned offset, u32 value)
{
WARN_ON(!drv_data->lpss_base);
writel(value, drv_data->lpss_base + offset);
}
/*
* lpss_ssp_setup - perform LPSS SSP specific setup
* @drv_data: pointer to the driver private data
*
* Perform LPSS SSP specific setup. This function must be called first if
* one is going to use LPSS SSP private registers.
*/
static void lpss_ssp_setup(struct driver_data *drv_data)
{
const struct lpss_config *config;
u32 value;
config = lpss_get_config(drv_data);
drv_data->lpss_base = drv_data->ssp->mmio_base + config->offset;
/* Enable software chip select control */
value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);
value &= ~(LPSS_CS_CONTROL_SW_MODE | LPSS_CS_CONTROL_CS_HIGH);
value |= LPSS_CS_CONTROL_SW_MODE | LPSS_CS_CONTROL_CS_HIGH;
__lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value);
/* Enable multiblock DMA transfers */
if (drv_data->controller_info->enable_dma) {
__lpss_ssp_write_priv(drv_data, config->reg_ssp, 1);
if (config->reg_general >= 0) {
value = __lpss_ssp_read_priv(drv_data,
config->reg_general);
value |= LPSS_GENERAL_REG_RXTO_HOLDOFF_DISABLE;
__lpss_ssp_write_priv(drv_data,
config->reg_general, value);
}
}
}
static void lpss_ssp_select_cs(struct spi_device *spi,
const struct lpss_config *config)
{
struct driver_data *drv_data =
spi_controller_get_devdata(spi->controller);
u32 value, cs;
if (!config->cs_sel_mask)
return;
value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);
cs = spi_get_chipselect(spi, 0);
cs <<= config->cs_sel_shift;
if (cs != (value & config->cs_sel_mask)) {
/*
* When switching another chip select output active the
* output must be selected first and wait 2 ssp_clk cycles
* before changing state to active. Otherwise a short
* glitch will occur on the previous chip select since
* output select is latched but state control is not.
*/
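/*
 * Illustrative arithmetic only (the 25 MHz figure is an assumed example,
 * not taken from any datasheet): with max_speed_hz = 25 MHz, the ndelay()
 * below waits 1e9 / (25e6 / 2) = 80 ns, i.e. two 40 ns SSP clock periods,
 * which matches the "2 ssp_clk cycles" requirement described above.
 */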
value &= ~config->cs_sel_mask;
value |= cs;
__lpss_ssp_write_priv(drv_data,
config->reg_cs_ctrl, value);
ndelay(1000000000 /
(drv_data->controller->max_speed_hz / 2));
}
}
static void lpss_ssp_cs_control(struct spi_device *spi, bool enable)
{
struct driver_data *drv_data =
spi_controller_get_devdata(spi->controller);
const struct lpss_config *config;
u32 value;
config = lpss_get_config(drv_data);
if (enable)
lpss_ssp_select_cs(spi, config);
value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);
if (enable)
value &= ~LPSS_CS_CONTROL_CS_HIGH;
else
value |= LPSS_CS_CONTROL_CS_HIGH;
__lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value);
if (config->cs_clk_stays_gated) {
u32 clkgate;
/*
* Changing CS alone when dynamic clock gating is on won't
* actually flip CS at that time. This ruins SPI transfers
* that specify delays, or have no data. Toggle the clock mode
* to force on briefly to poke the CS pin to move.
*/
clkgate = __lpss_ssp_read_priv(drv_data, LPSS_PRIV_CLOCK_GATE);
value = (clkgate & ~LPSS_PRIV_CLOCK_GATE_CLK_CTL_MASK) |
LPSS_PRIV_CLOCK_GATE_CLK_CTL_FORCE_ON;
__lpss_ssp_write_priv(drv_data, LPSS_PRIV_CLOCK_GATE, value);
__lpss_ssp_write_priv(drv_data, LPSS_PRIV_CLOCK_GATE, clkgate);
}
}
static void cs_assert(struct spi_device *spi)
{
struct driver_data *drv_data =
spi_controller_get_devdata(spi->controller);
if (drv_data->ssp_type == CE4100_SSP) {
pxa2xx_spi_write(drv_data, SSSR, spi_get_chipselect(spi, 0));
return;
}
if (is_lpss_ssp(drv_data))
lpss_ssp_cs_control(spi, true);
}
static void cs_deassert(struct spi_device *spi)
{
struct driver_data *drv_data =
spi_controller_get_devdata(spi->controller);
unsigned long timeout;
if (drv_data->ssp_type == CE4100_SSP)
return;
/* Wait until SSP becomes idle before deasserting the CS */
timeout = jiffies + msecs_to_jiffies(10);
while (pxa2xx_spi_read(drv_data, SSSR) & SSSR_BSY &&
!time_after(jiffies, timeout))
cpu_relax();
if (is_lpss_ssp(drv_data))
lpss_ssp_cs_control(spi, false);
}
static void pxa2xx_spi_set_cs(struct spi_device *spi, bool level)
{
if (level)
cs_deassert(spi);
else
cs_assert(spi);
}
int pxa2xx_spi_flush(struct driver_data *drv_data)
{
unsigned long limit = loops_per_jiffy << 1;
do {
while (read_SSSR_bits(drv_data, SSSR_RNE))
pxa2xx_spi_read(drv_data, SSDR);
} while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_BSY) && --limit);
write_SSSR_CS(drv_data, SSSR_ROR);
return limit;
}
static void pxa2xx_spi_off(struct driver_data *drv_data)
{
/* On MMP, disabling SSE seems to corrupt the Rx FIFO */
if (is_mmp2_ssp(drv_data))
return;
pxa_ssp_disable(drv_data->ssp);
}
static int null_writer(struct driver_data *drv_data)
{
u8 n_bytes = drv_data->n_bytes;
if (pxa2xx_spi_txfifo_full(drv_data)
|| (drv_data->tx == drv_data->tx_end))
return 0;
pxa2xx_spi_write(drv_data, SSDR, 0);
drv_data->tx += n_bytes;
return 1;
}
static int null_reader(struct driver_data *drv_data)
{
u8 n_bytes = drv_data->n_bytes;
while (read_SSSR_bits(drv_data, SSSR_RNE) && drv_data->rx < drv_data->rx_end) {
pxa2xx_spi_read(drv_data, SSDR);
drv_data->rx += n_bytes;
}
return drv_data->rx == drv_data->rx_end;
}
static int u8_writer(struct driver_data *drv_data)
{
if (pxa2xx_spi_txfifo_full(drv_data)
|| (drv_data->tx == drv_data->tx_end))
return 0;
pxa2xx_spi_write(drv_data, SSDR, *(u8 *)(drv_data->tx));
++drv_data->tx;
return 1;
}
static int u8_reader(struct driver_data *drv_data)
{
while (read_SSSR_bits(drv_data, SSSR_RNE) && drv_data->rx < drv_data->rx_end) {
*(u8 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
++drv_data->rx;
}
return drv_data->rx == drv_data->rx_end;
}
static int u16_writer(struct driver_data *drv_data)
{
if (pxa2xx_spi_txfifo_full(drv_data)
|| (drv_data->tx == drv_data->tx_end))
return 0;
pxa2xx_spi_write(drv_data, SSDR, *(u16 *)(drv_data->tx));
drv_data->tx += 2;
return 1;
}
static int u16_reader(struct driver_data *drv_data)
{
while (read_SSSR_bits(drv_data, SSSR_RNE) && drv_data->rx < drv_data->rx_end) {
*(u16 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
drv_data->rx += 2;
}
return drv_data->rx == drv_data->rx_end;
}
static int u32_writer(struct driver_data *drv_data)
{
if (pxa2xx_spi_txfifo_full(drv_data)
|| (drv_data->tx == drv_data->tx_end))
return 0;
pxa2xx_spi_write(drv_data, SSDR, *(u32 *)(drv_data->tx));
drv_data->tx += 4;
return 1;
}
static int u32_reader(struct driver_data *drv_data)
{
while (read_SSSR_bits(drv_data, SSSR_RNE) && drv_data->rx < drv_data->rx_end) {
*(u32 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
drv_data->rx += 4;
}
return drv_data->rx == drv_data->rx_end;
}
static void reset_sccr1(struct driver_data *drv_data)
{
u32 mask = drv_data->int_cr1 | drv_data->dma_cr1, threshold;
struct chip_data *chip;
if (drv_data->controller->cur_msg) {
chip = spi_get_ctldata(drv_data->controller->cur_msg->spi);
threshold = chip->threshold;
} else {
threshold = 0;
}
switch (drv_data->ssp_type) {
case QUARK_X1000_SSP:
mask |= QUARK_X1000_SSCR1_RFT;
break;
case CE4100_SSP:
mask |= CE4100_SSCR1_RFT;
break;
default:
mask |= SSCR1_RFT;
break;
}
pxa2xx_spi_update(drv_data, SSCR1, mask, threshold);
}
static void int_stop_and_reset(struct driver_data *drv_data)
{
/* Clear and disable interrupts */
write_SSSR_CS(drv_data, drv_data->clear_sr);
reset_sccr1(drv_data);
if (pxa25x_ssp_comp(drv_data))
return;
pxa2xx_spi_write(drv_data, SSTO, 0);
}
static void int_error_stop(struct driver_data *drv_data, const char *msg, int err)
{
int_stop_and_reset(drv_data);
pxa2xx_spi_flush(drv_data);
pxa2xx_spi_off(drv_data);
dev_err(drv_data->ssp->dev, "%s\n", msg);
drv_data->controller->cur_msg->status = err;
spi_finalize_current_transfer(drv_data->controller);
}
static void int_transfer_complete(struct driver_data *drv_data)
{
int_stop_and_reset(drv_data);
spi_finalize_current_transfer(drv_data->controller);
}
static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
{
u32 irq_status;
irq_status = read_SSSR_bits(drv_data, drv_data->mask_sr);
if (!(pxa2xx_spi_read(drv_data, SSCR1) & SSCR1_TIE))
irq_status &= ~SSSR_TFS;
if (irq_status & SSSR_ROR) {
int_error_stop(drv_data, "interrupt_transfer: FIFO overrun", -EIO);
return IRQ_HANDLED;
}
if (irq_status & SSSR_TUR) {
int_error_stop(drv_data, "interrupt_transfer: FIFO underrun", -EIO);
return IRQ_HANDLED;
}
if (irq_status & SSSR_TINT) {
pxa2xx_spi_write(drv_data, SSSR, SSSR_TINT);
if (drv_data->read(drv_data)) {
int_transfer_complete(drv_data);
return IRQ_HANDLED;
}
}
/* Drain Rx FIFO, Fill Tx FIFO and prevent overruns */
do {
if (drv_data->read(drv_data)) {
int_transfer_complete(drv_data);
return IRQ_HANDLED;
}
} while (drv_data->write(drv_data));
if (drv_data->read(drv_data)) {
int_transfer_complete(drv_data);
return IRQ_HANDLED;
}
if (drv_data->tx == drv_data->tx_end) {
u32 bytes_left;
u32 sccr1_reg;
sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1);
sccr1_reg &= ~SSCR1_TIE;
/*
* PXA25x_SSP has no timeout, set up Rx threshold for
* the remaining Rx bytes.
*/
if (pxa25x_ssp_comp(drv_data)) {
u32 rx_thre;
pxa2xx_spi_clear_rx_thre(drv_data, &sccr1_reg);
bytes_left = drv_data->rx_end - drv_data->rx;
switch (drv_data->n_bytes) {
case 4:
bytes_left >>= 2;
break;
case 2:
bytes_left >>= 1;
break;
}
rx_thre = pxa2xx_spi_get_rx_default_thre(drv_data);
if (rx_thre > bytes_left)
rx_thre = bytes_left;
pxa2xx_spi_set_rx_thre(drv_data, &sccr1_reg, rx_thre);
}
pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
}
/* We did something */
return IRQ_HANDLED;
}
static void handle_bad_msg(struct driver_data *drv_data)
{
int_stop_and_reset(drv_data);
pxa2xx_spi_off(drv_data);
dev_err(drv_data->ssp->dev, "bad message state in interrupt handler\n");
}
static irqreturn_t ssp_int(int irq, void *dev_id)
{
struct driver_data *drv_data = dev_id;
u32 sccr1_reg;
u32 mask = drv_data->mask_sr;
u32 status;
/*
* The IRQ might be shared with other peripherals so we must first
* check whether we are RPM suspended or not. If we are, we assume that
* the IRQ was not for us (we shouldn't be RPM suspended when the
* interrupt is enabled).
*/
if (pm_runtime_suspended(drv_data->ssp->dev))
return IRQ_NONE;
/*
* If the device is not yet in RPM suspended state and we get an
* interrupt that is meant for another device, check if status bits
* are all set to one. That means that the device is already
* powered off.
*/
status = pxa2xx_spi_read(drv_data, SSSR);
if (status == ~0)
return IRQ_NONE;
sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1);
/* Ignore possible writes if we don't need to write */
if (!(sccr1_reg & SSCR1_TIE))
mask &= ~SSSR_TFS;
/* Ignore RX timeout interrupt if it is disabled */
if (!(sccr1_reg & SSCR1_TINTE))
mask &= ~SSSR_TINT;
if (!(status & mask))
return IRQ_NONE;
pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg & ~drv_data->int_cr1);
pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
if (!drv_data->controller->cur_msg) {
handle_bad_msg(drv_data);
/* Never fail */
return IRQ_HANDLED;
}
return drv_data->transfer_handler(drv_data);
}
/*
* The Quark SPI has an additional 24 bit register (DDS_CLK_RATE) to multiply
* input frequency by fractions of 2^24. It also has a divider by 5.
*
* There are formulas to get baud rate value for given input frequency and
* divider parameters, such as DDS_CLK_RATE and SCR:
*
* Fsys = 200MHz
*
* Fssp = Fsys * DDS_CLK_RATE / 2^24 (1)
* Baud rate = Fsclk = Fssp / (2 * (SCR + 1)) (2)
*
* DDS_CLK_RATE is either 2^n or 2^n / 5.
* SCR is in range 0 .. 255
*
* Divisor = 5^i * 2^j * 2 * k
* i = [0, 1] i = 1 iff j = 0 or j > 3
* j = [0, 23] j = 0 iff i = 1
* k = [1, 256]
* Special case: j = 0, i = 1: Divisor = 2 / 5
*
* According to the specification, the recommended values for DDS_CLK_RATE
* are:
* Case 1: 2^n, n = [0, 23]
* Case 2: 2^24 * 2 / 5 (0x666666)
* Case 3: less than or equal to 2^24 / 5 / 16 (0x33333)
*
* In all cases the lowest possible value is better.
*
* The function calculates parameters for all cases and chooses the one closest
* to the asked baud rate.
*/
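/*
 * Worked example (plain arithmetic on formulas (1) and (2) above; the
 * target rate is only illustrative): for a requested 5 MHz clock, pick
 * DDS_CLK_RATE = 0x800000 (2^23), so Fssp = 200 MHz * 2^23 / 2^24 =
 * 100 MHz, and SCR = 9 gives Fsclk = 100 MHz / (2 * (9 + 1)) = 5 MHz.
 */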
static unsigned int quark_x1000_get_clk_div(int rate, u32 *dds)
{
unsigned long xtal = 200000000;
unsigned long fref = xtal / 2; /* mandatory division by 2, see (2); also the case 3 reference */
unsigned long fref1 = fref / 2; /* case 1 */
unsigned long fref2 = fref * 2 / 5; /* case 2 */
unsigned long scale;
unsigned long q, q1, q2;
long r, r1, r2;
u32 mul;
/* Case 1 */
/* Set initial value for DDS_CLK_RATE */
mul = (1 << 24) >> 1;
/* Calculate initial quot */
q1 = DIV_ROUND_UP(fref1, rate);
/* Scale q1 if it's too big */
if (q1 > 256) {
/* Scale q1 to range [1, 512] */
scale = fls_long(q1 - 1);
if (scale > 9) {
q1 >>= scale - 9;
mul >>= scale - 9;
}
/* Round the result if we have a remainder */
q1 += q1 & 1;
}
/* Decrease DDS_CLK_RATE as much as we can without loss in precision */
scale = __ffs(q1);
q1 >>= scale;
mul >>= scale;
/* Get the remainder */
r1 = abs(fref1 / (1 << (24 - fls_long(mul))) / q1 - rate);
/* Case 2 */
q2 = DIV_ROUND_UP(fref2, rate);
r2 = abs(fref2 / q2 - rate);
/*
* Choose the better of the two: the smaller the remainder, the better. We
* can't use case 2 if q2 is greater than 256 since the SCR register can
* only hold values 0 .. 255.
*/
if (r2 >= r1 || q2 > 256) {
/* case 1 is better */
r = r1;
q = q1;
} else {
/* case 2 is better */
r = r2;
q = q2;
mul = (1 << 24) * 2 / 5;
}
/* Check case 3 only if the divisor is big enough */
if (fref / rate >= 80) {
u64 fssp;
u32 m;
/* Calculate initial quot */
q1 = DIV_ROUND_UP(fref, rate);
m = (1 << 24) / q1;
/* Get the remainder */
fssp = (u64)fref * m;
do_div(fssp, 1 << 24);
r1 = abs(fssp - rate);
/* Choose this one if it suits better */
if (r1 < r) {
/* case 3 is better */
q = 1;
mul = m;
}
}
*dds = mul;
return q - 1;
}
static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
{
unsigned long ssp_clk = drv_data->controller->max_speed_hz;
const struct ssp_device *ssp = drv_data->ssp;
rate = min_t(int, ssp_clk, rate);
/*
* Calculate the divisor for the SCR (Serial Clock Rate) so that the
* SSP transmission rate cannot be greater than the device rate.
*/
if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP)
return (DIV_ROUND_UP(ssp_clk, 2 * rate) - 1) & 0xff;
else
return (DIV_ROUND_UP(ssp_clk, rate) - 1) & 0xfff;
}
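/*
 * Example with assumed numbers (not from any datasheet): with
 * ssp_clk = 100 MHz and a requested rate of 10 MHz, the non-PXA25x
 * branch above returns DIV_ROUND_UP(100 MHz, 10 MHz) - 1 = 9, and the
 * resulting bit clock is ssp_clk / (SCR + 1) = 10 MHz. The
 * PXA25x/CE4100 branch returns DIV_ROUND_UP(100 MHz, 2 * 10 MHz) - 1 = 4,
 * giving ssp_clk / (2 * (SCR + 1)) = 10 MHz.
 */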
static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data,
int rate)
{
struct chip_data *chip =
spi_get_ctldata(drv_data->controller->cur_msg->spi);
unsigned int clk_div;
switch (drv_data->ssp_type) {
case QUARK_X1000_SSP:
clk_div = quark_x1000_get_clk_div(rate, &chip->dds_rate);
break;
default:
clk_div = ssp_get_clk_div(drv_data, rate);
break;
}
return clk_div << 8;
}
static bool pxa2xx_spi_can_dma(struct spi_controller *controller,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct chip_data *chip = spi_get_ctldata(spi);
return chip->enable_dma &&
xfer->len <= MAX_DMA_LEN &&
xfer->len >= chip->dma_burst_size;
}
static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
struct spi_device *spi,
struct spi_transfer *transfer)
{
struct driver_data *drv_data = spi_controller_get_devdata(controller);
struct spi_message *message = controller->cur_msg;
struct chip_data *chip = spi_get_ctldata(spi);
u32 dma_thresh = chip->dma_threshold;
u32 dma_burst = chip->dma_burst_size;
u32 change_mask = pxa2xx_spi_get_ssrc1_change_mask(drv_data);
u32 clk_div;
u8 bits;
u32 speed;
u32 cr0;
u32 cr1;
int err;
int dma_mapped;
/* Check if we can DMA this transfer */
if (transfer->len > MAX_DMA_LEN && chip->enable_dma) {
/* Reject already-mapped transfers; PIO won't always work */
if (message->is_dma_mapped
|| transfer->rx_dma || transfer->tx_dma) {
dev_err(&spi->dev,
"Mapped transfer length of %u is greater than %d\n",
transfer->len, MAX_DMA_LEN);
return -EINVAL;
}
/* Warn ... we force this to PIO mode */
dev_warn_ratelimited(&spi->dev,
"DMA disabled for transfer length %u greater than %d\n",
transfer->len, MAX_DMA_LEN);
}
/* Setup the transfer state based on the type of transfer */
if (pxa2xx_spi_flush(drv_data) == 0) {
dev_err(&spi->dev, "Flush failed\n");
return -EIO;
}
drv_data->tx = (void *)transfer->tx_buf;
drv_data->tx_end = drv_data->tx + transfer->len;
drv_data->rx = transfer->rx_buf;
drv_data->rx_end = drv_data->rx + transfer->len;
/* Change speed and bit per word on a per transfer */
bits = transfer->bits_per_word;
speed = transfer->speed_hz;
clk_div = pxa2xx_ssp_get_clk_div(drv_data, speed);
if (bits <= 8) {
drv_data->n_bytes = 1;
drv_data->read = drv_data->rx ? u8_reader : null_reader;
drv_data->write = drv_data->tx ? u8_writer : null_writer;
} else if (bits <= 16) {
drv_data->n_bytes = 2;
drv_data->read = drv_data->rx ? u16_reader : null_reader;
drv_data->write = drv_data->tx ? u16_writer : null_writer;
} else if (bits <= 32) {
drv_data->n_bytes = 4;
drv_data->read = drv_data->rx ? u32_reader : null_reader;
drv_data->write = drv_data->tx ? u32_writer : null_writer;
}
/*
* If bits per word is changed in DMA mode, then must check
* the thresholds and burst also.
*/
if (chip->enable_dma) {
if (pxa2xx_spi_set_dma_burst_and_threshold(chip,
spi,
bits, &dma_burst,
&dma_thresh))
dev_warn_ratelimited(&spi->dev,
"DMA burst size reduced to match bits_per_word\n");
}
dma_mapped = controller->can_dma &&
controller->can_dma(controller, spi, transfer) &&
controller->cur_msg_mapped;
if (dma_mapped) {
/* Ensure we have the correct interrupt handler */
drv_data->transfer_handler = pxa2xx_spi_dma_transfer;
err = pxa2xx_spi_dma_prepare(drv_data, transfer);
if (err)
return err;
/* Clear status and start DMA engine */
cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1;
pxa2xx_spi_write(drv_data, SSSR, drv_data->clear_sr);
pxa2xx_spi_dma_start(drv_data);
} else {
/* Ensure we have the correct interrupt handler */
drv_data->transfer_handler = interrupt_transfer;
/* Clear status */
cr1 = chip->cr1 | chip->threshold | drv_data->int_cr1;
write_SSSR_CS(drv_data, drv_data->clear_sr);
}
/* NOTE: PXA25x_SSP _could_ use external clocking ... */
cr0 = pxa2xx_configure_sscr0(drv_data, clk_div, bits);
if (!pxa25x_ssp_comp(drv_data))
dev_dbg(&spi->dev, "%u Hz actual, %s\n",
controller->max_speed_hz
/ (1 + ((cr0 & SSCR0_SCR(0xfff)) >> 8)),
dma_mapped ? "DMA" : "PIO");
else
dev_dbg(&spi->dev, "%u Hz actual, %s\n",
controller->max_speed_hz / 2
/ (1 + ((cr0 & SSCR0_SCR(0x0ff)) >> 8)),
dma_mapped ? "DMA" : "PIO");
if (is_lpss_ssp(drv_data)) {
pxa2xx_spi_update(drv_data, SSIRF, GENMASK(7, 0), chip->lpss_rx_threshold);
pxa2xx_spi_update(drv_data, SSITF, GENMASK(15, 0), chip->lpss_tx_threshold);
}
if (is_mrfld_ssp(drv_data)) {
u32 mask = SFIFOTT_RFT | SFIFOTT_TFT;
u32 thresh = 0;
thresh |= SFIFOTT_RxThresh(chip->lpss_rx_threshold);
thresh |= SFIFOTT_TxThresh(chip->lpss_tx_threshold);
pxa2xx_spi_update(drv_data, SFIFOTT, mask, thresh);
}
if (is_quark_x1000_ssp(drv_data))
pxa2xx_spi_update(drv_data, DDS_RATE, GENMASK(23, 0), chip->dds_rate);
/* Stop the SSP */
if (!is_mmp2_ssp(drv_data))
pxa_ssp_disable(drv_data->ssp);
if (!pxa25x_ssp_comp(drv_data))
pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
/* First set CR1 without interrupt and service enables */
pxa2xx_spi_update(drv_data, SSCR1, change_mask, cr1);
/* See if we need to reload the configuration registers */
pxa2xx_spi_update(drv_data, SSCR0, GENMASK(31, 0), cr0);
/* Restart the SSP */
pxa_ssp_enable(drv_data->ssp);
if (is_mmp2_ssp(drv_data)) {
u8 tx_level = read_SSSR_bits(drv_data, SSSR_TFL_MASK) >> 8;
if (tx_level) {
/* On MMP2, flipping SSE doesn't empty the Tx FIFO. */
dev_warn(&spi->dev, "%u bytes of garbage in Tx FIFO!\n", tx_level);
if (tx_level > transfer->len)
tx_level = transfer->len;
drv_data->tx += tx_level;
}
}
if (spi_controller_is_target(controller)) {
while (drv_data->write(drv_data))
;
if (drv_data->gpiod_ready) {
gpiod_set_value(drv_data->gpiod_ready, 1);
udelay(1);
gpiod_set_value(drv_data->gpiod_ready, 0);
}
}
/*
* Release the data by enabling service requests and interrupts,
* without changing any mode bits.
*/
pxa2xx_spi_write(drv_data, SSCR1, cr1);
return 1;
}
static int pxa2xx_spi_target_abort(struct spi_controller *controller)
{
struct driver_data *drv_data = spi_controller_get_devdata(controller);
int_error_stop(drv_data, "transfer aborted", -EINTR);
return 0;
}
static void pxa2xx_spi_handle_err(struct spi_controller *controller,
struct spi_message *msg)
{
struct driver_data *drv_data = spi_controller_get_devdata(controller);
int_stop_and_reset(drv_data);
/* Disable the SSP */
pxa2xx_spi_off(drv_data);
/*
* Stop the DMA if running. Note DMA callback handler may have unset
* the dma_running already, which is fine as stopping is not needed
* then, but we shouldn't rely on this flag for anything other than
* stopping, for instance to differentiate between PIO and DMA
* transfers.
*/
if (atomic_read(&drv_data->dma_running))
pxa2xx_spi_dma_stop(drv_data);
}
static int pxa2xx_spi_unprepare_transfer(struct spi_controller *controller)
{
struct driver_data *drv_data = spi_controller_get_devdata(controller);
/* Disable the SSP now */
pxa2xx_spi_off(drv_data);
return 0;
}
static int setup(struct spi_device *spi)
{
struct pxa2xx_spi_chip *chip_info;
struct chip_data *chip;
const struct lpss_config *config;
struct driver_data *drv_data =
spi_controller_get_devdata(spi->controller);
uint tx_thres, tx_hi_thres, rx_thres;
switch (drv_data->ssp_type) {
case QUARK_X1000_SSP:
tx_thres = TX_THRESH_QUARK_X1000_DFLT;
tx_hi_thres = 0;
rx_thres = RX_THRESH_QUARK_X1000_DFLT;
break;
case MRFLD_SSP:
tx_thres = TX_THRESH_MRFLD_DFLT;
tx_hi_thres = 0;
rx_thres = RX_THRESH_MRFLD_DFLT;
break;
case CE4100_SSP:
tx_thres = TX_THRESH_CE4100_DFLT;
tx_hi_thres = 0;
rx_thres = RX_THRESH_CE4100_DFLT;
break;
case LPSS_LPT_SSP:
case LPSS_BYT_SSP:
case LPSS_BSW_SSP:
case LPSS_SPT_SSP:
case LPSS_BXT_SSP:
case LPSS_CNL_SSP:
config = lpss_get_config(drv_data);
tx_thres = config->tx_threshold_lo;
tx_hi_thres = config->tx_threshold_hi;
rx_thres = config->rx_threshold;
break;
default:
tx_hi_thres = 0;
if (spi_controller_is_target(drv_data->controller)) {
tx_thres = 1;
rx_thres = 2;
} else {
tx_thres = TX_THRESH_DFLT;
rx_thres = RX_THRESH_DFLT;
}
break;
}
/* Only allocate on the first setup */
chip = spi_get_ctldata(spi);
if (!chip) {
chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
if (!chip)
return -ENOMEM;
if (drv_data->ssp_type == CE4100_SSP) {
if (spi_get_chipselect(spi, 0) > 4) {
dev_err(&spi->dev,
"failed setup: cs number must not be > 4.\n");
kfree(chip);
return -EINVAL;
}
}
chip->enable_dma = drv_data->controller_info->enable_dma;
chip->timeout = TIMOUT_DFLT;
}
/*
* Protocol drivers may change the chip settings, so...
* if chip_info exists, use it.
*/
chip_info = spi->controller_data;
/* chip_info isn't always needed */
if (chip_info) {
if (chip_info->timeout)
chip->timeout = chip_info->timeout;
if (chip_info->tx_threshold)
tx_thres = chip_info->tx_threshold;
if (chip_info->tx_hi_threshold)
tx_hi_thres = chip_info->tx_hi_threshold;
if (chip_info->rx_threshold)
rx_thres = chip_info->rx_threshold;
chip->dma_threshold = 0;
}
chip->cr1 = 0;
if (spi_controller_is_target(drv_data->controller)) {
chip->cr1 |= SSCR1_SCFR;
chip->cr1 |= SSCR1_SCLKDIR;
chip->cr1 |= SSCR1_SFRMDIR;
chip->cr1 |= SSCR1_SPH;
}
if (is_lpss_ssp(drv_data)) {
chip->lpss_rx_threshold = SSIRF_RxThresh(rx_thres);
chip->lpss_tx_threshold = SSITF_TxLoThresh(tx_thres) |
SSITF_TxHiThresh(tx_hi_thres);
}
if (is_mrfld_ssp(drv_data)) {
chip->lpss_rx_threshold = rx_thres;
chip->lpss_tx_threshold = tx_thres;
}
/*
* Set DMA burst and threshold outside of chip_info path so that if
* chip_info goes away after setting chip->enable_dma, the burst and
* threshold can still respond to changes in bits_per_word.
*/
if (chip->enable_dma) {
/* Set up legal burst and threshold for DMA */
if (pxa2xx_spi_set_dma_burst_and_threshold(chip, spi,
spi->bits_per_word,
&chip->dma_burst_size,
&chip->dma_threshold)) {
dev_warn(&spi->dev,
"in setup: DMA burst size reduced to match bits_per_word\n");
}
dev_dbg(&spi->dev,
"in setup: DMA burst size set to %u\n",
chip->dma_burst_size);
}
switch (drv_data->ssp_type) {
case QUARK_X1000_SSP:
chip->threshold = (QUARK_X1000_SSCR1_RxTresh(rx_thres)
& QUARK_X1000_SSCR1_RFT)
| (QUARK_X1000_SSCR1_TxTresh(tx_thres)
& QUARK_X1000_SSCR1_TFT);
break;
case CE4100_SSP:
chip->threshold = (CE4100_SSCR1_RxTresh(rx_thres) & CE4100_SSCR1_RFT) |
(CE4100_SSCR1_TxTresh(tx_thres) & CE4100_SSCR1_TFT);
break;
default:
chip->threshold = (SSCR1_RxTresh(rx_thres) & SSCR1_RFT) |
(SSCR1_TxTresh(tx_thres) & SSCR1_TFT);
break;
}
chip->cr1 &= ~(SSCR1_SPO | SSCR1_SPH);
chip->cr1 |= ((spi->mode & SPI_CPHA) ? SSCR1_SPH : 0) |
((spi->mode & SPI_CPOL) ? SSCR1_SPO : 0);
if (spi->mode & SPI_LOOP)
chip->cr1 |= SSCR1_LBM;
spi_set_ctldata(spi, chip);
return 0;
}
static void cleanup(struct spi_device *spi)
{
struct chip_data *chip = spi_get_ctldata(spi);
kfree(chip);
}
static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param)
{
return param == chan->device->dev;
}
static struct pxa2xx_spi_controller *
pxa2xx_spi_init_pdata(struct platform_device *pdev)
{
struct pxa2xx_spi_controller *pdata;
struct device *dev = &pdev->dev;
struct device *parent = dev->parent;
struct ssp_device *ssp;
struct resource *res;
enum pxa_ssp_type type = SSP_UNDEFINED;
const void *match;
bool is_lpss_priv;
int status;
u64 uid;
is_lpss_priv = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lpss_priv");
match = device_get_match_data(dev);
if (match)
type = (uintptr_t)match;
else if (is_lpss_priv) {
u32 value;
status = device_property_read_u32(dev, "intel,spi-pxa2xx-type", &value);
if (status)
return ERR_PTR(status);
type = (enum pxa_ssp_type)value;
}
/* Validate the SSP type correctness */
if (!(type > SSP_UNDEFINED && type < SSP_MAX))
return ERR_PTR(-EINVAL);
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return ERR_PTR(-ENOMEM);
ssp = &pdata->ssp;
ssp->mmio_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(ssp->mmio_base))
return ERR_CAST(ssp->mmio_base);
ssp->phys_base = res->start;
/* Platforms with iDMA 64-bit */
if (is_lpss_priv) {
pdata->tx_param = parent;
pdata->rx_param = parent;
pdata->dma_filter = pxa2xx_spi_idma_filter;
}
ssp->clk = devm_clk_get(dev, NULL);
if (IS_ERR(ssp->clk))
return ERR_CAST(ssp->clk);
ssp->irq = platform_get_irq(pdev, 0);
if (ssp->irq < 0)
return ERR_PTR(ssp->irq);
ssp->type = type;
ssp->dev = dev;
status = acpi_dev_uid_to_integer(ACPI_COMPANION(dev), &uid);
if (status)
ssp->port_id = -1;
else
ssp->port_id = uid;
pdata->is_target = device_property_read_bool(dev, "spi-slave");
pdata->num_chipselect = 1;
pdata->enable_dma = true;
pdata->dma_burst_size = 1;
return pdata;
}
static int pxa2xx_spi_fw_translate_cs(struct spi_controller *controller,
unsigned int cs)
{
struct driver_data *drv_data = spi_controller_get_devdata(controller);
if (has_acpi_companion(drv_data->ssp->dev)) {
switch (drv_data->ssp_type) {
/*
* For Atoms the ACPI DeviceSelection used by the Windows
* driver starts from 1 instead of 0 so translate it here
* to match what Linux expects.
*/
case LPSS_BYT_SSP:
case LPSS_BSW_SSP:
return cs - 1;
default:
break;
}
}
return cs;
}
static size_t pxa2xx_spi_max_dma_transfer_size(struct spi_device *spi)
{
return MAX_DMA_LEN;
}
static int pxa2xx_spi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct pxa2xx_spi_controller *platform_info;
struct spi_controller *controller;
struct driver_data *drv_data;
struct ssp_device *ssp;
const struct lpss_config *config;
int status;
u32 tmp;
platform_info = dev_get_platdata(dev);
if (!platform_info) {
platform_info = pxa2xx_spi_init_pdata(pdev);
if (IS_ERR(platform_info)) {
dev_err(&pdev->dev, "missing platform data\n");
return PTR_ERR(platform_info);
}
}
ssp = pxa_ssp_request(pdev->id, pdev->name);
if (!ssp)
ssp = &platform_info->ssp;
if (!ssp->mmio_base) {
dev_err(&pdev->dev, "failed to get SSP\n");
return -ENODEV;
}
if (platform_info->is_target)
controller = devm_spi_alloc_target(dev, sizeof(*drv_data));
else
controller = devm_spi_alloc_host(dev, sizeof(*drv_data));
if (!controller) {
dev_err(&pdev->dev, "cannot alloc spi_controller\n");
status = -ENOMEM;
goto out_error_controller_alloc;
}
drv_data = spi_controller_get_devdata(controller);
drv_data->controller = controller;
drv_data->controller_info = platform_info;
drv_data->ssp = ssp;
device_set_node(&controller->dev, dev_fwnode(dev));
/* The spi->mode bits understood by this driver: */
controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
controller->bus_num = ssp->port_id;
controller->dma_alignment = DMA_ALIGNMENT;
controller->cleanup = cleanup;
controller->setup = setup;
controller->set_cs = pxa2xx_spi_set_cs;
controller->transfer_one = pxa2xx_spi_transfer_one;
controller->target_abort = pxa2xx_spi_target_abort;
controller->handle_err = pxa2xx_spi_handle_err;
controller->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer;
controller->fw_translate_cs = pxa2xx_spi_fw_translate_cs;
controller->auto_runtime_pm = true;
controller->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
drv_data->ssp_type = ssp->type;
if (pxa25x_ssp_comp(drv_data)) {
switch (drv_data->ssp_type) {
case QUARK_X1000_SSP:
controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
break;
default:
controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
break;
}
drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE;
drv_data->dma_cr1 = 0;
drv_data->clear_sr = SSSR_ROR;
drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR;
} else {
controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE;
drv_data->dma_cr1 = DEFAULT_DMA_CR1;
drv_data->clear_sr = SSSR_ROR | SSSR_TINT;
drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS
| SSSR_ROR | SSSR_TUR;
}
status = request_irq(ssp->irq, ssp_int, IRQF_SHARED, dev_name(dev),
drv_data);
if (status < 0) {
dev_err(&pdev->dev, "cannot get IRQ %d\n", ssp->irq);
goto out_error_controller_alloc;
}
/* Setup DMA if requested */
if (platform_info->enable_dma) {
status = pxa2xx_spi_dma_setup(drv_data);
if (status) {
dev_warn(dev, "no DMA channels available, using PIO\n");
platform_info->enable_dma = false;
} else {
controller->can_dma = pxa2xx_spi_can_dma;
controller->max_dma_len = MAX_DMA_LEN;
controller->max_transfer_size =
pxa2xx_spi_max_dma_transfer_size;
}
}
/* Enable SOC clock */
status = clk_prepare_enable(ssp->clk);
if (status)
goto out_error_dma_irq_alloc;
controller->max_speed_hz = clk_get_rate(ssp->clk);
/*
* Set a minimum speed for all platforms other than Intel Quark, which
* is able to do transfers below 1 Hz.
*/
if (!pxa25x_ssp_comp(drv_data))
controller->min_speed_hz =
DIV_ROUND_UP(controller->max_speed_hz, 4096);
else if (!is_quark_x1000_ssp(drv_data))
controller->min_speed_hz =
DIV_ROUND_UP(controller->max_speed_hz, 512);
pxa_ssp_disable(ssp);
/* Load default SSP configuration */
switch (drv_data->ssp_type) {
case QUARK_X1000_SSP:
tmp = QUARK_X1000_SSCR1_RxTresh(RX_THRESH_QUARK_X1000_DFLT) |
QUARK_X1000_SSCR1_TxTresh(TX_THRESH_QUARK_X1000_DFLT);
pxa2xx_spi_write(drv_data, SSCR1, tmp);
/* Use the Motorola SPI protocol and an 8-bit frame size */
tmp = QUARK_X1000_SSCR0_Motorola | QUARK_X1000_SSCR0_DataSize(8);
pxa2xx_spi_write(drv_data, SSCR0, tmp);
break;
case CE4100_SSP:
tmp = CE4100_SSCR1_RxTresh(RX_THRESH_CE4100_DFLT) |
CE4100_SSCR1_TxTresh(TX_THRESH_CE4100_DFLT);
pxa2xx_spi_write(drv_data, SSCR1, tmp);
tmp = SSCR0_SCR(2) | SSCR0_Motorola | SSCR0_DataSize(8);
pxa2xx_spi_write(drv_data, SSCR0, tmp);
break;
default:
if (spi_controller_is_target(controller)) {
tmp = SSCR1_SCFR |
SSCR1_SCLKDIR |
SSCR1_SFRMDIR |
SSCR1_RxTresh(2) |
SSCR1_TxTresh(1) |
SSCR1_SPH;
} else {
tmp = SSCR1_RxTresh(RX_THRESH_DFLT) |
SSCR1_TxTresh(TX_THRESH_DFLT);
}
pxa2xx_spi_write(drv_data, SSCR1, tmp);
tmp = SSCR0_Motorola | SSCR0_DataSize(8);
if (!spi_controller_is_target(controller))
tmp |= SSCR0_SCR(2);
pxa2xx_spi_write(drv_data, SSCR0, tmp);
break;
}
if (!pxa25x_ssp_comp(drv_data))
pxa2xx_spi_write(drv_data, SSTO, 0);
if (!is_quark_x1000_ssp(drv_data))
pxa2xx_spi_write(drv_data, SSPSP, 0);
if (is_lpss_ssp(drv_data)) {
lpss_ssp_setup(drv_data);
config = lpss_get_config(drv_data);
if (config->reg_capabilities >= 0) {
tmp = __lpss_ssp_read_priv(drv_data,
config->reg_capabilities);
tmp &= LPSS_CAPS_CS_EN_MASK;
tmp >>= LPSS_CAPS_CS_EN_SHIFT;
platform_info->num_chipselect = ffz(tmp);
} else if (config->cs_num) {
platform_info->num_chipselect = config->cs_num;
}
}
controller->num_chipselect = platform_info->num_chipselect;
controller->use_gpio_descriptors = true;
if (platform_info->is_target) {
drv_data->gpiod_ready = devm_gpiod_get_optional(dev,
"ready", GPIOD_OUT_LOW);
if (IS_ERR(drv_data->gpiod_ready)) {
status = PTR_ERR(drv_data->gpiod_ready);
goto out_error_clock_enabled;
}
}
pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
/* Register with the SPI framework */
platform_set_drvdata(pdev, drv_data);
status = spi_register_controller(controller);
if (status) {
dev_err(&pdev->dev, "problem registering SPI controller\n");
goto out_error_pm_runtime_enabled;
}
return status;
out_error_pm_runtime_enabled:
pm_runtime_disable(&pdev->dev);
out_error_clock_enabled:
clk_disable_unprepare(ssp->clk);
out_error_dma_irq_alloc:
pxa2xx_spi_dma_release(drv_data);
free_irq(ssp->irq, drv_data);
out_error_controller_alloc:
pxa_ssp_free(ssp);
return status;
}
static void pxa2xx_spi_remove(struct platform_device *pdev)
{
struct driver_data *drv_data = platform_get_drvdata(pdev);
struct ssp_device *ssp = drv_data->ssp;
pm_runtime_get_sync(&pdev->dev);
spi_unregister_controller(drv_data->controller);
/* Disable the SSP at the peripheral and SOC level */
pxa_ssp_disable(ssp);
clk_disable_unprepare(ssp->clk);
/* Release DMA */
if (drv_data->controller_info->enable_dma)
pxa2xx_spi_dma_release(drv_data);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
/* Release IRQ */
free_irq(ssp->irq, drv_data);
/* Release SSP */
pxa_ssp_free(ssp);
}
static int pxa2xx_spi_suspend(struct device *dev)
{
struct driver_data *drv_data = dev_get_drvdata(dev);
struct ssp_device *ssp = drv_data->ssp;
int status;
status = spi_controller_suspend(drv_data->controller);
if (status)
return status;
pxa_ssp_disable(ssp);
if (!pm_runtime_suspended(dev))
clk_disable_unprepare(ssp->clk);
return 0;
}
static int pxa2xx_spi_resume(struct device *dev)
{
struct driver_data *drv_data = dev_get_drvdata(dev);
struct ssp_device *ssp = drv_data->ssp;
int status;
/* Enable the SSP clock */
if (!pm_runtime_suspended(dev)) {
status = clk_prepare_enable(ssp->clk);
if (status)
return status;
}
/* Start the queue running */
return spi_controller_resume(drv_data->controller);
}
static int pxa2xx_spi_runtime_suspend(struct device *dev)
{
struct driver_data *drv_data = dev_get_drvdata(dev);
clk_disable_unprepare(drv_data->ssp->clk);
return 0;
}
static int pxa2xx_spi_runtime_resume(struct device *dev)
{
struct driver_data *drv_data = dev_get_drvdata(dev);
return clk_prepare_enable(drv_data->ssp->clk);
}
static const struct dev_pm_ops pxa2xx_spi_pm_ops = {
SYSTEM_SLEEP_PM_OPS(pxa2xx_spi_suspend, pxa2xx_spi_resume)
RUNTIME_PM_OPS(pxa2xx_spi_runtime_suspend, pxa2xx_spi_runtime_resume, NULL)
};
#ifdef CONFIG_ACPI
static const struct acpi_device_id pxa2xx_spi_acpi_match[] = {
{ "80860F0E", LPSS_BYT_SSP },
{ "8086228E", LPSS_BSW_SSP },
{ "INT33C0", LPSS_LPT_SSP },
{ "INT33C1", LPSS_LPT_SSP },
{ "INT3430", LPSS_LPT_SSP },
{ "INT3431", LPSS_LPT_SSP },
{}
};
MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);
#endif
static const struct of_device_id pxa2xx_spi_of_match[] __maybe_unused = {
{ .compatible = "marvell,mmp2-ssp", .data = (void *)MMP2_SSP },
{}
};
MODULE_DEVICE_TABLE(of, pxa2xx_spi_of_match);
static struct platform_driver driver = {
.driver = {
.name = "pxa2xx-spi",
.pm = pm_ptr(&pxa2xx_spi_pm_ops),
.acpi_match_table = ACPI_PTR(pxa2xx_spi_acpi_match),
.of_match_table = of_match_ptr(pxa2xx_spi_of_match),
},
.probe = pxa2xx_spi_probe,
.remove_new = pxa2xx_spi_remove,
};
static int __init pxa2xx_spi_init(void)
{
return platform_driver_register(&driver);
}
subsys_initcall(pxa2xx_spi_init);
static void __exit pxa2xx_spi_exit(void)
{
platform_driver_unregister(&driver);
}
module_exit(pxa2xx_spi_exit);
MODULE_SOFTDEP("pre: dw_dmac");
| linux-master | drivers/spi/spi-pxa2xx.c |
// SPDX-License-Identifier: GPL-2.0
//
// STMicroelectronics STM32 SPI Controller driver
//
// Copyright (C) 2017, STMicroelectronics - All Rights Reserved
// Author(s): Amelie Delaunay <[email protected]> for STMicroelectronics.
#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/spi/spi.h>
#define DRIVER_NAME "spi_stm32"
/* STM32F4 SPI registers */
#define STM32F4_SPI_CR1 0x00
#define STM32F4_SPI_CR2 0x04
#define STM32F4_SPI_SR 0x08
#define STM32F4_SPI_DR 0x0C
#define STM32F4_SPI_I2SCFGR 0x1C
/* STM32F4_SPI_CR1 bit fields */
#define STM32F4_SPI_CR1_CPHA BIT(0)
#define STM32F4_SPI_CR1_CPOL BIT(1)
#define STM32F4_SPI_CR1_MSTR BIT(2)
#define STM32F4_SPI_CR1_BR_SHIFT 3
#define STM32F4_SPI_CR1_BR GENMASK(5, 3)
#define STM32F4_SPI_CR1_SPE BIT(6)
#define STM32F4_SPI_CR1_LSBFRST BIT(7)
#define STM32F4_SPI_CR1_SSI BIT(8)
#define STM32F4_SPI_CR1_SSM BIT(9)
#define STM32F4_SPI_CR1_RXONLY BIT(10)
#define STM32F4_SPI_CR1_DFF BIT(11)
#define STM32F4_SPI_CR1_CRCNEXT BIT(12)
#define STM32F4_SPI_CR1_CRCEN BIT(13)
#define STM32F4_SPI_CR1_BIDIOE BIT(14)
#define STM32F4_SPI_CR1_BIDIMODE BIT(15)
#define STM32F4_SPI_CR1_BR_MIN 0
#define STM32F4_SPI_CR1_BR_MAX (GENMASK(5, 3) >> 3)
/* STM32F4_SPI_CR2 bit fields */
#define STM32F4_SPI_CR2_RXDMAEN BIT(0)
#define STM32F4_SPI_CR2_TXDMAEN BIT(1)
#define STM32F4_SPI_CR2_SSOE BIT(2)
#define STM32F4_SPI_CR2_FRF BIT(4)
#define STM32F4_SPI_CR2_ERRIE BIT(5)
#define STM32F4_SPI_CR2_RXNEIE BIT(6)
#define STM32F4_SPI_CR2_TXEIE BIT(7)
/* STM32F4_SPI_SR bit fields */
#define STM32F4_SPI_SR_RXNE BIT(0)
#define STM32F4_SPI_SR_TXE BIT(1)
#define STM32F4_SPI_SR_CHSIDE BIT(2)
#define STM32F4_SPI_SR_UDR BIT(3)
#define STM32F4_SPI_SR_CRCERR BIT(4)
#define STM32F4_SPI_SR_MODF BIT(5)
#define STM32F4_SPI_SR_OVR BIT(6)
#define STM32F4_SPI_SR_BSY BIT(7)
#define STM32F4_SPI_SR_FRE BIT(8)
/* STM32F4_SPI_I2SCFGR bit fields */
#define STM32F4_SPI_I2SCFGR_I2SMOD BIT(11)
/* STM32F4 SPI Baud Rate min/max divisor */
#define STM32F4_SPI_BR_DIV_MIN (2 << STM32F4_SPI_CR1_BR_MIN)
#define STM32F4_SPI_BR_DIV_MAX (2 << STM32F4_SPI_CR1_BR_MAX)
/* STM32H7 SPI registers */
#define STM32H7_SPI_CR1 0x00
#define STM32H7_SPI_CR2 0x04
#define STM32H7_SPI_CFG1 0x08
#define STM32H7_SPI_CFG2 0x0C
#define STM32H7_SPI_IER 0x10
#define STM32H7_SPI_SR 0x14
#define STM32H7_SPI_IFCR 0x18
#define STM32H7_SPI_TXDR 0x20
#define STM32H7_SPI_RXDR 0x30
#define STM32H7_SPI_I2SCFGR 0x50
/* STM32H7_SPI_CR1 bit fields */
#define STM32H7_SPI_CR1_SPE BIT(0)
#define STM32H7_SPI_CR1_MASRX BIT(8)
#define STM32H7_SPI_CR1_CSTART BIT(9)
#define STM32H7_SPI_CR1_CSUSP BIT(10)
#define STM32H7_SPI_CR1_HDDIR BIT(11)
#define STM32H7_SPI_CR1_SSI BIT(12)
/* STM32H7_SPI_CR2 bit fields */
#define STM32H7_SPI_CR2_TSIZE GENMASK(15, 0)
#define STM32H7_SPI_TSIZE_MAX GENMASK(15, 0)
/* STM32H7_SPI_CFG1 bit fields */
#define STM32H7_SPI_CFG1_DSIZE GENMASK(4, 0)
#define STM32H7_SPI_CFG1_FTHLV GENMASK(8, 5)
#define STM32H7_SPI_CFG1_RXDMAEN BIT(14)
#define STM32H7_SPI_CFG1_TXDMAEN BIT(15)
#define STM32H7_SPI_CFG1_MBR GENMASK(30, 28)
#define STM32H7_SPI_CFG1_MBR_SHIFT 28
#define STM32H7_SPI_CFG1_MBR_MIN 0
#define STM32H7_SPI_CFG1_MBR_MAX (GENMASK(30, 28) >> 28)
/* STM32H7_SPI_CFG2 bit fields */
#define STM32H7_SPI_CFG2_MIDI GENMASK(7, 4)
#define STM32H7_SPI_CFG2_COMM GENMASK(18, 17)
#define STM32H7_SPI_CFG2_SP GENMASK(21, 19)
#define STM32H7_SPI_CFG2_MASTER BIT(22)
#define STM32H7_SPI_CFG2_LSBFRST BIT(23)
#define STM32H7_SPI_CFG2_CPHA BIT(24)
#define STM32H7_SPI_CFG2_CPOL BIT(25)
#define STM32H7_SPI_CFG2_SSM BIT(26)
#define STM32H7_SPI_CFG2_SSIOP BIT(28)
#define STM32H7_SPI_CFG2_AFCNTR BIT(31)
/* STM32H7_SPI_IER bit fields */
#define STM32H7_SPI_IER_RXPIE BIT(0)
#define STM32H7_SPI_IER_TXPIE BIT(1)
#define STM32H7_SPI_IER_DXPIE BIT(2)
#define STM32H7_SPI_IER_EOTIE BIT(3)
#define STM32H7_SPI_IER_TXTFIE BIT(4)
#define STM32H7_SPI_IER_OVRIE BIT(6)
#define STM32H7_SPI_IER_MODFIE BIT(9)
#define STM32H7_SPI_IER_ALL GENMASK(10, 0)
/* STM32H7_SPI_SR bit fields */
#define STM32H7_SPI_SR_RXP BIT(0)
#define STM32H7_SPI_SR_TXP BIT(1)
#define STM32H7_SPI_SR_EOT BIT(3)
#define STM32H7_SPI_SR_OVR BIT(6)
#define STM32H7_SPI_SR_MODF BIT(9)
#define STM32H7_SPI_SR_SUSP BIT(11)
#define STM32H7_SPI_SR_RXPLVL GENMASK(14, 13)
#define STM32H7_SPI_SR_RXWNE BIT(15)
/* STM32H7_SPI_IFCR bit fields */
#define STM32H7_SPI_IFCR_ALL GENMASK(11, 3)
/* STM32H7_SPI_I2SCFGR bit fields */
#define STM32H7_SPI_I2SCFGR_I2SMOD BIT(0)
/* STM32H7 SPI Master Baud Rate min/max divisor */
#define STM32H7_SPI_MBR_DIV_MIN (2 << STM32H7_SPI_CFG1_MBR_MIN)
#define STM32H7_SPI_MBR_DIV_MAX (2 << STM32H7_SPI_CFG1_MBR_MAX)
/* STM32H7 SPI Communication mode */
#define STM32H7_SPI_FULL_DUPLEX 0
#define STM32H7_SPI_SIMPLEX_TX 1
#define STM32H7_SPI_SIMPLEX_RX 2
#define STM32H7_SPI_HALF_DUPLEX 3
/* SPI Communication type */
#define SPI_FULL_DUPLEX 0
#define SPI_SIMPLEX_TX 1
#define SPI_SIMPLEX_RX 2
#define SPI_3WIRE_TX 3
#define SPI_3WIRE_RX 4
#define STM32_SPI_AUTOSUSPEND_DELAY 1 /* 1 ms */
/*
* use PIO for small transfers, avoiding DMA setup/teardown overhead for drivers
* without fifo buffers.
*/
#define SPI_DMA_MIN_BYTES 16
/* STM32 SPI driver helpers */
#define STM32_SPI_MASTER_MODE(stm32_spi) (!(stm32_spi)->device_mode)
#define STM32_SPI_DEVICE_MODE(stm32_spi) ((stm32_spi)->device_mode)
/**
* struct stm32_spi_reg - stm32 SPI register & bitfield desc
* @reg: register offset
* @mask: bitfield mask
* @shift: left shift
*/
struct stm32_spi_reg {
int reg;
int mask;
int shift;
};
/**
* struct stm32_spi_regspec - stm32 registers definition, compatible dependent data
* @en: enable register and SPI enable bit
* @dma_rx_en: SPI DMA RX enable register and SPI DMA RX enable bit
* @dma_tx_en: SPI DMA TX enable register and SPI DMA TX enable bit
* @cpol: clock polarity register and polarity bit
* @cpha: clock phase register and phase bit
* @lsb_first: LSB transmitted first register and bit
* @cs_high: chips select active value
* @br: baud rate register and bitfields
* @rx: SPI RX data register
* @tx: SPI TX data register
*/
struct stm32_spi_regspec {
const struct stm32_spi_reg en;
const struct stm32_spi_reg dma_rx_en;
const struct stm32_spi_reg dma_tx_en;
const struct stm32_spi_reg cpol;
const struct stm32_spi_reg cpha;
const struct stm32_spi_reg lsb_first;
const struct stm32_spi_reg cs_high;
const struct stm32_spi_reg br;
const struct stm32_spi_reg rx;
const struct stm32_spi_reg tx;
};
struct stm32_spi;
/**
* struct stm32_spi_cfg - stm32 compatible configuration data
* @regs: registers descriptions
* @get_fifo_size: routine to get fifo size
* @get_bpw_mask: routine to get bits per word mask
* @disable: routine to disable controller
* @config: routine to configure controller as SPI Master
* @set_bpw: routine to configure registers for bits per word
* @set_mode: routine to configure registers to desired mode
* @set_data_idleness: optional routine to configure registers to desired idle
* time between frames (if driver has this functionality)
* @set_number_of_data: optional routine to configure registers to desired
* number of data (if driver has this functionality)
* @transfer_one_dma_start: routine to start transfer a single spi_transfer
* using DMA
* @dma_rx_cb: routine to call after DMA RX channel operation is complete
* @dma_tx_cb: routine to call after DMA TX channel operation is complete
* @transfer_one_irq: routine to configure interrupts for driver
* @irq_handler_event: Interrupt handler for SPI controller events
* @irq_handler_thread: thread of interrupt handler for SPI controller
* @baud_rate_div_min: minimum baud rate divisor
* @baud_rate_div_max: maximum baud rate divisor
* @has_fifo: boolean to know if fifo is used for driver
* @has_device_mode: is this compatible capable to switch on device mode
* @flags: compatible specific SPI controller flags used at registration time
*/
struct stm32_spi_cfg {
const struct stm32_spi_regspec *regs;
int (*get_fifo_size)(struct stm32_spi *spi);
int (*get_bpw_mask)(struct stm32_spi *spi);
void (*disable)(struct stm32_spi *spi);
int (*config)(struct stm32_spi *spi);
void (*set_bpw)(struct stm32_spi *spi);
int (*set_mode)(struct stm32_spi *spi, unsigned int comm_type);
void (*set_data_idleness)(struct stm32_spi *spi, u32 length);
int (*set_number_of_data)(struct stm32_spi *spi, u32 length);
void (*transfer_one_dma_start)(struct stm32_spi *spi);
void (*dma_rx_cb)(void *data);
void (*dma_tx_cb)(void *data);
int (*transfer_one_irq)(struct stm32_spi *spi);
irqreturn_t (*irq_handler_event)(int irq, void *dev_id);
irqreturn_t (*irq_handler_thread)(int irq, void *dev_id);
unsigned int baud_rate_div_min;
unsigned int baud_rate_div_max;
bool has_fifo;
bool has_device_mode;
u16 flags;
};
/**
* struct stm32_spi - private data of the SPI controller
* @dev: driver model representation of the controller
* @ctrl: controller interface
* @cfg: compatible configuration data
* @base: virtual memory area
* @clk: hw kernel clock feeding the SPI clock generator
* @clk_rate: rate of the hw kernel clock feeding the SPI clock generator
* @lock: prevent I/O concurrent access
* @irq: SPI controller interrupt line
* @fifo_size: size of the embedded fifo in bytes
* @cur_midi: master inter-data idleness in ns
* @cur_speed: speed configured in Hz
* @cur_half_period: time of a half bit in us
* @cur_bpw: number of bits in a single SPI data frame
* @cur_fthlv: fifo threshold level (data frames in a single data packet)
* @cur_comm: SPI communication mode
* @cur_xferlen: current transfer length in bytes
* @cur_usedma: boolean to know if dma is used in current transfer
* @tx_buf: data to be written, or NULL
* @rx_buf: data to be read, or NULL
* @tx_len: number of data to be written in bytes
* @rx_len: number of data to be read in bytes
* @dma_tx: dma channel for TX transfer
* @dma_rx: dma channel for RX transfer
* @phys_addr: SPI registers physical base address
* @device_mode: the controller is configured as SPI device
*/
struct stm32_spi {
struct device *dev;
struct spi_controller *ctrl;
const struct stm32_spi_cfg *cfg;
void __iomem *base;
struct clk *clk;
u32 clk_rate;
spinlock_t lock; /* prevent I/O concurrent access */
int irq;
unsigned int fifo_size;
unsigned int cur_midi;
unsigned int cur_speed;
unsigned int cur_half_period;
unsigned int cur_bpw;
unsigned int cur_fthlv;
unsigned int cur_comm;
unsigned int cur_xferlen;
bool cur_usedma;
const void *tx_buf;
void *rx_buf;
int tx_len;
int rx_len;
struct dma_chan *dma_tx;
struct dma_chan *dma_rx;
dma_addr_t phys_addr;
bool device_mode;
};
static const struct stm32_spi_regspec stm32f4_spi_regspec = {
.en = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_SPE },
.dma_rx_en = { STM32F4_SPI_CR2, STM32F4_SPI_CR2_RXDMAEN },
.dma_tx_en = { STM32F4_SPI_CR2, STM32F4_SPI_CR2_TXDMAEN },
.cpol = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_CPOL },
.cpha = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_CPHA },
.lsb_first = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_LSBFRST },
.cs_high = {},
.br = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_BR, STM32F4_SPI_CR1_BR_SHIFT },
.rx = { STM32F4_SPI_DR },
.tx = { STM32F4_SPI_DR },
};
static const struct stm32_spi_regspec stm32h7_spi_regspec = {
	/*
	 * SPI data transfer is enabled but spi_ker_ck is idle.
	 * CFG1 and CFG2 registers are write protected when SPE is enabled.
	 */
.en = { STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE },
.dma_rx_en = { STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_RXDMAEN },
.dma_tx_en = { STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_TXDMAEN },
.cpol = { STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_CPOL },
.cpha = { STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_CPHA },
.lsb_first = { STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_LSBFRST },
.cs_high = { STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_SSIOP },
.br = { STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_MBR,
STM32H7_SPI_CFG1_MBR_SHIFT },
.rx = { STM32H7_SPI_RXDR },
.tx = { STM32H7_SPI_TXDR },
};
static inline void stm32_spi_set_bits(struct stm32_spi *spi,
u32 offset, u32 bits)
{
writel_relaxed(readl_relaxed(spi->base + offset) | bits,
spi->base + offset);
}
static inline void stm32_spi_clr_bits(struct stm32_spi *spi,
u32 offset, u32 bits)
{
writel_relaxed(readl_relaxed(spi->base + offset) & ~bits,
spi->base + offset);
}
/**
* stm32h7_spi_get_fifo_size - Return fifo size
* @spi: pointer to the spi controller data structure
*/
static int stm32h7_spi_get_fifo_size(struct stm32_spi *spi)
{
unsigned long flags;
u32 count = 0;
spin_lock_irqsave(&spi->lock, flags);
stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE);
while (readl_relaxed(spi->base + STM32H7_SPI_SR) & STM32H7_SPI_SR_TXP)
writeb_relaxed(++count, spi->base + STM32H7_SPI_TXDR);
stm32_spi_clr_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE);
spin_unlock_irqrestore(&spi->lock, flags);
dev_dbg(spi->dev, "%d x 8-bit fifo size\n", count);
return count;
}
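/*
 * Note on the probing loop above (illustrative, not from the reference
 * manual): with the SPI enabled but no transfer started, TXP stays set while
 * the TX fifo still has room, so each writeb_relaxed() pushes one more byte
 * until the fifo is full and TXP drops. The number of bytes written is thus
 * the usable 8-bit fifo depth, commonly 8 or 16 bytes depending on the SPI
 * instance.
 */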
/**
* stm32f4_spi_get_bpw_mask - Return bits per word mask
* @spi: pointer to the spi controller data structure
*/
static int stm32f4_spi_get_bpw_mask(struct stm32_spi *spi)
{
dev_dbg(spi->dev, "8-bit or 16-bit data frame supported\n");
return SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
}
/**
* stm32h7_spi_get_bpw_mask - Return bits per word mask
* @spi: pointer to the spi controller data structure
*/
static int stm32h7_spi_get_bpw_mask(struct stm32_spi *spi)
{
unsigned long flags;
u32 cfg1, max_bpw;
spin_lock_irqsave(&spi->lock, flags);
/*
	 * The most significant bit of the DSIZE bit field is reserved when the
	 * maximum data size of the peripheral instance is limited to 16 bits.
*/
stm32_spi_set_bits(spi, STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_DSIZE);
cfg1 = readl_relaxed(spi->base + STM32H7_SPI_CFG1);
max_bpw = FIELD_GET(STM32H7_SPI_CFG1_DSIZE, cfg1) + 1;
spin_unlock_irqrestore(&spi->lock, flags);
dev_dbg(spi->dev, "%d-bit maximum data frame\n", max_bpw);
return SPI_BPW_RANGE_MASK(4, max_bpw);
}
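/*
 * Note (illustrative): the maximum word size is probed by writing all-ones to
 * the DSIZE field and reading it back. If DSIZE reads back as 0x1F, then
 * max_bpw = 31 + 1 = 32 and the mask covers 4..32 bits per word; an instance
 * limited to 16-bit data would read back 0xF and report 4..16 instead.
 */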
/**
* stm32_spi_prepare_mbr - Determine baud rate divisor value
* @spi: pointer to the spi controller data structure
* @speed_hz: requested speed
* @min_div: minimum baud rate divisor
* @max_div: maximum baud rate divisor
*
* Return baud rate divisor value in case of success or -EINVAL
*/
static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz,
u32 min_div, u32 max_div)
{
u32 div, mbrdiv;
/* Ensure spi->clk_rate is even */
div = DIV_ROUND_CLOSEST(spi->clk_rate & ~0x1, speed_hz);
/*
	 * The SPI framework sets xfer->speed_hz to ctrl->max_speed_hz if
	 * xfer->speed_hz is greater than ctrl->max_speed_hz, and it returns
	 * an error when xfer->speed_hz is lower than ctrl->min_speed_hz, so
	 * there is no need to check those bounds here. However, the resulting
	 * divisor still has to fit within [min_div, max_div].
*/
if ((div < min_div) || (div > max_div))
return -EINVAL;
/* Determine the first power of 2 greater than or equal to div */
if (div & (div - 1))
mbrdiv = fls(div);
else
mbrdiv = fls(div) - 1;
spi->cur_speed = spi->clk_rate / (1 << mbrdiv);
spi->cur_half_period = DIV_ROUND_CLOSEST(USEC_PER_SEC, 2 * spi->cur_speed);
return mbrdiv - 1;
}
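/*
 * Worked example with assumed values: for spi->clk_rate = 100 MHz and
 * speed_hz = 10 MHz, div = 10. Since 10 is not a power of two,
 * mbrdiv = fls(10) = 4, so cur_speed = 100 MHz / 2^4 = 6.25 MHz and the
 * function returns 3, the value programmed into the MBR/BR bitfield
 * (divider = 2^(field + 1)). A power-of-two div (e.g. 8) uses fls(div) - 1
 * instead and hits the requested rate exactly.
 */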
/**
* stm32h7_spi_prepare_fthlv - Determine FIFO threshold level
* @spi: pointer to the spi controller data structure
* @xfer_len: length of the message to be transferred
*/
static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi, u32 xfer_len)
{
u32 packet, bpw;
/* data packet should not exceed 1/2 of fifo space */
packet = clamp(xfer_len, 1U, spi->fifo_size / 2);
/* align packet size with data registers access */
bpw = DIV_ROUND_UP(spi->cur_bpw, 8);
return DIV_ROUND_UP(packet, bpw);
}
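/*
 * Worked example with assumed values: with a 16-byte fifo, cur_bpw = 16 and
 * xfer_len = 40 bytes, packet = clamp(40, 1, 8) = 8 bytes and bpw = 2 bytes
 * per frame, so the returned threshold is 4 data frames per packet.
 */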
/**
* stm32f4_spi_write_tx - Write bytes to Transmit Data Register
* @spi: pointer to the spi controller data structure
*
 * The amount read from tx_buf depends on the remaining bytes, to avoid
 * reading beyond the end of tx_buf.
*/
static void stm32f4_spi_write_tx(struct stm32_spi *spi)
{
if ((spi->tx_len > 0) && (readl_relaxed(spi->base + STM32F4_SPI_SR) &
STM32F4_SPI_SR_TXE)) {
u32 offs = spi->cur_xferlen - spi->tx_len;
if (spi->cur_bpw == 16) {
const u16 *tx_buf16 = (const u16 *)(spi->tx_buf + offs);
writew_relaxed(*tx_buf16, spi->base + STM32F4_SPI_DR);
spi->tx_len -= sizeof(u16);
} else {
const u8 *tx_buf8 = (const u8 *)(spi->tx_buf + offs);
writeb_relaxed(*tx_buf8, spi->base + STM32F4_SPI_DR);
spi->tx_len -= sizeof(u8);
}
}
dev_dbg(spi->dev, "%s: %d bytes left\n", __func__, spi->tx_len);
}
/**
* stm32h7_spi_write_txfifo - Write bytes in Transmit Data Register
* @spi: pointer to the spi controller data structure
*
 * The amount read from tx_buf depends on the remaining bytes, to avoid
 * reading beyond the end of tx_buf.
*/
static void stm32h7_spi_write_txfifo(struct stm32_spi *spi)
{
while ((spi->tx_len > 0) &&
(readl_relaxed(spi->base + STM32H7_SPI_SR) &
STM32H7_SPI_SR_TXP)) {
u32 offs = spi->cur_xferlen - spi->tx_len;
if (spi->tx_len >= sizeof(u32)) {
const u32 *tx_buf32 = (const u32 *)(spi->tx_buf + offs);
writel_relaxed(*tx_buf32, spi->base + STM32H7_SPI_TXDR);
spi->tx_len -= sizeof(u32);
} else if (spi->tx_len >= sizeof(u16)) {
const u16 *tx_buf16 = (const u16 *)(spi->tx_buf + offs);
writew_relaxed(*tx_buf16, spi->base + STM32H7_SPI_TXDR);
spi->tx_len -= sizeof(u16);
} else {
const u8 *tx_buf8 = (const u8 *)(spi->tx_buf + offs);
writeb_relaxed(*tx_buf8, spi->base + STM32H7_SPI_TXDR);
spi->tx_len -= sizeof(u8);
}
}
dev_dbg(spi->dev, "%s: %d bytes left\n", __func__, spi->tx_len);
}
/**
* stm32f4_spi_read_rx - Read bytes from Receive Data Register
* @spi: pointer to the spi controller data structure
*
 * The amount written to rx_buf depends on the remaining bytes, to avoid
 * writing beyond the end of rx_buf.
*/
static void stm32f4_spi_read_rx(struct stm32_spi *spi)
{
if ((spi->rx_len > 0) && (readl_relaxed(spi->base + STM32F4_SPI_SR) &
STM32F4_SPI_SR_RXNE)) {
u32 offs = spi->cur_xferlen - spi->rx_len;
if (spi->cur_bpw == 16) {
u16 *rx_buf16 = (u16 *)(spi->rx_buf + offs);
*rx_buf16 = readw_relaxed(spi->base + STM32F4_SPI_DR);
spi->rx_len -= sizeof(u16);
} else {
u8 *rx_buf8 = (u8 *)(spi->rx_buf + offs);
*rx_buf8 = readb_relaxed(spi->base + STM32F4_SPI_DR);
spi->rx_len -= sizeof(u8);
}
}
dev_dbg(spi->dev, "%s: %d bytes left\n", __func__, spi->rx_len);
}
/**
* stm32h7_spi_read_rxfifo - Read bytes in Receive Data Register
* @spi: pointer to the spi controller data structure
*
 * The amount written to rx_buf depends on the remaining bytes, to avoid
 * writing beyond the end of rx_buf.
*/
static void stm32h7_spi_read_rxfifo(struct stm32_spi *spi)
{
u32 sr = readl_relaxed(spi->base + STM32H7_SPI_SR);
u32 rxplvl = FIELD_GET(STM32H7_SPI_SR_RXPLVL, sr);
while ((spi->rx_len > 0) &&
((sr & STM32H7_SPI_SR_RXP) ||
((sr & STM32H7_SPI_SR_EOT) &&
((sr & STM32H7_SPI_SR_RXWNE) || (rxplvl > 0))))) {
u32 offs = spi->cur_xferlen - spi->rx_len;
if ((spi->rx_len >= sizeof(u32)) ||
(sr & STM32H7_SPI_SR_RXWNE)) {
u32 *rx_buf32 = (u32 *)(spi->rx_buf + offs);
*rx_buf32 = readl_relaxed(spi->base + STM32H7_SPI_RXDR);
spi->rx_len -= sizeof(u32);
} else if ((spi->rx_len >= sizeof(u16)) ||
(!(sr & STM32H7_SPI_SR_RXWNE) &&
(rxplvl >= 2 || spi->cur_bpw > 8))) {
u16 *rx_buf16 = (u16 *)(spi->rx_buf + offs);
*rx_buf16 = readw_relaxed(spi->base + STM32H7_SPI_RXDR);
spi->rx_len -= sizeof(u16);
} else {
u8 *rx_buf8 = (u8 *)(spi->rx_buf + offs);
*rx_buf8 = readb_relaxed(spi->base + STM32H7_SPI_RXDR);
spi->rx_len -= sizeof(u8);
}
sr = readl_relaxed(spi->base + STM32H7_SPI_SR);
rxplvl = FIELD_GET(STM32H7_SPI_SR_RXPLVL, sr);
}
dev_dbg(spi->dev, "%s: %d bytes left (sr=%08x)\n",
__func__, spi->rx_len, sr);
}
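/*
 * Access width logic used above: a 32-bit read is done while at least 4 bytes
 * remain or RXWNE reports a full word in the fifo; a 16-bit read is done when
 * at least 2 bytes remain, or at end of transfer when RXWNE is clear but
 * RXPLVL/cur_bpw indicate more than one byte is pending; otherwise single
 * bytes are popped so the fifo is drained without overrunning rx_buf.
 */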
/**
* stm32_spi_enable - Enable SPI controller
* @spi: pointer to the spi controller data structure
*/
static void stm32_spi_enable(struct stm32_spi *spi)
{
dev_dbg(spi->dev, "enable controller\n");
stm32_spi_set_bits(spi, spi->cfg->regs->en.reg,
spi->cfg->regs->en.mask);
}
/**
* stm32f4_spi_disable - Disable SPI controller
* @spi: pointer to the spi controller data structure
*/
static void stm32f4_spi_disable(struct stm32_spi *spi)
{
unsigned long flags;
u32 sr;
dev_dbg(spi->dev, "disable controller\n");
spin_lock_irqsave(&spi->lock, flags);
if (!(readl_relaxed(spi->base + STM32F4_SPI_CR1) &
STM32F4_SPI_CR1_SPE)) {
spin_unlock_irqrestore(&spi->lock, flags);
return;
}
/* Disable interrupts */
stm32_spi_clr_bits(spi, STM32F4_SPI_CR2, STM32F4_SPI_CR2_TXEIE |
STM32F4_SPI_CR2_RXNEIE |
STM32F4_SPI_CR2_ERRIE);
/* Wait until BSY = 0 */
if (readl_relaxed_poll_timeout_atomic(spi->base + STM32F4_SPI_SR,
sr, !(sr & STM32F4_SPI_SR_BSY),
10, 100000) < 0) {
dev_warn(spi->dev, "disabling condition timeout\n");
}
if (spi->cur_usedma && spi->dma_tx)
dmaengine_terminate_async(spi->dma_tx);
if (spi->cur_usedma && spi->dma_rx)
dmaengine_terminate_async(spi->dma_rx);
stm32_spi_clr_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_SPE);
stm32_spi_clr_bits(spi, STM32F4_SPI_CR2, STM32F4_SPI_CR2_TXDMAEN |
STM32F4_SPI_CR2_RXDMAEN);
/* Sequence to clear OVR flag */
readl_relaxed(spi->base + STM32F4_SPI_DR);
readl_relaxed(spi->base + STM32F4_SPI_SR);
spin_unlock_irqrestore(&spi->lock, flags);
}
/**
* stm32h7_spi_disable - Disable SPI controller
* @spi: pointer to the spi controller data structure
*
* RX-Fifo is flushed when SPI controller is disabled.
*/
static void stm32h7_spi_disable(struct stm32_spi *spi)
{
unsigned long flags;
u32 cr1;
dev_dbg(spi->dev, "disable controller\n");
spin_lock_irqsave(&spi->lock, flags);
cr1 = readl_relaxed(spi->base + STM32H7_SPI_CR1);
if (!(cr1 & STM32H7_SPI_CR1_SPE)) {
spin_unlock_irqrestore(&spi->lock, flags);
return;
}
/* Add a delay to make sure that transmission is ended. */
if (spi->cur_half_period)
udelay(spi->cur_half_period);
if (spi->cur_usedma && spi->dma_tx)
dmaengine_terminate_async(spi->dma_tx);
if (spi->cur_usedma && spi->dma_rx)
dmaengine_terminate_async(spi->dma_rx);
stm32_spi_clr_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE);
stm32_spi_clr_bits(spi, STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_TXDMAEN |
STM32H7_SPI_CFG1_RXDMAEN);
/* Disable interrupts and clear status flags */
writel_relaxed(0, spi->base + STM32H7_SPI_IER);
writel_relaxed(STM32H7_SPI_IFCR_ALL, spi->base + STM32H7_SPI_IFCR);
spin_unlock_irqrestore(&spi->lock, flags);
}
/**
* stm32_spi_can_dma - Determine if the transfer is eligible for DMA use
* @ctrl: controller interface
* @spi_dev: pointer to the spi device
* @transfer: pointer to spi transfer
*
 * If the controller has a fifo and the current transfer size is greater than
 * the fifo size, use DMA. Otherwise use DMA only for transfers longer than
 * the defined DMA minimum number of bytes.
*/
static bool stm32_spi_can_dma(struct spi_controller *ctrl,
struct spi_device *spi_dev,
struct spi_transfer *transfer)
{
unsigned int dma_size;
struct stm32_spi *spi = spi_controller_get_devdata(ctrl);
if (spi->cfg->has_fifo)
dma_size = spi->fifo_size;
else
dma_size = SPI_DMA_MIN_BYTES;
dev_dbg(spi->dev, "%s: %s\n", __func__,
(transfer->len > dma_size) ? "true" : "false");
return (transfer->len > dma_size);
}
/**
* stm32f4_spi_irq_event - Interrupt handler for SPI controller events
* @irq: interrupt line
* @dev_id: SPI controller ctrl interface
*/
static irqreturn_t stm32f4_spi_irq_event(int irq, void *dev_id)
{
struct spi_controller *ctrl = dev_id;
struct stm32_spi *spi = spi_controller_get_devdata(ctrl);
u32 sr, mask = 0;
bool end = false;
spin_lock(&spi->lock);
sr = readl_relaxed(spi->base + STM32F4_SPI_SR);
/*
	 * The BSY flag is not handled by the interrupt handler: it is normal
	 * for it to be set while a transfer is ongoing, so simply ignore it.
*/
sr &= ~STM32F4_SPI_SR_BSY;
if (!spi->cur_usedma && (spi->cur_comm == SPI_SIMPLEX_TX ||
spi->cur_comm == SPI_3WIRE_TX)) {
/* OVR flag shouldn't be handled for TX only mode */
sr &= ~(STM32F4_SPI_SR_OVR | STM32F4_SPI_SR_RXNE);
mask |= STM32F4_SPI_SR_TXE;
}
if (!spi->cur_usedma && (spi->cur_comm == SPI_FULL_DUPLEX ||
spi->cur_comm == SPI_SIMPLEX_RX ||
spi->cur_comm == SPI_3WIRE_RX)) {
/* TXE flag is set and is handled when RXNE flag occurs */
sr &= ~STM32F4_SPI_SR_TXE;
mask |= STM32F4_SPI_SR_RXNE | STM32F4_SPI_SR_OVR;
}
if (!(sr & mask)) {
dev_dbg(spi->dev, "spurious IT (sr=0x%08x)\n", sr);
spin_unlock(&spi->lock);
return IRQ_NONE;
}
if (sr & STM32F4_SPI_SR_OVR) {
dev_warn(spi->dev, "Overrun: received value discarded\n");
/* Sequence to clear OVR flag */
readl_relaxed(spi->base + STM32F4_SPI_DR);
readl_relaxed(spi->base + STM32F4_SPI_SR);
/*
* If overrun is detected, it means that something went wrong,
		 * so stop the current transfer. Otherwise the handler would keep
		 * waiting for the next RXNE event, but DR has already been read,
		 * so the end of transfer would never be reached.
*/
end = true;
goto end_irq;
}
if (sr & STM32F4_SPI_SR_TXE) {
if (spi->tx_buf)
stm32f4_spi_write_tx(spi);
if (spi->tx_len == 0)
end = true;
}
if (sr & STM32F4_SPI_SR_RXNE) {
stm32f4_spi_read_rx(spi);
if (spi->rx_len == 0)
end = true;
else if (spi->tx_buf)/* Load data for discontinuous mode */
stm32f4_spi_write_tx(spi);
}
end_irq:
if (end) {
		/* Immediately disable interrupts so no new ones are raised */
stm32_spi_clr_bits(spi, STM32F4_SPI_CR2,
STM32F4_SPI_CR2_TXEIE |
STM32F4_SPI_CR2_RXNEIE |
STM32F4_SPI_CR2_ERRIE);
spin_unlock(&spi->lock);
return IRQ_WAKE_THREAD;
}
spin_unlock(&spi->lock);
return IRQ_HANDLED;
}
/**
* stm32f4_spi_irq_thread - Thread of interrupt handler for SPI controller
* @irq: interrupt line
* @dev_id: SPI controller interface
*/
static irqreturn_t stm32f4_spi_irq_thread(int irq, void *dev_id)
{
struct spi_controller *ctrl = dev_id;
struct stm32_spi *spi = spi_controller_get_devdata(ctrl);
spi_finalize_current_transfer(ctrl);
stm32f4_spi_disable(spi);
return IRQ_HANDLED;
}
/**
* stm32h7_spi_irq_thread - Thread of interrupt handler for SPI controller
* @irq: interrupt line
* @dev_id: SPI controller interface
*/
static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
{
struct spi_controller *ctrl = dev_id;
struct stm32_spi *spi = spi_controller_get_devdata(ctrl);
u32 sr, ier, mask;
unsigned long flags;
bool end = false;
spin_lock_irqsave(&spi->lock, flags);
sr = readl_relaxed(spi->base + STM32H7_SPI_SR);
ier = readl_relaxed(spi->base + STM32H7_SPI_IER);
mask = ier;
/*
	 * EOTIE enables the irq for the EOT, SUSP and TXC events. Add SUSP to
	 * the mask so that it can be acknowledged later. TXC is cleared
	 * automatically.
*/
mask |= STM32H7_SPI_SR_SUSP;
/*
	 * DXPIE is set in Full-Duplex: a single interrupt is raised only when
	 * both TXP and RXP are set. So in Full-Duplex, the TXP and RXP events
	 * also need to be polled here.
*/
if ((spi->cur_comm == SPI_FULL_DUPLEX) && !spi->cur_usedma)
mask |= STM32H7_SPI_SR_TXP | STM32H7_SPI_SR_RXP;
if (!(sr & mask)) {
dev_warn(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n",
sr, ier);
spin_unlock_irqrestore(&spi->lock, flags);
return IRQ_NONE;
}
if (sr & STM32H7_SPI_SR_SUSP) {
static DEFINE_RATELIMIT_STATE(rs,
DEFAULT_RATELIMIT_INTERVAL * 10,
1);
ratelimit_set_flags(&rs, RATELIMIT_MSG_ON_RELEASE);
if (__ratelimit(&rs))
dev_dbg_ratelimited(spi->dev, "Communication suspended\n");
if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
stm32h7_spi_read_rxfifo(spi);
/*
* If communication is suspended while using DMA, it means
* that something went wrong, so stop the current transfer
*/
if (spi->cur_usedma)
end = true;
}
if (sr & STM32H7_SPI_SR_MODF) {
dev_warn(spi->dev, "Mode fault: transfer aborted\n");
end = true;
}
if (sr & STM32H7_SPI_SR_OVR) {
dev_err(spi->dev, "Overrun: RX data lost\n");
end = true;
}
if (sr & STM32H7_SPI_SR_EOT) {
if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
stm32h7_spi_read_rxfifo(spi);
if (!spi->cur_usedma ||
(spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX))
end = true;
}
if (sr & STM32H7_SPI_SR_TXP)
if (!spi->cur_usedma && (spi->tx_buf && (spi->tx_len > 0)))
stm32h7_spi_write_txfifo(spi);
if (sr & STM32H7_SPI_SR_RXP)
if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
stm32h7_spi_read_rxfifo(spi);
writel_relaxed(sr & mask, spi->base + STM32H7_SPI_IFCR);
spin_unlock_irqrestore(&spi->lock, flags);
if (end) {
stm32h7_spi_disable(spi);
spi_finalize_current_transfer(ctrl);
}
return IRQ_HANDLED;
}
/**
* stm32_spi_prepare_msg - set up the controller to transfer a single message
* @ctrl: controller interface
* @msg: pointer to spi message
*/
static int stm32_spi_prepare_msg(struct spi_controller *ctrl,
struct spi_message *msg)
{
struct stm32_spi *spi = spi_controller_get_devdata(ctrl);
struct spi_device *spi_dev = msg->spi;
struct device_node *np = spi_dev->dev.of_node;
unsigned long flags;
u32 clrb = 0, setb = 0;
/* SPI slave device may need time between data frames */
spi->cur_midi = 0;
if (np && !of_property_read_u32(np, "st,spi-midi-ns", &spi->cur_midi))
dev_dbg(spi->dev, "%dns inter-data idleness\n", spi->cur_midi);
if (spi_dev->mode & SPI_CPOL)
setb |= spi->cfg->regs->cpol.mask;
else
clrb |= spi->cfg->regs->cpol.mask;
if (spi_dev->mode & SPI_CPHA)
setb |= spi->cfg->regs->cpha.mask;
else
clrb |= spi->cfg->regs->cpha.mask;
if (spi_dev->mode & SPI_LSB_FIRST)
setb |= spi->cfg->regs->lsb_first.mask;
else
clrb |= spi->cfg->regs->lsb_first.mask;
if (STM32_SPI_DEVICE_MODE(spi) && spi_dev->mode & SPI_CS_HIGH)
setb |= spi->cfg->regs->cs_high.mask;
else
clrb |= spi->cfg->regs->cs_high.mask;
dev_dbg(spi->dev, "cpol=%d cpha=%d lsb_first=%d cs_high=%d\n",
!!(spi_dev->mode & SPI_CPOL),
!!(spi_dev->mode & SPI_CPHA),
!!(spi_dev->mode & SPI_LSB_FIRST),
!!(spi_dev->mode & SPI_CS_HIGH));
	/*
	 * On STM32H7, messages should not exceed a maximum size set
	 * afterwards via the set_number_of_data function. In order to
	 * ensure that, split large messages into several messages.
	 */
if (spi->cfg->set_number_of_data) {
int ret;
ret = spi_split_transfers_maxwords(ctrl, msg,
STM32H7_SPI_TSIZE_MAX,
GFP_KERNEL | GFP_DMA);
if (ret)
return ret;
}
spin_lock_irqsave(&spi->lock, flags);
/* CPOL, CPHA and LSB FIRST bits have common register */
if (clrb || setb)
writel_relaxed(
(readl_relaxed(spi->base + spi->cfg->regs->cpol.reg) &
~clrb) | setb,
spi->base + spi->cfg->regs->cpol.reg);
spin_unlock_irqrestore(&spi->lock, flags);
return 0;
}
/**
* stm32f4_spi_dma_tx_cb - dma callback
* @data: pointer to the spi controller data structure
*
* DMA callback is called when the transfer is complete for DMA TX channel.
*/
static void stm32f4_spi_dma_tx_cb(void *data)
{
struct stm32_spi *spi = data;
if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX) {
spi_finalize_current_transfer(spi->ctrl);
stm32f4_spi_disable(spi);
}
}
/**
* stm32_spi_dma_rx_cb - dma callback
* @data: pointer to the spi controller data structure
*
* DMA callback is called when the transfer is complete for DMA RX channel.
*/
static void stm32_spi_dma_rx_cb(void *data)
{
struct stm32_spi *spi = data;
spi_finalize_current_transfer(spi->ctrl);
spi->cfg->disable(spi);
}
/**
* stm32_spi_dma_config - configure dma slave channel depending on current
* transfer bits_per_word.
* @spi: pointer to the spi controller data structure
* @dma_conf: pointer to the dma_slave_config structure
* @dir: direction of the dma transfer
*/
static void stm32_spi_dma_config(struct stm32_spi *spi,
struct dma_slave_config *dma_conf,
enum dma_transfer_direction dir)
{
enum dma_slave_buswidth buswidth;
u32 maxburst;
if (spi->cur_bpw <= 8)
buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
else if (spi->cur_bpw <= 16)
buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
else
buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
if (spi->cfg->has_fifo) {
/* Valid for DMA Half or Full Fifo threshold */
if (spi->cur_fthlv == 2)
maxburst = 1;
else
maxburst = spi->cur_fthlv;
} else {
maxburst = 1;
}
memset(dma_conf, 0, sizeof(struct dma_slave_config));
dma_conf->direction = dir;
if (dma_conf->direction == DMA_DEV_TO_MEM) { /* RX */
dma_conf->src_addr = spi->phys_addr + spi->cfg->regs->rx.reg;
dma_conf->src_addr_width = buswidth;
dma_conf->src_maxburst = maxburst;
dev_dbg(spi->dev, "Rx DMA config buswidth=%d, maxburst=%d\n",
buswidth, maxburst);
} else if (dma_conf->direction == DMA_MEM_TO_DEV) { /* TX */
dma_conf->dst_addr = spi->phys_addr + spi->cfg->regs->tx.reg;
dma_conf->dst_addr_width = buswidth;
dma_conf->dst_maxburst = maxburst;
dev_dbg(spi->dev, "Tx DMA config buswidth=%d, maxburst=%d\n",
buswidth, maxburst);
}
}
/**
* stm32f4_spi_transfer_one_irq - transfer a single spi_transfer using
* interrupts
* @spi: pointer to the spi controller data structure
*
 * It must return 0 if the transfer is finished or 1 if the transfer is still
* in progress.
*/
static int stm32f4_spi_transfer_one_irq(struct stm32_spi *spi)
{
unsigned long flags;
u32 cr2 = 0;
/* Enable the interrupts relative to the current communication mode */
if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX) {
cr2 |= STM32F4_SPI_CR2_TXEIE;
} else if (spi->cur_comm == SPI_FULL_DUPLEX ||
spi->cur_comm == SPI_SIMPLEX_RX ||
spi->cur_comm == SPI_3WIRE_RX) {
		/*
		 * In transmit-only mode, the OVR flag is set in the SR register
		 * since the received data are never read. Therefore set OVR
		 * interrupt only when rx buffer is available.
		 */
cr2 |= STM32F4_SPI_CR2_RXNEIE | STM32F4_SPI_CR2_ERRIE;
} else {
return -EINVAL;
}
spin_lock_irqsave(&spi->lock, flags);
stm32_spi_set_bits(spi, STM32F4_SPI_CR2, cr2);
stm32_spi_enable(spi);
	/* start the data transfer once the buffer is loaded */
if (spi->tx_buf)
stm32f4_spi_write_tx(spi);
spin_unlock_irqrestore(&spi->lock, flags);
return 1;
}
/**
* stm32h7_spi_transfer_one_irq - transfer a single spi_transfer using
* interrupts
* @spi: pointer to the spi controller data structure
*
 * It must return 0 if the transfer is finished or 1 if the transfer is still
* in progress.
*/
static int stm32h7_spi_transfer_one_irq(struct stm32_spi *spi)
{
unsigned long flags;
u32 ier = 0;
/* Enable the interrupts relative to the current communication mode */
if (spi->tx_buf && spi->rx_buf) /* Full Duplex */
ier |= STM32H7_SPI_IER_DXPIE;
else if (spi->tx_buf) /* Half-Duplex TX dir or Simplex TX */
ier |= STM32H7_SPI_IER_TXPIE;
else if (spi->rx_buf) /* Half-Duplex RX dir or Simplex RX */
ier |= STM32H7_SPI_IER_RXPIE;
/* Enable the interrupts relative to the end of transfer */
ier |= STM32H7_SPI_IER_EOTIE | STM32H7_SPI_IER_TXTFIE |
STM32H7_SPI_IER_OVRIE | STM32H7_SPI_IER_MODFIE;
spin_lock_irqsave(&spi->lock, flags);
stm32_spi_enable(spi);
/* Be sure to have data in fifo before starting data transfer */
if (spi->tx_buf)
stm32h7_spi_write_txfifo(spi);
if (STM32_SPI_MASTER_MODE(spi))
stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_CSTART);
writel_relaxed(ier, spi->base + STM32H7_SPI_IER);
spin_unlock_irqrestore(&spi->lock, flags);
return 1;
}
/**
* stm32f4_spi_transfer_one_dma_start - Set SPI driver registers to start
* transfer using DMA
* @spi: pointer to the spi controller data structure
*/
static void stm32f4_spi_transfer_one_dma_start(struct stm32_spi *spi)
{
/* In DMA mode end of transfer is handled by DMA TX or RX callback. */
if (spi->cur_comm == SPI_SIMPLEX_RX || spi->cur_comm == SPI_3WIRE_RX ||
spi->cur_comm == SPI_FULL_DUPLEX) {
/*
* In transmit-only mode, the OVR flag is set in the SR register
* since the received data are never read. Therefore set OVR
* interrupt only when rx buffer is available.
*/
stm32_spi_set_bits(spi, STM32F4_SPI_CR2, STM32F4_SPI_CR2_ERRIE);
}
stm32_spi_enable(spi);
}
/**
* stm32h7_spi_transfer_one_dma_start - Set SPI driver registers to start
* transfer using DMA
* @spi: pointer to the spi controller data structure
*/
static void stm32h7_spi_transfer_one_dma_start(struct stm32_spi *spi)
{
uint32_t ier = STM32H7_SPI_IER_OVRIE | STM32H7_SPI_IER_MODFIE;
/* Enable the interrupts */
if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX)
ier |= STM32H7_SPI_IER_EOTIE | STM32H7_SPI_IER_TXTFIE;
stm32_spi_set_bits(spi, STM32H7_SPI_IER, ier);
stm32_spi_enable(spi);
if (STM32_SPI_MASTER_MODE(spi))
stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_CSTART);
}
/**
* stm32_spi_transfer_one_dma - transfer a single spi_transfer using DMA
* @spi: pointer to the spi controller data structure
* @xfer: pointer to the spi_transfer structure
*
 * It must return 0 if the transfer is finished or 1 if the transfer is still
* in progress.
*/
static int stm32_spi_transfer_one_dma(struct stm32_spi *spi,
struct spi_transfer *xfer)
{
struct dma_slave_config tx_dma_conf, rx_dma_conf;
struct dma_async_tx_descriptor *tx_dma_desc, *rx_dma_desc;
unsigned long flags;
spin_lock_irqsave(&spi->lock, flags);
rx_dma_desc = NULL;
if (spi->rx_buf && spi->dma_rx) {
stm32_spi_dma_config(spi, &rx_dma_conf, DMA_DEV_TO_MEM);
dmaengine_slave_config(spi->dma_rx, &rx_dma_conf);
/* Enable Rx DMA request */
stm32_spi_set_bits(spi, spi->cfg->regs->dma_rx_en.reg,
spi->cfg->regs->dma_rx_en.mask);
rx_dma_desc = dmaengine_prep_slave_sg(
spi->dma_rx, xfer->rx_sg.sgl,
xfer->rx_sg.nents,
rx_dma_conf.direction,
DMA_PREP_INTERRUPT);
}
tx_dma_desc = NULL;
if (spi->tx_buf && spi->dma_tx) {
stm32_spi_dma_config(spi, &tx_dma_conf, DMA_MEM_TO_DEV);
dmaengine_slave_config(spi->dma_tx, &tx_dma_conf);
tx_dma_desc = dmaengine_prep_slave_sg(
spi->dma_tx, xfer->tx_sg.sgl,
xfer->tx_sg.nents,
tx_dma_conf.direction,
DMA_PREP_INTERRUPT);
}
if ((spi->tx_buf && spi->dma_tx && !tx_dma_desc) ||
(spi->rx_buf && spi->dma_rx && !rx_dma_desc))
goto dma_desc_error;
if (spi->cur_comm == SPI_FULL_DUPLEX && (!tx_dma_desc || !rx_dma_desc))
goto dma_desc_error;
if (rx_dma_desc) {
rx_dma_desc->callback = spi->cfg->dma_rx_cb;
rx_dma_desc->callback_param = spi;
if (dma_submit_error(dmaengine_submit(rx_dma_desc))) {
dev_err(spi->dev, "Rx DMA submit failed\n");
goto dma_desc_error;
}
/* Enable Rx DMA channel */
dma_async_issue_pending(spi->dma_rx);
}
if (tx_dma_desc) {
if (spi->cur_comm == SPI_SIMPLEX_TX ||
spi->cur_comm == SPI_3WIRE_TX) {
tx_dma_desc->callback = spi->cfg->dma_tx_cb;
tx_dma_desc->callback_param = spi;
}
if (dma_submit_error(dmaengine_submit(tx_dma_desc))) {
dev_err(spi->dev, "Tx DMA submit failed\n");
goto dma_submit_error;
}
/* Enable Tx DMA channel */
dma_async_issue_pending(spi->dma_tx);
/* Enable Tx DMA request */
stm32_spi_set_bits(spi, spi->cfg->regs->dma_tx_en.reg,
spi->cfg->regs->dma_tx_en.mask);
}
spi->cfg->transfer_one_dma_start(spi);
spin_unlock_irqrestore(&spi->lock, flags);
return 1;
dma_submit_error:
if (spi->dma_rx)
dmaengine_terminate_sync(spi->dma_rx);
dma_desc_error:
stm32_spi_clr_bits(spi, spi->cfg->regs->dma_rx_en.reg,
spi->cfg->regs->dma_rx_en.mask);
spin_unlock_irqrestore(&spi->lock, flags);
dev_info(spi->dev, "DMA issue: fall back to irq transfer\n");
spi->cur_usedma = false;
return spi->cfg->transfer_one_irq(spi);
}
/**
* stm32f4_spi_set_bpw - Configure bits per word
* @spi: pointer to the spi controller data structure
*/
static void stm32f4_spi_set_bpw(struct stm32_spi *spi)
{
if (spi->cur_bpw == 16)
stm32_spi_set_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_DFF);
else
stm32_spi_clr_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_DFF);
}
/**
* stm32h7_spi_set_bpw - configure bits per word
* @spi: pointer to the spi controller data structure
*/
static void stm32h7_spi_set_bpw(struct stm32_spi *spi)
{
u32 bpw, fthlv;
u32 cfg1_clrb = 0, cfg1_setb = 0;
bpw = spi->cur_bpw - 1;
cfg1_clrb |= STM32H7_SPI_CFG1_DSIZE;
cfg1_setb |= FIELD_PREP(STM32H7_SPI_CFG1_DSIZE, bpw);
spi->cur_fthlv = stm32h7_spi_prepare_fthlv(spi, spi->cur_xferlen);
fthlv = spi->cur_fthlv - 1;
cfg1_clrb |= STM32H7_SPI_CFG1_FTHLV;
cfg1_setb |= FIELD_PREP(STM32H7_SPI_CFG1_FTHLV, fthlv);
writel_relaxed(
(readl_relaxed(spi->base + STM32H7_SPI_CFG1) &
~cfg1_clrb) | cfg1_setb,
spi->base + STM32H7_SPI_CFG1);
}
/**
* stm32_spi_set_mbr - Configure baud rate divisor in master mode
* @spi: pointer to the spi controller data structure
* @mbrdiv: baud rate divisor value
*/
static void stm32_spi_set_mbr(struct stm32_spi *spi, u32 mbrdiv)
{
u32 clrb = 0, setb = 0;
clrb |= spi->cfg->regs->br.mask;
setb |= (mbrdiv << spi->cfg->regs->br.shift) & spi->cfg->regs->br.mask;
writel_relaxed((readl_relaxed(spi->base + spi->cfg->regs->br.reg) &
~clrb) | setb,
spi->base + spi->cfg->regs->br.reg);
}
/**
* stm32_spi_communication_type - return transfer communication type
* @spi_dev: pointer to the spi device
* @transfer: pointer to spi transfer
*/
static unsigned int stm32_spi_communication_type(struct spi_device *spi_dev,
struct spi_transfer *transfer)
{
unsigned int type = SPI_FULL_DUPLEX;
if (spi_dev->mode & SPI_3WIRE) { /* MISO/MOSI signals shared */
/*
		 * SPI_3WIRE with both xfer->tx_buf and xfer->rx_buf set is
		 * forbidden and rejected by the SPI subsystem, so the valid
		 * buffer alone determines the direction of the transfer.
*/
if (!transfer->tx_buf)
type = SPI_3WIRE_RX;
else
type = SPI_3WIRE_TX;
} else {
if (!transfer->tx_buf)
type = SPI_SIMPLEX_RX;
else if (!transfer->rx_buf)
type = SPI_SIMPLEX_TX;
}
return type;
}
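/*
 * Summary of the mapping above:
 *   SPI_3WIRE, tx_buf only  -> SPI_3WIRE_TX
 *   SPI_3WIRE, rx_buf only  -> SPI_3WIRE_RX
 *   4-wire, tx_buf only     -> SPI_SIMPLEX_TX
 *   4-wire, rx_buf only     -> SPI_SIMPLEX_RX
 *   4-wire, both buffers    -> SPI_FULL_DUPLEX
 */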
/**
* stm32f4_spi_set_mode - configure communication mode
* @spi: pointer to the spi controller data structure
* @comm_type: type of communication to configure
*/
static int stm32f4_spi_set_mode(struct stm32_spi *spi, unsigned int comm_type)
{
if (comm_type == SPI_3WIRE_TX || comm_type == SPI_SIMPLEX_TX) {
stm32_spi_set_bits(spi, STM32F4_SPI_CR1,
STM32F4_SPI_CR1_BIDIMODE |
STM32F4_SPI_CR1_BIDIOE);
} else if (comm_type == SPI_FULL_DUPLEX ||
comm_type == SPI_SIMPLEX_RX) {
stm32_spi_clr_bits(spi, STM32F4_SPI_CR1,
STM32F4_SPI_CR1_BIDIMODE |
STM32F4_SPI_CR1_BIDIOE);
} else if (comm_type == SPI_3WIRE_RX) {
stm32_spi_set_bits(spi, STM32F4_SPI_CR1,
STM32F4_SPI_CR1_BIDIMODE);
stm32_spi_clr_bits(spi, STM32F4_SPI_CR1,
STM32F4_SPI_CR1_BIDIOE);
} else {
return -EINVAL;
}
return 0;
}
/**
* stm32h7_spi_set_mode - configure communication mode
* @spi: pointer to the spi controller data structure
* @comm_type: type of communication to configure
*/
static int stm32h7_spi_set_mode(struct stm32_spi *spi, unsigned int comm_type)
{
u32 mode;
u32 cfg2_clrb = 0, cfg2_setb = 0;
if (comm_type == SPI_3WIRE_RX) {
mode = STM32H7_SPI_HALF_DUPLEX;
stm32_spi_clr_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_HDDIR);
} else if (comm_type == SPI_3WIRE_TX) {
mode = STM32H7_SPI_HALF_DUPLEX;
stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_HDDIR);
} else if (comm_type == SPI_SIMPLEX_RX) {
mode = STM32H7_SPI_SIMPLEX_RX;
} else if (comm_type == SPI_SIMPLEX_TX) {
mode = STM32H7_SPI_SIMPLEX_TX;
} else {
mode = STM32H7_SPI_FULL_DUPLEX;
}
cfg2_clrb |= STM32H7_SPI_CFG2_COMM;
cfg2_setb |= FIELD_PREP(STM32H7_SPI_CFG2_COMM, mode);
writel_relaxed(
(readl_relaxed(spi->base + STM32H7_SPI_CFG2) &
~cfg2_clrb) | cfg2_setb,
spi->base + STM32H7_SPI_CFG2);
return 0;
}
/**
* stm32h7_spi_data_idleness - configure minimum time delay inserted between two
* consecutive data frames in master mode
* @spi: pointer to the spi controller data structure
* @len: transfer len
*/
static void stm32h7_spi_data_idleness(struct stm32_spi *spi, u32 len)
{
u32 cfg2_clrb = 0, cfg2_setb = 0;
cfg2_clrb |= STM32H7_SPI_CFG2_MIDI;
if ((len > 1) && (spi->cur_midi > 0)) {
u32 sck_period_ns = DIV_ROUND_UP(NSEC_PER_SEC, spi->cur_speed);
u32 midi = min_t(u32,
DIV_ROUND_UP(spi->cur_midi, sck_period_ns),
FIELD_GET(STM32H7_SPI_CFG2_MIDI,
STM32H7_SPI_CFG2_MIDI));
dev_dbg(spi->dev, "period=%dns, midi=%d(=%dns)\n",
sck_period_ns, midi, midi * sck_period_ns);
cfg2_setb |= FIELD_PREP(STM32H7_SPI_CFG2_MIDI, midi);
}
writel_relaxed((readl_relaxed(spi->base + STM32H7_SPI_CFG2) &
~cfg2_clrb) | cfg2_setb,
spi->base + STM32H7_SPI_CFG2);
}
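/*
 * Worked example with assumed values: at cur_speed = 10 MHz the SCK period is
 * 100 ns, so a requested st,spi-midi-ns of 250 ns becomes
 * midi = DIV_ROUND_UP(250, 100) = 3 idle clock cycles, capped at the maximum
 * value the MIDI bitfield can encode (15 with a 4-bit field).
 */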
/**
* stm32h7_spi_number_of_data - configure number of data at current transfer
* @spi: pointer to the spi controller data structure
* @nb_words: transfer length (in words)
*/
static int stm32h7_spi_number_of_data(struct stm32_spi *spi, u32 nb_words)
{
if (nb_words <= STM32H7_SPI_TSIZE_MAX) {
writel_relaxed(FIELD_PREP(STM32H7_SPI_CR2_TSIZE, nb_words),
spi->base + STM32H7_SPI_CR2);
} else {
return -EMSGSIZE;
}
return 0;
}
/**
* stm32_spi_transfer_one_setup - common setup to transfer a single
* spi_transfer either using DMA or
* interrupts.
* @spi: pointer to the spi controller data structure
* @spi_dev: pointer to the spi device
* @transfer: pointer to spi transfer
*/
static int stm32_spi_transfer_one_setup(struct stm32_spi *spi,
struct spi_device *spi_dev,
struct spi_transfer *transfer)
{
unsigned long flags;
unsigned int comm_type;
int nb_words, ret = 0;
int mbr;
spin_lock_irqsave(&spi->lock, flags);
spi->cur_xferlen = transfer->len;
spi->cur_bpw = transfer->bits_per_word;
spi->cfg->set_bpw(spi);
/* Update spi->cur_speed with real clock speed */
if (STM32_SPI_MASTER_MODE(spi)) {
mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz,
spi->cfg->baud_rate_div_min,
spi->cfg->baud_rate_div_max);
if (mbr < 0) {
ret = mbr;
goto out;
}
transfer->speed_hz = spi->cur_speed;
stm32_spi_set_mbr(spi, mbr);
}
comm_type = stm32_spi_communication_type(spi_dev, transfer);
ret = spi->cfg->set_mode(spi, comm_type);
if (ret < 0)
goto out;
spi->cur_comm = comm_type;
if (STM32_SPI_MASTER_MODE(spi) && spi->cfg->set_data_idleness)
spi->cfg->set_data_idleness(spi, transfer->len);
if (spi->cur_bpw <= 8)
nb_words = transfer->len;
else if (spi->cur_bpw <= 16)
nb_words = DIV_ROUND_UP(transfer->len * 8, 16);
else
nb_words = DIV_ROUND_UP(transfer->len * 8, 32);
if (spi->cfg->set_number_of_data) {
ret = spi->cfg->set_number_of_data(spi, nb_words);
if (ret < 0)
goto out;
}
dev_dbg(spi->dev, "transfer communication mode set to %d\n",
spi->cur_comm);
dev_dbg(spi->dev,
"data frame of %d-bit, data packet of %d data frames\n",
spi->cur_bpw, spi->cur_fthlv);
if (STM32_SPI_MASTER_MODE(spi))
dev_dbg(spi->dev, "speed set to %dHz\n", spi->cur_speed);
dev_dbg(spi->dev, "transfer of %d bytes (%d data frames)\n",
spi->cur_xferlen, nb_words);
dev_dbg(spi->dev, "dma %s\n",
(spi->cur_usedma) ? "enabled" : "disabled");
out:
spin_unlock_irqrestore(&spi->lock, flags);
return ret;
}
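/*
 * Example of the word count above (assumed values): a 10-byte transfer at
 * cur_bpw = 16 gives nb_words = DIV_ROUND_UP(10 * 8, 16) = 5 data frames,
 * which is the value programmed into TSIZE on STM32H7.
 */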
/**
* stm32_spi_transfer_one - transfer a single spi_transfer
* @ctrl: controller interface
* @spi_dev: pointer to the spi device
* @transfer: pointer to spi transfer
*
* It must return 0 if the transfer is finished or 1 if the transfer is still
* in progress.
*/
static int stm32_spi_transfer_one(struct spi_controller *ctrl,
struct spi_device *spi_dev,
struct spi_transfer *transfer)
{
struct stm32_spi *spi = spi_controller_get_devdata(ctrl);
int ret;
spi->tx_buf = transfer->tx_buf;
spi->rx_buf = transfer->rx_buf;
spi->tx_len = spi->tx_buf ? transfer->len : 0;
spi->rx_len = spi->rx_buf ? transfer->len : 0;
spi->cur_usedma = (ctrl->can_dma &&
ctrl->can_dma(ctrl, spi_dev, transfer));
ret = stm32_spi_transfer_one_setup(spi, spi_dev, transfer);
if (ret) {
dev_err(spi->dev, "SPI transfer setup failed\n");
return ret;
}
if (spi->cur_usedma)
return stm32_spi_transfer_one_dma(spi, transfer);
else
return spi->cfg->transfer_one_irq(spi);
}
/**
* stm32_spi_unprepare_msg - relax the hardware
* @ctrl: controller interface
* @msg: pointer to the spi message
*/
static int stm32_spi_unprepare_msg(struct spi_controller *ctrl,
struct spi_message *msg)
{
struct stm32_spi *spi = spi_controller_get_devdata(ctrl);
spi->cfg->disable(spi);
return 0;
}
/**
* stm32f4_spi_config - Configure SPI controller as SPI master
* @spi: pointer to the spi controller data structure
*/
static int stm32f4_spi_config(struct stm32_spi *spi)
{
unsigned long flags;
spin_lock_irqsave(&spi->lock, flags);
/* Ensure I2SMOD bit is kept cleared */
stm32_spi_clr_bits(spi, STM32F4_SPI_I2SCFGR,
STM32F4_SPI_I2SCFGR_I2SMOD);
/*
* - SS input value high
* - transmitter half duplex direction
* - Set the master mode (default Motorola mode)
* - Consider 1 master/n slaves configuration and
* SS input value is determined by the SSI bit
*/
stm32_spi_set_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_SSI |
STM32F4_SPI_CR1_BIDIOE |
STM32F4_SPI_CR1_MSTR |
STM32F4_SPI_CR1_SSM);
spin_unlock_irqrestore(&spi->lock, flags);
return 0;
}
/**
* stm32h7_spi_config - Configure SPI controller
* @spi: pointer to the spi controller data structure
*/
static int stm32h7_spi_config(struct stm32_spi *spi)
{
unsigned long flags;
u32 cr1 = 0, cfg2 = 0;
spin_lock_irqsave(&spi->lock, flags);
/* Ensure I2SMOD bit is kept cleared */
stm32_spi_clr_bits(spi, STM32H7_SPI_I2SCFGR,
STM32H7_SPI_I2SCFGR_I2SMOD);
if (STM32_SPI_DEVICE_MODE(spi)) {
/* Use native device select */
cfg2 &= ~STM32H7_SPI_CFG2_SSM;
} else {
/*
* - Transmitter half duplex direction
* - Automatic communication suspend when RX-Fifo is full
* - SS input value high
*/
cr1 |= STM32H7_SPI_CR1_HDDIR | STM32H7_SPI_CR1_MASRX | STM32H7_SPI_CR1_SSI;
/*
* - Set the master mode (default Motorola mode)
* - Consider 1 master/n devices configuration and
* SS input value is determined by the SSI bit
* - keep control of all associated GPIOs
*/
cfg2 |= STM32H7_SPI_CFG2_MASTER | STM32H7_SPI_CFG2_SSM | STM32H7_SPI_CFG2_AFCNTR;
}
stm32_spi_set_bits(spi, STM32H7_SPI_CR1, cr1);
stm32_spi_set_bits(spi, STM32H7_SPI_CFG2, cfg2);
spin_unlock_irqrestore(&spi->lock, flags);
return 0;
}
static const struct stm32_spi_cfg stm32f4_spi_cfg = {
.regs = &stm32f4_spi_regspec,
.get_bpw_mask = stm32f4_spi_get_bpw_mask,
.disable = stm32f4_spi_disable,
.config = stm32f4_spi_config,
.set_bpw = stm32f4_spi_set_bpw,
.set_mode = stm32f4_spi_set_mode,
.transfer_one_dma_start = stm32f4_spi_transfer_one_dma_start,
.dma_tx_cb = stm32f4_spi_dma_tx_cb,
.dma_rx_cb = stm32_spi_dma_rx_cb,
.transfer_one_irq = stm32f4_spi_transfer_one_irq,
.irq_handler_event = stm32f4_spi_irq_event,
.irq_handler_thread = stm32f4_spi_irq_thread,
.baud_rate_div_min = STM32F4_SPI_BR_DIV_MIN,
.baud_rate_div_max = STM32F4_SPI_BR_DIV_MAX,
.has_fifo = false,
.has_device_mode = false,
.flags = SPI_CONTROLLER_MUST_TX,
};
static const struct stm32_spi_cfg stm32h7_spi_cfg = {
.regs = &stm32h7_spi_regspec,
.get_fifo_size = stm32h7_spi_get_fifo_size,
.get_bpw_mask = stm32h7_spi_get_bpw_mask,
.disable = stm32h7_spi_disable,
.config = stm32h7_spi_config,
.set_bpw = stm32h7_spi_set_bpw,
.set_mode = stm32h7_spi_set_mode,
.set_data_idleness = stm32h7_spi_data_idleness,
.set_number_of_data = stm32h7_spi_number_of_data,
.transfer_one_dma_start = stm32h7_spi_transfer_one_dma_start,
.dma_rx_cb = stm32_spi_dma_rx_cb,
/*
	 * dma_tx_cb is not necessary since, in case of TX, the DMA is followed
	 * by an SPI access, hence completion is handled within the SPI
	 * interrupt.
*/
.transfer_one_irq = stm32h7_spi_transfer_one_irq,
.irq_handler_thread = stm32h7_spi_irq_thread,
.baud_rate_div_min = STM32H7_SPI_MBR_DIV_MIN,
.baud_rate_div_max = STM32H7_SPI_MBR_DIV_MAX,
.has_fifo = true,
.has_device_mode = true,
};
static const struct of_device_id stm32_spi_of_match[] = {
{ .compatible = "st,stm32h7-spi", .data = (void *)&stm32h7_spi_cfg },
{ .compatible = "st,stm32f4-spi", .data = (void *)&stm32f4_spi_cfg },
{},
};
MODULE_DEVICE_TABLE(of, stm32_spi_of_match);
static int stm32h7_spi_device_abort(struct spi_controller *ctrl)
{
spi_finalize_current_transfer(ctrl);
return 0;
}
static int stm32_spi_probe(struct platform_device *pdev)
{
struct spi_controller *ctrl;
struct stm32_spi *spi;
struct resource *res;
struct reset_control *rst;
struct device_node *np = pdev->dev.of_node;
bool device_mode;
int ret;
const struct stm32_spi_cfg *cfg = of_device_get_match_data(&pdev->dev);
device_mode = of_property_read_bool(np, "spi-slave");
if (!cfg->has_device_mode && device_mode) {
dev_err(&pdev->dev, "spi-slave not supported\n");
return -EPERM;
}
if (device_mode)
ctrl = devm_spi_alloc_slave(&pdev->dev, sizeof(struct stm32_spi));
else
ctrl = devm_spi_alloc_master(&pdev->dev, sizeof(struct stm32_spi));
if (!ctrl) {
dev_err(&pdev->dev, "spi controller allocation failed\n");
return -ENOMEM;
}
platform_set_drvdata(pdev, ctrl);
spi = spi_controller_get_devdata(ctrl);
spi->dev = &pdev->dev;
spi->ctrl = ctrl;
spi->device_mode = device_mode;
spin_lock_init(&spi->lock);
spi->cfg = cfg;
spi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(spi->base))
return PTR_ERR(spi->base);
spi->phys_addr = (dma_addr_t)res->start;
spi->irq = platform_get_irq(pdev, 0);
if (spi->irq <= 0)
return spi->irq;
ret = devm_request_threaded_irq(&pdev->dev, spi->irq,
spi->cfg->irq_handler_event,
spi->cfg->irq_handler_thread,
IRQF_ONESHOT, pdev->name, ctrl);
if (ret) {
dev_err(&pdev->dev, "irq%d request failed: %d\n", spi->irq,
ret);
return ret;
}
spi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(spi->clk)) {
ret = PTR_ERR(spi->clk);
dev_err(&pdev->dev, "clk get failed: %d\n", ret);
return ret;
}
ret = clk_prepare_enable(spi->clk);
if (ret) {
dev_err(&pdev->dev, "clk enable failed: %d\n", ret);
return ret;
}
spi->clk_rate = clk_get_rate(spi->clk);
if (!spi->clk_rate) {
dev_err(&pdev->dev, "clk rate = 0\n");
ret = -EINVAL;
goto err_clk_disable;
}
rst = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
if (rst) {
if (IS_ERR(rst)) {
ret = dev_err_probe(&pdev->dev, PTR_ERR(rst),
"failed to get reset\n");
goto err_clk_disable;
}
reset_control_assert(rst);
udelay(2);
reset_control_deassert(rst);
}
if (spi->cfg->has_fifo)
spi->fifo_size = spi->cfg->get_fifo_size(spi);
ret = spi->cfg->config(spi);
if (ret) {
dev_err(&pdev->dev, "controller configuration failed: %d\n",
ret);
goto err_clk_disable;
}
ctrl->dev.of_node = pdev->dev.of_node;
ctrl->auto_runtime_pm = true;
ctrl->bus_num = pdev->id;
ctrl->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST |
SPI_3WIRE;
ctrl->bits_per_word_mask = spi->cfg->get_bpw_mask(spi);
ctrl->max_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_min;
ctrl->min_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_max;
ctrl->use_gpio_descriptors = true;
ctrl->prepare_message = stm32_spi_prepare_msg;
ctrl->transfer_one = stm32_spi_transfer_one;
ctrl->unprepare_message = stm32_spi_unprepare_msg;
ctrl->flags = spi->cfg->flags;
if (STM32_SPI_DEVICE_MODE(spi))
ctrl->slave_abort = stm32h7_spi_device_abort;
spi->dma_tx = dma_request_chan(spi->dev, "tx");
if (IS_ERR(spi->dma_tx)) {
ret = PTR_ERR(spi->dma_tx);
spi->dma_tx = NULL;
if (ret == -EPROBE_DEFER)
goto err_clk_disable;
dev_warn(&pdev->dev, "failed to request tx dma channel\n");
} else {
ctrl->dma_tx = spi->dma_tx;
}
spi->dma_rx = dma_request_chan(spi->dev, "rx");
if (IS_ERR(spi->dma_rx)) {
ret = PTR_ERR(spi->dma_rx);
spi->dma_rx = NULL;
if (ret == -EPROBE_DEFER)
goto err_dma_release;
dev_warn(&pdev->dev, "failed to request rx dma channel\n");
} else {
ctrl->dma_rx = spi->dma_rx;
}
if (spi->dma_tx || spi->dma_rx)
ctrl->can_dma = stm32_spi_can_dma;
pm_runtime_set_autosuspend_delay(&pdev->dev,
STM32_SPI_AUTOSUSPEND_DELAY);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_enable(&pdev->dev);
ret = spi_register_controller(ctrl);
if (ret) {
dev_err(&pdev->dev, "spi controller registration failed: %d\n",
ret);
goto err_pm_disable;
}
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
dev_info(&pdev->dev, "driver initialized (%s mode)\n",
STM32_SPI_MASTER_MODE(spi) ? "master" : "device");
return 0;
err_pm_disable:
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
err_dma_release:
if (spi->dma_tx)
dma_release_channel(spi->dma_tx);
if (spi->dma_rx)
dma_release_channel(spi->dma_rx);
err_clk_disable:
clk_disable_unprepare(spi->clk);
return ret;
}
static void stm32_spi_remove(struct platform_device *pdev)
{
struct spi_controller *ctrl = platform_get_drvdata(pdev);
struct stm32_spi *spi = spi_controller_get_devdata(ctrl);
pm_runtime_get_sync(&pdev->dev);
spi_unregister_controller(ctrl);
spi->cfg->disable(spi);
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
if (ctrl->dma_tx)
dma_release_channel(ctrl->dma_tx);
if (ctrl->dma_rx)
dma_release_channel(ctrl->dma_rx);
clk_disable_unprepare(spi->clk);
pinctrl_pm_select_sleep_state(&pdev->dev);
}
static int __maybe_unused stm32_spi_runtime_suspend(struct device *dev)
{
struct spi_controller *ctrl = dev_get_drvdata(dev);
struct stm32_spi *spi = spi_controller_get_devdata(ctrl);
clk_disable_unprepare(spi->clk);
return pinctrl_pm_select_sleep_state(dev);
}
static int __maybe_unused stm32_spi_runtime_resume(struct device *dev)
{
struct spi_controller *ctrl = dev_get_drvdata(dev);
struct stm32_spi *spi = spi_controller_get_devdata(ctrl);
int ret;
ret = pinctrl_pm_select_default_state(dev);
if (ret)
return ret;
return clk_prepare_enable(spi->clk);
}
static int __maybe_unused stm32_spi_suspend(struct device *dev)
{
struct spi_controller *ctrl = dev_get_drvdata(dev);
int ret;
ret = spi_controller_suspend(ctrl);
if (ret)
return ret;
return pm_runtime_force_suspend(dev);
}
static int __maybe_unused stm32_spi_resume(struct device *dev)
{
struct spi_controller *ctrl = dev_get_drvdata(dev);
struct stm32_spi *spi = spi_controller_get_devdata(ctrl);
int ret;
ret = pm_runtime_force_resume(dev);
if (ret)
return ret;
ret = spi_controller_resume(ctrl);
if (ret) {
clk_disable_unprepare(spi->clk);
return ret;
}
ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
dev_err(dev, "Unable to power device:%d\n", ret);
return ret;
}
spi->cfg->config(spi);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
}
static const struct dev_pm_ops stm32_spi_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(stm32_spi_suspend, stm32_spi_resume)
SET_RUNTIME_PM_OPS(stm32_spi_runtime_suspend,
stm32_spi_runtime_resume, NULL)
};
static struct platform_driver stm32_spi_driver = {
.probe = stm32_spi_probe,
.remove_new = stm32_spi_remove,
.driver = {
.name = DRIVER_NAME,
.pm = &stm32_spi_pm_ops,
.of_match_table = stm32_spi_of_match,
},
};
module_platform_driver(stm32_spi_driver);
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_DESCRIPTION("STMicroelectronics STM32 SPI Controller driver");
MODULE_AUTHOR("Amelie Delaunay <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/spi/spi-stm32.c |
/*
* Broadcom BCM63XX High Speed SPI Controller driver
*
* Copyright 2000-2010 Broadcom Corporation
* Copyright 2012-2013 Jonas Gorski <[email protected]>
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/spi/spi.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/spi/spi-mem.h>
#include <linux/mtd/spi-nor.h>
#include <linux/reset.h>
#include <linux/pm_runtime.h>
#define HSSPI_GLOBAL_CTRL_REG 0x0
#define GLOBAL_CTRL_CS_POLARITY_SHIFT 0
#define GLOBAL_CTRL_CS_POLARITY_MASK 0x000000ff
#define GLOBAL_CTRL_PLL_CLK_CTRL_SHIFT 8
#define GLOBAL_CTRL_PLL_CLK_CTRL_MASK 0x0000ff00
#define GLOBAL_CTRL_CLK_GATE_SSOFF BIT(16)
#define GLOBAL_CTRL_CLK_POLARITY BIT(17)
#define GLOBAL_CTRL_MOSI_IDLE BIT(18)
#define HSSPI_GLOBAL_EXT_TRIGGER_REG 0x4
#define HSSPI_INT_STATUS_REG 0x8
#define HSSPI_INT_STATUS_MASKED_REG 0xc
#define HSSPI_INT_MASK_REG 0x10
#define HSSPI_PINGx_CMD_DONE(i) BIT((i * 8) + 0)
#define HSSPI_PINGx_RX_OVER(i) BIT((i * 8) + 1)
#define HSSPI_PINGx_TX_UNDER(i) BIT((i * 8) + 2)
#define HSSPI_PINGx_POLL_TIMEOUT(i) BIT((i * 8) + 3)
#define HSSPI_PINGx_CTRL_INVAL(i) BIT((i * 8) + 4)
#define HSSPI_INT_CLEAR_ALL 0xff001f1f
#define HSSPI_PINGPONG_COMMAND_REG(x) (0x80 + (x) * 0x40)
#define PINGPONG_CMD_COMMAND_MASK 0xf
#define PINGPONG_COMMAND_NOOP 0
#define PINGPONG_COMMAND_START_NOW 1
#define PINGPONG_COMMAND_START_TRIGGER 2
#define PINGPONG_COMMAND_HALT 3
#define PINGPONG_COMMAND_FLUSH 4
#define PINGPONG_CMD_PROFILE_SHIFT 8
#define PINGPONG_CMD_SS_SHIFT 12
#define HSSPI_PINGPONG_STATUS_REG(x) (0x84 + (x) * 0x40)
#define HSSPI_PINGPONG_STATUS_SRC_BUSY BIT(1)
#define HSSPI_PROFILE_CLK_CTRL_REG(x) (0x100 + (x) * 0x20)
#define CLK_CTRL_FREQ_CTRL_MASK 0x0000ffff
#define CLK_CTRL_SPI_CLK_2X_SEL BIT(14)
#define CLK_CTRL_ACCUM_RST_ON_LOOP BIT(15)
#define HSSPI_PROFILE_SIGNAL_CTRL_REG(x) (0x104 + (x) * 0x20)
#define SIGNAL_CTRL_LATCH_RISING BIT(12)
#define SIGNAL_CTRL_LAUNCH_RISING BIT(13)
#define SIGNAL_CTRL_ASYNC_INPUT_PATH BIT(16)
#define HSSPI_PROFILE_MODE_CTRL_REG(x) (0x108 + (x) * 0x20)
#define MODE_CTRL_MULTIDATA_RD_STRT_SHIFT 8
#define MODE_CTRL_MULTIDATA_WR_STRT_SHIFT 12
#define MODE_CTRL_MULTIDATA_RD_SIZE_SHIFT 16
#define MODE_CTRL_MULTIDATA_WR_SIZE_SHIFT 18
#define MODE_CTRL_MODE_3WIRE BIT(20)
#define MODE_CTRL_PREPENDBYTE_CNT_SHIFT 24
#define HSSPI_FIFO_REG(x) (0x200 + (x) * 0x200)
#define HSSPI_OP_MULTIBIT BIT(11)
#define HSSPI_OP_CODE_SHIFT 13
#define HSSPI_OP_SLEEP (0 << HSSPI_OP_CODE_SHIFT)
#define HSSPI_OP_READ_WRITE (1 << HSSPI_OP_CODE_SHIFT)
#define HSSPI_OP_WRITE (2 << HSSPI_OP_CODE_SHIFT)
#define HSSPI_OP_READ (3 << HSSPI_OP_CODE_SHIFT)
#define HSSPI_OP_SETIRQ (4 << HSSPI_OP_CODE_SHIFT)
#define HSSPI_BUFFER_LEN 512
#define HSSPI_OPCODE_LEN 2
#define HSSPI_MAX_PREPEND_LEN 15
/*
 * Some chips require 30MHz but others require 25MHz. Use the smaller value
 * to cover both cases.
*/
#define HSSPI_MAX_SYNC_CLOCK 25000000
#define HSSPI_SPI_MAX_CS 8
#define HSSPI_BUS_NUM 1 /* 0 is legacy SPI */
#define HSSPI_POLL_STATUS_TIMEOUT_MS 100
#define HSSPI_WAIT_MODE_POLLING 0
#define HSSPI_WAIT_MODE_INTR 1
#define HSSPI_WAIT_MODE_MAX HSSPI_WAIT_MODE_INTR
/*
 * Default transfer mode is auto. If the msg is prependable, use the prepend
 * mode. If not, fall back to the dummy cs workaround mode, but limit the
 * clock to 25MHz to make sure it works with all board designs.
*/
#define HSSPI_XFER_MODE_AUTO 0
#define HSSPI_XFER_MODE_PREPEND 1
#define HSSPI_XFER_MODE_DUMMYCS 2
#define HSSPI_XFER_MODE_MAX HSSPI_XFER_MODE_DUMMYCS
#define bcm63xx_prepend_printk_on_checkfail(bs, fmt, ...) \
do { \
if (bs->xfer_mode == HSSPI_XFER_MODE_AUTO) \
dev_dbg(&bs->pdev->dev, fmt, ##__VA_ARGS__); \
else if (bs->xfer_mode == HSSPI_XFER_MODE_PREPEND) \
dev_err(&bs->pdev->dev, fmt, ##__VA_ARGS__); \
} while (0)
struct bcm63xx_hsspi {
struct completion done;
struct mutex bus_mutex;
struct mutex msg_mutex;
struct platform_device *pdev;
struct clk *clk;
struct clk *pll_clk;
void __iomem *regs;
u8 __iomem *fifo;
u32 speed_hz;
u8 cs_polarity;
u32 wait_mode;
u32 xfer_mode;
u32 prepend_cnt;
u8 *prepend_buf;
};
static ssize_t wait_mode_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct spi_controller *ctrl = dev_get_drvdata(dev);
struct bcm63xx_hsspi *bs = spi_controller_get_devdata(ctrl);
return sprintf(buf, "%d\n", bs->wait_mode);
}
static ssize_t wait_mode_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct spi_controller *ctrl = dev_get_drvdata(dev);
struct bcm63xx_hsspi *bs = spi_controller_get_devdata(ctrl);
u32 val;
if (kstrtou32(buf, 10, &val))
return -EINVAL;
if (val > HSSPI_WAIT_MODE_MAX) {
dev_warn(dev, "invalid wait mode %u\n", val);
return -EINVAL;
}
mutex_lock(&bs->msg_mutex);
bs->wait_mode = val;
/* clear interrupt status to avoid spurious int on next transfer */
if (val == HSSPI_WAIT_MODE_INTR)
__raw_writel(HSSPI_INT_CLEAR_ALL, bs->regs + HSSPI_INT_STATUS_REG);
mutex_unlock(&bs->msg_mutex);
return count;
}
static DEVICE_ATTR_RW(wait_mode);
static ssize_t xfer_mode_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct spi_controller *ctrl = dev_get_drvdata(dev);
struct bcm63xx_hsspi *bs = spi_controller_get_devdata(ctrl);
return sprintf(buf, "%d\n", bs->xfer_mode);
}
static ssize_t xfer_mode_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct spi_controller *ctrl = dev_get_drvdata(dev);
struct bcm63xx_hsspi *bs = spi_controller_get_devdata(ctrl);
u32 val;
if (kstrtou32(buf, 10, &val))
return -EINVAL;
if (val > HSSPI_XFER_MODE_MAX) {
dev_warn(dev, "invalid xfer mode %u\n", val);
return -EINVAL;
}
mutex_lock(&bs->msg_mutex);
bs->xfer_mode = val;
mutex_unlock(&bs->msg_mutex);
return count;
}
static DEVICE_ATTR_RW(xfer_mode);
static struct attribute *bcm63xx_hsspi_attrs[] = {
&dev_attr_wait_mode.attr,
&dev_attr_xfer_mode.attr,
NULL,
};
static const struct attribute_group bcm63xx_hsspi_group = {
.attrs = bcm63xx_hsspi_attrs,
};
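/*
 * Usage note (illustrative, the exact sysfs path depends on where the
 * attribute group is registered on a given system):
 *   echo 1 > .../wait_mode    selects interrupt wait mode
 *   echo 2 > .../xfer_mode    forces the dummy-cs transfer mode
 */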
static void bcm63xx_hsspi_set_clk(struct bcm63xx_hsspi *bs,
struct spi_device *spi, int hz);
static size_t bcm63xx_hsspi_max_message_size(struct spi_device *spi)
{
return HSSPI_BUFFER_LEN - HSSPI_OPCODE_LEN;
}
static int bcm63xx_hsspi_wait_cmd(struct bcm63xx_hsspi *bs)
{
unsigned long limit;
u32 reg = 0;
int rc = 0;
if (bs->wait_mode == HSSPI_WAIT_MODE_INTR) {
if (wait_for_completion_timeout(&bs->done, HZ) == 0)
rc = 1;
} else {
/* polling mode checks for status busy bit */
limit = jiffies + msecs_to_jiffies(HSSPI_POLL_STATUS_TIMEOUT_MS);
while (!time_after(jiffies, limit)) {
reg = __raw_readl(bs->regs + HSSPI_PINGPONG_STATUS_REG(0));
if (reg & HSSPI_PINGPONG_STATUS_SRC_BUSY)
cpu_relax();
else
break;
}
if (reg & HSSPI_PINGPONG_STATUS_SRC_BUSY)
rc = 1;
}
if (rc)
dev_err(&bs->pdev->dev, "transfer timed out!\n");
return rc;
}
static bool bcm63xx_prepare_prepend_transfer(struct spi_controller *host,
struct spi_message *msg,
struct spi_transfer *t_prepend)
{
struct bcm63xx_hsspi *bs = spi_controller_get_devdata(host);
bool tx_only = false;
struct spi_transfer *t;
/*
* Multiple transfers within a message may be combined into one transfer
* to the controller using its prepend feature. A SPI message is prependable
* only if the following are all true:
	 * 1. One or more half duplex write transfers in single bit mode
	 * 2. Optional full duplex read/write at the end
	 * 3. No delay or cs_change between transfers
*/
bs->prepend_cnt = 0;
list_for_each_entry(t, &msg->transfers, transfer_list) {
if ((spi_delay_to_ns(&t->delay, t) > 0) || t->cs_change) {
bcm63xx_prepend_printk_on_checkfail(bs,
"Delay or cs change not supported in prepend mode!\n");
return false;
}
tx_only = false;
if (t->tx_buf && !t->rx_buf) {
tx_only = true;
if (bs->prepend_cnt + t->len >
(HSSPI_BUFFER_LEN - HSSPI_OPCODE_LEN)) {
bcm63xx_prepend_printk_on_checkfail(bs,
"exceed max buf len, abort prepending transfers!\n");
return false;
}
if (t->tx_nbits > SPI_NBITS_SINGLE &&
!list_is_last(&t->transfer_list, &msg->transfers)) {
bcm63xx_prepend_printk_on_checkfail(bs,
"multi-bit prepend buf not supported!\n");
return false;
}
if (t->tx_nbits == SPI_NBITS_SINGLE) {
memcpy(bs->prepend_buf + bs->prepend_cnt, t->tx_buf, t->len);
bs->prepend_cnt += t->len;
}
} else {
if (!list_is_last(&t->transfer_list, &msg->transfers)) {
bcm63xx_prepend_printk_on_checkfail(bs,
"rx/tx_rx transfer not supported when it is not last one!\n");
return false;
}
}
if (list_is_last(&t->transfer_list, &msg->transfers)) {
memcpy(t_prepend, t, sizeof(struct spi_transfer));
if (tx_only && t->tx_nbits == SPI_NBITS_SINGLE) {
/*
* if the last one is also a single bit tx only transfer, merge
* all of them into one single tx transfer
*/
t_prepend->len = bs->prepend_cnt;
t_prepend->tx_buf = bs->prepend_buf;
bs->prepend_cnt = 0;
} else {
/*
				 * if the last one is not a tx-only transfer, or is a dual-bit
				 * tx xfer, all the previous transfers are sent through prepend
				 * bytes, so make sure their total does not exceed the max
				 * prepend len
*/
if (bs->prepend_cnt > HSSPI_MAX_PREPEND_LEN) {
bcm63xx_prepend_printk_on_checkfail(bs,
"exceed max prepend len, abort prepending transfers!\n");
return false;
}
}
}
}
return true;
}
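/*
 * Illustrative example: a message made of a 1-byte opcode tx transfer, a
 * 3-byte address tx transfer and a final N-byte rx transfer qualifies. The
 * two tx-only transfers are copied into prepend_buf (prepend_cnt = 4) and the
 * last rx transfer is returned in t_prepend, so the whole message goes out as
 * a single controller operation with 4 prepend bytes.
 */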
static int bcm63xx_hsspi_do_prepend_txrx(struct spi_device *spi,
struct spi_transfer *t)
{
struct bcm63xx_hsspi *bs = spi_controller_get_devdata(spi->controller);
unsigned int chip_select = spi_get_chipselect(spi, 0);
u16 opcode = 0, val;
const u8 *tx = t->tx_buf;
u8 *rx = t->rx_buf;
u32 reg = 0;
/*
	 * This shouldn't happen as we set the max_message_size in the probe,
	 * but check it again in case some driver does not honor the max size.
*/
if (t->len + bs->prepend_cnt > (HSSPI_BUFFER_LEN - HSSPI_OPCODE_LEN)) {
dev_warn(&bs->pdev->dev,
"Prepend message large than fifo size len %d prepend %d\n",
t->len, bs->prepend_cnt);
return -EINVAL;
}
bcm63xx_hsspi_set_clk(bs, spi, t->speed_hz);
if (tx && rx)
opcode = HSSPI_OP_READ_WRITE;
else if (tx)
opcode = HSSPI_OP_WRITE;
else if (rx)
opcode = HSSPI_OP_READ;
if ((opcode == HSSPI_OP_READ && t->rx_nbits == SPI_NBITS_DUAL) ||
(opcode == HSSPI_OP_WRITE && t->tx_nbits == SPI_NBITS_DUAL)) {
opcode |= HSSPI_OP_MULTIBIT;
if (t->rx_nbits == SPI_NBITS_DUAL) {
reg |= 1 << MODE_CTRL_MULTIDATA_RD_SIZE_SHIFT;
reg |= bs->prepend_cnt << MODE_CTRL_MULTIDATA_RD_STRT_SHIFT;
}
if (t->tx_nbits == SPI_NBITS_DUAL) {
reg |= 1 << MODE_CTRL_MULTIDATA_WR_SIZE_SHIFT;
reg |= bs->prepend_cnt << MODE_CTRL_MULTIDATA_WR_STRT_SHIFT;
}
}
reg |= bs->prepend_cnt << MODE_CTRL_PREPENDBYTE_CNT_SHIFT;
__raw_writel(reg | 0xff,
bs->regs + HSSPI_PROFILE_MODE_CTRL_REG(chip_select));
reinit_completion(&bs->done);
if (bs->prepend_cnt)
memcpy_toio(bs->fifo + HSSPI_OPCODE_LEN, bs->prepend_buf,
bs->prepend_cnt);
if (tx)
memcpy_toio(bs->fifo + HSSPI_OPCODE_LEN + bs->prepend_cnt, tx,
t->len);
*(__be16 *)(&val) = cpu_to_be16(opcode | t->len);
__raw_writew(val, bs->fifo);
/* enable interrupt */
if (bs->wait_mode == HSSPI_WAIT_MODE_INTR)
__raw_writel(HSSPI_PINGx_CMD_DONE(0), bs->regs + HSSPI_INT_MASK_REG);
/* start the transfer */
reg = chip_select << PINGPONG_CMD_SS_SHIFT |
chip_select << PINGPONG_CMD_PROFILE_SHIFT |
PINGPONG_COMMAND_START_NOW;
__raw_writel(reg, bs->regs + HSSPI_PINGPONG_COMMAND_REG(0));
if (bcm63xx_hsspi_wait_cmd(bs))
return -ETIMEDOUT;
if (rx)
memcpy_fromio(rx, bs->fifo, t->len);
return 0;
}
static void bcm63xx_hsspi_set_cs(struct bcm63xx_hsspi *bs, unsigned int cs,
bool active)
{
u32 reg;
mutex_lock(&bs->bus_mutex);
reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
reg &= ~BIT(cs);
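	/*
	 * Set the per-CS polarity bit whenever the requested state differs
	 * from the slave's active-high setting, i.e. active with an
	 * active-low slave or idle with an active-high slave.
	 */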
if (active == !(bs->cs_polarity & BIT(cs)))
reg |= BIT(cs);
__raw_writel(reg, bs->regs + HSSPI_GLOBAL_CTRL_REG);
mutex_unlock(&bs->bus_mutex);
}
static void bcm63xx_hsspi_set_clk(struct bcm63xx_hsspi *bs,
struct spi_device *spi, int hz)
{
unsigned int profile = spi_get_chipselect(spi, 0);
u32 reg;
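	/*
	 * The profile clock word is DIV_ROUND_UP(2048, DIV_ROUND_UP(ref, hz)),
	 * e.g. a 400 MHz reference clock and a 25 MHz target give
	 * DIV_ROUND_UP(2048, 16) = 128.
	 */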
reg = DIV_ROUND_UP(2048, DIV_ROUND_UP(bs->speed_hz, hz));
__raw_writel(CLK_CTRL_ACCUM_RST_ON_LOOP | reg,
bs->regs + HSSPI_PROFILE_CLK_CTRL_REG(profile));
reg = __raw_readl(bs->regs + HSSPI_PROFILE_SIGNAL_CTRL_REG(profile));
if (hz > HSSPI_MAX_SYNC_CLOCK)
reg |= SIGNAL_CTRL_ASYNC_INPUT_PATH;
else
reg &= ~SIGNAL_CTRL_ASYNC_INPUT_PATH;
__raw_writel(reg, bs->regs + HSSPI_PROFILE_SIGNAL_CTRL_REG(profile));
mutex_lock(&bs->bus_mutex);
/* setup clock polarity */
reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
reg &= ~GLOBAL_CTRL_CLK_POLARITY;
if (spi->mode & SPI_CPOL)
reg |= GLOBAL_CTRL_CLK_POLARITY;
__raw_writel(reg, bs->regs + HSSPI_GLOBAL_CTRL_REG);
mutex_unlock(&bs->bus_mutex);
}
static int bcm63xx_hsspi_do_txrx(struct spi_device *spi, struct spi_transfer *t)
{
struct bcm63xx_hsspi *bs = spi_controller_get_devdata(spi->controller);
unsigned int chip_select = spi_get_chipselect(spi, 0);
u16 opcode = 0, val;
int pending = t->len;
int step_size = HSSPI_BUFFER_LEN;
const u8 *tx = t->tx_buf;
u8 *rx = t->rx_buf;
u32 reg = 0;
bcm63xx_hsspi_set_clk(bs, spi, t->speed_hz);
if (!t->cs_off)
bcm63xx_hsspi_set_cs(bs, spi_get_chipselect(spi, 0), true);
if (tx && rx)
opcode = HSSPI_OP_READ_WRITE;
else if (tx)
opcode = HSSPI_OP_WRITE;
else if (rx)
opcode = HSSPI_OP_READ;
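	/*
	 * For transfers that send data (write and full duplex), the payload
	 * shares the FIFO with the opcode halfword, so each chunk is
	 * HSSPI_OPCODE_LEN shorter; read-only transfers return data from
	 * FIFO offset 0 and can use the full buffer.
	 */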
if (opcode != HSSPI_OP_READ)
step_size -= HSSPI_OPCODE_LEN;
if ((opcode == HSSPI_OP_READ && t->rx_nbits == SPI_NBITS_DUAL) ||
(opcode == HSSPI_OP_WRITE && t->tx_nbits == SPI_NBITS_DUAL)) {
opcode |= HSSPI_OP_MULTIBIT;
if (t->rx_nbits == SPI_NBITS_DUAL)
reg |= 1 << MODE_CTRL_MULTIDATA_RD_SIZE_SHIFT;
if (t->tx_nbits == SPI_NBITS_DUAL)
reg |= 1 << MODE_CTRL_MULTIDATA_WR_SIZE_SHIFT;
}
__raw_writel(reg | 0xff,
bs->regs + HSSPI_PROFILE_MODE_CTRL_REG(chip_select));
while (pending > 0) {
int curr_step = min_t(int, step_size, pending);
reinit_completion(&bs->done);
if (tx) {
memcpy_toio(bs->fifo + HSSPI_OPCODE_LEN, tx, curr_step);
tx += curr_step;
}
*(__be16 *)(&val) = cpu_to_be16(opcode | curr_step);
__raw_writew(val, bs->fifo);
/* enable interrupt */
if (bs->wait_mode == HSSPI_WAIT_MODE_INTR)
__raw_writel(HSSPI_PINGx_CMD_DONE(0),
bs->regs + HSSPI_INT_MASK_REG);
reg = !chip_select << PINGPONG_CMD_SS_SHIFT |
chip_select << PINGPONG_CMD_PROFILE_SHIFT |
PINGPONG_COMMAND_START_NOW;
__raw_writel(reg, bs->regs + HSSPI_PINGPONG_COMMAND_REG(0));
if (bcm63xx_hsspi_wait_cmd(bs))
return -ETIMEDOUT;
if (rx) {
memcpy_fromio(rx, bs->fifo, curr_step);
rx += curr_step;
}
pending -= curr_step;
}
return 0;
}
static int bcm63xx_hsspi_setup(struct spi_device *spi)
{
struct bcm63xx_hsspi *bs = spi_controller_get_devdata(spi->controller);
u32 reg;
reg = __raw_readl(bs->regs +
HSSPI_PROFILE_SIGNAL_CTRL_REG(spi_get_chipselect(spi, 0)));
reg &= ~(SIGNAL_CTRL_LAUNCH_RISING | SIGNAL_CTRL_LATCH_RISING);
if (spi->mode & SPI_CPHA)
reg |= SIGNAL_CTRL_LAUNCH_RISING;
else
reg |= SIGNAL_CTRL_LATCH_RISING;
__raw_writel(reg, bs->regs +
HSSPI_PROFILE_SIGNAL_CTRL_REG(spi_get_chipselect(spi, 0)));
mutex_lock(&bs->bus_mutex);
reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
/* only change actual polarities if there is no transfer */
if ((reg & GLOBAL_CTRL_CS_POLARITY_MASK) == bs->cs_polarity) {
if (spi->mode & SPI_CS_HIGH)
reg |= BIT(spi_get_chipselect(spi, 0));
else
reg &= ~BIT(spi_get_chipselect(spi, 0));
__raw_writel(reg, bs->regs + HSSPI_GLOBAL_CTRL_REG);
}
if (spi->mode & SPI_CS_HIGH)
bs->cs_polarity |= BIT(spi_get_chipselect(spi, 0));
else
bs->cs_polarity &= ~BIT(spi_get_chipselect(spi, 0));
mutex_unlock(&bs->bus_mutex);
return 0;
}
static int bcm63xx_hsspi_do_dummy_cs_txrx(struct spi_device *spi,
struct spi_message *msg)
{
struct bcm63xx_hsspi *bs = spi_controller_get_devdata(spi->controller);
int status = -EINVAL;
int dummy_cs;
bool keep_cs = false;
struct spi_transfer *t;
/*
* This controller does not support keeping CS active during idle.
* To work around this, we use the following ugly hack:
*
* a. Invert the target chip select's polarity so it will be active.
* b. Select a "dummy" chip select to use as the hardware target.
* c. Invert the dummy chip select's polarity so it will be inactive
* during the actual transfers.
* d. Tell the hardware to send to the dummy chip select. Thanks to
* the multiplexed nature of SPI the actual target will receive
* the transfer and we see its response.
*
* e. At the end restore the polarities again to their default values.
*/
dummy_cs = !spi_get_chipselect(spi, 0);
bcm63xx_hsspi_set_cs(bs, dummy_cs, true);
list_for_each_entry(t, &msg->transfers, transfer_list) {
/*
		 * We are here for one of the reasons below:
		 * a. The message is not prependable and we are in the default auto
		 *    xfer mode. This means we fall back to dummy cs mode at the
		 *    maximum 25MHz safe clock rate.
		 * b. The user chose to use the dummy cs mode.
*/
if (bs->xfer_mode == HSSPI_XFER_MODE_AUTO) {
if (t->speed_hz > HSSPI_MAX_SYNC_CLOCK) {
t->speed_hz = HSSPI_MAX_SYNC_CLOCK;
dev_warn_once(&bs->pdev->dev,
"Force to dummy cs mode. Reduce the speed to %dHz",
t->speed_hz);
}
}
status = bcm63xx_hsspi_do_txrx(spi, t);
if (status)
break;
msg->actual_length += t->len;
spi_transfer_delay_exec(t);
/* use existing cs change logic from spi_transfer_one_message */
if (t->cs_change) {
if (list_is_last(&t->transfer_list, &msg->transfers)) {
keep_cs = true;
} else {
if (!t->cs_off)
bcm63xx_hsspi_set_cs(bs, spi_get_chipselect(spi, 0), false);
spi_transfer_cs_change_delay_exec(msg, t);
if (!list_next_entry(t, transfer_list)->cs_off)
bcm63xx_hsspi_set_cs(bs, spi_get_chipselect(spi, 0), true);
}
} else if (!list_is_last(&t->transfer_list, &msg->transfers) &&
t->cs_off != list_next_entry(t, transfer_list)->cs_off) {
bcm63xx_hsspi_set_cs(bs, spi_get_chipselect(spi, 0), t->cs_off);
}
}
bcm63xx_hsspi_set_cs(bs, dummy_cs, false);
if (status || !keep_cs)
bcm63xx_hsspi_set_cs(bs, spi_get_chipselect(spi, 0), false);
return status;
}
static int bcm63xx_hsspi_transfer_one(struct spi_controller *host,
struct spi_message *msg)
{
struct bcm63xx_hsspi *bs = spi_controller_get_devdata(host);
struct spi_device *spi = msg->spi;
int status = -EINVAL;
bool prependable = false;
struct spi_transfer t_prepend;
mutex_lock(&bs->msg_mutex);
if (bs->xfer_mode != HSSPI_XFER_MODE_DUMMYCS)
prependable = bcm63xx_prepare_prepend_transfer(host, msg, &t_prepend);
if (prependable) {
status = bcm63xx_hsspi_do_prepend_txrx(spi, &t_prepend);
msg->actual_length = (t_prepend.len + bs->prepend_cnt);
} else {
if (bs->xfer_mode == HSSPI_XFER_MODE_PREPEND) {
dev_err(&bs->pdev->dev,
"User sets prepend mode but msg not prependable! Abort transfer\n");
status = -EINVAL;
} else
status = bcm63xx_hsspi_do_dummy_cs_txrx(spi, msg);
}
mutex_unlock(&bs->msg_mutex);
msg->status = status;
spi_finalize_current_message(host);
return 0;
}
static bool bcm63xx_hsspi_mem_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
if (!spi_mem_default_supports_op(mem, op))
return false;
/* Controller doesn't support spi mem dual io mode */
if ((op->cmd.opcode == SPINOR_OP_READ_1_2_2) ||
(op->cmd.opcode == SPINOR_OP_READ_1_2_2_4B) ||
(op->cmd.opcode == SPINOR_OP_READ_1_2_2_DTR) ||
(op->cmd.opcode == SPINOR_OP_READ_1_2_2_DTR_4B))
return false;
return true;
}
static const struct spi_controller_mem_ops bcm63xx_hsspi_mem_ops = {
.supports_op = bcm63xx_hsspi_mem_supports_op,
};
static irqreturn_t bcm63xx_hsspi_interrupt(int irq, void *dev_id)
{
struct bcm63xx_hsspi *bs = (struct bcm63xx_hsspi *)dev_id;
if (__raw_readl(bs->regs + HSSPI_INT_STATUS_MASKED_REG) == 0)
return IRQ_NONE;
__raw_writel(HSSPI_INT_CLEAR_ALL, bs->regs + HSSPI_INT_STATUS_REG);
__raw_writel(0, bs->regs + HSSPI_INT_MASK_REG);
complete(&bs->done);
return IRQ_HANDLED;
}
static int bcm63xx_hsspi_probe(struct platform_device *pdev)
{
struct spi_controller *host;
struct bcm63xx_hsspi *bs;
void __iomem *regs;
struct device *dev = &pdev->dev;
struct clk *clk, *pll_clk = NULL;
int irq, ret;
u32 reg, rate, num_cs = HSSPI_SPI_MAX_CS;
struct reset_control *reset;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
clk = devm_clk_get(dev, "hsspi");
if (IS_ERR(clk))
return PTR_ERR(clk);
reset = devm_reset_control_get_optional_exclusive(dev, NULL);
if (IS_ERR(reset))
return PTR_ERR(reset);
ret = clk_prepare_enable(clk);
if (ret)
return ret;
ret = reset_control_reset(reset);
if (ret) {
dev_err(dev, "unable to reset device: %d\n", ret);
goto out_disable_clk;
}
rate = clk_get_rate(clk);
if (!rate) {
pll_clk = devm_clk_get(dev, "pll");
if (IS_ERR(pll_clk)) {
ret = PTR_ERR(pll_clk);
goto out_disable_clk;
}
ret = clk_prepare_enable(pll_clk);
if (ret)
goto out_disable_clk;
rate = clk_get_rate(pll_clk);
if (!rate) {
ret = -EINVAL;
goto out_disable_pll_clk;
}
}
host = spi_alloc_host(&pdev->dev, sizeof(*bs));
if (!host) {
ret = -ENOMEM;
goto out_disable_pll_clk;
}
bs = spi_controller_get_devdata(host);
bs->pdev = pdev;
bs->clk = clk;
bs->pll_clk = pll_clk;
bs->regs = regs;
bs->speed_hz = rate;
bs->fifo = (u8 __iomem *)(bs->regs + HSSPI_FIFO_REG(0));
bs->wait_mode = HSSPI_WAIT_MODE_POLLING;
bs->prepend_buf = devm_kzalloc(dev, HSSPI_BUFFER_LEN, GFP_KERNEL);
if (!bs->prepend_buf) {
ret = -ENOMEM;
goto out_put_host;
}
mutex_init(&bs->bus_mutex);
mutex_init(&bs->msg_mutex);
init_completion(&bs->done);
host->mem_ops = &bcm63xx_hsspi_mem_ops;
host->dev.of_node = dev->of_node;
if (!dev->of_node)
host->bus_num = HSSPI_BUS_NUM;
of_property_read_u32(dev->of_node, "num-cs", &num_cs);
if (num_cs > 8) {
dev_warn(dev, "unsupported number of cs (%i), reducing to 8\n",
num_cs);
num_cs = HSSPI_SPI_MAX_CS;
}
host->num_chipselect = num_cs;
host->setup = bcm63xx_hsspi_setup;
host->transfer_one_message = bcm63xx_hsspi_transfer_one;
host->max_transfer_size = bcm63xx_hsspi_max_message_size;
host->max_message_size = bcm63xx_hsspi_max_message_size;
host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH |
SPI_RX_DUAL | SPI_TX_DUAL;
host->bits_per_word_mask = SPI_BPW_MASK(8);
host->auto_runtime_pm = true;
platform_set_drvdata(pdev, host);
/* Initialize the hardware */
__raw_writel(0, bs->regs + HSSPI_INT_MASK_REG);
/* clean up any pending interrupts */
__raw_writel(HSSPI_INT_CLEAR_ALL, bs->regs + HSSPI_INT_STATUS_REG);
/* read out default CS polarities */
reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
bs->cs_polarity = reg & GLOBAL_CTRL_CS_POLARITY_MASK;
__raw_writel(reg | GLOBAL_CTRL_CLK_GATE_SSOFF,
bs->regs + HSSPI_GLOBAL_CTRL_REG);
if (irq > 0) {
ret = devm_request_irq(dev, irq, bcm63xx_hsspi_interrupt, IRQF_SHARED,
pdev->name, bs);
if (ret)
goto out_put_host;
}
pm_runtime_enable(&pdev->dev);
ret = sysfs_create_group(&pdev->dev.kobj, &bcm63xx_hsspi_group);
if (ret) {
dev_err(&pdev->dev, "couldn't register sysfs group\n");
goto out_pm_disable;
}
/* register and we are done */
ret = devm_spi_register_controller(dev, host);
if (ret)
goto out_sysgroup_disable;
dev_info(dev, "Broadcom 63XX High Speed SPI Controller driver");
return 0;
out_sysgroup_disable:
sysfs_remove_group(&pdev->dev.kobj, &bcm63xx_hsspi_group);
out_pm_disable:
pm_runtime_disable(&pdev->dev);
out_put_host:
spi_controller_put(host);
out_disable_pll_clk:
clk_disable_unprepare(pll_clk);
out_disable_clk:
clk_disable_unprepare(clk);
return ret;
}
static void bcm63xx_hsspi_remove(struct platform_device *pdev)
{
struct spi_controller *host = platform_get_drvdata(pdev);
struct bcm63xx_hsspi *bs = spi_controller_get_devdata(host);
/* reset the hardware and block queue progress */
__raw_writel(0, bs->regs + HSSPI_INT_MASK_REG);
clk_disable_unprepare(bs->pll_clk);
clk_disable_unprepare(bs->clk);
sysfs_remove_group(&pdev->dev.kobj, &bcm63xx_hsspi_group);
}
#ifdef CONFIG_PM_SLEEP
static int bcm63xx_hsspi_suspend(struct device *dev)
{
struct spi_controller *host = dev_get_drvdata(dev);
struct bcm63xx_hsspi *bs = spi_controller_get_devdata(host);
spi_controller_suspend(host);
clk_disable_unprepare(bs->pll_clk);
clk_disable_unprepare(bs->clk);
return 0;
}
static int bcm63xx_hsspi_resume(struct device *dev)
{
struct spi_controller *host = dev_get_drvdata(dev);
struct bcm63xx_hsspi *bs = spi_controller_get_devdata(host);
int ret;
ret = clk_prepare_enable(bs->clk);
if (ret)
return ret;
if (bs->pll_clk) {
ret = clk_prepare_enable(bs->pll_clk);
if (ret) {
clk_disable_unprepare(bs->clk);
return ret;
}
}
spi_controller_resume(host);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(bcm63xx_hsspi_pm_ops, bcm63xx_hsspi_suspend,
bcm63xx_hsspi_resume);
static const struct of_device_id bcm63xx_hsspi_of_match[] = {
{ .compatible = "brcm,bcm6328-hsspi", },
{ .compatible = "brcm,bcmbca-hsspi-v1.0", },
{ },
};
MODULE_DEVICE_TABLE(of, bcm63xx_hsspi_of_match);
static struct platform_driver bcm63xx_hsspi_driver = {
.driver = {
.name = "bcm63xx-hsspi",
.pm = &bcm63xx_hsspi_pm_ops,
.of_match_table = bcm63xx_hsspi_of_match,
},
.probe = bcm63xx_hsspi_probe,
.remove_new = bcm63xx_hsspi_remove,
};
module_platform_driver(bcm63xx_hsspi_driver);
MODULE_ALIAS("platform:bcm63xx_hsspi");
MODULE_DESCRIPTION("Broadcom BCM63xx High Speed SPI Controller driver");
MODULE_AUTHOR("Jonas Gorski <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-bcm63xx-hsspi.c |
// SPDX-License-Identifier: (GPL-2.0)
/*
* Microchip CoreSPI SPI controller driver
*
* Copyright (c) 2018-2022 Microchip Technology Inc. and its subsidiaries
*
* Author: Daire McNamara <[email protected]>
* Author: Conor Dooley <[email protected]>
*
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#define MAX_LEN (0xffff)
#define MAX_CS (8)
#define DEFAULT_FRAMESIZE (8)
#define FIFO_DEPTH (32)
#define CLK_GEN_MODE1_MAX (255)
#define CLK_GEN_MODE0_MAX (15)
#define CLK_GEN_MIN (0)
#define MODE_X_MASK_SHIFT (24)
#define CONTROL_ENABLE BIT(0)
#define CONTROL_MASTER BIT(1)
#define CONTROL_RX_DATA_INT BIT(4)
#define CONTROL_TX_DATA_INT BIT(5)
#define CONTROL_RX_OVER_INT BIT(6)
#define CONTROL_TX_UNDER_INT BIT(7)
#define CONTROL_SPO BIT(24)
#define CONTROL_SPH BIT(25)
#define CONTROL_SPS BIT(26)
#define CONTROL_FRAMEURUN BIT(27)
#define CONTROL_CLKMODE BIT(28)
#define CONTROL_BIGFIFO BIT(29)
#define CONTROL_OENOFF BIT(30)
#define CONTROL_RESET BIT(31)
#define CONTROL_MODE_MASK GENMASK(3, 2)
#define MOTOROLA_MODE (0)
#define CONTROL_FRAMECNT_MASK GENMASK(23, 8)
#define CONTROL_FRAMECNT_SHIFT (8)
#define STATUS_ACTIVE BIT(14)
#define STATUS_SSEL BIT(13)
#define STATUS_FRAMESTART BIT(12)
#define STATUS_TXFIFO_EMPTY_NEXT_READ BIT(11)
#define STATUS_TXFIFO_EMPTY BIT(10)
#define STATUS_TXFIFO_FULL_NEXT_WRITE BIT(9)
#define STATUS_TXFIFO_FULL BIT(8)
#define STATUS_RXFIFO_EMPTY_NEXT_READ BIT(7)
#define STATUS_RXFIFO_EMPTY BIT(6)
#define STATUS_RXFIFO_FULL_NEXT_WRITE BIT(5)
#define STATUS_RXFIFO_FULL BIT(4)
#define STATUS_TX_UNDERRUN BIT(3)
#define STATUS_RX_OVERFLOW BIT(2)
#define STATUS_RXDAT_RXED BIT(1)
#define STATUS_TXDAT_SENT BIT(0)
#define INT_TXDONE BIT(0)
#define INT_RXRDY BIT(1)
#define INT_RX_CHANNEL_OVERFLOW BIT(2)
#define INT_TX_CHANNEL_UNDERRUN BIT(3)
#define INT_ENABLE_MASK (CONTROL_RX_DATA_INT | CONTROL_TX_DATA_INT | \
CONTROL_RX_OVER_INT | CONTROL_TX_UNDER_INT)
#define REG_CONTROL (0x00)
#define REG_FRAME_SIZE (0x04)
#define REG_STATUS (0x08)
#define REG_INT_CLEAR (0x0c)
#define REG_RX_DATA (0x10)
#define REG_TX_DATA (0x14)
#define REG_CLK_GEN (0x18)
#define REG_SLAVE_SELECT (0x1c)
#define SSEL_MASK GENMASK(7, 0)
#define SSEL_DIRECT BIT(8)
#define SSELOUT_SHIFT 9
#define SSELOUT BIT(SSELOUT_SHIFT)
#define REG_MIS (0x20)
#define REG_RIS (0x24)
#define REG_CONTROL2 (0x28)
#define REG_COMMAND (0x2c)
#define REG_PKTSIZE (0x30)
#define REG_CMD_SIZE (0x34)
#define REG_HWSTATUS (0x38)
#define REG_STAT8 (0x3c)
#define REG_CTRL2 (0x48)
#define REG_FRAMESUP (0x50)
struct mchp_corespi {
void __iomem *regs;
struct clk *clk;
const u8 *tx_buf;
u8 *rx_buf;
u32 clk_gen; /* divider for spi output clock generated by the controller */
u32 clk_mode;
int irq;
int tx_len;
int rx_len;
int pending;
};
static inline u32 mchp_corespi_read(struct mchp_corespi *spi, unsigned int reg)
{
return readl(spi->regs + reg);
}
static inline void mchp_corespi_write(struct mchp_corespi *spi, unsigned int reg, u32 val)
{
writel(val, spi->regs + reg);
}
static inline void mchp_corespi_disable(struct mchp_corespi *spi)
{
u32 control = mchp_corespi_read(spi, REG_CONTROL);
control &= ~CONTROL_ENABLE;
mchp_corespi_write(spi, REG_CONTROL, control);
}
static inline void mchp_corespi_read_fifo(struct mchp_corespi *spi)
{
u8 data;
int fifo_max, i = 0;
fifo_max = min(spi->rx_len, FIFO_DEPTH);
while ((i < fifo_max) && !(mchp_corespi_read(spi, REG_STATUS) & STATUS_RXFIFO_EMPTY)) {
data = mchp_corespi_read(spi, REG_RX_DATA);
if (spi->rx_buf)
*spi->rx_buf++ = data;
i++;
}
spi->rx_len -= i;
spi->pending -= i;
}
static void mchp_corespi_enable_ints(struct mchp_corespi *spi)
{
u32 control, mask = INT_ENABLE_MASK;
mchp_corespi_disable(spi);
control = mchp_corespi_read(spi, REG_CONTROL);
control |= mask;
mchp_corespi_write(spi, REG_CONTROL, control);
control |= CONTROL_ENABLE;
mchp_corespi_write(spi, REG_CONTROL, control);
}
static void mchp_corespi_disable_ints(struct mchp_corespi *spi)
{
u32 control, mask = INT_ENABLE_MASK;
mchp_corespi_disable(spi);
control = mchp_corespi_read(spi, REG_CONTROL);
control &= ~mask;
mchp_corespi_write(spi, REG_CONTROL, control);
control |= CONTROL_ENABLE;
mchp_corespi_write(spi, REG_CONTROL, control);
}
static inline void mchp_corespi_set_xfer_size(struct mchp_corespi *spi, int len)
{
u32 control;
u16 lenpart;
/*
	 * Disable the SPI controller. Writes to the transfer length have
	 * no effect when the controller is enabled.
*/
mchp_corespi_disable(spi);
/*
	 * The lower 16 bits of the frame count are stored in the control reg
	 * for legacy reasons, but the upper 16 are written to a different
	 * register: FRAMESUP. While both the upper and lower bits can be *READ*
	 * from the FRAMESUP register, writing to its lower 16 bits is a NOP.
*/
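	/*
	 * For example, len = 0x12345 puts 0x2345 into the control register
	 * frame count field and 0x00010000 into FRAMESUP.
	 */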
lenpart = len & 0xffff;
control = mchp_corespi_read(spi, REG_CONTROL);
control &= ~CONTROL_FRAMECNT_MASK;
control |= lenpart << CONTROL_FRAMECNT_SHIFT;
mchp_corespi_write(spi, REG_CONTROL, control);
lenpart = len & 0xffff0000;
mchp_corespi_write(spi, REG_FRAMESUP, lenpart);
control |= CONTROL_ENABLE;
mchp_corespi_write(spi, REG_CONTROL, control);
}
static inline void mchp_corespi_write_fifo(struct mchp_corespi *spi)
{
u8 byte;
int fifo_max, i = 0;
fifo_max = min(spi->tx_len, FIFO_DEPTH);
mchp_corespi_set_xfer_size(spi, fifo_max);
while ((i < fifo_max) && !(mchp_corespi_read(spi, REG_STATUS) & STATUS_TXFIFO_FULL)) {
byte = spi->tx_buf ? *spi->tx_buf++ : 0xaa;
mchp_corespi_write(spi, REG_TX_DATA, byte);
i++;
}
spi->tx_len -= i;
spi->pending += i;
}
static inline void mchp_corespi_set_framesize(struct mchp_corespi *spi, int bt)
{
u32 control;
/*
* Disable the SPI controller. Writes to the frame size have
* no effect when the controller is enabled.
*/
mchp_corespi_disable(spi);
mchp_corespi_write(spi, REG_FRAME_SIZE, bt);
control = mchp_corespi_read(spi, REG_CONTROL);
control |= CONTROL_ENABLE;
mchp_corespi_write(spi, REG_CONTROL, control);
}
static void mchp_corespi_set_cs(struct spi_device *spi, bool disable)
{
u32 reg;
struct mchp_corespi *corespi = spi_master_get_devdata(spi->master);
reg = mchp_corespi_read(corespi, REG_SLAVE_SELECT);
reg &= ~BIT(spi_get_chipselect(spi, 0));
reg |= !disable << spi_get_chipselect(spi, 0);
mchp_corespi_write(corespi, REG_SLAVE_SELECT, reg);
}
static int mchp_corespi_setup(struct spi_device *spi)
{
struct mchp_corespi *corespi = spi_master_get_devdata(spi->master);
u32 reg;
/*
	 * Active-high slaves need to be explicitly set to their inactive
	 * state during probe by adding them to the "control group" and thus
	 * driving their select line low.
*/
if (spi->mode & SPI_CS_HIGH) {
reg = mchp_corespi_read(corespi, REG_SLAVE_SELECT);
reg |= BIT(spi_get_chipselect(spi, 0));
mchp_corespi_write(corespi, REG_SLAVE_SELECT, reg);
}
return 0;
}
static void mchp_corespi_init(struct spi_master *master, struct mchp_corespi *spi)
{
unsigned long clk_hz;
u32 control = mchp_corespi_read(spi, REG_CONTROL);
control |= CONTROL_MASTER;
control &= ~CONTROL_MODE_MASK;
control |= MOTOROLA_MODE;
mchp_corespi_set_framesize(spi, DEFAULT_FRAMESIZE);
/* max. possible spi clock rate is the apb clock rate */
clk_hz = clk_get_rate(spi->clk);
master->max_speed_hz = clk_hz;
/*
* The controller must be configured so that it doesn't remove Chip
* Select until the entire message has been transferred, even if at
* some points TX FIFO becomes empty.
*
* BIGFIFO mode is also enabled, which sets the fifo depth to 32 frames
* for the 8 bit transfers that this driver uses.
*/
control = mchp_corespi_read(spi, REG_CONTROL);
control |= CONTROL_SPS | CONTROL_BIGFIFO;
mchp_corespi_write(spi, REG_CONTROL, control);
mchp_corespi_enable_ints(spi);
/*
* It is required to enable direct mode, otherwise control over the chip
* select is relinquished to the hardware. SSELOUT is enabled too so we
* can deal with active high slaves.
*/
mchp_corespi_write(spi, REG_SLAVE_SELECT, SSELOUT | SSEL_DIRECT);
control = mchp_corespi_read(spi, REG_CONTROL);
control &= ~CONTROL_RESET;
control |= CONTROL_ENABLE;
mchp_corespi_write(spi, REG_CONTROL, control);
}
static inline void mchp_corespi_set_clk_gen(struct mchp_corespi *spi)
{
u32 control;
mchp_corespi_disable(spi);
control = mchp_corespi_read(spi, REG_CONTROL);
if (spi->clk_mode)
control |= CONTROL_CLKMODE;
else
control &= ~CONTROL_CLKMODE;
mchp_corespi_write(spi, REG_CLK_GEN, spi->clk_gen);
mchp_corespi_write(spi, REG_CONTROL, control);
mchp_corespi_write(spi, REG_CONTROL, control | CONTROL_ENABLE);
}
static inline void mchp_corespi_set_mode(struct mchp_corespi *spi, unsigned int mode)
{
u32 control, mode_val;
switch (mode & SPI_MODE_X_MASK) {
case SPI_MODE_0:
mode_val = 0;
break;
case SPI_MODE_1:
mode_val = CONTROL_SPH;
break;
case SPI_MODE_2:
mode_val = CONTROL_SPO;
break;
case SPI_MODE_3:
mode_val = CONTROL_SPH | CONTROL_SPO;
break;
}
/*
	 * Disable the SPI controller. Writes to the mode bits have
	 * no effect when the controller is enabled.
*/
mchp_corespi_disable(spi);
control = mchp_corespi_read(spi, REG_CONTROL);
control &= ~(SPI_MODE_X_MASK << MODE_X_MASK_SHIFT);
control |= mode_val;
mchp_corespi_write(spi, REG_CONTROL, control);
control |= CONTROL_ENABLE;
mchp_corespi_write(spi, REG_CONTROL, control);
}
static irqreturn_t mchp_corespi_interrupt(int irq, void *dev_id)
{
struct spi_master *master = dev_id;
struct mchp_corespi *spi = spi_master_get_devdata(master);
u32 intfield = mchp_corespi_read(spi, REG_MIS) & 0xf;
bool finalise = false;
/* Interrupt line may be shared and not for us at all */
if (intfield == 0)
return IRQ_NONE;
if (intfield & INT_TXDONE) {
mchp_corespi_write(spi, REG_INT_CLEAR, INT_TXDONE);
if (spi->rx_len)
mchp_corespi_read_fifo(spi);
if (spi->tx_len)
mchp_corespi_write_fifo(spi);
if (!spi->rx_len)
finalise = true;
}
if (intfield & INT_RXRDY)
mchp_corespi_write(spi, REG_INT_CLEAR, INT_RXRDY);
if (intfield & INT_RX_CHANNEL_OVERFLOW) {
mchp_corespi_write(spi, REG_INT_CLEAR, INT_RX_CHANNEL_OVERFLOW);
finalise = true;
dev_err(&master->dev,
"%s: RX OVERFLOW: rxlen: %d, txlen: %d\n", __func__,
spi->rx_len, spi->tx_len);
}
if (intfield & INT_TX_CHANNEL_UNDERRUN) {
mchp_corespi_write(spi, REG_INT_CLEAR, INT_TX_CHANNEL_UNDERRUN);
finalise = true;
dev_err(&master->dev,
"%s: TX UNDERFLOW: rxlen: %d, txlen: %d\n", __func__,
spi->rx_len, spi->tx_len);
}
if (finalise)
spi_finalize_current_transfer(master);
return IRQ_HANDLED;
}
static int mchp_corespi_calculate_clkgen(struct mchp_corespi *spi,
unsigned long target_hz)
{
unsigned long clk_hz, spi_hz, clk_gen;
clk_hz = clk_get_rate(spi->clk);
if (!clk_hz)
return -EINVAL;
spi_hz = min(target_hz, clk_hz);
/*
* There are two possible clock modes for the controller generated
* clock's division ratio:
* CLK_MODE = 0: 1 / (2^(CLK_GEN + 1)) where CLK_GEN = 0 to 15.
* CLK_MODE = 1: 1 / (2 * CLK_GEN + 1) where CLK_GEN = 0 to 255.
	 * First try mode 1, then fall back to mode 0. If we have tried both
	 * modes and we /still/ can't get a good setting, we throw the toys out
	 * of the pram and give up.
* clk_gen is the register name for the clock divider on MPFS.
*/
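	/*
	 * For example, with clk_hz = 150 MHz and a 10 MHz target, mode 1 gives
	 * clk_gen = DIV_ROUND_UP(150, 20) - 1 = 7, which is within the mode 1
	 * limits (CLK_GEN_MIN < 7 <= CLK_GEN_MODE1_MAX), so mode 1 is used.
	 */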
clk_gen = DIV_ROUND_UP(clk_hz, 2 * spi_hz) - 1;
if (clk_gen > CLK_GEN_MODE1_MAX || clk_gen <= CLK_GEN_MIN) {
clk_gen = DIV_ROUND_UP(clk_hz, spi_hz);
clk_gen = fls(clk_gen) - 1;
if (clk_gen > CLK_GEN_MODE0_MAX)
return -EINVAL;
spi->clk_mode = 0;
} else {
spi->clk_mode = 1;
}
spi->clk_gen = clk_gen;
return 0;
}
static int mchp_corespi_transfer_one(struct spi_master *master,
struct spi_device *spi_dev,
struct spi_transfer *xfer)
{
struct mchp_corespi *spi = spi_master_get_devdata(master);
int ret;
ret = mchp_corespi_calculate_clkgen(spi, (unsigned long)xfer->speed_hz);
if (ret) {
dev_err(&master->dev, "failed to set clk_gen for target %u Hz\n", xfer->speed_hz);
return ret;
}
mchp_corespi_set_clk_gen(spi);
spi->tx_buf = xfer->tx_buf;
spi->rx_buf = xfer->rx_buf;
spi->tx_len = xfer->len;
spi->rx_len = xfer->len;
spi->pending = 0;
mchp_corespi_set_xfer_size(spi, (spi->tx_len > FIFO_DEPTH)
? FIFO_DEPTH : spi->tx_len);
if (spi->tx_len)
mchp_corespi_write_fifo(spi);
return 1;
}
static int mchp_corespi_prepare_message(struct spi_master *master,
struct spi_message *msg)
{
struct spi_device *spi_dev = msg->spi;
struct mchp_corespi *spi = spi_master_get_devdata(master);
mchp_corespi_set_framesize(spi, DEFAULT_FRAMESIZE);
mchp_corespi_set_mode(spi, spi_dev->mode);
return 0;
}
static int mchp_corespi_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct mchp_corespi *spi;
struct resource *res;
u32 num_cs;
int ret = 0;
master = devm_spi_alloc_master(&pdev->dev, sizeof(*spi));
if (!master)
return dev_err_probe(&pdev->dev, -ENOMEM,
"unable to allocate master for SPI controller\n");
platform_set_drvdata(pdev, master);
if (of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs))
num_cs = MAX_CS;
master->num_chipselect = num_cs;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->setup = mchp_corespi_setup;
master->bits_per_word_mask = SPI_BPW_MASK(8);
master->transfer_one = mchp_corespi_transfer_one;
master->prepare_message = mchp_corespi_prepare_message;
master->set_cs = mchp_corespi_set_cs;
master->dev.of_node = pdev->dev.of_node;
spi = spi_master_get_devdata(master);
spi->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(spi->regs))
return PTR_ERR(spi->regs);
spi->irq = platform_get_irq(pdev, 0);
if (spi->irq < 0)
return spi->irq;
ret = devm_request_irq(&pdev->dev, spi->irq, mchp_corespi_interrupt,
IRQF_SHARED, dev_name(&pdev->dev), master);
if (ret)
return dev_err_probe(&pdev->dev, ret,
"could not request irq\n");
spi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(spi->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(spi->clk),
"could not get clk\n");
ret = clk_prepare_enable(spi->clk);
if (ret)
return dev_err_probe(&pdev->dev, ret,
"failed to enable clock\n");
mchp_corespi_init(master, spi);
ret = devm_spi_register_master(&pdev->dev, master);
if (ret) {
mchp_corespi_disable(spi);
clk_disable_unprepare(spi->clk);
return dev_err_probe(&pdev->dev, ret,
"unable to register master for SPI controller\n");
}
dev_info(&pdev->dev, "Registered SPI controller %d\n", master->bus_num);
return 0;
}
static void mchp_corespi_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct mchp_corespi *spi = spi_master_get_devdata(master);
mchp_corespi_disable_ints(spi);
clk_disable_unprepare(spi->clk);
mchp_corespi_disable(spi);
}
#define MICROCHIP_SPI_PM_OPS (NULL)
/*
* Platform driver data structure
*/
#if defined(CONFIG_OF)
static const struct of_device_id mchp_corespi_dt_ids[] = {
{ .compatible = "microchip,mpfs-spi" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mchp_corespi_dt_ids);
#endif
static struct platform_driver mchp_corespi_driver = {
.probe = mchp_corespi_probe,
.driver = {
.name = "microchip-corespi",
.pm = MICROCHIP_SPI_PM_OPS,
.of_match_table = of_match_ptr(mchp_corespi_dt_ids),
},
.remove_new = mchp_corespi_remove,
};
module_platform_driver(mchp_corespi_driver);
MODULE_DESCRIPTION("Microchip coreSPI SPI controller driver");
MODULE_AUTHOR("Daire McNamara <[email protected]>");
MODULE_AUTHOR("Conor Dooley <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-microchip-core.c |
// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
//
// Authors:
// Ramil Zaripov <[email protected]>
// Serge Semin <[email protected]>
//
// Baikal-T1 DW APB SPI and System Boot SPI driver
//
#include <linux/clk.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mux/consumer.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/spi/spi-mem.h>
#include <linux/spi/spi.h>
#include "spi-dw.h"
#define BT1_BOOT_DIRMAP 0
#define BT1_BOOT_REGS 1
struct dw_spi_bt1 {
struct dw_spi dws;
struct clk *clk;
struct mux_control *mux;
#ifdef CONFIG_SPI_DW_BT1_DIRMAP
void __iomem *map;
resource_size_t map_len;
#endif
};
#define to_dw_spi_bt1(_ctlr) \
container_of(spi_controller_get_devdata(_ctlr), struct dw_spi_bt1, dws)
typedef int (*dw_spi_bt1_init_cb)(struct platform_device *pdev,
struct dw_spi_bt1 *dwsbt1);
#ifdef CONFIG_SPI_DW_BT1_DIRMAP
static int dw_spi_bt1_dirmap_create(struct spi_mem_dirmap_desc *desc)
{
struct dw_spi_bt1 *dwsbt1 = to_dw_spi_bt1(desc->mem->spi->controller);
if (!dwsbt1->map ||
!dwsbt1->dws.mem_ops.supports_op(desc->mem, &desc->info.op_tmpl))
return -EOPNOTSUPP;
/*
* Make sure the requested region doesn't go out of the physically
* mapped flash memory bounds and the operation is read-only.
*/
if (desc->info.offset + desc->info.length > dwsbt1->map_len ||
desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN)
return -EOPNOTSUPP;
return 0;
}
/*
 * The directly mapped SPI memory region is only accessible in dword chunks.
 * That's why we have to create a dedicated read method to copy data from there
 * to the passed buffer.
*/
static void dw_spi_bt1_dirmap_copy_from_map(void *to, void __iomem *from, size_t len)
{
size_t shift, chunk;
u32 data;
/*
* We split the copying up into the next three stages: unaligned head,
* aligned body, unaligned tail.
*/
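	/*
	 * For example (hypothetical addresses), from = ...0x6 and len = 9:
	 * shift = 2, so a 2-byte head is copied first, then one aligned
	 * 4-byte word, and finally a 3-byte tail taken from the last dword
	 * read.
	 */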
shift = (size_t)from & 0x3;
if (shift) {
chunk = min_t(size_t, 4 - shift, len);
data = readl_relaxed(from - shift);
memcpy(to, (char *)&data + shift, chunk);
from += chunk;
to += chunk;
len -= chunk;
}
while (len >= 4) {
data = readl_relaxed(from);
memcpy(to, &data, 4);
from += 4;
to += 4;
len -= 4;
}
if (len) {
data = readl_relaxed(from);
memcpy(to, &data, len);
}
}
static ssize_t dw_spi_bt1_dirmap_read(struct spi_mem_dirmap_desc *desc,
u64 offs, size_t len, void *buf)
{
struct dw_spi_bt1 *dwsbt1 = to_dw_spi_bt1(desc->mem->spi->controller);
struct dw_spi *dws = &dwsbt1->dws;
struct spi_mem *mem = desc->mem;
struct dw_spi_cfg cfg;
int ret;
/*
* Make sure the requested operation length is valid. Truncate the
* length if it's greater than the length of the MMIO region.
*/
if (offs >= dwsbt1->map_len || !len)
return 0;
len = min_t(size_t, len, dwsbt1->map_len - offs);
/* Collect the controller configuration required by the operation */
cfg.tmode = DW_SPI_CTRLR0_TMOD_EPROMREAD;
cfg.dfs = 8;
cfg.ndf = 4;
cfg.freq = mem->spi->max_speed_hz;
/* Make sure the corresponding CS is de-asserted on transmission */
dw_spi_set_cs(mem->spi, false);
dw_spi_enable_chip(dws, 0);
dw_spi_update_config(dws, mem->spi, &cfg);
dw_spi_umask_intr(dws, DW_SPI_INT_RXFI);
dw_spi_enable_chip(dws, 1);
/*
* Enable the transparent mode of the System Boot Controller.
* The SPI core IO should have been locked before calling this method
	 * so no one would be touching the controller's registers during the
* dirmap operation.
*/
ret = mux_control_select(dwsbt1->mux, BT1_BOOT_DIRMAP);
if (ret)
return ret;
dw_spi_bt1_dirmap_copy_from_map(buf, dwsbt1->map + offs, len);
mux_control_deselect(dwsbt1->mux);
dw_spi_set_cs(mem->spi, true);
ret = dw_spi_check_status(dws, true);
return ret ?: len;
}
#endif /* CONFIG_SPI_DW_BT1_DIRMAP */
static int dw_spi_bt1_std_init(struct platform_device *pdev,
struct dw_spi_bt1 *dwsbt1)
{
struct dw_spi *dws = &dwsbt1->dws;
dws->irq = platform_get_irq(pdev, 0);
if (dws->irq < 0)
return dws->irq;
dws->num_cs = 4;
/*
	 * Baikal-T1 Normal SPI Controllers don't always keep up with the full
	 * SPI bus speed, especially when it comes to concurrent access to the
	 * APB bus resources. Thus we have no choice but to set a constraint on
	 * the SPI bus frequency for the memory operations which need to
	 * read/write data as fast as possible.
*/
dws->max_mem_freq = 20000000U;
dw_spi_dma_setup_generic(dws);
return 0;
}
static int dw_spi_bt1_sys_init(struct platform_device *pdev,
struct dw_spi_bt1 *dwsbt1)
{
struct resource *mem __maybe_unused;
struct dw_spi *dws = &dwsbt1->dws;
/*
* Baikal-T1 System Boot Controller is equipped with a mux, which
* switches between the directly mapped SPI flash access mode and
	 * IO access to the DW APB SSI registers. Note the mux controller
	 * must be set up so that the registers remain accessible by default
	 * (in the idle state).
*/
dwsbt1->mux = devm_mux_control_get(&pdev->dev, NULL);
if (IS_ERR(dwsbt1->mux))
return PTR_ERR(dwsbt1->mux);
/*
* Directly mapped SPI flash memory is a 16MB MMIO region, which can be
* used to access a peripheral memory device just by reading/writing
* data from/to it. Note the system APB bus will stall during each IO
* from/to the dirmap region until the operation is finished. So don't
* use it concurrently with time-critical tasks (like the SPI memory
* operations implemented in the DW APB SSI driver).
*/
#ifdef CONFIG_SPI_DW_BT1_DIRMAP
mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (mem) {
dwsbt1->map = devm_ioremap_resource(&pdev->dev, mem);
if (!IS_ERR(dwsbt1->map)) {
dwsbt1->map_len = resource_size(mem);
dws->mem_ops.dirmap_create = dw_spi_bt1_dirmap_create;
dws->mem_ops.dirmap_read = dw_spi_bt1_dirmap_read;
} else {
dwsbt1->map = NULL;
}
}
#endif /* CONFIG_SPI_DW_BT1_DIRMAP */
/*
* There is no IRQ, no DMA and just one CS available on the System Boot
* SPI controller.
*/
dws->irq = IRQ_NOTCONNECTED;
dws->num_cs = 1;
/*
	 * The Baikal-T1 System Boot SPI Controller doesn't keep up with the
	 * full SPI bus speed due to the relatively slow APB bus and races for
	 * its resources from different CPUs. The situation is worsened by the
	 * small FIFO depth (just 8 words). It works better in single-CPU mode,
	 * but still tends not to be fast enough at low CPU frequencies.
*/
if (num_possible_cpus() > 1)
dws->max_mem_freq = 10000000U;
else
dws->max_mem_freq = 20000000U;
return 0;
}
static int dw_spi_bt1_probe(struct platform_device *pdev)
{
dw_spi_bt1_init_cb init_func;
struct dw_spi_bt1 *dwsbt1;
struct resource *mem;
struct dw_spi *dws;
int ret;
dwsbt1 = devm_kzalloc(&pdev->dev, sizeof(struct dw_spi_bt1), GFP_KERNEL);
if (!dwsbt1)
return -ENOMEM;
dws = &dwsbt1->dws;
dws->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
if (IS_ERR(dws->regs))
return PTR_ERR(dws->regs);
dws->paddr = mem->start;
dwsbt1->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(dwsbt1->clk))
return PTR_ERR(dwsbt1->clk);
ret = clk_prepare_enable(dwsbt1->clk);
if (ret)
return ret;
dws->bus_num = pdev->id;
dws->reg_io_width = 4;
dws->max_freq = clk_get_rate(dwsbt1->clk);
if (!dws->max_freq) {
ret = -EINVAL;
goto err_disable_clk;
}
init_func = device_get_match_data(&pdev->dev);
ret = init_func(pdev, dwsbt1);
if (ret)
goto err_disable_clk;
pm_runtime_enable(&pdev->dev);
ret = dw_spi_add_host(&pdev->dev, dws);
if (ret) {
pm_runtime_disable(&pdev->dev);
goto err_disable_clk;
}
platform_set_drvdata(pdev, dwsbt1);
return 0;
err_disable_clk:
clk_disable_unprepare(dwsbt1->clk);
return ret;
}
static void dw_spi_bt1_remove(struct platform_device *pdev)
{
struct dw_spi_bt1 *dwsbt1 = platform_get_drvdata(pdev);
dw_spi_remove_host(&dwsbt1->dws);
pm_runtime_disable(&pdev->dev);
clk_disable_unprepare(dwsbt1->clk);
}
static const struct of_device_id dw_spi_bt1_of_match[] = {
{ .compatible = "baikal,bt1-ssi", .data = dw_spi_bt1_std_init},
{ .compatible = "baikal,bt1-sys-ssi", .data = dw_spi_bt1_sys_init},
{ }
};
MODULE_DEVICE_TABLE(of, dw_spi_bt1_of_match);
static struct platform_driver dw_spi_bt1_driver = {
.probe = dw_spi_bt1_probe,
.remove_new = dw_spi_bt1_remove,
.driver = {
.name = "bt1-sys-ssi",
.of_match_table = dw_spi_bt1_of_match,
},
};
module_platform_driver(dw_spi_bt1_driver);
MODULE_AUTHOR("Serge Semin <[email protected]>");
MODULE_DESCRIPTION("Baikal-T1 System Boot SPI Controller driver");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(SPI_DW_CORE);
| linux-master | drivers/spi/spi-dw-bt1.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Freescale/Motorola Coldfire Queued SPI driver
*
* Copyright 2010 Steven King <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/spi/spi.h>
#include <linux/pm_runtime.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfqspi.h>
#define DRIVER_NAME "mcfqspi"
#define MCFQSPI_BUSCLK (MCF_BUSCLK / 2)
#define MCFQSPI_QMR 0x00
#define MCFQSPI_QMR_MSTR 0x8000
#define MCFQSPI_QMR_CPOL 0x0200
#define MCFQSPI_QMR_CPHA 0x0100
#define MCFQSPI_QDLYR 0x04
#define MCFQSPI_QDLYR_SPE 0x8000
#define MCFQSPI_QWR 0x08
#define MCFQSPI_QWR_HALT 0x8000
#define MCFQSPI_QWR_WREN 0x4000
#define MCFQSPI_QWR_CSIV 0x1000
#define MCFQSPI_QIR 0x0C
#define MCFQSPI_QIR_WCEFB 0x8000
#define MCFQSPI_QIR_ABRTB 0x4000
#define MCFQSPI_QIR_ABRTL 0x1000
#define MCFQSPI_QIR_WCEFE 0x0800
#define MCFQSPI_QIR_ABRTE 0x0400
#define MCFQSPI_QIR_SPIFE 0x0100
#define MCFQSPI_QIR_WCEF 0x0008
#define MCFQSPI_QIR_ABRT 0x0004
#define MCFQSPI_QIR_SPIF 0x0001
#define MCFQSPI_QAR 0x010
#define MCFQSPI_QAR_TXBUF 0x00
#define MCFQSPI_QAR_RXBUF 0x10
#define MCFQSPI_QAR_CMDBUF 0x20
#define MCFQSPI_QDR 0x014
#define MCFQSPI_QCR 0x014
#define MCFQSPI_QCR_CONT 0x8000
#define MCFQSPI_QCR_BITSE 0x4000
#define MCFQSPI_QCR_DT 0x2000
struct mcfqspi {
void __iomem *iobase;
int irq;
struct clk *clk;
struct mcfqspi_cs_control *cs_control;
wait_queue_head_t waitq;
};
static void mcfqspi_wr_qmr(struct mcfqspi *mcfqspi, u16 val)
{
writew(val, mcfqspi->iobase + MCFQSPI_QMR);
}
static void mcfqspi_wr_qdlyr(struct mcfqspi *mcfqspi, u16 val)
{
writew(val, mcfqspi->iobase + MCFQSPI_QDLYR);
}
static u16 mcfqspi_rd_qdlyr(struct mcfqspi *mcfqspi)
{
return readw(mcfqspi->iobase + MCFQSPI_QDLYR);
}
static void mcfqspi_wr_qwr(struct mcfqspi *mcfqspi, u16 val)
{
writew(val, mcfqspi->iobase + MCFQSPI_QWR);
}
static void mcfqspi_wr_qir(struct mcfqspi *mcfqspi, u16 val)
{
writew(val, mcfqspi->iobase + MCFQSPI_QIR);
}
static void mcfqspi_wr_qar(struct mcfqspi *mcfqspi, u16 val)
{
writew(val, mcfqspi->iobase + MCFQSPI_QAR);
}
static void mcfqspi_wr_qdr(struct mcfqspi *mcfqspi, u16 val)
{
writew(val, mcfqspi->iobase + MCFQSPI_QDR);
}
static u16 mcfqspi_rd_qdr(struct mcfqspi *mcfqspi)
{
return readw(mcfqspi->iobase + MCFQSPI_QDR);
}
static void mcfqspi_cs_select(struct mcfqspi *mcfqspi, u8 chip_select,
bool cs_high)
{
mcfqspi->cs_control->select(mcfqspi->cs_control, chip_select, cs_high);
}
static void mcfqspi_cs_deselect(struct mcfqspi *mcfqspi, u8 chip_select,
bool cs_high)
{
mcfqspi->cs_control->deselect(mcfqspi->cs_control, chip_select, cs_high);
}
static int mcfqspi_cs_setup(struct mcfqspi *mcfqspi)
{
return (mcfqspi->cs_control->setup) ?
mcfqspi->cs_control->setup(mcfqspi->cs_control) : 0;
}
static void mcfqspi_cs_teardown(struct mcfqspi *mcfqspi)
{
if (mcfqspi->cs_control->teardown)
mcfqspi->cs_control->teardown(mcfqspi->cs_control);
}
static u8 mcfqspi_qmr_baud(u32 speed_hz)
{
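	/*
	 * Divide the QSPI input clock by the requested rate, rounding up, and
	 * clamp to the 2..255 range of the QMR baud field; e.g. a 40 MHz
	 * MCFQSPI_BUSCLK and a 16 MHz request give a divider of 3.
	 */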
return clamp((MCFQSPI_BUSCLK + speed_hz - 1) / speed_hz, 2u, 255u);
}
static bool mcfqspi_qdlyr_spe(struct mcfqspi *mcfqspi)
{
return mcfqspi_rd_qdlyr(mcfqspi) & MCFQSPI_QDLYR_SPE;
}
static irqreturn_t mcfqspi_irq_handler(int this_irq, void *dev_id)
{
struct mcfqspi *mcfqspi = dev_id;
/* clear interrupt */
mcfqspi_wr_qir(mcfqspi, MCFQSPI_QIR_SPIFE | MCFQSPI_QIR_SPIF);
wake_up(&mcfqspi->waitq);
return IRQ_HANDLED;
}
static void mcfqspi_transfer_msg8(struct mcfqspi *mcfqspi, unsigned count,
const u8 *txbuf, u8 *rxbuf)
{
unsigned i, n, offset = 0;
n = min(count, 16u);
mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_CMDBUF);
for (i = 0; i < n; ++i)
mcfqspi_wr_qdr(mcfqspi, MCFQSPI_QCR_BITSE);
mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_TXBUF);
if (txbuf)
for (i = 0; i < n; ++i)
mcfqspi_wr_qdr(mcfqspi, *txbuf++);
else
for (i = 0; i < count; ++i)
mcfqspi_wr_qdr(mcfqspi, 0);
count -= n;
if (count) {
u16 qwr = 0xf08;
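		/*
		 * The QWR values are assumed to program the ENDQP/NEWQP queue
		 * pointers: 0x700 runs queue entries 0-7 and 0xf08 runs
		 * entries 8-15, so the two 8-entry halves of the queue RAM are
		 * used in a ping-pong fashion (see the offset ^= 8 below).
		 */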
mcfqspi_wr_qwr(mcfqspi, 0x700);
mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
do {
wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi));
mcfqspi_wr_qwr(mcfqspi, qwr);
mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
if (rxbuf) {
mcfqspi_wr_qar(mcfqspi,
MCFQSPI_QAR_RXBUF + offset);
for (i = 0; i < 8; ++i)
*rxbuf++ = mcfqspi_rd_qdr(mcfqspi);
}
n = min(count, 8u);
if (txbuf) {
mcfqspi_wr_qar(mcfqspi,
MCFQSPI_QAR_TXBUF + offset);
for (i = 0; i < n; ++i)
mcfqspi_wr_qdr(mcfqspi, *txbuf++);
}
qwr = (offset ? 0x808 : 0) + ((n - 1) << 8);
offset ^= 8;
count -= n;
} while (count);
wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi));
mcfqspi_wr_qwr(mcfqspi, qwr);
mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
if (rxbuf) {
mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset);
for (i = 0; i < 8; ++i)
*rxbuf++ = mcfqspi_rd_qdr(mcfqspi);
offset ^= 8;
}
} else {
mcfqspi_wr_qwr(mcfqspi, (n - 1) << 8);
mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
}
wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi));
if (rxbuf) {
mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset);
for (i = 0; i < n; ++i)
*rxbuf++ = mcfqspi_rd_qdr(mcfqspi);
}
}
static void mcfqspi_transfer_msg16(struct mcfqspi *mcfqspi, unsigned count,
const u16 *txbuf, u16 *rxbuf)
{
unsigned i, n, offset = 0;
n = min(count, 16u);
mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_CMDBUF);
for (i = 0; i < n; ++i)
mcfqspi_wr_qdr(mcfqspi, MCFQSPI_QCR_BITSE);
mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_TXBUF);
if (txbuf)
for (i = 0; i < n; ++i)
mcfqspi_wr_qdr(mcfqspi, *txbuf++);
else
for (i = 0; i < count; ++i)
mcfqspi_wr_qdr(mcfqspi, 0);
count -= n;
if (count) {
u16 qwr = 0xf08;
mcfqspi_wr_qwr(mcfqspi, 0x700);
mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
do {
wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi));
mcfqspi_wr_qwr(mcfqspi, qwr);
mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
if (rxbuf) {
mcfqspi_wr_qar(mcfqspi,
MCFQSPI_QAR_RXBUF + offset);
for (i = 0; i < 8; ++i)
*rxbuf++ = mcfqspi_rd_qdr(mcfqspi);
}
n = min(count, 8u);
if (txbuf) {
mcfqspi_wr_qar(mcfqspi,
MCFQSPI_QAR_TXBUF + offset);
for (i = 0; i < n; ++i)
mcfqspi_wr_qdr(mcfqspi, *txbuf++);
}
qwr = (offset ? 0x808 : 0x000) + ((n - 1) << 8);
offset ^= 8;
count -= n;
} while (count);
wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi));
mcfqspi_wr_qwr(mcfqspi, qwr);
mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
if (rxbuf) {
mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset);
for (i = 0; i < 8; ++i)
*rxbuf++ = mcfqspi_rd_qdr(mcfqspi);
offset ^= 8;
}
} else {
mcfqspi_wr_qwr(mcfqspi, (n - 1) << 8);
mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
}
wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi));
if (rxbuf) {
mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset);
for (i = 0; i < n; ++i)
*rxbuf++ = mcfqspi_rd_qdr(mcfqspi);
}
}
static void mcfqspi_set_cs(struct spi_device *spi, bool enable)
{
struct mcfqspi *mcfqspi = spi_controller_get_devdata(spi->controller);
bool cs_high = spi->mode & SPI_CS_HIGH;
if (enable)
mcfqspi_cs_select(mcfqspi, spi_get_chipselect(spi, 0), cs_high);
else
mcfqspi_cs_deselect(mcfqspi, spi_get_chipselect(spi, 0), cs_high);
}
static int mcfqspi_transfer_one(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *t)
{
struct mcfqspi *mcfqspi = spi_controller_get_devdata(host);
u16 qmr = MCFQSPI_QMR_MSTR;
qmr |= t->bits_per_word << 10;
if (spi->mode & SPI_CPHA)
qmr |= MCFQSPI_QMR_CPHA;
if (spi->mode & SPI_CPOL)
qmr |= MCFQSPI_QMR_CPOL;
qmr |= mcfqspi_qmr_baud(t->speed_hz);
mcfqspi_wr_qmr(mcfqspi, qmr);
mcfqspi_wr_qir(mcfqspi, MCFQSPI_QIR_SPIFE);
if (t->bits_per_word == 8)
mcfqspi_transfer_msg8(mcfqspi, t->len, t->tx_buf, t->rx_buf);
else
mcfqspi_transfer_msg16(mcfqspi, t->len / 2, t->tx_buf,
t->rx_buf);
mcfqspi_wr_qir(mcfqspi, 0);
return 0;
}
static int mcfqspi_setup(struct spi_device *spi)
{
mcfqspi_cs_deselect(spi_controller_get_devdata(spi->controller),
spi_get_chipselect(spi, 0), spi->mode & SPI_CS_HIGH);
dev_dbg(&spi->dev,
"bits per word %d, chip select %d, speed %d KHz\n",
spi->bits_per_word, spi_get_chipselect(spi, 0),
(MCFQSPI_BUSCLK / mcfqspi_qmr_baud(spi->max_speed_hz))
/ 1000);
return 0;
}
static int mcfqspi_probe(struct platform_device *pdev)
{
struct spi_controller *host;
struct mcfqspi *mcfqspi;
struct mcfqspi_platform_data *pdata;
int status;
pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
dev_dbg(&pdev->dev, "platform data is missing\n");
return -ENOENT;
}
if (!pdata->cs_control) {
dev_dbg(&pdev->dev, "pdata->cs_control is NULL\n");
return -EINVAL;
}
host = spi_alloc_host(&pdev->dev, sizeof(*mcfqspi));
if (host == NULL) {
dev_dbg(&pdev->dev, "spi_alloc_host failed\n");
return -ENOMEM;
}
mcfqspi = spi_controller_get_devdata(host);
mcfqspi->iobase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mcfqspi->iobase)) {
status = PTR_ERR(mcfqspi->iobase);
goto fail0;
}
mcfqspi->irq = platform_get_irq(pdev, 0);
if (mcfqspi->irq < 0) {
dev_dbg(&pdev->dev, "platform_get_irq failed\n");
status = -ENXIO;
goto fail0;
}
status = devm_request_irq(&pdev->dev, mcfqspi->irq, mcfqspi_irq_handler,
0, pdev->name, mcfqspi);
if (status) {
dev_dbg(&pdev->dev, "request_irq failed\n");
goto fail0;
}
mcfqspi->clk = devm_clk_get_enabled(&pdev->dev, "qspi_clk");
if (IS_ERR(mcfqspi->clk)) {
dev_dbg(&pdev->dev, "clk_get failed\n");
status = PTR_ERR(mcfqspi->clk);
goto fail0;
}
host->bus_num = pdata->bus_num;
host->num_chipselect = pdata->num_chipselect;
mcfqspi->cs_control = pdata->cs_control;
status = mcfqspi_cs_setup(mcfqspi);
if (status) {
dev_dbg(&pdev->dev, "error initializing cs_control\n");
goto fail0;
}
init_waitqueue_head(&mcfqspi->waitq);
host->mode_bits = SPI_CS_HIGH | SPI_CPOL | SPI_CPHA;
host->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16);
host->setup = mcfqspi_setup;
host->set_cs = mcfqspi_set_cs;
host->transfer_one = mcfqspi_transfer_one;
host->auto_runtime_pm = true;
platform_set_drvdata(pdev, host);
pm_runtime_enable(&pdev->dev);
status = devm_spi_register_controller(&pdev->dev, host);
if (status) {
dev_dbg(&pdev->dev, "devm_spi_register_controller failed\n");
goto fail1;
}
dev_info(&pdev->dev, "Coldfire QSPI bus driver\n");
return 0;
fail1:
pm_runtime_disable(&pdev->dev);
mcfqspi_cs_teardown(mcfqspi);
fail0:
spi_controller_put(host);
dev_dbg(&pdev->dev, "Coldfire QSPI probe failed\n");
return status;
}
static void mcfqspi_remove(struct platform_device *pdev)
{
struct spi_controller *host = platform_get_drvdata(pdev);
struct mcfqspi *mcfqspi = spi_controller_get_devdata(host);
pm_runtime_disable(&pdev->dev);
/* disable the hardware (set the baud rate to 0) */
mcfqspi_wr_qmr(mcfqspi, MCFQSPI_QMR_MSTR);
mcfqspi_cs_teardown(mcfqspi);
clk_disable_unprepare(mcfqspi->clk);
}
#ifdef CONFIG_PM_SLEEP
static int mcfqspi_suspend(struct device *dev)
{
struct spi_controller *host = dev_get_drvdata(dev);
struct mcfqspi *mcfqspi = spi_controller_get_devdata(host);
int ret;
ret = spi_controller_suspend(host);
if (ret)
return ret;
clk_disable(mcfqspi->clk);
return 0;
}
static int mcfqspi_resume(struct device *dev)
{
struct spi_controller *host = dev_get_drvdata(dev);
struct mcfqspi *mcfqspi = spi_controller_get_devdata(host);
clk_enable(mcfqspi->clk);
return spi_controller_resume(host);
}
#endif
#ifdef CONFIG_PM
static int mcfqspi_runtime_suspend(struct device *dev)
{
struct spi_controller *host = dev_get_drvdata(dev);
struct mcfqspi *mcfqspi = spi_controller_get_devdata(host);
clk_disable(mcfqspi->clk);
return 0;
}
static int mcfqspi_runtime_resume(struct device *dev)
{
struct spi_controller *host = dev_get_drvdata(dev);
struct mcfqspi *mcfqspi = spi_controller_get_devdata(host);
clk_enable(mcfqspi->clk);
return 0;
}
#endif
static const struct dev_pm_ops mcfqspi_pm = {
SET_SYSTEM_SLEEP_PM_OPS(mcfqspi_suspend, mcfqspi_resume)
SET_RUNTIME_PM_OPS(mcfqspi_runtime_suspend, mcfqspi_runtime_resume,
NULL)
};
static struct platform_driver mcfqspi_driver = {
.driver.name = DRIVER_NAME,
.driver.owner = THIS_MODULE,
.driver.pm = &mcfqspi_pm,
.probe = mcfqspi_probe,
.remove_new = mcfqspi_remove,
};
module_platform_driver(mcfqspi_driver);
MODULE_AUTHOR("Steven King <[email protected]>");
MODULE_DESCRIPTION("Coldfire QSPI Controller Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
| linux-master | drivers/spi/spi-coldfire-qspi.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2022 Jonathan Neuschäfer
#include <linux/clk.h>
#include <linux/mfd/syscon.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/spi/spi-mem.h>
#define FIU_CFG 0x00
#define FIU_BURST_BFG 0x01
#define FIU_RESP_CFG 0x02
#define FIU_CFBB_PROT 0x03
#define FIU_FWIN1_LOW 0x04
#define FIU_FWIN1_HIGH 0x06
#define FIU_FWIN2_LOW 0x08
#define FIU_FWIN2_HIGH 0x0a
#define FIU_FWIN3_LOW 0x0c
#define FIU_FWIN3_HIGH 0x0e
#define FIU_PROT_LOCK 0x10
#define FIU_PROT_CLEAR 0x11
#define FIU_SPI_FL_CFG 0x14
#define FIU_UMA_CODE 0x16
#define FIU_UMA_AB0 0x17
#define FIU_UMA_AB1 0x18
#define FIU_UMA_AB2 0x19
#define FIU_UMA_DB0 0x1a
#define FIU_UMA_DB1 0x1b
#define FIU_UMA_DB2 0x1c
#define FIU_UMA_DB3 0x1d
#define FIU_UMA_CTS 0x1e
#define FIU_UMA_ECTS 0x1f
#define FIU_BURST_CFG_R16 3
#define FIU_UMA_CTS_D_SIZE(x) (x)
#define FIU_UMA_CTS_A_SIZE BIT(3)
#define FIU_UMA_CTS_WR BIT(4)
#define FIU_UMA_CTS_CS(x) ((x) << 5)
#define FIU_UMA_CTS_EXEC_DONE BIT(7)
#define SHM_FLASH_SIZE 0x02
#define SHM_FLASH_SIZE_STALL_HOST BIT(6)
/*
* I observed a typical wait time of 16 iterations for a UMA transfer to
* finish, so this should be a safe limit.
*/
#define UMA_WAIT_ITERATIONS 100
/* The memory-mapped view of flash is 16 MiB long */
#define MAX_MEMORY_SIZE_PER_CS (16 << 20)
#define MAX_MEMORY_SIZE_TOTAL (4 * MAX_MEMORY_SIZE_PER_CS)
struct wpcm_fiu_spi {
struct device *dev;
struct clk *clk;
void __iomem *regs;
void __iomem *memory;
size_t memory_size;
struct regmap *shm_regmap;
};
static void wpcm_fiu_set_opcode(struct wpcm_fiu_spi *fiu, u8 opcode)
{
writeb(opcode, fiu->regs + FIU_UMA_CODE);
}
static void wpcm_fiu_set_addr(struct wpcm_fiu_spi *fiu, u32 addr)
{
writeb((addr >> 0) & 0xff, fiu->regs + FIU_UMA_AB0);
writeb((addr >> 8) & 0xff, fiu->regs + FIU_UMA_AB1);
writeb((addr >> 16) & 0xff, fiu->regs + FIU_UMA_AB2);
}
static void wpcm_fiu_set_data(struct wpcm_fiu_spi *fiu, const u8 *data, unsigned int nbytes)
{
int i;
for (i = 0; i < nbytes; i++)
writeb(data[i], fiu->regs + FIU_UMA_DB0 + i);
}
static void wpcm_fiu_get_data(struct wpcm_fiu_spi *fiu, u8 *data, unsigned int nbytes)
{
int i;
for (i = 0; i < nbytes; i++)
data[i] = readb(fiu->regs + FIU_UMA_DB0 + i);
}
/*
* Perform a UMA (User Mode Access) operation, i.e. a software-controlled SPI transfer.
*/
static int wpcm_fiu_do_uma(struct wpcm_fiu_spi *fiu, unsigned int cs,
bool use_addr, bool write, int data_bytes)
{
int i = 0;
u8 cts = FIU_UMA_CTS_EXEC_DONE | FIU_UMA_CTS_CS(cs);
if (use_addr)
cts |= FIU_UMA_CTS_A_SIZE;
if (write)
cts |= FIU_UMA_CTS_WR;
cts |= FIU_UMA_CTS_D_SIZE(data_bytes);
writeb(cts, fiu->regs + FIU_UMA_CTS);
for (i = 0; i < UMA_WAIT_ITERATIONS; i++)
if (!(readb(fiu->regs + FIU_UMA_CTS) & FIU_UMA_CTS_EXEC_DONE))
return 0;
dev_info(fiu->dev, "UMA transfer has not finished in %d iterations\n", UMA_WAIT_ITERATIONS);
return -EIO;
}
static void wpcm_fiu_ects_assert(struct wpcm_fiu_spi *fiu, unsigned int cs)
{
u8 ects = readb(fiu->regs + FIU_UMA_ECTS);
ects &= ~BIT(cs);
writeb(ects, fiu->regs + FIU_UMA_ECTS);
}
static void wpcm_fiu_ects_deassert(struct wpcm_fiu_spi *fiu, unsigned int cs)
{
u8 ects = readb(fiu->regs + FIU_UMA_ECTS);
ects |= BIT(cs);
writeb(ects, fiu->regs + FIU_UMA_ECTS);
}
struct wpcm_fiu_op_shape {
bool (*match)(const struct spi_mem_op *op);
int (*exec)(struct spi_mem *mem, const struct spi_mem_op *op);
};
static bool wpcm_fiu_normal_match(const struct spi_mem_op *op)
{
// Opcode 0x0b (FAST READ) is treated differently in hardware
if (op->cmd.opcode == 0x0b)
return false;
return (op->addr.nbytes == 0 || op->addr.nbytes == 3) &&
op->dummy.nbytes == 0 && op->data.nbytes <= 4;
}
static int wpcm_fiu_normal_exec(struct spi_mem *mem, const struct spi_mem_op *op)
{
struct wpcm_fiu_spi *fiu = spi_controller_get_devdata(mem->spi->controller);
int ret;
wpcm_fiu_set_opcode(fiu, op->cmd.opcode);
wpcm_fiu_set_addr(fiu, op->addr.val);
if (op->data.dir == SPI_MEM_DATA_OUT)
wpcm_fiu_set_data(fiu, op->data.buf.out, op->data.nbytes);
ret = wpcm_fiu_do_uma(fiu, spi_get_chipselect(mem->spi, 0), op->addr.nbytes == 3,
op->data.dir == SPI_MEM_DATA_OUT, op->data.nbytes);
if (op->data.dir == SPI_MEM_DATA_IN)
wpcm_fiu_get_data(fiu, op->data.buf.in, op->data.nbytes);
return ret;
}
static bool wpcm_fiu_fast_read_match(const struct spi_mem_op *op)
{
return op->cmd.opcode == 0x0b && op->addr.nbytes == 3 &&
op->dummy.nbytes == 1 &&
op->data.nbytes >= 1 && op->data.nbytes <= 4 &&
op->data.dir == SPI_MEM_DATA_IN;
}
static int wpcm_fiu_fast_read_exec(struct spi_mem *mem, const struct spi_mem_op *op)
{
return -EINVAL;
}
/*
* 4-byte addressing.
*
* Flash view: [ C A A A A D D D D]
* bytes: 13 aa bb cc dd -> 5a a5 f0 0f
* FIU's view: [ C A A A][ C D D D D]
* FIU mode: [ read/write][ read ]
*/
static bool wpcm_fiu_4ba_match(const struct spi_mem_op *op)
{
return op->addr.nbytes == 4 && op->dummy.nbytes == 0 && op->data.nbytes <= 4;
}
static int wpcm_fiu_4ba_exec(struct spi_mem *mem, const struct spi_mem_op *op)
{
struct wpcm_fiu_spi *fiu = spi_controller_get_devdata(mem->spi->controller);
int cs = spi_get_chipselect(mem->spi, 0);
wpcm_fiu_ects_assert(fiu, cs);
wpcm_fiu_set_opcode(fiu, op->cmd.opcode);
wpcm_fiu_set_addr(fiu, op->addr.val >> 8);
wpcm_fiu_do_uma(fiu, cs, true, false, 0);
wpcm_fiu_set_opcode(fiu, op->addr.val & 0xff);
wpcm_fiu_set_addr(fiu, 0);
if (op->data.dir == SPI_MEM_DATA_OUT)
wpcm_fiu_set_data(fiu, op->data.buf.out, op->data.nbytes);
wpcm_fiu_do_uma(fiu, cs, false, op->data.dir == SPI_MEM_DATA_OUT, op->data.nbytes);
wpcm_fiu_ects_deassert(fiu, cs);
if (op->data.dir == SPI_MEM_DATA_IN)
wpcm_fiu_get_data(fiu, op->data.buf.in, op->data.nbytes);
return 0;
}
/*
* RDID (Read Identification) needs special handling because Linux expects to
* be able to read 6 ID bytes and FIU can only read up to 4 at once.
*
 * We're lucky in this case, because executing the RDID instruction twice
 * returns the same data.
*
* What we do is as follows (C: write command/opcode byte, D: read data byte,
* A: write address byte):
*
* 1. C D D D
* 2. C A A A D D D
*/
static bool wpcm_fiu_rdid_match(const struct spi_mem_op *op)
{
return op->cmd.opcode == 0x9f && op->addr.nbytes == 0 &&
op->dummy.nbytes == 0 && op->data.nbytes == 6 &&
op->data.dir == SPI_MEM_DATA_IN;
}
static int wpcm_fiu_rdid_exec(struct spi_mem *mem, const struct spi_mem_op *op)
{
struct wpcm_fiu_spi *fiu = spi_controller_get_devdata(mem->spi->controller);
int cs = spi_get_chipselect(mem->spi, 0);
/* First transfer */
wpcm_fiu_set_opcode(fiu, op->cmd.opcode);
wpcm_fiu_set_addr(fiu, 0);
wpcm_fiu_do_uma(fiu, cs, false, false, 3);
wpcm_fiu_get_data(fiu, op->data.buf.in, 3);
/* Second transfer */
wpcm_fiu_set_opcode(fiu, op->cmd.opcode);
wpcm_fiu_set_addr(fiu, 0);
wpcm_fiu_do_uma(fiu, cs, true, false, 3);
wpcm_fiu_get_data(fiu, op->data.buf.in + 3, 3);
return 0;
}
/*
 * Operations with dummy bytes between address and data.
 *
 * Byte legend: C = command/opcode, A = address, X = dummy, D = data.
 *
 * Flash view:  C A A A X .. X X D D D D
 * FIU's view: [C A A A D .. D] [C D D D D]
 *
 * The first UMA transaction emits the opcode, the (optional) address and all
 * but the last dummy byte as don't-care write data; the second transaction
 * emits a 0x00 "opcode" as the final dummy byte and then reads the data.
 */
static bool wpcm_fiu_dummy_match(const struct spi_mem_op *op)
{
// Opcode 0x0b (FAST READ) is treated differently in hardware
if (op->cmd.opcode == 0x0b)
return false;
return (op->addr.nbytes == 0 || op->addr.nbytes == 3) &&
op->dummy.nbytes >= 1 && op->dummy.nbytes <= 5 &&
op->data.nbytes <= 4;
}
static int wpcm_fiu_dummy_exec(struct spi_mem *mem, const struct spi_mem_op *op)
{
struct wpcm_fiu_spi *fiu = spi_controller_get_devdata(mem->spi->controller);
int cs = spi_get_chipselect(mem->spi, 0);
wpcm_fiu_ects_assert(fiu, cs);
/* First transfer */
wpcm_fiu_set_opcode(fiu, op->cmd.opcode);
wpcm_fiu_set_addr(fiu, op->addr.val);
wpcm_fiu_do_uma(fiu, cs, op->addr.nbytes != 0, true, op->dummy.nbytes - 1);
/* Second transfer */
wpcm_fiu_set_opcode(fiu, 0);
wpcm_fiu_set_addr(fiu, 0);
wpcm_fiu_do_uma(fiu, cs, false, false, op->data.nbytes);
wpcm_fiu_get_data(fiu, op->data.buf.in, op->data.nbytes);
wpcm_fiu_ects_deassert(fiu, cs);
return 0;
}
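/*
 * Supported UMA operation shapes, tried in order; the first entry whose
 * match() accepts the operation is used to execute it.
 */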
static const struct wpcm_fiu_op_shape wpcm_fiu_op_shapes[] = {
{ .match = wpcm_fiu_normal_match, .exec = wpcm_fiu_normal_exec },
{ .match = wpcm_fiu_fast_read_match, .exec = wpcm_fiu_fast_read_exec },
{ .match = wpcm_fiu_4ba_match, .exec = wpcm_fiu_4ba_exec },
{ .match = wpcm_fiu_rdid_match, .exec = wpcm_fiu_rdid_exec },
{ .match = wpcm_fiu_dummy_match, .exec = wpcm_fiu_dummy_exec },
};
static const struct wpcm_fiu_op_shape *wpcm_fiu_find_op_shape(const struct spi_mem_op *op)
{
size_t i;
for (i = 0; i < ARRAY_SIZE(wpcm_fiu_op_shapes); i++) {
const struct wpcm_fiu_op_shape *shape = &wpcm_fiu_op_shapes[i];
if (shape->match(op))
return shape;
}
return NULL;
}
static bool wpcm_fiu_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
if (!spi_mem_default_supports_op(mem, op))
return false;
if (op->cmd.dtr || op->addr.dtr || op->dummy.dtr || op->data.dtr)
return false;
if (op->cmd.buswidth > 1 || op->addr.buswidth > 1 ||
op->dummy.buswidth > 1 || op->data.buswidth > 1)
return false;
return wpcm_fiu_find_op_shape(op) != NULL;
}
/*
* In order to ensure the integrity of SPI transfers performed via UMA,
* temporarily disable (stall) memory accesses coming from the host CPU.
*/
static void wpcm_fiu_stall_host(struct wpcm_fiu_spi *fiu, bool stall)
{
if (fiu->shm_regmap) {
int res = regmap_update_bits(fiu->shm_regmap, SHM_FLASH_SIZE,
SHM_FLASH_SIZE_STALL_HOST,
stall ? SHM_FLASH_SIZE_STALL_HOST : 0);
if (res)
dev_warn(fiu->dev, "Failed to (un)stall host memory accesses: %d\n", res);
}
}
static int wpcm_fiu_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
struct wpcm_fiu_spi *fiu = spi_controller_get_devdata(mem->spi->controller);
const struct wpcm_fiu_op_shape *shape = wpcm_fiu_find_op_shape(op);
	int ret = -ENOTSUPP;
	wpcm_fiu_stall_host(fiu, true);
	if (shape)
		ret = shape->exec(mem, op);
	/* Unstall the host on both the success and the error path */
	wpcm_fiu_stall_host(fiu, false);
	return ret;
}
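/*
 * UMA moves at most four data bytes per operation, so clamp the requested
 * length; callers retry with shorter chunks until the access is complete.
 */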
static int wpcm_fiu_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
if (op->data.nbytes > 4)
op->data.nbytes = 4;
return 0;
}
static int wpcm_fiu_dirmap_create(struct spi_mem_dirmap_desc *desc)
{
struct wpcm_fiu_spi *fiu = spi_controller_get_devdata(desc->mem->spi->controller);
int cs = spi_get_chipselect(desc->mem->spi, 0);
if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN)
return -ENOTSUPP;
/*
* Unfortunately, FIU only supports a 16 MiB direct mapping window (per
* attached flash chip), but the SPI MEM core doesn't support partial
* direct mappings. This means that we can't support direct mapping on
* flashes that are bigger than 16 MiB.
*/
if (desc->info.offset + desc->info.length > MAX_MEMORY_SIZE_PER_CS)
return -ENOTSUPP;
/* Don't read past the memory window */
if (cs * MAX_MEMORY_SIZE_PER_CS + desc->info.offset + desc->info.length > fiu->memory_size)
return -ENOTSUPP;
return 0;
}
static ssize_t wpcm_fiu_direct_read(struct spi_mem_dirmap_desc *desc, u64 offs, size_t len, void *buf)
{
struct wpcm_fiu_spi *fiu = spi_controller_get_devdata(desc->mem->spi->controller);
int cs = spi_get_chipselect(desc->mem->spi, 0);
if (offs >= MAX_MEMORY_SIZE_PER_CS)
return -ENOTSUPP;
offs += cs * MAX_MEMORY_SIZE_PER_CS;
if (!fiu->memory || offs >= fiu->memory_size)
return -ENOTSUPP;
len = min_t(size_t, len, fiu->memory_size - offs);
memcpy_fromio(buf, fiu->memory + offs, len);
return len;
}
static const struct spi_controller_mem_ops wpcm_fiu_mem_ops = {
.adjust_op_size = wpcm_fiu_adjust_op_size,
.supports_op = wpcm_fiu_supports_op,
.exec_op = wpcm_fiu_exec_op,
.dirmap_create = wpcm_fiu_dirmap_create,
.dirmap_read = wpcm_fiu_direct_read,
};
static void wpcm_fiu_hw_init(struct wpcm_fiu_spi *fiu)
{
/* Configure memory-mapped flash access */
writeb(FIU_BURST_CFG_R16, fiu->regs + FIU_BURST_BFG);
writeb(MAX_MEMORY_SIZE_TOTAL / (512 << 10), fiu->regs + FIU_CFG);
writeb(MAX_MEMORY_SIZE_PER_CS / (512 << 10) | BIT(6), fiu->regs + FIU_SPI_FL_CFG);
/* Deassert all manually asserted chip selects */
writeb(0x0f, fiu->regs + FIU_UMA_ECTS);
}
static int wpcm_fiu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct spi_controller *ctrl;
struct wpcm_fiu_spi *fiu;
struct resource *res;
ctrl = devm_spi_alloc_master(dev, sizeof(*fiu));
if (!ctrl)
return -ENOMEM;
fiu = spi_controller_get_devdata(ctrl);
fiu->dev = dev;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "control");
fiu->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(fiu->regs)) {
dev_err(dev, "Failed to map registers\n");
return PTR_ERR(fiu->regs);
}
fiu->clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(fiu->clk))
return PTR_ERR(fiu->clk);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "memory");
fiu->memory = devm_ioremap_resource(dev, res);
fiu->memory_size = min_t(size_t, resource_size(res), MAX_MEMORY_SIZE_TOTAL);
if (IS_ERR(fiu->memory)) {
dev_err(dev, "Failed to map flash memory window\n");
return PTR_ERR(fiu->memory);
}
fiu->shm_regmap = syscon_regmap_lookup_by_phandle_optional(dev->of_node, "nuvoton,shm");
wpcm_fiu_hw_init(fiu);
ctrl->bus_num = -1;
ctrl->mem_ops = &wpcm_fiu_mem_ops;
ctrl->num_chipselect = 4;
ctrl->dev.of_node = dev->of_node;
/*
* The FIU doesn't include a clock divider, the clock is entirely
* determined by the AHB3 bus clock.
*/
ctrl->min_speed_hz = clk_get_rate(fiu->clk);
ctrl->max_speed_hz = clk_get_rate(fiu->clk);
return devm_spi_register_controller(dev, ctrl);
}
static const struct of_device_id wpcm_fiu_dt_ids[] = {
{ .compatible = "nuvoton,wpcm450-fiu", },
{ }
};
MODULE_DEVICE_TABLE(of, wpcm_fiu_dt_ids);
static struct platform_driver wpcm_fiu_driver = {
.driver = {
.name = "wpcm450-fiu",
.bus = &platform_bus_type,
.of_match_table = wpcm_fiu_dt_ids,
},
.probe = wpcm_fiu_probe,
};
module_platform_driver(wpcm_fiu_driver);
MODULE_DESCRIPTION("Nuvoton WPCM450 FIU SPI controller driver");
MODULE_AUTHOR("Jonathan Neuschäfer <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-wpcm-fiu.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2016 Broadcom
*/
#include <linux/device.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include "spi-bcm-qspi.h"
static const struct of_device_id brcmstb_qspi_of_match[] = {
{ .compatible = "brcm,spi-brcmstb-qspi" },
{ .compatible = "brcm,spi-brcmstb-mspi" },
{},
};
MODULE_DEVICE_TABLE(of, brcmstb_qspi_of_match);
static int brcmstb_qspi_probe(struct platform_device *pdev)
{
return bcm_qspi_probe(pdev, NULL);
}
static void brcmstb_qspi_remove(struct platform_device *pdev)
{
bcm_qspi_remove(pdev);
}
static struct platform_driver brcmstb_qspi_driver = {
.probe = brcmstb_qspi_probe,
.remove_new = brcmstb_qspi_remove,
.driver = {
.name = "brcmstb_qspi",
.pm = &bcm_qspi_pm_ops,
.of_match_table = brcmstb_qspi_of_match,
}
};
module_platform_driver(brcmstb_qspi_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Kamal Dasu");
MODULE_DESCRIPTION("Broadcom SPI driver for settop SoC");
| linux-master | drivers/spi/spi-brcmstb-qspi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Freescale SPI/eSPI controller driver library.
*
* Maintainer: Kumar Gala
*
* Copyright (C) 2006 Polycom, Inc.
*
* CPM SPI and QE buffer descriptors mode support:
* Copyright (c) 2009 MontaVista Software, Inc.
* Author: Anton Vorontsov <[email protected]>
*
* Copyright 2010 Freescale Semiconductor, Inc.
*/
#include <linux/dma-mapping.h>
#include <linux/fsl_devices.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#ifdef CONFIG_FSL_SOC
#include <sysdev/fsl_soc.h>
#endif
#include "spi-fsl-lib.h"
#define MPC8XXX_SPI_RX_BUF(type) \
void mpc8xxx_spi_rx_buf_##type(u32 data, struct mpc8xxx_spi *mpc8xxx_spi) \
{ \
type *rx = mpc8xxx_spi->rx; \
*rx++ = (type)(data >> mpc8xxx_spi->rx_shift); \
mpc8xxx_spi->rx = rx; \
} \
EXPORT_SYMBOL_GPL(mpc8xxx_spi_rx_buf_##type);
#define MPC8XXX_SPI_TX_BUF(type) \
u32 mpc8xxx_spi_tx_buf_##type(struct mpc8xxx_spi *mpc8xxx_spi) \
{ \
u32 data; \
const type *tx = mpc8xxx_spi->tx; \
if (!tx) \
return 0; \
data = *tx++ << mpc8xxx_spi->tx_shift; \
mpc8xxx_spi->tx = tx; \
return data; \
} \
EXPORT_SYMBOL_GPL(mpc8xxx_spi_tx_buf_##type);
MPC8XXX_SPI_RX_BUF(u8)
MPC8XXX_SPI_RX_BUF(u16)
MPC8XXX_SPI_RX_BUF(u32)
MPC8XXX_SPI_TX_BUF(u8)
MPC8XXX_SPI_TX_BUF(u16)
MPC8XXX_SPI_TX_BUF(u32)
struct mpc8xxx_spi_probe_info *to_of_pinfo(struct fsl_spi_platform_data *pdata)
{
return container_of(pdata, struct mpc8xxx_spi_probe_info, pdata);
}
EXPORT_SYMBOL_GPL(to_of_pinfo);
const char *mpc8xxx_spi_strmode(unsigned int flags)
{
if (flags & SPI_QE_CPU_MODE) {
return "QE CPU";
} else if (flags & SPI_CPM_MODE) {
if (flags & SPI_QE)
return "QE";
else if (flags & SPI_CPM2)
return "CPM2";
else
return "CPM1";
}
return "CPU";
}
EXPORT_SYMBOL_GPL(mpc8xxx_spi_strmode);
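/*
 * Library half of probe, shared by the SPI and eSPI front-end drivers: fill
 * in generic spi_master fields and mpc8xxx_spi defaults from the platform
 * data; the caller completes the bus-specific setup afterwards.
 */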
void mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
unsigned int irq)
{
struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
struct spi_master *master;
struct mpc8xxx_spi *mpc8xxx_spi;
master = dev_get_drvdata(dev);
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH
| SPI_LSB_FIRST | SPI_LOOP;
master->dev.of_node = dev->of_node;
mpc8xxx_spi = spi_master_get_devdata(master);
mpc8xxx_spi->dev = dev;
mpc8xxx_spi->get_rx = mpc8xxx_spi_rx_buf_u8;
mpc8xxx_spi->get_tx = mpc8xxx_spi_tx_buf_u8;
mpc8xxx_spi->flags = pdata->flags;
mpc8xxx_spi->spibrg = pdata->sysclk;
mpc8xxx_spi->irq = irq;
mpc8xxx_spi->rx_shift = 0;
mpc8xxx_spi->tx_shift = 0;
master->bus_num = pdata->bus_num;
master->num_chipselect = pdata->max_chipselect;
init_completion(&mpc8xxx_spi->done);
}
EXPORT_SYMBOL_GPL(mpc8xxx_spi_probe);
int of_mpc8xxx_spi_probe(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
struct device_node *np = ofdev->dev.of_node;
struct mpc8xxx_spi_probe_info *pinfo;
struct fsl_spi_platform_data *pdata;
const void *prop;
int ret = -ENOMEM;
pinfo = devm_kzalloc(&ofdev->dev, sizeof(*pinfo), GFP_KERNEL);
if (!pinfo)
return ret;
pdata = &pinfo->pdata;
dev->platform_data = pdata;
/* Allocate bus num dynamically. */
pdata->bus_num = -1;
#ifdef CONFIG_FSL_SOC
/* SPI controller is either clocked from QE or SoC clock. */
pdata->sysclk = get_brgfreq();
if (pdata->sysclk == -1) {
pdata->sysclk = fsl_get_sys_freq();
if (pdata->sysclk == -1)
return -ENODEV;
}
#else
ret = of_property_read_u32(np, "clock-frequency", &pdata->sysclk);
if (ret)
return ret;
#endif
prop = of_get_property(np, "mode", NULL);
if (prop && !strcmp(prop, "cpu-qe"))
pdata->flags = SPI_QE_CPU_MODE;
else if (prop && !strcmp(prop, "qe"))
pdata->flags = SPI_CPM_MODE | SPI_QE;
else if (of_device_is_compatible(np, "fsl,cpm2-spi"))
pdata->flags = SPI_CPM_MODE | SPI_CPM2;
else if (of_device_is_compatible(np, "fsl,cpm1-spi"))
pdata->flags = SPI_CPM_MODE | SPI_CPM1;
return 0;
}
EXPORT_SYMBOL_GPL(of_mpc8xxx_spi_probe);
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-fsl-lib.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Freescale SPI controller driver cpm functions.
*
* Maintainer: Kumar Gala
*
* Copyright (C) 2006 Polycom, Inc.
* Copyright 2010 Freescale Semiconductor, Inc.
*
* CPM SPI and QE buffer descriptors mode support:
* Copyright (c) 2009 MontaVista Software, Inc.
* Author: Anton Vorontsov <[email protected]>
*/
#include <asm/cpm.h>
#include <soc/fsl/qe/qe.h>
#include <linux/dma-mapping.h>
#include <linux/fsl_devices.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/spi/spi.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/byteorder/generic.h>
#include "spi-fsl-cpm.h"
#include "spi-fsl-lib.h"
#include "spi-fsl-spi.h"
/* CPM1 and CPM2 are mutually exclusive. */
#ifdef CONFIG_CPM1
#include <asm/cpm1.h>
#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_CH_SPI, 0)
#else
#include <asm/cpm2.h>
#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_SPI_PAGE, CPM_CR_SPI_SBLOCK, 0, 0)
#endif
#define SPIE_TXB 0x00000200 /* Last char is written to tx fifo */
#define SPIE_RXB 0x00000100 /* Last char is written to rx buf */
/* SPCOM register values */
#define SPCOM_STR (1 << 23) /* Start transmit */
#define SPI_PRAM_SIZE 0x100
#define SPI_MRBLR ((unsigned int)PAGE_SIZE)
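/*
 * A single dummy RX buffer is shared (and reference counted) by all CPM/QE
 * SPI controllers; it gives the receiver somewhere to write during TX-only
 * transfers.
 */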
static void *fsl_dummy_rx;
static DEFINE_MUTEX(fsl_dummy_rx_lock);
static int fsl_dummy_rx_refcnt;
void fsl_spi_cpm_reinit_txrx(struct mpc8xxx_spi *mspi)
{
if (mspi->flags & SPI_QE) {
qe_issue_cmd(QE_INIT_TX_RX, mspi->subblock,
QE_CR_PROTOCOL_UNSPECIFIED, 0);
} else {
if (mspi->flags & SPI_CPM1) {
iowrite32be(0, &mspi->pram->rstate);
iowrite16be(ioread16be(&mspi->pram->rbase),
&mspi->pram->rbptr);
iowrite32be(0, &mspi->pram->tstate);
iowrite16be(ioread16be(&mspi->pram->tbase),
&mspi->pram->tbptr);
} else {
cpm_command(CPM_SPI_CMD, CPM_CR_INIT_TRX);
}
}
}
EXPORT_SYMBOL_GPL(fsl_spi_cpm_reinit_txrx);
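/*
 * Program one TX and one RX buffer descriptor for the next chunk (at most
 * SPI_MRBLR bytes) of the current transfer and kick it off by writing
 * SPCOM_STR to the command register.
 */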
static void fsl_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi)
{
struct cpm_buf_desc __iomem *tx_bd = mspi->tx_bd;
struct cpm_buf_desc __iomem *rx_bd = mspi->rx_bd;
unsigned int xfer_len = min(mspi->count, SPI_MRBLR);
unsigned int xfer_ofs;
struct fsl_spi_reg __iomem *reg_base = mspi->reg_base;
xfer_ofs = mspi->xfer_in_progress->len - mspi->count;
if (mspi->rx_dma == mspi->dma_dummy_rx)
iowrite32be(mspi->rx_dma, &rx_bd->cbd_bufaddr);
else
iowrite32be(mspi->rx_dma + xfer_ofs, &rx_bd->cbd_bufaddr);
iowrite16be(0, &rx_bd->cbd_datlen);
iowrite16be(BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP, &rx_bd->cbd_sc);
if (mspi->tx_dma == mspi->dma_dummy_tx)
iowrite32be(mspi->tx_dma, &tx_bd->cbd_bufaddr);
else
iowrite32be(mspi->tx_dma + xfer_ofs, &tx_bd->cbd_bufaddr);
iowrite16be(xfer_len, &tx_bd->cbd_datlen);
iowrite16be(BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP | BD_SC_LAST,
&tx_bd->cbd_sc);
/* start transfer */
	mpc8xxx_spi_write_reg(&reg_base->command, SPCOM_STR);
}
int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
struct spi_transfer *t, bool is_dma_mapped)
{
struct device *dev = mspi->dev;
struct fsl_spi_reg __iomem *reg_base = mspi->reg_base;
if (is_dma_mapped) {
mspi->map_tx_dma = 0;
mspi->map_rx_dma = 0;
} else {
mspi->map_tx_dma = 1;
mspi->map_rx_dma = 1;
}
if (!t->tx_buf) {
mspi->tx_dma = mspi->dma_dummy_tx;
mspi->map_tx_dma = 0;
}
if (!t->rx_buf) {
mspi->rx_dma = mspi->dma_dummy_rx;
mspi->map_rx_dma = 0;
}
if (t->bits_per_word == 16 && t->tx_buf) {
const u16 *src = t->tx_buf;
__le16 *dst;
int i;
dst = kmalloc(t->len, GFP_KERNEL);
if (!dst)
return -ENOMEM;
for (i = 0; i < t->len >> 1; i++)
dst[i] = cpu_to_le16p(src + i);
mspi->tx = dst;
mspi->map_tx_dma = 1;
}
if (mspi->map_tx_dma) {
void *nonconst_tx = (void *)mspi->tx; /* shut up gcc */
mspi->tx_dma = dma_map_single(dev, nonconst_tx, t->len,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, mspi->tx_dma)) {
dev_err(dev, "unable to map tx dma\n");
return -ENOMEM;
}
} else if (t->tx_buf) {
mspi->tx_dma = t->tx_dma;
}
if (mspi->map_rx_dma) {
mspi->rx_dma = dma_map_single(dev, mspi->rx, t->len,
DMA_FROM_DEVICE);
if (dma_mapping_error(dev, mspi->rx_dma)) {
dev_err(dev, "unable to map rx dma\n");
goto err_rx_dma;
}
} else if (t->rx_buf) {
mspi->rx_dma = t->rx_dma;
}
/* enable rx ints */
	mpc8xxx_spi_write_reg(&reg_base->mask, SPIE_RXB);
mspi->xfer_in_progress = t;
mspi->count = t->len;
/* start CPM transfers */
fsl_spi_cpm_bufs_start(mspi);
return 0;
err_rx_dma:
if (mspi->map_tx_dma)
dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
return -ENOMEM;
}
EXPORT_SYMBOL_GPL(fsl_spi_cpm_bufs);
void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
{
struct device *dev = mspi->dev;
struct spi_transfer *t = mspi->xfer_in_progress;
if (mspi->map_tx_dma)
dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
if (mspi->map_rx_dma)
dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE);
mspi->xfer_in_progress = NULL;
if (t->bits_per_word == 16 && t->rx_buf) {
int i;
for (i = 0; i < t->len; i += 2)
le16_to_cpus(t->rx_buf + i);
}
}
EXPORT_SYMBOL_GPL(fsl_spi_cpm_bufs_complete);
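/*
 * Chunk-completion interrupt: account for the bytes reported in the RX buffer
 * descriptor, then either start the next chunk or complete the transfer.
 */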
void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events)
{
u16 len;
struct fsl_spi_reg __iomem *reg_base = mspi->reg_base;
dev_dbg(mspi->dev, "%s: bd datlen %d, count %d\n", __func__,
ioread16be(&mspi->rx_bd->cbd_datlen), mspi->count);
len = ioread16be(&mspi->rx_bd->cbd_datlen);
if (len > mspi->count) {
WARN_ON(1);
len = mspi->count;
}
/* Clear the events */
	mpc8xxx_spi_write_reg(&reg_base->event, events);
mspi->count -= len;
if (mspi->count)
fsl_spi_cpm_bufs_start(mspi);
else
complete(&mspi->done);
}
EXPORT_SYMBOL_GPL(fsl_spi_cpm_irq);
static void *fsl_spi_alloc_dummy_rx(void)
{
mutex_lock(&fsl_dummy_rx_lock);
if (!fsl_dummy_rx)
fsl_dummy_rx = kmalloc(SPI_MRBLR, GFP_KERNEL);
if (fsl_dummy_rx)
fsl_dummy_rx_refcnt++;
mutex_unlock(&fsl_dummy_rx_lock);
return fsl_dummy_rx;
}
static void fsl_spi_free_dummy_rx(void)
{
mutex_lock(&fsl_dummy_rx_lock);
switch (fsl_dummy_rx_refcnt) {
case 0:
WARN_ON(1);
break;
case 1:
kfree(fsl_dummy_rx);
fsl_dummy_rx = NULL;
fallthrough;
default:
fsl_dummy_rx_refcnt--;
break;
}
mutex_unlock(&fsl_dummy_rx_lock);
}
static unsigned long fsl_spi_cpm_get_pram(struct mpc8xxx_spi *mspi)
{
struct device *dev = mspi->dev;
struct device_node *np = dev->of_node;
const u32 *iprop;
int size;
void __iomem *spi_base;
unsigned long pram_ofs = -ENOMEM;
/* Can't use of_address_to_resource(), QE muram isn't at 0. */
iprop = of_get_property(np, "reg", &size);
/* QE with a fixed pram location? */
if (mspi->flags & SPI_QE && iprop && size == sizeof(*iprop) * 4)
return cpm_muram_alloc_fixed(iprop[2], SPI_PRAM_SIZE);
/* QE but with a dynamic pram location? */
if (mspi->flags & SPI_QE) {
pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, mspi->subblock,
QE_CR_PROTOCOL_UNSPECIFIED, pram_ofs);
return pram_ofs;
}
spi_base = of_iomap(np, 1);
if (spi_base == NULL)
return -EINVAL;
if (mspi->flags & SPI_CPM2) {
pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
out_be16(spi_base, pram_ofs);
}
iounmap(spi_base);
return pram_ofs;
}
int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi)
{
struct device *dev = mspi->dev;
struct device_node *np = dev->of_node;
const u32 *iprop;
int size;
unsigned long bds_ofs;
if (!(mspi->flags & SPI_CPM_MODE))
return 0;
if (!fsl_spi_alloc_dummy_rx())
return -ENOMEM;
if (mspi->flags & SPI_QE) {
iprop = of_get_property(np, "cell-index", &size);
if (iprop && size == sizeof(*iprop))
mspi->subblock = *iprop;
switch (mspi->subblock) {
default:
dev_warn(dev, "cell-index unspecified, assuming SPI1\n");
fallthrough;
case 0:
mspi->subblock = QE_CR_SUBBLOCK_SPI1;
break;
case 1:
mspi->subblock = QE_CR_SUBBLOCK_SPI2;
break;
}
}
if (mspi->flags & SPI_CPM1) {
void __iomem *pram;
pram = devm_platform_ioremap_resource(to_platform_device(dev),
1);
if (IS_ERR(pram))
mspi->pram = NULL;
else
mspi->pram = pram;
} else {
unsigned long pram_ofs = fsl_spi_cpm_get_pram(mspi);
if (IS_ERR_VALUE(pram_ofs))
mspi->pram = NULL;
else
mspi->pram = cpm_muram_addr(pram_ofs);
}
if (mspi->pram == NULL) {
dev_err(dev, "can't allocate spi parameter ram\n");
goto err_pram;
}
bds_ofs = cpm_muram_alloc(sizeof(*mspi->tx_bd) +
sizeof(*mspi->rx_bd), 8);
if (IS_ERR_VALUE(bds_ofs)) {
dev_err(dev, "can't allocate bds\n");
goto err_bds;
}
mspi->dma_dummy_tx = dma_map_single(dev, ZERO_PAGE(0), PAGE_SIZE,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, mspi->dma_dummy_tx)) {
dev_err(dev, "unable to map dummy tx buffer\n");
goto err_dummy_tx;
}
mspi->dma_dummy_rx = dma_map_single(dev, fsl_dummy_rx, SPI_MRBLR,
DMA_FROM_DEVICE);
if (dma_mapping_error(dev, mspi->dma_dummy_rx)) {
dev_err(dev, "unable to map dummy rx buffer\n");
goto err_dummy_rx;
}
mspi->tx_bd = cpm_muram_addr(bds_ofs);
mspi->rx_bd = cpm_muram_addr(bds_ofs + sizeof(*mspi->tx_bd));
/* Initialize parameter ram. */
iowrite16be(cpm_muram_offset(mspi->tx_bd), &mspi->pram->tbase);
iowrite16be(cpm_muram_offset(mspi->rx_bd), &mspi->pram->rbase);
iowrite8(CPMFCR_EB | CPMFCR_GBL, &mspi->pram->tfcr);
iowrite8(CPMFCR_EB | CPMFCR_GBL, &mspi->pram->rfcr);
iowrite16be(SPI_MRBLR, &mspi->pram->mrblr);
iowrite32be(0, &mspi->pram->rstate);
iowrite32be(0, &mspi->pram->rdp);
iowrite16be(0, &mspi->pram->rbptr);
iowrite16be(0, &mspi->pram->rbc);
iowrite32be(0, &mspi->pram->rxtmp);
iowrite32be(0, &mspi->pram->tstate);
iowrite32be(0, &mspi->pram->tdp);
iowrite16be(0, &mspi->pram->tbptr);
iowrite16be(0, &mspi->pram->tbc);
iowrite32be(0, &mspi->pram->txtmp);
return 0;
err_dummy_rx:
dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
err_dummy_tx:
cpm_muram_free(bds_ofs);
err_bds:
if (!(mspi->flags & SPI_CPM1))
cpm_muram_free(cpm_muram_offset(mspi->pram));
err_pram:
fsl_spi_free_dummy_rx();
return -ENOMEM;
}
EXPORT_SYMBOL_GPL(fsl_spi_cpm_init);
void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi)
{
struct device *dev = mspi->dev;
if (!(mspi->flags & SPI_CPM_MODE))
return;
dma_unmap_single(dev, mspi->dma_dummy_rx, SPI_MRBLR, DMA_FROM_DEVICE);
dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
cpm_muram_free(cpm_muram_offset(mspi->tx_bd));
if (!(mspi->flags & SPI_CPM1))
cpm_muram_free(cpm_muram_offset(mspi->pram));
fsl_spi_free_dummy_rx();
}
EXPORT_SYMBOL_GPL(fsl_spi_cpm_free);
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-fsl-cpm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2008-2014, The Linux foundation. All rights reserved.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#define QUP_CONFIG 0x0000
#define QUP_STATE 0x0004
#define QUP_IO_M_MODES 0x0008
#define QUP_SW_RESET 0x000c
#define QUP_OPERATIONAL 0x0018
#define QUP_ERROR_FLAGS 0x001c
#define QUP_ERROR_FLAGS_EN 0x0020
#define QUP_OPERATIONAL_MASK 0x0028
#define QUP_HW_VERSION 0x0030
#define QUP_MX_OUTPUT_CNT 0x0100
#define QUP_OUTPUT_FIFO 0x0110
#define QUP_MX_WRITE_CNT 0x0150
#define QUP_MX_INPUT_CNT 0x0200
#define QUP_MX_READ_CNT 0x0208
#define QUP_INPUT_FIFO 0x0218
#define SPI_CONFIG 0x0300
#define SPI_IO_CONTROL 0x0304
#define SPI_ERROR_FLAGS 0x0308
#define SPI_ERROR_FLAGS_EN 0x030c
/* QUP_CONFIG fields */
#define QUP_CONFIG_SPI_MODE (1 << 8)
#define QUP_CONFIG_CLOCK_AUTO_GATE BIT(13)
#define QUP_CONFIG_NO_INPUT BIT(7)
#define QUP_CONFIG_NO_OUTPUT BIT(6)
#define QUP_CONFIG_N 0x001f
/* QUP_STATE fields */
#define QUP_STATE_VALID BIT(2)
#define QUP_STATE_RESET 0
#define QUP_STATE_RUN 1
#define QUP_STATE_PAUSE 3
#define QUP_STATE_MASK 3
#define QUP_STATE_CLEAR 2
#define QUP_HW_VERSION_2_1_1 0x20010001
/* QUP_IO_M_MODES fields */
#define QUP_IO_M_PACK_EN BIT(15)
#define QUP_IO_M_UNPACK_EN BIT(14)
#define QUP_IO_M_INPUT_MODE_MASK_SHIFT 12
#define QUP_IO_M_OUTPUT_MODE_MASK_SHIFT 10
#define QUP_IO_M_INPUT_MODE_MASK (3 << QUP_IO_M_INPUT_MODE_MASK_SHIFT)
#define QUP_IO_M_OUTPUT_MODE_MASK (3 << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT)
#define QUP_IO_M_OUTPUT_BLOCK_SIZE(x) (((x) & (0x03 << 0)) >> 0)
#define QUP_IO_M_OUTPUT_FIFO_SIZE(x) (((x) & (0x07 << 2)) >> 2)
#define QUP_IO_M_INPUT_BLOCK_SIZE(x) (((x) & (0x03 << 5)) >> 5)
#define QUP_IO_M_INPUT_FIFO_SIZE(x) (((x) & (0x07 << 7)) >> 7)
#define QUP_IO_M_MODE_FIFO 0
#define QUP_IO_M_MODE_BLOCK 1
#define QUP_IO_M_MODE_DMOV 2
#define QUP_IO_M_MODE_BAM 3
/* QUP_OPERATIONAL fields */
#define QUP_OP_IN_BLOCK_READ_REQ BIT(13)
#define QUP_OP_OUT_BLOCK_WRITE_REQ BIT(12)
#define QUP_OP_MAX_INPUT_DONE_FLAG BIT(11)
#define QUP_OP_MAX_OUTPUT_DONE_FLAG BIT(10)
#define QUP_OP_IN_SERVICE_FLAG BIT(9)
#define QUP_OP_OUT_SERVICE_FLAG BIT(8)
#define QUP_OP_IN_FIFO_FULL BIT(7)
#define QUP_OP_OUT_FIFO_FULL BIT(6)
#define QUP_OP_IN_FIFO_NOT_EMPTY BIT(5)
#define QUP_OP_OUT_FIFO_NOT_EMPTY BIT(4)
/* QUP_ERROR_FLAGS and QUP_ERROR_FLAGS_EN fields */
#define QUP_ERROR_OUTPUT_OVER_RUN BIT(5)
#define QUP_ERROR_INPUT_UNDER_RUN BIT(4)
#define QUP_ERROR_OUTPUT_UNDER_RUN BIT(3)
#define QUP_ERROR_INPUT_OVER_RUN BIT(2)
/* SPI_CONFIG fields */
#define SPI_CONFIG_HS_MODE BIT(10)
#define SPI_CONFIG_INPUT_FIRST BIT(9)
#define SPI_CONFIG_LOOPBACK BIT(8)
/* SPI_IO_CONTROL fields */
#define SPI_IO_C_FORCE_CS BIT(11)
#define SPI_IO_C_CLK_IDLE_HIGH BIT(10)
#define SPI_IO_C_MX_CS_MODE BIT(8)
#define SPI_IO_C_CS_N_POLARITY_0 BIT(4)
#define SPI_IO_C_CS_SELECT(x) (((x) & 3) << 2)
#define SPI_IO_C_CS_SELECT_MASK 0x000c
#define SPI_IO_C_TRISTATE_CS BIT(1)
#define SPI_IO_C_NO_TRI_STATE BIT(0)
/* SPI_ERROR_FLAGS and SPI_ERROR_FLAGS_EN fields */
#define SPI_ERROR_CLK_OVER_RUN BIT(1)
#define SPI_ERROR_CLK_UNDER_RUN BIT(0)
#define SPI_NUM_CHIPSELECTS 4
#define SPI_MAX_XFER (SZ_64K - 64)
/* high speed mode is when the bus rate is greater than 26 MHz */
#define SPI_HS_MIN_RATE 26000000
#define SPI_MAX_RATE 50000000
#define SPI_DELAY_THRESHOLD 1
#define SPI_DELAY_RETRY 10
struct spi_qup {
void __iomem *base;
struct device *dev;
struct clk *cclk; /* core clock */
struct clk *iclk; /* interface clock */
int irq;
spinlock_t lock;
int in_fifo_sz;
int out_fifo_sz;
int in_blk_sz;
int out_blk_sz;
struct spi_transfer *xfer;
struct completion done;
int error;
int w_size; /* bytes per SPI word */
int n_words;
int tx_bytes;
int rx_bytes;
const u8 *tx_buf;
u8 *rx_buf;
int qup_v1;
int mode;
struct dma_slave_config rx_conf;
struct dma_slave_config tx_conf;
};
static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer);
static inline bool spi_qup_is_flag_set(struct spi_qup *controller, u32 flag)
{
u32 opflag = readl_relaxed(controller->base + QUP_OPERATIONAL);
return (opflag & flag) != 0;
}
static inline bool spi_qup_is_dma_xfer(int mode)
{
if (mode == QUP_IO_M_MODE_DMOV || mode == QUP_IO_M_MODE_BAM)
return true;
return false;
}
/* Get the length of the current transaction in bytes */
static inline unsigned int spi_qup_len(struct spi_qup *controller)
{
return controller->n_words * controller->w_size;
}
static inline bool spi_qup_is_valid_state(struct spi_qup *controller)
{
u32 opstate = readl_relaxed(controller->base + QUP_STATE);
return opstate & QUP_STATE_VALID;
}
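/*
 * Wait for the state machine to report a valid state, request the new state
 * (a PAUSE -> RESET transition needs two CLEAR writes per the spec), then
 * wait again for the transition to complete. Returns -EIO if the controller
 * never becomes valid.
 */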
static int spi_qup_set_state(struct spi_qup *controller, u32 state)
{
unsigned long loop;
u32 cur_state;
loop = 0;
while (!spi_qup_is_valid_state(controller)) {
usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);
if (++loop > SPI_DELAY_RETRY)
return -EIO;
}
if (loop)
dev_dbg(controller->dev, "invalid state for %ld,us %d\n",
loop, state);
cur_state = readl_relaxed(controller->base + QUP_STATE);
/*
* Per spec: for PAUSE_STATE to RESET_STATE, two writes
* of (b10) are required
*/
if (((cur_state & QUP_STATE_MASK) == QUP_STATE_PAUSE) &&
(state == QUP_STATE_RESET)) {
writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
} else {
cur_state &= ~QUP_STATE_MASK;
cur_state |= state;
writel_relaxed(cur_state, controller->base + QUP_STATE);
}
loop = 0;
while (!spi_qup_is_valid_state(controller)) {
usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);
if (++loop > SPI_DELAY_RETRY)
return -EIO;
}
return 0;
}
static void spi_qup_read_from_fifo(struct spi_qup *controller, u32 num_words)
{
u8 *rx_buf = controller->rx_buf;
int i, shift, num_bytes;
u32 word;
for (; num_words; num_words--) {
word = readl_relaxed(controller->base + QUP_INPUT_FIFO);
num_bytes = min_t(int, spi_qup_len(controller) -
controller->rx_bytes,
controller->w_size);
if (!rx_buf) {
controller->rx_bytes += num_bytes;
continue;
}
for (i = 0; i < num_bytes; i++, controller->rx_bytes++) {
/*
* The data format depends on bytes per SPI word:
* 4 bytes: 0x12345678
* 2 bytes: 0x00001234
* 1 byte : 0x00000012
*/
shift = BITS_PER_BYTE;
shift *= (controller->w_size - i - 1);
rx_buf[controller->rx_bytes] = word >> shift;
}
}
}
static void spi_qup_read(struct spi_qup *controller, u32 *opflags)
{
u32 remainder, words_per_block, num_words;
bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK;
remainder = DIV_ROUND_UP(spi_qup_len(controller) - controller->rx_bytes,
controller->w_size);
words_per_block = controller->in_blk_sz >> 2;
do {
/* ACK by clearing service flag */
writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
controller->base + QUP_OPERATIONAL);
if (!remainder)
goto exit;
if (is_block_mode) {
num_words = (remainder > words_per_block) ?
words_per_block : remainder;
} else {
if (!spi_qup_is_flag_set(controller,
QUP_OP_IN_FIFO_NOT_EMPTY))
break;
num_words = 1;
}
/* read up to the maximum transfer size available */
spi_qup_read_from_fifo(controller, num_words);
remainder -= num_words;
/* if block mode, check to see if next block is available */
if (is_block_mode && !spi_qup_is_flag_set(controller,
QUP_OP_IN_BLOCK_READ_REQ))
break;
} while (remainder);
/*
* Due to extra stickiness of the QUP_OP_IN_SERVICE_FLAG during block
* reads, it has to be cleared again at the very end. However, be sure
* to refresh opflags value because MAX_INPUT_DONE_FLAG may now be
* present and this is used to determine if transaction is complete
*/
exit:
if (!remainder) {
*opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);
if (is_block_mode && *opflags & QUP_OP_MAX_INPUT_DONE_FLAG)
writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
controller->base + QUP_OPERATIONAL);
}
}
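/*
 * Pack up to w_size bytes of the TX buffer into each 32-bit FIFO word (first
 * byte in the most significant byte lane) and push num_words words to the
 * output FIFO.
 */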
static void spi_qup_write_to_fifo(struct spi_qup *controller, u32 num_words)
{
const u8 *tx_buf = controller->tx_buf;
int i, num_bytes;
u32 word, data;
for (; num_words; num_words--) {
word = 0;
num_bytes = min_t(int, spi_qup_len(controller) -
controller->tx_bytes,
controller->w_size);
if (tx_buf)
for (i = 0; i < num_bytes; i++) {
data = tx_buf[controller->tx_bytes + i];
word |= data << (BITS_PER_BYTE * (3 - i));
}
controller->tx_bytes += num_bytes;
writel_relaxed(word, controller->base + QUP_OUTPUT_FIFO);
}
}
static void spi_qup_dma_done(void *data)
{
struct spi_qup *qup = data;
complete(&qup->done);
}
static void spi_qup_write(struct spi_qup *controller)
{
bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK;
u32 remainder, words_per_block, num_words;
remainder = DIV_ROUND_UP(spi_qup_len(controller) - controller->tx_bytes,
controller->w_size);
words_per_block = controller->out_blk_sz >> 2;
do {
/* ACK by clearing service flag */
writel_relaxed(QUP_OP_OUT_SERVICE_FLAG,
controller->base + QUP_OPERATIONAL);
/* make sure the interrupt is valid */
if (!remainder)
return;
if (is_block_mode) {
num_words = (remainder > words_per_block) ?
words_per_block : remainder;
} else {
if (spi_qup_is_flag_set(controller,
QUP_OP_OUT_FIFO_FULL))
break;
num_words = 1;
}
spi_qup_write_to_fifo(controller, num_words);
remainder -= num_words;
/* if block mode, check to see if next block is available */
if (is_block_mode && !spi_qup_is_flag_set(controller,
QUP_OP_OUT_BLOCK_WRITE_REQ))
break;
} while (remainder);
}
static int spi_qup_prep_sg(struct spi_controller *host, struct scatterlist *sgl,
unsigned int nents, enum dma_transfer_direction dir,
dma_async_tx_callback callback)
{
struct spi_qup *qup = spi_controller_get_devdata(host);
unsigned long flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE;
struct dma_async_tx_descriptor *desc;
struct dma_chan *chan;
dma_cookie_t cookie;
if (dir == DMA_MEM_TO_DEV)
chan = host->dma_tx;
else
chan = host->dma_rx;
desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
if (IS_ERR_OR_NULL(desc))
return desc ? PTR_ERR(desc) : -EINVAL;
desc->callback = callback;
desc->callback_param = qup;
cookie = dmaengine_submit(desc);
return dma_submit_error(cookie);
}
static void spi_qup_dma_terminate(struct spi_controller *host,
struct spi_transfer *xfer)
{
if (xfer->tx_buf)
dmaengine_terminate_all(host->dma_tx);
if (xfer->rx_buf)
dmaengine_terminate_all(host->dma_rx);
}
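/*
 * Walk the scatterlist and add up DMA segment lengths until either 'max'
 * bytes would be exceeded or the running total would overflow, counting the
 * number of entries consumed in *nents.
 */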
static u32 spi_qup_sgl_get_nents_len(struct scatterlist *sgl, u32 max,
u32 *nents)
{
struct scatterlist *sg;
u32 total = 0;
for (sg = sgl; sg; sg = sg_next(sg)) {
unsigned int len = sg_dma_len(sg);
/* check for overflow as well as limit */
if (((total + len) < total) || ((total + len) > max))
break;
total += len;
(*nents)++;
}
return total;
}
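/*
 * DMA path: issue the transfer as one or more descriptor batches, each
 * covering at most SPI_MAX_XFER bytes of the RX/TX scatterlists, and wait for
 * completion of each batch before advancing to the next one.
 */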
static int spi_qup_do_dma(struct spi_device *spi, struct spi_transfer *xfer,
unsigned long timeout)
{
dma_async_tx_callback rx_done = NULL, tx_done = NULL;
struct spi_controller *host = spi->controller;
struct spi_qup *qup = spi_controller_get_devdata(host);
struct scatterlist *tx_sgl, *rx_sgl;
int ret;
if (xfer->rx_buf)
rx_done = spi_qup_dma_done;
else if (xfer->tx_buf)
tx_done = spi_qup_dma_done;
rx_sgl = xfer->rx_sg.sgl;
tx_sgl = xfer->tx_sg.sgl;
do {
u32 rx_nents = 0, tx_nents = 0;
if (rx_sgl)
qup->n_words = spi_qup_sgl_get_nents_len(rx_sgl,
SPI_MAX_XFER, &rx_nents) / qup->w_size;
if (tx_sgl)
qup->n_words = spi_qup_sgl_get_nents_len(tx_sgl,
SPI_MAX_XFER, &tx_nents) / qup->w_size;
if (!qup->n_words)
return -EIO;
ret = spi_qup_io_config(spi, xfer);
if (ret)
return ret;
/* before issuing the descriptors, set the QUP to run */
ret = spi_qup_set_state(qup, QUP_STATE_RUN);
if (ret) {
dev_warn(qup->dev, "cannot set RUN state\n");
return ret;
}
if (rx_sgl) {
ret = spi_qup_prep_sg(host, rx_sgl, rx_nents,
DMA_DEV_TO_MEM, rx_done);
if (ret)
return ret;
dma_async_issue_pending(host->dma_rx);
}
if (tx_sgl) {
ret = spi_qup_prep_sg(host, tx_sgl, tx_nents,
DMA_MEM_TO_DEV, tx_done);
if (ret)
return ret;
dma_async_issue_pending(host->dma_tx);
}
if (!wait_for_completion_timeout(&qup->done, timeout))
return -ETIMEDOUT;
for (; rx_sgl && rx_nents--; rx_sgl = sg_next(rx_sgl))
;
for (; tx_sgl && tx_nents--; tx_sgl = sg_next(tx_sgl))
;
} while (rx_sgl || tx_sgl);
return 0;
}
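/*
 * PIO path: split the transfer into SPI_MAX_XFER-word chunks, pre-fill the
 * output FIFO in FIFO mode while the controller is paused, then run and let
 * the interrupt handler drain/refill the FIFOs until the chunk completes.
 */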
static int spi_qup_do_pio(struct spi_device *spi, struct spi_transfer *xfer,
unsigned long timeout)
{
struct spi_controller *host = spi->controller;
struct spi_qup *qup = spi_controller_get_devdata(host);
int ret, n_words, iterations, offset = 0;
n_words = qup->n_words;
iterations = n_words / SPI_MAX_XFER; /* round down */
qup->rx_buf = xfer->rx_buf;
qup->tx_buf = xfer->tx_buf;
do {
if (iterations)
qup->n_words = SPI_MAX_XFER;
else
qup->n_words = n_words % SPI_MAX_XFER;
if (qup->tx_buf && offset)
qup->tx_buf = xfer->tx_buf + offset * SPI_MAX_XFER;
if (qup->rx_buf && offset)
qup->rx_buf = xfer->rx_buf + offset * SPI_MAX_XFER;
/*
* if the transaction is small enough, we need
* to fallback to FIFO mode
*/
if (qup->n_words <= (qup->in_fifo_sz / sizeof(u32)))
qup->mode = QUP_IO_M_MODE_FIFO;
ret = spi_qup_io_config(spi, xfer);
if (ret)
return ret;
ret = spi_qup_set_state(qup, QUP_STATE_RUN);
if (ret) {
dev_warn(qup->dev, "cannot set RUN state\n");
return ret;
}
ret = spi_qup_set_state(qup, QUP_STATE_PAUSE);
if (ret) {
dev_warn(qup->dev, "cannot set PAUSE state\n");
return ret;
}
if (qup->mode == QUP_IO_M_MODE_FIFO)
spi_qup_write(qup);
ret = spi_qup_set_state(qup, QUP_STATE_RUN);
if (ret) {
dev_warn(qup->dev, "cannot set RUN state\n");
return ret;
}
if (!wait_for_completion_timeout(&qup->done, timeout))
return -ETIMEDOUT;
offset++;
} while (iterations--);
return 0;
}
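/*
 * Returns true while any words of the current transfer still have to be
 * written to or read from the FIFOs.
 */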
static bool spi_qup_data_pending(struct spi_qup *controller)
{
unsigned int remainder_tx, remainder_rx;
remainder_tx = DIV_ROUND_UP(spi_qup_len(controller) -
controller->tx_bytes, controller->w_size);
remainder_rx = DIV_ROUND_UP(spi_qup_len(controller) -
controller->rx_bytes, controller->w_size);
return remainder_tx || remainder_rx;
}
static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
{
struct spi_qup *controller = dev_id;
u32 opflags, qup_err, spi_err;
int error = 0;
qup_err = readl_relaxed(controller->base + QUP_ERROR_FLAGS);
spi_err = readl_relaxed(controller->base + SPI_ERROR_FLAGS);
opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);
writel_relaxed(qup_err, controller->base + QUP_ERROR_FLAGS);
writel_relaxed(spi_err, controller->base + SPI_ERROR_FLAGS);
if (qup_err) {
if (qup_err & QUP_ERROR_OUTPUT_OVER_RUN)
dev_warn(controller->dev, "OUTPUT_OVER_RUN\n");
if (qup_err & QUP_ERROR_INPUT_UNDER_RUN)
dev_warn(controller->dev, "INPUT_UNDER_RUN\n");
if (qup_err & QUP_ERROR_OUTPUT_UNDER_RUN)
dev_warn(controller->dev, "OUTPUT_UNDER_RUN\n");
if (qup_err & QUP_ERROR_INPUT_OVER_RUN)
dev_warn(controller->dev, "INPUT_OVER_RUN\n");
error = -EIO;
}
if (spi_err) {
if (spi_err & SPI_ERROR_CLK_OVER_RUN)
dev_warn(controller->dev, "CLK_OVER_RUN\n");
if (spi_err & SPI_ERROR_CLK_UNDER_RUN)
dev_warn(controller->dev, "CLK_UNDER_RUN\n");
error = -EIO;
}
spin_lock(&controller->lock);
if (!controller->error)
controller->error = error;
spin_unlock(&controller->lock);
if (spi_qup_is_dma_xfer(controller->mode)) {
writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
} else {
if (opflags & QUP_OP_IN_SERVICE_FLAG)
spi_qup_read(controller, &opflags);
if (opflags & QUP_OP_OUT_SERVICE_FLAG)
spi_qup_write(controller);
if (!spi_qup_data_pending(controller))
complete(&controller->done);
}
if (error)
complete(&controller->done);
if (opflags & QUP_OP_MAX_INPUT_DONE_FLAG) {
if (!spi_qup_is_dma_xfer(controller->mode)) {
if (spi_qup_data_pending(controller))
return IRQ_HANDLED;
}
complete(&controller->done);
}
return IRQ_HANDLED;
}
/* set clock freq ... bits per word, determine mode */
static int spi_qup_io_prep(struct spi_device *spi, struct spi_transfer *xfer)
{
struct spi_qup *controller = spi_controller_get_devdata(spi->controller);
int ret;
if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) {
dev_err(controller->dev, "too big size for loopback %d > %d\n",
xfer->len, controller->in_fifo_sz);
return -EIO;
}
ret = clk_set_rate(controller->cclk, xfer->speed_hz);
if (ret) {
dev_err(controller->dev, "fail to set frequency %d",
xfer->speed_hz);
return -EIO;
}
controller->w_size = DIV_ROUND_UP(xfer->bits_per_word, 8);
controller->n_words = xfer->len / controller->w_size;
if (controller->n_words <= (controller->in_fifo_sz / sizeof(u32)))
controller->mode = QUP_IO_M_MODE_FIFO;
else if (spi->controller->can_dma &&
spi->controller->can_dma(spi->controller, spi, xfer) &&
spi->controller->cur_msg_mapped)
controller->mode = QUP_IO_M_MODE_BAM;
else
controller->mode = QUP_IO_M_MODE_BLOCK;
return 0;
}
/* prep qup for another spi transaction of specific type */
static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
{
struct spi_qup *controller = spi_controller_get_devdata(spi->controller);
u32 config, iomode, control;
unsigned long flags;
spin_lock_irqsave(&controller->lock, flags);
controller->xfer = xfer;
controller->error = 0;
controller->rx_bytes = 0;
controller->tx_bytes = 0;
spin_unlock_irqrestore(&controller->lock, flags);
if (spi_qup_set_state(controller, QUP_STATE_RESET)) {
dev_err(controller->dev, "cannot set RESET state\n");
return -EIO;
}
switch (controller->mode) {
case QUP_IO_M_MODE_FIFO:
writel_relaxed(controller->n_words,
controller->base + QUP_MX_READ_CNT);
writel_relaxed(controller->n_words,
controller->base + QUP_MX_WRITE_CNT);
/* must be zero for FIFO */
writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT);
writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
break;
case QUP_IO_M_MODE_BAM:
writel_relaxed(controller->n_words,
controller->base + QUP_MX_INPUT_CNT);
writel_relaxed(controller->n_words,
controller->base + QUP_MX_OUTPUT_CNT);
/* must be zero for BLOCK and BAM */
writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
if (!controller->qup_v1) {
void __iomem *input_cnt;
input_cnt = controller->base + QUP_MX_INPUT_CNT;
/*
* for DMA transfers, both QUP_MX_INPUT_CNT and
			 * QUP_MX_OUTPUT_CNT must be zero in all cases but one.
* That case is a non-balanced transfer when there is
* only a rx_buf.
*/
if (xfer->tx_buf)
writel_relaxed(0, input_cnt);
else
writel_relaxed(controller->n_words, input_cnt);
writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
}
break;
case QUP_IO_M_MODE_BLOCK:
reinit_completion(&controller->done);
writel_relaxed(controller->n_words,
controller->base + QUP_MX_INPUT_CNT);
writel_relaxed(controller->n_words,
controller->base + QUP_MX_OUTPUT_CNT);
/* must be zero for BLOCK and BAM */
writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
break;
default:
dev_err(controller->dev, "unknown mode = %d\n",
controller->mode);
return -EIO;
}
iomode = readl_relaxed(controller->base + QUP_IO_M_MODES);
/* Set input and output transfer mode */
iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK);
if (!spi_qup_is_dma_xfer(controller->mode))
iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN);
else
iomode |= QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN;
iomode |= (controller->mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT);
iomode |= (controller->mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT);
writel_relaxed(iomode, controller->base + QUP_IO_M_MODES);
control = readl_relaxed(controller->base + SPI_IO_CONTROL);
if (spi->mode & SPI_CPOL)
control |= SPI_IO_C_CLK_IDLE_HIGH;
else
control &= ~SPI_IO_C_CLK_IDLE_HIGH;
writel_relaxed(control, controller->base + SPI_IO_CONTROL);
config = readl_relaxed(controller->base + SPI_CONFIG);
if (spi->mode & SPI_LOOP)
config |= SPI_CONFIG_LOOPBACK;
else
config &= ~SPI_CONFIG_LOOPBACK;
if (spi->mode & SPI_CPHA)
config &= ~SPI_CONFIG_INPUT_FIRST;
else
config |= SPI_CONFIG_INPUT_FIRST;
/*
* HS_MODE improves signal stability for spi-clk high rates,
* but is invalid in loop back mode.
*/
if ((xfer->speed_hz >= SPI_HS_MIN_RATE) && !(spi->mode & SPI_LOOP))
config |= SPI_CONFIG_HS_MODE;
else
config &= ~SPI_CONFIG_HS_MODE;
writel_relaxed(config, controller->base + SPI_CONFIG);
config = readl_relaxed(controller->base + QUP_CONFIG);
config &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N);
config |= xfer->bits_per_word - 1;
config |= QUP_CONFIG_SPI_MODE;
if (spi_qup_is_dma_xfer(controller->mode)) {
if (!xfer->tx_buf)
config |= QUP_CONFIG_NO_OUTPUT;
if (!xfer->rx_buf)
config |= QUP_CONFIG_NO_INPUT;
}
writel_relaxed(config, controller->base + QUP_CONFIG);
/* only write to OPERATIONAL_MASK when register is present */
if (!controller->qup_v1) {
u32 mask = 0;
/*
* mask INPUT and OUTPUT service flags to prevent IRQs on FIFO
* status change in BAM mode
*/
if (spi_qup_is_dma_xfer(controller->mode))
mask = QUP_OP_IN_SERVICE_FLAG | QUP_OP_OUT_SERVICE_FLAG;
writel_relaxed(mask, controller->base + QUP_OPERATIONAL_MASK);
}
return 0;
}
static int spi_qup_transfer_one(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct spi_qup *controller = spi_controller_get_devdata(host);
unsigned long timeout, flags;
int ret;
ret = spi_qup_io_prep(spi, xfer);
if (ret)
return ret;
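	/* Allow roughly 100 times the nominal duration of one chunk before timing out */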
timeout = DIV_ROUND_UP(xfer->speed_hz, MSEC_PER_SEC);
timeout = DIV_ROUND_UP(min_t(unsigned long, SPI_MAX_XFER,
xfer->len) * 8, timeout);
timeout = 100 * msecs_to_jiffies(timeout);
reinit_completion(&controller->done);
spin_lock_irqsave(&controller->lock, flags);
controller->xfer = xfer;
controller->error = 0;
controller->rx_bytes = 0;
controller->tx_bytes = 0;
spin_unlock_irqrestore(&controller->lock, flags);
if (spi_qup_is_dma_xfer(controller->mode))
ret = spi_qup_do_dma(spi, xfer, timeout);
else
ret = spi_qup_do_pio(spi, xfer, timeout);
spi_qup_set_state(controller, QUP_STATE_RESET);
spin_lock_irqsave(&controller->lock, flags);
if (!ret)
ret = controller->error;
spin_unlock_irqrestore(&controller->lock, flags);
if (ret && spi_qup_is_dma_xfer(controller->mode))
spi_qup_dma_terminate(host, xfer);
return ret;
}
static bool spi_qup_can_dma(struct spi_controller *host, struct spi_device *spi,
struct spi_transfer *xfer)
{
struct spi_qup *qup = spi_controller_get_devdata(host);
size_t dma_align = dma_get_cache_alignment();
int n_words;
if (xfer->rx_buf) {
if (!IS_ALIGNED((size_t)xfer->rx_buf, dma_align) ||
IS_ERR_OR_NULL(host->dma_rx))
return false;
if (qup->qup_v1 && (xfer->len % qup->in_blk_sz))
return false;
}
if (xfer->tx_buf) {
if (!IS_ALIGNED((size_t)xfer->tx_buf, dma_align) ||
IS_ERR_OR_NULL(host->dma_tx))
return false;
if (qup->qup_v1 && (xfer->len % qup->out_blk_sz))
return false;
}
n_words = xfer->len / DIV_ROUND_UP(xfer->bits_per_word, 8);
if (n_words <= (qup->in_fifo_sz / sizeof(u32)))
return false;
return true;
}
static void spi_qup_release_dma(struct spi_controller *host)
{
if (!IS_ERR_OR_NULL(host->dma_rx))
dma_release_channel(host->dma_rx);
if (!IS_ERR_OR_NULL(host->dma_tx))
dma_release_channel(host->dma_tx);
}
static int spi_qup_init_dma(struct spi_controller *host, resource_size_t base)
{
struct spi_qup *spi = spi_controller_get_devdata(host);
struct dma_slave_config *rx_conf = &spi->rx_conf,
*tx_conf = &spi->tx_conf;
struct device *dev = spi->dev;
int ret;
/* allocate dma resources, if available */
host->dma_rx = dma_request_chan(dev, "rx");
if (IS_ERR(host->dma_rx))
return PTR_ERR(host->dma_rx);
host->dma_tx = dma_request_chan(dev, "tx");
if (IS_ERR(host->dma_tx)) {
ret = PTR_ERR(host->dma_tx);
goto err_tx;
}
/* set DMA parameters */
rx_conf->direction = DMA_DEV_TO_MEM;
rx_conf->device_fc = 1;
rx_conf->src_addr = base + QUP_INPUT_FIFO;
rx_conf->src_maxburst = spi->in_blk_sz;
tx_conf->direction = DMA_MEM_TO_DEV;
tx_conf->device_fc = 1;
tx_conf->dst_addr = base + QUP_OUTPUT_FIFO;
tx_conf->dst_maxburst = spi->out_blk_sz;
ret = dmaengine_slave_config(host->dma_rx, rx_conf);
if (ret) {
dev_err(dev, "failed to configure RX channel\n");
goto err;
}
ret = dmaengine_slave_config(host->dma_tx, tx_conf);
if (ret) {
dev_err(dev, "failed to configure TX channel\n");
goto err;
}
return 0;
err:
dma_release_channel(host->dma_tx);
err_tx:
dma_release_channel(host->dma_rx);
return ret;
}
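/*
 * Native chip-select control: toggle the FORCE_CS bit to drive the CS line
 * manually and skip the register write when nothing changes. Installed as
 * ->set_cs only for non-v1 controllers (see probe).
 */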
static void spi_qup_set_cs(struct spi_device *spi, bool val)
{
struct spi_qup *controller;
u32 spi_ioc;
u32 spi_ioc_orig;
controller = spi_controller_get_devdata(spi->controller);
spi_ioc = readl_relaxed(controller->base + SPI_IO_CONTROL);
spi_ioc_orig = spi_ioc;
if (!val)
spi_ioc |= SPI_IO_C_FORCE_CS;
else
spi_ioc &= ~SPI_IO_C_FORCE_CS;
if (spi_ioc != spi_ioc_orig)
writel_relaxed(spi_ioc, controller->base + SPI_IO_CONTROL);
}
static int spi_qup_probe(struct platform_device *pdev)
{
struct spi_controller *host;
struct clk *iclk, *cclk;
struct spi_qup *controller;
struct resource *res;
struct device *dev;
void __iomem *base;
u32 max_freq, iomode, num_cs;
int ret, irq, size;
dev = &pdev->dev;
base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base))
return PTR_ERR(base);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
cclk = devm_clk_get(dev, "core");
if (IS_ERR(cclk))
return PTR_ERR(cclk);
iclk = devm_clk_get(dev, "iface");
if (IS_ERR(iclk))
return PTR_ERR(iclk);
/* This is optional parameter */
if (of_property_read_u32(dev->of_node, "spi-max-frequency", &max_freq))
max_freq = SPI_MAX_RATE;
if (!max_freq || max_freq > SPI_MAX_RATE) {
dev_err(dev, "invalid clock frequency %d\n", max_freq);
return -ENXIO;
}
host = spi_alloc_host(dev, sizeof(struct spi_qup));
if (!host) {
dev_err(dev, "cannot allocate host\n");
return -ENOMEM;
}
/* use num-cs unless not present or out of range */
if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) ||
num_cs > SPI_NUM_CHIPSELECTS)
host->num_chipselect = SPI_NUM_CHIPSELECTS;
else
host->num_chipselect = num_cs;
host->use_gpio_descriptors = true;
host->max_native_cs = SPI_NUM_CHIPSELECTS;
host->bus_num = pdev->id;
host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
host->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
host->max_speed_hz = max_freq;
host->transfer_one = spi_qup_transfer_one;
host->dev.of_node = pdev->dev.of_node;
host->auto_runtime_pm = true;
host->dma_alignment = dma_get_cache_alignment();
host->max_dma_len = SPI_MAX_XFER;
platform_set_drvdata(pdev, host);
controller = spi_controller_get_devdata(host);
controller->dev = dev;
controller->base = base;
controller->iclk = iclk;
controller->cclk = cclk;
controller->irq = irq;
ret = spi_qup_init_dma(host, res->start);
if (ret == -EPROBE_DEFER)
goto error;
else if (!ret)
host->can_dma = spi_qup_can_dma;
controller->qup_v1 = (uintptr_t)of_device_get_match_data(dev);
if (!controller->qup_v1)
host->set_cs = spi_qup_set_cs;
spin_lock_init(&controller->lock);
init_completion(&controller->done);
ret = clk_prepare_enable(cclk);
if (ret) {
dev_err(dev, "cannot enable core clock\n");
goto error_dma;
}
ret = clk_prepare_enable(iclk);
if (ret) {
clk_disable_unprepare(cclk);
dev_err(dev, "cannot enable iface clock\n");
goto error_dma;
}
iomode = readl_relaxed(base + QUP_IO_M_MODES);
size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
if (size)
controller->out_blk_sz = size * 16;
else
controller->out_blk_sz = 4;
size = QUP_IO_M_INPUT_BLOCK_SIZE(iomode);
if (size)
controller->in_blk_sz = size * 16;
else
controller->in_blk_sz = 4;
size = QUP_IO_M_OUTPUT_FIFO_SIZE(iomode);
controller->out_fifo_sz = controller->out_blk_sz * (2 << size);
size = QUP_IO_M_INPUT_FIFO_SIZE(iomode);
controller->in_fifo_sz = controller->in_blk_sz * (2 << size);
dev_info(dev, "IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n",
controller->in_blk_sz, controller->in_fifo_sz,
controller->out_blk_sz, controller->out_fifo_sz);
writel_relaxed(1, base + QUP_SW_RESET);
ret = spi_qup_set_state(controller, QUP_STATE_RESET);
if (ret) {
dev_err(dev, "cannot set RESET state\n");
goto error_clk;
}
writel_relaxed(0, base + QUP_OPERATIONAL);
writel_relaxed(0, base + QUP_IO_M_MODES);
if (!controller->qup_v1)
writel_relaxed(0, base + QUP_OPERATIONAL_MASK);
writel_relaxed(SPI_ERROR_CLK_UNDER_RUN | SPI_ERROR_CLK_OVER_RUN,
base + SPI_ERROR_FLAGS_EN);
/* if earlier version of the QUP, disable INPUT_OVERRUN */
if (controller->qup_v1)
writel_relaxed(QUP_ERROR_OUTPUT_OVER_RUN |
QUP_ERROR_INPUT_UNDER_RUN | QUP_ERROR_OUTPUT_UNDER_RUN,
base + QUP_ERROR_FLAGS_EN);
writel_relaxed(0, base + SPI_CONFIG);
writel_relaxed(SPI_IO_C_NO_TRI_STATE, base + SPI_IO_CONTROL);
ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
IRQF_TRIGGER_HIGH, pdev->name, controller);
if (ret)
goto error_clk;
pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
pm_runtime_use_autosuspend(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
ret = devm_spi_register_controller(dev, host);
if (ret)
goto disable_pm;
return 0;
disable_pm:
pm_runtime_disable(&pdev->dev);
error_clk:
clk_disable_unprepare(cclk);
clk_disable_unprepare(iclk);
error_dma:
spi_qup_release_dma(host);
error:
spi_controller_put(host);
return ret;
}
#ifdef CONFIG_PM
static int spi_qup_pm_suspend_runtime(struct device *device)
{
struct spi_controller *host = dev_get_drvdata(device);
struct spi_qup *controller = spi_controller_get_devdata(host);
u32 config;
	/* Enable automatic clock gating */
config = readl(controller->base + QUP_CONFIG);
config |= QUP_CONFIG_CLOCK_AUTO_GATE;
writel_relaxed(config, controller->base + QUP_CONFIG);
clk_disable_unprepare(controller->cclk);
clk_disable_unprepare(controller->iclk);
return 0;
}
static int spi_qup_pm_resume_runtime(struct device *device)
{
struct spi_controller *host = dev_get_drvdata(device);
struct spi_qup *controller = spi_controller_get_devdata(host);
u32 config;
int ret;
ret = clk_prepare_enable(controller->iclk);
if (ret)
return ret;
ret = clk_prepare_enable(controller->cclk);
if (ret) {
clk_disable_unprepare(controller->iclk);
return ret;
}
	/* Disable automatic clock gating */
config = readl_relaxed(controller->base + QUP_CONFIG);
config &= ~QUP_CONFIG_CLOCK_AUTO_GATE;
writel_relaxed(config, controller->base + QUP_CONFIG);
return 0;
}
#endif /* CONFIG_PM */
#ifdef CONFIG_PM_SLEEP
static int spi_qup_suspend(struct device *device)
{
struct spi_controller *host = dev_get_drvdata(device);
struct spi_qup *controller = spi_controller_get_devdata(host);
int ret;
if (pm_runtime_suspended(device)) {
ret = spi_qup_pm_resume_runtime(device);
if (ret)
return ret;
}
ret = spi_controller_suspend(host);
if (ret)
return ret;
ret = spi_qup_set_state(controller, QUP_STATE_RESET);
if (ret)
return ret;
clk_disable_unprepare(controller->cclk);
clk_disable_unprepare(controller->iclk);
return 0;
}
static int spi_qup_resume(struct device *device)
{
struct spi_controller *host = dev_get_drvdata(device);
struct spi_qup *controller = spi_controller_get_devdata(host);
int ret;
ret = clk_prepare_enable(controller->iclk);
if (ret)
return ret;
ret = clk_prepare_enable(controller->cclk);
if (ret) {
clk_disable_unprepare(controller->iclk);
return ret;
}
ret = spi_qup_set_state(controller, QUP_STATE_RESET);
if (ret)
goto disable_clk;
ret = spi_controller_resume(host);
if (ret)
goto disable_clk;
return 0;
disable_clk:
clk_disable_unprepare(controller->cclk);
clk_disable_unprepare(controller->iclk);
return ret;
}
#endif /* CONFIG_PM_SLEEP */
static void spi_qup_remove(struct platform_device *pdev)
{
struct spi_controller *host = dev_get_drvdata(&pdev->dev);
struct spi_qup *controller = spi_controller_get_devdata(host);
int ret;
ret = pm_runtime_get_sync(&pdev->dev);
if (ret >= 0) {
ret = spi_qup_set_state(controller, QUP_STATE_RESET);
if (ret)
dev_warn(&pdev->dev, "failed to reset controller (%pe)\n",
ERR_PTR(ret));
clk_disable_unprepare(controller->cclk);
clk_disable_unprepare(controller->iclk);
} else {
dev_warn(&pdev->dev, "failed to resume, skip hw disable (%pe)\n",
ERR_PTR(ret));
}
spi_qup_release_dma(host);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
}
static const struct of_device_id spi_qup_dt_match[] = {
{ .compatible = "qcom,spi-qup-v1.1.1", .data = (void *)1, },
{ .compatible = "qcom,spi-qup-v2.1.1", },
{ .compatible = "qcom,spi-qup-v2.2.1", },
{ }
};
MODULE_DEVICE_TABLE(of, spi_qup_dt_match);
static const struct dev_pm_ops spi_qup_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(spi_qup_suspend, spi_qup_resume)
SET_RUNTIME_PM_OPS(spi_qup_pm_suspend_runtime,
spi_qup_pm_resume_runtime,
NULL)
};
static struct platform_driver spi_qup_driver = {
.driver = {
.name = "spi_qup",
.pm = &spi_qup_dev_pm_ops,
.of_match_table = spi_qup_dt_match,
},
.probe = spi_qup_probe,
.remove_new = spi_qup_remove,
};
module_platform_driver(spi_qup_driver);
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:spi_qup");
| linux-master | drivers/spi/spi-qup.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* SPI driver for NVIDIA's Tegra114 SPI Controller.
*
* Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/reset.h>
#include <linux/spi/spi.h>
#define SPI_COMMAND1 0x000
#define SPI_BIT_LENGTH(x) (((x) & 0x1f) << 0)
#define SPI_PACKED (1 << 5)
#define SPI_TX_EN (1 << 11)
#define SPI_RX_EN (1 << 12)
#define SPI_BOTH_EN_BYTE (1 << 13)
#define SPI_BOTH_EN_BIT (1 << 14)
#define SPI_LSBYTE_FE (1 << 15)
#define SPI_LSBIT_FE (1 << 16)
#define SPI_BIDIROE (1 << 17)
#define SPI_IDLE_SDA_DRIVE_LOW (0 << 18)
#define SPI_IDLE_SDA_DRIVE_HIGH (1 << 18)
#define SPI_IDLE_SDA_PULL_LOW (2 << 18)
#define SPI_IDLE_SDA_PULL_HIGH (3 << 18)
#define SPI_IDLE_SDA_MASK (3 << 18)
#define SPI_CS_SW_VAL (1 << 20)
#define SPI_CS_SW_HW (1 << 21)
/* SPI_CS_POL_INACTIVE bits are default high */
/* n from 0 to 3 */
#define SPI_CS_POL_INACTIVE(n) (1 << (22 + (n)))
#define SPI_CS_POL_INACTIVE_MASK (0xF << 22)
#define SPI_CS_SEL_0 (0 << 26)
#define SPI_CS_SEL_1 (1 << 26)
#define SPI_CS_SEL_2 (2 << 26)
#define SPI_CS_SEL_3 (3 << 26)
#define SPI_CS_SEL_MASK (3 << 26)
#define SPI_CS_SEL(x) (((x) & 0x3) << 26)
#define SPI_CONTROL_MODE_0 (0 << 28)
#define SPI_CONTROL_MODE_1 (1 << 28)
#define SPI_CONTROL_MODE_2 (2 << 28)
#define SPI_CONTROL_MODE_3 (3 << 28)
#define SPI_CONTROL_MODE_MASK (3 << 28)
#define SPI_MODE_SEL(x) (((x) & 0x3) << 28)
#define SPI_M_S (1 << 30)
#define SPI_PIO (1 << 31)
#define SPI_COMMAND2 0x004
#define SPI_TX_TAP_DELAY(x) (((x) & 0x3F) << 6)
#define SPI_RX_TAP_DELAY(x) (((x) & 0x3F) << 0)
#define SPI_CS_TIMING1 0x008
#define SPI_SETUP_HOLD(setup, hold) (((setup) << 4) | (hold))
#define SPI_CS_SETUP_HOLD(reg, cs, val) \
((((val) & 0xFFu) << ((cs) * 8)) | \
((reg) & ~(0xFFu << ((cs) * 8))))
#define SPI_CS_TIMING2 0x00C
#define CYCLES_BETWEEN_PACKETS_0(x) (((x) & 0x1F) << 0)
#define CS_ACTIVE_BETWEEN_PACKETS_0 (1 << 5)
#define CYCLES_BETWEEN_PACKETS_1(x) (((x) & 0x1F) << 8)
#define CS_ACTIVE_BETWEEN_PACKETS_1 (1 << 13)
#define CYCLES_BETWEEN_PACKETS_2(x) (((x) & 0x1F) << 16)
#define CS_ACTIVE_BETWEEN_PACKETS_2 (1 << 21)
#define CYCLES_BETWEEN_PACKETS_3(x) (((x) & 0x1F) << 24)
#define CS_ACTIVE_BETWEEN_PACKETS_3 (1 << 29)
#define SPI_SET_CS_ACTIVE_BETWEEN_PACKETS(reg, cs, val) \
(reg = (((val) & 0x1) << ((cs) * 8 + 5)) | \
((reg) & ~(1 << ((cs) * 8 + 5))))
#define SPI_SET_CYCLES_BETWEEN_PACKETS(reg, cs, val) \
(reg = (((val) & 0x1F) << ((cs) * 8)) | \
((reg) & ~(0x1F << ((cs) * 8))))
#define MAX_SETUP_HOLD_CYCLES 16
#define MAX_INACTIVE_CYCLES 32
#define SPI_TRANS_STATUS 0x010
#define SPI_BLK_CNT(val) (((val) >> 0) & 0xFFFF)
#define SPI_SLV_IDLE_COUNT(val) (((val) >> 16) & 0xFF)
#define SPI_RDY (1 << 30)
#define SPI_FIFO_STATUS 0x014
#define SPI_RX_FIFO_EMPTY (1 << 0)
#define SPI_RX_FIFO_FULL (1 << 1)
#define SPI_TX_FIFO_EMPTY (1 << 2)
#define SPI_TX_FIFO_FULL (1 << 3)
#define SPI_RX_FIFO_UNF (1 << 4)
#define SPI_RX_FIFO_OVF (1 << 5)
#define SPI_TX_FIFO_UNF (1 << 6)
#define SPI_TX_FIFO_OVF (1 << 7)
#define SPI_ERR (1 << 8)
#define SPI_TX_FIFO_FLUSH (1 << 14)
#define SPI_RX_FIFO_FLUSH (1 << 15)
#define SPI_TX_FIFO_EMPTY_COUNT(val) (((val) >> 16) & 0x7F)
#define SPI_RX_FIFO_FULL_COUNT(val) (((val) >> 23) & 0x7F)
#define SPI_FRAME_END (1 << 30)
#define SPI_CS_INACTIVE (1 << 31)
#define SPI_FIFO_ERROR (SPI_RX_FIFO_UNF | \
SPI_RX_FIFO_OVF | SPI_TX_FIFO_UNF | SPI_TX_FIFO_OVF)
#define SPI_FIFO_EMPTY (SPI_RX_FIFO_EMPTY | SPI_TX_FIFO_EMPTY)
#define SPI_TX_DATA 0x018
#define SPI_RX_DATA 0x01C
#define SPI_DMA_CTL 0x020
#define SPI_TX_TRIG_1 (0 << 15)
#define SPI_TX_TRIG_4 (1 << 15)
#define SPI_TX_TRIG_8 (2 << 15)
#define SPI_TX_TRIG_16 (3 << 15)
#define SPI_TX_TRIG_MASK (3 << 15)
#define SPI_RX_TRIG_1 (0 << 19)
#define SPI_RX_TRIG_4 (1 << 19)
#define SPI_RX_TRIG_8 (2 << 19)
#define SPI_RX_TRIG_16 (3 << 19)
#define SPI_RX_TRIG_MASK (3 << 19)
#define SPI_IE_TX (1 << 28)
#define SPI_IE_RX (1 << 29)
#define SPI_CONT (1 << 30)
#define SPI_DMA (1 << 31)
#define SPI_DMA_EN SPI_DMA
#define SPI_DMA_BLK 0x024
#define SPI_DMA_BLK_SET(x) (((x) & 0xFFFF) << 0)
#define SPI_TX_FIFO 0x108
#define SPI_RX_FIFO 0x188
#define SPI_INTR_MASK 0x18c
#define SPI_INTR_ALL_MASK (0x1fUL << 25)
#define MAX_CHIP_SELECT 4
#define SPI_FIFO_DEPTH 64
#define DATA_DIR_TX (1 << 0)
#define DATA_DIR_RX (1 << 1)
#define SPI_DMA_TIMEOUT (msecs_to_jiffies(1000))
#define DEFAULT_SPI_DMA_BUF_LEN (16*1024)
#define TX_FIFO_EMPTY_COUNT_MAX SPI_TX_FIFO_EMPTY_COUNT(0x40)
#define RX_FIFO_FULL_COUNT_ZERO SPI_RX_FIFO_FULL_COUNT(0)
#define MAX_HOLD_CYCLES 16
#define SPI_DEFAULT_SPEED 25000000
struct tegra_spi_soc_data {
bool has_intr_mask_reg;
};
struct tegra_spi_client_data {
int tx_clk_tap_delay;
int rx_clk_tap_delay;
};
struct tegra_spi_data {
struct device *dev;
struct spi_master *master;
spinlock_t lock;
struct clk *clk;
struct reset_control *rst;
void __iomem *base;
phys_addr_t phys;
unsigned irq;
u32 cur_speed;
struct spi_device *cur_spi;
struct spi_device *cs_control;
unsigned cur_pos;
unsigned words_per_32bit;
unsigned bytes_per_word;
unsigned curr_dma_words;
unsigned cur_direction;
unsigned cur_rx_pos;
unsigned cur_tx_pos;
unsigned dma_buf_size;
unsigned max_buf_size;
bool is_curr_dma_xfer;
bool use_hw_based_cs;
struct completion rx_dma_complete;
struct completion tx_dma_complete;
u32 tx_status;
u32 rx_status;
u32 status_reg;
bool is_packed;
u32 command1_reg;
u32 dma_control_reg;
u32 def_command1_reg;
u32 def_command2_reg;
u32 spi_cs_timing1;
u32 spi_cs_timing2;
u8 last_used_cs;
struct completion xfer_completion;
struct spi_transfer *curr_xfer;
struct dma_chan *rx_dma_chan;
u32 *rx_dma_buf;
dma_addr_t rx_dma_phys;
struct dma_async_tx_descriptor *rx_dma_desc;
struct dma_chan *tx_dma_chan;
u32 *tx_dma_buf;
dma_addr_t tx_dma_phys;
struct dma_async_tx_descriptor *tx_dma_desc;
const struct tegra_spi_soc_data *soc_data;
};
static int tegra_spi_runtime_suspend(struct device *dev);
static int tegra_spi_runtime_resume(struct device *dev);
static inline u32 tegra_spi_readl(struct tegra_spi_data *tspi,
unsigned long reg)
{
return readl(tspi->base + reg);
}
static inline void tegra_spi_writel(struct tegra_spi_data *tspi,
u32 val, unsigned long reg)
{
writel(val, tspi->base + reg);
/* Read back register to make sure that register writes completed */
if (reg != SPI_TX_FIFO)
readl(tspi->base + SPI_COMMAND1);
}
static void tegra_spi_clear_status(struct tegra_spi_data *tspi)
{
u32 val;
/* Write 1 to clear status register */
val = tegra_spi_readl(tspi, SPI_TRANS_STATUS);
tegra_spi_writel(tspi, val, SPI_TRANS_STATUS);
/* Clear fifo status error if any */
val = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
if (val & SPI_ERR)
tegra_spi_writel(tspi, SPI_ERR | SPI_FIFO_ERROR,
SPI_FIFO_STATUS);
}
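/*
 * Select packed (SPI words packed into 32-bit FIFO words) or unpacked mode
 * for the current chunk and return the number of FIFO words it needs.
 */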
static unsigned tegra_spi_calculate_curr_xfer_param(
struct spi_device *spi, struct tegra_spi_data *tspi,
struct spi_transfer *t)
{
unsigned remain_len = t->len - tspi->cur_pos;
unsigned max_word;
unsigned bits_per_word = t->bits_per_word;
unsigned max_len;
unsigned total_fifo_words;
tspi->bytes_per_word = DIV_ROUND_UP(bits_per_word, 8);
if ((bits_per_word == 8 || bits_per_word == 16 ||
bits_per_word == 32) && t->len > 3) {
tspi->is_packed = true;
tspi->words_per_32bit = 32/bits_per_word;
} else {
tspi->is_packed = false;
tspi->words_per_32bit = 1;
}
if (tspi->is_packed) {
max_len = min(remain_len, tspi->max_buf_size);
tspi->curr_dma_words = max_len/tspi->bytes_per_word;
total_fifo_words = (max_len + 3) / 4;
} else {
max_word = (remain_len - 1) / tspi->bytes_per_word + 1;
max_word = min(max_word, tspi->max_buf_size/4);
tspi->curr_dma_words = max_word;
total_fifo_words = max_word;
}
return total_fifo_words;
}
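/*
 * PIO transmit path: copy client data into the TX FIFO, honouring the
 * packed/unpacked layout. Returns the number of SPI words written.
 */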
static unsigned tegra_spi_fill_tx_fifo_from_client_txbuf(
struct tegra_spi_data *tspi, struct spi_transfer *t)
{
unsigned nbytes;
unsigned tx_empty_count;
u32 fifo_status;
unsigned max_n_32bit;
unsigned i, count;
unsigned int written_words;
unsigned fifo_words_left;
u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
fifo_status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
tx_empty_count = SPI_TX_FIFO_EMPTY_COUNT(fifo_status);
if (tspi->is_packed) {
fifo_words_left = tx_empty_count * tspi->words_per_32bit;
written_words = min(fifo_words_left, tspi->curr_dma_words);
nbytes = written_words * tspi->bytes_per_word;
max_n_32bit = DIV_ROUND_UP(nbytes, 4);
for (count = 0; count < max_n_32bit; count++) {
u32 x = 0;
for (i = 0; (i < 4) && nbytes; i++, nbytes--)
x |= (u32)(*tx_buf++) << (i * 8);
tegra_spi_writel(tspi, x, SPI_TX_FIFO);
}
tspi->cur_tx_pos += written_words * tspi->bytes_per_word;
} else {
unsigned int write_bytes;
max_n_32bit = min(tspi->curr_dma_words, tx_empty_count);
written_words = max_n_32bit;
nbytes = written_words * tspi->bytes_per_word;
if (nbytes > t->len - tspi->cur_pos)
nbytes = t->len - tspi->cur_pos;
write_bytes = nbytes;
for (count = 0; count < max_n_32bit; count++) {
u32 x = 0;
for (i = 0; nbytes && (i < tspi->bytes_per_word);
i++, nbytes--)
x |= (u32)(*tx_buf++) << (i * 8);
tegra_spi_writel(tspi, x, SPI_TX_FIFO);
}
tspi->cur_tx_pos += write_bytes;
}
return written_words;
}
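/*
 * PIO receive path: drain the RX FIFO into the client buffer and return
 * the number of SPI words read.
 */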
static unsigned int tegra_spi_read_rx_fifo_to_client_rxbuf(
struct tegra_spi_data *tspi, struct spi_transfer *t)
{
unsigned rx_full_count;
u32 fifo_status;
unsigned i, count;
unsigned int read_words = 0;
unsigned len;
u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_rx_pos;
fifo_status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
rx_full_count = SPI_RX_FIFO_FULL_COUNT(fifo_status);
if (tspi->is_packed) {
len = tspi->curr_dma_words * tspi->bytes_per_word;
for (count = 0; count < rx_full_count; count++) {
u32 x = tegra_spi_readl(tspi, SPI_RX_FIFO);
for (i = 0; len && (i < 4); i++, len--)
*rx_buf++ = (x >> i*8) & 0xFF;
}
read_words += tspi->curr_dma_words;
tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
} else {
u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
u8 bytes_per_word = tspi->bytes_per_word;
unsigned int read_bytes;
len = rx_full_count * bytes_per_word;
if (len > t->len - tspi->cur_pos)
len = t->len - tspi->cur_pos;
read_bytes = len;
for (count = 0; count < rx_full_count; count++) {
u32 x = tegra_spi_readl(tspi, SPI_RX_FIFO) & rx_mask;
for (i = 0; len && (i < bytes_per_word); i++, len--)
*rx_buf++ = (x >> (i*8)) & 0xFF;
}
read_words += rx_full_count;
tspi->cur_rx_pos += read_bytes;
}
return read_words;
}
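/*
 * DMA transmit path: stage client data in the coherent TX bounce buffer,
 * expanding to one 32-bit word per SPI word when not in packed mode.
 */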
static void tegra_spi_copy_client_txbuf_to_spi_txbuf(
struct tegra_spi_data *tspi, struct spi_transfer *t)
{
/* Make the DMA buffer readable by the CPU */
dma_sync_single_for_cpu(tspi->dev, tspi->tx_dma_phys,
tspi->dma_buf_size, DMA_TO_DEVICE);
if (tspi->is_packed) {
unsigned len = tspi->curr_dma_words * tspi->bytes_per_word;
memcpy(tspi->tx_dma_buf, t->tx_buf + tspi->cur_pos, len);
tspi->cur_tx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
} else {
unsigned int i;
unsigned int count;
u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
unsigned int write_bytes;
if (consume > t->len - tspi->cur_pos)
consume = t->len - tspi->cur_pos;
write_bytes = consume;
for (count = 0; count < tspi->curr_dma_words; count++) {
u32 x = 0;
for (i = 0; consume && (i < tspi->bytes_per_word);
i++, consume--)
x |= (u32)(*tx_buf++) << (i * 8);
tspi->tx_dma_buf[count] = x;
}
tspi->cur_tx_pos += write_bytes;
}
/* Make the DMA buffer visible to the DMA engine again */
dma_sync_single_for_device(tspi->dev, tspi->tx_dma_phys,
tspi->dma_buf_size, DMA_TO_DEVICE);
}
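/*
 * DMA receive path: copy (and, in unpacked mode, narrow) the RX bounce
 * buffer contents back into the client buffer.
 */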
static void tegra_spi_copy_spi_rxbuf_to_client_rxbuf(
struct tegra_spi_data *tspi, struct spi_transfer *t)
{
/* Make the DMA buffer readable by the CPU */
dma_sync_single_for_cpu(tspi->dev, tspi->rx_dma_phys,
tspi->dma_buf_size, DMA_FROM_DEVICE);
if (tspi->is_packed) {
unsigned len = tspi->curr_dma_words * tspi->bytes_per_word;
memcpy(t->rx_buf + tspi->cur_rx_pos, tspi->rx_dma_buf, len);
tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
} else {
unsigned int i;
unsigned int count;
unsigned char *rx_buf = t->rx_buf + tspi->cur_rx_pos;
u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
unsigned int read_bytes;
if (consume > t->len - tspi->cur_pos)
consume = t->len - tspi->cur_pos;
read_bytes = consume;
for (count = 0; count < tspi->curr_dma_words; count++) {
u32 x = tspi->rx_dma_buf[count] & rx_mask;
for (i = 0; consume && (i < tspi->bytes_per_word);
i++, consume--)
*rx_buf++ = (x >> (i*8)) & 0xFF;
}
tspi->cur_rx_pos += read_bytes;
}
/* Make the DMA buffer visible to the DMA engine again */
dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
tspi->dma_buf_size, DMA_FROM_DEVICE);
}
static void tegra_spi_dma_complete(void *args)
{
struct completion *dma_complete = args;
complete(dma_complete);
}
static int tegra_spi_start_tx_dma(struct tegra_spi_data *tspi, int len)
{
reinit_completion(&tspi->tx_dma_complete);
tspi->tx_dma_desc = dmaengine_prep_slave_single(tspi->tx_dma_chan,
tspi->tx_dma_phys, len, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!tspi->tx_dma_desc) {
dev_err(tspi->dev, "Not able to get desc for Tx\n");
return -EIO;
}
tspi->tx_dma_desc->callback = tegra_spi_dma_complete;
tspi->tx_dma_desc->callback_param = &tspi->tx_dma_complete;
dmaengine_submit(tspi->tx_dma_desc);
dma_async_issue_pending(tspi->tx_dma_chan);
return 0;
}
static int tegra_spi_start_rx_dma(struct tegra_spi_data *tspi, int len)
{
reinit_completion(&tspi->rx_dma_complete);
tspi->rx_dma_desc = dmaengine_prep_slave_single(tspi->rx_dma_chan,
tspi->rx_dma_phys, len, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!tspi->rx_dma_desc) {
dev_err(tspi->dev, "Not able to get desc for Rx\n");
return -EIO;
}
tspi->rx_dma_desc->callback = tegra_spi_dma_complete;
tspi->rx_dma_desc->callback_param = &tspi->rx_dma_complete;
dmaengine_submit(tspi->rx_dma_desc);
dma_async_issue_pending(tspi->rx_dma_chan);
return 0;
}
static int tegra_spi_flush_fifos(struct tegra_spi_data *tspi)
{
unsigned long timeout = jiffies + HZ;
u32 status;
status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
if ((status & SPI_FIFO_EMPTY) != SPI_FIFO_EMPTY) {
status |= SPI_RX_FIFO_FLUSH | SPI_TX_FIFO_FLUSH;
tegra_spi_writel(tspi, status, SPI_FIFO_STATUS);
while ((status & SPI_FIFO_EMPTY) != SPI_FIFO_EMPTY) {
status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
if (time_after(jiffies, timeout)) {
dev_err(tspi->dev,
"timeout waiting for fifo flush\n");
return -EIO;
}
udelay(1);
}
}
return 0;
}
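/*
 * Program the DMA block count and FIFO trigger levels, configure the
 * dmaengine channels, start TX/RX DMA and finally enable the controller's
 * DMA mode.
 */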
static int tegra_spi_start_dma_based_transfer(
struct tegra_spi_data *tspi, struct spi_transfer *t)
{
u32 val;
unsigned int len;
int ret = 0;
u8 dma_burst;
struct dma_slave_config dma_sconfig = {0};
val = SPI_DMA_BLK_SET(tspi->curr_dma_words - 1);
tegra_spi_writel(tspi, val, SPI_DMA_BLK);
if (tspi->is_packed)
len = DIV_ROUND_UP(tspi->curr_dma_words * tspi->bytes_per_word,
4) * 4;
else
len = tspi->curr_dma_words * 4;
/* Set the DMA/FIFO trigger level based on the transfer length */
if (len & 0xF) {
val |= SPI_TX_TRIG_1 | SPI_RX_TRIG_1;
dma_burst = 1;
} else if (((len) >> 4) & 0x1) {
val |= SPI_TX_TRIG_4 | SPI_RX_TRIG_4;
dma_burst = 4;
} else {
val |= SPI_TX_TRIG_8 | SPI_RX_TRIG_8;
dma_burst = 8;
}
if (!tspi->soc_data->has_intr_mask_reg) {
if (tspi->cur_direction & DATA_DIR_TX)
val |= SPI_IE_TX;
if (tspi->cur_direction & DATA_DIR_RX)
val |= SPI_IE_RX;
}
tegra_spi_writel(tspi, val, SPI_DMA_CTL);
tspi->dma_control_reg = val;
dma_sconfig.device_fc = true;
if (tspi->cur_direction & DATA_DIR_TX) {
dma_sconfig.dst_addr = tspi->phys + SPI_TX_FIFO;
dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
dma_sconfig.dst_maxburst = dma_burst;
ret = dmaengine_slave_config(tspi->tx_dma_chan, &dma_sconfig);
if (ret < 0) {
dev_err(tspi->dev,
"DMA slave config failed: %d\n", ret);
return ret;
}
tegra_spi_copy_client_txbuf_to_spi_txbuf(tspi, t);
ret = tegra_spi_start_tx_dma(tspi, len);
if (ret < 0) {
dev_err(tspi->dev,
"Starting tx dma failed, err %d\n", ret);
return ret;
}
}
if (tspi->cur_direction & DATA_DIR_RX) {
dma_sconfig.src_addr = tspi->phys + SPI_RX_FIFO;
dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
dma_sconfig.src_maxburst = dma_burst;
ret = dmaengine_slave_config(tspi->rx_dma_chan, &dma_sconfig);
if (ret < 0) {
dev_err(tspi->dev,
"DMA slave config failed: %d\n", ret);
return ret;
}
/* Make the DMA buffer visible to the DMA engine again */
dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
tspi->dma_buf_size, DMA_FROM_DEVICE);
ret = tegra_spi_start_rx_dma(tspi, len);
if (ret < 0) {
dev_err(tspi->dev,
"Starting rx dma failed, err %d\n", ret);
if (tspi->cur_direction & DATA_DIR_TX)
dmaengine_terminate_all(tspi->tx_dma_chan);
return ret;
}
}
tspi->is_curr_dma_xfer = true;
tspi->dma_control_reg = val;
val |= SPI_DMA_EN;
tegra_spi_writel(tspi, val, SPI_DMA_CTL);
return ret;
}
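/*
 * Interrupt-driven PIO transfer: prime the TX FIFO (if transmitting),
 * enable the per-direction interrupts and start the transfer via SPI_PIO.
 */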
static int tegra_spi_start_cpu_based_transfer(
struct tegra_spi_data *tspi, struct spi_transfer *t)
{
u32 val;
unsigned cur_words;
if (tspi->cur_direction & DATA_DIR_TX)
cur_words = tegra_spi_fill_tx_fifo_from_client_txbuf(tspi, t);
else
cur_words = tspi->curr_dma_words;
val = SPI_DMA_BLK_SET(cur_words - 1);
tegra_spi_writel(tspi, val, SPI_DMA_BLK);
val = 0;
if (tspi->cur_direction & DATA_DIR_TX)
val |= SPI_IE_TX;
if (tspi->cur_direction & DATA_DIR_RX)
val |= SPI_IE_RX;
tegra_spi_writel(tspi, val, SPI_DMA_CTL);
tspi->dma_control_reg = val;
tspi->is_curr_dma_xfer = false;
val = tspi->command1_reg;
val |= SPI_PIO;
tegra_spi_writel(tspi, val, SPI_COMMAND1);
return 0;
}
static int tegra_spi_init_dma_param(struct tegra_spi_data *tspi,
bool dma_to_memory)
{
struct dma_chan *dma_chan;
u32 *dma_buf;
dma_addr_t dma_phys;
dma_chan = dma_request_chan(tspi->dev, dma_to_memory ? "rx" : "tx");
if (IS_ERR(dma_chan))
return dev_err_probe(tspi->dev, PTR_ERR(dma_chan),
"Dma channel is not available\n");
dma_buf = dma_alloc_coherent(tspi->dev, tspi->dma_buf_size,
&dma_phys, GFP_KERNEL);
if (!dma_buf) {
dev_err(tspi->dev, " Not able to allocate the dma buffer\n");
dma_release_channel(dma_chan);
return -ENOMEM;
}
if (dma_to_memory) {
tspi->rx_dma_chan = dma_chan;
tspi->rx_dma_buf = dma_buf;
tspi->rx_dma_phys = dma_phys;
} else {
tspi->tx_dma_chan = dma_chan;
tspi->tx_dma_buf = dma_buf;
tspi->tx_dma_phys = dma_phys;
}
return 0;
}
static void tegra_spi_deinit_dma_param(struct tegra_spi_data *tspi,
bool dma_to_memory)
{
u32 *dma_buf;
dma_addr_t dma_phys;
struct dma_chan *dma_chan;
if (dma_to_memory) {
dma_buf = tspi->rx_dma_buf;
dma_chan = tspi->rx_dma_chan;
dma_phys = tspi->rx_dma_phys;
tspi->rx_dma_chan = NULL;
tspi->rx_dma_buf = NULL;
} else {
dma_buf = tspi->tx_dma_buf;
dma_chan = tspi->tx_dma_chan;
dma_phys = tspi->tx_dma_phys;
tspi->tx_dma_buf = NULL;
tspi->tx_dma_chan = NULL;
}
if (!dma_chan)
return;
dma_free_coherent(tspi->dev, tspi->dma_buf_size, dma_buf, dma_phys);
dma_release_channel(dma_chan);
}
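/*
 * Convert the core's cs_setup/cs_hold/cs_inactive delays (in SCK cycles)
 * into the per-chip-select fields of SPI_CS_TIMING1 and SPI_CS_TIMING2.
 */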
static int tegra_spi_set_hw_cs_timing(struct spi_device *spi)
{
struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
struct spi_delay *setup = &spi->cs_setup;
struct spi_delay *hold = &spi->cs_hold;
struct spi_delay *inactive = &spi->cs_inactive;
u8 setup_dly, hold_dly;
u32 setup_hold;
u32 spi_cs_timing;
u32 inactive_cycles;
u8 cs_state;
if (setup->unit != SPI_DELAY_UNIT_SCK ||
hold->unit != SPI_DELAY_UNIT_SCK ||
inactive->unit != SPI_DELAY_UNIT_SCK) {
dev_err(&spi->dev,
"Invalid delay unit, should be SPI_DELAY_UNIT_SCK (%d)\n",
SPI_DELAY_UNIT_SCK);
return -EINVAL;
}
setup_dly = min_t(u8, setup->value, MAX_SETUP_HOLD_CYCLES);
hold_dly = min_t(u8, hold->value, MAX_SETUP_HOLD_CYCLES);
if (setup_dly && hold_dly) {
setup_hold = SPI_SETUP_HOLD(setup_dly - 1, hold_dly - 1);
spi_cs_timing = SPI_CS_SETUP_HOLD(tspi->spi_cs_timing1,
spi_get_chipselect(spi, 0),
setup_hold);
if (tspi->spi_cs_timing1 != spi_cs_timing) {
tspi->spi_cs_timing1 = spi_cs_timing;
tegra_spi_writel(tspi, spi_cs_timing, SPI_CS_TIMING1);
}
}
inactive_cycles = min_t(u8, inactive->value, MAX_INACTIVE_CYCLES);
if (inactive_cycles)
inactive_cycles--;
cs_state = inactive_cycles ? 0 : 1;
spi_cs_timing = tspi->spi_cs_timing2;
SPI_SET_CS_ACTIVE_BETWEEN_PACKETS(spi_cs_timing, spi_get_chipselect(spi, 0),
cs_state);
SPI_SET_CYCLES_BETWEEN_PACKETS(spi_cs_timing, spi_get_chipselect(spi, 0),
inactive_cycles);
if (tspi->spi_cs_timing2 != spi_cs_timing) {
tspi->spi_cs_timing2 = spi_cs_timing;
tegra_spi_writel(tspi, spi_cs_timing, SPI_CS_TIMING2);
}
return 0;
}
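/*
 * Compute the SPI_COMMAND1 value for a transfer. The first transfer of a
 * message also programs the SPI mode, bit order, 3-wire mode, chip-select
 * scheme and tap delays; later transfers only update the bit length.
 */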
static u32 tegra_spi_setup_transfer_one(struct spi_device *spi,
struct spi_transfer *t,
bool is_first_of_msg,
bool is_single_xfer)
{
struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
struct tegra_spi_client_data *cdata = spi->controller_data;
u32 speed = t->speed_hz;
u8 bits_per_word = t->bits_per_word;
u32 command1, command2;
int req_mode;
u32 tx_tap = 0, rx_tap = 0;
if (speed != tspi->cur_speed) {
clk_set_rate(tspi->clk, speed);
tspi->cur_speed = speed;
}
tspi->cur_spi = spi;
tspi->cur_pos = 0;
tspi->cur_rx_pos = 0;
tspi->cur_tx_pos = 0;
tspi->curr_xfer = t;
if (is_first_of_msg) {
tegra_spi_clear_status(tspi);
command1 = tspi->def_command1_reg;
command1 |= SPI_BIT_LENGTH(bits_per_word - 1);
command1 &= ~SPI_CONTROL_MODE_MASK;
req_mode = spi->mode & 0x3;
if (req_mode == SPI_MODE_0)
command1 |= SPI_CONTROL_MODE_0;
else if (req_mode == SPI_MODE_1)
command1 |= SPI_CONTROL_MODE_1;
else if (req_mode == SPI_MODE_2)
command1 |= SPI_CONTROL_MODE_2;
else if (req_mode == SPI_MODE_3)
command1 |= SPI_CONTROL_MODE_3;
if (spi->mode & SPI_LSB_FIRST)
command1 |= SPI_LSBIT_FE;
else
command1 &= ~SPI_LSBIT_FE;
if (spi->mode & SPI_3WIRE)
command1 |= SPI_BIDIROE;
else
command1 &= ~SPI_BIDIROE;
if (tspi->cs_control) {
if (tspi->cs_control != spi)
tegra_spi_writel(tspi, command1, SPI_COMMAND1);
tspi->cs_control = NULL;
} else
tegra_spi_writel(tspi, command1, SPI_COMMAND1);
/* GPIO based chip select control */
if (spi_get_csgpiod(spi, 0))
gpiod_set_value(spi_get_csgpiod(spi, 0), 1);
if (is_single_xfer && !(t->cs_change)) {
tspi->use_hw_based_cs = true;
command1 &= ~(SPI_CS_SW_HW | SPI_CS_SW_VAL);
} else {
tspi->use_hw_based_cs = false;
command1 |= SPI_CS_SW_HW;
if (spi->mode & SPI_CS_HIGH)
command1 |= SPI_CS_SW_VAL;
else
command1 &= ~SPI_CS_SW_VAL;
}
if (tspi->last_used_cs != spi_get_chipselect(spi, 0)) {
if (cdata && cdata->tx_clk_tap_delay)
tx_tap = cdata->tx_clk_tap_delay;
if (cdata && cdata->rx_clk_tap_delay)
rx_tap = cdata->rx_clk_tap_delay;
command2 = SPI_TX_TAP_DELAY(tx_tap) |
SPI_RX_TAP_DELAY(rx_tap);
if (command2 != tspi->def_command2_reg)
tegra_spi_writel(tspi, command2, SPI_COMMAND2);
tspi->last_used_cs = spi_get_chipselect(spi, 0);
}
} else {
command1 = tspi->command1_reg;
command1 &= ~SPI_BIT_LENGTH(~0);
command1 |= SPI_BIT_LENGTH(bits_per_word - 1);
}
return command1;
}
static int tegra_spi_start_transfer_one(struct spi_device *spi,
struct spi_transfer *t, u32 command1)
{
struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
unsigned total_fifo_words;
int ret;
total_fifo_words = tegra_spi_calculate_curr_xfer_param(spi, tspi, t);
if (t->rx_nbits == SPI_NBITS_DUAL || t->tx_nbits == SPI_NBITS_DUAL)
command1 |= SPI_BOTH_EN_BIT;
else
command1 &= ~SPI_BOTH_EN_BIT;
if (tspi->is_packed)
command1 |= SPI_PACKED;
else
command1 &= ~SPI_PACKED;
command1 &= ~(SPI_CS_SEL_MASK | SPI_TX_EN | SPI_RX_EN);
tspi->cur_direction = 0;
if (t->rx_buf) {
command1 |= SPI_RX_EN;
tspi->cur_direction |= DATA_DIR_RX;
}
if (t->tx_buf) {
command1 |= SPI_TX_EN;
tspi->cur_direction |= DATA_DIR_TX;
}
command1 |= SPI_CS_SEL(spi_get_chipselect(spi, 0));
tegra_spi_writel(tspi, command1, SPI_COMMAND1);
tspi->command1_reg = command1;
dev_dbg(tspi->dev, "The def 0x%x and written 0x%x\n",
tspi->def_command1_reg, (unsigned)command1);
ret = tegra_spi_flush_fifos(tspi);
if (ret < 0)
return ret;
if (total_fifo_words > SPI_FIFO_DEPTH)
ret = tegra_spi_start_dma_based_transfer(tspi, t);
else
ret = tegra_spi_start_cpu_based_transfer(tspi, t);
return ret;
}
static struct tegra_spi_client_data
*tegra_spi_parse_cdata_dt(struct spi_device *spi)
{
struct tegra_spi_client_data *cdata;
struct device_node *slave_np;
slave_np = spi->dev.of_node;
if (!slave_np) {
dev_dbg(&spi->dev, "device node not found\n");
return NULL;
}
cdata = kzalloc(sizeof(*cdata), GFP_KERNEL);
if (!cdata)
return NULL;
of_property_read_u32(slave_np, "nvidia,tx-clk-tap-delay",
&cdata->tx_clk_tap_delay);
of_property_read_u32(slave_np, "nvidia,rx-clk-tap-delay",
&cdata->rx_clk_tap_delay);
return cdata;
}
static void tegra_spi_cleanup(struct spi_device *spi)
{
struct tegra_spi_client_data *cdata = spi->controller_data;
spi->controller_data = NULL;
if (spi->dev.of_node)
kfree(cdata);
}
static int tegra_spi_setup(struct spi_device *spi)
{
struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
struct tegra_spi_client_data *cdata = spi->controller_data;
u32 val;
unsigned long flags;
int ret;
dev_dbg(&spi->dev, "setup %d bpw, %scpol, %scpha, %dHz\n",
spi->bits_per_word,
spi->mode & SPI_CPOL ? "" : "~",
spi->mode & SPI_CPHA ? "" : "~",
spi->max_speed_hz);
if (!cdata) {
cdata = tegra_spi_parse_cdata_dt(spi);
spi->controller_data = cdata;
}
ret = pm_runtime_resume_and_get(tspi->dev);
if (ret < 0) {
dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret);
if (cdata)
tegra_spi_cleanup(spi);
return ret;
}
if (tspi->soc_data->has_intr_mask_reg) {
val = tegra_spi_readl(tspi, SPI_INTR_MASK);
val &= ~SPI_INTR_ALL_MASK;
tegra_spi_writel(tspi, val, SPI_INTR_MASK);
}
spin_lock_irqsave(&tspi->lock, flags);
/* GPIO based chip select control */
if (spi_get_csgpiod(spi, 0))
gpiod_set_value(spi_get_csgpiod(spi, 0), 0);
val = tspi->def_command1_reg;
if (spi->mode & SPI_CS_HIGH)
val &= ~SPI_CS_POL_INACTIVE(spi_get_chipselect(spi, 0));
else
val |= SPI_CS_POL_INACTIVE(spi_get_chipselect(spi, 0));
tspi->def_command1_reg = val;
tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
spin_unlock_irqrestore(&tspi->lock, flags);
pm_runtime_put(tspi->dev);
return 0;
}
static void tegra_spi_transfer_end(struct spi_device *spi)
{
struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
int cs_val = (spi->mode & SPI_CS_HIGH) ? 0 : 1;
/* GPIO based chip select control */
if (spi_get_csgpiod(spi, 0))
gpiod_set_value(spi_get_csgpiod(spi, 0), 0);
if (!tspi->use_hw_based_cs) {
if (cs_val)
tspi->command1_reg |= SPI_CS_SW_VAL;
else
tspi->command1_reg &= ~SPI_CS_SW_VAL;
tegra_spi_writel(tspi, tspi->command1_reg, SPI_COMMAND1);
}
tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
}
static void tegra_spi_dump_regs(struct tegra_spi_data *tspi)
{
dev_dbg(tspi->dev, "============ SPI REGISTER DUMP ============\n");
dev_dbg(tspi->dev, "Command1: 0x%08x | Command2: 0x%08x\n",
tegra_spi_readl(tspi, SPI_COMMAND1),
tegra_spi_readl(tspi, SPI_COMMAND2));
dev_dbg(tspi->dev, "DMA_CTL: 0x%08x | DMA_BLK: 0x%08x\n",
tegra_spi_readl(tspi, SPI_DMA_CTL),
tegra_spi_readl(tspi, SPI_DMA_BLK));
dev_dbg(tspi->dev, "TRANS_STAT: 0x%08x | FIFO_STATUS: 0x%08x\n",
tegra_spi_readl(tspi, SPI_TRANS_STATUS),
tegra_spi_readl(tspi, SPI_FIFO_STATUS));
}
static int tegra_spi_transfer_one_message(struct spi_master *master,
struct spi_message *msg)
{
bool is_first_msg = true;
struct tegra_spi_data *tspi = spi_master_get_devdata(master);
struct spi_transfer *xfer;
struct spi_device *spi = msg->spi;
int ret;
bool skip = false;
int single_xfer;
msg->status = 0;
msg->actual_length = 0;
single_xfer = list_is_singular(&msg->transfers);
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
u32 cmd1;
reinit_completion(&tspi->xfer_completion);
cmd1 = tegra_spi_setup_transfer_one(spi, xfer, is_first_msg,
single_xfer);
if (!xfer->len) {
ret = 0;
skip = true;
goto complete_xfer;
}
ret = tegra_spi_start_transfer_one(spi, xfer, cmd1);
if (ret < 0) {
dev_err(tspi->dev,
"spi can not start transfer, err %d\n", ret);
goto complete_xfer;
}
is_first_msg = false;
ret = wait_for_completion_timeout(&tspi->xfer_completion,
SPI_DMA_TIMEOUT);
if (WARN_ON(ret == 0)) {
dev_err(tspi->dev, "spi transfer timeout\n");
if (tspi->is_curr_dma_xfer &&
(tspi->cur_direction & DATA_DIR_TX))
dmaengine_terminate_all(tspi->tx_dma_chan);
if (tspi->is_curr_dma_xfer &&
(tspi->cur_direction & DATA_DIR_RX))
dmaengine_terminate_all(tspi->rx_dma_chan);
ret = -EIO;
tegra_spi_dump_regs(tspi);
tegra_spi_flush_fifos(tspi);
reset_control_assert(tspi->rst);
udelay(2);
reset_control_deassert(tspi->rst);
tspi->last_used_cs = master->num_chipselect + 1;
goto complete_xfer;
}
if (tspi->tx_status || tspi->rx_status) {
dev_err(tspi->dev, "Error in Transfer\n");
ret = -EIO;
tegra_spi_dump_regs(tspi);
goto complete_xfer;
}
msg->actual_length += xfer->len;
complete_xfer:
if (ret < 0 || skip) {
tegra_spi_transfer_end(spi);
spi_transfer_delay_exec(xfer);
goto exit;
} else if (list_is_last(&xfer->transfer_list,
&msg->transfers)) {
if (xfer->cs_change)
tspi->cs_control = spi;
else {
tegra_spi_transfer_end(spi);
spi_transfer_delay_exec(xfer);
}
} else if (xfer->cs_change) {
tegra_spi_transfer_end(spi);
spi_transfer_delay_exec(xfer);
}
}
ret = 0;
exit:
msg->status = ret;
spi_finalize_current_message(master);
return ret;
}
static irqreturn_t handle_cpu_based_xfer(struct tegra_spi_data *tspi)
{
struct spi_transfer *t = tspi->curr_xfer;
unsigned long flags;
spin_lock_irqsave(&tspi->lock, flags);
if (tspi->tx_status || tspi->rx_status) {
dev_err(tspi->dev, "CpuXfer ERROR bit set 0x%x\n",
tspi->status_reg);
dev_err(tspi->dev, "CpuXfer 0x%08x:0x%08x\n",
tspi->command1_reg, tspi->dma_control_reg);
tegra_spi_dump_regs(tspi);
tegra_spi_flush_fifos(tspi);
complete(&tspi->xfer_completion);
spin_unlock_irqrestore(&tspi->lock, flags);
reset_control_assert(tspi->rst);
udelay(2);
reset_control_deassert(tspi->rst);
return IRQ_HANDLED;
}
if (tspi->cur_direction & DATA_DIR_RX)
tegra_spi_read_rx_fifo_to_client_rxbuf(tspi, t);
if (tspi->cur_direction & DATA_DIR_TX)
tspi->cur_pos = tspi->cur_tx_pos;
else
tspi->cur_pos = tspi->cur_rx_pos;
if (tspi->cur_pos == t->len) {
complete(&tspi->xfer_completion);
goto exit;
}
tegra_spi_calculate_curr_xfer_param(tspi->cur_spi, tspi, t);
tegra_spi_start_cpu_based_transfer(tspi, t);
exit:
spin_unlock_irqrestore(&tspi->lock, flags);
return IRQ_HANDLED;
}
static irqreturn_t handle_dma_based_xfer(struct tegra_spi_data *tspi)
{
struct spi_transfer *t = tspi->curr_xfer;
long wait_status;
int err = 0;
unsigned total_fifo_words;
unsigned long flags;
/* Abort dmas if any error */
if (tspi->cur_direction & DATA_DIR_TX) {
if (tspi->tx_status) {
dmaengine_terminate_all(tspi->tx_dma_chan);
err += 1;
} else {
wait_status = wait_for_completion_interruptible_timeout(
&tspi->tx_dma_complete, SPI_DMA_TIMEOUT);
if (wait_status <= 0) {
dmaengine_terminate_all(tspi->tx_dma_chan);
dev_err(tspi->dev, "TxDma Xfer failed\n");
err += 1;
}
}
}
if (tspi->cur_direction & DATA_DIR_RX) {
if (tspi->rx_status) {
dmaengine_terminate_all(tspi->rx_dma_chan);
err += 2;
} else {
wait_status = wait_for_completion_interruptible_timeout(
&tspi->rx_dma_complete, SPI_DMA_TIMEOUT);
if (wait_status <= 0) {
dmaengine_terminate_all(tspi->rx_dma_chan);
dev_err(tspi->dev, "RxDma Xfer failed\n");
err += 2;
}
}
}
spin_lock_irqsave(&tspi->lock, flags);
if (err) {
dev_err(tspi->dev, "DmaXfer: ERROR bit set 0x%x\n",
tspi->status_reg);
dev_err(tspi->dev, "DmaXfer 0x%08x:0x%08x\n",
tspi->command1_reg, tspi->dma_control_reg);
tegra_spi_dump_regs(tspi);
tegra_spi_flush_fifos(tspi);
complete(&tspi->xfer_completion);
spin_unlock_irqrestore(&tspi->lock, flags);
reset_control_assert(tspi->rst);
udelay(2);
reset_control_deassert(tspi->rst);
return IRQ_HANDLED;
}
if (tspi->cur_direction & DATA_DIR_RX)
tegra_spi_copy_spi_rxbuf_to_client_rxbuf(tspi, t);
if (tspi->cur_direction & DATA_DIR_TX)
tspi->cur_pos = tspi->cur_tx_pos;
else
tspi->cur_pos = tspi->cur_rx_pos;
if (tspi->cur_pos == t->len) {
complete(&tspi->xfer_completion);
goto exit;
}
/* Continue transfer in current message */
total_fifo_words = tegra_spi_calculate_curr_xfer_param(tspi->cur_spi,
tspi, t);
if (total_fifo_words > SPI_FIFO_DEPTH)
err = tegra_spi_start_dma_based_transfer(tspi, t);
else
err = tegra_spi_start_cpu_based_transfer(tspi, t);
exit:
spin_unlock_irqrestore(&tspi->lock, flags);
return IRQ_HANDLED;
}
static irqreturn_t tegra_spi_isr_thread(int irq, void *context_data)
{
struct tegra_spi_data *tspi = context_data;
if (!tspi->is_curr_dma_xfer)
return handle_cpu_based_xfer(tspi);
return handle_dma_based_xfer(tspi);
}
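/* Hard IRQ handler: latch FIFO error status, clear it and defer to the thread. */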
static irqreturn_t tegra_spi_isr(int irq, void *context_data)
{
struct tegra_spi_data *tspi = context_data;
tspi->status_reg = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
if (tspi->cur_direction & DATA_DIR_TX)
tspi->tx_status = tspi->status_reg &
(SPI_TX_FIFO_UNF | SPI_TX_FIFO_OVF);
if (tspi->cur_direction & DATA_DIR_RX)
tspi->rx_status = tspi->status_reg &
(SPI_RX_FIFO_OVF | SPI_RX_FIFO_UNF);
tegra_spi_clear_status(tspi);
return IRQ_WAKE_THREAD;
}
static struct tegra_spi_soc_data tegra114_spi_soc_data = {
.has_intr_mask_reg = false,
};
static struct tegra_spi_soc_data tegra124_spi_soc_data = {
.has_intr_mask_reg = false,
};
static struct tegra_spi_soc_data tegra210_spi_soc_data = {
.has_intr_mask_reg = true,
};
static const struct of_device_id tegra_spi_of_match[] = {
{
.compatible = "nvidia,tegra114-spi",
.data = &tegra114_spi_soc_data,
}, {
.compatible = "nvidia,tegra124-spi",
.data = &tegra124_spi_soc_data,
}, {
.compatible = "nvidia,tegra210-spi",
.data = &tegra210_spi_soc_data,
},
{}
};
MODULE_DEVICE_TABLE(of, tegra_spi_of_match);
static int tegra_spi_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct tegra_spi_data *tspi;
struct resource *r;
int ret, spi_irq;
int bus_num;
master = spi_alloc_master(&pdev->dev, sizeof(*tspi));
if (!master) {
dev_err(&pdev->dev, "master allocation failed\n");
return -ENOMEM;
}
platform_set_drvdata(pdev, master);
tspi = spi_master_get_devdata(master);
if (of_property_read_u32(pdev->dev.of_node, "spi-max-frequency",
&master->max_speed_hz))
master->max_speed_hz = SPI_DEFAULT_SPEED; /* 25 MHz */
/* the spi->mode bits understood by this driver: */
master->use_gpio_descriptors = true;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST |
SPI_TX_DUAL | SPI_RX_DUAL | SPI_3WIRE;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
master->setup = tegra_spi_setup;
master->cleanup = tegra_spi_cleanup;
master->transfer_one_message = tegra_spi_transfer_one_message;
master->set_cs_timing = tegra_spi_set_hw_cs_timing;
master->num_chipselect = MAX_CHIP_SELECT;
master->auto_runtime_pm = true;
bus_num = of_alias_get_id(pdev->dev.of_node, "spi");
if (bus_num >= 0)
master->bus_num = bus_num;
tspi->master = master;
tspi->dev = &pdev->dev;
spin_lock_init(&tspi->lock);
tspi->soc_data = of_device_get_match_data(&pdev->dev);
if (!tspi->soc_data) {
dev_err(&pdev->dev, "unsupported tegra\n");
ret = -ENODEV;
goto exit_free_master;
}
tspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
if (IS_ERR(tspi->base)) {
ret = PTR_ERR(tspi->base);
goto exit_free_master;
}
tspi->phys = r->start;
spi_irq = platform_get_irq(pdev, 0);
if (spi_irq < 0) {
ret = spi_irq;
goto exit_free_master;
}
tspi->irq = spi_irq;
tspi->clk = devm_clk_get(&pdev->dev, "spi");
if (IS_ERR(tspi->clk)) {
dev_err(&pdev->dev, "can not get clock\n");
ret = PTR_ERR(tspi->clk);
goto exit_free_master;
}
tspi->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi");
if (IS_ERR(tspi->rst)) {
dev_err(&pdev->dev, "can not get reset\n");
ret = PTR_ERR(tspi->rst);
goto exit_free_master;
}
tspi->max_buf_size = SPI_FIFO_DEPTH << 2;
tspi->dma_buf_size = DEFAULT_SPI_DMA_BUF_LEN;
ret = tegra_spi_init_dma_param(tspi, true);
if (ret < 0)
goto exit_free_master;
ret = tegra_spi_init_dma_param(tspi, false);
if (ret < 0)
goto exit_rx_dma_free;
tspi->max_buf_size = tspi->dma_buf_size;
init_completion(&tspi->tx_dma_complete);
init_completion(&tspi->rx_dma_complete);
init_completion(&tspi->xfer_completion);
pm_runtime_enable(&pdev->dev);
if (!pm_runtime_enabled(&pdev->dev)) {
ret = tegra_spi_runtime_resume(&pdev->dev);
if (ret)
goto exit_pm_disable;
}
ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret);
goto exit_pm_disable;
}
reset_control_assert(tspi->rst);
udelay(2);
reset_control_deassert(tspi->rst);
tspi->def_command1_reg = SPI_M_S;
tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
tspi->spi_cs_timing1 = tegra_spi_readl(tspi, SPI_CS_TIMING1);
tspi->spi_cs_timing2 = tegra_spi_readl(tspi, SPI_CS_TIMING2);
tspi->def_command2_reg = tegra_spi_readl(tspi, SPI_COMMAND2);
tspi->last_used_cs = master->num_chipselect + 1;
pm_runtime_put(&pdev->dev);
ret = request_threaded_irq(tspi->irq, tegra_spi_isr,
tegra_spi_isr_thread, IRQF_ONESHOT,
dev_name(&pdev->dev), tspi);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
tspi->irq);
goto exit_pm_disable;
}
master->dev.of_node = pdev->dev.of_node;
ret = devm_spi_register_master(&pdev->dev, master);
if (ret < 0) {
dev_err(&pdev->dev, "can not register to master err %d\n", ret);
goto exit_free_irq;
}
return ret;
exit_free_irq:
free_irq(spi_irq, tspi);
exit_pm_disable:
pm_runtime_disable(&pdev->dev);
if (!pm_runtime_status_suspended(&pdev->dev))
tegra_spi_runtime_suspend(&pdev->dev);
tegra_spi_deinit_dma_param(tspi, false);
exit_rx_dma_free:
tegra_spi_deinit_dma_param(tspi, true);
exit_free_master:
spi_master_put(master);
return ret;
}
static void tegra_spi_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct tegra_spi_data *tspi = spi_master_get_devdata(master);
free_irq(tspi->irq, tspi);
if (tspi->tx_dma_chan)
tegra_spi_deinit_dma_param(tspi, false);
if (tspi->rx_dma_chan)
tegra_spi_deinit_dma_param(tspi, true);
pm_runtime_disable(&pdev->dev);
if (!pm_runtime_status_suspended(&pdev->dev))
tegra_spi_runtime_suspend(&pdev->dev);
}
#ifdef CONFIG_PM_SLEEP
static int tegra_spi_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
return spi_master_suspend(master);
}
static int tegra_spi_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct tegra_spi_data *tspi = spi_master_get_devdata(master);
int ret;
ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
dev_err(dev, "pm runtime failed, e = %d\n", ret);
return ret;
}
tegra_spi_writel(tspi, tspi->command1_reg, SPI_COMMAND1);
tegra_spi_writel(tspi, tspi->def_command2_reg, SPI_COMMAND2);
tspi->last_used_cs = master->num_chipselect + 1;
pm_runtime_put(dev);
return spi_master_resume(master);
}
#endif
static int tegra_spi_runtime_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct tegra_spi_data *tspi = spi_master_get_devdata(master);
/* Flush all writes still in the PPSB queue by reading back */
tegra_spi_readl(tspi, SPI_COMMAND1);
clk_disable_unprepare(tspi->clk);
return 0;
}
static int tegra_spi_runtime_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct tegra_spi_data *tspi = spi_master_get_devdata(master);
int ret;
ret = clk_prepare_enable(tspi->clk);
if (ret < 0) {
dev_err(tspi->dev, "clk_prepare failed: %d\n", ret);
return ret;
}
return 0;
}
static const struct dev_pm_ops tegra_spi_pm_ops = {
SET_RUNTIME_PM_OPS(tegra_spi_runtime_suspend,
tegra_spi_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(tegra_spi_suspend, tegra_spi_resume)
};
static struct platform_driver tegra_spi_driver = {
.driver = {
.name = "spi-tegra114",
.pm = &tegra_spi_pm_ops,
.of_match_table = tegra_spi_of_match,
},
.probe = tegra_spi_probe,
.remove_new = tegra_spi_remove,
};
module_platform_driver(tegra_spi_driver);
MODULE_ALIAS("platform:spi-tegra114");
MODULE_DESCRIPTION("NVIDIA Tegra114 SPI Controller Driver");
MODULE_AUTHOR("Laxman Dewangan <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/spi/spi-tegra114.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SuperH MSIOF SPI Controller Interface
*
* Copyright (c) 2009 Magnus Damm
* Copyright (C) 2014 Renesas Electronics Corporation
* Copyright (C) 2014-2017 Glider bvba
*/
#include <linux/bitmap.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/spi/sh_msiof.h>
#include <linux/spi/spi.h>
#include <asm/unaligned.h>
struct sh_msiof_chipdata {
u32 bits_per_word_mask;
u16 tx_fifo_size;
u16 rx_fifo_size;
u16 ctlr_flags;
u16 min_div_pow;
};
struct sh_msiof_spi_priv {
struct spi_controller *ctlr;
void __iomem *mapbase;
struct clk *clk;
struct platform_device *pdev;
struct sh_msiof_spi_info *info;
struct completion done;
struct completion done_txdma;
unsigned int tx_fifo_size;
unsigned int rx_fifo_size;
unsigned int min_div_pow;
void *tx_dma_page;
void *rx_dma_page;
dma_addr_t tx_dma_addr;
dma_addr_t rx_dma_addr;
bool native_cs_inited;
bool native_cs_high;
bool target_aborted;
};
#define MAX_SS 3 /* Maximum number of native chip selects */
#define SITMDR1 0x00 /* Transmit Mode Register 1 */
#define SITMDR2 0x04 /* Transmit Mode Register 2 */
#define SITMDR3 0x08 /* Transmit Mode Register 3 */
#define SIRMDR1 0x10 /* Receive Mode Register 1 */
#define SIRMDR2 0x14 /* Receive Mode Register 2 */
#define SIRMDR3 0x18 /* Receive Mode Register 3 */
#define SITSCR 0x20 /* Transmit Clock Select Register */
#define SIRSCR 0x22 /* Receive Clock Select Register (SH, A1, APE6) */
#define SICTR 0x28 /* Control Register */
#define SIFCTR 0x30 /* FIFO Control Register */
#define SISTR 0x40 /* Status Register */
#define SIIER 0x44 /* Interrupt Enable Register */
#define SITDR1 0x48 /* Transmit Control Data Register 1 (SH, A1) */
#define SITDR2 0x4c /* Transmit Control Data Register 2 (SH, A1) */
#define SITFDR 0x50 /* Transmit FIFO Data Register */
#define SIRDR1 0x58 /* Receive Control Data Register 1 (SH, A1) */
#define SIRDR2 0x5c /* Receive Control Data Register 2 (SH, A1) */
#define SIRFDR 0x60 /* Receive FIFO Data Register */
/* SITMDR1 and SIRMDR1 */
#define SIMDR1_TRMD BIT(31) /* Transfer Mode (1 = Master mode) */
#define SIMDR1_SYNCMD_MASK GENMASK(29, 28) /* SYNC Mode */
#define SIMDR1_SYNCMD_SPI (2 << 28) /* Level mode/SPI */
#define SIMDR1_SYNCMD_LR (3 << 28) /* L/R mode */
#define SIMDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */
#define SIMDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */
#define SIMDR1_DTDL_SHIFT 20 /* Data Pin Bit Delay for MSIOF_SYNC */
#define SIMDR1_SYNCDL_SHIFT 16 /* Frame Sync Signal Timing Delay */
#define SIMDR1_FLD_MASK GENMASK(3, 2) /* Frame Sync Signal Interval (0-3) */
#define SIMDR1_FLD_SHIFT 2
#define SIMDR1_XXSTP BIT(0) /* Transmission/Reception Stop on FIFO */
/* SITMDR1 */
#define SITMDR1_PCON BIT(30) /* Transfer Signal Connection */
#define SITMDR1_SYNCCH_MASK GENMASK(27, 26) /* Sync Signal Channel Select */
#define SITMDR1_SYNCCH_SHIFT 26 /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */
/* SITMDR2 and SIRMDR2 */
#define SIMDR2_BITLEN1(i) (((i) - 1) << 24) /* Data Size (8-32 bits) */
#define SIMDR2_WDLEN1(i) (((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1))) */
#define SIMDR2_GRPMASK1 BIT(0) /* Group Output Mask 1 (SH, A1) */
/* SITSCR and SIRSCR */
#define SISCR_BRPS_MASK GENMASK(12, 8) /* Prescaler Setting (1-32) */
#define SISCR_BRPS(i) (((i) - 1) << 8)
#define SISCR_BRDV_MASK GENMASK(2, 0) /* Baud Rate Generator's Division Ratio */
#define SISCR_BRDV_DIV_2 0
#define SISCR_BRDV_DIV_4 1
#define SISCR_BRDV_DIV_8 2
#define SISCR_BRDV_DIV_16 3
#define SISCR_BRDV_DIV_32 4
#define SISCR_BRDV_DIV_1 7
/* SICTR */
#define SICTR_TSCKIZ_MASK GENMASK(31, 30) /* Transmit Clock I/O Polarity Select */
#define SICTR_TSCKIZ_SCK BIT(31) /* Disable SCK when TX disabled */
#define SICTR_TSCKIZ_POL_SHIFT 30 /* Transmit Clock Polarity */
#define SICTR_RSCKIZ_MASK GENMASK(29, 28) /* Receive Clock Polarity Select */
#define SICTR_RSCKIZ_SCK BIT(29) /* Must match CTR_TSCKIZ_SCK */
#define SICTR_RSCKIZ_POL_SHIFT 28 /* Receive Clock Polarity */
#define SICTR_TEDG_SHIFT 27 /* Transmit Timing (1 = falling edge) */
#define SICTR_REDG_SHIFT 26 /* Receive Timing (1 = falling edge) */
#define SICTR_TXDIZ_MASK GENMASK(23, 22) /* Pin Output When TX is Disabled */
#define SICTR_TXDIZ_LOW (0 << 22) /* 0 */
#define SICTR_TXDIZ_HIGH (1 << 22) /* 1 */
#define SICTR_TXDIZ_HIZ (2 << 22) /* High-impedance */
#define SICTR_TSCKE BIT(15) /* Transmit Serial Clock Output Enable */
#define SICTR_TFSE BIT(14) /* Transmit Frame Sync Signal Output Enable */
#define SICTR_TXE BIT(9) /* Transmit Enable */
#define SICTR_RXE BIT(8) /* Receive Enable */
#define SICTR_TXRST BIT(1) /* Transmit Reset */
#define SICTR_RXRST BIT(0) /* Receive Reset */
/* SIFCTR */
#define SIFCTR_TFWM_MASK GENMASK(31, 29) /* Transmit FIFO Watermark */
#define SIFCTR_TFWM_64 (0 << 29) /* Transfer Request when 64 empty stages */
#define SIFCTR_TFWM_32 (1 << 29) /* Transfer Request when 32 empty stages */
#define SIFCTR_TFWM_24 (2 << 29) /* Transfer Request when 24 empty stages */
#define SIFCTR_TFWM_16 (3 << 29) /* Transfer Request when 16 empty stages */
#define SIFCTR_TFWM_12 (4 << 29) /* Transfer Request when 12 empty stages */
#define SIFCTR_TFWM_8 (5 << 29) /* Transfer Request when 8 empty stages */
#define SIFCTR_TFWM_4 (6 << 29) /* Transfer Request when 4 empty stages */
#define SIFCTR_TFWM_1 (7 << 29) /* Transfer Request when 1 empty stage */
#define SIFCTR_TFUA_MASK GENMASK(26, 20) /* Transmit FIFO Usable Area */
#define SIFCTR_TFUA_SHIFT 20
#define SIFCTR_TFUA(i) ((i) << SIFCTR_TFUA_SHIFT)
#define SIFCTR_RFWM_MASK GENMASK(15, 13) /* Receive FIFO Watermark */
#define SIFCTR_RFWM_1 (0 << 13) /* Transfer Request when 1 valid stage */
#define SIFCTR_RFWM_4 (1 << 13) /* Transfer Request when 4 valid stages */
#define SIFCTR_RFWM_8 (2 << 13) /* Transfer Request when 8 valid stages */
#define SIFCTR_RFWM_16 (3 << 13) /* Transfer Request when 16 valid stages */
#define SIFCTR_RFWM_32 (4 << 13) /* Transfer Request when 32 valid stages */
#define SIFCTR_RFWM_64 (5 << 13) /* Transfer Request when 64 valid stages */
#define SIFCTR_RFWM_128 (6 << 13) /* Transfer Request when 128 valid stages */
#define SIFCTR_RFWM_256 (7 << 13) /* Transfer Request when 256 valid stages */
#define SIFCTR_RFUA_MASK GENMASK(12, 4) /* Receive FIFO Usable Area (0x40 = full) */
#define SIFCTR_RFUA_SHIFT 4
#define SIFCTR_RFUA(i) ((i) << SIFCTR_RFUA_SHIFT)
/* SISTR */
#define SISTR_TFEMP BIT(29) /* Transmit FIFO Empty */
#define SISTR_TDREQ BIT(28) /* Transmit Data Transfer Request */
#define SISTR_TEOF BIT(23) /* Frame Transmission End */
#define SISTR_TFSERR BIT(21) /* Transmit Frame Synchronization Error */
#define SISTR_TFOVF BIT(20) /* Transmit FIFO Overflow */
#define SISTR_TFUDF BIT(19) /* Transmit FIFO Underflow */
#define SISTR_RFFUL BIT(13) /* Receive FIFO Full */
#define SISTR_RDREQ BIT(12) /* Receive Data Transfer Request */
#define SISTR_REOF BIT(7) /* Frame Reception End */
#define SISTR_RFSERR BIT(5) /* Receive Frame Synchronization Error */
#define SISTR_RFUDF BIT(4) /* Receive FIFO Underflow */
#define SISTR_RFOVF BIT(3) /* Receive FIFO Overflow */
/* SIIER */
#define SIIER_TDMAE BIT(31) /* Transmit Data DMA Transfer Req. Enable */
#define SIIER_TFEMPE BIT(29) /* Transmit FIFO Empty Enable */
#define SIIER_TDREQE BIT(28) /* Transmit Data Transfer Request Enable */
#define SIIER_TEOFE BIT(23) /* Frame Transmission End Enable */
#define SIIER_TFSERRE BIT(21) /* Transmit Frame Sync Error Enable */
#define SIIER_TFOVFE BIT(20) /* Transmit FIFO Overflow Enable */
#define SIIER_TFUDFE BIT(19) /* Transmit FIFO Underflow Enable */
#define SIIER_RDMAE BIT(15) /* Receive Data DMA Transfer Req. Enable */
#define SIIER_RFFULE BIT(13) /* Receive FIFO Full Enable */
#define SIIER_RDREQE BIT(12) /* Receive Data Transfer Request Enable */
#define SIIER_REOFE BIT(7) /* Frame Reception End Enable */
#define SIIER_RFSERRE BIT(5) /* Receive Frame Sync Error Enable */
#define SIIER_RFUDFE BIT(4) /* Receive FIFO Underflow Enable */
#define SIIER_RFOVFE BIT(3) /* Receive FIFO Overflow Enable */
static u32 sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs)
{
switch (reg_offs) {
case SITSCR:
case SIRSCR:
return ioread16(p->mapbase + reg_offs);
default:
return ioread32(p->mapbase + reg_offs);
}
}
static void sh_msiof_write(struct sh_msiof_spi_priv *p, int reg_offs,
u32 value)
{
switch (reg_offs) {
case SITSCR:
case SIRSCR:
iowrite16(value, p->mapbase + reg_offs);
break;
default:
iowrite32(value, p->mapbase + reg_offs);
break;
}
}
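/*
 * Modify SICTR bits and busy-wait (up to 100 us) for the controller to
 * reflect the new value.
 */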
static int sh_msiof_modify_ctr_wait(struct sh_msiof_spi_priv *p,
u32 clr, u32 set)
{
u32 mask = clr | set;
u32 data;
data = sh_msiof_read(p, SICTR);
data &= ~clr;
data |= set;
sh_msiof_write(p, SICTR, data);
return readl_poll_timeout_atomic(p->mapbase + SICTR, data,
(data & mask) == set, 1, 100);
}
static irqreturn_t sh_msiof_spi_irq(int irq, void *data)
{
struct sh_msiof_spi_priv *p = data;
/* just disable the interrupt and wake up */
sh_msiof_write(p, SIIER, 0);
complete(&p->done);
return IRQ_HANDLED;
}
static void sh_msiof_spi_reset_regs(struct sh_msiof_spi_priv *p)
{
u32 mask = SICTR_TXRST | SICTR_RXRST;
u32 data;
data = sh_msiof_read(p, SICTR);
data |= mask;
sh_msiof_write(p, SICTR, data);
readl_poll_timeout_atomic(p->mapbase + SICTR, data, !(data & mask), 1,
100);
}
static const u32 sh_msiof_spi_div_array[] = {
SISCR_BRDV_DIV_1, SISCR_BRDV_DIV_2, SISCR_BRDV_DIV_4,
SISCR_BRDV_DIV_8, SISCR_BRDV_DIV_16, SISCR_BRDV_DIV_32,
};
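/*
 * Derive the BRPS prescaler (1..32) and BRDV power-of-two divider from the
 * MSIOF parent clock so the bit clock does not exceed t->speed_hz.
 */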
static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
struct spi_transfer *t)
{
unsigned long parent_rate = clk_get_rate(p->clk);
unsigned int div_pow = p->min_div_pow;
u32 spi_hz = t->speed_hz;
unsigned long div;
u32 brps, scr;
if (!spi_hz || !parent_rate) {
WARN(1, "Invalid clock rate parameters %lu and %u\n",
parent_rate, spi_hz);
return;
}
div = DIV_ROUND_UP(parent_rate, spi_hz);
if (div <= 1024) {
/* SISCR_BRDV_DIV_1 is valid only if BRPS is x 1/1 or x 1/2 */
if (!div_pow && div <= 32 && div > 2)
div_pow = 1;
if (div_pow)
brps = (div + 1) >> div_pow;
else
brps = div;
for (; brps > 32; div_pow++)
brps = (brps + 1) >> 1;
} else {
/* Set transfer rate composite divisor to 2^5 * 32 = 1024 */
dev_err(&p->pdev->dev,
"Requested SPI transfer rate %d is too low\n", spi_hz);
div_pow = 5;
brps = 32;
}
t->effective_speed_hz = parent_rate / (brps << div_pow);
scr = sh_msiof_spi_div_array[div_pow] | SISCR_BRPS(brps);
sh_msiof_write(p, SITSCR, scr);
if (!(p->ctlr->flags & SPI_CONTROLLER_MUST_TX))
sh_msiof_write(p, SIRSCR, scr);
}
static u32 sh_msiof_get_delay_bit(u32 dtdl_or_syncdl)
{
/*
* DTDL/SYNCDL bit : p->info->dtdl or p->info->syncdl
* b'000 : 0
* b'001 : 100
* b'010 : 200
* b'011 (SYNCDL only) : 300
* b'101 : 50
* b'110 : 150
*/
if (dtdl_or_syncdl % 100)
return dtdl_or_syncdl / 100 + 5;
else
return dtdl_or_syncdl / 100;
}
static u32 sh_msiof_spi_get_dtdl_and_syncdl(struct sh_msiof_spi_priv *p)
{
u32 val;
if (!p->info)
return 0;
/* check if DTDL and SYNCDL are allowed values */
if (p->info->dtdl > 200 || p->info->syncdl > 300) {
dev_warn(&p->pdev->dev, "DTDL or SYNCDL is too large\n");
return 0;
}
/* check if the sum of DTDL and SYNCDL is a multiple of 100 */
if ((p->info->dtdl + p->info->syncdl) % 100) {
dev_warn(&p->pdev->dev, "the sum of DTDL/SYNCDL is not good\n");
return 0;
}
val = sh_msiof_get_delay_bit(p->info->dtdl) << SIMDR1_DTDL_SHIFT;
val |= sh_msiof_get_delay_bit(p->info->syncdl) << SIMDR1_SYNCDL_SHIFT;
return val;
}
static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, u32 ss,
u32 cpol, u32 cpha,
u32 tx_hi_z, u32 lsb_first, u32 cs_high)
{
u32 tmp;
int edge;
/*
* CPOL CPHA TSCKIZ RSCKIZ TEDG REDG
* 0 0 10 10 1 1
* 0 1 10 10 0 0
* 1 0 11 11 0 0
* 1 1 11 11 1 1
*/
tmp = SIMDR1_SYNCMD_SPI | 1 << SIMDR1_FLD_SHIFT | SIMDR1_XXSTP;
tmp |= !cs_high << SIMDR1_SYNCAC_SHIFT;
tmp |= lsb_first << SIMDR1_BITLSB_SHIFT;
tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p);
if (spi_controller_is_target(p->ctlr)) {
sh_msiof_write(p, SITMDR1, tmp | SITMDR1_PCON);
} else {
sh_msiof_write(p, SITMDR1,
tmp | SIMDR1_TRMD | SITMDR1_PCON |
(ss < MAX_SS ? ss : 0) << SITMDR1_SYNCCH_SHIFT);
}
if (p->ctlr->flags & SPI_CONTROLLER_MUST_TX) {
/* These bits are reserved if RX needs TX */
tmp &= ~0x0000ffff;
}
sh_msiof_write(p, SIRMDR1, tmp);
tmp = 0;
tmp |= SICTR_TSCKIZ_SCK | cpol << SICTR_TSCKIZ_POL_SHIFT;
tmp |= SICTR_RSCKIZ_SCK | cpol << SICTR_RSCKIZ_POL_SHIFT;
edge = cpol ^ !cpha;
tmp |= edge << SICTR_TEDG_SHIFT;
tmp |= edge << SICTR_REDG_SHIFT;
tmp |= tx_hi_z ? SICTR_TXDIZ_HIZ : SICTR_TXDIZ_LOW;
sh_msiof_write(p, SICTR, tmp);
}
static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p,
const void *tx_buf, void *rx_buf,
u32 bits, u32 words)
{
u32 dr2 = SIMDR2_BITLEN1(bits) | SIMDR2_WDLEN1(words);
if (tx_buf || (p->ctlr->flags & SPI_CONTROLLER_MUST_TX))
sh_msiof_write(p, SITMDR2, dr2);
else
sh_msiof_write(p, SITMDR2, dr2 | SIMDR2_GRPMASK1);
if (rx_buf)
sh_msiof_write(p, SIRMDR2, dr2);
}
static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p)
{
sh_msiof_write(p, SISTR,
sh_msiof_read(p, SISTR) & ~(SISTR_TDREQ | SISTR_RDREQ));
}
static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
const void *tx_buf, int words, int fs)
{
const u8 *buf_8 = tx_buf;
int k;
for (k = 0; k < words; k++)
sh_msiof_write(p, SITFDR, buf_8[k] << fs);
}
static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p,
const void *tx_buf, int words, int fs)
{
const u16 *buf_16 = tx_buf;
int k;
for (k = 0; k < words; k++)
sh_msiof_write(p, SITFDR, buf_16[k] << fs);
}
static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p,
const void *tx_buf, int words, int fs)
{
const u16 *buf_16 = tx_buf;
int k;
for (k = 0; k < words; k++)
sh_msiof_write(p, SITFDR, get_unaligned(&buf_16[k]) << fs);
}
static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p,
const void *tx_buf, int words, int fs)
{
const u32 *buf_32 = tx_buf;
int k;
for (k = 0; k < words; k++)
sh_msiof_write(p, SITFDR, buf_32[k] << fs);
}
static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p,
const void *tx_buf, int words, int fs)
{
const u32 *buf_32 = tx_buf;
int k;
for (k = 0; k < words; k++)
sh_msiof_write(p, SITFDR, get_unaligned(&buf_32[k]) << fs);
}
static void sh_msiof_spi_write_fifo_s32(struct sh_msiof_spi_priv *p,
const void *tx_buf, int words, int fs)
{
const u32 *buf_32 = tx_buf;
int k;
for (k = 0; k < words; k++)
sh_msiof_write(p, SITFDR, swab32(buf_32[k] << fs));
}
static void sh_msiof_spi_write_fifo_s32u(struct sh_msiof_spi_priv *p,
const void *tx_buf, int words, int fs)
{
const u32 *buf_32 = tx_buf;
int k;
for (k = 0; k < words; k++)
sh_msiof_write(p, SITFDR, swab32(get_unaligned(&buf_32[k]) << fs));
}
static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p,
void *rx_buf, int words, int fs)
{
u8 *buf_8 = rx_buf;
int k;
for (k = 0; k < words; k++)
buf_8[k] = sh_msiof_read(p, SIRFDR) >> fs;
}
static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p,
void *rx_buf, int words, int fs)
{
u16 *buf_16 = rx_buf;
int k;
for (k = 0; k < words; k++)
buf_16[k] = sh_msiof_read(p, SIRFDR) >> fs;
}
static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p,
void *rx_buf, int words, int fs)
{
u16 *buf_16 = rx_buf;
int k;
for (k = 0; k < words; k++)
put_unaligned(sh_msiof_read(p, SIRFDR) >> fs, &buf_16[k]);
}
static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p,
void *rx_buf, int words, int fs)
{
u32 *buf_32 = rx_buf;
int k;
for (k = 0; k < words; k++)
buf_32[k] = sh_msiof_read(p, SIRFDR) >> fs;
}
static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p,
void *rx_buf, int words, int fs)
{
u32 *buf_32 = rx_buf;
int k;
for (k = 0; k < words; k++)
put_unaligned(sh_msiof_read(p, SIRFDR) >> fs, &buf_32[k]);
}
static void sh_msiof_spi_read_fifo_s32(struct sh_msiof_spi_priv *p,
void *rx_buf, int words, int fs)
{
u32 *buf_32 = rx_buf;
int k;
for (k = 0; k < words; k++)
buf_32[k] = swab32(sh_msiof_read(p, SIRFDR) >> fs);
}
static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p,
void *rx_buf, int words, int fs)
{
u32 *buf_32 = rx_buf;
int k;
for (k = 0; k < words; k++)
put_unaligned(swab32(sh_msiof_read(p, SIRFDR) >> fs), &buf_32[k]);
}
static int sh_msiof_spi_setup(struct spi_device *spi)
{
struct sh_msiof_spi_priv *p =
spi_controller_get_devdata(spi->controller);
u32 clr, set, tmp;
if (spi_get_csgpiod(spi, 0) || spi_controller_is_target(p->ctlr))
return 0;
if (p->native_cs_inited &&
(p->native_cs_high == !!(spi->mode & SPI_CS_HIGH)))
return 0;
/* Configure native chip select mode/polarity early */
clr = SIMDR1_SYNCMD_MASK;
set = SIMDR1_SYNCMD_SPI;
if (spi->mode & SPI_CS_HIGH)
clr |= BIT(SIMDR1_SYNCAC_SHIFT);
else
set |= BIT(SIMDR1_SYNCAC_SHIFT);
pm_runtime_get_sync(&p->pdev->dev);
tmp = sh_msiof_read(p, SITMDR1) & ~clr;
sh_msiof_write(p, SITMDR1, tmp | set | SIMDR1_TRMD | SITMDR1_PCON);
tmp = sh_msiof_read(p, SIRMDR1) & ~clr;
sh_msiof_write(p, SIRMDR1, tmp | set);
pm_runtime_put(&p->pdev->dev);
p->native_cs_high = spi->mode & SPI_CS_HIGH;
p->native_cs_inited = true;
return 0;
}
static int sh_msiof_prepare_message(struct spi_controller *ctlr,
struct spi_message *msg)
{
struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr);
const struct spi_device *spi = msg->spi;
u32 ss, cs_high;
/* Configure pins before asserting CS */
if (spi_get_csgpiod(spi, 0)) {
ss = ctlr->unused_native_cs;
cs_high = p->native_cs_high;
} else {
ss = spi_get_chipselect(spi, 0);
cs_high = !!(spi->mode & SPI_CS_HIGH);
}
sh_msiof_spi_set_pin_regs(p, ss, !!(spi->mode & SPI_CPOL),
!!(spi->mode & SPI_CPHA),
!!(spi->mode & SPI_3WIRE),
!!(spi->mode & SPI_LSB_FIRST), cs_high);
return 0;
}
static int sh_msiof_spi_start(struct sh_msiof_spi_priv *p, void *rx_buf)
{
bool target = spi_controller_is_target(p->ctlr);
int ret = 0;
/* setup clock and rx/tx signals */
if (!target)
ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TSCKE);
if (rx_buf && !ret)
ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_RXE);
if (!ret)
ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TXE);
/* start by setting frame bit */
if (!ret && !target)
ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TFSE);
return ret;
}
static int sh_msiof_spi_stop(struct sh_msiof_spi_priv *p, void *rx_buf)
{
bool target = spi_controller_is_target(p->ctlr);
int ret = 0;
/* shut down frame, rx/tx and clock signals */
if (!target)
ret = sh_msiof_modify_ctr_wait(p, SICTR_TFSE, 0);
if (!ret)
ret = sh_msiof_modify_ctr_wait(p, SICTR_TXE, 0);
if (rx_buf && !ret)
ret = sh_msiof_modify_ctr_wait(p, SICTR_RXE, 0);
if (!ret && !target)
ret = sh_msiof_modify_ctr_wait(p, SICTR_TSCKE, 0);
return ret;
}
static int sh_msiof_target_abort(struct spi_controller *ctlr)
{
struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr);
p->target_aborted = true;
complete(&p->done);
complete(&p->done_txdma);
return 0;
}
static int sh_msiof_wait_for_completion(struct sh_msiof_spi_priv *p,
struct completion *x)
{
if (spi_controller_is_target(p->ctlr)) {
if (wait_for_completion_interruptible(x) ||
p->target_aborted) {
dev_dbg(&p->pdev->dev, "interrupted\n");
return -EINTR;
}
} else {
if (!wait_for_completion_timeout(x, HZ)) {
dev_err(&p->pdev->dev, "timeout\n");
return -ETIMEDOUT;
}
}
return 0;
}
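/*
* PIO transfer of a single FIFO-sized chunk: fill the TX FIFO, start the
* transfer, wait for the TEOF/REOF completion interrupt, then drain the RX
* FIFO. Returns the number of words transferred, or a negative error code.
*/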
static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
void (*tx_fifo)(struct sh_msiof_spi_priv *,
const void *, int, int),
void (*rx_fifo)(struct sh_msiof_spi_priv *,
void *, int, int),
const void *tx_buf, void *rx_buf,
int words, int bits)
{
int fifo_shift;
int ret;
/* limit maximum word transfer to rx/tx fifo size */
if (tx_buf)
words = min_t(int, words, p->tx_fifo_size);
if (rx_buf)
words = min_t(int, words, p->rx_fifo_size);
/* the fifo contents need shifting */
fifo_shift = 32 - bits;
/* default FIFO watermarks for PIO */
sh_msiof_write(p, SIFCTR, 0);
/* setup msiof transfer mode registers */
sh_msiof_spi_set_mode_regs(p, tx_buf, rx_buf, bits, words);
sh_msiof_write(p, SIIER, SIIER_TEOFE | SIIER_REOFE);
/* write tx fifo */
if (tx_buf)
tx_fifo(p, tx_buf, words, fifo_shift);
reinit_completion(&p->done);
p->target_aborted = false;
ret = sh_msiof_spi_start(p, rx_buf);
if (ret) {
dev_err(&p->pdev->dev, "failed to start hardware\n");
goto stop_ier;
}
/* wait for tx fifo to be emptied / rx fifo to be filled */
ret = sh_msiof_wait_for_completion(p, &p->done);
if (ret)
goto stop_reset;
/* read rx fifo */
if (rx_buf)
rx_fifo(p, rx_buf, words, fifo_shift);
/* clear status bits */
sh_msiof_reset_str(p);
ret = sh_msiof_spi_stop(p, rx_buf);
if (ret) {
dev_err(&p->pdev->dev, "failed to shut down hardware\n");
return ret;
}
return words;
stop_reset:
sh_msiof_reset_str(p);
sh_msiof_spi_stop(p, rx_buf);
stop_ier:
sh_msiof_write(p, SIIER, 0);
return ret;
}
static void sh_msiof_dma_complete(void *arg)
{
complete(arg);
}
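/*
* DMA transfer of one chunk of 32-bit words (len bytes) via the pre-mapped
* bounce pages. The RX and/or TX descriptors are prepared and submitted
* before the hardware is started, so a missing descriptor can still fall
* back to PIO with -EAGAIN.
*/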
static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
void *rx, unsigned int len)
{
u32 ier_bits = 0;
struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
dma_cookie_t cookie;
int ret;
/* First prepare and submit the DMA request(s), as this may fail */
if (rx) {
ier_bits |= SIIER_RDREQE | SIIER_RDMAE;
desc_rx = dmaengine_prep_slave_single(p->ctlr->dma_rx,
p->rx_dma_addr, len, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_rx)
return -EAGAIN;
desc_rx->callback = sh_msiof_dma_complete;
desc_rx->callback_param = &p->done;
cookie = dmaengine_submit(desc_rx);
if (dma_submit_error(cookie))
return cookie;
}
if (tx) {
ier_bits |= SIIER_TDREQE | SIIER_TDMAE;
dma_sync_single_for_device(p->ctlr->dma_tx->device->dev,
p->tx_dma_addr, len, DMA_TO_DEVICE);
desc_tx = dmaengine_prep_slave_single(p->ctlr->dma_tx,
p->tx_dma_addr, len, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_tx) {
ret = -EAGAIN;
goto no_dma_tx;
}
desc_tx->callback = sh_msiof_dma_complete;
desc_tx->callback_param = &p->done_txdma;
cookie = dmaengine_submit(desc_tx);
if (dma_submit_error(cookie)) {
ret = cookie;
goto no_dma_tx;
}
}
/* 1 stage FIFO watermarks for DMA */
sh_msiof_write(p, SIFCTR, SIFCTR_TFWM_1 | SIFCTR_RFWM_1);
/* setup msiof transfer mode registers (32-bit words) */
sh_msiof_spi_set_mode_regs(p, tx, rx, 32, len / 4);
sh_msiof_write(p, SIIER, ier_bits);
reinit_completion(&p->done);
if (tx)
reinit_completion(&p->done_txdma);
p->target_aborted = false;
/* Now start DMA */
if (rx)
dma_async_issue_pending(p->ctlr->dma_rx);
if (tx)
dma_async_issue_pending(p->ctlr->dma_tx);
ret = sh_msiof_spi_start(p, rx);
if (ret) {
dev_err(&p->pdev->dev, "failed to start hardware\n");
goto stop_dma;
}
if (tx) {
/* wait for tx DMA completion */
ret = sh_msiof_wait_for_completion(p, &p->done_txdma);
if (ret)
goto stop_reset;
}
if (rx) {
/* wait for rx DMA completion */
ret = sh_msiof_wait_for_completion(p, &p->done);
if (ret)
goto stop_reset;
sh_msiof_write(p, SIIER, 0);
} else {
/* wait for tx fifo to be emptied */
sh_msiof_write(p, SIIER, SIIER_TEOFE);
ret = sh_msiof_wait_for_completion(p, &p->done);
if (ret)
goto stop_reset;
}
/* clear status bits */
sh_msiof_reset_str(p);
ret = sh_msiof_spi_stop(p, rx);
if (ret) {
dev_err(&p->pdev->dev, "failed to shut down hardware\n");
return ret;
}
if (rx)
dma_sync_single_for_cpu(p->ctlr->dma_rx->device->dev,
p->rx_dma_addr, len, DMA_FROM_DEVICE);
return 0;
stop_reset:
sh_msiof_reset_str(p);
sh_msiof_spi_stop(p, rx);
stop_dma:
if (tx)
dmaengine_terminate_sync(p->ctlr->dma_tx);
no_dma_tx:
if (rx)
dmaengine_terminate_sync(p->ctlr->dma_rx);
sh_msiof_write(p, SIIER, 0);
return ret;
}
static void copy_bswap32(u32 *dst, const u32 *src, unsigned int words)
{
/* src or dst can be unaligned, but not both */
if ((unsigned long)src & 3) {
while (words--) {
*dst++ = swab32(get_unaligned(src));
src++;
}
} else if ((unsigned long)dst & 3) {
while (words--) {
put_unaligned(swab32(*src++), dst);
dst++;
}
} else {
while (words--)
*dst++ = swab32(*src++);
}
}
static void copy_wswap32(u32 *dst, const u32 *src, unsigned int words)
{
/* src or dst can be unaligned, but not both */
if ((unsigned long)src & 3) {
while (words--) {
*dst++ = swahw32(get_unaligned(src));
src++;
}
} else if ((unsigned long)dst & 3) {
while (words--) {
put_unaligned(swahw32(*src++), dst);
dst++;
}
} else {
while (words--)
*dst++ = swahw32(*src++);
}
}
static void copy_plain32(u32 *dst, const u32 *src, unsigned int words)
{
memcpy(dst, src, words * 4);
}
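/*
* Main transfer hook: large transfers go through DMA in 32-bit words, using
* the copy_*32() helpers to pack/unpack 8- and 16-bit data, while the
* remainder (or everything, if DMA is unavailable) is done in PIO with the
* word-size and alignment specific FIFO accessors selected below.
*/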
static int sh_msiof_transfer_one(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *t)
{
struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr);
void (*copy32)(u32 *, const u32 *, unsigned int);
void (*tx_fifo)(struct sh_msiof_spi_priv *, const void *, int, int);
void (*rx_fifo)(struct sh_msiof_spi_priv *, void *, int, int);
const void *tx_buf = t->tx_buf;
void *rx_buf = t->rx_buf;
unsigned int len = t->len;
unsigned int bits = t->bits_per_word;
unsigned int bytes_per_word;
unsigned int words;
int n;
bool swab;
int ret;
/* reset registers */
sh_msiof_spi_reset_regs(p);
/* setup clocks (clock already enabled in chipselect()) */
if (!spi_controller_is_target(p->ctlr))
sh_msiof_spi_set_clk_regs(p, t);
while (ctlr->dma_tx && len > 15) {
/*
* DMA supports 32-bit words only, hence pack 8-bit and 16-bit

* words, with byte and word swapping respectively.
*/
unsigned int l = 0;
if (tx_buf)
l = min(round_down(len, 4), p->tx_fifo_size * 4);
if (rx_buf)
l = min(round_down(len, 4), p->rx_fifo_size * 4);
if (bits <= 8) {
copy32 = copy_bswap32;
} else if (bits <= 16) {
copy32 = copy_wswap32;
} else {
copy32 = copy_plain32;
}
if (tx_buf)
copy32(p->tx_dma_page, tx_buf, l / 4);
ret = sh_msiof_dma_once(p, tx_buf, rx_buf, l);
if (ret == -EAGAIN) {
dev_warn_once(&p->pdev->dev,
"DMA not available, falling back to PIO\n");
break;
}
if (ret)
return ret;
if (rx_buf) {
copy32(rx_buf, p->rx_dma_page, l / 4);
rx_buf += l;
}
if (tx_buf)
tx_buf += l;
len -= l;
if (!len)
return 0;
}
if (bits <= 8 && len > 15) {
bits = 32;
swab = true;
} else {
swab = false;
}
/* setup bytes per word and fifo read/write functions */
if (bits <= 8) {
bytes_per_word = 1;
tx_fifo = sh_msiof_spi_write_fifo_8;
rx_fifo = sh_msiof_spi_read_fifo_8;
} else if (bits <= 16) {
bytes_per_word = 2;
if ((unsigned long)tx_buf & 0x01)
tx_fifo = sh_msiof_spi_write_fifo_16u;
else
tx_fifo = sh_msiof_spi_write_fifo_16;
if ((unsigned long)rx_buf & 0x01)
rx_fifo = sh_msiof_spi_read_fifo_16u;
else
rx_fifo = sh_msiof_spi_read_fifo_16;
} else if (swab) {
bytes_per_word = 4;
if ((unsigned long)tx_buf & 0x03)
tx_fifo = sh_msiof_spi_write_fifo_s32u;
else
tx_fifo = sh_msiof_spi_write_fifo_s32;
if ((unsigned long)rx_buf & 0x03)
rx_fifo = sh_msiof_spi_read_fifo_s32u;
else
rx_fifo = sh_msiof_spi_read_fifo_s32;
} else {
bytes_per_word = 4;
if ((unsigned long)tx_buf & 0x03)
tx_fifo = sh_msiof_spi_write_fifo_32u;
else
tx_fifo = sh_msiof_spi_write_fifo_32;
if ((unsigned long)rx_buf & 0x03)
rx_fifo = sh_msiof_spi_read_fifo_32u;
else
rx_fifo = sh_msiof_spi_read_fifo_32;
}
/* transfer in fifo sized chunks */
words = len / bytes_per_word;
while (words > 0) {
n = sh_msiof_spi_txrx_once(p, tx_fifo, rx_fifo, tx_buf, rx_buf,
words, bits);
if (n < 0)
return n;
if (tx_buf)
tx_buf += n * bytes_per_word;
if (rx_buf)
rx_buf += n * bytes_per_word;
words -= n;
if (words == 0 && (len % bytes_per_word)) {
words = len % bytes_per_word;
bits = t->bits_per_word;
bytes_per_word = 1;
tx_fifo = sh_msiof_spi_write_fifo_8;
rx_fifo = sh_msiof_spi_read_fifo_8;
}
}
return 0;
}
static const struct sh_msiof_chipdata sh_data = {
.bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32),
.tx_fifo_size = 64,
.rx_fifo_size = 64,
.ctlr_flags = 0,
.min_div_pow = 0,
};
static const struct sh_msiof_chipdata rcar_gen2_data = {
.bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
SPI_BPW_MASK(24) | SPI_BPW_MASK(32),
.tx_fifo_size = 64,
.rx_fifo_size = 64,
.ctlr_flags = SPI_CONTROLLER_MUST_TX,
.min_div_pow = 0,
};
static const struct sh_msiof_chipdata rcar_gen3_data = {
.bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
SPI_BPW_MASK(24) | SPI_BPW_MASK(32),
.tx_fifo_size = 64,
.rx_fifo_size = 64,
.ctlr_flags = SPI_CONTROLLER_MUST_TX,
.min_div_pow = 1,
};
static const struct of_device_id sh_msiof_match[] __maybe_unused = {
{ .compatible = "renesas,sh-mobile-msiof", .data = &sh_data },
{ .compatible = "renesas,msiof-r8a7743", .data = &rcar_gen2_data },
{ .compatible = "renesas,msiof-r8a7745", .data = &rcar_gen2_data },
{ .compatible = "renesas,msiof-r8a7790", .data = &rcar_gen2_data },
{ .compatible = "renesas,msiof-r8a7791", .data = &rcar_gen2_data },
{ .compatible = "renesas,msiof-r8a7792", .data = &rcar_gen2_data },
{ .compatible = "renesas,msiof-r8a7793", .data = &rcar_gen2_data },
{ .compatible = "renesas,msiof-r8a7794", .data = &rcar_gen2_data },
{ .compatible = "renesas,rcar-gen2-msiof", .data = &rcar_gen2_data },
{ .compatible = "renesas,msiof-r8a7796", .data = &rcar_gen3_data },
{ .compatible = "renesas,rcar-gen3-msiof", .data = &rcar_gen3_data },
{ .compatible = "renesas,rcar-gen4-msiof", .data = &rcar_gen3_data },
{ .compatible = "renesas,sh-msiof", .data = &sh_data }, /* Deprecated */
{},
};
MODULE_DEVICE_TABLE(of, sh_msiof_match);
#ifdef CONFIG_OF
static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
{
struct sh_msiof_spi_info *info;
struct device_node *np = dev->of_node;
u32 num_cs = 1;
info = devm_kzalloc(dev, sizeof(struct sh_msiof_spi_info), GFP_KERNEL);
if (!info)
return NULL;
info->mode = of_property_read_bool(np, "spi-slave") ? MSIOF_SPI_TARGET
: MSIOF_SPI_HOST;
/* Parse the MSIOF properties */
if (info->mode == MSIOF_SPI_HOST)
of_property_read_u32(np, "num-cs", &num_cs);
of_property_read_u32(np, "renesas,tx-fifo-size",
&info->tx_fifo_override);
of_property_read_u32(np, "renesas,rx-fifo-size",
&info->rx_fifo_override);
of_property_read_u32(np, "renesas,dtdl", &info->dtdl);
of_property_read_u32(np, "renesas,syncdl", &info->syncdl);
info->num_chipselect = num_cs;
return info;
}
#else
static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
{
return NULL;
}
#endif
static struct dma_chan *sh_msiof_request_dma_chan(struct device *dev,
enum dma_transfer_direction dir, unsigned int id, dma_addr_t port_addr)
{
dma_cap_mask_t mask;
struct dma_chan *chan;
struct dma_slave_config cfg;
int ret;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
(void *)(unsigned long)id, dev,
dir == DMA_MEM_TO_DEV ? "tx" : "rx");
if (!chan) {
dev_warn(dev, "dma_request_slave_channel_compat failed\n");
return NULL;
}
memset(&cfg, 0, sizeof(cfg));
cfg.direction = dir;
if (dir == DMA_MEM_TO_DEV) {
cfg.dst_addr = port_addr;
cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
} else {
cfg.src_addr = port_addr;
cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
}
ret = dmaengine_slave_config(chan, &cfg);
if (ret) {
dev_warn(dev, "dmaengine_slave_config failed %d\n", ret);
dma_release_channel(chan);
return NULL;
}
return chan;
}
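/*
* Optional DMA setup: request the TX/RX slave channels, allocate one bounce
* page per direction and map it for streaming DMA. Failure here is not
* fatal; the caller simply continues with PIO.
*/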
static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p)
{
struct platform_device *pdev = p->pdev;
struct device *dev = &pdev->dev;
const struct sh_msiof_spi_info *info = p->info;
unsigned int dma_tx_id, dma_rx_id;
const struct resource *res;
struct spi_controller *ctlr;
struct device *tx_dev, *rx_dev;
if (dev->of_node) {
/* In the OF case we will get the slave IDs from the DT */
dma_tx_id = 0;
dma_rx_id = 0;
} else if (info && info->dma_tx_id && info->dma_rx_id) {
dma_tx_id = info->dma_tx_id;
dma_rx_id = info->dma_rx_id;
} else {
/* No DMA resources specified; silently fall back to PIO */
return 0;
}
/* The DMA engine uses the second register set, if present */
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!res)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ctlr = p->ctlr;
ctlr->dma_tx = sh_msiof_request_dma_chan(dev, DMA_MEM_TO_DEV,
dma_tx_id, res->start + SITFDR);
if (!ctlr->dma_tx)
return -ENODEV;
ctlr->dma_rx = sh_msiof_request_dma_chan(dev, DMA_DEV_TO_MEM,
dma_rx_id, res->start + SIRFDR);
if (!ctlr->dma_rx)
goto free_tx_chan;
p->tx_dma_page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
if (!p->tx_dma_page)
goto free_rx_chan;
p->rx_dma_page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
if (!p->rx_dma_page)
goto free_tx_page;
tx_dev = ctlr->dma_tx->device->dev;
p->tx_dma_addr = dma_map_single(tx_dev, p->tx_dma_page, PAGE_SIZE,
DMA_TO_DEVICE);
if (dma_mapping_error(tx_dev, p->tx_dma_addr))
goto free_rx_page;
rx_dev = ctlr->dma_rx->device->dev;
p->rx_dma_addr = dma_map_single(rx_dev, p->rx_dma_page, PAGE_SIZE,
DMA_FROM_DEVICE);
if (dma_mapping_error(rx_dev, p->rx_dma_addr))
goto unmap_tx_page;
dev_info(dev, "DMA available");
return 0;
unmap_tx_page:
dma_unmap_single(tx_dev, p->tx_dma_addr, PAGE_SIZE, DMA_TO_DEVICE);
free_rx_page:
free_page((unsigned long)p->rx_dma_page);
free_tx_page:
free_page((unsigned long)p->tx_dma_page);
free_rx_chan:
dma_release_channel(ctlr->dma_rx);
free_tx_chan:
dma_release_channel(ctlr->dma_tx);
ctlr->dma_tx = NULL;
return -ENODEV;
}
static void sh_msiof_release_dma(struct sh_msiof_spi_priv *p)
{
struct spi_controller *ctlr = p->ctlr;
if (!ctlr->dma_tx)
return;
dma_unmap_single(ctlr->dma_rx->device->dev, p->rx_dma_addr, PAGE_SIZE,
DMA_FROM_DEVICE);
dma_unmap_single(ctlr->dma_tx->device->dev, p->tx_dma_addr, PAGE_SIZE,
DMA_TO_DEVICE);
free_page((unsigned long)p->rx_dma_page);
free_page((unsigned long)p->tx_dma_page);
dma_release_channel(ctlr->dma_rx);
dma_release_channel(ctlr->dma_tx);
}
static int sh_msiof_spi_probe(struct platform_device *pdev)
{
struct spi_controller *ctlr;
const struct sh_msiof_chipdata *chipdata;
struct sh_msiof_spi_info *info;
struct sh_msiof_spi_priv *p;
unsigned long clksrc;
int i;
int ret;
chipdata = of_device_get_match_data(&pdev->dev);
if (chipdata) {
info = sh_msiof_spi_parse_dt(&pdev->dev);
} else {
chipdata = (const void *)pdev->id_entry->driver_data;
info = dev_get_platdata(&pdev->dev);
}
if (!info) {
dev_err(&pdev->dev, "failed to obtain device info\n");
return -ENXIO;
}
if (info->mode == MSIOF_SPI_TARGET)
ctlr = spi_alloc_target(&pdev->dev,
sizeof(struct sh_msiof_spi_priv));
else
ctlr = spi_alloc_host(&pdev->dev,
sizeof(struct sh_msiof_spi_priv));
if (ctlr == NULL)
return -ENOMEM;
p = spi_controller_get_devdata(ctlr);
platform_set_drvdata(pdev, p);
p->ctlr = ctlr;
p->info = info;
p->min_div_pow = chipdata->min_div_pow;
init_completion(&p->done);
init_completion(&p->done_txdma);
p->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(p->clk)) {
dev_err(&pdev->dev, "cannot get clock\n");
ret = PTR_ERR(p->clk);
goto err1;
}
i = platform_get_irq(pdev, 0);
if (i < 0) {
ret = i;
goto err1;
}
p->mapbase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(p->mapbase)) {
ret = PTR_ERR(p->mapbase);
goto err1;
}
ret = devm_request_irq(&pdev->dev, i, sh_msiof_spi_irq, 0,
dev_name(&pdev->dev), p);
if (ret) {
dev_err(&pdev->dev, "unable to request irq\n");
goto err1;
}
p->pdev = pdev;
pm_runtime_enable(&pdev->dev);
/* Platform data may override FIFO sizes */
p->tx_fifo_size = chipdata->tx_fifo_size;
p->rx_fifo_size = chipdata->rx_fifo_size;
if (p->info->tx_fifo_override)
p->tx_fifo_size = p->info->tx_fifo_override;
if (p->info->rx_fifo_override)
p->rx_fifo_size = p->info->rx_fifo_override;
/* init controller code */
ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
ctlr->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE;
clksrc = clk_get_rate(p->clk);
ctlr->min_speed_hz = DIV_ROUND_UP(clksrc, 1024);
ctlr->max_speed_hz = DIV_ROUND_UP(clksrc, 1 << p->min_div_pow);
ctlr->flags = chipdata->ctlr_flags;
ctlr->bus_num = pdev->id;
ctlr->num_chipselect = p->info->num_chipselect;
ctlr->dev.of_node = pdev->dev.of_node;
ctlr->setup = sh_msiof_spi_setup;
ctlr->prepare_message = sh_msiof_prepare_message;
ctlr->target_abort = sh_msiof_target_abort;
ctlr->bits_per_word_mask = chipdata->bits_per_word_mask;
ctlr->auto_runtime_pm = true;
ctlr->transfer_one = sh_msiof_transfer_one;
ctlr->use_gpio_descriptors = true;
ctlr->max_native_cs = MAX_SS;
ret = sh_msiof_request_dma(p);
if (ret < 0)
dev_warn(&pdev->dev, "DMA not available, using PIO\n");
ret = devm_spi_register_controller(&pdev->dev, ctlr);
if (ret < 0) {
dev_err(&pdev->dev, "devm_spi_register_controller error.\n");
goto err2;
}
return 0;
err2:
sh_msiof_release_dma(p);
pm_runtime_disable(&pdev->dev);
err1:
spi_controller_put(ctlr);
return ret;
}
static void sh_msiof_spi_remove(struct platform_device *pdev)
{
struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
sh_msiof_release_dma(p);
pm_runtime_disable(&pdev->dev);
}
static const struct platform_device_id spi_driver_ids[] = {
{ "spi_sh_msiof", (kernel_ulong_t)&sh_data },
{},
};
MODULE_DEVICE_TABLE(platform, spi_driver_ids);
#ifdef CONFIG_PM_SLEEP
static int sh_msiof_spi_suspend(struct device *dev)
{
struct sh_msiof_spi_priv *p = dev_get_drvdata(dev);
return spi_controller_suspend(p->ctlr);
}
static int sh_msiof_spi_resume(struct device *dev)
{
struct sh_msiof_spi_priv *p = dev_get_drvdata(dev);
return spi_controller_resume(p->ctlr);
}
static SIMPLE_DEV_PM_OPS(sh_msiof_spi_pm_ops, sh_msiof_spi_suspend,
sh_msiof_spi_resume);
#define DEV_PM_OPS (&sh_msiof_spi_pm_ops)
#else
#define DEV_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP */
static struct platform_driver sh_msiof_spi_drv = {
.probe = sh_msiof_spi_probe,
.remove_new = sh_msiof_spi_remove,
.id_table = spi_driver_ids,
.driver = {
.name = "spi_sh_msiof",
.pm = DEV_PM_OPS,
.of_match_table = of_match_ptr(sh_msiof_match),
},
};
module_platform_driver(sh_msiof_spi_drv);
MODULE_DESCRIPTION("SuperH MSIOF SPI Controller Interface Driver");
MODULE_AUTHOR("Magnus Damm");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/spi/spi-sh-msiof.c |
// SPDX-License-Identifier: GPL-2.0+
// Cadence XSPI flash controller driver
// Copyright (C) 2020-21 Cadence
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/bitfield.h>
#include <linux/limits.h>
#include <linux/log2.h>
#define CDNS_XSPI_MAGIC_NUM_VALUE 0x6522
#define CDNS_XSPI_MAX_BANKS 8
#define CDNS_XSPI_NAME "cadence-xspi"
/*
* Note: below are additional auxiliary registers to
* configure XSPI controller pin-strap settings
*/
/* PHY DQ timing register */
#define CDNS_XSPI_CCP_PHY_DQ_TIMING 0x0000
/* PHY DQS timing register */
#define CDNS_XSPI_CCP_PHY_DQS_TIMING 0x0004
/* PHY gate loopback control register */
#define CDNS_XSPI_CCP_PHY_GATE_LPBCK_CTRL 0x0008
/* PHY DLL slave control register */
#define CDNS_XSPI_CCP_PHY_DLL_SLAVE_CTRL 0x0010
/* DLL PHY control register */
#define CDNS_XSPI_DLL_PHY_CTRL 0x1034
/* Command registers */
#define CDNS_XSPI_CMD_REG_0 0x0000
#define CDNS_XSPI_CMD_REG_1 0x0004
#define CDNS_XSPI_CMD_REG_2 0x0008
#define CDNS_XSPI_CMD_REG_3 0x000C
#define CDNS_XSPI_CMD_REG_4 0x0010
#define CDNS_XSPI_CMD_REG_5 0x0014
/* Command status registers */
#define CDNS_XSPI_CMD_STATUS_REG 0x0044
/* Controller status register */
#define CDNS_XSPI_CTRL_STATUS_REG 0x0100
#define CDNS_XSPI_INIT_COMPLETED BIT(16)
#define CDNS_XSPI_INIT_LEGACY BIT(9)
#define CDNS_XSPI_INIT_FAIL BIT(8)
#define CDNS_XSPI_CTRL_BUSY BIT(7)
/* Controller interrupt status register */
#define CDNS_XSPI_INTR_STATUS_REG 0x0110
#define CDNS_XSPI_STIG_DONE BIT(23)
#define CDNS_XSPI_SDMA_ERROR BIT(22)
#define CDNS_XSPI_SDMA_TRIGGER BIT(21)
#define CDNS_XSPI_CMD_IGNRD_EN BIT(20)
#define CDNS_XSPI_DDMA_TERR_EN BIT(18)
#define CDNS_XSPI_CDMA_TREE_EN BIT(17)
#define CDNS_XSPI_CTRL_IDLE_EN BIT(16)
#define CDNS_XSPI_TRD_COMP_INTR_STATUS 0x0120
#define CDNS_XSPI_TRD_ERR_INTR_STATUS 0x0130
#define CDNS_XSPI_TRD_ERR_INTR_EN 0x0134
/* Controller interrupt enable register */
#define CDNS_XSPI_INTR_ENABLE_REG 0x0114
#define CDNS_XSPI_INTR_EN BIT(31)
#define CDNS_XSPI_STIG_DONE_EN BIT(23)
#define CDNS_XSPI_SDMA_ERROR_EN BIT(22)
#define CDNS_XSPI_SDMA_TRIGGER_EN BIT(21)
#define CDNS_XSPI_INTR_MASK (CDNS_XSPI_INTR_EN | \
CDNS_XSPI_STIG_DONE_EN | \
CDNS_XSPI_SDMA_ERROR_EN | \
CDNS_XSPI_SDMA_TRIGGER_EN)
/* Controller config register */
#define CDNS_XSPI_CTRL_CONFIG_REG 0x0230
#define CDNS_XSPI_CTRL_WORK_MODE GENMASK(6, 5)
#define CDNS_XSPI_WORK_MODE_DIRECT 0
#define CDNS_XSPI_WORK_MODE_STIG 1
#define CDNS_XSPI_WORK_MODE_ACMD 3
/* SDMA trigger transaction registers */
#define CDNS_XSPI_SDMA_SIZE_REG 0x0240
#define CDNS_XSPI_SDMA_TRD_INFO_REG 0x0244
#define CDNS_XSPI_SDMA_DIR BIT(8)
/* Controller features register */
#define CDNS_XSPI_CTRL_FEATURES_REG 0x0F04
#define CDNS_XSPI_NUM_BANKS GENMASK(25, 24)
#define CDNS_XSPI_DMA_DATA_WIDTH BIT(21)
#define CDNS_XSPI_NUM_THREADS GENMASK(3, 0)
/* Controller version register */
#define CDNS_XSPI_CTRL_VERSION_REG 0x0F00
#define CDNS_XSPI_MAGIC_NUM GENMASK(31, 16)
#define CDNS_XSPI_CTRL_REV GENMASK(7, 0)
/* STIG Profile 1.0 instruction fields (split into registers) */
#define CDNS_XSPI_CMD_INSTR_TYPE GENMASK(6, 0)
#define CDNS_XSPI_CMD_P1_R1_ADDR0 GENMASK(31, 24)
#define CDNS_XSPI_CMD_P1_R2_ADDR1 GENMASK(7, 0)
#define CDNS_XSPI_CMD_P1_R2_ADDR2 GENMASK(15, 8)
#define CDNS_XSPI_CMD_P1_R2_ADDR3 GENMASK(23, 16)
#define CDNS_XSPI_CMD_P1_R2_ADDR4 GENMASK(31, 24)
#define CDNS_XSPI_CMD_P1_R3_ADDR5 GENMASK(7, 0)
#define CDNS_XSPI_CMD_P1_R3_CMD GENMASK(23, 16)
#define CDNS_XSPI_CMD_P1_R3_NUM_ADDR_BYTES GENMASK(30, 28)
#define CDNS_XSPI_CMD_P1_R4_ADDR_IOS GENMASK(1, 0)
#define CDNS_XSPI_CMD_P1_R4_CMD_IOS GENMASK(9, 8)
#define CDNS_XSPI_CMD_P1_R4_BANK GENMASK(14, 12)
/* STIG data sequence instruction fields (split into registers) */
#define CDNS_XSPI_CMD_DSEQ_R2_DCNT_L GENMASK(31, 16)
#define CDNS_XSPI_CMD_DSEQ_R3_DCNT_H GENMASK(15, 0)
#define CDNS_XSPI_CMD_DSEQ_R3_NUM_OF_DUMMY GENMASK(25, 20)
#define CDNS_XSPI_CMD_DSEQ_R4_BANK GENMASK(14, 12)
#define CDNS_XSPI_CMD_DSEQ_R4_DATA_IOS GENMASK(9, 8)
#define CDNS_XSPI_CMD_DSEQ_R4_DIR BIT(4)
/* STIG command status fields */
#define CDNS_XSPI_CMD_STATUS_COMPLETED BIT(15)
#define CDNS_XSPI_CMD_STATUS_FAILED BIT(14)
#define CDNS_XSPI_CMD_STATUS_DQS_ERROR BIT(3)
#define CDNS_XSPI_CMD_STATUS_CRC_ERROR BIT(2)
#define CDNS_XSPI_CMD_STATUS_BUS_ERROR BIT(1)
#define CDNS_XSPI_CMD_STATUS_INV_SEQ_ERROR BIT(0)
#define CDNS_XSPI_STIG_DONE_FLAG BIT(0)
#define CDNS_XSPI_TRD_STATUS 0x0104
/* Helper macros for filling command registers */
#define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_1(op, data_phase) ( \
FIELD_PREP(CDNS_XSPI_CMD_INSTR_TYPE, (data_phase) ? \
CDNS_XSPI_STIG_INSTR_TYPE_1 : CDNS_XSPI_STIG_INSTR_TYPE_0) | \
FIELD_PREP(CDNS_XSPI_CMD_P1_R1_ADDR0, (op)->addr.val & 0xff))
#define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_2(op) ( \
FIELD_PREP(CDNS_XSPI_CMD_P1_R2_ADDR1, ((op)->addr.val >> 8) & 0xFF) | \
FIELD_PREP(CDNS_XSPI_CMD_P1_R2_ADDR2, ((op)->addr.val >> 16) & 0xFF) | \
FIELD_PREP(CDNS_XSPI_CMD_P1_R2_ADDR3, ((op)->addr.val >> 24) & 0xFF) | \
FIELD_PREP(CDNS_XSPI_CMD_P1_R2_ADDR4, ((op)->addr.val >> 32) & 0xFF))
#define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op) ( \
FIELD_PREP(CDNS_XSPI_CMD_P1_R3_ADDR5, ((op)->addr.val >> 40) & 0xFF) | \
FIELD_PREP(CDNS_XSPI_CMD_P1_R3_CMD, (op)->cmd.opcode) | \
FIELD_PREP(CDNS_XSPI_CMD_P1_R3_NUM_ADDR_BYTES, (op)->addr.nbytes))
#define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_4(op, chipsel) ( \
FIELD_PREP(CDNS_XSPI_CMD_P1_R4_ADDR_IOS, ilog2((op)->addr.buswidth)) | \
FIELD_PREP(CDNS_XSPI_CMD_P1_R4_CMD_IOS, ilog2((op)->cmd.buswidth)) | \
FIELD_PREP(CDNS_XSPI_CMD_P1_R4_BANK, chipsel))
#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_1(op) \
FIELD_PREP(CDNS_XSPI_CMD_INSTR_TYPE, CDNS_XSPI_STIG_INSTR_TYPE_DATA_SEQ)
#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_2(op) \
FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R2_DCNT_L, (op)->data.nbytes & 0xFFFF)
#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op) ( \
FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_DCNT_H, \
((op)->data.nbytes >> 16) & 0xffff) | \
FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_NUM_OF_DUMMY, \
(op)->dummy.buswidth != 0 ? \
(((op)->dummy.nbytes * 8) / (op)->dummy.buswidth) : \
0))
#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_4(op, chipsel) ( \
FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R4_BANK, chipsel) | \
FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R4_DATA_IOS, \
ilog2((op)->data.buswidth)) | \
FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R4_DIR, \
((op)->data.dir == SPI_MEM_DATA_IN) ? \
CDNS_XSPI_STIG_CMD_DIR_READ : CDNS_XSPI_STIG_CMD_DIR_WRITE))
enum cdns_xspi_stig_instr_type {
CDNS_XSPI_STIG_INSTR_TYPE_0,
CDNS_XSPI_STIG_INSTR_TYPE_1,
CDNS_XSPI_STIG_INSTR_TYPE_DATA_SEQ = 127,
};
enum cdns_xspi_sdma_dir {
CDNS_XSPI_SDMA_DIR_READ,
CDNS_XSPI_SDMA_DIR_WRITE,
};
enum cdns_xspi_stig_cmd_dir {
CDNS_XSPI_STIG_CMD_DIR_READ,
CDNS_XSPI_STIG_CMD_DIR_WRITE,
};
struct cdns_xspi_dev {
struct platform_device *pdev;
struct device *dev;
void __iomem *iobase;
void __iomem *auxbase;
void __iomem *sdmabase;
int irq;
int cur_cs;
unsigned int sdmasize;
struct completion cmd_complete;
struct completion auto_cmd_complete;
struct completion sdma_complete;
bool sdma_error;
void *in_buffer;
const void *out_buffer;
u8 hw_num_banks;
};
static int cdns_xspi_wait_for_controller_idle(struct cdns_xspi_dev *cdns_xspi)
{
u32 ctrl_stat;
return readl_relaxed_poll_timeout(cdns_xspi->iobase +
CDNS_XSPI_CTRL_STATUS_REG,
ctrl_stat,
((ctrl_stat &
CDNS_XSPI_CTRL_BUSY) == 0),
100, 1000);
}
static void cdns_xspi_trigger_command(struct cdns_xspi_dev *cdns_xspi,
u32 cmd_regs[6])
{
writel(cmd_regs[5], cdns_xspi->iobase + CDNS_XSPI_CMD_REG_5);
writel(cmd_regs[4], cdns_xspi->iobase + CDNS_XSPI_CMD_REG_4);
writel(cmd_regs[3], cdns_xspi->iobase + CDNS_XSPI_CMD_REG_3);
writel(cmd_regs[2], cdns_xspi->iobase + CDNS_XSPI_CMD_REG_2);
writel(cmd_regs[1], cdns_xspi->iobase + CDNS_XSPI_CMD_REG_1);
writel(cmd_regs[0], cdns_xspi->iobase + CDNS_XSPI_CMD_REG_0);
}
static int cdns_xspi_check_command_status(struct cdns_xspi_dev *cdns_xspi)
{
int ret = 0;
u32 cmd_status = readl(cdns_xspi->iobase + CDNS_XSPI_CMD_STATUS_REG);
if (cmd_status & CDNS_XSPI_CMD_STATUS_COMPLETED) {
if ((cmd_status & CDNS_XSPI_CMD_STATUS_FAILED) != 0) {
if (cmd_status & CDNS_XSPI_CMD_STATUS_DQS_ERROR) {
dev_err(cdns_xspi->dev,
"Incorrect DQS pulses detected\n");
ret = -EPROTO;
}
if (cmd_status & CDNS_XSPI_CMD_STATUS_CRC_ERROR) {
dev_err(cdns_xspi->dev,
"CRC error received\n");
ret = -EPROTO;
}
if (cmd_status & CDNS_XSPI_CMD_STATUS_BUS_ERROR) {
dev_err(cdns_xspi->dev,
"Error resp on system DMA interface\n");
ret = -EPROTO;
}
if (cmd_status & CDNS_XSPI_CMD_STATUS_INV_SEQ_ERROR) {
dev_err(cdns_xspi->dev,
"Invalid command sequence detected\n");
ret = -EPROTO;
}
}
} else {
dev_err(cdns_xspi->dev, "Fatal err - command not completed\n");
ret = -EPROTO;
}
return ret;
}
static void cdns_xspi_set_interrupts(struct cdns_xspi_dev *cdns_xspi,
bool enabled)
{
u32 intr_enable;
intr_enable = readl(cdns_xspi->iobase + CDNS_XSPI_INTR_ENABLE_REG);
if (enabled)
intr_enable |= CDNS_XSPI_INTR_MASK;
else
intr_enable &= ~CDNS_XSPI_INTR_MASK;
writel(intr_enable, cdns_xspi->iobase + CDNS_XSPI_INTR_ENABLE_REG);
}
static int cdns_xspi_controller_init(struct cdns_xspi_dev *cdns_xspi)
{
u32 ctrl_ver;
u32 ctrl_features;
u16 hw_magic_num;
ctrl_ver = readl(cdns_xspi->iobase + CDNS_XSPI_CTRL_VERSION_REG);
hw_magic_num = FIELD_GET(CDNS_XSPI_MAGIC_NUM, ctrl_ver);
if (hw_magic_num != CDNS_XSPI_MAGIC_NUM_VALUE) {
dev_err(cdns_xspi->dev,
"Incorrect XSPI magic number: %x, expected: %x\n",
hw_magic_num, CDNS_XSPI_MAGIC_NUM_VALUE);
return -EIO;
}
ctrl_features = readl(cdns_xspi->iobase + CDNS_XSPI_CTRL_FEATURES_REG);
cdns_xspi->hw_num_banks = FIELD_GET(CDNS_XSPI_NUM_BANKS, ctrl_features);
cdns_xspi_set_interrupts(cdns_xspi, false);
return 0;
}
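/*
* Service an SDMA trigger: the controller reports the requested size and
* direction, and the data is moved to or from the SDMA port with PIO
* accesses of the caller-supplied buffers.
*/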
static void cdns_xspi_sdma_handle(struct cdns_xspi_dev *cdns_xspi)
{
u32 sdma_size, sdma_trd_info;
u8 sdma_dir;
sdma_size = readl(cdns_xspi->iobase + CDNS_XSPI_SDMA_SIZE_REG);
sdma_trd_info = readl(cdns_xspi->iobase + CDNS_XSPI_SDMA_TRD_INFO_REG);
sdma_dir = FIELD_GET(CDNS_XSPI_SDMA_DIR, sdma_trd_info);
switch (sdma_dir) {
case CDNS_XSPI_SDMA_DIR_READ:
ioread8_rep(cdns_xspi->sdmabase,
cdns_xspi->in_buffer, sdma_size);
break;
case CDNS_XSPI_SDMA_DIR_WRITE:
iowrite8_rep(cdns_xspi->sdmabase,
cdns_xspi->out_buffer, sdma_size);
break;
}
}
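/*
* Issue a STIG command: the P1 instruction registers carry the opcode and
* address, and, for ops with a data phase, a second data-sequence command
* follows. Data is shuttled through the SDMA window once the controller
* raises the SDMA trigger interrupt, and the result is checked via the
* command status register.
*/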
static int cdns_xspi_send_stig_command(struct cdns_xspi_dev *cdns_xspi,
const struct spi_mem_op *op,
bool data_phase)
{
u32 cmd_regs[6];
u32 cmd_status;
int ret;
ret = cdns_xspi_wait_for_controller_idle(cdns_xspi);
if (ret < 0)
return -EIO;
writel(FIELD_PREP(CDNS_XSPI_CTRL_WORK_MODE, CDNS_XSPI_WORK_MODE_STIG),
cdns_xspi->iobase + CDNS_XSPI_CTRL_CONFIG_REG);
cdns_xspi_set_interrupts(cdns_xspi, true);
cdns_xspi->sdma_error = false;
memset(cmd_regs, 0, sizeof(cmd_regs));
cmd_regs[1] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_1(op, data_phase);
cmd_regs[2] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_2(op);
cmd_regs[3] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op);
cmd_regs[4] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_4(op,
cdns_xspi->cur_cs);
cdns_xspi_trigger_command(cdns_xspi, cmd_regs);
if (data_phase) {
cmd_regs[0] = CDNS_XSPI_STIG_DONE_FLAG;
cmd_regs[1] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_1(op);
cmd_regs[2] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_2(op);
cmd_regs[3] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op);
cmd_regs[4] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_4(op,
cdns_xspi->cur_cs);
cdns_xspi->in_buffer = op->data.buf.in;
cdns_xspi->out_buffer = op->data.buf.out;
cdns_xspi_trigger_command(cdns_xspi, cmd_regs);
wait_for_completion(&cdns_xspi->sdma_complete);
if (cdns_xspi->sdma_error) {
cdns_xspi_set_interrupts(cdns_xspi, false);
return -EIO;
}
cdns_xspi_sdma_handle(cdns_xspi);
}
wait_for_completion(&cdns_xspi->cmd_complete);
cdns_xspi_set_interrupts(cdns_xspi, false);
cmd_status = cdns_xspi_check_command_status(cdns_xspi);
if (cmd_status)
return -EPROTO;
return 0;
}
static int cdns_xspi_mem_op(struct cdns_xspi_dev *cdns_xspi,
struct spi_mem *mem,
const struct spi_mem_op *op)
{
enum spi_mem_data_dir dir = op->data.dir;
if (cdns_xspi->cur_cs != spi_get_chipselect(mem->spi, 0))
cdns_xspi->cur_cs = spi_get_chipselect(mem->spi, 0);
return cdns_xspi_send_stig_command(cdns_xspi, op,
(dir != SPI_MEM_NO_DATA));
}
static int cdns_xspi_mem_op_execute(struct spi_mem *mem,
const struct spi_mem_op *op)
{
struct cdns_xspi_dev *cdns_xspi =
spi_controller_get_devdata(mem->spi->controller);
return cdns_xspi_mem_op(cdns_xspi, mem, op);
}
static int cdns_xspi_adjust_mem_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
struct cdns_xspi_dev *cdns_xspi =
spi_controller_get_devdata(mem->spi->controller);
op->data.nbytes = clamp_val(op->data.nbytes, 0, cdns_xspi->sdmasize);
return 0;
}
static const struct spi_controller_mem_ops cadence_xspi_mem_ops = {
.exec_op = cdns_xspi_mem_op_execute,
.adjust_op_size = cdns_xspi_adjust_mem_op_size,
};
static irqreturn_t cdns_xspi_irq_handler(int this_irq, void *dev)
{
struct cdns_xspi_dev *cdns_xspi = dev;
u32 irq_status;
irqreturn_t result = IRQ_NONE;
irq_status = readl(cdns_xspi->iobase + CDNS_XSPI_INTR_STATUS_REG);
writel(irq_status, cdns_xspi->iobase + CDNS_XSPI_INTR_STATUS_REG);
if (irq_status &
(CDNS_XSPI_SDMA_ERROR | CDNS_XSPI_SDMA_TRIGGER |
CDNS_XSPI_STIG_DONE)) {
if (irq_status & CDNS_XSPI_SDMA_ERROR) {
dev_err(cdns_xspi->dev,
"Slave DMA transaction error\n");
cdns_xspi->sdma_error = true;
complete(&cdns_xspi->sdma_complete);
}
if (irq_status & CDNS_XSPI_SDMA_TRIGGER)
complete(&cdns_xspi->sdma_complete);
if (irq_status & CDNS_XSPI_STIG_DONE)
complete(&cdns_xspi->cmd_complete);
result = IRQ_HANDLED;
}
irq_status = readl(cdns_xspi->iobase + CDNS_XSPI_TRD_COMP_INTR_STATUS);
if (irq_status) {
writel(irq_status,
cdns_xspi->iobase + CDNS_XSPI_TRD_COMP_INTR_STATUS);
complete(&cdns_xspi->auto_cmd_complete);
result = IRQ_HANDLED;
}
return result;
}
static int cdns_xspi_of_get_plat_data(struct platform_device *pdev)
{
struct device_node *node_prop = pdev->dev.of_node;
struct device_node *node_child;
unsigned int cs;
for_each_child_of_node(node_prop, node_child) {
if (!of_device_is_available(node_child))
continue;
if (of_property_read_u32(node_child, "reg", &cs)) {
dev_err(&pdev->dev, "Couldn't get memory chip select\n");
of_node_put(node_child);
return -ENXIO;
} else if (cs >= CDNS_XSPI_MAX_BANKS) {
dev_err(&pdev->dev, "reg (cs) parameter value too large\n");
of_node_put(node_child);
return -ENXIO;
}
}
return 0;
}
static void cdns_xspi_print_phy_config(struct cdns_xspi_dev *cdns_xspi)
{
struct device *dev = cdns_xspi->dev;
dev_info(dev, "PHY configuration\n");
dev_info(dev, " * xspi_dll_phy_ctrl: %08x\n",
readl(cdns_xspi->iobase + CDNS_XSPI_DLL_PHY_CTRL));
dev_info(dev, " * phy_dq_timing: %08x\n",
readl(cdns_xspi->auxbase + CDNS_XSPI_CCP_PHY_DQ_TIMING));
dev_info(dev, " * phy_dqs_timing: %08x\n",
readl(cdns_xspi->auxbase + CDNS_XSPI_CCP_PHY_DQS_TIMING));
dev_info(dev, " * phy_gate_loopback_ctrl: %08x\n",
readl(cdns_xspi->auxbase + CDNS_XSPI_CCP_PHY_GATE_LPBCK_CTRL));
dev_info(dev, " * phy_dll_slave_ctrl: %08x\n",
readl(cdns_xspi->auxbase + CDNS_XSPI_CCP_PHY_DLL_SLAVE_CTRL));
}
static int cdns_xspi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct spi_controller *host = NULL;
struct cdns_xspi_dev *cdns_xspi = NULL;
struct resource *res;
int ret;
host = devm_spi_alloc_host(dev, sizeof(*cdns_xspi));
if (!host)
return -ENOMEM;
host->mode_bits = SPI_3WIRE | SPI_TX_DUAL | SPI_TX_QUAD |
SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_OCTAL | SPI_RX_OCTAL |
SPI_MODE_0 | SPI_MODE_3;
host->mem_ops = &cadence_xspi_mem_ops;
host->dev.of_node = pdev->dev.of_node;
host->bus_num = -1;
platform_set_drvdata(pdev, host);
cdns_xspi = spi_controller_get_devdata(host);
cdns_xspi->pdev = pdev;
cdns_xspi->dev = &pdev->dev;
cdns_xspi->cur_cs = 0;
init_completion(&cdns_xspi->cmd_complete);
init_completion(&cdns_xspi->auto_cmd_complete);
init_completion(&cdns_xspi->sdma_complete);
ret = cdns_xspi_of_get_plat_data(pdev);
if (ret)
return -ENODEV;
cdns_xspi->iobase = devm_platform_ioremap_resource_byname(pdev, "io");
if (IS_ERR(cdns_xspi->iobase)) {
dev_err(dev, "Failed to remap controller base address\n");
return PTR_ERR(cdns_xspi->iobase);
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sdma");
cdns_xspi->sdmabase = devm_ioremap_resource(dev, res);
if (IS_ERR(cdns_xspi->sdmabase))
return PTR_ERR(cdns_xspi->sdmabase);
cdns_xspi->sdmasize = resource_size(res);
cdns_xspi->auxbase = devm_platform_ioremap_resource_byname(pdev, "aux");
if (IS_ERR(cdns_xspi->auxbase)) {
dev_err(dev, "Failed to remap AUX address\n");
return PTR_ERR(cdns_xspi->auxbase);
}
cdns_xspi->irq = platform_get_irq(pdev, 0);
if (cdns_xspi->irq < 0)
return -ENXIO;
ret = devm_request_irq(dev, cdns_xspi->irq, cdns_xspi_irq_handler,
IRQF_SHARED, pdev->name, cdns_xspi);
if (ret) {
dev_err(dev, "Failed to request IRQ: %d\n", cdns_xspi->irq);
return ret;
}
cdns_xspi_print_phy_config(cdns_xspi);
ret = cdns_xspi_controller_init(cdns_xspi);
if (ret) {
dev_err(dev, "Failed to initialize controller\n");
return ret;
}
host->num_chipselect = 1 << cdns_xspi->hw_num_banks;
ret = devm_spi_register_controller(dev, host);
if (ret) {
dev_err(dev, "Failed to register SPI host\n");
return ret;
}
dev_info(dev, "Successfully registered SPI host\n");
return 0;
}
static const struct of_device_id cdns_xspi_of_match[] = {
{
.compatible = "cdns,xspi-nor",
},
{ /* end of table */}
};
MODULE_DEVICE_TABLE(of, cdns_xspi_of_match);
static struct platform_driver cdns_xspi_platform_driver = {
.probe = cdns_xspi_probe,
.remove = NULL,
.driver = {
.name = CDNS_XSPI_NAME,
.of_match_table = cdns_xspi_of_match,
},
};
module_platform_driver(cdns_xspi_platform_driver);
MODULE_DESCRIPTION("Cadence XSPI Controller Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" CDNS_XSPI_NAME);
MODULE_AUTHOR("Konrad Kociolek <[email protected]>");
MODULE_AUTHOR("Jayshri Pawar <[email protected]>");
MODULE_AUTHOR("Parshuram Thombare <[email protected]>");
| linux-master | drivers/spi/spi-cadence-xspi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Analog Devices AD-FMCOMMS1-EBZ board I2C-SPI bridge driver
*
* Copyright 2012 Analog Devices Inc.
* Author: Lars-Peter Clausen <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/spi/spi.h>
#include <asm/unaligned.h>
#define SPI_XCOMM_SETTINGS_LEN_OFFSET 10
#define SPI_XCOMM_SETTINGS_3WIRE BIT(6)
#define SPI_XCOMM_SETTINGS_CS_HIGH BIT(5)
#define SPI_XCOMM_SETTINGS_SAMPLE_END BIT(4)
#define SPI_XCOMM_SETTINGS_CPHA BIT(3)
#define SPI_XCOMM_SETTINGS_CPOL BIT(2)
#define SPI_XCOMM_SETTINGS_CLOCK_DIV_MASK 0x3
#define SPI_XCOMM_SETTINGS_CLOCK_DIV_64 0x2
#define SPI_XCOMM_SETTINGS_CLOCK_DIV_16 0x1
#define SPI_XCOMM_SETTINGS_CLOCK_DIV_4 0x0
#define SPI_XCOMM_CMD_UPDATE_CONFIG 0x03
#define SPI_XCOMM_CMD_WRITE 0x04
#define SPI_XCOMM_CLOCK 48000000
struct spi_xcomm {
struct i2c_client *i2c;
uint16_t settings;
uint16_t chipselect;
unsigned int current_speed;
uint8_t buf[63];
};
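/*
* Push the cached settings and chip-select state to the bridge as a single
* I2C write: one command byte followed by the 16-bit settings word and the
* 16-bit chip-select mask, both big-endian.
*/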
static int spi_xcomm_sync_config(struct spi_xcomm *spi_xcomm, unsigned int len)
{
uint16_t settings;
uint8_t *buf = spi_xcomm->buf;
settings = spi_xcomm->settings;
settings |= len << SPI_XCOMM_SETTINGS_LEN_OFFSET;
buf[0] = SPI_XCOMM_CMD_UPDATE_CONFIG;
put_unaligned_be16(settings, &buf[1]);
put_unaligned_be16(spi_xcomm->chipselect, &buf[3]);
return i2c_master_send(spi_xcomm->i2c, buf, 5);
}
static void spi_xcomm_chipselect(struct spi_xcomm *spi_xcomm,
struct spi_device *spi, int is_active)
{
unsigned long cs = spi_get_chipselect(spi, 0);
uint16_t chipselect = spi_xcomm->chipselect;
if (is_active)
chipselect |= BIT(cs);
else
chipselect &= ~BIT(cs);
spi_xcomm->chipselect = chipselect;
}
static int spi_xcomm_setup_transfer(struct spi_xcomm *spi_xcomm,
struct spi_device *spi, struct spi_transfer *t, unsigned int *settings)
{
if (t->len > 62)
return -EINVAL;
if (t->speed_hz != spi_xcomm->current_speed) {
unsigned int divider;
divider = DIV_ROUND_UP(SPI_XCOMM_CLOCK, t->speed_hz);
if (divider >= 64)
*settings |= SPI_XCOMM_SETTINGS_CLOCK_DIV_64;
else if (divider >= 16)
*settings |= SPI_XCOMM_SETTINGS_CLOCK_DIV_16;
else
*settings |= SPI_XCOMM_SETTINGS_CLOCK_DIV_4;
spi_xcomm->current_speed = t->speed_hz;
}
if (spi->mode & SPI_CPOL)
*settings |= SPI_XCOMM_SETTINGS_CPOL;
else
*settings &= ~SPI_XCOMM_SETTINGS_CPOL;
if (spi->mode & SPI_CPHA)
*settings &= ~SPI_XCOMM_SETTINGS_CPHA;
else
*settings |= SPI_XCOMM_SETTINGS_CPHA;
if (spi->mode & SPI_3WIRE)
*settings |= SPI_XCOMM_SETTINGS_3WIRE;
else
*settings &= ~SPI_XCOMM_SETTINGS_3WIRE;
return 0;
}
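/*
* Half-duplex data phase: TX buffers are sent as an I2C write prefixed with
* the write command byte, RX buffers are read back directly with an I2C
* read. Returns the number of payload bytes on success.
*/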
static int spi_xcomm_txrx_bufs(struct spi_xcomm *spi_xcomm,
struct spi_device *spi, struct spi_transfer *t)
{
int ret;
if (t->tx_buf) {
spi_xcomm->buf[0] = SPI_XCOMM_CMD_WRITE;
memcpy(spi_xcomm->buf + 1, t->tx_buf, t->len);
ret = i2c_master_send(spi_xcomm->i2c, spi_xcomm->buf, t->len + 1);
if (ret < 0)
return ret;
else if (ret != t->len + 1)
return -EIO;
} else if (t->rx_buf) {
ret = i2c_master_recv(spi_xcomm->i2c, t->rx_buf, t->len);
if (ret < 0)
return ret;
else if (ret != t->len)
return -EIO;
}
return t->len;
}
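/*
* Message pump: assert the chip select, then for each transfer update the
* bridge configuration (only when it changed, except before reads, which
* always need the transfer length programmed) and run the data phase.
*/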
static int spi_xcomm_transfer_one(struct spi_master *master,
struct spi_message *msg)
{
struct spi_xcomm *spi_xcomm = spi_master_get_devdata(master);
unsigned int settings = spi_xcomm->settings;
struct spi_device *spi = msg->spi;
unsigned cs_change = 0;
struct spi_transfer *t;
bool is_first = true;
int status = 0;
bool is_last;
spi_xcomm_chipselect(spi_xcomm, spi, true);
list_for_each_entry(t, &msg->transfers, transfer_list) {
if (!t->tx_buf && !t->rx_buf && t->len) {
status = -EINVAL;
break;
}
status = spi_xcomm_setup_transfer(spi_xcomm, spi, t, &settings);
if (status < 0)
break;
is_last = list_is_last(&t->transfer_list, &msg->transfers);
cs_change = t->cs_change;
if (cs_change ^ is_last)
settings |= BIT(5);
else
settings &= ~BIT(5);
if (t->rx_buf) {
spi_xcomm->settings = settings;
status = spi_xcomm_sync_config(spi_xcomm, t->len);
if (status < 0)
break;
} else if (settings != spi_xcomm->settings || is_first) {
spi_xcomm->settings = settings;
status = spi_xcomm_sync_config(spi_xcomm, 0);
if (status < 0)
break;
}
if (t->len) {
status = spi_xcomm_txrx_bufs(spi_xcomm, spi, t);
if (status < 0)
break;
if (status > 0)
msg->actual_length += status;
}
status = 0;
spi_transfer_delay_exec(t);
is_first = false;
}
if (status != 0 || !cs_change)
spi_xcomm_chipselect(spi_xcomm, spi, false);
msg->status = status;
spi_finalize_current_message(master);
return status;
}
static int spi_xcomm_probe(struct i2c_client *i2c)
{
struct spi_xcomm *spi_xcomm;
struct spi_master *master;
int ret;
master = spi_alloc_master(&i2c->dev, sizeof(*spi_xcomm));
if (!master)
return -ENOMEM;
spi_xcomm = spi_master_get_devdata(master);
spi_xcomm->i2c = i2c;
master->num_chipselect = 16;
master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_3WIRE;
master->bits_per_word_mask = SPI_BPW_MASK(8);
master->flags = SPI_CONTROLLER_HALF_DUPLEX;
master->transfer_one_message = spi_xcomm_transfer_one;
master->dev.of_node = i2c->dev.of_node;
i2c_set_clientdata(i2c, master);
ret = devm_spi_register_master(&i2c->dev, master);
if (ret < 0)
spi_master_put(master);
return ret;
}
static const struct i2c_device_id spi_xcomm_ids[] = {
{ "spi-xcomm" },
{ },
};
MODULE_DEVICE_TABLE(i2c, spi_xcomm_ids);
static struct i2c_driver spi_xcomm_driver = {
.driver = {
.name = "spi-xcomm",
},
.id_table = spi_xcomm_ids,
.probe = spi_xcomm_probe,
};
module_i2c_driver(spi_xcomm_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Lars-Peter Clausen <[email protected]>");
MODULE_DESCRIPTION("Analog Devices AD-FMCOMMS1-EBZ board I2C-SPI bridge driver");
| linux-master | drivers/spi/spi-xcomm.c |
/*
* Copyright (C) 2017 Spreadtrum Communications Inc.
*
* SPDX-License-Identifier: GPL-2.0
*/
#include <linux/delay.h>
#include <linux/hwspinlock.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/spi/spi.h>
#include <linux/sizes.h>
/* Registers definitions for ADI controller */
#define REG_ADI_CTRL0 0x4
#define REG_ADI_CHN_PRIL 0x8
#define REG_ADI_CHN_PRIH 0xc
#define REG_ADI_INT_EN 0x10
#define REG_ADI_INT_RAW 0x14
#define REG_ADI_INT_MASK 0x18
#define REG_ADI_INT_CLR 0x1c
#define REG_ADI_GSSI_CFG0 0x20
#define REG_ADI_GSSI_CFG1 0x24
#define REG_ADI_RD_CMD 0x28
#define REG_ADI_RD_DATA 0x2c
#define REG_ADI_ARM_FIFO_STS 0x30
#define REG_ADI_STS 0x34
#define REG_ADI_EVT_FIFO_STS 0x38
#define REG_ADI_ARM_CMD_STS 0x3c
#define REG_ADI_CHN_EN 0x40
#define REG_ADI_CHN_ADDR(id) (0x44 + ((id) - 2) * 4)
#define REG_ADI_CHN_EN1 0x20c
/* Bits definitions for register REG_ADI_GSSI_CFG0 */
#define BIT_CLK_ALL_ON BIT(30)
/* Bits definitions for register REG_ADI_RD_DATA */
#define BIT_RD_CMD_BUSY BIT(31)
#define RD_ADDR_SHIFT 16
#define RD_VALUE_MASK GENMASK(15, 0)
#define RD_ADDR_MASK GENMASK(30, 16)
/* Bits definitions for register REG_ADI_ARM_FIFO_STS */
#define BIT_FIFO_FULL BIT(11)
#define BIT_FIFO_EMPTY BIT(10)
/*
* ADI slave devices include RTC, ADC, regulator, charger, thermal and so on.
* ADI supports 12/14bit addresses for r2p0, and an additional 17bit mode
* for r3p0 or later versions. Since bit[1:0] are always zero, the spec
* describes them as 10/12/15bit address modes.
* The 10bit mode supports a single slave; the 12/15bit modes support 3
* slaves, with the high two bits holding the slave_id.
* The slave device address offset is 0x8000 for the 10/12bit address
* modes, and 0x20000 for the 15bit mode.
*/
#define ADI_10BIT_SLAVE_ADDR_SIZE SZ_4K
#define ADI_10BIT_SLAVE_OFFSET 0x8000
#define ADI_12BIT_SLAVE_ADDR_SIZE SZ_16K
#define ADI_12BIT_SLAVE_OFFSET 0x8000
#define ADI_15BIT_SLAVE_ADDR_SIZE SZ_128K
#define ADI_15BIT_SLAVE_OFFSET 0x20000
/* Timeout (ms) for the trylock of hardware spinlocks */
#define ADI_HWSPINLOCK_TIMEOUT 5000
/*
* ADI controller has 50 channels including 2 software channels
* and 48 hardware channels.
*/
#define ADI_HW_CHNS 50
#define ADI_FIFO_DRAIN_TIMEOUT 1000
#define ADI_READ_TIMEOUT 2000
/*
* Read back address from REG_ADI_RD_DATA bit[30:16] which maps to:
* REG_ADI_RD_CMD bit[14:0] for r2p0
* REG_ADI_RD_CMD bit[16:2] for r3p0
*/
#define RDBACK_ADDR_MASK_R2 GENMASK(14, 0)
#define RDBACK_ADDR_MASK_R3 GENMASK(16, 2)
#define RDBACK_ADDR_SHIFT_R3 2
/* Registers definitions for PMIC watchdog controller */
#define REG_WDG_LOAD_LOW 0x0
#define REG_WDG_LOAD_HIGH 0x4
#define REG_WDG_CTRL 0x8
#define REG_WDG_LOCK 0x20
/* Bits definitions for register REG_WDG_CTRL */
#define BIT_WDG_RUN BIT(1)
#define BIT_WDG_NEW BIT(2)
#define BIT_WDG_RST BIT(3)
/* Bits definitions for register REG_MODULE_EN */
#define BIT_WDG_EN BIT(2)
/* Registers definitions for PMIC */
#define PMIC_RST_STATUS 0xee8
#define PMIC_MODULE_EN 0xc08
#define PMIC_CLK_EN 0xc18
#define PMIC_WDG_BASE 0x80
/* Definition of PMIC reset status register */
#define HWRST_STATUS_SECURITY 0x02
#define HWRST_STATUS_RECOVERY 0x20
#define HWRST_STATUS_NORMAL 0x40
#define HWRST_STATUS_ALARM 0x50
#define HWRST_STATUS_SLEEP 0x60
#define HWRST_STATUS_FASTBOOT 0x30
#define HWRST_STATUS_SPECIAL 0x70
#define HWRST_STATUS_PANIC 0x80
#define HWRST_STATUS_CFTREBOOT 0x90
#define HWRST_STATUS_AUTODLOADER 0xa0
#define HWRST_STATUS_IQMODE 0xb0
#define HWRST_STATUS_SPRDISK 0xc0
#define HWRST_STATUS_FACTORYTEST 0xe0
#define HWRST_STATUS_WATCHDOG 0xf0
/* Use default timeout 50 ms that converts to watchdog values */
#define WDG_LOAD_VAL ((50 * 32768) / 1000)
#define WDG_LOAD_MASK GENMASK(15, 0)
#define WDG_UNLOCK_KEY 0xe551
struct sprd_adi_wdg {
u32 base;
u32 rst_sts;
u32 wdg_en;
u32 wdg_clk;
};
struct sprd_adi_data {
u32 slave_offset;
u32 slave_addr_size;
int (*read_check)(u32 val, u32 reg);
int (*restart)(struct notifier_block *this,
unsigned long mode, void *cmd);
void (*wdg_rst)(void *p);
};
struct sprd_adi {
struct spi_controller *ctlr;
struct device *dev;
void __iomem *base;
struct hwspinlock *hwlock;
unsigned long slave_vbase;
unsigned long slave_pbase;
struct notifier_block restart_handler;
const struct sprd_adi_data *data;
};
static int sprd_adi_check_addr(struct sprd_adi *sadi, u32 reg)
{
if (reg >= sadi->data->slave_addr_size) {
dev_err(sadi->dev,
"slave address offset is incorrect, reg = 0x%x\n",
reg);
return -EINVAL;
}
return 0;
}
static int sprd_adi_drain_fifo(struct sprd_adi *sadi)
{
u32 timeout = ADI_FIFO_DRAIN_TIMEOUT;
u32 sts;
do {
sts = readl_relaxed(sadi->base + REG_ADI_ARM_FIFO_STS);
if (sts & BIT_FIFO_EMPTY)
break;
cpu_relax();
} while (--timeout);
if (timeout == 0) {
dev_err(sadi->dev, "drain write fifo timeout\n");
return -EBUSY;
}
return 0;
}
static int sprd_adi_fifo_is_full(struct sprd_adi *sadi)
{
return readl_relaxed(sadi->base + REG_ADI_ARM_FIFO_STS) & BIT_FIFO_FULL;
}
static int sprd_adi_read_check(u32 val, u32 addr)
{
u32 rd_addr;
rd_addr = (val & RD_ADDR_MASK) >> RD_ADDR_SHIFT;
if (rd_addr != addr) {
pr_err("ADI read error, addr = 0x%x, val = 0x%x\n", addr, val);
return -EIO;
}
return 0;
}
static int sprd_adi_read_check_r2(u32 val, u32 reg)
{
return sprd_adi_read_check(val, reg & RDBACK_ADDR_MASK_R2);
}
static int sprd_adi_read_check_r3(u32 val, u32 reg)
{
return sprd_adi_read_check(val, (reg & RDBACK_ADDR_MASK_R3) >> RDBACK_ADDR_SHIFT_R3);
}
static int sprd_adi_read(struct sprd_adi *sadi, u32 reg, u32 *read_val)
{
int read_timeout = ADI_READ_TIMEOUT;
unsigned long flags;
u32 val;
int ret = 0;
if (sadi->hwlock) {
ret = hwspin_lock_timeout_irqsave(sadi->hwlock,
ADI_HWSPINLOCK_TIMEOUT,
&flags);
if (ret) {
dev_err(sadi->dev, "get the hw lock failed\n");
return ret;
}
}
ret = sprd_adi_check_addr(sadi, reg);
if (ret)
goto out;
/*
* Write the slave address offset to be read into the RD_CMD register;
* the ADI controller will then start the transfer automatically.
*/
writel_relaxed(reg, sadi->base + REG_ADI_RD_CMD);
/*
* Wait read operation complete, the BIT_RD_CMD_BUSY will be set
* simultaneously when writing read command to register, and the
* BIT_RD_CMD_BUSY will be cleared after the read operation is
* completed.
*/
do {
val = readl_relaxed(sadi->base + REG_ADI_RD_DATA);
if (!(val & BIT_RD_CMD_BUSY))
break;
cpu_relax();
} while (--read_timeout);
if (read_timeout == 0) {
dev_err(sadi->dev, "ADI read timeout\n");
ret = -EBUSY;
goto out;
}
/*
* Before ADI r5p0 the return value includes both the data and the read
* register address: bits 0 to 15 are the data, and bits 16 to 30 are the
* read register address. We can therefore check the returned register
* address to validate the data.
*/
if (sadi->data->read_check) {
ret = sadi->data->read_check(val, reg);
if (ret < 0)
goto out;
}
*read_val = val & RD_VALUE_MASK;
out:
if (sadi->hwlock)
hwspin_unlock_irqrestore(sadi->hwlock, &flags);
return ret;
}
static int sprd_adi_write(struct sprd_adi *sadi, u32 reg, u32 val)
{
u32 timeout = ADI_FIFO_DRAIN_TIMEOUT;
unsigned long flags;
int ret;
if (sadi->hwlock) {
ret = hwspin_lock_timeout_irqsave(sadi->hwlock,
ADI_HWSPINLOCK_TIMEOUT,
&flags);
if (ret) {
dev_err(sadi->dev, "get the hw lock failed\n");
return ret;
}
}
ret = sprd_adi_check_addr(sadi, reg);
if (ret)
goto out;
ret = sprd_adi_drain_fifo(sadi);
if (ret < 0)
goto out;
/*
* Wait until the write FIFO is not full before writing data to the PMIC
* registers.
*/
do {
if (!sprd_adi_fifo_is_full(sadi)) {
/* The write must go through the slave's virtual register address. */
writel_relaxed(val, (void __iomem *)(sadi->slave_vbase + reg));
break;
}
cpu_relax();
} while (--timeout);
if (timeout == 0) {
dev_err(sadi->dev, "write fifo is full\n");
ret = -EBUSY;
}
out:
if (sadi->hwlock)
hwspin_unlock_irqrestore(sadi->hwlock, &flags);
return ret;
}
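/*
* ADI "SPI" transfer convention: a read puts the register offset in rx_buf
* and gets the value back in the same word, while a write passes a
* {reg, value} pair in tx_buf.
*/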
static int sprd_adi_transfer_one(struct spi_controller *ctlr,
struct spi_device *spi_dev,
struct spi_transfer *t)
{
struct sprd_adi *sadi = spi_controller_get_devdata(ctlr);
u32 reg, val;
int ret;
if (t->rx_buf) {
reg = *(u32 *)t->rx_buf;
ret = sprd_adi_read(sadi, reg, &val);
*(u32 *)t->rx_buf = val;
} else if (t->tx_buf) {
u32 *p = (u32 *)t->tx_buf;
reg = *p++;
val = *p;
ret = sprd_adi_write(sadi, reg, val);
} else {
dev_err(sadi->dev, "no buffer for transfer\n");
ret = -EINVAL;
}
return ret;
}
static void sprd_adi_set_wdt_rst_mode(void *p)
{
#if IS_ENABLED(CONFIG_SPRD_WATCHDOG)
u32 val;
struct sprd_adi *sadi = (struct sprd_adi *)p;
/* Init watchdog reset mode */
sprd_adi_read(sadi, PMIC_RST_STATUS, &val);
val |= HWRST_STATUS_WATCHDOG;
sprd_adi_write(sadi, PMIC_RST_STATUS, val);
#endif
}
static int sprd_adi_restart(struct notifier_block *this, unsigned long mode,
void *cmd, struct sprd_adi_wdg *wdg)
{
struct sprd_adi *sadi = container_of(this, struct sprd_adi,
restart_handler);
u32 val, reboot_mode = 0;
if (!cmd)
reboot_mode = HWRST_STATUS_NORMAL;
else if (!strncmp(cmd, "recovery", 8))
reboot_mode = HWRST_STATUS_RECOVERY;
else if (!strncmp(cmd, "alarm", 5))
reboot_mode = HWRST_STATUS_ALARM;
else if (!strncmp(cmd, "fastsleep", 9))
reboot_mode = HWRST_STATUS_SLEEP;
else if (!strncmp(cmd, "bootloader", 10))
reboot_mode = HWRST_STATUS_FASTBOOT;
else if (!strncmp(cmd, "panic", 5))
reboot_mode = HWRST_STATUS_PANIC;
else if (!strncmp(cmd, "special", 7))
reboot_mode = HWRST_STATUS_SPECIAL;
else if (!strncmp(cmd, "cftreboot", 9))
reboot_mode = HWRST_STATUS_CFTREBOOT;
else if (!strncmp(cmd, "autodloader", 11))
reboot_mode = HWRST_STATUS_AUTODLOADER;
else if (!strncmp(cmd, "iqmode", 6))
reboot_mode = HWRST_STATUS_IQMODE;
else if (!strncmp(cmd, "sprdisk", 7))
reboot_mode = HWRST_STATUS_SPRDISK;
else if (!strncmp(cmd, "tospanic", 8))
reboot_mode = HWRST_STATUS_SECURITY;
else if (!strncmp(cmd, "factorytest", 11))
reboot_mode = HWRST_STATUS_FACTORYTEST;
else
reboot_mode = HWRST_STATUS_NORMAL;
/* Record the reboot mode */
sprd_adi_read(sadi, wdg->rst_sts, &val);
val &= ~HWRST_STATUS_WATCHDOG;
val |= reboot_mode;
sprd_adi_write(sadi, wdg->rst_sts, val);
/* Enable the interface clock of the watchdog */
sprd_adi_read(sadi, wdg->wdg_en, &val);
val |= BIT_WDG_EN;
sprd_adi_write(sadi, wdg->wdg_en, val);
/* Enable the work clock of the watchdog */
sprd_adi_read(sadi, wdg->wdg_clk, &val);
val |= BIT_WDG_EN;
sprd_adi_write(sadi, wdg->wdg_clk, val);
/* Unlock the watchdog */
sprd_adi_write(sadi, wdg->base + REG_WDG_LOCK, WDG_UNLOCK_KEY);
sprd_adi_read(sadi, wdg->base + REG_WDG_CTRL, &val);
val |= BIT_WDG_NEW;
sprd_adi_write(sadi, wdg->base + REG_WDG_CTRL, val);
/* Load the watchdog timeout value, 50ms is always enough. */
sprd_adi_write(sadi, wdg->base + REG_WDG_LOAD_HIGH, 0);
sprd_adi_write(sadi, wdg->base + REG_WDG_LOAD_LOW,
WDG_LOAD_VAL & WDG_LOAD_MASK);
/* Start the watchdog to reset system */
sprd_adi_read(sadi, wdg->base + REG_WDG_CTRL, &val);
val |= BIT_WDG_RUN | BIT_WDG_RST;
sprd_adi_write(sadi, wdg->base + REG_WDG_CTRL, val);
/* Lock the watchdog */
sprd_adi_write(sadi, wdg->base + REG_WDG_LOCK, ~WDG_UNLOCK_KEY);
mdelay(1000);
dev_emerg(sadi->dev, "Unable to restart system\n");
return NOTIFY_DONE;
}
static int sprd_adi_restart_sc9860(struct notifier_block *this,
unsigned long mode, void *cmd)
{
struct sprd_adi_wdg wdg = {
.base = PMIC_WDG_BASE,
.rst_sts = PMIC_RST_STATUS,
.wdg_en = PMIC_MODULE_EN,
.wdg_clk = PMIC_CLK_EN,
};
return sprd_adi_restart(this, mode, cmd, &wdg);
}
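/*
* Controller init: default channel priorities, automatic clock gating, and
* optional hardware channel routing taken from the "sprd,hw-channels"
* property, which lists <channel-id channel-config> pairs.
*/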
static void sprd_adi_hw_init(struct sprd_adi *sadi)
{
struct device_node *np = sadi->dev->of_node;
int i, size, chn_cnt;
const __be32 *list;
u32 tmp;
/* Set all channels as default priority */
writel_relaxed(0, sadi->base + REG_ADI_CHN_PRIL);
writel_relaxed(0, sadi->base + REG_ADI_CHN_PRIH);
/* Set clock auto gate mode */
tmp = readl_relaxed(sadi->base + REG_ADI_GSSI_CFG0);
tmp &= ~BIT_CLK_ALL_ON;
writel_relaxed(tmp, sadi->base + REG_ADI_GSSI_CFG0);
/* Set hardware channels setting */
list = of_get_property(np, "sprd,hw-channels", &size);
if (!list || !size) {
dev_info(sadi->dev, "no hw channels setting in node\n");
return;
}
chn_cnt = size / 8;
for (i = 0; i < chn_cnt; i++) {
u32 value;
u32 chn_id = be32_to_cpu(*list++);
u32 chn_config = be32_to_cpu(*list++);
/* Channel 0 and 1 are software channels */
if (chn_id < 2)
continue;
writel_relaxed(chn_config, sadi->base +
REG_ADI_CHN_ADDR(chn_id));
if (chn_id < 32) {
value = readl_relaxed(sadi->base + REG_ADI_CHN_EN);
value |= BIT(chn_id);
writel_relaxed(value, sadi->base + REG_ADI_CHN_EN);
} else if (chn_id < ADI_HW_CHNS) {
value = readl_relaxed(sadi->base + REG_ADI_CHN_EN1);
value |= BIT(chn_id - 32);
writel_relaxed(value, sadi->base + REG_ADI_CHN_EN1);
}
}
}
static int sprd_adi_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
const struct sprd_adi_data *data;
struct spi_controller *ctlr;
struct sprd_adi *sadi;
struct resource *res;
u16 num_chipselect;
int ret;
if (!np) {
dev_err(&pdev->dev, "can not find the adi bus node\n");
return -ENODEV;
}
data = of_device_get_match_data(&pdev->dev);
if (!data) {
dev_err(&pdev->dev, "no matching driver data found\n");
return -EINVAL;
}
pdev->id = of_alias_get_id(np, "spi");
num_chipselect = of_get_child_count(np);
ctlr = spi_alloc_master(&pdev->dev, sizeof(struct sprd_adi));
if (!ctlr)
return -ENOMEM;
dev_set_drvdata(&pdev->dev, ctlr);
sadi = spi_controller_get_devdata(ctlr);
sadi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(sadi->base)) {
ret = PTR_ERR(sadi->base);
goto put_ctlr;
}
sadi->slave_vbase = (unsigned long)sadi->base +
data->slave_offset;
sadi->slave_pbase = res->start + data->slave_offset;
sadi->ctlr = ctlr;
sadi->dev = &pdev->dev;
sadi->data = data;
ret = of_hwspin_lock_get_id(np, 0);
if (ret > 0 || (IS_ENABLED(CONFIG_HWSPINLOCK) && ret == 0)) {
sadi->hwlock =
devm_hwspin_lock_request_specific(&pdev->dev, ret);
if (!sadi->hwlock) {
ret = -ENXIO;
goto put_ctlr;
}
} else {
switch (ret) {
case -ENOENT:
dev_info(&pdev->dev, "no hardware spinlock supplied\n");
break;
default:
dev_err_probe(&pdev->dev, ret, "failed to find hwlock id\n");
goto put_ctlr;
}
}
sprd_adi_hw_init(sadi);
if (sadi->data->wdg_rst)
sadi->data->wdg_rst(sadi);
ctlr->dev.of_node = pdev->dev.of_node;
ctlr->bus_num = pdev->id;
ctlr->num_chipselect = num_chipselect;
ctlr->flags = SPI_CONTROLLER_HALF_DUPLEX;
ctlr->bits_per_word_mask = 0;
ctlr->transfer_one = sprd_adi_transfer_one;
ret = devm_spi_register_controller(&pdev->dev, ctlr);
if (ret) {
dev_err(&pdev->dev, "failed to register SPI controller\n");
goto put_ctlr;
}
if (sadi->data->restart) {
sadi->restart_handler.notifier_call = sadi->data->restart;
sadi->restart_handler.priority = 128;
ret = register_restart_handler(&sadi->restart_handler);
if (ret) {
dev_err(&pdev->dev, "can not register restart handler\n");
goto put_ctlr;
}
}
return 0;
put_ctlr:
spi_controller_put(ctlr);
return ret;
}
static void sprd_adi_remove(struct platform_device *pdev)
{
struct spi_controller *ctlr = dev_get_drvdata(&pdev->dev);
struct sprd_adi *sadi = spi_controller_get_devdata(ctlr);
unregister_restart_handler(&sadi->restart_handler);
}
static struct sprd_adi_data sc9860_data = {
.slave_offset = ADI_10BIT_SLAVE_OFFSET,
.slave_addr_size = ADI_10BIT_SLAVE_ADDR_SIZE,
.read_check = sprd_adi_read_check_r2,
.restart = sprd_adi_restart_sc9860,
.wdg_rst = sprd_adi_set_wdt_rst_mode,
};
static struct sprd_adi_data sc9863_data = {
.slave_offset = ADI_12BIT_SLAVE_OFFSET,
.slave_addr_size = ADI_12BIT_SLAVE_ADDR_SIZE,
.read_check = sprd_adi_read_check_r3,
};
static struct sprd_adi_data ums512_data = {
.slave_offset = ADI_15BIT_SLAVE_OFFSET,
.slave_addr_size = ADI_15BIT_SLAVE_ADDR_SIZE,
.read_check = sprd_adi_read_check_r3,
};
static const struct of_device_id sprd_adi_of_match[] = {
{
.compatible = "sprd,sc9860-adi",
.data = &sc9860_data,
},
{
.compatible = "sprd,sc9863-adi",
.data = &sc9863_data,
},
{
.compatible = "sprd,ums512-adi",
.data = &ums512_data,
},
{ },
};
MODULE_DEVICE_TABLE(of, sprd_adi_of_match);
static struct platform_driver sprd_adi_driver = {
.driver = {
.name = "sprd-adi",
.of_match_table = sprd_adi_of_match,
},
.probe = sprd_adi_probe,
.remove_new = sprd_adi_remove,
};
module_platform_driver(sprd_adi_driver);
MODULE_DESCRIPTION("Spreadtrum ADI Controller Driver");
MODULE_AUTHOR("Baolin Wang <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/spi/spi-sprd-adi.c |
// SPDX-License-Identifier: GPL-2.0-only OR MIT
/*
* Apple SoC PMGR device power state driver
*
* Copyright The Asahi Linux Contributors
*/
#include <linux/bitops.h>
#include <linux/bitfield.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <linux/reset-controller.h>
#include <linux/module.h>
#define APPLE_PMGR_RESET BIT(31)
#define APPLE_PMGR_AUTO_ENABLE BIT(28)
#define APPLE_PMGR_PS_AUTO GENMASK(27, 24)
#define APPLE_PMGR_PS_MIN GENMASK(19, 16)
#define APPLE_PMGR_PARENT_OFF BIT(11)
#define APPLE_PMGR_DEV_DISABLE BIT(10)
#define APPLE_PMGR_WAS_CLKGATED BIT(9)
#define APPLE_PMGR_WAS_PWRGATED BIT(8)
#define APPLE_PMGR_PS_ACTUAL GENMASK(7, 4)
#define APPLE_PMGR_PS_TARGET GENMASK(3, 0)
#define APPLE_PMGR_FLAGS (APPLE_PMGR_WAS_CLKGATED | APPLE_PMGR_WAS_PWRGATED)
#define APPLE_PMGR_PS_ACTIVE 0xf
#define APPLE_PMGR_PS_CLKGATE 0x4
#define APPLE_PMGR_PS_PWRGATE 0x0
#define APPLE_PMGR_PS_SET_TIMEOUT 100
#define APPLE_PMGR_RESET_TIME 1
struct apple_pmgr_ps {
struct device *dev;
struct generic_pm_domain genpd;
struct reset_controller_dev rcdev;
struct regmap *regmap;
u32 offset;
u32 min_state;
};
#define genpd_to_apple_pmgr_ps(_genpd) container_of(_genpd, struct apple_pmgr_ps, genpd)
#define rcdev_to_apple_pmgr_ps(_rcdev) container_of(_rcdev, struct apple_pmgr_ps, rcdev)
static int apple_pmgr_ps_set(struct generic_pm_domain *genpd, u32 pstate, bool auto_enable)
{
int ret;
struct apple_pmgr_ps *ps = genpd_to_apple_pmgr_ps(genpd);
u32 reg;
	ret = regmap_read(ps->regmap, ps->offset, &reg);
if (ret < 0)
return ret;
/* Resets are synchronous, and only work if the device is powered and clocked. */
if (reg & APPLE_PMGR_RESET && pstate != APPLE_PMGR_PS_ACTIVE)
dev_err(ps->dev, "PS %s: powering off with RESET active\n",
genpd->name);
reg &= ~(APPLE_PMGR_AUTO_ENABLE | APPLE_PMGR_FLAGS | APPLE_PMGR_PS_TARGET);
reg |= FIELD_PREP(APPLE_PMGR_PS_TARGET, pstate);
dev_dbg(ps->dev, "PS %s: pwrstate = 0x%x: 0x%x\n", genpd->name, pstate, reg);
regmap_write(ps->regmap, ps->offset, reg);
ret = regmap_read_poll_timeout_atomic(
ps->regmap, ps->offset, reg,
(FIELD_GET(APPLE_PMGR_PS_ACTUAL, reg) == pstate), 1,
APPLE_PMGR_PS_SET_TIMEOUT);
if (ret < 0)
dev_err(ps->dev, "PS %s: Failed to reach power state 0x%x (now: 0x%x)\n",
genpd->name, pstate, reg);
if (auto_enable) {
/* Not all devices implement this; this is a no-op where not implemented. */
reg &= ~APPLE_PMGR_FLAGS;
reg |= APPLE_PMGR_AUTO_ENABLE;
regmap_write(ps->regmap, ps->offset, reg);
}
return ret;
}
static bool apple_pmgr_ps_is_active(struct apple_pmgr_ps *ps)
{
u32 reg = 0;
	regmap_read(ps->regmap, ps->offset, &reg);
/*
* We consider domains as active if they are actually on, or if they have auto-PM
* enabled and the intended target is on.
*/
return (FIELD_GET(APPLE_PMGR_PS_ACTUAL, reg) == APPLE_PMGR_PS_ACTIVE ||
(FIELD_GET(APPLE_PMGR_PS_TARGET, reg) == APPLE_PMGR_PS_ACTIVE &&
reg & APPLE_PMGR_AUTO_ENABLE));
}
static int apple_pmgr_ps_power_on(struct generic_pm_domain *genpd)
{
return apple_pmgr_ps_set(genpd, APPLE_PMGR_PS_ACTIVE, true);
}
static int apple_pmgr_ps_power_off(struct generic_pm_domain *genpd)
{
return apple_pmgr_ps_set(genpd, APPLE_PMGR_PS_PWRGATE, false);
}
static int apple_pmgr_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
{
struct apple_pmgr_ps *ps = rcdev_to_apple_pmgr_ps(rcdev);
unsigned long flags;
spin_lock_irqsave(&ps->genpd.slock, flags);
if (ps->genpd.status == GENPD_STATE_OFF)
dev_err(ps->dev, "PS 0x%x: asserting RESET while powered down\n", ps->offset);
dev_dbg(ps->dev, "PS 0x%x: assert reset\n", ps->offset);
/* Quiesce device before asserting reset */
regmap_update_bits(ps->regmap, ps->offset, APPLE_PMGR_FLAGS | APPLE_PMGR_DEV_DISABLE,
APPLE_PMGR_DEV_DISABLE);
regmap_update_bits(ps->regmap, ps->offset, APPLE_PMGR_FLAGS | APPLE_PMGR_RESET,
APPLE_PMGR_RESET);
spin_unlock_irqrestore(&ps->genpd.slock, flags);
return 0;
}
static int apple_pmgr_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
{
struct apple_pmgr_ps *ps = rcdev_to_apple_pmgr_ps(rcdev);
unsigned long flags;
spin_lock_irqsave(&ps->genpd.slock, flags);
dev_dbg(ps->dev, "PS 0x%x: deassert reset\n", ps->offset);
regmap_update_bits(ps->regmap, ps->offset, APPLE_PMGR_FLAGS | APPLE_PMGR_RESET, 0);
regmap_update_bits(ps->regmap, ps->offset, APPLE_PMGR_FLAGS | APPLE_PMGR_DEV_DISABLE, 0);
if (ps->genpd.status == GENPD_STATE_OFF)
dev_err(ps->dev, "PS 0x%x: RESET was deasserted while powered down\n", ps->offset);
spin_unlock_irqrestore(&ps->genpd.slock, flags);
return 0;
}
static int apple_pmgr_reset_reset(struct reset_controller_dev *rcdev, unsigned long id)
{
int ret;
ret = apple_pmgr_reset_assert(rcdev, id);
if (ret)
return ret;
usleep_range(APPLE_PMGR_RESET_TIME, 2 * APPLE_PMGR_RESET_TIME);
return apple_pmgr_reset_deassert(rcdev, id);
}
static int apple_pmgr_reset_status(struct reset_controller_dev *rcdev, unsigned long id)
{
struct apple_pmgr_ps *ps = rcdev_to_apple_pmgr_ps(rcdev);
u32 reg = 0;
	regmap_read(ps->regmap, ps->offset, &reg);
return !!(reg & APPLE_PMGR_RESET);
}
const struct reset_control_ops apple_pmgr_reset_ops = {
.assert = apple_pmgr_reset_assert,
.deassert = apple_pmgr_reset_deassert,
.reset = apple_pmgr_reset_reset,
.status = apple_pmgr_reset_status,
};
static int apple_pmgr_reset_xlate(struct reset_controller_dev *rcdev,
const struct of_phandle_args *reset_spec)
{
return 0;
}
static int apple_pmgr_ps_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
struct apple_pmgr_ps *ps;
struct regmap *regmap;
struct of_phandle_iterator it;
int ret;
const char *name;
bool active;
regmap = syscon_node_to_regmap(node->parent);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
ps = devm_kzalloc(dev, sizeof(*ps), GFP_KERNEL);
if (!ps)
return -ENOMEM;
ps->dev = dev;
ps->regmap = regmap;
ret = of_property_read_string(node, "label", &name);
if (ret < 0) {
dev_err(dev, "missing label property\n");
return ret;
}
ret = of_property_read_u32(node, "reg", &ps->offset);
if (ret < 0) {
dev_err(dev, "missing reg property\n");
return ret;
}
ps->genpd.flags |= GENPD_FLAG_IRQ_SAFE;
ps->genpd.name = name;
ps->genpd.power_on = apple_pmgr_ps_power_on;
ps->genpd.power_off = apple_pmgr_ps_power_off;
ret = of_property_read_u32(node, "apple,min-state", &ps->min_state);
if (ret == 0 && ps->min_state <= APPLE_PMGR_PS_ACTIVE)
regmap_update_bits(regmap, ps->offset, APPLE_PMGR_FLAGS | APPLE_PMGR_PS_MIN,
FIELD_PREP(APPLE_PMGR_PS_MIN, ps->min_state));
active = apple_pmgr_ps_is_active(ps);
if (of_property_read_bool(node, "apple,always-on")) {
ps->genpd.flags |= GENPD_FLAG_ALWAYS_ON;
if (!active) {
dev_warn(dev, "always-on domain %s is not on at boot\n", name);
/* Turn it on so pm_genpd_init does not fail */
active = apple_pmgr_ps_power_on(&ps->genpd) == 0;
}
}
/* Turn on auto-PM if the domain is already on */
if (active)
regmap_update_bits(regmap, ps->offset, APPLE_PMGR_FLAGS | APPLE_PMGR_AUTO_ENABLE,
APPLE_PMGR_AUTO_ENABLE);
ret = pm_genpd_init(&ps->genpd, NULL, !active);
if (ret < 0) {
dev_err(dev, "pm_genpd_init failed\n");
return ret;
}
ret = of_genpd_add_provider_simple(node, &ps->genpd);
if (ret < 0) {
dev_err(dev, "of_genpd_add_provider_simple failed\n");
return ret;
}
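	/*
	 * Walk our own "power-domains" property and register each referenced
	 * domain as a genpd parent of this power state.
	 */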
of_for_each_phandle(&it, ret, node, "power-domains", "#power-domain-cells", -1) {
struct of_phandle_args parent, child;
parent.np = it.node;
parent.args_count = of_phandle_iterator_args(&it, parent.args, MAX_PHANDLE_ARGS);
child.np = node;
child.args_count = 0;
ret = of_genpd_add_subdomain(&parent, &child);
if (ret == -EPROBE_DEFER) {
of_node_put(parent.np);
goto err_remove;
} else if (ret < 0) {
dev_err(dev, "failed to add to parent domain: %d (%s -> %s)\n",
ret, it.node->name, node->name);
of_node_put(parent.np);
goto err_remove;
}
}
/*
* Do not participate in regular PM; parent power domains are handled via the
* genpd hierarchy.
*/
pm_genpd_remove_device(dev);
ps->rcdev.owner = THIS_MODULE;
ps->rcdev.nr_resets = 1;
ps->rcdev.ops = &apple_pmgr_reset_ops;
ps->rcdev.of_node = dev->of_node;
ps->rcdev.of_reset_n_cells = 0;
ps->rcdev.of_xlate = apple_pmgr_reset_xlate;
ret = devm_reset_controller_register(dev, &ps->rcdev);
if (ret < 0)
goto err_remove;
return 0;
err_remove:
of_genpd_del_provider(node);
pm_genpd_remove(&ps->genpd);
return ret;
}
static const struct of_device_id apple_pmgr_ps_of_match[] = {
{ .compatible = "apple,pmgr-pwrstate" },
{}
};
MODULE_DEVICE_TABLE(of, apple_pmgr_ps_of_match);
static struct platform_driver apple_pmgr_ps_driver = {
.probe = apple_pmgr_ps_probe,
.driver = {
.name = "apple-pmgr-pwrstate",
.of_match_table = apple_pmgr_ps_of_match,
},
};
MODULE_AUTHOR("Hector Martin <[email protected]>");
MODULE_DESCRIPTION("PMGR power state driver for Apple SoCs");
module_platform_driver(apple_pmgr_ps_driver);
| linux-master | drivers/pmdomain/apple/pmgr-pwrstate.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2022 NXP, Peng Fan <[email protected]>
*/
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/sizes.h>
#include <dt-bindings/power/fsl,imx93-power.h>
#define BLK_SFT_RSTN 0x0
#define BLK_CLK_EN 0x4
#define BLK_MAX_CLKS 4
#define DOMAIN_MAX_CLKS 4
#define LCDIF_QOS_REG 0xC
#define LCDIF_DEFAULT_QOS_OFF 12
#define LCDIF_CFG_QOS_OFF 8
#define PXP_QOS_REG 0x10
#define PXP_R_DEFAULT_QOS_OFF 28
#define PXP_R_CFG_QOS_OFF 24
#define PXP_W_DEFAULT_QOS_OFF 20
#define PXP_W_CFG_QOS_OFF 16
#define ISI_CACHE_REG 0x14
#define ISI_QOS_REG 0x1C
#define ISI_V_DEFAULT_QOS_OFF 28
#define ISI_V_CFG_QOS_OFF 24
#define ISI_U_DEFAULT_QOS_OFF 20
#define ISI_U_CFG_QOS_OFF 16
#define ISI_Y_R_DEFAULT_QOS_OFF 12
#define ISI_Y_R_CFG_QOS_OFF 8
#define ISI_Y_W_DEFAULT_QOS_OFF 4
#define ISI_Y_W_CFG_QOS_OFF 0
#define PRIO_MASK 0xF
#define PRIO(X) (X)
struct imx93_blk_ctrl_domain;
struct imx93_blk_ctrl {
struct device *dev;
struct regmap *regmap;
int num_clks;
struct clk_bulk_data clks[BLK_MAX_CLKS];
struct imx93_blk_ctrl_domain *domains;
struct genpd_onecell_data onecell_data;
};
#define DOMAIN_MAX_QOS 4
struct imx93_blk_ctrl_qos {
u32 reg;
u32 cfg_off;
u32 default_prio;
u32 cfg_prio;
};
struct imx93_blk_ctrl_domain_data {
const char *name;
const char * const *clk_names;
int num_clks;
u32 rst_mask;
u32 clk_mask;
int num_qos;
struct imx93_blk_ctrl_qos qos[DOMAIN_MAX_QOS];
};
struct imx93_blk_ctrl_domain {
struct generic_pm_domain genpd;
const struct imx93_blk_ctrl_domain_data *data;
struct clk_bulk_data clks[DOMAIN_MAX_CLKS];
struct imx93_blk_ctrl *bc;
};
struct imx93_blk_ctrl_data {
const struct imx93_blk_ctrl_domain_data *domains;
int num_domains;
const char * const *clk_names;
int num_clks;
const struct regmap_access_table *reg_access_table;
};
static inline struct imx93_blk_ctrl_domain *
to_imx93_blk_ctrl_domain(struct generic_pm_domain *genpd)
{
return container_of(genpd, struct imx93_blk_ctrl_domain, genpd);
}
static int imx93_blk_ctrl_set_qos(struct imx93_blk_ctrl_domain *domain)
{
const struct imx93_blk_ctrl_domain_data *data = domain->data;
struct imx93_blk_ctrl *bc = domain->bc;
const struct imx93_blk_ctrl_qos *qos;
u32 val, mask;
int i;
for (i = 0; i < data->num_qos; i++) {
qos = &data->qos[i];
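		/*
		 * The default-priority nibble sits 4 bits above the
		 * config-priority nibble for each QoS port, so update both
		 * fields in a single write.
		 */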
mask = PRIO_MASK << qos->cfg_off;
mask |= PRIO_MASK << (qos->cfg_off + 4);
val = qos->cfg_prio << qos->cfg_off;
val |= qos->default_prio << (qos->cfg_off + 4);
regmap_write_bits(bc->regmap, qos->reg, mask, val);
dev_dbg(bc->dev, "data->qos[i].reg 0x%x 0x%x\n", qos->reg, val);
}
return 0;
}
static int imx93_blk_ctrl_power_on(struct generic_pm_domain *genpd)
{
struct imx93_blk_ctrl_domain *domain = to_imx93_blk_ctrl_domain(genpd);
const struct imx93_blk_ctrl_domain_data *data = domain->data;
struct imx93_blk_ctrl *bc = domain->bc;
int ret;
ret = clk_bulk_prepare_enable(bc->num_clks, bc->clks);
if (ret) {
dev_err(bc->dev, "failed to enable bus clocks\n");
return ret;
}
ret = clk_bulk_prepare_enable(data->num_clks, domain->clks);
if (ret) {
clk_bulk_disable_unprepare(bc->num_clks, bc->clks);
dev_err(bc->dev, "failed to enable clocks\n");
return ret;
}
ret = pm_runtime_get_sync(bc->dev);
if (ret < 0) {
pm_runtime_put_noidle(bc->dev);
dev_err(bc->dev, "failed to power up domain\n");
goto disable_clk;
}
/* ungate clk */
regmap_clear_bits(bc->regmap, BLK_CLK_EN, data->clk_mask);
/* release reset */
regmap_set_bits(bc->regmap, BLK_SFT_RSTN, data->rst_mask);
dev_dbg(bc->dev, "pd_on: name: %s\n", genpd->name);
return imx93_blk_ctrl_set_qos(domain);
disable_clk:
clk_bulk_disable_unprepare(data->num_clks, domain->clks);
clk_bulk_disable_unprepare(bc->num_clks, bc->clks);
return ret;
}
static int imx93_blk_ctrl_power_off(struct generic_pm_domain *genpd)
{
struct imx93_blk_ctrl_domain *domain = to_imx93_blk_ctrl_domain(genpd);
const struct imx93_blk_ctrl_domain_data *data = domain->data;
struct imx93_blk_ctrl *bc = domain->bc;
dev_dbg(bc->dev, "pd_off: name: %s\n", genpd->name);
regmap_clear_bits(bc->regmap, BLK_SFT_RSTN, data->rst_mask);
regmap_set_bits(bc->regmap, BLK_CLK_EN, data->clk_mask);
pm_runtime_put(bc->dev);
clk_bulk_disable_unprepare(data->num_clks, domain->clks);
clk_bulk_disable_unprepare(bc->num_clks, bc->clks);
return 0;
}
static struct lock_class_key blk_ctrl_genpd_lock_class;
static int imx93_blk_ctrl_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct imx93_blk_ctrl_data *bc_data = of_device_get_match_data(dev);
struct imx93_blk_ctrl *bc;
void __iomem *base;
int i, ret;
struct regmap_config regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.rd_table = bc_data->reg_access_table,
.wr_table = bc_data->reg_access_table,
.max_register = SZ_4K,
};
bc = devm_kzalloc(dev, sizeof(*bc), GFP_KERNEL);
if (!bc)
return -ENOMEM;
bc->dev = dev;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
	bc->regmap = devm_regmap_init_mmio(dev, base, &regmap_config);
if (IS_ERR(bc->regmap))
return dev_err_probe(dev, PTR_ERR(bc->regmap),
"failed to init regmap\n");
bc->domains = devm_kcalloc(dev, bc_data->num_domains,
sizeof(struct imx93_blk_ctrl_domain),
GFP_KERNEL);
if (!bc->domains)
return -ENOMEM;
bc->onecell_data.num_domains = bc_data->num_domains;
bc->onecell_data.domains =
devm_kcalloc(dev, bc_data->num_domains,
sizeof(struct generic_pm_domain *), GFP_KERNEL);
if (!bc->onecell_data.domains)
return -ENOMEM;
for (i = 0; i < bc_data->num_clks; i++)
bc->clks[i].id = bc_data->clk_names[i];
bc->num_clks = bc_data->num_clks;
ret = devm_clk_bulk_get(dev, bc->num_clks, bc->clks);
if (ret) {
dev_err_probe(dev, ret, "failed to get bus clock\n");
return ret;
}
for (i = 0; i < bc_data->num_domains; i++) {
const struct imx93_blk_ctrl_domain_data *data = &bc_data->domains[i];
struct imx93_blk_ctrl_domain *domain = &bc->domains[i];
int j;
domain->data = data;
for (j = 0; j < data->num_clks; j++)
domain->clks[j].id = data->clk_names[j];
ret = devm_clk_bulk_get(dev, data->num_clks, domain->clks);
if (ret) {
dev_err_probe(dev, ret, "failed to get clock\n");
goto cleanup_pds;
}
domain->genpd.name = data->name;
domain->genpd.power_on = imx93_blk_ctrl_power_on;
domain->genpd.power_off = imx93_blk_ctrl_power_off;
domain->bc = bc;
ret = pm_genpd_init(&domain->genpd, NULL, true);
if (ret) {
dev_err_probe(dev, ret, "failed to init power domain\n");
goto cleanup_pds;
}
/*
* We use runtime PM to trigger power on/off of the upstream GPC
* domain, as a strict hierarchical parent/child power domain
* setup doesn't allow us to meet the sequencing requirements.
* This means we have nested locking of genpd locks, without the
* nesting being visible at the genpd level, so we need a
* separate lock class to make lockdep aware of the fact that
		 * these are separate domain locks that can be nested without a
* self-deadlock.
*/
lockdep_set_class(&domain->genpd.mlock,
&blk_ctrl_genpd_lock_class);
bc->onecell_data.domains[i] = &domain->genpd;
}
pm_runtime_enable(dev);
ret = of_genpd_add_provider_onecell(dev->of_node, &bc->onecell_data);
if (ret) {
dev_err_probe(dev, ret, "failed to add power domain provider\n");
goto cleanup_pds;
}
dev_set_drvdata(dev, bc);
return 0;
cleanup_pds:
for (i--; i >= 0; i--)
pm_genpd_remove(&bc->domains[i].genpd);
return ret;
}
static int imx93_blk_ctrl_remove(struct platform_device *pdev)
{
struct imx93_blk_ctrl *bc = dev_get_drvdata(&pdev->dev);
int i;
of_genpd_del_provider(pdev->dev.of_node);
	for (i = 0; i < bc->onecell_data.num_domains; i++) {
struct imx93_blk_ctrl_domain *domain = &bc->domains[i];
pm_genpd_remove(&domain->genpd);
}
return 0;
}
static const struct imx93_blk_ctrl_domain_data imx93_media_blk_ctl_domain_data[] = {
[IMX93_MEDIABLK_PD_MIPI_DSI] = {
.name = "mediablk-mipi-dsi",
.clk_names = (const char *[]){ "dsi" },
.num_clks = 1,
.rst_mask = BIT(11) | BIT(12),
.clk_mask = BIT(11) | BIT(12),
},
[IMX93_MEDIABLK_PD_MIPI_CSI] = {
.name = "mediablk-mipi-csi",
.clk_names = (const char *[]){ "cam", "csi" },
.num_clks = 2,
.rst_mask = BIT(9) | BIT(10),
.clk_mask = BIT(9) | BIT(10),
},
[IMX93_MEDIABLK_PD_PXP] = {
.name = "mediablk-pxp",
.clk_names = (const char *[]){ "pxp" },
.num_clks = 1,
.rst_mask = BIT(7) | BIT(8),
.clk_mask = BIT(7) | BIT(8),
.num_qos = 2,
.qos = {
{
.reg = PXP_QOS_REG,
.cfg_off = PXP_R_CFG_QOS_OFF,
.default_prio = PRIO(3),
.cfg_prio = PRIO(6),
}, {
.reg = PXP_QOS_REG,
.cfg_off = PXP_W_CFG_QOS_OFF,
.default_prio = PRIO(3),
.cfg_prio = PRIO(6),
}
}
},
[IMX93_MEDIABLK_PD_LCDIF] = {
.name = "mediablk-lcdif",
.clk_names = (const char *[]){ "disp", "lcdif" },
.num_clks = 2,
.rst_mask = BIT(4) | BIT(5) | BIT(6),
.clk_mask = BIT(4) | BIT(5) | BIT(6),
.num_qos = 1,
.qos = {
{
.reg = LCDIF_QOS_REG,
.cfg_off = LCDIF_CFG_QOS_OFF,
.default_prio = PRIO(3),
.cfg_prio = PRIO(7),
}
}
},
[IMX93_MEDIABLK_PD_ISI] = {
.name = "mediablk-isi",
.clk_names = (const char *[]){ "isi" },
.num_clks = 1,
.rst_mask = BIT(2) | BIT(3),
.clk_mask = BIT(2) | BIT(3),
.num_qos = 4,
.qos = {
{
.reg = ISI_QOS_REG,
.cfg_off = ISI_Y_W_CFG_QOS_OFF,
.default_prio = PRIO(3),
.cfg_prio = PRIO(7),
}, {
.reg = ISI_QOS_REG,
.cfg_off = ISI_Y_R_CFG_QOS_OFF,
.default_prio = PRIO(3),
.cfg_prio = PRIO(7),
}, {
.reg = ISI_QOS_REG,
.cfg_off = ISI_U_CFG_QOS_OFF,
.default_prio = PRIO(3),
.cfg_prio = PRIO(7),
}, {
.reg = ISI_QOS_REG,
.cfg_off = ISI_V_CFG_QOS_OFF,
.default_prio = PRIO(3),
.cfg_prio = PRIO(7),
}
}
},
};
static const struct regmap_range imx93_media_blk_ctl_yes_ranges[] = {
regmap_reg_range(BLK_SFT_RSTN, BLK_CLK_EN),
regmap_reg_range(LCDIF_QOS_REG, ISI_CACHE_REG),
regmap_reg_range(ISI_QOS_REG, ISI_QOS_REG),
};
static const struct regmap_access_table imx93_media_blk_ctl_access_table = {
.yes_ranges = imx93_media_blk_ctl_yes_ranges,
.n_yes_ranges = ARRAY_SIZE(imx93_media_blk_ctl_yes_ranges),
};
static const struct imx93_blk_ctrl_data imx93_media_blk_ctl_dev_data = {
.domains = imx93_media_blk_ctl_domain_data,
.num_domains = ARRAY_SIZE(imx93_media_blk_ctl_domain_data),
.clk_names = (const char *[]){ "axi", "apb", "nic", },
.num_clks = 3,
.reg_access_table = &imx93_media_blk_ctl_access_table,
};
static const struct of_device_id imx93_blk_ctrl_of_match[] = {
{
.compatible = "fsl,imx93-media-blk-ctrl",
.data = &imx93_media_blk_ctl_dev_data
}, {
/* Sentinel */
}
};
MODULE_DEVICE_TABLE(of, imx93_blk_ctrl_of_match);
static struct platform_driver imx93_blk_ctrl_driver = {
.probe = imx93_blk_ctrl_probe,
.remove = imx93_blk_ctrl_remove,
.driver = {
.name = "imx93-blk-ctrl",
.of_match_table = imx93_blk_ctrl_of_match,
},
};
module_platform_driver(imx93_blk_ctrl_driver);
MODULE_AUTHOR("Peng Fan <[email protected]>");
MODULE_DESCRIPTION("i.MX93 BLK CTRL driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/pmdomain/imx/imx93-blk-ctrl.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2015-2017 Pengutronix, Lucas Stach <[email protected]>
* Copyright 2011-2013 Freescale Semiconductor, Inc.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#define GPC_CNTR 0x000
#define GPC_PGC_CTRL_OFFS 0x0
#define GPC_PGC_PUPSCR_OFFS 0x4
#define GPC_PGC_PDNSCR_OFFS 0x8
#define GPC_PGC_SW2ISO_SHIFT 0x8
#define GPC_PGC_SW_SHIFT 0x0
#define GPC_PGC_PCI_PDN 0x200
#define GPC_PGC_PCI_SR 0x20c
#define GPC_PGC_GPU_PDN 0x260
#define GPC_PGC_GPU_PUPSCR 0x264
#define GPC_PGC_GPU_PDNSCR 0x268
#define GPC_PGC_GPU_SR 0x26c
#define GPC_PGC_DISP_PDN 0x240
#define GPC_PGC_DISP_SR 0x24c
#define GPU_VPU_PUP_REQ BIT(1)
#define GPU_VPU_PDN_REQ BIT(0)
#define GPC_CLK_MAX 7
#define PGC_DOMAIN_FLAG_NO_PD BIT(0)
struct imx_pm_domain {
struct generic_pm_domain base;
struct regmap *regmap;
struct regulator *supply;
struct clk *clk[GPC_CLK_MAX];
int num_clks;
unsigned int reg_offs;
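	/* GPC_CNTR power-down request bit; the power-up request is the next bit up */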
signed char cntr_pdn_bit;
unsigned int ipg_rate_mhz;
};
static inline struct imx_pm_domain *
to_imx_pm_domain(struct generic_pm_domain *genpd)
{
return container_of(genpd, struct imx_pm_domain, base);
}
static int imx6_pm_domain_power_off(struct generic_pm_domain *genpd)
{
struct imx_pm_domain *pd = to_imx_pm_domain(genpd);
int iso, iso2sw;
u32 val;
/* Read ISO and ISO2SW power down delays */
regmap_read(pd->regmap, pd->reg_offs + GPC_PGC_PDNSCR_OFFS, &val);
iso = val & 0x3f;
iso2sw = (val >> 8) & 0x3f;
/* Gate off domain when powered down */
regmap_update_bits(pd->regmap, pd->reg_offs + GPC_PGC_CTRL_OFFS,
0x1, 0x1);
/* Request GPC to power down domain */
val = BIT(pd->cntr_pdn_bit);
regmap_update_bits(pd->regmap, GPC_CNTR, val, val);
/* Wait ISO + ISO2SW IPG clock cycles */
udelay(DIV_ROUND_UP(iso + iso2sw, pd->ipg_rate_mhz));
if (pd->supply)
regulator_disable(pd->supply);
return 0;
}
static int imx6_pm_domain_power_on(struct generic_pm_domain *genpd)
{
struct imx_pm_domain *pd = to_imx_pm_domain(genpd);
int i, ret;
u32 val, req;
if (pd->supply) {
ret = regulator_enable(pd->supply);
if (ret) {
pr_err("%s: failed to enable regulator: %d\n",
__func__, ret);
return ret;
}
}
/* Enable reset clocks for all devices in the domain */
for (i = 0; i < pd->num_clks; i++)
clk_prepare_enable(pd->clk[i]);
/* Gate off domain when powered down */
regmap_update_bits(pd->regmap, pd->reg_offs + GPC_PGC_CTRL_OFFS,
0x1, 0x1);
/* Request GPC to power up domain */
req = BIT(pd->cntr_pdn_bit + 1);
regmap_update_bits(pd->regmap, GPC_CNTR, req, req);
/* Wait for the PGC to handle the request */
ret = regmap_read_poll_timeout(pd->regmap, GPC_CNTR, val, !(val & req),
1, 50);
if (ret)
pr_err("powerup request on domain %s timed out\n", genpd->name);
/* Wait for reset to propagate through peripherals */
usleep_range(5, 10);
/* Disable reset clocks for all devices in the domain */
for (i = 0; i < pd->num_clks; i++)
clk_disable_unprepare(pd->clk[i]);
return 0;
}
static int imx_pgc_get_clocks(struct device *dev, struct imx_pm_domain *domain)
{
int i, ret;
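	/* Collect every clock referenced by the DT node, up to GPC_CLK_MAX */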
for (i = 0; ; i++) {
struct clk *clk = of_clk_get(dev->of_node, i);
if (IS_ERR(clk))
break;
if (i >= GPC_CLK_MAX) {
dev_err(dev, "more than %d clocks\n", GPC_CLK_MAX);
ret = -EINVAL;
goto clk_err;
}
domain->clk[i] = clk;
}
domain->num_clks = i;
return 0;
clk_err:
while (i--)
clk_put(domain->clk[i]);
return ret;
}
static void imx_pgc_put_clocks(struct imx_pm_domain *domain)
{
int i;
for (i = domain->num_clks - 1; i >= 0; i--)
clk_put(domain->clk[i]);
}
static int imx_pgc_parse_dt(struct device *dev, struct imx_pm_domain *domain)
{
/* try to get the domain supply regulator */
domain->supply = devm_regulator_get_optional(dev, "power");
if (IS_ERR(domain->supply)) {
if (PTR_ERR(domain->supply) == -ENODEV)
domain->supply = NULL;
else
return PTR_ERR(domain->supply);
}
/* try to get all clocks needed for reset propagation */
return imx_pgc_get_clocks(dev, domain);
}
static int imx_pgc_power_domain_probe(struct platform_device *pdev)
{
struct imx_pm_domain *domain = pdev->dev.platform_data;
struct device *dev = &pdev->dev;
int ret;
/* if this PD is associated with a DT node try to parse it */
if (dev->of_node) {
ret = imx_pgc_parse_dt(dev, domain);
if (ret)
return ret;
}
/* initially power on the domain */
if (domain->base.power_on)
domain->base.power_on(&domain->base);
if (IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) {
pm_genpd_init(&domain->base, NULL, false);
ret = of_genpd_add_provider_simple(dev->of_node, &domain->base);
if (ret)
goto genpd_err;
}
device_link_add(dev, dev->parent, DL_FLAG_AUTOREMOVE_CONSUMER);
return 0;
genpd_err:
pm_genpd_remove(&domain->base);
imx_pgc_put_clocks(domain);
return ret;
}
static int imx_pgc_power_domain_remove(struct platform_device *pdev)
{
struct imx_pm_domain *domain = pdev->dev.platform_data;
if (IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) {
of_genpd_del_provider(pdev->dev.of_node);
pm_genpd_remove(&domain->base);
imx_pgc_put_clocks(domain);
}
return 0;
}
static const struct platform_device_id imx_pgc_power_domain_id[] = {
{ "imx-pgc-power-domain"},
{ },
};
static struct platform_driver imx_pgc_power_domain_driver = {
.driver = {
.name = "imx-pgc-pd",
},
.probe = imx_pgc_power_domain_probe,
.remove = imx_pgc_power_domain_remove,
.id_table = imx_pgc_power_domain_id,
};
builtin_platform_driver(imx_pgc_power_domain_driver)
#define GPC_PGC_DOMAIN_ARM 0
#define GPC_PGC_DOMAIN_PU 1
#define GPC_PGC_DOMAIN_DISPLAY 2
#define GPC_PGC_DOMAIN_PCI 3
static struct genpd_power_state imx6_pm_domain_pu_state = {
.power_off_latency_ns = 25000,
.power_on_latency_ns = 2000000,
};
static struct imx_pm_domain imx_gpc_domains[] = {
[GPC_PGC_DOMAIN_ARM] = {
.base = {
.name = "ARM",
.flags = GENPD_FLAG_ALWAYS_ON,
},
},
[GPC_PGC_DOMAIN_PU] = {
.base = {
.name = "PU",
.power_off = imx6_pm_domain_power_off,
.power_on = imx6_pm_domain_power_on,
.states = &imx6_pm_domain_pu_state,
.state_count = 1,
},
.reg_offs = 0x260,
.cntr_pdn_bit = 0,
},
[GPC_PGC_DOMAIN_DISPLAY] = {
.base = {
.name = "DISPLAY",
.power_off = imx6_pm_domain_power_off,
.power_on = imx6_pm_domain_power_on,
},
.reg_offs = 0x240,
.cntr_pdn_bit = 4,
},
[GPC_PGC_DOMAIN_PCI] = {
.base = {
.name = "PCI",
.power_off = imx6_pm_domain_power_off,
.power_on = imx6_pm_domain_power_on,
},
.reg_offs = 0x200,
.cntr_pdn_bit = 6,
},
};
struct imx_gpc_dt_data {
int num_domains;
bool err009619_present;
bool err006287_present;
};
static const struct imx_gpc_dt_data imx6q_dt_data = {
.num_domains = 2,
.err009619_present = false,
.err006287_present = false,
};
static const struct imx_gpc_dt_data imx6qp_dt_data = {
.num_domains = 2,
.err009619_present = true,
.err006287_present = false,
};
static const struct imx_gpc_dt_data imx6sl_dt_data = {
.num_domains = 3,
.err009619_present = false,
.err006287_present = true,
};
static const struct imx_gpc_dt_data imx6sx_dt_data = {
.num_domains = 4,
.err009619_present = false,
.err006287_present = false,
};
static const struct of_device_id imx_gpc_dt_ids[] = {
{ .compatible = "fsl,imx6q-gpc", .data = &imx6q_dt_data },
{ .compatible = "fsl,imx6qp-gpc", .data = &imx6qp_dt_data },
{ .compatible = "fsl,imx6sl-gpc", .data = &imx6sl_dt_data },
{ .compatible = "fsl,imx6sx-gpc", .data = &imx6sx_dt_data },
{ }
};
static const struct regmap_range yes_ranges[] = {
regmap_reg_range(GPC_CNTR, GPC_CNTR),
regmap_reg_range(GPC_PGC_PCI_PDN, GPC_PGC_PCI_SR),
regmap_reg_range(GPC_PGC_GPU_PDN, GPC_PGC_GPU_SR),
regmap_reg_range(GPC_PGC_DISP_PDN, GPC_PGC_DISP_SR),
};
static const struct regmap_access_table access_table = {
.yes_ranges = yes_ranges,
.n_yes_ranges = ARRAY_SIZE(yes_ranges),
};
static const struct regmap_config imx_gpc_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.rd_table = &access_table,
.wr_table = &access_table,
.max_register = 0x2ac,
.fast_io = true,
};
static struct generic_pm_domain *imx_gpc_onecell_domains[] = {
&imx_gpc_domains[GPC_PGC_DOMAIN_ARM].base,
&imx_gpc_domains[GPC_PGC_DOMAIN_PU].base,
};
static struct genpd_onecell_data imx_gpc_onecell_data = {
.domains = imx_gpc_onecell_domains,
.num_domains = 2,
};
static int imx_gpc_old_dt_init(struct device *dev, struct regmap *regmap,
unsigned int num_domains)
{
struct imx_pm_domain *domain;
int i, ret;
for (i = 0; i < num_domains; i++) {
domain = &imx_gpc_domains[i];
domain->regmap = regmap;
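		/*
		 * The old binding provides no ipg clock reference, so assume
		 * the typical 66 MHz i.MX6 IPG rate for the delay calculation.
		 */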
domain->ipg_rate_mhz = 66;
if (i == 1) {
domain->supply = devm_regulator_get(dev, "pu");
if (IS_ERR(domain->supply))
return PTR_ERR(domain->supply);
ret = imx_pgc_get_clocks(dev, domain);
if (ret)
goto clk_err;
domain->base.power_on(&domain->base);
}
}
for (i = 0; i < num_domains; i++)
pm_genpd_init(&imx_gpc_domains[i].base, NULL, false);
if (IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) {
ret = of_genpd_add_provider_onecell(dev->of_node,
&imx_gpc_onecell_data);
if (ret)
goto genpd_err;
}
return 0;
genpd_err:
for (i = 0; i < num_domains; i++)
pm_genpd_remove(&imx_gpc_domains[i].base);
imx_pgc_put_clocks(&imx_gpc_domains[GPC_PGC_DOMAIN_PU]);
clk_err:
return ret;
}
static int imx_gpc_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id =
of_match_device(imx_gpc_dt_ids, &pdev->dev);
const struct imx_gpc_dt_data *of_id_data = of_id->data;
struct device_node *pgc_node;
struct regmap *regmap;
void __iomem *base;
int ret;
pgc_node = of_get_child_by_name(pdev->dev.of_node, "pgc");
/* bail out if DT too old and doesn't provide the necessary info */
if (!of_property_read_bool(pdev->dev.of_node, "#power-domain-cells") &&
!pgc_node)
return 0;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
regmap = devm_regmap_init_mmio_clk(&pdev->dev, NULL, base,
&imx_gpc_regmap_config);
if (IS_ERR(regmap)) {
ret = PTR_ERR(regmap);
dev_err(&pdev->dev, "failed to init regmap: %d\n",
ret);
return ret;
}
/*
* Disable PU power down by runtime PM if ERR009619 is present.
*
* The PRE clock will be paused for several cycles when turning on the
* PU domain LDO from power down state. If PRE is in use at that time,
* the IPU/PRG cannot get the correct display data from the PRE.
*
* This is not a concern when the whole system enters suspend state, so
* it's safe to power down PU in this case.
*/
if (of_id_data->err009619_present)
imx_gpc_domains[GPC_PGC_DOMAIN_PU].base.flags |=
GENPD_FLAG_RPM_ALWAYS_ON;
/* Keep DISP always on if ERR006287 is present */
if (of_id_data->err006287_present)
imx_gpc_domains[GPC_PGC_DOMAIN_DISPLAY].base.flags |=
GENPD_FLAG_ALWAYS_ON;
if (!pgc_node) {
ret = imx_gpc_old_dt_init(&pdev->dev, regmap,
of_id_data->num_domains);
if (ret)
return ret;
} else {
struct imx_pm_domain *domain;
struct platform_device *pd_pdev;
struct device_node *np;
struct clk *ipg_clk;
unsigned int ipg_rate_mhz;
int domain_index;
ipg_clk = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(ipg_clk))
return PTR_ERR(ipg_clk);
ipg_rate_mhz = clk_get_rate(ipg_clk) / 1000000;
for_each_child_of_node(pgc_node, np) {
ret = of_property_read_u32(np, "reg", &domain_index);
if (ret) {
of_node_put(np);
return ret;
}
if (domain_index >= of_id_data->num_domains)
continue;
pd_pdev = platform_device_alloc("imx-pgc-power-domain",
domain_index);
if (!pd_pdev) {
of_node_put(np);
return -ENOMEM;
}
ret = platform_device_add_data(pd_pdev,
&imx_gpc_domains[domain_index],
sizeof(imx_gpc_domains[domain_index]));
if (ret) {
platform_device_put(pd_pdev);
of_node_put(np);
return ret;
}
domain = pd_pdev->dev.platform_data;
domain->regmap = regmap;
domain->ipg_rate_mhz = ipg_rate_mhz;
pd_pdev->dev.parent = &pdev->dev;
pd_pdev->dev.of_node = np;
ret = platform_device_add(pd_pdev);
if (ret) {
platform_device_put(pd_pdev);
of_node_put(np);
return ret;
}
}
}
return 0;
}
static int imx_gpc_remove(struct platform_device *pdev)
{
struct device_node *pgc_node;
int ret;
pgc_node = of_get_child_by_name(pdev->dev.of_node, "pgc");
/* bail out if DT too old and doesn't provide the necessary info */
if (!of_property_read_bool(pdev->dev.of_node, "#power-domain-cells") &&
!pgc_node)
return 0;
/*
* If the old DT binding is used the toplevel driver needs to
* de-register the power domains
*/
if (!pgc_node) {
of_genpd_del_provider(pdev->dev.of_node);
ret = pm_genpd_remove(&imx_gpc_domains[GPC_PGC_DOMAIN_PU].base);
if (ret)
return ret;
imx_pgc_put_clocks(&imx_gpc_domains[GPC_PGC_DOMAIN_PU]);
ret = pm_genpd_remove(&imx_gpc_domains[GPC_PGC_DOMAIN_ARM].base);
if (ret)
return ret;
}
return 0;
}
static struct platform_driver imx_gpc_driver = {
.driver = {
.name = "imx-gpc",
.of_match_table = imx_gpc_dt_ids,
},
.probe = imx_gpc_probe,
.remove = imx_gpc_remove,
};
builtin_platform_driver(imx_gpc_driver)
| linux-master | drivers/pmdomain/imx/gpc.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2016 Freescale Semiconductor, Inc.
* Copyright 2017-2018 NXP
* Dong Aisheng <[email protected]>
*
* Implementation of the SCU based Power Domains
*
* NOTE: a better implementation suggested by Ulf Hansson is using a
* single global power domain and implement the ->attach|detach_dev()
* callback for the genpd and use the regular of_genpd_add_provider_simple().
* From within the ->attach_dev(), we could get the OF node for
* the device that is being attached and then parse the power-domain
* cell containing the "resource id" and store that in the per device
* struct generic_pm_domain_data (we have void pointer there for
* storing these kind of things).
*
* Additionally, we need to implement the ->stop() and ->start()
* callbacks of genpd, which is where you "power on/off" devices,
* rather than using the above ->power_on|off() callbacks.
*
* However, there're two known issues:
 * 1. The ->attach_dev() of the power domain infrastructure still does
 * not support the multi-domain case: the struct device *dev passed
 * in is a virtual PD device, so it does not help with parsing the real
 * device's resource id from the device tree, and it is unaware of which
 * real sub power domain of the device should be attached.
*
* The framework needs some proper extension to support multi power
* domain cases.
*
* Update: Genpd assigns the ->of_node for the virtual device before it
* invokes ->attach_dev() callback, hence parsing for device resources via
* DT should work fine.
*
 * 2. It also breaks most current drivers, as the driver probe sequence
 * behavior changes if the ->power_on|off() callbacks are removed and
 * ->start() and ->stop() are used instead. genpd_dev_pm_attach() will only
 * power up the domain and attach the device, but will not call ->start(),
 * which relies on device runtime pm. That means the device power is still
 * not up before the driver probe function runs. For SCU enabled
 * platforms, all device drivers accessing registers/clocks without the power
 * domain enabled will trigger a HW access error. That means we would need to
 * fix the probe sequence of most drivers with proper runtime pm.
*
* Update: Runtime PM support isn't necessary. Instead, this can easily be
* fixed in drivers by adding a call to dev_pm_domain_start() during probe.
*
* In summary, the second part needs to be addressed via minor updates to the
* relevant drivers, before the "single global power domain" model can be used.
*
*/
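/*
 * For illustration only, not used by this driver: a minimal sketch of the
 * ->attach_dev() approach described above. The callback name and the use of
 * the generic_pm_domain_data ->data pointer are hypothetical.
 *
 *	static int imx_sc_pd_attach_dev(struct generic_pm_domain *genpd,
 *					struct device *dev)
 *	{
 *		struct of_phandle_args spec;
 *		int ret;
 *
 *		ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
 *						 "#power-domain-cells", 0, &spec);
 *		if (ret)
 *			return ret;
 *
 *		dev_gpd_data(dev)->data = (void *)(unsigned long)spec.args[0];
 *		of_node_put(spec.np);
 *		return 0;
 *	}
 */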
#include <dt-bindings/firmware/imx/rsrc.h>
#include <linux/console.h>
#include <linux/firmware/imx/sci.h>
#include <linux/firmware/imx/svc/rm.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_domain.h>
#include <linux/slab.h>
/* SCU Power Mode Protocol definition */
struct imx_sc_msg_req_set_resource_power_mode {
struct imx_sc_rpc_msg hdr;
u16 resource;
u8 mode;
} __packed __aligned(4);
struct req_get_resource_mode {
u16 resource;
};
struct resp_get_resource_mode {
u8 mode;
};
struct imx_sc_msg_req_get_resource_power_mode {
struct imx_sc_rpc_msg hdr;
union {
struct req_get_resource_mode req;
struct resp_get_resource_mode resp;
} data;
} __packed __aligned(4);
#define IMX_SCU_PD_NAME_SIZE 20
struct imx_sc_pm_domain {
struct generic_pm_domain pd;
char name[IMX_SCU_PD_NAME_SIZE];
u32 rsrc;
};
struct imx_sc_pd_range {
char *name;
u32 rsrc;
u8 num;
/* add domain index */
bool postfix;
u8 start_from;
};
struct imx_sc_pd_soc {
const struct imx_sc_pd_range *pd_ranges;
u8 num_ranges;
};
static int imx_con_rsrc;
/* Align with the IMX_SC_PM_PW_MODE_[OFF,STBY,LP,ON] macros */
static const char * const imx_sc_pm_mode[] = {
"IMX_SC_PM_PW_MODE_OFF",
"IMX_SC_PM_PW_MODE_STBY",
"IMX_SC_PM_PW_MODE_LP",
"IMX_SC_PM_PW_MODE_ON"
};
static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
/* LSIO SS */
{ "pwm", IMX_SC_R_PWM_0, 8, true, 0 },
{ "gpio", IMX_SC_R_GPIO_0, 8, true, 0 },
{ "gpt", IMX_SC_R_GPT_0, 5, true, 0 },
{ "kpp", IMX_SC_R_KPP, 1, false, 0 },
{ "fspi", IMX_SC_R_FSPI_0, 2, true, 0 },
{ "mu_a", IMX_SC_R_MU_0A, 14, true, 0 },
{ "mu_b", IMX_SC_R_MU_5B, 9, true, 5 },
/* CONN SS */
{ "usb", IMX_SC_R_USB_0, 2, true, 0 },
{ "usb0phy", IMX_SC_R_USB_0_PHY, 1, false, 0 },
{ "usb1phy", IMX_SC_R_USB_1_PHY, 1, false, 0},
{ "usb2", IMX_SC_R_USB_2, 1, false, 0 },
{ "usb2phy", IMX_SC_R_USB_2_PHY, 1, false, 0 },
{ "sdhc", IMX_SC_R_SDHC_0, 3, true, 0 },
{ "enet", IMX_SC_R_ENET_0, 2, true, 0 },
{ "nand", IMX_SC_R_NAND, 1, false, 0 },
{ "mlb", IMX_SC_R_MLB_0, 1, true, 0 },
/* AUDIO SS */
{ "audio-pll0", IMX_SC_R_AUDIO_PLL_0, 1, false, 0 },
{ "audio-pll1", IMX_SC_R_AUDIO_PLL_1, 1, false, 0 },
{ "audio-clk-0", IMX_SC_R_AUDIO_CLK_0, 1, false, 0 },
{ "audio-clk-1", IMX_SC_R_AUDIO_CLK_1, 1, false, 0 },
{ "mclk-out-0", IMX_SC_R_MCLK_OUT_0, 1, false, 0 },
{ "mclk-out-1", IMX_SC_R_MCLK_OUT_1, 1, false, 0 },
{ "dma0-ch", IMX_SC_R_DMA_0_CH0, 32, true, 0 },
{ "dma1-ch", IMX_SC_R_DMA_1_CH0, 16, true, 0 },
{ "dma2-ch", IMX_SC_R_DMA_2_CH0, 32, true, 0 },
{ "dma3-ch", IMX_SC_R_DMA_3_CH0, 32, true, 0 },
{ "asrc0", IMX_SC_R_ASRC_0, 1, false, 0 },
{ "asrc1", IMX_SC_R_ASRC_1, 1, false, 0 },
{ "esai0", IMX_SC_R_ESAI_0, 1, false, 0 },
{ "esai1", IMX_SC_R_ESAI_1, 1, false, 0 },
{ "spdif0", IMX_SC_R_SPDIF_0, 1, false, 0 },
{ "spdif1", IMX_SC_R_SPDIF_1, 1, false, 0 },
{ "sai", IMX_SC_R_SAI_0, 3, true, 0 },
{ "sai3", IMX_SC_R_SAI_3, 1, false, 0 },
{ "sai4", IMX_SC_R_SAI_4, 1, false, 0 },
{ "sai5", IMX_SC_R_SAI_5, 1, false, 0 },
{ "sai6", IMX_SC_R_SAI_6, 1, false, 0 },
{ "sai7", IMX_SC_R_SAI_7, 1, false, 0 },
{ "amix", IMX_SC_R_AMIX, 1, false, 0 },
{ "mqs0", IMX_SC_R_MQS_0, 1, false, 0 },
{ "dsp", IMX_SC_R_DSP, 1, false, 0 },
{ "dsp-ram", IMX_SC_R_DSP_RAM, 1, false, 0 },
/* DMA SS */
{ "can", IMX_SC_R_CAN_0, 3, true, 0 },
{ "ftm", IMX_SC_R_FTM_0, 2, true, 0 },
{ "lpi2c", IMX_SC_R_I2C_0, 5, true, 0 },
{ "adc", IMX_SC_R_ADC_0, 2, true, 0 },
{ "lcd", IMX_SC_R_LCD_0, 1, true, 0 },
{ "lcd-pll", IMX_SC_R_ELCDIF_PLL, 1, true, 0 },
{ "lcd0-pwm", IMX_SC_R_LCD_0_PWM_0, 1, true, 0 },
{ "lpuart", IMX_SC_R_UART_0, 5, true, 0 },
{ "sim", IMX_SC_R_EMVSIM_0, 2, true, 0 },
{ "lpspi", IMX_SC_R_SPI_0, 4, true, 0 },
{ "irqstr_dsp", IMX_SC_R_IRQSTR_DSP, 1, false, 0 },
/* VPU SS */
{ "vpu", IMX_SC_R_VPU, 1, false, 0 },
{ "vpu-pid", IMX_SC_R_VPU_PID0, 8, true, 0 },
{ "vpu-dec0", IMX_SC_R_VPU_DEC_0, 1, false, 0 },
{ "vpu-enc0", IMX_SC_R_VPU_ENC_0, 1, false, 0 },
{ "vpu-enc1", IMX_SC_R_VPU_ENC_1, 1, false, 0 },
{ "vpu-mu0", IMX_SC_R_VPU_MU_0, 1, false, 0 },
{ "vpu-mu1", IMX_SC_R_VPU_MU_1, 1, false, 0 },
{ "vpu-mu2", IMX_SC_R_VPU_MU_2, 1, false, 0 },
/* GPU SS */
{ "gpu0-pid", IMX_SC_R_GPU_0_PID0, 4, true, 0 },
{ "gpu1-pid", IMX_SC_R_GPU_1_PID0, 4, true, 0 },
/* HSIO SS */
{ "pcie-a", IMX_SC_R_PCIE_A, 1, false, 0 },
{ "serdes-0", IMX_SC_R_SERDES_0, 1, false, 0 },
{ "pcie-b", IMX_SC_R_PCIE_B, 1, false, 0 },
{ "serdes-1", IMX_SC_R_SERDES_1, 1, false, 0 },
{ "sata-0", IMX_SC_R_SATA_0, 1, false, 0 },
{ "hsio-gpio", IMX_SC_R_HSIO_GPIO, 1, false, 0 },
/* MIPI SS */
{ "mipi0", IMX_SC_R_MIPI_0, 1, false, 0 },
{ "mipi0-pwm0", IMX_SC_R_MIPI_0_PWM_0, 1, false, 0 },
{ "mipi0-i2c", IMX_SC_R_MIPI_0_I2C_0, 2, true, 0 },
{ "mipi1", IMX_SC_R_MIPI_1, 1, false, 0 },
{ "mipi1-pwm0", IMX_SC_R_MIPI_1_PWM_0, 1, false, 0 },
{ "mipi1-i2c", IMX_SC_R_MIPI_1_I2C_0, 2, true, 0 },
/* LVDS SS */
{ "lvds0", IMX_SC_R_LVDS_0, 1, false, 0 },
{ "lvds0-pwm", IMX_SC_R_LVDS_0_PWM_0, 1, false, 0 },
{ "lvds0-lpi2c", IMX_SC_R_LVDS_0_I2C_0, 2, true, 0 },
{ "lvds1", IMX_SC_R_LVDS_1, 1, false, 0 },
{ "lvds1-pwm", IMX_SC_R_LVDS_1_PWM_0, 1, false, 0 },
{ "lvds1-lpi2c", IMX_SC_R_LVDS_1_I2C_0, 2, true, 0 },
{ "mipi1", IMX_SC_R_MIPI_1, 1, 0 },
{ "mipi1-pwm0", IMX_SC_R_MIPI_1_PWM_0, 1, 0 },
{ "mipi1-i2c", IMX_SC_R_MIPI_1_I2C_0, 2, 1 },
{ "lvds1", IMX_SC_R_LVDS_1, 1, 0 },
/* DC SS */
{ "dc0", IMX_SC_R_DC_0, 1, false, 0 },
{ "dc0-pll", IMX_SC_R_DC_0_PLL_0, 2, true, 0 },
{ "dc0-video", IMX_SC_R_DC_0_VIDEO0, 2, true, 0 },
{ "dc1", IMX_SC_R_DC_1, 1, false, 0 },
{ "dc1-pll", IMX_SC_R_DC_1_PLL_0, 2, true, 0 },
{ "dc1-video", IMX_SC_R_DC_1_VIDEO0, 2, true, 0 },
/* CM40 SS */
{ "cm40-i2c", IMX_SC_R_M4_0_I2C, 1, false, 0 },
{ "cm40-intmux", IMX_SC_R_M4_0_INTMUX, 1, false, 0 },
{ "cm40-pid", IMX_SC_R_M4_0_PID0, 5, true, 0},
{ "cm40-mu-a1", IMX_SC_R_M4_0_MU_1A, 1, false, 0},
{ "cm40-lpuart", IMX_SC_R_M4_0_UART, 1, false, 0},
/* CM41 SS */
{ "cm41-i2c", IMX_SC_R_M4_1_I2C, 1, false, 0 },
{ "cm41-intmux", IMX_SC_R_M4_1_INTMUX, 1, false, 0 },
{ "cm41-pid", IMX_SC_R_M4_1_PID0, 5, true, 0},
{ "cm41-mu-a1", IMX_SC_R_M4_1_MU_1A, 1, false, 0},
{ "cm41-lpuart", IMX_SC_R_M4_1_UART, 1, false, 0},
/* CM41 SS */
{ "cm41_i2c", IMX_SC_R_M4_1_I2C, 1, false, 0 },
{ "cm41_intmux", IMX_SC_R_M4_1_INTMUX, 1, false, 0 },
/* DB SS */
{ "perf", IMX_SC_R_PERF, 1, false, 0},
/* IMAGE SS */
{ "img-jpegdec-mp", IMX_SC_R_MJPEG_DEC_MP, 1, false, 0 },
{ "img-jpegdec-s0", IMX_SC_R_MJPEG_DEC_S0, 4, true, 0 },
{ "img-jpegenc-mp", IMX_SC_R_MJPEG_ENC_MP, 1, false, 0 },
{ "img-jpegenc-s0", IMX_SC_R_MJPEG_ENC_S0, 4, true, 0 },
/* SECO SS */
{ "seco_mu", IMX_SC_R_SECO_MU_2, 3, true, 2},
/* V2X SS */
{ "v2x_mu", IMX_SC_R_V2X_MU_0, 2, true, 0},
{ "v2x_mu", IMX_SC_R_V2X_MU_2, 1, true, 2},
{ "v2x_mu", IMX_SC_R_V2X_MU_3, 2, true, 3},
{ "img-pdma", IMX_SC_R_ISI_CH0, 8, true, 0 },
{ "img-csi0", IMX_SC_R_CSI_0, 1, false, 0 },
{ "img-csi0-i2c0", IMX_SC_R_CSI_0_I2C_0, 1, false, 0 },
{ "img-csi0-pwm0", IMX_SC_R_CSI_0_PWM_0, 1, false, 0 },
{ "img-csi1", IMX_SC_R_CSI_1, 1, false, 0 },
{ "img-csi1-i2c0", IMX_SC_R_CSI_1_I2C_0, 1, false, 0 },
{ "img-csi1-pwm0", IMX_SC_R_CSI_1_PWM_0, 1, false, 0 },
{ "img-parallel", IMX_SC_R_PI_0, 1, false, 0 },
{ "img-parallel-i2c0", IMX_SC_R_PI_0_I2C_0, 1, false, 0 },
{ "img-parallel-pwm0", IMX_SC_R_PI_0_PWM_0, 2, true, 0 },
{ "img-parallel-pll", IMX_SC_R_PI_0_PLL, 1, false, 0 },
/* HDMI TX SS */
{ "hdmi-tx", IMX_SC_R_HDMI, 1, false, 0},
{ "hdmi-tx-i2s", IMX_SC_R_HDMI_I2S, 1, false, 0},
{ "hdmi-tx-i2c0", IMX_SC_R_HDMI_I2C_0, 1, false, 0},
{ "hdmi-tx-pll0", IMX_SC_R_HDMI_PLL_0, 1, false, 0},
{ "hdmi-tx-pll1", IMX_SC_R_HDMI_PLL_1, 1, false, 0},
/* HDMI RX SS */
{ "hdmi-rx", IMX_SC_R_HDMI_RX, 1, false, 0},
{ "hdmi-rx-pwm", IMX_SC_R_HDMI_RX_PWM_0, 1, false, 0},
{ "hdmi-rx-i2c0", IMX_SC_R_HDMI_RX_I2C_0, 1, false, 0},
{ "hdmi-rx-bypass", IMX_SC_R_HDMI_RX_BYPASS, 1, false, 0},
/* SECURITY SS */
{ "sec-jr", IMX_SC_R_CAAM_JR2, 2, true, 2},
/* BOARD SS */
{ "board", IMX_SC_R_BOARD_R0, 8, true, 0},
};
static const struct imx_sc_pd_soc imx8qxp_scu_pd = {
.pd_ranges = imx8qxp_scu_pd_ranges,
.num_ranges = ARRAY_SIZE(imx8qxp_scu_pd_ranges),
};
static struct imx_sc_ipc *pm_ipc_handle;
static inline struct imx_sc_pm_domain *
to_imx_sc_pd(struct generic_pm_domain *genpd)
{
return container_of(genpd, struct imx_sc_pm_domain, pd);
}
static void imx_sc_pd_get_console_rsrc(void)
{
struct of_phandle_args specs;
int ret;
if (!of_stdout)
return;
ret = of_parse_phandle_with_args(of_stdout, "power-domains",
"#power-domain-cells",
0, &specs);
if (ret)
return;
imx_con_rsrc = specs.args[0];
}
static int imx_sc_get_pd_power(struct device *dev, u32 rsrc)
{
struct imx_sc_msg_req_get_resource_power_mode msg;
struct imx_sc_rpc_msg *hdr = &msg.hdr;
int ret;
hdr->ver = IMX_SC_RPC_VERSION;
hdr->svc = IMX_SC_RPC_SVC_PM;
hdr->func = IMX_SC_PM_FUNC_GET_RESOURCE_POWER_MODE;
hdr->size = 2;
msg.data.req.resource = rsrc;
ret = imx_scu_call_rpc(pm_ipc_handle, &msg, true);
if (ret)
dev_err(dev, "failed to get power resource %d mode, ret %d\n",
rsrc, ret);
return msg.data.resp.mode;
}
static int imx_sc_pd_power(struct generic_pm_domain *domain, bool power_on)
{
struct imx_sc_msg_req_set_resource_power_mode msg;
struct imx_sc_rpc_msg *hdr = &msg.hdr;
struct imx_sc_pm_domain *pd;
int ret;
pd = to_imx_sc_pd(domain);
hdr->ver = IMX_SC_RPC_VERSION;
hdr->svc = IMX_SC_RPC_SVC_PM;
hdr->func = IMX_SC_PM_FUNC_SET_RESOURCE_POWER_MODE;
hdr->size = 2;
msg.resource = pd->rsrc;
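	/* "Power off" actually requests low-power mode, not IMX_SC_PM_PW_MODE_OFF */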
msg.mode = power_on ? IMX_SC_PM_PW_MODE_ON : IMX_SC_PM_PW_MODE_LP;
/* keep uart console power on for no_console_suspend */
if (imx_con_rsrc == pd->rsrc && !console_suspend_enabled && !power_on)
return -EBUSY;
ret = imx_scu_call_rpc(pm_ipc_handle, &msg, true);
if (ret)
dev_err(&domain->dev, "failed to power %s resource %d ret %d\n",
power_on ? "up" : "off", pd->rsrc, ret);
return ret;
}
static int imx_sc_pd_power_on(struct generic_pm_domain *domain)
{
return imx_sc_pd_power(domain, true);
}
static int imx_sc_pd_power_off(struct generic_pm_domain *domain)
{
return imx_sc_pd_power(domain, false);
}
static struct generic_pm_domain *imx_scu_pd_xlate(struct of_phandle_args *spec,
void *data)
{
struct generic_pm_domain *domain = ERR_PTR(-ENOENT);
struct genpd_onecell_data *pd_data = data;
unsigned int i;
for (i = 0; i < pd_data->num_domains; i++) {
struct imx_sc_pm_domain *sc_pd;
sc_pd = to_imx_sc_pd(pd_data->domains[i]);
if (sc_pd->rsrc == spec->args[0]) {
domain = &sc_pd->pd;
break;
}
}
return domain;
}
static struct imx_sc_pm_domain *
imx_scu_add_pm_domain(struct device *dev, int idx,
const struct imx_sc_pd_range *pd_ranges)
{
struct imx_sc_pm_domain *sc_pd;
bool is_off;
int mode, ret;
if (!imx_sc_rm_is_resource_owned(pm_ipc_handle, pd_ranges->rsrc + idx))
return NULL;
sc_pd = devm_kzalloc(dev, sizeof(*sc_pd), GFP_KERNEL);
if (!sc_pd)
return ERR_PTR(-ENOMEM);
sc_pd->rsrc = pd_ranges->rsrc + idx;
sc_pd->pd.power_off = imx_sc_pd_power_off;
sc_pd->pd.power_on = imx_sc_pd_power_on;
if (pd_ranges->postfix)
snprintf(sc_pd->name, sizeof(sc_pd->name),
"%s%i", pd_ranges->name, pd_ranges->start_from + idx);
else
snprintf(sc_pd->name, sizeof(sc_pd->name),
"%s", pd_ranges->name);
sc_pd->pd.name = sc_pd->name;
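	/* Keep the console UART resource powered during runtime PM */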
if (imx_con_rsrc == sc_pd->rsrc)
sc_pd->pd.flags = GENPD_FLAG_RPM_ALWAYS_ON;
mode = imx_sc_get_pd_power(dev, pd_ranges->rsrc + idx);
if (mode == IMX_SC_PM_PW_MODE_ON)
is_off = false;
else
is_off = true;
dev_dbg(dev, "%s : %s\n", sc_pd->name, imx_sc_pm_mode[mode]);
if (sc_pd->rsrc >= IMX_SC_R_LAST) {
dev_warn(dev, "invalid pd %s rsrc id %d found",
sc_pd->name, sc_pd->rsrc);
devm_kfree(dev, sc_pd);
return NULL;
}
ret = pm_genpd_init(&sc_pd->pd, NULL, is_off);
if (ret) {
dev_warn(dev, "failed to init pd %s rsrc id %d",
sc_pd->name, sc_pd->rsrc);
devm_kfree(dev, sc_pd);
return NULL;
}
return sc_pd;
}
static int imx_scu_init_pm_domains(struct device *dev,
const struct imx_sc_pd_soc *pd_soc)
{
const struct imx_sc_pd_range *pd_ranges = pd_soc->pd_ranges;
struct generic_pm_domain **domains;
struct genpd_onecell_data *pd_data;
struct imx_sc_pm_domain *sc_pd;
u32 count = 0;
int i, j;
for (i = 0; i < pd_soc->num_ranges; i++)
count += pd_ranges[i].num;
domains = devm_kcalloc(dev, count, sizeof(*domains), GFP_KERNEL);
if (!domains)
return -ENOMEM;
pd_data = devm_kzalloc(dev, sizeof(*pd_data), GFP_KERNEL);
if (!pd_data)
return -ENOMEM;
count = 0;
for (i = 0; i < pd_soc->num_ranges; i++) {
for (j = 0; j < pd_ranges[i].num; j++) {
sc_pd = imx_scu_add_pm_domain(dev, j, &pd_ranges[i]);
if (IS_ERR_OR_NULL(sc_pd))
continue;
domains[count++] = &sc_pd->pd;
dev_dbg(dev, "added power domain %s\n", sc_pd->pd.name);
}
}
pd_data->domains = domains;
pd_data->num_domains = count;
pd_data->xlate = imx_scu_pd_xlate;
of_genpd_add_provider_onecell(dev->of_node, pd_data);
return 0;
}
static int imx_sc_pd_probe(struct platform_device *pdev)
{
const struct imx_sc_pd_soc *pd_soc;
int ret;
ret = imx_scu_get_handle(&pm_ipc_handle);
if (ret)
return ret;
pd_soc = of_device_get_match_data(&pdev->dev);
if (!pd_soc)
return -ENODEV;
imx_sc_pd_get_console_rsrc();
return imx_scu_init_pm_domains(&pdev->dev, pd_soc);
}
static const struct of_device_id imx_sc_pd_match[] = {
{ .compatible = "fsl,imx8qxp-scu-pd", &imx8qxp_scu_pd},
{ .compatible = "fsl,scu-pd", &imx8qxp_scu_pd},
{ /* sentinel */ }
};
static struct platform_driver imx_sc_pd_driver = {
.driver = {
.name = "imx-scu-pd",
.of_match_table = imx_sc_pd_match,
.suppress_bind_attrs = true,
},
.probe = imx_sc_pd_probe,
};
builtin_platform_driver(imx_sc_pd_driver);
MODULE_AUTHOR("Dong Aisheng <[email protected]>");
MODULE_DESCRIPTION("IMX SCU Power Domain driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/pmdomain/imx/scu-pd.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2022 NXP
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#define MIX_SLICE_SW_CTRL_OFF 0x20
#define SLICE_SW_CTRL_PSW_CTRL_OFF_MASK BIT(4)
#define SLICE_SW_CTRL_PDN_SOFT_MASK BIT(31)
#define MIX_FUNC_STAT_OFF 0xB4
#define FUNC_STAT_PSW_STAT_MASK BIT(0)
#define FUNC_STAT_RST_STAT_MASK BIT(2)
#define FUNC_STAT_ISO_STAT_MASK BIT(4)
struct imx93_power_domain {
struct generic_pm_domain genpd;
struct device *dev;
void __iomem *addr;
struct clk_bulk_data *clks;
int num_clks;
bool init_off;
};
#define to_imx93_pd(_genpd) container_of(_genpd, struct imx93_power_domain, genpd)
static int imx93_pd_on(struct generic_pm_domain *genpd)
{
struct imx93_power_domain *domain = to_imx93_pd(genpd);
void __iomem *addr = domain->addr;
u32 val;
int ret;
ret = clk_bulk_prepare_enable(domain->num_clks, domain->clks);
if (ret) {
dev_err(domain->dev, "failed to enable clocks for domain: %s\n", genpd->name);
return ret;
}
val = readl(addr + MIX_SLICE_SW_CTRL_OFF);
val &= ~SLICE_SW_CTRL_PDN_SOFT_MASK;
writel(val, addr + MIX_SLICE_SW_CTRL_OFF);
ret = readl_poll_timeout(addr + MIX_FUNC_STAT_OFF, val,
!(val & FUNC_STAT_ISO_STAT_MASK), 1, 10000);
if (ret) {
dev_err(domain->dev, "pd_on timeout: name: %s, stat: %x\n", genpd->name, val);
return ret;
}
return 0;
}
static int imx93_pd_off(struct generic_pm_domain *genpd)
{
struct imx93_power_domain *domain = to_imx93_pd(genpd);
void __iomem *addr = domain->addr;
int ret;
u32 val;
/* Power off MIX */
val = readl(addr + MIX_SLICE_SW_CTRL_OFF);
val |= SLICE_SW_CTRL_PDN_SOFT_MASK;
writel(val, addr + MIX_SLICE_SW_CTRL_OFF);
ret = readl_poll_timeout(addr + MIX_FUNC_STAT_OFF, val,
val & FUNC_STAT_PSW_STAT_MASK, 1, 1000);
if (ret) {
dev_err(domain->dev, "pd_off timeout: name: %s, stat: %x\n", genpd->name, val);
return ret;
}
clk_bulk_disable_unprepare(domain->num_clks, domain->clks);
return 0;
}
static int imx93_pd_remove(struct platform_device *pdev)
{
struct imx93_power_domain *domain = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
if (!domain->init_off)
clk_bulk_disable_unprepare(domain->num_clks, domain->clks);
of_genpd_del_provider(np);
pm_genpd_remove(&domain->genpd);
return 0;
}
static int imx93_pd_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct imx93_power_domain *domain;
int ret;
domain = devm_kzalloc(dev, sizeof(*domain), GFP_KERNEL);
if (!domain)
return -ENOMEM;
domain->addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(domain->addr))
return PTR_ERR(domain->addr);
domain->num_clks = devm_clk_bulk_get_all(dev, &domain->clks);
if (domain->num_clks < 0)
return dev_err_probe(dev, domain->num_clks, "Failed to get domain's clocks\n");
domain->genpd.name = dev_name(dev);
domain->genpd.power_off = imx93_pd_off;
domain->genpd.power_on = imx93_pd_on;
domain->dev = dev;
domain->init_off = readl(domain->addr + MIX_FUNC_STAT_OFF) & FUNC_STAT_ISO_STAT_MASK;
/* Just to sync the status of hardware */
if (!domain->init_off) {
ret = clk_bulk_prepare_enable(domain->num_clks, domain->clks);
if (ret) {
dev_err(domain->dev, "failed to enable clocks for domain: %s\n",
domain->genpd.name);
return ret;
}
}
ret = pm_genpd_init(&domain->genpd, NULL, domain->init_off);
if (ret)
goto err_clk_unprepare;
platform_set_drvdata(pdev, domain);
ret = of_genpd_add_provider_simple(np, &domain->genpd);
if (ret)
goto err_genpd_remove;
return 0;
err_genpd_remove:
pm_genpd_remove(&domain->genpd);
err_clk_unprepare:
if (!domain->init_off)
clk_bulk_disable_unprepare(domain->num_clks, domain->clks);
return ret;
}
static const struct of_device_id imx93_pd_ids[] = {
{ .compatible = "fsl,imx93-src-slice" },
{ }
};
MODULE_DEVICE_TABLE(of, imx93_pd_ids);
static struct platform_driver imx93_power_domain_driver = {
.driver = {
.name = "imx93_power_domain",
.of_match_table = imx93_pd_ids,
},
.probe = imx93_pd_probe,
.remove = imx93_pd_remove,
};
module_platform_driver(imx93_power_domain_driver);
MODULE_AUTHOR("Peng Fan <[email protected]>");
MODULE_DESCRIPTION("NXP i.MX93 power domain driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/pmdomain/imx/imx93-pd.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2022 Pengutronix, Lucas Stach <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <dt-bindings/power/imx8mp-power.h>
#define GPR_REG0 0x0
#define PCIE_CLOCK_MODULE_EN BIT(0)
#define USB_CLOCK_MODULE_EN BIT(1)
#define PCIE_PHY_APB_RST BIT(4)
#define PCIE_PHY_INIT_RST BIT(5)
#define GPR_REG1 0x4
#define PLL_LOCK BIT(13)
#define GPR_REG2 0x8
#define P_PLL_MASK GENMASK(5, 0)
#define M_PLL_MASK GENMASK(15, 6)
#define S_PLL_MASK GENMASK(18, 16)
#define GPR_REG3 0xc
#define PLL_CKE BIT(17)
#define PLL_RST BIT(31)
struct imx8mp_blk_ctrl_domain;
struct imx8mp_blk_ctrl {
struct device *dev;
struct notifier_block power_nb;
struct device *bus_power_dev;
struct regmap *regmap;
struct imx8mp_blk_ctrl_domain *domains;
struct genpd_onecell_data onecell_data;
void (*power_off) (struct imx8mp_blk_ctrl *bc, struct imx8mp_blk_ctrl_domain *domain);
void (*power_on) (struct imx8mp_blk_ctrl *bc, struct imx8mp_blk_ctrl_domain *domain);
};
struct imx8mp_blk_ctrl_domain_data {
const char *name;
const char * const *clk_names;
int num_clks;
const char * const *path_names;
int num_paths;
const char *gpc_name;
};
#define DOMAIN_MAX_CLKS 2
#define DOMAIN_MAX_PATHS 3
struct imx8mp_blk_ctrl_domain {
struct generic_pm_domain genpd;
const struct imx8mp_blk_ctrl_domain_data *data;
struct clk_bulk_data clks[DOMAIN_MAX_CLKS];
struct icc_bulk_data paths[DOMAIN_MAX_PATHS];
struct device *power_dev;
struct imx8mp_blk_ctrl *bc;
int num_paths;
int id;
};
struct imx8mp_blk_ctrl_data {
int max_reg;
int (*probe) (struct imx8mp_blk_ctrl *bc);
notifier_fn_t power_notifier_fn;
void (*power_off) (struct imx8mp_blk_ctrl *bc, struct imx8mp_blk_ctrl_domain *domain);
void (*power_on) (struct imx8mp_blk_ctrl *bc, struct imx8mp_blk_ctrl_domain *domain);
const struct imx8mp_blk_ctrl_domain_data *domains;
int num_domains;
};
static inline struct imx8mp_blk_ctrl_domain *
to_imx8mp_blk_ctrl_domain(struct generic_pm_domain *genpd)
{
return container_of(genpd, struct imx8mp_blk_ctrl_domain, genpd);
}
struct clk_hsio_pll {
struct clk_hw hw;
struct regmap *regmap;
};
static inline struct clk_hsio_pll *to_clk_hsio_pll(struct clk_hw *hw)
{
return container_of(hw, struct clk_hsio_pll, hw);
}
static int clk_hsio_pll_prepare(struct clk_hw *hw)
{
struct clk_hsio_pll *clk = to_clk_hsio_pll(hw);
u32 val;
/* set the PLL configuration */
regmap_update_bits(clk->regmap, GPR_REG2,
P_PLL_MASK | M_PLL_MASK | S_PLL_MASK,
FIELD_PREP(P_PLL_MASK, 12) |
FIELD_PREP(M_PLL_MASK, 800) |
FIELD_PREP(S_PLL_MASK, 4));
/* de-assert PLL reset */
regmap_update_bits(clk->regmap, GPR_REG3, PLL_RST, PLL_RST);
/* enable PLL */
regmap_update_bits(clk->regmap, GPR_REG3, PLL_CKE, PLL_CKE);
return regmap_read_poll_timeout(clk->regmap, GPR_REG1, val,
val & PLL_LOCK, 10, 100);
}
static void clk_hsio_pll_unprepare(struct clk_hw *hw)
{
struct clk_hsio_pll *clk = to_clk_hsio_pll(hw);
regmap_update_bits(clk->regmap, GPR_REG3, PLL_RST | PLL_CKE, 0);
}
static int clk_hsio_pll_is_prepared(struct clk_hw *hw)
{
struct clk_hsio_pll *clk = to_clk_hsio_pll(hw);
return regmap_test_bits(clk->regmap, GPR_REG1, PLL_LOCK);
}
static unsigned long clk_hsio_pll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
return 100000000;
}
static const struct clk_ops clk_hsio_pll_ops = {
.prepare = clk_hsio_pll_prepare,
.unprepare = clk_hsio_pll_unprepare,
.is_prepared = clk_hsio_pll_is_prepared,
.recalc_rate = clk_hsio_pll_recalc_rate,
};
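/*
 * HSIO specific probe step: register the internal HSIO PLL (modelled as a
 * fixed 100 MHz clock derived from osc_24m) and expose it through a simple
 * OF clock provider.
 */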
static int imx8mp_hsio_blk_ctrl_probe(struct imx8mp_blk_ctrl *bc)
{
struct clk_hsio_pll *clk_hsio_pll;
struct clk_hw *hw;
struct clk_init_data init = {};
int ret;
clk_hsio_pll = devm_kzalloc(bc->dev, sizeof(*clk_hsio_pll), GFP_KERNEL);
if (!clk_hsio_pll)
return -ENOMEM;
init.name = "hsio_pll";
init.ops = &clk_hsio_pll_ops;
init.parent_names = (const char *[]){"osc_24m"};
init.num_parents = 1;
clk_hsio_pll->regmap = bc->regmap;
clk_hsio_pll->hw.init = &init;
hw = &clk_hsio_pll->hw;
ret = devm_clk_hw_register(bc->bus_power_dev, hw);
if (ret)
return ret;
return devm_of_clk_add_hw_provider(bc->dev, of_clk_hw_simple_get, hw);
}
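/*
 * Domain specific register setup on power up: ungate the USB/PCIe clock
 * modules or release the PCIe PHY resets, depending on which domain is
 * being powered.
 */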
static void imx8mp_hsio_blk_ctrl_power_on(struct imx8mp_blk_ctrl *bc,
struct imx8mp_blk_ctrl_domain *domain)
{
switch (domain->id) {
case IMX8MP_HSIOBLK_PD_USB:
regmap_set_bits(bc->regmap, GPR_REG0, USB_CLOCK_MODULE_EN);
break;
case IMX8MP_HSIOBLK_PD_PCIE:
regmap_set_bits(bc->regmap, GPR_REG0, PCIE_CLOCK_MODULE_EN);
break;
case IMX8MP_HSIOBLK_PD_PCIE_PHY:
regmap_set_bits(bc->regmap, GPR_REG0,
PCIE_PHY_APB_RST | PCIE_PHY_INIT_RST);
break;
default:
break;
}
}
static void imx8mp_hsio_blk_ctrl_power_off(struct imx8mp_blk_ctrl *bc,
struct imx8mp_blk_ctrl_domain *domain)
{
switch (domain->id) {
case IMX8MP_HSIOBLK_PD_USB:
regmap_clear_bits(bc->regmap, GPR_REG0, USB_CLOCK_MODULE_EN);
break;
case IMX8MP_HSIOBLK_PD_PCIE:
regmap_clear_bits(bc->regmap, GPR_REG0, PCIE_CLOCK_MODULE_EN);
break;
case IMX8MP_HSIOBLK_PD_PCIE_PHY:
regmap_clear_bits(bc->regmap, GPR_REG0,
PCIE_PHY_APB_RST | PCIE_PHY_INIT_RST);
break;
default:
break;
}
}
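/*
 * Bus domain power notifier: the USB clock is briefly enabled around the
 * GPC power transitions so the ADB handshake can complete.
 */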
static int imx8mp_hsio_power_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
struct imx8mp_blk_ctrl *bc = container_of(nb, struct imx8mp_blk_ctrl,
power_nb);
struct clk_bulk_data *usb_clk = bc->domains[IMX8MP_HSIOBLK_PD_USB].clks;
int num_clks = bc->domains[IMX8MP_HSIOBLK_PD_USB].data->num_clks;
int ret;
switch (action) {
case GENPD_NOTIFY_ON:
/*
* enable USB clock for a moment for the power-on ADB handshake
* to proceed
*/
ret = clk_bulk_prepare_enable(num_clks, usb_clk);
if (ret)
return NOTIFY_BAD;
regmap_set_bits(bc->regmap, GPR_REG0, USB_CLOCK_MODULE_EN);
udelay(5);
regmap_clear_bits(bc->regmap, GPR_REG0, USB_CLOCK_MODULE_EN);
clk_bulk_disable_unprepare(num_clks, usb_clk);
break;
case GENPD_NOTIFY_PRE_OFF:
/* enable USB clock for the power-down ADB handshake to work */
ret = clk_bulk_prepare_enable(num_clks, usb_clk);
if (ret)
return NOTIFY_BAD;
regmap_set_bits(bc->regmap, GPR_REG0, USB_CLOCK_MODULE_EN);
break;
case GENPD_NOTIFY_OFF:
clk_bulk_disable_unprepare(num_clks, usb_clk);
break;
default:
break;
}
return NOTIFY_OK;
}
static const struct imx8mp_blk_ctrl_domain_data imx8mp_hsio_domain_data[] = {
[IMX8MP_HSIOBLK_PD_USB] = {
.name = "hsioblk-usb",
.clk_names = (const char *[]){ "usb" },
.num_clks = 1,
.gpc_name = "usb",
.path_names = (const char *[]){"usb1", "usb2"},
.num_paths = 2,
},
[IMX8MP_HSIOBLK_PD_USB_PHY1] = {
.name = "hsioblk-usb-phy1",
.gpc_name = "usb-phy1",
},
[IMX8MP_HSIOBLK_PD_USB_PHY2] = {
.name = "hsioblk-usb-phy2",
.gpc_name = "usb-phy2",
},
[IMX8MP_HSIOBLK_PD_PCIE] = {
.name = "hsioblk-pcie",
.clk_names = (const char *[]){ "pcie" },
.num_clks = 1,
.gpc_name = "pcie",
.path_names = (const char *[]){"noc-pcie", "pcie"},
.num_paths = 2,
},
[IMX8MP_HSIOBLK_PD_PCIE_PHY] = {
.name = "hsioblk-pcie-phy",
.gpc_name = "pcie-phy",
},
};
static const struct imx8mp_blk_ctrl_data imx8mp_hsio_blk_ctl_dev_data = {
.max_reg = 0x24,
.probe = imx8mp_hsio_blk_ctrl_probe,
.power_on = imx8mp_hsio_blk_ctrl_power_on,
.power_off = imx8mp_hsio_blk_ctrl_power_off,
.power_notifier_fn = imx8mp_hsio_power_notifier,
.domains = imx8mp_hsio_domain_data,
.num_domains = ARRAY_SIZE(imx8mp_hsio_domain_data),
};
#define HDMI_RTX_RESET_CTL0 0x20
#define HDMI_RTX_CLK_CTL0 0x40
#define HDMI_RTX_CLK_CTL1 0x50
#define HDMI_RTX_CLK_CTL2 0x60
#define HDMI_RTX_CLK_CTL3 0x70
#define HDMI_RTX_CLK_CTL4 0x80
#define HDMI_TX_CONTROL0 0x200
#define HDMI_LCDIF_NOC_HURRY_MASK GENMASK(14, 12)
static void imx8mp_hdmi_blk_ctrl_power_on(struct imx8mp_blk_ctrl *bc,
struct imx8mp_blk_ctrl_domain *domain)
{
switch (domain->id) {
case IMX8MP_HDMIBLK_PD_IRQSTEER:
regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL0, BIT(9));
regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(16));
break;
case IMX8MP_HDMIBLK_PD_LCDIF:
regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL0,
BIT(16) | BIT(17) | BIT(18) |
BIT(19) | BIT(20));
regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(11));
regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0,
BIT(4) | BIT(5) | BIT(6));
regmap_set_bits(bc->regmap, HDMI_TX_CONTROL0,
FIELD_PREP(HDMI_LCDIF_NOC_HURRY_MASK, 7));
break;
case IMX8MP_HDMIBLK_PD_PAI:
regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(17));
regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(18));
break;
case IMX8MP_HDMIBLK_PD_PVI:
regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(28));
regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(22));
break;
case IMX8MP_HDMIBLK_PD_TRNG:
regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(27) | BIT(30));
regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(20));
break;
case IMX8MP_HDMIBLK_PD_HDMI_TX:
regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL0,
BIT(2) | BIT(4) | BIT(5));
regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL1,
BIT(12) | BIT(13) | BIT(14) | BIT(15) | BIT(16) |
BIT(18) | BIT(19) | BIT(20) | BIT(21));
regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0,
BIT(7) | BIT(10) | BIT(11));
regmap_set_bits(bc->regmap, HDMI_TX_CONTROL0, BIT(1));
break;
case IMX8MP_HDMIBLK_PD_HDMI_TX_PHY:
regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL0, BIT(7));
regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(22) | BIT(24));
regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(12));
regmap_clear_bits(bc->regmap, HDMI_TX_CONTROL0, BIT(3));
break;
case IMX8MP_HDMIBLK_PD_HDCP:
regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL0, BIT(11));
break;
case IMX8MP_HDMIBLK_PD_HRV:
regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(3) | BIT(4) | BIT(5));
regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(15));
break;
default:
break;
}
}
static void imx8mp_hdmi_blk_ctrl_power_off(struct imx8mp_blk_ctrl *bc,
struct imx8mp_blk_ctrl_domain *domain)
{
switch (domain->id) {
case IMX8MP_HDMIBLK_PD_IRQSTEER:
regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL0, BIT(9));
regmap_clear_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(16));
break;
case IMX8MP_HDMIBLK_PD_LCDIF:
regmap_clear_bits(bc->regmap, HDMI_RTX_RESET_CTL0,
BIT(4) | BIT(5) | BIT(6));
regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(11));
regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL0,
BIT(16) | BIT(17) | BIT(18) |
BIT(19) | BIT(20));
break;
case IMX8MP_HDMIBLK_PD_PAI:
regmap_clear_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(18));
regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(17));
break;
case IMX8MP_HDMIBLK_PD_PVI:
regmap_clear_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(22));
regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(28));
break;
case IMX8MP_HDMIBLK_PD_TRNG:
regmap_clear_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(20));
regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(27) | BIT(30));
break;
case IMX8MP_HDMIBLK_PD_HDMI_TX:
regmap_clear_bits(bc->regmap, HDMI_TX_CONTROL0, BIT(1));
regmap_clear_bits(bc->regmap, HDMI_RTX_RESET_CTL0,
BIT(7) | BIT(10) | BIT(11));
regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL1,
BIT(12) | BIT(13) | BIT(14) | BIT(15) | BIT(16) |
BIT(18) | BIT(19) | BIT(20) | BIT(21));
regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL0,
BIT(2) | BIT(4) | BIT(5));
break;
case IMX8MP_HDMIBLK_PD_HDMI_TX_PHY:
regmap_set_bits(bc->regmap, HDMI_TX_CONTROL0, BIT(3));
regmap_clear_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(12));
regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL0, BIT(7));
regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(22) | BIT(24));
break;
case IMX8MP_HDMIBLK_PD_HDCP:
regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL0, BIT(11));
break;
case IMX8MP_HDMIBLK_PD_HRV:
regmap_clear_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(15));
regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(3) | BIT(4) | BIT(5));
break;
default:
break;
}
}
static int imx8mp_hdmi_power_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
struct imx8mp_blk_ctrl *bc = container_of(nb, struct imx8mp_blk_ctrl,
power_nb);
if (action != GENPD_NOTIFY_ON)
return NOTIFY_OK;
/*
* Contrary to other blk-ctrls, the reset and clock bits don't clear when the
* power domain is powered down. To ensure the proper reset pulsing,
* first clear them all to asserted state, then enable the bus clocks
* and then release the ADB reset.
*/
regmap_write(bc->regmap, HDMI_RTX_RESET_CTL0, 0x0);
regmap_write(bc->regmap, HDMI_RTX_CLK_CTL0, 0x0);
regmap_write(bc->regmap, HDMI_RTX_CLK_CTL1, 0x0);
regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL0,
BIT(0) | BIT(1) | BIT(10));
regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(0));
/*
* On power up we have no software backchannel to the GPC to
* wait for the ADB handshake to happen, so we just delay for a
* bit. On power down the GPC driver waits for the handshake.
*/
udelay(5);
return NOTIFY_OK;
}
static const struct imx8mp_blk_ctrl_domain_data imx8mp_hdmi_domain_data[] = {
[IMX8MP_HDMIBLK_PD_IRQSTEER] = {
.name = "hdmiblk-irqsteer",
.clk_names = (const char *[]){ "apb" },
.num_clks = 1,
.gpc_name = "irqsteer",
},
[IMX8MP_HDMIBLK_PD_LCDIF] = {
.name = "hdmiblk-lcdif",
.clk_names = (const char *[]){ "axi", "apb" },
.num_clks = 2,
.gpc_name = "lcdif",
.path_names = (const char *[]){"lcdif-hdmi"},
.num_paths = 1,
},
[IMX8MP_HDMIBLK_PD_PAI] = {
.name = "hdmiblk-pai",
.clk_names = (const char *[]){ "apb" },
.num_clks = 1,
.gpc_name = "pai",
},
[IMX8MP_HDMIBLK_PD_PVI] = {
.name = "hdmiblk-pvi",
.clk_names = (const char *[]){ "apb" },
.num_clks = 1,
.gpc_name = "pvi",
},
[IMX8MP_HDMIBLK_PD_TRNG] = {
.name = "hdmiblk-trng",
.clk_names = (const char *[]){ "apb" },
.num_clks = 1,
.gpc_name = "trng",
},
[IMX8MP_HDMIBLK_PD_HDMI_TX] = {
.name = "hdmiblk-hdmi-tx",
.clk_names = (const char *[]){ "apb", "ref_266m" },
.num_clks = 2,
.gpc_name = "hdmi-tx",
},
[IMX8MP_HDMIBLK_PD_HDMI_TX_PHY] = {
.name = "hdmiblk-hdmi-tx-phy",
.clk_names = (const char *[]){ "apb", "ref_24m" },
.num_clks = 2,
.gpc_name = "hdmi-tx-phy",
},
[IMX8MP_HDMIBLK_PD_HRV] = {
.name = "hdmiblk-hrv",
.clk_names = (const char *[]){ "axi", "apb" },
.num_clks = 2,
.gpc_name = "hrv",
.path_names = (const char *[]){"hrv"},
.num_paths = 1,
},
[IMX8MP_HDMIBLK_PD_HDCP] = {
.name = "hdmiblk-hdcp",
.clk_names = (const char *[]){ "axi", "apb" },
.num_clks = 2,
.gpc_name = "hdcp",
.path_names = (const char *[]){"hdcp"},
.num_paths = 1,
},
};
static const struct imx8mp_blk_ctrl_data imx8mp_hdmi_blk_ctl_dev_data = {
.max_reg = 0x23c,
.power_on = imx8mp_hdmi_blk_ctrl_power_on,
.power_off = imx8mp_hdmi_blk_ctrl_power_off,
.power_notifier_fn = imx8mp_hdmi_power_notifier,
.domains = imx8mp_hdmi_domain_data,
.num_domains = ARRAY_SIZE(imx8mp_hdmi_domain_data),
};
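/*
 * Common genpd power-on path for all i.MX8MP blk-ctrl domains: wake the bus
 * domain, enable the upstream clocks, run the SoC specific blk-ctrl hook,
 * power up the matching GPC domain, set the interconnect bandwidth and gate
 * the clocks again.
 */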
static int imx8mp_blk_ctrl_power_on(struct generic_pm_domain *genpd)
{
struct imx8mp_blk_ctrl_domain *domain = to_imx8mp_blk_ctrl_domain(genpd);
const struct imx8mp_blk_ctrl_domain_data *data = domain->data;
struct imx8mp_blk_ctrl *bc = domain->bc;
int ret;
/* make sure bus domain is awake */
ret = pm_runtime_resume_and_get(bc->bus_power_dev);
if (ret < 0) {
dev_err(bc->dev, "failed to power up bus domain\n");
return ret;
}
/* enable upstream clocks */
ret = clk_bulk_prepare_enable(data->num_clks, domain->clks);
if (ret) {
dev_err(bc->dev, "failed to enable clocks\n");
goto bus_put;
}
/* domain specific blk-ctrl manipulation */
bc->power_on(bc, domain);
/* power up upstream GPC domain */
ret = pm_runtime_resume_and_get(domain->power_dev);
if (ret < 0) {
dev_err(bc->dev, "failed to power up peripheral domain\n");
goto clk_disable;
}
ret = icc_bulk_set_bw(domain->num_paths, domain->paths);
if (ret)
dev_err(bc->dev, "failed to set icc bw\n");
clk_bulk_disable_unprepare(data->num_clks, domain->clks);
return 0;
clk_disable:
clk_bulk_disable_unprepare(data->num_clks, domain->clks);
bus_put:
pm_runtime_put(bc->bus_power_dev);
return ret;
}
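/*
 * Mirror of the power-on path: re-enable the clocks for the duration of the
 * blk-ctrl manipulation, then power down the GPC domain and allow the bus
 * domain to suspend.
 */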
static int imx8mp_blk_ctrl_power_off(struct generic_pm_domain *genpd)
{
struct imx8mp_blk_ctrl_domain *domain = to_imx8mp_blk_ctrl_domain(genpd);
const struct imx8mp_blk_ctrl_domain_data *data = domain->data;
struct imx8mp_blk_ctrl *bc = domain->bc;
int ret;
ret = clk_bulk_prepare_enable(data->num_clks, domain->clks);
if (ret) {
dev_err(bc->dev, "failed to enable clocks\n");
return ret;
}
/* domain specific blk-ctrl manipulation */
bc->power_off(bc, domain);
clk_bulk_disable_unprepare(data->num_clks, domain->clks);
/* power down upstream GPC domain */
pm_runtime_put(domain->power_dev);
/* allow bus domain to suspend */
pm_runtime_put(bc->bus_power_dev);
return 0;
}
static struct lock_class_key blk_ctrl_genpd_lock_class;
static int imx8mp_blk_ctrl_probe(struct platform_device *pdev)
{
const struct imx8mp_blk_ctrl_data *bc_data;
struct device *dev = &pdev->dev;
struct imx8mp_blk_ctrl *bc;
void __iomem *base;
int num_domains, i, ret;
struct regmap_config regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
};
bc = devm_kzalloc(dev, sizeof(*bc), GFP_KERNEL);
if (!bc)
return -ENOMEM;
bc->dev = dev;
bc_data = of_device_get_match_data(dev);
num_domains = bc_data->num_domains;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
regmap_config.max_register = bc_data->max_reg;
bc->regmap = devm_regmap_init_mmio(dev, base, &regmap_config);
if (IS_ERR(bc->regmap))
return dev_err_probe(dev, PTR_ERR(bc->regmap),
"failed to init regmap\n");
bc->domains = devm_kcalloc(dev, num_domains,
sizeof(struct imx8mp_blk_ctrl_domain),
GFP_KERNEL);
if (!bc->domains)
return -ENOMEM;
bc->onecell_data.num_domains = num_domains;
bc->onecell_data.domains =
devm_kcalloc(dev, num_domains,
sizeof(struct generic_pm_domain *), GFP_KERNEL);
if (!bc->onecell_data.domains)
return -ENOMEM;
bc->bus_power_dev = dev_pm_domain_attach_by_name(dev, "bus");
if (IS_ERR(bc->bus_power_dev))
return dev_err_probe(dev, PTR_ERR(bc->bus_power_dev),
"failed to attach bus power domain\n");
bc->power_off = bc_data->power_off;
bc->power_on = bc_data->power_on;
for (i = 0; i < num_domains; i++) {
const struct imx8mp_blk_ctrl_domain_data *data = &bc_data->domains[i];
struct imx8mp_blk_ctrl_domain *domain = &bc->domains[i];
int j;
domain->data = data;
domain->num_paths = data->num_paths;
for (j = 0; j < data->num_clks; j++)
domain->clks[j].id = data->clk_names[j];
for (j = 0; j < data->num_paths; j++) {
domain->paths[j].name = data->path_names[j];
/* Fake value for now, just to let the ICC configure NoC mode/priority */
domain->paths[j].avg_bw = 1;
domain->paths[j].peak_bw = 1;
}
ret = devm_of_icc_bulk_get(dev, data->num_paths, domain->paths);
if (ret) {
if (ret != -EPROBE_DEFER) {
dev_warn_once(dev, "Could not get interconnect paths, NoC will stay unconfigured!\n");
domain->num_paths = 0;
} else {
dev_err_probe(dev, ret, "failed to get noc entries\n");
goto cleanup_pds;
}
}
ret = devm_clk_bulk_get(dev, data->num_clks, domain->clks);
if (ret) {
dev_err_probe(dev, ret, "failed to get clock\n");
goto cleanup_pds;
}
domain->power_dev =
dev_pm_domain_attach_by_name(dev, data->gpc_name);
if (IS_ERR(domain->power_dev)) {
dev_err_probe(dev, PTR_ERR(domain->power_dev),
"failed to attach power domain %s\n",
data->gpc_name);
ret = PTR_ERR(domain->power_dev);
goto cleanup_pds;
}
domain->genpd.name = data->name;
domain->genpd.power_on = imx8mp_blk_ctrl_power_on;
domain->genpd.power_off = imx8mp_blk_ctrl_power_off;
domain->bc = bc;
domain->id = i;
ret = pm_genpd_init(&domain->genpd, NULL, true);
if (ret) {
dev_err_probe(dev, ret, "failed to init power domain\n");
dev_pm_domain_detach(domain->power_dev, true);
goto cleanup_pds;
}
/*
* We use runtime PM to trigger power on/off of the upstream GPC
* domain, as a strict hierarchical parent/child power domain
* setup doesn't allow us to meet the sequencing requirements.
* This means we have nested locking of genpd locks, without the
* nesting being visible at the genpd level, so we need a
* separate lock class to make lockdep aware of the fact that
* these are separate domain locks that can be nested without a
* self-deadlock.
*/
lockdep_set_class(&domain->genpd.mlock,
&blk_ctrl_genpd_lock_class);
bc->onecell_data.domains[i] = &domain->genpd;
}
ret = of_genpd_add_provider_onecell(dev->of_node, &bc->onecell_data);
if (ret) {
dev_err_probe(dev, ret, "failed to add power domain provider\n");
goto cleanup_pds;
}
bc->power_nb.notifier_call = bc_data->power_notifier_fn;
ret = dev_pm_genpd_add_notifier(bc->bus_power_dev, &bc->power_nb);
if (ret) {
dev_err_probe(dev, ret, "failed to add power notifier\n");
goto cleanup_provider;
}
if (bc_data->probe) {
ret = bc_data->probe(bc);
if (ret)
goto cleanup_provider;
}
dev_set_drvdata(dev, bc);
return 0;
cleanup_provider:
of_genpd_del_provider(dev->of_node);
cleanup_pds:
for (i--; i >= 0; i--) {
pm_genpd_remove(&bc->domains[i].genpd);
dev_pm_domain_detach(bc->domains[i].power_dev, true);
}
dev_pm_domain_detach(bc->bus_power_dev, true);
return ret;
}
static int imx8mp_blk_ctrl_remove(struct platform_device *pdev)
{
struct imx8mp_blk_ctrl *bc = dev_get_drvdata(&pdev->dev);
int i;
of_genpd_del_provider(pdev->dev.of_node);
for (i = 0; i < bc->onecell_data.num_domains; i++) {
struct imx8mp_blk_ctrl_domain *domain = &bc->domains[i];
pm_genpd_remove(&domain->genpd);
dev_pm_domain_detach(domain->power_dev, true);
}
dev_pm_genpd_remove_notifier(bc->bus_power_dev);
dev_pm_domain_detach(bc->bus_power_dev, true);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int imx8mp_blk_ctrl_suspend(struct device *dev)
{
struct imx8mp_blk_ctrl *bc = dev_get_drvdata(dev);
int ret, i;
/*
* This may look strange, but is done so the generic PM_SLEEP code
* can power down our domains and more importantly power them up again
* after resume, without tripping over our usage of runtime PM to
* control the upstream GPC domains. Things happen in the right order
* in the system suspend/resume paths due to the device parent/child
* hierarchy.
*/
ret = pm_runtime_get_sync(bc->bus_power_dev);
if (ret < 0) {
pm_runtime_put_noidle(bc->bus_power_dev);
return ret;
}
for (i = 0; i < bc->onecell_data.num_domains; i++) {
struct imx8mp_blk_ctrl_domain *domain = &bc->domains[i];
ret = pm_runtime_get_sync(domain->power_dev);
if (ret < 0) {
pm_runtime_put_noidle(domain->power_dev);
goto out_fail;
}
}
return 0;
out_fail:
for (i--; i >= 0; i--)
pm_runtime_put(bc->domains[i].power_dev);
pm_runtime_put(bc->bus_power_dev);
return ret;
}
static int imx8mp_blk_ctrl_resume(struct device *dev)
{
struct imx8mp_blk_ctrl *bc = dev_get_drvdata(dev);
int i;
for (i = 0; i < bc->onecell_data.num_domains; i++)
pm_runtime_put(bc->domains[i].power_dev);
pm_runtime_put(bc->bus_power_dev);
return 0;
}
#endif
static const struct dev_pm_ops imx8mp_blk_ctrl_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(imx8mp_blk_ctrl_suspend,
imx8mp_blk_ctrl_resume)
};
static const struct of_device_id imx8mp_blk_ctrl_of_match[] = {
{
.compatible = "fsl,imx8mp-hsio-blk-ctrl",
.data = &imx8mp_hsio_blk_ctl_dev_data,
}, {
.compatible = "fsl,imx8mp-hdmi-blk-ctrl",
.data = &imx8mp_hdmi_blk_ctl_dev_data,
}, {
/* Sentinel */
}
};
MODULE_DEVICE_TABLE(of, imx8mp_blk_ctrl_of_match);
static struct platform_driver imx8mp_blk_ctrl_driver = {
.probe = imx8mp_blk_ctrl_probe,
.remove = imx8mp_blk_ctrl_remove,
.driver = {
.name = "imx8mp-blk-ctrl",
.pm = &imx8mp_blk_ctrl_pm_ops,
.of_match_table = imx8mp_blk_ctrl_of_match,
},
};
module_platform_driver(imx8mp_blk_ctrl_driver);
MODULE_LICENSE("GPL");
| linux-master | drivers/pmdomain/imx/imx8mp-blk-ctrl.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2021 Pengutronix, Lucas Stach <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <dt-bindings/power/imx8mm-power.h>
#include <dt-bindings/power/imx8mn-power.h>
#include <dt-bindings/power/imx8mp-power.h>
#include <dt-bindings/power/imx8mq-power.h>
#define BLK_SFT_RSTN 0x0
#define BLK_CLK_EN 0x4
#define BLK_MIPI_RESET_DIV 0x8 /* Mini/Nano/Plus DISPLAY_BLK_CTRL only */
struct imx8m_blk_ctrl_domain;
struct imx8m_blk_ctrl {
struct device *dev;
struct notifier_block power_nb;
struct device *bus_power_dev;
struct regmap *regmap;
struct imx8m_blk_ctrl_domain *domains;
struct genpd_onecell_data onecell_data;
};
struct imx8m_blk_ctrl_domain_data {
const char *name;
const char * const *clk_names;
const char * const *path_names;
const char *gpc_name;
int num_clks;
int num_paths;
u32 rst_mask;
u32 clk_mask;
/*
* i.MX8M Mini, Nano and Plus have a third DISPLAY_BLK_CTRL register
* which is used to control the reset for the MIPI Phy.
* Since it's only present in certain circumstances,
* an if-statement should be used before setting and clearing this
* register.
*/
u32 mipi_phy_rst_mask;
};
#define DOMAIN_MAX_CLKS 4
#define DOMAIN_MAX_PATHS 4
struct imx8m_blk_ctrl_domain {
struct generic_pm_domain genpd;
const struct imx8m_blk_ctrl_domain_data *data;
struct clk_bulk_data clks[DOMAIN_MAX_CLKS];
struct icc_bulk_data paths[DOMAIN_MAX_PATHS];
struct device *power_dev;
struct imx8m_blk_ctrl *bc;
int num_paths;
};
struct imx8m_blk_ctrl_data {
int max_reg;
notifier_fn_t power_notifier_fn;
const struct imx8m_blk_ctrl_domain_data *domains;
int num_domains;
};
static inline struct imx8m_blk_ctrl_domain *
to_imx8m_blk_ctrl_domain(struct generic_pm_domain *genpd)
{
return container_of(genpd, struct imx8m_blk_ctrl_domain, genpd);
}
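/*
 * Power on one blk-ctrl domain: wake the bus domain, put the peripherals
 * into reset, ungate the clocks so the reset can propagate, power up the
 * upstream GPC domain, release the resets, configure the interconnect and
 * finally gate the upstream clocks again.
 */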
static int imx8m_blk_ctrl_power_on(struct generic_pm_domain *genpd)
{
struct imx8m_blk_ctrl_domain *domain = to_imx8m_blk_ctrl_domain(genpd);
const struct imx8m_blk_ctrl_domain_data *data = domain->data;
struct imx8m_blk_ctrl *bc = domain->bc;
int ret;
/* make sure bus domain is awake */
ret = pm_runtime_get_sync(bc->bus_power_dev);
if (ret < 0) {
pm_runtime_put_noidle(bc->bus_power_dev);
dev_err(bc->dev, "failed to power up bus domain\n");
return ret;
}
/* put devices into reset */
regmap_clear_bits(bc->regmap, BLK_SFT_RSTN, data->rst_mask);
if (data->mipi_phy_rst_mask)
regmap_clear_bits(bc->regmap, BLK_MIPI_RESET_DIV, data->mipi_phy_rst_mask);
/* enable upstream and blk-ctrl clocks to allow reset to propagate */
ret = clk_bulk_prepare_enable(data->num_clks, domain->clks);
if (ret) {
dev_err(bc->dev, "failed to enable clocks\n");
goto bus_put;
}
regmap_set_bits(bc->regmap, BLK_CLK_EN, data->clk_mask);
/* power up upstream GPC domain */
ret = pm_runtime_get_sync(domain->power_dev);
if (ret < 0) {
dev_err(bc->dev, "failed to power up peripheral domain\n");
goto clk_disable;
}
/* wait for reset to propagate */
udelay(5);
/* release reset */
regmap_set_bits(bc->regmap, BLK_SFT_RSTN, data->rst_mask);
if (data->mipi_phy_rst_mask)
regmap_set_bits(bc->regmap, BLK_MIPI_RESET_DIV, data->mipi_phy_rst_mask);
ret = icc_bulk_set_bw(domain->num_paths, domain->paths);
if (ret)
dev_err(bc->dev, "failed to set icc bw\n");
/* disable upstream clocks */
clk_bulk_disable_unprepare(data->num_clks, domain->clks);
return 0;
clk_disable:
clk_bulk_disable_unprepare(data->num_clks, domain->clks);
bus_put:
pm_runtime_put(bc->bus_power_dev);
return ret;
}
static int imx8m_blk_ctrl_power_off(struct generic_pm_domain *genpd)
{
struct imx8m_blk_ctrl_domain *domain = to_imx8m_blk_ctrl_domain(genpd);
const struct imx8m_blk_ctrl_domain_data *data = domain->data;
struct imx8m_blk_ctrl *bc = domain->bc;
/* put devices into reset and disable clocks */
if (data->mipi_phy_rst_mask)
regmap_clear_bits(bc->regmap, BLK_MIPI_RESET_DIV, data->mipi_phy_rst_mask);
regmap_clear_bits(bc->regmap, BLK_SFT_RSTN, data->rst_mask);
regmap_clear_bits(bc->regmap, BLK_CLK_EN, data->clk_mask);
/* power down upstream GPC domain */
pm_runtime_put(domain->power_dev);
/* allow bus domain to suspend */
pm_runtime_put(bc->bus_power_dev);
return 0;
}
static struct lock_class_key blk_ctrl_genpd_lock_class;
static int imx8m_blk_ctrl_probe(struct platform_device *pdev)
{
const struct imx8m_blk_ctrl_data *bc_data;
struct device *dev = &pdev->dev;
struct imx8m_blk_ctrl *bc;
void __iomem *base;
int i, ret;
struct regmap_config regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
};
bc = devm_kzalloc(dev, sizeof(*bc), GFP_KERNEL);
if (!bc)
return -ENOMEM;
bc->dev = dev;
bc_data = of_device_get_match_data(dev);
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
regmap_config.max_register = bc_data->max_reg;
bc->regmap = devm_regmap_init_mmio(dev, base, &regmap_config);
if (IS_ERR(bc->regmap))
return dev_err_probe(dev, PTR_ERR(bc->regmap),
"failed to init regmap\n");
bc->domains = devm_kcalloc(dev, bc_data->num_domains,
sizeof(struct imx8m_blk_ctrl_domain),
GFP_KERNEL);
if (!bc->domains)
return -ENOMEM;
bc->onecell_data.num_domains = bc_data->num_domains;
bc->onecell_data.domains =
devm_kcalloc(dev, bc_data->num_domains,
sizeof(struct generic_pm_domain *), GFP_KERNEL);
if (!bc->onecell_data.domains)
return -ENOMEM;
bc->bus_power_dev = dev_pm_domain_attach_by_name(dev, "bus");
if (IS_ERR(bc->bus_power_dev)) {
if (PTR_ERR(bc->bus_power_dev) == -ENODEV)
return dev_err_probe(dev, -EPROBE_DEFER,
"failed to attach power domain \"bus\"\n");
else
return dev_err_probe(dev, PTR_ERR(bc->bus_power_dev),
"failed to attach power domain \"bus\"\n");
}
for (i = 0; i < bc_data->num_domains; i++) {
const struct imx8m_blk_ctrl_domain_data *data = &bc_data->domains[i];
struct imx8m_blk_ctrl_domain *domain = &bc->domains[i];
int j;
domain->data = data;
domain->num_paths = data->num_paths;
for (j = 0; j < data->num_clks; j++)
domain->clks[j].id = data->clk_names[j];
for (j = 0; j < data->num_paths; j++) {
domain->paths[j].name = data->path_names[j];
/* Fake value for now, just to let the ICC configure NoC mode/priority */
domain->paths[j].avg_bw = 1;
domain->paths[j].peak_bw = 1;
}
ret = devm_of_icc_bulk_get(dev, data->num_paths, domain->paths);
if (ret) {
if (ret != -EPROBE_DEFER) {
dev_warn_once(dev, "Could not get interconnect paths, NoC will stay unconfigured!\n");
domain->num_paths = 0;
} else {
dev_err_probe(dev, ret, "failed to get noc entries\n");
goto cleanup_pds;
}
}
ret = devm_clk_bulk_get(dev, data->num_clks, domain->clks);
if (ret) {
dev_err_probe(dev, ret, "failed to get clock\n");
goto cleanup_pds;
}
domain->power_dev =
dev_pm_domain_attach_by_name(dev, data->gpc_name);
if (IS_ERR(domain->power_dev)) {
dev_err_probe(dev, PTR_ERR(domain->power_dev),
"failed to attach power domain \"%s\"\n",
data->gpc_name);
ret = PTR_ERR(domain->power_dev);
goto cleanup_pds;
}
domain->genpd.name = data->name;
domain->genpd.power_on = imx8m_blk_ctrl_power_on;
domain->genpd.power_off = imx8m_blk_ctrl_power_off;
domain->bc = bc;
ret = pm_genpd_init(&domain->genpd, NULL, true);
if (ret) {
dev_err_probe(dev, ret,
"failed to init power domain \"%s\"\n",
data->gpc_name);
dev_pm_domain_detach(domain->power_dev, true);
goto cleanup_pds;
}
/*
* We use runtime PM to trigger power on/off of the upstream GPC
* domain, as a strict hierarchical parent/child power domain
* setup doesn't allow us to meet the sequencing requirements.
* This means we have nested locking of genpd locks, without the
* nesting being visible at the genpd level, so we need a
* separate lock class to make lockdep aware of the fact that
* these are separate domain locks that can be nested without a
* self-deadlock.
*/
lockdep_set_class(&domain->genpd.mlock,
&blk_ctrl_genpd_lock_class);
bc->onecell_data.domains[i] = &domain->genpd;
}
ret = of_genpd_add_provider_onecell(dev->of_node, &bc->onecell_data);
if (ret) {
dev_err_probe(dev, ret, "failed to add power domain provider\n");
goto cleanup_pds;
}
bc->power_nb.notifier_call = bc_data->power_notifier_fn;
ret = dev_pm_genpd_add_notifier(bc->bus_power_dev, &bc->power_nb);
if (ret) {
dev_err_probe(dev, ret, "failed to add power notifier\n");
goto cleanup_provider;
}
dev_set_drvdata(dev, bc);
ret = devm_of_platform_populate(dev);
if (ret)
goto cleanup_provider;
return 0;
cleanup_provider:
of_genpd_del_provider(dev->of_node);
cleanup_pds:
for (i--; i >= 0; i--) {
pm_genpd_remove(&bc->domains[i].genpd);
dev_pm_domain_detach(bc->domains[i].power_dev, true);
}
dev_pm_domain_detach(bc->bus_power_dev, true);
return ret;
}
static int imx8m_blk_ctrl_remove(struct platform_device *pdev)
{
struct imx8m_blk_ctrl *bc = dev_get_drvdata(&pdev->dev);
int i;
of_genpd_del_provider(pdev->dev.of_node);
for (i = 0; i < bc->onecell_data.num_domains; i++) {
struct imx8m_blk_ctrl_domain *domain = &bc->domains[i];
pm_genpd_remove(&domain->genpd);
dev_pm_domain_detach(domain->power_dev, true);
}
dev_pm_genpd_remove_notifier(bc->bus_power_dev);
dev_pm_domain_detach(bc->bus_power_dev, true);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int imx8m_blk_ctrl_suspend(struct device *dev)
{
struct imx8m_blk_ctrl *bc = dev_get_drvdata(dev);
int ret, i;
/*
* This may look strange, but is done so the generic PM_SLEEP code
* can power down our domains and more importantly power them up again
* after resume, without tripping over our usage of runtime PM to
* control the upstream GPC domains. Things happen in the right order
* in the system suspend/resume paths due to the device parent/child
* hierarchy.
*/
ret = pm_runtime_get_sync(bc->bus_power_dev);
if (ret < 0) {
pm_runtime_put_noidle(bc->bus_power_dev);
return ret;
}
for (i = 0; i < bc->onecell_data.num_domains; i++) {
struct imx8m_blk_ctrl_domain *domain = &bc->domains[i];
ret = pm_runtime_get_sync(domain->power_dev);
if (ret < 0) {
pm_runtime_put_noidle(domain->power_dev);
goto out_fail;
}
}
return 0;
out_fail:
for (i--; i >= 0; i--)
pm_runtime_put(bc->domains[i].power_dev);
pm_runtime_put(bc->bus_power_dev);
return ret;
}
static int imx8m_blk_ctrl_resume(struct device *dev)
{
struct imx8m_blk_ctrl *bc = dev_get_drvdata(dev);
int i;
for (i = 0; i < bc->onecell_data.num_domains; i++)
pm_runtime_put(bc->domains[i].power_dev);
pm_runtime_put(bc->bus_power_dev);
return 0;
}
#endif
static const struct dev_pm_ops imx8m_blk_ctrl_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(imx8m_blk_ctrl_suspend, imx8m_blk_ctrl_resume)
};
static int imx8mm_vpu_power_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
struct imx8m_blk_ctrl *bc = container_of(nb, struct imx8m_blk_ctrl,
power_nb);
if (action != GENPD_NOTIFY_ON && action != GENPD_NOTIFY_PRE_OFF)
return NOTIFY_OK;
/*
* The ADB in the VPUMIX domain has no separate reset and clock
* enable bits, but is ungated together with the VPU clocks. To
* allow the handshake with the GPC to progress we put the VPUs
* in reset and ungate the clocks.
*/
regmap_clear_bits(bc->regmap, BLK_SFT_RSTN, BIT(0) | BIT(1) | BIT(2));
regmap_set_bits(bc->regmap, BLK_CLK_EN, BIT(0) | BIT(1) | BIT(2));
if (action == GENPD_NOTIFY_ON) {
/*
* On power up we have no software backchannel to the GPC to
* wait for the ADB handshake to happen, so we just delay for a
* bit. On power down the GPC driver waits for the handshake.
*/
udelay(5);
/* set "fuse" bits to enable the VPUs */
regmap_set_bits(bc->regmap, 0x8, 0xffffffff);
regmap_set_bits(bc->regmap, 0xc, 0xffffffff);
regmap_set_bits(bc->regmap, 0x10, 0xffffffff);
regmap_set_bits(bc->regmap, 0x14, 0xffffffff);
}
return NOTIFY_OK;
}
static const struct imx8m_blk_ctrl_domain_data imx8mm_vpu_blk_ctl_domain_data[] = {
[IMX8MM_VPUBLK_PD_G1] = {
.name = "vpublk-g1",
.clk_names = (const char *[]){ "g1", },
.num_clks = 1,
.gpc_name = "g1",
.rst_mask = BIT(1),
.clk_mask = BIT(1),
},
[IMX8MM_VPUBLK_PD_G2] = {
.name = "vpublk-g2",
.clk_names = (const char *[]){ "g2", },
.num_clks = 1,
.gpc_name = "g2",
.rst_mask = BIT(0),
.clk_mask = BIT(0),
},
[IMX8MM_VPUBLK_PD_H1] = {
.name = "vpublk-h1",
.clk_names = (const char *[]){ "h1", },
.num_clks = 1,
.gpc_name = "h1",
.rst_mask = BIT(2),
.clk_mask = BIT(2),
},
};
static const struct imx8m_blk_ctrl_data imx8mm_vpu_blk_ctl_dev_data = {
.max_reg = 0x18,
.power_notifier_fn = imx8mm_vpu_power_notifier,
.domains = imx8mm_vpu_blk_ctl_domain_data,
.num_domains = ARRAY_SIZE(imx8mm_vpu_blk_ctl_domain_data),
};
static const struct imx8m_blk_ctrl_domain_data imx8mp_vpu_blk_ctl_domain_data[] = {
[IMX8MP_VPUBLK_PD_G1] = {
.name = "vpublk-g1",
.clk_names = (const char *[]){ "g1", },
.num_clks = 1,
.gpc_name = "g1",
.rst_mask = BIT(1),
.clk_mask = BIT(1),
.path_names = (const char *[]){"g1"},
.num_paths = 1,
},
[IMX8MP_VPUBLK_PD_G2] = {
.name = "vpublk-g2",
.clk_names = (const char *[]){ "g2", },
.num_clks = 1,
.gpc_name = "g2",
.rst_mask = BIT(0),
.clk_mask = BIT(0),
.path_names = (const char *[]){"g2"},
.num_paths = 1,
},
[IMX8MP_VPUBLK_PD_VC8000E] = {
.name = "vpublk-vc8000e",
.clk_names = (const char *[]){ "vc8000e", },
.num_clks = 1,
.gpc_name = "vc8000e",
.rst_mask = BIT(2),
.clk_mask = BIT(2),
.path_names = (const char *[]){"vc8000e"},
.num_paths = 1,
},
};
static const struct imx8m_blk_ctrl_data imx8mp_vpu_blk_ctl_dev_data = {
.max_reg = 0x18,
.power_notifier_fn = imx8mm_vpu_power_notifier,
.domains = imx8mp_vpu_blk_ctl_domain_data,
.num_domains = ARRAY_SIZE(imx8mp_vpu_blk_ctl_domain_data),
};
static int imx8mm_disp_power_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
struct imx8m_blk_ctrl *bc = container_of(nb, struct imx8m_blk_ctrl,
power_nb);
if (action != GENPD_NOTIFY_ON && action != GENPD_NOTIFY_PRE_OFF)
return NOTIFY_OK;
/* Enable bus clock and deassert bus reset */
regmap_set_bits(bc->regmap, BLK_CLK_EN, BIT(12));
regmap_set_bits(bc->regmap, BLK_SFT_RSTN, BIT(6));
/*
* On power up we have no software backchannel to the GPC to
* wait for the ADB handshake to happen, so we just delay for a
* bit. On power down the GPC driver waits for the handshake.
*/
if (action == GENPD_NOTIFY_ON)
udelay(5);
return NOTIFY_OK;
}
static const struct imx8m_blk_ctrl_domain_data imx8mm_disp_blk_ctl_domain_data[] = {
[IMX8MM_DISPBLK_PD_CSI_BRIDGE] = {
.name = "dispblk-csi-bridge",
.clk_names = (const char *[]){ "csi-bridge-axi", "csi-bridge-apb",
"csi-bridge-core", },
.num_clks = 3,
.gpc_name = "csi-bridge",
.rst_mask = BIT(0) | BIT(1) | BIT(2),
.clk_mask = BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5),
},
[IMX8MM_DISPBLK_PD_LCDIF] = {
.name = "dispblk-lcdif",
.clk_names = (const char *[]){ "lcdif-axi", "lcdif-apb", "lcdif-pix", },
.num_clks = 3,
.gpc_name = "lcdif",
.clk_mask = BIT(6) | BIT(7),
},
[IMX8MM_DISPBLK_PD_MIPI_DSI] = {
.name = "dispblk-mipi-dsi",
.clk_names = (const char *[]){ "dsi-pclk", "dsi-ref", },
.num_clks = 2,
.gpc_name = "mipi-dsi",
.rst_mask = BIT(5),
.clk_mask = BIT(8) | BIT(9),
.mipi_phy_rst_mask = BIT(17),
},
[IMX8MM_DISPBLK_PD_MIPI_CSI] = {
.name = "dispblk-mipi-csi",
.clk_names = (const char *[]){ "csi-aclk", "csi-pclk" },
.num_clks = 2,
.gpc_name = "mipi-csi",
.rst_mask = BIT(3) | BIT(4),
.clk_mask = BIT(10) | BIT(11),
.mipi_phy_rst_mask = BIT(16),
},
};
static const struct imx8m_blk_ctrl_data imx8mm_disp_blk_ctl_dev_data = {
.max_reg = 0x2c,
.power_notifier_fn = imx8mm_disp_power_notifier,
.domains = imx8mm_disp_blk_ctl_domain_data,
.num_domains = ARRAY_SIZE(imx8mm_disp_blk_ctl_domain_data),
};
static int imx8mn_disp_power_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
struct imx8m_blk_ctrl *bc = container_of(nb, struct imx8m_blk_ctrl,
power_nb);
if (action != GENPD_NOTIFY_ON && action != GENPD_NOTIFY_PRE_OFF)
return NOTIFY_OK;
/* Enable bus clock and deassert bus reset */
regmap_set_bits(bc->regmap, BLK_CLK_EN, BIT(8));
regmap_set_bits(bc->regmap, BLK_SFT_RSTN, BIT(8));
/*
* On power up we have no software backchannel to the GPC to
* wait for the ADB handshake to happen, so we just delay for a
* bit. On power down the GPC driver waits for the handshake.
*/
if (action == GENPD_NOTIFY_ON)
udelay(5);
return NOTIFY_OK;
}
static const struct imx8m_blk_ctrl_domain_data imx8mn_disp_blk_ctl_domain_data[] = {
[IMX8MN_DISPBLK_PD_MIPI_DSI] = {
.name = "dispblk-mipi-dsi",
.clk_names = (const char *[]){ "dsi-pclk", "dsi-ref", },
.num_clks = 2,
.gpc_name = "mipi-dsi",
.rst_mask = BIT(0) | BIT(1),
.clk_mask = BIT(0) | BIT(1),
.mipi_phy_rst_mask = BIT(17),
},
[IMX8MN_DISPBLK_PD_MIPI_CSI] = {
.name = "dispblk-mipi-csi",
.clk_names = (const char *[]){ "csi-aclk", "csi-pclk" },
.num_clks = 2,
.gpc_name = "mipi-csi",
.rst_mask = BIT(2) | BIT(3),
.clk_mask = BIT(2) | BIT(3),
.mipi_phy_rst_mask = BIT(16),
},
[IMX8MN_DISPBLK_PD_LCDIF] = {
.name = "dispblk-lcdif",
.clk_names = (const char *[]){ "lcdif-axi", "lcdif-apb", "lcdif-pix", },
.num_clks = 3,
.gpc_name = "lcdif",
.rst_mask = BIT(4) | BIT(5),
.clk_mask = BIT(4) | BIT(5),
},
[IMX8MN_DISPBLK_PD_ISI] = {
.name = "dispblk-isi",
.clk_names = (const char *[]){ "disp_axi", "disp_apb", "disp_axi_root",
"disp_apb_root"},
.num_clks = 4,
.gpc_name = "isi",
.rst_mask = BIT(6) | BIT(7),
.clk_mask = BIT(6) | BIT(7),
},
};
static const struct imx8m_blk_ctrl_data imx8mn_disp_blk_ctl_dev_data = {
.max_reg = 0x84,
.power_notifier_fn = imx8mn_disp_power_notifier,
.domains = imx8mn_disp_blk_ctl_domain_data,
.num_domains = ARRAY_SIZE(imx8mn_disp_blk_ctl_domain_data),
};
#define LCDIF_ARCACHE_CTRL 0x4c
#define LCDIF_1_RD_HURRY GENMASK(15, 13)
#define LCDIF_0_RD_HURRY GENMASK(12, 10)
static int imx8mp_media_power_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
struct imx8m_blk_ctrl *bc = container_of(nb, struct imx8m_blk_ctrl,
power_nb);
if (action != GENPD_NOTIFY_ON && action != GENPD_NOTIFY_PRE_OFF)
return NOTIFY_OK;
/* Enable bus clock and deassert bus reset */
regmap_set_bits(bc->regmap, BLK_CLK_EN, BIT(8));
regmap_set_bits(bc->regmap, BLK_SFT_RSTN, BIT(8));
if (action == GENPD_NOTIFY_ON) {
/*
* On power up we have no software backchannel to the GPC to
* wait for the ADB handshake to happen, so we just delay for a
* bit. On power down the GPC driver waits for the handshake.
*/
udelay(5);
/*
* Set panic read hurry level for both LCDIF interfaces to
* maximum priority to minimize chances of display FIFO
* underflow.
*/
regmap_set_bits(bc->regmap, LCDIF_ARCACHE_CTRL,
FIELD_PREP(LCDIF_1_RD_HURRY, 7) |
FIELD_PREP(LCDIF_0_RD_HURRY, 7));
}
return NOTIFY_OK;
}
/*
* From i.MX 8M Plus Applications Processor Reference Manual, Rev. 1,
* section 13.2.2, 13.2.3
* isp-ahb and dwe are not listed in Figure 13-5 (Media BLK_CTRL Clocks).
*/
static const struct imx8m_blk_ctrl_domain_data imx8mp_media_blk_ctl_domain_data[] = {
[IMX8MP_MEDIABLK_PD_MIPI_DSI_1] = {
.name = "mediablk-mipi-dsi-1",
.clk_names = (const char *[]){ "apb", "phy", },
.num_clks = 2,
.gpc_name = "mipi-dsi1",
.rst_mask = BIT(0) | BIT(1),
.clk_mask = BIT(0) | BIT(1),
.mipi_phy_rst_mask = BIT(17),
},
[IMX8MP_MEDIABLK_PD_MIPI_CSI2_1] = {
.name = "mediablk-mipi-csi2-1",
.clk_names = (const char *[]){ "apb", "cam1" },
.num_clks = 2,
.gpc_name = "mipi-csi1",
.rst_mask = BIT(2) | BIT(3),
.clk_mask = BIT(2) | BIT(3),
.mipi_phy_rst_mask = BIT(16),
},
[IMX8MP_MEDIABLK_PD_LCDIF_1] = {
.name = "mediablk-lcdif-1",
.clk_names = (const char *[]){ "disp1", "apb", "axi", },
.num_clks = 3,
.gpc_name = "lcdif1",
.rst_mask = BIT(4) | BIT(5) | BIT(23),
.clk_mask = BIT(4) | BIT(5) | BIT(23),
.path_names = (const char *[]){"lcdif-rd", "lcdif-wr"},
.num_paths = 2,
},
[IMX8MP_MEDIABLK_PD_ISI] = {
.name = "mediablk-isi",
.clk_names = (const char *[]){ "axi", "apb" },
.num_clks = 2,
.gpc_name = "isi",
.rst_mask = BIT(6) | BIT(7),
.clk_mask = BIT(6) | BIT(7),
.path_names = (const char *[]){"isi0", "isi1", "isi2"},
.num_paths = 3,
},
[IMX8MP_MEDIABLK_PD_MIPI_CSI2_2] = {
.name = "mediablk-mipi-csi2-2",
.clk_names = (const char *[]){ "apb", "cam2" },
.num_clks = 2,
.gpc_name = "mipi-csi2",
.rst_mask = BIT(9) | BIT(10),
.clk_mask = BIT(9) | BIT(10),
.mipi_phy_rst_mask = BIT(30),
},
[IMX8MP_MEDIABLK_PD_LCDIF_2] = {
.name = "mediablk-lcdif-2",
.clk_names = (const char *[]){ "disp2", "apb", "axi", },
.num_clks = 3,
.gpc_name = "lcdif2",
.rst_mask = BIT(11) | BIT(12) | BIT(24),
.clk_mask = BIT(11) | BIT(12) | BIT(24),
.path_names = (const char *[]){"lcdif-rd", "lcdif-wr"},
.num_paths = 2,
},
[IMX8MP_MEDIABLK_PD_ISP] = {
.name = "mediablk-isp",
.clk_names = (const char *[]){ "isp", "axi", "apb" },
.num_clks = 3,
.gpc_name = "isp",
.rst_mask = BIT(16) | BIT(17) | BIT(18),
.clk_mask = BIT(16) | BIT(17) | BIT(18),
.path_names = (const char *[]){"isp0", "isp1"},
.num_paths = 2,
},
[IMX8MP_MEDIABLK_PD_DWE] = {
.name = "mediablk-dwe",
.clk_names = (const char *[]){ "axi", "apb" },
.num_clks = 2,
.gpc_name = "dwe",
.rst_mask = BIT(19) | BIT(20) | BIT(21),
.clk_mask = BIT(19) | BIT(20) | BIT(21),
.path_names = (const char *[]){"dwe"},
.num_paths = 1,
},
[IMX8MP_MEDIABLK_PD_MIPI_DSI_2] = {
.name = "mediablk-mipi-dsi-2",
.clk_names = (const char *[]){ "phy", },
.num_clks = 1,
.gpc_name = "mipi-dsi2",
.rst_mask = BIT(22),
.clk_mask = BIT(22),
.mipi_phy_rst_mask = BIT(29),
},
};
static const struct imx8m_blk_ctrl_data imx8mp_media_blk_ctl_dev_data = {
.max_reg = 0x138,
.power_notifier_fn = imx8mp_media_power_notifier,
.domains = imx8mp_media_blk_ctl_domain_data,
.num_domains = ARRAY_SIZE(imx8mp_media_blk_ctl_domain_data),
};
static int imx8mq_vpu_power_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
struct imx8m_blk_ctrl *bc = container_of(nb, struct imx8m_blk_ctrl,
power_nb);
if (action != GENPD_NOTIFY_ON && action != GENPD_NOTIFY_PRE_OFF)
return NOTIFY_OK;
/*
* The ADB in the VPUMIX domain has no separate reset and clock
* enable bits, but is ungated and reset together with the VPUs. The
* reset and clock enable inputs to the ADB are a logical OR of the
* VPU bits. In order to set the G2 fuse bits, the G2 clock must
* also be enabled.
*/
regmap_set_bits(bc->regmap, BLK_SFT_RSTN, BIT(0) | BIT(1));
regmap_set_bits(bc->regmap, BLK_CLK_EN, BIT(0) | BIT(1));
if (action == GENPD_NOTIFY_ON) {
/*
* On power up we have no software backchannel to the GPC to
* wait for the ADB handshake to happen, so we just delay for a
* bit. On power down the GPC driver waits for the handshake.
*/
udelay(5);
/* set "fuse" bits to enable the VPUs */
regmap_set_bits(bc->regmap, 0x8, 0xffffffff);
regmap_set_bits(bc->regmap, 0xc, 0xffffffff);
regmap_set_bits(bc->regmap, 0x10, 0xffffffff);
}
return NOTIFY_OK;
}
static const struct imx8m_blk_ctrl_domain_data imx8mq_vpu_blk_ctl_domain_data[] = {
[IMX8MQ_VPUBLK_PD_G1] = {
.name = "vpublk-g1",
.clk_names = (const char *[]){ "g1", },
.num_clks = 1,
.gpc_name = "g1",
.rst_mask = BIT(1),
.clk_mask = BIT(1),
},
[IMX8MQ_VPUBLK_PD_G2] = {
.name = "vpublk-g2",
.clk_names = (const char *[]){ "g2", },
.num_clks = 1,
.gpc_name = "g2",
.rst_mask = BIT(0),
.clk_mask = BIT(0),
},
};
static const struct imx8m_blk_ctrl_data imx8mq_vpu_blk_ctl_dev_data = {
.max_reg = 0x14,
.power_notifier_fn = imx8mq_vpu_power_notifier,
.domains = imx8mq_vpu_blk_ctl_domain_data,
.num_domains = ARRAY_SIZE(imx8mq_vpu_blk_ctl_domain_data),
};
static const struct of_device_id imx8m_blk_ctrl_of_match[] = {
{
.compatible = "fsl,imx8mm-vpu-blk-ctrl",
.data = &imx8mm_vpu_blk_ctl_dev_data
}, {
.compatible = "fsl,imx8mm-disp-blk-ctrl",
.data = &imx8mm_disp_blk_ctl_dev_data
}, {
.compatible = "fsl,imx8mn-disp-blk-ctrl",
.data = &imx8mn_disp_blk_ctl_dev_data
}, {
.compatible = "fsl,imx8mp-media-blk-ctrl",
.data = &imx8mp_media_blk_ctl_dev_data
}, {
.compatible = "fsl,imx8mq-vpu-blk-ctrl",
.data = &imx8mq_vpu_blk_ctl_dev_data
}, {
.compatible = "fsl,imx8mp-vpu-blk-ctrl",
.data = &imx8mp_vpu_blk_ctl_dev_data
}, {
/* Sentinel */
}
};
MODULE_DEVICE_TABLE(of, imx8m_blk_ctrl_of_match);
static struct platform_driver imx8m_blk_ctrl_driver = {
.probe = imx8m_blk_ctrl_probe,
.remove = imx8m_blk_ctrl_remove,
.driver = {
.name = "imx8m-blk-ctrl",
.pm = &imx8m_blk_ctrl_pm_ops,
.of_match_table = imx8m_blk_ctrl_of_match,
},
};
module_platform_driver(imx8m_blk_ctrl_driver);
MODULE_LICENSE("GPL");
| linux-master | drivers/pmdomain/imx/imx8m-blk-ctrl.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2017 Impinj, Inc
* Author: Andrey Smirnov <[email protected]>
*
* Based on the code of an analogous driver:
*
* Copyright 2015-2017 Pengutronix, Lucas Stach <[email protected]>
*/
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <dt-bindings/power/imx7-power.h>
#include <dt-bindings/power/imx8mq-power.h>
#include <dt-bindings/power/imx8mm-power.h>
#include <dt-bindings/power/imx8mn-power.h>
#include <dt-bindings/power/imx8mp-power.h>
#define GPC_LPCR_A_CORE_BSC 0x000
#define GPC_PGC_CPU_MAPPING 0x0ec
#define IMX8MP_GPC_PGC_CPU_MAPPING 0x1cc
#define IMX7_USB_HSIC_PHY_A_CORE_DOMAIN BIT(6)
#define IMX7_USB_OTG2_PHY_A_CORE_DOMAIN BIT(5)
#define IMX7_USB_OTG1_PHY_A_CORE_DOMAIN BIT(4)
#define IMX7_PCIE_PHY_A_CORE_DOMAIN BIT(3)
#define IMX7_MIPI_PHY_A_CORE_DOMAIN BIT(2)
#define IMX8M_PCIE2_A53_DOMAIN BIT(15)
#define IMX8M_MIPI_CSI2_A53_DOMAIN BIT(14)
#define IMX8M_MIPI_CSI1_A53_DOMAIN BIT(13)
#define IMX8M_DISP_A53_DOMAIN BIT(12)
#define IMX8M_HDMI_A53_DOMAIN BIT(11)
#define IMX8M_VPU_A53_DOMAIN BIT(10)
#define IMX8M_GPU_A53_DOMAIN BIT(9)
#define IMX8M_DDR2_A53_DOMAIN BIT(8)
#define IMX8M_DDR1_A53_DOMAIN BIT(7)
#define IMX8M_OTG2_A53_DOMAIN BIT(5)
#define IMX8M_OTG1_A53_DOMAIN BIT(4)
#define IMX8M_PCIE1_A53_DOMAIN BIT(3)
#define IMX8M_MIPI_A53_DOMAIN BIT(2)
#define IMX8MM_VPUH1_A53_DOMAIN BIT(15)
#define IMX8MM_VPUG2_A53_DOMAIN BIT(14)
#define IMX8MM_VPUG1_A53_DOMAIN BIT(13)
#define IMX8MM_DISPMIX_A53_DOMAIN BIT(12)
#define IMX8MM_VPUMIX_A53_DOMAIN BIT(10)
#define IMX8MM_GPUMIX_A53_DOMAIN BIT(9)
#define IMX8MM_GPU_A53_DOMAIN (BIT(8) | BIT(11))
#define IMX8MM_DDR1_A53_DOMAIN BIT(7)
#define IMX8MM_OTG2_A53_DOMAIN BIT(5)
#define IMX8MM_OTG1_A53_DOMAIN BIT(4)
#define IMX8MM_PCIE_A53_DOMAIN BIT(3)
#define IMX8MM_MIPI_A53_DOMAIN BIT(2)
#define IMX8MN_DISPMIX_A53_DOMAIN BIT(12)
#define IMX8MN_GPUMIX_A53_DOMAIN BIT(9)
#define IMX8MN_DDR1_A53_DOMAIN BIT(7)
#define IMX8MN_OTG1_A53_DOMAIN BIT(4)
#define IMX8MN_MIPI_A53_DOMAIN BIT(2)
#define IMX8MP_MEDIA_ISPDWP_A53_DOMAIN BIT(20)
#define IMX8MP_HSIOMIX_A53_DOMAIN BIT(19)
#define IMX8MP_MIPI_PHY2_A53_DOMAIN BIT(18)
#define IMX8MP_HDMI_PHY_A53_DOMAIN BIT(17)
#define IMX8MP_HDMIMIX_A53_DOMAIN BIT(16)
#define IMX8MP_VPU_VC8000E_A53_DOMAIN BIT(15)
#define IMX8MP_VPU_G2_A53_DOMAIN BIT(14)
#define IMX8MP_VPU_G1_A53_DOMAIN BIT(13)
#define IMX8MP_MEDIAMIX_A53_DOMAIN BIT(12)
#define IMX8MP_GPU3D_A53_DOMAIN BIT(11)
#define IMX8MP_VPUMIX_A53_DOMAIN BIT(10)
#define IMX8MP_GPUMIX_A53_DOMAIN BIT(9)
#define IMX8MP_GPU2D_A53_DOMAIN BIT(8)
#define IMX8MP_AUDIOMIX_A53_DOMAIN BIT(7)
#define IMX8MP_MLMIX_A53_DOMAIN BIT(6)
#define IMX8MP_USB2_PHY_A53_DOMAIN BIT(5)
#define IMX8MP_USB1_PHY_A53_DOMAIN BIT(4)
#define IMX8MP_PCIE_PHY_A53_DOMAIN BIT(3)
#define IMX8MP_MIPI_PHY1_A53_DOMAIN BIT(2)
#define IMX8MP_GPC_PU_PGC_SW_PUP_REQ 0x0d8
#define IMX8MP_GPC_PU_PGC_SW_PDN_REQ 0x0e4
#define GPC_PU_PGC_SW_PUP_REQ 0x0f8
#define GPC_PU_PGC_SW_PDN_REQ 0x104
#define IMX7_USB_HSIC_PHY_SW_Pxx_REQ BIT(4)
#define IMX7_USB_OTG2_PHY_SW_Pxx_REQ BIT(3)
#define IMX7_USB_OTG1_PHY_SW_Pxx_REQ BIT(2)
#define IMX7_PCIE_PHY_SW_Pxx_REQ BIT(1)
#define IMX7_MIPI_PHY_SW_Pxx_REQ BIT(0)
#define IMX8M_PCIE2_SW_Pxx_REQ BIT(13)
#define IMX8M_MIPI_CSI2_SW_Pxx_REQ BIT(12)
#define IMX8M_MIPI_CSI1_SW_Pxx_REQ BIT(11)
#define IMX8M_DISP_SW_Pxx_REQ BIT(10)
#define IMX8M_HDMI_SW_Pxx_REQ BIT(9)
#define IMX8M_VPU_SW_Pxx_REQ BIT(8)
#define IMX8M_GPU_SW_Pxx_REQ BIT(7)
#define IMX8M_DDR2_SW_Pxx_REQ BIT(6)
#define IMX8M_DDR1_SW_Pxx_REQ BIT(5)
#define IMX8M_OTG2_SW_Pxx_REQ BIT(3)
#define IMX8M_OTG1_SW_Pxx_REQ BIT(2)
#define IMX8M_PCIE1_SW_Pxx_REQ BIT(1)
#define IMX8M_MIPI_SW_Pxx_REQ BIT(0)
#define IMX8MM_VPUH1_SW_Pxx_REQ BIT(13)
#define IMX8MM_VPUG2_SW_Pxx_REQ BIT(12)
#define IMX8MM_VPUG1_SW_Pxx_REQ BIT(11)
#define IMX8MM_DISPMIX_SW_Pxx_REQ BIT(10)
#define IMX8MM_VPUMIX_SW_Pxx_REQ BIT(8)
#define IMX8MM_GPUMIX_SW_Pxx_REQ BIT(7)
#define IMX8MM_GPU_SW_Pxx_REQ (BIT(6) | BIT(9))
#define IMX8MM_DDR1_SW_Pxx_REQ BIT(5)
#define IMX8MM_OTG2_SW_Pxx_REQ BIT(3)
#define IMX8MM_OTG1_SW_Pxx_REQ BIT(2)
#define IMX8MM_PCIE_SW_Pxx_REQ BIT(1)
#define IMX8MM_MIPI_SW_Pxx_REQ BIT(0)
#define IMX8MN_DISPMIX_SW_Pxx_REQ BIT(10)
#define IMX8MN_GPUMIX_SW_Pxx_REQ BIT(7)
#define IMX8MN_DDR1_SW_Pxx_REQ BIT(5)
#define IMX8MN_OTG1_SW_Pxx_REQ BIT(2)
#define IMX8MN_MIPI_SW_Pxx_REQ BIT(0)
#define IMX8MP_DDRMIX_Pxx_REQ BIT(19)
#define IMX8MP_MEDIA_ISP_DWP_Pxx_REQ BIT(18)
#define IMX8MP_HSIOMIX_Pxx_REQ BIT(17)
#define IMX8MP_MIPI_PHY2_Pxx_REQ BIT(16)
#define IMX8MP_HDMI_PHY_Pxx_REQ BIT(15)
#define IMX8MP_HDMIMIX_Pxx_REQ BIT(14)
#define IMX8MP_VPU_VC8K_Pxx_REQ BIT(13)
#define IMX8MP_VPU_G2_Pxx_REQ BIT(12)
#define IMX8MP_VPU_G1_Pxx_REQ BIT(11)
#define IMX8MP_MEDIMIX_Pxx_REQ BIT(10)
#define IMX8MP_GPU_3D_Pxx_REQ BIT(9)
#define IMX8MP_VPU_MIX_SHARE_LOGIC_Pxx_REQ BIT(8)
#define IMX8MP_GPU_SHARE_LOGIC_Pxx_REQ BIT(7)
#define IMX8MP_GPU_2D_Pxx_REQ BIT(6)
#define IMX8MP_AUDIOMIX_Pxx_REQ BIT(5)
#define IMX8MP_MLMIX_Pxx_REQ BIT(4)
#define IMX8MP_USB2_PHY_Pxx_REQ BIT(3)
#define IMX8MP_USB1_PHY_Pxx_REQ BIT(2)
#define IMX8MP_PCIE_PHY_SW_Pxx_REQ BIT(1)
#define IMX8MP_MIPI_PHY1_SW_Pxx_REQ BIT(0)
#define GPC_M4_PU_PDN_FLG 0x1bc
#define IMX8MP_GPC_PU_PWRHSK 0x190
#define GPC_PU_PWRHSK 0x1fc
#define IMX8M_GPU_HSK_PWRDNACKN BIT(26)
#define IMX8M_VPU_HSK_PWRDNACKN BIT(25)
#define IMX8M_DISP_HSK_PWRDNACKN BIT(24)
#define IMX8M_GPU_HSK_PWRDNREQN BIT(6)
#define IMX8M_VPU_HSK_PWRDNREQN BIT(5)
#define IMX8M_DISP_HSK_PWRDNREQN BIT(4)
#define IMX8MM_GPUMIX_HSK_PWRDNACKN BIT(29)
#define IMX8MM_GPU_HSK_PWRDNACKN (BIT(27) | BIT(28))
#define IMX8MM_VPUMIX_HSK_PWRDNACKN BIT(26)
#define IMX8MM_DISPMIX_HSK_PWRDNACKN BIT(25)
#define IMX8MM_HSIO_HSK_PWRDNACKN (BIT(23) | BIT(24))
#define IMX8MM_GPUMIX_HSK_PWRDNREQN BIT(11)
#define IMX8MM_GPU_HSK_PWRDNREQN (BIT(9) | BIT(10))
#define IMX8MM_VPUMIX_HSK_PWRDNREQN BIT(8)
#define IMX8MM_DISPMIX_HSK_PWRDNREQN BIT(7)
#define IMX8MM_HSIO_HSK_PWRDNREQN (BIT(5) | BIT(6))
#define IMX8MN_GPUMIX_HSK_PWRDNACKN (BIT(29) | BIT(27))
#define IMX8MN_DISPMIX_HSK_PWRDNACKN BIT(25)
#define IMX8MN_HSIO_HSK_PWRDNACKN BIT(23)
#define IMX8MN_GPUMIX_HSK_PWRDNREQN (BIT(11) | BIT(9))
#define IMX8MN_DISPMIX_HSK_PWRDNREQN BIT(7)
#define IMX8MN_HSIO_HSK_PWRDNREQN BIT(5)
#define IMX8MP_MEDIAMIX_PWRDNACKN BIT(30)
#define IMX8MP_HDMIMIX_PWRDNACKN BIT(29)
#define IMX8MP_HSIOMIX_PWRDNACKN BIT(28)
#define IMX8MP_VPUMIX_PWRDNACKN BIT(26)
#define IMX8MP_GPUMIX_PWRDNACKN BIT(25)
#define IMX8MP_MLMIX_PWRDNACKN (BIT(23) | BIT(24))
#define IMX8MP_AUDIOMIX_PWRDNACKN (BIT(20) | BIT(31))
#define IMX8MP_MEDIAMIX_PWRDNREQN BIT(14)
#define IMX8MP_HDMIMIX_PWRDNREQN BIT(13)
#define IMX8MP_HSIOMIX_PWRDNREQN BIT(12)
#define IMX8MP_VPUMIX_PWRDNREQN BIT(10)
#define IMX8MP_GPUMIX_PWRDNREQN BIT(9)
#define IMX8MP_MLMIX_PWRDNREQN (BIT(7) | BIT(8))
#define IMX8MP_AUDIOMIX_PWRDNREQN (BIT(4) | BIT(15))
/*
* The PGC offset values in Reference Manual
* (Rev. 1, 01/2018 and the older ones) GPC chapter's
* GPC_PGC memory map are incorrect; the offset
* values below are taken from the design RTL.
*/
#define IMX7_PGC_MIPI 16
#define IMX7_PGC_PCIE 17
#define IMX7_PGC_USB_HSIC 20
#define IMX8M_PGC_MIPI 16
#define IMX8M_PGC_PCIE1 17
#define IMX8M_PGC_OTG1 18
#define IMX8M_PGC_OTG2 19
#define IMX8M_PGC_DDR1 21
#define IMX8M_PGC_GPU 23
#define IMX8M_PGC_VPU 24
#define IMX8M_PGC_DISP 26
#define IMX8M_PGC_MIPI_CSI1 27
#define IMX8M_PGC_MIPI_CSI2 28
#define IMX8M_PGC_PCIE2 29
#define IMX8MM_PGC_MIPI 16
#define IMX8MM_PGC_PCIE 17
#define IMX8MM_PGC_OTG1 18
#define IMX8MM_PGC_OTG2 19
#define IMX8MM_PGC_DDR1 21
#define IMX8MM_PGC_GPU2D 22
#define IMX8MM_PGC_GPUMIX 23
#define IMX8MM_PGC_VPUMIX 24
#define IMX8MM_PGC_GPU3D 25
#define IMX8MM_PGC_DISPMIX 26
#define IMX8MM_PGC_VPUG1 27
#define IMX8MM_PGC_VPUG2 28
#define IMX8MM_PGC_VPUH1 29
#define IMX8MN_PGC_MIPI 16
#define IMX8MN_PGC_OTG1 18
#define IMX8MN_PGC_DDR1 21
#define IMX8MN_PGC_GPUMIX 23
#define IMX8MN_PGC_DISPMIX 26
#define IMX8MP_PGC_NOC 9
#define IMX8MP_PGC_MIPI1 12
#define IMX8MP_PGC_PCIE 13
#define IMX8MP_PGC_USB1 14
#define IMX8MP_PGC_USB2 15
#define IMX8MP_PGC_MLMIX 16
#define IMX8MP_PGC_AUDIOMIX 17
#define IMX8MP_PGC_GPU2D 18
#define IMX8MP_PGC_GPUMIX 19
#define IMX8MP_PGC_VPUMIX 20
#define IMX8MP_PGC_GPU3D 21
#define IMX8MP_PGC_MEDIAMIX 22
#define IMX8MP_PGC_VPU_G1 23
#define IMX8MP_PGC_VPU_G2 24
#define IMX8MP_PGC_VPU_VC8000E 25
#define IMX8MP_PGC_HDMIMIX 26
#define IMX8MP_PGC_HDMI 27
#define IMX8MP_PGC_MIPI2 28
#define IMX8MP_PGC_HSIOMIX 29
#define IMX8MP_PGC_MEDIA_ISP_DWP 30
#define IMX8MP_PGC_DDRMIX 31
#define GPC_PGC_CTRL(n) (0x800 + (n) * 0x40)
#define GPC_PGC_SR(n) (GPC_PGC_CTRL(n) + 0xc)
#define GPC_PGC_CTRL_PCR BIT(0)
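/* Per-SoC GPC register offsets used by the power up/down sequences */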
struct imx_pgc_regs {
u16 map;
u16 pup;
u16 pdn;
u16 hsk;
};
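/*
 * Runtime state of one GPC power domain: the optional regulator and reset
 * handle, the clocks enabled around the reset sequence, the PGC instance
 * bitmap and the per-domain request/mapping/handshake bits.
 */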
struct imx_pgc_domain {
struct generic_pm_domain genpd;
struct regmap *regmap;
const struct imx_pgc_regs *regs;
struct regulator *regulator;
struct reset_control *reset;
struct clk_bulk_data *clks;
int num_clks;
unsigned long pgc;
const struct {
u32 pxx;
u32 map;
u32 hskreq;
u32 hskack;
} bits;
const int voltage;
const bool keep_clocks;
struct device *dev;
unsigned int pgc_sw_pup_reg;
unsigned int pgc_sw_pdn_reg;
};
struct imx_pgc_domain_data {
const struct imx_pgc_domain *domains;
size_t domains_num;
const struct regmap_access_table *reg_access_table;
const struct imx_pgc_regs *pgc_regs;
};
static inline struct imx_pgc_domain *
to_imx_pgc_domain(struct generic_pm_domain *genpd)
{
return container_of(genpd, struct imx_pgc_domain, genpd);
}
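/*
 * Power up a GPC domain: enable the optional power supply, assert the
 * reset, ungate the reset clocks, request the PGC power-up and wait for the
 * request bit to clear, then release the reset and, where applicable,
 * request the ADB400 handshake.
 */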
static int imx_pgc_power_up(struct generic_pm_domain *genpd)
{
struct imx_pgc_domain *domain = to_imx_pgc_domain(genpd);
u32 reg_val, pgc;
int ret;
ret = pm_runtime_get_sync(domain->dev);
if (ret < 0) {
pm_runtime_put_noidle(domain->dev);
return ret;
}
if (!IS_ERR(domain->regulator)) {
ret = regulator_enable(domain->regulator);
if (ret) {
dev_err(domain->dev,
"failed to enable regulator: %pe\n",
ERR_PTR(ret));
goto out_put_pm;
}
}
reset_control_assert(domain->reset);
/* Enable reset clocks for all devices in the domain */
ret = clk_bulk_prepare_enable(domain->num_clks, domain->clks);
if (ret) {
dev_err(domain->dev, "failed to enable reset clocks\n");
goto out_regulator_disable;
}
/* delay for reset to propagate */
udelay(5);
if (domain->bits.pxx) {
/* request the domain to power up */
regmap_update_bits(domain->regmap, domain->regs->pup,
domain->bits.pxx, domain->bits.pxx);
/*
* As per "5.5.9.4 Example Code 4" in IMX7DRM.pdf wait
* for PUP_REQ/PDN_REQ bit to be cleared
*/
ret = regmap_read_poll_timeout(domain->regmap,
domain->regs->pup, reg_val,
!(reg_val & domain->bits.pxx),
0, USEC_PER_MSEC);
if (ret) {
dev_err(domain->dev, "failed to command PGC\n");
goto out_clk_disable;
}
/* disable power control */
for_each_set_bit(pgc, &domain->pgc, 32) {
regmap_clear_bits(domain->regmap, GPC_PGC_CTRL(pgc),
GPC_PGC_CTRL_PCR);
}
}
/* delay for reset to propagate */
udelay(5);
reset_control_deassert(domain->reset);
/* request the ADB400 to power up */
if (domain->bits.hskreq) {
regmap_update_bits(domain->regmap, domain->regs->hsk,
domain->bits.hskreq, domain->bits.hskreq);
/*
* ret = regmap_read_poll_timeout(domain->regmap, domain->regs->hsk, reg_val,
* (reg_val & domain->bits.hskack), 0,
* USEC_PER_MSEC);
* Technically we need the commented code above to wait for the handshake,
* but that requires the BLK-CTL module's BUS clk-en bit to be set.
*
* There is a separate BLK-CTL module and we will have a driver for it;
* that driver will set the BUS clk-en bit and the handshake will then be
* triggered automatically. Just add a delay and assume the handshake has
* finished after that.
*/
}
/* Disable reset clocks for all devices in the domain */
if (!domain->keep_clocks)
clk_bulk_disable_unprepare(domain->num_clks, domain->clks);
return 0;
out_clk_disable:
clk_bulk_disable_unprepare(domain->num_clks, domain->clks);
out_regulator_disable:
if (!IS_ERR(domain->regulator))
regulator_disable(domain->regulator);
out_put_pm:
pm_runtime_put(domain->dev);
return ret;
}
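/*
 * Power-down sequence mirrors power-up: re-enable the domain clocks if
 * they were released, withdraw the ADB400 handshake, re-arm the PGC power
 * control bits, issue the software power-down request and finally drop
 * the clocks, the regulator and the runtime PM reference.
 */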
static int imx_pgc_power_down(struct generic_pm_domain *genpd)
{
struct imx_pgc_domain *domain = to_imx_pgc_domain(genpd);
u32 reg_val, pgc;
int ret;
/* Enable reset clocks for all devices in the domain */
if (!domain->keep_clocks) {
ret = clk_bulk_prepare_enable(domain->num_clks, domain->clks);
if (ret) {
dev_err(domain->dev, "failed to enable reset clocks\n");
return ret;
}
}
/* request the ADB400 to power down */
if (domain->bits.hskreq) {
regmap_clear_bits(domain->regmap, domain->regs->hsk,
domain->bits.hskreq);
ret = regmap_read_poll_timeout(domain->regmap, domain->regs->hsk,
reg_val,
!(reg_val & domain->bits.hskack),
0, USEC_PER_MSEC);
if (ret) {
dev_err(domain->dev, "failed to power down ADB400\n");
goto out_clk_disable;
}
}
if (domain->bits.pxx) {
/* enable power control */
for_each_set_bit(pgc, &domain->pgc, 32) {
regmap_update_bits(domain->regmap, GPC_PGC_CTRL(pgc),
GPC_PGC_CTRL_PCR, GPC_PGC_CTRL_PCR);
}
/* request the domain to power down */
regmap_update_bits(domain->regmap, domain->regs->pdn,
domain->bits.pxx, domain->bits.pxx);
/*
 * As per "5.5.9.4 Example Code 4" in IMX7DRM.pdf, wait
 * for the PUP_REQ/PDN_REQ bit to be cleared
*/
ret = regmap_read_poll_timeout(domain->regmap,
domain->regs->pdn, reg_val,
!(reg_val & domain->bits.pxx),
0, USEC_PER_MSEC);
if (ret) {
dev_err(domain->dev, "failed to command PGC\n");
goto out_clk_disable;
}
}
/* Disable reset clocks for all devices in the domain */
clk_bulk_disable_unprepare(domain->num_clks, domain->clks);
if (!IS_ERR(domain->regulator)) {
ret = regulator_disable(domain->regulator);
if (ret) {
dev_err(domain->dev,
"failed to disable regulator: %pe\n",
ERR_PTR(ret));
return ret;
}
}
pm_runtime_put_sync_suspend(domain->dev);
return 0;
out_clk_disable:
if (!domain->keep_clocks)
clk_bulk_disable_unprepare(domain->num_clks, domain->clks);
return ret;
}
static const struct imx_pgc_domain imx7_pgc_domains[] = {
[IMX7_POWER_DOMAIN_MIPI_PHY] = {
.genpd = {
.name = "mipi-phy",
},
.bits = {
.pxx = IMX7_MIPI_PHY_SW_Pxx_REQ,
.map = IMX7_MIPI_PHY_A_CORE_DOMAIN,
},
.voltage = 1000000,
.pgc = BIT(IMX7_PGC_MIPI),
},
[IMX7_POWER_DOMAIN_PCIE_PHY] = {
.genpd = {
.name = "pcie-phy",
},
.bits = {
.pxx = IMX7_PCIE_PHY_SW_Pxx_REQ,
.map = IMX7_PCIE_PHY_A_CORE_DOMAIN,
},
.voltage = 1000000,
.pgc = BIT(IMX7_PGC_PCIE),
},
[IMX7_POWER_DOMAIN_USB_HSIC_PHY] = {
.genpd = {
.name = "usb-hsic-phy",
},
.bits = {
.pxx = IMX7_USB_HSIC_PHY_SW_Pxx_REQ,
.map = IMX7_USB_HSIC_PHY_A_CORE_DOMAIN,
},
.voltage = 1200000,
.pgc = BIT(IMX7_PGC_USB_HSIC),
},
};
static const struct regmap_range imx7_yes_ranges[] = {
regmap_reg_range(GPC_LPCR_A_CORE_BSC,
GPC_M4_PU_PDN_FLG),
regmap_reg_range(GPC_PGC_CTRL(IMX7_PGC_MIPI),
GPC_PGC_SR(IMX7_PGC_MIPI)),
regmap_reg_range(GPC_PGC_CTRL(IMX7_PGC_PCIE),
GPC_PGC_SR(IMX7_PGC_PCIE)),
regmap_reg_range(GPC_PGC_CTRL(IMX7_PGC_USB_HSIC),
GPC_PGC_SR(IMX7_PGC_USB_HSIC)),
};
static const struct regmap_access_table imx7_access_table = {
.yes_ranges = imx7_yes_ranges,
.n_yes_ranges = ARRAY_SIZE(imx7_yes_ranges),
};
static const struct imx_pgc_regs imx7_pgc_regs = {
.map = GPC_PGC_CPU_MAPPING,
.pup = GPC_PU_PGC_SW_PUP_REQ,
.pdn = GPC_PU_PGC_SW_PDN_REQ,
.hsk = GPC_PU_PWRHSK,
};
static const struct imx_pgc_domain_data imx7_pgc_domain_data = {
.domains = imx7_pgc_domains,
.domains_num = ARRAY_SIZE(imx7_pgc_domains),
.reg_access_table = &imx7_access_table,
.pgc_regs = &imx7_pgc_regs,
};
static const struct imx_pgc_domain imx8m_pgc_domains[] = {
[IMX8M_POWER_DOMAIN_MIPI] = {
.genpd = {
.name = "mipi",
},
.bits = {
.pxx = IMX8M_MIPI_SW_Pxx_REQ,
.map = IMX8M_MIPI_A53_DOMAIN,
},
.pgc = BIT(IMX8M_PGC_MIPI),
},
[IMX8M_POWER_DOMAIN_PCIE1] = {
.genpd = {
.name = "pcie1",
},
.bits = {
.pxx = IMX8M_PCIE1_SW_Pxx_REQ,
.map = IMX8M_PCIE1_A53_DOMAIN,
},
.pgc = BIT(IMX8M_PGC_PCIE1),
},
[IMX8M_POWER_DOMAIN_USB_OTG1] = {
.genpd = {
.name = "usb-otg1",
},
.bits = {
.pxx = IMX8M_OTG1_SW_Pxx_REQ,
.map = IMX8M_OTG1_A53_DOMAIN,
},
.pgc = BIT(IMX8M_PGC_OTG1),
},
[IMX8M_POWER_DOMAIN_USB_OTG2] = {
.genpd = {
.name = "usb-otg2",
},
.bits = {
.pxx = IMX8M_OTG2_SW_Pxx_REQ,
.map = IMX8M_OTG2_A53_DOMAIN,
},
.pgc = BIT(IMX8M_PGC_OTG2),
},
[IMX8M_POWER_DOMAIN_DDR1] = {
.genpd = {
.name = "ddr1",
},
.bits = {
.pxx = IMX8M_DDR1_SW_Pxx_REQ,
.map = IMX8M_DDR2_A53_DOMAIN,
},
.pgc = BIT(IMX8M_PGC_DDR1),
},
[IMX8M_POWER_DOMAIN_GPU] = {
.genpd = {
.name = "gpu",
},
.bits = {
.pxx = IMX8M_GPU_SW_Pxx_REQ,
.map = IMX8M_GPU_A53_DOMAIN,
.hskreq = IMX8M_GPU_HSK_PWRDNREQN,
.hskack = IMX8M_GPU_HSK_PWRDNACKN,
},
.pgc = BIT(IMX8M_PGC_GPU),
},
[IMX8M_POWER_DOMAIN_VPU] = {
.genpd = {
.name = "vpu",
},
.bits = {
.pxx = IMX8M_VPU_SW_Pxx_REQ,
.map = IMX8M_VPU_A53_DOMAIN,
.hskreq = IMX8M_VPU_HSK_PWRDNREQN,
.hskack = IMX8M_VPU_HSK_PWRDNACKN,
},
.pgc = BIT(IMX8M_PGC_VPU),
.keep_clocks = true,
},
[IMX8M_POWER_DOMAIN_DISP] = {
.genpd = {
.name = "disp",
},
.bits = {
.pxx = IMX8M_DISP_SW_Pxx_REQ,
.map = IMX8M_DISP_A53_DOMAIN,
.hskreq = IMX8M_DISP_HSK_PWRDNREQN,
.hskack = IMX8M_DISP_HSK_PWRDNACKN,
},
.pgc = BIT(IMX8M_PGC_DISP),
},
[IMX8M_POWER_DOMAIN_MIPI_CSI1] = {
.genpd = {
.name = "mipi-csi1",
},
.bits = {
.pxx = IMX8M_MIPI_CSI1_SW_Pxx_REQ,
.map = IMX8M_MIPI_CSI1_A53_DOMAIN,
},
.pgc = BIT(IMX8M_PGC_MIPI_CSI1),
},
[IMX8M_POWER_DOMAIN_MIPI_CSI2] = {
.genpd = {
.name = "mipi-csi2",
},
.bits = {
.pxx = IMX8M_MIPI_CSI2_SW_Pxx_REQ,
.map = IMX8M_MIPI_CSI2_A53_DOMAIN,
},
.pgc = BIT(IMX8M_PGC_MIPI_CSI2),
},
[IMX8M_POWER_DOMAIN_PCIE2] = {
.genpd = {
.name = "pcie2",
},
.bits = {
.pxx = IMX8M_PCIE2_SW_Pxx_REQ,
.map = IMX8M_PCIE2_A53_DOMAIN,
},
.pgc = BIT(IMX8M_PGC_PCIE2),
},
};
static const struct regmap_range imx8m_yes_ranges[] = {
regmap_reg_range(GPC_LPCR_A_CORE_BSC,
GPC_PU_PWRHSK),
regmap_reg_range(GPC_PGC_CTRL(IMX8M_PGC_MIPI),
GPC_PGC_SR(IMX8M_PGC_MIPI)),
regmap_reg_range(GPC_PGC_CTRL(IMX8M_PGC_PCIE1),
GPC_PGC_SR(IMX8M_PGC_PCIE1)),
regmap_reg_range(GPC_PGC_CTRL(IMX8M_PGC_OTG1),
GPC_PGC_SR(IMX8M_PGC_OTG1)),
regmap_reg_range(GPC_PGC_CTRL(IMX8M_PGC_OTG2),
GPC_PGC_SR(IMX8M_PGC_OTG2)),
regmap_reg_range(GPC_PGC_CTRL(IMX8M_PGC_DDR1),
GPC_PGC_SR(IMX8M_PGC_DDR1)),
regmap_reg_range(GPC_PGC_CTRL(IMX8M_PGC_GPU),
GPC_PGC_SR(IMX8M_PGC_GPU)),
regmap_reg_range(GPC_PGC_CTRL(IMX8M_PGC_VPU),
GPC_PGC_SR(IMX8M_PGC_VPU)),
regmap_reg_range(GPC_PGC_CTRL(IMX8M_PGC_DISP),
GPC_PGC_SR(IMX8M_PGC_DISP)),
regmap_reg_range(GPC_PGC_CTRL(IMX8M_PGC_MIPI_CSI1),
GPC_PGC_SR(IMX8M_PGC_MIPI_CSI1)),
regmap_reg_range(GPC_PGC_CTRL(IMX8M_PGC_MIPI_CSI2),
GPC_PGC_SR(IMX8M_PGC_MIPI_CSI2)),
regmap_reg_range(GPC_PGC_CTRL(IMX8M_PGC_PCIE2),
GPC_PGC_SR(IMX8M_PGC_PCIE2)),
};
static const struct regmap_access_table imx8m_access_table = {
.yes_ranges = imx8m_yes_ranges,
.n_yes_ranges = ARRAY_SIZE(imx8m_yes_ranges),
};
static const struct imx_pgc_domain_data imx8m_pgc_domain_data = {
.domains = imx8m_pgc_domains,
.domains_num = ARRAY_SIZE(imx8m_pgc_domains),
.reg_access_table = &imx8m_access_table,
.pgc_regs = &imx7_pgc_regs,
};
static const struct imx_pgc_domain imx8mm_pgc_domains[] = {
[IMX8MM_POWER_DOMAIN_HSIOMIX] = {
.genpd = {
.name = "hsiomix",
},
.bits = {
.pxx = 0, /* no power sequence control */
.map = 0, /* no power sequence control */
.hskreq = IMX8MM_HSIO_HSK_PWRDNREQN,
.hskack = IMX8MM_HSIO_HSK_PWRDNACKN,
},
.keep_clocks = true,
},
[IMX8MM_POWER_DOMAIN_PCIE] = {
.genpd = {
.name = "pcie",
},
.bits = {
.pxx = IMX8MM_PCIE_SW_Pxx_REQ,
.map = IMX8MM_PCIE_A53_DOMAIN,
},
.pgc = BIT(IMX8MM_PGC_PCIE),
},
[IMX8MM_POWER_DOMAIN_OTG1] = {
.genpd = {
.name = "usb-otg1",
.flags = GENPD_FLAG_ACTIVE_WAKEUP,
},
.bits = {
.pxx = IMX8MM_OTG1_SW_Pxx_REQ,
.map = IMX8MM_OTG1_A53_DOMAIN,
},
.pgc = BIT(IMX8MM_PGC_OTG1),
},
[IMX8MM_POWER_DOMAIN_OTG2] = {
.genpd = {
.name = "usb-otg2",
.flags = GENPD_FLAG_ACTIVE_WAKEUP,
},
.bits = {
.pxx = IMX8MM_OTG2_SW_Pxx_REQ,
.map = IMX8MM_OTG2_A53_DOMAIN,
},
.pgc = BIT(IMX8MM_PGC_OTG2),
},
[IMX8MM_POWER_DOMAIN_GPUMIX] = {
.genpd = {
.name = "gpumix",
},
.bits = {
.pxx = IMX8MM_GPUMIX_SW_Pxx_REQ,
.map = IMX8MM_GPUMIX_A53_DOMAIN,
.hskreq = IMX8MM_GPUMIX_HSK_PWRDNREQN,
.hskack = IMX8MM_GPUMIX_HSK_PWRDNACKN,
},
.pgc = BIT(IMX8MM_PGC_GPUMIX),
.keep_clocks = true,
},
[IMX8MM_POWER_DOMAIN_GPU] = {
.genpd = {
.name = "gpu",
},
.bits = {
.pxx = IMX8MM_GPU_SW_Pxx_REQ,
.map = IMX8MM_GPU_A53_DOMAIN,
.hskreq = IMX8MM_GPU_HSK_PWRDNREQN,
.hskack = IMX8MM_GPU_HSK_PWRDNACKN,
},
.pgc = BIT(IMX8MM_PGC_GPU2D) | BIT(IMX8MM_PGC_GPU3D),
},
[IMX8MM_POWER_DOMAIN_VPUMIX] = {
.genpd = {
.name = "vpumix",
},
.bits = {
.pxx = IMX8MM_VPUMIX_SW_Pxx_REQ,
.map = IMX8MM_VPUMIX_A53_DOMAIN,
.hskreq = IMX8MM_VPUMIX_HSK_PWRDNREQN,
.hskack = IMX8MM_VPUMIX_HSK_PWRDNACKN,
},
.pgc = BIT(IMX8MM_PGC_VPUMIX),
.keep_clocks = true,
},
[IMX8MM_POWER_DOMAIN_VPUG1] = {
.genpd = {
.name = "vpu-g1",
},
.bits = {
.pxx = IMX8MM_VPUG1_SW_Pxx_REQ,
.map = IMX8MM_VPUG1_A53_DOMAIN,
},
.pgc = BIT(IMX8MM_PGC_VPUG1),
},
[IMX8MM_POWER_DOMAIN_VPUG2] = {
.genpd = {
.name = "vpu-g2",
},
.bits = {
.pxx = IMX8MM_VPUG2_SW_Pxx_REQ,
.map = IMX8MM_VPUG2_A53_DOMAIN,
},
.pgc = BIT(IMX8MM_PGC_VPUG2),
},
[IMX8MM_POWER_DOMAIN_VPUH1] = {
.genpd = {
.name = "vpu-h1",
},
.bits = {
.pxx = IMX8MM_VPUH1_SW_Pxx_REQ,
.map = IMX8MM_VPUH1_A53_DOMAIN,
},
.pgc = BIT(IMX8MM_PGC_VPUH1),
.keep_clocks = true,
},
[IMX8MM_POWER_DOMAIN_DISPMIX] = {
.genpd = {
.name = "dispmix",
},
.bits = {
.pxx = IMX8MM_DISPMIX_SW_Pxx_REQ,
.map = IMX8MM_DISPMIX_A53_DOMAIN,
.hskreq = IMX8MM_DISPMIX_HSK_PWRDNREQN,
.hskack = IMX8MM_DISPMIX_HSK_PWRDNACKN,
},
.pgc = BIT(IMX8MM_PGC_DISPMIX),
.keep_clocks = true,
},
[IMX8MM_POWER_DOMAIN_MIPI] = {
.genpd = {
.name = "mipi",
},
.bits = {
.pxx = IMX8MM_MIPI_SW_Pxx_REQ,
.map = IMX8MM_MIPI_A53_DOMAIN,
},
.pgc = BIT(IMX8MM_PGC_MIPI),
},
};
static const struct regmap_range imx8mm_yes_ranges[] = {
regmap_reg_range(GPC_LPCR_A_CORE_BSC,
GPC_PU_PWRHSK),
regmap_reg_range(GPC_PGC_CTRL(IMX8MM_PGC_MIPI),
GPC_PGC_SR(IMX8MM_PGC_MIPI)),
regmap_reg_range(GPC_PGC_CTRL(IMX8MM_PGC_PCIE),
GPC_PGC_SR(IMX8MM_PGC_PCIE)),
regmap_reg_range(GPC_PGC_CTRL(IMX8MM_PGC_OTG1),
GPC_PGC_SR(IMX8MM_PGC_OTG1)),
regmap_reg_range(GPC_PGC_CTRL(IMX8MM_PGC_OTG2),
GPC_PGC_SR(IMX8MM_PGC_OTG2)),
regmap_reg_range(GPC_PGC_CTRL(IMX8MM_PGC_DDR1),
GPC_PGC_SR(IMX8MM_PGC_DDR1)),
regmap_reg_range(GPC_PGC_CTRL(IMX8MM_PGC_GPU2D),
GPC_PGC_SR(IMX8MM_PGC_GPU2D)),
regmap_reg_range(GPC_PGC_CTRL(IMX8MM_PGC_GPUMIX),
GPC_PGC_SR(IMX8MM_PGC_GPUMIX)),
regmap_reg_range(GPC_PGC_CTRL(IMX8MM_PGC_VPUMIX),
GPC_PGC_SR(IMX8MM_PGC_VPUMIX)),
regmap_reg_range(GPC_PGC_CTRL(IMX8MM_PGC_GPU3D),
GPC_PGC_SR(IMX8MM_PGC_GPU3D)),
regmap_reg_range(GPC_PGC_CTRL(IMX8MM_PGC_DISPMIX),
GPC_PGC_SR(IMX8MM_PGC_DISPMIX)),
regmap_reg_range(GPC_PGC_CTRL(IMX8MM_PGC_VPUG1),
GPC_PGC_SR(IMX8MM_PGC_VPUG1)),
regmap_reg_range(GPC_PGC_CTRL(IMX8MM_PGC_VPUG2),
GPC_PGC_SR(IMX8MM_PGC_VPUG2)),
regmap_reg_range(GPC_PGC_CTRL(IMX8MM_PGC_VPUH1),
GPC_PGC_SR(IMX8MM_PGC_VPUH1)),
};
static const struct regmap_access_table imx8mm_access_table = {
.yes_ranges = imx8mm_yes_ranges,
.n_yes_ranges = ARRAY_SIZE(imx8mm_yes_ranges),
};
static const struct imx_pgc_domain_data imx8mm_pgc_domain_data = {
.domains = imx8mm_pgc_domains,
.domains_num = ARRAY_SIZE(imx8mm_pgc_domains),
.reg_access_table = &imx8mm_access_table,
.pgc_regs = &imx7_pgc_regs,
};
static const struct imx_pgc_domain imx8mp_pgc_domains[] = {
[IMX8MP_POWER_DOMAIN_MIPI_PHY1] = {
.genpd = {
.name = "mipi-phy1",
},
.bits = {
.pxx = IMX8MP_MIPI_PHY1_SW_Pxx_REQ,
.map = IMX8MP_MIPI_PHY1_A53_DOMAIN,
},
.pgc = BIT(IMX8MP_PGC_MIPI1),
},
[IMX8MP_POWER_DOMAIN_PCIE_PHY] = {
.genpd = {
.name = "pcie-phy1",
},
.bits = {
.pxx = IMX8MP_PCIE_PHY_SW_Pxx_REQ,
.map = IMX8MP_PCIE_PHY_A53_DOMAIN,
},
.pgc = BIT(IMX8MP_PGC_PCIE),
},
[IMX8MP_POWER_DOMAIN_USB1_PHY] = {
.genpd = {
.name = "usb-otg1",
},
.bits = {
.pxx = IMX8MP_USB1_PHY_Pxx_REQ,
.map = IMX8MP_USB1_PHY_A53_DOMAIN,
},
.pgc = BIT(IMX8MP_PGC_USB1),
},
[IMX8MP_POWER_DOMAIN_USB2_PHY] = {
.genpd = {
.name = "usb-otg2",
},
.bits = {
.pxx = IMX8MP_USB2_PHY_Pxx_REQ,
.map = IMX8MP_USB2_PHY_A53_DOMAIN,
},
.pgc = BIT(IMX8MP_PGC_USB2),
},
[IMX8MP_POWER_DOMAIN_MLMIX] = {
.genpd = {
.name = "mlmix",
},
.bits = {
.pxx = IMX8MP_MLMIX_Pxx_REQ,
.map = IMX8MP_MLMIX_A53_DOMAIN,
.hskreq = IMX8MP_MLMIX_PWRDNREQN,
.hskack = IMX8MP_MLMIX_PWRDNACKN,
},
.pgc = BIT(IMX8MP_PGC_MLMIX),
.keep_clocks = true,
},
[IMX8MP_POWER_DOMAIN_AUDIOMIX] = {
.genpd = {
.name = "audiomix",
},
.bits = {
.pxx = IMX8MP_AUDIOMIX_Pxx_REQ,
.map = IMX8MP_AUDIOMIX_A53_DOMAIN,
.hskreq = IMX8MP_AUDIOMIX_PWRDNREQN,
.hskack = IMX8MP_AUDIOMIX_PWRDNACKN,
},
.pgc = BIT(IMX8MP_PGC_AUDIOMIX),
.keep_clocks = true,
},
[IMX8MP_POWER_DOMAIN_GPU2D] = {
.genpd = {
.name = "gpu2d",
},
.bits = {
.pxx = IMX8MP_GPU_2D_Pxx_REQ,
.map = IMX8MP_GPU2D_A53_DOMAIN,
},
.pgc = BIT(IMX8MP_PGC_GPU2D),
},
[IMX8MP_POWER_DOMAIN_GPUMIX] = {
.genpd = {
.name = "gpumix",
},
.bits = {
.pxx = IMX8MP_GPU_SHARE_LOGIC_Pxx_REQ,
.map = IMX8MP_GPUMIX_A53_DOMAIN,
.hskreq = IMX8MP_GPUMIX_PWRDNREQN,
.hskack = IMX8MP_GPUMIX_PWRDNACKN,
},
.pgc = BIT(IMX8MP_PGC_GPUMIX),
.keep_clocks = true,
},
[IMX8MP_POWER_DOMAIN_VPUMIX] = {
.genpd = {
.name = "vpumix",
},
.bits = {
.pxx = IMX8MP_VPU_MIX_SHARE_LOGIC_Pxx_REQ,
.map = IMX8MP_VPUMIX_A53_DOMAIN,
.hskreq = IMX8MP_VPUMIX_PWRDNREQN,
.hskack = IMX8MP_VPUMIX_PWRDNACKN,
},
.pgc = BIT(IMX8MP_PGC_VPUMIX),
.keep_clocks = true,
},
[IMX8MP_POWER_DOMAIN_GPU3D] = {
.genpd = {
.name = "gpu3d",
},
.bits = {
.pxx = IMX8MP_GPU_3D_Pxx_REQ,
.map = IMX8MP_GPU3D_A53_DOMAIN,
},
.pgc = BIT(IMX8MP_PGC_GPU3D),
},
[IMX8MP_POWER_DOMAIN_MEDIAMIX] = {
.genpd = {
.name = "mediamix",
},
.bits = {
.pxx = IMX8MP_MEDIMIX_Pxx_REQ,
.map = IMX8MP_MEDIAMIX_A53_DOMAIN,
.hskreq = IMX8MP_MEDIAMIX_PWRDNREQN,
.hskack = IMX8MP_MEDIAMIX_PWRDNACKN,
},
.pgc = BIT(IMX8MP_PGC_MEDIAMIX),
.keep_clocks = true,
},
[IMX8MP_POWER_DOMAIN_VPU_G1] = {
.genpd = {
.name = "vpu-g1",
},
.bits = {
.pxx = IMX8MP_VPU_G1_Pxx_REQ,
.map = IMX8MP_VPU_G1_A53_DOMAIN,
},
.pgc = BIT(IMX8MP_PGC_VPU_G1),
},
[IMX8MP_POWER_DOMAIN_VPU_G2] = {
.genpd = {
.name = "vpu-g2",
},
.bits = {
.pxx = IMX8MP_VPU_G2_Pxx_REQ,
.map = IMX8MP_VPU_G2_A53_DOMAIN,
},
.pgc = BIT(IMX8MP_PGC_VPU_G2),
},
[IMX8MP_POWER_DOMAIN_VPU_VC8000E] = {
.genpd = {
.name = "vpu-h1",
},
.bits = {
.pxx = IMX8MP_VPU_VC8K_Pxx_REQ,
.map = IMX8MP_VPU_VC8000E_A53_DOMAIN,
},
.pgc = BIT(IMX8MP_PGC_VPU_VC8000E),
},
[IMX8MP_POWER_DOMAIN_HDMIMIX] = {
.genpd = {
.name = "hdmimix",
},
.bits = {
.pxx = IMX8MP_HDMIMIX_Pxx_REQ,
.map = IMX8MP_HDMIMIX_A53_DOMAIN,
.hskreq = IMX8MP_HDMIMIX_PWRDNREQN,
.hskack = IMX8MP_HDMIMIX_PWRDNACKN,
},
.pgc = BIT(IMX8MP_PGC_HDMIMIX),
.keep_clocks = true,
},
[IMX8MP_POWER_DOMAIN_HDMI_PHY] = {
.genpd = {
.name = "hdmi-phy",
},
.bits = {
.pxx = IMX8MP_HDMI_PHY_Pxx_REQ,
.map = IMX8MP_HDMI_PHY_A53_DOMAIN,
},
.pgc = BIT(IMX8MP_PGC_HDMI),
},
[IMX8MP_POWER_DOMAIN_MIPI_PHY2] = {
.genpd = {
.name = "mipi-phy2",
},
.bits = {
.pxx = IMX8MP_MIPI_PHY2_Pxx_REQ,
.map = IMX8MP_MIPI_PHY2_A53_DOMAIN,
},
.pgc = BIT(IMX8MP_PGC_MIPI2),
},
[IMX8MP_POWER_DOMAIN_HSIOMIX] = {
.genpd = {
.name = "hsiomix",
},
.bits = {
.pxx = IMX8MP_HSIOMIX_Pxx_REQ,
.map = IMX8MP_HSIOMIX_A53_DOMAIN,
.hskreq = IMX8MP_HSIOMIX_PWRDNREQN,
.hskack = IMX8MP_HSIOMIX_PWRDNACKN,
},
.pgc = BIT(IMX8MP_PGC_HSIOMIX),
.keep_clocks = true,
},
[IMX8MP_POWER_DOMAIN_MEDIAMIX_ISPDWP] = {
.genpd = {
.name = "mediamix-isp-dwp",
},
.bits = {
.pxx = IMX8MP_MEDIA_ISP_DWP_Pxx_REQ,
.map = IMX8MP_MEDIA_ISPDWP_A53_DOMAIN,
},
.pgc = BIT(IMX8MP_PGC_MEDIA_ISP_DWP),
},
};
static const struct regmap_range imx8mp_yes_ranges[] = {
regmap_reg_range(GPC_LPCR_A_CORE_BSC,
IMX8MP_GPC_PGC_CPU_MAPPING),
regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_NOC),
GPC_PGC_SR(IMX8MP_PGC_NOC)),
regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_MIPI1),
GPC_PGC_SR(IMX8MP_PGC_MIPI1)),
regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_PCIE),
GPC_PGC_SR(IMX8MP_PGC_PCIE)),
regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_USB1),
GPC_PGC_SR(IMX8MP_PGC_USB1)),
regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_USB2),
GPC_PGC_SR(IMX8MP_PGC_USB2)),
regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_MLMIX),
GPC_PGC_SR(IMX8MP_PGC_MLMIX)),
regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_AUDIOMIX),
GPC_PGC_SR(IMX8MP_PGC_AUDIOMIX)),
regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_GPU2D),
GPC_PGC_SR(IMX8MP_PGC_GPU2D)),
regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_GPUMIX),
GPC_PGC_SR(IMX8MP_PGC_GPUMIX)),
regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_VPUMIX),
GPC_PGC_SR(IMX8MP_PGC_VPUMIX)),
regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_GPU3D),
GPC_PGC_SR(IMX8MP_PGC_GPU3D)),
regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_MEDIAMIX),
GPC_PGC_SR(IMX8MP_PGC_MEDIAMIX)),
regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_VPU_G1),
GPC_PGC_SR(IMX8MP_PGC_VPU_G1)),
regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_VPU_G2),
GPC_PGC_SR(IMX8MP_PGC_VPU_G2)),
regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_VPU_VC8000E),
GPC_PGC_SR(IMX8MP_PGC_VPU_VC8000E)),
regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_HDMIMIX),
GPC_PGC_SR(IMX8MP_PGC_HDMIMIX)),
regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_HDMI),
GPC_PGC_SR(IMX8MP_PGC_HDMI)),
regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_MIPI2),
GPC_PGC_SR(IMX8MP_PGC_MIPI2)),
regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_HSIOMIX),
GPC_PGC_SR(IMX8MP_PGC_HSIOMIX)),
regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_MEDIA_ISP_DWP),
GPC_PGC_SR(IMX8MP_PGC_MEDIA_ISP_DWP)),
regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_DDRMIX),
GPC_PGC_SR(IMX8MP_PGC_DDRMIX)),
};
static const struct regmap_access_table imx8mp_access_table = {
.yes_ranges = imx8mp_yes_ranges,
.n_yes_ranges = ARRAY_SIZE(imx8mp_yes_ranges),
};
static const struct imx_pgc_regs imx8mp_pgc_regs = {
.map = IMX8MP_GPC_PGC_CPU_MAPPING,
.pup = IMX8MP_GPC_PU_PGC_SW_PUP_REQ,
.pdn = IMX8MP_GPC_PU_PGC_SW_PDN_REQ,
.hsk = IMX8MP_GPC_PU_PWRHSK,
};
static const struct imx_pgc_domain_data imx8mp_pgc_domain_data = {
.domains = imx8mp_pgc_domains,
.domains_num = ARRAY_SIZE(imx8mp_pgc_domains),
.reg_access_table = &imx8mp_access_table,
.pgc_regs = &imx8mp_pgc_regs,
};
static const struct imx_pgc_domain imx8mn_pgc_domains[] = {
[IMX8MN_POWER_DOMAIN_HSIOMIX] = {
.genpd = {
.name = "hsiomix",
},
.bits = {
.pxx = 0, /* no power sequence control */
.map = 0, /* no power sequence control */
.hskreq = IMX8MN_HSIO_HSK_PWRDNREQN,
.hskack = IMX8MN_HSIO_HSK_PWRDNACKN,
},
.keep_clocks = true,
},
[IMX8MN_POWER_DOMAIN_OTG1] = {
.genpd = {
.name = "usb-otg1",
.flags = GENPD_FLAG_ACTIVE_WAKEUP,
},
.bits = {
.pxx = IMX8MN_OTG1_SW_Pxx_REQ,
.map = IMX8MN_OTG1_A53_DOMAIN,
},
.pgc = BIT(IMX8MN_PGC_OTG1),
},
[IMX8MN_POWER_DOMAIN_GPUMIX] = {
.genpd = {
.name = "gpumix",
},
.bits = {
.pxx = IMX8MN_GPUMIX_SW_Pxx_REQ,
.map = IMX8MN_GPUMIX_A53_DOMAIN,
.hskreq = IMX8MN_GPUMIX_HSK_PWRDNREQN,
.hskack = IMX8MN_GPUMIX_HSK_PWRDNACKN,
},
.pgc = BIT(IMX8MN_PGC_GPUMIX),
.keep_clocks = true,
},
[IMX8MN_POWER_DOMAIN_DISPMIX] = {
.genpd = {
.name = "dispmix",
},
.bits = {
.pxx = IMX8MN_DISPMIX_SW_Pxx_REQ,
.map = IMX8MN_DISPMIX_A53_DOMAIN,
.hskreq = IMX8MN_DISPMIX_HSK_PWRDNREQN,
.hskack = IMX8MN_DISPMIX_HSK_PWRDNACKN,
},
.pgc = BIT(IMX8MN_PGC_DISPMIX),
.keep_clocks = true,
},
[IMX8MN_POWER_DOMAIN_MIPI] = {
.genpd = {
.name = "mipi",
},
.bits = {
.pxx = IMX8MN_MIPI_SW_Pxx_REQ,
.map = IMX8MN_MIPI_A53_DOMAIN,
},
.pgc = BIT(IMX8MN_PGC_MIPI),
},
};
static const struct regmap_range imx8mn_yes_ranges[] = {
regmap_reg_range(GPC_LPCR_A_CORE_BSC,
GPC_PU_PWRHSK),
regmap_reg_range(GPC_PGC_CTRL(IMX8MN_PGC_MIPI),
GPC_PGC_SR(IMX8MN_PGC_MIPI)),
regmap_reg_range(GPC_PGC_CTRL(IMX8MN_PGC_OTG1),
GPC_PGC_SR(IMX8MN_PGC_OTG1)),
regmap_reg_range(GPC_PGC_CTRL(IMX8MN_PGC_DDR1),
GPC_PGC_SR(IMX8MN_PGC_DDR1)),
regmap_reg_range(GPC_PGC_CTRL(IMX8MN_PGC_GPUMIX),
GPC_PGC_SR(IMX8MN_PGC_GPUMIX)),
regmap_reg_range(GPC_PGC_CTRL(IMX8MN_PGC_DISPMIX),
GPC_PGC_SR(IMX8MN_PGC_DISPMIX)),
};
static const struct regmap_access_table imx8mn_access_table = {
.yes_ranges = imx8mn_yes_ranges,
.n_yes_ranges = ARRAY_SIZE(imx8mn_yes_ranges),
};
static const struct imx_pgc_domain_data imx8mn_pgc_domain_data = {
.domains = imx8mn_pgc_domains,
.domains_num = ARRAY_SIZE(imx8mn_pgc_domains),
.reg_access_table = &imx8mn_access_table,
.pgc_regs = &imx7_pgc_regs,
};
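/*
 * Per-domain platform driver. One "imx-pgc-domain" device is created for
 * each domain by the GPCv2 probe below; this probe picks up the optional
 * regulator, the domain clocks and resets, programs the CPU mapping bits
 * and registers the domain as a genpd provider for its DT node.
 */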
static int imx_pgc_domain_probe(struct platform_device *pdev)
{
struct imx_pgc_domain *domain = pdev->dev.platform_data;
int ret;
domain->dev = &pdev->dev;
domain->regulator = devm_regulator_get_optional(domain->dev, "power");
if (IS_ERR(domain->regulator)) {
if (PTR_ERR(domain->regulator) != -ENODEV)
return dev_err_probe(domain->dev, PTR_ERR(domain->regulator),
"Failed to get domain's regulator\n");
} else if (domain->voltage) {
regulator_set_voltage(domain->regulator,
domain->voltage, domain->voltage);
}
domain->num_clks = devm_clk_bulk_get_all(domain->dev, &domain->clks);
if (domain->num_clks < 0)
return dev_err_probe(domain->dev, domain->num_clks,
"Failed to get domain's clocks\n");
domain->reset = devm_reset_control_array_get_optional_exclusive(domain->dev);
if (IS_ERR(domain->reset))
return dev_err_probe(domain->dev, PTR_ERR(domain->reset),
"Failed to get domain's resets\n");
pm_runtime_enable(domain->dev);
if (domain->bits.map)
regmap_update_bits(domain->regmap, domain->regs->map,
domain->bits.map, domain->bits.map);
ret = pm_genpd_init(&domain->genpd, NULL, true);
if (ret) {
dev_err(domain->dev, "Failed to init power domain\n");
goto out_domain_unmap;
}
if (IS_ENABLED(CONFIG_LOCKDEP) &&
of_property_read_bool(domain->dev->of_node, "power-domains"))
lockdep_set_subclass(&domain->genpd.mlock, 1);
ret = of_genpd_add_provider_simple(domain->dev->of_node,
&domain->genpd);
if (ret) {
dev_err(domain->dev, "Failed to add genpd provider\n");
goto out_genpd_remove;
}
return 0;
out_genpd_remove:
pm_genpd_remove(&domain->genpd);
out_domain_unmap:
if (domain->bits.map)
regmap_update_bits(domain->regmap, domain->regs->map,
domain->bits.map, 0);
pm_runtime_disable(domain->dev);
return ret;
}
static int imx_pgc_domain_remove(struct platform_device *pdev)
{
struct imx_pgc_domain *domain = pdev->dev.platform_data;
of_genpd_del_provider(domain->dev->of_node);
pm_genpd_remove(&domain->genpd);
if (domain->bits.map)
regmap_update_bits(domain->regmap, domain->regs->map,
domain->bits.map, 0);
pm_runtime_disable(domain->dev);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int imx_pgc_domain_suspend(struct device *dev)
{
int ret;
/*
* This may look strange, but is done so the generic PM_SLEEP code
* can power down our domain and more importantly power it up again
* after resume, without tripping over our usage of runtime PM to
* power up/down the nested domains.
*/
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
pm_runtime_put_noidle(dev);
return ret;
}
return 0;
}
static int imx_pgc_domain_resume(struct device *dev)
{
return pm_runtime_put(dev);
}
#endif
static const struct dev_pm_ops imx_pgc_domain_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(imx_pgc_domain_suspend, imx_pgc_domain_resume)
};
static const struct platform_device_id imx_pgc_domain_id[] = {
{ "imx-pgc-domain", },
{ },
};
static struct platform_driver imx_pgc_domain_driver = {
.driver = {
.name = "imx-pgc",
.pm = &imx_pgc_domain_pm_ops,
},
.probe = imx_pgc_domain_probe,
.remove = imx_pgc_domain_remove,
.id_table = imx_pgc_domain_id,
};
builtin_platform_driver(imx_pgc_domain_driver)
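/*
 * Top-level GPCv2 probe: map the GPC register block, build a regmap that
 * is restricted by the per-SoC access table, then spawn one
 * "imx-pgc-domain" platform device for every enabled child of the "pgc"
 * node, passing the matching domain description as platform data.
 */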
static int imx_gpcv2_probe(struct platform_device *pdev)
{
const struct imx_pgc_domain_data *domain_data =
of_device_get_match_data(&pdev->dev);
struct regmap_config regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.rd_table = domain_data->reg_access_table,
.wr_table = domain_data->reg_access_table,
.max_register = SZ_4K,
};
struct device *dev = &pdev->dev;
struct device_node *pgc_np, *np;
struct regmap *regmap;
void __iomem *base;
int ret;
pgc_np = of_get_child_by_name(dev->of_node, "pgc");
if (!pgc_np) {
dev_err(dev, "No power domains specified in DT\n");
return -EINVAL;
}
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
regmap = devm_regmap_init_mmio(dev, base, &regmap_config);
if (IS_ERR(regmap)) {
ret = PTR_ERR(regmap);
dev_err(dev, "failed to init regmap (%d)\n", ret);
return ret;
}
for_each_child_of_node(pgc_np, np) {
struct platform_device *pd_pdev;
struct imx_pgc_domain *domain;
u32 domain_index;
if (!of_device_is_available(np))
continue;
ret = of_property_read_u32(np, "reg", &domain_index);
if (ret) {
dev_err(dev, "Failed to read 'reg' property\n");
of_node_put(np);
return ret;
}
if (domain_index >= domain_data->domains_num) {
dev_warn(dev,
"Domain index %d is out of bounds\n",
domain_index);
continue;
}
pd_pdev = platform_device_alloc("imx-pgc-domain",
domain_index);
if (!pd_pdev) {
dev_err(dev, "Failed to allocate platform device\n");
of_node_put(np);
return -ENOMEM;
}
ret = platform_device_add_data(pd_pdev,
&domain_data->domains[domain_index],
sizeof(domain_data->domains[domain_index]));
if (ret) {
platform_device_put(pd_pdev);
of_node_put(np);
return ret;
}
domain = pd_pdev->dev.platform_data;
domain->regmap = regmap;
domain->regs = domain_data->pgc_regs;
domain->genpd.power_on = imx_pgc_power_up;
domain->genpd.power_off = imx_pgc_power_down;
pd_pdev->dev.parent = dev;
device_set_node(&pd_pdev->dev, of_fwnode_handle(np));
ret = platform_device_add(pd_pdev);
if (ret) {
platform_device_put(pd_pdev);
of_node_put(np);
return ret;
}
}
return 0;
}
static const struct of_device_id imx_gpcv2_dt_ids[] = {
{ .compatible = "fsl,imx7d-gpc", .data = &imx7_pgc_domain_data, },
{ .compatible = "fsl,imx8mm-gpc", .data = &imx8mm_pgc_domain_data, },
{ .compatible = "fsl,imx8mn-gpc", .data = &imx8mn_pgc_domain_data, },
{ .compatible = "fsl,imx8mp-gpc", .data = &imx8mp_pgc_domain_data, },
{ .compatible = "fsl,imx8mq-gpc", .data = &imx8m_pgc_domain_data, },
{ }
};
static struct platform_driver imx_gpc_driver = {
.driver = {
.name = "imx-gpcv2",
.of_match_table = imx_gpcv2_dt_ids,
},
.probe = imx_gpcv2_probe,
};
builtin_platform_driver(imx_gpc_driver)
| linux-master | drivers/pmdomain/imx/gpcv2.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ZynqMP Generic PM domain support
*
* Copyright (C) 2015-2019 Xilinx, Inc.
*
* Davorin Mista <[email protected]>
* Jolly Shah <[email protected]>
* Rajan Vaja <[email protected]>
*/
#include <linux/err.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/slab.h>
#include <linux/firmware/xlnx-zynqmp.h>
#define ZYNQMP_NUM_DOMAINS (100)
static int min_capability;
/**
* struct zynqmp_pm_domain - Wrapper around struct generic_pm_domain
* @gpd: Generic power domain
* @node_id: PM node ID corresponding to device inside PM domain
* @requested: The PM node mapped to the PM domain has been requested
*/
struct zynqmp_pm_domain {
struct generic_pm_domain gpd;
u32 node_id;
bool requested;
};
#define to_zynqmp_pm_domain(pm_domain) \
container_of(pm_domain, struct zynqmp_pm_domain, gpd)
/**
* zynqmp_gpd_is_active_wakeup_path() - Check if device is in wakeup source
* path
* @dev: Device to check for wakeup source path
* @not_used: Data member (not required)
*
 * This function walks the device's child hierarchy and checks whether any
 * device in it is set as a wakeup source.
*
* Return: 1 if device is in wakeup source path else 0
*/
static int zynqmp_gpd_is_active_wakeup_path(struct device *dev, void *not_used)
{
int may_wakeup;
may_wakeup = device_may_wakeup(dev);
if (may_wakeup)
return may_wakeup;
return device_for_each_child(dev, NULL,
zynqmp_gpd_is_active_wakeup_path);
}
/**
* zynqmp_gpd_power_on() - Power on PM domain
* @domain: Generic PM domain
*
* This function is called before devices inside a PM domain are resumed, to
* power on PM domain.
*
* Return: 0 on success, error code otherwise
*/
static int zynqmp_gpd_power_on(struct generic_pm_domain *domain)
{
struct zynqmp_pm_domain *pd = to_zynqmp_pm_domain(domain);
int ret;
ret = zynqmp_pm_set_requirement(pd->node_id,
ZYNQMP_PM_CAPABILITY_ACCESS,
ZYNQMP_PM_MAX_QOS,
ZYNQMP_PM_REQUEST_ACK_BLOCKING);
if (ret) {
dev_err(&domain->dev,
"failed to set requirement to 0x%x for PM node id %d: %d\n",
ZYNQMP_PM_CAPABILITY_ACCESS, pd->node_id, ret);
return ret;
}
dev_dbg(&domain->dev, "set requirement to 0x%x for PM node id %d\n",
ZYNQMP_PM_CAPABILITY_ACCESS, pd->node_id);
return 0;
}
/**
* zynqmp_gpd_power_off() - Power off PM domain
* @domain: Generic PM domain
*
* This function is called after devices inside a PM domain are suspended, to
* power off PM domain.
*
* Return: 0 on success, error code otherwise
*/
static int zynqmp_gpd_power_off(struct generic_pm_domain *domain)
{
struct zynqmp_pm_domain *pd = to_zynqmp_pm_domain(domain);
int ret;
struct pm_domain_data *pdd, *tmp;
u32 capabilities = min_capability;
bool may_wakeup;
/* If domain is already released there is nothing to be done */
if (!pd->requested) {
dev_dbg(&domain->dev, "PM node id %d is already released\n",
pd->node_id);
return 0;
}
list_for_each_entry_safe(pdd, tmp, &domain->dev_list, list_node) {
/* If device is in wakeup path, set capability to WAKEUP */
may_wakeup = zynqmp_gpd_is_active_wakeup_path(pdd->dev, NULL);
if (may_wakeup) {
dev_dbg(pdd->dev, "device is in wakeup path in %s\n",
domain->name);
capabilities = ZYNQMP_PM_CAPABILITY_WAKEUP;
break;
}
}
ret = zynqmp_pm_set_requirement(pd->node_id, capabilities, 0,
ZYNQMP_PM_REQUEST_ACK_NO);
if (ret) {
dev_err(&domain->dev,
"failed to set requirement to 0x%x for PM node id %d: %d\n",
capabilities, pd->node_id, ret);
return ret;
}
dev_dbg(&domain->dev, "set requirement to 0x%x for PM node id %d\n",
capabilities, pd->node_id);
return 0;
}
/**
* zynqmp_gpd_attach_dev() - Attach device to the PM domain
* @domain: Generic PM domain
* @dev: Device to attach
*
* Return: 0 on success, error code otherwise
*/
static int zynqmp_gpd_attach_dev(struct generic_pm_domain *domain,
struct device *dev)
{
struct zynqmp_pm_domain *pd = to_zynqmp_pm_domain(domain);
struct device_link *link;
int ret;
link = device_link_add(dev, &domain->dev, DL_FLAG_SYNC_STATE_ONLY);
if (!link)
dev_dbg(&domain->dev, "failed to create device link for %s\n",
dev_name(dev));
/* If this is not the first device to attach there is nothing to do */
if (domain->device_count)
return 0;
ret = zynqmp_pm_request_node(pd->node_id, 0, 0,
ZYNQMP_PM_REQUEST_ACK_BLOCKING);
if (ret) {
dev_err(&domain->dev, "%s request failed for node %d: %d\n",
domain->name, pd->node_id, ret);
return ret;
}
pd->requested = true;
dev_dbg(&domain->dev, "%s requested PM node id %d\n",
dev_name(dev), pd->node_id);
return 0;
}
/**
* zynqmp_gpd_detach_dev() - Detach device from the PM domain
* @domain: Generic PM domain
* @dev: Device to detach
*/
static void zynqmp_gpd_detach_dev(struct generic_pm_domain *domain,
struct device *dev)
{
struct zynqmp_pm_domain *pd = to_zynqmp_pm_domain(domain);
int ret;
/* If this is not the last device to detach there is nothing to do */
if (domain->device_count)
return;
ret = zynqmp_pm_release_node(pd->node_id);
if (ret) {
dev_err(&domain->dev, "failed to release PM node id %d: %d\n",
pd->node_id, ret);
return;
}
pd->requested = false;
dev_dbg(&domain->dev, "%s released PM node id %d\n",
dev_name(dev), pd->node_id);
}
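/*
 * Translate a power-domain specifier into a generic_pm_domain. Domains
 * are assigned lazily: the first unused entry (node_id == 0) is claimed
 * when a PM node ID is requested for the first time.
 */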
static struct generic_pm_domain *zynqmp_gpd_xlate
(struct of_phandle_args *genpdspec, void *data)
{
struct genpd_onecell_data *genpd_data = data;
unsigned int i, idx = genpdspec->args[0];
struct zynqmp_pm_domain *pd;
pd = to_zynqmp_pm_domain(genpd_data->domains[0]);
if (genpdspec->args_count != 1)
return ERR_PTR(-EINVAL);
/* Check for existing pm domains */
for (i = 0; i < ZYNQMP_NUM_DOMAINS; i++) {
if (pd[i].node_id == idx)
goto done;
}
/*
 * No existing power domain was found for this index, so claim the first
 * unused entry (node_id == 0) in the power-domain list.
*/
for (i = 0; i < ZYNQMP_NUM_DOMAINS; i++) {
if (pd[i].node_id == 0) {
pd[i].node_id = idx;
break;
}
}
done:
if (!genpd_data->domains[i] || i == ZYNQMP_NUM_DOMAINS)
return ERR_PTR(-ENOENT);
return genpd_data->domains[i];
}
static int zynqmp_gpd_probe(struct platform_device *pdev)
{
int i;
struct genpd_onecell_data *zynqmp_pd_data;
struct generic_pm_domain **domains;
struct zynqmp_pm_domain *pd;
struct device *dev = &pdev->dev;
pd = devm_kcalloc(dev, ZYNQMP_NUM_DOMAINS, sizeof(*pd), GFP_KERNEL);
if (!pd)
return -ENOMEM;
zynqmp_pd_data = devm_kzalloc(dev, sizeof(*zynqmp_pd_data), GFP_KERNEL);
if (!zynqmp_pd_data)
return -ENOMEM;
zynqmp_pd_data->xlate = zynqmp_gpd_xlate;
domains = devm_kcalloc(dev, ZYNQMP_NUM_DOMAINS, sizeof(*domains),
GFP_KERNEL);
if (!domains)
return -ENOMEM;
if (!of_device_is_compatible(dev->parent->of_node,
"xlnx,zynqmp-firmware"))
min_capability = ZYNQMP_PM_CAPABILITY_UNUSABLE;
for (i = 0; i < ZYNQMP_NUM_DOMAINS; i++, pd++) {
pd->node_id = 0;
pd->gpd.name = kasprintf(GFP_KERNEL, "domain%d", i);
pd->gpd.power_off = zynqmp_gpd_power_off;
pd->gpd.power_on = zynqmp_gpd_power_on;
pd->gpd.attach_dev = zynqmp_gpd_attach_dev;
pd->gpd.detach_dev = zynqmp_gpd_detach_dev;
domains[i] = &pd->gpd;
/* Mark all PM domains as initially powered off */
pm_genpd_init(&pd->gpd, NULL, true);
}
zynqmp_pd_data->domains = domains;
zynqmp_pd_data->num_domains = ZYNQMP_NUM_DOMAINS;
of_genpd_add_provider_onecell(dev->parent->of_node, zynqmp_pd_data);
return 0;
}
static int zynqmp_gpd_remove(struct platform_device *pdev)
{
of_genpd_del_provider(pdev->dev.parent->of_node);
return 0;
}
static void zynqmp_gpd_sync_state(struct device *dev)
{
int ret;
ret = zynqmp_pm_init_finalize();
if (ret)
dev_warn(dev, "failed to release power management to firmware\n");
}
static struct platform_driver zynqmp_power_domain_driver = {
.driver = {
.name = "zynqmp_power_controller",
.sync_state = zynqmp_gpd_sync_state,
},
.probe = zynqmp_gpd_probe,
.remove = zynqmp_gpd_remove,
};
module_platform_driver(zynqmp_power_domain_driver);
MODULE_ALIAS("platform:zynqmp_power_controller");
| linux-master | drivers/pmdomain/xilinx/zynqmp-pm-domains.c |
/*
* Copyright (c) 2017 BayLibre, SAS
* Author: Neil Armstrong <[email protected]>
*
* SPDX-License-Identifier: GPL-2.0+
*/
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/bitfield.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/reset.h>
#include <linux/clk.h>
#include <linux/module.h>
/* AO Offsets */
#define AO_RTI_GEN_PWR_SLEEP0 (0x3a << 2)
#define GEN_PWR_VPU_HDMI BIT(8)
#define GEN_PWR_VPU_HDMI_ISO BIT(9)
/* HHI Offsets */
#define HHI_MEM_PD_REG0 (0x40 << 2)
#define HHI_VPU_MEM_PD_REG0 (0x41 << 2)
#define HHI_VPU_MEM_PD_REG1 (0x42 << 2)
#define HHI_VPU_MEM_PD_REG2 (0x4d << 2)
struct meson_gx_pwrc_vpu {
struct generic_pm_domain genpd;
struct regmap *regmap_ao;
struct regmap *regmap_hhi;
struct reset_control *rstc;
struct clk *vpu_clk;
struct clk *vapb_clk;
};
static inline
struct meson_gx_pwrc_vpu *genpd_to_pd(struct generic_pm_domain *d)
{
return container_of(d, struct meson_gx_pwrc_vpu, genpd);
}
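/*
 * Power-off sequence for the VPU/HDMI domain on GX SoCs: raise the
 * isolation bit, power down the VPU and HHI memory banks in small steps,
 * set the sleep bit and finally gate the VPU and VAPB clocks.
 */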
static int meson_gx_pwrc_vpu_power_off(struct generic_pm_domain *genpd)
{
struct meson_gx_pwrc_vpu *pd = genpd_to_pd(genpd);
int i;
regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
GEN_PWR_VPU_HDMI_ISO, GEN_PWR_VPU_HDMI_ISO);
udelay(20);
/* Power Down Memories */
for (i = 0; i < 32; i += 2) {
regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG0,
0x3 << i, 0x3 << i);
udelay(5);
}
for (i = 0; i < 32; i += 2) {
regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG1,
0x3 << i, 0x3 << i);
udelay(5);
}
for (i = 8; i < 16; i++) {
regmap_update_bits(pd->regmap_hhi, HHI_MEM_PD_REG0,
BIT(i), BIT(i));
udelay(5);
}
udelay(20);
regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
GEN_PWR_VPU_HDMI, GEN_PWR_VPU_HDMI);
msleep(20);
clk_disable_unprepare(pd->vpu_clk);
clk_disable_unprepare(pd->vapb_clk);
return 0;
}
static int meson_g12a_pwrc_vpu_power_off(struct generic_pm_domain *genpd)
{
struct meson_gx_pwrc_vpu *pd = genpd_to_pd(genpd);
int i;
regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
GEN_PWR_VPU_HDMI_ISO, GEN_PWR_VPU_HDMI_ISO);
udelay(20);
/* Power Down Memories */
for (i = 0; i < 32; i += 2) {
regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG0,
0x3 << i, 0x3 << i);
udelay(5);
}
for (i = 0; i < 32; i += 2) {
regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG1,
0x3 << i, 0x3 << i);
udelay(5);
}
for (i = 0; i < 32; i += 2) {
regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG2,
0x3 << i, 0x3 << i);
udelay(5);
}
for (i = 8; i < 16; i++) {
regmap_update_bits(pd->regmap_hhi, HHI_MEM_PD_REG0,
BIT(i), BIT(i));
udelay(5);
}
udelay(20);
regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
GEN_PWR_VPU_HDMI, GEN_PWR_VPU_HDMI);
msleep(20);
clk_disable_unprepare(pd->vpu_clk);
clk_disable_unprepare(pd->vapb_clk);
return 0;
}
static int meson_gx_pwrc_vpu_setup_clk(struct meson_gx_pwrc_vpu *pd)
{
int ret;
ret = clk_prepare_enable(pd->vpu_clk);
if (ret)
return ret;
ret = clk_prepare_enable(pd->vapb_clk);
if (ret)
clk_disable_unprepare(pd->vpu_clk);
return ret;
}
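/*
 * Power-on sequence: clear the sleep bit, power the memory banks back up,
 * cycle the reset lines around dropping isolation, then re-enable the
 * VPU and VAPB clocks.
 */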
static int meson_gx_pwrc_vpu_power_on(struct generic_pm_domain *genpd)
{
struct meson_gx_pwrc_vpu *pd = genpd_to_pd(genpd);
int ret;
int i;
regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
GEN_PWR_VPU_HDMI, 0);
udelay(20);
/* Power Up Memories */
for (i = 0; i < 32; i += 2) {
regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG0,
0x3 << i, 0);
udelay(5);
}
for (i = 0; i < 32; i += 2) {
regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG1,
0x3 << i, 0);
udelay(5);
}
for (i = 8; i < 16; i++) {
regmap_update_bits(pd->regmap_hhi, HHI_MEM_PD_REG0,
BIT(i), 0);
udelay(5);
}
udelay(20);
ret = reset_control_assert(pd->rstc);
if (ret)
return ret;
regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
GEN_PWR_VPU_HDMI_ISO, 0);
ret = reset_control_deassert(pd->rstc);
if (ret)
return ret;
ret = meson_gx_pwrc_vpu_setup_clk(pd);
if (ret)
return ret;
return 0;
}
static int meson_g12a_pwrc_vpu_power_on(struct generic_pm_domain *genpd)
{
struct meson_gx_pwrc_vpu *pd = genpd_to_pd(genpd);
int ret;
int i;
regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
GEN_PWR_VPU_HDMI, 0);
udelay(20);
/* Power Up Memories */
for (i = 0; i < 32; i += 2) {
regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG0,
0x3 << i, 0);
udelay(5);
}
for (i = 0; i < 32; i += 2) {
regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG1,
0x3 << i, 0);
udelay(5);
}
for (i = 0; i < 32; i += 2) {
regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG2,
0x3 << i, 0);
udelay(5);
}
for (i = 8; i < 16; i++) {
regmap_update_bits(pd->regmap_hhi, HHI_MEM_PD_REG0,
BIT(i), 0);
udelay(5);
}
udelay(20);
ret = reset_control_assert(pd->rstc);
if (ret)
return ret;
regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
GEN_PWR_VPU_HDMI_ISO, 0);
ret = reset_control_deassert(pd->rstc);
if (ret)
return ret;
ret = meson_gx_pwrc_vpu_setup_clk(pd);
if (ret)
return ret;
return 0;
}
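/* Return true when the VPU/HDMI domain is currently powered down */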
static bool meson_gx_pwrc_vpu_get_power(struct meson_gx_pwrc_vpu *pd)
{
u32 reg;
regmap_read(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0, &reg);
return (reg & GEN_PWR_VPU_HDMI);
}
static struct meson_gx_pwrc_vpu vpu_hdmi_pd = {
.genpd = {
.name = "vpu_hdmi",
.power_off = meson_gx_pwrc_vpu_power_off,
.power_on = meson_gx_pwrc_vpu_power_on,
},
};
static struct meson_gx_pwrc_vpu vpu_hdmi_pd_g12a = {
.genpd = {
.name = "vpu_hdmi",
.power_off = meson_g12a_pwrc_vpu_power_off,
.power_on = meson_g12a_pwrc_vpu_power_on,
},
};
static int meson_gx_pwrc_vpu_probe(struct platform_device *pdev)
{
const struct meson_gx_pwrc_vpu *vpu_pd_match;
struct regmap *regmap_ao, *regmap_hhi;
struct meson_gx_pwrc_vpu *vpu_pd;
struct device_node *parent_np;
struct reset_control *rstc;
struct clk *vpu_clk;
struct clk *vapb_clk;
bool powered_off;
int ret;
vpu_pd_match = of_device_get_match_data(&pdev->dev);
if (!vpu_pd_match) {
dev_err(&pdev->dev, "failed to get match data\n");
return -ENODEV;
}
vpu_pd = devm_kzalloc(&pdev->dev, sizeof(*vpu_pd), GFP_KERNEL);
if (!vpu_pd)
return -ENOMEM;
memcpy(vpu_pd, vpu_pd_match, sizeof(*vpu_pd));
parent_np = of_get_parent(pdev->dev.of_node);
regmap_ao = syscon_node_to_regmap(parent_np);
of_node_put(parent_np);
if (IS_ERR(regmap_ao)) {
dev_err(&pdev->dev, "failed to get regmap\n");
return PTR_ERR(regmap_ao);
}
regmap_hhi = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"amlogic,hhi-sysctrl");
if (IS_ERR(regmap_hhi)) {
dev_err(&pdev->dev, "failed to get HHI regmap\n");
return PTR_ERR(regmap_hhi);
}
rstc = devm_reset_control_array_get_exclusive(&pdev->dev);
if (IS_ERR(rstc))
return dev_err_probe(&pdev->dev, PTR_ERR(rstc),
"failed to get reset lines\n");
vpu_clk = devm_clk_get(&pdev->dev, "vpu");
if (IS_ERR(vpu_clk)) {
dev_err(&pdev->dev, "vpu clock request failed\n");
return PTR_ERR(vpu_clk);
}
vapb_clk = devm_clk_get(&pdev->dev, "vapb");
if (IS_ERR(vapb_clk)) {
dev_err(&pdev->dev, "vapb clock request failed\n");
return PTR_ERR(vapb_clk);
}
vpu_pd->regmap_ao = regmap_ao;
vpu_pd->regmap_hhi = regmap_hhi;
vpu_pd->rstc = rstc;
vpu_pd->vpu_clk = vpu_clk;
vpu_pd->vapb_clk = vapb_clk;
platform_set_drvdata(pdev, vpu_pd);
powered_off = meson_gx_pwrc_vpu_get_power(vpu_pd);
/* If already powered, sync the clock states */
if (!powered_off) {
ret = meson_gx_pwrc_vpu_setup_clk(vpu_pd);
if (ret)
return ret;
}
vpu_pd->genpd.flags = GENPD_FLAG_ALWAYS_ON;
pm_genpd_init(&vpu_pd->genpd, NULL, powered_off);
return of_genpd_add_provider_simple(pdev->dev.of_node,
&vpu_pd->genpd);
}
static void meson_gx_pwrc_vpu_shutdown(struct platform_device *pdev)
{
struct meson_gx_pwrc_vpu *vpu_pd = platform_get_drvdata(pdev);
bool powered_off;
powered_off = meson_gx_pwrc_vpu_get_power(vpu_pd);
if (!powered_off)
vpu_pd->genpd.power_off(&vpu_pd->genpd);
}
static const struct of_device_id meson_gx_pwrc_vpu_match_table[] = {
{ .compatible = "amlogic,meson-gx-pwrc-vpu", .data = &vpu_hdmi_pd },
{
.compatible = "amlogic,meson-g12a-pwrc-vpu",
.data = &vpu_hdmi_pd_g12a
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, meson_gx_pwrc_vpu_match_table);
static struct platform_driver meson_gx_pwrc_vpu_driver = {
.probe = meson_gx_pwrc_vpu_probe,
.shutdown = meson_gx_pwrc_vpu_shutdown,
.driver = {
.name = "meson_gx_pwrc_vpu",
.of_match_table = meson_gx_pwrc_vpu_match_table,
},
};
module_platform_driver(meson_gx_pwrc_vpu_driver);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/pmdomain/amlogic/meson-gx-pwrc-vpu.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2019 BayLibre, SAS
* Author: Neil Armstrong <[email protected]>
*/
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/bitfield.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/reset-controller.h>
#include <linux/reset.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <dt-bindings/power/meson8-power.h>
#include <dt-bindings/power/meson-axg-power.h>
#include <dt-bindings/power/meson-g12a-power.h>
#include <dt-bindings/power/meson-gxbb-power.h>
#include <dt-bindings/power/meson-sm1-power.h>
/* AO Offsets */
#define GX_AO_RTI_GEN_PWR_SLEEP0 (0x3a << 2)
#define GX_AO_RTI_GEN_PWR_ISO0 (0x3b << 2)
/*
* Meson8/Meson8b/Meson8m2 only expose the power management registers of the
* AO-bus as syscon. 0x3a from GX translates to 0x02, 0x3b translates to 0x03
* and so on.
*/
#define MESON8_AO_RTI_GEN_PWR_SLEEP0 (0x02 << 2)
#define MESON8_AO_RTI_GEN_PWR_ISO0 (0x03 << 2)
/* HHI Offsets */
#define HHI_MEM_PD_REG0 (0x40 << 2)
#define HHI_VPU_MEM_PD_REG0 (0x41 << 2)
#define HHI_VPU_MEM_PD_REG1 (0x42 << 2)
#define HHI_VPU_MEM_PD_REG3 (0x43 << 2)
#define HHI_VPU_MEM_PD_REG4 (0x44 << 2)
#define HHI_AUDIO_MEM_PD_REG0 (0x45 << 2)
#define HHI_NANOQ_MEM_PD_REG0 (0x46 << 2)
#define HHI_NANOQ_MEM_PD_REG1 (0x47 << 2)
#define HHI_VPU_MEM_PD_REG2 (0x4d << 2)
#define G12A_HHI_NANOQ_MEM_PD_REG0 (0x43 << 2)
#define G12A_HHI_NANOQ_MEM_PD_REG1 (0x44 << 2)
struct meson_ee_pwrc;
struct meson_ee_pwrc_domain;
struct meson_ee_pwrc_mem_domain {
unsigned int reg;
unsigned int mask;
};
struct meson_ee_pwrc_top_domain {
unsigned int sleep_reg;
unsigned int sleep_mask;
unsigned int iso_reg;
unsigned int iso_mask;
};
struct meson_ee_pwrc_domain_desc {
char *name;
unsigned int reset_names_count;
unsigned int clk_names_count;
struct meson_ee_pwrc_top_domain *top_pd;
unsigned int mem_pd_count;
struct meson_ee_pwrc_mem_domain *mem_pd;
bool (*is_powered_off)(struct meson_ee_pwrc_domain *pwrc_domain);
};
struct meson_ee_pwrc_domain_data {
unsigned int count;
struct meson_ee_pwrc_domain_desc *domains;
};
/* TOP Power Domains */
static struct meson_ee_pwrc_top_domain gx_pwrc_vpu = {
.sleep_reg = GX_AO_RTI_GEN_PWR_SLEEP0,
.sleep_mask = BIT(8),
.iso_reg = GX_AO_RTI_GEN_PWR_SLEEP0,
.iso_mask = BIT(9),
};
static struct meson_ee_pwrc_top_domain meson8_pwrc_vpu = {
.sleep_reg = MESON8_AO_RTI_GEN_PWR_SLEEP0,
.sleep_mask = BIT(8),
.iso_reg = MESON8_AO_RTI_GEN_PWR_SLEEP0,
.iso_mask = BIT(9),
};
#define SM1_EE_PD(__bit) \
{ \
.sleep_reg = GX_AO_RTI_GEN_PWR_SLEEP0, \
.sleep_mask = BIT(__bit), \
.iso_reg = GX_AO_RTI_GEN_PWR_ISO0, \
.iso_mask = BIT(__bit), \
}
static struct meson_ee_pwrc_top_domain sm1_pwrc_vpu = SM1_EE_PD(8);
static struct meson_ee_pwrc_top_domain sm1_pwrc_nna = SM1_EE_PD(16);
static struct meson_ee_pwrc_top_domain sm1_pwrc_usb = SM1_EE_PD(17);
static struct meson_ee_pwrc_top_domain sm1_pwrc_pci = SM1_EE_PD(18);
static struct meson_ee_pwrc_top_domain sm1_pwrc_ge2d = SM1_EE_PD(19);
static struct meson_ee_pwrc_top_domain g12a_pwrc_nna = {
.sleep_reg = GX_AO_RTI_GEN_PWR_SLEEP0,
.sleep_mask = BIT(16) | BIT(17),
.iso_reg = GX_AO_RTI_GEN_PWR_ISO0,
.iso_mask = BIT(16) | BIT(17),
};
/* Memory PD Domains */
#define VPU_MEMPD(__reg) \
{ __reg, GENMASK(1, 0) }, \
{ __reg, GENMASK(3, 2) }, \
{ __reg, GENMASK(5, 4) }, \
{ __reg, GENMASK(7, 6) }, \
{ __reg, GENMASK(9, 8) }, \
{ __reg, GENMASK(11, 10) }, \
{ __reg, GENMASK(13, 12) }, \
{ __reg, GENMASK(15, 14) }, \
{ __reg, GENMASK(17, 16) }, \
{ __reg, GENMASK(19, 18) }, \
{ __reg, GENMASK(21, 20) }, \
{ __reg, GENMASK(23, 22) }, \
{ __reg, GENMASK(25, 24) }, \
{ __reg, GENMASK(27, 26) }, \
{ __reg, GENMASK(29, 28) }, \
{ __reg, GENMASK(31, 30) }
#define VPU_HHI_MEMPD(__reg) \
{ __reg, BIT(8) }, \
{ __reg, BIT(9) }, \
{ __reg, BIT(10) }, \
{ __reg, BIT(11) }, \
{ __reg, BIT(12) }, \
{ __reg, BIT(13) }, \
{ __reg, BIT(14) }, \
{ __reg, BIT(15) }
static struct meson_ee_pwrc_mem_domain axg_pwrc_mem_vpu[] = {
VPU_MEMPD(HHI_VPU_MEM_PD_REG0),
VPU_HHI_MEMPD(HHI_MEM_PD_REG0),
};
static struct meson_ee_pwrc_mem_domain g12a_pwrc_mem_vpu[] = {
VPU_MEMPD(HHI_VPU_MEM_PD_REG0),
VPU_MEMPD(HHI_VPU_MEM_PD_REG1),
VPU_MEMPD(HHI_VPU_MEM_PD_REG2),
VPU_HHI_MEMPD(HHI_MEM_PD_REG0),
};
static struct meson_ee_pwrc_mem_domain gxbb_pwrc_mem_vpu[] = {
VPU_MEMPD(HHI_VPU_MEM_PD_REG0),
VPU_MEMPD(HHI_VPU_MEM_PD_REG1),
VPU_HHI_MEMPD(HHI_MEM_PD_REG0),
};
static struct meson_ee_pwrc_mem_domain meson_pwrc_mem_eth[] = {
{ HHI_MEM_PD_REG0, GENMASK(3, 2) },
};
static struct meson_ee_pwrc_mem_domain meson8_pwrc_audio_dsp_mem[] = {
{ HHI_MEM_PD_REG0, GENMASK(1, 0) },
};
static struct meson_ee_pwrc_mem_domain meson8_pwrc_mem_vpu[] = {
VPU_MEMPD(HHI_VPU_MEM_PD_REG0),
VPU_MEMPD(HHI_VPU_MEM_PD_REG1),
VPU_HHI_MEMPD(HHI_MEM_PD_REG0),
};
static struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_vpu[] = {
VPU_MEMPD(HHI_VPU_MEM_PD_REG0),
VPU_MEMPD(HHI_VPU_MEM_PD_REG1),
VPU_MEMPD(HHI_VPU_MEM_PD_REG2),
VPU_MEMPD(HHI_VPU_MEM_PD_REG3),
{ HHI_VPU_MEM_PD_REG4, GENMASK(1, 0) },
{ HHI_VPU_MEM_PD_REG4, GENMASK(3, 2) },
{ HHI_VPU_MEM_PD_REG4, GENMASK(5, 4) },
{ HHI_VPU_MEM_PD_REG4, GENMASK(7, 6) },
VPU_HHI_MEMPD(HHI_MEM_PD_REG0),
};
static struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_nna[] = {
{ HHI_NANOQ_MEM_PD_REG0, 0xff },
{ HHI_NANOQ_MEM_PD_REG1, 0xff },
};
static struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_usb[] = {
{ HHI_MEM_PD_REG0, GENMASK(31, 30) },
};
static struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_pcie[] = {
{ HHI_MEM_PD_REG0, GENMASK(29, 26) },
};
static struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_ge2d[] = {
{ HHI_MEM_PD_REG0, GENMASK(25, 18) },
};
static struct meson_ee_pwrc_mem_domain axg_pwrc_mem_audio[] = {
{ HHI_MEM_PD_REG0, GENMASK(5, 4) },
};
static struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_audio[] = {
{ HHI_MEM_PD_REG0, GENMASK(5, 4) },
{ HHI_AUDIO_MEM_PD_REG0, GENMASK(1, 0) },
{ HHI_AUDIO_MEM_PD_REG0, GENMASK(3, 2) },
{ HHI_AUDIO_MEM_PD_REG0, GENMASK(5, 4) },
{ HHI_AUDIO_MEM_PD_REG0, GENMASK(7, 6) },
{ HHI_AUDIO_MEM_PD_REG0, GENMASK(13, 12) },
{ HHI_AUDIO_MEM_PD_REG0, GENMASK(15, 14) },
{ HHI_AUDIO_MEM_PD_REG0, GENMASK(17, 16) },
{ HHI_AUDIO_MEM_PD_REG0, GENMASK(19, 18) },
{ HHI_AUDIO_MEM_PD_REG0, GENMASK(21, 20) },
{ HHI_AUDIO_MEM_PD_REG0, GENMASK(23, 22) },
{ HHI_AUDIO_MEM_PD_REG0, GENMASK(25, 24) },
{ HHI_AUDIO_MEM_PD_REG0, GENMASK(27, 26) },
};
static struct meson_ee_pwrc_mem_domain g12a_pwrc_mem_nna[] = {
{ G12A_HHI_NANOQ_MEM_PD_REG0, GENMASK(31, 0) },
{ G12A_HHI_NANOQ_MEM_PD_REG1, GENMASK(23, 0) },
};
#define VPU_PD(__name, __top_pd, __mem, __is_pwr_off, __resets, __clks) \
{ \
.name = __name, \
.reset_names_count = __resets, \
.clk_names_count = __clks, \
.top_pd = __top_pd, \
.mem_pd_count = ARRAY_SIZE(__mem), \
.mem_pd = __mem, \
.is_powered_off = __is_pwr_off, \
}
#define TOP_PD(__name, __top_pd, __mem, __is_pwr_off) \
{ \
.name = __name, \
.top_pd = __top_pd, \
.mem_pd_count = ARRAY_SIZE(__mem), \
.mem_pd = __mem, \
.is_powered_off = __is_pwr_off, \
}
#define MEM_PD(__name, __mem) \
TOP_PD(__name, NULL, __mem, NULL)
static bool pwrc_ee_is_powered_off(struct meson_ee_pwrc_domain *pwrc_domain);
static struct meson_ee_pwrc_domain_desc axg_pwrc_domains[] = {
[PWRC_AXG_VPU_ID] = VPU_PD("VPU", &gx_pwrc_vpu, axg_pwrc_mem_vpu,
pwrc_ee_is_powered_off, 5, 2),
[PWRC_AXG_ETHERNET_MEM_ID] = MEM_PD("ETH", meson_pwrc_mem_eth),
[PWRC_AXG_AUDIO_ID] = MEM_PD("AUDIO", axg_pwrc_mem_audio),
};
static struct meson_ee_pwrc_domain_desc g12a_pwrc_domains[] = {
[PWRC_G12A_VPU_ID] = VPU_PD("VPU", &gx_pwrc_vpu, g12a_pwrc_mem_vpu,
pwrc_ee_is_powered_off, 11, 2),
[PWRC_G12A_ETH_ID] = MEM_PD("ETH", meson_pwrc_mem_eth),
[PWRC_G12A_NNA_ID] = TOP_PD("NNA", &g12a_pwrc_nna, g12a_pwrc_mem_nna,
pwrc_ee_is_powered_off),
};
static struct meson_ee_pwrc_domain_desc gxbb_pwrc_domains[] = {
[PWRC_GXBB_VPU_ID] = VPU_PD("VPU", &gx_pwrc_vpu, gxbb_pwrc_mem_vpu,
pwrc_ee_is_powered_off, 12, 2),
[PWRC_GXBB_ETHERNET_MEM_ID] = MEM_PD("ETH", meson_pwrc_mem_eth),
};
static struct meson_ee_pwrc_domain_desc meson8_pwrc_domains[] = {
[PWRC_MESON8_VPU_ID] = VPU_PD("VPU", &meson8_pwrc_vpu,
meson8_pwrc_mem_vpu,
pwrc_ee_is_powered_off, 0, 1),
[PWRC_MESON8_ETHERNET_MEM_ID] = MEM_PD("ETHERNET_MEM",
meson_pwrc_mem_eth),
[PWRC_MESON8_AUDIO_DSP_MEM_ID] = MEM_PD("AUDIO_DSP_MEM",
meson8_pwrc_audio_dsp_mem),
};
static struct meson_ee_pwrc_domain_desc meson8b_pwrc_domains[] = {
[PWRC_MESON8_VPU_ID] = VPU_PD("VPU", &meson8_pwrc_vpu,
meson8_pwrc_mem_vpu,
pwrc_ee_is_powered_off, 11, 1),
[PWRC_MESON8_ETHERNET_MEM_ID] = MEM_PD("ETHERNET_MEM",
meson_pwrc_mem_eth),
[PWRC_MESON8_AUDIO_DSP_MEM_ID] = MEM_PD("AUDIO_DSP_MEM",
meson8_pwrc_audio_dsp_mem),
};
static struct meson_ee_pwrc_domain_desc sm1_pwrc_domains[] = {
[PWRC_SM1_VPU_ID] = VPU_PD("VPU", &sm1_pwrc_vpu, sm1_pwrc_mem_vpu,
pwrc_ee_is_powered_off, 11, 2),
[PWRC_SM1_NNA_ID] = TOP_PD("NNA", &sm1_pwrc_nna, sm1_pwrc_mem_nna,
pwrc_ee_is_powered_off),
[PWRC_SM1_USB_ID] = TOP_PD("USB", &sm1_pwrc_usb, sm1_pwrc_mem_usb,
pwrc_ee_is_powered_off),
[PWRC_SM1_PCIE_ID] = TOP_PD("PCI", &sm1_pwrc_pci, sm1_pwrc_mem_pcie,
pwrc_ee_is_powered_off),
[PWRC_SM1_GE2D_ID] = TOP_PD("GE2D", &sm1_pwrc_ge2d, sm1_pwrc_mem_ge2d,
pwrc_ee_is_powered_off),
[PWRC_SM1_AUDIO_ID] = MEM_PD("AUDIO", sm1_pwrc_mem_audio),
[PWRC_SM1_ETH_ID] = MEM_PD("ETH", meson_pwrc_mem_eth),
};
struct meson_ee_pwrc_domain {
struct generic_pm_domain base;
bool enabled;
struct meson_ee_pwrc *pwrc;
struct meson_ee_pwrc_domain_desc desc;
struct clk_bulk_data *clks;
int num_clks;
struct reset_control *rstc;
int num_rstc;
};
struct meson_ee_pwrc {
struct regmap *regmap_ao;
struct regmap *regmap_hhi;
struct meson_ee_pwrc_domain *domains;
struct genpd_onecell_data xlate;
};
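/* A set sleep bit in the AO sleep register means the domain is powered off */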
static bool pwrc_ee_is_powered_off(struct meson_ee_pwrc_domain *pwrc_domain)
{
u32 reg;
regmap_read(pwrc_domain->pwrc->regmap_ao,
pwrc_domain->desc.top_pd->sleep_reg, &reg);
return (reg & pwrc_domain->desc.top_pd->sleep_mask);
}
static int meson_ee_pwrc_off(struct generic_pm_domain *domain)
{
struct meson_ee_pwrc_domain *pwrc_domain =
container_of(domain, struct meson_ee_pwrc_domain, base);
int i;
if (pwrc_domain->desc.top_pd)
regmap_update_bits(pwrc_domain->pwrc->regmap_ao,
pwrc_domain->desc.top_pd->sleep_reg,
pwrc_domain->desc.top_pd->sleep_mask,
pwrc_domain->desc.top_pd->sleep_mask);
udelay(20);
for (i = 0 ; i < pwrc_domain->desc.mem_pd_count ; ++i)
regmap_update_bits(pwrc_domain->pwrc->regmap_hhi,
pwrc_domain->desc.mem_pd[i].reg,
pwrc_domain->desc.mem_pd[i].mask,
pwrc_domain->desc.mem_pd[i].mask);
udelay(20);
if (pwrc_domain->desc.top_pd)
regmap_update_bits(pwrc_domain->pwrc->regmap_ao,
pwrc_domain->desc.top_pd->iso_reg,
pwrc_domain->desc.top_pd->iso_mask,
pwrc_domain->desc.top_pd->iso_mask);
if (pwrc_domain->num_clks) {
msleep(20);
clk_bulk_disable_unprepare(pwrc_domain->num_clks,
pwrc_domain->clks);
}
return 0;
}
static int meson_ee_pwrc_on(struct generic_pm_domain *domain)
{
struct meson_ee_pwrc_domain *pwrc_domain =
container_of(domain, struct meson_ee_pwrc_domain, base);
int i, ret;
if (pwrc_domain->desc.top_pd)
regmap_update_bits(pwrc_domain->pwrc->regmap_ao,
pwrc_domain->desc.top_pd->sleep_reg,
pwrc_domain->desc.top_pd->sleep_mask, 0);
udelay(20);
for (i = 0 ; i < pwrc_domain->desc.mem_pd_count ; ++i)
regmap_update_bits(pwrc_domain->pwrc->regmap_hhi,
pwrc_domain->desc.mem_pd[i].reg,
pwrc_domain->desc.mem_pd[i].mask, 0);
udelay(20);
ret = reset_control_assert(pwrc_domain->rstc);
if (ret)
return ret;
if (pwrc_domain->desc.top_pd)
regmap_update_bits(pwrc_domain->pwrc->regmap_ao,
pwrc_domain->desc.top_pd->iso_reg,
pwrc_domain->desc.top_pd->iso_mask, 0);
ret = reset_control_deassert(pwrc_domain->rstc);
if (ret)
return ret;
return clk_bulk_prepare_enable(pwrc_domain->num_clks,
pwrc_domain->clks);
}
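/*
 * Initialize one domain: fetch its resets and clocks (warning if the DT
 * counts do not match the domain description), hook up the genpd
 * callbacks and register the domain with the initial power state
 * reported by the hardware.
 */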
static int meson_ee_pwrc_init_domain(struct platform_device *pdev,
struct meson_ee_pwrc *pwrc,
struct meson_ee_pwrc_domain *dom)
{
int ret;
dom->pwrc = pwrc;
dom->num_rstc = dom->desc.reset_names_count;
dom->num_clks = dom->desc.clk_names_count;
if (dom->num_rstc) {
int count = reset_control_get_count(&pdev->dev);
if (count != dom->num_rstc)
dev_warn(&pdev->dev, "Invalid resets count %d for domain %s\n",
count, dom->desc.name);
dom->rstc = devm_reset_control_array_get_exclusive(&pdev->dev);
if (IS_ERR(dom->rstc))
return PTR_ERR(dom->rstc);
}
if (dom->num_clks) {
int ret = devm_clk_bulk_get_all(&pdev->dev, &dom->clks);
if (ret < 0)
return ret;
if (dom->num_clks != ret) {
dev_warn(&pdev->dev, "Invalid clocks count %d for domain %s\n",
ret, dom->desc.name);
dom->num_clks = ret;
}
}
dom->base.name = dom->desc.name;
dom->base.power_on = meson_ee_pwrc_on;
dom->base.power_off = meson_ee_pwrc_off;
/*
 * TOFIX: This is a special case for the VPU power domain, which may
 * already have been enabled by the bootloader. In that case the VPU
 * pipeline may be functional even though no driver ever attaches to
 * this power domain, and disabling the domain could cause system
 * errors. This is why GENPD_FLAG_ALWAYS_ON is used here.
 * For the same reason, the clocks should be enabled in case we need
 * to power the domain off, otherwise the internal clock
 * prepare/enable counters won't be in sync.
*/
if (dom->num_clks && dom->desc.is_powered_off && !dom->desc.is_powered_off(dom)) {
ret = clk_bulk_prepare_enable(dom->num_clks, dom->clks);
if (ret)
return ret;
dom->base.flags = GENPD_FLAG_ALWAYS_ON;
ret = pm_genpd_init(&dom->base, NULL, false);
if (ret)
return ret;
} else {
ret = pm_genpd_init(&dom->base, NULL,
(dom->desc.is_powered_off ?
dom->desc.is_powered_off(dom) : true));
if (ret)
return ret;
}
return 0;
}
static int meson_ee_pwrc_probe(struct platform_device *pdev)
{
const struct meson_ee_pwrc_domain_data *match;
struct regmap *regmap_ao, *regmap_hhi;
struct device_node *parent_np;
struct meson_ee_pwrc *pwrc;
int i, ret;
match = of_device_get_match_data(&pdev->dev);
if (!match) {
dev_err(&pdev->dev, "failed to get match data\n");
return -ENODEV;
}
pwrc = devm_kzalloc(&pdev->dev, sizeof(*pwrc), GFP_KERNEL);
if (!pwrc)
return -ENOMEM;
pwrc->xlate.domains = devm_kcalloc(&pdev->dev, match->count,
sizeof(*pwrc->xlate.domains),
GFP_KERNEL);
if (!pwrc->xlate.domains)
return -ENOMEM;
pwrc->domains = devm_kcalloc(&pdev->dev, match->count,
sizeof(*pwrc->domains), GFP_KERNEL);
if (!pwrc->domains)
return -ENOMEM;
pwrc->xlate.num_domains = match->count;
parent_np = of_get_parent(pdev->dev.of_node);
regmap_hhi = syscon_node_to_regmap(parent_np);
of_node_put(parent_np);
if (IS_ERR(regmap_hhi)) {
dev_err(&pdev->dev, "failed to get HHI regmap\n");
return PTR_ERR(regmap_hhi);
}
regmap_ao = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"amlogic,ao-sysctrl");
if (IS_ERR(regmap_ao)) {
dev_err(&pdev->dev, "failed to get AO regmap\n");
return PTR_ERR(regmap_ao);
}
pwrc->regmap_ao = regmap_ao;
pwrc->regmap_hhi = regmap_hhi;
platform_set_drvdata(pdev, pwrc);
for (i = 0 ; i < match->count ; ++i) {
struct meson_ee_pwrc_domain *dom = &pwrc->domains[i];
memcpy(&dom->desc, &match->domains[i], sizeof(dom->desc));
ret = meson_ee_pwrc_init_domain(pdev, pwrc, dom);
if (ret)
return ret;
pwrc->xlate.domains[i] = &dom->base;
}
return of_genpd_add_provider_onecell(pdev->dev.of_node, &pwrc->xlate);
}
static void meson_ee_pwrc_shutdown(struct platform_device *pdev)
{
struct meson_ee_pwrc *pwrc = platform_get_drvdata(pdev);
int i;
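/* Power off every domain that is still powered on at shutdown time */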
for (i = 0 ; i < pwrc->xlate.num_domains ; ++i) {
struct meson_ee_pwrc_domain *dom = &pwrc->domains[i];
if (dom->desc.is_powered_off && !dom->desc.is_powered_off(dom))
meson_ee_pwrc_off(&dom->base);
}
}
static struct meson_ee_pwrc_domain_data meson_ee_g12a_pwrc_data = {
.count = ARRAY_SIZE(g12a_pwrc_domains),
.domains = g12a_pwrc_domains,
};
static struct meson_ee_pwrc_domain_data meson_ee_axg_pwrc_data = {
.count = ARRAY_SIZE(axg_pwrc_domains),
.domains = axg_pwrc_domains,
};
static struct meson_ee_pwrc_domain_data meson_ee_gxbb_pwrc_data = {
.count = ARRAY_SIZE(gxbb_pwrc_domains),
.domains = gxbb_pwrc_domains,
};
static struct meson_ee_pwrc_domain_data meson_ee_m8_pwrc_data = {
.count = ARRAY_SIZE(meson8_pwrc_domains),
.domains = meson8_pwrc_domains,
};
static struct meson_ee_pwrc_domain_data meson_ee_m8b_pwrc_data = {
.count = ARRAY_SIZE(meson8b_pwrc_domains),
.domains = meson8b_pwrc_domains,
};
static struct meson_ee_pwrc_domain_data meson_ee_sm1_pwrc_data = {
.count = ARRAY_SIZE(sm1_pwrc_domains),
.domains = sm1_pwrc_domains,
};
static const struct of_device_id meson_ee_pwrc_match_table[] = {
{
.compatible = "amlogic,meson8-pwrc",
.data = &meson_ee_m8_pwrc_data,
},
{
.compatible = "amlogic,meson8b-pwrc",
.data = &meson_ee_m8b_pwrc_data,
},
{
.compatible = "amlogic,meson8m2-pwrc",
.data = &meson_ee_m8b_pwrc_data,
},
{
.compatible = "amlogic,meson-axg-pwrc",
.data = &meson_ee_axg_pwrc_data,
},
{
.compatible = "amlogic,meson-gxbb-pwrc",
.data = &meson_ee_gxbb_pwrc_data,
},
{
.compatible = "amlogic,meson-g12a-pwrc",
.data = &meson_ee_g12a_pwrc_data,
},
{
.compatible = "amlogic,meson-sm1-pwrc",
.data = &meson_ee_sm1_pwrc_data,
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, meson_ee_pwrc_match_table);
static struct platform_driver meson_ee_pwrc_driver = {
.probe = meson_ee_pwrc_probe,
.shutdown = meson_ee_pwrc_shutdown,
.driver = {
.name = "meson_ee_pwrc",
.of_match_table = meson_ee_pwrc_match_table,
},
};
module_platform_driver(meson_ee_pwrc_driver);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/pmdomain/amlogic/meson-ee-pwrc.c |
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Copyright (c) 2019 Amlogic, Inc.
* Author: Jianxin Pan <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/io.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <dt-bindings/power/meson-a1-power.h>
#include <dt-bindings/power/amlogic,c3-pwrc.h>
#include <dt-bindings/power/meson-s4-power.h>
#include <linux/arm-smccc.h>
#include <linux/firmware/meson/meson_sm.h>
#include <linux/module.h>
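/* Power state arguments passed to the secure monitor with SM_A1_PWRC_SET */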
#define PWRC_ON 1
#define PWRC_OFF 0
struct meson_secure_pwrc_domain {
struct generic_pm_domain base;
unsigned int index;
struct meson_secure_pwrc *pwrc;
};
struct meson_secure_pwrc {
struct meson_secure_pwrc_domain *domains;
struct genpd_onecell_data xlate;
struct meson_sm_firmware *fw;
};
struct meson_secure_pwrc_domain_desc {
unsigned int index;
unsigned int flags;
char *name;
bool (*is_off)(struct meson_secure_pwrc_domain *pwrc_domain);
};
struct meson_secure_pwrc_domain_data {
unsigned int count;
struct meson_secure_pwrc_domain_desc *domains;
};
static bool pwrc_secure_is_off(struct meson_secure_pwrc_domain *pwrc_domain)
{
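/* Report the domain as off if the secure monitor query fails */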
int is_off = 1;
if (meson_sm_call(pwrc_domain->pwrc->fw, SM_A1_PWRC_GET, &is_off,
pwrc_domain->index, 0, 0, 0, 0) < 0)
pr_err("failed to get power domain status\n");
return is_off;
}
static int meson_secure_pwrc_off(struct generic_pm_domain *domain)
{
int ret = 0;
struct meson_secure_pwrc_domain *pwrc_domain =
container_of(domain, struct meson_secure_pwrc_domain, base);
if (meson_sm_call(pwrc_domain->pwrc->fw, SM_A1_PWRC_SET, NULL,
pwrc_domain->index, PWRC_OFF, 0, 0, 0) < 0) {
pr_err("failed to set power domain off\n");
ret = -EINVAL;
}
return ret;
}
static int meson_secure_pwrc_on(struct generic_pm_domain *domain)
{
int ret = 0;
struct meson_secure_pwrc_domain *pwrc_domain =
container_of(domain, struct meson_secure_pwrc_domain, base);
if (meson_sm_call(pwrc_domain->pwrc->fw, SM_A1_PWRC_SET, NULL,
pwrc_domain->index, PWRC_ON, 0, 0, 0) < 0) {
pr_err("failed to set power domain on\n");
ret = -EINVAL;
}
return ret;
}
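/* Describe a secure power domain keyed by its PWRC_<name>_ID index */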
#define SEC_PD(__name, __flag) \
[PWRC_##__name##_ID] = \
{ \
.name = #__name, \
.index = PWRC_##__name##_ID, \
.is_off = pwrc_secure_is_off, \
.flags = __flag, \
}
static struct meson_secure_pwrc_domain_desc a1_pwrc_domains[] = {
SEC_PD(DSPA, 0),
SEC_PD(DSPB, 0),
/* UART should keep working in ATF after suspend and before resume */
SEC_PD(UART, GENPD_FLAG_ALWAYS_ON),
/* DMC is for DDR PHY ana/dig and DMC, and should be always on */
SEC_PD(DMC, GENPD_FLAG_ALWAYS_ON),
SEC_PD(I2C, 0),
SEC_PD(PSRAM, 0),
SEC_PD(ACODEC, 0),
SEC_PD(AUDIO, 0),
SEC_PD(OTP, 0),
SEC_PD(DMA, GENPD_FLAG_ALWAYS_ON | GENPD_FLAG_IRQ_SAFE),
SEC_PD(SD_EMMC, 0),
SEC_PD(RAMA, 0),
/* SRAMB is used as ATF runtime memory, and should be always on */
SEC_PD(RAMB, GENPD_FLAG_ALWAYS_ON),
SEC_PD(IR, 0),
SEC_PD(SPICC, 0),
SEC_PD(SPIFC, 0),
SEC_PD(USB, 0),
/* NIC is for the Arm NIC-400 interconnect, and should be always on */
SEC_PD(NIC, GENPD_FLAG_ALWAYS_ON),
SEC_PD(PDMIN, 0),
SEC_PD(RSA, 0),
};
static struct meson_secure_pwrc_domain_desc c3_pwrc_domains[] = {
SEC_PD(C3_NNA, 0),
SEC_PD(C3_AUDIO, GENPD_FLAG_ALWAYS_ON),
SEC_PD(C3_SDIOA, GENPD_FLAG_ALWAYS_ON),
SEC_PD(C3_EMMC, GENPD_FLAG_ALWAYS_ON),
SEC_PD(C3_USB_COMB, GENPD_FLAG_ALWAYS_ON),
SEC_PD(C3_SDCARD, GENPD_FLAG_ALWAYS_ON),
SEC_PD(C3_ETH, GENPD_FLAG_ALWAYS_ON),
SEC_PD(C3_GE2D, GENPD_FLAG_ALWAYS_ON),
SEC_PD(C3_CVE, GENPD_FLAG_ALWAYS_ON),
SEC_PD(C3_GDC_WRAP, GENPD_FLAG_ALWAYS_ON),
SEC_PD(C3_ISP_TOP, GENPD_FLAG_ALWAYS_ON),
SEC_PD(C3_MIPI_ISP_WRAP, GENPD_FLAG_ALWAYS_ON),
SEC_PD(C3_VCODEC, 0),
};
static struct meson_secure_pwrc_domain_desc s4_pwrc_domains[] = {
SEC_PD(S4_DOS_HEVC, 0),
SEC_PD(S4_DOS_VDEC, 0),
SEC_PD(S4_VPU_HDMI, 0),
SEC_PD(S4_USB_COMB, 0),
SEC_PD(S4_GE2D, 0),
/* ETH is for ethernet online wakeup, and should be always on */
SEC_PD(S4_ETH, GENPD_FLAG_ALWAYS_ON),
SEC_PD(S4_DEMOD, 0),
SEC_PD(S4_AUDIO, 0),
};
static int meson_secure_pwrc_probe(struct platform_device *pdev)
{
int i;
struct device_node *sm_np;
struct meson_secure_pwrc *pwrc;
const struct meson_secure_pwrc_domain_data *match;
match = of_device_get_match_data(&pdev->dev);
if (!match) {
dev_err(&pdev->dev, "failed to get match data\n");
return -ENODEV;
}
sm_np = of_find_compatible_node(NULL, NULL, "amlogic,meson-gxbb-sm");
if (!sm_np) {
dev_err(&pdev->dev, "no secure-monitor node\n");
return -ENODEV;
}
pwrc = devm_kzalloc(&pdev->dev, sizeof(*pwrc), GFP_KERNEL);
if (!pwrc) {
of_node_put(sm_np);
return -ENOMEM;
}
pwrc->fw = meson_sm_get(sm_np);
of_node_put(sm_np);
if (!pwrc->fw)
return -EPROBE_DEFER;
pwrc->xlate.domains = devm_kcalloc(&pdev->dev, match->count,
sizeof(*pwrc->xlate.domains),
GFP_KERNEL);
if (!pwrc->xlate.domains)
return -ENOMEM;
pwrc->domains = devm_kcalloc(&pdev->dev, match->count,
sizeof(*pwrc->domains), GFP_KERNEL);
if (!pwrc->domains)
return -ENOMEM;
pwrc->xlate.num_domains = match->count;
platform_set_drvdata(pdev, pwrc);
for (i = 0 ; i < match->count ; ++i) {
struct meson_secure_pwrc_domain *dom = &pwrc->domains[i];
if (!match->domains[i].name)
continue;
dom->pwrc = pwrc;
dom->index = match->domains[i].index;
dom->base.name = match->domains[i].name;
dom->base.flags = match->domains[i].flags;
dom->base.power_on = meson_secure_pwrc_on;
dom->base.power_off = meson_secure_pwrc_off;
pm_genpd_init(&dom->base, NULL, match->domains[i].is_off(dom));
pwrc->xlate.domains[i] = &dom->base;
}
return of_genpd_add_provider_onecell(pdev->dev.of_node, &pwrc->xlate);
}
static struct meson_secure_pwrc_domain_data meson_secure_a1_pwrc_data = {
.domains = a1_pwrc_domains,
.count = ARRAY_SIZE(a1_pwrc_domains),
};
static struct meson_secure_pwrc_domain_data amlogic_secure_c3_pwrc_data = {
.domains = c3_pwrc_domains,
.count = ARRAY_SIZE(c3_pwrc_domains),
};
static struct meson_secure_pwrc_domain_data meson_secure_s4_pwrc_data = {
.domains = s4_pwrc_domains,
.count = ARRAY_SIZE(s4_pwrc_domains),
};
static const struct of_device_id meson_secure_pwrc_match_table[] = {
{
.compatible = "amlogic,meson-a1-pwrc",
.data = &meson_secure_a1_pwrc_data,
},
{
.compatible = "amlogic,c3-pwrc",
.data = &amlogic_secure_c3_pwrc_data,
},
{
.compatible = "amlogic,meson-s4-pwrc",
.data = &meson_secure_s4_pwrc_data,
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, meson_secure_pwrc_match_table);
static struct platform_driver meson_secure_pwrc_driver = {
.probe = meson_secure_pwrc_probe,
.driver = {
.name = "meson_secure_pwrc",
.of_match_table = meson_secure_pwrc_match_table,
},
};
module_platform_driver(meson_secure_pwrc_driver);
MODULE_LICENSE("Dual MIT/GPL");
| linux-master | drivers/pmdomain/amlogic/meson-secure-pwrc.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.*/
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pm_domain.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <soc/qcom/cmd-db.h>
#include <soc/qcom/rpmh.h>
#include <dt-bindings/power/qcom-rpmpd.h>
#include <dt-bindings/power/qcom,rpmhpd.h>
#define domain_to_rpmhpd(domain) container_of(domain, struct rpmhpd, pd)
#define RPMH_ARC_MAX_LEVELS 16
/**
* struct rpmhpd - top level RPMh power domain resource data structure
* @dev: rpmh power domain controller device
* @pd: generic_pm_domain corresponding to the power domain
* @parent: generic_pm_domain corresponding to the parent's power domain
* @peer: A peer power domain in case active-only voting is
* supported
* @active_only: True if it represents an Active only peer
* @corner: current corner
* @active_corner: current active corner
* @enable_corner: lowest non-zero corner
* @level: An array of level (vlvl) to corner (hlvl) mappings
* derived from cmd-db
* @level_count: Number of levels supported by the power domain, the
* maximum being 16 (0 - 15)
* @enabled: true if the power domain is enabled
* @res_name: Resource name used for cmd-db lookup
* @addr: Resource address as looked up using the resource name from
* cmd-db
* @state_synced: Indicator that sync_state has been invoked for the rpmhpd resource
*/
struct rpmhpd {
struct device *dev;
struct generic_pm_domain pd;
struct generic_pm_domain *parent;
struct rpmhpd *peer;
const bool active_only;
unsigned int corner;
unsigned int active_corner;
unsigned int enable_corner;
u32 level[RPMH_ARC_MAX_LEVELS];
size_t level_count;
bool enabled;
const char *res_name;
u32 addr;
bool state_synced;
};
struct rpmhpd_desc {
struct rpmhpd **rpmhpds;
size_t num_pds;
};
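/* Serializes corner aggregation and RPMh voting across all power domains */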
static DEFINE_MUTEX(rpmhpd_lock);
/* RPMH powerdomains */
static struct rpmhpd cx_ao;
static struct rpmhpd mx;
static struct rpmhpd mx_ao;
static struct rpmhpd cx = {
.pd = { .name = "cx", },
.peer = &cx_ao,
.res_name = "cx.lvl",
};
static struct rpmhpd cx_ao = {
.pd = { .name = "cx_ao", },
.active_only = true,
.peer = &cx,
.res_name = "cx.lvl",
};
static struct rpmhpd cx_ao_w_mx_parent;
static struct rpmhpd cx_w_mx_parent = {
.pd = { .name = "cx", },
.peer = &cx_ao_w_mx_parent,
.parent = &mx.pd,
.res_name = "cx.lvl",
};
static struct rpmhpd cx_ao_w_mx_parent = {
.pd = { .name = "cx_ao", },
.active_only = true,
.peer = &cx_w_mx_parent,
.parent = &mx_ao.pd,
.res_name = "cx.lvl",
};
static struct rpmhpd ebi = {
.pd = { .name = "ebi", },
.res_name = "ebi.lvl",
};
static struct rpmhpd gfx = {
.pd = { .name = "gfx", },
.res_name = "gfx.lvl",
};
static struct rpmhpd lcx = {
.pd = { .name = "lcx", },
.res_name = "lcx.lvl",
};
static struct rpmhpd lmx = {
.pd = { .name = "lmx", },
.res_name = "lmx.lvl",
};
static struct rpmhpd mmcx_ao;
static struct rpmhpd mmcx = {
.pd = { .name = "mmcx", },
.peer = &mmcx_ao,
.res_name = "mmcx.lvl",
};
static struct rpmhpd mmcx_ao = {
.pd = { .name = "mmcx_ao", },
.active_only = true,
.peer = &mmcx,
.res_name = "mmcx.lvl",
};
static struct rpmhpd mmcx_ao_w_cx_parent;
static struct rpmhpd mmcx_w_cx_parent = {
.pd = { .name = "mmcx", },
.peer = &mmcx_ao_w_cx_parent,
.parent = &cx.pd,
.res_name = "mmcx.lvl",
};
static struct rpmhpd mmcx_ao_w_cx_parent = {
.pd = { .name = "mmcx_ao", },
.active_only = true,
.peer = &mmcx_w_cx_parent,
.parent = &cx_ao.pd,
.res_name = "mmcx.lvl",
};
static struct rpmhpd mss = {
.pd = { .name = "mss", },
.res_name = "mss.lvl",
};
static struct rpmhpd mx_ao;
static struct rpmhpd mx = {
.pd = { .name = "mx", },
.peer = &mx_ao,
.res_name = "mx.lvl",
};
static struct rpmhpd mx_ao = {
.pd = { .name = "mx_ao", },
.active_only = true,
.peer = &mx,
.res_name = "mx.lvl",
};
static struct rpmhpd mxc_ao;
static struct rpmhpd mxc = {
.pd = { .name = "mxc", },
.peer = &mxc_ao,
.res_name = "mxc.lvl",
};
static struct rpmhpd mxc_ao = {
.pd = { .name = "mxc_ao", },
.active_only = true,
.peer = &mxc,
.res_name = "mxc.lvl",
};
static struct rpmhpd nsp = {
.pd = { .name = "nsp", },
.res_name = "nsp.lvl",
};
static struct rpmhpd nsp0 = {
.pd = { .name = "nsp0", },
.res_name = "nsp0.lvl",
};
static struct rpmhpd nsp1 = {
.pd = { .name = "nsp1", },
.res_name = "nsp1.lvl",
};
static struct rpmhpd qphy = {
.pd = { .name = "qphy", },
.res_name = "qphy.lvl",
};
/* SA8540P RPMH powerdomains */
static struct rpmhpd *sa8540p_rpmhpds[] = {
[SC8280XP_CX] = &cx,
[SC8280XP_CX_AO] = &cx_ao,
[SC8280XP_EBI] = &ebi,
[SC8280XP_GFX] = &gfx,
[SC8280XP_LCX] = &lcx,
[SC8280XP_LMX] = &lmx,
[SC8280XP_MMCX] = &mmcx,
[SC8280XP_MMCX_AO] = &mmcx_ao,
[SC8280XP_MX] = &mx,
[SC8280XP_MX_AO] = &mx_ao,
[SC8280XP_NSP] = &nsp,
};
static const struct rpmhpd_desc sa8540p_desc = {
.rpmhpds = sa8540p_rpmhpds,
.num_pds = ARRAY_SIZE(sa8540p_rpmhpds),
};
/* SA8775P RPMH power domains */
static struct rpmhpd *sa8775p_rpmhpds[] = {
[SA8775P_CX] = &cx,
[SA8775P_CX_AO] = &cx_ao,
[SA8775P_EBI] = &ebi,
[SA8775P_GFX] = &gfx,
[SA8775P_LCX] = &lcx,
[SA8775P_LMX] = &lmx,
[SA8775P_MMCX] = &mmcx,
[SA8775P_MMCX_AO] = &mmcx_ao,
[SA8775P_MXC] = &mxc,
[SA8775P_MXC_AO] = &mxc_ao,
[SA8775P_MX] = &mx,
[SA8775P_MX_AO] = &mx_ao,
[SA8775P_NSP0] = &nsp0,
[SA8775P_NSP1] = &nsp1,
};
static const struct rpmhpd_desc sa8775p_desc = {
.rpmhpds = sa8775p_rpmhpds,
.num_pds = ARRAY_SIZE(sa8775p_rpmhpds),
};
/* SDM670 RPMH powerdomains */
static struct rpmhpd *sdm670_rpmhpds[] = {
[SDM670_CX] = &cx_w_mx_parent,
[SDM670_CX_AO] = &cx_ao_w_mx_parent,
[SDM670_GFX] = &gfx,
[SDM670_LCX] = &lcx,
[SDM670_LMX] = &lmx,
[SDM670_MSS] = &mss,
[SDM670_MX] = &mx,
[SDM670_MX_AO] = &mx_ao,
};
static const struct rpmhpd_desc sdm670_desc = {
.rpmhpds = sdm670_rpmhpds,
.num_pds = ARRAY_SIZE(sdm670_rpmhpds),
};
/* SDM845 RPMH powerdomains */
static struct rpmhpd *sdm845_rpmhpds[] = {
[SDM845_CX] = &cx_w_mx_parent,
[SDM845_CX_AO] = &cx_ao_w_mx_parent,
[SDM845_EBI] = &ebi,
[SDM845_GFX] = &gfx,
[SDM845_LCX] = &lcx,
[SDM845_LMX] = &lmx,
[SDM845_MSS] = &mss,
[SDM845_MX] = &mx,
[SDM845_MX_AO] = &mx_ao,
};
static const struct rpmhpd_desc sdm845_desc = {
.rpmhpds = sdm845_rpmhpds,
.num_pds = ARRAY_SIZE(sdm845_rpmhpds),
};
/* SDX55 RPMH powerdomains */
static struct rpmhpd *sdx55_rpmhpds[] = {
[SDX55_CX] = &cx_w_mx_parent,
[SDX55_MSS] = &mss,
[SDX55_MX] = &mx,
};
static const struct rpmhpd_desc sdx55_desc = {
.rpmhpds = sdx55_rpmhpds,
.num_pds = ARRAY_SIZE(sdx55_rpmhpds),
};
/* SDX65 RPMH powerdomains */
static struct rpmhpd *sdx65_rpmhpds[] = {
[SDX65_CX] = &cx_w_mx_parent,
[SDX65_CX_AO] = &cx_ao_w_mx_parent,
[SDX65_MSS] = &mss,
[SDX65_MX] = &mx,
[SDX65_MX_AO] = &mx_ao,
[SDX65_MXC] = &mxc,
};
static const struct rpmhpd_desc sdx65_desc = {
.rpmhpds = sdx65_rpmhpds,
.num_pds = ARRAY_SIZE(sdx65_rpmhpds),
};
/* SDX75 RPMH powerdomains */
static struct rpmhpd *sdx75_rpmhpds[] = {
[RPMHPD_CX] = &cx,
[RPMHPD_CX_AO] = &cx_ao,
[RPMHPD_MSS] = &mss,
[RPMHPD_MX] = &mx,
[RPMHPD_MX_AO] = &mx_ao,
[RPMHPD_MXC] = &mxc,
};
static const struct rpmhpd_desc sdx75_desc = {
.rpmhpds = sdx75_rpmhpds,
.num_pds = ARRAY_SIZE(sdx75_rpmhpds),
};
/* SM6350 RPMH powerdomains */
static struct rpmhpd *sm6350_rpmhpds[] = {
[SM6350_CX] = &cx_w_mx_parent,
[SM6350_GFX] = &gfx,
[SM6350_LCX] = &lcx,
[SM6350_LMX] = &lmx,
[SM6350_MSS] = &mss,
[SM6350_MX] = &mx,
};
static const struct rpmhpd_desc sm6350_desc = {
.rpmhpds = sm6350_rpmhpds,
.num_pds = ARRAY_SIZE(sm6350_rpmhpds),
};
/* SM8150 RPMH powerdomains */
static struct rpmhpd *sm8150_rpmhpds[] = {
[SM8150_CX] = &cx_w_mx_parent,
[SM8150_CX_AO] = &cx_ao_w_mx_parent,
[SM8150_EBI] = &ebi,
[SM8150_GFX] = &gfx,
[SM8150_LCX] = &lcx,
[SM8150_LMX] = &lmx,
[SM8150_MMCX] = &mmcx,
[SM8150_MMCX_AO] = &mmcx_ao,
[SM8150_MSS] = &mss,
[SM8150_MX] = &mx,
[SM8150_MX_AO] = &mx_ao,
};
static const struct rpmhpd_desc sm8150_desc = {
.rpmhpds = sm8150_rpmhpds,
.num_pds = ARRAY_SIZE(sm8150_rpmhpds),
};
static struct rpmhpd *sa8155p_rpmhpds[] = {
[SA8155P_CX] = &cx_w_mx_parent,
[SA8155P_CX_AO] = &cx_ao_w_mx_parent,
[SA8155P_EBI] = &ebi,
[SA8155P_GFX] = &gfx,
[SA8155P_MSS] = &mss,
[SA8155P_MX] = &mx,
[SA8155P_MX_AO] = &mx_ao,
};
static const struct rpmhpd_desc sa8155p_desc = {
.rpmhpds = sa8155p_rpmhpds,
.num_pds = ARRAY_SIZE(sa8155p_rpmhpds),
};
/* SM8250 RPMH powerdomains */
static struct rpmhpd *sm8250_rpmhpds[] = {
[RPMHPD_CX] = &cx_w_mx_parent,
[RPMHPD_CX_AO] = &cx_ao_w_mx_parent,
[RPMHPD_EBI] = &ebi,
[RPMHPD_GFX] = &gfx,
[RPMHPD_LCX] = &lcx,
[RPMHPD_LMX] = &lmx,
[RPMHPD_MMCX] = &mmcx,
[RPMHPD_MMCX_AO] = &mmcx_ao,
[RPMHPD_MX] = &mx,
[RPMHPD_MX_AO] = &mx_ao,
};
static const struct rpmhpd_desc sm8250_desc = {
.rpmhpds = sm8250_rpmhpds,
.num_pds = ARRAY_SIZE(sm8250_rpmhpds),
};
/* SM8350 Power domains */
static struct rpmhpd *sm8350_rpmhpds[] = {
[RPMHPD_CX] = &cx_w_mx_parent,
[RPMHPD_CX_AO] = &cx_ao_w_mx_parent,
[RPMHPD_EBI] = &ebi,
[RPMHPD_GFX] = &gfx,
[RPMHPD_LCX] = &lcx,
[RPMHPD_LMX] = &lmx,
[RPMHPD_MMCX] = &mmcx,
[RPMHPD_MMCX_AO] = &mmcx_ao,
[RPMHPD_MSS] = &mss,
[RPMHPD_MX] = &mx,
[RPMHPD_MX_AO] = &mx_ao,
[RPMHPD_MXC] = &mxc,
[RPMHPD_MXC_AO] = &mxc_ao,
};
static const struct rpmhpd_desc sm8350_desc = {
.rpmhpds = sm8350_rpmhpds,
.num_pds = ARRAY_SIZE(sm8350_rpmhpds),
};
/* SM8450 RPMH powerdomains */
static struct rpmhpd *sm8450_rpmhpds[] = {
[RPMHPD_CX] = &cx,
[RPMHPD_CX_AO] = &cx_ao,
[RPMHPD_EBI] = &ebi,
[RPMHPD_GFX] = &gfx,
[RPMHPD_LCX] = &lcx,
[RPMHPD_LMX] = &lmx,
[RPMHPD_MMCX] = &mmcx_w_cx_parent,
[RPMHPD_MMCX_AO] = &mmcx_ao_w_cx_parent,
[RPMHPD_MSS] = &mss,
[RPMHPD_MX] = &mx,
[RPMHPD_MX_AO] = &mx_ao,
[RPMHPD_MXC] = &mxc,
[RPMHPD_MXC_AO] = &mxc_ao,
};
static const struct rpmhpd_desc sm8450_desc = {
.rpmhpds = sm8450_rpmhpds,
.num_pds = ARRAY_SIZE(sm8450_rpmhpds),
};
/* SM8550 RPMH powerdomains */
static struct rpmhpd *sm8550_rpmhpds[] = {
[RPMHPD_CX] = &cx,
[RPMHPD_CX_AO] = &cx_ao,
[RPMHPD_EBI] = &ebi,
[RPMHPD_GFX] = &gfx,
[RPMHPD_LCX] = &lcx,
[RPMHPD_LMX] = &lmx,
[RPMHPD_MMCX] = &mmcx_w_cx_parent,
[RPMHPD_MMCX_AO] = &mmcx_ao_w_cx_parent,
[RPMHPD_MSS] = &mss,
[RPMHPD_MX] = &mx,
[RPMHPD_MX_AO] = &mx_ao,
[RPMHPD_MXC] = &mxc,
[RPMHPD_MXC_AO] = &mxc_ao,
[RPMHPD_NSP] = &nsp,
};
static const struct rpmhpd_desc sm8550_desc = {
.rpmhpds = sm8550_rpmhpds,
.num_pds = ARRAY_SIZE(sm8550_rpmhpds),
};
/* QDU1000/QRU1000 RPMH powerdomains */
static struct rpmhpd *qdu1000_rpmhpds[] = {
[QDU1000_CX] = &cx,
[QDU1000_EBI] = &ebi,
[QDU1000_MSS] = &mss,
[QDU1000_MX] = &mx,
};
static const struct rpmhpd_desc qdu1000_desc = {
.rpmhpds = qdu1000_rpmhpds,
.num_pds = ARRAY_SIZE(qdu1000_rpmhpds),
};
/* SC7180 RPMH powerdomains */
static struct rpmhpd *sc7180_rpmhpds[] = {
[SC7180_CX] = &cx_w_mx_parent,
[SC7180_CX_AO] = &cx_ao_w_mx_parent,
[SC7180_GFX] = &gfx,
[SC7180_LCX] = &lcx,
[SC7180_LMX] = &lmx,
[SC7180_MSS] = &mss,
[SC7180_MX] = &mx,
[SC7180_MX_AO] = &mx_ao,
};
static const struct rpmhpd_desc sc7180_desc = {
.rpmhpds = sc7180_rpmhpds,
.num_pds = ARRAY_SIZE(sc7180_rpmhpds),
};
/* SC7280 RPMH powerdomains */
static struct rpmhpd *sc7280_rpmhpds[] = {
[SC7280_CX] = &cx,
[SC7280_CX_AO] = &cx_ao,
[SC7280_EBI] = &ebi,
[SC7280_GFX] = &gfx,
[SC7280_LCX] = &lcx,
[SC7280_LMX] = &lmx,
[SC7280_MSS] = &mss,
[SC7280_MX] = &mx,
[SC7280_MX_AO] = &mx_ao,
};
static const struct rpmhpd_desc sc7280_desc = {
.rpmhpds = sc7280_rpmhpds,
.num_pds = ARRAY_SIZE(sc7280_rpmhpds),
};
/* SC8180x RPMH powerdomains */
static struct rpmhpd *sc8180x_rpmhpds[] = {
[SC8180X_CX] = &cx_w_mx_parent,
[SC8180X_CX_AO] = &cx_ao_w_mx_parent,
[SC8180X_EBI] = &ebi,
[SC8180X_GFX] = &gfx,
[SC8180X_LCX] = &lcx,
[SC8180X_LMX] = &lmx,
[SC8180X_MMCX] = &mmcx,
[SC8180X_MMCX_AO] = &mmcx_ao,
[SC8180X_MSS] = &mss,
[SC8180X_MX] = &mx,
[SC8180X_MX_AO] = &mx_ao,
};
static const struct rpmhpd_desc sc8180x_desc = {
.rpmhpds = sc8180x_rpmhpds,
.num_pds = ARRAY_SIZE(sc8180x_rpmhpds),
};
/* SC8280xp RPMH powerdomains */
static struct rpmhpd *sc8280xp_rpmhpds[] = {
[SC8280XP_CX] = &cx,
[SC8280XP_CX_AO] = &cx_ao,
[SC8280XP_EBI] = &ebi,
[SC8280XP_GFX] = &gfx,
[SC8280XP_LCX] = &lcx,
[SC8280XP_LMX] = &lmx,
[SC8280XP_MMCX] = &mmcx,
[SC8280XP_MMCX_AO] = &mmcx_ao,
[SC8280XP_MX] = &mx,
[SC8280XP_MX_AO] = &mx_ao,
[SC8280XP_NSP] = &nsp,
[SC8280XP_QPHY] = &qphy,
};
static const struct rpmhpd_desc sc8280xp_desc = {
.rpmhpds = sc8280xp_rpmhpds,
.num_pds = ARRAY_SIZE(sc8280xp_rpmhpds),
};
static const struct of_device_id rpmhpd_match_table[] = {
{ .compatible = "qcom,qdu1000-rpmhpd", .data = &qdu1000_desc },
{ .compatible = "qcom,sa8155p-rpmhpd", .data = &sa8155p_desc },
{ .compatible = "qcom,sa8540p-rpmhpd", .data = &sa8540p_desc },
{ .compatible = "qcom,sa8775p-rpmhpd", .data = &sa8775p_desc },
{ .compatible = "qcom,sc7180-rpmhpd", .data = &sc7180_desc },
{ .compatible = "qcom,sc7280-rpmhpd", .data = &sc7280_desc },
{ .compatible = "qcom,sc8180x-rpmhpd", .data = &sc8180x_desc },
{ .compatible = "qcom,sc8280xp-rpmhpd", .data = &sc8280xp_desc },
{ .compatible = "qcom,sdm670-rpmhpd", .data = &sdm670_desc },
{ .compatible = "qcom,sdm845-rpmhpd", .data = &sdm845_desc },
{ .compatible = "qcom,sdx55-rpmhpd", .data = &sdx55_desc},
{ .compatible = "qcom,sdx65-rpmhpd", .data = &sdx65_desc},
{ .compatible = "qcom,sdx75-rpmhpd", .data = &sdx75_desc},
{ .compatible = "qcom,sm6350-rpmhpd", .data = &sm6350_desc },
{ .compatible = "qcom,sm8150-rpmhpd", .data = &sm8150_desc },
{ .compatible = "qcom,sm8250-rpmhpd", .data = &sm8250_desc },
{ .compatible = "qcom,sm8350-rpmhpd", .data = &sm8350_desc },
{ .compatible = "qcom,sm8450-rpmhpd", .data = &sm8450_desc },
{ .compatible = "qcom,sm8550-rpmhpd", .data = &sm8550_desc },
{ }
};
MODULE_DEVICE_TABLE(of, rpmhpd_match_table);
static int rpmhpd_send_corner(struct rpmhpd *pd, int state,
unsigned int corner, bool sync)
{
struct tcs_cmd cmd = {
.addr = pd->addr,
.data = corner,
};
/*
* Wait for an ack only when we are increasing the
* perf state of the power domain
*/
if (sync)
return rpmh_write(pd->dev, state, &cmd, 1);
else
return rpmh_write_async(pd->dev, state, &cmd, 1);
}
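/*
* Split a requested corner into ACTIVE and SLEEP set votes; active-only
* domains never cast a vote in the sleep set.
*/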
static void to_active_sleep(struct rpmhpd *pd, unsigned int corner,
unsigned int *active, unsigned int *sleep)
{
*active = corner;
if (pd->active_only)
*sleep = 0;
else
*sleep = *active;
}
/*
* This function is used to aggregate the votes across the active only
* resources and its peers. The aggregated votes are sent to RPMh as
* ACTIVE_ONLY votes (which take effect immediately), as WAKE_ONLY votes
* (applied by RPMh on system wakeup) and as SLEEP votes (applied by RPMh
* on system sleep).
* We send ACTIVE_ONLY votes for resources without any peers. For others,
* which have an active only peer, all 3 votes are sent.
*/
static int rpmhpd_aggregate_corner(struct rpmhpd *pd, unsigned int corner)
{
int ret;
struct rpmhpd *peer = pd->peer;
unsigned int active_corner, sleep_corner;
unsigned int this_active_corner = 0, this_sleep_corner = 0;
unsigned int peer_active_corner = 0, peer_sleep_corner = 0;
if (pd->state_synced) {
to_active_sleep(pd, corner, &this_active_corner, &this_sleep_corner);
} else {
/* Clamp to highest corner if sync_state hasn't happened */
this_active_corner = pd->level_count - 1;
this_sleep_corner = pd->level_count - 1;
}
if (peer && peer->enabled)
to_active_sleep(peer, peer->corner, &peer_active_corner,
&peer_sleep_corner);
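/* The ACTIVE vote must satisfy both this power domain and its peer */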
active_corner = max(this_active_corner, peer_active_corner);
ret = rpmhpd_send_corner(pd, RPMH_ACTIVE_ONLY_STATE, active_corner,
active_corner > pd->active_corner);
if (ret)
return ret;
pd->active_corner = active_corner;
if (peer) {
peer->active_corner = active_corner;
ret = rpmhpd_send_corner(pd, RPMH_WAKE_ONLY_STATE,
active_corner, false);
if (ret)
return ret;
sleep_corner = max(this_sleep_corner, peer_sleep_corner);
return rpmhpd_send_corner(pd, RPMH_SLEEP_STATE, sleep_corner,
false);
}
return ret;
}
static int rpmhpd_power_on(struct generic_pm_domain *domain)
{
struct rpmhpd *pd = domain_to_rpmhpd(domain);
unsigned int corner;
int ret;
mutex_lock(&rpmhpd_lock);
corner = max(pd->corner, pd->enable_corner);
ret = rpmhpd_aggregate_corner(pd, corner);
if (!ret)
pd->enabled = true;
mutex_unlock(&rpmhpd_lock);
return ret;
}
static int rpmhpd_power_off(struct generic_pm_domain *domain)
{
struct rpmhpd *pd = domain_to_rpmhpd(domain);
int ret;
mutex_lock(&rpmhpd_lock);
ret = rpmhpd_aggregate_corner(pd, 0);
if (!ret)
pd->enabled = false;
mutex_unlock(&rpmhpd_lock);
return ret;
}
static int rpmhpd_set_performance_state(struct generic_pm_domain *domain,
unsigned int level)
{
struct rpmhpd *pd = domain_to_rpmhpd(domain);
int ret = 0, i;
mutex_lock(&rpmhpd_lock);
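/* Find the lowest corner whose level satisfies the requested level */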
for (i = 0; i < pd->level_count; i++)
if (level <= pd->level[i])
break;
/*
* If the level requested is more than that supported by the
* max corner, just set it to max anyway.
*/
if (i == pd->level_count)
i--;
if (pd->enabled) {
/* Ensure that the domain isn't turned off */
if (i < pd->enable_corner)
i = pd->enable_corner;
ret = rpmhpd_aggregate_corner(pd, i);
if (ret)
goto out;
}
pd->corner = i;
out:
mutex_unlock(&rpmhpd_lock);
return ret;
}
static unsigned int rpmhpd_get_performance_state(struct generic_pm_domain *genpd,
struct dev_pm_opp *opp)
{
return dev_pm_opp_get_level(opp);
}
static int rpmhpd_update_level_mapping(struct rpmhpd *rpmhpd)
{
int i;
const u16 *buf;
buf = cmd_db_read_aux_data(rpmhpd->res_name, &rpmhpd->level_count);
if (IS_ERR(buf))
return PTR_ERR(buf);
/* 2 bytes used for each command DB aux data entry */
rpmhpd->level_count >>= 1;
if (rpmhpd->level_count > RPMH_ARC_MAX_LEVELS)
return -EINVAL;
for (i = 0; i < rpmhpd->level_count; i++) {
rpmhpd->level[i] = buf[i];
/* Remember the first corner with non-zero level */
if (!rpmhpd->level[rpmhpd->enable_corner] && rpmhpd->level[i])
rpmhpd->enable_corner = i;
/*
* The AUX data may be zero padded. These 0 valued entries at
* the end of the map must be ignored.
*/
if (i > 0 && rpmhpd->level[i] == 0) {
rpmhpd->level_count = i;
break;
}
pr_debug("%s: ARC hlvl=%2d --> vlvl=%4u\n", rpmhpd->res_name, i,
rpmhpd->level[i]);
}
return 0;
}
static int rpmhpd_probe(struct platform_device *pdev)
{
int i, ret;
size_t num_pds;
struct device *dev = &pdev->dev;
struct genpd_onecell_data *data;
struct rpmhpd **rpmhpds;
const struct rpmhpd_desc *desc;
desc = of_device_get_match_data(dev);
if (!desc)
return -EINVAL;
rpmhpds = desc->rpmhpds;
num_pds = desc->num_pds;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->domains = devm_kcalloc(dev, num_pds, sizeof(*data->domains),
GFP_KERNEL);
if (!data->domains)
return -ENOMEM;
data->num_domains = num_pds;
for (i = 0; i < num_pds; i++) {
if (!rpmhpds[i])
continue;
rpmhpds[i]->dev = dev;
rpmhpds[i]->addr = cmd_db_read_addr(rpmhpds[i]->res_name);
if (!rpmhpds[i]->addr) {
dev_err(dev, "Could not find RPMh address for resource %s\n",
rpmhpds[i]->res_name);
return -ENODEV;
}
ret = cmd_db_read_slave_id(rpmhpds[i]->res_name);
if (ret != CMD_DB_HW_ARC) {
dev_err(dev, "RPMh slave ID mismatch\n");
return -EINVAL;
}
ret = rpmhpd_update_level_mapping(rpmhpds[i]);
if (ret)
return ret;
rpmhpds[i]->pd.power_off = rpmhpd_power_off;
rpmhpds[i]->pd.power_on = rpmhpd_power_on;
rpmhpds[i]->pd.set_performance_state = rpmhpd_set_performance_state;
rpmhpds[i]->pd.opp_to_performance_state = rpmhpd_get_performance_state;
pm_genpd_init(&rpmhpds[i]->pd, NULL, true);
data->domains[i] = &rpmhpds[i]->pd;
}
/* Add subdomains */
for (i = 0; i < num_pds; i++) {
if (!rpmhpds[i])
continue;
if (rpmhpds[i]->parent)
pm_genpd_add_subdomain(rpmhpds[i]->parent,
&rpmhpds[i]->pd);
}
return of_genpd_add_provider_onecell(pdev->dev.of_node, data);
}
static void rpmhpd_sync_state(struct device *dev)
{
const struct rpmhpd_desc *desc = of_device_get_match_data(dev);
struct rpmhpd **rpmhpds = desc->rpmhpds;
unsigned int corner;
struct rpmhpd *pd;
unsigned int i;
int ret;
mutex_lock(&rpmhpd_lock);
for (i = 0; i < desc->num_pds; i++) {
pd = rpmhpds[i];
if (!pd)
continue;
pd->state_synced = true;
if (pd->enabled)
corner = max(pd->corner, pd->enable_corner);
else
corner = 0;
ret = rpmhpd_aggregate_corner(pd, corner);
if (ret)
dev_err(dev, "failed to sync %s\n", pd->res_name);
}
mutex_unlock(&rpmhpd_lock);
}
static struct platform_driver rpmhpd_driver = {
.driver = {
.name = "qcom-rpmhpd",
.of_match_table = rpmhpd_match_table,
.suppress_bind_attrs = true,
.sync_state = rpmhpd_sync_state,
},
.probe = rpmhpd_probe,
};
static int __init rpmhpd_init(void)
{
return platform_driver_register(&rpmhpd_driver);
}
core_initcall(rpmhpd_init);
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. RPMh Power Domain Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/pmdomain/qcom/rpmhpd.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
* Copyright (c) 2019, Linaro Limited
*/
#include <linux/module.h>
#include <linux/err.h>
#include <linux/debugfs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
#include <linux/interrupt.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <linux/regulator/consumer.h>
#include <linux/clk.h>
#include <linux/nvmem-consumer.h>
/* Register Offsets for RB-CPR and Bit Definitions */
/* RBCPR Version Register */
#define REG_RBCPR_VERSION 0
#define RBCPR_VER_2 0x02
#define FLAGS_IGNORE_1ST_IRQ_STATUS BIT(0)
/* RBCPR Gate Count and Target Registers */
#define REG_RBCPR_GCNT_TARGET(n) (0x60 + 4 * (n))
#define RBCPR_GCNT_TARGET_TARGET_SHIFT 0
#define RBCPR_GCNT_TARGET_TARGET_MASK GENMASK(11, 0)
#define RBCPR_GCNT_TARGET_GCNT_SHIFT 12
#define RBCPR_GCNT_TARGET_GCNT_MASK GENMASK(9, 0)
/* RBCPR Timer Control */
#define REG_RBCPR_TIMER_INTERVAL 0x44
#define REG_RBIF_TIMER_ADJUST 0x4c
#define RBIF_TIMER_ADJ_CONS_UP_MASK GENMASK(3, 0)
#define RBIF_TIMER_ADJ_CONS_UP_SHIFT 0
#define RBIF_TIMER_ADJ_CONS_DOWN_MASK GENMASK(3, 0)
#define RBIF_TIMER_ADJ_CONS_DOWN_SHIFT 4
#define RBIF_TIMER_ADJ_CLAMP_INT_MASK GENMASK(7, 0)
#define RBIF_TIMER_ADJ_CLAMP_INT_SHIFT 8
/* RBCPR Config Register */
#define REG_RBIF_LIMIT 0x48
#define RBIF_LIMIT_CEILING_MASK GENMASK(5, 0)
#define RBIF_LIMIT_CEILING_SHIFT 6
#define RBIF_LIMIT_FLOOR_BITS 6
#define RBIF_LIMIT_FLOOR_MASK GENMASK(5, 0)
#define RBIF_LIMIT_CEILING_DEFAULT RBIF_LIMIT_CEILING_MASK
#define RBIF_LIMIT_FLOOR_DEFAULT 0
#define REG_RBIF_SW_VLEVEL 0x94
#define RBIF_SW_VLEVEL_DEFAULT 0x20
#define REG_RBCPR_STEP_QUOT 0x80
#define RBCPR_STEP_QUOT_STEPQUOT_MASK GENMASK(7, 0)
#define RBCPR_STEP_QUOT_IDLE_CLK_MASK GENMASK(3, 0)
#define RBCPR_STEP_QUOT_IDLE_CLK_SHIFT 8
/* RBCPR Control Register */
#define REG_RBCPR_CTL 0x90
#define RBCPR_CTL_LOOP_EN BIT(0)
#define RBCPR_CTL_TIMER_EN BIT(3)
#define RBCPR_CTL_SW_AUTO_CONT_ACK_EN BIT(5)
#define RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN BIT(6)
#define RBCPR_CTL_COUNT_MODE BIT(10)
#define RBCPR_CTL_UP_THRESHOLD_MASK GENMASK(3, 0)
#define RBCPR_CTL_UP_THRESHOLD_SHIFT 24
#define RBCPR_CTL_DN_THRESHOLD_MASK GENMASK(3, 0)
#define RBCPR_CTL_DN_THRESHOLD_SHIFT 28
/* RBCPR Ack/Nack Response */
#define REG_RBIF_CONT_ACK_CMD 0x98
#define REG_RBIF_CONT_NACK_CMD 0x9c
/* RBCPR Result status Register */
#define REG_RBCPR_RESULT_0 0xa0
#define RBCPR_RESULT0_BUSY_SHIFT 19
#define RBCPR_RESULT0_BUSY_MASK BIT(RBCPR_RESULT0_BUSY_SHIFT)
#define RBCPR_RESULT0_ERROR_LT0_SHIFT 18
#define RBCPR_RESULT0_ERROR_SHIFT 6
#define RBCPR_RESULT0_ERROR_MASK GENMASK(11, 0)
#define RBCPR_RESULT0_ERROR_STEPS_SHIFT 2
#define RBCPR_RESULT0_ERROR_STEPS_MASK GENMASK(3, 0)
#define RBCPR_RESULT0_STEP_UP_SHIFT 1
/* RBCPR Interrupt Control Register */
#define REG_RBIF_IRQ_EN(n) (0x100 + 4 * (n))
#define REG_RBIF_IRQ_CLEAR 0x110
#define REG_RBIF_IRQ_STATUS 0x114
#define CPR_INT_DONE BIT(0)
#define CPR_INT_MIN BIT(1)
#define CPR_INT_DOWN BIT(2)
#define CPR_INT_MID BIT(3)
#define CPR_INT_UP BIT(4)
#define CPR_INT_MAX BIT(5)
#define CPR_INT_CLAMP BIT(6)
#define CPR_INT_ALL (CPR_INT_DONE | CPR_INT_MIN | CPR_INT_DOWN | \
CPR_INT_MID | CPR_INT_UP | CPR_INT_MAX | CPR_INT_CLAMP)
#define CPR_INT_DEFAULT (CPR_INT_UP | CPR_INT_DOWN)
#define CPR_NUM_RING_OSC 8
/* CPR eFuse parameters */
#define CPR_FUSE_TARGET_QUOT_BITS_MASK GENMASK(11, 0)
#define CPR_FUSE_MIN_QUOT_DIFF 50
#define FUSE_REVISION_UNKNOWN (-1)
enum voltage_change_dir {
NO_CHANGE,
DOWN,
UP,
};
struct cpr_fuse {
char *ring_osc;
char *init_voltage;
char *quotient;
char *quotient_offset;
};
struct fuse_corner_data {
int ref_uV;
int max_uV;
int min_uV;
int max_volt_scale;
int max_quot_scale;
/* fuse quot */
int quot_offset;
int quot_scale;
int quot_adjust;
/* fuse quot_offset */
int quot_offset_scale;
int quot_offset_adjust;
};
struct cpr_fuses {
int init_voltage_step;
int init_voltage_width;
struct fuse_corner_data *fuse_corner_data;
};
struct corner_data {
unsigned int fuse_corner;
unsigned long freq;
};
struct cpr_desc {
unsigned int num_fuse_corners;
int min_diff_quot;
int *step_quot;
unsigned int timer_delay_us;
unsigned int timer_cons_up;
unsigned int timer_cons_down;
unsigned int up_threshold;
unsigned int down_threshold;
unsigned int idle_clocks;
unsigned int gcnt_us;
unsigned int vdd_apc_step_up_limit;
unsigned int vdd_apc_step_down_limit;
unsigned int clamp_timer_interval;
struct cpr_fuses cpr_fuses;
bool reduce_to_fuse_uV;
bool reduce_to_corner_uV;
};
struct acc_desc {
unsigned int enable_reg;
u32 enable_mask;
struct reg_sequence *config;
struct reg_sequence *settings;
int num_regs_per_fuse;
};
struct cpr_acc_desc {
const struct cpr_desc *cpr_desc;
const struct acc_desc *acc_desc;
};
struct fuse_corner {
int min_uV;
int max_uV;
int uV;
int quot;
int step_quot;
const struct reg_sequence *accs;
int num_accs;
unsigned long max_freq;
u8 ring_osc_idx;
};
struct corner {
int min_uV;
int max_uV;
int uV;
int last_uV;
int quot_adjust;
u32 save_ctl;
u32 save_irq;
unsigned long freq;
struct fuse_corner *fuse_corner;
};
struct cpr_drv {
unsigned int num_corners;
unsigned int ref_clk_khz;
struct generic_pm_domain pd;
struct device *dev;
struct device *attached_cpu_dev;
struct mutex lock;
void __iomem *base;
struct corner *corner;
struct regulator *vdd_apc;
struct clk *cpu_clk;
struct regmap *tcsr;
bool loop_disabled;
u32 gcnt;
unsigned long flags;
struct fuse_corner *fuse_corners;
struct corner *corners;
const struct cpr_desc *desc;
const struct acc_desc *acc_desc;
const struct cpr_fuse *cpr_fuses;
struct dentry *debugfs;
};
static bool cpr_is_allowed(struct cpr_drv *drv)
{
return !drv->loop_disabled;
}
static void cpr_write(struct cpr_drv *drv, u32 offset, u32 value)
{
writel_relaxed(value, drv->base + offset);
}
static u32 cpr_read(struct cpr_drv *drv, u32 offset)
{
return readl_relaxed(drv->base + offset);
}
static void
cpr_masked_write(struct cpr_drv *drv, u32 offset, u32 mask, u32 value)
{
u32 val;
val = readl_relaxed(drv->base + offset);
val &= ~mask;
val |= value & mask;
writel_relaxed(val, drv->base + offset);
}
static void cpr_irq_clr(struct cpr_drv *drv)
{
cpr_write(drv, REG_RBIF_IRQ_CLEAR, CPR_INT_ALL);
}
static void cpr_irq_clr_nack(struct cpr_drv *drv)
{
cpr_irq_clr(drv);
cpr_write(drv, REG_RBIF_CONT_NACK_CMD, 1);
}
static void cpr_irq_clr_ack(struct cpr_drv *drv)
{
cpr_irq_clr(drv);
cpr_write(drv, REG_RBIF_CONT_ACK_CMD, 1);
}
static void cpr_irq_set(struct cpr_drv *drv, u32 int_bits)
{
cpr_write(drv, REG_RBIF_IRQ_EN(0), int_bits);
}
static void cpr_ctl_modify(struct cpr_drv *drv, u32 mask, u32 value)
{
cpr_masked_write(drv, REG_RBCPR_CTL, mask, value);
}
static void cpr_ctl_enable(struct cpr_drv *drv, struct corner *corner)
{
u32 val, mask;
const struct cpr_desc *desc = drv->desc;
/* Program Consecutive Up & Down */
val = desc->timer_cons_down << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT;
val |= desc->timer_cons_up << RBIF_TIMER_ADJ_CONS_UP_SHIFT;
mask = RBIF_TIMER_ADJ_CONS_UP_MASK | RBIF_TIMER_ADJ_CONS_DOWN_MASK;
cpr_masked_write(drv, REG_RBIF_TIMER_ADJUST, mask, val);
cpr_masked_write(drv, REG_RBCPR_CTL,
RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN |
RBCPR_CTL_SW_AUTO_CONT_ACK_EN,
corner->save_ctl);
cpr_irq_set(drv, corner->save_irq);
if (cpr_is_allowed(drv) && corner->max_uV > corner->min_uV)
val = RBCPR_CTL_LOOP_EN;
else
val = 0;
cpr_ctl_modify(drv, RBCPR_CTL_LOOP_EN, val);
}
static void cpr_ctl_disable(struct cpr_drv *drv)
{
cpr_irq_set(drv, 0);
cpr_ctl_modify(drv, RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN |
RBCPR_CTL_SW_AUTO_CONT_ACK_EN, 0);
cpr_masked_write(drv, REG_RBIF_TIMER_ADJUST,
RBIF_TIMER_ADJ_CONS_UP_MASK |
RBIF_TIMER_ADJ_CONS_DOWN_MASK, 0);
cpr_irq_clr(drv);
cpr_write(drv, REG_RBIF_CONT_ACK_CMD, 1);
cpr_write(drv, REG_RBIF_CONT_NACK_CMD, 1);
cpr_ctl_modify(drv, RBCPR_CTL_LOOP_EN, 0);
}
static bool cpr_ctl_is_enabled(struct cpr_drv *drv)
{
u32 reg_val;
reg_val = cpr_read(drv, REG_RBCPR_CTL);
return reg_val & RBCPR_CTL_LOOP_EN;
}
static bool cpr_ctl_is_busy(struct cpr_drv *drv)
{
u32 reg_val;
reg_val = cpr_read(drv, REG_RBCPR_RESULT_0);
return reg_val & RBCPR_RESULT0_BUSY_MASK;
}
static void cpr_corner_save(struct cpr_drv *drv, struct corner *corner)
{
corner->save_ctl = cpr_read(drv, REG_RBCPR_CTL);
corner->save_irq = cpr_read(drv, REG_RBIF_IRQ_EN(0));
}
static void cpr_corner_restore(struct cpr_drv *drv, struct corner *corner)
{
u32 gcnt, ctl, irq, ro_sel, step_quot;
struct fuse_corner *fuse = corner->fuse_corner;
const struct cpr_desc *desc = drv->desc;
int i;
ro_sel = fuse->ring_osc_idx;
gcnt = drv->gcnt;
gcnt |= fuse->quot - corner->quot_adjust;
/* Program the step quotient and idle clocks */
step_quot = desc->idle_clocks << RBCPR_STEP_QUOT_IDLE_CLK_SHIFT;
step_quot |= fuse->step_quot & RBCPR_STEP_QUOT_STEPQUOT_MASK;
cpr_write(drv, REG_RBCPR_STEP_QUOT, step_quot);
/* Clear the target quotient value and gate count of all ROs */
for (i = 0; i < CPR_NUM_RING_OSC; i++)
cpr_write(drv, REG_RBCPR_GCNT_TARGET(i), 0);
cpr_write(drv, REG_RBCPR_GCNT_TARGET(ro_sel), gcnt);
ctl = corner->save_ctl;
cpr_write(drv, REG_RBCPR_CTL, ctl);
irq = corner->save_irq;
cpr_irq_set(drv, irq);
dev_dbg(drv->dev, "gcnt = %#08x, ctl = %#08x, irq = %#08x\n", gcnt,
ctl, irq);
}
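/*
* Apply the TCSR ACC settings for every fuse corner between f and end,
* walking up or down depending on the direction of the change.
*/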
static void cpr_set_acc(struct regmap *tcsr, struct fuse_corner *f,
struct fuse_corner *end)
{
if (f == end)
return;
if (f < end) {
for (f += 1; f <= end; f++)
regmap_multi_reg_write(tcsr, f->accs, f->num_accs);
} else {
for (f -= 1; f >= end; f--)
regmap_multi_reg_write(tcsr, f->accs, f->num_accs);
}
}
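/* ACC settings are programmed before lowering the voltage and after raising it */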
static int cpr_pre_voltage(struct cpr_drv *drv,
struct fuse_corner *fuse_corner,
enum voltage_change_dir dir)
{
struct fuse_corner *prev_fuse_corner = drv->corner->fuse_corner;
if (drv->tcsr && dir == DOWN)
cpr_set_acc(drv->tcsr, prev_fuse_corner, fuse_corner);
return 0;
}
static int cpr_post_voltage(struct cpr_drv *drv,
struct fuse_corner *fuse_corner,
enum voltage_change_dir dir)
{
struct fuse_corner *prev_fuse_corner = drv->corner->fuse_corner;
if (drv->tcsr && dir == UP)
cpr_set_acc(drv->tcsr, prev_fuse_corner, fuse_corner);
return 0;
}
static int cpr_scale_voltage(struct cpr_drv *drv, struct corner *corner,
int new_uV, enum voltage_change_dir dir)
{
int ret;
struct fuse_corner *fuse_corner = corner->fuse_corner;
ret = cpr_pre_voltage(drv, fuse_corner, dir);
if (ret)
return ret;
ret = regulator_set_voltage(drv->vdd_apc, new_uV, new_uV);
if (ret) {
dev_err_ratelimited(drv->dev, "failed to set apc voltage %d\n",
new_uV);
return ret;
}
ret = cpr_post_voltage(drv, fuse_corner, dir);
if (ret)
return ret;
return 0;
}
static unsigned int cpr_get_cur_perf_state(struct cpr_drv *drv)
{
return drv->corner ? drv->corner - drv->corners + 1 : 0;
}
static int cpr_scale(struct cpr_drv *drv, enum voltage_change_dir dir)
{
u32 val, error_steps, reg_mask;
int last_uV, new_uV, step_uV, ret;
struct corner *corner;
const struct cpr_desc *desc = drv->desc;
if (dir != UP && dir != DOWN)
return 0;
step_uV = regulator_get_linear_step(drv->vdd_apc);
if (!step_uV)
return -EINVAL;
corner = drv->corner;
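/* Read back how many error steps the last CPR measurement reported */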
val = cpr_read(drv, REG_RBCPR_RESULT_0);
error_steps = val >> RBCPR_RESULT0_ERROR_STEPS_SHIFT;
error_steps &= RBCPR_RESULT0_ERROR_STEPS_MASK;
last_uV = corner->last_uV;
if (dir == UP) {
if (desc->clamp_timer_interval &&
error_steps < desc->up_threshold) {
/*
* Handle the case where another measurement started
* after the interrupt was triggered due to a core
* exiting from power collapse.
*/
error_steps = max(desc->up_threshold,
desc->vdd_apc_step_up_limit);
}
if (last_uV >= corner->max_uV) {
cpr_irq_clr_nack(drv);
/* Maximize the UP threshold */
reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK;
reg_mask <<= RBCPR_CTL_UP_THRESHOLD_SHIFT;
val = reg_mask;
cpr_ctl_modify(drv, reg_mask, val);
/* Disable UP interrupt */
cpr_irq_set(drv, CPR_INT_DEFAULT & ~CPR_INT_UP);
return 0;
}
if (error_steps > desc->vdd_apc_step_up_limit)
error_steps = desc->vdd_apc_step_up_limit;
/* Calculate new voltage */
new_uV = last_uV + error_steps * step_uV;
new_uV = min(new_uV, corner->max_uV);
dev_dbg(drv->dev,
"UP: -> new_uV: %d last_uV: %d perf state: %u\n",
new_uV, last_uV, cpr_get_cur_perf_state(drv));
} else {
if (desc->clamp_timer_interval &&
error_steps < desc->down_threshold) {
/*
* Handle the case where another measurement started
* after the interrupt was triggered due to a core
* exiting from power collapse.
*/
error_steps = max(desc->down_threshold,
desc->vdd_apc_step_down_limit);
}
if (last_uV <= corner->min_uV) {
cpr_irq_clr_nack(drv);
/* Enable auto nack down */
reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
val = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
cpr_ctl_modify(drv, reg_mask, val);
/* Disable DOWN interrupt */
cpr_irq_set(drv, CPR_INT_DEFAULT & ~CPR_INT_DOWN);
return 0;
}
if (error_steps > desc->vdd_apc_step_down_limit)
error_steps = desc->vdd_apc_step_down_limit;
/* Calculate new voltage */
new_uV = last_uV - error_steps * step_uV;
new_uV = max(new_uV, corner->min_uV);
dev_dbg(drv->dev,
"DOWN: -> new_uV: %d last_uV: %d perf state: %u\n",
new_uV, last_uV, cpr_get_cur_perf_state(drv));
}
ret = cpr_scale_voltage(drv, corner, new_uV, dir);
if (ret) {
cpr_irq_clr_nack(drv);
return ret;
}
drv->corner->last_uV = new_uV;
if (dir == UP) {
/* Disable auto nack down */
reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
val = 0;
} else {
/* Restore default threshold for UP */
reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK;
reg_mask <<= RBCPR_CTL_UP_THRESHOLD_SHIFT;
val = desc->up_threshold;
val <<= RBCPR_CTL_UP_THRESHOLD_SHIFT;
}
cpr_ctl_modify(drv, reg_mask, val);
/* Re-enable default interrupts */
cpr_irq_set(drv, CPR_INT_DEFAULT);
/* Ack */
cpr_irq_clr_ack(drv);
return 0;
}
static irqreturn_t cpr_irq_handler(int irq, void *dev)
{
struct cpr_drv *drv = dev;
const struct cpr_desc *desc = drv->desc;
irqreturn_t ret = IRQ_HANDLED;
u32 val;
mutex_lock(&drv->lock);
val = cpr_read(drv, REG_RBIF_IRQ_STATUS);
if (drv->flags & FLAGS_IGNORE_1ST_IRQ_STATUS)
val = cpr_read(drv, REG_RBIF_IRQ_STATUS);
dev_dbg(drv->dev, "IRQ_STATUS = %#02x\n", val);
if (!cpr_ctl_is_enabled(drv)) {
dev_dbg(drv->dev, "CPR is disabled\n");
ret = IRQ_NONE;
} else if (cpr_ctl_is_busy(drv) && !desc->clamp_timer_interval) {
dev_dbg(drv->dev, "CPR measurement is not ready\n");
} else if (!cpr_is_allowed(drv)) {
val = cpr_read(drv, REG_RBCPR_CTL);
dev_err_ratelimited(drv->dev,
"Interrupt broken? RBCPR_CTL = %#02x\n",
val);
ret = IRQ_NONE;
} else {
/* Handle the interrupt flags in order of their priority */
if (val & CPR_INT_UP) {
cpr_scale(drv, UP);
} else if (val & CPR_INT_DOWN) {
cpr_scale(drv, DOWN);
} else if (val & CPR_INT_MIN) {
cpr_irq_clr_nack(drv);
} else if (val & CPR_INT_MAX) {
cpr_irq_clr_nack(drv);
} else if (val & CPR_INT_MID) {
/* RBCPR_CTL_SW_AUTO_CONT_ACK_EN is enabled */
dev_dbg(drv->dev, "IRQ occurred for Mid Flag\n");
} else {
dev_dbg(drv->dev,
"IRQ occurred for unknown flag (%#08x)\n", val);
}
/* Save register values for the corner */
cpr_corner_save(drv, drv->corner);
}
mutex_unlock(&drv->lock);
return ret;
}
static int cpr_enable(struct cpr_drv *drv)
{
int ret;
ret = regulator_enable(drv->vdd_apc);
if (ret)
return ret;
mutex_lock(&drv->lock);
if (cpr_is_allowed(drv) && drv->corner) {
cpr_irq_clr(drv);
cpr_corner_restore(drv, drv->corner);
cpr_ctl_enable(drv, drv->corner);
}
mutex_unlock(&drv->lock);
return 0;
}
static int cpr_disable(struct cpr_drv *drv)
{
mutex_lock(&drv->lock);
if (cpr_is_allowed(drv)) {
cpr_ctl_disable(drv);
cpr_irq_clr(drv);
}
mutex_unlock(&drv->lock);
return regulator_disable(drv->vdd_apc);
}
static int cpr_config(struct cpr_drv *drv)
{
int i;
u32 val, gcnt;
struct corner *corner;
const struct cpr_desc *desc = drv->desc;
/* Disable interrupt and CPR */
cpr_write(drv, REG_RBIF_IRQ_EN(0), 0);
cpr_write(drv, REG_RBCPR_CTL, 0);
/* Program the default HW ceiling, floor and vlevel */
val = (RBIF_LIMIT_CEILING_DEFAULT & RBIF_LIMIT_CEILING_MASK)
<< RBIF_LIMIT_CEILING_SHIFT;
val |= RBIF_LIMIT_FLOOR_DEFAULT & RBIF_LIMIT_FLOOR_MASK;
cpr_write(drv, REG_RBIF_LIMIT, val);
cpr_write(drv, REG_RBIF_SW_VLEVEL, RBIF_SW_VLEVEL_DEFAULT);
/*
* Clear the target quotient value and gate count of all
* ring oscillators
*/
for (i = 0; i < CPR_NUM_RING_OSC; i++)
cpr_write(drv, REG_RBCPR_GCNT_TARGET(i), 0);
/* Init and save gcnt */
gcnt = (drv->ref_clk_khz * desc->gcnt_us) / 1000;
gcnt = gcnt & RBCPR_GCNT_TARGET_GCNT_MASK;
gcnt <<= RBCPR_GCNT_TARGET_GCNT_SHIFT;
drv->gcnt = gcnt;
/* Program the delay count for the timer */
val = (drv->ref_clk_khz * desc->timer_delay_us) / 1000;
cpr_write(drv, REG_RBCPR_TIMER_INTERVAL, val);
dev_dbg(drv->dev, "Timer count: %#0x (for %d us)\n", val,
desc->timer_delay_us);
/* Program Consecutive Up & Down */
val = desc->timer_cons_down << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT;
val |= desc->timer_cons_up << RBIF_TIMER_ADJ_CONS_UP_SHIFT;
val |= desc->clamp_timer_interval << RBIF_TIMER_ADJ_CLAMP_INT_SHIFT;
cpr_write(drv, REG_RBIF_TIMER_ADJUST, val);
/* Program the control register */
val = desc->up_threshold << RBCPR_CTL_UP_THRESHOLD_SHIFT;
val |= desc->down_threshold << RBCPR_CTL_DN_THRESHOLD_SHIFT;
val |= RBCPR_CTL_TIMER_EN | RBCPR_CTL_COUNT_MODE;
val |= RBCPR_CTL_SW_AUTO_CONT_ACK_EN;
cpr_write(drv, REG_RBCPR_CTL, val);
for (i = 0; i < drv->num_corners; i++) {
corner = &drv->corners[i];
corner->save_ctl = val;
corner->save_irq = CPR_INT_DEFAULT;
}
cpr_irq_set(drv, CPR_INT_DEFAULT);
val = cpr_read(drv, REG_RBCPR_VERSION);
if (val <= RBCPR_VER_2)
drv->flags |= FLAGS_IGNORE_1ST_IRQ_STATUS;
return 0;
}
static int cpr_set_performance_state(struct generic_pm_domain *domain,
unsigned int state)
{
struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
struct corner *corner, *end;
enum voltage_change_dir dir;
int ret = 0, new_uV;
mutex_lock(&drv->lock);
dev_dbg(drv->dev, "%s: setting perf state: %u (prev state: %u)\n",
__func__, state, cpr_get_cur_perf_state(drv));
/*
* Determine new corner we're going to.
* Remove one since lowest performance state is 1.
*/
corner = drv->corners + state - 1;
end = &drv->corners[drv->num_corners - 1];
if (corner > end || corner < drv->corners) {
ret = -EINVAL;
goto unlock;
}
/* Determine direction */
if (drv->corner > corner)
dir = DOWN;
else if (drv->corner < corner)
dir = UP;
else
dir = NO_CHANGE;
if (cpr_is_allowed(drv))
new_uV = corner->last_uV;
else
new_uV = corner->uV;
if (cpr_is_allowed(drv))
cpr_ctl_disable(drv);
ret = cpr_scale_voltage(drv, corner, new_uV, dir);
if (ret)
goto unlock;
if (cpr_is_allowed(drv)) {
cpr_irq_clr(drv);
if (drv->corner != corner)
cpr_corner_restore(drv, corner);
cpr_ctl_enable(drv, corner);
}
drv->corner = corner;
unlock:
mutex_unlock(&drv->lock);
return ret;
}
static int
cpr_populate_ring_osc_idx(struct cpr_drv *drv)
{
struct fuse_corner *fuse = drv->fuse_corners;
struct fuse_corner *end = fuse + drv->desc->num_fuse_corners;
const struct cpr_fuse *fuses = drv->cpr_fuses;
u32 data;
int ret;
for (; fuse < end; fuse++, fuses++) {
ret = nvmem_cell_read_variable_le_u32(drv->dev, fuses->ring_osc, &data);
if (ret)
return ret;
fuse->ring_osc_idx = data;
}
return 0;
}
static int cpr_read_fuse_uV(const struct cpr_desc *desc,
const struct fuse_corner_data *fdata,
const char *init_v_efuse,
int step_volt,
struct cpr_drv *drv)
{
int step_size_uV, steps, uV;
u32 bits = 0;
int ret;
ret = nvmem_cell_read_variable_le_u32(drv->dev, init_v_efuse, &bits);
if (ret)
return ret;
steps = bits & ~BIT(desc->cpr_fuses.init_voltage_width - 1);
/* Not two's complement; instead the highest bit is the sign bit */
if (bits & BIT(desc->cpr_fuses.init_voltage_width - 1))
steps = -steps;
step_size_uV = desc->cpr_fuses.init_voltage_step;
uV = fdata->ref_uV + steps * step_size_uV;
return DIV_ROUND_UP(uV, step_volt) * step_volt;
}
static int cpr_fuse_corner_init(struct cpr_drv *drv)
{
const struct cpr_desc *desc = drv->desc;
const struct cpr_fuse *fuses = drv->cpr_fuses;
const struct acc_desc *acc_desc = drv->acc_desc;
int i;
unsigned int step_volt;
struct fuse_corner_data *fdata;
struct fuse_corner *fuse, *end;
int uV;
const struct reg_sequence *accs;
int ret;
accs = acc_desc->settings;
step_volt = regulator_get_linear_step(drv->vdd_apc);
if (!step_volt)
return -EINVAL;
/* Populate fuse_corner members */
fuse = drv->fuse_corners;
end = &fuse[desc->num_fuse_corners - 1];
fdata = desc->cpr_fuses.fuse_corner_data;
for (i = 0; fuse <= end; fuse++, fuses++, i++, fdata++) {
/*
* Update SoC voltages: platforms might choose a different
* regulator than the one used to characterize the algorithm
* (i.e. init_voltage_step).
*/
fdata->min_uV = roundup(fdata->min_uV, step_volt);
fdata->max_uV = roundup(fdata->max_uV, step_volt);
/* Populate uV */
uV = cpr_read_fuse_uV(desc, fdata, fuses->init_voltage,
step_volt, drv);
if (uV < 0)
return uV;
fuse->min_uV = fdata->min_uV;
fuse->max_uV = fdata->max_uV;
fuse->uV = clamp(uV, fuse->min_uV, fuse->max_uV);
if (fuse == end) {
/*
* Allow the highest fuse corner's PVS voltage to
* define the ceiling voltage for that corner in order
* to support SoCs in which variable ceiling values
* are required.
*/
end->max_uV = max(end->max_uV, end->uV);
}
/* Populate target quotient by scaling */
ret = nvmem_cell_read_variable_le_u32(drv->dev, fuses->quotient, &fuse->quot);
if (ret)
return ret;
fuse->quot *= fdata->quot_scale;
fuse->quot += fdata->quot_offset;
fuse->quot += fdata->quot_adjust;
fuse->step_quot = desc->step_quot[fuse->ring_osc_idx];
/* Populate acc settings */
fuse->accs = accs;
fuse->num_accs = acc_desc->num_regs_per_fuse;
accs += acc_desc->num_regs_per_fuse;
}
/*
* Restrict all fuse corner PVS voltages based upon per corner
* ceiling and floor voltages.
*/
for (fuse = drv->fuse_corners, i = 0; fuse <= end; fuse++, i++) {
if (fuse->uV > fuse->max_uV)
fuse->uV = fuse->max_uV;
else if (fuse->uV < fuse->min_uV)
fuse->uV = fuse->min_uV;
ret = regulator_is_supported_voltage(drv->vdd_apc,
fuse->min_uV,
fuse->min_uV);
if (!ret) {
dev_err(drv->dev,
"min uV: %d (fuse corner: %d) not supported by regulator\n",
fuse->min_uV, i);
return -EINVAL;
}
ret = regulator_is_supported_voltage(drv->vdd_apc,
fuse->max_uV,
fuse->max_uV);
if (!ret) {
dev_err(drv->dev,
"max uV: %d (fuse corner: %d) not supported by regulator\n",
fuse->max_uV, i);
return -EINVAL;
}
dev_dbg(drv->dev,
"fuse corner %d: [%d %d %d] RO%hhu quot %d squot %d\n",
i, fuse->min_uV, fuse->uV, fuse->max_uV,
fuse->ring_osc_idx, fuse->quot, fuse->step_quot);
}
return 0;
}
static int cpr_calculate_scaling(const char *quot_offset,
struct cpr_drv *drv,
const struct fuse_corner_data *fdata,
const struct corner *corner)
{
u32 quot_diff = 0;
unsigned long freq_diff;
int scaling;
const struct fuse_corner *fuse, *prev_fuse;
int ret;
fuse = corner->fuse_corner;
prev_fuse = fuse - 1;
if (quot_offset) {
ret = nvmem_cell_read_variable_le_u32(drv->dev, quot_offset, "_diff);
if (ret)
return ret;
quot_diff *= fdata->quot_offset_scale;
quot_diff += fdata->quot_offset_adjust;
} else {
quot_diff = fuse->quot - prev_fuse->quot;
}
freq_diff = fuse->max_freq - prev_fuse->max_freq;
freq_diff /= 1000000; /* Convert to MHz */
scaling = 1000 * quot_diff / freq_diff;
return min(scaling, fdata->max_quot_scale);
}
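/*
* Linearly interpolate the open-loop voltage for a corner that sits
* between two fuse corners, limited by max_volt_scale.
*/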
static int cpr_interpolate(const struct corner *corner, int step_volt,
const struct fuse_corner_data *fdata)
{
unsigned long f_high, f_low, f_diff;
int uV_high, uV_low, uV;
u64 temp, temp_limit;
const struct fuse_corner *fuse, *prev_fuse;
fuse = corner->fuse_corner;
prev_fuse = fuse - 1;
f_high = fuse->max_freq;
f_low = prev_fuse->max_freq;
uV_high = fuse->uV;
uV_low = prev_fuse->uV;
f_diff = fuse->max_freq - corner->freq;
/*
* Don't interpolate in the wrong direction. This could happen
* if the adjusted fuse voltage overlaps with the previous fuse's
* adjusted voltage.
*/
if (f_high <= f_low || uV_high <= uV_low || f_high <= corner->freq)
return corner->uV;
temp = f_diff * (uV_high - uV_low);
temp = div64_ul(temp, f_high - f_low);
/*
* max_volt_scale has units of uV/MHz while freq values
* have units of Hz. Divide by 1000000 to convert to uV/Hz.
*/
temp_limit = f_diff * fdata->max_volt_scale;
do_div(temp_limit, 1000000);
uV = uV_high - min(temp, temp_limit);
return roundup(uV, step_volt);
}
static unsigned int cpr_get_fuse_corner(struct dev_pm_opp *opp)
{
struct device_node *np;
unsigned int fuse_corner = 0;
np = dev_pm_opp_get_of_node(opp);
if (of_property_read_u32(np, "qcom,opp-fuse-level", &fuse_corner))
pr_err("%s: missing 'qcom,opp-fuse-level' property\n",
__func__);
of_node_put(np);
return fuse_corner;
}
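/*
* Walk the CPU OPP table and return the opp-hz rate of the entry whose
* required-opps phandle references the given genpd OPP.
*/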
static unsigned long cpr_get_opp_hz_for_req(struct dev_pm_opp *ref,
struct device *cpu_dev)
{
u64 rate = 0;
struct device_node *ref_np;
struct device_node *desc_np;
struct device_node *child_np = NULL;
struct device_node *child_req_np = NULL;
desc_np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
if (!desc_np)
return 0;
ref_np = dev_pm_opp_get_of_node(ref);
if (!ref_np)
goto out_ref;
do {
of_node_put(child_req_np);
child_np = of_get_next_available_child(desc_np, child_np);
child_req_np = of_parse_phandle(child_np, "required-opps", 0);
} while (child_np && child_req_np != ref_np);
if (child_np && child_req_np == ref_np)
of_property_read_u64(child_np, "opp-hz", &rate);
of_node_put(child_req_np);
of_node_put(child_np);
of_node_put(ref_np);
out_ref:
of_node_put(desc_np);
return (unsigned long) rate;
}
static int cpr_corner_init(struct cpr_drv *drv)
{
const struct cpr_desc *desc = drv->desc;
const struct cpr_fuse *fuses = drv->cpr_fuses;
int i, level, scaling = 0;
unsigned int fnum, fc;
const char *quot_offset;
struct fuse_corner *fuse, *prev_fuse;
struct corner *corner, *end;
struct corner_data *cdata;
const struct fuse_corner_data *fdata;
bool apply_scaling;
unsigned long freq_diff, freq_diff_mhz;
unsigned long freq;
int step_volt = regulator_get_linear_step(drv->vdd_apc);
struct dev_pm_opp *opp;
if (!step_volt)
return -EINVAL;
corner = drv->corners;
end = &corner[drv->num_corners - 1];
cdata = devm_kcalloc(drv->dev, drv->num_corners,
sizeof(struct corner_data),
GFP_KERNEL);
if (!cdata)
return -ENOMEM;
/*
* Store maximum frequency for each fuse corner based on the frequency
* plan
*/
for (level = 1; level <= drv->num_corners; level++) {
opp = dev_pm_opp_find_level_exact(&drv->pd.dev, level);
if (IS_ERR(opp))
return -EINVAL;
fc = cpr_get_fuse_corner(opp);
if (!fc) {
dev_pm_opp_put(opp);
return -EINVAL;
}
fnum = fc - 1;
freq = cpr_get_opp_hz_for_req(opp, drv->attached_cpu_dev);
if (!freq) {
dev_pm_opp_put(opp);
return -EINVAL;
}
cdata[level - 1].fuse_corner = fnum;
cdata[level - 1].freq = freq;
fuse = &drv->fuse_corners[fnum];
dev_dbg(drv->dev, "freq: %lu level: %u fuse level: %u\n",
freq, dev_pm_opp_get_level(opp) - 1, fnum);
if (freq > fuse->max_freq)
fuse->max_freq = freq;
dev_pm_opp_put(opp);
}
/*
* Get the quotient adjustment scaling factor, according to:
*
* scaling = min(1000 * (QUOT(corner_N) - QUOT(corner_N-1))
* / (freq(corner_N) - freq(corner_N-1)), max_factor)
*
* QUOT(corner_N): quotient read from fuse for fuse corner N
* QUOT(corner_N-1): quotient read from fuse for fuse corner (N - 1)
* freq(corner_N): max frequency in MHz supported by fuse corner N
* freq(corner_N-1): max frequency in MHz supported by fuse corner
* (N - 1)
*
* Then walk through the corners mapped to each fuse corner
* and calculate the quotient adjustment for each one using the
* following formula:
*
* quot_adjust = (freq_max - freq_corner) * scaling / 1000
*
* freq_max: max frequency in MHz supported by the fuse corner
* freq_corner: frequency in MHz corresponding to the corner
* scaling: calculated from above equation
*
*
	 *     +                           +
	 *     |                         v |
	 *   q |           f c           o |           f c
	 *   u |         c               l |         c
	 *   o |       f                 t |       f
	 *   t |     c                   a |     c
	 *     | c f                     g | c f
	 *     |                         e |
	 *     +---------------            +----------------
	 *       0 1 2 3 4 5 6               0 1 2 3 4 5 6
	 *         corner                        corner
*
* c = corner
* f = fuse corner
*
*/
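	/*
	 * For example (hypothetical fuse values): with QUOT(N) = 1200,
	 * QUOT(N-1) = 1000, freq(N) = 1400 MHz and freq(N-1) = 1000 MHz,
	 * scaling = min(1000 * 200 / 400, max_factor) = 500 (assuming
	 * max_factor >= 500). A corner running at 1200 MHz in fuse
	 * corner N then gets quot_adjust = (1400 - 1200) * 500 / 1000 = 100.
	 */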
for (apply_scaling = false, i = 0; corner <= end; corner++, i++) {
fnum = cdata[i].fuse_corner;
fdata = &desc->cpr_fuses.fuse_corner_data[fnum];
quot_offset = fuses[fnum].quotient_offset;
fuse = &drv->fuse_corners[fnum];
if (fnum)
prev_fuse = &drv->fuse_corners[fnum - 1];
else
prev_fuse = NULL;
corner->fuse_corner = fuse;
corner->freq = cdata[i].freq;
corner->uV = fuse->uV;
if (prev_fuse && cdata[i - 1].freq == prev_fuse->max_freq) {
scaling = cpr_calculate_scaling(quot_offset, drv,
fdata, corner);
if (scaling < 0)
return scaling;
apply_scaling = true;
} else if (corner->freq == fuse->max_freq) {
/* This is a fuse corner; don't scale anything */
apply_scaling = false;
}
if (apply_scaling) {
freq_diff = fuse->max_freq - corner->freq;
freq_diff_mhz = freq_diff / 1000000;
corner->quot_adjust = scaling * freq_diff_mhz / 1000;
corner->uV = cpr_interpolate(corner, step_volt, fdata);
}
corner->max_uV = fuse->max_uV;
corner->min_uV = fuse->min_uV;
corner->uV = clamp(corner->uV, corner->min_uV, corner->max_uV);
corner->last_uV = corner->uV;
/* Reduce the ceiling voltage if needed */
if (desc->reduce_to_corner_uV && corner->uV < corner->max_uV)
corner->max_uV = corner->uV;
else if (desc->reduce_to_fuse_uV && fuse->uV < corner->max_uV)
corner->max_uV = max(corner->min_uV, fuse->uV);
dev_dbg(drv->dev, "corner %d: [%d %d %d] quot %d\n", i,
corner->min_uV, corner->uV, corner->max_uV,
fuse->quot - corner->quot_adjust);
}
return 0;
}
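/*
 * Build the per-fuse-corner nvmem cell names ("cpr_ring_osc<n>",
 * "cpr_init_voltage<n>", "cpr_quotient<n>" and "cpr_quotient_offset<n>",
 * with <n> starting at 1) that are later used to read the CPR fuses
 * for each fuse corner.
 */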
static const struct cpr_fuse *cpr_get_fuses(struct cpr_drv *drv)
{
const struct cpr_desc *desc = drv->desc;
struct cpr_fuse *fuses;
int i;
fuses = devm_kcalloc(drv->dev, desc->num_fuse_corners,
sizeof(struct cpr_fuse),
GFP_KERNEL);
if (!fuses)
return ERR_PTR(-ENOMEM);
for (i = 0; i < desc->num_fuse_corners; i++) {
char tbuf[32];
snprintf(tbuf, 32, "cpr_ring_osc%d", i + 1);
fuses[i].ring_osc = devm_kstrdup(drv->dev, tbuf, GFP_KERNEL);
if (!fuses[i].ring_osc)
return ERR_PTR(-ENOMEM);
snprintf(tbuf, 32, "cpr_init_voltage%d", i + 1);
fuses[i].init_voltage = devm_kstrdup(drv->dev, tbuf,
GFP_KERNEL);
if (!fuses[i].init_voltage)
return ERR_PTR(-ENOMEM);
snprintf(tbuf, 32, "cpr_quotient%d", i + 1);
fuses[i].quotient = devm_kstrdup(drv->dev, tbuf, GFP_KERNEL);
if (!fuses[i].quotient)
return ERR_PTR(-ENOMEM);
snprintf(tbuf, 32, "cpr_quotient_offset%d", i + 1);
fuses[i].quotient_offset = devm_kstrdup(drv->dev, tbuf,
GFP_KERNEL);
if (!fuses[i].quotient_offset)
return ERR_PTR(-ENOMEM);
}
return fuses;
}
static void cpr_set_loop_allowed(struct cpr_drv *drv)
{
drv->loop_disabled = false;
}
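/*
 * Read the reference clock rate and sanity-check the timing/threshold
 * parameters from the SoC description against the widths of the
 * corresponding register fields.
 */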
static int cpr_init_parameters(struct cpr_drv *drv)
{
const struct cpr_desc *desc = drv->desc;
struct clk *clk;
clk = clk_get(drv->dev, "ref");
if (IS_ERR(clk))
return PTR_ERR(clk);
drv->ref_clk_khz = clk_get_rate(clk) / 1000;
clk_put(clk);
if (desc->timer_cons_up > RBIF_TIMER_ADJ_CONS_UP_MASK ||
desc->timer_cons_down > RBIF_TIMER_ADJ_CONS_DOWN_MASK ||
desc->up_threshold > RBCPR_CTL_UP_THRESHOLD_MASK ||
desc->down_threshold > RBCPR_CTL_DN_THRESHOLD_MASK ||
desc->idle_clocks > RBCPR_STEP_QUOT_IDLE_CLK_MASK ||
desc->clamp_timer_interval > RBIF_TIMER_ADJ_CLAMP_INT_MASK)
return -EINVAL;
dev_dbg(drv->dev, "up threshold = %u, down threshold = %u\n",
desc->up_threshold, desc->down_threshold);
return 0;
}
static int cpr_find_initial_corner(struct cpr_drv *drv)
{
unsigned long rate;
const struct corner *end;
struct corner *iter;
unsigned int i = 0;
if (!drv->cpu_clk) {
dev_err(drv->dev, "cannot get rate from NULL clk\n");
return -EINVAL;
}
end = &drv->corners[drv->num_corners - 1];
rate = clk_get_rate(drv->cpu_clk);
/*
* Some bootloaders set a CPU clock frequency that is not defined
* in the OPP table. When running at an unlisted frequency,
* cpufreq_online() will change to the OPP which has the lowest
* frequency, at or above the unlisted frequency.
* Since cpufreq_online() always "rounds up" in the case of an
* unlisted frequency, this function always "rounds down" in case
* of an unlisted frequency. That way, when cpufreq_online()
* triggers the first ever call to cpr_set_performance_state(),
* it will correctly determine the direction as UP.
*/
for (iter = drv->corners; iter <= end; iter++) {
if (iter->freq > rate)
break;
i++;
if (iter->freq == rate) {
drv->corner = iter;
break;
}
if (iter->freq < rate)
drv->corner = iter;
}
if (!drv->corner) {
dev_err(drv->dev, "boot up corner not found\n");
return -EINVAL;
}
dev_dbg(drv->dev, "boot up perf state: %u\n", i);
return 0;
}
static const struct cpr_desc qcs404_cpr_desc = {
.num_fuse_corners = 3,
.min_diff_quot = CPR_FUSE_MIN_QUOT_DIFF,
.step_quot = (int []){ 25, 25, 25, },
.timer_delay_us = 5000,
.timer_cons_up = 0,
.timer_cons_down = 2,
.up_threshold = 1,
.down_threshold = 3,
.idle_clocks = 15,
.gcnt_us = 1,
.vdd_apc_step_up_limit = 1,
.vdd_apc_step_down_limit = 1,
.cpr_fuses = {
.init_voltage_step = 8000,
.init_voltage_width = 6,
.fuse_corner_data = (struct fuse_corner_data[]){
/* fuse corner 0 */
{
.ref_uV = 1224000,
.max_uV = 1224000,
.min_uV = 1048000,
.max_volt_scale = 0,
.max_quot_scale = 0,
.quot_offset = 0,
.quot_scale = 1,
.quot_adjust = 0,
.quot_offset_scale = 5,
.quot_offset_adjust = 0,
},
/* fuse corner 1 */
{
.ref_uV = 1288000,
.max_uV = 1288000,
.min_uV = 1048000,
.max_volt_scale = 2000,
.max_quot_scale = 1400,
.quot_offset = 0,
.quot_scale = 1,
.quot_adjust = -20,
.quot_offset_scale = 5,
.quot_offset_adjust = 0,
},
/* fuse corner 2 */
{
.ref_uV = 1352000,
.max_uV = 1384000,
.min_uV = 1088000,
.max_volt_scale = 2000,
.max_quot_scale = 1400,
.quot_offset = 0,
.quot_scale = 1,
.quot_adjust = 0,
.quot_offset_scale = 5,
.quot_offset_adjust = 0,
},
},
},
};
static const struct acc_desc qcs404_acc_desc = {
.settings = (struct reg_sequence[]){
{ 0xb120, 0x1041040 },
{ 0xb124, 0x41 },
{ 0xb120, 0x0 },
{ 0xb124, 0x0 },
{ 0xb120, 0x0 },
{ 0xb124, 0x0 },
},
.config = (struct reg_sequence[]){
{ 0xb138, 0xff },
{ 0xb130, 0x5555 },
},
.num_regs_per_fuse = 2,
};
static const struct cpr_acc_desc qcs404_cpr_acc_desc = {
.cpr_desc = &qcs404_cpr_desc,
.acc_desc = &qcs404_acc_desc,
};
static unsigned int cpr_get_performance_state(struct generic_pm_domain *genpd,
struct dev_pm_opp *opp)
{
return dev_pm_opp_get_level(opp);
}
static int cpr_power_off(struct generic_pm_domain *domain)
{
struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
return cpr_disable(drv);
}
static int cpr_power_on(struct generic_pm_domain *domain)
{
struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
return cpr_enable(drv);
}
static int cpr_pd_attach_dev(struct generic_pm_domain *domain,
struct device *dev)
{
struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
const struct acc_desc *acc_desc = drv->acc_desc;
int ret = 0;
mutex_lock(&drv->lock);
dev_dbg(drv->dev, "attach callback for: %s\n", dev_name(dev));
/*
* This driver only supports scaling voltage for a CPU cluster
* where all CPUs in the cluster share a single regulator.
* Therefore, save the struct device pointer only for the first
* CPU device that gets attached. There is no need to do any
* additional initialization when further CPUs get attached.
*/
if (drv->attached_cpu_dev)
goto unlock;
/*
* cpr_scale_voltage() requires the direction (if we are changing
* to a higher or lower OPP). The first time
* cpr_set_performance_state() is called, there is no previous
* performance state defined. Therefore, we call
* cpr_find_initial_corner() that gets the CPU clock frequency
* set by the bootloader, so that we can determine the direction
* the first time cpr_set_performance_state() is called.
*/
drv->cpu_clk = devm_clk_get(dev, NULL);
if (IS_ERR(drv->cpu_clk)) {
ret = PTR_ERR(drv->cpu_clk);
if (ret != -EPROBE_DEFER)
dev_err(drv->dev, "could not get cpu clk: %d\n", ret);
goto unlock;
}
drv->attached_cpu_dev = dev;
dev_dbg(drv->dev, "using cpu clk from: %s\n",
dev_name(drv->attached_cpu_dev));
	/*
	 * Everything related to (virtual) corners has to be initialized
	 * here, when attaching to the power domain, because we need to
	 * know the highest frequency supported by each fuse corner, and
	 * that information is only available after the cpufreq driver
	 * has attached to us.
	 */
ret = dev_pm_opp_get_opp_count(&drv->pd.dev);
if (ret < 0) {
dev_err(drv->dev, "could not get OPP count\n");
goto unlock;
}
drv->num_corners = ret;
if (drv->num_corners < 2) {
dev_err(drv->dev, "need at least 2 OPPs to use CPR\n");
ret = -EINVAL;
goto unlock;
}
drv->corners = devm_kcalloc(drv->dev, drv->num_corners,
sizeof(*drv->corners),
GFP_KERNEL);
if (!drv->corners) {
ret = -ENOMEM;
goto unlock;
}
ret = cpr_corner_init(drv);
if (ret)
goto unlock;
cpr_set_loop_allowed(drv);
ret = cpr_init_parameters(drv);
if (ret)
goto unlock;
/* Configure CPR HW but keep it disabled */
ret = cpr_config(drv);
if (ret)
goto unlock;
ret = cpr_find_initial_corner(drv);
if (ret)
goto unlock;
if (acc_desc->config)
regmap_multi_reg_write(drv->tcsr, acc_desc->config,
acc_desc->num_regs_per_fuse);
/* Enable ACC if required */
if (acc_desc->enable_mask)
regmap_update_bits(drv->tcsr, acc_desc->enable_reg,
acc_desc->enable_mask,
acc_desc->enable_mask);
dev_info(drv->dev, "driver initialized with %u OPPs\n",
drv->num_corners);
unlock:
mutex_unlock(&drv->lock);
return ret;
}
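/* debugfs: dump the current corner voltage and the main CPR registers */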
static int cpr_debug_info_show(struct seq_file *s, void *unused)
{
u32 gcnt, ro_sel, ctl, irq_status, reg, error_steps;
u32 step_dn, step_up, error, error_lt0, busy;
struct cpr_drv *drv = s->private;
struct fuse_corner *fuse_corner;
struct corner *corner;
corner = drv->corner;
fuse_corner = corner->fuse_corner;
seq_printf(s, "corner, current_volt = %d uV\n",
corner->last_uV);
ro_sel = fuse_corner->ring_osc_idx;
gcnt = cpr_read(drv, REG_RBCPR_GCNT_TARGET(ro_sel));
seq_printf(s, "rbcpr_gcnt_target (%u) = %#02X\n", ro_sel, gcnt);
ctl = cpr_read(drv, REG_RBCPR_CTL);
seq_printf(s, "rbcpr_ctl = %#02X\n", ctl);
irq_status = cpr_read(drv, REG_RBIF_IRQ_STATUS);
seq_printf(s, "rbcpr_irq_status = %#02X\n", irq_status);
reg = cpr_read(drv, REG_RBCPR_RESULT_0);
seq_printf(s, "rbcpr_result_0 = %#02X\n", reg);
step_dn = reg & 0x01;
step_up = (reg >> RBCPR_RESULT0_STEP_UP_SHIFT) & 0x01;
seq_printf(s, " [step_dn = %u", step_dn);
seq_printf(s, ", step_up = %u", step_up);
error_steps = (reg >> RBCPR_RESULT0_ERROR_STEPS_SHIFT)
& RBCPR_RESULT0_ERROR_STEPS_MASK;
seq_printf(s, ", error_steps = %u", error_steps);
error = (reg >> RBCPR_RESULT0_ERROR_SHIFT) & RBCPR_RESULT0_ERROR_MASK;
seq_printf(s, ", error = %u", error);
error_lt0 = (reg >> RBCPR_RESULT0_ERROR_LT0_SHIFT) & 0x01;
seq_printf(s, ", error_lt_0 = %u", error_lt0);
busy = (reg >> RBCPR_RESULT0_BUSY_SHIFT) & 0x01;
seq_printf(s, ", busy = %u]\n", busy);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(cpr_debug_info);
static void cpr_debugfs_init(struct cpr_drv *drv)
{
drv->debugfs = debugfs_create_dir("qcom_cpr", NULL);
debugfs_create_file("debug_info", 0444, drv->debugfs,
drv, &cpr_debug_info_fops);
}
static int cpr_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct cpr_drv *drv;
int irq, ret;
const struct cpr_acc_desc *data;
struct device_node *np;
u32 cpr_rev = FUSE_REVISION_UNKNOWN;
data = of_device_get_match_data(dev);
if (!data || !data->cpr_desc || !data->acc_desc)
return -EINVAL;
drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
if (!drv)
return -ENOMEM;
drv->dev = dev;
drv->desc = data->cpr_desc;
drv->acc_desc = data->acc_desc;
drv->fuse_corners = devm_kcalloc(dev, drv->desc->num_fuse_corners,
sizeof(*drv->fuse_corners),
GFP_KERNEL);
if (!drv->fuse_corners)
return -ENOMEM;
np = of_parse_phandle(dev->of_node, "acc-syscon", 0);
if (!np)
return -ENODEV;
drv->tcsr = syscon_node_to_regmap(np);
of_node_put(np);
if (IS_ERR(drv->tcsr))
return PTR_ERR(drv->tcsr);
drv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(drv->base))
return PTR_ERR(drv->base);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return -EINVAL;
drv->vdd_apc = devm_regulator_get(dev, "vdd-apc");
if (IS_ERR(drv->vdd_apc))
return PTR_ERR(drv->vdd_apc);
	/*
	 * Initialize the fuse corners here, since they depend only on
	 * data read from the eFuses.
	 * Everything related to (virtual) corners has to be initialized
	 * after attaching to the power domain, since it depends on the
	 * CPU's OPP table.
	 */
ret = nvmem_cell_read_variable_le_u32(dev, "cpr_fuse_revision", &cpr_rev);
if (ret)
return ret;
drv->cpr_fuses = cpr_get_fuses(drv);
if (IS_ERR(drv->cpr_fuses))
return PTR_ERR(drv->cpr_fuses);
ret = cpr_populate_ring_osc_idx(drv);
if (ret)
return ret;
ret = cpr_fuse_corner_init(drv);
if (ret)
return ret;
mutex_init(&drv->lock);
ret = devm_request_threaded_irq(dev, irq, NULL,
cpr_irq_handler,
IRQF_ONESHOT | IRQF_TRIGGER_RISING,
"cpr", drv);
if (ret)
return ret;
drv->pd.name = devm_kstrdup_const(dev, dev->of_node->full_name,
GFP_KERNEL);
if (!drv->pd.name)
return -EINVAL;
drv->pd.power_off = cpr_power_off;
drv->pd.power_on = cpr_power_on;
drv->pd.set_performance_state = cpr_set_performance_state;
drv->pd.opp_to_performance_state = cpr_get_performance_state;
drv->pd.attach_dev = cpr_pd_attach_dev;
ret = pm_genpd_init(&drv->pd, NULL, true);
if (ret)
return ret;
ret = of_genpd_add_provider_simple(dev->of_node, &drv->pd);
if (ret)
goto err_remove_genpd;
platform_set_drvdata(pdev, drv);
cpr_debugfs_init(drv);
return 0;
err_remove_genpd:
pm_genpd_remove(&drv->pd);
return ret;
}
static int cpr_remove(struct platform_device *pdev)
{
struct cpr_drv *drv = platform_get_drvdata(pdev);
if (cpr_is_allowed(drv)) {
cpr_ctl_disable(drv);
cpr_irq_set(drv, 0);
}
of_genpd_del_provider(pdev->dev.of_node);
pm_genpd_remove(&drv->pd);
debugfs_remove_recursive(drv->debugfs);
return 0;
}
static const struct of_device_id cpr_match_table[] = {
{ .compatible = "qcom,qcs404-cpr", .data = &qcs404_cpr_acc_desc },
{ }
};
MODULE_DEVICE_TABLE(of, cpr_match_table);
static struct platform_driver cpr_driver = {
.probe = cpr_probe,
.remove = cpr_remove,
.driver = {
.name = "qcom-cpr",
.of_match_table = cpr_match_table,
},
};
module_platform_driver(cpr_driver);
MODULE_DESCRIPTION("Core Power Reduction (CPR) driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/pmdomain/qcom/cpr.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pm_domain.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/soc/qcom/smd-rpm.h>
#include <dt-bindings/power/qcom-rpmpd.h>
#define domain_to_rpmpd(domain) container_of(domain, struct rpmpd, pd)
/*
 * Resource types:
 * RPMPD_X is X encoded as a little-endian, lower-case, ASCII string
 */
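/* e.g. RPMPD_SMPA = 0x61706d73 = { 's', 'm', 'p', 'a' } in memory */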
#define RPMPD_SMPA 0x61706d73
#define RPMPD_LDOA 0x616f646c
#define RPMPD_SMPB 0x62706d73
#define RPMPD_LDOB 0x626f646c
#define RPMPD_RWCX 0x78637772
#define RPMPD_RWMX 0x786d7772
#define RPMPD_RWLC 0x636c7772
#define RPMPD_RWLM 0x6d6c7772
#define RPMPD_RWSC 0x63737772
#define RPMPD_RWSM 0x6d737772
#define RPMPD_RWGX 0x78677772
/* Operation Keys */
#define KEY_CORNER 0x6e726f63 /* corn */
#define KEY_ENABLE 0x6e657773 /* swen */
#define KEY_FLOOR_CORNER 0x636676 /* vfc */
#define KEY_FLOOR_LEVEL 0x6c6676 /* vfl */
#define KEY_LEVEL 0x6c766c76 /* vlvl */
#define MAX_CORNER_RPMPD_STATE 6
struct rpmpd_req {
__le32 key;
__le32 nbytes;
__le32 value;
};
struct rpmpd {
struct generic_pm_domain pd;
struct generic_pm_domain *parent;
struct rpmpd *peer;
const bool active_only;
unsigned int corner;
bool enabled;
const int res_type;
const int res_id;
struct qcom_smd_rpm *rpm;
unsigned int max_state;
__le32 key;
bool state_synced;
};
struct rpmpd_desc {
struct rpmpd **rpmpds;
size_t num_pds;
unsigned int max_state;
};
static DEFINE_MUTEX(rpmpd_lock);
/* CX */
static struct rpmpd cx_rwcx0_lvl_ao;
static struct rpmpd cx_rwcx0_lvl = {
.pd = { .name = "cx", },
.peer = &cx_rwcx0_lvl_ao,
.res_type = RPMPD_RWCX,
.res_id = 0,
.key = KEY_LEVEL,
};
static struct rpmpd cx_rwcx0_lvl_ao = {
.pd = { .name = "cx_ao", },
.peer = &cx_rwcx0_lvl,
.active_only = true,
.res_type = RPMPD_RWCX,
.res_id = 0,
.key = KEY_LEVEL,
};
static struct rpmpd cx_s1a_corner_ao;
static struct rpmpd cx_s1a_corner = {
.pd = { .name = "cx", },
.peer = &cx_s1a_corner_ao,
.res_type = RPMPD_SMPA,
.res_id = 1,
.key = KEY_CORNER,
};
static struct rpmpd cx_s1a_corner_ao = {
.pd = { .name = "cx_ao", },
.peer = &cx_s1a_corner,
.active_only = true,
.res_type = RPMPD_SMPA,
.res_id = 1,
.key = KEY_CORNER,
};
static struct rpmpd cx_s2a_corner_ao;
static struct rpmpd cx_s2a_corner = {
.pd = { .name = "cx", },
.peer = &cx_s2a_corner_ao,
.res_type = RPMPD_SMPA,
.res_id = 2,
.key = KEY_CORNER,
};
static struct rpmpd cx_s2a_corner_ao = {
.pd = { .name = "cx_ao", },
.peer = &cx_s2a_corner,
.active_only = true,
.res_type = RPMPD_SMPA,
.res_id = 2,
.key = KEY_CORNER,
};
static struct rpmpd cx_s2a_lvl_ao;
static struct rpmpd cx_s2a_lvl = {
.pd = { .name = "cx", },
.peer = &cx_s2a_lvl_ao,
.res_type = RPMPD_SMPA,
.res_id = 2,
.key = KEY_LEVEL,
};
static struct rpmpd cx_s2a_lvl_ao = {
.pd = { .name = "cx_ao", },
.peer = &cx_s2a_lvl,
.active_only = true,
.res_type = RPMPD_SMPA,
.res_id = 2,
.key = KEY_LEVEL,
};
static struct rpmpd cx_s3a_lvl_ao;
static struct rpmpd cx_s3a_lvl = {
.pd = { .name = "cx", },
.peer = &cx_s3a_lvl_ao,
.res_type = RPMPD_SMPA,
.res_id = 3,
.key = KEY_LEVEL,
};
static struct rpmpd cx_s3a_lvl_ao = {
.pd = { .name = "cx_ao", },
.peer = &cx_s3a_lvl,
.active_only = true,
.res_type = RPMPD_SMPA,
.res_id = 3,
.key = KEY_LEVEL,
};
static struct rpmpd cx_rwcx0_vfl = {
.pd = { .name = "cx_vfl", },
.res_type = RPMPD_RWCX,
.res_id = 0,
.key = KEY_FLOOR_LEVEL,
};
static struct rpmpd cx_rwsc2_vfl = {
.pd = { .name = "cx_vfl", },
.res_type = RPMPD_RWSC,
.res_id = 2,
.key = KEY_FLOOR_LEVEL,
};
static struct rpmpd cx_s1a_vfc = {
.pd = { .name = "cx_vfc", },
.res_type = RPMPD_SMPA,
.res_id = 1,
.key = KEY_FLOOR_CORNER,
};
static struct rpmpd cx_s2a_vfc = {
.pd = { .name = "cx_vfc", },
.res_type = RPMPD_SMPA,
.res_id = 2,
.key = KEY_FLOOR_CORNER,
};
static struct rpmpd cx_s2a_vfl = {
.pd = { .name = "cx_vfl", },
.res_type = RPMPD_SMPA,
.res_id = 2,
.key = KEY_FLOOR_LEVEL,
};
static struct rpmpd cx_s3a_vfl = {
.pd = { .name = "cx_vfl", },
.res_type = RPMPD_SMPA,
.res_id = 3,
.key = KEY_FLOOR_LEVEL,
};
/* G(F)X */
static struct rpmpd gfx_s2b_corner = {
.pd = { .name = "gfx", },
.res_type = RPMPD_SMPB,
.res_id = 2,
.key = KEY_CORNER,
};
static struct rpmpd gfx_s2b_vfc = {
.pd = { .name = "gfx_vfc", },
.res_type = RPMPD_SMPB,
.res_id = 2,
.key = KEY_FLOOR_CORNER,
};
static struct rpmpd mx_rwmx0_lvl;
static struct rpmpd gx_rwgx0_lvl_ao;
static struct rpmpd gx_rwgx0_lvl = {
.pd = { .name = "gx", },
.peer = &gx_rwgx0_lvl_ao,
.res_type = RPMPD_RWGX,
.parent = &mx_rwmx0_lvl.pd,
.res_id = 0,
.key = KEY_LEVEL,
};
static struct rpmpd mx_rwmx0_lvl_ao;
static struct rpmpd gx_rwgx0_lvl_ao = {
.pd = { .name = "gx_ao", },
.peer = &gx_rwgx0_lvl,
.parent = &mx_rwmx0_lvl_ao.pd,
.active_only = true,
.res_type = RPMPD_RWGX,
.res_id = 0,
.key = KEY_LEVEL,
};
/* MX */
static struct rpmpd mx_l3a_corner_ao;
static struct rpmpd mx_l3a_corner = {
.pd = { .name = "mx", },
.peer = &mx_l3a_corner_ao,
.res_type = RPMPD_LDOA,
.res_id = 3,
.key = KEY_CORNER,
};
static struct rpmpd mx_l3a_corner_ao = {
.pd = { .name = "mx_ao", },
.peer = &mx_l3a_corner,
.active_only = true,
.res_type = RPMPD_LDOA,
.res_id = 3,
.key = KEY_CORNER,
};
static struct rpmpd mx_l12a_lvl_ao;
static struct rpmpd mx_l12a_lvl = {
.pd = { .name = "mx", },
.peer = &mx_l12a_lvl_ao,
.res_type = RPMPD_LDOA,
.res_id = 12,
.key = KEY_LEVEL,
};
static struct rpmpd mx_l12a_lvl_ao = {
.pd = { .name = "mx_ao", },
.peer = &mx_l12a_lvl,
.active_only = true,
.res_type = RPMPD_LDOA,
.res_id = 12,
.key = KEY_LEVEL,
};
static struct rpmpd mx_s2a_corner_ao;
static struct rpmpd mx_s2a_corner = {
.pd = { .name = "mx", },
.peer = &mx_s2a_corner_ao,
.res_type = RPMPD_SMPA,
.res_id = 2,
.key = KEY_CORNER,
};
static struct rpmpd mx_s2a_corner_ao = {
.pd = { .name = "mx_ao", },
.peer = &mx_s2a_corner,
.active_only = true,
.res_type = RPMPD_SMPA,
.res_id = 2,
.key = KEY_CORNER,
};
static struct rpmpd mx_rwmx0_lvl_ao;
static struct rpmpd mx_rwmx0_lvl = {
.pd = { .name = "mx", },
.peer = &mx_rwmx0_lvl_ao,
.res_type = RPMPD_RWMX,
.res_id = 0,
.key = KEY_LEVEL,
};
static struct rpmpd mx_rwmx0_lvl_ao = {
.pd = { .name = "mx_ao", },
.peer = &mx_rwmx0_lvl,
.active_only = true,
.res_type = RPMPD_RWMX,
.res_id = 0,
.key = KEY_LEVEL,
};
static struct rpmpd mx_s6a_lvl_ao;
static struct rpmpd mx_s6a_lvl = {
.pd = { .name = "mx", },
.peer = &mx_s6a_lvl_ao,
.res_type = RPMPD_SMPA,
.res_id = 6,
.key = KEY_LEVEL,
};
static struct rpmpd mx_s6a_lvl_ao = {
.pd = { .name = "mx_ao", },
.peer = &mx_s6a_lvl,
.active_only = true,
.res_type = RPMPD_SMPA,
.res_id = 6,
.key = KEY_LEVEL,
};
static struct rpmpd mx_s7a_lvl_ao;
static struct rpmpd mx_s7a_lvl = {
.pd = { .name = "mx", },
.peer = &mx_s7a_lvl_ao,
.res_type = RPMPD_SMPA,
.res_id = 7,
.key = KEY_LEVEL,
};
static struct rpmpd mx_s7a_lvl_ao = {
.pd = { .name = "mx_ao", },
.peer = &mx_s7a_lvl,
.active_only = true,
.res_type = RPMPD_SMPA,
.res_id = 7,
.key = KEY_LEVEL,
};
static struct rpmpd mx_l12a_vfl = {
.pd = { .name = "mx_vfl", },
.res_type = RPMPD_LDOA,
.res_id = 12,
.key = KEY_FLOOR_LEVEL,
};
static struct rpmpd mx_rwmx0_vfl = {
.pd = { .name = "mx_vfl", },
.res_type = RPMPD_RWMX,
.res_id = 0,
.key = KEY_FLOOR_LEVEL,
};
static struct rpmpd mx_rwsm6_vfl = {
.pd = { .name = "mx_vfl", },
.res_type = RPMPD_RWSM,
.res_id = 6,
.key = KEY_FLOOR_LEVEL,
};
/* MD */
static struct rpmpd md_s1a_corner_ao;
static struct rpmpd md_s1a_corner = {
.pd = { .name = "md", },
.peer = &md_s1a_corner_ao,
.res_type = RPMPD_SMPA,
.res_id = 1,
.key = KEY_CORNER,
};
static struct rpmpd md_s1a_corner_ao = {
.pd = { .name = "md_ao", },
.peer = &md_s1a_corner,
.active_only = true,
.res_type = RPMPD_SMPA,
.res_id = 1,
.key = KEY_CORNER,
};
static struct rpmpd md_s1a_lvl_ao;
static struct rpmpd md_s1a_lvl = {
.pd = { .name = "md", },
.peer = &md_s1a_lvl_ao,
.res_type = RPMPD_SMPA,
.res_id = 1,
.key = KEY_LEVEL,
};
static struct rpmpd md_s1a_lvl_ao = {
.pd = { .name = "md_ao", },
.peer = &md_s1a_lvl,
.active_only = true,
.res_type = RPMPD_SMPA,
.res_id = 1,
.key = KEY_LEVEL,
};
static struct rpmpd md_s1a_vfc = {
.pd = { .name = "md_vfc", },
.res_type = RPMPD_SMPA,
.res_id = 1,
.key = KEY_FLOOR_CORNER,
};
/* LPI_CX */
static struct rpmpd lpi_cx_rwlc0_lvl = {
.pd = { .name = "lpi_cx", },
.res_type = RPMPD_RWLC,
.res_id = 0,
.key = KEY_LEVEL,
};
static struct rpmpd lpi_cx_rwlc0_vfl = {
.pd = { .name = "lpi_cx_vfl", },
.res_type = RPMPD_RWLC,
.res_id = 0,
.key = KEY_FLOOR_LEVEL,
};
/* LPI_MX */
static struct rpmpd lpi_mx_rwlm0_lvl = {
.pd = { .name = "lpi_mx", },
.res_type = RPMPD_RWLM,
.res_id = 0,
.key = KEY_LEVEL,
};
static struct rpmpd lpi_mx_rwlm0_vfl = {
.pd = { .name = "lpi_mx_vfl", },
.res_type = RPMPD_RWLM,
.res_id = 0,
.key = KEY_FLOOR_LEVEL,
};
/* SSC_CX */
static struct rpmpd ssc_cx_l26a_corner = {
.pd = { .name = "ssc_cx", },
.res_type = RPMPD_LDOA,
.res_id = 26,
.key = KEY_CORNER,
};
static struct rpmpd ssc_cx_rwlc0_lvl = {
.pd = { .name = "ssc_cx", },
.res_type = RPMPD_RWLC,
.res_id = 0,
.key = KEY_LEVEL,
};
static struct rpmpd ssc_cx_rwsc0_lvl = {
.pd = { .name = "ssc_cx", },
.res_type = RPMPD_RWSC,
.res_id = 0,
.key = KEY_LEVEL,
};
static struct rpmpd ssc_cx_l26a_vfc = {
.pd = { .name = "ssc_cx_vfc", },
.res_type = RPMPD_LDOA,
.res_id = 26,
.key = KEY_FLOOR_CORNER,
};
static struct rpmpd ssc_cx_rwlc0_vfl = {
.pd = { .name = "ssc_cx_vfl", },
.res_type = RPMPD_RWLC,
.res_id = 0,
.key = KEY_FLOOR_LEVEL,
};
static struct rpmpd ssc_cx_rwsc0_vfl = {
.pd = { .name = "ssc_cx_vfl", },
.res_type = RPMPD_RWSC,
.res_id = 0,
.key = KEY_FLOOR_LEVEL,
};
/* SSC_MX */
static struct rpmpd ssc_mx_rwlm0_lvl = {
.pd = { .name = "ssc_mx", },
.res_type = RPMPD_RWLM,
.res_id = 0,
.key = KEY_LEVEL,
};
static struct rpmpd ssc_mx_rwsm0_lvl = {
.pd = { .name = "ssc_mx", },
.res_type = RPMPD_RWSM,
.res_id = 0,
.key = KEY_LEVEL,
};
static struct rpmpd ssc_mx_rwlm0_vfl = {
.pd = { .name = "ssc_mx_vfl", },
.res_type = RPMPD_RWLM,
.res_id = 0,
.key = KEY_FLOOR_LEVEL,
};
static struct rpmpd ssc_mx_rwsm0_vfl = {
.pd = { .name = "ssc_mx_vfl", },
.res_type = RPMPD_RWSM,
.res_id = 0,
.key = KEY_FLOOR_LEVEL,
};
static struct rpmpd *mdm9607_rpmpds[] = {
[MDM9607_VDDCX] = &cx_s3a_lvl,
[MDM9607_VDDCX_AO] = &cx_s3a_lvl_ao,
[MDM9607_VDDCX_VFL] = &cx_s3a_vfl,
[MDM9607_VDDMX] = &mx_l12a_lvl,
[MDM9607_VDDMX_AO] = &mx_l12a_lvl_ao,
[MDM9607_VDDMX_VFL] = &mx_l12a_vfl,
};
static const struct rpmpd_desc mdm9607_desc = {
.rpmpds = mdm9607_rpmpds,
.num_pds = ARRAY_SIZE(mdm9607_rpmpds),
.max_state = RPM_SMD_LEVEL_TURBO,
};
static struct rpmpd *msm8226_rpmpds[] = {
[MSM8226_VDDCX] = &cx_s1a_corner,
[MSM8226_VDDCX_AO] = &cx_s1a_corner_ao,
[MSM8226_VDDCX_VFC] = &cx_s1a_vfc,
};
static const struct rpmpd_desc msm8226_desc = {
.rpmpds = msm8226_rpmpds,
.num_pds = ARRAY_SIZE(msm8226_rpmpds),
.max_state = MAX_CORNER_RPMPD_STATE,
};
static struct rpmpd *msm8939_rpmpds[] = {
[MSM8939_VDDMDCX] = &md_s1a_corner,
[MSM8939_VDDMDCX_AO] = &md_s1a_corner_ao,
[MSM8939_VDDMDCX_VFC] = &md_s1a_vfc,
[MSM8939_VDDCX] = &cx_s2a_corner,
[MSM8939_VDDCX_AO] = &cx_s2a_corner_ao,
[MSM8939_VDDCX_VFC] = &cx_s2a_vfc,
[MSM8939_VDDMX] = &mx_l3a_corner,
[MSM8939_VDDMX_AO] = &mx_l3a_corner_ao,
};
static const struct rpmpd_desc msm8939_desc = {
.rpmpds = msm8939_rpmpds,
.num_pds = ARRAY_SIZE(msm8939_rpmpds),
.max_state = MAX_CORNER_RPMPD_STATE,
};
static struct rpmpd *msm8916_rpmpds[] = {
[MSM8916_VDDCX] = &cx_s1a_corner,
[MSM8916_VDDCX_AO] = &cx_s1a_corner_ao,
[MSM8916_VDDCX_VFC] = &cx_s1a_vfc,
[MSM8916_VDDMX] = &mx_l3a_corner,
[MSM8916_VDDMX_AO] = &mx_l3a_corner_ao,
};
static const struct rpmpd_desc msm8916_desc = {
.rpmpds = msm8916_rpmpds,
.num_pds = ARRAY_SIZE(msm8916_rpmpds),
.max_state = MAX_CORNER_RPMPD_STATE,
};
static struct rpmpd *msm8953_rpmpds[] = {
[MSM8953_VDDMD] = &md_s1a_lvl,
[MSM8953_VDDMD_AO] = &md_s1a_lvl_ao,
[MSM8953_VDDCX] = &cx_s2a_lvl,
[MSM8953_VDDCX_AO] = &cx_s2a_lvl_ao,
[MSM8953_VDDCX_VFL] = &cx_s2a_vfl,
[MSM8953_VDDMX] = &mx_s7a_lvl,
[MSM8953_VDDMX_AO] = &mx_s7a_lvl_ao,
};
static const struct rpmpd_desc msm8953_desc = {
.rpmpds = msm8953_rpmpds,
.num_pds = ARRAY_SIZE(msm8953_rpmpds),
.max_state = RPM_SMD_LEVEL_TURBO,
};
static struct rpmpd *msm8976_rpmpds[] = {
[MSM8976_VDDCX] = &cx_s2a_lvl,
[MSM8976_VDDCX_AO] = &cx_s2a_lvl_ao,
[MSM8976_VDDCX_VFL] = &cx_rwsc2_vfl,
[MSM8976_VDDMX] = &mx_s6a_lvl,
[MSM8976_VDDMX_AO] = &mx_s6a_lvl_ao,
[MSM8976_VDDMX_VFL] = &mx_rwsm6_vfl,
};
static const struct rpmpd_desc msm8976_desc = {
.rpmpds = msm8976_rpmpds,
.num_pds = ARRAY_SIZE(msm8976_rpmpds),
.max_state = RPM_SMD_LEVEL_TURBO_HIGH,
};
static struct rpmpd *msm8994_rpmpds[] = {
[MSM8994_VDDCX] = &cx_s1a_corner,
[MSM8994_VDDCX_AO] = &cx_s1a_corner_ao,
[MSM8994_VDDCX_VFC] = &cx_s1a_vfc,
[MSM8994_VDDMX] = &mx_s2a_corner,
[MSM8994_VDDMX_AO] = &mx_s2a_corner_ao,
/* Attention! *Some* 8994 boards with pm8004 may use SMPC here! */
[MSM8994_VDDGFX] = &gfx_s2b_corner,
[MSM8994_VDDGFX_VFC] = &gfx_s2b_vfc,
};
static const struct rpmpd_desc msm8994_desc = {
.rpmpds = msm8994_rpmpds,
.num_pds = ARRAY_SIZE(msm8994_rpmpds),
.max_state = MAX_CORNER_RPMPD_STATE,
};
static struct rpmpd *msm8996_rpmpds[] = {
[MSM8996_VDDCX] = &cx_s1a_corner,
[MSM8996_VDDCX_AO] = &cx_s1a_corner_ao,
[MSM8996_VDDCX_VFC] = &cx_s1a_vfc,
[MSM8996_VDDMX] = &mx_s2a_corner,
[MSM8996_VDDMX_AO] = &mx_s2a_corner_ao,
[MSM8996_VDDSSCX] = &ssc_cx_l26a_corner,
[MSM8996_VDDSSCX_VFC] = &ssc_cx_l26a_vfc,
};
static const struct rpmpd_desc msm8996_desc = {
.rpmpds = msm8996_rpmpds,
.num_pds = ARRAY_SIZE(msm8996_rpmpds),
.max_state = MAX_CORNER_RPMPD_STATE,
};
static struct rpmpd *msm8998_rpmpds[] = {
[MSM8998_VDDCX] = &cx_rwcx0_lvl,
[MSM8998_VDDCX_AO] = &cx_rwcx0_lvl_ao,
[MSM8998_VDDCX_VFL] = &cx_rwcx0_vfl,
[MSM8998_VDDMX] = &mx_rwmx0_lvl,
[MSM8998_VDDMX_AO] = &mx_rwmx0_lvl_ao,
[MSM8998_VDDMX_VFL] = &mx_rwmx0_vfl,
[MSM8998_SSCCX] = &ssc_cx_rwsc0_lvl,
[MSM8998_SSCCX_VFL] = &ssc_cx_rwsc0_vfl,
[MSM8998_SSCMX] = &ssc_mx_rwsm0_lvl,
[MSM8998_SSCMX_VFL] = &ssc_mx_rwsm0_vfl,
};
static const struct rpmpd_desc msm8998_desc = {
.rpmpds = msm8998_rpmpds,
.num_pds = ARRAY_SIZE(msm8998_rpmpds),
.max_state = RPM_SMD_LEVEL_BINNING,
};
static struct rpmpd *qcs404_rpmpds[] = {
[QCS404_VDDMX] = &mx_rwmx0_lvl,
[QCS404_VDDMX_AO] = &mx_rwmx0_lvl_ao,
[QCS404_VDDMX_VFL] = &mx_rwmx0_vfl,
[QCS404_LPICX] = &lpi_cx_rwlc0_lvl,
[QCS404_LPICX_VFL] = &lpi_cx_rwlc0_vfl,
[QCS404_LPIMX] = &lpi_mx_rwlm0_lvl,
[QCS404_LPIMX_VFL] = &lpi_mx_rwlm0_vfl,
};
static const struct rpmpd_desc qcs404_desc = {
.rpmpds = qcs404_rpmpds,
.num_pds = ARRAY_SIZE(qcs404_rpmpds),
.max_state = RPM_SMD_LEVEL_BINNING,
};
static struct rpmpd *sdm660_rpmpds[] = {
[SDM660_VDDCX] = &cx_rwcx0_lvl,
[SDM660_VDDCX_AO] = &cx_rwcx0_lvl_ao,
[SDM660_VDDCX_VFL] = &cx_rwcx0_vfl,
[SDM660_VDDMX] = &mx_rwmx0_lvl,
[SDM660_VDDMX_AO] = &mx_rwmx0_lvl_ao,
[SDM660_VDDMX_VFL] = &mx_rwmx0_vfl,
[SDM660_SSCCX] = &ssc_cx_rwlc0_lvl,
[SDM660_SSCCX_VFL] = &ssc_cx_rwlc0_vfl,
[SDM660_SSCMX] = &ssc_mx_rwlm0_lvl,
[SDM660_SSCMX_VFL] = &ssc_mx_rwlm0_vfl,
};
static const struct rpmpd_desc sdm660_desc = {
.rpmpds = sdm660_rpmpds,
.num_pds = ARRAY_SIZE(sdm660_rpmpds),
.max_state = RPM_SMD_LEVEL_TURBO,
};
static struct rpmpd *sm6115_rpmpds[] = {
[SM6115_VDDCX] = &cx_rwcx0_lvl,
[SM6115_VDDCX_AO] = &cx_rwcx0_lvl_ao,
[SM6115_VDDCX_VFL] = &cx_rwcx0_vfl,
[SM6115_VDDMX] = &mx_rwmx0_lvl,
[SM6115_VDDMX_AO] = &mx_rwmx0_lvl_ao,
[SM6115_VDDMX_VFL] = &mx_rwmx0_vfl,
[SM6115_VDD_LPI_CX] = &lpi_cx_rwlc0_lvl,
[SM6115_VDD_LPI_MX] = &lpi_mx_rwlm0_lvl,
};
static const struct rpmpd_desc sm6115_desc = {
.rpmpds = sm6115_rpmpds,
.num_pds = ARRAY_SIZE(sm6115_rpmpds),
.max_state = RPM_SMD_LEVEL_TURBO_NO_CPR,
};
static struct rpmpd *sm6125_rpmpds[] = {
[SM6125_VDDCX] = &cx_rwcx0_lvl,
[SM6125_VDDCX_AO] = &cx_rwcx0_lvl_ao,
[SM6125_VDDCX_VFL] = &cx_rwcx0_vfl,
[SM6125_VDDMX] = &mx_rwmx0_lvl,
[SM6125_VDDMX_AO] = &mx_rwmx0_lvl_ao,
[SM6125_VDDMX_VFL] = &mx_rwmx0_vfl,
};
static const struct rpmpd_desc sm6125_desc = {
.rpmpds = sm6125_rpmpds,
.num_pds = ARRAY_SIZE(sm6125_rpmpds),
.max_state = RPM_SMD_LEVEL_BINNING,
};
static struct rpmpd *sm6375_rpmpds[] = {
[SM6375_VDDCX] = &cx_rwcx0_lvl,
[SM6375_VDDCX_AO] = &cx_rwcx0_lvl_ao,
[SM6375_VDDCX_VFL] = &cx_rwcx0_vfl,
[SM6375_VDDMX] = &mx_rwmx0_lvl,
[SM6375_VDDMX_AO] = &mx_rwmx0_lvl_ao,
[SM6375_VDDMX_VFL] = &mx_rwmx0_vfl,
[SM6375_VDDGX] = &gx_rwgx0_lvl,
[SM6375_VDDGX_AO] = &gx_rwgx0_lvl_ao,
[SM6375_VDD_LPI_CX] = &lpi_cx_rwlc0_lvl,
[SM6375_VDD_LPI_MX] = &lpi_mx_rwlm0_lvl,
};
static const struct rpmpd_desc sm6375_desc = {
.rpmpds = sm6375_rpmpds,
.num_pds = ARRAY_SIZE(sm6375_rpmpds),
.max_state = RPM_SMD_LEVEL_TURBO_NO_CPR,
};
static struct rpmpd *qcm2290_rpmpds[] = {
[QCM2290_VDDCX] = &cx_rwcx0_lvl,
[QCM2290_VDDCX_AO] = &cx_rwcx0_lvl_ao,
[QCM2290_VDDCX_VFL] = &cx_rwcx0_vfl,
[QCM2290_VDDMX] = &mx_rwmx0_lvl,
[QCM2290_VDDMX_AO] = &mx_rwmx0_lvl_ao,
[QCM2290_VDDMX_VFL] = &mx_rwmx0_vfl,
[QCM2290_VDD_LPI_CX] = &lpi_cx_rwlc0_lvl,
[QCM2290_VDD_LPI_MX] = &lpi_mx_rwlm0_lvl,
};
static const struct rpmpd_desc qcm2290_desc = {
.rpmpds = qcm2290_rpmpds,
.num_pds = ARRAY_SIZE(qcm2290_rpmpds),
.max_state = RPM_SMD_LEVEL_TURBO_NO_CPR,
};
static const struct of_device_id rpmpd_match_table[] = {
{ .compatible = "qcom,mdm9607-rpmpd", .data = &mdm9607_desc },
{ .compatible = "qcom,msm8226-rpmpd", .data = &msm8226_desc },
{ .compatible = "qcom,msm8909-rpmpd", .data = &msm8916_desc },
{ .compatible = "qcom,msm8916-rpmpd", .data = &msm8916_desc },
{ .compatible = "qcom,msm8939-rpmpd", .data = &msm8939_desc },
{ .compatible = "qcom,msm8953-rpmpd", .data = &msm8953_desc },
{ .compatible = "qcom,msm8976-rpmpd", .data = &msm8976_desc },
{ .compatible = "qcom,msm8994-rpmpd", .data = &msm8994_desc },
{ .compatible = "qcom,msm8996-rpmpd", .data = &msm8996_desc },
{ .compatible = "qcom,msm8998-rpmpd", .data = &msm8998_desc },
{ .compatible = "qcom,qcm2290-rpmpd", .data = &qcm2290_desc },
{ .compatible = "qcom,qcs404-rpmpd", .data = &qcs404_desc },
{ .compatible = "qcom,sdm660-rpmpd", .data = &sdm660_desc },
{ .compatible = "qcom,sm6115-rpmpd", .data = &sm6115_desc },
{ .compatible = "qcom,sm6125-rpmpd", .data = &sm6125_desc },
{ .compatible = "qcom,sm6375-rpmpd", .data = &sm6375_desc },
{ }
};
MODULE_DEVICE_TABLE(of, rpmpd_match_table);
static int rpmpd_send_enable(struct rpmpd *pd, bool enable)
{
struct rpmpd_req req = {
.key = KEY_ENABLE,
.nbytes = cpu_to_le32(sizeof(u32)),
.value = cpu_to_le32(enable),
};
return qcom_rpm_smd_write(pd->rpm, QCOM_SMD_RPM_ACTIVE_STATE,
pd->res_type, pd->res_id, &req, sizeof(req));
}
static int rpmpd_send_corner(struct rpmpd *pd, int state, unsigned int corner)
{
struct rpmpd_req req = {
.key = pd->key,
.nbytes = cpu_to_le32(sizeof(u32)),
.value = cpu_to_le32(corner),
};
return qcom_rpm_smd_write(pd->rpm, state, pd->res_type, pd->res_id,
&req, sizeof(req));
};
static void to_active_sleep(struct rpmpd *pd, unsigned int corner,
unsigned int *active, unsigned int *sleep)
{
*active = corner;
if (pd->active_only)
*sleep = 0;
else
*sleep = *active;
}
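/*
 * Send the aggregated vote for a domain and its active-only peer to
 * both RPM voting sets: the ACTIVE set gets the max of the two active
 * corners, the SLEEP set the max of the two sleep corners (an
 * active-only domain contributes 0 to the SLEEP set).
 */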
static int rpmpd_aggregate_corner(struct rpmpd *pd)
{
int ret;
struct rpmpd *peer = pd->peer;
unsigned int active_corner, sleep_corner;
unsigned int this_active_corner = 0, this_sleep_corner = 0;
unsigned int peer_active_corner = 0, peer_sleep_corner = 0;
/* Clamp to the highest corner/level if sync_state isn't done yet */
if (!pd->state_synced)
this_active_corner = this_sleep_corner = pd->max_state - 1;
else
to_active_sleep(pd, pd->corner, &this_active_corner, &this_sleep_corner);
if (peer && peer->enabled)
to_active_sleep(peer, peer->corner, &peer_active_corner,
&peer_sleep_corner);
active_corner = max(this_active_corner, peer_active_corner);
ret = rpmpd_send_corner(pd, QCOM_SMD_RPM_ACTIVE_STATE, active_corner);
if (ret)
return ret;
sleep_corner = max(this_sleep_corner, peer_sleep_corner);
return rpmpd_send_corner(pd, QCOM_SMD_RPM_SLEEP_STATE, sleep_corner);
}
static int rpmpd_power_on(struct generic_pm_domain *domain)
{
int ret;
struct rpmpd *pd = domain_to_rpmpd(domain);
mutex_lock(&rpmpd_lock);
ret = rpmpd_send_enable(pd, true);
if (ret)
goto out;
pd->enabled = true;
if (pd->corner)
ret = rpmpd_aggregate_corner(pd);
out:
mutex_unlock(&rpmpd_lock);
return ret;
}
static int rpmpd_power_off(struct generic_pm_domain *domain)
{
int ret;
struct rpmpd *pd = domain_to_rpmpd(domain);
mutex_lock(&rpmpd_lock);
ret = rpmpd_send_enable(pd, false);
if (!ret)
pd->enabled = false;
mutex_unlock(&rpmpd_lock);
return ret;
}
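/*
 * Cache the requested performance state (clamped to max_state) and push
 * it to the RPM. Votes for disabled domains are deferred until power-on,
 * except for the voltage-floor (vfc/vfl) keys, which are always sent.
 */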
static int rpmpd_set_performance(struct generic_pm_domain *domain,
unsigned int state)
{
int ret = 0;
struct rpmpd *pd = domain_to_rpmpd(domain);
if (state > pd->max_state)
state = pd->max_state;
mutex_lock(&rpmpd_lock);
pd->corner = state;
/* Always send updates for vfc and vfl */
if (!pd->enabled && pd->key != cpu_to_le32(KEY_FLOOR_CORNER) &&
pd->key != cpu_to_le32(KEY_FLOOR_LEVEL))
goto out;
ret = rpmpd_aggregate_corner(pd);
out:
mutex_unlock(&rpmpd_lock);
return ret;
}
static unsigned int rpmpd_get_performance(struct generic_pm_domain *genpd,
struct dev_pm_opp *opp)
{
return dev_pm_opp_get_level(opp);
}
static int rpmpd_probe(struct platform_device *pdev)
{
int i;
size_t num;
struct genpd_onecell_data *data;
struct qcom_smd_rpm *rpm;
struct rpmpd **rpmpds;
const struct rpmpd_desc *desc;
rpm = dev_get_drvdata(pdev->dev.parent);
if (!rpm) {
dev_err(&pdev->dev, "Unable to retrieve handle to RPM\n");
return -ENODEV;
}
desc = of_device_get_match_data(&pdev->dev);
if (!desc)
return -EINVAL;
rpmpds = desc->rpmpds;
num = desc->num_pds;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->domains = devm_kcalloc(&pdev->dev, num, sizeof(*data->domains),
GFP_KERNEL);
if (!data->domains)
return -ENOMEM;
data->num_domains = num;
for (i = 0; i < num; i++) {
if (!rpmpds[i]) {
dev_warn(&pdev->dev, "rpmpds[] with empty entry at index=%d\n",
i);
continue;
}
rpmpds[i]->rpm = rpm;
rpmpds[i]->max_state = desc->max_state;
rpmpds[i]->pd.power_off = rpmpd_power_off;
rpmpds[i]->pd.power_on = rpmpd_power_on;
rpmpds[i]->pd.set_performance_state = rpmpd_set_performance;
rpmpds[i]->pd.opp_to_performance_state = rpmpd_get_performance;
pm_genpd_init(&rpmpds[i]->pd, NULL, true);
data->domains[i] = &rpmpds[i]->pd;
}
/* Add subdomains */
for (i = 0; i < num; i++) {
if (!rpmpds[i])
continue;
if (rpmpds[i]->parent)
pm_genpd_add_subdomain(rpmpds[i]->parent, &rpmpds[i]->pd);
}
return of_genpd_add_provider_onecell(pdev->dev.of_node, data);
}
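/*
 * Once all consumers have probed, drop the boot-time clamp applied in
 * rpmpd_aggregate_corner() and re-send each domain's vote based on the
 * actually requested corner (0 for domains that were never enabled).
 */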
static void rpmpd_sync_state(struct device *dev)
{
const struct rpmpd_desc *desc = of_device_get_match_data(dev);
struct rpmpd **rpmpds = desc->rpmpds;
struct rpmpd *pd;
unsigned int i;
int ret;
mutex_lock(&rpmpd_lock);
for (i = 0; i < desc->num_pds; i++) {
pd = rpmpds[i];
if (!pd)
continue;
pd->state_synced = true;
if (!pd->enabled)
pd->corner = 0;
ret = rpmpd_aggregate_corner(pd);
if (ret)
dev_err(dev, "failed to sync %s: %d\n", pd->pd.name, ret);
}
mutex_unlock(&rpmpd_lock);
}
static struct platform_driver rpmpd_driver = {
.driver = {
.name = "qcom-rpmpd",
.of_match_table = rpmpd_match_table,
.suppress_bind_attrs = true,
.sync_state = rpmpd_sync_state,
},
.probe = rpmpd_probe,
};
static int __init rpmpd_init(void)
{
return platform_driver_register(&rpmpd_driver);
}
core_initcall(rpmpd_init);
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. RPM Power Domain Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/pmdomain/qcom/rpmpd.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Rockchip Generic power domain support.
*
* Copyright (c) 2015 ROCKCHIP, Co. Ltd.
*/
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/of_address.h>
#include <linux/of_clk.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <soc/rockchip/pm_domains.h>
#include <dt-bindings/power/px30-power.h>
#include <dt-bindings/power/rockchip,rv1126-power.h>
#include <dt-bindings/power/rk3036-power.h>
#include <dt-bindings/power/rk3066-power.h>
#include <dt-bindings/power/rk3128-power.h>
#include <dt-bindings/power/rk3188-power.h>
#include <dt-bindings/power/rk3228-power.h>
#include <dt-bindings/power/rk3288-power.h>
#include <dt-bindings/power/rk3328-power.h>
#include <dt-bindings/power/rk3366-power.h>
#include <dt-bindings/power/rk3368-power.h>
#include <dt-bindings/power/rk3399-power.h>
#include <dt-bindings/power/rk3568-power.h>
#include <dt-bindings/power/rk3588-power.h>
struct rockchip_domain_info {
const char *name;
int pwr_mask;
int status_mask;
int req_mask;
int idle_mask;
int ack_mask;
bool active_wakeup;
int pwr_w_mask;
int req_w_mask;
int mem_status_mask;
int repair_status_mask;
u32 pwr_offset;
u32 mem_offset;
u32 req_offset;
};
struct rockchip_pmu_info {
u32 pwr_offset;
u32 status_offset;
u32 req_offset;
u32 idle_offset;
u32 ack_offset;
u32 mem_pwr_offset;
u32 chain_status_offset;
u32 mem_status_offset;
u32 repair_status_offset;
u32 core_pwrcnt_offset;
u32 gpu_pwrcnt_offset;
unsigned int core_power_transition_time;
unsigned int gpu_power_transition_time;
int num_domains;
const struct rockchip_domain_info *domain_info;
};
#define MAX_QOS_REGS_NUM 5
#define QOS_PRIORITY 0x08
#define QOS_MODE 0x0c
#define QOS_BANDWIDTH 0x10
#define QOS_SATURATION 0x14
#define QOS_EXTCONTROL 0x18
struct rockchip_pm_domain {
struct generic_pm_domain genpd;
const struct rockchip_domain_info *info;
struct rockchip_pmu *pmu;
int num_qos;
struct regmap **qos_regmap;
u32 *qos_save_regs[MAX_QOS_REGS_NUM];
int num_clks;
struct clk_bulk_data *clks;
};
struct rockchip_pmu {
struct device *dev;
struct regmap *regmap;
const struct rockchip_pmu_info *info;
struct mutex mutex; /* mutex lock for pmu */
struct genpd_onecell_data genpd_data;
struct generic_pm_domain *domains[];
};
#define to_rockchip_pd(gpd) container_of(gpd, struct rockchip_pm_domain, genpd)
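/*
 * The DOMAIN* helpers below fill in struct rockchip_domain_info:
 * pwr/status are the power-down control and status bits, req/idle/ack
 * the bus-idle request, idle-status and idle-ack bits, and wakeup
 * selects GENPD_FLAG_ACTIVE_WAKEUP for the domain. The *_w_mask
 * variants are for registers with write-enable bits in the upper
 * half-word.
 */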
#define DOMAIN(_name, pwr, status, req, idle, ack, wakeup) \
{ \
.name = _name, \
.pwr_mask = (pwr), \
.status_mask = (status), \
.req_mask = (req), \
.idle_mask = (idle), \
.ack_mask = (ack), \
.active_wakeup = (wakeup), \
}
#define DOMAIN_M(_name, pwr, status, req, idle, ack, wakeup) \
{ \
.name = _name, \
.pwr_w_mask = (pwr) << 16, \
.pwr_mask = (pwr), \
.status_mask = (status), \
.req_w_mask = (req) << 16, \
.req_mask = (req), \
.idle_mask = (idle), \
.ack_mask = (ack), \
.active_wakeup = wakeup, \
}
#define DOMAIN_M_O_R(_name, p_offset, pwr, status, m_offset, m_status, r_status, r_offset, req, idle, ack, wakeup) \
{ \
.name = _name, \
.pwr_offset = p_offset, \
.pwr_w_mask = (pwr) << 16, \
.pwr_mask = (pwr), \
.status_mask = (status), \
.mem_offset = m_offset, \
.mem_status_mask = (m_status), \
.repair_status_mask = (r_status), \
.req_offset = r_offset, \
.req_w_mask = (req) << 16, \
.req_mask = (req), \
.idle_mask = (idle), \
.ack_mask = (ack), \
.active_wakeup = wakeup, \
}
#define DOMAIN_RK3036(_name, req, ack, idle, wakeup) \
{ \
.name = _name, \
.req_mask = (req), \
.req_w_mask = (req) << 16, \
.ack_mask = (ack), \
.idle_mask = (idle), \
.active_wakeup = wakeup, \
}
#define DOMAIN_PX30(name, pwr, status, req, wakeup) \
DOMAIN_M(name, pwr, status, req, (req) << 16, req, wakeup)
#define DOMAIN_RV1126(name, pwr, req, idle, wakeup) \
DOMAIN_M(name, pwr, pwr, req, idle, idle, wakeup)
#define DOMAIN_RK3288(name, pwr, status, req, wakeup) \
DOMAIN(name, pwr, status, req, req, (req) << 16, wakeup)
#define DOMAIN_RK3328(name, pwr, status, req, wakeup) \
DOMAIN_M(name, pwr, pwr, req, (req) << 10, req, wakeup)
#define DOMAIN_RK3368(name, pwr, status, req, wakeup) \
DOMAIN(name, pwr, status, req, (req) << 16, req, wakeup)
#define DOMAIN_RK3399(name, pwr, status, req, wakeup) \
DOMAIN(name, pwr, status, req, req, req, wakeup)
#define DOMAIN_RK3568(name, pwr, req, wakeup) \
DOMAIN_M(name, pwr, pwr, req, req, req, wakeup)
/*
* Dynamic Memory Controller may need to coordinate with us -- see
* rockchip_pmu_block().
*
* dmc_pmu_mutex protects registration-time races, so DMC driver doesn't try to
* block() while we're initializing the PMU.
*/
static DEFINE_MUTEX(dmc_pmu_mutex);
static struct rockchip_pmu *dmc_pmu;
/*
* Block PMU transitions and make sure they don't interfere with ARM Trusted
* Firmware operations. There are two conflicts, noted in the comments below.
*
* Caller must unblock PMU transitions via rockchip_pmu_unblock().
*/
int rockchip_pmu_block(void)
{
struct rockchip_pmu *pmu;
struct generic_pm_domain *genpd;
struct rockchip_pm_domain *pd;
int i, ret;
mutex_lock(&dmc_pmu_mutex);
/* No PMU (yet)? Then we just block rockchip_pmu_probe(). */
if (!dmc_pmu)
return 0;
pmu = dmc_pmu;
/*
* mutex blocks all idle transitions: we can't touch the
* PMU_BUS_IDLE_REQ (our ".idle_offset") register while ARM Trusted
* Firmware might be using it.
*/
mutex_lock(&pmu->mutex);
/*
* Power domain clocks: Per Rockchip, we *must* keep certain clocks
* enabled for the duration of power-domain transitions. Most
* transitions are handled by this driver, but some cases (in
* particular, DRAM DVFS / memory-controller idle) must be handled by
* firmware. Firmware can handle most clock management via a special
* "ungate" register (PMU_CRU_GATEDIS_CON0), but unfortunately, this
* doesn't handle PLLs. We can assist this transition by doing the
* clock management on behalf of firmware.
*/
for (i = 0; i < pmu->genpd_data.num_domains; i++) {
genpd = pmu->genpd_data.domains[i];
if (genpd) {
pd = to_rockchip_pd(genpd);
ret = clk_bulk_enable(pd->num_clks, pd->clks);
if (ret < 0) {
dev_err(pmu->dev,
"failed to enable clks for domain '%s': %d\n",
genpd->name, ret);
goto err;
}
}
}
return 0;
err:
for (i = i - 1; i >= 0; i--) {
genpd = pmu->genpd_data.domains[i];
if (genpd) {
pd = to_rockchip_pd(genpd);
clk_bulk_disable(pd->num_clks, pd->clks);
}
}
mutex_unlock(&pmu->mutex);
mutex_unlock(&dmc_pmu_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(rockchip_pmu_block);
/* Unblock PMU transitions. */
void rockchip_pmu_unblock(void)
{
struct rockchip_pmu *pmu;
struct generic_pm_domain *genpd;
struct rockchip_pm_domain *pd;
int i;
if (dmc_pmu) {
pmu = dmc_pmu;
for (i = 0; i < pmu->genpd_data.num_domains; i++) {
genpd = pmu->genpd_data.domains[i];
if (genpd) {
pd = to_rockchip_pd(genpd);
clk_bulk_disable(pd->num_clks, pd->clks);
}
}
mutex_unlock(&pmu->mutex);
}
mutex_unlock(&dmc_pmu_mutex);
}
EXPORT_SYMBOL_GPL(rockchip_pmu_unblock);
#define DOMAIN_RK3588(name, p_offset, pwr, status, m_offset, m_status, r_status, r_offset, req, idle, wakeup) \
DOMAIN_M_O_R(name, p_offset, pwr, status, m_offset, m_status, r_status, r_offset, req, idle, idle, wakeup)
static bool rockchip_pmu_domain_is_idle(struct rockchip_pm_domain *pd)
{
struct rockchip_pmu *pmu = pd->pmu;
const struct rockchip_domain_info *pd_info = pd->info;
unsigned int val;
regmap_read(pmu->regmap, pmu->info->idle_offset, &val);
return (val & pd_info->idle_mask) == pd_info->idle_mask;
}
static unsigned int rockchip_pmu_read_ack(struct rockchip_pmu *pmu)
{
unsigned int val;
regmap_read(pmu->regmap, pmu->info->ack_offset, &val);
return val;
}
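/*
 * Ask the PMU to idle (or un-idle) the bus interface (NIU) of a power
 * domain and wait for both the ack bit and the idle status to reflect
 * the request.
 */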
static int rockchip_pmu_set_idle_request(struct rockchip_pm_domain *pd,
bool idle)
{
const struct rockchip_domain_info *pd_info = pd->info;
struct generic_pm_domain *genpd = &pd->genpd;
struct rockchip_pmu *pmu = pd->pmu;
u32 pd_req_offset = pd_info->req_offset;
unsigned int target_ack;
unsigned int val;
bool is_idle;
int ret;
if (pd_info->req_mask == 0)
return 0;
else if (pd_info->req_w_mask)
regmap_write(pmu->regmap, pmu->info->req_offset + pd_req_offset,
idle ? (pd_info->req_mask | pd_info->req_w_mask) :
pd_info->req_w_mask);
else
regmap_update_bits(pmu->regmap, pmu->info->req_offset + pd_req_offset,
pd_info->req_mask, idle ? -1U : 0);
wmb();
	/* Wait until the idle request is acknowledged */
target_ack = idle ? pd_info->ack_mask : 0;
ret = readx_poll_timeout_atomic(rockchip_pmu_read_ack, pmu, val,
(val & pd_info->ack_mask) == target_ack,
0, 10000);
if (ret) {
dev_err(pmu->dev,
"failed to get ack on domain '%s', val=0x%x\n",
genpd->name, val);
return ret;
}
ret = readx_poll_timeout_atomic(rockchip_pmu_domain_is_idle, pd,
is_idle, is_idle == idle, 0, 10000);
if (ret) {
dev_err(pmu->dev,
"failed to set idle on domain '%s', val=%d\n",
genpd->name, is_idle);
return ret;
}
return 0;
}
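/*
 * Save/restore the per-port QoS settings (priority, mode, bandwidth,
 * saturation, extcontrol) around domain power transitions; see
 * rockchip_pd_power().
 */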
static int rockchip_pmu_save_qos(struct rockchip_pm_domain *pd)
{
int i;
for (i = 0; i < pd->num_qos; i++) {
regmap_read(pd->qos_regmap[i],
QOS_PRIORITY,
&pd->qos_save_regs[0][i]);
regmap_read(pd->qos_regmap[i],
QOS_MODE,
&pd->qos_save_regs[1][i]);
regmap_read(pd->qos_regmap[i],
QOS_BANDWIDTH,
&pd->qos_save_regs[2][i]);
regmap_read(pd->qos_regmap[i],
QOS_SATURATION,
&pd->qos_save_regs[3][i]);
regmap_read(pd->qos_regmap[i],
QOS_EXTCONTROL,
&pd->qos_save_regs[4][i]);
}
return 0;
}
static int rockchip_pmu_restore_qos(struct rockchip_pm_domain *pd)
{
int i;
for (i = 0; i < pd->num_qos; i++) {
regmap_write(pd->qos_regmap[i],
QOS_PRIORITY,
pd->qos_save_regs[0][i]);
regmap_write(pd->qos_regmap[i],
QOS_MODE,
pd->qos_save_regs[1][i]);
regmap_write(pd->qos_regmap[i],
QOS_BANDWIDTH,
pd->qos_save_regs[2][i]);
regmap_write(pd->qos_regmap[i],
QOS_SATURATION,
pd->qos_save_regs[3][i]);
regmap_write(pd->qos_regmap[i],
QOS_EXTCONTROL,
pd->qos_save_regs[4][i]);
}
return 0;
}
static bool rockchip_pmu_domain_is_on(struct rockchip_pm_domain *pd)
{
struct rockchip_pmu *pmu = pd->pmu;
unsigned int val;
if (pd->info->repair_status_mask) {
regmap_read(pmu->regmap, pmu->info->repair_status_offset, &val);
/* 1'b1: power on, 1'b0: power off */
return val & pd->info->repair_status_mask;
}
/* check idle status for idle-only domains */
if (pd->info->status_mask == 0)
return !rockchip_pmu_domain_is_idle(pd);
regmap_read(pmu->regmap, pmu->info->status_offset, &val);
/* 1'b0: power on, 1'b1: power off */
return !(val & pd->info->status_mask);
}
static bool rockchip_pmu_domain_is_mem_on(struct rockchip_pm_domain *pd)
{
struct rockchip_pmu *pmu = pd->pmu;
unsigned int val;
regmap_read(pmu->regmap,
pmu->info->mem_status_offset + pd->info->mem_offset, &val);
/* 1'b0: power on, 1'b1: power off */
return !(val & pd->info->mem_status_mask);
}
static bool rockchip_pmu_domain_is_chain_on(struct rockchip_pm_domain *pd)
{
struct rockchip_pmu *pmu = pd->pmu;
unsigned int val;
regmap_read(pmu->regmap,
pmu->info->chain_status_offset + pd->info->mem_offset, &val);
/* 1'b1: power on, 1'b0: power off */
return val & pd->info->mem_status_mask;
}
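/*
 * Cycle the domain's memory power switch: wait for the power-switch
 * chain to report on, briefly force the memory off via the mem_pwr
 * register and then release it again, polling the memory status after
 * each step.
 */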
static int rockchip_pmu_domain_mem_reset(struct rockchip_pm_domain *pd)
{
struct rockchip_pmu *pmu = pd->pmu;
struct generic_pm_domain *genpd = &pd->genpd;
bool is_on;
int ret = 0;
ret = readx_poll_timeout_atomic(rockchip_pmu_domain_is_chain_on, pd, is_on,
is_on == true, 0, 10000);
if (ret) {
dev_err(pmu->dev,
"failed to get chain status '%s', target_on=1, val=%d\n",
genpd->name, is_on);
goto error;
}
udelay(20);
regmap_write(pmu->regmap, pmu->info->mem_pwr_offset + pd->info->pwr_offset,
(pd->info->pwr_mask | pd->info->pwr_w_mask));
wmb();
ret = readx_poll_timeout_atomic(rockchip_pmu_domain_is_mem_on, pd, is_on,
is_on == false, 0, 10000);
if (ret) {
dev_err(pmu->dev,
"failed to get mem status '%s', target_on=0, val=%d\n",
genpd->name, is_on);
goto error;
}
regmap_write(pmu->regmap, pmu->info->mem_pwr_offset + pd->info->pwr_offset,
pd->info->pwr_w_mask);
wmb();
ret = readx_poll_timeout_atomic(rockchip_pmu_domain_is_mem_on, pd, is_on,
is_on == true, 0, 10000);
if (ret) {
dev_err(pmu->dev,
"failed to get mem status '%s', target_on=1, val=%d\n",
genpd->name, is_on);
}
error:
return ret;
}
static void rockchip_do_pmu_set_power_domain(struct rockchip_pm_domain *pd,
bool on)
{
struct rockchip_pmu *pmu = pd->pmu;
struct generic_pm_domain *genpd = &pd->genpd;
u32 pd_pwr_offset = pd->info->pwr_offset;
bool is_on, is_mem_on = false;
if (pd->info->pwr_mask == 0)
return;
if (on && pd->info->mem_status_mask)
is_mem_on = rockchip_pmu_domain_is_mem_on(pd);
if (pd->info->pwr_w_mask)
regmap_write(pmu->regmap, pmu->info->pwr_offset + pd_pwr_offset,
on ? pd->info->pwr_w_mask :
(pd->info->pwr_mask | pd->info->pwr_w_mask));
else
regmap_update_bits(pmu->regmap, pmu->info->pwr_offset + pd_pwr_offset,
pd->info->pwr_mask, on ? 0 : -1U);
wmb();
if (is_mem_on && rockchip_pmu_domain_mem_reset(pd))
return;
if (readx_poll_timeout_atomic(rockchip_pmu_domain_is_on, pd, is_on,
is_on == on, 0, 10000)) {
dev_err(pmu->dev,
"failed to set domain '%s', val=%d\n",
genpd->name, is_on);
return;
}
}
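/*
 * Common power on/off path: with the domain clocks enabled for the
 * duration of the transition, save the QoS settings and idle the bus
 * before powering down, or de-assert idle and restore QoS after
 * powering up.
 */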
static int rockchip_pd_power(struct rockchip_pm_domain *pd, bool power_on)
{
struct rockchip_pmu *pmu = pd->pmu;
int ret;
mutex_lock(&pmu->mutex);
if (rockchip_pmu_domain_is_on(pd) != power_on) {
ret = clk_bulk_enable(pd->num_clks, pd->clks);
if (ret < 0) {
dev_err(pmu->dev, "failed to enable clocks\n");
mutex_unlock(&pmu->mutex);
return ret;
}
if (!power_on) {
rockchip_pmu_save_qos(pd);
/* if powering down, idle request to NIU first */
rockchip_pmu_set_idle_request(pd, true);
}
rockchip_do_pmu_set_power_domain(pd, power_on);
if (power_on) {
/* if powering up, leave idle mode */
rockchip_pmu_set_idle_request(pd, false);
rockchip_pmu_restore_qos(pd);
}
clk_bulk_disable(pd->num_clks, pd->clks);
}
mutex_unlock(&pmu->mutex);
return 0;
}
static int rockchip_pd_power_on(struct generic_pm_domain *domain)
{
struct rockchip_pm_domain *pd = to_rockchip_pd(domain);
return rockchip_pd_power(pd, true);
}
static int rockchip_pd_power_off(struct generic_pm_domain *domain)
{
struct rockchip_pm_domain *pd = to_rockchip_pd(domain);
return rockchip_pd_power(pd, false);
}
static int rockchip_pd_attach_dev(struct generic_pm_domain *genpd,
struct device *dev)
{
struct clk *clk;
int i;
int error;
dev_dbg(dev, "attaching to power domain '%s'\n", genpd->name);
error = pm_clk_create(dev);
if (error) {
dev_err(dev, "pm_clk_create failed %d\n", error);
return error;
}
i = 0;
while ((clk = of_clk_get(dev->of_node, i++)) && !IS_ERR(clk)) {
dev_dbg(dev, "adding clock '%pC' to list of PM clocks\n", clk);
error = pm_clk_add_clk(dev, clk);
if (error) {
dev_err(dev, "pm_clk_add_clk failed %d\n", error);
clk_put(clk);
pm_clk_destroy(dev);
return error;
}
}
return 0;
}
static void rockchip_pd_detach_dev(struct generic_pm_domain *genpd,
struct device *dev)
{
dev_dbg(dev, "detaching from power domain '%s'\n", genpd->name);
pm_clk_destroy(dev);
}
static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
struct device_node *node)
{
const struct rockchip_domain_info *pd_info;
struct rockchip_pm_domain *pd;
struct device_node *qos_node;
int i, j;
u32 id;
int error;
error = of_property_read_u32(node, "reg", &id);
if (error) {
dev_err(pmu->dev,
"%pOFn: failed to retrieve domain id (reg): %d\n",
node, error);
return -EINVAL;
}
if (id >= pmu->info->num_domains) {
dev_err(pmu->dev, "%pOFn: invalid domain id %d\n",
node, id);
return -EINVAL;
}
/* RK3588 has domains with two parents (RKVDEC0/RKVDEC1) */
if (pmu->genpd_data.domains[id])
return 0;
pd_info = &pmu->info->domain_info[id];
if (!pd_info) {
dev_err(pmu->dev, "%pOFn: undefined domain id %d\n",
node, id);
return -EINVAL;
}
pd = devm_kzalloc(pmu->dev, sizeof(*pd), GFP_KERNEL);
if (!pd)
return -ENOMEM;
pd->info = pd_info;
pd->pmu = pmu;
pd->num_clks = of_clk_get_parent_count(node);
if (pd->num_clks > 0) {
pd->clks = devm_kcalloc(pmu->dev, pd->num_clks,
sizeof(*pd->clks), GFP_KERNEL);
if (!pd->clks)
return -ENOMEM;
} else {
dev_dbg(pmu->dev, "%pOFn: doesn't have clocks: %d\n",
node, pd->num_clks);
pd->num_clks = 0;
}
for (i = 0; i < pd->num_clks; i++) {
pd->clks[i].clk = of_clk_get(node, i);
if (IS_ERR(pd->clks[i].clk)) {
error = PTR_ERR(pd->clks[i].clk);
dev_err(pmu->dev,
"%pOFn: failed to get clk at index %d: %d\n",
node, i, error);
return error;
}
}
error = clk_bulk_prepare(pd->num_clks, pd->clks);
if (error)
goto err_put_clocks;
pd->num_qos = of_count_phandle_with_args(node, "pm_qos",
NULL);
if (pd->num_qos > 0) {
pd->qos_regmap = devm_kcalloc(pmu->dev, pd->num_qos,
sizeof(*pd->qos_regmap),
GFP_KERNEL);
if (!pd->qos_regmap) {
error = -ENOMEM;
goto err_unprepare_clocks;
}
for (j = 0; j < MAX_QOS_REGS_NUM; j++) {
pd->qos_save_regs[j] = devm_kcalloc(pmu->dev,
pd->num_qos,
sizeof(u32),
GFP_KERNEL);
if (!pd->qos_save_regs[j]) {
error = -ENOMEM;
goto err_unprepare_clocks;
}
}
for (j = 0; j < pd->num_qos; j++) {
qos_node = of_parse_phandle(node, "pm_qos", j);
if (!qos_node) {
error = -ENODEV;
goto err_unprepare_clocks;
}
pd->qos_regmap[j] = syscon_node_to_regmap(qos_node);
if (IS_ERR(pd->qos_regmap[j])) {
error = -ENODEV;
of_node_put(qos_node);
goto err_unprepare_clocks;
}
of_node_put(qos_node);
}
}
if (pd->info->name)
pd->genpd.name = pd->info->name;
else
pd->genpd.name = kbasename(node->full_name);
pd->genpd.power_off = rockchip_pd_power_off;
pd->genpd.power_on = rockchip_pd_power_on;
pd->genpd.attach_dev = rockchip_pd_attach_dev;
pd->genpd.detach_dev = rockchip_pd_detach_dev;
pd->genpd.flags = GENPD_FLAG_PM_CLK;
if (pd_info->active_wakeup)
pd->genpd.flags |= GENPD_FLAG_ACTIVE_WAKEUP;
pm_genpd_init(&pd->genpd, NULL,
!rockchip_pmu_domain_is_on(pd) ||
(pd->info->mem_status_mask && !rockchip_pmu_domain_is_mem_on(pd)));
pmu->genpd_data.domains[id] = &pd->genpd;
return 0;
err_unprepare_clocks:
clk_bulk_unprepare(pd->num_clks, pd->clks);
err_put_clocks:
clk_bulk_put(pd->num_clks, pd->clks);
return error;
}
static void rockchip_pm_remove_one_domain(struct rockchip_pm_domain *pd)
{
int ret;
/*
* We're in the error cleanup already, so we only complain,
* but won't emit another error on top of the original one.
*/
ret = pm_genpd_remove(&pd->genpd);
if (ret < 0)
dev_err(pd->pmu->dev, "failed to remove domain '%s' : %d - state may be inconsistent\n",
pd->genpd.name, ret);
clk_bulk_unprepare(pd->num_clks, pd->clks);
clk_bulk_put(pd->num_clks, pd->clks);
	/* protect the zeroing of pd->num_clks */
mutex_lock(&pd->pmu->mutex);
pd->num_clks = 0;
mutex_unlock(&pd->pmu->mutex);
/* devm will free our memory */
}
static void rockchip_pm_domain_cleanup(struct rockchip_pmu *pmu)
{
struct generic_pm_domain *genpd;
struct rockchip_pm_domain *pd;
int i;
for (i = 0; i < pmu->genpd_data.num_domains; i++) {
genpd = pmu->genpd_data.domains[i];
if (genpd) {
pd = to_rockchip_pd(genpd);
rockchip_pm_remove_one_domain(pd);
}
}
/* devm will free our memory */
}
static void rockchip_configure_pd_cnt(struct rockchip_pmu *pmu,
u32 domain_reg_offset,
unsigned int count)
{
/* First configure domain power down transition count ... */
regmap_write(pmu->regmap, domain_reg_offset, count);
/* ... and then power up count. */
regmap_write(pmu->regmap, domain_reg_offset + 4, count);
}
static int rockchip_pm_add_subdomain(struct rockchip_pmu *pmu,
struct device_node *parent)
{
struct device_node *np;
struct generic_pm_domain *child_domain, *parent_domain;
int error;
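/*
* Register each child node as a power domain, link it as a genpd
* subdomain of its parent and recurse to pick up nested domains.
*/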
for_each_child_of_node(parent, np) {
u32 idx;
error = of_property_read_u32(parent, "reg", &idx);
if (error) {
dev_err(pmu->dev,
"%pOFn: failed to retrieve domain id (reg): %d\n",
parent, error);
goto err_out;
}
parent_domain = pmu->genpd_data.domains[idx];
error = rockchip_pm_add_one_domain(pmu, np);
if (error) {
dev_err(pmu->dev, "failed to handle node %pOFn: %d\n",
np, error);
goto err_out;
}
error = of_property_read_u32(np, "reg", &idx);
if (error) {
dev_err(pmu->dev,
"%pOFn: failed to retrieve domain id (reg): %d\n",
np, error);
goto err_out;
}
child_domain = pmu->genpd_data.domains[idx];
error = pm_genpd_add_subdomain(parent_domain, child_domain);
if (error) {
dev_err(pmu->dev, "%s failed to add subdomain %s: %d\n",
parent_domain->name, child_domain->name, error);
goto err_out;
} else {
dev_dbg(pmu->dev, "%s add subdomain: %s\n",
parent_domain->name, child_domain->name);
}
rockchip_pm_add_subdomain(pmu, np);
}
return 0;
err_out:
of_node_put(np);
return error;
}
static int rockchip_pm_domain_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct device_node *node;
struct device *parent;
struct rockchip_pmu *pmu;
const struct of_device_id *match;
const struct rockchip_pmu_info *pmu_info;
int error;
if (!np) {
dev_err(dev, "device tree node not found\n");
return -ENODEV;
}
match = of_match_device(dev->driver->of_match_table, dev);
if (!match || !match->data) {
dev_err(dev, "missing pmu data\n");
return -EINVAL;
}
pmu_info = match->data;
pmu = devm_kzalloc(dev,
struct_size(pmu, domains, pmu_info->num_domains),
GFP_KERNEL);
if (!pmu)
return -ENOMEM;
pmu->dev = &pdev->dev;
mutex_init(&pmu->mutex);
pmu->info = pmu_info;
pmu->genpd_data.domains = pmu->domains;
pmu->genpd_data.num_domains = pmu_info->num_domains;
parent = dev->parent;
if (!parent) {
dev_err(dev, "no parent for syscon devices\n");
return -ENODEV;
}
pmu->regmap = syscon_node_to_regmap(parent->of_node);
if (IS_ERR(pmu->regmap)) {
dev_err(dev, "no regmap available\n");
return PTR_ERR(pmu->regmap);
}
/*
* Configure power up and down transition delays for CORE
* and GPU domains.
*/
if (pmu_info->core_power_transition_time)
rockchip_configure_pd_cnt(pmu, pmu_info->core_pwrcnt_offset,
pmu_info->core_power_transition_time);
if (pmu_info->gpu_pwrcnt_offset)
rockchip_configure_pd_cnt(pmu, pmu_info->gpu_pwrcnt_offset,
pmu_info->gpu_power_transition_time);
error = -ENODEV;
/*
* Prevent any rockchip_pmu_block() from racing with the remainder of
* setup (clocks, register initialization).
*/
mutex_lock(&dmc_pmu_mutex);
for_each_available_child_of_node(np, node) {
error = rockchip_pm_add_one_domain(pmu, node);
if (error) {
dev_err(dev, "failed to handle node %pOFn: %d\n",
node, error);
of_node_put(node);
goto err_out;
}
error = rockchip_pm_add_subdomain(pmu, node);
if (error < 0) {
dev_err(dev, "failed to handle subdomain node %pOFn: %d\n",
node, error);
of_node_put(node);
goto err_out;
}
}
if (error) {
dev_dbg(dev, "no power domains defined\n");
goto err_out;
}
error = of_genpd_add_provider_onecell(np, &pmu->genpd_data);
if (error) {
dev_err(dev, "failed to add provider: %d\n", error);
goto err_out;
}
/* We only expect one PMU. */
if (!WARN_ON_ONCE(dmc_pmu))
dmc_pmu = pmu;
mutex_unlock(&dmc_pmu_mutex);
return 0;
err_out:
rockchip_pm_domain_cleanup(pmu);
mutex_unlock(&dmc_pmu_mutex);
return error;
}
static const struct rockchip_domain_info px30_pm_domains[] = {
[PX30_PD_USB] = DOMAIN_PX30("usb", BIT(5), BIT(5), BIT(10), false),
[PX30_PD_SDCARD] = DOMAIN_PX30("sdcard", BIT(8), BIT(8), BIT(9), false),
[PX30_PD_GMAC] = DOMAIN_PX30("gmac", BIT(10), BIT(10), BIT(6), false),
[PX30_PD_MMC_NAND] = DOMAIN_PX30("mmc_nand", BIT(11), BIT(11), BIT(5), false),
[PX30_PD_VPU] = DOMAIN_PX30("vpu", BIT(12), BIT(12), BIT(14), false),
[PX30_PD_VO] = DOMAIN_PX30("vo", BIT(13), BIT(13), BIT(7), false),
[PX30_PD_VI] = DOMAIN_PX30("vi", BIT(14), BIT(14), BIT(8), false),
[PX30_PD_GPU] = DOMAIN_PX30("gpu", BIT(15), BIT(15), BIT(2), false),
};
static const struct rockchip_domain_info rv1126_pm_domains[] = {
[RV1126_PD_VEPU] = DOMAIN_RV1126("vepu", BIT(2), BIT(9), BIT(9), false),
[RV1126_PD_VI] = DOMAIN_RV1126("vi", BIT(4), BIT(6), BIT(6), false),
[RV1126_PD_VO] = DOMAIN_RV1126("vo", BIT(5), BIT(7), BIT(7), false),
[RV1126_PD_ISPP] = DOMAIN_RV1126("ispp", BIT(1), BIT(8), BIT(8), false),
[RV1126_PD_VDPU] = DOMAIN_RV1126("vdpu", BIT(3), BIT(10), BIT(10), false),
[RV1126_PD_NVM] = DOMAIN_RV1126("nvm", BIT(7), BIT(11), BIT(11), false),
[RV1126_PD_SDIO] = DOMAIN_RV1126("sdio", BIT(8), BIT(13), BIT(13), false),
[RV1126_PD_USB] = DOMAIN_RV1126("usb", BIT(9), BIT(15), BIT(15), false),
};
static const struct rockchip_domain_info rk3036_pm_domains[] = {
[RK3036_PD_MSCH] = DOMAIN_RK3036("msch", BIT(14), BIT(23), BIT(30), true),
[RK3036_PD_CORE] = DOMAIN_RK3036("core", BIT(13), BIT(17), BIT(24), false),
[RK3036_PD_PERI] = DOMAIN_RK3036("peri", BIT(12), BIT(18), BIT(25), false),
[RK3036_PD_VIO] = DOMAIN_RK3036("vio", BIT(11), BIT(19), BIT(26), false),
[RK3036_PD_VPU] = DOMAIN_RK3036("vpu", BIT(10), BIT(20), BIT(27), false),
[RK3036_PD_GPU] = DOMAIN_RK3036("gpu", BIT(9), BIT(21), BIT(28), false),
[RK3036_PD_SYS] = DOMAIN_RK3036("sys", BIT(8), BIT(22), BIT(29), false),
};
static const struct rockchip_domain_info rk3066_pm_domains[] = {
[RK3066_PD_GPU] = DOMAIN("gpu", BIT(9), BIT(9), BIT(3), BIT(24), BIT(29), false),
[RK3066_PD_VIDEO] = DOMAIN("video", BIT(8), BIT(8), BIT(4), BIT(23), BIT(28), false),
[RK3066_PD_VIO] = DOMAIN("vio", BIT(7), BIT(7), BIT(5), BIT(22), BIT(27), false),
[RK3066_PD_PERI] = DOMAIN("peri", BIT(6), BIT(6), BIT(2), BIT(25), BIT(30), false),
[RK3066_PD_CPU] = DOMAIN("cpu", 0, BIT(5), BIT(1), BIT(26), BIT(31), false),
};
static const struct rockchip_domain_info rk3128_pm_domains[] = {
[RK3128_PD_CORE] = DOMAIN_RK3288("core", BIT(0), BIT(0), BIT(4), false),
[RK3128_PD_MSCH] = DOMAIN_RK3288("msch", 0, 0, BIT(6), true),
[RK3128_PD_VIO] = DOMAIN_RK3288("vio", BIT(3), BIT(3), BIT(2), false),
[RK3128_PD_VIDEO] = DOMAIN_RK3288("video", BIT(2), BIT(2), BIT(1), false),
[RK3128_PD_GPU] = DOMAIN_RK3288("gpu", BIT(1), BIT(1), BIT(3), false),
};
static const struct rockchip_domain_info rk3188_pm_domains[] = {
[RK3188_PD_GPU] = DOMAIN("gpu", BIT(9), BIT(9), BIT(3), BIT(24), BIT(29), false),
[RK3188_PD_VIDEO] = DOMAIN("video", BIT(8), BIT(8), BIT(4), BIT(23), BIT(28), false),
[RK3188_PD_VIO] = DOMAIN("vio", BIT(7), BIT(7), BIT(5), BIT(22), BIT(27), false),
[RK3188_PD_PERI] = DOMAIN("peri", BIT(6), BIT(6), BIT(2), BIT(25), BIT(30), false),
[RK3188_PD_CPU] = DOMAIN("cpu", BIT(5), BIT(5), BIT(1), BIT(26), BIT(31), false),
};
static const struct rockchip_domain_info rk3228_pm_domains[] = {
[RK3228_PD_CORE] = DOMAIN_RK3036("core", BIT(0), BIT(0), BIT(16), true),
[RK3228_PD_MSCH] = DOMAIN_RK3036("msch", BIT(1), BIT(1), BIT(17), true),
[RK3228_PD_BUS] = DOMAIN_RK3036("bus", BIT(2), BIT(2), BIT(18), true),
[RK3228_PD_SYS] = DOMAIN_RK3036("sys", BIT(3), BIT(3), BIT(19), true),
[RK3228_PD_VIO] = DOMAIN_RK3036("vio", BIT(4), BIT(4), BIT(20), false),
[RK3228_PD_VOP] = DOMAIN_RK3036("vop", BIT(5), BIT(5), BIT(21), false),
[RK3228_PD_VPU] = DOMAIN_RK3036("vpu", BIT(6), BIT(6), BIT(22), false),
[RK3228_PD_RKVDEC] = DOMAIN_RK3036("vdec", BIT(7), BIT(7), BIT(23), false),
[RK3228_PD_GPU] = DOMAIN_RK3036("gpu", BIT(8), BIT(8), BIT(24), false),
[RK3228_PD_PERI] = DOMAIN_RK3036("peri", BIT(9), BIT(9), BIT(25), true),
[RK3228_PD_GMAC] = DOMAIN_RK3036("gmac", BIT(10), BIT(10), BIT(26), false),
};
static const struct rockchip_domain_info rk3288_pm_domains[] = {
[RK3288_PD_VIO] = DOMAIN_RK3288("vio", BIT(7), BIT(7), BIT(4), false),
[RK3288_PD_HEVC] = DOMAIN_RK3288("hevc", BIT(14), BIT(10), BIT(9), false),
[RK3288_PD_VIDEO] = DOMAIN_RK3288("video", BIT(8), BIT(8), BIT(3), false),
[RK3288_PD_GPU] = DOMAIN_RK3288("gpu", BIT(9), BIT(9), BIT(2), false),
};
static const struct rockchip_domain_info rk3328_pm_domains[] = {
[RK3328_PD_CORE] = DOMAIN_RK3328("core", 0, BIT(0), BIT(0), false),
[RK3328_PD_GPU] = DOMAIN_RK3328("gpu", 0, BIT(1), BIT(1), false),
[RK3328_PD_BUS] = DOMAIN_RK3328("bus", 0, BIT(2), BIT(2), true),
[RK3328_PD_MSCH] = DOMAIN_RK3328("msch", 0, BIT(3), BIT(3), true),
[RK3328_PD_PERI] = DOMAIN_RK3328("peri", 0, BIT(4), BIT(4), true),
[RK3328_PD_VIDEO] = DOMAIN_RK3328("video", 0, BIT(5), BIT(5), false),
[RK3328_PD_HEVC] = DOMAIN_RK3328("hevc", 0, BIT(6), BIT(6), false),
[RK3328_PD_VIO] = DOMAIN_RK3328("vio", 0, BIT(8), BIT(8), false),
[RK3328_PD_VPU] = DOMAIN_RK3328("vpu", 0, BIT(9), BIT(9), false),
};
static const struct rockchip_domain_info rk3366_pm_domains[] = {
[RK3366_PD_PERI] = DOMAIN_RK3368("peri", BIT(10), BIT(10), BIT(6), true),
[RK3366_PD_VIO] = DOMAIN_RK3368("vio", BIT(14), BIT(14), BIT(8), false),
[RK3366_PD_VIDEO] = DOMAIN_RK3368("video", BIT(13), BIT(13), BIT(7), false),
[RK3366_PD_RKVDEC] = DOMAIN_RK3368("vdec", BIT(11), BIT(11), BIT(7), false),
[RK3366_PD_WIFIBT] = DOMAIN_RK3368("wifibt", BIT(8), BIT(8), BIT(9), false),
[RK3366_PD_VPU] = DOMAIN_RK3368("vpu", BIT(12), BIT(12), BIT(7), false),
[RK3366_PD_GPU] = DOMAIN_RK3368("gpu", BIT(15), BIT(15), BIT(2), false),
};
static const struct rockchip_domain_info rk3368_pm_domains[] = {
[RK3368_PD_PERI] = DOMAIN_RK3368("peri", BIT(13), BIT(12), BIT(6), true),
[RK3368_PD_VIO] = DOMAIN_RK3368("vio", BIT(15), BIT(14), BIT(8), false),
[RK3368_PD_VIDEO] = DOMAIN_RK3368("video", BIT(14), BIT(13), BIT(7), false),
[RK3368_PD_GPU_0] = DOMAIN_RK3368("gpu_0", BIT(16), BIT(15), BIT(2), false),
[RK3368_PD_GPU_1] = DOMAIN_RK3368("gpu_1", BIT(17), BIT(16), BIT(2), false),
};
static const struct rockchip_domain_info rk3399_pm_domains[] = {
[RK3399_PD_TCPD0] = DOMAIN_RK3399("tcpd0", BIT(8), BIT(8), 0, false),
[RK3399_PD_TCPD1] = DOMAIN_RK3399("tcpd1", BIT(9), BIT(9), 0, false),
[RK3399_PD_CCI] = DOMAIN_RK3399("cci", BIT(10), BIT(10), 0, true),
[RK3399_PD_CCI0] = DOMAIN_RK3399("cci0", 0, 0, BIT(15), true),
[RK3399_PD_CCI1] = DOMAIN_RK3399("cci1", 0, 0, BIT(16), true),
[RK3399_PD_PERILP] = DOMAIN_RK3399("perilp", BIT(11), BIT(11), BIT(1), true),
[RK3399_PD_PERIHP] = DOMAIN_RK3399("perihp", BIT(12), BIT(12), BIT(2), true),
[RK3399_PD_CENTER] = DOMAIN_RK3399("center", BIT(13), BIT(13), BIT(14), true),
[RK3399_PD_VIO] = DOMAIN_RK3399("vio", BIT(14), BIT(14), BIT(17), false),
[RK3399_PD_GPU] = DOMAIN_RK3399("gpu", BIT(15), BIT(15), BIT(0), false),
[RK3399_PD_VCODEC] = DOMAIN_RK3399("vcodec", BIT(16), BIT(16), BIT(3), false),
[RK3399_PD_VDU] = DOMAIN_RK3399("vdu", BIT(17), BIT(17), BIT(4), false),
[RK3399_PD_RGA] = DOMAIN_RK3399("rga", BIT(18), BIT(18), BIT(5), false),
[RK3399_PD_IEP] = DOMAIN_RK3399("iep", BIT(19), BIT(19), BIT(6), false),
[RK3399_PD_VO] = DOMAIN_RK3399("vo", BIT(20), BIT(20), 0, false),
[RK3399_PD_VOPB] = DOMAIN_RK3399("vopb", 0, 0, BIT(7), false),
[RK3399_PD_VOPL] = DOMAIN_RK3399("vopl", 0, 0, BIT(8), false),
[RK3399_PD_ISP0] = DOMAIN_RK3399("isp0", BIT(22), BIT(22), BIT(9), false),
[RK3399_PD_ISP1] = DOMAIN_RK3399("isp1", BIT(23), BIT(23), BIT(10), false),
[RK3399_PD_HDCP] = DOMAIN_RK3399("hdcp", BIT(24), BIT(24), BIT(11), false),
[RK3399_PD_GMAC] = DOMAIN_RK3399("gmac", BIT(25), BIT(25), BIT(23), true),
[RK3399_PD_EMMC] = DOMAIN_RK3399("emmc", BIT(26), BIT(26), BIT(24), true),
[RK3399_PD_USB3] = DOMAIN_RK3399("usb3", BIT(27), BIT(27), BIT(12), true),
[RK3399_PD_EDP] = DOMAIN_RK3399("edp", BIT(28), BIT(28), BIT(22), false),
[RK3399_PD_GIC] = DOMAIN_RK3399("gic", BIT(29), BIT(29), BIT(27), true),
[RK3399_PD_SD] = DOMAIN_RK3399("sd", BIT(30), BIT(30), BIT(28), true),
[RK3399_PD_SDIOAUDIO] = DOMAIN_RK3399("sdioaudio", BIT(31), BIT(31), BIT(29), true),
};
static const struct rockchip_domain_info rk3568_pm_domains[] = {
[RK3568_PD_NPU] = DOMAIN_RK3568("npu", BIT(1), BIT(2), false),
[RK3568_PD_GPU] = DOMAIN_RK3568("gpu", BIT(0), BIT(1), false),
[RK3568_PD_VI] = DOMAIN_RK3568("vi", BIT(6), BIT(3), false),
[RK3568_PD_VO] = DOMAIN_RK3568("vo", BIT(7), BIT(4), false),
[RK3568_PD_RGA] = DOMAIN_RK3568("rga", BIT(5), BIT(5), false),
[RK3568_PD_VPU] = DOMAIN_RK3568("vpu", BIT(2), BIT(6), false),
[RK3568_PD_RKVDEC] = DOMAIN_RK3568("vdec", BIT(4), BIT(8), false),
[RK3568_PD_RKVENC] = DOMAIN_RK3568("venc", BIT(3), BIT(7), false),
[RK3568_PD_PIPE] = DOMAIN_RK3568("pipe", BIT(8), BIT(11), false),
};
static const struct rockchip_domain_info rk3588_pm_domains[] = {
[RK3588_PD_GPU] = DOMAIN_RK3588("gpu", 0x0, BIT(0), 0, 0x0, 0, BIT(1), 0x0, BIT(0), BIT(0), false),
[RK3588_PD_NPU] = DOMAIN_RK3588("npu", 0x0, BIT(1), BIT(1), 0x0, 0, 0, 0x0, 0, 0, false),
[RK3588_PD_VCODEC] = DOMAIN_RK3588("vcodec", 0x0, BIT(2), BIT(2), 0x0, 0, 0, 0x0, 0, 0, false),
[RK3588_PD_NPUTOP] = DOMAIN_RK3588("nputop", 0x0, BIT(3), 0, 0x0, BIT(11), BIT(2), 0x0, BIT(1), BIT(1), false),
[RK3588_PD_NPU1] = DOMAIN_RK3588("npu1", 0x0, BIT(4), 0, 0x0, BIT(12), BIT(3), 0x0, BIT(2), BIT(2), false),
[RK3588_PD_NPU2] = DOMAIN_RK3588("npu2", 0x0, BIT(5), 0, 0x0, BIT(13), BIT(4), 0x0, BIT(3), BIT(3), false),
[RK3588_PD_VENC0] = DOMAIN_RK3588("venc0", 0x0, BIT(6), 0, 0x0, BIT(14), BIT(5), 0x0, BIT(4), BIT(4), false),
[RK3588_PD_VENC1] = DOMAIN_RK3588("venc1", 0x0, BIT(7), 0, 0x0, BIT(15), BIT(6), 0x0, BIT(5), BIT(5), false),
[RK3588_PD_RKVDEC0] = DOMAIN_RK3588("rkvdec0", 0x0, BIT(8), 0, 0x0, BIT(16), BIT(7), 0x0, BIT(6), BIT(6), false),
[RK3588_PD_RKVDEC1] = DOMAIN_RK3588("rkvdec1", 0x0, BIT(9), 0, 0x0, BIT(17), BIT(8), 0x0, BIT(7), BIT(7), false),
[RK3588_PD_VDPU] = DOMAIN_RK3588("vdpu", 0x0, BIT(10), 0, 0x0, BIT(18), BIT(9), 0x0, BIT(8), BIT(8), false),
[RK3588_PD_RGA30] = DOMAIN_RK3588("rga30", 0x0, BIT(11), 0, 0x0, BIT(19), BIT(10), 0x0, 0, 0, false),
[RK3588_PD_AV1] = DOMAIN_RK3588("av1", 0x0, BIT(12), 0, 0x0, BIT(20), BIT(11), 0x0, BIT(9), BIT(9), false),
[RK3588_PD_VI] = DOMAIN_RK3588("vi", 0x0, BIT(13), 0, 0x0, BIT(21), BIT(12), 0x0, BIT(10), BIT(10), false),
[RK3588_PD_FEC] = DOMAIN_RK3588("fec", 0x0, BIT(14), 0, 0x0, BIT(22), BIT(13), 0x0, 0, 0, false),
[RK3588_PD_ISP1] = DOMAIN_RK3588("isp1", 0x0, BIT(15), 0, 0x0, BIT(23), BIT(14), 0x0, BIT(11), BIT(11), false),
[RK3588_PD_RGA31] = DOMAIN_RK3588("rga31", 0x4, BIT(0), 0, 0x0, BIT(24), BIT(15), 0x0, BIT(12), BIT(12), false),
[RK3588_PD_VOP] = DOMAIN_RK3588("vop", 0x4, BIT(1), 0, 0x0, BIT(25), BIT(16), 0x0, BIT(13) | BIT(14), BIT(13) | BIT(14), false),
[RK3588_PD_VO0] = DOMAIN_RK3588("vo0", 0x4, BIT(2), 0, 0x0, BIT(26), BIT(17), 0x0, BIT(15), BIT(15), false),
[RK3588_PD_VO1] = DOMAIN_RK3588("vo1", 0x4, BIT(3), 0, 0x0, BIT(27), BIT(18), 0x4, BIT(0), BIT(16), false),
[RK3588_PD_AUDIO] = DOMAIN_RK3588("audio", 0x4, BIT(4), 0, 0x0, BIT(28), BIT(19), 0x4, BIT(1), BIT(17), false),
[RK3588_PD_PHP] = DOMAIN_RK3588("php", 0x4, BIT(5), 0, 0x0, BIT(29), BIT(20), 0x4, BIT(5), BIT(21), false),
[RK3588_PD_GMAC] = DOMAIN_RK3588("gmac", 0x4, BIT(6), 0, 0x0, BIT(30), BIT(21), 0x0, 0, 0, false),
[RK3588_PD_PCIE] = DOMAIN_RK3588("pcie", 0x4, BIT(7), 0, 0x0, BIT(31), BIT(22), 0x0, 0, 0, true),
[RK3588_PD_NVM] = DOMAIN_RK3588("nvm", 0x4, BIT(8), BIT(24), 0x4, 0, 0, 0x4, BIT(2), BIT(18), false),
[RK3588_PD_NVM0] = DOMAIN_RK3588("nvm0", 0x4, BIT(9), 0, 0x4, BIT(1), BIT(23), 0x0, 0, 0, false),
[RK3588_PD_SDIO] = DOMAIN_RK3588("sdio", 0x4, BIT(10), 0, 0x4, BIT(2), BIT(24), 0x4, BIT(3), BIT(19), false),
[RK3588_PD_USB] = DOMAIN_RK3588("usb", 0x4, BIT(11), 0, 0x4, BIT(3), BIT(25), 0x4, BIT(4), BIT(20), true),
[RK3588_PD_SDMMC] = DOMAIN_RK3588("sdmmc", 0x4, BIT(13), 0, 0x4, BIT(5), BIT(26), 0x0, 0, 0, false),
};
static const struct rockchip_pmu_info px30_pmu = {
.pwr_offset = 0x18,
.status_offset = 0x20,
.req_offset = 0x64,
.idle_offset = 0x6c,
.ack_offset = 0x6c,
.num_domains = ARRAY_SIZE(px30_pm_domains),
.domain_info = px30_pm_domains,
};
static const struct rockchip_pmu_info rk3036_pmu = {
.req_offset = 0x148,
.idle_offset = 0x14c,
.ack_offset = 0x14c,
.num_domains = ARRAY_SIZE(rk3036_pm_domains),
.domain_info = rk3036_pm_domains,
};
static const struct rockchip_pmu_info rk3066_pmu = {
.pwr_offset = 0x08,
.status_offset = 0x0c,
.req_offset = 0x38, /* PMU_MISC_CON1 */
.idle_offset = 0x0c,
.ack_offset = 0x0c,
.num_domains = ARRAY_SIZE(rk3066_pm_domains),
.domain_info = rk3066_pm_domains,
};
static const struct rockchip_pmu_info rk3128_pmu = {
.pwr_offset = 0x04,
.status_offset = 0x08,
.req_offset = 0x0c,
.idle_offset = 0x10,
.ack_offset = 0x10,
.num_domains = ARRAY_SIZE(rk3128_pm_domains),
.domain_info = rk3128_pm_domains,
};
static const struct rockchip_pmu_info rk3188_pmu = {
.pwr_offset = 0x08,
.status_offset = 0x0c,
.req_offset = 0x38, /* PMU_MISC_CON1 */
.idle_offset = 0x0c,
.ack_offset = 0x0c,
.num_domains = ARRAY_SIZE(rk3188_pm_domains),
.domain_info = rk3188_pm_domains,
};
static const struct rockchip_pmu_info rk3228_pmu = {
.req_offset = 0x40c,
.idle_offset = 0x488,
.ack_offset = 0x488,
.num_domains = ARRAY_SIZE(rk3228_pm_domains),
.domain_info = rk3228_pm_domains,
};
static const struct rockchip_pmu_info rk3288_pmu = {
.pwr_offset = 0x08,
.status_offset = 0x0c,
.req_offset = 0x10,
.idle_offset = 0x14,
.ack_offset = 0x14,
.core_pwrcnt_offset = 0x34,
.gpu_pwrcnt_offset = 0x3c,
.core_power_transition_time = 24, /* 1us */
.gpu_power_transition_time = 24, /* 1us */
.num_domains = ARRAY_SIZE(rk3288_pm_domains),
.domain_info = rk3288_pm_domains,
};
static const struct rockchip_pmu_info rk3328_pmu = {
.req_offset = 0x414,
.idle_offset = 0x484,
.ack_offset = 0x484,
.num_domains = ARRAY_SIZE(rk3328_pm_domains),
.domain_info = rk3328_pm_domains,
};
static const struct rockchip_pmu_info rk3366_pmu = {
.pwr_offset = 0x0c,
.status_offset = 0x10,
.req_offset = 0x3c,
.idle_offset = 0x40,
.ack_offset = 0x40,
.core_pwrcnt_offset = 0x48,
.gpu_pwrcnt_offset = 0x50,
.core_power_transition_time = 24,
.gpu_power_transition_time = 24,
.num_domains = ARRAY_SIZE(rk3366_pm_domains),
.domain_info = rk3366_pm_domains,
};
static const struct rockchip_pmu_info rk3368_pmu = {
.pwr_offset = 0x0c,
.status_offset = 0x10,
.req_offset = 0x3c,
.idle_offset = 0x40,
.ack_offset = 0x40,
.core_pwrcnt_offset = 0x48,
.gpu_pwrcnt_offset = 0x50,
.core_power_transition_time = 24,
.gpu_power_transition_time = 24,
.num_domains = ARRAY_SIZE(rk3368_pm_domains),
.domain_info = rk3368_pm_domains,
};
static const struct rockchip_pmu_info rk3399_pmu = {
.pwr_offset = 0x14,
.status_offset = 0x18,
.req_offset = 0x60,
.idle_offset = 0x64,
.ack_offset = 0x68,
/* ARM Trusted Firmware manages power transition times */
.num_domains = ARRAY_SIZE(rk3399_pm_domains),
.domain_info = rk3399_pm_domains,
};
static const struct rockchip_pmu_info rk3568_pmu = {
.pwr_offset = 0xa0,
.status_offset = 0x98,
.req_offset = 0x50,
.idle_offset = 0x68,
.ack_offset = 0x60,
.num_domains = ARRAY_SIZE(rk3568_pm_domains),
.domain_info = rk3568_pm_domains,
};
static const struct rockchip_pmu_info rk3588_pmu = {
.pwr_offset = 0x14c,
.status_offset = 0x180,
.req_offset = 0x10c,
.idle_offset = 0x120,
.ack_offset = 0x118,
.mem_pwr_offset = 0x1a0,
.chain_status_offset = 0x1f0,
.mem_status_offset = 0x1f8,
.repair_status_offset = 0x290,
.num_domains = ARRAY_SIZE(rk3588_pm_domains),
.domain_info = rk3588_pm_domains,
};
static const struct rockchip_pmu_info rv1126_pmu = {
.pwr_offset = 0x110,
.status_offset = 0x108,
.req_offset = 0xc0,
.idle_offset = 0xd8,
.ack_offset = 0xd0,
.num_domains = ARRAY_SIZE(rv1126_pm_domains),
.domain_info = rv1126_pm_domains,
};
static const struct of_device_id rockchip_pm_domain_dt_match[] = {
{
.compatible = "rockchip,px30-power-controller",
.data = (void *)&px30_pmu,
},
{
.compatible = "rockchip,rk3036-power-controller",
.data = (void *)&rk3036_pmu,
},
{
.compatible = "rockchip,rk3066-power-controller",
.data = (void *)&rk3066_pmu,
},
{
.compatible = "rockchip,rk3128-power-controller",
.data = (void *)&rk3128_pmu,
},
{
.compatible = "rockchip,rk3188-power-controller",
.data = (void *)&rk3188_pmu,
},
{
.compatible = "rockchip,rk3228-power-controller",
.data = (void *)&rk3228_pmu,
},
{
.compatible = "rockchip,rk3288-power-controller",
.data = (void *)&rk3288_pmu,
},
{
.compatible = "rockchip,rk3328-power-controller",
.data = (void *)&rk3328_pmu,
},
{
.compatible = "rockchip,rk3366-power-controller",
.data = (void *)&rk3366_pmu,
},
{
.compatible = "rockchip,rk3368-power-controller",
.data = (void *)&rk3368_pmu,
},
{
.compatible = "rockchip,rk3399-power-controller",
.data = (void *)&rk3399_pmu,
},
{
.compatible = "rockchip,rk3568-power-controller",
.data = (void *)&rk3568_pmu,
},
{
.compatible = "rockchip,rk3588-power-controller",
.data = (void *)&rk3588_pmu,
},
{
.compatible = "rockchip,rv1126-power-controller",
.data = (void *)&rv1126_pmu,
},
{ /* sentinel */ },
};
static struct platform_driver rockchip_pm_domain_driver = {
.probe = rockchip_pm_domain_probe,
.driver = {
.name = "rockchip-pm-domain",
.of_match_table = rockchip_pm_domain_dt_match,
/*
* We can't forcibly eject devices from the power
* domain, so we can't really remove power domains
* once they were added.
*/
.suppress_bind_attrs = true,
},
};
static int __init rockchip_pm_domain_drv_register(void)
{
return platform_driver_register(&rockchip_pm_domain_driver);
}
postcore_initcall(rockchip_pm_domain_drv_register);
| linux-master | drivers/pmdomain/rockchip/pm-domains.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* StarFive JH71XX PMU (Power Management Unit) Controller Driver
*
* Copyright (C) 2022 StarFive Technology Co., Ltd.
*/
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <dt-bindings/power/starfive,jh7110-pmu.h>
/* register offset */
#define JH71XX_PMU_SW_TURN_ON_POWER 0x0C
#define JH71XX_PMU_SW_TURN_OFF_POWER 0x10
#define JH71XX_PMU_SW_ENCOURAGE 0x44
#define JH71XX_PMU_TIMER_INT_MASK 0x48
#define JH71XX_PMU_CURR_POWER_MODE 0x80
#define JH71XX_PMU_EVENT_STATUS 0x88
#define JH71XX_PMU_INT_STATUS 0x8C
/* sw encourage cfg */
#define JH71XX_PMU_SW_ENCOURAGE_EN_LO 0x05
#define JH71XX_PMU_SW_ENCOURAGE_EN_HI 0x50
#define JH71XX_PMU_SW_ENCOURAGE_DIS_LO 0x0A
#define JH71XX_PMU_SW_ENCOURAGE_DIS_HI 0xA0
#define JH71XX_PMU_SW_ENCOURAGE_ON 0xFF
/* pmu int status */
#define JH71XX_PMU_INT_SEQ_DONE BIT(0)
#define JH71XX_PMU_INT_HW_REQ BIT(1)
#define JH71XX_PMU_INT_SW_FAIL GENMASK(3, 2)
#define JH71XX_PMU_INT_HW_FAIL GENMASK(5, 4)
#define JH71XX_PMU_INT_PCH_FAIL GENMASK(8, 6)
#define JH71XX_PMU_INT_ALL_MASK GENMASK(8, 0)
/*
* The time required to switch the power state is determined by the time
* needed to power on the largest domain, which is on the order of
* microseconds.
*/
#define JH71XX_PMU_TIMEOUT_US 100
struct jh71xx_domain_info {
const char * const name;
unsigned int flags;
u8 bit;
};
struct jh71xx_pmu_match_data {
const struct jh71xx_domain_info *domain_info;
int num_domains;
};
struct jh71xx_pmu {
struct device *dev;
const struct jh71xx_pmu_match_data *match_data;
void __iomem *base;
struct generic_pm_domain **genpd;
struct genpd_onecell_data genpd_data;
int irq;
spinlock_t lock; /* protects pmu reg */
};
struct jh71xx_pmu_dev {
const struct jh71xx_domain_info *domain_info;
struct jh71xx_pmu *pmu;
struct generic_pm_domain genpd;
};
static int jh71xx_pmu_get_state(struct jh71xx_pmu_dev *pmd, u32 mask, bool *is_on)
{
struct jh71xx_pmu *pmu = pmd->pmu;
if (!mask)
return -EINVAL;
*is_on = readl(pmu->base + JH71XX_PMU_CURR_POWER_MODE) & mask;
return 0;
}
static int jh71xx_pmu_set_state(struct jh71xx_pmu_dev *pmd, u32 mask, bool on)
{
struct jh71xx_pmu *pmu = pmd->pmu;
unsigned long flags;
u32 val;
u32 mode;
u32 encourage_lo;
u32 encourage_hi;
bool is_on;
int ret;
ret = jh71xx_pmu_get_state(pmd, mask, &is_on);
if (ret) {
dev_dbg(pmu->dev, "unable to get current state for %s\n",
pmd->genpd.name);
return ret;
}
if (is_on == on) {
dev_dbg(pmu->dev, "pm domain [%s] is already %sable status.\n",
pmd->genpd.name, on ? "en" : "dis");
return 0;
}
spin_lock_irqsave(&pmu->lock, flags);
/*
* The PMU accepts software encourage to switch power mode in the following 2 steps:
*
* 1. Configure the register SW_TURN_ON_POWER (offset 0x0c) by writing 1 to
* the bit corresponding to the power domain that will be turned on
* and writing 0 to the others.
* Likewise, configure the register SW_TURN_OFF_POWER (offset 0x10) by
* writing 1 to the bit corresponding to the power domain that will be
* turned off and writing 0 to the others.
*/
if (on) {
mode = JH71XX_PMU_SW_TURN_ON_POWER;
encourage_lo = JH71XX_PMU_SW_ENCOURAGE_EN_LO;
encourage_hi = JH71XX_PMU_SW_ENCOURAGE_EN_HI;
} else {
mode = JH71XX_PMU_SW_TURN_OFF_POWER;
encourage_lo = JH71XX_PMU_SW_ENCOURAGE_DIS_LO;
encourage_hi = JH71XX_PMU_SW_ENCOURAGE_DIS_HI;
}
writel(mask, pmu->base + mode);
/*
* 2. Write SW encourage command sequence to the Software Encourage Reg (offset 0x44)
* First write SW_MODE_ENCOURAGE_ON to JH71XX_PMU_SW_ENCOURAGE. This will reset
* the state machine which parses the command sequence. This register must be
* written every time software wants to power on/off a domain.
* Then write the lower bits of the command sequence, followed by the upper
* bits. The sequence differs between powering on & off a domain.
*/
writel(JH71XX_PMU_SW_ENCOURAGE_ON, pmu->base + JH71XX_PMU_SW_ENCOURAGE);
writel(encourage_lo, pmu->base + JH71XX_PMU_SW_ENCOURAGE);
writel(encourage_hi, pmu->base + JH71XX_PMU_SW_ENCOURAGE);
spin_unlock_irqrestore(&pmu->lock, flags);
/* Wait for the power domain bit to be enabled / disabled */
if (on) {
ret = readl_poll_timeout_atomic(pmu->base + JH71XX_PMU_CURR_POWER_MODE,
val, val & mask,
1, JH71XX_PMU_TIMEOUT_US);
} else {
ret = readl_poll_timeout_atomic(pmu->base + JH71XX_PMU_CURR_POWER_MODE,
val, !(val & mask),
1, JH71XX_PMU_TIMEOUT_US);
}
if (ret) {
dev_err(pmu->dev, "%s: failed to power %s\n",
pmd->genpd.name, on ? "on" : "off");
return -ETIMEDOUT;
}
return 0;
}
static int jh71xx_pmu_on(struct generic_pm_domain *genpd)
{
struct jh71xx_pmu_dev *pmd = container_of(genpd,
struct jh71xx_pmu_dev, genpd);
u32 pwr_mask = BIT(pmd->domain_info->bit);
return jh71xx_pmu_set_state(pmd, pwr_mask, true);
}
static int jh71xx_pmu_off(struct generic_pm_domain *genpd)
{
struct jh71xx_pmu_dev *pmd = container_of(genpd,
struct jh71xx_pmu_dev, genpd);
u32 pwr_mask = BIT(pmd->domain_info->bit);
return jh71xx_pmu_set_state(pmd, pwr_mask, false);
}
static void jh71xx_pmu_int_enable(struct jh71xx_pmu *pmu, u32 mask, bool enable)
{
u32 val;
unsigned long flags;
spin_lock_irqsave(&pmu->lock, flags);
val = readl(pmu->base + JH71XX_PMU_TIMER_INT_MASK);
if (enable)
val &= ~mask;
else
val |= mask;
writel(val, pmu->base + JH71XX_PMU_TIMER_INT_MASK);
spin_unlock_irqrestore(&pmu->lock, flags);
}
static irqreturn_t jh71xx_pmu_interrupt(int irq, void *data)
{
struct jh71xx_pmu *pmu = data;
u32 val;
val = readl(pmu->base + JH71XX_PMU_INT_STATUS);
if (val & JH71XX_PMU_INT_SEQ_DONE)
dev_dbg(pmu->dev, "sequence done.\n");
if (val & JH71XX_PMU_INT_HW_REQ)
dev_dbg(pmu->dev, "hardware encourage requestion.\n");
if (val & JH71XX_PMU_INT_SW_FAIL)
dev_err(pmu->dev, "software encourage fail.\n");
if (val & JH71XX_PMU_INT_HW_FAIL)
dev_err(pmu->dev, "hardware encourage fail.\n");
if (val & JH71XX_PMU_INT_PCH_FAIL)
dev_err(pmu->dev, "p-channel fail event.\n");
/* clear interrupts */
writel(val, pmu->base + JH71XX_PMU_INT_STATUS);
writel(val, pmu->base + JH71XX_PMU_EVENT_STATUS);
return IRQ_HANDLED;
}
static int jh71xx_pmu_init_domain(struct jh71xx_pmu *pmu, int index)
{
struct jh71xx_pmu_dev *pmd;
u32 pwr_mask;
int ret;
bool is_on = false;
pmd = devm_kzalloc(pmu->dev, sizeof(*pmd), GFP_KERNEL);
if (!pmd)
return -ENOMEM;
pmd->domain_info = &pmu->match_data->domain_info[index];
pmd->pmu = pmu;
pwr_mask = BIT(pmd->domain_info->bit);
pmd->genpd.name = pmd->domain_info->name;
pmd->genpd.flags = pmd->domain_info->flags;
ret = jh71xx_pmu_get_state(pmd, pwr_mask, &is_on);
if (ret)
dev_warn(pmu->dev, "unable to get current state for %s\n",
pmd->genpd.name);
pmd->genpd.power_on = jh71xx_pmu_on;
pmd->genpd.power_off = jh71xx_pmu_off;
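/*
* If the current state could not be read above, is_on stays false and
* the domain is registered as initially powered off.
*/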
pm_genpd_init(&pmd->genpd, NULL, !is_on);
pmu->genpd_data.domains[index] = &pmd->genpd;
return 0;
}
static int jh71xx_pmu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
const struct jh71xx_pmu_match_data *match_data;
struct jh71xx_pmu *pmu;
unsigned int i;
int ret;
pmu = devm_kzalloc(dev, sizeof(*pmu), GFP_KERNEL);
if (!pmu)
return -ENOMEM;
pmu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pmu->base))
return PTR_ERR(pmu->base);
pmu->irq = platform_get_irq(pdev, 0);
if (pmu->irq < 0)
return pmu->irq;
ret = devm_request_irq(dev, pmu->irq, jh71xx_pmu_interrupt,
0, pdev->name, pmu);
if (ret)
dev_err(dev, "failed to request irq\n");
match_data = of_device_get_match_data(dev);
if (!match_data)
return -EINVAL;
pmu->genpd = devm_kcalloc(dev, match_data->num_domains,
sizeof(struct generic_pm_domain *),
GFP_KERNEL);
if (!pmu->genpd)
return -ENOMEM;
pmu->dev = dev;
pmu->match_data = match_data;
pmu->genpd_data.domains = pmu->genpd;
pmu->genpd_data.num_domains = match_data->num_domains;
for (i = 0; i < match_data->num_domains; i++) {
ret = jh71xx_pmu_init_domain(pmu, i);
if (ret) {
dev_err(dev, "failed to initialize power domain\n");
return ret;
}
}
spin_lock_init(&pmu->lock);
jh71xx_pmu_int_enable(pmu, JH71XX_PMU_INT_ALL_MASK & ~JH71XX_PMU_INT_PCH_FAIL, true);
ret = of_genpd_add_provider_onecell(np, &pmu->genpd_data);
if (ret) {
dev_err(dev, "failed to register genpd driver: %d\n", ret);
return ret;
}
dev_dbg(dev, "registered %u power domains\n", i);
return 0;
}
static const struct jh71xx_domain_info jh7110_power_domains[] = {
[JH7110_PD_SYSTOP] = {
.name = "SYSTOP",
.bit = 0,
.flags = GENPD_FLAG_ALWAYS_ON,
},
[JH7110_PD_CPU] = {
.name = "CPU",
.bit = 1,
.flags = GENPD_FLAG_ALWAYS_ON,
},
[JH7110_PD_GPUA] = {
.name = "GPUA",
.bit = 2,
},
[JH7110_PD_VDEC] = {
.name = "VDEC",
.bit = 3,
},
[JH7110_PD_VOUT] = {
.name = "VOUT",
.bit = 4,
},
[JH7110_PD_ISP] = {
.name = "ISP",
.bit = 5,
},
[JH7110_PD_VENC] = {
.name = "VENC",
.bit = 6,
},
};
static const struct jh71xx_pmu_match_data jh7110_pmu = {
.num_domains = ARRAY_SIZE(jh7110_power_domains),
.domain_info = jh7110_power_domains,
};
static const struct of_device_id jh71xx_pmu_of_match[] = {
{
.compatible = "starfive,jh7110-pmu",
.data = (void *)&jh7110_pmu,
}, {
/* sentinel */
}
};
static struct platform_driver jh71xx_pmu_driver = {
.probe = jh71xx_pmu_probe,
.driver = {
.name = "jh71xx-pmu",
.of_match_table = jh71xx_pmu_of_match,
.suppress_bind_attrs = true,
},
};
builtin_platform_driver(jh71xx_pmu_driver);
MODULE_AUTHOR("Walker Chen <[email protected]>");
MODULE_DESCRIPTION("StarFive JH71XX PMU Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/pmdomain/starfive/jh71xx-pmu.c |
// SPDX-License-Identifier: GPL-2.0
//
// Exynos Generic power domain support.
//
// Copyright (c) 2012 Samsung Electronics Co., Ltd.
// http://www.samsung.com
//
// Implementation of Exynos specific power domain control which is used in
// conjunction with runtime-pm. Support for both device-tree and non-device-tree
// based power domain support is included.
#include <linux/io.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/pm_domain.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pm_runtime.h>
struct exynos_pm_domain_config {
/* Value for LOCAL_PWR_CFG and STATUS fields for each domain */
u32 local_pwr_cfg;
};
/*
* Exynos specific wrapper around the generic power domain
*/
struct exynos_pm_domain {
void __iomem *base;
struct generic_pm_domain pd;
u32 local_pwr_cfg;
};
static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
{
struct exynos_pm_domain *pd;
void __iomem *base;
u32 timeout, pwr;
char *op;
pd = container_of(domain, struct exynos_pm_domain, pd);
base = pd->base;
pwr = power_on ? pd->local_pwr_cfg : 0;
writel_relaxed(pwr, base);
/* Wait max 1ms */
timeout = 10;
while ((readl_relaxed(base + 0x4) & pd->local_pwr_cfg) != pwr) {
if (!timeout) {
op = (power_on) ? "enable" : "disable";
pr_err("Power domain %s %s failed\n", domain->name, op);
return -ETIMEDOUT;
}
timeout--;
cpu_relax();
usleep_range(80, 100);
}
return 0;
}
static int exynos_pd_power_on(struct generic_pm_domain *domain)
{
return exynos_pd_power(domain, true);
}
static int exynos_pd_power_off(struct generic_pm_domain *domain)
{
return exynos_pd_power(domain, false);
}
static const struct exynos_pm_domain_config exynos4210_cfg = {
.local_pwr_cfg = 0x7,
};
static const struct exynos_pm_domain_config exynos5433_cfg = {
.local_pwr_cfg = 0xf,
};
static const struct of_device_id exynos_pm_domain_of_match[] = {
{
.compatible = "samsung,exynos4210-pd",
.data = &exynos4210_cfg,
}, {
.compatible = "samsung,exynos5433-pd",
.data = &exynos5433_cfg,
},
{ },
};
static const char *exynos_get_domain_name(struct device_node *node)
{
const char *name;
if (of_property_read_string(node, "label", &name) < 0)
name = kbasename(node->full_name);
return kstrdup_const(name, GFP_KERNEL);
}
static int exynos_pd_probe(struct platform_device *pdev)
{
const struct exynos_pm_domain_config *pm_domain_cfg;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct of_phandle_args child, parent;
struct exynos_pm_domain *pd;
int on, ret;
pm_domain_cfg = of_device_get_match_data(dev);
pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
if (!pd)
return -ENOMEM;
pd->pd.name = exynos_get_domain_name(np);
if (!pd->pd.name)
return -ENOMEM;
pd->base = of_iomap(np, 0);
if (!pd->base) {
kfree_const(pd->pd.name);
return -ENODEV;
}
pd->pd.power_off = exynos_pd_power_off;
pd->pd.power_on = exynos_pd_power_on;
pd->local_pwr_cfg = pm_domain_cfg->local_pwr_cfg;
on = readl_relaxed(pd->base + 0x4) & pd->local_pwr_cfg;
pm_genpd_init(&pd->pd, NULL, !on);
ret = of_genpd_add_provider_simple(np, &pd->pd);
if (ret == 0 && of_parse_phandle_with_args(np, "power-domains",
"#power-domain-cells", 0, &parent) == 0) {
child.np = np;
child.args_count = 0;
if (of_genpd_add_subdomain(&parent, &child))
pr_warn("%pOF failed to add subdomain: %pOF\n",
parent.np, child.np);
else
pr_info("%pOF has as child subdomain: %pOF.\n",
parent.np, child.np);
}
pm_runtime_enable(dev);
return ret;
}
static struct platform_driver exynos_pd_driver = {
.probe = exynos_pd_probe,
.driver = {
.name = "exynos-pd",
.of_match_table = exynos_pm_domain_of_match,
.suppress_bind_attrs = true,
}
};
static __init int exynos4_pm_init_power_domain(void)
{
return platform_driver_register(&exynos_pd_driver);
}
core_initcall(exynos4_pm_init_power_domain);
| linux-master | drivers/pmdomain/samsung/exynos-pm-domains.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015 Pengutronix, Sascha Hauer <[email protected]>
*/
#include <linux/clk.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/regulator/consumer.h>
#include <linux/soc/mediatek/infracfg.h>
#include <dt-bindings/power/mt2701-power.h>
#include <dt-bindings/power/mt2712-power.h>
#include <dt-bindings/power/mt6797-power.h>
#include <dt-bindings/power/mt7622-power.h>
#include <dt-bindings/power/mt7623a-power.h>
#include <dt-bindings/power/mt8173-power.h>
#define MTK_POLL_DELAY_US 10
#define MTK_POLL_TIMEOUT USEC_PER_SEC
#define MTK_SCPD_ACTIVE_WAKEUP BIT(0)
#define MTK_SCPD_FWAIT_SRAM BIT(1)
#define MTK_SCPD_CAPS(_scpd, _x) ((_scpd)->data->caps & (_x))
#define SPM_VDE_PWR_CON 0x0210
#define SPM_MFG_PWR_CON 0x0214
#define SPM_VEN_PWR_CON 0x0230
#define SPM_ISP_PWR_CON 0x0238
#define SPM_DIS_PWR_CON 0x023c
#define SPM_CONN_PWR_CON 0x0280
#define SPM_VEN2_PWR_CON 0x0298
#define SPM_AUDIO_PWR_CON 0x029c /* MT8173, MT2712 */
#define SPM_BDP_PWR_CON 0x029c /* MT2701 */
#define SPM_ETH_PWR_CON 0x02a0
#define SPM_HIF_PWR_CON 0x02a4
#define SPM_IFR_MSC_PWR_CON 0x02a8
#define SPM_MFG_2D_PWR_CON 0x02c0
#define SPM_MFG_ASYNC_PWR_CON 0x02c4
#define SPM_USB_PWR_CON 0x02cc
#define SPM_USB2_PWR_CON 0x02d4 /* MT2712 */
#define SPM_ETHSYS_PWR_CON 0x02e0 /* MT7622 */
#define SPM_HIF0_PWR_CON 0x02e4 /* MT7622 */
#define SPM_HIF1_PWR_CON 0x02e8 /* MT7622 */
#define SPM_WB_PWR_CON 0x02ec /* MT7622 */
#define SPM_PWR_STATUS 0x060c
#define SPM_PWR_STATUS_2ND 0x0610
#define PWR_RST_B_BIT BIT(0)
#define PWR_ISO_BIT BIT(1)
#define PWR_ON_BIT BIT(2)
#define PWR_ON_2ND_BIT BIT(3)
#define PWR_CLK_DIS_BIT BIT(4)
#define PWR_STATUS_CONN BIT(1)
#define PWR_STATUS_DISP BIT(3)
#define PWR_STATUS_MFG BIT(4)
#define PWR_STATUS_ISP BIT(5)
#define PWR_STATUS_VDEC BIT(7)
#define PWR_STATUS_BDP BIT(14)
#define PWR_STATUS_ETH BIT(15)
#define PWR_STATUS_HIF BIT(16)
#define PWR_STATUS_IFR_MSC BIT(17)
#define PWR_STATUS_USB2 BIT(19) /* MT2712 */
#define PWR_STATUS_VENC_LT BIT(20)
#define PWR_STATUS_VENC BIT(21)
#define PWR_STATUS_MFG_2D BIT(22) /* MT8173 */
#define PWR_STATUS_MFG_ASYNC BIT(23) /* MT8173 */
#define PWR_STATUS_AUDIO BIT(24) /* MT8173, MT2712 */
#define PWR_STATUS_USB BIT(25) /* MT8173, MT2712 */
#define PWR_STATUS_ETHSYS BIT(24) /* MT7622 */
#define PWR_STATUS_HIF0 BIT(25) /* MT7622 */
#define PWR_STATUS_HIF1 BIT(26) /* MT7622 */
#define PWR_STATUS_WB BIT(27) /* MT7622 */
enum clk_id {
CLK_NONE,
CLK_MM,
CLK_MFG,
CLK_VENC,
CLK_VENC_LT,
CLK_ETHIF,
CLK_VDEC,
CLK_HIFSEL,
CLK_JPGDEC,
CLK_AUDIO,
CLK_MAX,
};
static const char * const clk_names[] = {
NULL,
"mm",
"mfg",
"venc",
"venc_lt",
"ethif",
"vdec",
"hif_sel",
"jpgdec",
"audio",
NULL,
};
#define MAX_CLKS 3
/**
* struct scp_domain_data - scp domain data for power on/off flow
* @name: The domain name.
* @sta_mask: The mask for power on/off status bit.
* @ctl_offs: The offset for main power control register.
* @sram_pdn_bits: The mask for sram power control bits.
* @sram_pdn_ack_bits: The mask for sram power control acked bits.
* @bus_prot_mask: The mask for single step bus protection.
* @clk_id: The basic clocks required by this power domain.
* @caps: The flag for active wake-up action.
*/
struct scp_domain_data {
const char *name;
u32 sta_mask;
int ctl_offs;
u32 sram_pdn_bits;
u32 sram_pdn_ack_bits;
u32 bus_prot_mask;
enum clk_id clk_id[MAX_CLKS];
u8 caps;
};
struct scp;
struct scp_domain {
struct generic_pm_domain genpd;
struct scp *scp;
struct clk *clk[MAX_CLKS];
const struct scp_domain_data *data;
struct regulator *supply;
};
struct scp_ctrl_reg {
int pwr_sta_offs;
int pwr_sta2nd_offs;
};
struct scp {
struct scp_domain *domains;
struct genpd_onecell_data pd_data;
struct device *dev;
void __iomem *base;
struct regmap *infracfg;
struct scp_ctrl_reg ctrl_reg;
bool bus_prot_reg_update;
};
struct scp_subdomain {
int origin;
int subdomain;
};
struct scp_soc_data {
const struct scp_domain_data *domains;
int num_domains;
const struct scp_subdomain *subdomains;
int num_subdomains;
const struct scp_ctrl_reg regs;
bool bus_prot_reg_update;
};
static int scpsys_domain_is_on(struct scp_domain *scpd)
{
struct scp *scp = scpd->scp;
u32 status = readl(scp->base + scp->ctrl_reg.pwr_sta_offs) &
scpd->data->sta_mask;
u32 status2 = readl(scp->base + scp->ctrl_reg.pwr_sta2nd_offs) &
scpd->data->sta_mask;
/*
* A domain is on when both status bits are set. If only one is set,
* return an error; this can happen while the domain is powering up.
*/
if (status && status2)
return true;
if (!status && !status2)
return false;
return -EINVAL;
}
static int scpsys_regulator_enable(struct scp_domain *scpd)
{
if (!scpd->supply)
return 0;
return regulator_enable(scpd->supply);
}
static int scpsys_regulator_disable(struct scp_domain *scpd)
{
if (!scpd->supply)
return 0;
return regulator_disable(scpd->supply);
}
static void scpsys_clk_disable(struct clk *clk[], int max_num)
{
int i;
for (i = max_num - 1; i >= 0; i--)
clk_disable_unprepare(clk[i]);
}
static int scpsys_clk_enable(struct clk *clk[], int max_num)
{
int i, ret = 0;
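/* Enable the clocks in order; on failure, roll back those already enabled. */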
for (i = 0; i < max_num && clk[i]; i++) {
ret = clk_prepare_enable(clk[i]);
if (ret) {
scpsys_clk_disable(clk, i);
break;
}
}
return ret;
}
static int scpsys_sram_enable(struct scp_domain *scpd, void __iomem *ctl_addr)
{
u32 val;
u32 pdn_ack = scpd->data->sram_pdn_ack_bits;
int tmp;
val = readl(ctl_addr);
val &= ~scpd->data->sram_pdn_bits;
writel(val, ctl_addr);
/* Either wait until SRAM_PDN_ACK all 0 or have a force wait */
if (MTK_SCPD_CAPS(scpd, MTK_SCPD_FWAIT_SRAM)) {
/*
* Currently, MTK_SCPD_FWAIT_SRAM is necessary only for
* MT7622_POWER_DOMAIN_WB and thus just a trivial setup
* is applied here.
*/
usleep_range(12000, 12100);
} else {
/* Wait until the SRAM_PDN_ACK bits are all cleared */
int ret = readl_poll_timeout(ctl_addr, tmp,
(tmp & pdn_ack) == 0,
MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
if (ret < 0)
return ret;
}
return 0;
}
static int scpsys_sram_disable(struct scp_domain *scpd, void __iomem *ctl_addr)
{
u32 val;
u32 pdn_ack = scpd->data->sram_pdn_ack_bits;
int tmp;
val = readl(ctl_addr);
val |= scpd->data->sram_pdn_bits;
writel(val, ctl_addr);
/* Wait until the SRAM_PDN_ACK bits are all set */
return readl_poll_timeout(ctl_addr, tmp,
(tmp & pdn_ack) == pdn_ack,
MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
}
static int scpsys_bus_protect_enable(struct scp_domain *scpd)
{
struct scp *scp = scpd->scp;
if (!scpd->data->bus_prot_mask)
return 0;
return mtk_infracfg_set_bus_protection(scp->infracfg,
scpd->data->bus_prot_mask,
scp->bus_prot_reg_update);
}
static int scpsys_bus_protect_disable(struct scp_domain *scpd)
{
struct scp *scp = scpd->scp;
if (!scpd->data->bus_prot_mask)
return 0;
return mtk_infracfg_clear_bus_protection(scp->infracfg,
scpd->data->bus_prot_mask,
scp->bus_prot_reg_update);
}
static int scpsys_power_on(struct generic_pm_domain *genpd)
{
struct scp_domain *scpd = container_of(genpd, struct scp_domain, genpd);
struct scp *scp = scpd->scp;
void __iomem *ctl_addr = scp->base + scpd->data->ctl_offs;
u32 val;
int ret, tmp;
ret = scpsys_regulator_enable(scpd);
if (ret < 0)
return ret;
ret = scpsys_clk_enable(scpd->clk, MAX_CLKS);
if (ret)
goto err_clk;
/* subsys power on */
val = readl(ctl_addr);
val |= PWR_ON_BIT;
writel(val, ctl_addr);
val |= PWR_ON_2ND_BIT;
writel(val, ctl_addr);
/* wait until PWR_ACK = 1 */
ret = readx_poll_timeout(scpsys_domain_is_on, scpd, tmp, tmp > 0,
MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
if (ret < 0)
goto err_pwr_ack;
val &= ~PWR_CLK_DIS_BIT;
writel(val, ctl_addr);
val &= ~PWR_ISO_BIT;
writel(val, ctl_addr);
val |= PWR_RST_B_BIT;
writel(val, ctl_addr);
ret = scpsys_sram_enable(scpd, ctl_addr);
if (ret < 0)
goto err_pwr_ack;
ret = scpsys_bus_protect_disable(scpd);
if (ret < 0)
goto err_pwr_ack;
return 0;
err_pwr_ack:
scpsys_clk_disable(scpd->clk, MAX_CLKS);
err_clk:
scpsys_regulator_disable(scpd);
dev_err(scp->dev, "Failed to power on domain %s\n", genpd->name);
return ret;
}
static int scpsys_power_off(struct generic_pm_domain *genpd)
{
struct scp_domain *scpd = container_of(genpd, struct scp_domain, genpd);
struct scp *scp = scpd->scp;
void __iomem *ctl_addr = scp->base + scpd->data->ctl_offs;
u32 val;
int ret, tmp;
ret = scpsys_bus_protect_enable(scpd);
if (ret < 0)
goto out;
ret = scpsys_sram_disable(scpd, ctl_addr);
if (ret < 0)
goto out;
/* subsys power off */
val = readl(ctl_addr);
val |= PWR_ISO_BIT;
writel(val, ctl_addr);
val &= ~PWR_RST_B_BIT;
writel(val, ctl_addr);
val |= PWR_CLK_DIS_BIT;
writel(val, ctl_addr);
val &= ~PWR_ON_BIT;
writel(val, ctl_addr);
val &= ~PWR_ON_2ND_BIT;
writel(val, ctl_addr);
/* wait until PWR_ACK = 0 */
ret = readx_poll_timeout(scpsys_domain_is_on, scpd, tmp, tmp == 0,
MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
if (ret < 0)
goto out;
scpsys_clk_disable(scpd->clk, MAX_CLKS);
ret = scpsys_regulator_disable(scpd);
if (ret < 0)
goto out;
return 0;
out:
dev_err(scp->dev, "Failed to power off domain %s\n", genpd->name);
return ret;
}
static void init_clks(struct platform_device *pdev, struct clk **clk)
{
int i;
for (i = CLK_NONE + 1; i < CLK_MAX; i++)
clk[i] = devm_clk_get(&pdev->dev, clk_names[i]);
}
static struct scp *init_scp(struct platform_device *pdev,
const struct scp_domain_data *scp_domain_data, int num,
const struct scp_ctrl_reg *scp_ctrl_reg,
bool bus_prot_reg_update)
{
struct genpd_onecell_data *pd_data;
struct resource *res;
int i, j;
struct scp *scp;
struct clk *clk[CLK_MAX];
scp = devm_kzalloc(&pdev->dev, sizeof(*scp), GFP_KERNEL);
if (!scp)
return ERR_PTR(-ENOMEM);
scp->ctrl_reg.pwr_sta_offs = scp_ctrl_reg->pwr_sta_offs;
scp->ctrl_reg.pwr_sta2nd_offs = scp_ctrl_reg->pwr_sta2nd_offs;
scp->bus_prot_reg_update = bus_prot_reg_update;
scp->dev = &pdev->dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
scp->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(scp->base))
return ERR_CAST(scp->base);
scp->domains = devm_kcalloc(&pdev->dev,
num, sizeof(*scp->domains), GFP_KERNEL);
if (!scp->domains)
return ERR_PTR(-ENOMEM);
pd_data = &scp->pd_data;
pd_data->domains = devm_kcalloc(&pdev->dev,
num, sizeof(*pd_data->domains), GFP_KERNEL);
if (!pd_data->domains)
return ERR_PTR(-ENOMEM);
scp->infracfg = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"infracfg");
if (IS_ERR(scp->infracfg)) {
dev_err(&pdev->dev, "Cannot find infracfg controller: %ld\n",
PTR_ERR(scp->infracfg));
return ERR_CAST(scp->infracfg);
}
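/*
* Each domain may name an optional supply after itself; -ENODEV from
* the regulator lookup simply means no supply was specified.
*/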
for (i = 0; i < num; i++) {
struct scp_domain *scpd = &scp->domains[i];
const struct scp_domain_data *data = &scp_domain_data[i];
scpd->supply = devm_regulator_get_optional(&pdev->dev, data->name);
if (IS_ERR(scpd->supply)) {
if (PTR_ERR(scpd->supply) == -ENODEV)
scpd->supply = NULL;
else
return ERR_CAST(scpd->supply);
}
}
pd_data->num_domains = num;
init_clks(pdev, clk);
for (i = 0; i < num; i++) {
struct scp_domain *scpd = &scp->domains[i];
struct generic_pm_domain *genpd = &scpd->genpd;
const struct scp_domain_data *data = &scp_domain_data[i];
pd_data->domains[i] = genpd;
scpd->scp = scp;
scpd->data = data;
for (j = 0; j < MAX_CLKS && data->clk_id[j]; j++) {
struct clk *c = clk[data->clk_id[j]];
if (IS_ERR(c)) {
dev_err(&pdev->dev, "%s: clk unavailable\n",
data->name);
return ERR_CAST(c);
}
scpd->clk[j] = c;
}
genpd->name = data->name;
genpd->power_off = scpsys_power_off;
genpd->power_on = scpsys_power_on;
if (MTK_SCPD_CAPS(scpd, MTK_SCPD_ACTIVE_WAKEUP))
genpd->flags |= GENPD_FLAG_ACTIVE_WAKEUP;
}
return scp;
}
static void mtk_register_power_domains(struct platform_device *pdev,
struct scp *scp, int num)
{
struct genpd_onecell_data *pd_data;
int i, ret;
for (i = 0; i < num; i++) {
struct scp_domain *scpd = &scp->domains[i];
struct generic_pm_domain *genpd = &scpd->genpd;
bool on;
/*
* Initially turn on all domains to make the domains usable
* with !CONFIG_PM and to get the hardware in sync with the
* software. The unused domains will be switched off during
* late_init time.
*/
on = !WARN_ON(genpd->power_on(genpd) < 0);
pm_genpd_init(genpd, NULL, !on);
}
/*
* We are not allowed to fail here since there is no way to unregister
* a power domain. Once registered above we have to keep the domains
* valid.
*/
pd_data = &scp->pd_data;
ret = of_genpd_add_provider_onecell(pdev->dev.of_node, pd_data);
if (ret)
dev_err(&pdev->dev, "Failed to add OF provider: %d\n", ret);
}
/*
* MT2701 power domain support
*/
static const struct scp_domain_data scp_domain_data_mt2701[] = {
[MT2701_POWER_DOMAIN_CONN] = {
.name = "conn",
.sta_mask = PWR_STATUS_CONN,
.ctl_offs = SPM_CONN_PWR_CON,
.bus_prot_mask = MT2701_TOP_AXI_PROT_EN_CONN_M |
MT2701_TOP_AXI_PROT_EN_CONN_S,
.clk_id = {CLK_NONE},
.caps = MTK_SCPD_ACTIVE_WAKEUP,
},
[MT2701_POWER_DOMAIN_DISP] = {
.name = "disp",
.sta_mask = PWR_STATUS_DISP,
.ctl_offs = SPM_DIS_PWR_CON,
.sram_pdn_bits = GENMASK(11, 8),
.clk_id = {CLK_MM},
.bus_prot_mask = MT2701_TOP_AXI_PROT_EN_MM_M0,
.caps = MTK_SCPD_ACTIVE_WAKEUP,
},
[MT2701_POWER_DOMAIN_MFG] = {
.name = "mfg",
.sta_mask = PWR_STATUS_MFG,
.ctl_offs = SPM_MFG_PWR_CON,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
.clk_id = {CLK_MFG},
.caps = MTK_SCPD_ACTIVE_WAKEUP,
},
[MT2701_POWER_DOMAIN_VDEC] = {
.name = "vdec",
.sta_mask = PWR_STATUS_VDEC,
.ctl_offs = SPM_VDE_PWR_CON,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
.clk_id = {CLK_MM},
.caps = MTK_SCPD_ACTIVE_WAKEUP,
},
[MT2701_POWER_DOMAIN_ISP] = {
.name = "isp",
.sta_mask = PWR_STATUS_ISP,
.ctl_offs = SPM_ISP_PWR_CON,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(13, 12),
.clk_id = {CLK_MM},
.caps = MTK_SCPD_ACTIVE_WAKEUP,
},
[MT2701_POWER_DOMAIN_BDP] = {
.name = "bdp",
.sta_mask = PWR_STATUS_BDP,
.ctl_offs = SPM_BDP_PWR_CON,
.sram_pdn_bits = GENMASK(11, 8),
.clk_id = {CLK_NONE},
.caps = MTK_SCPD_ACTIVE_WAKEUP,
},
[MT2701_POWER_DOMAIN_ETH] = {
.name = "eth",
.sta_mask = PWR_STATUS_ETH,
.ctl_offs = SPM_ETH_PWR_CON,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(15, 12),
.clk_id = {CLK_ETHIF},
.caps = MTK_SCPD_ACTIVE_WAKEUP,
},
[MT2701_POWER_DOMAIN_HIF] = {
.name = "hif",
.sta_mask = PWR_STATUS_HIF,
.ctl_offs = SPM_HIF_PWR_CON,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(15, 12),
.clk_id = {CLK_ETHIF},
.caps = MTK_SCPD_ACTIVE_WAKEUP,
},
[MT2701_POWER_DOMAIN_IFR_MSC] = {
.name = "ifr_msc",
.sta_mask = PWR_STATUS_IFR_MSC,
.ctl_offs = SPM_IFR_MSC_PWR_CON,
.clk_id = {CLK_NONE},
.caps = MTK_SCPD_ACTIVE_WAKEUP,
},
};
/*
* MT2712 power domain support
*/
static const struct scp_domain_data scp_domain_data_mt2712[] = {
[MT2712_POWER_DOMAIN_MM] = {
.name = "mm",
.sta_mask = PWR_STATUS_DISP,
.ctl_offs = SPM_DIS_PWR_CON,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
.clk_id = {CLK_MM},
.caps = MTK_SCPD_ACTIVE_WAKEUP,
},
[MT2712_POWER_DOMAIN_VDEC] = {
.name = "vdec",
.sta_mask = PWR_STATUS_VDEC,
.ctl_offs = SPM_VDE_PWR_CON,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
.clk_id = {CLK_MM, CLK_VDEC},
.caps = MTK_SCPD_ACTIVE_WAKEUP,
},
[MT2712_POWER_DOMAIN_VENC] = {
.name = "venc",
.sta_mask = PWR_STATUS_VENC,
.ctl_offs = SPM_VEN_PWR_CON,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(15, 12),
.clk_id = {CLK_MM, CLK_VENC, CLK_JPGDEC},
.caps = MTK_SCPD_ACTIVE_WAKEUP,
},
[MT2712_POWER_DOMAIN_ISP] = {
.name = "isp",
.sta_mask = PWR_STATUS_ISP,
.ctl_offs = SPM_ISP_PWR_CON,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(13, 12),
.clk_id = {CLK_MM},
.caps = MTK_SCPD_ACTIVE_WAKEUP,
},
[MT2712_POWER_DOMAIN_AUDIO] = {
.name = "audio",
.sta_mask = PWR_STATUS_AUDIO,
.ctl_offs = SPM_AUDIO_PWR_CON,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(15, 12),
.clk_id = {CLK_AUDIO},
.caps = MTK_SCPD_ACTIVE_WAKEUP,
},
[MT2712_POWER_DOMAIN_USB] = {
.name = "usb",
.sta_mask = PWR_STATUS_USB,
.ctl_offs = SPM_USB_PWR_CON,
.sram_pdn_bits = GENMASK(10, 8),
.sram_pdn_ack_bits = GENMASK(14, 12),
.clk_id = {CLK_NONE},
.caps = MTK_SCPD_ACTIVE_WAKEUP,
},
[MT2712_POWER_DOMAIN_USB2] = {
.name = "usb2",
.sta_mask = PWR_STATUS_USB2,
.ctl_offs = SPM_USB2_PWR_CON,
.sram_pdn_bits = GENMASK(10, 8),
.sram_pdn_ack_bits = GENMASK(14, 12),
.clk_id = {CLK_NONE},
.caps = MTK_SCPD_ACTIVE_WAKEUP,
},
[MT2712_POWER_DOMAIN_MFG] = {
.name = "mfg",
.sta_mask = PWR_STATUS_MFG,
.ctl_offs = SPM_MFG_PWR_CON,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(16, 16),
.clk_id = {CLK_MFG},
.bus_prot_mask = BIT(14) | BIT(21) | BIT(23),
.caps = MTK_SCPD_ACTIVE_WAKEUP,
},
[MT2712_POWER_DOMAIN_MFG_SC1] = {
.name = "mfg_sc1",
.sta_mask = BIT(22),
.ctl_offs = 0x02c0,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(16, 16),
.clk_id = {CLK_NONE},
.caps = MTK_SCPD_ACTIVE_WAKEUP,
},
[MT2712_POWER_DOMAIN_MFG_SC2] = {
.name = "mfg_sc2",
.sta_mask = BIT(23),
.ctl_offs = 0x02c4,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(16, 16),
.clk_id = {CLK_NONE},
.caps = MTK_SCPD_ACTIVE_WAKEUP,
},
[MT2712_POWER_DOMAIN_MFG_SC3] = {
.name = "mfg_sc3",
.sta_mask = BIT(30),
.ctl_offs = 0x01f8,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(16, 16),
.clk_id = {CLK_NONE},
.caps = MTK_SCPD_ACTIVE_WAKEUP,
},
};
static const struct scp_subdomain scp_subdomain_mt2712[] = {
{MT2712_POWER_DOMAIN_MM, MT2712_POWER_DOMAIN_VDEC},
{MT2712_POWER_DOMAIN_MM, MT2712_POWER_DOMAIN_VENC},
{MT2712_POWER_DOMAIN_MM, MT2712_POWER_DOMAIN_ISP},
{MT2712_POWER_DOMAIN_MFG, MT2712_POWER_DOMAIN_MFG_SC1},
{MT2712_POWER_DOMAIN_MFG_SC1, MT2712_POWER_DOMAIN_MFG_SC2},
{MT2712_POWER_DOMAIN_MFG_SC2, MT2712_POWER_DOMAIN_MFG_SC3},
};
/*
* MT6797 power domain support
*/
static const struct scp_domain_data scp_domain_data_mt6797[] = {
[MT6797_POWER_DOMAIN_VDEC] = {
.name = "vdec",
.sta_mask = BIT(7),
.ctl_offs = 0x300,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
.clk_id = {CLK_VDEC},
},
[MT6797_POWER_DOMAIN_VENC] = {
.name = "venc",
.sta_mask = BIT(21),
.ctl_offs = 0x304,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(15, 12),
.clk_id = {CLK_NONE},
},
[MT6797_POWER_DOMAIN_ISP] = {
.name = "isp",
.sta_mask = BIT(5),
.ctl_offs = 0x308,
.sram_pdn_bits = GENMASK(9, 8),
.sram_pdn_ack_bits = GENMASK(13, 12),
.clk_id = {CLK_NONE},
},
[MT6797_POWER_DOMAIN_MM] = {
.name = "mm",
.sta_mask = BIT(3),
.ctl_offs = 0x30C,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
.clk_id = {CLK_MM},
.bus_prot_mask = (BIT(1) | BIT(2)),
},
[MT6797_POWER_DOMAIN_AUDIO] = {
.name = "audio",
.sta_mask = BIT(24),
.ctl_offs = 0x314,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(15, 12),
.clk_id = {CLK_NONE},
},
[MT6797_POWER_DOMAIN_MFG_ASYNC] = {
.name = "mfg_async",
.sta_mask = BIT(13),
.ctl_offs = 0x334,
.sram_pdn_bits = 0,
.sram_pdn_ack_bits = 0,
.clk_id = {CLK_MFG},
},
[MT6797_POWER_DOMAIN_MJC] = {
.name = "mjc",
.sta_mask = BIT(20),
.ctl_offs = 0x310,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
.clk_id = {CLK_NONE},
},
};
#define SPM_PWR_STATUS_MT6797 0x0180
#define SPM_PWR_STATUS_2ND_MT6797 0x0184
static const struct scp_subdomain scp_subdomain_mt6797[] = {
{MT6797_POWER_DOMAIN_MM, MT6797_POWER_DOMAIN_VDEC},
{MT6797_POWER_DOMAIN_MM, MT6797_POWER_DOMAIN_ISP},
{MT6797_POWER_DOMAIN_MM, MT6797_POWER_DOMAIN_VENC},
{MT6797_POWER_DOMAIN_MM, MT6797_POWER_DOMAIN_MJC},
};
/*
* MT7622 power domain support
*/
static const struct scp_domain_data scp_domain_data_mt7622[] = {
[MT7622_POWER_DOMAIN_ETHSYS] = {
.name = "ethsys",
.sta_mask = PWR_STATUS_ETHSYS,
.ctl_offs = SPM_ETHSYS_PWR_CON,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(15, 12),
.clk_id = {CLK_NONE},
.bus_prot_mask = MT7622_TOP_AXI_PROT_EN_ETHSYS,
.caps = MTK_SCPD_ACTIVE_WAKEUP,
},
[MT7622_POWER_DOMAIN_HIF0] = {
.name = "hif0",
.sta_mask = PWR_STATUS_HIF0,
.ctl_offs = SPM_HIF0_PWR_CON,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(15, 12),
.clk_id = {CLK_HIFSEL},
.bus_prot_mask = MT7622_TOP_AXI_PROT_EN_HIF0,
.caps = MTK_SCPD_ACTIVE_WAKEUP,
},
[MT7622_POWER_DOMAIN_HIF1] = {
.name = "hif1",
.sta_mask = PWR_STATUS_HIF1,
.ctl_offs = SPM_HIF1_PWR_CON,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(15, 12),
.clk_id = {CLK_HIFSEL},
.bus_prot_mask = MT7622_TOP_AXI_PROT_EN_HIF1,
.caps = MTK_SCPD_ACTIVE_WAKEUP,
},
[MT7622_POWER_DOMAIN_WB] = {
.name = "wb",
.sta_mask = PWR_STATUS_WB,
.ctl_offs = SPM_WB_PWR_CON,
.sram_pdn_bits = 0,
.sram_pdn_ack_bits = 0,
.clk_id = {CLK_NONE},
.bus_prot_mask = MT7622_TOP_AXI_PROT_EN_WB,
.caps = MTK_SCPD_ACTIVE_WAKEUP | MTK_SCPD_FWAIT_SRAM,
},
};
/*
* MT7623A power domain support
*/
static const struct scp_domain_data scp_domain_data_mt7623a[] = {
[MT7623A_POWER_DOMAIN_CONN] = {
.name = "conn",
.sta_mask = PWR_STATUS_CONN,
.ctl_offs = SPM_CONN_PWR_CON,
.bus_prot_mask = MT2701_TOP_AXI_PROT_EN_CONN_M |
MT2701_TOP_AXI_PROT_EN_CONN_S,
.clk_id = {CLK_NONE},
.caps = MTK_SCPD_ACTIVE_WAKEUP,
},
[MT7623A_POWER_DOMAIN_ETH] = {
.name = "eth",
.sta_mask = PWR_STATUS_ETH,
.ctl_offs = SPM_ETH_PWR_CON,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(15, 12),
.clk_id = {CLK_ETHIF},
.caps = MTK_SCPD_ACTIVE_WAKEUP,
},
[MT7623A_POWER_DOMAIN_HIF] = {
.name = "hif",
.sta_mask = PWR_STATUS_HIF,
.ctl_offs = SPM_HIF_PWR_CON,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(15, 12),
.clk_id = {CLK_ETHIF},
.caps = MTK_SCPD_ACTIVE_WAKEUP,
},
[MT7623A_POWER_DOMAIN_IFR_MSC] = {
.name = "ifr_msc",
.sta_mask = PWR_STATUS_IFR_MSC,
.ctl_offs = SPM_IFR_MSC_PWR_CON,
.clk_id = {CLK_NONE},
.caps = MTK_SCPD_ACTIVE_WAKEUP,
},
};
/*
* MT8173 power domain support
*/
static const struct scp_domain_data scp_domain_data_mt8173[] = {
[MT8173_POWER_DOMAIN_VDEC] = {
.name = "vdec",
.sta_mask = PWR_STATUS_VDEC,
.ctl_offs = SPM_VDE_PWR_CON,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
.clk_id = {CLK_MM},
},
[MT8173_POWER_DOMAIN_VENC] = {
.name = "venc",
.sta_mask = PWR_STATUS_VENC,
.ctl_offs = SPM_VEN_PWR_CON,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(15, 12),
.clk_id = {CLK_MM, CLK_VENC},
},
[MT8173_POWER_DOMAIN_ISP] = {
.name = "isp",
.sta_mask = PWR_STATUS_ISP,
.ctl_offs = SPM_ISP_PWR_CON,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(13, 12),
.clk_id = {CLK_MM},
},
[MT8173_POWER_DOMAIN_MM] = {
.name = "mm",
.sta_mask = PWR_STATUS_DISP,
.ctl_offs = SPM_DIS_PWR_CON,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
.clk_id = {CLK_MM},
.bus_prot_mask = MT8173_TOP_AXI_PROT_EN_MM_M0 |
MT8173_TOP_AXI_PROT_EN_MM_M1,
},
[MT8173_POWER_DOMAIN_VENC_LT] = {
.name = "venc_lt",
.sta_mask = PWR_STATUS_VENC_LT,
.ctl_offs = SPM_VEN2_PWR_CON,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(15, 12),
.clk_id = {CLK_MM, CLK_VENC_LT},
},
[MT8173_POWER_DOMAIN_AUDIO] = {
.name = "audio",
.sta_mask = PWR_STATUS_AUDIO,
.ctl_offs = SPM_AUDIO_PWR_CON,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(15, 12),
.clk_id = {CLK_NONE},
},
[MT8173_POWER_DOMAIN_USB] = {
.name = "usb",
.sta_mask = PWR_STATUS_USB,
.ctl_offs = SPM_USB_PWR_CON,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(15, 12),
.clk_id = {CLK_NONE},
.caps = MTK_SCPD_ACTIVE_WAKEUP,
},
[MT8173_POWER_DOMAIN_MFG_ASYNC] = {
.name = "mfg_async",
.sta_mask = PWR_STATUS_MFG_ASYNC,
.ctl_offs = SPM_MFG_ASYNC_PWR_CON,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = 0,
.clk_id = {CLK_MFG},
},
[MT8173_POWER_DOMAIN_MFG_2D] = {
.name = "mfg_2d",
.sta_mask = PWR_STATUS_MFG_2D,
.ctl_offs = SPM_MFG_2D_PWR_CON,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(13, 12),
.clk_id = {CLK_NONE},
},
[MT8173_POWER_DOMAIN_MFG] = {
.name = "mfg",
.sta_mask = PWR_STATUS_MFG,
.ctl_offs = SPM_MFG_PWR_CON,
.sram_pdn_bits = GENMASK(13, 8),
.sram_pdn_ack_bits = GENMASK(21, 16),
.clk_id = {CLK_NONE},
.bus_prot_mask = MT8173_TOP_AXI_PROT_EN_MFG_S |
MT8173_TOP_AXI_PROT_EN_MFG_M0 |
MT8173_TOP_AXI_PROT_EN_MFG_M1 |
MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT,
},
};
static const struct scp_subdomain scp_subdomain_mt8173[] = {
{MT8173_POWER_DOMAIN_MFG_ASYNC, MT8173_POWER_DOMAIN_MFG_2D},
{MT8173_POWER_DOMAIN_MFG_2D, MT8173_POWER_DOMAIN_MFG},
};
static const struct scp_soc_data mt2701_data = {
.domains = scp_domain_data_mt2701,
.num_domains = ARRAY_SIZE(scp_domain_data_mt2701),
.regs = {
.pwr_sta_offs = SPM_PWR_STATUS,
.pwr_sta2nd_offs = SPM_PWR_STATUS_2ND
},
.bus_prot_reg_update = true,
};
static const struct scp_soc_data mt2712_data = {
.domains = scp_domain_data_mt2712,
.num_domains = ARRAY_SIZE(scp_domain_data_mt2712),
.subdomains = scp_subdomain_mt2712,
.num_subdomains = ARRAY_SIZE(scp_subdomain_mt2712),
.regs = {
.pwr_sta_offs = SPM_PWR_STATUS,
.pwr_sta2nd_offs = SPM_PWR_STATUS_2ND
},
.bus_prot_reg_update = false,
};
static const struct scp_soc_data mt6797_data = {
.domains = scp_domain_data_mt6797,
.num_domains = ARRAY_SIZE(scp_domain_data_mt6797),
.subdomains = scp_subdomain_mt6797,
.num_subdomains = ARRAY_SIZE(scp_subdomain_mt6797),
.regs = {
.pwr_sta_offs = SPM_PWR_STATUS_MT6797,
.pwr_sta2nd_offs = SPM_PWR_STATUS_2ND_MT6797
},
.bus_prot_reg_update = true,
};
static const struct scp_soc_data mt7622_data = {
.domains = scp_domain_data_mt7622,
.num_domains = ARRAY_SIZE(scp_domain_data_mt7622),
.regs = {
.pwr_sta_offs = SPM_PWR_STATUS,
.pwr_sta2nd_offs = SPM_PWR_STATUS_2ND
},
.bus_prot_reg_update = true,
};
static const struct scp_soc_data mt7623a_data = {
.domains = scp_domain_data_mt7623a,
.num_domains = ARRAY_SIZE(scp_domain_data_mt7623a),
.regs = {
.pwr_sta_offs = SPM_PWR_STATUS,
.pwr_sta2nd_offs = SPM_PWR_STATUS_2ND
},
.bus_prot_reg_update = true,
};
static const struct scp_soc_data mt8173_data = {
.domains = scp_domain_data_mt8173,
.num_domains = ARRAY_SIZE(scp_domain_data_mt8173),
.subdomains = scp_subdomain_mt8173,
.num_subdomains = ARRAY_SIZE(scp_subdomain_mt8173),
.regs = {
.pwr_sta_offs = SPM_PWR_STATUS,
.pwr_sta2nd_offs = SPM_PWR_STATUS_2ND
},
.bus_prot_reg_update = true,
};
/*
* scpsys driver init
*/
static const struct of_device_id of_scpsys_match_tbl[] = {
{
.compatible = "mediatek,mt2701-scpsys",
.data = &mt2701_data,
}, {
.compatible = "mediatek,mt2712-scpsys",
.data = &mt2712_data,
}, {
.compatible = "mediatek,mt6797-scpsys",
.data = &mt6797_data,
}, {
.compatible = "mediatek,mt7622-scpsys",
.data = &mt7622_data,
}, {
.compatible = "mediatek,mt7623a-scpsys",
.data = &mt7623a_data,
}, {
.compatible = "mediatek,mt8173-scpsys",
.data = &mt8173_data,
}, {
/* sentinel */
}
};
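/*
 * Probe: look up the SoC-specific domain data from the match table,
 * initialize and register the power domains, then describe the
 * parent/child relations between them via pm_genpd_add_subdomain().
 */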
static int scpsys_probe(struct platform_device *pdev)
{
const struct scp_subdomain *sd;
const struct scp_soc_data *soc;
struct scp *scp;
struct genpd_onecell_data *pd_data;
int i, ret;
soc = of_device_get_match_data(&pdev->dev);
scp = init_scp(pdev, soc->domains, soc->num_domains, &soc->regs,
soc->bus_prot_reg_update);
if (IS_ERR(scp))
return PTR_ERR(scp);
mtk_register_power_domains(pdev, scp, soc->num_domains);
pd_data = &scp->pd_data;
for (i = 0, sd = soc->subdomains; i < soc->num_subdomains; i++, sd++) {
ret = pm_genpd_add_subdomain(pd_data->domains[sd->origin],
pd_data->domains[sd->subdomain]);
if (ret && IS_ENABLED(CONFIG_PM))
dev_err(&pdev->dev, "Failed to add subdomain: %d\n",
ret);
}
return 0;
}
static struct platform_driver scpsys_drv = {
.probe = scpsys_probe,
.driver = {
.name = "mtk-scpsys",
.suppress_bind_attrs = true,
.owner = THIS_MODULE,
.of_match_table = of_scpsys_match_tbl,
},
};
builtin_platform_driver(scpsys_drv);
| linux-master | drivers/pmdomain/mediatek/mtk-scpsys.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2020 Collabora Ltd.
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_clk.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/soc/mediatek/infracfg.h>
#include "mt6795-pm-domains.h"
#include "mt8167-pm-domains.h"
#include "mt8173-pm-domains.h"
#include "mt8183-pm-domains.h"
#include "mt8186-pm-domains.h"
#include "mt8188-pm-domains.h"
#include "mt8192-pm-domains.h"
#include "mt8195-pm-domains.h"
#define MTK_POLL_DELAY_US 10
#define MTK_POLL_TIMEOUT USEC_PER_SEC
#define PWR_RST_B_BIT BIT(0)
#define PWR_ISO_BIT BIT(1)
#define PWR_ON_BIT BIT(2)
#define PWR_ON_2ND_BIT BIT(3)
#define PWR_CLK_DIS_BIT BIT(4)
#define PWR_SRAM_CLKISO_BIT BIT(5)
#define PWR_SRAM_ISOINT_B_BIT BIT(6)
struct scpsys_domain {
struct generic_pm_domain genpd;
const struct scpsys_domain_data *data;
struct scpsys *scpsys;
int num_clks;
struct clk_bulk_data *clks;
int num_subsys_clks;
struct clk_bulk_data *subsys_clks;
struct regmap *infracfg;
struct regmap *smi;
struct regulator *supply;
};
struct scpsys {
struct device *dev;
struct regmap *base;
const struct scpsys_soc_data *soc_data;
struct genpd_onecell_data pd_data;
struct generic_pm_domain *domains[];
};
#define to_scpsys_domain(gpd) container_of(gpd, struct scpsys_domain, genpd)
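/*
 * Both the primary and the secondary power status bits must be set
 * before a domain is considered to be powered on.
 */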
static bool scpsys_domain_is_on(struct scpsys_domain *pd)
{
struct scpsys *scpsys = pd->scpsys;
u32 status, status2;
regmap_read(scpsys->base, pd->data->pwr_sta_offs, &status);
status &= pd->data->sta_mask;
regmap_read(scpsys->base, pd->data->pwr_sta2nd_offs, &status2);
status2 &= pd->data->sta_mask;
/* A domain is on when both status bits are set. */
return status && status2;
}
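/*
 * Power up the domain SRAM: clear the SRAM_PDN bits and wait for the
 * corresponding acknowledge bits to clear. Domains with the SRAM_ISO
 * capability additionally need the isolation sequence below.
 */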
static int scpsys_sram_enable(struct scpsys_domain *pd)
{
u32 pdn_ack = pd->data->sram_pdn_ack_bits;
struct scpsys *scpsys = pd->scpsys;
unsigned int tmp;
int ret;
regmap_clear_bits(scpsys->base, pd->data->ctl_offs, pd->data->sram_pdn_bits);
/* Wait until all SRAM_PDN_ACK bits are cleared (SRAM powered up) */
ret = regmap_read_poll_timeout(scpsys->base, pd->data->ctl_offs, tmp,
(tmp & pdn_ack) == 0, MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
if (ret < 0)
return ret;
if (MTK_SCPD_CAPS(pd, MTK_SCPD_SRAM_ISO)) {
regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_SRAM_ISOINT_B_BIT);
udelay(1);
regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_SRAM_CLKISO_BIT);
}
return 0;
}
static int scpsys_sram_disable(struct scpsys_domain *pd)
{
u32 pdn_ack = pd->data->sram_pdn_ack_bits;
struct scpsys *scpsys = pd->scpsys;
unsigned int tmp;
if (MTK_SCPD_CAPS(pd, MTK_SCPD_SRAM_ISO)) {
regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_SRAM_CLKISO_BIT);
udelay(1);
regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_SRAM_ISOINT_B_BIT);
}
regmap_set_bits(scpsys->base, pd->data->ctl_offs, pd->data->sram_pdn_bits);
/* Wait until all SRAM_PDN_ACK bits are set (SRAM powered down) */
return regmap_read_poll_timeout(scpsys->base, pd->data->ctl_offs, tmp,
(tmp & pdn_ack) == pdn_ack, MTK_POLL_DELAY_US,
MTK_POLL_TIMEOUT);
}
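/*
 * Bus protection: before a domain is powered down, its bus interfaces are
 * gated off through the infracfg/smi regmaps. The helpers below set or
 * clear the protection bits and poll the status register until the
 * hardware acknowledges the change.
 */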
static int _scpsys_bus_protect_enable(const struct scpsys_bus_prot_data *bpd, struct regmap *regmap)
{
int i, ret;
for (i = 0; i < SPM_MAX_BUS_PROT_DATA; i++) {
u32 val, mask = bpd[i].bus_prot_mask;
if (!mask)
break;
if (bpd[i].bus_prot_reg_update)
regmap_set_bits(regmap, bpd[i].bus_prot_set, mask);
else
regmap_write(regmap, bpd[i].bus_prot_set, mask);
ret = regmap_read_poll_timeout(regmap, bpd[i].bus_prot_sta,
val, (val & mask) == mask,
MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
if (ret)
return ret;
}
return 0;
}
static int scpsys_bus_protect_enable(struct scpsys_domain *pd)
{
int ret;
ret = _scpsys_bus_protect_enable(pd->data->bp_infracfg, pd->infracfg);
if (ret)
return ret;
return _scpsys_bus_protect_enable(pd->data->bp_smi, pd->smi);
}
static int _scpsys_bus_protect_disable(const struct scpsys_bus_prot_data *bpd,
struct regmap *regmap)
{
int i, ret;
for (i = SPM_MAX_BUS_PROT_DATA - 1; i >= 0; i--) {
u32 val, mask = bpd[i].bus_prot_mask;
if (!mask)
continue;
if (bpd[i].bus_prot_reg_update)
regmap_clear_bits(regmap, bpd[i].bus_prot_clr, mask);
else
regmap_write(regmap, bpd[i].bus_prot_clr, mask);
if (bpd[i].ignore_clr_ack)
continue;
ret = regmap_read_poll_timeout(regmap, bpd[i].bus_prot_sta,
val, !(val & mask),
MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
if (ret)
return ret;
}
return 0;
}
static int scpsys_bus_protect_disable(struct scpsys_domain *pd)
{
int ret;
ret = _scpsys_bus_protect_disable(pd->data->bp_smi, pd->smi);
if (ret)
return ret;
return _scpsys_bus_protect_disable(pd->data->bp_infracfg, pd->infracfg);
}
static int scpsys_regulator_enable(struct regulator *supply)
{
return supply ? regulator_enable(supply) : 0;
}
static int scpsys_regulator_disable(struct regulator *supply)
{
return supply ? regulator_disable(supply) : 0;
}
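/*
 * Power-up sequence: enable the supply and basic clocks, request power on
 * and wait for the acknowledge, release clock gating/isolation/reset,
 * enable the subsystem clocks, power up the SRAM and finally lift the bus
 * protection. The power-off path below reverses these steps.
 */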
static int scpsys_power_on(struct generic_pm_domain *genpd)
{
struct scpsys_domain *pd = container_of(genpd, struct scpsys_domain, genpd);
struct scpsys *scpsys = pd->scpsys;
bool tmp;
int ret;
ret = scpsys_regulator_enable(pd->supply);
if (ret)
return ret;
ret = clk_bulk_prepare_enable(pd->num_clks, pd->clks);
if (ret)
goto err_reg;
if (pd->data->ext_buck_iso_offs && MTK_SCPD_CAPS(pd, MTK_SCPD_EXT_BUCK_ISO))
regmap_clear_bits(scpsys->base, pd->data->ext_buck_iso_offs,
pd->data->ext_buck_iso_mask);
/* subsys power on */
regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_BIT);
regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_2ND_BIT);
/* wait until PWR_ACK = 1 */
ret = readx_poll_timeout(scpsys_domain_is_on, pd, tmp, tmp, MTK_POLL_DELAY_US,
MTK_POLL_TIMEOUT);
if (ret < 0)
goto err_pwr_ack;
regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_CLK_DIS_BIT);
regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_ISO_BIT);
regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RST_B_BIT);
ret = clk_bulk_prepare_enable(pd->num_subsys_clks, pd->subsys_clks);
if (ret)
goto err_pwr_ack;
ret = scpsys_sram_enable(pd);
if (ret < 0)
goto err_disable_subsys_clks;
ret = scpsys_bus_protect_disable(pd);
if (ret < 0)
goto err_disable_sram;
return 0;
err_disable_sram:
scpsys_sram_disable(pd);
err_disable_subsys_clks:
clk_bulk_disable_unprepare(pd->num_subsys_clks, pd->subsys_clks);
err_pwr_ack:
clk_bulk_disable_unprepare(pd->num_clks, pd->clks);
err_reg:
scpsys_regulator_disable(pd->supply);
return ret;
}
static int scpsys_power_off(struct generic_pm_domain *genpd)
{
struct scpsys_domain *pd = container_of(genpd, struct scpsys_domain, genpd);
struct scpsys *scpsys = pd->scpsys;
bool tmp;
int ret;
ret = scpsys_bus_protect_enable(pd);
if (ret < 0)
return ret;
ret = scpsys_sram_disable(pd);
if (ret < 0)
return ret;
if (pd->data->ext_buck_iso_offs && MTK_SCPD_CAPS(pd, MTK_SCPD_EXT_BUCK_ISO))
regmap_set_bits(scpsys->base, pd->data->ext_buck_iso_offs,
pd->data->ext_buck_iso_mask);
clk_bulk_disable_unprepare(pd->num_subsys_clks, pd->subsys_clks);
/* subsys power off */
regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_ISO_BIT);
regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_CLK_DIS_BIT);
regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RST_B_BIT);
regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_2ND_BIT);
regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_BIT);
/* wait until PWR_ACK = 0 */
ret = readx_poll_timeout(scpsys_domain_is_on, pd, tmp, !tmp, MTK_POLL_DELAY_US,
MTK_POLL_TIMEOUT);
if (ret < 0)
return ret;
clk_bulk_disable_unprepare(pd->num_clks, pd->clks);
scpsys_regulator_disable(pd->supply);
return 0;
}
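/*
 * Parse one power-domain node from the device tree, look up its domain
 * data by the "reg" index, acquire the optional supply, regmaps and
 * clocks, power the domain on (unless it is a default-off domain) and
 * register it as a generic PM domain.
 */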
static struct generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_node *node)
{
const struct scpsys_domain_data *domain_data;
struct scpsys_domain *pd;
struct device_node *root_node = scpsys->dev->of_node;
struct device_node *smi_node;
struct property *prop;
const char *clk_name;
int i, ret, num_clks;
struct clk *clk;
int clk_ind = 0;
u32 id;
ret = of_property_read_u32(node, "reg", &id);
if (ret) {
dev_err(scpsys->dev, "%pOF: failed to retrieve domain id from reg: %d\n",
node, ret);
return ERR_PTR(-EINVAL);
}
if (id >= scpsys->soc_data->num_domains) {
dev_err(scpsys->dev, "%pOF: invalid domain id %d\n", node, id);
return ERR_PTR(-EINVAL);
}
domain_data = &scpsys->soc_data->domains_data[id];
if (domain_data->sta_mask == 0) {
dev_err(scpsys->dev, "%pOF: undefined domain id %d\n", node, id);
return ERR_PTR(-EINVAL);
}
pd = devm_kzalloc(scpsys->dev, sizeof(*pd), GFP_KERNEL);
if (!pd)
return ERR_PTR(-ENOMEM);
pd->data = domain_data;
pd->scpsys = scpsys;
if (MTK_SCPD_CAPS(pd, MTK_SCPD_DOMAIN_SUPPLY)) {
/*
* Find the regulator in the current power domain node.
* devm_regulator_get() looks for the regulator in a node and its
* child nodes, so temporarily point of_node at the power domain
* node and restore the original node once the regulator has been
* acquired.
*/
scpsys->dev->of_node = node;
pd->supply = devm_regulator_get(scpsys->dev, "domain");
scpsys->dev->of_node = root_node;
if (IS_ERR(pd->supply)) {
dev_err_probe(scpsys->dev, PTR_ERR(pd->supply),
"%pOF: failed to get power supply.\n",
node);
return ERR_CAST(pd->supply);
}
}
pd->infracfg = syscon_regmap_lookup_by_phandle_optional(node, "mediatek,infracfg");
if (IS_ERR(pd->infracfg))
return ERR_CAST(pd->infracfg);
smi_node = of_parse_phandle(node, "mediatek,smi", 0);
if (smi_node) {
pd->smi = device_node_to_regmap(smi_node);
of_node_put(smi_node);
if (IS_ERR(pd->smi))
return ERR_CAST(pd->smi);
}
num_clks = of_clk_get_parent_count(node);
if (num_clks > 0) {
/* Calculate number of subsys_clks */
of_property_for_each_string(node, "clock-names", prop, clk_name) {
char *subsys;
subsys = strchr(clk_name, '-');
if (subsys)
pd->num_subsys_clks++;
else
pd->num_clks++;
}
pd->clks = devm_kcalloc(scpsys->dev, pd->num_clks, sizeof(*pd->clks), GFP_KERNEL);
if (!pd->clks)
return ERR_PTR(-ENOMEM);
pd->subsys_clks = devm_kcalloc(scpsys->dev, pd->num_subsys_clks,
sizeof(*pd->subsys_clks), GFP_KERNEL);
if (!pd->subsys_clks)
return ERR_PTR(-ENOMEM);
}
for (i = 0; i < pd->num_clks; i++) {
clk = of_clk_get(node, i);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
dev_err_probe(scpsys->dev, ret,
"%pOF: failed to get clk at index %d\n", node, i);
goto err_put_clocks;
}
pd->clks[clk_ind++].clk = clk;
}
for (i = 0; i < pd->num_subsys_clks; i++) {
clk = of_clk_get(node, i + clk_ind);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
dev_err_probe(scpsys->dev, ret,
"%pOF: failed to get clk at index %d\n", node,
i + clk_ind);
goto err_put_subsys_clocks;
}
pd->subsys_clks[i].clk = clk;
}
/*
* Initially turn on all domains to make them usable with !CONFIG_PM
* and to get the hardware in sync with the software. Unused domains
* will be switched off at late init.
*/
if (MTK_SCPD_CAPS(pd, MTK_SCPD_KEEP_DEFAULT_OFF)) {
if (scpsys_domain_is_on(pd))
dev_warn(scpsys->dev,
"%pOF: A default off power domain has been ON\n", node);
} else {
ret = scpsys_power_on(&pd->genpd);
if (ret < 0) {
dev_err(scpsys->dev, "%pOF: failed to power on domain: %d\n", node, ret);
goto err_put_subsys_clocks;
}
if (MTK_SCPD_CAPS(pd, MTK_SCPD_ALWAYS_ON))
pd->genpd.flags |= GENPD_FLAG_ALWAYS_ON;
}
if (scpsys->domains[id]) {
ret = -EINVAL;
dev_err(scpsys->dev,
"power domain with id %d already exists, check your device-tree\n", id);
goto err_put_subsys_clocks;
}
if (!pd->data->name)
pd->genpd.name = node->name;
else
pd->genpd.name = pd->data->name;
pd->genpd.power_off = scpsys_power_off;
pd->genpd.power_on = scpsys_power_on;
if (MTK_SCPD_CAPS(pd, MTK_SCPD_ACTIVE_WAKEUP))
pd->genpd.flags |= GENPD_FLAG_ACTIVE_WAKEUP;
if (MTK_SCPD_CAPS(pd, MTK_SCPD_KEEP_DEFAULT_OFF))
pm_genpd_init(&pd->genpd, NULL, true);
else
pm_genpd_init(&pd->genpd, NULL, false);
scpsys->domains[id] = &pd->genpd;
return scpsys->pd_data.domains[id];
err_put_subsys_clocks:
clk_bulk_put(pd->num_subsys_clks, pd->subsys_clks);
err_put_clocks:
clk_bulk_put(pd->num_clks, pd->clks);
return ERR_PTR(ret);
}
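/*
 * Walk the child nodes of @parent, register each one as a power domain
 * and link it as a genpd subdomain of its parent. Recurses so that
 * arbitrarily deep hierarchies described in the device tree are supported.
 */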
static int scpsys_add_subdomain(struct scpsys *scpsys, struct device_node *parent)
{
struct generic_pm_domain *child_pd, *parent_pd;
struct device_node *child;
int ret;
for_each_child_of_node(parent, child) {
u32 id;
ret = of_property_read_u32(parent, "reg", &id);
if (ret) {
dev_err(scpsys->dev, "%pOF: failed to get parent domain id\n", child);
goto err_put_node;
}
if (!scpsys->pd_data.domains[id]) {
ret = -EINVAL;
dev_err(scpsys->dev, "power domain with id %d does not exist\n", id);
goto err_put_node;
}
parent_pd = scpsys->pd_data.domains[id];
child_pd = scpsys_add_one_domain(scpsys, child);
if (IS_ERR(child_pd)) {
ret = PTR_ERR(child_pd);
dev_err_probe(scpsys->dev, ret, "%pOF: failed to get child domain id\n",
child);
goto err_put_node;
}
ret = pm_genpd_add_subdomain(parent_pd, child_pd);
if (ret) {
dev_err(scpsys->dev, "failed to add %s subdomain to parent %s\n",
child_pd->name, parent_pd->name);
goto err_put_node;
} else {
dev_dbg(scpsys->dev, "%s add subdomain: %s\n", parent_pd->name,
child_pd->name);
}
/* recursive call to add all subdomains */
ret = scpsys_add_subdomain(scpsys, child);
if (ret)
goto err_put_node;
}
return 0;
err_put_node:
of_node_put(child);
return ret;
}
static void scpsys_remove_one_domain(struct scpsys_domain *pd)
{
int ret;
if (scpsys_domain_is_on(pd))
scpsys_power_off(&pd->genpd);
/*
* This is only called from the cleanup path, so just complain via
* dev_err() instead of propagating another error on top of the
* original one.
*/
ret = pm_genpd_remove(&pd->genpd);
if (ret < 0)
dev_err(pd->scpsys->dev,
"failed to remove domain '%s' : %d - state may be inconsistent\n",
pd->genpd.name, ret);
clk_bulk_put(pd->num_clks, pd->clks);
clk_bulk_put(pd->num_subsys_clks, pd->subsys_clks);
}
static void scpsys_domain_cleanup(struct scpsys *scpsys)
{
struct generic_pm_domain *genpd;
struct scpsys_domain *pd;
int i;
for (i = scpsys->pd_data.num_domains - 1; i >= 0; i--) {
genpd = scpsys->pd_data.domains[i];
if (genpd) {
pd = to_scpsys_domain(genpd);
scpsys_remove_one_domain(pd);
}
}
}
static const struct of_device_id scpsys_of_match[] = {
{
.compatible = "mediatek,mt6795-power-controller",
.data = &mt6795_scpsys_data,
},
{
.compatible = "mediatek,mt8167-power-controller",
.data = &mt8167_scpsys_data,
},
{
.compatible = "mediatek,mt8173-power-controller",
.data = &mt8173_scpsys_data,
},
{
.compatible = "mediatek,mt8183-power-controller",
.data = &mt8183_scpsys_data,
},
{
.compatible = "mediatek,mt8186-power-controller",
.data = &mt8186_scpsys_data,
},
{
.compatible = "mediatek,mt8188-power-controller",
.data = &mt8188_scpsys_data,
},
{
.compatible = "mediatek,mt8192-power-controller",
.data = &mt8192_scpsys_data,
},
{
.compatible = "mediatek,mt8195-power-controller",
.data = &mt8195_scpsys_data,
},
{ }
};
static int scpsys_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
const struct scpsys_soc_data *soc;
struct device_node *node;
struct device *parent;
struct scpsys *scpsys;
int ret;
soc = of_device_get_match_data(&pdev->dev);
if (!soc) {
dev_err(&pdev->dev, "no power controller data\n");
return -EINVAL;
}
scpsys = devm_kzalloc(dev, struct_size(scpsys, domains, soc->num_domains), GFP_KERNEL);
if (!scpsys)
return -ENOMEM;
scpsys->dev = dev;
scpsys->soc_data = soc;
scpsys->pd_data.domains = scpsys->domains;
scpsys->pd_data.num_domains = soc->num_domains;
parent = dev->parent;
if (!parent) {
dev_err(dev, "no parent for syscon devices\n");
return -ENODEV;
}
scpsys->base = syscon_node_to_regmap(parent->of_node);
if (IS_ERR(scpsys->base)) {
dev_err(dev, "no regmap available\n");
return PTR_ERR(scpsys->base);
}
ret = -ENODEV;
for_each_available_child_of_node(np, node) {
struct generic_pm_domain *domain;
domain = scpsys_add_one_domain(scpsys, node);
if (IS_ERR(domain)) {
ret = PTR_ERR(domain);
of_node_put(node);
goto err_cleanup_domains;
}
ret = scpsys_add_subdomain(scpsys, node);
if (ret) {
of_node_put(node);
goto err_cleanup_domains;
}
}
if (ret) {
dev_dbg(dev, "no power domains present\n");
return ret;
}
ret = of_genpd_add_provider_onecell(np, &scpsys->pd_data);
if (ret) {
dev_err(dev, "failed to add provider: %d\n", ret);
goto err_cleanup_domains;
}
return 0;
err_cleanup_domains:
scpsys_domain_cleanup(scpsys);
return ret;
}
static struct platform_driver scpsys_pm_domain_driver = {
.probe = scpsys_probe,
.driver = {
.name = "mtk-power-controller",
.suppress_bind_attrs = true,
.of_match_table = scpsys_of_match,
},
};
builtin_platform_driver(scpsys_pm_domain_driver);
| linux-master | drivers/pmdomain/mediatek/mtk-pm-domains.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved
*/
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/slab.h>
#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>
struct tegra_powergate_info {
unsigned int id;
char *name;
};
struct tegra_powergate {
struct generic_pm_domain genpd;
struct tegra_bpmp *bpmp;
unsigned int id;
};
static inline struct tegra_powergate *
to_tegra_powergate(struct generic_pm_domain *genpd)
{
return container_of(genpd, struct tegra_powergate, genpd);
}
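/*
 * All powergate operations are forwarded to the BPMP firmware through
 * MRQ_PG messages; the helpers below wrap the set/get state, maximum ID
 * and name queries.
 */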
static int tegra_bpmp_powergate_set_state(struct tegra_bpmp *bpmp,
unsigned int id, u32 state)
{
struct mrq_pg_request request;
struct tegra_bpmp_message msg;
int err;
memset(&request, 0, sizeof(request));
request.cmd = CMD_PG_SET_STATE;
request.id = id;
request.set_state.state = state;
memset(&msg, 0, sizeof(msg));
msg.mrq = MRQ_PG;
msg.tx.data = &request;
msg.tx.size = sizeof(request);
err = tegra_bpmp_transfer(bpmp, &msg);
if (err < 0)
return err;
else if (msg.rx.ret < 0)
return -EINVAL;
return 0;
}
static int tegra_bpmp_powergate_get_state(struct tegra_bpmp *bpmp,
unsigned int id)
{
struct mrq_pg_response response;
struct mrq_pg_request request;
struct tegra_bpmp_message msg;
int err;
memset(&request, 0, sizeof(request));
request.cmd = CMD_PG_GET_STATE;
request.id = id;
memset(&response, 0, sizeof(response));
memset(&msg, 0, sizeof(msg));
msg.mrq = MRQ_PG;
msg.tx.data = &request;
msg.tx.size = sizeof(request);
msg.rx.data = &response;
msg.rx.size = sizeof(response);
err = tegra_bpmp_transfer(bpmp, &msg);
if (err < 0)
return PG_STATE_OFF;
else if (msg.rx.ret < 0)
return -EINVAL;
return response.get_state.state;
}
static int tegra_bpmp_powergate_get_max_id(struct tegra_bpmp *bpmp)
{
struct mrq_pg_response response;
struct mrq_pg_request request;
struct tegra_bpmp_message msg;
int err;
memset(&request, 0, sizeof(request));
request.cmd = CMD_PG_GET_MAX_ID;
memset(&response, 0, sizeof(response));
memset(&msg, 0, sizeof(msg));
msg.mrq = MRQ_PG;
msg.tx.data = &request;
msg.tx.size = sizeof(request);
msg.rx.data = &response;
msg.rx.size = sizeof(response);
err = tegra_bpmp_transfer(bpmp, &msg);
if (err < 0)
return err;
else if (msg.rx.ret < 0)
return -EINVAL;
return response.get_max_id.max_id;
}
static char *tegra_bpmp_powergate_get_name(struct tegra_bpmp *bpmp,
unsigned int id)
{
struct mrq_pg_response response;
struct mrq_pg_request request;
struct tegra_bpmp_message msg;
int err;
memset(&request, 0, sizeof(request));
request.cmd = CMD_PG_GET_NAME;
request.id = id;
memset(&response, 0, sizeof(response));
memset(&msg, 0, sizeof(msg));
msg.mrq = MRQ_PG;
msg.tx.data = &request;
msg.tx.size = sizeof(request);
msg.rx.data = &response;
msg.rx.size = sizeof(response);
err = tegra_bpmp_transfer(bpmp, &msg);
if (err < 0 || msg.rx.ret < 0)
return NULL;
return kstrdup(response.get_name.name, GFP_KERNEL);
}
static inline bool tegra_bpmp_powergate_is_powered(struct tegra_bpmp *bpmp,
unsigned int id)
{
return tegra_bpmp_powergate_get_state(bpmp, id) != PG_STATE_OFF;
}
static int tegra_powergate_power_on(struct generic_pm_domain *domain)
{
struct tegra_powergate *powergate = to_tegra_powergate(domain);
struct tegra_bpmp *bpmp = powergate->bpmp;
return tegra_bpmp_powergate_set_state(bpmp, powergate->id,
PG_STATE_ON);
}
static int tegra_powergate_power_off(struct generic_pm_domain *domain)
{
struct tegra_powergate *powergate = to_tegra_powergate(domain);
struct tegra_bpmp *bpmp = powergate->bpmp;
return tegra_bpmp_powergate_set_state(bpmp, powergate->id,
PG_STATE_OFF);
}
static struct tegra_powergate *
tegra_powergate_add(struct tegra_bpmp *bpmp,
const struct tegra_powergate_info *info)
{
struct tegra_powergate *powergate;
bool off;
int err;
off = !tegra_bpmp_powergate_is_powered(bpmp, info->id);
powergate = devm_kzalloc(bpmp->dev, sizeof(*powergate), GFP_KERNEL);
if (!powergate)
return ERR_PTR(-ENOMEM);
powergate->id = info->id;
powergate->bpmp = bpmp;
powergate->genpd.name = kstrdup(info->name, GFP_KERNEL);
powergate->genpd.power_on = tegra_powergate_power_on;
powergate->genpd.power_off = tegra_powergate_power_off;
err = pm_genpd_init(&powergate->genpd, NULL, off);
if (err < 0) {
kfree(powergate->genpd.name);
return ERR_PTR(err);
}
return powergate;
}
static void tegra_powergate_remove(struct tegra_powergate *powergate)
{
struct generic_pm_domain *genpd = &powergate->genpd;
struct tegra_bpmp *bpmp = powergate->bpmp;
int err;
err = pm_genpd_remove(genpd);
if (err < 0)
dev_err(bpmp->dev, "failed to remove power domain %s: %d\n",
genpd->name, err);
kfree(genpd->name);
}
static int
tegra_bpmp_probe_powergates(struct tegra_bpmp *bpmp,
struct tegra_powergate_info **powergatesp)
{
struct tegra_powergate_info *powergates;
unsigned int max_id, id, count = 0;
unsigned int num_holes = 0;
int err;
err = tegra_bpmp_powergate_get_max_id(bpmp);
if (err < 0)
return err;
max_id = err;
dev_dbg(bpmp->dev, "maximum powergate ID: %u\n", max_id);
powergates = kcalloc(max_id + 1, sizeof(*powergates), GFP_KERNEL);
if (!powergates)
return -ENOMEM;
for (id = 0; id <= max_id; id++) {
struct tegra_powergate_info *info = &powergates[count];
info->name = tegra_bpmp_powergate_get_name(bpmp, id);
if (!info->name || info->name[0] == '\0') {
num_holes++;
continue;
}
info->id = id;
count++;
}
dev_dbg(bpmp->dev, "holes: %u\n", num_holes);
*powergatesp = powergates;
return count;
}
static int tegra_bpmp_add_powergates(struct tegra_bpmp *bpmp,
struct tegra_powergate_info *powergates,
unsigned int count)
{
struct genpd_onecell_data *genpd = &bpmp->genpd;
struct generic_pm_domain **domains;
struct tegra_powergate *powergate;
unsigned int i;
int err;
domains = kcalloc(count, sizeof(*domains), GFP_KERNEL);
if (!domains)
return -ENOMEM;
for (i = 0; i < count; i++) {
powergate = tegra_powergate_add(bpmp, &powergates[i]);
if (IS_ERR(powergate)) {
err = PTR_ERR(powergate);
goto remove;
}
dev_dbg(bpmp->dev, "added power domain %s\n",
powergate->genpd.name);
domains[i] = &powergate->genpd;
}
genpd->num_domains = count;
genpd->domains = domains;
return 0;
remove:
while (i--) {
powergate = to_tegra_powergate(domains[i]);
tegra_powergate_remove(powergate);
}
kfree(domains);
return err;
}
static void tegra_bpmp_remove_powergates(struct tegra_bpmp *bpmp)
{
struct genpd_onecell_data *genpd = &bpmp->genpd;
unsigned int i = genpd->num_domains;
struct tegra_powergate *powergate;
while (i--) {
dev_dbg(bpmp->dev, "removing power domain %s\n",
genpd->domains[i]->name);
powergate = to_tegra_powergate(genpd->domains[i]);
tegra_powergate_remove(powergate);
}
}
static struct generic_pm_domain *
tegra_powergate_xlate(struct of_phandle_args *spec, void *data)
{
struct generic_pm_domain *domain = ERR_PTR(-ENOENT);
struct genpd_onecell_data *genpd = data;
unsigned int i;
for (i = 0; i < genpd->num_domains; i++) {
struct tegra_powergate *powergate;
powergate = to_tegra_powergate(genpd->domains[i]);
if (powergate->id == spec->args[0]) {
domain = &powergate->genpd;
break;
}
}
return domain;
}
int tegra_bpmp_init_powergates(struct tegra_bpmp *bpmp)
{
struct device_node *np = bpmp->dev->of_node;
struct tegra_powergate_info *powergates;
struct device *dev = bpmp->dev;
unsigned int count, i;
int err;
err = tegra_bpmp_probe_powergates(bpmp, &powergates);
if (err < 0)
return err;
count = err;
dev_dbg(dev, "%u power domains probed\n", count);
err = tegra_bpmp_add_powergates(bpmp, powergates, count);
if (err < 0)
goto free;
bpmp->genpd.xlate = tegra_powergate_xlate;
err = of_genpd_add_provider_onecell(np, &bpmp->genpd);
if (err < 0) {
dev_err(dev, "failed to add power domain provider: %d\n", err);
tegra_bpmp_remove_powergates(bpmp);
}
free:
for (i = 0; i < count; i++)
kfree(powergates[i].name);
kfree(powergates);
return err;
}
| linux-master | drivers/pmdomain/tegra/powergate-bpmp.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014 Linaro Ltd.
*
* Author: Ulf Hansson <[email protected]>
*
* Implements PM domains using the generic PM domain for ux500.
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/pm_domain.h>
#include <dt-bindings/arm/ux500_pm_domains.h>
static int pd_power_off(struct generic_pm_domain *domain)
{
/*
* Handle the gating of the PM domain regulator here.
*
* Drivers/subsystems handling devices in the PM domain need to perform
* register context save/restore from their respective runtime PM
* callbacks, to be able to enable PM domain gating/ungating.
*/
return 0;
}
static int pd_power_on(struct generic_pm_domain *domain)
{
/*
* Handle the ungating of the PM domain regulator here.
*
* Drivers/subsystems handling devices in the PM domain need to perform
* register context save/restore from their respective runtime PM
* callbacks, to be able to enable PM domain gating/ungating.
*/
return 0;
}
static struct generic_pm_domain ux500_pm_domain_vape = {
.name = "VAPE",
.power_off = pd_power_off,
.power_on = pd_power_on,
};
static struct generic_pm_domain *ux500_pm_domains[NR_DOMAINS] = {
[DOMAIN_VAPE] = &ux500_pm_domain_vape,
};
static const struct of_device_id ux500_pm_domain_matches[] = {
{ .compatible = "stericsson,ux500-pm-domains", },
{ },
};
static int ux500_pm_domains_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct genpd_onecell_data *genpd_data;
int i;
if (!np)
return -ENODEV;
genpd_data = kzalloc(sizeof(*genpd_data), GFP_KERNEL);
if (!genpd_data)
return -ENOMEM;
genpd_data->domains = ux500_pm_domains;
genpd_data->num_domains = ARRAY_SIZE(ux500_pm_domains);
for (i = 0; i < ARRAY_SIZE(ux500_pm_domains); ++i)
pm_genpd_init(ux500_pm_domains[i], NULL, false);
of_genpd_add_provider_onecell(np, genpd_data);
return 0;
}
static struct platform_driver ux500_pm_domains_driver = {
.probe = ux500_pm_domains_probe,
.driver = {
.name = "ux500_pm_domains",
.of_match_table = ux500_pm_domain_matches,
},
};
static int __init ux500_pm_domains_init(void)
{
return platform_driver_register(&ux500_pm_domains_driver);
}
arch_initcall(ux500_pm_domains_init);
| linux-master | drivers/pmdomain/st/ste-ux500-pm-domain.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/reset.h>
#define PD_STATE_ON 1
#define PD_STATE_OFF 2
#define PD_RSTN_REG 0x00
#define PD_CLK_GATE_REG 0x04
#define PD_PWROFF_GATE_REG 0x08
#define PD_PSW_ON_REG 0x0c
#define PD_PSW_OFF_REG 0x10
#define PD_PSW_DELAY_REG 0x14
#define PD_OFF_DELAY_REG 0x18
#define PD_ON_DELAY_REG 0x1c
#define PD_COMMAND_REG 0x20
#define PD_STATUS_REG 0x24
#define PD_STATUS_COMPLETE BIT(1)
#define PD_STATUS_BUSY BIT(3)
#define PD_STATUS_STATE GENMASK(17, 16)
#define PD_ACTIVE_CTRL_REG 0x2c
#define PD_GATE_STATUS_REG 0x30
#define PD_RSTN_STATUS BIT(0)
#define PD_CLK_GATE_STATUS BIT(1)
#define PD_PWROFF_GATE_STATUS BIT(2)
#define PD_PSW_STATUS_REG 0x34
#define PD_REGS_SIZE 0x80
struct sun20i_ppu_desc {
const char *const *names;
unsigned int num_domains;
};
struct sun20i_ppu_pd {
struct generic_pm_domain genpd;
void __iomem *base;
};
#define to_sun20i_ppu_pd(_genpd) \
container_of(_genpd, struct sun20i_ppu_pd, genpd)
static bool sun20i_ppu_pd_is_on(const struct sun20i_ppu_pd *pd)
{
u32 status = readl(pd->base + PD_STATUS_REG);
return FIELD_GET(PD_STATUS_STATE, status) == PD_STATE_ON;
}
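/*
 * Request a state change by writing PD_STATE_ON/OFF to the command
 * register, then poll the status register until the new state is
 * reported and the completion flag is set.
 */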
static int sun20i_ppu_pd_set_power(const struct sun20i_ppu_pd *pd, bool power_on)
{
u32 state, status;
int ret;
if (sun20i_ppu_pd_is_on(pd) == power_on)
return 0;
/* Wait for the power controller to be idle. */
ret = readl_poll_timeout(pd->base + PD_STATUS_REG, status,
!(status & PD_STATUS_BUSY), 100, 1000);
if (ret)
return ret;
state = power_on ? PD_STATE_ON : PD_STATE_OFF;
writel(state, pd->base + PD_COMMAND_REG);
/* Wait for the state transition to complete. */
ret = readl_poll_timeout(pd->base + PD_STATUS_REG, status,
FIELD_GET(PD_STATUS_STATE, status) == state &&
(status & PD_STATUS_COMPLETE), 100, 1000);
if (ret)
return ret;
/* Clear the completion flag. */
writel(status, pd->base + PD_STATUS_REG);
return 0;
}
static int sun20i_ppu_pd_power_on(struct generic_pm_domain *genpd)
{
const struct sun20i_ppu_pd *pd = to_sun20i_ppu_pd(genpd);
return sun20i_ppu_pd_set_power(pd, true);
}
static int sun20i_ppu_pd_power_off(struct generic_pm_domain *genpd)
{
const struct sun20i_ppu_pd *pd = to_sun20i_ppu_pd(genpd);
return sun20i_ppu_pd_set_power(pd, false);
}
static int sun20i_ppu_probe(struct platform_device *pdev)
{
const struct sun20i_ppu_desc *desc;
struct device *dev = &pdev->dev;
struct genpd_onecell_data *ppu;
struct sun20i_ppu_pd *pds;
struct reset_control *rst;
void __iomem *base;
struct clk *clk;
int ret;
desc = of_device_get_match_data(dev);
if (!desc)
return -EINVAL;
pds = devm_kcalloc(dev, desc->num_domains, sizeof(*pds), GFP_KERNEL);
if (!pds)
return -ENOMEM;
ppu = devm_kzalloc(dev, sizeof(*ppu), GFP_KERNEL);
if (!ppu)
return -ENOMEM;
ppu->domains = devm_kcalloc(dev, desc->num_domains,
sizeof(*ppu->domains), GFP_KERNEL);
if (!ppu->domains)
return -ENOMEM;
ppu->num_domains = desc->num_domains;
platform_set_drvdata(pdev, ppu);
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(clk))
return PTR_ERR(clk);
rst = devm_reset_control_get_exclusive(dev, NULL);
if (IS_ERR(rst))
return PTR_ERR(rst);
ret = reset_control_deassert(rst);
if (ret)
return ret;
for (unsigned int i = 0; i < ppu->num_domains; ++i) {
struct sun20i_ppu_pd *pd = &pds[i];
pd->genpd.name = desc->names[i];
pd->genpd.power_off = sun20i_ppu_pd_power_off;
pd->genpd.power_on = sun20i_ppu_pd_power_on;
pd->base = base + PD_REGS_SIZE * i;
ret = pm_genpd_init(&pd->genpd, NULL, sun20i_ppu_pd_is_on(pd));
if (ret) {
dev_warn(dev, "Failed to add '%s' domain: %d\n",
pd->genpd.name, ret);
continue;
}
ppu->domains[i] = &pd->genpd;
}
ret = of_genpd_add_provider_onecell(dev->of_node, ppu);
if (ret)
dev_warn(dev, "Failed to add provider: %d\n", ret);
return 0;
}
static const char *const sun20i_d1_ppu_pd_names[] = {
"CPU",
"VE",
"DSP",
};
static const struct sun20i_ppu_desc sun20i_d1_ppu_desc = {
.names = sun20i_d1_ppu_pd_names,
.num_domains = ARRAY_SIZE(sun20i_d1_ppu_pd_names),
};
static const struct of_device_id sun20i_ppu_of_match[] = {
{
.compatible = "allwinner,sun20i-d1-ppu",
.data = &sun20i_d1_ppu_desc,
},
{ }
};
MODULE_DEVICE_TABLE(of, sun20i_ppu_of_match);
static struct platform_driver sun20i_ppu_driver = {
.probe = sun20i_ppu_probe,
.driver = {
.name = "sun20i-ppu",
.of_match_table = sun20i_ppu_of_match,
/* Power domains cannot be removed while they are in use. */
.suppress_bind_attrs = true,
},
};
module_platform_driver(sun20i_ppu_driver);
MODULE_AUTHOR("Samuel Holland <[email protected]>");
MODULE_DESCRIPTION("Allwinner D1 PPU power domain driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/pmdomain/sunxi/sun20i-ppu.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Actions Semi Owl Smart Power System (SPS) shared helpers
*
* Copyright 2012 Actions Semi Inc.
* Author: Actions Semi, Inc.
*
* Copyright (c) 2017 Andreas Färber
*/
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/soc/actions/owl-sps.h>
#define OWL_SPS_PG_CTL 0x0
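/*
 * Toggle a power gate in the PG_CTL register and busy-wait (up to ~5 ms)
 * for the corresponding acknowledge bit(s) to reflect the requested state.
 */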
int owl_sps_set_pg(void __iomem *base, u32 pwr_mask, u32 ack_mask, bool enable)
{
u32 val;
bool ack;
int timeout;
val = readl(base + OWL_SPS_PG_CTL);
ack = val & ack_mask;
if (ack == enable)
return 0;
if (enable)
val |= pwr_mask;
else
val &= ~pwr_mask;
writel(val, base + OWL_SPS_PG_CTL);
for (timeout = 5000; timeout > 0; timeout -= 50) {
val = readl(base + OWL_SPS_PG_CTL);
if ((val & ack_mask) == (enable ? ack_mask : 0))
break;
udelay(50);
}
if (timeout <= 0)
return -ETIMEDOUT;
udelay(10);
return 0;
}
EXPORT_SYMBOL_GPL(owl_sps_set_pg);
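/*
 * Minimal usage sketch (not part of the original file): a power-domain
 * driver such as owl-sps.c below typically calls this helper with
 * single-bit masks derived from its per-SoC domain table, e.g.:
 *
 *	err = owl_sps_set_pg(sps->base, BIT(pwr_bit), BIT(ack_bit), true);
 *	if (err)
 *		dev_err(dev, "failed to enable power gate: %d\n", err);
 *
 * "sps", "pwr_bit", "ack_bit" and "dev" are placeholders for the
 * caller's own context.
 */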
| linux-master | drivers/pmdomain/actions/owl-sps-helper.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Actions Semi Owl Smart Power System (SPS)
*
* Copyright 2012 Actions Semi Inc.
* Author: Actions Semi, Inc.
*
* Copyright (c) 2017 Andreas Färber
*/
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pm_domain.h>
#include <linux/soc/actions/owl-sps.h>
#include <dt-bindings/power/owl-s500-powergate.h>
#include <dt-bindings/power/owl-s700-powergate.h>
#include <dt-bindings/power/owl-s900-powergate.h>
struct owl_sps_domain_info {
const char *name;
int pwr_bit;
int ack_bit;
unsigned int genpd_flags;
};
struct owl_sps_info {
unsigned int num_domains;
const struct owl_sps_domain_info *domains;
};
struct owl_sps {
struct device *dev;
const struct owl_sps_info *info;
void __iomem *base;
struct genpd_onecell_data genpd_data;
struct generic_pm_domain *domains[];
};
#define to_owl_pd(gpd) container_of(gpd, struct owl_sps_domain, genpd)
struct owl_sps_domain {
struct generic_pm_domain genpd;
const struct owl_sps_domain_info *info;
struct owl_sps *sps;
};
static int owl_sps_set_power(struct owl_sps_domain *pd, bool enable)
{
u32 pwr_mask, ack_mask;
ack_mask = BIT(pd->info->ack_bit);
pwr_mask = BIT(pd->info->pwr_bit);
return owl_sps_set_pg(pd->sps->base, pwr_mask, ack_mask, enable);
}
static int owl_sps_power_on(struct generic_pm_domain *domain)
{
struct owl_sps_domain *pd = to_owl_pd(domain);
dev_dbg(pd->sps->dev, "%s power on", pd->info->name);
return owl_sps_set_power(pd, true);
}
static int owl_sps_power_off(struct generic_pm_domain *domain)
{
struct owl_sps_domain *pd = to_owl_pd(domain);
dev_dbg(pd->sps->dev, "%s power off", pd->info->name);
return owl_sps_set_power(pd, false);
}
static int owl_sps_init_domain(struct owl_sps *sps, int index)
{
struct owl_sps_domain *pd;
pd = devm_kzalloc(sps->dev, sizeof(*pd), GFP_KERNEL);
if (!pd)
return -ENOMEM;
pd->info = &sps->info->domains[index];
pd->sps = sps;
pd->genpd.name = pd->info->name;
pd->genpd.power_on = owl_sps_power_on;
pd->genpd.power_off = owl_sps_power_off;
pd->genpd.flags = pd->info->genpd_flags;
pm_genpd_init(&pd->genpd, NULL, false);
sps->genpd_data.domains[index] = &pd->genpd;
return 0;
}
static int owl_sps_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
const struct owl_sps_info *sps_info;
struct owl_sps *sps;
int i, ret;
if (!pdev->dev.of_node) {
dev_err(&pdev->dev, "no device node\n");
return -ENODEV;
}
match = of_match_device(pdev->dev.driver->of_match_table, &pdev->dev);
if (!match || !match->data) {
dev_err(&pdev->dev, "unknown compatible or missing data\n");
return -EINVAL;
}
sps_info = match->data;
sps = devm_kzalloc(&pdev->dev,
struct_size(sps, domains, sps_info->num_domains),
GFP_KERNEL);
if (!sps)
return -ENOMEM;
sps->base = of_io_request_and_map(pdev->dev.of_node, 0, "owl-sps");
if (IS_ERR(sps->base)) {
dev_err(&pdev->dev, "failed to map sps registers\n");
return PTR_ERR(sps->base);
}
sps->dev = &pdev->dev;
sps->info = sps_info;
sps->genpd_data.domains = sps->domains;
sps->genpd_data.num_domains = sps_info->num_domains;
for (i = 0; i < sps_info->num_domains; i++) {
ret = owl_sps_init_domain(sps, i);
if (ret)
return ret;
}
ret = of_genpd_add_provider_onecell(pdev->dev.of_node, &sps->genpd_data);
if (ret) {
dev_err(&pdev->dev, "failed to add provider (%d)", ret);
return ret;
}
return 0;
}
static const struct owl_sps_domain_info s500_sps_domains[] = {
[S500_PD_VDE] = {
.name = "VDE",
.pwr_bit = 0,
.ack_bit = 16,
},
[S500_PD_VCE_SI] = {
.name = "VCE_SI",
.pwr_bit = 1,
.ack_bit = 17,
},
[S500_PD_USB2_1] = {
.name = "USB2_1",
.pwr_bit = 2,
.ack_bit = 18,
},
[S500_PD_CPU2] = {
.name = "CPU2",
.pwr_bit = 5,
.ack_bit = 21,
.genpd_flags = GENPD_FLAG_ALWAYS_ON,
},
[S500_PD_CPU3] = {
.name = "CPU3",
.pwr_bit = 6,
.ack_bit = 22,
.genpd_flags = GENPD_FLAG_ALWAYS_ON,
},
[S500_PD_DMA] = {
.name = "DMA",
.pwr_bit = 8,
.ack_bit = 12,
},
[S500_PD_DS] = {
.name = "DS",
.pwr_bit = 9,
.ack_bit = 13,
},
[S500_PD_USB3] = {
.name = "USB3",
.pwr_bit = 10,
.ack_bit = 14,
},
[S500_PD_USB2_0] = {
.name = "USB2_0",
.pwr_bit = 11,
.ack_bit = 15,
},
};
static const struct owl_sps_info s500_sps_info = {
.num_domains = ARRAY_SIZE(s500_sps_domains),
.domains = s500_sps_domains,
};
static const struct owl_sps_domain_info s700_sps_domains[] = {
[S700_PD_VDE] = {
.name = "VDE",
.pwr_bit = 0,
},
[S700_PD_VCE_SI] = {
.name = "VCE_SI",
.pwr_bit = 1,
},
[S700_PD_USB2_1] = {
.name = "USB2_1",
.pwr_bit = 2,
},
[S700_PD_HDE] = {
.name = "HDE",
.pwr_bit = 7,
},
[S700_PD_DMA] = {
.name = "DMA",
.pwr_bit = 8,
},
[S700_PD_DS] = {
.name = "DS",
.pwr_bit = 9,
},
[S700_PD_USB3] = {
.name = "USB3",
.pwr_bit = 10,
},
[S700_PD_USB2_0] = {
.name = "USB2_0",
.pwr_bit = 11,
},
};
static const struct owl_sps_info s700_sps_info = {
.num_domains = ARRAY_SIZE(s700_sps_domains),
.domains = s700_sps_domains,
};
static const struct owl_sps_domain_info s900_sps_domains[] = {
[S900_PD_GPU_B] = {
.name = "GPU_B",
.pwr_bit = 3,
},
[S900_PD_VCE] = {
.name = "VCE",
.pwr_bit = 4,
},
[S900_PD_SENSOR] = {
.name = "SENSOR",
.pwr_bit = 5,
},
[S900_PD_VDE] = {
.name = "VDE",
.pwr_bit = 6,
},
[S900_PD_HDE] = {
.name = "HDE",
.pwr_bit = 7,
},
[S900_PD_USB3] = {
.name = "USB3",
.pwr_bit = 8,
},
[S900_PD_DDR0] = {
.name = "DDR0",
.pwr_bit = 9,
},
[S900_PD_DDR1] = {
.name = "DDR1",
.pwr_bit = 10,
},
[S900_PD_DE] = {
.name = "DE",
.pwr_bit = 13,
},
[S900_PD_NAND] = {
.name = "NAND",
.pwr_bit = 14,
},
[S900_PD_USB2_H0] = {
.name = "USB2_H0",
.pwr_bit = 15,
},
[S900_PD_USB2_H1] = {
.name = "USB2_H1",
.pwr_bit = 16,
},
};
static const struct owl_sps_info s900_sps_info = {
.num_domains = ARRAY_SIZE(s900_sps_domains),
.domains = s900_sps_domains,
};
static const struct of_device_id owl_sps_of_matches[] = {
{ .compatible = "actions,s500-sps", .data = &s500_sps_info },
{ .compatible = "actions,s700-sps", .data = &s700_sps_info },
{ .compatible = "actions,s900-sps", .data = &s900_sps_info },
{ }
};
static struct platform_driver owl_sps_platform_driver = {
.probe = owl_sps_probe,
.driver = {
.name = "owl-sps",
.of_match_table = owl_sps_of_matches,
.suppress_bind_attrs = true,
},
};
static int __init owl_sps_init(void)
{
return platform_driver_register(&owl_sps_platform_driver);
}
postcore_initcall(owl_sps_init);
| linux-master | drivers/pmdomain/actions/owl-sps.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* BCM63xx Power Domain Controller Driver
*
* Copyright (C) 2020 Álvaro Fernández Rojas <[email protected]>
*/
#include <dt-bindings/soc/bcm6318-pm.h>
#include <dt-bindings/soc/bcm6328-pm.h>
#include <dt-bindings/soc/bcm6362-pm.h>
#include <dt-bindings/soc/bcm63268-pm.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/of.h>
struct bcm63xx_power_dev {
struct generic_pm_domain genpd;
struct bcm63xx_power *power;
uint32_t mask;
};
struct bcm63xx_power {
void __iomem *base;
spinlock_t lock;
struct bcm63xx_power_dev *dev;
struct genpd_onecell_data genpd_data;
struct generic_pm_domain **genpd;
};
struct bcm63xx_power_data {
const char * const name;
uint8_t bit;
unsigned int flags;
};
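/*
 * The register uses inverted semantics: a set bit gates the domain off,
 * so a domain is considered on when its bit reads back as zero.
 */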
static int bcm63xx_power_get_state(struct bcm63xx_power_dev *pmd, bool *is_on)
{
struct bcm63xx_power *power = pmd->power;
if (!pmd->mask) {
*is_on = false;
return -EINVAL;
}
*is_on = !(__raw_readl(power->base) & pmd->mask);
return 0;
}
static int bcm63xx_power_set_state(struct bcm63xx_power_dev *pmd, bool on)
{
struct bcm63xx_power *power = pmd->power;
unsigned long flags;
uint32_t val;
if (!pmd->mask)
return -EINVAL;
spin_lock_irqsave(&power->lock, flags);
val = __raw_readl(power->base);
if (on)
val &= ~pmd->mask;
else
val |= pmd->mask;
__raw_writel(val, power->base);
spin_unlock_irqrestore(&power->lock, flags);
return 0;
}
static int bcm63xx_power_on(struct generic_pm_domain *genpd)
{
struct bcm63xx_power_dev *pmd = container_of(genpd,
struct bcm63xx_power_dev, genpd);
return bcm63xx_power_set_state(pmd, true);
}
static int bcm63xx_power_off(struct generic_pm_domain *genpd)
{
struct bcm63xx_power_dev *pmd = container_of(genpd,
struct bcm63xx_power_dev, genpd);
return bcm63xx_power_set_state(pmd, false);
}
static int bcm63xx_power_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
const struct bcm63xx_power_data *entry, *table;
struct bcm63xx_power *power;
unsigned int ndom;
uint8_t max_bit = 0;
int ret;
power = devm_kzalloc(dev, sizeof(*power), GFP_KERNEL);
if (!power)
return -ENOMEM;
power->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(power->base))
return PTR_ERR(power->base);
table = of_device_get_match_data(dev);
if (!table)
return -EINVAL;
power->genpd_data.num_domains = 0;
ndom = 0;
for (entry = table; entry->name; entry++) {
max_bit = max(max_bit, entry->bit);
ndom++;
}
if (!ndom)
return -ENODEV;
power->genpd_data.num_domains = max_bit + 1;
power->dev = devm_kcalloc(dev, power->genpd_data.num_domains,
sizeof(struct bcm63xx_power_dev),
GFP_KERNEL);
if (!power->dev)
return -ENOMEM;
power->genpd = devm_kcalloc(dev, power->genpd_data.num_domains,
sizeof(struct generic_pm_domain *),
GFP_KERNEL);
if (!power->genpd)
return -ENOMEM;
power->genpd_data.domains = power->genpd;
ndom = 0;
for (entry = table; entry->name; entry++) {
struct bcm63xx_power_dev *pmd = &power->dev[ndom];
bool is_on;
pmd->power = power;
pmd->mask = BIT(entry->bit);
pmd->genpd.name = entry->name;
pmd->genpd.flags = entry->flags;
ret = bcm63xx_power_get_state(pmd, &is_on);
if (ret)
dev_warn(dev, "unable to get current state for %s\n",
pmd->genpd.name);
pmd->genpd.power_on = bcm63xx_power_on;
pmd->genpd.power_off = bcm63xx_power_off;
pm_genpd_init(&pmd->genpd, NULL, !is_on);
power->genpd[entry->bit] = &pmd->genpd;
ndom++;
}
spin_lock_init(&power->lock);
ret = of_genpd_add_provider_onecell(np, &power->genpd_data);
if (ret) {
dev_err(dev, "failed to register genpd driver: %d\n", ret);
return ret;
}
dev_info(dev, "registered %u power domains\n", ndom);
return 0;
}
static const struct bcm63xx_power_data bcm6318_power_domains[] = {
{
.name = "pcie",
.bit = BCM6318_POWER_DOMAIN_PCIE,
}, {
.name = "usb",
.bit = BCM6318_POWER_DOMAIN_USB,
}, {
.name = "ephy0",
.bit = BCM6318_POWER_DOMAIN_EPHY0,
}, {
.name = "ephy1",
.bit = BCM6318_POWER_DOMAIN_EPHY1,
}, {
.name = "ephy2",
.bit = BCM6318_POWER_DOMAIN_EPHY2,
}, {
.name = "ephy3",
.bit = BCM6318_POWER_DOMAIN_EPHY3,
}, {
.name = "ldo2p5",
.bit = BCM6318_POWER_DOMAIN_LDO2P5,
.flags = GENPD_FLAG_ALWAYS_ON,
}, {
.name = "ldo2p9",
.bit = BCM6318_POWER_DOMAIN_LDO2P9,
.flags = GENPD_FLAG_ALWAYS_ON,
}, {
.name = "sw1p0",
.bit = BCM6318_POWER_DOMAIN_SW1P0,
.flags = GENPD_FLAG_ALWAYS_ON,
}, {
.name = "pad",
.bit = BCM6318_POWER_DOMAIN_PAD,
.flags = GENPD_FLAG_ALWAYS_ON,
}, {
/* sentinel */
},
};
static const struct bcm63xx_power_data bcm6328_power_domains[] = {
{
.name = "adsl2-mips",
.bit = BCM6328_POWER_DOMAIN_ADSL2_MIPS,
}, {
.name = "adsl2-phy",
.bit = BCM6328_POWER_DOMAIN_ADSL2_PHY,
}, {
.name = "adsl2-afe",
.bit = BCM6328_POWER_DOMAIN_ADSL2_AFE,
}, {
.name = "sar",
.bit = BCM6328_POWER_DOMAIN_SAR,
}, {
.name = "pcm",
.bit = BCM6328_POWER_DOMAIN_PCM,
}, {
.name = "usbd",
.bit = BCM6328_POWER_DOMAIN_USBD,
}, {
.name = "usbh",
.bit = BCM6328_POWER_DOMAIN_USBH,
}, {
.name = "pcie",
.bit = BCM6328_POWER_DOMAIN_PCIE,
}, {
.name = "robosw",
.bit = BCM6328_POWER_DOMAIN_ROBOSW,
}, {
.name = "ephy",
.bit = BCM6328_POWER_DOMAIN_EPHY,
}, {
/* sentinel */
},
};
static const struct bcm63xx_power_data bcm6362_power_domains[] = {
{
.name = "sar",
.bit = BCM6362_POWER_DOMAIN_SAR,
}, {
.name = "ipsec",
.bit = BCM6362_POWER_DOMAIN_IPSEC,
}, {
.name = "mips",
.bit = BCM6362_POWER_DOMAIN_MIPS,
.flags = GENPD_FLAG_ALWAYS_ON,
}, {
.name = "dect",
.bit = BCM6362_POWER_DOMAIN_DECT,
}, {
.name = "usbh",
.bit = BCM6362_POWER_DOMAIN_USBH,
}, {
.name = "usbd",
.bit = BCM6362_POWER_DOMAIN_USBD,
}, {
.name = "robosw",
.bit = BCM6362_POWER_DOMAIN_ROBOSW,
}, {
.name = "pcm",
.bit = BCM6362_POWER_DOMAIN_PCM,
}, {
.name = "periph",
.bit = BCM6362_POWER_DOMAIN_PERIPH,
.flags = GENPD_FLAG_ALWAYS_ON,
}, {
.name = "adsl-phy",
.bit = BCM6362_POWER_DOMAIN_ADSL_PHY,
}, {
.name = "gmii-pads",
.bit = BCM6362_POWER_DOMAIN_GMII_PADS,
}, {
.name = "fap",
.bit = BCM6362_POWER_DOMAIN_FAP,
}, {
.name = "pcie",
.bit = BCM6362_POWER_DOMAIN_PCIE,
}, {
.name = "wlan-pads",
.bit = BCM6362_POWER_DOMAIN_WLAN_PADS,
}, {
/* sentinel */
},
};
static const struct bcm63xx_power_data bcm63268_power_domains[] = {
{
.name = "sar",
.bit = BCM63268_POWER_DOMAIN_SAR,
}, {
.name = "ipsec",
.bit = BCM63268_POWER_DOMAIN_IPSEC,
}, {
.name = "mips",
.bit = BCM63268_POWER_DOMAIN_MIPS,
.flags = GENPD_FLAG_ALWAYS_ON,
}, {
.name = "dect",
.bit = BCM63268_POWER_DOMAIN_DECT,
}, {
.name = "usbh",
.bit = BCM63268_POWER_DOMAIN_USBH,
}, {
.name = "usbd",
.bit = BCM63268_POWER_DOMAIN_USBD,
}, {
.name = "robosw",
.bit = BCM63268_POWER_DOMAIN_ROBOSW,
}, {
.name = "pcm",
.bit = BCM63268_POWER_DOMAIN_PCM,
}, {
.name = "periph",
.bit = BCM63268_POWER_DOMAIN_PERIPH,
.flags = GENPD_FLAG_ALWAYS_ON,
}, {
.name = "vdsl-phy",
.bit = BCM63268_POWER_DOMAIN_VDSL_PHY,
}, {
.name = "vdsl-mips",
.bit = BCM63268_POWER_DOMAIN_VDSL_MIPS,
}, {
.name = "fap",
.bit = BCM63268_POWER_DOMAIN_FAP,
}, {
.name = "pcie",
.bit = BCM63268_POWER_DOMAIN_PCIE,
}, {
.name = "wlan-pads",
.bit = BCM63268_POWER_DOMAIN_WLAN_PADS,
}, {
/* sentinel */
},
};
static const struct of_device_id bcm63xx_power_of_match[] = {
{
.compatible = "brcm,bcm6318-power-controller",
.data = &bcm6318_power_domains,
}, {
.compatible = "brcm,bcm6328-power-controller",
.data = &bcm6328_power_domains,
}, {
.compatible = "brcm,bcm6362-power-controller",
.data = &bcm6362_power_domains,
}, {
.compatible = "brcm,bcm63268-power-controller",
.data = &bcm63268_power_domains,
}, {
/* sentinel */
}
};
static struct platform_driver bcm63xx_power_driver = {
.driver = {
.name = "bcm63xx-power-controller",
.of_match_table = bcm63xx_power_of_match,
},
.probe = bcm63xx_power_probe,
};
builtin_platform_driver(bcm63xx_power_driver);
| linux-master | drivers/pmdomain/bcm/bcm63xx-power.c |
// SPDX-License-Identifier: GPL-2.0
/* (C) 2015 Pengutronix, Alexander Aring <[email protected]>
*
* Authors:
* Alexander Aring <[email protected]>
* Eric Anholt <[email protected]>
*/
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <dt-bindings/power/raspberrypi-power.h>
#include <soc/bcm2835/raspberrypi-firmware.h>
/*
* Firmware indices for the old power domains interface. Only a few
* of them were actually implemented.
*/
#define RPI_OLD_POWER_DOMAIN_USB 3
#define RPI_OLD_POWER_DOMAIN_V3D 10
struct rpi_power_domain {
u32 domain;
bool enabled;
bool old_interface;
struct generic_pm_domain base;
struct rpi_firmware *fw;
};
struct rpi_power_domains {
bool has_new_interface;
struct genpd_onecell_data xlate;
struct rpi_firmware *fw;
struct rpi_power_domain domains[RPI_POWER_DOMAIN_COUNT];
};
/*
* Packet definition used by RPI_FIRMWARE_SET_POWER_STATE and
* RPI_FIRMWARE_SET_DOMAIN_STATE
*/
struct rpi_power_domain_packet {
u32 domain;
u32 on;
};
/*
* Asks the firmware to enable or disable power on a specific power
* domain.
*/
static int rpi_firmware_set_power(struct rpi_power_domain *rpi_domain, bool on)
{
struct rpi_power_domain_packet packet;
packet.domain = rpi_domain->domain;
packet.on = on;
return rpi_firmware_property(rpi_domain->fw,
rpi_domain->old_interface ?
RPI_FIRMWARE_SET_POWER_STATE :
RPI_FIRMWARE_SET_DOMAIN_STATE,
&packet, sizeof(packet));
}
static int rpi_domain_off(struct generic_pm_domain *domain)
{
struct rpi_power_domain *rpi_domain =
container_of(domain, struct rpi_power_domain, base);
return rpi_firmware_set_power(rpi_domain, false);
}
static int rpi_domain_on(struct generic_pm_domain *domain)
{
struct rpi_power_domain *rpi_domain =
container_of(domain, struct rpi_power_domain, base);
return rpi_firmware_set_power(rpi_domain, true);
}
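/*
 * Shared initialization for both the old and the new firmware
 * interfaces: fill in the genpd callbacks, register the domain (assumed
 * off at boot) and hook it into the xlate table.
 */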
static void rpi_common_init_power_domain(struct rpi_power_domains *rpi_domains,
int xlate_index, const char *name)
{
struct rpi_power_domain *dom = &rpi_domains->domains[xlate_index];
dom->fw = rpi_domains->fw;
dom->base.name = name;
dom->base.power_on = rpi_domain_on;
dom->base.power_off = rpi_domain_off;
/*
* Treat all power domains as off at boot.
*
* The firmware itself may be keeping some domains on, but
* from Linux's perspective all we control is the refcounts
* that we give to the firmware, and we can't ask the firmware
* to turn off something that we haven't ourselves turned on.
*/
pm_genpd_init(&dom->base, NULL, true);
rpi_domains->xlate.domains[xlate_index] = &dom->base;
}
static void rpi_init_power_domain(struct rpi_power_domains *rpi_domains,
int xlate_index, const char *name)
{
struct rpi_power_domain *dom = &rpi_domains->domains[xlate_index];
if (!rpi_domains->has_new_interface)
return;
/* The DT binding index is the firmware's domain index minus one. */
dom->domain = xlate_index + 1;
rpi_common_init_power_domain(rpi_domains, xlate_index, name);
}
static void rpi_init_old_power_domain(struct rpi_power_domains *rpi_domains,
int xlate_index, int domain,
const char *name)
{
struct rpi_power_domain *dom = &rpi_domains->domains[xlate_index];
dom->old_interface = true;
dom->domain = domain;
rpi_common_init_power_domain(rpi_domains, xlate_index, name);
}
/*
* Detects whether the firmware supports the new power domains interface.
*
* The firmware doesn't actually return an error on an unknown tag,
* and just skips over it, so we do the detection by putting an
* unexpected value in the return field and checking if it was
* unchanged.
*/
static bool
rpi_has_new_domain_support(struct rpi_power_domains *rpi_domains)
{
struct rpi_power_domain_packet packet;
int ret;
packet.domain = RPI_POWER_DOMAIN_ARM;
packet.on = ~0;
ret = rpi_firmware_property(rpi_domains->fw,
RPI_FIRMWARE_GET_DOMAIN_STATE,
&packet, sizeof(packet));
return ret == 0 && packet.on != ~0;
}
static int rpi_power_probe(struct platform_device *pdev)
{
struct device_node *fw_np;
struct device *dev = &pdev->dev;
struct rpi_power_domains *rpi_domains;
rpi_domains = devm_kzalloc(dev, sizeof(*rpi_domains), GFP_KERNEL);
if (!rpi_domains)
return -ENOMEM;
rpi_domains->xlate.domains =
devm_kcalloc(dev,
RPI_POWER_DOMAIN_COUNT,
sizeof(*rpi_domains->xlate.domains),
GFP_KERNEL);
if (!rpi_domains->xlate.domains)
return -ENOMEM;
rpi_domains->xlate.num_domains = RPI_POWER_DOMAIN_COUNT;
fw_np = of_parse_phandle(pdev->dev.of_node, "firmware", 0);
if (!fw_np) {
dev_err(&pdev->dev, "no firmware node\n");
return -ENODEV;
}
rpi_domains->fw = devm_rpi_firmware_get(&pdev->dev, fw_np);
of_node_put(fw_np);
if (!rpi_domains->fw)
return -EPROBE_DEFER;
rpi_domains->has_new_interface =
rpi_has_new_domain_support(rpi_domains);
rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_I2C0, "I2C0");
rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_I2C1, "I2C1");
rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_I2C2, "I2C2");
rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_VIDEO_SCALER,
"VIDEO_SCALER");
rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_VPU1, "VPU1");
rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_HDMI, "HDMI");
/*
* Use the old firmware interface for USB power, so that we
* can turn it on even if the firmware hasn't been updated.
*/
rpi_init_old_power_domain(rpi_domains, RPI_POWER_DOMAIN_USB,
RPI_OLD_POWER_DOMAIN_USB, "USB");
rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_VEC, "VEC");
rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_JPEG, "JPEG");
rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_H264, "H264");
rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_V3D, "V3D");
rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_ISP, "ISP");
rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_UNICAM0, "UNICAM0");
rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_UNICAM1, "UNICAM1");
rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_CCP2RX, "CCP2RX");
rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_CSI2, "CSI2");
rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_CPI, "CPI");
rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_DSI0, "DSI0");
rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_DSI1, "DSI1");
rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_TRANSPOSER,
"TRANSPOSER");
rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_CCP2TX, "CCP2TX");
rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_CDP, "CDP");
rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_ARM, "ARM");
of_genpd_add_provider_onecell(dev->of_node, &rpi_domains->xlate);
platform_set_drvdata(pdev, rpi_domains);
return 0;
}
static const struct of_device_id rpi_power_of_match[] = {
{ .compatible = "raspberrypi,bcm2835-power", },
{},
};
MODULE_DEVICE_TABLE(of, rpi_power_of_match);
static struct platform_driver rpi_power_driver = {
.driver = {
.name = "raspberrypi-power",
.of_match_table = rpi_power_of_match,
},
.probe = rpi_power_probe,
};
builtin_platform_driver(rpi_power_driver);
MODULE_AUTHOR("Alexander Aring <[email protected]>");
MODULE_AUTHOR("Eric Anholt <[email protected]>");
MODULE_DESCRIPTION("Raspberry Pi power domain driver");
| linux-master | drivers/pmdomain/bcm/raspberrypi-power.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Power domain driver for Broadcom BCM2835
*
* Copyright (C) 2018 Broadcom
*/
#include <dt-bindings/soc/bcm2835-pm.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/mfd/bcm2835-pm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/reset-controller.h>
#include <linux/types.h>
#define PM_GNRIC 0x00
#define PM_AUDIO 0x04
#define PM_STATUS 0x18
#define PM_RSTC 0x1c
#define PM_RSTS 0x20
#define PM_WDOG 0x24
#define PM_PADS0 0x28
#define PM_PADS2 0x2c
#define PM_PADS3 0x30
#define PM_PADS4 0x34
#define PM_PADS5 0x38
#define PM_PADS6 0x3c
#define PM_CAM0 0x44
#define PM_CAM0_LDOHPEN BIT(2)
#define PM_CAM0_LDOLPEN BIT(1)
#define PM_CAM0_CTRLEN BIT(0)
#define PM_CAM1 0x48
#define PM_CAM1_LDOHPEN BIT(2)
#define PM_CAM1_LDOLPEN BIT(1)
#define PM_CAM1_CTRLEN BIT(0)
#define PM_CCP2TX 0x4c
#define PM_CCP2TX_LDOEN BIT(1)
#define PM_CCP2TX_CTRLEN BIT(0)
#define PM_DSI0 0x50
#define PM_DSI0_LDOHPEN BIT(2)
#define PM_DSI0_LDOLPEN BIT(1)
#define PM_DSI0_CTRLEN BIT(0)
#define PM_DSI1 0x54
#define PM_DSI1_LDOHPEN BIT(2)
#define PM_DSI1_LDOLPEN BIT(1)
#define PM_DSI1_CTRLEN BIT(0)
#define PM_HDMI 0x58
#define PM_HDMI_RSTDR BIT(19)
#define PM_HDMI_LDOPD BIT(1)
#define PM_HDMI_CTRLEN BIT(0)
#define PM_USB 0x5c
/* The power gates must be enabled with this bit before enabling the LDO in the
* USB block.
*/
#define PM_USB_CTRLEN BIT(0)
#define PM_PXLDO 0x60
#define PM_PXBG 0x64
#define PM_DFT 0x68
#define PM_SMPS 0x6c
#define PM_XOSC 0x70
#define PM_SPAREW 0x74
#define PM_SPARER 0x78
#define PM_AVS_RSTDR 0x7c
#define PM_AVS_STAT 0x80
#define PM_AVS_EVENT 0x84
#define PM_AVS_INTEN 0x88
#define PM_DUMMY 0xfc
#define PM_IMAGE 0x108
#define PM_GRAFX 0x10c
#define PM_PROC 0x110
#define PM_ENAB BIT(12)
#define PM_ISPRSTN BIT(8)
#define PM_H264RSTN BIT(7)
#define PM_PERIRSTN BIT(6)
#define PM_V3DRSTN BIT(6)
#define PM_ISFUNC BIT(5)
#define PM_MRDONE BIT(4)
#define PM_MEMREP BIT(3)
#define PM_ISPOW BIT(2)
#define PM_POWOK BIT(1)
#define PM_POWUP BIT(0)
#define PM_INRUSH_SHIFT 13
#define PM_INRUSH_3_5_MA 0
#define PM_INRUSH_5_MA 1
#define PM_INRUSH_10_MA 2
#define PM_INRUSH_20_MA 3
#define PM_INRUSH_MASK (3 << PM_INRUSH_SHIFT)
#define PM_PASSWORD 0x5a000000
#define PM_WDOG_TIME_SET 0x000fffff
#define PM_RSTC_WRCFG_CLR 0xffffffcf
#define PM_RSTS_HADWRH_SET 0x00000040
#define PM_RSTC_WRCFG_SET 0x00000030
#define PM_RSTC_WRCFG_FULL_RESET 0x00000020
#define PM_RSTC_RESET 0x00000102
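/*
 * Every PM register write must carry the 0x5a "password" in its top byte;
 * PM_WRITE() ORs PM_PASSWORD in so callers only pass the payload bits, while
 * PM_READ() returns the raw register contents.
 */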
#define PM_READ(reg) readl(power->base + (reg))
#define PM_WRITE(reg, val) writel(PM_PASSWORD | (val), power->base + (reg))
#define ASB_BRDG_VERSION 0x00
#define ASB_CPR_CTRL 0x04
#define ASB_V3D_S_CTRL 0x08
#define ASB_V3D_M_CTRL 0x0c
#define ASB_ISP_S_CTRL 0x10
#define ASB_ISP_M_CTRL 0x14
#define ASB_H264_S_CTRL 0x18
#define ASB_H264_M_CTRL 0x1c
#define ASB_REQ_STOP BIT(0)
#define ASB_ACK BIT(1)
#define ASB_EMPTY BIT(2)
#define ASB_FULL BIT(3)
#define ASB_AXI_BRDG_ID 0x20
#define BCM2835_BRDG_ID 0x62726467
struct bcm2835_power_domain {
struct generic_pm_domain base;
struct bcm2835_power *power;
u32 domain;
struct clk *clk;
};
struct bcm2835_power {
struct device *dev;
/* PM registers. */
void __iomem *base;
/* AXI Async bridge registers. */
void __iomem *asb;
/* RPiVid bridge registers. */
void __iomem *rpivid_asb;
struct genpd_onecell_data pd_xlate;
struct bcm2835_power_domain domains[BCM2835_POWER_DOMAIN_COUNT];
struct reset_controller_dev reset;
};
static int bcm2835_asb_control(struct bcm2835_power *power, u32 reg, bool enable)
{
void __iomem *base = power->asb;
u64 start;
u32 val;
switch (reg) {
case 0:
return 0;
case ASB_V3D_S_CTRL:
case ASB_V3D_M_CTRL:
if (power->rpivid_asb)
base = power->rpivid_asb;
break;
}
start = ktime_get_ns();
/* Enable the module's async AXI bridges. */
if (enable) {
val = readl(base + reg) & ~ASB_REQ_STOP;
} else {
val = readl(base + reg) | ASB_REQ_STOP;
}
writel(PM_PASSWORD | val, base + reg);
while (readl(base + reg) & ASB_ACK) {
cpu_relax();
if (ktime_get_ns() - start >= 1000)
return -ETIMEDOUT;
}
return 0;
}
static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg)
{
return bcm2835_asb_control(power, reg, true);
}
static int bcm2835_asb_disable(struct bcm2835_power *power, u32 reg)
{
return bcm2835_asb_control(power, reg, false);
}
static int bcm2835_power_power_off(struct bcm2835_power_domain *pd, u32 pm_reg)
{
struct bcm2835_power *power = pd->power;
/* We don't run this on BCM2711 */
if (power->rpivid_asb)
return 0;
/* Enable functional isolation */
PM_WRITE(pm_reg, PM_READ(pm_reg) & ~PM_ISFUNC);
/* Enable electrical isolation */
PM_WRITE(pm_reg, PM_READ(pm_reg) & ~PM_ISPOW);
/* Open the power switches. */
PM_WRITE(pm_reg, PM_READ(pm_reg) & ~PM_POWUP);
return 0;
}
static int bcm2835_power_power_on(struct bcm2835_power_domain *pd, u32 pm_reg)
{
struct bcm2835_power *power = pd->power;
struct device *dev = power->dev;
u64 start;
int ret;
int inrush;
bool powok;
/* We don't run this on BCM2711 */
if (power->rpivid_asb)
return 0;
/* If it was already powered on by the fw, leave it that way. */
if (PM_READ(pm_reg) & PM_POWUP)
return 0;
/* Enable power. Allowing too much current at once may result
* in POWOK never getting set, so start low and ramp it up as
* necessary to succeed.
*/
powok = false;
for (inrush = PM_INRUSH_3_5_MA; inrush <= PM_INRUSH_20_MA; inrush++) {
PM_WRITE(pm_reg,
(PM_READ(pm_reg) & ~PM_INRUSH_MASK) |
(inrush << PM_INRUSH_SHIFT) |
PM_POWUP);
start = ktime_get_ns();
while (!(powok = !!(PM_READ(pm_reg) & PM_POWOK))) {
cpu_relax();
if (ktime_get_ns() - start >= 3000)
break;
}
}
if (!powok) {
dev_err(dev, "Timeout waiting for %s power OK\n",
pd->base.name);
ret = -ETIMEDOUT;
goto err_disable_powup;
}
/* Disable electrical isolation */
PM_WRITE(pm_reg, PM_READ(pm_reg) | PM_ISPOW);
/* Repair memory */
PM_WRITE(pm_reg, PM_READ(pm_reg) | PM_MEMREP);
start = ktime_get_ns();
while (!(PM_READ(pm_reg) & PM_MRDONE)) {
cpu_relax();
if (ktime_get_ns() - start >= 1000) {
dev_err(dev, "Timeout waiting for %s memory repair\n",
pd->base.name);
ret = -ETIMEDOUT;
goto err_disable_ispow;
}
}
/* Disable functional isolation */
PM_WRITE(pm_reg, PM_READ(pm_reg) | PM_ISFUNC);
return 0;
err_disable_ispow:
PM_WRITE(pm_reg, PM_READ(pm_reg) & ~PM_ISPOW);
err_disable_powup:
PM_WRITE(pm_reg, PM_READ(pm_reg) & ~(PM_POWUP | PM_INRUSH_MASK));
return ret;
}
static int bcm2835_asb_power_on(struct bcm2835_power_domain *pd,
u32 pm_reg,
u32 asb_m_reg,
u32 asb_s_reg,
u32 reset_flags)
{
struct bcm2835_power *power = pd->power;
int ret;
ret = clk_prepare_enable(pd->clk);
if (ret) {
dev_err(power->dev, "Failed to enable clock for %s\n",
pd->base.name);
return ret;
}
/* Wait 32 clocks for reset to propagate, 1 us will be enough */
udelay(1);
clk_disable_unprepare(pd->clk);
/* Deassert the resets. */
PM_WRITE(pm_reg, PM_READ(pm_reg) | reset_flags);
ret = clk_prepare_enable(pd->clk);
if (ret) {
dev_err(power->dev, "Failed to enable clock for %s\n",
pd->base.name);
goto err_enable_resets;
}
ret = bcm2835_asb_enable(power, asb_m_reg);
if (ret) {
dev_err(power->dev, "Failed to enable ASB master for %s\n",
pd->base.name);
goto err_disable_clk;
}
ret = bcm2835_asb_enable(power, asb_s_reg);
if (ret) {
dev_err(power->dev, "Failed to enable ASB slave for %s\n",
pd->base.name);
goto err_disable_asb_master;
}
return 0;
err_disable_asb_master:
bcm2835_asb_disable(power, asb_m_reg);
err_disable_clk:
clk_disable_unprepare(pd->clk);
err_enable_resets:
PM_WRITE(pm_reg, PM_READ(pm_reg) & ~reset_flags);
return ret;
}
static int bcm2835_asb_power_off(struct bcm2835_power_domain *pd,
u32 pm_reg,
u32 asb_m_reg,
u32 asb_s_reg,
u32 reset_flags)
{
struct bcm2835_power *power = pd->power;
int ret;
ret = bcm2835_asb_disable(power, asb_s_reg);
if (ret) {
dev_warn(power->dev, "Failed to disable ASB slave for %s\n",
pd->base.name);
return ret;
}
ret = bcm2835_asb_disable(power, asb_m_reg);
if (ret) {
dev_warn(power->dev, "Failed to disable ASB master for %s\n",
pd->base.name);
bcm2835_asb_enable(power, asb_s_reg);
return ret;
}
clk_disable_unprepare(pd->clk);
/* Assert the resets. */
PM_WRITE(pm_reg, PM_READ(pm_reg) & ~reset_flags);
return 0;
}
static int bcm2835_power_pd_power_on(struct generic_pm_domain *domain)
{
struct bcm2835_power_domain *pd =
container_of(domain, struct bcm2835_power_domain, base);
struct bcm2835_power *power = pd->power;
switch (pd->domain) {
case BCM2835_POWER_DOMAIN_GRAFX:
return bcm2835_power_power_on(pd, PM_GRAFX);
case BCM2835_POWER_DOMAIN_GRAFX_V3D:
return bcm2835_asb_power_on(pd, PM_GRAFX,
ASB_V3D_M_CTRL, ASB_V3D_S_CTRL,
PM_V3DRSTN);
case BCM2835_POWER_DOMAIN_IMAGE:
return bcm2835_power_power_on(pd, PM_IMAGE);
case BCM2835_POWER_DOMAIN_IMAGE_PERI:
return bcm2835_asb_power_on(pd, PM_IMAGE,
0, 0,
PM_PERIRSTN);
case BCM2835_POWER_DOMAIN_IMAGE_ISP:
return bcm2835_asb_power_on(pd, PM_IMAGE,
ASB_ISP_M_CTRL, ASB_ISP_S_CTRL,
PM_ISPRSTN);
case BCM2835_POWER_DOMAIN_IMAGE_H264:
return bcm2835_asb_power_on(pd, PM_IMAGE,
ASB_H264_M_CTRL, ASB_H264_S_CTRL,
PM_H264RSTN);
case BCM2835_POWER_DOMAIN_USB:
PM_WRITE(PM_USB, PM_USB_CTRLEN);
return 0;
case BCM2835_POWER_DOMAIN_DSI0:
PM_WRITE(PM_DSI0, PM_DSI0_CTRLEN);
PM_WRITE(PM_DSI0, PM_DSI0_CTRLEN | PM_DSI0_LDOHPEN);
return 0;
case BCM2835_POWER_DOMAIN_DSI1:
PM_WRITE(PM_DSI1, PM_DSI1_CTRLEN);
PM_WRITE(PM_DSI1, PM_DSI1_CTRLEN | PM_DSI1_LDOHPEN);
return 0;
case BCM2835_POWER_DOMAIN_CCP2TX:
PM_WRITE(PM_CCP2TX, PM_CCP2TX_CTRLEN);
PM_WRITE(PM_CCP2TX, PM_CCP2TX_CTRLEN | PM_CCP2TX_LDOEN);
return 0;
case BCM2835_POWER_DOMAIN_HDMI:
PM_WRITE(PM_HDMI, PM_READ(PM_HDMI) | PM_HDMI_RSTDR);
PM_WRITE(PM_HDMI, PM_READ(PM_HDMI) | PM_HDMI_CTRLEN);
PM_WRITE(PM_HDMI, PM_READ(PM_HDMI) & ~PM_HDMI_LDOPD);
usleep_range(100, 200);
PM_WRITE(PM_HDMI, PM_READ(PM_HDMI) & ~PM_HDMI_RSTDR);
return 0;
default:
dev_err(power->dev, "Invalid domain %d\n", pd->domain);
return -EINVAL;
}
}
static int bcm2835_power_pd_power_off(struct generic_pm_domain *domain)
{
struct bcm2835_power_domain *pd =
container_of(domain, struct bcm2835_power_domain, base);
struct bcm2835_power *power = pd->power;
switch (pd->domain) {
case BCM2835_POWER_DOMAIN_GRAFX:
return bcm2835_power_power_off(pd, PM_GRAFX);
case BCM2835_POWER_DOMAIN_GRAFX_V3D:
return bcm2835_asb_power_off(pd, PM_GRAFX,
ASB_V3D_M_CTRL, ASB_V3D_S_CTRL,
PM_V3DRSTN);
case BCM2835_POWER_DOMAIN_IMAGE:
return bcm2835_power_power_off(pd, PM_IMAGE);
case BCM2835_POWER_DOMAIN_IMAGE_PERI:
return bcm2835_asb_power_off(pd, PM_IMAGE,
0, 0,
PM_PERIRSTN);
case BCM2835_POWER_DOMAIN_IMAGE_ISP:
return bcm2835_asb_power_off(pd, PM_IMAGE,
ASB_ISP_M_CTRL, ASB_ISP_S_CTRL,
PM_ISPRSTN);
case BCM2835_POWER_DOMAIN_IMAGE_H264:
return bcm2835_asb_power_off(pd, PM_IMAGE,
ASB_H264_M_CTRL, ASB_H264_S_CTRL,
PM_H264RSTN);
case BCM2835_POWER_DOMAIN_USB:
PM_WRITE(PM_USB, 0);
return 0;
case BCM2835_POWER_DOMAIN_DSI0:
PM_WRITE(PM_DSI0, PM_DSI0_CTRLEN);
PM_WRITE(PM_DSI0, 0);
return 0;
case BCM2835_POWER_DOMAIN_DSI1:
PM_WRITE(PM_DSI1, PM_DSI1_CTRLEN);
PM_WRITE(PM_DSI1, 0);
return 0;
case BCM2835_POWER_DOMAIN_CCP2TX:
PM_WRITE(PM_CCP2TX, PM_CCP2TX_CTRLEN);
PM_WRITE(PM_CCP2TX, 0);
return 0;
case BCM2835_POWER_DOMAIN_HDMI:
PM_WRITE(PM_HDMI, PM_READ(PM_HDMI) | PM_HDMI_LDOPD);
PM_WRITE(PM_HDMI, PM_READ(PM_HDMI) & ~PM_HDMI_CTRLEN);
return 0;
default:
dev_err(power->dev, "Invalid domain %d\n", pd->domain);
return -EINVAL;
}
}
static int
bcm2835_init_power_domain(struct bcm2835_power *power,
int pd_xlate_index, const char *name)
{
struct device *dev = power->dev;
struct bcm2835_power_domain *dom = &power->domains[pd_xlate_index];
dom->clk = devm_clk_get(dev->parent, name);
if (IS_ERR(dom->clk)) {
int ret = PTR_ERR(dom->clk);
if (ret == -EPROBE_DEFER)
return ret;
/* Some domains don't have a clk, so make sure that we
* don't deref an error pointer later.
*/
dom->clk = NULL;
}
dom->base.name = name;
dom->base.power_on = bcm2835_power_pd_power_on;
dom->base.power_off = bcm2835_power_pd_power_off;
dom->domain = pd_xlate_index;
dom->power = power;
/* XXX: on/off at boot? */
pm_genpd_init(&dom->base, NULL, true);
power->pd_xlate.domains[pd_xlate_index] = &dom->base;
return 0;
}
/*
 * bcm2835_reset_reset - Reset a block that has a reset line in the PM block.
 *
 * The consumer of the reset controller must have the power domain up --
 * there's no reset ability with the power domain down. To reset the
 * sub-block, we just disable its access to memory through the ASB, reset,
 * and re-enable.
 */
static int bcm2835_reset_reset(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct bcm2835_power *power = container_of(rcdev, struct bcm2835_power,
reset);
struct bcm2835_power_domain *pd;
int ret;
switch (id) {
case BCM2835_RESET_V3D:
pd = &power->domains[BCM2835_POWER_DOMAIN_GRAFX_V3D];
break;
case BCM2835_RESET_H264:
pd = &power->domains[BCM2835_POWER_DOMAIN_IMAGE_H264];
break;
case BCM2835_RESET_ISP:
pd = &power->domains[BCM2835_POWER_DOMAIN_IMAGE_ISP];
break;
default:
dev_err(power->dev, "Bad reset id %ld\n", id);
return -EINVAL;
}
ret = bcm2835_power_pd_power_off(&pd->base);
if (ret)
return ret;
return bcm2835_power_pd_power_on(&pd->base);
}
static int bcm2835_reset_status(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct bcm2835_power *power = container_of(rcdev, struct bcm2835_power,
reset);
switch (id) {
case BCM2835_RESET_V3D:
return !(PM_READ(PM_GRAFX) & PM_V3DRSTN);
case BCM2835_RESET_H264:
return !(PM_READ(PM_IMAGE) & PM_H264RSTN);
case BCM2835_RESET_ISP:
return !(PM_READ(PM_IMAGE) & PM_ISPRSTN);
default:
return -EINVAL;
}
}
static const struct reset_control_ops bcm2835_reset_ops = {
.reset = bcm2835_reset_reset,
.status = bcm2835_reset_status,
};
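/*
 * Illustrative sketch only (not part of this driver): as noted above, a reset
 * consumer must hold the corresponding power domain before triggering
 * bcm2835_reset_reset(). The hypothetical helper below only demonstrates the
 * expected call order using the standard consumer APIs (assumes
 * <linux/reset.h> and <linux/pm_runtime.h>).
 */
#if 0
static int example_v3d_reset(struct device *dev)
{
	struct reset_control *rst;
	int ret;

	rst = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	/* Keep the GRAFX_V3D domain powered while the reset is issued. */
	ret = pm_runtime_resume_and_get(dev);
	if (ret)
		return ret;

	ret = reset_control_reset(rst);	/* ends up in bcm2835_reset_reset() */

	pm_runtime_put(dev);
	return ret;
}
#endif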
static const char *const power_domain_names[] = {
[BCM2835_POWER_DOMAIN_GRAFX] = "grafx",
[BCM2835_POWER_DOMAIN_GRAFX_V3D] = "v3d",
[BCM2835_POWER_DOMAIN_IMAGE] = "image",
[BCM2835_POWER_DOMAIN_IMAGE_PERI] = "peri_image",
[BCM2835_POWER_DOMAIN_IMAGE_H264] = "h264",
[BCM2835_POWER_DOMAIN_IMAGE_ISP] = "isp",
[BCM2835_POWER_DOMAIN_USB] = "usb",
[BCM2835_POWER_DOMAIN_DSI0] = "dsi0",
[BCM2835_POWER_DOMAIN_DSI1] = "dsi1",
[BCM2835_POWER_DOMAIN_CAM0] = "cam0",
[BCM2835_POWER_DOMAIN_CAM1] = "cam1",
[BCM2835_POWER_DOMAIN_CCP2TX] = "ccp2tx",
[BCM2835_POWER_DOMAIN_HDMI] = "hdmi",
};
static int bcm2835_power_probe(struct platform_device *pdev)
{
struct bcm2835_pm *pm = dev_get_drvdata(pdev->dev.parent);
struct device *dev = &pdev->dev;
struct bcm2835_power *power;
static const struct {
int parent, child;
} domain_deps[] = {
{ BCM2835_POWER_DOMAIN_GRAFX, BCM2835_POWER_DOMAIN_GRAFX_V3D },
{ BCM2835_POWER_DOMAIN_IMAGE, BCM2835_POWER_DOMAIN_IMAGE_PERI },
{ BCM2835_POWER_DOMAIN_IMAGE, BCM2835_POWER_DOMAIN_IMAGE_H264 },
{ BCM2835_POWER_DOMAIN_IMAGE, BCM2835_POWER_DOMAIN_IMAGE_ISP },
{ BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_USB },
{ BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM0 },
{ BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM1 },
};
int ret = 0, i;
u32 id;
power = devm_kzalloc(dev, sizeof(*power), GFP_KERNEL);
if (!power)
return -ENOMEM;
platform_set_drvdata(pdev, power);
power->dev = dev;
power->base = pm->base;
power->asb = pm->asb;
power->rpivid_asb = pm->rpivid_asb;
id = readl(power->asb + ASB_AXI_BRDG_ID);
if (id != BCM2835_BRDG_ID /* "BRDG" */) {
dev_err(dev, "ASB register ID returned 0x%08x\n", id);
return -ENODEV;
}
if (power->rpivid_asb) {
id = readl(power->rpivid_asb + ASB_AXI_BRDG_ID);
if (id != BCM2835_BRDG_ID /* "BRDG" */) {
dev_err(dev, "RPiVid ASB register ID returned 0x%08x\n",
id);
return -ENODEV;
}
}
power->pd_xlate.domains = devm_kcalloc(dev,
ARRAY_SIZE(power_domain_names),
sizeof(*power->pd_xlate.domains),
GFP_KERNEL);
if (!power->pd_xlate.domains)
return -ENOMEM;
power->pd_xlate.num_domains = ARRAY_SIZE(power_domain_names);
for (i = 0; i < ARRAY_SIZE(power_domain_names); i++) {
ret = bcm2835_init_power_domain(power, i, power_domain_names[i]);
if (ret)
goto fail;
}
for (i = 0; i < ARRAY_SIZE(domain_deps); i++) {
pm_genpd_add_subdomain(&power->domains[domain_deps[i].parent].base,
&power->domains[domain_deps[i].child].base);
}
power->reset.owner = THIS_MODULE;
power->reset.nr_resets = BCM2835_RESET_COUNT;
power->reset.ops = &bcm2835_reset_ops;
power->reset.of_node = dev->parent->of_node;
ret = devm_reset_controller_register(dev, &power->reset);
if (ret)
goto fail;
of_genpd_add_provider_onecell(dev->parent->of_node, &power->pd_xlate);
dev_info(dev, "Broadcom BCM2835 power domains driver");
return 0;
fail:
for (i = 0; i < ARRAY_SIZE(power_domain_names); i++) {
struct generic_pm_domain *dom = &power->domains[i].base;
if (dom->name)
pm_genpd_remove(dom);
}
return ret;
}
static struct platform_driver bcm2835_power_driver = {
.probe = bcm2835_power_probe,
.driver = {
.name = "bcm2835-power",
},
};
module_platform_driver(bcm2835_power_driver);
MODULE_AUTHOR("Eric Anholt <[email protected]>");
MODULE_DESCRIPTION("Driver for Broadcom BCM2835 PM power domains and reset");
| linux-master | drivers/pmdomain/bcm/bcm2835-power.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2013 Broadcom
* Copyright (C) 2020 Rafał Miłecki <[email protected]>
*/
#include <dt-bindings/soc/bcm-pmb.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/reset/bcm63xx_pmb.h>
#define BPCM_ID_REG 0x00
#define BPCM_CAPABILITIES 0x04
#define BPCM_CAP_NUM_ZONES 0x000000ff
#define BPCM_CAP_SR_REG_BITS 0x0000ff00
#define BPCM_CAP_PLLTYPE 0x00030000
#define BPCM_CAP_UBUS 0x00080000
#define BPCM_CONTROL 0x08
#define BPCM_STATUS 0x0c
#define BPCM_ROSC_CONTROL 0x10
#define BPCM_ROSC_THRESH_H 0x14
#define BPCM_ROSC_THRESHOLD_BCM6838 0x14
#define BPCM_ROSC_THRESH_S 0x18
#define BPCM_ROSC_COUNT_BCM6838 0x18
#define BPCM_ROSC_COUNT 0x1c
#define BPCM_PWD_CONTROL_BCM6838 0x1c
#define BPCM_PWD_CONTROL 0x20
#define BPCM_SR_CONTROL_BCM6838 0x20
#define BPCM_PWD_ACCUM_CONTROL 0x24
#define BPCM_SR_CONTROL 0x28
#define BPCM_GLOBAL_CONTROL 0x2c
#define BPCM_MISC_CONTROL 0x30
#define BPCM_MISC_CONTROL2 0x34
#define BPCM_SGPHY_CNTL 0x38
#define BPCM_SGPHY_STATUS 0x3c
#define BPCM_ZONE0 0x40
#define BPCM_ZONE_CONTROL 0x00
#define BPCM_ZONE_CONTROL_MANUAL_CLK_EN 0x00000001
#define BPCM_ZONE_CONTROL_MANUAL_RESET_CTL 0x00000002
#define BPCM_ZONE_CONTROL_FREQ_SCALE_USED 0x00000004 /* R/O */
#define BPCM_ZONE_CONTROL_DPG_CAPABLE 0x00000008 /* R/O */
#define BPCM_ZONE_CONTROL_MANUAL_MEM_PWR 0x00000030
#define BPCM_ZONE_CONTROL_MANUAL_ISO_CTL 0x00000040
#define BPCM_ZONE_CONTROL_MANUAL_CTL 0x00000080
#define BPCM_ZONE_CONTROL_DPG_CTL_EN 0x00000100
#define BPCM_ZONE_CONTROL_PWR_DN_REQ 0x00000200
#define BPCM_ZONE_CONTROL_PWR_UP_REQ 0x00000400
#define BPCM_ZONE_CONTROL_MEM_PWR_CTL_EN 0x00000800
#define BPCM_ZONE_CONTROL_BLK_RESET_ASSERT 0x00001000
#define BPCM_ZONE_CONTROL_MEM_STBY 0x00002000
#define BPCM_ZONE_CONTROL_RESERVED 0x0007c000
#define BPCM_ZONE_CONTROL_PWR_CNTL_STATE 0x00f80000
#define BPCM_ZONE_CONTROL_FREQ_SCALAR_DYN_SEL 0x01000000 /* R/O */
#define BPCM_ZONE_CONTROL_PWR_OFF_STATE 0x02000000 /* R/O */
#define BPCM_ZONE_CONTROL_PWR_ON_STATE 0x04000000 /* R/O */
#define BPCM_ZONE_CONTROL_PWR_GOOD 0x08000000 /* R/O */
#define BPCM_ZONE_CONTROL_DPG_PWR_STATE 0x10000000 /* R/O */
#define BPCM_ZONE_CONTROL_MEM_PWR_STATE 0x20000000 /* R/O */
#define BPCM_ZONE_CONTROL_ISO_STATE 0x40000000 /* R/O */
#define BPCM_ZONE_CONTROL_RESET_STATE 0x80000000 /* R/O */
#define BPCM_ZONE_CONFIG1 0x04
#define BPCM_ZONE_CONFIG2 0x08
#define BPCM_ZONE_FREQ_SCALAR_CONTROL 0x0c
#define BPCM_ZONE_SIZE 0x10
struct bcm_pmb {
struct device *dev;
void __iomem *base;
spinlock_t lock;
bool little_endian;
struct genpd_onecell_data genpd_onecell_data;
};
struct bcm_pmb_pd_data {
const char * const name;
int id;
u8 bus;
u8 device;
};
struct bcm_pmb_pm_domain {
struct bcm_pmb *pmb;
const struct bcm_pmb_pd_data *data;
struct generic_pm_domain genpd;
};
static int bcm_pmb_bpcm_read(struct bcm_pmb *pmb, int bus, u8 device,
int offset, u32 *val)
{
void __iomem *base = pmb->base + bus * 0x20;
unsigned long flags;
int err;
spin_lock_irqsave(&pmb->lock, flags);
err = bpcm_rd(base, device, offset, val);
spin_unlock_irqrestore(&pmb->lock, flags);
if (!err)
*val = pmb->little_endian ? le32_to_cpu(*val) : be32_to_cpu(*val);
return err;
}
static int bcm_pmb_bpcm_write(struct bcm_pmb *pmb, int bus, u8 device,
int offset, u32 val)
{
void __iomem *base = pmb->base + bus * 0x20;
unsigned long flags;
int err;
val = pmb->little_endian ? cpu_to_le32(val) : cpu_to_be32(val);
spin_lock_irqsave(&pmb->lock, flags);
err = bpcm_wr(base, device, offset, val);
spin_unlock_irqrestore(&pmb->lock, flags);
return err;
}
static int bcm_pmb_power_off_zone(struct bcm_pmb *pmb, int bus, u8 device,
int zone)
{
int offset;
u32 val;
int err;
offset = BPCM_ZONE0 + zone * BPCM_ZONE_SIZE + BPCM_ZONE_CONTROL;
err = bcm_pmb_bpcm_read(pmb, bus, device, offset, &val);
if (err)
return err;
val |= BPCM_ZONE_CONTROL_PWR_DN_REQ;
val &= ~BPCM_ZONE_CONTROL_PWR_UP_REQ;
err = bcm_pmb_bpcm_write(pmb, bus, device, offset, val);
return err;
}
static int bcm_pmb_power_on_zone(struct bcm_pmb *pmb, int bus, u8 device,
int zone)
{
int offset;
u32 val;
int err;
offset = BPCM_ZONE0 + zone * BPCM_ZONE_SIZE + BPCM_ZONE_CONTROL;
err = bcm_pmb_bpcm_read(pmb, bus, device, offset, &val);
if (err)
return err;
if (!(val & BPCM_ZONE_CONTROL_PWR_ON_STATE)) {
val &= ~BPCM_ZONE_CONTROL_PWR_DN_REQ;
val |= BPCM_ZONE_CONTROL_DPG_CTL_EN;
val |= BPCM_ZONE_CONTROL_PWR_UP_REQ;
val |= BPCM_ZONE_CONTROL_MEM_PWR_CTL_EN;
val |= BPCM_ZONE_CONTROL_BLK_RESET_ASSERT;
err = bcm_pmb_bpcm_write(pmb, bus, device, offset, val);
}
return err;
}
static int bcm_pmb_power_off_device(struct bcm_pmb *pmb, int bus, u8 device)
{
int offset;
u32 val;
int err;
/* Entire device can be powered off by powering off the 0th zone */
offset = BPCM_ZONE0 + BPCM_ZONE_CONTROL;
err = bcm_pmb_bpcm_read(pmb, bus, device, offset, &val);
if (err)
return err;
if (!(val & BPCM_ZONE_CONTROL_PWR_OFF_STATE)) {
val = BPCM_ZONE_CONTROL_PWR_DN_REQ;
err = bcm_pmb_bpcm_write(pmb, bus, device, offset, val);
}
return err;
}
static int bcm_pmb_power_on_device(struct bcm_pmb *pmb, int bus, u8 device)
{
u32 val;
int err;
int i;
err = bcm_pmb_bpcm_read(pmb, bus, device, BPCM_CAPABILITIES, &val);
if (err)
return err;
for (i = 0; i < (val & BPCM_CAP_NUM_ZONES); i++) {
err = bcm_pmb_power_on_zone(pmb, bus, device, i);
if (err)
return err;
}
return err;
}
static int bcm_pmb_power_on_sata(struct bcm_pmb *pmb, int bus, u8 device)
{
int err;
err = bcm_pmb_power_on_zone(pmb, bus, device, 0);
if (err)
return err;
/* Does not apply to the BCM963158 */
err = bcm_pmb_bpcm_write(pmb, bus, device, BPCM_MISC_CONTROL, 0);
if (err)
return err;
err = bcm_pmb_bpcm_write(pmb, bus, device, BPCM_SR_CONTROL, 0xffffffff);
if (err)
return err;
err = bcm_pmb_bpcm_write(pmb, bus, device, BPCM_SR_CONTROL, 0);
return err;
}
static int bcm_pmb_power_on(struct generic_pm_domain *genpd)
{
struct bcm_pmb_pm_domain *pd = container_of(genpd, struct bcm_pmb_pm_domain, genpd);
const struct bcm_pmb_pd_data *data = pd->data;
struct bcm_pmb *pmb = pd->pmb;
switch (data->id) {
case BCM_PMB_PCIE0:
case BCM_PMB_PCIE1:
case BCM_PMB_PCIE2:
return bcm_pmb_power_on_zone(pmb, data->bus, data->device, 0);
case BCM_PMB_HOST_USB:
return bcm_pmb_power_on_device(pmb, data->bus, data->device);
case BCM_PMB_SATA:
return bcm_pmb_power_on_sata(pmb, data->bus, data->device);
default:
dev_err(pmb->dev, "unsupported device id: %d\n", data->id);
return -EINVAL;
}
}
static int bcm_pmb_power_off(struct generic_pm_domain *genpd)
{
struct bcm_pmb_pm_domain *pd = container_of(genpd, struct bcm_pmb_pm_domain, genpd);
const struct bcm_pmb_pd_data *data = pd->data;
struct bcm_pmb *pmb = pd->pmb;
switch (data->id) {
case BCM_PMB_PCIE0:
case BCM_PMB_PCIE1:
case BCM_PMB_PCIE2:
return bcm_pmb_power_off_zone(pmb, data->bus, data->device, 0);
case BCM_PMB_HOST_USB:
return bcm_pmb_power_off_device(pmb, data->bus, data->device);
default:
dev_err(pmb->dev, "unsupported device id: %d\n", data->id);
return -EINVAL;
}
}
static int bcm_pmb_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct bcm_pmb_pd_data *table;
const struct bcm_pmb_pd_data *e;
struct bcm_pmb *pmb;
int max_id;
int err;
pmb = devm_kzalloc(dev, sizeof(*pmb), GFP_KERNEL);
if (!pmb)
return -ENOMEM;
pmb->dev = dev;
pmb->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pmb->base))
return PTR_ERR(pmb->base);
spin_lock_init(&pmb->lock);
pmb->little_endian = !of_device_is_big_endian(dev->of_node);
table = of_device_get_match_data(dev);
if (!table)
return -EINVAL;
max_id = 0;
for (e = table; e->name; e++)
max_id = max(max_id, e->id);
pmb->genpd_onecell_data.num_domains = max_id + 1;
pmb->genpd_onecell_data.domains =
devm_kcalloc(dev, pmb->genpd_onecell_data.num_domains,
sizeof(struct generic_pm_domain *), GFP_KERNEL);
if (!pmb->genpd_onecell_data.domains)
return -ENOMEM;
for (e = table; e->name; e++) {
struct bcm_pmb_pm_domain *pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
if (!pd)
return -ENOMEM;
pd->pmb = pmb;
pd->data = e;
pd->genpd.name = e->name;
pd->genpd.power_on = bcm_pmb_power_on;
pd->genpd.power_off = bcm_pmb_power_off;
pm_genpd_init(&pd->genpd, NULL, true);
pmb->genpd_onecell_data.domains[e->id] = &pd->genpd;
}
err = of_genpd_add_provider_onecell(dev->of_node, &pmb->genpd_onecell_data);
if (err) {
dev_err(dev, "failed to add genpd provider: %d\n", err);
return err;
}
return 0;
}
static const struct bcm_pmb_pd_data bcm_pmb_bcm4908_data[] = {
{ .name = "pcie2", .id = BCM_PMB_PCIE2, .bus = 0, .device = 2, },
{ .name = "pcie0", .id = BCM_PMB_PCIE0, .bus = 1, .device = 14, },
{ .name = "pcie1", .id = BCM_PMB_PCIE1, .bus = 1, .device = 15, },
{ .name = "usb", .id = BCM_PMB_HOST_USB, .bus = 1, .device = 17, },
{ },
};
static const struct bcm_pmb_pd_data bcm_pmb_bcm63138_data[] = {
{ .name = "sata", .id = BCM_PMB_SATA, .bus = 0, .device = 3, },
{ },
};
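/*
 * Illustrative sketch only (not a real SoC description): supporting another
 * chip needs nothing more than a new NULL-terminated table plus a matching
 * entry in bcm_pmb_of_match[] below. The .bus/.device numbers here are
 * placeholders, not taken from any datasheet; bcm_pmb_probe() sizes the genpd
 * array from the largest .id found in the table.
 */
#if 0
static const struct bcm_pmb_pd_data bcm_pmb_example_data[] = {
	{ .name = "pcie0", .id = BCM_PMB_PCIE0, .bus = 0, .device = 1, },
	{ .name = "usb", .id = BCM_PMB_HOST_USB, .bus = 0, .device = 2, },
	{ },
};
#endif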
static const struct of_device_id bcm_pmb_of_match[] = {
{ .compatible = "brcm,bcm4908-pmb", .data = &bcm_pmb_bcm4908_data, },
{ .compatible = "brcm,bcm63138-pmb", .data = &bcm_pmb_bcm63138_data, },
{ },
};
static struct platform_driver bcm_pmb_driver = {
.driver = {
.name = "bcm-pmb",
.of_match_table = bcm_pmb_of_match,
},
.probe = bcm_pmb_probe,
};
builtin_platform_driver(bcm_pmb_driver);
| linux-master | drivers/pmdomain/bcm/bcm-pmb.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas R-Car S4-8 System Controller
*
* Copyright (C) 2021 Renesas Electronics Corp.
*/
#include <linux/bits.h>
#include <linux/clk/renesas.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/of_address.h>
#include <linux/pm_domain.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <dt-bindings/power/r8a779f0-sysc.h>
#include "rcar-gen4-sysc.h"
static struct rcar_gen4_sysc_area r8a779f0_areas[] __initdata = {
{ "always-on", R8A779F0_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
{ "a3e0", R8A779F0_PD_A3E0, R8A779F0_PD_ALWAYS_ON, PD_SCU },
{ "a3e1", R8A779F0_PD_A3E1, R8A779F0_PD_ALWAYS_ON, PD_SCU },
{ "a2e0d0", R8A779F0_PD_A2E0D0, R8A779F0_PD_A3E0, PD_SCU },
{ "a2e0d1", R8A779F0_PD_A2E0D1, R8A779F0_PD_A3E0, PD_SCU },
{ "a2e1d0", R8A779F0_PD_A2E1D0, R8A779F0_PD_A3E1, PD_SCU },
{ "a2e1d1", R8A779F0_PD_A2E1D1, R8A779F0_PD_A3E1, PD_SCU },
{ "a1e0d0c0", R8A779F0_PD_A1E0D0C0, R8A779F0_PD_A2E0D0, PD_CPU_NOCR },
{ "a1e0d0c1", R8A779F0_PD_A1E0D0C1, R8A779F0_PD_A2E0D0, PD_CPU_NOCR },
{ "a1e0d1c0", R8A779F0_PD_A1E0D1C0, R8A779F0_PD_A2E0D1, PD_CPU_NOCR },
{ "a1e0d1c1", R8A779F0_PD_A1E0D1C1, R8A779F0_PD_A2E0D1, PD_CPU_NOCR },
{ "a1e1d0c0", R8A779F0_PD_A1E1D0C0, R8A779F0_PD_A2E1D0, PD_CPU_NOCR },
{ "a1e1d0c1", R8A779F0_PD_A1E1D0C1, R8A779F0_PD_A2E1D0, PD_CPU_NOCR },
{ "a1e1d1c0", R8A779F0_PD_A1E1D1C0, R8A779F0_PD_A2E1D1, PD_CPU_NOCR },
{ "a1e1d1c1", R8A779F0_PD_A1E1D1C1, R8A779F0_PD_A2E1D1, PD_CPU_NOCR },
};
const struct rcar_gen4_sysc_info r8a779f0_sysc_info __initconst = {
.areas = r8a779f0_areas,
.num_areas = ARRAY_SIZE(r8a779f0_areas),
};
| linux-master | drivers/pmdomain/renesas/r8a779f0-sysc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas R-Car V4H System Controller
*
* Copyright (C) 2022 Renesas Electronics Corp.
*/
#include <linux/bits.h>
#include <linux/clk/renesas.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/of_address.h>
#include <linux/pm_domain.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <dt-bindings/power/r8a779g0-sysc.h>
#include "rcar-gen4-sysc.h"
static struct rcar_gen4_sysc_area r8a779g0_areas[] __initdata = {
{ "always-on", R8A779G0_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
{ "a3e0", R8A779G0_PD_A3E0, R8A779G0_PD_ALWAYS_ON, PD_SCU },
{ "a2e0d0", R8A779G0_PD_A2E0D0, R8A779G0_PD_A3E0, PD_SCU },
{ "a2e0d1", R8A779G0_PD_A2E0D1, R8A779G0_PD_A3E0, PD_SCU },
{ "a1e0d0c0", R8A779G0_PD_A1E0D0C0, R8A779G0_PD_A2E0D0, PD_CPU_NOCR },
{ "a1e0d0c1", R8A779G0_PD_A1E0D0C1, R8A779G0_PD_A2E0D0, PD_CPU_NOCR },
{ "a1e0d1c0", R8A779G0_PD_A1E0D1C0, R8A779G0_PD_A2E0D1, PD_CPU_NOCR },
{ "a1e0d1c1", R8A779G0_PD_A1E0D1C1, R8A779G0_PD_A2E0D1, PD_CPU_NOCR },
{ "a33dga", R8A779G0_PD_A33DGA, R8A779G0_PD_ALWAYS_ON },
{ "a23dgb", R8A779G0_PD_A23DGB, R8A779G0_PD_A33DGA },
{ "a3vip0", R8A779G0_PD_A3VIP0, R8A779G0_PD_ALWAYS_ON },
{ "a3vip1", R8A779G0_PD_A3VIP1, R8A779G0_PD_ALWAYS_ON },
{ "a3vip2", R8A779G0_PD_A3VIP2, R8A779G0_PD_ALWAYS_ON },
{ "a3dul", R8A779G0_PD_A3DUL, R8A779G0_PD_ALWAYS_ON },
{ "a3isp0", R8A779G0_PD_A3ISP0, R8A779G0_PD_ALWAYS_ON },
{ "a3isp1", R8A779G0_PD_A3ISP1, R8A779G0_PD_ALWAYS_ON },
{ "a3ir", R8A779G0_PD_A3IR, R8A779G0_PD_ALWAYS_ON },
{ "a2cn0", R8A779G0_PD_A2CN0, R8A779G0_PD_A3IR },
{ "a1cnn0", R8A779G0_PD_A1CNN0, R8A779G0_PD_A2CN0 },
{ "a1dsp0", R8A779G0_PD_A1DSP0, R8A779G0_PD_A2CN0 },
{ "a1dsp1", R8A779G0_PD_A1DSP1, R8A779G0_PD_A2CN0 },
{ "a1dsp2", R8A779G0_PD_A1DSP2, R8A779G0_PD_A2CN0 },
{ "a1dsp3", R8A779G0_PD_A1DSP3, R8A779G0_PD_A2CN0 },
{ "a2imp01", R8A779G0_PD_A2IMP01, R8A779G0_PD_A3IR },
{ "a2imp23", R8A779G0_PD_A2IMP23, R8A779G0_PD_A3IR },
{ "a2psc", R8A779G0_PD_A2PSC, R8A779G0_PD_A3IR },
{ "a2dma", R8A779G0_PD_A2DMA, R8A779G0_PD_A3IR },
{ "a2cv0", R8A779G0_PD_A2CV0, R8A779G0_PD_A3IR },
{ "a2cv1", R8A779G0_PD_A2CV1, R8A779G0_PD_A3IR },
{ "a2cv2", R8A779G0_PD_A2CV2, R8A779G0_PD_A3IR },
{ "a2cv3", R8A779G0_PD_A2CV3, R8A779G0_PD_A3IR },
};
const struct rcar_gen4_sysc_info r8a779g0_sysc_info __initconst = {
.areas = r8a779g0_areas,
.num_areas = ARRAY_SIZE(r8a779g0_areas),
};
| linux-master | drivers/pmdomain/renesas/r8a779g0-sysc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas R-Car H3 System Controller
*
* Copyright (C) 2016-2017 Glider bvba
*/
#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/sys_soc.h>
#include <dt-bindings/power/r8a7795-sysc.h>
#include "rcar-sysc.h"
static struct rcar_sysc_area r8a7795_areas[] __initdata = {
{ "always-on", 0, 0, R8A7795_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
{ "ca57-scu", 0x1c0, 0, R8A7795_PD_CA57_SCU, R8A7795_PD_ALWAYS_ON,
PD_SCU },
{ "ca57-cpu0", 0x80, 0, R8A7795_PD_CA57_CPU0, R8A7795_PD_CA57_SCU,
PD_CPU_NOCR },
{ "ca57-cpu1", 0x80, 1, R8A7795_PD_CA57_CPU1, R8A7795_PD_CA57_SCU,
PD_CPU_NOCR },
{ "ca57-cpu2", 0x80, 2, R8A7795_PD_CA57_CPU2, R8A7795_PD_CA57_SCU,
PD_CPU_NOCR },
{ "ca57-cpu3", 0x80, 3, R8A7795_PD_CA57_CPU3, R8A7795_PD_CA57_SCU,
PD_CPU_NOCR },
{ "ca53-scu", 0x140, 0, R8A7795_PD_CA53_SCU, R8A7795_PD_ALWAYS_ON,
PD_SCU },
{ "ca53-cpu0", 0x200, 0, R8A7795_PD_CA53_CPU0, R8A7795_PD_CA53_SCU,
PD_CPU_NOCR },
{ "ca53-cpu1", 0x200, 1, R8A7795_PD_CA53_CPU1, R8A7795_PD_CA53_SCU,
PD_CPU_NOCR },
{ "ca53-cpu2", 0x200, 2, R8A7795_PD_CA53_CPU2, R8A7795_PD_CA53_SCU,
PD_CPU_NOCR },
{ "ca53-cpu3", 0x200, 3, R8A7795_PD_CA53_CPU3, R8A7795_PD_CA53_SCU,
PD_CPU_NOCR },
{ "a3vp", 0x340, 0, R8A7795_PD_A3VP, R8A7795_PD_ALWAYS_ON },
{ "cr7", 0x240, 0, R8A7795_PD_CR7, R8A7795_PD_ALWAYS_ON },
{ "a3vc", 0x380, 0, R8A7795_PD_A3VC, R8A7795_PD_ALWAYS_ON },
{ "a2vc1", 0x3c0, 1, R8A7795_PD_A2VC1, R8A7795_PD_A3VC },
{ "3dg-a", 0x100, 0, R8A7795_PD_3DG_A, R8A7795_PD_ALWAYS_ON },
{ "3dg-b", 0x100, 1, R8A7795_PD_3DG_B, R8A7795_PD_3DG_A },
{ "3dg-c", 0x100, 2, R8A7795_PD_3DG_C, R8A7795_PD_3DG_B },
{ "3dg-d", 0x100, 3, R8A7795_PD_3DG_D, R8A7795_PD_3DG_C },
{ "3dg-e", 0x100, 4, R8A7795_PD_3DG_E, R8A7795_PD_3DG_D },
{ "a3ir", 0x180, 0, R8A7795_PD_A3IR, R8A7795_PD_ALWAYS_ON },
};
/*
* Fixups for R-Car H3 revisions
*/
#define NO_EXTMASK BIT(1) /* Missing SYSCEXTMASK register */
static const struct soc_device_attribute r8a7795_quirks_match[] __initconst = {
{
.soc_id = "r8a7795", .revision = "ES2.*",
.data = (void *)(NO_EXTMASK),
},
{ /* sentinel */ }
};
static int __init r8a7795_sysc_init(void)
{
const struct soc_device_attribute *attr;
u32 quirks = 0;
attr = soc_device_match(r8a7795_quirks_match);
if (attr)
quirks = (uintptr_t)attr->data;
if (quirks & NO_EXTMASK)
r8a7795_sysc_info.extmask_val = 0;
return 0;
}
struct rcar_sysc_info r8a7795_sysc_info __initdata = {
.init = r8a7795_sysc_init,
.areas = r8a7795_areas,
.num_areas = ARRAY_SIZE(r8a7795_areas),
.extmask_offs = 0x2f8,
.extmask_val = BIT(0),
};
| linux-master | drivers/pmdomain/renesas/r8a7795-sysc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas R-Car V3M System Controller
*
* Copyright (C) 2017 Cogent Embedded Inc.
*/
#include <linux/bits.h>
#include <linux/kernel.h>
#include <dt-bindings/power/r8a77970-sysc.h>
#include "rcar-sysc.h"
static const struct rcar_sysc_area r8a77970_areas[] __initconst = {
{ "always-on", 0, 0, R8A77970_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
{ "ca53-scu", 0x140, 0, R8A77970_PD_CA53_SCU, R8A77970_PD_ALWAYS_ON,
PD_SCU },
{ "ca53-cpu0", 0x200, 0, R8A77970_PD_CA53_CPU0, R8A77970_PD_CA53_SCU,
PD_CPU_NOCR },
{ "ca53-cpu1", 0x200, 1, R8A77970_PD_CA53_CPU1, R8A77970_PD_CA53_SCU,
PD_CPU_NOCR },
{ "a3ir", 0x180, 0, R8A77970_PD_A3IR, R8A77970_PD_ALWAYS_ON },
{ "a2ir0", 0x400, 0, R8A77970_PD_A2IR0, R8A77970_PD_A3IR },
{ "a2ir1", 0x400, 1, R8A77970_PD_A2IR1, R8A77970_PD_A3IR },
{ "a2dp", 0x400, 2, R8A77970_PD_A2DP, R8A77970_PD_A3IR },
{ "a2cn", 0x400, 3, R8A77970_PD_A2CN, R8A77970_PD_A3IR },
{ "a2sc0", 0x400, 4, R8A77970_PD_A2SC0, R8A77970_PD_A3IR },
{ "a2sc1", 0x400, 5, R8A77970_PD_A2SC1, R8A77970_PD_A3IR },
};
const struct rcar_sysc_info r8a77970_sysc_info __initconst = {
.areas = r8a77970_areas,
.num_areas = ARRAY_SIZE(r8a77970_areas),
.extmask_offs = 0x1b0,
.extmask_val = BIT(0),
};
| linux-master | drivers/pmdomain/renesas/r8a77970-sysc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas RZ/G2N System Controller
* Copyright (C) 2019 Renesas Electronics Corp.
*
* Based on Renesas R-Car M3-W System Controller
* Copyright (C) 2016 Glider bvba
*/
#include <linux/bits.h>
#include <linux/kernel.h>
#include <dt-bindings/power/r8a774b1-sysc.h>
#include "rcar-sysc.h"
static const struct rcar_sysc_area r8a774b1_areas[] __initconst = {
{ "always-on", 0, 0, R8A774B1_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
{ "ca57-scu", 0x1c0, 0, R8A774B1_PD_CA57_SCU, R8A774B1_PD_ALWAYS_ON,
PD_SCU },
{ "ca57-cpu0", 0x80, 0, R8A774B1_PD_CA57_CPU0, R8A774B1_PD_CA57_SCU,
PD_CPU_NOCR },
{ "ca57-cpu1", 0x80, 1, R8A774B1_PD_CA57_CPU1, R8A774B1_PD_CA57_SCU,
PD_CPU_NOCR },
{ "a3vc", 0x380, 0, R8A774B1_PD_A3VC, R8A774B1_PD_ALWAYS_ON },
{ "a3vp", 0x340, 0, R8A774B1_PD_A3VP, R8A774B1_PD_ALWAYS_ON },
{ "a2vc1", 0x3c0, 1, R8A774B1_PD_A2VC1, R8A774B1_PD_A3VC },
{ "3dg-a", 0x100, 0, R8A774B1_PD_3DG_A, R8A774B1_PD_ALWAYS_ON },
{ "3dg-b", 0x100, 1, R8A774B1_PD_3DG_B, R8A774B1_PD_3DG_A },
};
const struct rcar_sysc_info r8a774b1_sysc_info __initconst = {
.areas = r8a774b1_areas,
.num_areas = ARRAY_SIZE(r8a774b1_areas),
.extmask_offs = 0x2f8,
.extmask_val = BIT(0),
};
| linux-master | drivers/pmdomain/renesas/r8a774b1-sysc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* rmobile power management support
*
* Copyright (C) 2012 Renesas Solutions Corp.
* Copyright (C) 2012 Kuninori Morimoto <[email protected]>
* Copyright (C) 2014 Glider bvba
*
* based on pm-sh7372.c
* Copyright (C) 2011 Magnus Damm
*/
#include <linux/clk/renesas.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pm.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/slab.h>
/* SYSC */
#define SPDCR 0x08 /* SYS Power Down Control Register */
#define SWUCR 0x14 /* SYS Wakeup Control Register */
#define PSTR 0x80 /* Power Status Register */
#define PSTR_RETRIES 100
#define PSTR_DELAY_US 10
struct rmobile_pm_domain {
struct generic_pm_domain genpd;
struct dev_power_governor *gov;
int (*suspend)(void);
void __iomem *base;
unsigned int bit_shift;
};
static inline
struct rmobile_pm_domain *to_rmobile_pd(struct generic_pm_domain *d)
{
return container_of(d, struct rmobile_pm_domain, genpd);
}
static int rmobile_pd_power_down(struct generic_pm_domain *genpd)
{
struct rmobile_pm_domain *rmobile_pd = to_rmobile_pd(genpd);
unsigned int mask = BIT(rmobile_pd->bit_shift);
u32 val;
if (rmobile_pd->suspend) {
int ret = rmobile_pd->suspend();
if (ret)
return ret;
}
if (readl(rmobile_pd->base + PSTR) & mask) {
writel(mask, rmobile_pd->base + SPDCR);
readl_poll_timeout_atomic(rmobile_pd->base + SPDCR, val,
!(val & mask), 0, PSTR_RETRIES);
}
pr_debug("%s: Power off, 0x%08x -> PSTR = 0x%08x\n", genpd->name, mask,
readl(rmobile_pd->base + PSTR));
return 0;
}
static int __rmobile_pd_power_up(struct rmobile_pm_domain *rmobile_pd)
{
unsigned int val, mask = BIT(rmobile_pd->bit_shift);
int ret = 0;
if (readl(rmobile_pd->base + PSTR) & mask)
return ret;
writel(mask, rmobile_pd->base + SWUCR);
ret = readl_poll_timeout_atomic(rmobile_pd->base + SWUCR, val,
(val & mask), PSTR_DELAY_US,
PSTR_RETRIES * PSTR_DELAY_US);
pr_debug("%s: Power on, 0x%08x -> PSTR = 0x%08x\n",
rmobile_pd->genpd.name, mask,
readl(rmobile_pd->base + PSTR));
return ret;
}
static int rmobile_pd_power_up(struct generic_pm_domain *genpd)
{
return __rmobile_pd_power_up(to_rmobile_pd(genpd));
}
static void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd)
{
struct generic_pm_domain *genpd = &rmobile_pd->genpd;
struct dev_power_governor *gov = rmobile_pd->gov;
genpd->flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
genpd->attach_dev = cpg_mstp_attach_dev;
genpd->detach_dev = cpg_mstp_detach_dev;
if (!(genpd->flags & GENPD_FLAG_ALWAYS_ON)) {
genpd->power_off = rmobile_pd_power_down;
genpd->power_on = rmobile_pd_power_up;
__rmobile_pd_power_up(rmobile_pd);
}
pm_genpd_init(genpd, gov ? : &simple_qos_governor, false);
}
static int rmobile_pd_suspend_console(void)
{
/*
* Serial consoles make use of SCIF hardware located in this domain,
* hence keep the power domain on if "no_console_suspend" is set.
*/
return console_suspend_enabled ? 0 : -EBUSY;
}
enum pd_types {
PD_NORMAL,
PD_CPU,
PD_CONSOLE,
PD_DEBUG,
PD_MEMCTL,
};
#define MAX_NUM_SPECIAL_PDS 16
static struct special_pd {
struct device_node *pd;
enum pd_types type;
} special_pds[MAX_NUM_SPECIAL_PDS] __initdata;
static unsigned int num_special_pds __initdata;
static const struct of_device_id special_ids[] __initconst = {
{ .compatible = "arm,coresight-etm3x", .data = (void *)PD_DEBUG },
{ .compatible = "renesas,dbsc-r8a73a4", .data = (void *)PD_MEMCTL, },
{ .compatible = "renesas,dbsc3-r8a7740", .data = (void *)PD_MEMCTL, },
{ .compatible = "renesas,sbsc-sh73a0", .data = (void *)PD_MEMCTL, },
{ /* sentinel */ },
};
static void __init add_special_pd(struct device_node *np, enum pd_types type)
{
unsigned int i;
struct device_node *pd;
pd = of_parse_phandle(np, "power-domains", 0);
if (!pd)
return;
for (i = 0; i < num_special_pds; i++)
if (pd == special_pds[i].pd && type == special_pds[i].type) {
of_node_put(pd);
return;
}
if (num_special_pds == ARRAY_SIZE(special_pds)) {
pr_warn("Too many special PM domains\n");
of_node_put(pd);
return;
}
pr_debug("Special PM domain %pOFn type %d for %pOF\n", pd, type, np);
special_pds[num_special_pds].pd = pd;
special_pds[num_special_pds].type = type;
num_special_pds++;
}
static void __init get_special_pds(void)
{
struct device_node *np;
const struct of_device_id *id;
/* PM domains containing CPUs */
for_each_of_cpu_node(np)
add_special_pd(np, PD_CPU);
/* PM domain containing console */
if (of_stdout)
add_special_pd(of_stdout, PD_CONSOLE);
/* PM domains containing other special devices */
for_each_matching_node_and_match(np, special_ids, &id)
add_special_pd(np, (enum pd_types)id->data);
}
static void __init put_special_pds(void)
{
unsigned int i;
for (i = 0; i < num_special_pds; i++)
of_node_put(special_pds[i].pd);
}
static enum pd_types __init pd_type(const struct device_node *pd)
{
unsigned int i;
for (i = 0; i < num_special_pds; i++)
if (pd == special_pds[i].pd)
return special_pds[i].type;
return PD_NORMAL;
}
static void __init rmobile_setup_pm_domain(struct device_node *np,
struct rmobile_pm_domain *pd)
{
const char *name = pd->genpd.name;
switch (pd_type(np)) {
case PD_CPU:
/*
* This domain contains the CPU core and therefore it should
* only be turned off if the CPU is not in use.
*/
pr_debug("PM domain %s contains CPU\n", name);
pd->genpd.flags |= GENPD_FLAG_ALWAYS_ON;
break;
case PD_CONSOLE:
pr_debug("PM domain %s contains serial console\n", name);
pd->gov = &pm_domain_always_on_gov;
pd->suspend = rmobile_pd_suspend_console;
break;
case PD_DEBUG:
/*
* This domain contains the Coresight-ETM hardware block and
* therefore it should only be turned off if the debug module
* is not in use.
*/
pr_debug("PM domain %s contains Coresight-ETM\n", name);
pd->genpd.flags |= GENPD_FLAG_ALWAYS_ON;
break;
case PD_MEMCTL:
/*
* This domain contains a memory-controller and therefore it
* should only be turned off if memory is not in use.
*/
pr_debug("PM domain %s contains MEMCTL\n", name);
pd->genpd.flags |= GENPD_FLAG_ALWAYS_ON;
break;
case PD_NORMAL:
if (pd->bit_shift == ~0) {
/* Top-level always-on domain */
pr_debug("PM domain %s is always-on domain\n", name);
pd->genpd.flags |= GENPD_FLAG_ALWAYS_ON;
}
break;
}
rmobile_init_pm_domain(pd);
}
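/*
 * Expected device-tree layout (schematic, per the renesas,sysc-rmobile
 * binding): the SYSC node carries a "pm-domains" container whose nested
 * children each describe one domain. An optional "reg" property gives the
 * domain's bit position in SPDCR/SWUCR/PSTR; a child without "reg" becomes
 * an always-on domain. DT nesting maps directly onto the genpd
 * parent/subdomain relationships built below.
 */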
static int __init rmobile_add_pm_domains(void __iomem *base,
struct device_node *parent,
struct generic_pm_domain *genpd_parent)
{
struct device_node *np;
for_each_child_of_node(parent, np) {
struct rmobile_pm_domain *pd;
u32 idx = ~0;
if (of_property_read_u32(np, "reg", &idx)) {
/* always-on domain */
}
pd = kzalloc(sizeof(*pd), GFP_KERNEL);
if (!pd) {
of_node_put(np);
return -ENOMEM;
}
pd->genpd.name = np->name;
pd->base = base;
pd->bit_shift = idx;
rmobile_setup_pm_domain(np, pd);
if (genpd_parent)
pm_genpd_add_subdomain(genpd_parent, &pd->genpd);
of_genpd_add_provider_simple(np, &pd->genpd);
rmobile_add_pm_domains(base, np, &pd->genpd);
}
return 0;
}
static int __init rmobile_init_pm_domains(void)
{
struct device_node *np, *pmd;
bool scanned = false;
void __iomem *base;
int ret = 0;
for_each_compatible_node(np, NULL, "renesas,sysc-rmobile") {
base = of_iomap(np, 0);
if (!base) {
pr_warn("%pOF cannot map reg 0\n", np);
continue;
}
pmd = of_get_child_by_name(np, "pm-domains");
if (!pmd) {
iounmap(base);
pr_warn("%pOF lacks pm-domains node\n", np);
continue;
}
if (!scanned) {
/* Find PM domains containing special blocks */
get_special_pds();
scanned = true;
}
ret = rmobile_add_pm_domains(base, pmd, NULL);
of_node_put(pmd);
if (ret) {
of_node_put(np);
break;
}
fwnode_dev_initialized(of_fwnode_handle(np), true);
}
put_special_pds();
return ret;
}
core_initcall(rmobile_init_pm_domains);
| linux-master | drivers/pmdomain/renesas/rmobile-sysc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas RZ/G1H System Controller
*
* Copyright (C) 2020 Renesas Electronics Corp.
*/
#include <linux/kernel.h>
#include <dt-bindings/power/r8a7742-sysc.h>
#include "rcar-sysc.h"
static const struct rcar_sysc_area r8a7742_areas[] __initconst = {
{ "always-on", 0, 0, R8A7742_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
{ "ca15-scu", 0x180, 0, R8A7742_PD_CA15_SCU, R8A7742_PD_ALWAYS_ON,
PD_SCU },
{ "ca15-cpu0", 0x40, 0, R8A7742_PD_CA15_CPU0, R8A7742_PD_CA15_SCU,
PD_CPU_NOCR },
{ "ca15-cpu1", 0x40, 1, R8A7742_PD_CA15_CPU1, R8A7742_PD_CA15_SCU,
PD_CPU_NOCR },
{ "ca15-cpu2", 0x40, 2, R8A7742_PD_CA15_CPU2, R8A7742_PD_CA15_SCU,
PD_CPU_NOCR },
{ "ca15-cpu3", 0x40, 3, R8A7742_PD_CA15_CPU3, R8A7742_PD_CA15_SCU,
PD_CPU_NOCR },
{ "ca7-scu", 0x100, 0, R8A7742_PD_CA7_SCU, R8A7742_PD_ALWAYS_ON,
PD_SCU },
{ "ca7-cpu0", 0x1c0, 0, R8A7742_PD_CA7_CPU0, R8A7742_PD_CA7_SCU,
PD_CPU_NOCR },
{ "ca7-cpu1", 0x1c0, 1, R8A7742_PD_CA7_CPU1, R8A7742_PD_CA7_SCU,
PD_CPU_NOCR },
{ "ca7-cpu2", 0x1c0, 2, R8A7742_PD_CA7_CPU2, R8A7742_PD_CA7_SCU,
PD_CPU_NOCR },
{ "ca7-cpu3", 0x1c0, 3, R8A7742_PD_CA7_CPU3, R8A7742_PD_CA7_SCU,
PD_CPU_NOCR },
{ "rgx", 0xc0, 0, R8A7742_PD_RGX, R8A7742_PD_ALWAYS_ON },
};
const struct rcar_sysc_info r8a7742_sysc_info __initconst = {
.areas = r8a7742_areas,
.num_areas = ARRAY_SIZE(r8a7742_areas),
};
| linux-master | drivers/pmdomain/renesas/r8a7742-sysc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* R-Car Gen4 SYSC Power management support
*
* Copyright (C) 2021 Renesas Electronics Corp.
*/
#include <linux/bits.h>
#include <linux/clk/renesas.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/of_address.h>
#include <linux/pm_domain.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include "rcar-gen4-sysc.h"
/* SYSC Common */
#define SYSCSR 0x000 /* SYSC Status Register */
#define SYSCPONSR(x) (0x800 + ((x) * 0x4)) /* Power-ON Status Register 0 */
#define SYSCPOFFSR(x) (0x808 + ((x) * 0x4)) /* Power-OFF Status Register */
#define SYSCISCR(x) (0x810 + ((x) * 0x4)) /* Interrupt Status/Clear Register */
#define SYSCIER(x) (0x820 + ((x) * 0x4)) /* Interrupt Enable Register */
#define SYSCIMR(x) (0x830 + ((x) * 0x4)) /* Interrupt Mask Register */
/* Power Domain Registers */
#define PDRSR(n) (0x1000 + ((n) * 0x40))
#define PDRONCR(n) (0x1004 + ((n) * 0x40))
#define PDROFFCR(n) (0x1008 + ((n) * 0x40))
#define PDRESR(n) (0x100C + ((n) * 0x40))
/* PWRON/PWROFF */
#define PWRON_PWROFF BIT(0) /* Power-ON/OFF request */
/* PDRESR */
#define PDRESR_ERR BIT(0)
/* PDRSR */
#define PDRSR_OFF BIT(0) /* Power-OFF state */
#define PDRSR_ON BIT(4) /* Power-ON state */
#define PDRSR_OFF_STATE BIT(8) /* Processing Power-OFF sequence */
#define PDRSR_ON_STATE BIT(12) /* Processing Power-ON sequence */
#define SYSCSR_BUSY GENMASK(1, 0) /* All bits set means not busy */
#define SYSCSR_TIMEOUT 10000
#define SYSCSR_DELAY_US 10
#define PDRESR_RETRIES 1000
#define PDRESR_DELAY_US 10
#define SYSCISR_TIMEOUT 10000
#define SYSCISR_DELAY_US 10
#define RCAR_GEN4_PD_ALWAYS_ON 64
#define NUM_DOMAINS_EACH_REG BITS_PER_TYPE(u32)
static void __iomem *rcar_gen4_sysc_base;
static DEFINE_SPINLOCK(rcar_gen4_sysc_lock); /* SMP CPUs + I/O devices */
static int rcar_gen4_sysc_pwr_on_off(u8 pdr, bool on)
{
unsigned int reg_offs;
u32 val;
int ret;
if (on)
reg_offs = PDRONCR(pdr);
else
reg_offs = PDROFFCR(pdr);
/* Wait until SYSC is ready to accept a power request */
ret = readl_poll_timeout_atomic(rcar_gen4_sysc_base + SYSCSR, val,
(val & SYSCSR_BUSY) == SYSCSR_BUSY,
SYSCSR_DELAY_US, SYSCSR_TIMEOUT);
if (ret < 0)
return -EAGAIN;
/* Submit power shutoff or power resume request */
iowrite32(PWRON_PWROFF, rcar_gen4_sysc_base + reg_offs);
return 0;
}
static int clear_irq_flags(unsigned int reg_idx, unsigned int isr_mask)
{
u32 val;
int ret;
iowrite32(isr_mask, rcar_gen4_sysc_base + SYSCISCR(reg_idx));
ret = readl_poll_timeout_atomic(rcar_gen4_sysc_base + SYSCISCR(reg_idx),
val, !(val & isr_mask),
SYSCISR_DELAY_US, SYSCISR_TIMEOUT);
if (ret < 0) {
pr_err("\n %s : Can not clear IRQ flags in SYSCISCR", __func__);
return -EIO;
}
return 0;
}
static int rcar_gen4_sysc_power(u8 pdr, bool on)
{
unsigned int isr_mask;
unsigned int reg_idx, bit_idx;
unsigned int status;
unsigned long flags;
int ret = 0;
u32 val;
int k;
spin_lock_irqsave(&rcar_gen4_sysc_lock, flags);
reg_idx = pdr / NUM_DOMAINS_EACH_REG;
bit_idx = pdr % NUM_DOMAINS_EACH_REG;
isr_mask = BIT(bit_idx);
/*
* The interrupt source needs to be enabled, but masked, to prevent the
* CPU from receiving it.
*/
iowrite32(ioread32(rcar_gen4_sysc_base + SYSCIER(reg_idx)) | isr_mask,
rcar_gen4_sysc_base + SYSCIER(reg_idx));
iowrite32(ioread32(rcar_gen4_sysc_base + SYSCIMR(reg_idx)) | isr_mask,
rcar_gen4_sysc_base + SYSCIMR(reg_idx));
ret = clear_irq_flags(reg_idx, isr_mask);
if (ret)
goto out;
/* Submit the power shutoff or resume request until it is accepted */
for (k = 0; k < PDRESR_RETRIES; k++) {
ret = rcar_gen4_sysc_pwr_on_off(pdr, on);
if (ret)
goto out;
status = ioread32(rcar_gen4_sysc_base + PDRESR(pdr));
if (!(status & PDRESR_ERR))
break;
udelay(PDRESR_DELAY_US);
}
if (k == PDRESR_RETRIES) {
ret = -EIO;
goto out;
}
/* Wait until the power shutoff or resume request has completed */
ret = readl_poll_timeout_atomic(rcar_gen4_sysc_base + SYSCISCR(reg_idx),
val, (val & isr_mask),
SYSCISR_DELAY_US, SYSCISR_TIMEOUT);
if (ret < 0) {
ret = -EIO;
goto out;
}
/* Clear interrupt flags */
ret = clear_irq_flags(reg_idx, isr_mask);
if (ret)
goto out;
out:
spin_unlock_irqrestore(&rcar_gen4_sysc_lock, flags);
pr_debug("sysc power %s domain %d: %08x -> %d\n", on ? "on" : "off",
pdr, ioread32(rcar_gen4_sysc_base + SYSCISCR(reg_idx)), ret);
return ret;
}
static bool rcar_gen4_sysc_power_is_off(u8 pdr)
{
unsigned int st;
st = ioread32(rcar_gen4_sysc_base + PDRSR(pdr));
if (st & PDRSR_OFF)
return true;
return false;
}
struct rcar_gen4_sysc_pd {
struct generic_pm_domain genpd;
u8 pdr;
unsigned int flags;
char name[];
};
static inline struct rcar_gen4_sysc_pd *to_rcar_gen4_pd(struct generic_pm_domain *d)
{
return container_of(d, struct rcar_gen4_sysc_pd, genpd);
}
static int rcar_gen4_sysc_pd_power_off(struct generic_pm_domain *genpd)
{
struct rcar_gen4_sysc_pd *pd = to_rcar_gen4_pd(genpd);
pr_debug("%s: %s\n", __func__, genpd->name);
return rcar_gen4_sysc_power(pd->pdr, false);
}
static int rcar_gen4_sysc_pd_power_on(struct generic_pm_domain *genpd)
{
struct rcar_gen4_sysc_pd *pd = to_rcar_gen4_pd(genpd);
pr_debug("%s: %s\n", __func__, genpd->name);
return rcar_gen4_sysc_power(pd->pdr, true);
}
static int __init rcar_gen4_sysc_pd_setup(struct rcar_gen4_sysc_pd *pd)
{
struct generic_pm_domain *genpd = &pd->genpd;
const char *name = pd->genpd.name;
int error;
if (pd->flags & PD_CPU) {
/*
* This domain contains a CPU core and therefore it should
* only be turned off if the CPU is not in use.
*/
pr_debug("PM domain %s contains %s\n", name, "CPU");
genpd->flags |= GENPD_FLAG_ALWAYS_ON;
} else if (pd->flags & PD_SCU) {
/*
* This domain contains an SCU and cache-controller, and
* therefore it should only be turned off if the CPU cores are
* not in use.
*/
pr_debug("PM domain %s contains %s\n", name, "SCU");
genpd->flags |= GENPD_FLAG_ALWAYS_ON;
} else if (pd->flags & PD_NO_CR) {
/*
* This domain cannot be turned off.
*/
genpd->flags |= GENPD_FLAG_ALWAYS_ON;
}
if (!(pd->flags & (PD_CPU | PD_SCU))) {
/* Enable Clock Domain for I/O devices */
genpd->flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
genpd->attach_dev = cpg_mssr_attach_dev;
genpd->detach_dev = cpg_mssr_detach_dev;
}
genpd->power_off = rcar_gen4_sysc_pd_power_off;
genpd->power_on = rcar_gen4_sysc_pd_power_on;
if (pd->flags & (PD_CPU | PD_NO_CR)) {
/* Skip CPUs (handled by SMP code) and areas without control */
pr_debug("%s: Not touching %s\n", __func__, genpd->name);
goto finalize;
}
if (!rcar_gen4_sysc_power_is_off(pd->pdr)) {
pr_debug("%s: %s is already powered\n", __func__, genpd->name);
goto finalize;
}
rcar_gen4_sysc_power(pd->pdr, true);
finalize:
error = pm_genpd_init(genpd, &simple_qos_governor, false);
if (error)
pr_err("Failed to init PM domain %s: %d\n", name, error);
return error;
}
static const struct of_device_id rcar_gen4_sysc_matches[] __initconst = {
#ifdef CONFIG_SYSC_R8A779A0
{ .compatible = "renesas,r8a779a0-sysc", .data = &r8a779a0_sysc_info },
#endif
#ifdef CONFIG_SYSC_R8A779F0
{ .compatible = "renesas,r8a779f0-sysc", .data = &r8a779f0_sysc_info },
#endif
#ifdef CONFIG_SYSC_R8A779G0
{ .compatible = "renesas,r8a779g0-sysc", .data = &r8a779g0_sysc_info },
#endif
{ /* sentinel */ }
};
struct rcar_gen4_pm_domains {
struct genpd_onecell_data onecell_data;
struct generic_pm_domain *domains[RCAR_GEN4_PD_ALWAYS_ON + 1];
};
static struct genpd_onecell_data *rcar_gen4_sysc_onecell_data;
static int __init rcar_gen4_sysc_pd_init(void)
{
const struct rcar_gen4_sysc_info *info;
const struct of_device_id *match;
struct rcar_gen4_pm_domains *domains;
struct device_node *np;
void __iomem *base;
unsigned int i;
int error;
np = of_find_matching_node_and_match(NULL, rcar_gen4_sysc_matches, &match);
if (!np)
return -ENODEV;
info = match->data;
base = of_iomap(np, 0);
if (!base) {
pr_warn("%pOF: Cannot map regs\n", np);
error = -ENOMEM;
goto out_put;
}
rcar_gen4_sysc_base = base;
domains = kzalloc(sizeof(*domains), GFP_KERNEL);
if (!domains) {
error = -ENOMEM;
goto out_put;
}
domains->onecell_data.domains = domains->domains;
domains->onecell_data.num_domains = ARRAY_SIZE(domains->domains);
rcar_gen4_sysc_onecell_data = &domains->onecell_data;
for (i = 0; i < info->num_areas; i++) {
const struct rcar_gen4_sysc_area *area = &info->areas[i];
struct rcar_gen4_sysc_pd *pd;
size_t n;
if (!area->name) {
/* Skip NULLified area */
continue;
}
n = strlen(area->name) + 1;
pd = kzalloc(sizeof(*pd) + n, GFP_KERNEL);
if (!pd) {
error = -ENOMEM;
goto out_put;
}
memcpy(pd->name, area->name, n);
pd->genpd.name = pd->name;
pd->pdr = area->pdr;
pd->flags = area->flags;
error = rcar_gen4_sysc_pd_setup(pd);
if (error)
goto out_put;
domains->domains[area->pdr] = &pd->genpd;
if (area->parent < 0)
continue;
error = pm_genpd_add_subdomain(domains->domains[area->parent],
&pd->genpd);
if (error) {
pr_warn("Failed to add PM subdomain %s to parent %u\n",
area->name, area->parent);
goto out_put;
}
}
error = of_genpd_add_provider_onecell(np, &domains->onecell_data);
out_put:
of_node_put(np);
return error;
}
early_initcall(rcar_gen4_sysc_pd_init);
| linux-master | drivers/pmdomain/renesas/rcar-gen4-sysc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas R-Car M2-W/N System Controller
*
* Copyright (C) 2016 Glider bvba
*/
#include <linux/kernel.h>
#include <dt-bindings/power/r8a7791-sysc.h>
#include "rcar-sysc.h"
static const struct rcar_sysc_area r8a7791_areas[] __initconst = {
{ "always-on", 0, 0, R8A7791_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
{ "ca15-scu", 0x180, 0, R8A7791_PD_CA15_SCU, R8A7791_PD_ALWAYS_ON,
PD_SCU },
{ "ca15-cpu0", 0x40, 0, R8A7791_PD_CA15_CPU0, R8A7791_PD_CA15_SCU,
PD_CPU_NOCR },
{ "ca15-cpu1", 0x40, 1, R8A7791_PD_CA15_CPU1, R8A7791_PD_CA15_SCU,
PD_CPU_NOCR },
{ "sh-4a", 0x80, 0, R8A7791_PD_SH_4A, R8A7791_PD_ALWAYS_ON },
{ "sgx", 0xc0, 0, R8A7791_PD_SGX, R8A7791_PD_ALWAYS_ON },
};
const struct rcar_sysc_info r8a7791_sysc_info __initconst = {
.areas = r8a7791_areas,
.num_areas = ARRAY_SIZE(r8a7791_areas),
};
| linux-master | drivers/pmdomain/renesas/r8a7791-sysc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas R-Car D3 System Controller
*
* Copyright (C) 2017 Glider bvba
*/
#include <linux/kernel.h>
#include <dt-bindings/power/r8a77995-sysc.h>
#include "rcar-sysc.h"
static const struct rcar_sysc_area r8a77995_areas[] __initconst = {
{ "always-on", 0, 0, R8A77995_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
{ "ca53-scu", 0x140, 0, R8A77995_PD_CA53_SCU, R8A77995_PD_ALWAYS_ON,
PD_SCU },
{ "ca53-cpu0", 0x200, 0, R8A77995_PD_CA53_CPU0, R8A77995_PD_CA53_SCU,
PD_CPU_NOCR },
};
const struct rcar_sysc_info r8a77995_sysc_info __initconst = {
.areas = r8a77995_areas,
.num_areas = ARRAY_SIZE(r8a77995_areas),
};
| linux-master | drivers/pmdomain/renesas/r8a77995-sysc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas RZ/G1C System Controller
*
* Copyright (C) 2018 Renesas Electronics Corp.
*/
#include <linux/kernel.h>
#include <dt-bindings/power/r8a77470-sysc.h>
#include "rcar-sysc.h"
static const struct rcar_sysc_area r8a77470_areas[] __initconst = {
{ "always-on", 0, 0, R8A77470_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
{ "ca7-scu", 0x100, 0, R8A77470_PD_CA7_SCU, R8A77470_PD_ALWAYS_ON,
PD_SCU },
{ "ca7-cpu0", 0x1c0, 0, R8A77470_PD_CA7_CPU0, R8A77470_PD_CA7_SCU,
PD_CPU_NOCR },
{ "ca7-cpu1", 0x1c0, 1, R8A77470_PD_CA7_CPU1, R8A77470_PD_CA7_SCU,
PD_CPU_NOCR },
{ "sgx", 0xc0, 0, R8A77470_PD_SGX, R8A77470_PD_ALWAYS_ON },
};
const struct rcar_sysc_info r8a77470_sysc_info __initconst = {
.areas = r8a77470_areas,
.num_areas = ARRAY_SIZE(r8a77470_areas),
};
| linux-master | drivers/pmdomain/renesas/r8a77470-sysc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas RZ/G2M System Controller
* Copyright (C) 2018 Renesas Electronics Corp.
*
* Based on Renesas R-Car M3-W System Controller
* Copyright (C) 2016 Glider bvba
*/
#include <linux/kernel.h>
#include <dt-bindings/power/r8a774a1-sysc.h>
#include "rcar-sysc.h"
static const struct rcar_sysc_area r8a774a1_areas[] __initconst = {
{ "always-on", 0, 0, R8A774A1_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
{ "ca57-scu", 0x1c0, 0, R8A774A1_PD_CA57_SCU, R8A774A1_PD_ALWAYS_ON,
PD_SCU },
{ "ca57-cpu0", 0x80, 0, R8A774A1_PD_CA57_CPU0, R8A774A1_PD_CA57_SCU,
PD_CPU_NOCR },
{ "ca57-cpu1", 0x80, 1, R8A774A1_PD_CA57_CPU1, R8A774A1_PD_CA57_SCU,
PD_CPU_NOCR },
{ "ca53-scu", 0x140, 0, R8A774A1_PD_CA53_SCU, R8A774A1_PD_ALWAYS_ON,
PD_SCU },
{ "ca53-cpu0", 0x200, 0, R8A774A1_PD_CA53_CPU0, R8A774A1_PD_CA53_SCU,
PD_CPU_NOCR },
{ "ca53-cpu1", 0x200, 1, R8A774A1_PD_CA53_CPU1, R8A774A1_PD_CA53_SCU,
PD_CPU_NOCR },
{ "ca53-cpu2", 0x200, 2, R8A774A1_PD_CA53_CPU2, R8A774A1_PD_CA53_SCU,
PD_CPU_NOCR },
{ "ca53-cpu3", 0x200, 3, R8A774A1_PD_CA53_CPU3, R8A774A1_PD_CA53_SCU,
PD_CPU_NOCR },
{ "a3vc", 0x380, 0, R8A774A1_PD_A3VC, R8A774A1_PD_ALWAYS_ON },
{ "a2vc0", 0x3c0, 0, R8A774A1_PD_A2VC0, R8A774A1_PD_A3VC },
{ "a2vc1", 0x3c0, 1, R8A774A1_PD_A2VC1, R8A774A1_PD_A3VC },
{ "3dg-a", 0x100, 0, R8A774A1_PD_3DG_A, R8A774A1_PD_ALWAYS_ON },
{ "3dg-b", 0x100, 1, R8A774A1_PD_3DG_B, R8A774A1_PD_3DG_A },
};
const struct rcar_sysc_info r8a774a1_sysc_info __initconst = {
.areas = r8a774a1_areas,
.num_areas = ARRAY_SIZE(r8a774a1_areas),
};
| linux-master | drivers/pmdomain/renesas/r8a774a1-sysc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas R-Car M3-W/W+ System Controller
*
* Copyright (C) 2016 Glider bvba
* Copyright (C) 2018-2019 Renesas Electronics Corporation
*/
#include <linux/bits.h>
#include <linux/kernel.h>
#include <dt-bindings/power/r8a7796-sysc.h>
#include "rcar-sysc.h"
static struct rcar_sysc_area r8a7796_areas[] __initdata = {
{ "always-on", 0, 0, R8A7796_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
{ "ca57-scu", 0x1c0, 0, R8A7796_PD_CA57_SCU, R8A7796_PD_ALWAYS_ON,
PD_SCU },
{ "ca57-cpu0", 0x80, 0, R8A7796_PD_CA57_CPU0, R8A7796_PD_CA57_SCU,
PD_CPU_NOCR },
{ "ca57-cpu1", 0x80, 1, R8A7796_PD_CA57_CPU1, R8A7796_PD_CA57_SCU,
PD_CPU_NOCR },
{ "ca53-scu", 0x140, 0, R8A7796_PD_CA53_SCU, R8A7796_PD_ALWAYS_ON,
PD_SCU },
{ "ca53-cpu0", 0x200, 0, R8A7796_PD_CA53_CPU0, R8A7796_PD_CA53_SCU,
PD_CPU_NOCR },
{ "ca53-cpu1", 0x200, 1, R8A7796_PD_CA53_CPU1, R8A7796_PD_CA53_SCU,
PD_CPU_NOCR },
{ "ca53-cpu2", 0x200, 2, R8A7796_PD_CA53_CPU2, R8A7796_PD_CA53_SCU,
PD_CPU_NOCR },
{ "ca53-cpu3", 0x200, 3, R8A7796_PD_CA53_CPU3, R8A7796_PD_CA53_SCU,
PD_CPU_NOCR },
{ "cr7", 0x240, 0, R8A7796_PD_CR7, R8A7796_PD_ALWAYS_ON },
{ "a3vc", 0x380, 0, R8A7796_PD_A3VC, R8A7796_PD_ALWAYS_ON },
{ "a2vc0", 0x3c0, 0, R8A7796_PD_A2VC0, R8A7796_PD_A3VC },
{ "a2vc1", 0x3c0, 1, R8A7796_PD_A2VC1, R8A7796_PD_A3VC },
{ "3dg-a", 0x100, 0, R8A7796_PD_3DG_A, R8A7796_PD_ALWAYS_ON },
{ "3dg-b", 0x100, 1, R8A7796_PD_3DG_B, R8A7796_PD_3DG_A },
{ "a3ir", 0x180, 0, R8A7796_PD_A3IR, R8A7796_PD_ALWAYS_ON },
};
#ifdef CONFIG_SYSC_R8A77960
const struct rcar_sysc_info r8a77960_sysc_info __initconst = {
.areas = r8a7796_areas,
.num_areas = ARRAY_SIZE(r8a7796_areas),
};
#endif /* CONFIG_SYSC_R8A77960 */
#ifdef CONFIG_SYSC_R8A77961
static int __init r8a77961_sysc_init(void)
{
rcar_sysc_nullify(r8a7796_areas, ARRAY_SIZE(r8a7796_areas),
R8A7796_PD_A2VC0);
return 0;
}
const struct rcar_sysc_info r8a77961_sysc_info __initconst = {
.init = r8a77961_sysc_init,
.areas = r8a7796_areas,
.num_areas = ARRAY_SIZE(r8a7796_areas),
.extmask_offs = 0x2f8,
.extmask_val = BIT(0),
};
#endif /* CONFIG_SYSC_R8A77961 */
| linux-master | drivers/pmdomain/renesas/r8a7796-sysc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas R-Car V3U System Controller
*
* Copyright (C) 2020 Renesas Electronics Corp.
*/
#include <linux/bits.h>
#include <linux/clk/renesas.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/of_address.h>
#include <linux/pm_domain.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <dt-bindings/power/r8a779a0-sysc.h>
#include "rcar-gen4-sysc.h"
static struct rcar_gen4_sysc_area r8a779a0_areas[] __initdata = {
{ "always-on", R8A779A0_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
{ "a3e0", R8A779A0_PD_A3E0, R8A779A0_PD_ALWAYS_ON, PD_SCU },
{ "a3e1", R8A779A0_PD_A3E1, R8A779A0_PD_ALWAYS_ON, PD_SCU },
{ "a2e0d0", R8A779A0_PD_A2E0D0, R8A779A0_PD_A3E0, PD_SCU },
{ "a2e0d1", R8A779A0_PD_A2E0D1, R8A779A0_PD_A3E0, PD_SCU },
{ "a2e1d0", R8A779A0_PD_A2E1D0, R8A779A0_PD_A3E1, PD_SCU },
{ "a2e1d1", R8A779A0_PD_A2E1D1, R8A779A0_PD_A3E1, PD_SCU },
{ "a1e0d0c0", R8A779A0_PD_A1E0D0C0, R8A779A0_PD_A2E0D0, PD_CPU_NOCR },
{ "a1e0d0c1", R8A779A0_PD_A1E0D0C1, R8A779A0_PD_A2E0D0, PD_CPU_NOCR },
{ "a1e0d1c0", R8A779A0_PD_A1E0D1C0, R8A779A0_PD_A2E0D1, PD_CPU_NOCR },
{ "a1e0d1c1", R8A779A0_PD_A1E0D1C1, R8A779A0_PD_A2E0D1, PD_CPU_NOCR },
{ "a1e1d0c0", R8A779A0_PD_A1E1D0C0, R8A779A0_PD_A2E1D0, PD_CPU_NOCR },
{ "a1e1d0c1", R8A779A0_PD_A1E1D0C1, R8A779A0_PD_A2E1D0, PD_CPU_NOCR },
{ "a1e1d1c0", R8A779A0_PD_A1E1D1C0, R8A779A0_PD_A2E1D1, PD_CPU_NOCR },
{ "a1e1d1c1", R8A779A0_PD_A1E1D1C1, R8A779A0_PD_A2E1D1, PD_CPU_NOCR },
{ "3dg-a", R8A779A0_PD_3DG_A, R8A779A0_PD_ALWAYS_ON },
{ "3dg-b", R8A779A0_PD_3DG_B, R8A779A0_PD_3DG_A },
{ "a3vip0", R8A779A0_PD_A3VIP0, R8A779A0_PD_ALWAYS_ON },
{ "a3vip1", R8A779A0_PD_A3VIP1, R8A779A0_PD_ALWAYS_ON },
{ "a3vip3", R8A779A0_PD_A3VIP3, R8A779A0_PD_ALWAYS_ON },
{ "a3vip2", R8A779A0_PD_A3VIP2, R8A779A0_PD_ALWAYS_ON },
{ "a3isp01", R8A779A0_PD_A3ISP01, R8A779A0_PD_ALWAYS_ON },
{ "a3isp23", R8A779A0_PD_A3ISP23, R8A779A0_PD_ALWAYS_ON },
{ "a3ir", R8A779A0_PD_A3IR, R8A779A0_PD_ALWAYS_ON },
{ "a2cn0", R8A779A0_PD_A2CN0, R8A779A0_PD_A3IR },
{ "a2imp01", R8A779A0_PD_A2IMP01, R8A779A0_PD_A3IR },
{ "a2dp0", R8A779A0_PD_A2DP0, R8A779A0_PD_A3IR },
{ "a2cv0", R8A779A0_PD_A2CV0, R8A779A0_PD_A3IR },
{ "a2cv1", R8A779A0_PD_A2CV1, R8A779A0_PD_A3IR },
{ "a2cv4", R8A779A0_PD_A2CV4, R8A779A0_PD_A3IR },
{ "a2cv6", R8A779A0_PD_A2CV6, R8A779A0_PD_A3IR },
{ "a2cn2", R8A779A0_PD_A2CN2, R8A779A0_PD_A3IR },
{ "a2imp23", R8A779A0_PD_A2IMP23, R8A779A0_PD_A3IR },
{ "a2dp1", R8A779A0_PD_A2DP1, R8A779A0_PD_A3IR },
{ "a2cv2", R8A779A0_PD_A2CV2, R8A779A0_PD_A3IR },
{ "a2cv3", R8A779A0_PD_A2CV3, R8A779A0_PD_A3IR },
{ "a2cv5", R8A779A0_PD_A2CV5, R8A779A0_PD_A3IR },
{ "a2cv7", R8A779A0_PD_A2CV7, R8A779A0_PD_A3IR },
{ "a2cn1", R8A779A0_PD_A2CN1, R8A779A0_PD_A3IR },
{ "a1cnn0", R8A779A0_PD_A1CNN0, R8A779A0_PD_A2CN0 },
{ "a1cnn2", R8A779A0_PD_A1CNN2, R8A779A0_PD_A2CN2 },
{ "a1dsp0", R8A779A0_PD_A1DSP0, R8A779A0_PD_A2CN2 },
{ "a1cnn1", R8A779A0_PD_A1CNN1, R8A779A0_PD_A2CN1 },
{ "a1dsp1", R8A779A0_PD_A1DSP1, R8A779A0_PD_A2CN1 },
};
const struct rcar_gen4_sysc_info r8a779a0_sysc_info __initconst = {
.areas = r8a779a0_areas,
.num_areas = ARRAY_SIZE(r8a779a0_areas),
};
| linux-master | drivers/pmdomain/renesas/r8a779a0-sysc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas R-Car V3H System Controller
*
* Copyright (C) 2018 Renesas Electronics Corp.
* Copyright (C) 2018 Cogent Embedded, Inc.
*/
#include <linux/bits.h>
#include <linux/kernel.h>
#include <dt-bindings/power/r8a77980-sysc.h>
#include "rcar-sysc.h"
static const struct rcar_sysc_area r8a77980_areas[] __initconst = {
{ "always-on", 0, 0, R8A77980_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
{ "ca53-scu", 0x140, 0, R8A77980_PD_CA53_SCU, R8A77980_PD_ALWAYS_ON,
PD_SCU },
{ "ca53-cpu0", 0x200, 0, R8A77980_PD_CA53_CPU0, R8A77980_PD_CA53_SCU,
PD_CPU_NOCR },
{ "ca53-cpu1", 0x200, 1, R8A77980_PD_CA53_CPU1, R8A77980_PD_CA53_SCU,
PD_CPU_NOCR },
{ "ca53-cpu2", 0x200, 2, R8A77980_PD_CA53_CPU2, R8A77980_PD_CA53_SCU,
PD_CPU_NOCR },
{ "ca53-cpu3", 0x200, 3, R8A77980_PD_CA53_CPU3, R8A77980_PD_CA53_SCU,
PD_CPU_NOCR },
{ "cr7", 0x240, 0, R8A77980_PD_CR7, R8A77980_PD_ALWAYS_ON },
{ "a3ir", 0x180, 0, R8A77980_PD_A3IR, R8A77980_PD_ALWAYS_ON },
{ "a2ir0", 0x400, 0, R8A77980_PD_A2IR0, R8A77980_PD_A3IR },
{ "a2ir1", 0x400, 1, R8A77980_PD_A2IR1, R8A77980_PD_A3IR },
{ "a2ir2", 0x400, 2, R8A77980_PD_A2IR2, R8A77980_PD_A3IR },
{ "a2ir3", 0x400, 3, R8A77980_PD_A2IR3, R8A77980_PD_A3IR },
{ "a2ir4", 0x400, 4, R8A77980_PD_A2IR4, R8A77980_PD_A3IR },
{ "a2ir5", 0x400, 5, R8A77980_PD_A2IR5, R8A77980_PD_A3IR },
{ "a2sc0", 0x400, 6, R8A77980_PD_A2SC0, R8A77980_PD_A3IR },
{ "a2sc1", 0x400, 7, R8A77980_PD_A2SC1, R8A77980_PD_A3IR },
{ "a2sc2", 0x400, 8, R8A77980_PD_A2SC2, R8A77980_PD_A3IR },
{ "a2sc3", 0x400, 9, R8A77980_PD_A2SC3, R8A77980_PD_A3IR },
{ "a2sc4", 0x400, 10, R8A77980_PD_A2SC4, R8A77980_PD_A3IR },
{ "a2dp0", 0x400, 11, R8A77980_PD_A2DP0, R8A77980_PD_A3IR },
{ "a2dp1", 0x400, 12, R8A77980_PD_A2DP1, R8A77980_PD_A3IR },
{ "a2cn", 0x400, 13, R8A77980_PD_A2CN, R8A77980_PD_A3IR },
{ "a3vip0", 0x2c0, 0, R8A77980_PD_A3VIP0, R8A77980_PD_ALWAYS_ON },
{ "a3vip1", 0x300, 0, R8A77980_PD_A3VIP1, R8A77980_PD_ALWAYS_ON },
{ "a3vip2", 0x280, 0, R8A77980_PD_A3VIP2, R8A77980_PD_ALWAYS_ON },
};
const struct rcar_sysc_info r8a77980_sysc_info __initconst = {
.areas = r8a77980_areas,
.num_areas = ARRAY_SIZE(r8a77980_areas),
.extmask_offs = 0x138,
.extmask_val = BIT(0),
};
| linux-master | drivers/pmdomain/renesas/r8a77980-sysc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas RZ/G2E System Controller
* Copyright (C) 2018 Renesas Electronics Corp.
*
* Based on Renesas R-Car E3 System Controller
*/
#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/sys_soc.h>
#include <dt-bindings/power/r8a774c0-sysc.h>
#include "rcar-sysc.h"
static struct rcar_sysc_area r8a774c0_areas[] __initdata = {
{ "always-on", 0, 0, R8A774C0_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
{ "ca53-scu", 0x140, 0, R8A774C0_PD_CA53_SCU, R8A774C0_PD_ALWAYS_ON,
PD_SCU },
{ "ca53-cpu0", 0x200, 0, R8A774C0_PD_CA53_CPU0, R8A774C0_PD_CA53_SCU,
PD_CPU_NOCR },
{ "ca53-cpu1", 0x200, 1, R8A774C0_PD_CA53_CPU1, R8A774C0_PD_CA53_SCU,
PD_CPU_NOCR },
{ "a3vc", 0x380, 0, R8A774C0_PD_A3VC, R8A774C0_PD_ALWAYS_ON },
{ "a2vc1", 0x3c0, 1, R8A774C0_PD_A2VC1, R8A774C0_PD_A3VC },
{ "3dg-a", 0x100, 0, R8A774C0_PD_3DG_A, R8A774C0_PD_ALWAYS_ON },
{ "3dg-b", 0x100, 1, R8A774C0_PD_3DG_B, R8A774C0_PD_3DG_A },
};
/* Fixups for RZ/G2E ES1.0 revision */
static const struct soc_device_attribute r8a774c0[] __initconst = {
{ .soc_id = "r8a774c0", .revision = "ES1.0" },
{ /* sentinel */ }
};
static int __init r8a774c0_sysc_init(void)
{
if (soc_device_match(r8a774c0)) {
/* Fix incorrect 3DG hierarchy */
swap(r8a774c0_areas[6], r8a774c0_areas[7]);
r8a774c0_areas[6].parent = R8A774C0_PD_ALWAYS_ON;
r8a774c0_areas[7].parent = R8A774C0_PD_3DG_B;
}
return 0;
}
const struct rcar_sysc_info r8a774c0_sysc_info __initconst = {
.init = r8a774c0_sysc_init,
.areas = r8a774c0_areas,
.num_areas = ARRAY_SIZE(r8a774c0_areas),
.extmask_offs = 0x2f8,
.extmask_val = BIT(0),
};
| linux-master | drivers/pmdomain/renesas/r8a774c0-sysc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas RZ/G1M System Controller
*
* Copyright (C) 2016 Cogent Embedded Inc.
*/
#include <linux/kernel.h>
#include <dt-bindings/power/r8a7743-sysc.h>
#include "rcar-sysc.h"
static const struct rcar_sysc_area r8a7743_areas[] __initconst = {
{ "always-on", 0, 0, R8A7743_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
{ "ca15-scu", 0x180, 0, R8A7743_PD_CA15_SCU, R8A7743_PD_ALWAYS_ON,
PD_SCU },
{ "ca15-cpu0", 0x40, 0, R8A7743_PD_CA15_CPU0, R8A7743_PD_CA15_SCU,
PD_CPU_NOCR },
{ "ca15-cpu1", 0x40, 1, R8A7743_PD_CA15_CPU1, R8A7743_PD_CA15_SCU,
PD_CPU_NOCR },
{ "sgx", 0xc0, 0, R8A7743_PD_SGX, R8A7743_PD_ALWAYS_ON },
};
const struct rcar_sysc_info r8a7743_sysc_info __initconst = {
.areas = r8a7743_areas,
.num_areas = ARRAY_SIZE(r8a7743_areas),
};
| linux-master | drivers/pmdomain/renesas/r8a7743-sysc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas R-Car E3 System Controller
*
* Copyright (C) 2018 Renesas Electronics Corp.
*/
#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/sys_soc.h>
#include <dt-bindings/power/r8a77990-sysc.h>
#include "rcar-sysc.h"
static struct rcar_sysc_area r8a77990_areas[] __initdata = {
{ "always-on", 0, 0, R8A77990_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
{ "ca53-scu", 0x140, 0, R8A77990_PD_CA53_SCU, R8A77990_PD_ALWAYS_ON,
PD_SCU },
{ "ca53-cpu0", 0x200, 0, R8A77990_PD_CA53_CPU0, R8A77990_PD_CA53_SCU,
PD_CPU_NOCR },
{ "ca53-cpu1", 0x200, 1, R8A77990_PD_CA53_CPU1, R8A77990_PD_CA53_SCU,
PD_CPU_NOCR },
{ "cr7", 0x240, 0, R8A77990_PD_CR7, R8A77990_PD_ALWAYS_ON },
{ "a3vc", 0x380, 0, R8A77990_PD_A3VC, R8A77990_PD_ALWAYS_ON },
{ "a2vc1", 0x3c0, 1, R8A77990_PD_A2VC1, R8A77990_PD_A3VC },
{ "3dg-a", 0x100, 0, R8A77990_PD_3DG_A, R8A77990_PD_ALWAYS_ON },
{ "3dg-b", 0x100, 1, R8A77990_PD_3DG_B, R8A77990_PD_3DG_A },
};
/* Fixups for R-Car E3 ES1.0 revision */
static const struct soc_device_attribute r8a77990[] __initconst = {
{ .soc_id = "r8a77990", .revision = "ES1.0" },
{ /* sentinel */ }
};
static int __init r8a77990_sysc_init(void)
{
if (soc_device_match(r8a77990)) {
/* Fix incorrect 3DG hierarchy */
swap(r8a77990_areas[7], r8a77990_areas[8]);
r8a77990_areas[7].parent = R8A77990_PD_ALWAYS_ON;
r8a77990_areas[8].parent = R8A77990_PD_3DG_B;
}
return 0;
}
const struct rcar_sysc_info r8a77990_sysc_info __initconst = {
.init = r8a77990_sysc_init,
.areas = r8a77990_areas,
.num_areas = ARRAY_SIZE(r8a77990_areas),
.extmask_offs = 0x2f8,
.extmask_val = BIT(0),
};
| linux-master | drivers/pmdomain/renesas/r8a77990-sysc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas R-Car H1 System Controller
*
* Copyright (C) 2016 Glider bvba
*/
#include <linux/kernel.h>
#include <dt-bindings/power/r8a7779-sysc.h>
#include "rcar-sysc.h"
static const struct rcar_sysc_area r8a7779_areas[] __initconst = {
{ "always-on", 0, 0, R8A7779_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
{ "arm1", 0x40, 1, R8A7779_PD_ARM1, R8A7779_PD_ALWAYS_ON,
PD_CPU_CR },
{ "arm2", 0x40, 2, R8A7779_PD_ARM2, R8A7779_PD_ALWAYS_ON,
PD_CPU_CR },
{ "arm3", 0x40, 3, R8A7779_PD_ARM3, R8A7779_PD_ALWAYS_ON,
PD_CPU_CR },
{ "sgx", 0xc0, 0, R8A7779_PD_SGX, R8A7779_PD_ALWAYS_ON },
{ "vdp", 0x100, 0, R8A7779_PD_VDP, R8A7779_PD_ALWAYS_ON },
{ "imp", 0x140, 0, R8A7779_PD_IMP, R8A7779_PD_ALWAYS_ON },
};
const struct rcar_sysc_info r8a7779_sysc_info __initconst = {
.areas = r8a7779_areas,
.num_areas = ARRAY_SIZE(r8a7779_areas),
};
| linux-master | drivers/pmdomain/renesas/r8a7779-sysc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas RZ/G2H System Controller
* Copyright (C) 2020 Renesas Electronics Corp.
*
* Based on Renesas R-Car H3 System Controller
* Copyright (C) 2016-2017 Glider bvba
*/
#include <linux/kernel.h>
#include <dt-bindings/power/r8a774e1-sysc.h>
#include "rcar-sysc.h"
static const struct rcar_sysc_area r8a774e1_areas[] __initconst = {
{ "always-on", 0, 0, R8A774E1_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
{ "ca57-scu", 0x1c0, 0, R8A774E1_PD_CA57_SCU, R8A774E1_PD_ALWAYS_ON, PD_SCU },
{ "ca57-cpu0", 0x80, 0, R8A774E1_PD_CA57_CPU0, R8A774E1_PD_CA57_SCU, PD_CPU_NOCR },
{ "ca57-cpu1", 0x80, 1, R8A774E1_PD_CA57_CPU1, R8A774E1_PD_CA57_SCU, PD_CPU_NOCR },
{ "ca57-cpu2", 0x80, 2, R8A774E1_PD_CA57_CPU2, R8A774E1_PD_CA57_SCU, PD_CPU_NOCR },
{ "ca57-cpu3", 0x80, 3, R8A774E1_PD_CA57_CPU3, R8A774E1_PD_CA57_SCU, PD_CPU_NOCR },
{ "ca53-scu", 0x140, 0, R8A774E1_PD_CA53_SCU, R8A774E1_PD_ALWAYS_ON, PD_SCU },
{ "ca53-cpu0", 0x200, 0, R8A774E1_PD_CA53_CPU0, R8A774E1_PD_CA53_SCU, PD_CPU_NOCR },
{ "ca53-cpu1", 0x200, 1, R8A774E1_PD_CA53_CPU1, R8A774E1_PD_CA53_SCU, PD_CPU_NOCR },
{ "ca53-cpu2", 0x200, 2, R8A774E1_PD_CA53_CPU2, R8A774E1_PD_CA53_SCU, PD_CPU_NOCR },
{ "ca53-cpu3", 0x200, 3, R8A774E1_PD_CA53_CPU3, R8A774E1_PD_CA53_SCU, PD_CPU_NOCR },
{ "a3vp", 0x340, 0, R8A774E1_PD_A3VP, R8A774E1_PD_ALWAYS_ON },
{ "a3vc", 0x380, 0, R8A774E1_PD_A3VC, R8A774E1_PD_ALWAYS_ON },
{ "a2vc1", 0x3c0, 1, R8A774E1_PD_A2VC1, R8A774E1_PD_A3VC },
{ "3dg-a", 0x100, 0, R8A774E1_PD_3DG_A, R8A774E1_PD_ALWAYS_ON },
{ "3dg-b", 0x100, 1, R8A774E1_PD_3DG_B, R8A774E1_PD_3DG_A },
{ "3dg-c", 0x100, 2, R8A774E1_PD_3DG_C, R8A774E1_PD_3DG_B },
{ "3dg-d", 0x100, 3, R8A774E1_PD_3DG_D, R8A774E1_PD_3DG_C },
{ "3dg-e", 0x100, 4, R8A774E1_PD_3DG_E, R8A774E1_PD_3DG_D },
};
const struct rcar_sysc_info r8a774e1_sysc_info __initconst = {
.areas = r8a774e1_areas,
.num_areas = ARRAY_SIZE(r8a774e1_areas),
.extmask_offs = 0x2f8,
.extmask_val = BIT(0),
};
| linux-master | drivers/pmdomain/renesas/r8a774e1-sysc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas R-Car E2 System Controller
*
* Copyright (C) 2016 Glider bvba
*/
#include <linux/kernel.h>
#include <dt-bindings/power/r8a7794-sysc.h>
#include "rcar-sysc.h"
static const struct rcar_sysc_area r8a7794_areas[] __initconst = {
{ "always-on", 0, 0, R8A7794_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
{ "ca7-scu", 0x100, 0, R8A7794_PD_CA7_SCU, R8A7794_PD_ALWAYS_ON,
PD_SCU },
{ "ca7-cpu0", 0x1c0, 0, R8A7794_PD_CA7_CPU0, R8A7794_PD_CA7_SCU,
PD_CPU_NOCR },
{ "ca7-cpu1", 0x1c0, 1, R8A7794_PD_CA7_CPU1, R8A7794_PD_CA7_SCU,
PD_CPU_NOCR },
{ "sh-4a", 0x80, 0, R8A7794_PD_SH_4A, R8A7794_PD_ALWAYS_ON },
{ "sgx", 0xc0, 0, R8A7794_PD_SGX, R8A7794_PD_ALWAYS_ON },
};
const struct rcar_sysc_info r8a7794_sysc_info __initconst = {
.areas = r8a7794_areas,
.num_areas = ARRAY_SIZE(r8a7794_areas),
};
| linux-master | drivers/pmdomain/renesas/r8a7794-sysc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas R-Car H2 System Controller
*
* Copyright (C) 2016 Glider bvba
*/
#include <linux/kernel.h>
#include <dt-bindings/power/r8a7790-sysc.h>
#include "rcar-sysc.h"
static const struct rcar_sysc_area r8a7790_areas[] __initconst = {
{ "always-on", 0, 0, R8A7790_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
{ "ca15-scu", 0x180, 0, R8A7790_PD_CA15_SCU, R8A7790_PD_ALWAYS_ON,
PD_SCU },
{ "ca15-cpu0", 0x40, 0, R8A7790_PD_CA15_CPU0, R8A7790_PD_CA15_SCU,
PD_CPU_NOCR },
{ "ca15-cpu1", 0x40, 1, R8A7790_PD_CA15_CPU1, R8A7790_PD_CA15_SCU,
PD_CPU_NOCR },
{ "ca15-cpu2", 0x40, 2, R8A7790_PD_CA15_CPU2, R8A7790_PD_CA15_SCU,
PD_CPU_NOCR },
{ "ca15-cpu3", 0x40, 3, R8A7790_PD_CA15_CPU3, R8A7790_PD_CA15_SCU,
PD_CPU_NOCR },
{ "ca7-scu", 0x100, 0, R8A7790_PD_CA7_SCU, R8A7790_PD_ALWAYS_ON,
PD_SCU },
{ "ca7-cpu0", 0x1c0, 0, R8A7790_PD_CA7_CPU0, R8A7790_PD_CA7_SCU,
PD_CPU_NOCR },
{ "ca7-cpu1", 0x1c0, 1, R8A7790_PD_CA7_CPU1, R8A7790_PD_CA7_SCU,
PD_CPU_NOCR },
{ "ca7-cpu2", 0x1c0, 2, R8A7790_PD_CA7_CPU2, R8A7790_PD_CA7_SCU,
PD_CPU_NOCR },
{ "ca7-cpu3", 0x1c0, 3, R8A7790_PD_CA7_CPU3, R8A7790_PD_CA7_SCU,
PD_CPU_NOCR },
{ "sh-4a", 0x80, 0, R8A7790_PD_SH_4A, R8A7790_PD_ALWAYS_ON },
{ "rgx", 0xc0, 0, R8A7790_PD_RGX, R8A7790_PD_ALWAYS_ON },
{ "imp", 0x140, 0, R8A7790_PD_IMP, R8A7790_PD_ALWAYS_ON },
};
const struct rcar_sysc_info r8a7790_sysc_info __initconst = {
.areas = r8a7790_areas,
.num_areas = ARRAY_SIZE(r8a7790_areas),
};
| linux-master | drivers/pmdomain/renesas/r8a7790-sysc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas R-Car V2H (R8A7792) System Controller
*
* Copyright (C) 2016 Cogent Embedded Inc.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <dt-bindings/power/r8a7792-sysc.h>
#include "rcar-sysc.h"
static const struct rcar_sysc_area r8a7792_areas[] __initconst = {
{ "always-on", 0, 0, R8A7792_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
{ "ca15-scu", 0x180, 0, R8A7792_PD_CA15_SCU, R8A7792_PD_ALWAYS_ON,
PD_SCU },
{ "ca15-cpu0", 0x40, 0, R8A7792_PD_CA15_CPU0, R8A7792_PD_CA15_SCU,
PD_CPU_NOCR },
{ "ca15-cpu1", 0x40, 1, R8A7792_PD_CA15_CPU1, R8A7792_PD_CA15_SCU,
PD_CPU_NOCR },
{ "sgx", 0xc0, 0, R8A7792_PD_SGX, R8A7792_PD_ALWAYS_ON },
{ "imp", 0x140, 0, R8A7792_PD_IMP, R8A7792_PD_ALWAYS_ON },
};
const struct rcar_sysc_info r8a7792_sysc_info __initconst = {
.areas = r8a7792_areas,
.num_areas = ARRAY_SIZE(r8a7792_areas),
};
| linux-master | drivers/pmdomain/renesas/r8a7792-sysc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas R-Car M3-N System Controller
* Copyright (C) 2018 Jacopo Mondi <[email protected]>
*
* Based on Renesas R-Car M3-W System Controller
* Copyright (C) 2016 Glider bvba
*/
#include <linux/bits.h>
#include <linux/kernel.h>
#include <dt-bindings/power/r8a77965-sysc.h>
#include "rcar-sysc.h"
static const struct rcar_sysc_area r8a77965_areas[] __initconst = {
{ "always-on", 0, 0, R8A77965_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
{ "ca57-scu", 0x1c0, 0, R8A77965_PD_CA57_SCU, R8A77965_PD_ALWAYS_ON,
PD_SCU },
{ "ca57-cpu0", 0x80, 0, R8A77965_PD_CA57_CPU0, R8A77965_PD_CA57_SCU,
PD_CPU_NOCR },
{ "ca57-cpu1", 0x80, 1, R8A77965_PD_CA57_CPU1, R8A77965_PD_CA57_SCU,
PD_CPU_NOCR },
{ "cr7", 0x240, 0, R8A77965_PD_CR7, R8A77965_PD_ALWAYS_ON },
{ "a3vc", 0x380, 0, R8A77965_PD_A3VC, R8A77965_PD_ALWAYS_ON },
{ "a3vp", 0x340, 0, R8A77965_PD_A3VP, R8A77965_PD_ALWAYS_ON },
{ "a2vc1", 0x3c0, 1, R8A77965_PD_A2VC1, R8A77965_PD_A3VC },
{ "3dg-a", 0x100, 0, R8A77965_PD_3DG_A, R8A77965_PD_ALWAYS_ON },
{ "3dg-b", 0x100, 1, R8A77965_PD_3DG_B, R8A77965_PD_3DG_A },
};
const struct rcar_sysc_info r8a77965_sysc_info __initconst = {
.areas = r8a77965_areas,
.num_areas = ARRAY_SIZE(r8a77965_areas),
.extmask_offs = 0x2f8,
.extmask_val = BIT(0),
};
| linux-master | drivers/pmdomain/renesas/r8a77965-sysc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas RZ/G1E System Controller
*
* Copyright (C) 2016 Cogent Embedded Inc.
*/
#include <linux/kernel.h>
#include <dt-bindings/power/r8a7745-sysc.h>
#include "rcar-sysc.h"
static const struct rcar_sysc_area r8a7745_areas[] __initconst = {
{ "always-on", 0, 0, R8A7745_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
{ "ca7-scu", 0x100, 0, R8A7745_PD_CA7_SCU, R8A7745_PD_ALWAYS_ON,
PD_SCU },
{ "ca7-cpu0", 0x1c0, 0, R8A7745_PD_CA7_CPU0, R8A7745_PD_CA7_SCU,
PD_CPU_NOCR },
{ "ca7-cpu1", 0x1c0, 1, R8A7745_PD_CA7_CPU1, R8A7745_PD_CA7_SCU,
PD_CPU_NOCR },
{ "sgx", 0xc0, 0, R8A7745_PD_SGX, R8A7745_PD_ALWAYS_ON },
};
const struct rcar_sysc_info r8a7745_sysc_info __initconst = {
.areas = r8a7745_areas,
.num_areas = ARRAY_SIZE(r8a7745_areas),
};
| linux-master | drivers/pmdomain/renesas/r8a7745-sysc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* R-Car SYSC Power management support
*
* Copyright (C) 2014 Magnus Damm
* Copyright (C) 2015-2017 Glider bvba
*/
#include <linux/clk/renesas.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/of_address.h>
#include <linux/pm_domain.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/soc/renesas/rcar-sysc.h>
#include "rcar-sysc.h"
/* SYSC Common */
#define SYSCSR 0x00 /* SYSC Status Register */
#define SYSCISR 0x04 /* Interrupt Status Register */
#define SYSCISCR 0x08 /* Interrupt Status Clear Register */
#define SYSCIER 0x0c /* Interrupt Enable Register */
#define SYSCIMR 0x10 /* Interrupt Mask Register */
/* SYSC Status Register */
#define SYSCSR_PONENB 1 /* Ready for power resume requests */
#define SYSCSR_POFFENB 0 /* Ready for power shutoff requests */
/*
* Power Control Register Offsets inside the register block for each domain
* Note: The "CR" registers for ARM cores exist on H1 only
* Use WFI to power off, CPG/APMU to resume ARM cores on R-Car Gen2
* Use PSCI on R-Car Gen3
*/
#define PWRSR_OFFS 0x00 /* Power Status Register */
#define PWROFFCR_OFFS 0x04 /* Power Shutoff Control Register */
#define PWROFFSR_OFFS 0x08 /* Power Shutoff Status Register */
#define PWRONCR_OFFS 0x0c /* Power Resume Control Register */
#define PWRONSR_OFFS 0x10 /* Power Resume Status Register */
#define PWRER_OFFS 0x14 /* Power Shutoff/Resume Error */
#define SYSCSR_TIMEOUT 100
#define SYSCSR_DELAY_US 1
#define PWRER_RETRIES 100
#define PWRER_DELAY_US 1
#define SYSCISR_TIMEOUT 1000
#define SYSCISR_DELAY_US 1
#define RCAR_PD_ALWAYS_ON 32 /* Always-on power area */
struct rcar_sysc_ch {
u16 chan_offs;
u8 chan_bit;
u8 isr_bit;
};
static void __iomem *rcar_sysc_base;
static DEFINE_SPINLOCK(rcar_sysc_lock); /* SMP CPUs + I/O devices */
static u32 rcar_sysc_extmask_offs, rcar_sysc_extmask_val;
static int rcar_sysc_pwr_on_off(const struct rcar_sysc_ch *sysc_ch, bool on)
{
unsigned int sr_bit, reg_offs;
u32 val;
int ret;
if (on) {
sr_bit = SYSCSR_PONENB;
reg_offs = PWRONCR_OFFS;
} else {
sr_bit = SYSCSR_POFFENB;
reg_offs = PWROFFCR_OFFS;
}
/* Wait until SYSC is ready to accept a power request */
ret = readl_poll_timeout_atomic(rcar_sysc_base + SYSCSR, val,
val & BIT(sr_bit), SYSCSR_DELAY_US,
SYSCSR_TIMEOUT);
if (ret)
return -EAGAIN;
/* Submit power shutoff or power resume request */
iowrite32(BIT(sysc_ch->chan_bit),
rcar_sysc_base + sysc_ch->chan_offs + reg_offs);
return 0;
}
static int rcar_sysc_power(const struct rcar_sysc_ch *sysc_ch, bool on)
{
unsigned int isr_mask = BIT(sysc_ch->isr_bit);
unsigned int chan_mask = BIT(sysc_ch->chan_bit);
unsigned int status, k;
unsigned long flags;
int ret;
spin_lock_irqsave(&rcar_sysc_lock, flags);
/*
* Mask external power requests for CPU or 3DG domains
*/
if (rcar_sysc_extmask_val) {
iowrite32(rcar_sysc_extmask_val,
rcar_sysc_base + rcar_sysc_extmask_offs);
}
/*
* The interrupt source needs to be enabled, but masked, to prevent the
* CPU from receiving it.
*/
iowrite32(ioread32(rcar_sysc_base + SYSCIMR) | isr_mask,
rcar_sysc_base + SYSCIMR);
iowrite32(ioread32(rcar_sysc_base + SYSCIER) | isr_mask,
rcar_sysc_base + SYSCIER);
iowrite32(isr_mask, rcar_sysc_base + SYSCISCR);
/* Submit power shutoff or resume request until it is accepted */
for (k = 0; k < PWRER_RETRIES; k++) {
ret = rcar_sysc_pwr_on_off(sysc_ch, on);
if (ret)
goto out;
status = ioread32(rcar_sysc_base +
sysc_ch->chan_offs + PWRER_OFFS);
if (!(status & chan_mask))
break;
udelay(PWRER_DELAY_US);
}
if (k == PWRER_RETRIES) {
ret = -EIO;
goto out;
}
/* Wait until the power shutoff or resume request has completed */
ret = readl_poll_timeout_atomic(rcar_sysc_base + SYSCISR, status,
status & isr_mask, SYSCISR_DELAY_US,
SYSCISR_TIMEOUT);
if (ret)
ret = -EIO;
iowrite32(isr_mask, rcar_sysc_base + SYSCISCR);
out:
if (rcar_sysc_extmask_val)
iowrite32(0, rcar_sysc_base + rcar_sysc_extmask_offs);
spin_unlock_irqrestore(&rcar_sysc_lock, flags);
pr_debug("sysc power %s domain %d: %08x -> %d\n", on ? "on" : "off",
sysc_ch->isr_bit, ioread32(rcar_sysc_base + SYSCISR), ret);
return ret;
}
static bool rcar_sysc_power_is_off(const struct rcar_sysc_ch *sysc_ch)
{
unsigned int st;
st = ioread32(rcar_sysc_base + sysc_ch->chan_offs + PWRSR_OFFS);
if (st & BIT(sysc_ch->chan_bit))
return true;
return false;
}
struct rcar_sysc_pd {
struct generic_pm_domain genpd;
struct rcar_sysc_ch ch;
unsigned int flags;
char name[];
};
static inline struct rcar_sysc_pd *to_rcar_pd(struct generic_pm_domain *d)
{
return container_of(d, struct rcar_sysc_pd, genpd);
}
static int rcar_sysc_pd_power_off(struct generic_pm_domain *genpd)
{
struct rcar_sysc_pd *pd = to_rcar_pd(genpd);
pr_debug("%s: %s\n", __func__, genpd->name);
return rcar_sysc_power(&pd->ch, false);
}
static int rcar_sysc_pd_power_on(struct generic_pm_domain *genpd)
{
struct rcar_sysc_pd *pd = to_rcar_pd(genpd);
pr_debug("%s: %s\n", __func__, genpd->name);
return rcar_sysc_power(&pd->ch, true);
}
static bool has_cpg_mstp;
static int __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd)
{
struct generic_pm_domain *genpd = &pd->genpd;
const char *name = pd->genpd.name;
int error;
if (pd->flags & PD_CPU) {
/*
* This domain contains a CPU core and therefore it should
* only be turned off if the CPU is not in use.
*/
pr_debug("PM domain %s contains %s\n", name, "CPU");
genpd->flags |= GENPD_FLAG_ALWAYS_ON;
} else if (pd->flags & PD_SCU) {
/*
* This domain contains an SCU and cache-controller, and
* therefore it should only be turned off if the CPU cores are
* not in use.
*/
pr_debug("PM domain %s contains %s\n", name, "SCU");
genpd->flags |= GENPD_FLAG_ALWAYS_ON;
} else if (pd->flags & PD_NO_CR) {
/*
* This domain cannot be turned off.
*/
genpd->flags |= GENPD_FLAG_ALWAYS_ON;
}
if (!(pd->flags & (PD_CPU | PD_SCU))) {
/* Enable Clock Domain for I/O devices */
genpd->flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
if (has_cpg_mstp) {
genpd->attach_dev = cpg_mstp_attach_dev;
genpd->detach_dev = cpg_mstp_detach_dev;
} else {
genpd->attach_dev = cpg_mssr_attach_dev;
genpd->detach_dev = cpg_mssr_detach_dev;
}
}
genpd->power_off = rcar_sysc_pd_power_off;
genpd->power_on = rcar_sysc_pd_power_on;
if (pd->flags & (PD_CPU | PD_NO_CR)) {
/* Skip CPUs (handled by SMP code) and areas without control */
pr_debug("%s: Not touching %s\n", __func__, genpd->name);
goto finalize;
}
if (!rcar_sysc_power_is_off(&pd->ch)) {
pr_debug("%s: %s is already powered\n", __func__, genpd->name);
goto finalize;
}
rcar_sysc_power(&pd->ch, true);
finalize:
error = pm_genpd_init(genpd, &simple_qos_governor, false);
if (error)
pr_err("Failed to init PM domain %s: %d\n", name, error);
return error;
}
static const struct of_device_id rcar_sysc_matches[] __initconst = {
#ifdef CONFIG_SYSC_R8A7742
{ .compatible = "renesas,r8a7742-sysc", .data = &r8a7742_sysc_info },
#endif
#ifdef CONFIG_SYSC_R8A7743
{ .compatible = "renesas,r8a7743-sysc", .data = &r8a7743_sysc_info },
/* RZ/G1N is identical to RZ/G1M w.r.t. power domains. */
{ .compatible = "renesas,r8a7744-sysc", .data = &r8a7743_sysc_info },
#endif
#ifdef CONFIG_SYSC_R8A7745
{ .compatible = "renesas,r8a7745-sysc", .data = &r8a7745_sysc_info },
#endif
#ifdef CONFIG_SYSC_R8A77470
{ .compatible = "renesas,r8a77470-sysc", .data = &r8a77470_sysc_info },
#endif
#ifdef CONFIG_SYSC_R8A774A1
{ .compatible = "renesas,r8a774a1-sysc", .data = &r8a774a1_sysc_info },
#endif
#ifdef CONFIG_SYSC_R8A774B1
{ .compatible = "renesas,r8a774b1-sysc", .data = &r8a774b1_sysc_info },
#endif
#ifdef CONFIG_SYSC_R8A774C0
{ .compatible = "renesas,r8a774c0-sysc", .data = &r8a774c0_sysc_info },
#endif
#ifdef CONFIG_SYSC_R8A774E1
{ .compatible = "renesas,r8a774e1-sysc", .data = &r8a774e1_sysc_info },
#endif
#ifdef CONFIG_SYSC_R8A7779
{ .compatible = "renesas,r8a7779-sysc", .data = &r8a7779_sysc_info },
#endif
#ifdef CONFIG_SYSC_R8A7790
{ .compatible = "renesas,r8a7790-sysc", .data = &r8a7790_sysc_info },
#endif
#ifdef CONFIG_SYSC_R8A7791
{ .compatible = "renesas,r8a7791-sysc", .data = &r8a7791_sysc_info },
/* R-Car M2-N is identical to R-Car M2-W w.r.t. power domains. */
{ .compatible = "renesas,r8a7793-sysc", .data = &r8a7791_sysc_info },
#endif
#ifdef CONFIG_SYSC_R8A7792
{ .compatible = "renesas,r8a7792-sysc", .data = &r8a7792_sysc_info },
#endif
#ifdef CONFIG_SYSC_R8A7794
{ .compatible = "renesas,r8a7794-sysc", .data = &r8a7794_sysc_info },
#endif
#ifdef CONFIG_SYSC_R8A7795
{ .compatible = "renesas,r8a7795-sysc", .data = &r8a7795_sysc_info },
#endif
#ifdef CONFIG_SYSC_R8A77960
{ .compatible = "renesas,r8a7796-sysc", .data = &r8a77960_sysc_info },
#endif
#ifdef CONFIG_SYSC_R8A77961
{ .compatible = "renesas,r8a77961-sysc", .data = &r8a77961_sysc_info },
#endif
#ifdef CONFIG_SYSC_R8A77965
{ .compatible = "renesas,r8a77965-sysc", .data = &r8a77965_sysc_info },
#endif
#ifdef CONFIG_SYSC_R8A77970
{ .compatible = "renesas,r8a77970-sysc", .data = &r8a77970_sysc_info },
#endif
#ifdef CONFIG_SYSC_R8A77980
{ .compatible = "renesas,r8a77980-sysc", .data = &r8a77980_sysc_info },
#endif
#ifdef CONFIG_SYSC_R8A77990
{ .compatible = "renesas,r8a77990-sysc", .data = &r8a77990_sysc_info },
#endif
#ifdef CONFIG_SYSC_R8A77995
{ .compatible = "renesas,r8a77995-sysc", .data = &r8a77995_sysc_info },
#endif
{ /* sentinel */ }
};
struct rcar_pm_domains {
struct genpd_onecell_data onecell_data;
struct generic_pm_domain *domains[RCAR_PD_ALWAYS_ON + 1];
};
static struct genpd_onecell_data *rcar_sysc_onecell_data;
static int __init rcar_sysc_pd_init(void)
{
const struct rcar_sysc_info *info;
const struct of_device_id *match;
struct rcar_pm_domains *domains;
struct device_node *np;
void __iomem *base;
unsigned int i;
int error;
np = of_find_matching_node_and_match(NULL, rcar_sysc_matches, &match);
if (!np)
return -ENODEV;
info = match->data;
if (info->init) {
error = info->init();
if (error)
goto out_put;
}
has_cpg_mstp = of_find_compatible_node(NULL, NULL,
"renesas,cpg-mstp-clocks");
base = of_iomap(np, 0);
if (!base) {
pr_warn("%pOF: Cannot map regs\n", np);
error = -ENOMEM;
goto out_put;
}
rcar_sysc_base = base;
/* Optional External Request Mask Register */
rcar_sysc_extmask_offs = info->extmask_offs;
rcar_sysc_extmask_val = info->extmask_val;
domains = kzalloc(sizeof(*domains), GFP_KERNEL);
if (!domains) {
error = -ENOMEM;
goto out_put;
}
domains->onecell_data.domains = domains->domains;
domains->onecell_data.num_domains = ARRAY_SIZE(domains->domains);
rcar_sysc_onecell_data = &domains->onecell_data;
for (i = 0; i < info->num_areas; i++) {
const struct rcar_sysc_area *area = &info->areas[i];
struct rcar_sysc_pd *pd;
size_t n;
if (!area->name) {
/* Skip NULLified area */
continue;
}
n = strlen(area->name) + 1;
pd = kzalloc(sizeof(*pd) + n, GFP_KERNEL);
if (!pd) {
error = -ENOMEM;
goto out_put;
}
memcpy(pd->name, area->name, n);
pd->genpd.name = pd->name;
pd->ch.chan_offs = area->chan_offs;
pd->ch.chan_bit = area->chan_bit;
pd->ch.isr_bit = area->isr_bit;
pd->flags = area->flags;
error = rcar_sysc_pd_setup(pd);
if (error)
goto out_put;
domains->domains[area->isr_bit] = &pd->genpd;
if (area->parent < 0)
continue;
error = pm_genpd_add_subdomain(domains->domains[area->parent],
&pd->genpd);
if (error) {
pr_warn("Failed to add PM subdomain %s to parent %u\n",
area->name, area->parent);
goto out_put;
}
}
error = of_genpd_add_provider_onecell(np, &domains->onecell_data);
if (!error)
fwnode_dev_initialized(of_fwnode_handle(np), true);
out_put:
of_node_put(np);
return error;
}
early_initcall(rcar_sysc_pd_init);
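/*
 * A minimal sketch of how the onecell provider registered above is consumed
 * from DT. The SYSC node label "sysc" and the FDP1 consumer are illustrative
 * (based on the usual R-Car Gen3 layout, not taken from this file):
 *
 *	sysc: system-controller@e6180000 {
 *		compatible = "renesas,r8a7795-sysc";
 *		reg = <0 0xe6180000 0 0x400>;
 *		#power-domain-cells = <1>;
 *	};
 *
 *	fdp1@fe940000 {
 *		power-domains = <&sysc R8A7795_PD_A3VP>;
 *	};
 *
 * The single cell is the area's isr_bit, which is also the index into
 * domains->domains[] in rcar_sysc_pd_init() above.
 */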
void __init rcar_sysc_nullify(struct rcar_sysc_area *areas,
unsigned int num_areas, u8 id)
{
unsigned int i;
for (i = 0; i < num_areas; i++)
if (areas[i].isr_bit == id) {
areas[i].name = NULL;
return;
}
}
#ifdef CONFIG_ARCH_R8A7779
static int rcar_sysc_power_cpu(unsigned int idx, bool on)
{
struct generic_pm_domain *genpd;
struct rcar_sysc_pd *pd;
unsigned int i;
if (!rcar_sysc_onecell_data)
return -ENODEV;
for (i = 0; i < rcar_sysc_onecell_data->num_domains; i++) {
genpd = rcar_sysc_onecell_data->domains[i];
if (!genpd)
continue;
pd = to_rcar_pd(genpd);
if (!(pd->flags & PD_CPU) || pd->ch.chan_bit != idx)
continue;
return rcar_sysc_power(&pd->ch, on);
}
return -ENOENT;
}
int rcar_sysc_power_down_cpu(unsigned int cpu)
{
return rcar_sysc_power_cpu(cpu, false);
}
int rcar_sysc_power_up_cpu(unsigned int cpu)
{
return rcar_sysc_power_cpu(cpu, true);
}
#endif /* CONFIG_ARCH_R8A7779 */
| linux-master | drivers/pmdomain/renesas/rcar-sysc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* TI SCI Generic Power Domain Driver
*
* Copyright (C) 2015-2017 Texas Instruments Incorporated - http://www.ti.com/
* J Keerthy <[email protected]>
* Dave Gerlach <[email protected]>
*/
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/slab.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <dt-bindings/soc/ti,sci_pm_domain.h>
/**
* struct ti_sci_genpd_provider: holds common TI SCI genpd provider data
* @ti_sci: handle to TI SCI protocol driver that provides ops to
* communicate with system control processor.
* @dev: pointer to dev for the driver for devm allocs
* @pd_list: list of all the power domains on the device
* @data: onecell data for genpd core
*/
struct ti_sci_genpd_provider {
const struct ti_sci_handle *ti_sci;
struct device *dev;
struct list_head pd_list;
struct genpd_onecell_data data;
};
/**
* struct ti_sci_pm_domain: TI specific data needed for power domain
* @idx: index of the device that identifies it with the system
* control processor.
* @exclusive: Permissions for exclusive request or shared request of the
* device.
* @pd: generic_pm_domain for use with the genpd framework
* @node: link for the genpd list
* @parent: link to the parent TI SCI genpd provider
*/
struct ti_sci_pm_domain {
int idx;
u8 exclusive;
struct generic_pm_domain pd;
struct list_head node;
struct ti_sci_genpd_provider *parent;
};
#define genpd_to_ti_sci_pd(gpd) container_of(gpd, struct ti_sci_pm_domain, pd)
/*
* ti_sci_pd_power_off(): genpd power down hook
* @domain: pointer to the powerdomain to power off
*/
static int ti_sci_pd_power_off(struct generic_pm_domain *domain)
{
struct ti_sci_pm_domain *pd = genpd_to_ti_sci_pd(domain);
const struct ti_sci_handle *ti_sci = pd->parent->ti_sci;
return ti_sci->ops.dev_ops.put_device(ti_sci, pd->idx);
}
/*
* ti_sci_pd_power_on(): genpd power up hook
* @domain: pointer to the powerdomain to power on
*/
static int ti_sci_pd_power_on(struct generic_pm_domain *domain)
{
struct ti_sci_pm_domain *pd = genpd_to_ti_sci_pd(domain);
const struct ti_sci_handle *ti_sci = pd->parent->ti_sci;
if (pd->exclusive)
return ti_sci->ops.dev_ops.get_device_exclusive(ti_sci,
pd->idx);
else
return ti_sci->ops.dev_ops.get_device(ti_sci, pd->idx);
}
/*
* ti_sci_pd_xlate(): translation service for TI SCI genpds
* @genpdspec: DT identification data for the genpd
* @data: genpd core data for all the powerdomains on the device
*/
static struct generic_pm_domain *ti_sci_pd_xlate(
struct of_phandle_args *genpdspec,
void *data)
{
struct genpd_onecell_data *genpd_data = data;
unsigned int idx = genpdspec->args[0];
if (genpdspec->args_count != 1 && genpdspec->args_count != 2)
return ERR_PTR(-EINVAL);
if (idx >= genpd_data->num_domains) {
pr_err("%s: invalid domain index %u\n", __func__, idx);
return ERR_PTR(-EINVAL);
}
if (!genpd_data->domains[idx])
return ERR_PTR(-ENOENT);
genpd_to_ti_sci_pd(genpd_data->domains[idx])->exclusive =
genpdspec->args[1];
return genpd_data->domains[idx];
}
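/*
 * A hedged illustration of the two consumer forms accepted by the xlate
 * above, assuming the usual TI K3 layout; the "k3_pds" label and device
 * ID 146 are placeholders, not taken from this driver:
 *
 *	power-domains = <&k3_pds 146 TI_SCI_PD_EXCLUSIVE>;
 *	power-domains = <&k3_pds 146>;
 *
 * With two cells, the second argument (TI_SCI_PD_SHARED = 0 or
 * TI_SCI_PD_EXCLUSIVE = 1, from dt-bindings/soc/ti,sci_pm_domain.h) is
 * copied into the domain's exclusive flag and selects which get_device
 * variant ti_sci_pd_power_on() calls.
 */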
static const struct of_device_id ti_sci_pm_domain_matches[] = {
{ .compatible = "ti,sci-pm-domain", },
{ },
};
MODULE_DEVICE_TABLE(of, ti_sci_pm_domain_matches);
static int ti_sci_pm_domain_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ti_sci_genpd_provider *pd_provider;
struct ti_sci_pm_domain *pd;
struct device_node *np;
struct of_phandle_args args;
int ret;
u32 max_id = 0;
int index;
pd_provider = devm_kzalloc(dev, sizeof(*pd_provider), GFP_KERNEL);
if (!pd_provider)
return -ENOMEM;
pd_provider->ti_sci = devm_ti_sci_get_handle(dev);
if (IS_ERR(pd_provider->ti_sci))
return PTR_ERR(pd_provider->ti_sci);
pd_provider->dev = dev;
INIT_LIST_HEAD(&pd_provider->pd_list);
/* Find highest device ID used for power domains */
for_each_node_with_property(np, "power-domains") {
index = 0;
while (1) {
ret = of_parse_phandle_with_args(np, "power-domains",
"#power-domain-cells",
index, &args);
if (ret)
break;
if (args.args_count >= 1 && args.np == dev->of_node) {
if (args.args[0] > max_id)
max_id = args.args[0];
pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
if (!pd)
return -ENOMEM;
pd->pd.name = devm_kasprintf(dev, GFP_KERNEL,
"pd:%d",
args.args[0]);
if (!pd->pd.name)
return -ENOMEM;
pd->pd.power_off = ti_sci_pd_power_off;
pd->pd.power_on = ti_sci_pd_power_on;
pd->idx = args.args[0];
pd->parent = pd_provider;
pm_genpd_init(&pd->pd, NULL, true);
list_add(&pd->node, &pd_provider->pd_list);
}
index++;
}
}
pd_provider->data.domains =
devm_kcalloc(dev, max_id + 1,
sizeof(*pd_provider->data.domains),
GFP_KERNEL);
if (!pd_provider->data.domains)
return -ENOMEM;
pd_provider->data.num_domains = max_id + 1;
pd_provider->data.xlate = ti_sci_pd_xlate;
list_for_each_entry(pd, &pd_provider->pd_list, node)
pd_provider->data.domains[pd->idx] = &pd->pd;
return of_genpd_add_provider_onecell(dev->of_node, &pd_provider->data);
}
static struct platform_driver ti_sci_pm_domains_driver = {
.probe = ti_sci_pm_domain_probe,
.driver = {
.name = "ti_sci_pm_domains",
.of_match_table = ti_sci_pm_domain_matches,
},
};
module_platform_driver(ti_sci_pm_domains_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI System Control Interface (SCI) Power Domain driver");
MODULE_AUTHOR("Dave Gerlach");
| linux-master | drivers/pmdomain/ti/ti_sci_pm_domains.c |
// SPDX-License-Identifier: GPL-2.0
/*
* OMAP2+ PRM driver
*
* Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
* Tero Kristo <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/reset-controller.h>
#include <linux/delay.h>
#include <linux/platform_data/ti-prm.h>
enum omap_prm_domain_mode {
OMAP_PRMD_OFF,
OMAP_PRMD_RETENTION,
OMAP_PRMD_ON_INACTIVE,
OMAP_PRMD_ON_ACTIVE,
};
struct omap_prm_domain_map {
unsigned int usable_modes; /* Mask of hardware supported modes */
unsigned long statechange:1; /* Optional low-power state change */
unsigned long logicretstate:1; /* Optional logic off mode */
};
struct omap_prm_domain {
struct device *dev;
struct omap_prm *prm;
struct generic_pm_domain pd;
u16 pwrstctrl;
u16 pwrstst;
const struct omap_prm_domain_map *cap;
u32 pwrstctrl_saved;
unsigned int uses_pm_clk:1;
};
struct omap_rst_map {
s8 rst;
s8 st;
};
struct omap_prm_data {
u32 base;
const char *name;
const char *clkdm_name;
u16 pwrstctrl;
u16 pwrstst;
const struct omap_prm_domain_map *dmap;
u16 rstctrl;
u16 rstst;
const struct omap_rst_map *rstmap;
u8 flags;
};
struct omap_prm {
const struct omap_prm_data *data;
void __iomem *base;
struct omap_prm_domain *prmd;
};
struct omap_reset_data {
struct reset_controller_dev rcdev;
struct omap_prm *prm;
u32 mask;
spinlock_t lock;
struct clockdomain *clkdm;
struct device *dev;
};
#define genpd_to_prm_domain(gpd) container_of(gpd, struct omap_prm_domain, pd)
#define to_omap_reset_data(p) container_of((p), struct omap_reset_data, rcdev)
#define OMAP_MAX_RESETS 8
#define OMAP_RESET_MAX_WAIT 10000
#define OMAP_PRM_HAS_RSTCTRL BIT(0)
#define OMAP_PRM_HAS_RSTST BIT(1)
#define OMAP_PRM_HAS_NO_CLKDM BIT(2)
#define OMAP_PRM_RET_WHEN_IDLE BIT(3)
#define OMAP_PRM_HAS_RESETS (OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_RSTST)
#define PRM_STATE_MAX_WAIT 10000
#define PRM_LOGICRETSTATE BIT(2)
#define PRM_LOWPOWERSTATECHANGE BIT(4)
#define PRM_POWERSTATE_MASK OMAP_PRMD_ON_ACTIVE
#define PRM_ST_INTRANSITION BIT(20)
static const struct omap_prm_domain_map omap_prm_all = {
.usable_modes = BIT(OMAP_PRMD_ON_ACTIVE) | BIT(OMAP_PRMD_ON_INACTIVE) |
BIT(OMAP_PRMD_RETENTION) | BIT(OMAP_PRMD_OFF),
.statechange = 1,
.logicretstate = 1,
};
static const struct omap_prm_domain_map omap_prm_noinact = {
.usable_modes = BIT(OMAP_PRMD_ON_ACTIVE) | BIT(OMAP_PRMD_RETENTION) |
BIT(OMAP_PRMD_OFF),
.statechange = 1,
.logicretstate = 1,
};
static const struct omap_prm_domain_map omap_prm_nooff = {
.usable_modes = BIT(OMAP_PRMD_ON_ACTIVE) | BIT(OMAP_PRMD_ON_INACTIVE) |
BIT(OMAP_PRMD_RETENTION),
.statechange = 1,
.logicretstate = 1,
};
static const struct omap_prm_domain_map omap_prm_onoff_noauto = {
.usable_modes = BIT(OMAP_PRMD_ON_ACTIVE) | BIT(OMAP_PRMD_OFF),
.statechange = 1,
};
static const struct omap_prm_domain_map omap_prm_alwon = {
.usable_modes = BIT(OMAP_PRMD_ON_ACTIVE),
};
static const struct omap_prm_domain_map omap_prm_reton = {
.usable_modes = BIT(OMAP_PRMD_ON_ACTIVE) | BIT(OMAP_PRMD_RETENTION),
.statechange = 1,
.logicretstate = 1,
};
static const struct omap_rst_map rst_map_0[] = {
{ .rst = 0, .st = 0 },
{ .rst = -1 },
};
static const struct omap_rst_map rst_map_01[] = {
{ .rst = 0, .st = 0 },
{ .rst = 1, .st = 1 },
{ .rst = -1 },
};
static const struct omap_rst_map rst_map_012[] = {
{ .rst = 0, .st = 0 },
{ .rst = 1, .st = 1 },
{ .rst = 2, .st = 2 },
{ .rst = -1 },
};
static const struct omap_prm_data omap4_prm_data[] = {
{
.name = "mpu", .base = 0x4a306300,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_reton,
},
{
.name = "tesla", .base = 0x4a306400,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_noinact,
.rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01
},
{
.name = "abe", .base = 0x4a306500,
.pwrstctrl = 0, .pwrstst = 0x4, .dmap = &omap_prm_all,
},
{
.name = "always_on_core", .base = 0x4a306600,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
},
{
.name = "core", .base = 0x4a306700,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_reton,
.rstctrl = 0x210, .rstst = 0x214, .clkdm_name = "ducati",
.rstmap = rst_map_012,
.flags = OMAP_PRM_RET_WHEN_IDLE,
},
{
.name = "ivahd", .base = 0x4a306f00,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_noinact,
.rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_012
},
{
.name = "cam", .base = 0x4a307000,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
},
{
.name = "dss", .base = 0x4a307100,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_noinact
},
{
.name = "gfx", .base = 0x4a307200,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto
},
{
.name = "l3init", .base = 0x4a307300,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_reton
},
{
.name = "l4per", .base = 0x4a307400,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_reton,
.flags = OMAP_PRM_RET_WHEN_IDLE,
},
{
.name = "cefuse", .base = 0x4a307600,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto
},
{
.name = "wkup", .base = 0x4a307700,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon
},
{
.name = "emu", .base = 0x4a307900,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto
},
{
.name = "device", .base = 0x4a307b00,
.rstctrl = 0x0, .rstst = 0x4, .rstmap = rst_map_01,
.flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM
},
{ },
};
static const struct omap_prm_data omap5_prm_data[] = {
{
.name = "mpu", .base = 0x4ae06300,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_reton,
},
{
.name = "dsp", .base = 0x4ae06400,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_noinact,
.rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01
},
{
.name = "abe", .base = 0x4ae06500,
.pwrstctrl = 0, .pwrstst = 0x4, .dmap = &omap_prm_nooff,
},
{
.name = "coreaon", .base = 0x4ae06600,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon
},
{
.name = "core", .base = 0x4ae06700,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_reton,
.rstctrl = 0x210, .rstst = 0x214, .clkdm_name = "ipu",
.rstmap = rst_map_012
},
{
.name = "iva", .base = 0x4ae07200,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_noinact,
.rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_012
},
{
.name = "cam", .base = 0x4ae07300,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto
},
{
.name = "dss", .base = 0x4ae07400,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_noinact
},
{
.name = "gpu", .base = 0x4ae07500,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto
},
{
.name = "l3init", .base = 0x4ae07600,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_reton
},
{
.name = "custefuse", .base = 0x4ae07700,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto
},
{
.name = "wkupaon", .base = 0x4ae07800,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon
},
{
.name = "emu", .base = 0x4ae07a00,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto
},
{
.name = "device", .base = 0x4ae07c00,
.rstctrl = 0x0, .rstst = 0x4, .rstmap = rst_map_01,
.flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM
},
{ },
};
static const struct omap_prm_data dra7_prm_data[] = {
{
.name = "mpu", .base = 0x4ae06300,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_reton,
},
{
.name = "dsp1", .base = 0x4ae06400,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
.rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01,
},
{
.name = "ipu", .base = 0x4ae06500,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
.rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_012,
.clkdm_name = "ipu1"
},
{
.name = "coreaon", .base = 0x4ae06628,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
},
{
.name = "core", .base = 0x4ae06700,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
.rstctrl = 0x210, .rstst = 0x214, .rstmap = rst_map_012,
.clkdm_name = "ipu2"
},
{
.name = "iva", .base = 0x4ae06f00,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
.rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_012,
},
{
.name = "cam", .base = 0x4ae07000,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
},
{
.name = "dss", .base = 0x4ae07100,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
},
{
.name = "gpu", .base = 0x4ae07200,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
},
{
.name = "l3init", .base = 0x4ae07300,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
.rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01,
.clkdm_name = "pcie"
},
{
.name = "l4per", .base = 0x4ae07400,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
},
{
.name = "custefuse", .base = 0x4ae07600,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
},
{
.name = "wkupaon", .base = 0x4ae07724,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
},
{
.name = "emu", .base = 0x4ae07900,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
},
{
.name = "dsp2", .base = 0x4ae07b00,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
.rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01
},
{
.name = "eve1", .base = 0x4ae07b40,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
.rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01
},
{
.name = "eve2", .base = 0x4ae07b80,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
.rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01
},
{
.name = "eve3", .base = 0x4ae07bc0,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
.rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01
},
{
.name = "eve4", .base = 0x4ae07c00,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
.rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01
},
{
.name = "rtc", .base = 0x4ae07c60,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
},
{
.name = "vpe", .base = 0x4ae07c80,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
},
{ },
};
static const struct omap_rst_map am3_per_rst_map[] = {
{ .rst = 1 },
{ .rst = -1 },
};
static const struct omap_rst_map am3_wkup_rst_map[] = {
{ .rst = 3, .st = 5 },
{ .rst = -1 },
};
static const struct omap_prm_data am3_prm_data[] = {
{
.name = "per", .base = 0x44e00c00,
.pwrstctrl = 0xc, .pwrstst = 0x8, .dmap = &omap_prm_noinact,
.rstctrl = 0x0, .rstmap = am3_per_rst_map,
.flags = OMAP_PRM_HAS_RSTCTRL, .clkdm_name = "pruss_ocp"
},
{
.name = "wkup", .base = 0x44e00d00,
.pwrstctrl = 0x4, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
.rstctrl = 0x0, .rstst = 0xc, .rstmap = am3_wkup_rst_map,
.flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM
},
{
.name = "mpu", .base = 0x44e00e00,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_noinact,
},
{
.name = "device", .base = 0x44e00f00,
.rstctrl = 0x0, .rstst = 0x8, .rstmap = rst_map_01,
.flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM
},
{
.name = "rtc", .base = 0x44e01000,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
},
{
.name = "gfx", .base = 0x44e01100,
.pwrstctrl = 0, .pwrstst = 0x10, .dmap = &omap_prm_noinact,
.rstctrl = 0x4, .rstst = 0x14, .rstmap = rst_map_0, .clkdm_name = "gfx_l3",
},
{
.name = "cefuse", .base = 0x44e01200,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
},
{ },
};
static const struct omap_rst_map am4_per_rst_map[] = {
{ .rst = 1, .st = 0 },
{ .rst = -1 },
};
static const struct omap_rst_map am4_device_rst_map[] = {
{ .rst = 0, .st = 1 },
{ .rst = 1, .st = 0 },
{ .rst = -1 },
};
static const struct omap_prm_data am4_prm_data[] = {
{
.name = "mpu", .base = 0x44df0300,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_noinact,
},
{
.name = "gfx", .base = 0x44df0400,
.pwrstctrl = 0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
.rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_0, .clkdm_name = "gfx_l3",
},
{
.name = "rtc", .base = 0x44df0500,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
},
{
.name = "tamper", .base = 0x44df0600,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
},
{
.name = "cefuse", .base = 0x44df0700,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
},
{
.name = "per", .base = 0x44df0800,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_noinact,
.rstctrl = 0x10, .rstst = 0x14, .rstmap = am4_per_rst_map,
.clkdm_name = "pruss_ocp"
},
{
.name = "wkup", .base = 0x44df2000,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
.rstctrl = 0x10, .rstst = 0x14, .rstmap = am3_wkup_rst_map,
.flags = OMAP_PRM_HAS_NO_CLKDM
},
{
.name = "device", .base = 0x44df4000,
.rstctrl = 0x0, .rstst = 0x4, .rstmap = am4_device_rst_map,
.flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM
},
{ },
};
static const struct of_device_id omap_prm_id_table[] = {
{ .compatible = "ti,omap4-prm-inst", .data = omap4_prm_data },
{ .compatible = "ti,omap5-prm-inst", .data = omap5_prm_data },
{ .compatible = "ti,dra7-prm-inst", .data = dra7_prm_data },
{ .compatible = "ti,am3-prm-inst", .data = am3_prm_data },
{ .compatible = "ti,am4-prm-inst", .data = am4_prm_data },
{ },
};
#ifdef DEBUG
static void omap_prm_domain_show_state(struct omap_prm_domain *prmd,
const char *desc)
{
dev_dbg(prmd->dev, "%s %s: %08x/%08x\n",
prmd->pd.name, desc,
readl_relaxed(prmd->prm->base + prmd->pwrstctrl),
readl_relaxed(prmd->prm->base + prmd->pwrstst));
}
#else
static inline void omap_prm_domain_show_state(struct omap_prm_domain *prmd,
const char *desc)
{
}
#endif
static int omap_prm_domain_power_on(struct generic_pm_domain *domain)
{
struct omap_prm_domain *prmd;
int ret;
u32 v, mode;
prmd = genpd_to_prm_domain(domain);
if (!prmd->cap)
return 0;
omap_prm_domain_show_state(prmd, "on: previous state");
if (prmd->pwrstctrl_saved)
v = prmd->pwrstctrl_saved;
else
v = readl_relaxed(prmd->prm->base + prmd->pwrstctrl);
if (prmd->prm->data->flags & OMAP_PRM_RET_WHEN_IDLE)
mode = OMAP_PRMD_RETENTION;
else
mode = OMAP_PRMD_ON_ACTIVE;
writel_relaxed((v & ~PRM_POWERSTATE_MASK) | mode,
prmd->prm->base + prmd->pwrstctrl);
/* wait for the transition bit to get cleared */
ret = readl_relaxed_poll_timeout(prmd->prm->base + prmd->pwrstst,
v, !(v & PRM_ST_INTRANSITION), 1,
PRM_STATE_MAX_WAIT);
if (ret)
dev_err(prmd->dev, "%s: %s timed out\n",
prmd->pd.name, __func__);
omap_prm_domain_show_state(prmd, "on: new state");
return ret;
}
/* No need to check for holes in the mask for the lowest mode */
static int omap_prm_domain_find_lowest(struct omap_prm_domain *prmd)
{
return __ffs(prmd->cap->usable_modes);
}
static int omap_prm_domain_power_off(struct generic_pm_domain *domain)
{
struct omap_prm_domain *prmd;
int ret;
u32 v;
prmd = genpd_to_prm_domain(domain);
if (!prmd->cap)
return 0;
omap_prm_domain_show_state(prmd, "off: previous state");
v = readl_relaxed(prmd->prm->base + prmd->pwrstctrl);
prmd->pwrstctrl_saved = v;
v &= ~PRM_POWERSTATE_MASK;
v |= omap_prm_domain_find_lowest(prmd);
if (prmd->cap->statechange)
v |= PRM_LOWPOWERSTATECHANGE;
if (prmd->cap->logicretstate)
v &= ~PRM_LOGICRETSTATE;
else
v |= PRM_LOGICRETSTATE;
writel_relaxed(v, prmd->prm->base + prmd->pwrstctrl);
/* wait for the transition bit to get cleared */
ret = readl_relaxed_poll_timeout(prmd->prm->base + prmd->pwrstst,
v, !(v & PRM_ST_INTRANSITION), 1,
PRM_STATE_MAX_WAIT);
if (ret)
dev_warn(prmd->dev, "%s: %s timed out\n",
__func__, prmd->pd.name);
omap_prm_domain_show_state(prmd, "off: new state");
return 0;
}
/*
* Note that ti-sysc already manages the module clocks separately so
* no need to manage those. Interconnect instances need clocks managed
* for simple-pm-bus.
*/
static int omap_prm_domain_attach_clock(struct device *dev,
struct omap_prm_domain *prmd)
{
struct device_node *np = dev->of_node;
int error;
if (!of_device_is_compatible(np, "simple-pm-bus"))
return 0;
if (!of_property_read_bool(np, "clocks"))
return 0;
error = pm_clk_create(dev);
if (error)
return error;
error = of_pm_clk_add_clks(dev);
if (error < 0) {
pm_clk_destroy(dev);
return error;
}
prmd->uses_pm_clk = 1;
return 0;
}
static int omap_prm_domain_attach_dev(struct generic_pm_domain *domain,
struct device *dev)
{
struct generic_pm_domain_data *genpd_data;
struct of_phandle_args pd_args;
struct omap_prm_domain *prmd;
struct device_node *np;
int ret;
prmd = genpd_to_prm_domain(domain);
np = dev->of_node;
ret = of_parse_phandle_with_args(np, "power-domains",
"#power-domain-cells", 0, &pd_args);
if (ret < 0)
return ret;
if (pd_args.args_count != 0)
dev_warn(dev, "%s: unusupported #power-domain-cells: %i\n",
prmd->pd.name, pd_args.args_count);
genpd_data = dev_gpd_data(dev);
genpd_data->data = NULL;
ret = omap_prm_domain_attach_clock(dev, prmd);
if (ret)
return ret;
return 0;
}
static void omap_prm_domain_detach_dev(struct generic_pm_domain *domain,
struct device *dev)
{
struct generic_pm_domain_data *genpd_data;
struct omap_prm_domain *prmd;
prmd = genpd_to_prm_domain(domain);
if (prmd->uses_pm_clk)
pm_clk_destroy(dev);
genpd_data = dev_gpd_data(dev);
genpd_data->data = NULL;
}
static int omap_prm_domain_init(struct device *dev, struct omap_prm *prm)
{
struct omap_prm_domain *prmd;
struct device_node *np = dev->of_node;
const struct omap_prm_data *data;
const char *name;
int error;
if (!of_property_present(dev->of_node, "#power-domain-cells"))
return 0;
of_node_put(dev->of_node);
prmd = devm_kzalloc(dev, sizeof(*prmd), GFP_KERNEL);
if (!prmd)
return -ENOMEM;
data = prm->data;
name = devm_kasprintf(dev, GFP_KERNEL, "prm_%s",
data->name);
prmd->dev = dev;
prmd->prm = prm;
prmd->cap = prmd->prm->data->dmap;
prmd->pwrstctrl = prmd->prm->data->pwrstctrl;
prmd->pwrstst = prmd->prm->data->pwrstst;
prmd->pd.name = name;
prmd->pd.power_on = omap_prm_domain_power_on;
prmd->pd.power_off = omap_prm_domain_power_off;
prmd->pd.attach_dev = omap_prm_domain_attach_dev;
prmd->pd.detach_dev = omap_prm_domain_detach_dev;
prmd->pd.flags = GENPD_FLAG_PM_CLK;
pm_genpd_init(&prmd->pd, NULL, true);
error = of_genpd_add_provider_simple(np, &prmd->pd);
if (error)
pm_genpd_remove(&prmd->pd);
else
prm->prmd = prmd;
return error;
}
static bool _is_valid_reset(struct omap_reset_data *reset, unsigned long id)
{
if (reset->mask & BIT(id))
return true;
return false;
}
static int omap_reset_get_st_bit(struct omap_reset_data *reset,
unsigned long id)
{
const struct omap_rst_map *map = reset->prm->data->rstmap;
while (map->rst >= 0) {
if (map->rst == id)
return map->st;
map++;
}
return id;
}
static int omap_reset_status(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct omap_reset_data *reset = to_omap_reset_data(rcdev);
u32 v;
int st_bit = omap_reset_get_st_bit(reset, id);
bool has_rstst = reset->prm->data->rstst ||
(reset->prm->data->flags & OMAP_PRM_HAS_RSTST);
/* Check if we have rstst */
if (!has_rstst)
return -ENOTSUPP;
/* Check if hw reset line is asserted */
v = readl_relaxed(reset->prm->base + reset->prm->data->rstctrl);
if (v & BIT(id))
return 1;
/*
* Check reset status, high value means reset sequence has been
* completed successfully so we can return 0 here (reset deasserted)
*/
v = readl_relaxed(reset->prm->base + reset->prm->data->rstst);
v >>= st_bit;
v &= 1;
return !v;
}
static int omap_reset_assert(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct omap_reset_data *reset = to_omap_reset_data(rcdev);
u32 v;
unsigned long flags;
/* assert the reset control line */
spin_lock_irqsave(&reset->lock, flags);
v = readl_relaxed(reset->prm->base + reset->prm->data->rstctrl);
v |= 1 << id;
writel_relaxed(v, reset->prm->base + reset->prm->data->rstctrl);
spin_unlock_irqrestore(&reset->lock, flags);
return 0;
}
static int omap_reset_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct omap_reset_data *reset = to_omap_reset_data(rcdev);
u32 v;
int st_bit;
bool has_rstst;
unsigned long flags;
struct ti_prm_platform_data *pdata = dev_get_platdata(reset->dev);
int ret = 0;
/* Nothing to do if the reset is already deasserted */
if (!omap_reset_status(rcdev, id))
return 0;
has_rstst = reset->prm->data->rstst ||
(reset->prm->data->flags & OMAP_PRM_HAS_RSTST);
if (has_rstst) {
st_bit = omap_reset_get_st_bit(reset, id);
/* Clear the reset status by writing 1 to the status bit */
v = 1 << st_bit;
writel_relaxed(v, reset->prm->base + reset->prm->data->rstst);
}
if (reset->clkdm)
pdata->clkdm_deny_idle(reset->clkdm);
/* de-assert the reset control line */
spin_lock_irqsave(&reset->lock, flags);
v = readl_relaxed(reset->prm->base + reset->prm->data->rstctrl);
v &= ~(1 << id);
writel_relaxed(v, reset->prm->base + reset->prm->data->rstctrl);
spin_unlock_irqrestore(&reset->lock, flags);
/* wait for the reset bit to clear */
ret = readl_relaxed_poll_timeout_atomic(reset->prm->base +
reset->prm->data->rstctrl,
v, !(v & BIT(id)), 1,
OMAP_RESET_MAX_WAIT);
if (ret)
pr_err("%s: timedout waiting for %s:%lu\n", __func__,
reset->prm->data->name, id);
/* wait for the status to be set */
if (has_rstst) {
ret = readl_relaxed_poll_timeout_atomic(reset->prm->base +
reset->prm->data->rstst,
v, v & BIT(st_bit), 1,
OMAP_RESET_MAX_WAIT);
if (ret)
pr_err("%s: timedout waiting for %s:%lu\n", __func__,
reset->prm->data->name, id);
}
if (reset->clkdm)
pdata->clkdm_allow_idle(reset->clkdm);
return ret;
}
static const struct reset_control_ops omap_reset_ops = {
.assert = omap_reset_assert,
.deassert = omap_reset_deassert,
.status = omap_reset_status,
};
static int omap_prm_reset_xlate(struct reset_controller_dev *rcdev,
const struct of_phandle_args *reset_spec)
{
struct omap_reset_data *reset = to_omap_reset_data(rcdev);
if (!_is_valid_reset(reset, reset_spec->args[0]))
return -EINVAL;
return reset_spec->args[0];
}
static int omap_prm_reset_init(struct platform_device *pdev,
struct omap_prm *prm)
{
struct omap_reset_data *reset;
const struct omap_rst_map *map;
struct ti_prm_platform_data *pdata = dev_get_platdata(&pdev->dev);
char buf[32];
u32 v;
/*
* Check if we have controllable resets. If either rstctrl is non-zero
* or OMAP_PRM_HAS_RSTCTRL flag is set, we have reset control register
* for the domain.
*/
if (!prm->data->rstctrl && !(prm->data->flags & OMAP_PRM_HAS_RSTCTRL))
return 0;
/* Check if we have the pdata callbacks in place */
if (!pdata || !pdata->clkdm_lookup || !pdata->clkdm_deny_idle ||
!pdata->clkdm_allow_idle)
return -EINVAL;
map = prm->data->rstmap;
if (!map)
return -EINVAL;
reset = devm_kzalloc(&pdev->dev, sizeof(*reset), GFP_KERNEL);
if (!reset)
return -ENOMEM;
reset->rcdev.owner = THIS_MODULE;
reset->rcdev.ops = &omap_reset_ops;
reset->rcdev.of_node = pdev->dev.of_node;
reset->rcdev.nr_resets = OMAP_MAX_RESETS;
reset->rcdev.of_xlate = omap_prm_reset_xlate;
reset->rcdev.of_reset_n_cells = 1;
reset->dev = &pdev->dev;
spin_lock_init(&reset->lock);
reset->prm = prm;
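/*
 * The clockdomain is looked up by "<name>_clkdm", using the optional
 * clkdm_name override from the instance data above (e.g. the AM4 pruss
 * entry yields "pruss_ocp_clkdm").
 */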
sprintf(buf, "%s_clkdm", prm->data->clkdm_name ? prm->data->clkdm_name :
prm->data->name);
if (!(prm->data->flags & OMAP_PRM_HAS_NO_CLKDM)) {
reset->clkdm = pdata->clkdm_lookup(buf);
if (!reset->clkdm)
return -EINVAL;
}
while (map->rst >= 0) {
reset->mask |= BIT(map->rst);
map++;
}
/* Quirk handling to assert rst_map_012 bits on reset and avoid errors */
if (prm->data->rstmap == rst_map_012) {
v = readl_relaxed(reset->prm->base + reset->prm->data->rstctrl);
if ((v & reset->mask) != reset->mask) {
dev_dbg(&pdev->dev, "Asserting all resets: %08x\n", v);
writel_relaxed(reset->mask, reset->prm->base +
reset->prm->data->rstctrl);
}
}
return devm_reset_controller_register(&pdev->dev, &reset->rcdev);
}
static int omap_prm_probe(struct platform_device *pdev)
{
struct resource *res;
const struct omap_prm_data *data;
struct omap_prm *prm;
int ret;
data = of_device_get_match_data(&pdev->dev);
if (!data)
return -ENOTSUPP;
prm = devm_kzalloc(&pdev->dev, sizeof(*prm), GFP_KERNEL);
if (!prm)
return -ENOMEM;
prm->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(prm->base))
return PTR_ERR(prm->base);
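/*
 * Walk the per-SoC instance table until the entry whose base address
 * matches this device's MMIO resource; fail if no entry matches.
 */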
while (data->base != res->start) {
if (!data->base)
return -EINVAL;
data++;
}
prm->data = data;
ret = omap_prm_domain_init(&pdev->dev, prm);
if (ret)
return ret;
ret = omap_prm_reset_init(pdev, prm);
if (ret)
goto err_domain;
return 0;
err_domain:
of_genpd_del_provider(pdev->dev.of_node);
pm_genpd_remove(&prm->prmd->pd);
return ret;
}
static struct platform_driver omap_prm_driver = {
.probe = omap_prm_probe,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = omap_prm_id_table,
},
};
builtin_platform_driver(omap_prm_driver);
| linux-master | drivers/pmdomain/ti/omap_prm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2020 TOSHIBA CORPORATION
* Copyright (c) 2020 Toshiba Electronic Devices & Storage Corporation
* Copyright (c) 2020 Nobuhiro Iwamatsu <[email protected]>
*/
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>
#define WDT_CNT 0x00
#define WDT_MIN 0x04
#define WDT_MAX 0x08
#define WDT_CTL 0x0c
#define WDT_CMD 0x10
#define WDT_CMD_CLEAR 0x4352
#define WDT_CMD_START_STOP 0x5354
#define WDT_DIV 0x30
#define VISCONTI_WDT_FREQ 2000000 /* 2MHz */
#define WDT_DEFAULT_TIMEOUT 10U /* in seconds */
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(
nowayout,
"Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT)")");
struct visconti_wdt_priv {
struct watchdog_device wdev;
void __iomem *base;
u32 div;
};
static int visconti_wdt_start(struct watchdog_device *wdev)
{
struct visconti_wdt_priv *priv = watchdog_get_drvdata(wdev);
u32 timeout = wdev->timeout * VISCONTI_WDT_FREQ;
writel(priv->div, priv->base + WDT_DIV);
writel(0, priv->base + WDT_MIN);
writel(timeout, priv->base + WDT_MAX);
writel(0, priv->base + WDT_CTL);
writel(WDT_CMD_START_STOP, priv->base + WDT_CMD);
return 0;
}
static int visconti_wdt_stop(struct watchdog_device *wdev)
{
struct visconti_wdt_priv *priv = watchdog_get_drvdata(wdev);
writel(1, priv->base + WDT_CTL);
writel(WDT_CMD_START_STOP, priv->base + WDT_CMD);
return 0;
}
static int visconti_wdt_ping(struct watchdog_device *wdd)
{
struct visconti_wdt_priv *priv = watchdog_get_drvdata(wdd);
writel(WDT_CMD_CLEAR, priv->base + WDT_CMD);
return 0;
}
static unsigned int visconti_wdt_get_timeleft(struct watchdog_device *wdev)
{
struct visconti_wdt_priv *priv = watchdog_get_drvdata(wdev);
u32 timeout = wdev->timeout * VISCONTI_WDT_FREQ;
u32 cnt = readl(priv->base + WDT_CNT);
if (timeout <= cnt)
return 0;
timeout -= cnt;
return timeout / VISCONTI_WDT_FREQ;
}
static int visconti_wdt_set_timeout(struct watchdog_device *wdev, unsigned int timeout)
{
u32 val;
struct visconti_wdt_priv *priv = watchdog_get_drvdata(wdev);
wdev->timeout = timeout;
val = wdev->timeout * VISCONTI_WDT_FREQ;
/* Clear the counter before setting the timeout so the WDT does not expire while the new value is written */
writel(WDT_CMD_CLEAR, priv->base + WDT_CMD);
writel(val, priv->base + WDT_MAX);
return 0;
}
static const struct watchdog_info visconti_wdt_info = {
.options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
.identity = "Visconti Watchdog",
};
static const struct watchdog_ops visconti_wdt_ops = {
.owner = THIS_MODULE,
.start = visconti_wdt_start,
.stop = visconti_wdt_stop,
.ping = visconti_wdt_ping,
.get_timeleft = visconti_wdt_get_timeleft,
.set_timeout = visconti_wdt_set_timeout,
};
static int visconti_wdt_probe(struct platform_device *pdev)
{
struct watchdog_device *wdev;
struct visconti_wdt_priv *priv;
struct device *dev = &pdev->dev;
struct clk *clk;
int ret;
unsigned long clk_freq;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(clk))
return dev_err_probe(dev, PTR_ERR(clk), "Could not get clock\n");
clk_freq = clk_get_rate(clk);
if (!clk_freq)
return -EINVAL;
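/*
 * Derive the divider so the watchdog counter ticks at the fixed 2 MHz
 * rate assumed by the timeout calculations below.
 */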
priv->div = clk_freq / VISCONTI_WDT_FREQ;
/* Initialize struct watchdog_device. */
wdev = &priv->wdev;
wdev->info = &visconti_wdt_info;
wdev->ops = &visconti_wdt_ops;
wdev->parent = dev;
wdev->min_timeout = 1;
wdev->max_timeout = 0xffffffff / VISCONTI_WDT_FREQ;
wdev->timeout = min(wdev->max_timeout, WDT_DEFAULT_TIMEOUT);
watchdog_set_drvdata(wdev, priv);
watchdog_set_nowayout(wdev, nowayout);
watchdog_stop_on_unregister(wdev);
/* This overrides the default timeout only if DT configuration was found */
ret = watchdog_init_timeout(wdev, 0, dev);
if (ret)
dev_warn(dev, "Specified timeout value invalid, using default\n");
return devm_watchdog_register_device(dev, wdev);
}
static const struct of_device_id visconti_wdt_of_match[] = {
{ .compatible = "toshiba,visconti-wdt", },
{}
};
MODULE_DEVICE_TABLE(of, visconti_wdt_of_match);
static struct platform_driver visconti_wdt_driver = {
.driver = {
.name = "visconti_wdt",
.of_match_table = visconti_wdt_of_match,
},
.probe = visconti_wdt_probe,
};
module_platform_driver(visconti_wdt_driver);
MODULE_DESCRIPTION("TOSHIBA Visconti Watchdog Driver");
MODULE_AUTHOR("Nobuhiro Iwamatsu <[email protected]");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/watchdog/visconti_wdt.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Kontron PLD watchdog driver
*
* Copyright (c) 2010-2013 Kontron Europe GmbH
* Author: Michael Brunner <[email protected]>
*
* Note: From the PLD watchdog point of view, timeout and pretimeout are
* defined differently than in the kernel.
* The pretimeout stage runs out first, before the timeout stage
* becomes active.
*
* Kernel/API: P-----| pretimeout
* |-----------------------T timeout
* Watchdog: |-----------------P pretimeout_stage
* |-----T timeout_stage
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/uaccess.h>
#include <linux/watchdog.h>
#include <linux/platform_device.h>
#include <linux/mfd/kempld.h>
#define KEMPLD_WDT_STAGE_TIMEOUT(x) (0x1b + (x) * 4)
#define KEMPLD_WDT_STAGE_CFG(x) (0x18 + (x))
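/*
 * For example (following directly from the macros above), stage 0 uses
 * the timeout register at offset 0x1b and the config register at 0x18,
 * while stage 1 uses 0x1f and 0x19.
 */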
#define STAGE_CFG_GET_PRESCALER(x) (((x) & 0x30) >> 4)
#define STAGE_CFG_SET_PRESCALER(x) (((x) & 0x3) << 4)
#define STAGE_CFG_PRESCALER_MASK 0x30
#define STAGE_CFG_ACTION_MASK 0x7
#define STAGE_CFG_ASSERT (1 << 3)
#define KEMPLD_WDT_MAX_STAGES 2
#define KEMPLD_WDT_KICK 0x16
#define KEMPLD_WDT_CFG 0x17
#define KEMPLD_WDT_CFG_ENABLE 0x10
#define KEMPLD_WDT_CFG_ENABLE_LOCK 0x8
#define KEMPLD_WDT_CFG_GLOBAL_LOCK 0x80
enum {
ACTION_NONE = 0,
ACTION_RESET,
ACTION_NMI,
ACTION_SMI,
ACTION_SCI,
ACTION_DELAY,
};
enum {
STAGE_TIMEOUT = 0,
STAGE_PRETIMEOUT,
};
enum {
PRESCALER_21 = 0,
PRESCALER_17,
PRESCALER_12,
};
static const u32 kempld_prescaler[] = {
[PRESCALER_21] = (1 << 21) - 1,
[PRESCALER_17] = (1 << 17) - 1,
[PRESCALER_12] = (1 << 12) - 1,
0,
};
struct kempld_wdt_stage {
unsigned int id;
u32 mask;
};
struct kempld_wdt_data {
struct kempld_device_data *pld;
struct watchdog_device wdd;
unsigned int pretimeout;
struct kempld_wdt_stage stage[KEMPLD_WDT_MAX_STAGES];
u8 pm_status_store;
};
#define DEFAULT_TIMEOUT 30 /* seconds */
#define DEFAULT_PRETIMEOUT 0
static unsigned int timeout = DEFAULT_TIMEOUT;
module_param(timeout, uint, 0);
MODULE_PARM_DESC(timeout,
"Watchdog timeout in seconds. (>=0, default="
__MODULE_STRING(DEFAULT_TIMEOUT) ")");
static unsigned int pretimeout = DEFAULT_PRETIMEOUT;
module_param(pretimeout, uint, 0);
MODULE_PARM_DESC(pretimeout,
"Watchdog pretimeout in seconds. (>=0, default="
__MODULE_STRING(DEFAULT_PRETIMEOUT) ")");
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
static int kempld_wdt_set_stage_action(struct kempld_wdt_data *wdt_data,
struct kempld_wdt_stage *stage,
u8 action)
{
struct kempld_device_data *pld = wdt_data->pld;
u8 stage_cfg;
if (!stage || !stage->mask)
return -EINVAL;
kempld_get_mutex(pld);
stage_cfg = kempld_read8(pld, KEMPLD_WDT_STAGE_CFG(stage->id));
stage_cfg &= ~STAGE_CFG_ACTION_MASK;
stage_cfg |= (action & STAGE_CFG_ACTION_MASK);
if (action == ACTION_RESET)
stage_cfg |= STAGE_CFG_ASSERT;
else
stage_cfg &= ~STAGE_CFG_ASSERT;
kempld_write8(pld, KEMPLD_WDT_STAGE_CFG(stage->id), stage_cfg);
kempld_release_mutex(pld);
return 0;
}
static int kempld_wdt_set_stage_timeout(struct kempld_wdt_data *wdt_data,
struct kempld_wdt_stage *stage,
unsigned int timeout)
{
struct kempld_device_data *pld = wdt_data->pld;
u32 prescaler;
u64 stage_timeout64;
u32 stage_timeout;
u32 remainder;
u8 stage_cfg;
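/*
 * The stage counter is programmed in prescaled PLD clock ticks:
 * ticks = DIV_ROUND_UP(timeout * pld_clock, prescaler), using the fixed
 * 2^21 - 1 prescaler selected below.
 */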
prescaler = kempld_prescaler[PRESCALER_21];
if (!stage)
return -EINVAL;
stage_timeout64 = (u64)timeout * pld->pld_clock;
remainder = do_div(stage_timeout64, prescaler);
if (remainder)
stage_timeout64++;
if (stage_timeout64 > stage->mask)
return -EINVAL;
stage_timeout = stage_timeout64 & stage->mask;
kempld_get_mutex(pld);
stage_cfg = kempld_read8(pld, KEMPLD_WDT_STAGE_CFG(stage->id));
stage_cfg &= ~STAGE_CFG_PRESCALER_MASK;
stage_cfg |= STAGE_CFG_SET_PRESCALER(PRESCALER_21);
kempld_write8(pld, KEMPLD_WDT_STAGE_CFG(stage->id), stage_cfg);
kempld_write32(pld, KEMPLD_WDT_STAGE_TIMEOUT(stage->id),
stage_timeout);
kempld_release_mutex(pld);
return 0;
}
/*
* kempld_get_mutex must be called prior to calling this function.
*/
static unsigned int kempld_wdt_get_timeout(struct kempld_wdt_data *wdt_data,
struct kempld_wdt_stage *stage)
{
struct kempld_device_data *pld = wdt_data->pld;
unsigned int timeout;
u64 stage_timeout;
u32 prescaler;
u32 remainder;
u8 stage_cfg;
if (!stage->mask)
return 0;
stage_cfg = kempld_read8(pld, KEMPLD_WDT_STAGE_CFG(stage->id));
stage_timeout = kempld_read32(pld, KEMPLD_WDT_STAGE_TIMEOUT(stage->id));
prescaler = kempld_prescaler[STAGE_CFG_GET_PRESCALER(stage_cfg)];
stage_timeout = (stage_timeout & stage->mask) * prescaler;
remainder = do_div(stage_timeout, pld->pld_clock);
if (remainder)
stage_timeout++;
timeout = stage_timeout;
WARN_ON_ONCE(timeout != stage_timeout);
return timeout;
}
static int kempld_wdt_set_timeout(struct watchdog_device *wdd,
unsigned int timeout)
{
struct kempld_wdt_data *wdt_data = watchdog_get_drvdata(wdd);
struct kempld_wdt_stage *pretimeout_stage;
struct kempld_wdt_stage *timeout_stage;
int ret;
timeout_stage = &wdt_data->stage[STAGE_TIMEOUT];
pretimeout_stage = &wdt_data->stage[STAGE_PRETIMEOUT];
if (pretimeout_stage->mask && wdt_data->pretimeout > 0)
timeout = wdt_data->pretimeout;
ret = kempld_wdt_set_stage_action(wdt_data, timeout_stage,
ACTION_RESET);
if (ret)
return ret;
ret = kempld_wdt_set_stage_timeout(wdt_data, timeout_stage,
timeout);
if (ret)
return ret;
wdd->timeout = timeout;
return 0;
}
static int kempld_wdt_set_pretimeout(struct watchdog_device *wdd,
unsigned int pretimeout)
{
struct kempld_wdt_data *wdt_data = watchdog_get_drvdata(wdd);
struct kempld_wdt_stage *pretimeout_stage;
u8 action = ACTION_NONE;
int ret;
pretimeout_stage = &wdt_data->stage[STAGE_PRETIMEOUT];
if (!pretimeout_stage->mask)
return -ENXIO;
if (pretimeout > wdd->timeout)
return -EINVAL;
if (pretimeout > 0)
action = ACTION_NMI;
ret = kempld_wdt_set_stage_action(wdt_data, pretimeout_stage,
action);
if (ret)
return ret;
ret = kempld_wdt_set_stage_timeout(wdt_data, pretimeout_stage,
wdd->timeout - pretimeout);
if (ret)
return ret;
wdt_data->pretimeout = pretimeout;
return 0;
}
static void kempld_wdt_update_timeouts(struct kempld_wdt_data *wdt_data)
{
struct kempld_device_data *pld = wdt_data->pld;
struct kempld_wdt_stage *pretimeout_stage;
struct kempld_wdt_stage *timeout_stage;
unsigned int pretimeout, timeout;
pretimeout_stage = &wdt_data->stage[STAGE_PRETIMEOUT];
timeout_stage = &wdt_data->stage[STAGE_TIMEOUT];
kempld_get_mutex(pld);
pretimeout = kempld_wdt_get_timeout(wdt_data, pretimeout_stage);
timeout = kempld_wdt_get_timeout(wdt_data, timeout_stage);
kempld_release_mutex(pld);
if (pretimeout)
wdt_data->pretimeout = timeout;
else
wdt_data->pretimeout = 0;
wdt_data->wdd.timeout = pretimeout + timeout;
}
static int kempld_wdt_start(struct watchdog_device *wdd)
{
struct kempld_wdt_data *wdt_data = watchdog_get_drvdata(wdd);
struct kempld_device_data *pld = wdt_data->pld;
u8 status;
int ret;
ret = kempld_wdt_set_timeout(wdd, wdd->timeout);
if (ret)
return ret;
kempld_get_mutex(pld);
status = kempld_read8(pld, KEMPLD_WDT_CFG);
status |= KEMPLD_WDT_CFG_ENABLE;
kempld_write8(pld, KEMPLD_WDT_CFG, status);
status = kempld_read8(pld, KEMPLD_WDT_CFG);
kempld_release_mutex(pld);
/* Check if the watchdog was enabled */
if (!(status & KEMPLD_WDT_CFG_ENABLE))
return -EACCES;
return 0;
}
static int kempld_wdt_stop(struct watchdog_device *wdd)
{
struct kempld_wdt_data *wdt_data = watchdog_get_drvdata(wdd);
struct kempld_device_data *pld = wdt_data->pld;
u8 status;
kempld_get_mutex(pld);
status = kempld_read8(pld, KEMPLD_WDT_CFG);
status &= ~KEMPLD_WDT_CFG_ENABLE;
kempld_write8(pld, KEMPLD_WDT_CFG, status);
status = kempld_read8(pld, KEMPLD_WDT_CFG);
kempld_release_mutex(pld);
/* Check if the watchdog was disabled */
if (status & KEMPLD_WDT_CFG_ENABLE)
return -EACCES;
return 0;
}
static int kempld_wdt_keepalive(struct watchdog_device *wdd)
{
struct kempld_wdt_data *wdt_data = watchdog_get_drvdata(wdd);
struct kempld_device_data *pld = wdt_data->pld;
kempld_get_mutex(pld);
kempld_write8(pld, KEMPLD_WDT_KICK, 'K');
kempld_release_mutex(pld);
return 0;
}
static long kempld_wdt_ioctl(struct watchdog_device *wdd, unsigned int cmd,
unsigned long arg)
{
struct kempld_wdt_data *wdt_data = watchdog_get_drvdata(wdd);
void __user *argp = (void __user *)arg;
int ret = -ENOIOCTLCMD;
int __user *p = argp;
int new_value;
switch (cmd) {
case WDIOC_SETPRETIMEOUT:
if (get_user(new_value, p))
return -EFAULT;
ret = kempld_wdt_set_pretimeout(wdd, new_value);
if (ret)
return ret;
ret = kempld_wdt_keepalive(wdd);
break;
case WDIOC_GETPRETIMEOUT:
ret = put_user(wdt_data->pretimeout, (int __user *)arg);
break;
}
return ret;
}
static int kempld_wdt_probe_stages(struct watchdog_device *wdd)
{
struct kempld_wdt_data *wdt_data = watchdog_get_drvdata(wdd);
struct kempld_device_data *pld = wdt_data->pld;
struct kempld_wdt_stage *pretimeout_stage;
struct kempld_wdt_stage *timeout_stage;
u8 index, data, data_orig;
u32 mask;
int i, j;
pretimeout_stage = &wdt_data->stage[STAGE_PRETIMEOUT];
timeout_stage = &wdt_data->stage[STAGE_TIMEOUT];
pretimeout_stage->mask = 0;
timeout_stage->mask = 0;
for (i = 0; i < 3; i++) {
index = KEMPLD_WDT_STAGE_TIMEOUT(i);
mask = 0;
kempld_get_mutex(pld);
/* Probe each byte individually. */
for (j = 0; j < 4; j++) {
data_orig = kempld_read8(pld, index + j);
kempld_write8(pld, index + j, 0x00);
data = kempld_read8(pld, index + j);
/* A failed write means this byte is reserved */
if (data != 0x00)
break;
kempld_write8(pld, index + j, data_orig);
mask |= 0xff << (j * 8);
}
kempld_release_mutex(pld);
/* Assign available stages to timeout and pretimeout */
if (!timeout_stage->mask) {
timeout_stage->mask = mask;
timeout_stage->id = i;
} else {
if (pld->feature_mask & KEMPLD_FEATURE_BIT_NMI) {
pretimeout_stage->mask = timeout_stage->mask;
timeout_stage->mask = mask;
pretimeout_stage->id = timeout_stage->id;
timeout_stage->id = i;
}
break;
}
}
if (!timeout_stage->mask)
return -ENODEV;
return 0;
}
static const struct watchdog_info kempld_wdt_info = {
.identity = "KEMPLD Watchdog",
.options = WDIOF_SETTIMEOUT |
WDIOF_KEEPALIVEPING |
WDIOF_MAGICCLOSE |
WDIOF_PRETIMEOUT
};
static const struct watchdog_ops kempld_wdt_ops = {
.owner = THIS_MODULE,
.start = kempld_wdt_start,
.stop = kempld_wdt_stop,
.ping = kempld_wdt_keepalive,
.set_timeout = kempld_wdt_set_timeout,
.ioctl = kempld_wdt_ioctl,
};
static int kempld_wdt_probe(struct platform_device *pdev)
{
struct kempld_device_data *pld = dev_get_drvdata(pdev->dev.parent);
struct kempld_wdt_data *wdt_data;
struct device *dev = &pdev->dev;
struct watchdog_device *wdd;
u8 status;
int ret = 0;
wdt_data = devm_kzalloc(dev, sizeof(*wdt_data), GFP_KERNEL);
if (!wdt_data)
return -ENOMEM;
wdt_data->pld = pld;
wdd = &wdt_data->wdd;
wdd->parent = dev;
kempld_get_mutex(pld);
status = kempld_read8(pld, KEMPLD_WDT_CFG);
kempld_release_mutex(pld);
/* Enable nowayout if watchdog is already locked */
if (status & (KEMPLD_WDT_CFG_ENABLE_LOCK |
KEMPLD_WDT_CFG_GLOBAL_LOCK)) {
if (!nowayout)
dev_warn(dev,
"Forcing nowayout - watchdog lock enabled!\n");
nowayout = true;
}
wdd->info = &kempld_wdt_info;
wdd->ops = &kempld_wdt_ops;
watchdog_set_drvdata(wdd, wdt_data);
watchdog_set_nowayout(wdd, nowayout);
ret = kempld_wdt_probe_stages(wdd);
if (ret)
return ret;
kempld_wdt_set_timeout(wdd, timeout);
kempld_wdt_set_pretimeout(wdd, pretimeout);
/* Check if watchdog is already enabled */
if (status & KEMPLD_WDT_CFG_ENABLE) {
/* Get current watchdog settings */
kempld_wdt_update_timeouts(wdt_data);
dev_info(dev, "Watchdog was already enabled\n");
}
platform_set_drvdata(pdev, wdt_data);
watchdog_stop_on_reboot(wdd);
watchdog_stop_on_unregister(wdd);
ret = devm_watchdog_register_device(dev, wdd);
if (ret)
return ret;
dev_info(dev, "Watchdog registered with %ds timeout\n", wdd->timeout);
return 0;
}
/* Disable watchdog if it is active during suspend */
static int kempld_wdt_suspend(struct platform_device *pdev,
pm_message_t message)
{
struct kempld_wdt_data *wdt_data = platform_get_drvdata(pdev);
struct kempld_device_data *pld = wdt_data->pld;
struct watchdog_device *wdd = &wdt_data->wdd;
kempld_get_mutex(pld);
wdt_data->pm_status_store = kempld_read8(pld, KEMPLD_WDT_CFG);
kempld_release_mutex(pld);
kempld_wdt_update_timeouts(wdt_data);
if (wdt_data->pm_status_store & KEMPLD_WDT_CFG_ENABLE)
return kempld_wdt_stop(wdd);
return 0;
}
/* Enable watchdog and configure it if necessary */
static int kempld_wdt_resume(struct platform_device *pdev)
{
struct kempld_wdt_data *wdt_data = platform_get_drvdata(pdev);
struct watchdog_device *wdd = &wdt_data->wdd;
/*
* If the watchdog was stopped before suspend, make sure it gets disabled
* again, in case the BIOS has enabled it during resume
*/
if (wdt_data->pm_status_store & KEMPLD_WDT_CFG_ENABLE)
return kempld_wdt_start(wdd);
else
return kempld_wdt_stop(wdd);
}
static struct platform_driver kempld_wdt_driver = {
.driver = {
.name = "kempld-wdt",
},
.probe = kempld_wdt_probe,
.suspend = pm_ptr(kempld_wdt_suspend),
.resume = pm_ptr(kempld_wdt_resume),
};
module_platform_driver(kempld_wdt_driver);
MODULE_DESCRIPTION("KEM PLD Watchdog Driver");
MODULE_AUTHOR("Michael Brunner <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/watchdog/kempld_wdt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Watchdog driver for Alphascale ASM9260.
*
* Copyright (c) 2014 Oleksij Rempel <[email protected]>
*/
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/watchdog.h>
#define CLOCK_FREQ 1000000
/* Watchdog Mode register */
#define HW_WDMOD 0x00
/* Wake interrupt. Set by HW, can't be cleared. */
#define BM_MOD_WDINT BIT(3)
/* This bit set if timeout reached. Cleared by SW. */
#define BM_MOD_WDTOF BIT(2)
/* HW Reset on timeout */
#define BM_MOD_WDRESET BIT(1)
/* WD enable */
#define BM_MOD_WDEN BIT(0)
/*
* Watchdog Timer Constant register
* The minimum value is 0xff; the meaning of this value
* depends on the clock used: T = WDCLK * (0xff + 1) * 4
*/
#define HW_WDTC 0x04
#define BM_WDTC_MAX(freq) (0x7fffffff / (freq))
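/*
 * For example, with the 1 MHz "mod" clock requested at probe time and the
 * internal divide-by-two (wdt_freq = 500 kHz), this caps the timeout at
 * roughly 0x7fffffff / 500000 ~= 4294 seconds.
 */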
/* Watchdog Feed register */
#define HW_WDFEED 0x08
/* Watchdog Timer Value register */
#define HW_WDTV 0x0c
#define ASM9260_WDT_DEFAULT_TIMEOUT 30
enum asm9260_wdt_mode {
HW_RESET,
SW_RESET,
DEBUG,
};
struct asm9260_wdt_priv {
struct device *dev;
struct watchdog_device wdd;
struct clk *clk;
struct clk *clk_ahb;
struct reset_control *rst;
void __iomem *iobase;
int irq;
unsigned long wdt_freq;
enum asm9260_wdt_mode mode;
};
static int asm9260_wdt_feed(struct watchdog_device *wdd)
{
struct asm9260_wdt_priv *priv = watchdog_get_drvdata(wdd);
iowrite32(0xaa, priv->iobase + HW_WDFEED);
iowrite32(0x55, priv->iobase + HW_WDFEED);
return 0;
}
static unsigned int asm9260_wdt_gettimeleft(struct watchdog_device *wdd)
{
struct asm9260_wdt_priv *priv = watchdog_get_drvdata(wdd);
u32 counter;
counter = ioread32(priv->iobase + HW_WDTV);
return counter / priv->wdt_freq;
}
static int asm9260_wdt_updatetimeout(struct watchdog_device *wdd)
{
struct asm9260_wdt_priv *priv = watchdog_get_drvdata(wdd);
u32 counter;
counter = wdd->timeout * priv->wdt_freq;
iowrite32(counter, priv->iobase + HW_WDTC);
return 0;
}
static int asm9260_wdt_enable(struct watchdog_device *wdd)
{
struct asm9260_wdt_priv *priv = watchdog_get_drvdata(wdd);
u32 mode = 0;
if (priv->mode == HW_RESET)
mode = BM_MOD_WDRESET;
iowrite32(BM_MOD_WDEN | mode, priv->iobase + HW_WDMOD);
asm9260_wdt_updatetimeout(wdd);
asm9260_wdt_feed(wdd);
return 0;
}
static int asm9260_wdt_disable(struct watchdog_device *wdd)
{
struct asm9260_wdt_priv *priv = watchdog_get_drvdata(wdd);
/* The only way to disable WD is to reset it. */
reset_control_assert(priv->rst);
reset_control_deassert(priv->rst);
return 0;
}
static int asm9260_wdt_settimeout(struct watchdog_device *wdd, unsigned int to)
{
wdd->timeout = to;
asm9260_wdt_updatetimeout(wdd);
return 0;
}
static void asm9260_wdt_sys_reset(struct asm9260_wdt_priv *priv)
{
/* init WD if it was not started */
iowrite32(BM_MOD_WDEN | BM_MOD_WDRESET, priv->iobase + HW_WDMOD);
iowrite32(0xff, priv->iobase + HW_WDTC);
/* first, feed the correct sequence */
asm9260_wdt_feed(&priv->wdd);
/*
* Then write wrong pattern to the feed to trigger reset
* ASAP.
*/
iowrite32(0xff, priv->iobase + HW_WDFEED);
mdelay(1000);
}
static irqreturn_t asm9260_wdt_irq(int irq, void *devid)
{
struct asm9260_wdt_priv *priv = devid;
u32 stat;
stat = ioread32(priv->iobase + HW_WDMOD);
if (!(stat & BM_MOD_WDINT))
return IRQ_NONE;
if (priv->mode == DEBUG) {
dev_info(priv->dev, "Watchdog Timeout. Do nothing.\n");
} else {
dev_info(priv->dev, "Watchdog Timeout. Doing SW Reset.\n");
asm9260_wdt_sys_reset(priv);
}
return IRQ_HANDLED;
}
static int asm9260_restart(struct watchdog_device *wdd, unsigned long action,
void *data)
{
struct asm9260_wdt_priv *priv = watchdog_get_drvdata(wdd);
asm9260_wdt_sys_reset(priv);
return 0;
}
static const struct watchdog_info asm9260_wdt_ident = {
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING
| WDIOF_MAGICCLOSE,
.identity = "Alphascale asm9260 Watchdog",
};
static const struct watchdog_ops asm9260_wdt_ops = {
.owner = THIS_MODULE,
.start = asm9260_wdt_enable,
.stop = asm9260_wdt_disable,
.get_timeleft = asm9260_wdt_gettimeleft,
.ping = asm9260_wdt_feed,
.set_timeout = asm9260_wdt_settimeout,
.restart = asm9260_restart,
};
static void asm9260_clk_disable_unprepare(void *data)
{
clk_disable_unprepare(data);
}
static int asm9260_wdt_get_dt_clks(struct asm9260_wdt_priv *priv)
{
int err;
unsigned long clk;
priv->clk = devm_clk_get(priv->dev, "mod");
if (IS_ERR(priv->clk)) {
dev_err(priv->dev, "Failed to get \"mod\" clk\n");
return PTR_ERR(priv->clk);
}
/* configure AHB clock */
priv->clk_ahb = devm_clk_get(priv->dev, "ahb");
if (IS_ERR(priv->clk_ahb)) {
dev_err(priv->dev, "Failed to get \"ahb\" clk\n");
return PTR_ERR(priv->clk_ahb);
}
err = clk_prepare_enable(priv->clk_ahb);
if (err) {
dev_err(priv->dev, "Failed to enable ahb_clk!\n");
return err;
}
err = devm_add_action_or_reset(priv->dev,
asm9260_clk_disable_unprepare,
priv->clk_ahb);
if (err)
return err;
err = clk_set_rate(priv->clk, CLOCK_FREQ);
if (err) {
dev_err(priv->dev, "Failed to set rate!\n");
return err;
}
err = clk_prepare_enable(priv->clk);
if (err) {
dev_err(priv->dev, "Failed to enable clk!\n");
return err;
}
err = devm_add_action_or_reset(priv->dev,
asm9260_clk_disable_unprepare,
priv->clk);
if (err)
return err;
/* wdt has internal divider */
clk = clk_get_rate(priv->clk);
if (!clk) {
dev_err(priv->dev, "Failed, clk is 0!\n");
return -EINVAL;
}
priv->wdt_freq = clk / 2;
return 0;
}
static void asm9260_wdt_get_dt_mode(struct asm9260_wdt_priv *priv)
{
const char *tmp;
int ret;
/* default mode */
priv->mode = HW_RESET;
ret = of_property_read_string(priv->dev->of_node,
"alphascale,mode", &tmp);
if (ret < 0)
return;
if (!strcmp(tmp, "hw"))
priv->mode = HW_RESET;
else if (!strcmp(tmp, "sw"))
priv->mode = SW_RESET;
else if (!strcmp(tmp, "debug"))
priv->mode = DEBUG;
else
dev_warn(priv->dev, "unknown reset-type: %s. Using default \"hw\" mode.",
tmp);
}
static int asm9260_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct asm9260_wdt_priv *priv;
struct watchdog_device *wdd;
int ret;
static const char * const mode_name[] = { "hw", "sw", "debug", };
priv = devm_kzalloc(dev, sizeof(struct asm9260_wdt_priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->dev = dev;
priv->iobase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->iobase))
return PTR_ERR(priv->iobase);
priv->rst = devm_reset_control_get_exclusive(dev, "wdt_rst");
if (IS_ERR(priv->rst))
return PTR_ERR(priv->rst);
ret = asm9260_wdt_get_dt_clks(priv);
if (ret)
return ret;
wdd = &priv->wdd;
wdd->info = &asm9260_wdt_ident;
wdd->ops = &asm9260_wdt_ops;
wdd->min_timeout = 1;
wdd->max_timeout = BM_WDTC_MAX(priv->wdt_freq);
wdd->parent = dev;
watchdog_set_drvdata(wdd, priv);
/*
* If 'timeout-sec' unspecified in devicetree, assume a 30 second
* default, unless the max timeout is less than 30 seconds, then use
* the max instead.
*/
wdd->timeout = ASM9260_WDT_DEFAULT_TIMEOUT;
watchdog_init_timeout(wdd, 0, dev);
asm9260_wdt_get_dt_mode(priv);
if (priv->mode != HW_RESET)
priv->irq = platform_get_irq(pdev, 0);
if (priv->irq > 0) {
/*
* Not all supported platforms specify an interrupt for the
* watchdog, so let's make it optional.
*/
ret = devm_request_irq(dev, priv->irq, asm9260_wdt_irq, 0,
pdev->name, priv);
if (ret < 0)
dev_warn(dev, "failed to request IRQ\n");
}
watchdog_set_restart_priority(wdd, 128);
watchdog_stop_on_reboot(wdd);
watchdog_stop_on_unregister(wdd);
ret = devm_watchdog_register_device(dev, wdd);
if (ret)
return ret;
platform_set_drvdata(pdev, priv);
dev_info(dev, "Watchdog enabled (timeout: %d sec, mode: %s)\n",
wdd->timeout, mode_name[priv->mode]);
return 0;
}
static const struct of_device_id asm9260_wdt_of_match[] = {
{ .compatible = "alphascale,asm9260-wdt"},
{},
};
MODULE_DEVICE_TABLE(of, asm9260_wdt_of_match);
static struct platform_driver asm9260_wdt_driver = {
.driver = {
.name = "asm9260-wdt",
.of_match_table = asm9260_wdt_of_match,
},
.probe = asm9260_wdt_probe,
};
module_platform_driver(asm9260_wdt_driver);
MODULE_DESCRIPTION("asm9260 WatchDog Timer Driver");
MODULE_AUTHOR("Oleksij Rempel <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/watchdog/asm9260_wdt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* IT8712F "Smart Guardian" Watchdog support
*
* Copyright (c) 2006-2007 Jorge Boncompte - DTI2 <[email protected]>
*
* Based on info and code taken from:
*
* drivers/char/watchdog/scx200_wdt.c
* drivers/hwmon/it87.c
* IT8712F EC-LPC I/O Preliminary Specification 0.8.2
* IT8712F EC-LPC I/O Preliminary Specification 0.9.3
*
* The author(s) of this software shall not be held liable for damages
* of any nature resulting due to the use of this software. This
* software is provided AS-IS with no warranties.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/fs.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ioport.h>
#define NAME "it8712f_wdt"
MODULE_AUTHOR("Jorge Boncompte - DTI2 <[email protected]>");
MODULE_DESCRIPTION("IT8712F Watchdog Driver");
MODULE_LICENSE("GPL");
static int max_units = 255;
static int margin = 60; /* in seconds */
module_param(margin, int, 0);
MODULE_PARM_DESC(margin, "Watchdog margin in seconds");
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Disable watchdog shutdown on close");
static unsigned long wdt_open;
static unsigned expect_close;
static unsigned char revision;
/* Dog Food address - We use the game port address */
static unsigned short address;
#define REG 0x2e /* The register to read/write */
#define VAL 0x2f /* The value to read/write */
#define LDN 0x07 /* Register: Logical device select */
#define DEVID 0x20 /* Register: Device ID */
#define DEVREV 0x22 /* Register: Device Revision */
#define ACT_REG 0x30 /* LDN Register: Activation */
#define BASE_REG 0x60 /* LDN Register: Base address */
#define IT8712F_DEVID 0x8712
#define LDN_GPIO 0x07 /* GPIO and Watch Dog Timer */
#define LDN_GAME 0x09 /* Game Port */
#define WDT_CONTROL 0x71 /* WDT Register: Control */
#define WDT_CONFIG 0x72 /* WDT Register: Configuration */
#define WDT_TIMEOUT 0x73 /* WDT Register: Timeout Value */
#define WDT_RESET_GAME 0x10 /* Reset timer on read or write to game port */
#define WDT_RESET_KBD 0x20 /* Reset timer on keyboard interrupt */
#define WDT_RESET_MOUSE 0x40 /* Reset timer on mouse interrupt */
#define WDT_RESET_CIR 0x80 /* Reset timer on consumer IR interrupt */
#define WDT_UNIT_SEC 0x80 /* If 0 in MINUTES */
#define WDT_OUT_PWROK 0x10 /* Pulse PWROK on timeout */
#define WDT_OUT_KRST 0x40 /* Pulse reset on timeout */
static int wdt_control_reg = WDT_RESET_GAME;
module_param(wdt_control_reg, int, 0);
MODULE_PARM_DESC(wdt_control_reg, "Value to write to watchdog control "
"register. The default WDT_RESET_GAME resets the timer on "
"game port reads that this driver generates. You can also "
"use KBD, MOUSE or CIR if you have some external way to "
"generate those interrupts.");
static int superio_inb(int reg)
{
outb(reg, REG);
return inb(VAL);
}
static void superio_outb(int val, int reg)
{
outb(reg, REG);
outb(val, VAL);
}
static int superio_inw(int reg)
{
int val;
outb(reg++, REG);
val = inb(VAL) << 8;
outb(reg, REG);
val |= inb(VAL);
return val;
}
static inline void superio_select(int ldn)
{
outb(LDN, REG);
outb(ldn, VAL);
}
static inline int superio_enter(void)
{
/*
* Try to reserve REG and REG + 1 for exclusive access.
*/
if (!request_muxed_region(REG, 2, NAME))
return -EBUSY;
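/* Magic entry sequence that unlocks the Super I/O configuration space. */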
outb(0x87, REG);
outb(0x01, REG);
outb(0x55, REG);
outb(0x55, REG);
return 0;
}
static inline void superio_exit(void)
{
outb(0x02, REG);
outb(0x02, VAL);
release_region(REG, 2);
}
static inline void it8712f_wdt_ping(void)
{
if (wdt_control_reg & WDT_RESET_GAME)
inb(address);
}
static void it8712f_wdt_update_margin(void)
{
int config = WDT_OUT_KRST | WDT_OUT_PWROK;
int units = margin;
/* Switch to minutes precision if the configured margin
* value does not fit within the register width.
*/
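/*
 * For example, with the default 8-bit timeout register (max_units = 255)
 * a margin of 300 seconds cannot be expressed in seconds and is instead
 * programmed as 5 minutes.
 */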
if (units <= max_units) {
config |= WDT_UNIT_SEC; /* else UNIT is MINUTES */
pr_info("timer margin %d seconds\n", units);
} else {
units /= 60;
pr_info("timer margin %d minutes\n", units);
}
superio_outb(config, WDT_CONFIG);
if (revision >= 0x08)
superio_outb(units >> 8, WDT_TIMEOUT + 1);
superio_outb(units, WDT_TIMEOUT);
}
static int it8712f_wdt_get_status(void)
{
if (superio_inb(WDT_CONTROL) & 0x01)
return WDIOF_CARDRESET;
else
return 0;
}
static int it8712f_wdt_enable(void)
{
int ret = superio_enter();
if (ret)
return ret;
pr_debug("enabling watchdog timer\n");
superio_select(LDN_GPIO);
superio_outb(wdt_control_reg, WDT_CONTROL);
it8712f_wdt_update_margin();
superio_exit();
it8712f_wdt_ping();
return 0;
}
static int it8712f_wdt_disable(void)
{
int ret = superio_enter();
if (ret)
return ret;
pr_debug("disabling watchdog timer\n");
superio_select(LDN_GPIO);
superio_outb(0, WDT_CONFIG);
superio_outb(0, WDT_CONTROL);
if (revision >= 0x08)
superio_outb(0, WDT_TIMEOUT + 1);
superio_outb(0, WDT_TIMEOUT);
superio_exit();
return 0;
}
static int it8712f_wdt_notify(struct notifier_block *this,
unsigned long code, void *unused)
{
if (code == SYS_HALT || code == SYS_POWER_OFF)
if (!nowayout)
it8712f_wdt_disable();
return NOTIFY_DONE;
}
static struct notifier_block it8712f_wdt_notifier = {
.notifier_call = it8712f_wdt_notify,
};
static ssize_t it8712f_wdt_write(struct file *file, const char __user *data,
size_t len, loff_t *ppos)
{
/* check for a magic close character */
if (len) {
size_t i;
it8712f_wdt_ping();
expect_close = 0;
for (i = 0; i < len; ++i) {
char c;
if (get_user(c, data + i))
return -EFAULT;
if (c == 'V')
expect_close = 42;
}
}
return len;
}
static long it8712f_wdt_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
void __user *argp = (void __user *)arg;
int __user *p = argp;
static const struct watchdog_info ident = {
.identity = "IT8712F Watchdog",
.firmware_version = 1,
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
WDIOF_MAGICCLOSE,
};
int value;
int ret;
switch (cmd) {
case WDIOC_GETSUPPORT:
if (copy_to_user(argp, &ident, sizeof(ident)))
return -EFAULT;
return 0;
case WDIOC_GETSTATUS:
ret = superio_enter();
if (ret)
return ret;
superio_select(LDN_GPIO);
value = it8712f_wdt_get_status();
superio_exit();
return put_user(value, p);
case WDIOC_GETBOOTSTATUS:
return put_user(0, p);
case WDIOC_KEEPALIVE:
it8712f_wdt_ping();
return 0;
case WDIOC_SETTIMEOUT:
if (get_user(value, p))
return -EFAULT;
if (value < 1)
return -EINVAL;
if (value > (max_units * 60))
return -EINVAL;
margin = value;
ret = superio_enter();
if (ret)
return ret;
superio_select(LDN_GPIO);
it8712f_wdt_update_margin();
superio_exit();
it8712f_wdt_ping();
fallthrough;
case WDIOC_GETTIMEOUT:
if (put_user(margin, p))
return -EFAULT;
return 0;
default:
return -ENOTTY;
}
}
static int it8712f_wdt_open(struct inode *inode, struct file *file)
{
int ret;
/* only allow one at a time */
if (test_and_set_bit(0, &wdt_open))
return -EBUSY;
ret = it8712f_wdt_enable();
if (ret)
return ret;
return stream_open(inode, file);
}
static int it8712f_wdt_release(struct inode *inode, struct file *file)
{
if (expect_close != 42) {
pr_warn("watchdog device closed unexpectedly, will not disable the watchdog timer\n");
} else if (!nowayout) {
if (it8712f_wdt_disable())
pr_warn("Watchdog disable failed\n");
}
expect_close = 0;
clear_bit(0, &wdt_open);
return 0;
}
static const struct file_operations it8712f_wdt_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.write = it8712f_wdt_write,
.unlocked_ioctl = it8712f_wdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.open = it8712f_wdt_open,
.release = it8712f_wdt_release,
};
static struct miscdevice it8712f_wdt_miscdev = {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &it8712f_wdt_fops,
};
static int __init it8712f_wdt_find(unsigned short *address)
{
int err = -ENODEV;
int chip_type;
int ret = superio_enter();
if (ret)
return ret;
chip_type = superio_inw(DEVID);
if (chip_type != IT8712F_DEVID)
goto exit;
superio_select(LDN_GAME);
superio_outb(1, ACT_REG);
if (!(superio_inb(ACT_REG) & 0x01)) {
pr_err("Device not activated, skipping\n");
goto exit;
}
*address = superio_inw(BASE_REG);
if (*address == 0) {
pr_err("Base address not set, skipping\n");
goto exit;
}
err = 0;
revision = superio_inb(DEVREV) & 0x0f;
/* Later revisions have 16-bit values per datasheet 0.9.1 */
if (revision >= 0x08)
max_units = 65535;
if (margin > (max_units * 60))
margin = (max_units * 60);
pr_info("Found IT%04xF chip revision %d - using DogFood address 0x%x\n",
chip_type, revision, *address);
exit:
superio_exit();
return err;
}
static int __init it8712f_wdt_init(void)
{
int err = 0;
if (it8712f_wdt_find(&address))
return -ENODEV;
if (!request_region(address, 1, "IT8712F Watchdog")) {
pr_warn("watchdog I/O region busy\n");
return -EBUSY;
}
err = it8712f_wdt_disable();
if (err) {
pr_err("unable to disable watchdog timer\n");
goto out;
}
err = register_reboot_notifier(&it8712f_wdt_notifier);
if (err) {
pr_err("unable to register reboot notifier\n");
goto out;
}
err = misc_register(&it8712f_wdt_miscdev);
if (err) {
pr_err("cannot register miscdev on minor=%d (err=%d)\n",
WATCHDOG_MINOR, err);
goto reboot_out;
}
return 0;
reboot_out:
unregister_reboot_notifier(&it8712f_wdt_notifier);
out:
release_region(address, 1);
return err;
}
static void __exit it8712f_wdt_exit(void)
{
misc_deregister(&it8712f_wdt_miscdev);
unregister_reboot_notifier(&it8712f_wdt_notifier);
release_region(address, 1);
}
module_init(it8712f_wdt_init);
module_exit(it8712f_wdt_exit);
| linux-master | drivers/watchdog/it8712f_wdt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Watchdog timer for PowerPC Book-E systems
*
* Author: Matthew McClintock
* Maintainer: Kumar Gala <[email protected]>
*
* Copyright 2005, 2008, 2010-2011 Freescale Semiconductor Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/watchdog.h>
#include <asm/reg_booke.h>
#include <asm/time.h>
#include <asm/div64.h>
/* If the kernel parameter wdt=1, the watchdog will be enabled at boot.
* Also, the wdt_period parameter sets the watchdog timer timeout period.
* For E500 cpus the wdt_period sets which bit changing from 0->1 will
* trigger a watchdog timeout. This watchdog timeout will occur 3 times, the
* first time nothing will happen, the second time a watchdog exception will
* occur, and the final time the board will reset.
*/
#ifdef CONFIG_PPC_E500
#define WDTP(x) ((((x)&0x3)<<30)|(((x)&0x3c)<<15))
#define WDTP_MASK (WDTP(0x3f))
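/*
 * On e500 the 6-bit period value is split across TCR: as the shifts above
 * show, the low two bits land at bit positions 31:30 and the upper four
 * bits at positions 20:17.
 */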
#else
#define WDTP(x) (TCR_WP(x))
#define WDTP_MASK (TCR_WP_MASK)
#endif
static bool booke_wdt_enabled;
module_param(booke_wdt_enabled, bool, 0);
static int booke_wdt_period = CONFIG_BOOKE_WDT_DEFAULT_TIMEOUT;
module_param(booke_wdt_period, int, 0);
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
#ifdef CONFIG_PPC_E500
/* For the specified period, determine the number of seconds
* corresponding to the reset time. There will be a watchdog
* exception at approximately 3/5 of this time.
*
* The formula to calculate this is given by:
* 2.5 * (2^(63-period+1)) / timebase_freq
*
* In order to simplify things, we assume that period is
* at least 1. This will still result in a very long timeout.
*/
static unsigned long long period_to_sec(unsigned int period)
{
unsigned long long tmp = 1ULL << (64 - period);
unsigned long tmp2 = ppc_tb_freq;
/* tmp may be a very large number and we don't want to overflow,
* so divide the timebase freq instead of multiplying tmp
*/
tmp2 = tmp2 / 5 * 2;
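/*
 * tmp2 now holds timebase_freq * 2 / 5, so tmp / tmp2 equals
 * 2.5 * 2^(64 - period) / timebase_freq, matching the formula above.
 */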
do_div(tmp, tmp2);
return tmp;
}
/*
* This procedure will find the highest period which will give a timeout
* greater than the one required. e.g. for a bus speed of 66666666 and
* a parameter of 2 secs, then this procedure will return a value of 38.
*/
static unsigned int sec_to_period(unsigned int secs)
{
unsigned int period;
for (period = 63; period > 0; period--) {
if (period_to_sec(period) >= secs)
return period;
}
return 0;
}
#define MAX_WDT_TIMEOUT period_to_sec(1)
#else /* CONFIG_PPC_E500 */
static unsigned long long period_to_sec(unsigned int period)
{
return period;
}
static unsigned int sec_to_period(unsigned int secs)
{
return secs;
}
#define MAX_WDT_TIMEOUT 3 /* from Kconfig */
#endif /* !CONFIG_PPC_E500 */
static void __booke_wdt_set(void *data)
{
u32 val;
struct watchdog_device *wdog = data;
val = mfspr(SPRN_TCR);
val &= ~WDTP_MASK;
val |= WDTP(sec_to_period(wdog->timeout));
mtspr(SPRN_TCR, val);
}
static void booke_wdt_set(void *data)
{
on_each_cpu(__booke_wdt_set, data, 0);
}
static void __booke_wdt_ping(void *data)
{
mtspr(SPRN_TSR, TSR_ENW|TSR_WIS);
}
static int booke_wdt_ping(struct watchdog_device *wdog)
{
on_each_cpu(__booke_wdt_ping, NULL, 0);
return 0;
}
static void __booke_wdt_enable(void *data)
{
u32 val;
struct watchdog_device *wdog = data;
/* clear status before enabling watchdog */
__booke_wdt_ping(NULL);
val = mfspr(SPRN_TCR);
val &= ~WDTP_MASK;
val |= (TCR_WIE|TCR_WRC(WRC_CHIP)|WDTP(sec_to_period(wdog->timeout)));
mtspr(SPRN_TCR, val);
}
/**
* __booke_wdt_disable - disable the watchdog on the given CPU
*
* This function is called on each CPU. It disables the watchdog on that CPU.
*
* TCR[WRC] cannot be changed once it has been set to non-zero, but we can
* effectively disable the watchdog by setting its period to the maximum value.
*/
static void __booke_wdt_disable(void *data)
{
u32 val;
val = mfspr(SPRN_TCR);
val &= ~(TCR_WIE | WDTP_MASK);
mtspr(SPRN_TCR, val);
/* clear status to make sure nothing is pending */
__booke_wdt_ping(NULL);
}
static int booke_wdt_start(struct watchdog_device *wdog)
{
on_each_cpu(__booke_wdt_enable, wdog, 0);
pr_debug("watchdog enabled (timeout = %u sec)\n", wdog->timeout);
return 0;
}
static int booke_wdt_stop(struct watchdog_device *wdog)
{
on_each_cpu(__booke_wdt_disable, NULL, 0);
pr_debug("watchdog disabled\n");
return 0;
}
static int booke_wdt_set_timeout(struct watchdog_device *wdt_dev,
unsigned int timeout)
{
wdt_dev->timeout = timeout;
booke_wdt_set(wdt_dev);
return 0;
}
static struct watchdog_info booke_wdt_info __ro_after_init = {
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
.identity = "PowerPC Book-E Watchdog",
};
static const struct watchdog_ops booke_wdt_ops = {
.owner = THIS_MODULE,
.start = booke_wdt_start,
.stop = booke_wdt_stop,
.ping = booke_wdt_ping,
.set_timeout = booke_wdt_set_timeout,
};
static struct watchdog_device booke_wdt_dev = {
.info = &booke_wdt_info,
.ops = &booke_wdt_ops,
.min_timeout = 1,
};
static void __exit booke_wdt_exit(void)
{
watchdog_unregister_device(&booke_wdt_dev);
}
static int __init booke_wdt_init(void)
{
int ret = 0;
pr_info("powerpc book-e watchdog driver loaded\n");
booke_wdt_info.firmware_version = cur_cpu_spec->pvr_value;
booke_wdt_set_timeout(&booke_wdt_dev,
period_to_sec(booke_wdt_period));
watchdog_set_nowayout(&booke_wdt_dev, nowayout);
booke_wdt_dev.max_timeout = MAX_WDT_TIMEOUT;
if (booke_wdt_enabled)
booke_wdt_start(&booke_wdt_dev);
ret = watchdog_register_device(&booke_wdt_dev);
return ret;
}
module_init(booke_wdt_init);
module_exit(booke_wdt_exit);
MODULE_ALIAS("booke_wdt");
MODULE_DESCRIPTION("PowerPC Book-E watchdog driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/watchdog/booke_wdt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2010, Paul Cercueil <[email protected]>
* JZ4740 Watchdog driver
*/
#include <linux/mfd/ingenic-tcu.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/watchdog.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/regmap.h>
#define DEFAULT_HEARTBEAT 5
#define MAX_HEARTBEAT 2048
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
static unsigned int heartbeat = DEFAULT_HEARTBEAT;
module_param(heartbeat, uint, 0);
MODULE_PARM_DESC(heartbeat,
"Watchdog heartbeat period in seconds from 1 to "
__MODULE_STRING(MAX_HEARTBEAT) ", default "
__MODULE_STRING(DEFAULT_HEARTBEAT));
struct jz4740_wdt_drvdata {
struct watchdog_device wdt;
struct regmap *map;
struct clk *clk;
unsigned long clk_rate;
};
static int jz4740_wdt_ping(struct watchdog_device *wdt_dev)
{
struct jz4740_wdt_drvdata *drvdata = watchdog_get_drvdata(wdt_dev);
regmap_write(drvdata->map, TCU_REG_WDT_TCNT, 0);
return 0;
}
static int jz4740_wdt_set_timeout(struct watchdog_device *wdt_dev,
unsigned int new_timeout)
{
struct jz4740_wdt_drvdata *drvdata = watchdog_get_drvdata(wdt_dev);
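/*
 * The compare register counts ticks of the WDT clock configured at probe
 * time (the slowest rate available), so the raw value is simply
 * clk_rate * seconds; max_timeout is bounded to 0xffff / clk_rate to keep
 * this within the 16-bit register.
 */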
u16 timeout_value = (u16)(drvdata->clk_rate * new_timeout);
unsigned int tcer;
regmap_read(drvdata->map, TCU_REG_WDT_TCER, &tcer);
regmap_write(drvdata->map, TCU_REG_WDT_TCER, 0);
regmap_write(drvdata->map, TCU_REG_WDT_TDR, timeout_value);
regmap_write(drvdata->map, TCU_REG_WDT_TCNT, 0);
if (tcer & TCU_WDT_TCER_TCEN)
regmap_write(drvdata->map, TCU_REG_WDT_TCER, TCU_WDT_TCER_TCEN);
wdt_dev->timeout = new_timeout;
return 0;
}
static int jz4740_wdt_start(struct watchdog_device *wdt_dev)
{
struct jz4740_wdt_drvdata *drvdata = watchdog_get_drvdata(wdt_dev);
unsigned int tcer;
int ret;
ret = clk_prepare_enable(drvdata->clk);
if (ret)
return ret;
regmap_read(drvdata->map, TCU_REG_WDT_TCER, &tcer);
jz4740_wdt_set_timeout(wdt_dev, wdt_dev->timeout);
/* Start watchdog if it wasn't started already */
if (!(tcer & TCU_WDT_TCER_TCEN))
regmap_write(drvdata->map, TCU_REG_WDT_TCER, TCU_WDT_TCER_TCEN);
return 0;
}
static int jz4740_wdt_stop(struct watchdog_device *wdt_dev)
{
struct jz4740_wdt_drvdata *drvdata = watchdog_get_drvdata(wdt_dev);
regmap_write(drvdata->map, TCU_REG_WDT_TCER, 0);
clk_disable_unprepare(drvdata->clk);
return 0;
}
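/*
 * Restart handler: presumably a zero timeout programs a zero compare
 * value, so the counter expires (and the SoC resets) almost immediately
 * after the watchdog is started.
 */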
static int jz4740_wdt_restart(struct watchdog_device *wdt_dev,
unsigned long action, void *data)
{
wdt_dev->timeout = 0;
jz4740_wdt_start(wdt_dev);
return 0;
}
static const struct watchdog_info jz4740_wdt_info = {
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
.identity = "jz4740 Watchdog",
};
static const struct watchdog_ops jz4740_wdt_ops = {
.owner = THIS_MODULE,
.start = jz4740_wdt_start,
.stop = jz4740_wdt_stop,
.ping = jz4740_wdt_ping,
.set_timeout = jz4740_wdt_set_timeout,
.restart = jz4740_wdt_restart,
};
#ifdef CONFIG_OF
static const struct of_device_id jz4740_wdt_of_matches[] = {
{ .compatible = "ingenic,jz4740-watchdog", },
{ .compatible = "ingenic,jz4780-watchdog", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, jz4740_wdt_of_matches);
#endif
static int jz4740_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct jz4740_wdt_drvdata *drvdata;
struct watchdog_device *jz4740_wdt;
long rate;
int ret;
drvdata = devm_kzalloc(dev, sizeof(struct jz4740_wdt_drvdata),
GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
drvdata->clk = devm_clk_get(&pdev->dev, "wdt");
if (IS_ERR(drvdata->clk)) {
dev_err(&pdev->dev, "cannot find WDT clock\n");
return PTR_ERR(drvdata->clk);
}
/* Set smallest clock possible */
rate = clk_round_rate(drvdata->clk, 1);
if (rate < 0)
return rate;
ret = clk_set_rate(drvdata->clk, rate);
if (ret)
return ret;
drvdata->clk_rate = rate;
jz4740_wdt = &drvdata->wdt;
jz4740_wdt->info = &jz4740_wdt_info;
jz4740_wdt->ops = &jz4740_wdt_ops;
jz4740_wdt->min_timeout = 1;
jz4740_wdt->max_timeout = 0xffff / rate;
jz4740_wdt->timeout = clamp(heartbeat,
jz4740_wdt->min_timeout,
jz4740_wdt->max_timeout);
jz4740_wdt->parent = dev;
watchdog_set_nowayout(jz4740_wdt, nowayout);
watchdog_set_drvdata(jz4740_wdt, drvdata);
drvdata->map = device_node_to_regmap(dev->parent->of_node);
if (IS_ERR(drvdata->map)) {
dev_err(dev, "regmap not found\n");
return PTR_ERR(drvdata->map);
}
return devm_watchdog_register_device(dev, &drvdata->wdt);
}
static struct platform_driver jz4740_wdt_driver = {
.probe = jz4740_wdt_probe,
.driver = {
.name = "jz4740-wdt",
.of_match_table = of_match_ptr(jz4740_wdt_of_matches),
},
};
module_platform_driver(jz4740_wdt_driver);
MODULE_AUTHOR("Paul Cercueil <[email protected]>");
MODULE_DESCRIPTION("jz4740 Watchdog Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:jz4740-wdt");
| linux-master | drivers/watchdog/jz4740_wdt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* drivers/watchdog/shwdt.c
*
* Watchdog driver for integrated watchdog in the SuperH processors.
*
* Copyright (C) 2001 - 2012 Paul Mundt <[email protected]>
*
* 14-Dec-2001 Matt Domsch <[email protected]>
* Added nowayout module option to override CONFIG_WATCHDOG_NOWAYOUT
*
* 19-Apr-2002 Rob Radez <[email protected]>
* Added expect close support, made emulated timeout runtime changeable
* general cleanups, add some ioctls
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/watchdog.h>
#include <linux/pm_runtime.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <asm/watchdog.h>
#define DRV_NAME "sh-wdt"
/*
* Default clock division ratio is 5.25 msecs. For an additional table of
* values, consult the asm-sh/watchdog.h. Overload this at module load
* time.
*
* In order for this to work reliably we need to have HZ set to 1000 or
* something considerably higher than 100 (or we need a proper high-res timer
* implementation that will deal with this properly), otherwise the 10ms
* resolution of a jiffy is enough to trigger the overflow. For things like
* the SH-4 and SH-5, this isn't necessarily that big of a problem, though
* for the SH-2 and SH-3, this isn't recommended unless the WDT is absolutely
* necessary.
*
* As a result of this timing problem, the only modes that are particularly
* feasible are the 4096 and the 2048 divisors, which yield 5.25 and 2.62ms
* overflow periods respectively.
*
* Also, since we can't really expect userspace to be responsive enough
* before the overflow happens, we maintain two separate timers: one in
* the kernel for clearing out WOVF every 2ms or so (again, this depends on
* HZ == 1000), and another for monitoring userspace writes to the WDT device.
*
* As such, we currently use a configurable heartbeat interval which defaults
* to 30s. In this case, the userspace daemon is only responsible for periodic
* writes to the device before the next heartbeat is scheduled. If the daemon
* misses its deadline, the kernel timer will allow the WDT to overflow.
*/
static int clock_division_ratio = WTCSR_CKS_4096;
#define next_ping_period(cks) (jiffies + msecs_to_jiffies(cks - 4))
#define WATCHDOG_HEARTBEAT 30 /* 30 sec default heartbeat */
static int heartbeat = WATCHDOG_HEARTBEAT; /* in seconds */
static bool nowayout = WATCHDOG_NOWAYOUT;
static unsigned long next_heartbeat;
struct sh_wdt {
void __iomem *base;
struct device *dev;
struct clk *clk;
spinlock_t lock;
struct timer_list timer;
};
static int sh_wdt_start(struct watchdog_device *wdt_dev)
{
struct sh_wdt *wdt = watchdog_get_drvdata(wdt_dev);
unsigned long flags;
u8 csr;
pm_runtime_get_sync(wdt->dev);
clk_enable(wdt->clk);
spin_lock_irqsave(&wdt->lock, flags);
next_heartbeat = jiffies + (heartbeat * HZ);
mod_timer(&wdt->timer, next_ping_period(clock_division_ratio));
csr = sh_wdt_read_csr();
csr |= WTCSR_WT | clock_division_ratio;
sh_wdt_write_csr(csr);
sh_wdt_write_cnt(0);
/*
* These processors have a bit of an inconsistent initialization
* process: starting with SH-3, RSTS was moved to WTCSR, and the
* RSTCSR register was removed.
*
* On the SH-2 however, in addition to bits being in different
* locations, we must deal with RSTCSR outright.
*/
csr = sh_wdt_read_csr();
csr |= WTCSR_TME;
csr &= ~WTCSR_RSTS;
sh_wdt_write_csr(csr);
#ifdef CONFIG_CPU_SH2
csr = sh_wdt_read_rstcsr();
csr &= ~RSTCSR_RSTS;
sh_wdt_write_rstcsr(csr);
#endif
spin_unlock_irqrestore(&wdt->lock, flags);
return 0;
}
static int sh_wdt_stop(struct watchdog_device *wdt_dev)
{
struct sh_wdt *wdt = watchdog_get_drvdata(wdt_dev);
unsigned long flags;
u8 csr;
spin_lock_irqsave(&wdt->lock, flags);
del_timer(&wdt->timer);
csr = sh_wdt_read_csr();
csr &= ~WTCSR_TME;
sh_wdt_write_csr(csr);
spin_unlock_irqrestore(&wdt->lock, flags);
clk_disable(wdt->clk);
pm_runtime_put_sync(wdt->dev);
return 0;
}
static int sh_wdt_keepalive(struct watchdog_device *wdt_dev)
{
struct sh_wdt *wdt = watchdog_get_drvdata(wdt_dev);
unsigned long flags;
spin_lock_irqsave(&wdt->lock, flags);
next_heartbeat = jiffies + (heartbeat * HZ);
spin_unlock_irqrestore(&wdt->lock, flags);
return 0;
}
static int sh_wdt_set_heartbeat(struct watchdog_device *wdt_dev, unsigned t)
{
struct sh_wdt *wdt = watchdog_get_drvdata(wdt_dev);
unsigned long flags;
if (unlikely(t < 1 || t > 3600)) /* arbitrary upper limit */
return -EINVAL;
spin_lock_irqsave(&wdt->lock, flags);
heartbeat = t;
wdt_dev->timeout = t;
spin_unlock_irqrestore(&wdt->lock, flags);
return 0;
}
static void sh_wdt_ping(struct timer_list *t)
{
struct sh_wdt *wdt = from_timer(wdt, t, timer);
unsigned long flags;
spin_lock_irqsave(&wdt->lock, flags);
if (time_before(jiffies, next_heartbeat)) {
u8 csr;
csr = sh_wdt_read_csr();
csr &= ~WTCSR_IOVF;
sh_wdt_write_csr(csr);
sh_wdt_write_cnt(0);
mod_timer(&wdt->timer, next_ping_period(clock_division_ratio));
} else
dev_warn(wdt->dev, "Heartbeat lost! Will not ping "
"the watchdog\n");
spin_unlock_irqrestore(&wdt->lock, flags);
}
static const struct watchdog_info sh_wdt_info = {
.options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT |
WDIOF_MAGICCLOSE,
.firmware_version = 1,
.identity = "SH WDT",
};
static const struct watchdog_ops sh_wdt_ops = {
.owner = THIS_MODULE,
.start = sh_wdt_start,
.stop = sh_wdt_stop,
.ping = sh_wdt_keepalive,
.set_timeout = sh_wdt_set_heartbeat,
};
static struct watchdog_device sh_wdt_dev = {
.info = &sh_wdt_info,
.ops = &sh_wdt_ops,
};
static int sh_wdt_probe(struct platform_device *pdev)
{
struct sh_wdt *wdt;
int rc;
/*
* As this driver only covers the global watchdog case, reject
* any attempts to register per-CPU watchdogs.
*/
if (pdev->id != -1)
return -EINVAL;
wdt = devm_kzalloc(&pdev->dev, sizeof(struct sh_wdt), GFP_KERNEL);
if (unlikely(!wdt))
return -ENOMEM;
wdt->dev = &pdev->dev;
wdt->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(wdt->clk)) {
/*
* Clock framework support is optional, continue on
* anyways if we don't find a matching clock.
*/
wdt->clk = NULL;
}
wdt->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wdt->base))
return PTR_ERR(wdt->base);
watchdog_set_nowayout(&sh_wdt_dev, nowayout);
watchdog_set_drvdata(&sh_wdt_dev, wdt);
sh_wdt_dev.parent = &pdev->dev;
spin_lock_init(&wdt->lock);
rc = sh_wdt_set_heartbeat(&sh_wdt_dev, heartbeat);
if (unlikely(rc)) {
/* Default timeout if invalid */
sh_wdt_set_heartbeat(&sh_wdt_dev, WATCHDOG_HEARTBEAT);
dev_warn(&pdev->dev,
"heartbeat value must be 1<=x<=3600, using %d\n",
sh_wdt_dev.timeout);
}
dev_info(&pdev->dev, "configured with heartbeat=%d sec (nowayout=%d)\n",
sh_wdt_dev.timeout, nowayout);
rc = watchdog_register_device(&sh_wdt_dev);
if (unlikely(rc)) {
dev_err(&pdev->dev, "Can't register watchdog (err=%d)\n", rc);
return rc;
}
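/* Prepare the software ping timer; it is armed by sh_wdt_start() when the watchdog is started. */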
timer_setup(&wdt->timer, sh_wdt_ping, 0);
wdt->timer.expires = next_ping_period(clock_division_ratio);
dev_info(&pdev->dev, "initialized.\n");
pm_runtime_enable(&pdev->dev);
return 0;
}
static void sh_wdt_remove(struct platform_device *pdev)
{
watchdog_unregister_device(&sh_wdt_dev);
pm_runtime_disable(&pdev->dev);
}
static void sh_wdt_shutdown(struct platform_device *pdev)
{
sh_wdt_stop(&sh_wdt_dev);
}
static struct platform_driver sh_wdt_driver = {
.driver = {
.name = DRV_NAME,
},
.probe = sh_wdt_probe,
.remove_new = sh_wdt_remove,
.shutdown = sh_wdt_shutdown,
};
static int __init sh_wdt_init(void)
{
if (unlikely(clock_division_ratio < 0x5 ||
clock_division_ratio > 0x7)) {
clock_division_ratio = WTCSR_CKS_4096;
pr_info("divisor must be 0x5<=x<=0x7, using %d\n",
clock_division_ratio);
}
return platform_driver_register(&sh_wdt_driver);
}
static void __exit sh_wdt_exit(void)
{
platform_driver_unregister(&sh_wdt_driver);
}
module_init(sh_wdt_init);
module_exit(sh_wdt_exit);
MODULE_AUTHOR("Paul Mundt <[email protected]>");
MODULE_DESCRIPTION("SuperH watchdog driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
module_param(clock_division_ratio, int, 0);
MODULE_PARM_DESC(clock_division_ratio,
"Clock division ratio. Valid ranges are from 0x5 (1.31ms) "
"to 0x7 (5.25ms). (default=" __MODULE_STRING(WTCSR_CKS_4096) ")");
module_param(heartbeat, int, 0);
MODULE_PARM_DESC(heartbeat,
"Watchdog heartbeat in seconds. (1 <= heartbeat <= 3600, default="
__MODULE_STRING(WATCHDOG_HEARTBEAT) ")");
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
| linux-master | drivers/watchdog/shwdt.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* watchdog_core.c
*
* (c) Copyright 2008-2011 Alan Cox <[email protected]>,
* All Rights Reserved.
*
* (c) Copyright 2008-2011 Wim Van Sebroeck <[email protected]>.
*
* This source code is part of the generic code that can be used
* by all the watchdog timer drivers.
*
* Based on source code of the following authors:
* Matt Domsch <[email protected]>,
* Rob Radez <[email protected]>,
* Rusty Lynch <[email protected]>
* Satyam Sharma <[email protected]>
* Randy Dunlap <[email protected]>
*
* Neither Alan Cox, CymruNet Ltd., Wim Van Sebroeck nor Iguana vzw.
* admit liability nor provide warranty for any of this software.
* This material is provided "AS-IS" and at no charge.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h> /* For EXPORT_SYMBOL/module stuff/... */
#include <linux/types.h> /* For standard types */
#include <linux/errno.h> /* For the -ENODEV/... values */
#include <linux/kernel.h> /* For printk/panic/... */
#include <linux/reboot.h> /* For restart handler */
#include <linux/watchdog.h> /* For watchdog specific items */
#include <linux/init.h> /* For __init/__exit/... */
#include <linux/idr.h> /* For ida_* macros */
#include <linux/err.h> /* For IS_ERR macros */
#include <linux/of.h> /* For of_get_timeout_sec */
#include <linux/suspend.h>
#include "watchdog_core.h" /* For watchdog_dev_register/... */
#define CREATE_TRACE_POINTS
#include <trace/events/watchdog.h>
static DEFINE_IDA(watchdog_ida);
static int stop_on_reboot = -1;
module_param(stop_on_reboot, int, 0444);
MODULE_PARM_DESC(stop_on_reboot, "Stop watchdogs on reboot (0=keep watching, 1=stop)");
/*
* Deferred Registration infrastructure.
*
* Sometimes a watchdog driver needs to be loaded as soon as possible,
* for example when it is impossible to disable the watchdog. Raising the
* initcall level of the driver is one way to do so, but in that case the
* miscdev may not be ready yet (subsys_initcall), and watchdog_core needs
* miscdev to register the watchdog as a char device.
*
* The deferred registration infrastructure offers a way for the watchdog
* subsystem to register a watchdog properly, even before miscdev is ready.
*/
static DEFINE_MUTEX(wtd_deferred_reg_mutex);
static LIST_HEAD(wtd_deferred_reg_list);
static bool wtd_deferred_reg_done;
static void watchdog_deferred_registration_add(struct watchdog_device *wdd)
{
list_add_tail(&wdd->deferred,
&wtd_deferred_reg_list);
}
static void watchdog_deferred_registration_del(struct watchdog_device *wdd)
{
struct list_head *p, *n;
struct watchdog_device *wdd_tmp;
list_for_each_safe(p, n, &wtd_deferred_reg_list) {
wdd_tmp = list_entry(p, struct watchdog_device,
deferred);
if (wdd_tmp == wdd) {
list_del(&wdd_tmp->deferred);
break;
}
}
}
static void watchdog_check_min_max_timeout(struct watchdog_device *wdd)
{
/*
* Check that we have valid min and max timeout values; if
* not, reset them both to 0 (= not used or unknown).
*/
if (!wdd->max_hw_heartbeat_ms && wdd->min_timeout > wdd->max_timeout) {
pr_info("Invalid min and max timeout values, resetting to 0!\n");
wdd->min_timeout = 0;
wdd->max_timeout = 0;
}
}
/**
* watchdog_init_timeout() - initialize the timeout field
* @wdd: watchdog device
* @timeout_parm: timeout module parameter
* @dev: Device that stores the timeout-sec property
*
* Initialize the timeout field of the watchdog_device struct with either the
* timeout module parameter (if it is a valid value) or the timeout-sec property
* (only if it is a valid value and the timeout_parm is out of bounds).
* If none of them are valid then we keep the old value (which should normally
* be the default timeout value). Note that for the module parameter, '0' means
* 'use default' while it is an invalid value for the timeout-sec property.
* Simply omit the property if you want to use the default value.
*
* A zero is returned on success or -EINVAL if all provided values are out of
* bounds.
*/
int watchdog_init_timeout(struct watchdog_device *wdd,
unsigned int timeout_parm, struct device *dev)
{
const char *dev_str = wdd->parent ? dev_name(wdd->parent) :
(const char *)wdd->info->identity;
unsigned int t = 0;
int ret = 0;
watchdog_check_min_max_timeout(wdd);
/* check the driver supplied value (likely a module parameter) first */
if (timeout_parm) {
if (!watchdog_timeout_invalid(wdd, timeout_parm)) {
wdd->timeout = timeout_parm;
return 0;
}
pr_err("%s: driver supplied timeout (%u) out of range\n",
dev_str, timeout_parm);
ret = -EINVAL;
}
/* try to get the timeout_sec property */
if (dev && dev->of_node &&
of_property_read_u32(dev->of_node, "timeout-sec", &t) == 0) {
if (t && !watchdog_timeout_invalid(wdd, t)) {
wdd->timeout = t;
return 0;
}
pr_err("%s: DT supplied timeout (%u) out of range\n", dev_str, t);
ret = -EINVAL;
}
if (ret < 0 && wdd->timeout)
pr_warn("%s: falling back to default timeout (%u)\n", dev_str,
wdd->timeout);
return ret;
}
EXPORT_SYMBOL_GPL(watchdog_init_timeout);
static int watchdog_reboot_notifier(struct notifier_block *nb,
unsigned long code, void *data)
{
struct watchdog_device *wdd;
wdd = container_of(nb, struct watchdog_device, reboot_nb);
if (code == SYS_DOWN || code == SYS_HALT || code == SYS_POWER_OFF) {
if (watchdog_hw_running(wdd)) {
int ret;
ret = wdd->ops->stop(wdd);
trace_watchdog_stop(wdd, ret);
if (ret)
return NOTIFY_BAD;
}
}
return NOTIFY_DONE;
}
static int watchdog_restart_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
struct watchdog_device *wdd = container_of(nb, struct watchdog_device,
restart_nb);
int ret;
ret = wdd->ops->restart(wdd, action, data);
if (ret)
return NOTIFY_BAD;
return NOTIFY_DONE;
}
static int watchdog_pm_notifier(struct notifier_block *nb, unsigned long mode,
void *data)
{
struct watchdog_device *wdd;
int ret = 0;
wdd = container_of(nb, struct watchdog_device, pm_nb);
switch (mode) {
case PM_HIBERNATION_PREPARE:
case PM_RESTORE_PREPARE:
case PM_SUSPEND_PREPARE:
ret = watchdog_dev_suspend(wdd);
break;
case PM_POST_HIBERNATION:
case PM_POST_RESTORE:
case PM_POST_SUSPEND:
ret = watchdog_dev_resume(wdd);
break;
}
if (ret)
return NOTIFY_BAD;
return NOTIFY_DONE;
}
/**
* watchdog_set_restart_priority - Change priority of restart handler
* @wdd: watchdog device
* @priority: priority of the restart handler, should follow these guidelines:
* 0: use watchdog's restart function as last resort, has limited restart
* capabilities
* 128: default restart handler, use if no other handler is expected to be
* available and/or if restart is sufficient to restart the entire system
* 255: preempt all other handlers
*
* If a wdd->ops->restart function is provided when watchdog_register_device is
* called, it will be registered as a restart handler with the priority given
* here.
*/
void watchdog_set_restart_priority(struct watchdog_device *wdd, int priority)
{
wdd->restart_nb.priority = priority;
}
EXPORT_SYMBOL_GPL(watchdog_set_restart_priority);
static int __watchdog_register_device(struct watchdog_device *wdd)
{
int ret, id = -1;
if (wdd == NULL || wdd->info == NULL || wdd->ops == NULL)
return -EINVAL;
/* Mandatory operations need to be supported */
if (!wdd->ops->start || (!wdd->ops->stop && !wdd->max_hw_heartbeat_ms))
return -EINVAL;
watchdog_check_min_max_timeout(wdd);
/*
* Note: now that all watchdog_device data has been verified, we
* will not check this anymore in other functions. If data gets
* corrupted in a later stage then we expect a kernel panic!
*/
/* Use alias for watchdog id if possible */
if (wdd->parent) {
ret = of_alias_get_id(wdd->parent->of_node, "watchdog");
if (ret >= 0)
id = ida_simple_get(&watchdog_ida, ret,
ret + 1, GFP_KERNEL);
}
if (id < 0)
id = ida_simple_get(&watchdog_ida, 0, MAX_DOGS, GFP_KERNEL);
if (id < 0)
return id;
wdd->id = id;
ret = watchdog_dev_register(wdd);
if (ret) {
ida_simple_remove(&watchdog_ida, id);
if (!(id == 0 && ret == -EBUSY))
return ret;
/* Retry in case a legacy watchdog module exists */
id = ida_simple_get(&watchdog_ida, 1, MAX_DOGS, GFP_KERNEL);
if (id < 0)
return id;
wdd->id = id;
ret = watchdog_dev_register(wdd);
if (ret) {
ida_simple_remove(&watchdog_ida, id);
return ret;
}
}
/* Module parameter to force watchdog policy on reboot. */
if (stop_on_reboot != -1) {
if (stop_on_reboot)
set_bit(WDOG_STOP_ON_REBOOT, &wdd->status);
else
clear_bit(WDOG_STOP_ON_REBOOT, &wdd->status);
}
if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status)) {
if (!wdd->ops->stop)
pr_warn("watchdog%d: stop_on_reboot not supported\n", wdd->id);
else {
wdd->reboot_nb.notifier_call = watchdog_reboot_notifier;
ret = register_reboot_notifier(&wdd->reboot_nb);
if (ret) {
pr_err("watchdog%d: Cannot register reboot notifier (%d)\n",
wdd->id, ret);
watchdog_dev_unregister(wdd);
ida_simple_remove(&watchdog_ida, id);
return ret;
}
}
}
if (wdd->ops->restart) {
wdd->restart_nb.notifier_call = watchdog_restart_notifier;
ret = register_restart_handler(&wdd->restart_nb);
if (ret)
pr_warn("watchdog%d: Cannot register restart handler (%d)\n",
wdd->id, ret);
}
if (test_bit(WDOG_NO_PING_ON_SUSPEND, &wdd->status)) {
wdd->pm_nb.notifier_call = watchdog_pm_notifier;
ret = register_pm_notifier(&wdd->pm_nb);
if (ret)
pr_warn("watchdog%d: Cannot register pm handler (%d)\n",
wdd->id, ret);
}
return 0;
}
/**
* watchdog_register_device() - register a watchdog device
* @wdd: watchdog device
*
* Register a watchdog device with the kernel so that the
* watchdog timer can be accessed from userspace.
*
* A zero is returned on success and a negative errno code for
* failure.
*/
int watchdog_register_device(struct watchdog_device *wdd)
{
const char *dev_str;
int ret = 0;
mutex_lock(&wtd_deferred_reg_mutex);
if (wtd_deferred_reg_done)
ret = __watchdog_register_device(wdd);
else
watchdog_deferred_registration_add(wdd);
mutex_unlock(&wtd_deferred_reg_mutex);
if (ret) {
dev_str = wdd->parent ? dev_name(wdd->parent) :
(const char *)wdd->info->identity;
pr_err("%s: failed to register watchdog device (err = %d)\n",
dev_str, ret);
}
return ret;
}
EXPORT_SYMBOL_GPL(watchdog_register_device);
static void __watchdog_unregister_device(struct watchdog_device *wdd)
{
if (wdd == NULL)
return;
if (wdd->ops->restart)
unregister_restart_handler(&wdd->restart_nb);
if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status))
unregister_reboot_notifier(&wdd->reboot_nb);
watchdog_dev_unregister(wdd);
ida_simple_remove(&watchdog_ida, wdd->id);
}
/**
* watchdog_unregister_device() - unregister a watchdog device
* @wdd: watchdog device to unregister
*
* Unregister a watchdog device that was previously successfully
* registered with watchdog_register_device().
*/
void watchdog_unregister_device(struct watchdog_device *wdd)
{
mutex_lock(&wtd_deferred_reg_mutex);
if (wtd_deferred_reg_done)
__watchdog_unregister_device(wdd);
else
watchdog_deferred_registration_del(wdd);
mutex_unlock(&wtd_deferred_reg_mutex);
}
EXPORT_SYMBOL_GPL(watchdog_unregister_device);
static void devm_watchdog_unregister_device(struct device *dev, void *res)
{
watchdog_unregister_device(*(struct watchdog_device **)res);
}
/**
* devm_watchdog_register_device() - resource managed watchdog_register_device()
* @dev: device that is registering this watchdog device
* @wdd: watchdog device
*
* Managed watchdog_register_device(). For watchdog device registered by this
* function, watchdog_unregister_device() is automatically called on driver
* detach. See watchdog_register_device() for more information.
*/
int devm_watchdog_register_device(struct device *dev,
struct watchdog_device *wdd)
{
struct watchdog_device **rcwdd;
int ret;
rcwdd = devres_alloc(devm_watchdog_unregister_device, sizeof(*rcwdd),
GFP_KERNEL);
if (!rcwdd)
return -ENOMEM;
ret = watchdog_register_device(wdd);
if (!ret) {
*rcwdd = wdd;
devres_add(dev, rcwdd);
} else {
devres_free(rcwdd);
}
return ret;
}
EXPORT_SYMBOL_GPL(devm_watchdog_register_device);
static int __init watchdog_deferred_registration(void)
{
mutex_lock(&wtd_deferred_reg_mutex);
wtd_deferred_reg_done = true;
while (!list_empty(&wtd_deferred_reg_list)) {
struct watchdog_device *wdd;
wdd = list_first_entry(&wtd_deferred_reg_list,
struct watchdog_device, deferred);
list_del(&wdd->deferred);
__watchdog_register_device(wdd);
}
mutex_unlock(&wtd_deferred_reg_mutex);
return 0;
}
static int __init watchdog_init(void)
{
int err;
err = watchdog_dev_init();
if (err < 0)
return err;
watchdog_deferred_registration();
return 0;
}
static void __exit watchdog_exit(void)
{
watchdog_dev_exit();
ida_destroy(&watchdog_ida);
}
subsys_initcall_sync(watchdog_init);
module_exit(watchdog_exit);
MODULE_AUTHOR("Alan Cox <[email protected]>");
MODULE_AUTHOR("Wim Van Sebroeck <[email protected]>");
MODULE_DESCRIPTION("WatchDog Timer Driver Core");
MODULE_LICENSE("GPL");
| linux-master | drivers/watchdog/watchdog_core.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* sunplus Watchdog Driver
*
* Copyright (C) 2021 Sunplus Technology Co., Ltd.
*
*/
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/watchdog.h>
#define WDT_CTRL 0x00
#define WDT_CNT 0x04
#define WDT_STOP 0x3877
#define WDT_RESUME 0x4A4B
#define WDT_CLRIRQ 0x7482
#define WDT_UNLOCK 0xAB00
#define WDT_LOCK 0xAB01
#define WDT_CONMAX 0xDEAF
/* TIMEOUT_MAX = 0xffff0 / 90 kHz = 11.65 s, so the maximum supported timeout is 11 seconds. */
#define SP_WDT_MAX_TIMEOUT 11U
#define SP_WDT_DEFAULT_TIMEOUT 10
#define STC_CLK 90000
#define DEVICE_NAME "sunplus-wdt"
static unsigned int timeout;
module_param(timeout, int, 0);
MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds");
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
struct sp_wdt_priv {
struct watchdog_device wdev;
void __iomem *base;
struct clk *clk;
struct reset_control *rstc;
};
static int sp_wdt_restart(struct watchdog_device *wdev,
unsigned long action, void *data)
{
struct sp_wdt_priv *priv = watchdog_get_drvdata(wdev);
void __iomem *base = priv->base;
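/* Stop the counter, load the minimum count, then resume so the watchdog expires almost immediately and resets the chip. */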
writel(WDT_STOP, base + WDT_CTRL);
writel(WDT_UNLOCK, base + WDT_CTRL);
writel(0x0001, base + WDT_CNT);
writel(WDT_LOCK, base + WDT_CTRL);
writel(WDT_RESUME, base + WDT_CTRL);
return 0;
}
static int sp_wdt_ping(struct watchdog_device *wdev)
{
struct sp_wdt_priv *priv = watchdog_get_drvdata(wdev);
void __iomem *base = priv->base;
u32 count;
if (wdev->timeout > SP_WDT_MAX_TIMEOUT) {
/* WDT_CONMAX sets the count to the maximum (down-counting). */
writel(WDT_CONMAX, base + WDT_CTRL);
} else {
writel(WDT_UNLOCK, base + WDT_CTRL);
/*
* The watchdog timer is a 20-bit down-counter clocked by STC_CLK.
* Bits [16:0] of this register map to bits [19:4] of the watchdog
* timer counter.
*/
count = (wdev->timeout * STC_CLK) >> 4;
writel(count, base + WDT_CNT);
writel(WDT_LOCK, base + WDT_CTRL);
}
return 0;
}
static int sp_wdt_stop(struct watchdog_device *wdev)
{
struct sp_wdt_priv *priv = watchdog_get_drvdata(wdev);
void __iomem *base = priv->base;
writel(WDT_STOP, base + WDT_CTRL);
return 0;
}
static int sp_wdt_start(struct watchdog_device *wdev)
{
struct sp_wdt_priv *priv = watchdog_get_drvdata(wdev);
void __iomem *base = priv->base;
writel(WDT_RESUME, base + WDT_CTRL);
return 0;
}
static unsigned int sp_wdt_get_timeleft(struct watchdog_device *wdev)
{
struct sp_wdt_priv *priv = watchdog_get_drvdata(wdev);
void __iomem *base = priv->base;
u32 val;
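/* WDT_CNT exposes the upper bits of the 20-bit down-counter; mask and shift left by 4 to reconstruct the remaining ticks. */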
val = readl(base + WDT_CNT);
val &= 0xffff;
val = val << 4;
return val;
}
static const struct watchdog_info sp_wdt_info = {
.identity = DEVICE_NAME,
.options = WDIOF_SETTIMEOUT |
WDIOF_MAGICCLOSE |
WDIOF_KEEPALIVEPING,
};
static const struct watchdog_ops sp_wdt_ops = {
.owner = THIS_MODULE,
.start = sp_wdt_start,
.stop = sp_wdt_stop,
.ping = sp_wdt_ping,
.get_timeleft = sp_wdt_get_timeleft,
.restart = sp_wdt_restart,
};
static void sp_clk_disable_unprepare(void *data)
{
clk_disable_unprepare(data);
}
static void sp_reset_control_assert(void *data)
{
reset_control_assert(data);
}
static int sp_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct sp_wdt_priv *priv;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->clk = devm_clk_get(dev, NULL);
if (IS_ERR(priv->clk))
return dev_err_probe(dev, PTR_ERR(priv->clk), "Failed to get clock\n");
ret = clk_prepare_enable(priv->clk);
if (ret)
return dev_err_probe(dev, ret, "Failed to enable clock\n");
ret = devm_add_action_or_reset(dev, sp_clk_disable_unprepare, priv->clk);
if (ret)
return ret;
/* The timer and watchdog share the STC reset */
priv->rstc = devm_reset_control_get_shared(dev, NULL);
if (IS_ERR(priv->rstc))
return dev_err_probe(dev, PTR_ERR(priv->rstc), "Failed to get reset\n");
reset_control_deassert(priv->rstc);
ret = devm_add_action_or_reset(dev, sp_reset_control_assert, priv->rstc);
if (ret)
return ret;
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
priv->wdev.info = &sp_wdt_info;
priv->wdev.ops = &sp_wdt_ops;
priv->wdev.timeout = SP_WDT_DEFAULT_TIMEOUT;
priv->wdev.max_hw_heartbeat_ms = SP_WDT_MAX_TIMEOUT * 1000;
priv->wdev.min_timeout = 1;
priv->wdev.parent = dev;
watchdog_set_drvdata(&priv->wdev, priv);
watchdog_init_timeout(&priv->wdev, timeout, dev);
watchdog_set_nowayout(&priv->wdev, nowayout);
watchdog_stop_on_reboot(&priv->wdev);
watchdog_set_restart_priority(&priv->wdev, 128);
return devm_watchdog_register_device(dev, &priv->wdev);
}
static const struct of_device_id sp_wdt_of_match[] = {
{.compatible = "sunplus,sp7021-wdt", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sp_wdt_of_match);
static struct platform_driver sp_wdt_driver = {
.probe = sp_wdt_probe,
.driver = {
.name = DEVICE_NAME,
.of_match_table = sp_wdt_of_match,
},
};
module_platform_driver(sp_wdt_driver);
MODULE_AUTHOR("Xiantao Hu <[email protected]>");
MODULE_DESCRIPTION("Sunplus Watchdog Timer Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/watchdog/sunplus_wdt.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Watchdog driver for z/VM and LPAR using the diag 288 interface.
*
* Under z/VM, expiration of the watchdog will send a "system restart" command
* to CP.
*
* The command can be altered using the module parameter "cmd". This is
* not recommended because it is only supported on z/VM, not on LPAR.
*
* On LPAR, the watchdog will always trigger a system restart; the module
* parameter cmd is meaningless there.
*
*
* Copyright IBM Corp. 2004, 2013
* Author(s): Arnd Bergmann ([email protected])
* Philipp Hachtmann ([email protected])
*
*/
#define KMSG_COMPONENT "diag288_wdt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/watchdog.h>
#include <asm/ebcdic.h>
#include <asm/diag.h>
#include <linux/io.h>
#define MAX_CMDLEN 240
#define DEFAULT_CMD "SYSTEM RESTART"
#define MIN_INTERVAL 15 /* Minimum timeout supported by diag288 */
#define MAX_INTERVAL 3600 /* One hour should be enough - pure estimation */
#define WDT_DEFAULT_TIMEOUT 30
/* Function codes - init, change, cancel */
#define WDT_FUNC_INIT 0
#define WDT_FUNC_CHANGE 1
#define WDT_FUNC_CANCEL 2
#define WDT_FUNC_CONCEAL 0x80000000
/* Action codes for LPAR watchdog */
#define LPARWDT_RESTART 0
static char wdt_cmd[MAX_CMDLEN] = DEFAULT_CMD;
static bool conceal_on;
static bool nowayout_info = WATCHDOG_NOWAYOUT;
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <[email protected]>");
MODULE_AUTHOR("Philipp Hachtmann <[email protected]>");
MODULE_DESCRIPTION("System z diag288 Watchdog Timer");
module_param_string(cmd, wdt_cmd, MAX_CMDLEN, 0644);
MODULE_PARM_DESC(cmd, "CP command that is run when the watchdog triggers (z/VM only)");
module_param_named(conceal, conceal_on, bool, 0644);
MODULE_PARM_DESC(conceal, "Enable the CONCEAL CP option while the watchdog is active (z/VM only)");
module_param_named(nowayout, nowayout_info, bool, 0444);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default = CONFIG_WATCHDOG_NOWAYOUT)");
MODULE_ALIAS("vmwatchdog");
static char *cmd_buf;
static int diag288(unsigned int func, unsigned int timeout,
unsigned long action, unsigned int len)
{
union register_pair r1 = { .even = func, .odd = timeout, };
union register_pair r3 = { .even = action, .odd = len, };
int err;
diag_stat_inc(DIAG_STAT_X288);
err = -EINVAL;
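/* If the diagnose raises a program check, the fixup skips the "la" below and err stays -EINVAL. */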
asm volatile(
" diag %[r1],%[r3],0x288\n"
"0: la %[err],0\n"
"1:\n"
EX_TABLE(0b, 1b)
: [err] "+d" (err)
: [r1] "d" (r1.pair), [r3] "d" (r3.pair)
: "cc", "memory");
return err;
}
static int diag288_str(unsigned int func, unsigned int timeout, char *cmd)
{
ssize_t len;
len = strscpy(cmd_buf, cmd, MAX_CMDLEN);
if (len < 0)
return len;
ASCEBC(cmd_buf, MAX_CMDLEN);
EBC_TOUPPER(cmd_buf, MAX_CMDLEN);
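/* The hypervisor expects the physical address of the EBCDIC command buffer. */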
return diag288(func, timeout, virt_to_phys(cmd_buf), len);
}
static int wdt_start(struct watchdog_device *dev)
{
int ret;
unsigned int func;
if (MACHINE_IS_VM) {
func = conceal_on ? (WDT_FUNC_INIT | WDT_FUNC_CONCEAL)
: WDT_FUNC_INIT;
ret = diag288_str(func, dev->timeout, wdt_cmd);
WARN_ON(ret != 0);
} else {
ret = diag288(WDT_FUNC_INIT, dev->timeout, LPARWDT_RESTART, 0);
}
if (ret) {
pr_err("The watchdog cannot be activated\n");
return ret;
}
return 0;
}
static int wdt_stop(struct watchdog_device *dev)
{
return diag288(WDT_FUNC_CANCEL, 0, 0, 0);
}
static int wdt_ping(struct watchdog_device *dev)
{
int ret;
unsigned int func;
if (MACHINE_IS_VM) {
/*
* It seems to be ok for z/VM to use the init function to
* retrigger the watchdog. On LPAR WDT_FUNC_CHANGE must
* be used when the watchdog is running.
*/
func = conceal_on ? (WDT_FUNC_INIT | WDT_FUNC_CONCEAL)
: WDT_FUNC_INIT;
ret = diag288_str(func, dev->timeout, wdt_cmd);
WARN_ON(ret != 0);
} else {
ret = diag288(WDT_FUNC_CHANGE, dev->timeout, 0, 0);
}
if (ret)
pr_err("The watchdog timer cannot be started or reset\n");
return ret;
}
static int wdt_set_timeout(struct watchdog_device *dev, unsigned int new_to)
{
dev->timeout = new_to;
return wdt_ping(dev);
}
static const struct watchdog_ops wdt_ops = {
.owner = THIS_MODULE,
.start = wdt_start,
.stop = wdt_stop,
.ping = wdt_ping,
.set_timeout = wdt_set_timeout,
};
static const struct watchdog_info wdt_info = {
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
.firmware_version = 0,
.identity = "z Watchdog",
};
static struct watchdog_device wdt_dev = {
.parent = NULL,
.info = &wdt_info,
.ops = &wdt_ops,
.bootstatus = 0,
.timeout = WDT_DEFAULT_TIMEOUT,
.min_timeout = MIN_INTERVAL,
.max_timeout = MAX_INTERVAL,
};
static int __init diag288_init(void)
{
int ret;
watchdog_set_nowayout(&wdt_dev, nowayout_info);
if (MACHINE_IS_VM) {
cmd_buf = kmalloc(MAX_CMDLEN, GFP_KERNEL);
if (!cmd_buf) {
pr_err("The watchdog cannot be initialized\n");
return -ENOMEM;
}
ret = diag288_str(WDT_FUNC_INIT, MIN_INTERVAL, "BEGIN");
if (ret != 0) {
pr_err("The watchdog cannot be initialized\n");
kfree(cmd_buf);
return -EINVAL;
}
} else {
if (diag288(WDT_FUNC_INIT, WDT_DEFAULT_TIMEOUT,
LPARWDT_RESTART, 0)) {
pr_err("The watchdog cannot be initialized\n");
return -EINVAL;
}
}
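/* The availability probe above armed the watchdog; cancel it again before registering the device. */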
if (diag288(WDT_FUNC_CANCEL, 0, 0, 0)) {
pr_err("The watchdog cannot be deactivated\n");
return -EINVAL;
}
return watchdog_register_device(&wdt_dev);
}
static void __exit diag288_exit(void)
{
watchdog_unregister_device(&wdt_dev);
kfree(cmd_buf);
}
module_init(diag288_init);
module_exit(diag288_exit);
| linux-master | drivers/watchdog/diag288_wdt.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* SBSA(Server Base System Architecture) Generic Watchdog driver
*
* Copyright (c) 2015, Linaro Ltd.
* Author: Fu Wei <[email protected]>
* Suravee Suthikulpanit <[email protected]>
* Al Stone <[email protected]>
* Timur Tabi <[email protected]>
*
* ARM SBSA Generic Watchdog has two stage timeouts:
* the first signal (WS0) is for alerting the system by interrupt,
* the second one (WS1) is a real hardware reset.
* More details about the hardware specification of this device:
* ARM DEN0029B - Server Base System Architecture (SBSA)
*
* This driver can operate the ARM SBSA Generic Watchdog as a single stage
* watchdog or as a two stages watchdog; the mode is selected by the module
* parameter "action".
* In the single stage mode, when the timeout is reached, your system
* will be reset by WS1. The first signal (WS0) is ignored.
* In the two stages mode, when the timeout is reached, the first signal (WS0)
* will trigger a panic. If the system gets into trouble and cannot be reset
* by the panic or restarted properly by the kdump kernel (if supported), then
* the second stage (the same length as the first stage) will be reached and
* the system will be reset by WS1. This helps the administrator back up the
* system context info via panic console output or kdump.
*
* SBSA GWDT:
* if action is 1 (the two stages mode):
* |--------WOR-------WS0--------WOR-------WS1
* |----timeout-----(panic)----timeout-----reset
*
* if action is 0 (the single stage mode):
* |------WOR-----WS0(ignored)-----WOR------WS1
* |--------------timeout-------------------reset
*
* Note: Since this watchdog timer has two stages, and each stage is determined
* by WOR, in the single stage mode, the timeout is (WOR * 2); in the two
* stages mode, the timeout is WOR. The maximum timeout in the two stages mode
* is half of that in the single stage mode.
*/
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/interrupt.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <linux/watchdog.h>
#include <asm/arch_timer.h>
#define DRV_NAME "sbsa-gwdt"
#define WATCHDOG_NAME "SBSA Generic Watchdog"
/* SBSA Generic Watchdog register definitions */
/* refresh frame */
#define SBSA_GWDT_WRR 0x000
/* control frame */
#define SBSA_GWDT_WCS 0x000
#define SBSA_GWDT_WOR 0x008
#define SBSA_GWDT_WCV 0x010
/* refresh/control frame */
#define SBSA_GWDT_W_IIDR 0xfcc
#define SBSA_GWDT_IDR 0xfd0
/* Watchdog Control and Status Register */
#define SBSA_GWDT_WCS_EN BIT(0)
#define SBSA_GWDT_WCS_WS0 BIT(1)
#define SBSA_GWDT_WCS_WS1 BIT(2)
#define SBSA_GWDT_VERSION_MASK 0xF
#define SBSA_GWDT_VERSION_SHIFT 16
/**
* struct sbsa_gwdt - Internal representation of the SBSA GWDT
* @wdd: kernel watchdog_device structure
* @clk: store the System Counter clock frequency, in Hz.
* @version: store the architecture version
* @refresh_base: Virtual address of the watchdog refresh frame
* @control_base: Virtual address of the watchdog control frame
*/
struct sbsa_gwdt {
struct watchdog_device wdd;
u32 clk;
int version;
void __iomem *refresh_base;
void __iomem *control_base;
};
#define DEFAULT_TIMEOUT 10 /* seconds */
static unsigned int timeout;
module_param(timeout, uint, 0);
MODULE_PARM_DESC(timeout,
"Watchdog timeout in seconds. (>=0, default="
__MODULE_STRING(DEFAULT_TIMEOUT) ")");
/*
* action refers to action taken when watchdog gets WS0
* 0 = skip
* 1 = panic
* defaults to skip (0)
*/
static int action;
module_param(action, int, 0);
MODULE_PARM_DESC(action, "after watchdog gets WS0 interrupt, do: "
"0 = skip(*) 1 = panic");
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, S_IRUGO);
MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
/*
* Arm Base System Architecture 1.0 introduces watchdog v1 which
* increases the length of the watchdog offset register to 48 bits.
* - For version 0: WOR is 32 bits;
* - For version 1: WOR is 48 bits which comprises the register
* offset 0x8 and 0xC, and the bits [63:48] are reserved which are
* Read-As-Zero and Writes-Ignored.
*/
static u64 sbsa_gwdt_reg_read(struct sbsa_gwdt *gwdt)
{
if (gwdt->version == 0)
return readl(gwdt->control_base + SBSA_GWDT_WOR);
else
return lo_hi_readq(gwdt->control_base + SBSA_GWDT_WOR);
}
static void sbsa_gwdt_reg_write(u64 val, struct sbsa_gwdt *gwdt)
{
if (gwdt->version == 0)
writel((u32)val, gwdt->control_base + SBSA_GWDT_WOR);
else
lo_hi_writeq(val, gwdt->control_base + SBSA_GWDT_WOR);
}
/*
* watchdog operation functions
*/
static int sbsa_gwdt_set_timeout(struct watchdog_device *wdd,
unsigned int timeout)
{
struct sbsa_gwdt *gwdt = watchdog_get_drvdata(wdd);
wdd->timeout = timeout;
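/* Program WOR with a value clamped to the hardware limit; wdd->timeout above keeps the full requested value. */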
timeout = clamp_t(unsigned int, timeout, 1, wdd->max_hw_heartbeat_ms / 1000);
if (action)
sbsa_gwdt_reg_write(gwdt->clk * timeout, gwdt);
else
/*
* In the single stage mode, the first signal (WS0) is ignored and
* the timeout is (WOR * 2), so WOR should be configured to half
* the timeout value.
*/
sbsa_gwdt_reg_write(gwdt->clk / 2 * timeout, gwdt);
return 0;
}
static unsigned int sbsa_gwdt_get_timeleft(struct watchdog_device *wdd)
{
struct sbsa_gwdt *gwdt = watchdog_get_drvdata(wdd);
u64 timeleft = 0;
/*
* In the single stage mode, if WS0 is deasserted
* (watchdog is in the first stage),
* timeleft = WOR + (WCV - system counter)
*/
if (!action &&
!(readl(gwdt->control_base + SBSA_GWDT_WCS) & SBSA_GWDT_WCS_WS0))
timeleft += sbsa_gwdt_reg_read(gwdt);
timeleft += lo_hi_readq(gwdt->control_base + SBSA_GWDT_WCV) -
arch_timer_read_counter();
do_div(timeleft, gwdt->clk);
return timeleft;
}
static int sbsa_gwdt_keepalive(struct watchdog_device *wdd)
{
struct sbsa_gwdt *gwdt = watchdog_get_drvdata(wdd);
/*
* Writing WRR performs an explicit watchdog refresh.
* You can write anything (like 0).
*/
writel(0, gwdt->refresh_base + SBSA_GWDT_WRR);
return 0;
}
static void sbsa_gwdt_get_version(struct watchdog_device *wdd)
{
struct sbsa_gwdt *gwdt = watchdog_get_drvdata(wdd);
int ver;
ver = readl(gwdt->control_base + SBSA_GWDT_W_IIDR);
ver = (ver >> SBSA_GWDT_VERSION_SHIFT) & SBSA_GWDT_VERSION_MASK;
gwdt->version = ver;
}
static int sbsa_gwdt_start(struct watchdog_device *wdd)
{
struct sbsa_gwdt *gwdt = watchdog_get_drvdata(wdd);
/* writing WCS will cause an explicit watchdog refresh */
writel(SBSA_GWDT_WCS_EN, gwdt->control_base + SBSA_GWDT_WCS);
return 0;
}
static int sbsa_gwdt_stop(struct watchdog_device *wdd)
{
struct sbsa_gwdt *gwdt = watchdog_get_drvdata(wdd);
/* Simply write 0 to WCS to clean WCS_EN bit */
writel(0, gwdt->control_base + SBSA_GWDT_WCS);
return 0;
}
static irqreturn_t sbsa_gwdt_interrupt(int irq, void *dev_id)
{
panic(WATCHDOG_NAME " timeout");
return IRQ_HANDLED;
}
static const struct watchdog_info sbsa_gwdt_info = {
.identity = WATCHDOG_NAME,
.options = WDIOF_SETTIMEOUT |
WDIOF_KEEPALIVEPING |
WDIOF_MAGICCLOSE |
WDIOF_CARDRESET,
};
static const struct watchdog_ops sbsa_gwdt_ops = {
.owner = THIS_MODULE,
.start = sbsa_gwdt_start,
.stop = sbsa_gwdt_stop,
.ping = sbsa_gwdt_keepalive,
.set_timeout = sbsa_gwdt_set_timeout,
.get_timeleft = sbsa_gwdt_get_timeleft,
};
static int sbsa_gwdt_probe(struct platform_device *pdev)
{
void __iomem *rf_base, *cf_base;
struct device *dev = &pdev->dev;
struct watchdog_device *wdd;
struct sbsa_gwdt *gwdt;
int ret, irq;
u32 status;
gwdt = devm_kzalloc(dev, sizeof(*gwdt), GFP_KERNEL);
if (!gwdt)
return -ENOMEM;
platform_set_drvdata(pdev, gwdt);
cf_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cf_base))
return PTR_ERR(cf_base);
rf_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(rf_base))
return PTR_ERR(rf_base);
/*
* Get the frequency of the system counter from the cp15 interface of the
* ARM Generic Timer. We don't need to check it, because if it returned "0"
* the system would have panicked at a very early boot stage.
*/
gwdt->clk = arch_timer_get_cntfrq();
gwdt->refresh_base = rf_base;
gwdt->control_base = cf_base;
wdd = &gwdt->wdd;
wdd->parent = dev;
wdd->info = &sbsa_gwdt_info;
wdd->ops = &sbsa_gwdt_ops;
wdd->min_timeout = 1;
wdd->timeout = DEFAULT_TIMEOUT;
watchdog_set_drvdata(wdd, gwdt);
watchdog_set_nowayout(wdd, nowayout);
sbsa_gwdt_get_version(wdd);
if (gwdt->version == 0)
wdd->max_hw_heartbeat_ms = U32_MAX / gwdt->clk * 1000;
else
wdd->max_hw_heartbeat_ms = GENMASK_ULL(47, 0) / gwdt->clk * 1000;
status = readl(cf_base + SBSA_GWDT_WCS);
if (status & SBSA_GWDT_WCS_WS1) {
dev_warn(dev, "System reset by WDT.\n");
wdd->bootstatus |= WDIOF_CARDRESET;
}
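/* If firmware left the watchdog enabled, tell the core that it is already running. */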
if (status & SBSA_GWDT_WCS_EN)
set_bit(WDOG_HW_RUNNING, &wdd->status);
if (action) {
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
action = 0;
dev_warn(dev, "unable to get ws0 interrupt.\n");
} else {
/*
* In case there is a pending ws0 interrupt, just ping
* the watchdog before registering the interrupt routine
*/
writel(0, rf_base + SBSA_GWDT_WRR);
if (devm_request_irq(dev, irq, sbsa_gwdt_interrupt, 0,
pdev->name, gwdt)) {
action = 0;
dev_warn(dev, "unable to request IRQ %d.\n",
irq);
}
}
if (!action)
dev_warn(dev, "falling back to single stage mode.\n");
}
/*
* In the single stage mode, the first signal (WS0) is ignored,
* the timeout is (WOR * 2), so the maximum timeout should be doubled.
*/
if (!action)
wdd->max_hw_heartbeat_ms *= 2;
watchdog_init_timeout(wdd, timeout, dev);
/*
* Update timeout to WOR.
* Because of the explicit watchdog refresh mechanism,
* it also acts as a ping if the watchdog is enabled.
*/
sbsa_gwdt_set_timeout(wdd, wdd->timeout);
watchdog_stop_on_reboot(wdd);
ret = devm_watchdog_register_device(dev, wdd);
if (ret)
return ret;
dev_info(dev, "Initialized with %ds timeout @ %u Hz, action=%d.%s\n",
wdd->timeout, gwdt->clk, action,
status & SBSA_GWDT_WCS_EN ? " [enabled]" : "");
return 0;
}
/* Disable watchdog if it is active during suspend */
static int __maybe_unused sbsa_gwdt_suspend(struct device *dev)
{
struct sbsa_gwdt *gwdt = dev_get_drvdata(dev);
if (watchdog_hw_running(&gwdt->wdd))
sbsa_gwdt_stop(&gwdt->wdd);
return 0;
}
/* Enable watchdog if necessary */
static int __maybe_unused sbsa_gwdt_resume(struct device *dev)
{
struct sbsa_gwdt *gwdt = dev_get_drvdata(dev);
if (watchdog_hw_running(&gwdt->wdd))
sbsa_gwdt_start(&gwdt->wdd);
return 0;
}
static const struct dev_pm_ops sbsa_gwdt_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(sbsa_gwdt_suspend, sbsa_gwdt_resume)
};
static const struct of_device_id sbsa_gwdt_of_match[] = {
{ .compatible = "arm,sbsa-gwdt", },
{},
};
MODULE_DEVICE_TABLE(of, sbsa_gwdt_of_match);
static const struct platform_device_id sbsa_gwdt_pdev_match[] = {
{ .name = DRV_NAME, },
{},
};
MODULE_DEVICE_TABLE(platform, sbsa_gwdt_pdev_match);
static struct platform_driver sbsa_gwdt_driver = {
.driver = {
.name = DRV_NAME,
.pm = &sbsa_gwdt_pm_ops,
.of_match_table = sbsa_gwdt_of_match,
},
.probe = sbsa_gwdt_probe,
.id_table = sbsa_gwdt_pdev_match,
};
module_platform_driver(sbsa_gwdt_driver);
MODULE_DESCRIPTION("SBSA Generic Watchdog Driver");
MODULE_AUTHOR("Fu Wei <[email protected]>");
MODULE_AUTHOR("Suravee Suthikulpanit <[email protected]>");
MODULE_AUTHOR("Al Stone <[email protected]>");
MODULE_AUTHOR("Timur Tabi <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/watchdog/sbsa_gwdt.c |