// SPDX-License-Identifier: GPL-2.0-or-later
/*
* NXP SC18IS602/603 SPI driver
*
* Copyright (C) Guenter Roeck <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/spi/spi.h>
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/platform_data/sc18is602.h>
#include <linux/gpio/consumer.h>
enum chips { sc18is602, sc18is602b, sc18is603 };
#define SC18IS602_BUFSIZ 200
#define SC18IS602_CLOCK 7372000
#define SC18IS602_MODE_CPHA BIT(2)
#define SC18IS602_MODE_CPOL BIT(3)
#define SC18IS602_MODE_LSB_FIRST BIT(5)
#define SC18IS602_MODE_CLOCK_DIV_4 0x0
#define SC18IS602_MODE_CLOCK_DIV_16 0x1
#define SC18IS602_MODE_CLOCK_DIV_64 0x2
#define SC18IS602_MODE_CLOCK_DIV_128 0x3
struct sc18is602 {
struct spi_controller *host;
struct device *dev;
u8 ctrl;
u32 freq;
u32 speed;
/* I2C data */
struct i2c_client *client;
enum chips id;
u8 buffer[SC18IS602_BUFSIZ + 1];
int tlen; /* Data queued for tx in buffer */
int rindex; /* Receive data index in buffer */
struct gpio_desc *reset;
};
static int sc18is602_wait_ready(struct sc18is602 *hw, int len)
{
int i, err;
int usecs = 1000000 * len / hw->speed + 1;
u8 dummy[1];
for (i = 0; i < 10; i++) {
err = i2c_master_recv(hw->client, dummy, 1);
if (err >= 0)
return 0;
usleep_range(usecs, usecs * 2);
}
return -ETIMEDOUT;
}
static int sc18is602_txrx(struct sc18is602 *hw, struct spi_message *msg,
struct spi_transfer *t, bool do_transfer)
{
unsigned int len = t->len;
int ret;
if (hw->tlen == 0) {
/* First byte (I2C command) is chip select */
hw->buffer[0] = 1 << spi_get_chipselect(msg->spi, 0);
hw->tlen = 1;
hw->rindex = 0;
}
/*
* We can not immediately send data to the chip, since each I2C message
* resembles a full SPI message (from CS active to CS inactive).
* Enqueue messages up to the first read or until do_transfer is true.
*/
if (t->tx_buf) {
memcpy(&hw->buffer[hw->tlen], t->tx_buf, len);
hw->tlen += len;
if (t->rx_buf)
do_transfer = true;
else
hw->rindex = hw->tlen - 1;
} else if (t->rx_buf) {
/*
* For receive-only transfers we still need to perform a dummy
* write to receive data from the SPI chip.
* Read data starts at the end of transmit data (minus 1 to
* account for CS).
*/
hw->rindex = hw->tlen - 1;
memset(&hw->buffer[hw->tlen], 0, len);
hw->tlen += len;
do_transfer = true;
}
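/*
 * Illustrative example (assumed transfer, not from the datasheet): a
 * 2-byte write followed by a 3-byte read on CS1 is queued as
 *   buffer[0]    = 0x02  (chip-select bitmask, 1 << 1)
 *   buffer[1..2] = tx data
 *   buffer[3..5] = 0x00 dummy bytes that clock in the rx data
 * with rindex = 2, so after the I2C read back the rx data is copied
 * from &buffer[rindex].
 */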
if (do_transfer && hw->tlen > 1) {
ret = sc18is602_wait_ready(hw, SC18IS602_BUFSIZ);
if (ret < 0)
return ret;
ret = i2c_master_send(hw->client, hw->buffer, hw->tlen);
if (ret < 0)
return ret;
if (ret != hw->tlen)
return -EIO;
if (t->rx_buf) {
int rlen = hw->rindex + len;
ret = sc18is602_wait_ready(hw, hw->tlen);
if (ret < 0)
return ret;
ret = i2c_master_recv(hw->client, hw->buffer, rlen);
if (ret < 0)
return ret;
if (ret != rlen)
return -EIO;
memcpy(t->rx_buf, &hw->buffer[hw->rindex], len);
}
hw->tlen = 0;
}
return len;
}
static int sc18is602_setup_transfer(struct sc18is602 *hw, u32 hz, u8 mode)
{
u8 ctrl = 0;
int ret;
if (mode & SPI_CPHA)
ctrl |= SC18IS602_MODE_CPHA;
if (mode & SPI_CPOL)
ctrl |= SC18IS602_MODE_CPOL;
if (mode & SPI_LSB_FIRST)
ctrl |= SC18IS602_MODE_LSB_FIRST;
/* Find the closest clock speed */
if (hz >= hw->freq / 4) {
ctrl |= SC18IS602_MODE_CLOCK_DIV_4;
hw->speed = hw->freq / 4;
} else if (hz >= hw->freq / 16) {
ctrl |= SC18IS602_MODE_CLOCK_DIV_16;
hw->speed = hw->freq / 16;
} else if (hz >= hw->freq / 64) {
ctrl |= SC18IS602_MODE_CLOCK_DIV_64;
hw->speed = hw->freq / 64;
} else {
ctrl |= SC18IS602_MODE_CLOCK_DIV_128;
hw->speed = hw->freq / 128;
}
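/*
 * Worked example (illustrative only): with the default 7.372 MHz
 * clock, a requested speed of 1 MHz is below 7372000 / 4 = 1843000 Hz
 * but at or above 7372000 / 16 = 460750 Hz, so CLOCK_DIV_16 is chosen
 * and the effective speed becomes 460750 Hz.
 */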
/*
* Don't do anything if the control value did not change. The initial
* value of 0xff for hw->ctrl ensures that the correct mode will be set
* with the first call to this function.
*/
if (ctrl == hw->ctrl)
return 0;
ret = i2c_smbus_write_byte_data(hw->client, 0xf0, ctrl);
if (ret < 0)
return ret;
hw->ctrl = ctrl;
return 0;
}
static int sc18is602_check_transfer(struct spi_device *spi,
struct spi_transfer *t, int tlen)
{
if (t && t->len + tlen > SC18IS602_BUFSIZ + 1)
return -EINVAL;
return 0;
}
static int sc18is602_transfer_one(struct spi_controller *host,
struct spi_message *m)
{
struct sc18is602 *hw = spi_controller_get_devdata(host);
struct spi_device *spi = m->spi;
struct spi_transfer *t;
int status = 0;
hw->tlen = 0;
list_for_each_entry(t, &m->transfers, transfer_list) {
bool do_transfer;
status = sc18is602_check_transfer(spi, t, hw->tlen);
if (status < 0)
break;
status = sc18is602_setup_transfer(hw, t->speed_hz, spi->mode);
if (status < 0)
break;
do_transfer = t->cs_change || list_is_last(&t->transfer_list,
&m->transfers);
if (t->len) {
status = sc18is602_txrx(hw, m, t, do_transfer);
if (status < 0)
break;
m->actual_length += status;
}
status = 0;
spi_transfer_delay_exec(t);
}
m->status = status;
spi_finalize_current_message(host);
return status;
}
static size_t sc18is602_max_transfer_size(struct spi_device *spi)
{
return SC18IS602_BUFSIZ;
}
static int sc18is602_setup(struct spi_device *spi)
{
struct sc18is602 *hw = spi_controller_get_devdata(spi->controller);
/* SC18IS602 does not support CS2 */
if (hw->id == sc18is602 && (spi_get_chipselect(spi, 0) == 2))
return -ENXIO;
return 0;
}
static int sc18is602_probe(struct i2c_client *client)
{
const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct device *dev = &client->dev;
struct device_node *np = dev->of_node;
struct sc18is602_platform_data *pdata = dev_get_platdata(dev);
struct sc18is602 *hw;
struct spi_controller *host;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C |
I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
return -EINVAL;
host = devm_spi_alloc_host(dev, sizeof(struct sc18is602));
if (!host)
return -ENOMEM;
hw = spi_controller_get_devdata(host);
i2c_set_clientdata(client, hw);
/* assert reset and then release */
hw->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(hw->reset))
return PTR_ERR(hw->reset);
gpiod_set_value_cansleep(hw->reset, 0);
hw->host = host;
hw->client = client;
hw->dev = dev;
hw->ctrl = 0xff;
if (client->dev.of_node)
hw->id = (uintptr_t)of_device_get_match_data(&client->dev);
else
hw->id = id->driver_data;
switch (hw->id) {
case sc18is602:
case sc18is602b:
host->num_chipselect = 4;
hw->freq = SC18IS602_CLOCK;
break;
case sc18is603:
host->num_chipselect = 2;
if (pdata) {
hw->freq = pdata->clock_frequency;
} else {
const __be32 *val;
int len;
val = of_get_property(np, "clock-frequency", &len);
if (val && len >= sizeof(__be32))
hw->freq = be32_to_cpup(val);
}
if (!hw->freq)
hw->freq = SC18IS602_CLOCK;
break;
}
host->bus_num = np ? -1 : client->adapter->nr;
host->mode_bits = SPI_CPHA | SPI_CPOL | SPI_LSB_FIRST;
host->bits_per_word_mask = SPI_BPW_MASK(8);
host->setup = sc18is602_setup;
host->transfer_one_message = sc18is602_transfer_one;
host->max_transfer_size = sc18is602_max_transfer_size;
host->max_message_size = sc18is602_max_transfer_size;
host->dev.of_node = np;
host->min_speed_hz = hw->freq / 128;
host->max_speed_hz = hw->freq / 4;
return devm_spi_register_controller(dev, host);
}
static const struct i2c_device_id sc18is602_id[] = {
{ "sc18is602", sc18is602 },
{ "sc18is602b", sc18is602b },
{ "sc18is603", sc18is603 },
{ }
};
MODULE_DEVICE_TABLE(i2c, sc18is602_id);
static const struct of_device_id sc18is602_of_match[] __maybe_unused = {
{
.compatible = "nxp,sc18is602",
.data = (void *)sc18is602
},
{
.compatible = "nxp,sc18is602b",
.data = (void *)sc18is602b
},
{
.compatible = "nxp,sc18is603",
.data = (void *)sc18is603
},
{ },
};
MODULE_DEVICE_TABLE(of, sc18is602_of_match);
static struct i2c_driver sc18is602_driver = {
.driver = {
.name = "sc18is602",
.of_match_table = of_match_ptr(sc18is602_of_match),
},
.probe = sc18is602_probe,
.id_table = sc18is602_id,
};
module_i2c_driver(sc18is602_driver);
MODULE_DESCRIPTION("SC18IS602/603 SPI Host Driver");
MODULE_AUTHOR("Guenter Roeck");
MODULE_LICENSE("GPL");
/* linux-master: drivers/spi/spi-sc18is602.c */
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Spreadtrum Communications Inc.
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/sprd-dma.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#define SPRD_SPI_TXD 0x0
#define SPRD_SPI_CLKD 0x4
#define SPRD_SPI_CTL0 0x8
#define SPRD_SPI_CTL1 0xc
#define SPRD_SPI_CTL2 0x10
#define SPRD_SPI_CTL3 0x14
#define SPRD_SPI_CTL4 0x18
#define SPRD_SPI_CTL5 0x1c
#define SPRD_SPI_INT_EN 0x20
#define SPRD_SPI_INT_CLR 0x24
#define SPRD_SPI_INT_RAW_STS 0x28
#define SPRD_SPI_INT_MASK_STS 0x2c
#define SPRD_SPI_STS1 0x30
#define SPRD_SPI_STS2 0x34
#define SPRD_SPI_DSP_WAIT 0x38
#define SPRD_SPI_STS3 0x3c
#define SPRD_SPI_CTL6 0x40
#define SPRD_SPI_STS4 0x44
#define SPRD_SPI_FIFO_RST 0x48
#define SPRD_SPI_CTL7 0x4c
#define SPRD_SPI_STS5 0x50
#define SPRD_SPI_CTL8 0x54
#define SPRD_SPI_CTL9 0x58
#define SPRD_SPI_CTL10 0x5c
#define SPRD_SPI_CTL11 0x60
#define SPRD_SPI_CTL12 0x64
#define SPRD_SPI_STS6 0x68
#define SPRD_SPI_STS7 0x6c
#define SPRD_SPI_STS8 0x70
#define SPRD_SPI_STS9 0x74
/* Bits & mask definition for register CTL0 */
#define SPRD_SPI_SCK_REV BIT(13)
#define SPRD_SPI_NG_TX BIT(1)
#define SPRD_SPI_NG_RX BIT(0)
#define SPRD_SPI_CHNL_LEN_MASK GENMASK(4, 0)
#define SPRD_SPI_CSN_MASK GENMASK(11, 8)
#define SPRD_SPI_CS0_VALID BIT(8)
/* Bits & mask definition for register SPI_INT_EN */
#define SPRD_SPI_TX_END_INT_EN BIT(8)
#define SPRD_SPI_RX_END_INT_EN BIT(9)
/* Bits & mask definition for register SPI_INT_RAW_STS */
#define SPRD_SPI_TX_END_RAW BIT(8)
#define SPRD_SPI_RX_END_RAW BIT(9)
/* Bits & mask definition for register SPI_INT_CLR */
#define SPRD_SPI_TX_END_CLR BIT(8)
#define SPRD_SPI_RX_END_CLR BIT(9)
/* Bits & mask definition for register INT_MASK_STS */
#define SPRD_SPI_MASK_RX_END BIT(9)
#define SPRD_SPI_MASK_TX_END BIT(8)
/* Bits & mask definition for register STS2 */
#define SPRD_SPI_TX_BUSY BIT(8)
/* Bits & mask definition for register CTL1 */
#define SPRD_SPI_RX_MODE BIT(12)
#define SPRD_SPI_TX_MODE BIT(13)
#define SPRD_SPI_RTX_MD_MASK GENMASK(13, 12)
/* Bits & mask definition for register CTL2 */
#define SPRD_SPI_DMA_EN BIT(6)
/* Bits & mask definition for register CTL4 */
#define SPRD_SPI_START_RX BIT(9)
#define SPRD_SPI_ONLY_RECV_MASK GENMASK(8, 0)
/* Bits & mask definition for register SPI_INT_CLR */
#define SPRD_SPI_RX_END_INT_CLR BIT(9)
#define SPRD_SPI_TX_END_INT_CLR BIT(8)
/* Bits & mask definition for register SPI_INT_RAW */
#define SPRD_SPI_RX_END_IRQ BIT(9)
#define SPRD_SPI_TX_END_IRQ BIT(8)
/* Bits & mask definition for register CTL12 */
#define SPRD_SPI_SW_RX_REQ BIT(0)
#define SPRD_SPI_SW_TX_REQ BIT(1)
/* Bits & mask definition for register CTL7 */
#define SPRD_SPI_DATA_LINE2_EN BIT(15)
#define SPRD_SPI_MODE_MASK GENMASK(5, 3)
#define SPRD_SPI_MODE_OFFSET 3
#define SPRD_SPI_3WIRE_MODE 4
#define SPRD_SPI_4WIRE_MODE 0
/* Bits & mask definition for register CTL8 */
#define SPRD_SPI_TX_MAX_LEN_MASK GENMASK(19, 0)
#define SPRD_SPI_TX_LEN_H_MASK GENMASK(3, 0)
#define SPRD_SPI_TX_LEN_H_OFFSET 16
/* Bits & mask definition for register CTL9 */
#define SPRD_SPI_TX_LEN_L_MASK GENMASK(15, 0)
/* Bits & mask definition for register CTL10 */
#define SPRD_SPI_RX_MAX_LEN_MASK GENMASK(19, 0)
#define SPRD_SPI_RX_LEN_H_MASK GENMASK(3, 0)
#define SPRD_SPI_RX_LEN_H_OFFSET 16
/* Bits & mask definition for register CTL11 */
#define SPRD_SPI_RX_LEN_L_MASK GENMASK(15, 0)
/* Default & maximum word delay cycles */
#define SPRD_SPI_MIN_DELAY_CYCLE 14
#define SPRD_SPI_MAX_DELAY_CYCLE 130
#define SPRD_SPI_FIFO_SIZE 32
#define SPRD_SPI_CHIP_CS_NUM 0x4
#define SPRD_SPI_CHNL_LEN 2
#define SPRD_SPI_DEFAULT_SOURCE 26000000
#define SPRD_SPI_MAX_SPEED_HZ 48000000
#define SPRD_SPI_AUTOSUSPEND_DELAY 100
#define SPRD_SPI_DMA_STEP 8
enum sprd_spi_dma_channel {
SPRD_SPI_RX,
SPRD_SPI_TX,
SPRD_SPI_MAX,
};
struct sprd_spi_dma {
bool enable;
struct dma_chan *dma_chan[SPRD_SPI_MAX];
enum dma_slave_buswidth width;
u32 fragmens_len;
u32 rx_len;
};
struct sprd_spi {
void __iomem *base;
phys_addr_t phy_base;
struct device *dev;
struct clk *clk;
int irq;
u32 src_clk;
u32 hw_mode;
u32 trans_len;
u32 trans_mode;
u32 word_delay;
u32 hw_speed_hz;
u32 len;
int status;
struct sprd_spi_dma dma;
struct completion xfer_completion;
const void *tx_buf;
void *rx_buf;
int (*read_bufs)(struct sprd_spi *ss, u32 len);
int (*write_bufs)(struct sprd_spi *ss, u32 len);
};
static u32 sprd_spi_transfer_max_timeout(struct sprd_spi *ss,
struct spi_transfer *t)
{
/*
* The time spent on transmission of the full FIFO data is the maximum
* SPI transmission time.
*/
u32 size = t->bits_per_word * SPRD_SPI_FIFO_SIZE;
u32 bit_time_us = DIV_ROUND_UP(USEC_PER_SEC, ss->hw_speed_hz);
u32 total_time_us = size * bit_time_us;
/*
* There is an interval between consecutive data words in our SPI
* hardware, so the total transmission time needs to include that
* interval time.
*/
u32 interval_cycle = SPRD_SPI_FIFO_SIZE * ss->word_delay;
u32 interval_time_us = DIV_ROUND_UP(interval_cycle * USEC_PER_SEC,
ss->src_clk);
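/*
 * Illustrative numbers (assumed, not chip-specific): with
 * bits_per_word = 8, hw_speed_hz = 1 MHz, word_delay = 14 cycles and
 * src_clk = 26 MHz, size = 8 * 32 = 256 bits, bit_time_us = 1,
 * total_time_us = 256, interval_cycle = 32 * 14 = 448 and
 * interval_time_us = DIV_ROUND_UP(448 * USEC_PER_SEC, 26000000) = 18,
 * so the timeout is roughly 274 us.
 */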
return total_time_us + interval_time_us;
}
static int sprd_spi_wait_for_tx_end(struct sprd_spi *ss, struct spi_transfer *t)
{
u32 val, us;
int ret;
us = sprd_spi_transfer_max_timeout(ss, t);
ret = readl_relaxed_poll_timeout(ss->base + SPRD_SPI_INT_RAW_STS, val,
val & SPRD_SPI_TX_END_IRQ, 0, us);
if (ret) {
dev_err(ss->dev, "SPI error, spi send timeout!\n");
return ret;
}
ret = readl_relaxed_poll_timeout(ss->base + SPRD_SPI_STS2, val,
!(val & SPRD_SPI_TX_BUSY), 0, us);
if (ret) {
dev_err(ss->dev, "SPI error, spi busy timeout!\n");
return ret;
}
writel_relaxed(SPRD_SPI_TX_END_INT_CLR, ss->base + SPRD_SPI_INT_CLR);
return 0;
}
static int sprd_spi_wait_for_rx_end(struct sprd_spi *ss, struct spi_transfer *t)
{
u32 val, us;
int ret;
us = sprd_spi_transfer_max_timeout(ss, t);
ret = readl_relaxed_poll_timeout(ss->base + SPRD_SPI_INT_RAW_STS, val,
val & SPRD_SPI_RX_END_IRQ, 0, us);
if (ret) {
dev_err(ss->dev, "SPI error, spi rx timeout!\n");
return ret;
}
writel_relaxed(SPRD_SPI_RX_END_INT_CLR, ss->base + SPRD_SPI_INT_CLR);
return 0;
}
static void sprd_spi_tx_req(struct sprd_spi *ss)
{
writel_relaxed(SPRD_SPI_SW_TX_REQ, ss->base + SPRD_SPI_CTL12);
}
static void sprd_spi_rx_req(struct sprd_spi *ss)
{
writel_relaxed(SPRD_SPI_SW_RX_REQ, ss->base + SPRD_SPI_CTL12);
}
static void sprd_spi_enter_idle(struct sprd_spi *ss)
{
u32 val = readl_relaxed(ss->base + SPRD_SPI_CTL1);
val &= ~SPRD_SPI_RTX_MD_MASK;
writel_relaxed(val, ss->base + SPRD_SPI_CTL1);
}
static void sprd_spi_set_transfer_bits(struct sprd_spi *ss, u32 bits)
{
u32 val = readl_relaxed(ss->base + SPRD_SPI_CTL0);
/* Set the valid bits for every transaction */
val &= ~(SPRD_SPI_CHNL_LEN_MASK << SPRD_SPI_CHNL_LEN);
val |= bits << SPRD_SPI_CHNL_LEN;
writel_relaxed(val, ss->base + SPRD_SPI_CTL0);
}
static void sprd_spi_set_tx_length(struct sprd_spi *ss, u32 length)
{
u32 val = readl_relaxed(ss->base + SPRD_SPI_CTL8);
length &= SPRD_SPI_TX_MAX_LEN_MASK;
val &= ~SPRD_SPI_TX_LEN_H_MASK;
val |= length >> SPRD_SPI_TX_LEN_H_OFFSET;
writel_relaxed(val, ss->base + SPRD_SPI_CTL8);
val = length & SPRD_SPI_TX_LEN_L_MASK;
writel_relaxed(val, ss->base + SPRD_SPI_CTL9);
}
static void sprd_spi_set_rx_length(struct sprd_spi *ss, u32 length)
{
u32 val = readl_relaxed(ss->base + SPRD_SPI_CTL10);
length &= SPRD_SPI_RX_MAX_LEN_MASK;
val &= ~SPRD_SPI_RX_LEN_H_MASK;
val |= length >> SPRD_SPI_RX_LEN_H_OFFSET;
writel_relaxed(val, ss->base + SPRD_SPI_CTL10);
val = length & SPRD_SPI_RX_LEN_L_MASK;
writel_relaxed(val, ss->base + SPRD_SPI_CTL11);
}
static void sprd_spi_chipselect(struct spi_device *sdev, bool cs)
{
struct spi_controller *sctlr = sdev->controller;
struct sprd_spi *ss = spi_controller_get_devdata(sctlr);
u32 val;
val = readl_relaxed(ss->base + SPRD_SPI_CTL0);
/* The SPI controller will pull down the CS pin if cs is 0 */
if (!cs) {
val &= ~SPRD_SPI_CS0_VALID;
writel_relaxed(val, ss->base + SPRD_SPI_CTL0);
} else {
val |= SPRD_SPI_CSN_MASK;
writel_relaxed(val, ss->base + SPRD_SPI_CTL0);
}
}
static int sprd_spi_write_only_receive(struct sprd_spi *ss, u32 len)
{
u32 val;
/* Clear the start receive bit and reset receive data number */
val = readl_relaxed(ss->base + SPRD_SPI_CTL4);
val &= ~(SPRD_SPI_START_RX | SPRD_SPI_ONLY_RECV_MASK);
writel_relaxed(val, ss->base + SPRD_SPI_CTL4);
/* Set the receive data length */
val = readl_relaxed(ss->base + SPRD_SPI_CTL4);
val |= len & SPRD_SPI_ONLY_RECV_MASK;
writel_relaxed(val, ss->base + SPRD_SPI_CTL4);
/* Trigger to receive data */
val = readl_relaxed(ss->base + SPRD_SPI_CTL4);
val |= SPRD_SPI_START_RX;
writel_relaxed(val, ss->base + SPRD_SPI_CTL4);
return len;
}
static int sprd_spi_write_bufs_u8(struct sprd_spi *ss, u32 len)
{
u8 *tx_p = (u8 *)ss->tx_buf;
int i;
for (i = 0; i < len; i++)
writeb_relaxed(tx_p[i], ss->base + SPRD_SPI_TXD);
ss->tx_buf += i;
return i;
}
static int sprd_spi_write_bufs_u16(struct sprd_spi *ss, u32 len)
{
u16 *tx_p = (u16 *)ss->tx_buf;
int i;
for (i = 0; i < len; i++)
writew_relaxed(tx_p[i], ss->base + SPRD_SPI_TXD);
ss->tx_buf += i << 1;
return i << 1;
}
static int sprd_spi_write_bufs_u32(struct sprd_spi *ss, u32 len)
{
u32 *tx_p = (u32 *)ss->tx_buf;
int i;
for (i = 0; i < len; i++)
writel_relaxed(tx_p[i], ss->base + SPRD_SPI_TXD);
ss->tx_buf += i << 2;
return i << 2;
}
static int sprd_spi_read_bufs_u8(struct sprd_spi *ss, u32 len)
{
u8 *rx_p = (u8 *)ss->rx_buf;
int i;
for (i = 0; i < len; i++)
rx_p[i] = readb_relaxed(ss->base + SPRD_SPI_TXD);
ss->rx_buf += i;
return i;
}
static int sprd_spi_read_bufs_u16(struct sprd_spi *ss, u32 len)
{
u16 *rx_p = (u16 *)ss->rx_buf;
int i;
for (i = 0; i < len; i++)
rx_p[i] = readw_relaxed(ss->base + SPRD_SPI_TXD);
ss->rx_buf += i << 1;
return i << 1;
}
static int sprd_spi_read_bufs_u32(struct sprd_spi *ss, u32 len)
{
u32 *rx_p = (u32 *)ss->rx_buf;
int i;
for (i = 0; i < len; i++)
rx_p[i] = readl_relaxed(ss->base + SPRD_SPI_TXD);
ss->rx_buf += i << 2;
return i << 2;
}
static int sprd_spi_txrx_bufs(struct spi_device *sdev, struct spi_transfer *t)
{
struct sprd_spi *ss = spi_controller_get_devdata(sdev->controller);
u32 trans_len = ss->trans_len, len;
int ret, write_size = 0, read_size = 0;
while (trans_len) {
len = trans_len > SPRD_SPI_FIFO_SIZE ? SPRD_SPI_FIFO_SIZE :
trans_len;
if (ss->trans_mode & SPRD_SPI_TX_MODE) {
sprd_spi_set_tx_length(ss, len);
write_size += ss->write_bufs(ss, len);
/*
* For 3-wire mode or dual TX line mode, we need
* to request the controller to transfer.
*/
if (ss->hw_mode & SPI_3WIRE || ss->hw_mode & SPI_TX_DUAL)
sprd_spi_tx_req(ss);
ret = sprd_spi_wait_for_tx_end(ss, t);
} else {
sprd_spi_set_rx_length(ss, len);
/*
* For 3-wire mode or dual TX line mode, we need
* to request the controller to read.
*/
if (ss->hw_mode & SPI_3WIRE || ss->hw_mode & SPI_TX_DUAL)
sprd_spi_rx_req(ss);
else
write_size += ss->write_bufs(ss, len);
ret = sprd_spi_wait_for_rx_end(ss, t);
}
if (ret)
goto complete;
if (ss->trans_mode & SPRD_SPI_RX_MODE)
read_size += ss->read_bufs(ss, len);
trans_len -= len;
}
if (ss->trans_mode & SPRD_SPI_TX_MODE)
ret = write_size;
else
ret = read_size;
complete:
sprd_spi_enter_idle(ss);
return ret;
}
static void sprd_spi_irq_enable(struct sprd_spi *ss)
{
u32 val;
/* Clear interrupt status before enabling interrupt. */
writel_relaxed(SPRD_SPI_TX_END_CLR | SPRD_SPI_RX_END_CLR,
ss->base + SPRD_SPI_INT_CLR);
/* Enable SPI interrupt only in DMA mode. */
val = readl_relaxed(ss->base + SPRD_SPI_INT_EN);
writel_relaxed(val | SPRD_SPI_TX_END_INT_EN |
SPRD_SPI_RX_END_INT_EN,
ss->base + SPRD_SPI_INT_EN);
}
static void sprd_spi_irq_disable(struct sprd_spi *ss)
{
writel_relaxed(0, ss->base + SPRD_SPI_INT_EN);
}
static void sprd_spi_dma_enable(struct sprd_spi *ss, bool enable)
{
u32 val = readl_relaxed(ss->base + SPRD_SPI_CTL2);
if (enable)
val |= SPRD_SPI_DMA_EN;
else
val &= ~SPRD_SPI_DMA_EN;
writel_relaxed(val, ss->base + SPRD_SPI_CTL2);
}
static int sprd_spi_dma_submit(struct dma_chan *dma_chan,
struct dma_slave_config *c,
struct sg_table *sg,
enum dma_transfer_direction dir)
{
struct dma_async_tx_descriptor *desc;
dma_cookie_t cookie;
unsigned long flags;
int ret;
ret = dmaengine_slave_config(dma_chan, c);
if (ret < 0)
return ret;
flags = SPRD_DMA_FLAGS(SPRD_DMA_CHN_MODE_NONE, SPRD_DMA_NO_TRG,
SPRD_DMA_FRAG_REQ, SPRD_DMA_TRANS_INT);
desc = dmaengine_prep_slave_sg(dma_chan, sg->sgl, sg->nents, dir, flags);
if (!desc)
return -ENODEV;
cookie = dmaengine_submit(desc);
if (dma_submit_error(cookie))
return dma_submit_error(cookie);
dma_async_issue_pending(dma_chan);
return 0;
}
static int sprd_spi_dma_rx_config(struct sprd_spi *ss, struct spi_transfer *t)
{
struct dma_chan *dma_chan = ss->dma.dma_chan[SPRD_SPI_RX];
struct dma_slave_config config = {
.src_addr = ss->phy_base,
.src_addr_width = ss->dma.width,
.dst_addr_width = ss->dma.width,
.dst_maxburst = ss->dma.fragmens_len,
};
int ret;
ret = sprd_spi_dma_submit(dma_chan, &config, &t->rx_sg, DMA_DEV_TO_MEM);
if (ret)
return ret;
return ss->dma.rx_len;
}
static int sprd_spi_dma_tx_config(struct sprd_spi *ss, struct spi_transfer *t)
{
struct dma_chan *dma_chan = ss->dma.dma_chan[SPRD_SPI_TX];
struct dma_slave_config config = {
.dst_addr = ss->phy_base,
.src_addr_width = ss->dma.width,
.dst_addr_width = ss->dma.width,
.src_maxburst = ss->dma.fragmens_len,
};
int ret;
ret = sprd_spi_dma_submit(dma_chan, &config, &t->tx_sg, DMA_MEM_TO_DEV);
if (ret)
return ret;
return t->len;
}
static int sprd_spi_dma_request(struct sprd_spi *ss)
{
ss->dma.dma_chan[SPRD_SPI_RX] = dma_request_chan(ss->dev, "rx_chn");
if (IS_ERR_OR_NULL(ss->dma.dma_chan[SPRD_SPI_RX]))
return dev_err_probe(ss->dev, PTR_ERR(ss->dma.dma_chan[SPRD_SPI_RX]),
"request RX DMA channel failed!\n");
ss->dma.dma_chan[SPRD_SPI_TX] = dma_request_chan(ss->dev, "tx_chn");
if (IS_ERR_OR_NULL(ss->dma.dma_chan[SPRD_SPI_TX])) {
dma_release_channel(ss->dma.dma_chan[SPRD_SPI_RX]);
return dev_err_probe(ss->dev, PTR_ERR(ss->dma.dma_chan[SPRD_SPI_TX]),
"request TX DMA channel failed!\n");
}
return 0;
}
static void sprd_spi_dma_release(struct sprd_spi *ss)
{
if (ss->dma.dma_chan[SPRD_SPI_RX])
dma_release_channel(ss->dma.dma_chan[SPRD_SPI_RX]);
if (ss->dma.dma_chan[SPRD_SPI_TX])
dma_release_channel(ss->dma.dma_chan[SPRD_SPI_TX]);
}
static int sprd_spi_dma_txrx_bufs(struct spi_device *sdev,
struct spi_transfer *t)
{
struct sprd_spi *ss = spi_master_get_devdata(sdev->master);
u32 trans_len = ss->trans_len;
int ret, write_size = 0;
reinit_completion(&ss->xfer_completion);
sprd_spi_irq_enable(ss);
if (ss->trans_mode & SPRD_SPI_TX_MODE) {
write_size = sprd_spi_dma_tx_config(ss, t);
sprd_spi_set_tx_length(ss, trans_len);
/*
* For 3-wire mode or dual TX line mode, we need
* to request the controller to transfer.
*/
if (ss->hw_mode & SPI_3WIRE || ss->hw_mode & SPI_TX_DUAL)
sprd_spi_tx_req(ss);
} else {
sprd_spi_set_rx_length(ss, trans_len);
/*
* For 3-wire mode or dual TX line mode, we need
* to request the controller to read.
*/
if (ss->hw_mode & SPI_3WIRE || ss->hw_mode & SPI_TX_DUAL)
sprd_spi_rx_req(ss);
else
write_size = ss->write_bufs(ss, trans_len);
}
if (write_size < 0) {
ret = write_size;
dev_err(ss->dev, "failed to write, ret = %d\n", ret);
goto trans_complete;
}
if (ss->trans_mode & SPRD_SPI_RX_MODE) {
/*
* Set up the DMA receive data length, which must be an
* integral multiple of the fragment length. When the length
* of the received data is less than the fragment length, DMA
* can instead be configured to receive the actual length.
*/
ss->dma.rx_len = t->len > ss->dma.fragmens_len ?
(t->len - t->len % ss->dma.fragmens_len) :
t->len;
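/*
 * Illustrative example (assumed values): with 8-bit words
 * (fragmens_len = 8) and t->len = 30 bytes, DMA receives
 * 24 bytes (30 - 30 % 8) and the remaining 6 bytes are read
 * out by the CPU in the RX-end interrupt handler.
 */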
ret = sprd_spi_dma_rx_config(ss, t);
if (ret < 0) {
dev_err(&sdev->dev,
"failed to configure rx DMA, ret = %d\n", ret);
goto trans_complete;
}
}
sprd_spi_dma_enable(ss, true);
wait_for_completion(&(ss->xfer_completion));
if (ss->trans_mode & SPRD_SPI_TX_MODE)
ret = write_size;
else
ret = ss->dma.rx_len;
trans_complete:
sprd_spi_dma_enable(ss, false);
sprd_spi_enter_idle(ss);
sprd_spi_irq_disable(ss);
return ret;
}
static void sprd_spi_set_speed(struct sprd_spi *ss, u32 speed_hz)
{
/*
* Per the SPI datasheet, the prescale calculation formula is:
* prescale = SPI source clock / (2 * SPI_freq) - 1;
*/
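/*
 * Worked example (illustrative): with a 26 MHz source clock and a
 * requested speed of 4 MHz, clk_div = DIV_ROUND_UP(26000000, 8000000)
 * - 1 = 3 and the real speed saved below is (26000000 / 2) / (3 + 1)
 * = 3250000 Hz.
 */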
u32 clk_div = DIV_ROUND_UP(ss->src_clk, speed_hz << 1) - 1;
/* Save the real hardware speed */
ss->hw_speed_hz = (ss->src_clk >> 1) / (clk_div + 1);
writel_relaxed(clk_div, ss->base + SPRD_SPI_CLKD);
}
static int sprd_spi_init_hw(struct sprd_spi *ss, struct spi_transfer *t)
{
struct spi_delay *d = &t->word_delay;
u16 word_delay, interval;
u32 val;
if (d->unit != SPI_DELAY_UNIT_SCK)
return -EINVAL;
val = readl_relaxed(ss->base + SPRD_SPI_CTL0);
val &= ~(SPRD_SPI_SCK_REV | SPRD_SPI_NG_TX | SPRD_SPI_NG_RX);
/* Set default chip selection, clock phase and clock polarity */
val |= ss->hw_mode & SPI_CPHA ? SPRD_SPI_NG_RX : SPRD_SPI_NG_TX;
val |= ss->hw_mode & SPI_CPOL ? SPRD_SPI_SCK_REV : 0;
writel_relaxed(val, ss->base + SPRD_SPI_CTL0);
/*
* Set the interval between two SPI frames; per the datasheet the
* interval calculation formula is:
* interval time (source clock cycles) = interval * 4 + 10.
*/
word_delay = clamp_t(u16, d->value, SPRD_SPI_MIN_DELAY_CYCLE,
SPRD_SPI_MAX_DELAY_CYCLE);
interval = DIV_ROUND_UP(word_delay - 10, 4);
ss->word_delay = interval * 4 + 10;
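/*
 * Worked example (illustrative): a requested word delay of 20 clock
 * cycles gives interval = DIV_ROUND_UP(20 - 10, 4) = 3, so the
 * effective delay programmed below is 3 * 4 + 10 = 22 source clock
 * cycles.
 */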
writel_relaxed(interval, ss->base + SPRD_SPI_CTL5);
/* Reset SPI fifo */
writel_relaxed(1, ss->base + SPRD_SPI_FIFO_RST);
writel_relaxed(0, ss->base + SPRD_SPI_FIFO_RST);
/* Set SPI work mode */
val = readl_relaxed(ss->base + SPRD_SPI_CTL7);
val &= ~SPRD_SPI_MODE_MASK;
if (ss->hw_mode & SPI_3WIRE)
val |= SPRD_SPI_3WIRE_MODE << SPRD_SPI_MODE_OFFSET;
else
val |= SPRD_SPI_4WIRE_MODE << SPRD_SPI_MODE_OFFSET;
if (ss->hw_mode & SPI_TX_DUAL)
val |= SPRD_SPI_DATA_LINE2_EN;
else
val &= ~SPRD_SPI_DATA_LINE2_EN;
writel_relaxed(val, ss->base + SPRD_SPI_CTL7);
return 0;
}
static int sprd_spi_setup_transfer(struct spi_device *sdev,
struct spi_transfer *t)
{
struct sprd_spi *ss = spi_controller_get_devdata(sdev->controller);
u8 bits_per_word = t->bits_per_word;
u32 val, mode = 0;
int ret;
ss->len = t->len;
ss->tx_buf = t->tx_buf;
ss->rx_buf = t->rx_buf;
ss->hw_mode = sdev->mode;
ret = sprd_spi_init_hw(ss, t);
if (ret)
return ret;
/* Set transfer speed and valid bits */
sprd_spi_set_speed(ss, t->speed_hz);
sprd_spi_set_transfer_bits(ss, bits_per_word);
if (bits_per_word > 16)
bits_per_word = round_up(bits_per_word, 16);
else
bits_per_word = round_up(bits_per_word, 8);
switch (bits_per_word) {
case 8:
ss->trans_len = t->len;
ss->read_bufs = sprd_spi_read_bufs_u8;
ss->write_bufs = sprd_spi_write_bufs_u8;
ss->dma.width = DMA_SLAVE_BUSWIDTH_1_BYTE;
ss->dma.fragmens_len = SPRD_SPI_DMA_STEP;
break;
case 16:
ss->trans_len = t->len >> 1;
ss->read_bufs = sprd_spi_read_bufs_u16;
ss->write_bufs = sprd_spi_write_bufs_u16;
ss->dma.width = DMA_SLAVE_BUSWIDTH_2_BYTES;
ss->dma.fragmens_len = SPRD_SPI_DMA_STEP << 1;
break;
case 32:
ss->trans_len = t->len >> 2;
ss->read_bufs = sprd_spi_read_bufs_u32;
ss->write_bufs = sprd_spi_write_bufs_u32;
ss->dma.width = DMA_SLAVE_BUSWIDTH_4_BYTES;
ss->dma.fragmens_len = SPRD_SPI_DMA_STEP << 2;
break;
default:
return -EINVAL;
}
/* Set transfer read or write mode */
val = readl_relaxed(ss->base + SPRD_SPI_CTL1);
val &= ~SPRD_SPI_RTX_MD_MASK;
if (t->tx_buf)
mode |= SPRD_SPI_TX_MODE;
if (t->rx_buf)
mode |= SPRD_SPI_RX_MODE;
writel_relaxed(val | mode, ss->base + SPRD_SPI_CTL1);
ss->trans_mode = mode;
/*
* If in receive-only mode, we need to trigger the SPI controller to
* receive data automatically.
*/
if (ss->trans_mode == SPRD_SPI_RX_MODE)
ss->write_bufs = sprd_spi_write_only_receive;
return 0;
}
static int sprd_spi_transfer_one(struct spi_controller *sctlr,
struct spi_device *sdev,
struct spi_transfer *t)
{
int ret;
ret = sprd_spi_setup_transfer(sdev, t);
if (ret)
goto setup_err;
if (sctlr->can_dma(sctlr, sdev, t))
ret = sprd_spi_dma_txrx_bufs(sdev, t);
else
ret = sprd_spi_txrx_bufs(sdev, t);
if (ret == t->len)
ret = 0;
else if (ret >= 0)
ret = -EREMOTEIO;
setup_err:
spi_finalize_current_transfer(sctlr);
return ret;
}
static irqreturn_t sprd_spi_handle_irq(int irq, void *data)
{
struct sprd_spi *ss = (struct sprd_spi *)data;
u32 val = readl_relaxed(ss->base + SPRD_SPI_INT_MASK_STS);
if (val & SPRD_SPI_MASK_TX_END) {
writel_relaxed(SPRD_SPI_TX_END_CLR, ss->base + SPRD_SPI_INT_CLR);
if (!(ss->trans_mode & SPRD_SPI_RX_MODE))
complete(&ss->xfer_completion);
return IRQ_HANDLED;
}
if (val & SPRD_SPI_MASK_RX_END) {
writel_relaxed(SPRD_SPI_RX_END_CLR, ss->base + SPRD_SPI_INT_CLR);
if (ss->dma.rx_len < ss->len) {
ss->rx_buf += ss->dma.rx_len;
ss->dma.rx_len +=
ss->read_bufs(ss, ss->len - ss->dma.rx_len);
}
complete(&ss->xfer_completion);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
static int sprd_spi_irq_init(struct platform_device *pdev, struct sprd_spi *ss)
{
int ret;
ss->irq = platform_get_irq(pdev, 0);
if (ss->irq < 0)
return ss->irq;
ret = devm_request_irq(&pdev->dev, ss->irq, sprd_spi_handle_irq,
0, pdev->name, ss);
if (ret)
dev_err(&pdev->dev, "failed to request spi irq %d, ret = %d\n",
ss->irq, ret);
return ret;
}
static int sprd_spi_clk_init(struct platform_device *pdev, struct sprd_spi *ss)
{
struct clk *clk_spi, *clk_parent;
clk_spi = devm_clk_get(&pdev->dev, "spi");
if (IS_ERR(clk_spi)) {
dev_warn(&pdev->dev, "can't get the spi clock\n");
clk_spi = NULL;
}
clk_parent = devm_clk_get(&pdev->dev, "source");
if (IS_ERR(clk_parent)) {
dev_warn(&pdev->dev, "can't get the source clock\n");
clk_parent = NULL;
}
ss->clk = devm_clk_get(&pdev->dev, "enable");
if (IS_ERR(ss->clk)) {
dev_err(&pdev->dev, "can't get the enable clock\n");
return PTR_ERR(ss->clk);
}
if (!clk_set_parent(clk_spi, clk_parent))
ss->src_clk = clk_get_rate(clk_spi);
else
ss->src_clk = SPRD_SPI_DEFAULT_SOURCE;
return 0;
}
static bool sprd_spi_can_dma(struct spi_controller *sctlr,
struct spi_device *spi, struct spi_transfer *t)
{
struct sprd_spi *ss = spi_controller_get_devdata(sctlr);
return ss->dma.enable && (t->len > SPRD_SPI_FIFO_SIZE);
}
static int sprd_spi_dma_init(struct platform_device *pdev, struct sprd_spi *ss)
{
int ret;
ret = sprd_spi_dma_request(ss);
if (ret) {
if (ret == -EPROBE_DEFER)
return ret;
dev_warn(&pdev->dev,
"failed to request dma, enter no dma mode, ret = %d\n",
ret);
return 0;
}
ss->dma.enable = true;
return 0;
}
static int sprd_spi_probe(struct platform_device *pdev)
{
struct spi_controller *sctlr;
struct resource *res;
struct sprd_spi *ss;
int ret;
pdev->id = of_alias_get_id(pdev->dev.of_node, "spi");
sctlr = spi_alloc_master(&pdev->dev, sizeof(*ss));
if (!sctlr)
return -ENOMEM;
ss = spi_controller_get_devdata(sctlr);
ss->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(ss->base)) {
ret = PTR_ERR(ss->base);
goto free_controller;
}
ss->phy_base = res->start;
ss->dev = &pdev->dev;
sctlr->dev.of_node = pdev->dev.of_node;
sctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE | SPI_TX_DUAL;
sctlr->bus_num = pdev->id;
sctlr->set_cs = sprd_spi_chipselect;
sctlr->transfer_one = sprd_spi_transfer_one;
sctlr->can_dma = sprd_spi_can_dma;
sctlr->auto_runtime_pm = true;
sctlr->max_speed_hz = min_t(u32, ss->src_clk >> 1,
SPRD_SPI_MAX_SPEED_HZ);
init_completion(&ss->xfer_completion);
platform_set_drvdata(pdev, sctlr);
ret = sprd_spi_clk_init(pdev, ss);
if (ret)
goto free_controller;
ret = sprd_spi_irq_init(pdev, ss);
if (ret)
goto free_controller;
ret = sprd_spi_dma_init(pdev, ss);
if (ret)
goto free_controller;
ret = clk_prepare_enable(ss->clk);
if (ret)
goto release_dma;
ret = pm_runtime_set_active(&pdev->dev);
if (ret < 0)
goto disable_clk;
pm_runtime_set_autosuspend_delay(&pdev->dev,
SPRD_SPI_AUTOSUSPEND_DELAY);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "failed to resume SPI controller\n");
goto err_rpm_put;
}
ret = devm_spi_register_controller(&pdev->dev, sctlr);
if (ret)
goto err_rpm_put;
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
return 0;
err_rpm_put:
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
disable_clk:
clk_disable_unprepare(ss->clk);
release_dma:
sprd_spi_dma_release(ss);
free_controller:
spi_controller_put(sctlr);
return ret;
}
static void sprd_spi_remove(struct platform_device *pdev)
{
struct spi_controller *sctlr = platform_get_drvdata(pdev);
struct sprd_spi *ss = spi_controller_get_devdata(sctlr);
int ret;
ret = pm_runtime_get_sync(ss->dev);
if (ret < 0)
dev_err(ss->dev, "failed to resume SPI controller\n");
spi_controller_suspend(sctlr);
if (ret >= 0) {
if (ss->dma.enable)
sprd_spi_dma_release(ss);
clk_disable_unprepare(ss->clk);
}
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
}
static int __maybe_unused sprd_spi_runtime_suspend(struct device *dev)
{
struct spi_controller *sctlr = dev_get_drvdata(dev);
struct sprd_spi *ss = spi_controller_get_devdata(sctlr);
if (ss->dma.enable)
sprd_spi_dma_release(ss);
clk_disable_unprepare(ss->clk);
return 0;
}
static int __maybe_unused sprd_spi_runtime_resume(struct device *dev)
{
struct spi_controller *sctlr = dev_get_drvdata(dev);
struct sprd_spi *ss = spi_controller_get_devdata(sctlr);
int ret;
ret = clk_prepare_enable(ss->clk);
if (ret)
return ret;
if (!ss->dma.enable)
return 0;
ret = sprd_spi_dma_request(ss);
if (ret)
clk_disable_unprepare(ss->clk);
return ret;
}
static const struct dev_pm_ops sprd_spi_pm_ops = {
SET_RUNTIME_PM_OPS(sprd_spi_runtime_suspend,
sprd_spi_runtime_resume, NULL)
};
static const struct of_device_id sprd_spi_of_match[] = {
{ .compatible = "sprd,sc9860-spi", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sprd_spi_of_match);
static struct platform_driver sprd_spi_driver = {
.driver = {
.name = "sprd-spi",
.of_match_table = sprd_spi_of_match,
.pm = &sprd_spi_pm_ops,
},
.probe = sprd_spi_probe,
.remove_new = sprd_spi_remove,
};
module_platform_driver(sprd_spi_driver);
MODULE_DESCRIPTION("Spreadtrum SPI Controller driver");
MODULE_AUTHOR("Lanqing Liu <[email protected]>");
MODULE_LICENSE("GPL v2");
/* linux-master: drivers/spi/spi-sprd.c */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2012 - 2014 Allwinner Tech
* Pan Nan <[email protected]>
*
* Copyright (C) 2014 Maxime Ripard
* Maxime Ripard <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/dmaengine.h>
#include <linux/spi/spi.h>
#define SUN6I_AUTOSUSPEND_TIMEOUT 2000
#define SUN6I_FIFO_DEPTH 128
#define SUN8I_FIFO_DEPTH 64
#define SUN6I_GBL_CTL_REG 0x04
#define SUN6I_GBL_CTL_BUS_ENABLE BIT(0)
#define SUN6I_GBL_CTL_MASTER BIT(1)
#define SUN6I_GBL_CTL_TP BIT(7)
#define SUN6I_GBL_CTL_RST BIT(31)
#define SUN6I_TFR_CTL_REG 0x08
#define SUN6I_TFR_CTL_CPHA BIT(0)
#define SUN6I_TFR_CTL_CPOL BIT(1)
#define SUN6I_TFR_CTL_SPOL BIT(2)
#define SUN6I_TFR_CTL_CS_MASK 0x30
#define SUN6I_TFR_CTL_CS(cs) (((cs) << 4) & SUN6I_TFR_CTL_CS_MASK)
#define SUN6I_TFR_CTL_CS_MANUAL BIT(6)
#define SUN6I_TFR_CTL_CS_LEVEL BIT(7)
#define SUN6I_TFR_CTL_DHB BIT(8)
#define SUN6I_TFR_CTL_SDC BIT(11)
#define SUN6I_TFR_CTL_FBS BIT(12)
#define SUN6I_TFR_CTL_SDM BIT(13)
#define SUN6I_TFR_CTL_XCH BIT(31)
#define SUN6I_INT_CTL_REG 0x10
#define SUN6I_INT_CTL_RF_RDY BIT(0)
#define SUN6I_INT_CTL_TF_ERQ BIT(4)
#define SUN6I_INT_CTL_RF_OVF BIT(8)
#define SUN6I_INT_CTL_TC BIT(12)
#define SUN6I_INT_STA_REG 0x14
#define SUN6I_FIFO_CTL_REG 0x18
#define SUN6I_FIFO_CTL_RF_RDY_TRIG_LEVEL_MASK 0xff
#define SUN6I_FIFO_CTL_RF_DRQ_EN BIT(8)
#define SUN6I_FIFO_CTL_RF_RDY_TRIG_LEVEL_BITS 0
#define SUN6I_FIFO_CTL_RF_RST BIT(15)
#define SUN6I_FIFO_CTL_TF_ERQ_TRIG_LEVEL_MASK 0xff
#define SUN6I_FIFO_CTL_TF_ERQ_TRIG_LEVEL_BITS 16
#define SUN6I_FIFO_CTL_TF_DRQ_EN BIT(24)
#define SUN6I_FIFO_CTL_TF_RST BIT(31)
#define SUN6I_FIFO_STA_REG 0x1c
#define SUN6I_FIFO_STA_RF_CNT_MASK GENMASK(7, 0)
#define SUN6I_FIFO_STA_TF_CNT_MASK GENMASK(23, 16)
#define SUN6I_CLK_CTL_REG 0x24
#define SUN6I_CLK_CTL_CDR2_MASK 0xff
#define SUN6I_CLK_CTL_CDR2(div) (((div) & SUN6I_CLK_CTL_CDR2_MASK) << 0)
#define SUN6I_CLK_CTL_CDR1_MASK 0xf
#define SUN6I_CLK_CTL_CDR1(div) (((div) & SUN6I_CLK_CTL_CDR1_MASK) << 8)
#define SUN6I_CLK_CTL_DRS BIT(12)
#define SUN6I_MAX_XFER_SIZE 0xffffff
#define SUN6I_BURST_CNT_REG 0x30
#define SUN6I_XMIT_CNT_REG 0x34
#define SUN6I_BURST_CTL_CNT_REG 0x38
#define SUN6I_BURST_CTL_CNT_STC_MASK GENMASK(23, 0)
#define SUN6I_BURST_CTL_CNT_DRM BIT(28)
#define SUN6I_BURST_CTL_CNT_QUAD_EN BIT(29)
#define SUN6I_TXDATA_REG 0x200
#define SUN6I_RXDATA_REG 0x300
struct sun6i_spi_cfg {
unsigned long fifo_depth;
bool has_clk_ctl;
u32 mode_bits;
};
struct sun6i_spi {
struct spi_master *master;
void __iomem *base_addr;
dma_addr_t dma_addr_rx;
dma_addr_t dma_addr_tx;
struct clk *hclk;
struct clk *mclk;
struct reset_control *rstc;
struct completion done;
struct completion dma_rx_done;
const u8 *tx_buf;
u8 *rx_buf;
int len;
const struct sun6i_spi_cfg *cfg;
};
static inline u32 sun6i_spi_read(struct sun6i_spi *sspi, u32 reg)
{
return readl(sspi->base_addr + reg);
}
static inline void sun6i_spi_write(struct sun6i_spi *sspi, u32 reg, u32 value)
{
writel(value, sspi->base_addr + reg);
}
static inline u32 sun6i_spi_get_rx_fifo_count(struct sun6i_spi *sspi)
{
u32 reg = sun6i_spi_read(sspi, SUN6I_FIFO_STA_REG);
return FIELD_GET(SUN6I_FIFO_STA_RF_CNT_MASK, reg);
}
static inline u32 sun6i_spi_get_tx_fifo_count(struct sun6i_spi *sspi)
{
u32 reg = sun6i_spi_read(sspi, SUN6I_FIFO_STA_REG);
return FIELD_GET(SUN6I_FIFO_STA_TF_CNT_MASK, reg);
}
static inline void sun6i_spi_disable_interrupt(struct sun6i_spi *sspi, u32 mask)
{
u32 reg = sun6i_spi_read(sspi, SUN6I_INT_CTL_REG);
reg &= ~mask;
sun6i_spi_write(sspi, SUN6I_INT_CTL_REG, reg);
}
static inline void sun6i_spi_drain_fifo(struct sun6i_spi *sspi)
{
u32 len;
u8 byte;
/* See how much data is available */
len = sun6i_spi_get_rx_fifo_count(sspi);
while (len--) {
byte = readb(sspi->base_addr + SUN6I_RXDATA_REG);
if (sspi->rx_buf)
*sspi->rx_buf++ = byte;
}
}
static inline void sun6i_spi_fill_fifo(struct sun6i_spi *sspi)
{
u32 cnt;
int len;
u8 byte;
/* See how much data we can fit */
cnt = sspi->cfg->fifo_depth - sun6i_spi_get_tx_fifo_count(sspi);
len = min((int)cnt, sspi->len);
while (len--) {
byte = sspi->tx_buf ? *sspi->tx_buf++ : 0;
writeb(byte, sspi->base_addr + SUN6I_TXDATA_REG);
sspi->len--;
}
}
static void sun6i_spi_set_cs(struct spi_device *spi, bool enable)
{
struct sun6i_spi *sspi = spi_master_get_devdata(spi->master);
u32 reg;
reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);
reg &= ~SUN6I_TFR_CTL_CS_MASK;
reg |= SUN6I_TFR_CTL_CS(spi_get_chipselect(spi, 0));
if (enable)
reg |= SUN6I_TFR_CTL_CS_LEVEL;
else
reg &= ~SUN6I_TFR_CTL_CS_LEVEL;
sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg);
}
static size_t sun6i_spi_max_transfer_size(struct spi_device *spi)
{
return SUN6I_MAX_XFER_SIZE - 1;
}
static void sun6i_spi_dma_rx_cb(void *param)
{
struct sun6i_spi *sspi = param;
complete(&sspi->dma_rx_done);
}
static int sun6i_spi_prepare_dma(struct sun6i_spi *sspi,
struct spi_transfer *tfr)
{
struct dma_async_tx_descriptor *rxdesc, *txdesc;
struct spi_master *master = sspi->master;
rxdesc = NULL;
if (tfr->rx_buf) {
struct dma_slave_config rxconf = {
.direction = DMA_DEV_TO_MEM,
.src_addr = sspi->dma_addr_rx,
.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
.src_maxburst = 8,
};
dmaengine_slave_config(master->dma_rx, &rxconf);
rxdesc = dmaengine_prep_slave_sg(master->dma_rx,
tfr->rx_sg.sgl,
tfr->rx_sg.nents,
DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT);
if (!rxdesc)
return -EINVAL;
rxdesc->callback_param = sspi;
rxdesc->callback = sun6i_spi_dma_rx_cb;
}
txdesc = NULL;
if (tfr->tx_buf) {
struct dma_slave_config txconf = {
.direction = DMA_MEM_TO_DEV,
.dst_addr = sspi->dma_addr_tx,
.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
.dst_maxburst = 8,
};
dmaengine_slave_config(master->dma_tx, &txconf);
txdesc = dmaengine_prep_slave_sg(master->dma_tx,
tfr->tx_sg.sgl,
tfr->tx_sg.nents,
DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT);
if (!txdesc) {
if (rxdesc)
dmaengine_terminate_sync(master->dma_rx);
return -EINVAL;
}
}
if (tfr->rx_buf) {
dmaengine_submit(rxdesc);
dma_async_issue_pending(master->dma_rx);
}
if (tfr->tx_buf) {
dmaengine_submit(txdesc);
dma_async_issue_pending(master->dma_tx);
}
return 0;
}
static int sun6i_spi_transfer_one(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *tfr)
{
struct sun6i_spi *sspi = spi_master_get_devdata(master);
unsigned int div, div_cdr1, div_cdr2, timeout;
unsigned int start, end, tx_time;
unsigned int trig_level;
unsigned int tx_len = 0, rx_len = 0, nbits = 0;
bool use_dma;
int ret = 0;
u32 reg;
if (tfr->len > SUN6I_MAX_XFER_SIZE)
return -EINVAL;
reinit_completion(&sspi->done);
reinit_completion(&sspi->dma_rx_done);
sspi->tx_buf = tfr->tx_buf;
sspi->rx_buf = tfr->rx_buf;
sspi->len = tfr->len;
use_dma = master->can_dma ? master->can_dma(master, spi, tfr) : false;
/* Clear pending interrupts */
sun6i_spi_write(sspi, SUN6I_INT_STA_REG, ~0);
/* Reset FIFO */
sun6i_spi_write(sspi, SUN6I_FIFO_CTL_REG,
SUN6I_FIFO_CTL_RF_RST | SUN6I_FIFO_CTL_TF_RST);
reg = 0;
if (!use_dma) {
/*
* Setup FIFO interrupt trigger level
* Here we choose 3/4 of the full fifo depth, as it's
* the hardcoded value used in the old generation of Allwinner
* SPI controllers. (See spi-sun4i.c)
*/
trig_level = sspi->cfg->fifo_depth / 4 * 3;
} else {
/*
* Setup FIFO DMA request trigger level
* We choose 1/2 of the full fifo depth; that value will
* be used as the DMA burst length.
*/
trig_level = sspi->cfg->fifo_depth / 2;
if (tfr->tx_buf)
reg |= SUN6I_FIFO_CTL_TF_DRQ_EN;
if (tfr->rx_buf)
reg |= SUN6I_FIFO_CTL_RF_DRQ_EN;
}
reg |= (trig_level << SUN6I_FIFO_CTL_RF_RDY_TRIG_LEVEL_BITS) |
(trig_level << SUN6I_FIFO_CTL_TF_ERQ_TRIG_LEVEL_BITS);
sun6i_spi_write(sspi, SUN6I_FIFO_CTL_REG, reg);
/*
* Setup the transfer control register: Chip Select,
* polarities, etc.
*/
reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);
if (spi->mode & SPI_CPOL)
reg |= SUN6I_TFR_CTL_CPOL;
else
reg &= ~SUN6I_TFR_CTL_CPOL;
if (spi->mode & SPI_CPHA)
reg |= SUN6I_TFR_CTL_CPHA;
else
reg &= ~SUN6I_TFR_CTL_CPHA;
if (spi->mode & SPI_LSB_FIRST)
reg |= SUN6I_TFR_CTL_FBS;
else
reg &= ~SUN6I_TFR_CTL_FBS;
/*
* If it's a TX only transfer, we don't want to fill the RX
* FIFO with bogus data
*/
if (sspi->rx_buf) {
reg &= ~SUN6I_TFR_CTL_DHB;
rx_len = tfr->len;
} else {
reg |= SUN6I_TFR_CTL_DHB;
}
/* We want to control the chip select manually */
reg |= SUN6I_TFR_CTL_CS_MANUAL;
sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg);
if (sspi->cfg->has_clk_ctl) {
unsigned int mclk_rate = clk_get_rate(sspi->mclk);
/* Ensure that we have a parent clock fast enough */
if (mclk_rate < (2 * tfr->speed_hz)) {
clk_set_rate(sspi->mclk, 2 * tfr->speed_hz);
mclk_rate = clk_get_rate(sspi->mclk);
}
/*
* Setup clock divider.
*
* We have two choices here. Either we can use the clock
* divide rate 1 (CDR1), which is calculated with this formula:
* SPI_CLK = MOD_CLK / (2 ^ cdr)
* Or we can use CDR2, which is calculated with the formula:
* SPI_CLK = MOD_CLK / (2 * (cdr + 1))
* Whether we use the former or the latter is set through the
* DRS bit.
*
* First try CDR2, and if we can't reach the expected
* frequency, fall back to CDR1.
*/
div_cdr1 = DIV_ROUND_UP(mclk_rate, tfr->speed_hz);
div_cdr2 = DIV_ROUND_UP(div_cdr1, 2);
if (div_cdr2 <= (SUN6I_CLK_CTL_CDR2_MASK + 1)) {
reg = SUN6I_CLK_CTL_CDR2(div_cdr2 - 1) | SUN6I_CLK_CTL_DRS;
tfr->effective_speed_hz = mclk_rate / (2 * div_cdr2);
} else {
div = min(SUN6I_CLK_CTL_CDR1_MASK, order_base_2(div_cdr1));
reg = SUN6I_CLK_CTL_CDR1(div);
tfr->effective_speed_hz = mclk_rate / (1 << div);
}
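/*
 * Worked example (illustrative): with mclk_rate = 24 MHz and a
 * requested speed of 10 MHz, div_cdr1 = 3 and div_cdr2 = 2, so the
 * CDR2 path is taken and the effective speed becomes
 * 24000000 / (2 * 2) = 6 MHz.
 */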
sun6i_spi_write(sspi, SUN6I_CLK_CTL_REG, reg);
} else {
clk_set_rate(sspi->mclk, tfr->speed_hz);
tfr->effective_speed_hz = clk_get_rate(sspi->mclk);
/*
* Configure work mode.
*
* There are three work modes depending on the controller clock
* frequency:
* - normal sample mode : CLK <= 24MHz SDM=1 SDC=0
* - delay half-cycle sample mode : CLK <= 40MHz SDM=0 SDC=0
* - delay one-cycle sample mode : CLK >= 80MHz SDM=0 SDC=1
*/
reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);
reg &= ~(SUN6I_TFR_CTL_SDM | SUN6I_TFR_CTL_SDC);
if (tfr->effective_speed_hz <= 24000000)
reg |= SUN6I_TFR_CTL_SDM;
else if (tfr->effective_speed_hz >= 80000000)
reg |= SUN6I_TFR_CTL_SDC;
sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg);
}
/* Finally enable the bus - doing so before might raise SCK to HIGH */
reg = sun6i_spi_read(sspi, SUN6I_GBL_CTL_REG);
reg |= SUN6I_GBL_CTL_BUS_ENABLE;
sun6i_spi_write(sspi, SUN6I_GBL_CTL_REG, reg);
/* Setup the transfer now... */
if (sspi->tx_buf) {
tx_len = tfr->len;
nbits = tfr->tx_nbits;
} else if (tfr->rx_buf) {
nbits = tfr->rx_nbits;
}
switch (nbits) {
case SPI_NBITS_DUAL:
reg = SUN6I_BURST_CTL_CNT_DRM;
break;
case SPI_NBITS_QUAD:
reg = SUN6I_BURST_CTL_CNT_QUAD_EN;
break;
case SPI_NBITS_SINGLE:
default:
reg = FIELD_PREP(SUN6I_BURST_CTL_CNT_STC_MASK, tx_len);
}
/* Setup the counters */
sun6i_spi_write(sspi, SUN6I_BURST_CTL_CNT_REG, reg);
sun6i_spi_write(sspi, SUN6I_BURST_CNT_REG, tfr->len);
sun6i_spi_write(sspi, SUN6I_XMIT_CNT_REG, tx_len);
if (!use_dma) {
/* Fill the TX FIFO */
sun6i_spi_fill_fifo(sspi);
} else {
ret = sun6i_spi_prepare_dma(sspi, tfr);
if (ret) {
dev_warn(&master->dev,
"%s: prepare DMA failed, ret=%d",
dev_name(&spi->dev), ret);
return ret;
}
}
/* Enable the interrupts */
reg = SUN6I_INT_CTL_TC;
if (!use_dma) {
if (rx_len > sspi->cfg->fifo_depth)
reg |= SUN6I_INT_CTL_RF_RDY;
if (tx_len > sspi->cfg->fifo_depth)
reg |= SUN6I_INT_CTL_TF_ERQ;
}
sun6i_spi_write(sspi, SUN6I_INT_CTL_REG, reg);
/* Start the transfer */
reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);
sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg | SUN6I_TFR_CTL_XCH);
tx_time = spi_controller_xfer_timeout(master, tfr);
start = jiffies;
timeout = wait_for_completion_timeout(&sspi->done,
msecs_to_jiffies(tx_time));
if (!use_dma) {
sun6i_spi_drain_fifo(sspi);
} else {
if (timeout && rx_len) {
/*
* Even though RX on the peripheral side has finished,
* RX DMA might still be in flight
*/
timeout = wait_for_completion_timeout(&sspi->dma_rx_done,
timeout);
if (!timeout)
dev_warn(&master->dev, "RX DMA timeout\n");
}
}
end = jiffies;
if (!timeout) {
dev_warn(&master->dev,
"%s: timeout transferring %u bytes@%iHz for %i(%i)ms",
dev_name(&spi->dev), tfr->len, tfr->speed_hz,
jiffies_to_msecs(end - start), tx_time);
ret = -ETIMEDOUT;
}
sun6i_spi_write(sspi, SUN6I_INT_CTL_REG, 0);
if (ret && use_dma) {
dmaengine_terminate_sync(master->dma_rx);
dmaengine_terminate_sync(master->dma_tx);
}
return ret;
}
static irqreturn_t sun6i_spi_handler(int irq, void *dev_id)
{
struct sun6i_spi *sspi = dev_id;
u32 status = sun6i_spi_read(sspi, SUN6I_INT_STA_REG);
/* Transfer complete */
if (status & SUN6I_INT_CTL_TC) {
sun6i_spi_write(sspi, SUN6I_INT_STA_REG, SUN6I_INT_CTL_TC);
complete(&sspi->done);
return IRQ_HANDLED;
}
/* Receive FIFO 3/4 full */
if (status & SUN6I_INT_CTL_RF_RDY) {
sun6i_spi_drain_fifo(sspi);
/* Only clear the interrupt _after_ draining the FIFO */
sun6i_spi_write(sspi, SUN6I_INT_STA_REG, SUN6I_INT_CTL_RF_RDY);
return IRQ_HANDLED;
}
/* Transmit FIFO 3/4 empty */
if (status & SUN6I_INT_CTL_TF_ERQ) {
sun6i_spi_fill_fifo(sspi);
if (!sspi->len)
/* nothing left to transmit */
sun6i_spi_disable_interrupt(sspi, SUN6I_INT_CTL_TF_ERQ);
/* Only clear the interrupt _after_ re-seeding the FIFO */
sun6i_spi_write(sspi, SUN6I_INT_STA_REG, SUN6I_INT_CTL_TF_ERQ);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
static int sun6i_spi_runtime_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct sun6i_spi *sspi = spi_master_get_devdata(master);
int ret;
ret = clk_prepare_enable(sspi->hclk);
if (ret) {
dev_err(dev, "Couldn't enable AHB clock\n");
goto out;
}
ret = clk_prepare_enable(sspi->mclk);
if (ret) {
dev_err(dev, "Couldn't enable module clock\n");
goto err;
}
ret = reset_control_deassert(sspi->rstc);
if (ret) {
dev_err(dev, "Couldn't deassert the device from reset\n");
goto err2;
}
sun6i_spi_write(sspi, SUN6I_GBL_CTL_REG,
SUN6I_GBL_CTL_MASTER | SUN6I_GBL_CTL_TP);
return 0;
err2:
clk_disable_unprepare(sspi->mclk);
err:
clk_disable_unprepare(sspi->hclk);
out:
return ret;
}
static int sun6i_spi_runtime_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct sun6i_spi *sspi = spi_master_get_devdata(master);
reset_control_assert(sspi->rstc);
clk_disable_unprepare(sspi->mclk);
clk_disable_unprepare(sspi->hclk);
return 0;
}
static bool sun6i_spi_can_dma(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct sun6i_spi *sspi = spi_master_get_devdata(master);
/*
* If the number of SPI words to transfer is less than or equal to
* the fifo length, we can just fill the fifo and wait for a single
* irq, so don't bother setting up dma
*/
return xfer->len > sspi->cfg->fifo_depth;
}
static int sun6i_spi_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct sun6i_spi *sspi;
struct resource *mem;
int ret = 0, irq;
master = spi_alloc_master(&pdev->dev, sizeof(struct sun6i_spi));
if (!master) {
dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
return -ENOMEM;
}
platform_set_drvdata(pdev, master);
sspi = spi_master_get_devdata(master);
sspi->base_addr = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
if (IS_ERR(sspi->base_addr)) {
ret = PTR_ERR(sspi->base_addr);
goto err_free_master;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = -ENXIO;
goto err_free_master;
}
ret = devm_request_irq(&pdev->dev, irq, sun6i_spi_handler,
0, "sun6i-spi", sspi);
if (ret) {
dev_err(&pdev->dev, "Cannot request IRQ\n");
goto err_free_master;
}
sspi->master = master;
sspi->cfg = of_device_get_match_data(&pdev->dev);
master->max_speed_hz = 100 * 1000 * 1000;
master->min_speed_hz = 3 * 1000;
master->use_gpio_descriptors = true;
master->set_cs = sun6i_spi_set_cs;
master->transfer_one = sun6i_spi_transfer_one;
master->num_chipselect = 4;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST |
sspi->cfg->mode_bits;
master->bits_per_word_mask = SPI_BPW_MASK(8);
master->dev.of_node = pdev->dev.of_node;
master->auto_runtime_pm = true;
master->max_transfer_size = sun6i_spi_max_transfer_size;
sspi->hclk = devm_clk_get(&pdev->dev, "ahb");
if (IS_ERR(sspi->hclk)) {
dev_err(&pdev->dev, "Unable to acquire AHB clock\n");
ret = PTR_ERR(sspi->hclk);
goto err_free_master;
}
sspi->mclk = devm_clk_get(&pdev->dev, "mod");
if (IS_ERR(sspi->mclk)) {
dev_err(&pdev->dev, "Unable to acquire module clock\n");
ret = PTR_ERR(sspi->mclk);
goto err_free_master;
}
init_completion(&sspi->done);
init_completion(&sspi->dma_rx_done);
sspi->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (IS_ERR(sspi->rstc)) {
dev_err(&pdev->dev, "Couldn't get reset controller\n");
ret = PTR_ERR(sspi->rstc);
goto err_free_master;
}
master->dma_tx = dma_request_chan(&pdev->dev, "tx");
if (IS_ERR(master->dma_tx)) {
/* Check tx to see if we need to defer probing the driver */
if (PTR_ERR(master->dma_tx) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
goto err_free_master;
}
dev_warn(&pdev->dev, "Failed to request TX DMA channel\n");
master->dma_tx = NULL;
}
master->dma_rx = dma_request_chan(&pdev->dev, "rx");
if (IS_ERR(master->dma_rx)) {
if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
goto err_free_dma_tx;
}
dev_warn(&pdev->dev, "Failed to request RX DMA channel\n");
master->dma_rx = NULL;
}
if (master->dma_tx && master->dma_rx) {
sspi->dma_addr_tx = mem->start + SUN6I_TXDATA_REG;
sspi->dma_addr_rx = mem->start + SUN6I_RXDATA_REG;
master->can_dma = sun6i_spi_can_dma;
}
/*
* This wake-up/shutdown pattern is to be able to have the
* device woken up, even if runtime_pm is disabled
*/
ret = sun6i_spi_runtime_resume(&pdev->dev);
if (ret) {
dev_err(&pdev->dev, "Couldn't resume the device\n");
goto err_free_dma_rx;
}
pm_runtime_set_autosuspend_delay(&pdev->dev, SUN6I_AUTOSUSPEND_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
ret = devm_spi_register_master(&pdev->dev, master);
if (ret) {
dev_err(&pdev->dev, "cannot register SPI master\n");
goto err_pm_disable;
}
return 0;
err_pm_disable:
pm_runtime_disable(&pdev->dev);
sun6i_spi_runtime_suspend(&pdev->dev);
err_free_dma_rx:
if (master->dma_rx)
dma_release_channel(master->dma_rx);
err_free_dma_tx:
if (master->dma_tx)
dma_release_channel(master->dma_tx);
err_free_master:
spi_master_put(master);
return ret;
}
static void sun6i_spi_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
pm_runtime_force_suspend(&pdev->dev);
if (master->dma_tx)
dma_release_channel(master->dma_tx);
if (master->dma_rx)
dma_release_channel(master->dma_rx);
}
static const struct sun6i_spi_cfg sun6i_a31_spi_cfg = {
.fifo_depth = SUN6I_FIFO_DEPTH,
.has_clk_ctl = true,
};
static const struct sun6i_spi_cfg sun8i_h3_spi_cfg = {
.fifo_depth = SUN8I_FIFO_DEPTH,
.has_clk_ctl = true,
};
static const struct sun6i_spi_cfg sun50i_r329_spi_cfg = {
.fifo_depth = SUN8I_FIFO_DEPTH,
.mode_bits = SPI_RX_DUAL | SPI_TX_DUAL | SPI_RX_QUAD | SPI_TX_QUAD,
};
static const struct of_device_id sun6i_spi_match[] = {
{ .compatible = "allwinner,sun6i-a31-spi", .data = &sun6i_a31_spi_cfg },
{ .compatible = "allwinner,sun8i-h3-spi", .data = &sun8i_h3_spi_cfg },
{
.compatible = "allwinner,sun50i-r329-spi",
.data = &sun50i_r329_spi_cfg
},
{}
};
MODULE_DEVICE_TABLE(of, sun6i_spi_match);
static const struct dev_pm_ops sun6i_spi_pm_ops = {
.runtime_resume = sun6i_spi_runtime_resume,
.runtime_suspend = sun6i_spi_runtime_suspend,
};
static struct platform_driver sun6i_spi_driver = {
.probe = sun6i_spi_probe,
.remove_new = sun6i_spi_remove,
.driver = {
.name = "sun6i-spi",
.of_match_table = sun6i_spi_match,
.pm = &sun6i_spi_pm_ops,
},
};
module_platform_driver(sun6i_spi_driver);
MODULE_AUTHOR("Pan Nan <[email protected]>");
MODULE_AUTHOR("Maxime Ripard <[email protected]>");
MODULE_DESCRIPTION("Allwinner A31 SPI controller driver");
MODULE_LICENSE("GPL");
/* linux-master: drivers/spi/spi-sun6i.c */
// SPDX-License-Identifier: GPL-2.0
// spi-uniphier.c - Socionext UniPhier SPI controller driver
// Copyright 2012 Panasonic Corporation
// Copyright 2016-2018 Socionext Inc.
#include <linux/kernel.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <asm/unaligned.h>
#define SSI_TIMEOUT_MS 2000
#define SSI_POLL_TIMEOUT_US 200
#define SSI_MAX_CLK_DIVIDER 254
#define SSI_MIN_CLK_DIVIDER 4
struct uniphier_spi_priv {
void __iomem *base;
dma_addr_t base_dma_addr;
struct clk *clk;
struct spi_master *master;
struct completion xfer_done;
int error;
unsigned int tx_bytes;
unsigned int rx_bytes;
const u8 *tx_buf;
u8 *rx_buf;
atomic_t dma_busy;
bool is_save_param;
u8 bits_per_word;
u16 mode;
u32 speed_hz;
};
#define SSI_CTL 0x00
#define SSI_CTL_EN BIT(0)
#define SSI_CKS 0x04
#define SSI_CKS_CKRAT_MASK GENMASK(7, 0)
#define SSI_CKS_CKPHS BIT(14)
#define SSI_CKS_CKINIT BIT(13)
#define SSI_CKS_CKDLY BIT(12)
#define SSI_TXWDS 0x08
#define SSI_TXWDS_WDLEN_MASK GENMASK(13, 8)
#define SSI_TXWDS_TDTF_MASK GENMASK(7, 6)
#define SSI_TXWDS_DTLEN_MASK GENMASK(5, 0)
#define SSI_RXWDS 0x0c
#define SSI_RXWDS_DTLEN_MASK GENMASK(5, 0)
#define SSI_FPS 0x10
#define SSI_FPS_FSPOL BIT(15)
#define SSI_FPS_FSTRT BIT(14)
#define SSI_SR 0x14
#define SSI_SR_BUSY BIT(7)
#define SSI_SR_RNE BIT(0)
#define SSI_IE 0x18
#define SSI_IE_TCIE BIT(4)
#define SSI_IE_RCIE BIT(3)
#define SSI_IE_TXRE BIT(2)
#define SSI_IE_RXRE BIT(1)
#define SSI_IE_RORIE BIT(0)
#define SSI_IE_ALL_MASK GENMASK(4, 0)
#define SSI_IS 0x1c
#define SSI_IS_RXRS BIT(9)
#define SSI_IS_RCID BIT(3)
#define SSI_IS_RORID BIT(0)
#define SSI_IC 0x1c
#define SSI_IC_TCIC BIT(4)
#define SSI_IC_RCIC BIT(3)
#define SSI_IC_RORIC BIT(0)
#define SSI_FC 0x20
#define SSI_FC_TXFFL BIT(12)
#define SSI_FC_TXFTH_MASK GENMASK(11, 8)
#define SSI_FC_RXFFL BIT(4)
#define SSI_FC_RXFTH_MASK GENMASK(3, 0)
#define SSI_TXDR 0x24
#define SSI_RXDR 0x24
#define SSI_FIFO_DEPTH 8U
#define SSI_FIFO_BURST_NUM 1
#define SSI_DMA_RX_BUSY BIT(1)
#define SSI_DMA_TX_BUSY BIT(0)
static inline unsigned int bytes_per_word(unsigned int bits)
{
return bits <= 8 ? 1 : (bits <= 16 ? 2 : 4);
}
static inline void uniphier_spi_irq_enable(struct uniphier_spi_priv *priv,
u32 mask)
{
u32 val;
val = readl(priv->base + SSI_IE);
val |= mask;
writel(val, priv->base + SSI_IE);
}
static inline void uniphier_spi_irq_disable(struct uniphier_spi_priv *priv,
u32 mask)
{
u32 val;
val = readl(priv->base + SSI_IE);
val &= ~mask;
writel(val, priv->base + SSI_IE);
}
static void uniphier_spi_set_mode(struct spi_device *spi)
{
struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
u32 val1, val2;
/*
* clock setting
* CKPHS capture timing. 0:rising edge, 1:falling edge
* CKINIT clock initial level. 0:low, 1:high
* CKDLY clock delay. 0:no delay, 1:delay depending on FSTRT
* (FSTRT=0: 1 clock, FSTRT=1: 0.5 clock)
*
* frame setting
* FSPOL frame signal polarity. 0: low, 1: high
* FSTRT start frame timing
* 0: rising edge of clock, 1: falling edge of clock
*/
switch (spi->mode & SPI_MODE_X_MASK) {
case SPI_MODE_0:
/* CKPHS=1, CKINIT=0, CKDLY=1, FSTRT=0 */
val1 = SSI_CKS_CKPHS | SSI_CKS_CKDLY;
val2 = 0;
break;
case SPI_MODE_1:
/* CKPHS=0, CKINIT=0, CKDLY=0, FSTRT=1 */
val1 = 0;
val2 = SSI_FPS_FSTRT;
break;
case SPI_MODE_2:
/* CKPHS=0, CKINIT=1, CKDLY=1, FSTRT=1 */
val1 = SSI_CKS_CKINIT | SSI_CKS_CKDLY;
val2 = SSI_FPS_FSTRT;
break;
case SPI_MODE_3:
/* CKPHS=1, CKINIT=1, CKDLY=0, FSTRT=0 */
val1 = SSI_CKS_CKPHS | SSI_CKS_CKINIT;
val2 = 0;
break;
}
if (!(spi->mode & SPI_CS_HIGH))
val2 |= SSI_FPS_FSPOL;
writel(val1, priv->base + SSI_CKS);
writel(val2, priv->base + SSI_FPS);
val1 = 0;
if (spi->mode & SPI_LSB_FIRST)
val1 |= FIELD_PREP(SSI_TXWDS_TDTF_MASK, 1);
writel(val1, priv->base + SSI_TXWDS);
writel(val1, priv->base + SSI_RXWDS);
}
static void uniphier_spi_set_transfer_size(struct spi_device *spi, int size)
{
struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
u32 val;
val = readl(priv->base + SSI_TXWDS);
val &= ~(SSI_TXWDS_WDLEN_MASK | SSI_TXWDS_DTLEN_MASK);
val |= FIELD_PREP(SSI_TXWDS_WDLEN_MASK, size);
val |= FIELD_PREP(SSI_TXWDS_DTLEN_MASK, size);
writel(val, priv->base + SSI_TXWDS);
val = readl(priv->base + SSI_RXWDS);
val &= ~SSI_RXWDS_DTLEN_MASK;
val |= FIELD_PREP(SSI_RXWDS_DTLEN_MASK, size);
writel(val, priv->base + SSI_RXWDS);
}
static void uniphier_spi_set_baudrate(struct spi_device *spi,
unsigned int speed)
{
struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
u32 val, ckdiv;
/*
* the supported rates are even numbers from 4 to 254. (4,6,8...254)
* round up as we look for equal or less speed
*/
ckdiv = DIV_ROUND_UP(clk_get_rate(priv->clk), speed);
ckdiv = round_up(ckdiv, 2);
val = readl(priv->base + SSI_CKS);
val &= ~SSI_CKS_CKRAT_MASK;
val |= ckdiv & SSI_CKS_CKRAT_MASK;
writel(val, priv->base + SSI_CKS);
}
static void uniphier_spi_setup_transfer(struct spi_device *spi,
struct spi_transfer *t)
{
struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
u32 val;
priv->error = 0;
priv->tx_buf = t->tx_buf;
priv->rx_buf = t->rx_buf;
priv->tx_bytes = priv->rx_bytes = t->len;
if (!priv->is_save_param || priv->mode != spi->mode) {
uniphier_spi_set_mode(spi);
priv->mode = spi->mode;
priv->is_save_param = false;
}
if (!priv->is_save_param || priv->bits_per_word != t->bits_per_word) {
uniphier_spi_set_transfer_size(spi, t->bits_per_word);
priv->bits_per_word = t->bits_per_word;
}
if (!priv->is_save_param || priv->speed_hz != t->speed_hz) {
uniphier_spi_set_baudrate(spi, t->speed_hz);
priv->speed_hz = t->speed_hz;
}
priv->is_save_param = true;
/* reset FIFOs */
val = SSI_FC_TXFFL | SSI_FC_RXFFL;
writel(val, priv->base + SSI_FC);
}
static void uniphier_spi_send(struct uniphier_spi_priv *priv)
{
int wsize;
u32 val = 0;
wsize = min(bytes_per_word(priv->bits_per_word), priv->tx_bytes);
priv->tx_bytes -= wsize;
if (priv->tx_buf) {
switch (wsize) {
case 1:
val = *priv->tx_buf;
break;
case 2:
val = get_unaligned_le16(priv->tx_buf);
break;
case 4:
val = get_unaligned_le32(priv->tx_buf);
break;
}
priv->tx_buf += wsize;
}
writel(val, priv->base + SSI_TXDR);
}
static void uniphier_spi_recv(struct uniphier_spi_priv *priv)
{
int rsize;
u32 val;
rsize = min(bytes_per_word(priv->bits_per_word), priv->rx_bytes);
priv->rx_bytes -= rsize;
val = readl(priv->base + SSI_RXDR);
if (priv->rx_buf) {
switch (rsize) {
case 1:
*priv->rx_buf = val;
break;
case 2:
put_unaligned_le16(val, priv->rx_buf);
break;
case 4:
put_unaligned_le32(val, priv->rx_buf);
break;
}
priv->rx_buf += rsize;
}
}
static void uniphier_spi_set_fifo_threshold(struct uniphier_spi_priv *priv,
unsigned int threshold)
{
u32 val;
val = readl(priv->base + SSI_FC);
val &= ~(SSI_FC_TXFTH_MASK | SSI_FC_RXFTH_MASK);
val |= FIELD_PREP(SSI_FC_TXFTH_MASK, SSI_FIFO_DEPTH - threshold);
val |= FIELD_PREP(SSI_FC_RXFTH_MASK, threshold);
writel(val, priv->base + SSI_FC);
}
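/*
* Program the RX threshold to the number of words still expected (capped at
* the FIFO depth) and top up the TX FIFO so that the amount of in-flight
* data never exceeds that threshold.
*/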
static void uniphier_spi_fill_tx_fifo(struct uniphier_spi_priv *priv)
{
unsigned int fifo_threshold, fill_words;
unsigned int bpw = bytes_per_word(priv->bits_per_word);
fifo_threshold = DIV_ROUND_UP(priv->rx_bytes, bpw);
fifo_threshold = min(fifo_threshold, SSI_FIFO_DEPTH);
uniphier_spi_set_fifo_threshold(priv, fifo_threshold);
fill_words = fifo_threshold -
DIV_ROUND_UP(priv->rx_bytes - priv->tx_bytes, bpw);
while (fill_words--)
uniphier_spi_send(priv);
}
static void uniphier_spi_set_cs(struct spi_device *spi, bool enable)
{
struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
u32 val;
val = readl(priv->base + SSI_FPS);
if (enable)
val |= SSI_FPS_FSPOL;
else
val &= ~SSI_FPS_FSPOL;
writel(val, priv->base + SSI_FPS);
}
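/* Use DMA only when the needed channels exist and the transfer exceeds the FIFO depth */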
static bool uniphier_spi_can_dma(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *t)
{
struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
unsigned int bpw = bytes_per_word(priv->bits_per_word);
if ((!master->dma_tx && !master->dma_rx)
|| (!master->dma_tx && t->tx_buf)
|| (!master->dma_rx && t->rx_buf))
return false;
return DIV_ROUND_UP(t->len, bpw) > SSI_FIFO_DEPTH;
}
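/*
* DMA completion callbacks: clear the per-direction busy flag and finalize
* the transfer once both TX and RX are done.
*/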
static void uniphier_spi_dma_rxcb(void *data)
{
struct spi_master *master = data;
struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
int state = atomic_fetch_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy);
uniphier_spi_irq_disable(priv, SSI_IE_RXRE);
if (!(state & SSI_DMA_TX_BUSY))
spi_finalize_current_transfer(master);
}
static void uniphier_spi_dma_txcb(void *data)
{
struct spi_master *master = data;
struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
int state = atomic_fetch_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy);
uniphier_spi_irq_disable(priv, SSI_IE_TXRE);
if (!(state & SSI_DMA_RX_BUSY))
spi_finalize_current_transfer(master);
}
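/* Configure the DMA channels, submit the descriptors and let the core wait for completion */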
static int uniphier_spi_transfer_one_dma(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *t)
{
struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
struct dma_async_tx_descriptor *rxdesc = NULL, *txdesc = NULL;
int buswidth;
atomic_set(&priv->dma_busy, 0);
uniphier_spi_set_fifo_threshold(priv, SSI_FIFO_BURST_NUM);
if (priv->bits_per_word <= 8)
buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
else if (priv->bits_per_word <= 16)
buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
else
buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
if (priv->rx_buf) {
struct dma_slave_config rxconf = {
.direction = DMA_DEV_TO_MEM,
.src_addr = priv->base_dma_addr + SSI_RXDR,
.src_addr_width = buswidth,
.src_maxburst = SSI_FIFO_BURST_NUM,
};
dmaengine_slave_config(master->dma_rx, &rxconf);
rxdesc = dmaengine_prep_slave_sg(
master->dma_rx,
t->rx_sg.sgl, t->rx_sg.nents,
DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!rxdesc)
goto out_err_prep;
rxdesc->callback = uniphier_spi_dma_rxcb;
rxdesc->callback_param = master;
uniphier_spi_irq_enable(priv, SSI_IE_RXRE);
atomic_or(SSI_DMA_RX_BUSY, &priv->dma_busy);
dmaengine_submit(rxdesc);
dma_async_issue_pending(master->dma_rx);
}
if (priv->tx_buf) {
struct dma_slave_config txconf = {
.direction = DMA_MEM_TO_DEV,
.dst_addr = priv->base_dma_addr + SSI_TXDR,
.dst_addr_width = buswidth,
.dst_maxburst = SSI_FIFO_BURST_NUM,
};
dmaengine_slave_config(master->dma_tx, &txconf);
txdesc = dmaengine_prep_slave_sg(
master->dma_tx,
t->tx_sg.sgl, t->tx_sg.nents,
DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!txdesc)
goto out_err_prep;
txdesc->callback = uniphier_spi_dma_txcb;
txdesc->callback_param = master;
uniphier_spi_irq_enable(priv, SSI_IE_TXRE);
atomic_or(SSI_DMA_TX_BUSY, &priv->dma_busy);
dmaengine_submit(txdesc);
dma_async_issue_pending(master->dma_tx);
}
/* signal that we need to wait for completion */
return (priv->tx_buf || priv->rx_buf);
out_err_prep:
if (rxdesc)
dmaengine_terminate_sync(master->dma_rx);
return -EINVAL;
}
static int uniphier_spi_transfer_one_irq(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *t)
{
struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
struct device *dev = master->dev.parent;
unsigned long time_left;
reinit_completion(&priv->xfer_done);
uniphier_spi_fill_tx_fifo(priv);
uniphier_spi_irq_enable(priv, SSI_IE_RCIE | SSI_IE_RORIE);
time_left = wait_for_completion_timeout(&priv->xfer_done,
msecs_to_jiffies(SSI_TIMEOUT_MS));
uniphier_spi_irq_disable(priv, SSI_IE_RCIE | SSI_IE_RORIE);
if (!time_left) {
dev_err(dev, "transfer timeout.\n");
return -ETIMEDOUT;
}
return priv->error;
}
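/*
* Poll short transfers with the CPU; if the RX FIFO stays empty for too long,
* fall back to the interrupt-driven path.
*/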
static int uniphier_spi_transfer_one_poll(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *t)
{
struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
int loop = SSI_POLL_TIMEOUT_US * 10;
while (priv->tx_bytes) {
uniphier_spi_fill_tx_fifo(priv);
while ((priv->rx_bytes - priv->tx_bytes) > 0) {
while (!(readl(priv->base + SSI_SR) & SSI_SR_RNE)
&& loop--)
ndelay(100);
if (loop == -1)
goto irq_transfer;
uniphier_spi_recv(priv);
}
}
return 0;
irq_transfer:
return uniphier_spi_transfer_one_irq(master, spi, t);
}
static int uniphier_spi_transfer_one(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *t)
{
struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
unsigned long threshold;
bool use_dma;
/* Terminate and return success for 0 byte length transfer */
if (!t->len)
return 0;
uniphier_spi_setup_transfer(spi, t);
use_dma = master->can_dma ? master->can_dma(master, spi, t) : false;
if (use_dma)
return uniphier_spi_transfer_one_dma(master, spi, t);
/*
* If the transfer operation will take longer than
* SSI_POLL_TIMEOUT_US, it should use irq.
*/
threshold = DIV_ROUND_UP(SSI_POLL_TIMEOUT_US * priv->speed_hz,
USEC_PER_SEC * BITS_PER_BYTE);
if (t->len > threshold)
return uniphier_spi_transfer_one_irq(master, spi, t);
else
return uniphier_spi_transfer_one_poll(master, spi, t);
}
static int uniphier_spi_prepare_transfer_hardware(struct spi_master *master)
{
struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
writel(SSI_CTL_EN, priv->base + SSI_CTL);
return 0;
}
static int uniphier_spi_unprepare_transfer_hardware(struct spi_master *master)
{
struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
writel(0, priv->base + SSI_CTL);
return 0;
}
static void uniphier_spi_handle_err(struct spi_master *master,
struct spi_message *msg)
{
struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
u32 val;
/* stop running spi transfer */
writel(0, priv->base + SSI_CTL);
/* reset FIFOs */
val = SSI_FC_TXFFL | SSI_FC_RXFFL;
writel(val, priv->base + SSI_FC);
uniphier_spi_irq_disable(priv, SSI_IE_ALL_MASK);
if (atomic_read(&priv->dma_busy) & SSI_DMA_TX_BUSY) {
dmaengine_terminate_async(master->dma_tx);
atomic_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy);
}
if (atomic_read(&priv->dma_busy) & SSI_DMA_RX_BUSY) {
dmaengine_terminate_async(master->dma_rx);
atomic_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy);
}
}
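/*
* Interrupt handler: on RX-complete drain the RX FIFO, refill the TX FIFO for
* the next chunk, and complete the transfer on the final chunk or on overrun.
*/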
static irqreturn_t uniphier_spi_handler(int irq, void *dev_id)
{
struct uniphier_spi_priv *priv = dev_id;
u32 val, stat;
stat = readl(priv->base + SSI_IS);
val = SSI_IC_TCIC | SSI_IC_RCIC | SSI_IC_RORIC;
writel(val, priv->base + SSI_IC);
/* rx fifo overrun */
if (stat & SSI_IS_RORID) {
priv->error = -EIO;
goto done;
}
/* rx complete */
if ((stat & SSI_IS_RCID) && (stat & SSI_IS_RXRS)) {
while ((readl(priv->base + SSI_SR) & SSI_SR_RNE) &&
(priv->rx_bytes - priv->tx_bytes) > 0)
uniphier_spi_recv(priv);
if ((readl(priv->base + SSI_SR) & SSI_SR_RNE) ||
(priv->rx_bytes != priv->tx_bytes)) {
priv->error = -EIO;
goto done;
} else if (priv->rx_bytes == 0)
goto done;
/* next tx transfer */
uniphier_spi_fill_tx_fifo(priv);
return IRQ_HANDLED;
}
return IRQ_NONE;
done:
complete(&priv->xfer_done);
return IRQ_HANDLED;
}
static int uniphier_spi_probe(struct platform_device *pdev)
{
struct uniphier_spi_priv *priv;
struct spi_master *master;
struct resource *res;
struct dma_slave_caps caps;
u32 dma_tx_burst = 0, dma_rx_burst = 0;
unsigned long clk_rate;
int irq;
int ret;
master = spi_alloc_master(&pdev->dev, sizeof(*priv));
if (!master)
return -ENOMEM;
platform_set_drvdata(pdev, master);
priv = spi_master_get_devdata(master);
priv->master = master;
priv->is_save_param = false;
priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(priv->base)) {
ret = PTR_ERR(priv->base);
goto out_master_put;
}
priv->base_dma_addr = res->start;
priv->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(priv->clk)) {
dev_err(&pdev->dev, "failed to get clock\n");
ret = PTR_ERR(priv->clk);
goto out_master_put;
}
ret = clk_prepare_enable(priv->clk);
if (ret)
goto out_master_put;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
goto out_disable_clk;
}
ret = devm_request_irq(&pdev->dev, irq, uniphier_spi_handler,
0, "uniphier-spi", priv);
if (ret) {
dev_err(&pdev->dev, "failed to request IRQ\n");
goto out_disable_clk;
}
init_completion(&priv->xfer_done);
clk_rate = clk_get_rate(priv->clk);
master->max_speed_hz = DIV_ROUND_UP(clk_rate, SSI_MIN_CLK_DIVIDER);
master->min_speed_hz = DIV_ROUND_UP(clk_rate, SSI_MAX_CLK_DIVIDER);
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
master->dev.of_node = pdev->dev.of_node;
master->bus_num = pdev->id;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
master->set_cs = uniphier_spi_set_cs;
master->transfer_one = uniphier_spi_transfer_one;
master->prepare_transfer_hardware
= uniphier_spi_prepare_transfer_hardware;
master->unprepare_transfer_hardware
= uniphier_spi_unprepare_transfer_hardware;
master->handle_err = uniphier_spi_handle_err;
master->can_dma = uniphier_spi_can_dma;
master->num_chipselect = 1;
master->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
master->dma_tx = dma_request_chan(&pdev->dev, "tx");
if (IS_ERR_OR_NULL(master->dma_tx)) {
if (PTR_ERR(master->dma_tx) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
goto out_disable_clk;
}
master->dma_tx = NULL;
dma_tx_burst = INT_MAX;
} else {
ret = dma_get_slave_caps(master->dma_tx, &caps);
if (ret) {
dev_err(&pdev->dev, "failed to get TX DMA capabilities: %d\n",
ret);
goto out_release_dma;
}
dma_tx_burst = caps.max_burst;
}
master->dma_rx = dma_request_chan(&pdev->dev, "rx");
if (IS_ERR_OR_NULL(master->dma_rx)) {
if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
goto out_release_dma;
}
master->dma_rx = NULL;
dma_rx_burst = INT_MAX;
} else {
ret = dma_get_slave_caps(master->dma_rx, &caps);
if (ret) {
dev_err(&pdev->dev, "failed to get RX DMA capabilities: %d\n",
ret);
goto out_release_dma;
}
dma_rx_burst = caps.max_burst;
}
master->max_dma_len = min(dma_tx_burst, dma_rx_burst);
ret = devm_spi_register_master(&pdev->dev, master);
if (ret)
goto out_release_dma;
return 0;
out_release_dma:
if (!IS_ERR_OR_NULL(master->dma_rx)) {
dma_release_channel(master->dma_rx);
master->dma_rx = NULL;
}
if (!IS_ERR_OR_NULL(master->dma_tx)) {
dma_release_channel(master->dma_tx);
master->dma_tx = NULL;
}
out_disable_clk:
clk_disable_unprepare(priv->clk);
out_master_put:
spi_master_put(master);
return ret;
}
static void uniphier_spi_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
if (master->dma_tx)
dma_release_channel(master->dma_tx);
if (master->dma_rx)
dma_release_channel(master->dma_rx);
clk_disable_unprepare(priv->clk);
}
static const struct of_device_id uniphier_spi_match[] = {
{ .compatible = "socionext,uniphier-scssi" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, uniphier_spi_match);
static struct platform_driver uniphier_spi_driver = {
.probe = uniphier_spi_probe,
.remove_new = uniphier_spi_remove,
.driver = {
.name = "uniphier-spi",
.of_match_table = uniphier_spi_match,
},
};
module_platform_driver(uniphier_spi_driver);
MODULE_AUTHOR("Kunihiko Hayashi <[email protected]>");
MODULE_AUTHOR("Keiji Hayashibara <[email protected]>");
MODULE_DESCRIPTION("Socionext UniPhier SPI controller driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/spi/spi-uniphier.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Driver for Broadcom BRCMSTB, NSP, NS2, Cygnus SPI Controllers
*
* Copyright 2016 Broadcom
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include "spi-bcm-qspi.h"
#define DRIVER_NAME "bcm_qspi"
/* BSPI register offsets */
#define BSPI_REVISION_ID 0x000
#define BSPI_SCRATCH 0x004
#define BSPI_MAST_N_BOOT_CTRL 0x008
#define BSPI_BUSY_STATUS 0x00c
#define BSPI_INTR_STATUS 0x010
#define BSPI_B0_STATUS 0x014
#define BSPI_B0_CTRL 0x018
#define BSPI_B1_STATUS 0x01c
#define BSPI_B1_CTRL 0x020
#define BSPI_STRAP_OVERRIDE_CTRL 0x024
#define BSPI_FLEX_MODE_ENABLE 0x028
#define BSPI_BITS_PER_CYCLE 0x02c
#define BSPI_BITS_PER_PHASE 0x030
#define BSPI_CMD_AND_MODE_BYTE 0x034
#define BSPI_BSPI_FLASH_UPPER_ADDR_BYTE 0x038
#define BSPI_BSPI_XOR_VALUE 0x03c
#define BSPI_BSPI_XOR_ENABLE 0x040
#define BSPI_BSPI_PIO_MODE_ENABLE 0x044
#define BSPI_BSPI_PIO_IODIR 0x048
#define BSPI_BSPI_PIO_DATA 0x04c
/* RAF register offsets */
#define BSPI_RAF_START_ADDR 0x100
#define BSPI_RAF_NUM_WORDS 0x104
#define BSPI_RAF_CTRL 0x108
#define BSPI_RAF_FULLNESS 0x10c
#define BSPI_RAF_WATERMARK 0x110
#define BSPI_RAF_STATUS 0x114
#define BSPI_RAF_READ_DATA 0x118
#define BSPI_RAF_WORD_CNT 0x11c
#define BSPI_RAF_CURR_ADDR 0x120
/* Override mode masks */
#define BSPI_STRAP_OVERRIDE_CTRL_OVERRIDE BIT(0)
#define BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL BIT(1)
#define BSPI_STRAP_OVERRIDE_CTRL_ADDR_4BYTE BIT(2)
#define BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD BIT(3)
#define BSPI_STRAP_OVERRIDE_CTRL_ENDAIN_MODE BIT(4)
#define BSPI_ADDRLEN_3BYTES 3
#define BSPI_ADDRLEN_4BYTES 4
#define BSPI_RAF_STATUS_FIFO_EMPTY_MASK BIT(1)
#define BSPI_RAF_CTRL_START_MASK BIT(0)
#define BSPI_RAF_CTRL_CLEAR_MASK BIT(1)
#define BSPI_BPP_MODE_SELECT_MASK BIT(8)
#define BSPI_BPP_ADDR_SELECT_MASK BIT(16)
#define BSPI_READ_LENGTH 256
/* MSPI register offsets */
#define MSPI_SPCR0_LSB 0x000
#define MSPI_SPCR0_MSB 0x004
#define MSPI_SPCR0_MSB_CPHA BIT(0)
#define MSPI_SPCR0_MSB_CPOL BIT(1)
#define MSPI_SPCR0_MSB_BITS_SHIFT 0x2
#define MSPI_SPCR1_LSB 0x008
#define MSPI_SPCR1_MSB 0x00c
#define MSPI_NEWQP 0x010
#define MSPI_ENDQP 0x014
#define MSPI_SPCR2 0x018
#define MSPI_MSPI_STATUS 0x020
#define MSPI_CPTQP 0x024
#define MSPI_SPCR3 0x028
#define MSPI_REV 0x02c
#define MSPI_TXRAM 0x040
#define MSPI_RXRAM 0x0c0
#define MSPI_CDRAM 0x140
#define MSPI_WRITE_LOCK 0x180
#define MSPI_MASTER_BIT BIT(7)
#define MSPI_NUM_CDRAM 16
#define MSPI_CDRAM_OUTP BIT(8)
#define MSPI_CDRAM_CONT_BIT BIT(7)
#define MSPI_CDRAM_BITSE_BIT BIT(6)
#define MSPI_CDRAM_DT_BIT BIT(5)
#define MSPI_CDRAM_PCS 0xf
#define MSPI_SPCR2_SPE BIT(6)
#define MSPI_SPCR2_CONT_AFTER_CMD BIT(7)
#define MSPI_SPCR3_FASTBR BIT(0)
#define MSPI_SPCR3_FASTDT BIT(1)
#define MSPI_SPCR3_SYSCLKSEL_MASK GENMASK(11, 10)
#define MSPI_SPCR3_SYSCLKSEL_27 (MSPI_SPCR3_SYSCLKSEL_MASK & \
~(BIT(10) | BIT(11)))
#define MSPI_SPCR3_SYSCLKSEL_108 (MSPI_SPCR3_SYSCLKSEL_MASK & \
BIT(11))
#define MSPI_SPCR3_TXRXDAM_MASK GENMASK(4, 2)
#define MSPI_SPCR3_DAM_8BYTE 0
#define MSPI_SPCR3_DAM_16BYTE (BIT(2) | BIT(4))
#define MSPI_SPCR3_DAM_32BYTE (BIT(3) | BIT(5))
#define MSPI_SPCR3_HALFDUPLEX BIT(6)
#define MSPI_SPCR3_HDOUTTYPE BIT(7)
#define MSPI_SPCR3_DATA_REG_SZ BIT(8)
#define MSPI_SPCR3_CPHARX BIT(9)
#define MSPI_MSPI_STATUS_SPIF BIT(0)
#define INTR_BASE_BIT_SHIFT 0x02
#define INTR_COUNT 0x07
#define NUM_CHIPSELECT 4
#define QSPI_SPBR_MAX 255U
#define MSPI_BASE_FREQ 27000000UL
#define OPCODE_DIOR 0xBB
#define OPCODE_QIOR 0xEB
#define OPCODE_DIOR_4B 0xBC
#define OPCODE_QIOR_4B 0xEC
#define MAX_CMD_SIZE 6
#define ADDR_4MB_MASK GENMASK(22, 0)
/* stop at end of transfer, no other reason */
#define TRANS_STATUS_BREAK_NONE 0
/* stop at end of spi_message */
#define TRANS_STATUS_BREAK_EOM 1
/* stop at end of spi_transfer if delay */
#define TRANS_STATUS_BREAK_DELAY 2
/* stop at end of spi_transfer if cs_change */
#define TRANS_STATUS_BREAK_CS_CHANGE 4
/* stop if we run out of bytes */
#define TRANS_STATUS_BREAK_NO_BYTES 8
/* events that make us stop filling TX slots */
#define TRANS_STATUS_BREAK_TX (TRANS_STATUS_BREAK_EOM | \
TRANS_STATUS_BREAK_DELAY | \
TRANS_STATUS_BREAK_CS_CHANGE)
/* events that make us deassert CS */
#define TRANS_STATUS_BREAK_DESELECT (TRANS_STATUS_BREAK_EOM | \
TRANS_STATUS_BREAK_CS_CHANGE)
/*
* Used for writing and reading data in the right order
* to TXRAM and RXRAM when used as 32-bit registers respectively
*/
#define swap4bytes(__val) \
((((__val) >> 24) & 0x000000FF) | (((__val) >> 8) & 0x0000FF00) | \
(((__val) << 8) & 0x00FF0000) | (((__val) << 24) & 0xFF000000))
struct bcm_qspi_parms {
u32 speed_hz;
u8 mode;
u8 bits_per_word;
};
struct bcm_xfer_mode {
bool flex_mode;
unsigned int width;
unsigned int addrlen;
unsigned int hp;
};
enum base_type {
MSPI,
BSPI,
CHIP_SELECT,
BASEMAX,
};
enum irq_source {
SINGLE_L2,
MUXED_L1,
};
struct bcm_qspi_irq {
const char *irq_name;
const irq_handler_t irq_handler;
int irq_source;
u32 mask;
};
struct bcm_qspi_dev_id {
const struct bcm_qspi_irq *irqp;
void *dev;
};
struct qspi_trans {
struct spi_transfer *trans;
int byte;
bool mspi_last_trans;
};
struct bcm_qspi {
struct platform_device *pdev;
struct spi_controller *host;
struct clk *clk;
u32 base_clk;
u32 max_speed_hz;
void __iomem *base[BASEMAX];
/* Some SoCs provide custom interrupt status register(s) */
struct bcm_qspi_soc_intc *soc_intc;
struct bcm_qspi_parms last_parms;
struct qspi_trans trans_pos;
int curr_cs;
int bspi_maj_rev;
int bspi_min_rev;
int bspi_enabled;
const struct spi_mem_op *bspi_rf_op;
u32 bspi_rf_op_idx;
u32 bspi_rf_op_len;
u32 bspi_rf_op_status;
struct bcm_xfer_mode xfer_mode;
u32 s3_strap_override_ctrl;
bool bspi_mode;
bool big_endian;
int num_irqs;
struct bcm_qspi_dev_id *dev_ids;
struct completion mspi_done;
struct completion bspi_done;
u8 mspi_maj_rev;
u8 mspi_min_rev;
bool mspi_spcr3_sysclk;
};
static inline bool has_bspi(struct bcm_qspi *qspi)
{
return qspi->bspi_mode;
}
/* hardware supports spcr3 and fast baud-rate */
static inline bool bcm_qspi_has_fastbr(struct bcm_qspi *qspi)
{
if (!has_bspi(qspi) &&
((qspi->mspi_maj_rev >= 1) &&
(qspi->mspi_min_rev >= 5)))
return true;
return false;
}
/* hardware supports sys clk 108Mhz */
static inline bool bcm_qspi_has_sysclk_108(struct bcm_qspi *qspi)
{
if (!has_bspi(qspi) && (qspi->mspi_spcr3_sysclk ||
((qspi->mspi_maj_rev >= 1) &&
(qspi->mspi_min_rev >= 6))))
return true;
return false;
}
static inline int bcm_qspi_spbr_min(struct bcm_qspi *qspi)
{
if (bcm_qspi_has_fastbr(qspi))
return (bcm_qspi_has_sysclk_108(qspi) ? 4 : 1);
else
return 8;
}
static u32 bcm_qspi_calc_spbr(u32 clk_speed_hz,
const struct bcm_qspi_parms *xp)
{
u32 spbr = 0;
/* SPBR = System Clock/(2 * SCK Baud Rate) */
if (xp->speed_hz)
spbr = clk_speed_hz / (xp->speed_hz * 2);
return spbr;
}
/* Read qspi controller register */
static inline u32 bcm_qspi_read(struct bcm_qspi *qspi, enum base_type type,
unsigned int offset)
{
return bcm_qspi_readl(qspi->big_endian, qspi->base[type] + offset);
}
/* Write qspi controller register */
static inline void bcm_qspi_write(struct bcm_qspi *qspi, enum base_type type,
unsigned int offset, unsigned int data)
{
bcm_qspi_writel(qspi->big_endian, data, qspi->base[type] + offset);
}
/* BSPI helpers */
static int bcm_qspi_bspi_busy_poll(struct bcm_qspi *qspi)
{
int i;
/* this should normally finish within 10us */
for (i = 0; i < 1000; i++) {
if (!(bcm_qspi_read(qspi, BSPI, BSPI_BUSY_STATUS) & 1))
return 0;
udelay(1);
}
dev_warn(&qspi->pdev->dev, "timeout waiting for !busy_status\n");
return -EIO;
}
static inline bool bcm_qspi_bspi_ver_three(struct bcm_qspi *qspi)
{
if (qspi->bspi_maj_rev < 4)
return true;
return false;
}
static void bcm_qspi_bspi_flush_prefetch_buffers(struct bcm_qspi *qspi)
{
bcm_qspi_bspi_busy_poll(qspi);
/* Force rising edge for the b0/b1 'flush' field */
bcm_qspi_write(qspi, BSPI, BSPI_B0_CTRL, 1);
bcm_qspi_write(qspi, BSPI, BSPI_B1_CTRL, 1);
bcm_qspi_write(qspi, BSPI, BSPI_B0_CTRL, 0);
bcm_qspi_write(qspi, BSPI, BSPI_B1_CTRL, 0);
}
static int bcm_qspi_bspi_lr_is_fifo_empty(struct bcm_qspi *qspi)
{
return (bcm_qspi_read(qspi, BSPI, BSPI_RAF_STATUS) &
BSPI_RAF_STATUS_FIFO_EMPTY_MASK);
}
static inline u32 bcm_qspi_bspi_lr_read_fifo(struct bcm_qspi *qspi)
{
u32 data = bcm_qspi_read(qspi, BSPI, BSPI_RAF_READ_DATA);
/* BSPI v3 LR is LE only, convert data to host endianness */
if (bcm_qspi_bspi_ver_three(qspi))
data = le32_to_cpu(data);
return data;
}
static inline void bcm_qspi_bspi_lr_start(struct bcm_qspi *qspi)
{
bcm_qspi_bspi_busy_poll(qspi);
bcm_qspi_write(qspi, BSPI, BSPI_RAF_CTRL,
BSPI_RAF_CTRL_START_MASK);
}
static inline void bcm_qspi_bspi_lr_clear(struct bcm_qspi *qspi)
{
bcm_qspi_write(qspi, BSPI, BSPI_RAF_CTRL,
BSPI_RAF_CTRL_CLEAR_MASK);
bcm_qspi_bspi_flush_prefetch_buffers(qspi);
}
static void bcm_qspi_bspi_lr_data_read(struct bcm_qspi *qspi)
{
u32 *buf = (u32 *)qspi->bspi_rf_op->data.buf.in;
u32 data = 0;
dev_dbg(&qspi->pdev->dev, "xfer %p rx %p rxlen %d\n", qspi->bspi_rf_op,
qspi->bspi_rf_op->data.buf.in, qspi->bspi_rf_op_len);
while (!bcm_qspi_bspi_lr_is_fifo_empty(qspi)) {
data = bcm_qspi_bspi_lr_read_fifo(qspi);
if (likely(qspi->bspi_rf_op_len >= 4) &&
IS_ALIGNED((uintptr_t)buf, 4)) {
buf[qspi->bspi_rf_op_idx++] = data;
qspi->bspi_rf_op_len -= 4;
} else {
/* Read out the remaining (unaligned or partial-word) bytes one at a time */
u8 *cbuf = (u8 *)&buf[qspi->bspi_rf_op_idx];
data = cpu_to_le32(data);
while (qspi->bspi_rf_op_len) {
*cbuf++ = (u8)data;
data >>= 8;
qspi->bspi_rf_op_len--;
}
}
}
}
static void bcm_qspi_bspi_set_xfer_params(struct bcm_qspi *qspi, u8 cmd_byte,
int bpp, int bpc, int flex_mode)
{
bcm_qspi_write(qspi, BSPI, BSPI_FLEX_MODE_ENABLE, 0);
bcm_qspi_write(qspi, BSPI, BSPI_BITS_PER_CYCLE, bpc);
bcm_qspi_write(qspi, BSPI, BSPI_BITS_PER_PHASE, bpp);
bcm_qspi_write(qspi, BSPI, BSPI_CMD_AND_MODE_BYTE, cmd_byte);
bcm_qspi_write(qspi, BSPI, BSPI_FLEX_MODE_ENABLE, flex_mode);
}
static int bcm_qspi_bspi_set_flex_mode(struct bcm_qspi *qspi,
const struct spi_mem_op *op, int hp)
{
int bpc = 0, bpp = 0;
u8 command = op->cmd.opcode;
int width = op->data.buswidth ? op->data.buswidth : SPI_NBITS_SINGLE;
int addrlen = op->addr.nbytes;
int flex_mode = 1;
dev_dbg(&qspi->pdev->dev, "set flex mode w %x addrlen %x hp %d\n",
width, addrlen, hp);
if (addrlen == BSPI_ADDRLEN_4BYTES)
bpp = BSPI_BPP_ADDR_SELECT_MASK;
if (op->dummy.nbytes)
bpp |= (op->dummy.nbytes * 8) / op->dummy.buswidth;
switch (width) {
case SPI_NBITS_SINGLE:
if (addrlen == BSPI_ADDRLEN_3BYTES)
/* default mode, does not need flex_cmd */
flex_mode = 0;
break;
case SPI_NBITS_DUAL:
bpc = 0x00000001;
if (hp) {
bpc |= 0x00010100; /* address and mode are 2-bit */
bpp = BSPI_BPP_MODE_SELECT_MASK;
}
break;
case SPI_NBITS_QUAD:
bpc = 0x00000002;
if (hp) {
bpc |= 0x00020200; /* address and mode are 4-bit */
bpp |= BSPI_BPP_MODE_SELECT_MASK;
}
break;
default:
return -EINVAL;
}
bcm_qspi_bspi_set_xfer_params(qspi, command, bpp, bpc, flex_mode);
return 0;
}
static int bcm_qspi_bspi_set_override(struct bcm_qspi *qspi,
const struct spi_mem_op *op, int hp)
{
int width = op->data.buswidth ? op->data.buswidth : SPI_NBITS_SINGLE;
int addrlen = op->addr.nbytes;
u32 data = bcm_qspi_read(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL);
dev_dbg(&qspi->pdev->dev, "set override mode w %x addrlen %x hp %d\n",
width, addrlen, hp);
switch (width) {
case SPI_NBITS_SINGLE:
/* clear quad/dual mode */
data &= ~(BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD |
BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL);
break;
case SPI_NBITS_QUAD:
/* clear dual mode and set quad mode */
data &= ~BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL;
data |= BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD;
break;
case SPI_NBITS_DUAL:
/* clear quad mode set dual mode */
data &= ~BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD;
data |= BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL;
break;
default:
return -EINVAL;
}
if (addrlen == BSPI_ADDRLEN_4BYTES)
/* set 4 byte mode */
data |= BSPI_STRAP_OVERRIDE_CTRL_ADDR_4BYTE;
else
/* clear 4 byte mode */
data &= ~BSPI_STRAP_OVERRIDE_CTRL_ADDR_4BYTE;
/* set the override mode */
data |= BSPI_STRAP_OVERRIDE_CTRL_OVERRIDE;
bcm_qspi_write(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL, data);
bcm_qspi_bspi_set_xfer_params(qspi, op->cmd.opcode, 0, 0, 0);
return 0;
}
static int bcm_qspi_bspi_set_mode(struct bcm_qspi *qspi,
const struct spi_mem_op *op, int hp)
{
int error = 0;
int width = op->data.buswidth ? op->data.buswidth : SPI_NBITS_SINGLE;
int addrlen = op->addr.nbytes;
/* default mode */
qspi->xfer_mode.flex_mode = true;
if (!bcm_qspi_bspi_ver_three(qspi)) {
u32 val, mask;
val = bcm_qspi_read(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL);
mask = BSPI_STRAP_OVERRIDE_CTRL_OVERRIDE;
if (val & mask || qspi->s3_strap_override_ctrl & mask) {
qspi->xfer_mode.flex_mode = false;
bcm_qspi_write(qspi, BSPI, BSPI_FLEX_MODE_ENABLE, 0);
error = bcm_qspi_bspi_set_override(qspi, op, hp);
}
}
if (qspi->xfer_mode.flex_mode)
error = bcm_qspi_bspi_set_flex_mode(qspi, op, hp);
if (error) {
dev_warn(&qspi->pdev->dev,
"INVALID COMBINATION: width=%d addrlen=%d hp=%d\n",
width, addrlen, hp);
} else if (qspi->xfer_mode.width != width ||
qspi->xfer_mode.addrlen != addrlen ||
qspi->xfer_mode.hp != hp) {
qspi->xfer_mode.width = width;
qspi->xfer_mode.addrlen = addrlen;
qspi->xfer_mode.hp = hp;
dev_dbg(&qspi->pdev->dev,
"cs:%d %d-lane output, %d-byte address%s\n",
qspi->curr_cs,
qspi->xfer_mode.width,
qspi->xfer_mode.addrlen,
qspi->xfer_mode.hp != -1 ? ", hp mode" : "");
}
return error;
}
static void bcm_qspi_enable_bspi(struct bcm_qspi *qspi)
{
if (!has_bspi(qspi))
return;
qspi->bspi_enabled = 1;
if ((bcm_qspi_read(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL) & 1) == 0)
return;
bcm_qspi_bspi_flush_prefetch_buffers(qspi);
udelay(1);
bcm_qspi_write(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL, 0);
udelay(1);
}
static void bcm_qspi_disable_bspi(struct bcm_qspi *qspi)
{
if (!has_bspi(qspi))
return;
qspi->bspi_enabled = 0;
if ((bcm_qspi_read(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL) & 1))
return;
bcm_qspi_bspi_busy_poll(qspi);
bcm_qspi_write(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL, 1);
udelay(1);
}
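/* Drive the external chip-select register, when one is provided, and record the current CS */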
static void bcm_qspi_chip_select(struct bcm_qspi *qspi, int cs)
{
u32 rd = 0;
u32 wr = 0;
if (cs >= 0 && qspi->base[CHIP_SELECT]) {
rd = bcm_qspi_read(qspi, CHIP_SELECT, 0);
wr = (rd & ~0xff) | (1 << cs);
if (rd == wr)
return;
bcm_qspi_write(qspi, CHIP_SELECT, 0, wr);
usleep_range(10, 20);
}
dev_dbg(&qspi->pdev->dev, "using cs:%d\n", cs);
qspi->curr_cs = cs;
}
static bool bcmspi_parms_did_change(const struct bcm_qspi_parms * const cur,
const struct bcm_qspi_parms * const prev)
{
return (cur->speed_hz != prev->speed_hz) ||
(cur->mode != prev->mode) ||
(cur->bits_per_word != prev->bits_per_word);
}
/* MSPI helpers */
static void bcm_qspi_hw_set_parms(struct bcm_qspi *qspi,
const struct bcm_qspi_parms *xp)
{
u32 spcr, spbr = 0;
if (!bcmspi_parms_did_change(xp, &qspi->last_parms))
return;
if (!qspi->mspi_maj_rev)
/* legacy controller */
spcr = MSPI_MASTER_BIT;
else
spcr = 0;
/*
* Bits per transfer. BITS determines the number of data bits
* transferred if the command control bit (BITSE of a
* CDRAM Register) is equal to 1.
* If CDRAM BITSE is equal to 0, 8 data bits are transferred
* regardless
*/
if (xp->bits_per_word != 16 && xp->bits_per_word != 64)
spcr |= xp->bits_per_word << MSPI_SPCR0_MSB_BITS_SHIFT;
spcr |= xp->mode & (MSPI_SPCR0_MSB_CPHA | MSPI_SPCR0_MSB_CPOL);
bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_MSB, spcr);
if (bcm_qspi_has_fastbr(qspi)) {
spcr = 0;
/* enable fastbr */
spcr |= MSPI_SPCR3_FASTBR;
if (xp->mode & SPI_3WIRE)
spcr |= MSPI_SPCR3_HALFDUPLEX | MSPI_SPCR3_HDOUTTYPE;
if (bcm_qspi_has_sysclk_108(qspi)) {
/* check requested baud rate before moving to 108Mhz */
spbr = bcm_qspi_calc_spbr(MSPI_BASE_FREQ * 4, xp);
if (spbr > QSPI_SPBR_MAX) {
/* use SYSCLK_27Mhz for slower baud rates */
spcr &= ~MSPI_SPCR3_SYSCLKSEL_MASK;
qspi->base_clk = MSPI_BASE_FREQ;
} else {
/* SYSCLK_108Mhz */
spcr |= MSPI_SPCR3_SYSCLKSEL_108;
qspi->base_clk = MSPI_BASE_FREQ * 4;
}
}
if (xp->bits_per_word > 16) {
/* data_reg_size 1 (64bit) */
spcr |= MSPI_SPCR3_DATA_REG_SZ;
/* TxRx RAM data access mode 2 for 32B and set fastdt */
spcr |= MSPI_SPCR3_DAM_32BYTE | MSPI_SPCR3_FASTDT;
/*
* Set length of delay after transfer
* DTL from 0(256) to 1
*/
bcm_qspi_write(qspi, MSPI, MSPI_SPCR1_LSB, 1);
} else {
/* data_reg_size[8] = 0 */
spcr &= ~(MSPI_SPCR3_DATA_REG_SZ);
/*
* TxRx RAM access mode 8B
* and disable fastdt
*/
spcr &= ~(MSPI_SPCR3_DAM_32BYTE);
}
bcm_qspi_write(qspi, MSPI, MSPI_SPCR3, spcr);
}
/* SCK Baud Rate = System Clock/(2 * SPBR) */
qspi->max_speed_hz = qspi->base_clk / (bcm_qspi_spbr_min(qspi) * 2);
spbr = bcm_qspi_calc_spbr(qspi->base_clk, xp);
spbr = clamp_val(spbr, bcm_qspi_spbr_min(qspi), QSPI_SPBR_MAX);
bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_LSB, spbr);
qspi->last_parms = *xp;
}
static void bcm_qspi_update_parms(struct bcm_qspi *qspi,
struct spi_device *spi,
struct spi_transfer *trans)
{
struct bcm_qspi_parms xp;
xp.speed_hz = trans->speed_hz;
xp.bits_per_word = trans->bits_per_word;
xp.mode = spi->mode;
bcm_qspi_hw_set_parms(qspi, &xp);
}
static int bcm_qspi_setup(struct spi_device *spi)
{
struct bcm_qspi_parms *xp;
if (spi->bits_per_word > 64)
return -EINVAL;
xp = spi_get_ctldata(spi);
if (!xp) {
xp = kzalloc(sizeof(*xp), GFP_KERNEL);
if (!xp)
return -ENOMEM;
spi_set_ctldata(spi, xp);
}
xp->speed_hz = spi->max_speed_hz;
xp->mode = spi->mode;
if (spi->bits_per_word)
xp->bits_per_word = spi->bits_per_word;
else
xp->bits_per_word = 8;
return 0;
}
static bool bcm_qspi_mspi_transfer_is_last(struct bcm_qspi *qspi,
struct qspi_trans *qt)
{
if (qt->mspi_last_trans &&
spi_transfer_is_last(qspi->host, qt->trans))
return true;
else
return false;
}
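/*
* Account for one transferred word and report why slot filling should stop
* (end of message, delay, cs_change, or simply out of bytes).
*/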
static int update_qspi_trans_byte_count(struct bcm_qspi *qspi,
struct qspi_trans *qt, int flags)
{
int ret = TRANS_STATUS_BREAK_NONE;
/* count the last transferred bytes */
if (qt->trans->bits_per_word <= 8)
qt->byte++;
else if (qt->trans->bits_per_word <= 16)
qt->byte += 2;
else if (qt->trans->bits_per_word <= 32)
qt->byte += 4;
else if (qt->trans->bits_per_word <= 64)
qt->byte += 8;
if (qt->byte >= qt->trans->len) {
/* we're at the end of the spi_transfer */
/* in TX mode, need to pause for a delay or CS change */
if (qt->trans->delay.value &&
(flags & TRANS_STATUS_BREAK_DELAY))
ret |= TRANS_STATUS_BREAK_DELAY;
if (qt->trans->cs_change &&
(flags & TRANS_STATUS_BREAK_CS_CHANGE))
ret |= TRANS_STATUS_BREAK_CS_CHANGE;
if (bcm_qspi_mspi_transfer_is_last(qspi, qt))
ret |= TRANS_STATUS_BREAK_EOM;
else
ret |= TRANS_STATUS_BREAK_NO_BYTES;
qt->trans = NULL;
}
dev_dbg(&qspi->pdev->dev, "trans %p len %d byte %d ret %x\n",
qt->trans, qt->trans ? qt->trans->len : 0, qt->byte, ret);
return ret;
}
static inline u8 read_rxram_slot_u8(struct bcm_qspi *qspi, int slot)
{
u32 slot_offset = MSPI_RXRAM + (slot << 3) + 0x4;
/* mask out reserved bits */
return bcm_qspi_read(qspi, MSPI, slot_offset) & 0xff;
}
static inline u16 read_rxram_slot_u16(struct bcm_qspi *qspi, int slot)
{
u32 reg_offset = MSPI_RXRAM;
u32 lsb_offset = reg_offset + (slot << 3) + 0x4;
u32 msb_offset = reg_offset + (slot << 3);
return (bcm_qspi_read(qspi, MSPI, lsb_offset) & 0xff) |
((bcm_qspi_read(qspi, MSPI, msb_offset) & 0xff) << 8);
}
static inline u32 read_rxram_slot_u32(struct bcm_qspi *qspi, int slot)
{
u32 reg_offset = MSPI_RXRAM;
u32 offset = reg_offset + (slot << 3);
u32 val;
val = bcm_qspi_read(qspi, MSPI, offset);
val = swap4bytes(val);
return val;
}
static inline u64 read_rxram_slot_u64(struct bcm_qspi *qspi, int slot)
{
u32 reg_offset = MSPI_RXRAM;
u32 lsb_offset = reg_offset + (slot << 3) + 0x4;
u32 msb_offset = reg_offset + (slot << 3);
u32 msb, lsb;
msb = bcm_qspi_read(qspi, MSPI, msb_offset);
msb = swap4bytes(msb);
lsb = bcm_qspi_read(qspi, MSPI, lsb_offset);
lsb = swap4bytes(lsb);
return ((u64)msb << 32 | lsb);
}
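/* Copy the received words from the MSPI RX RAM slots into the current spi_transfer */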
static void read_from_hw(struct bcm_qspi *qspi, int slots)
{
struct qspi_trans tp;
int slot;
bcm_qspi_disable_bspi(qspi);
if (slots > MSPI_NUM_CDRAM) {
/* should never happen */
dev_err(&qspi->pdev->dev, "%s: too many slots!\n", __func__);
return;
}
tp = qspi->trans_pos;
for (slot = 0; slot < slots; slot++) {
if (tp.trans->bits_per_word <= 8) {
u8 *buf = tp.trans->rx_buf;
if (buf)
buf[tp.byte] = read_rxram_slot_u8(qspi, slot);
dev_dbg(&qspi->pdev->dev, "RD %02x\n",
buf ? buf[tp.byte] : 0x0);
} else if (tp.trans->bits_per_word <= 16) {
u16 *buf = tp.trans->rx_buf;
if (buf)
buf[tp.byte / 2] = read_rxram_slot_u16(qspi,
slot);
dev_dbg(&qspi->pdev->dev, "RD %04x\n",
buf ? buf[tp.byte / 2] : 0x0);
} else if (tp.trans->bits_per_word <= 32) {
u32 *buf = tp.trans->rx_buf;
if (buf)
buf[tp.byte / 4] = read_rxram_slot_u32(qspi,
slot);
dev_dbg(&qspi->pdev->dev, "RD %08x\n",
buf ? buf[tp.byte / 4] : 0x0);
} else if (tp.trans->bits_per_word <= 64) {
u64 *buf = tp.trans->rx_buf;
if (buf)
buf[tp.byte / 8] = read_rxram_slot_u64(qspi,
slot);
dev_dbg(&qspi->pdev->dev, "RD %llx\n",
buf ? buf[tp.byte / 8] : 0x0);
}
update_qspi_trans_byte_count(qspi, &tp,
TRANS_STATUS_BREAK_NONE);
}
qspi->trans_pos = tp;
}
static inline void write_txram_slot_u8(struct bcm_qspi *qspi, int slot,
u8 val)
{
u32 reg_offset = MSPI_TXRAM + (slot << 3);
/* mask out reserved bits */
bcm_qspi_write(qspi, MSPI, reg_offset, val);
}
static inline void write_txram_slot_u16(struct bcm_qspi *qspi, int slot,
u16 val)
{
u32 reg_offset = MSPI_TXRAM;
u32 msb_offset = reg_offset + (slot << 3);
u32 lsb_offset = reg_offset + (slot << 3) + 0x4;
bcm_qspi_write(qspi, MSPI, msb_offset, (val >> 8));
bcm_qspi_write(qspi, MSPI, lsb_offset, (val & 0xff));
}
static inline void write_txram_slot_u32(struct bcm_qspi *qspi, int slot,
u32 val)
{
u32 reg_offset = MSPI_TXRAM;
u32 msb_offset = reg_offset + (slot << 3);
bcm_qspi_write(qspi, MSPI, msb_offset, swap4bytes(val));
}
static inline void write_txram_slot_u64(struct bcm_qspi *qspi, int slot,
u64 val)
{
u32 reg_offset = MSPI_TXRAM;
u32 msb_offset = reg_offset + (slot << 3);
u32 lsb_offset = reg_offset + (slot << 3) + 0x4;
u32 msb = upper_32_bits(val);
u32 lsb = lower_32_bits(val);
bcm_qspi_write(qspi, MSPI, msb_offset, swap4bytes(msb));
bcm_qspi_write(qspi, MSPI, lsb_offset, swap4bytes(lsb));
}
static inline u32 read_cdram_slot(struct bcm_qspi *qspi, int slot)
{
return bcm_qspi_read(qspi, MSPI, MSPI_CDRAM + (slot << 2));
}
static inline void write_cdram_slot(struct bcm_qspi *qspi, int slot, u32 val)
{
bcm_qspi_write(qspi, MSPI, (MSPI_CDRAM + (slot << 2)), val);
}
/* Return number of slots written */
static int write_to_hw(struct bcm_qspi *qspi, struct spi_device *spi)
{
struct qspi_trans tp;
int slot = 0, tstatus = 0;
u32 mspi_cdram = 0;
bcm_qspi_disable_bspi(qspi);
tp = qspi->trans_pos;
bcm_qspi_update_parms(qspi, spi, tp.trans);
/* Run until end of transfer or reached the max data */
while (!tstatus && slot < MSPI_NUM_CDRAM) {
mspi_cdram = MSPI_CDRAM_CONT_BIT;
if (tp.trans->bits_per_word <= 8) {
const u8 *buf = tp.trans->tx_buf;
u8 val = buf ? buf[tp.byte] : 0x00;
write_txram_slot_u8(qspi, slot, val);
dev_dbg(&qspi->pdev->dev, "WR %02x\n", val);
} else if (tp.trans->bits_per_word <= 16) {
const u16 *buf = tp.trans->tx_buf;
u16 val = buf ? buf[tp.byte / 2] : 0x0000;
write_txram_slot_u16(qspi, slot, val);
dev_dbg(&qspi->pdev->dev, "WR %04x\n", val);
} else if (tp.trans->bits_per_word <= 32) {
const u32 *buf = tp.trans->tx_buf;
u32 val = buf ? buf[tp.byte/4] : 0x0;
write_txram_slot_u32(qspi, slot, val);
dev_dbg(&qspi->pdev->dev, "WR %08x\n", val);
} else if (tp.trans->bits_per_word <= 64) {
const u64 *buf = tp.trans->tx_buf;
u64 val = (buf ? buf[tp.byte/8] : 0x0);
/* use the length of delay from SPCR1_LSB */
if (bcm_qspi_has_fastbr(qspi))
mspi_cdram |= MSPI_CDRAM_DT_BIT;
write_txram_slot_u64(qspi, slot, val);
dev_dbg(&qspi->pdev->dev, "WR %llx\n", val);
}
mspi_cdram |= ((tp.trans->bits_per_word <= 8) ? 0 :
MSPI_CDRAM_BITSE_BIT);
/* set 3-wire half-duplex mode, data from host to target */
if ((spi->mode & SPI_3WIRE) && tp.trans->tx_buf)
mspi_cdram |= MSPI_CDRAM_OUTP;
if (has_bspi(qspi))
mspi_cdram &= ~1;
else
mspi_cdram |= (~(1 << spi_get_chipselect(spi, 0)) &
MSPI_CDRAM_PCS);
write_cdram_slot(qspi, slot, mspi_cdram);
tstatus = update_qspi_trans_byte_count(qspi, &tp,
TRANS_STATUS_BREAK_TX);
slot++;
}
if (!slot) {
dev_err(&qspi->pdev->dev, "%s: no data to send?\n", __func__);
goto done;
}
dev_dbg(&qspi->pdev->dev, "submitting %d slots\n", slot);
bcm_qspi_write(qspi, MSPI, MSPI_NEWQP, 0);
bcm_qspi_write(qspi, MSPI, MSPI_ENDQP, slot - 1);
/*
* case 1) EOM =1, cs_change =0: SSb inactive
* case 2) EOM =1, cs_change =1: SSb stay active
* case 3) EOM =0, cs_change =0: SSb stay active
* case 4) EOM =0, cs_change =1: SSb inactive
*/
if (((tstatus & TRANS_STATUS_BREAK_DESELECT)
== TRANS_STATUS_BREAK_CS_CHANGE) ||
((tstatus & TRANS_STATUS_BREAK_DESELECT)
== TRANS_STATUS_BREAK_EOM)) {
mspi_cdram = read_cdram_slot(qspi, slot - 1) &
~MSPI_CDRAM_CONT_BIT;
write_cdram_slot(qspi, slot - 1, mspi_cdram);
}
if (has_bspi(qspi))
bcm_qspi_write(qspi, MSPI, MSPI_WRITE_LOCK, 1);
/* Must flush previous writes before starting MSPI operation */
mb();
/* Set cont | spe | spifie */
bcm_qspi_write(qspi, MSPI, MSPI_SPCR2, 0xe0);
done:
return slot;
}
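/*
* Execute a flash read through the BSPI RAF engine, splitting it into chunks
* of at most BSPI_READ_LENGTH bytes and waiting for each session to complete.
*/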
static int bcm_qspi_bspi_exec_mem_op(struct spi_device *spi,
const struct spi_mem_op *op)
{
struct bcm_qspi *qspi = spi_controller_get_devdata(spi->controller);
u32 addr = 0, len, rdlen, len_words, from = 0;
int ret = 0;
unsigned long timeo = msecs_to_jiffies(100);
struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
if (bcm_qspi_bspi_ver_three(qspi))
if (op->addr.nbytes == BSPI_ADDRLEN_4BYTES)
return -EIO;
from = op->addr.val;
if (!spi_get_csgpiod(spi, 0))
bcm_qspi_chip_select(qspi, spi_get_chipselect(spi, 0));
bcm_qspi_write(qspi, MSPI, MSPI_WRITE_LOCK, 0);
/*
* when using flex mode we need to send
* the upper address byte to bspi
*/
if (!bcm_qspi_bspi_ver_three(qspi)) {
addr = from & 0xff000000;
bcm_qspi_write(qspi, BSPI,
BSPI_BSPI_FLASH_UPPER_ADDR_BYTE, addr);
}
if (!qspi->xfer_mode.flex_mode)
addr = from;
else
addr = from & 0x00ffffff;
if (bcm_qspi_bspi_ver_three(qspi) == true)
addr = (addr + 0xc00000) & 0xffffff;
/*
* read into the entire buffer by breaking the reads
* into RAF buffer read lengths
*/
len = op->data.nbytes;
qspi->bspi_rf_op_idx = 0;
do {
if (len > BSPI_READ_LENGTH)
rdlen = BSPI_READ_LENGTH;
else
rdlen = len;
reinit_completion(&qspi->bspi_done);
bcm_qspi_enable_bspi(qspi);
len_words = (rdlen + 3) >> 2;
qspi->bspi_rf_op = op;
qspi->bspi_rf_op_status = 0;
qspi->bspi_rf_op_len = rdlen;
dev_dbg(&qspi->pdev->dev,
"bspi xfr addr 0x%x len 0x%x", addr, rdlen);
bcm_qspi_write(qspi, BSPI, BSPI_RAF_START_ADDR, addr);
bcm_qspi_write(qspi, BSPI, BSPI_RAF_NUM_WORDS, len_words);
bcm_qspi_write(qspi, BSPI, BSPI_RAF_WATERMARK, 0);
if (qspi->soc_intc) {
/*
* clear soc MSPI and BSPI interrupts and enable
* BSPI interrupts.
*/
soc_intc->bcm_qspi_int_ack(soc_intc, MSPI_BSPI_DONE);
soc_intc->bcm_qspi_int_set(soc_intc, BSPI_DONE, true);
}
/* Must flush previous writes before starting BSPI operation */
mb();
bcm_qspi_bspi_lr_start(qspi);
if (!wait_for_completion_timeout(&qspi->bspi_done, timeo)) {
dev_err(&qspi->pdev->dev, "timeout waiting for BSPI\n");
ret = -ETIMEDOUT;
break;
}
/* set msg return length */
addr += rdlen;
len -= rdlen;
} while (len);
return ret;
}
static int bcm_qspi_transfer_one(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *trans)
{
struct bcm_qspi *qspi = spi_controller_get_devdata(host);
int slots;
unsigned long timeo = msecs_to_jiffies(100);
if (!spi_get_csgpiod(spi, 0))
bcm_qspi_chip_select(qspi, spi_get_chipselect(spi, 0));
qspi->trans_pos.trans = trans;
qspi->trans_pos.byte = 0;
while (qspi->trans_pos.byte < trans->len) {
reinit_completion(&qspi->mspi_done);
slots = write_to_hw(qspi, spi);
if (!wait_for_completion_timeout(&qspi->mspi_done, timeo)) {
dev_err(&qspi->pdev->dev, "timeout waiting for MSPI\n");
return -ETIMEDOUT;
}
read_from_hw(qspi, slots);
}
bcm_qspi_enable_bspi(qspi);
return 0;
}
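/*
* Fallback spi-mem read using plain MSPI transfers: send the opcode, address
* and dummy bytes first, then read back the data.
*/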
static int bcm_qspi_mspi_exec_mem_op(struct spi_device *spi,
const struct spi_mem_op *op)
{
struct spi_controller *host = spi->controller;
struct bcm_qspi *qspi = spi_controller_get_devdata(host);
struct spi_transfer t[2];
u8 cmd[6] = { };
int ret, i;
memset(cmd, 0, sizeof(cmd));
memset(t, 0, sizeof(t));
/* tx */
/* opcode is in cmd[0] */
cmd[0] = op->cmd.opcode;
for (i = 0; i < op->addr.nbytes; i++)
cmd[1 + i] = op->addr.val >> (8 * (op->addr.nbytes - i - 1));
t[0].tx_buf = cmd;
t[0].len = op->addr.nbytes + op->dummy.nbytes + 1;
t[0].bits_per_word = spi->bits_per_word;
t[0].tx_nbits = op->cmd.buswidth;
/* let MSPI know that this is not the last transfer */
qspi->trans_pos.mspi_last_trans = false;
ret = bcm_qspi_transfer_one(host, spi, &t[0]);
/* the following data phase is the last MSPI transfer of this operation */
qspi->trans_pos.mspi_last_trans = true;
if (!ret) {
/* rx */
t[1].rx_buf = op->data.buf.in;
t[1].len = op->data.nbytes;
t[1].rx_nbits = op->data.buswidth;
t[1].bits_per_word = spi->bits_per_word;
ret = bcm_qspi_transfer_one(host, spi, &t[1]);
}
return ret;
}
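/*
* spi-mem entry point: only flash reads are accepted; route them to BSPI when
* the controller and alignment allow it, otherwise fall back to MSPI.
*/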
static int bcm_qspi_exec_mem_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
struct spi_device *spi = mem->spi;
struct bcm_qspi *qspi = spi_controller_get_devdata(spi->controller);
int ret = 0;
bool mspi_read = false;
u32 addr = 0, len;
u_char *buf;
if (!op->data.nbytes || !op->addr.nbytes || op->addr.nbytes > 4 ||
op->data.dir != SPI_MEM_DATA_IN)
return -ENOTSUPP;
buf = op->data.buf.in;
addr = op->addr.val;
len = op->data.nbytes;
if (has_bspi(qspi) && bcm_qspi_bspi_ver_three(qspi) == true) {
/*
* The address coming into this function is a raw flash offset.
* But for BSPI <= V3, we need to convert it to a remapped BSPI
* address. If it crosses a 4MB boundary, just revert back to
* using MSPI.
*/
addr = (addr + 0xc00000) & 0xffffff;
if ((~ADDR_4MB_MASK & addr) ^
(~ADDR_4MB_MASK & (addr + len - 1)))
mspi_read = true;
}
/* non-aligned and very short transfers are handled by MSPI */
if (!IS_ALIGNED((uintptr_t)addr, 4) || !IS_ALIGNED((uintptr_t)buf, 4) ||
len < 4)
mspi_read = true;
if (!has_bspi(qspi) || mspi_read)
return bcm_qspi_mspi_exec_mem_op(spi, op);
ret = bcm_qspi_bspi_set_mode(qspi, op, 0);
if (!ret)
ret = bcm_qspi_bspi_exec_mem_op(spi, op);
return ret;
}
static void bcm_qspi_cleanup(struct spi_device *spi)
{
struct bcm_qspi_parms *xp = spi_get_ctldata(spi);
kfree(xp);
}
static irqreturn_t bcm_qspi_mspi_l2_isr(int irq, void *dev_id)
{
struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
struct bcm_qspi *qspi = qspi_dev_id->dev;
u32 status = bcm_qspi_read(qspi, MSPI, MSPI_MSPI_STATUS);
if (status & MSPI_MSPI_STATUS_SPIF) {
struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
/* clear interrupt */
status &= ~MSPI_MSPI_STATUS_SPIF;
bcm_qspi_write(qspi, MSPI, MSPI_MSPI_STATUS, status);
if (qspi->soc_intc)
soc_intc->bcm_qspi_int_ack(soc_intc, MSPI_DONE);
complete(&qspi->mspi_done);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
static irqreturn_t bcm_qspi_bspi_lr_l2_isr(int irq, void *dev_id)
{
struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
struct bcm_qspi *qspi = qspi_dev_id->dev;
struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
u32 status = qspi_dev_id->irqp->mask;
if (qspi->bspi_enabled && qspi->bspi_rf_op) {
bcm_qspi_bspi_lr_data_read(qspi);
if (qspi->bspi_rf_op_len == 0) {
qspi->bspi_rf_op = NULL;
if (qspi->soc_intc) {
/* disable soc BSPI interrupt */
soc_intc->bcm_qspi_int_set(soc_intc, BSPI_DONE,
false);
/* indicate done */
status = INTR_BSPI_LR_SESSION_DONE_MASK;
}
if (qspi->bspi_rf_op_status)
bcm_qspi_bspi_lr_clear(qspi);
else
bcm_qspi_bspi_flush_prefetch_buffers(qspi);
}
if (qspi->soc_intc)
/* clear soc BSPI interrupt */
soc_intc->bcm_qspi_int_ack(soc_intc, BSPI_DONE);
}
status &= INTR_BSPI_LR_SESSION_DONE_MASK;
if (qspi->bspi_enabled && status && qspi->bspi_rf_op_len == 0)
complete(&qspi->bspi_done);
return IRQ_HANDLED;
}
static irqreturn_t bcm_qspi_bspi_lr_err_l2_isr(int irq, void *dev_id)
{
struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
struct bcm_qspi *qspi = qspi_dev_id->dev;
struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
dev_err(&qspi->pdev->dev, "BSPI INT error\n");
qspi->bspi_rf_op_status = -EIO;
if (qspi->soc_intc)
/* clear soc interrupt */
soc_intc->bcm_qspi_int_ack(soc_intc, BSPI_ERR);
complete(&qspi->bspi_done);
return IRQ_HANDLED;
}
static irqreturn_t bcm_qspi_l1_isr(int irq, void *dev_id)
{
struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
struct bcm_qspi *qspi = qspi_dev_id->dev;
struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
irqreturn_t ret = IRQ_NONE;
if (soc_intc) {
u32 status = soc_intc->bcm_qspi_get_int_status(soc_intc);
if (status & MSPI_DONE)
ret = bcm_qspi_mspi_l2_isr(irq, dev_id);
else if (status & BSPI_DONE)
ret = bcm_qspi_bspi_lr_l2_isr(irq, dev_id);
else if (status & BSPI_ERR)
ret = bcm_qspi_bspi_lr_err_l2_isr(irq, dev_id);
}
return ret;
}
static const struct bcm_qspi_irq qspi_irq_tab[] = {
{
.irq_name = "spi_lr_fullness_reached",
.irq_handler = bcm_qspi_bspi_lr_l2_isr,
.mask = INTR_BSPI_LR_FULLNESS_REACHED_MASK,
},
{
.irq_name = "spi_lr_session_aborted",
.irq_handler = bcm_qspi_bspi_lr_err_l2_isr,
.mask = INTR_BSPI_LR_SESSION_ABORTED_MASK,
},
{
.irq_name = "spi_lr_impatient",
.irq_handler = bcm_qspi_bspi_lr_err_l2_isr,
.mask = INTR_BSPI_LR_IMPATIENT_MASK,
},
{
.irq_name = "spi_lr_session_done",
.irq_handler = bcm_qspi_bspi_lr_l2_isr,
.mask = INTR_BSPI_LR_SESSION_DONE_MASK,
},
#ifdef QSPI_INT_DEBUG
/* this interrupt is for debug purposes only, don't request irq */
{
.irq_name = "spi_lr_overread",
.irq_handler = bcm_qspi_bspi_lr_err_l2_isr,
.mask = INTR_BSPI_LR_OVERREAD_MASK,
},
#endif
{
.irq_name = "mspi_done",
.irq_handler = bcm_qspi_mspi_l2_isr,
.mask = INTR_MSPI_DONE_MASK,
},
{
.irq_name = "mspi_halted",
.irq_handler = bcm_qspi_mspi_l2_isr,
.mask = INTR_MSPI_HALTED_MASK,
},
{
/* single muxed L1 interrupt source */
.irq_name = "spi_l1_intr",
.irq_handler = bcm_qspi_l1_isr,
.irq_source = MUXED_L1,
.mask = QSPI_INTERRUPTS_ALL,
},
};
static void bcm_qspi_bspi_init(struct bcm_qspi *qspi)
{
u32 val = 0;
val = bcm_qspi_read(qspi, BSPI, BSPI_REVISION_ID);
qspi->bspi_maj_rev = (val >> 8) & 0xff;
qspi->bspi_min_rev = val & 0xff;
if (!(bcm_qspi_bspi_ver_three(qspi))) {
/* Force mapping of BSPI address -> flash offset */
bcm_qspi_write(qspi, BSPI, BSPI_BSPI_XOR_VALUE, 0);
bcm_qspi_write(qspi, BSPI, BSPI_BSPI_XOR_ENABLE, 1);
}
qspi->bspi_enabled = 1;
bcm_qspi_disable_bspi(qspi);
bcm_qspi_write(qspi, BSPI, BSPI_B0_CTRL, 0);
bcm_qspi_write(qspi, BSPI, BSPI_B1_CTRL, 0);
}
static void bcm_qspi_hw_init(struct bcm_qspi *qspi)
{
struct bcm_qspi_parms parms;
bcm_qspi_write(qspi, MSPI, MSPI_SPCR1_LSB, 0);
bcm_qspi_write(qspi, MSPI, MSPI_SPCR1_MSB, 0);
bcm_qspi_write(qspi, MSPI, MSPI_NEWQP, 0);
bcm_qspi_write(qspi, MSPI, MSPI_ENDQP, 0);
bcm_qspi_write(qspi, MSPI, MSPI_SPCR2, 0x20);
parms.mode = SPI_MODE_3;
parms.bits_per_word = 8;
parms.speed_hz = qspi->max_speed_hz;
bcm_qspi_hw_set_parms(qspi, &parms);
if (has_bspi(qspi))
bcm_qspi_bspi_init(qspi);
}
static void bcm_qspi_hw_uninit(struct bcm_qspi *qspi)
{
u32 status = bcm_qspi_read(qspi, MSPI, MSPI_MSPI_STATUS);
bcm_qspi_write(qspi, MSPI, MSPI_SPCR2, 0);
if (has_bspi(qspi))
bcm_qspi_write(qspi, MSPI, MSPI_WRITE_LOCK, 0);
/* clear interrupt */
bcm_qspi_write(qspi, MSPI, MSPI_MSPI_STATUS, status & ~1);
}
static const struct spi_controller_mem_ops bcm_qspi_mem_ops = {
.exec_op = bcm_qspi_exec_mem_op,
};
struct bcm_qspi_data {
bool has_mspi_rev;
bool has_spcr3_sysclk;
};
static const struct bcm_qspi_data bcm_qspi_no_rev_data = {
.has_mspi_rev = false,
.has_spcr3_sysclk = false,
};
static const struct bcm_qspi_data bcm_qspi_rev_data = {
.has_mspi_rev = true,
.has_spcr3_sysclk = false,
};
static const struct bcm_qspi_data bcm_qspi_spcr3_data = {
.has_mspi_rev = true,
.has_spcr3_sysclk = true,
};
static const struct of_device_id bcm_qspi_of_match[] __maybe_unused = {
{
.compatible = "brcm,spi-bcm7445-qspi",
.data = &bcm_qspi_rev_data,
},
{
.compatible = "brcm,spi-bcm-qspi",
.data = &bcm_qspi_no_rev_data,
},
{
.compatible = "brcm,spi-bcm7216-qspi",
.data = &bcm_qspi_spcr3_data,
},
{
.compatible = "brcm,spi-bcm7278-qspi",
.data = &bcm_qspi_spcr3_data,
},
{},
};
MODULE_DEVICE_TABLE(of, bcm_qspi_of_match);
int bcm_qspi_probe(struct platform_device *pdev,
struct bcm_qspi_soc_intc *soc_intc)
{
const struct of_device_id *of_id = NULL;
const struct bcm_qspi_data *data;
struct device *dev = &pdev->dev;
struct bcm_qspi *qspi;
struct spi_controller *host;
struct resource *res;
int irq, ret = 0, num_ints = 0;
u32 val;
u32 rev = 0;
const char *name = NULL;
int num_irqs = ARRAY_SIZE(qspi_irq_tab);
/* We only support device-tree instantiation */
if (!dev->of_node)
return -ENODEV;
of_id = of_match_node(bcm_qspi_of_match, dev->of_node);
if (!of_id)
return -ENODEV;
data = of_id->data;
host = devm_spi_alloc_host(dev, sizeof(struct bcm_qspi));
if (!host) {
dev_err(dev, "error allocating spi_controller\n");
return -ENOMEM;
}
qspi = spi_controller_get_devdata(host);
qspi->clk = devm_clk_get_optional(&pdev->dev, NULL);
if (IS_ERR(qspi->clk))
return PTR_ERR(qspi->clk);
qspi->pdev = pdev;
qspi->trans_pos.trans = NULL;
qspi->trans_pos.byte = 0;
qspi->trans_pos.mspi_last_trans = true;
qspi->host = host;
host->bus_num = -1;
host->mode_bits = SPI_CPHA | SPI_CPOL | SPI_RX_DUAL | SPI_RX_QUAD |
SPI_3WIRE;
host->setup = bcm_qspi_setup;
host->transfer_one = bcm_qspi_transfer_one;
host->mem_ops = &bcm_qspi_mem_ops;
host->cleanup = bcm_qspi_cleanup;
host->dev.of_node = dev->of_node;
host->num_chipselect = NUM_CHIPSELECT;
host->use_gpio_descriptors = true;
qspi->big_endian = of_device_is_big_endian(dev->of_node);
if (!of_property_read_u32(dev->of_node, "num-cs", &val))
host->num_chipselect = val;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hif_mspi");
if (!res)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"mspi");
qspi->base[MSPI] = devm_ioremap_resource(dev, res);
if (IS_ERR(qspi->base[MSPI]))
return PTR_ERR(qspi->base[MSPI]);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bspi");
if (res) {
qspi->base[BSPI] = devm_ioremap_resource(dev, res);
if (IS_ERR(qspi->base[BSPI]))
return PTR_ERR(qspi->base[BSPI]);
qspi->bspi_mode = true;
} else {
qspi->bspi_mode = false;
}
dev_info(dev, "using %smspi mode\n", qspi->bspi_mode ? "bspi-" : "");
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs_reg");
if (res) {
qspi->base[CHIP_SELECT] = devm_ioremap_resource(dev, res);
if (IS_ERR(qspi->base[CHIP_SELECT]))
return PTR_ERR(qspi->base[CHIP_SELECT]);
}
qspi->dev_ids = kcalloc(num_irqs, sizeof(struct bcm_qspi_dev_id),
GFP_KERNEL);
if (!qspi->dev_ids)
return -ENOMEM;
/*
* Some SoCs integrate the SPI controller (e.g., its interrupt bits)
* in SoC-specific ways
*/
if (soc_intc) {
qspi->soc_intc = soc_intc;
soc_intc->bcm_qspi_int_set(soc_intc, MSPI_DONE, true);
} else {
qspi->soc_intc = NULL;
}
if (qspi->clk) {
ret = clk_prepare_enable(qspi->clk);
if (ret) {
dev_err(dev, "failed to prepare clock\n");
goto qspi_probe_err;
}
qspi->base_clk = clk_get_rate(qspi->clk);
} else {
qspi->base_clk = MSPI_BASE_FREQ;
}
if (data->has_mspi_rev) {
rev = bcm_qspi_read(qspi, MSPI, MSPI_REV);
/* some older revs do not have a MSPI_REV register */
if ((rev & 0xff) == 0xff)
rev = 0;
}
qspi->mspi_maj_rev = (rev >> 4) & 0xf;
qspi->mspi_min_rev = rev & 0xf;
qspi->mspi_spcr3_sysclk = data->has_spcr3_sysclk;
qspi->max_speed_hz = qspi->base_clk / (bcm_qspi_spbr_min(qspi) * 2);
/*
* On SW resets it is possible to have the mask still enabled
* Need to disable the mask and clear the status while we init
*/
bcm_qspi_hw_uninit(qspi);
for (val = 0; val < num_irqs; val++) {
irq = -1;
name = qspi_irq_tab[val].irq_name;
if (qspi_irq_tab[val].irq_source == SINGLE_L2) {
/* get the l2 interrupts */
irq = platform_get_irq_byname_optional(pdev, name);
} else if (!num_ints && soc_intc) {
/* all mspi, bspi intrs muxed to one L1 intr */
irq = platform_get_irq(pdev, 0);
}
if (irq >= 0) {
ret = devm_request_irq(&pdev->dev, irq,
qspi_irq_tab[val].irq_handler, 0,
name,
&qspi->dev_ids[val]);
if (ret < 0) {
dev_err(&pdev->dev, "IRQ %s not found\n", name);
goto qspi_unprepare_err;
}
qspi->dev_ids[val].dev = qspi;
qspi->dev_ids[val].irqp = &qspi_irq_tab[val];
num_ints++;
dev_dbg(&pdev->dev, "registered IRQ %s %d\n",
qspi_irq_tab[val].irq_name,
irq);
}
}
if (!num_ints) {
dev_err(&pdev->dev, "no IRQs registered, cannot init driver\n");
ret = -EINVAL;
goto qspi_unprepare_err;
}
bcm_qspi_hw_init(qspi);
init_completion(&qspi->mspi_done);
init_completion(&qspi->bspi_done);
qspi->curr_cs = -1;
platform_set_drvdata(pdev, qspi);
qspi->xfer_mode.width = -1;
qspi->xfer_mode.addrlen = -1;
qspi->xfer_mode.hp = -1;
ret = spi_register_controller(host);
if (ret < 0) {
dev_err(dev, "can't register host\n");
goto qspi_reg_err;
}
return 0;
qspi_reg_err:
bcm_qspi_hw_uninit(qspi);
qspi_unprepare_err:
clk_disable_unprepare(qspi->clk);
qspi_probe_err:
kfree(qspi->dev_ids);
return ret;
}
/* probe function to be called by SoC specific platform driver probe */
EXPORT_SYMBOL_GPL(bcm_qspi_probe);
void bcm_qspi_remove(struct platform_device *pdev)
{
struct bcm_qspi *qspi = platform_get_drvdata(pdev);
spi_unregister_controller(qspi->host);
bcm_qspi_hw_uninit(qspi);
clk_disable_unprepare(qspi->clk);
kfree(qspi->dev_ids);
}
/* function to be called by SoC specific platform driver remove() */
EXPORT_SYMBOL_GPL(bcm_qspi_remove);
static int __maybe_unused bcm_qspi_suspend(struct device *dev)
{
struct bcm_qspi *qspi = dev_get_drvdata(dev);
/* store the override strap value */
if (!bcm_qspi_bspi_ver_three(qspi))
qspi->s3_strap_override_ctrl =
bcm_qspi_read(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL);
spi_controller_suspend(qspi->host);
clk_disable_unprepare(qspi->clk);
bcm_qspi_hw_uninit(qspi);
return 0;
}
static int __maybe_unused bcm_qspi_resume(struct device *dev)
{
struct bcm_qspi *qspi = dev_get_drvdata(dev);
int ret = 0;
bcm_qspi_hw_init(qspi);
bcm_qspi_chip_select(qspi, qspi->curr_cs);
if (qspi->soc_intc)
/* enable MSPI interrupt */
qspi->soc_intc->bcm_qspi_int_set(qspi->soc_intc, MSPI_DONE,
true);
ret = clk_prepare_enable(qspi->clk);
if (!ret)
spi_controller_resume(qspi->host);
return ret;
}
SIMPLE_DEV_PM_OPS(bcm_qspi_pm_ops, bcm_qspi_suspend, bcm_qspi_resume);
/* pm_ops to be called by SoC specific platform driver */
EXPORT_SYMBOL_GPL(bcm_qspi_pm_ops);
MODULE_AUTHOR("Kamal Dasu");
MODULE_DESCRIPTION("Broadcom QSPI driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);
| linux-master | drivers/spi/spi-bcm-qspi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* J-Core SPI controller driver
*
* Copyright (C) 2012-2016 Smart Energy Instruments, Inc.
*
* Current version by Rich Felker
* Based loosely on initial version by Oleksandr G Zhadan
*
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/delay.h>
#define DRV_NAME "jcore_spi"
#define CTRL_REG 0x0
#define DATA_REG 0x4
#define JCORE_SPI_CTRL_XMIT 0x02
#define JCORE_SPI_STAT_BUSY 0x02
#define JCORE_SPI_CTRL_LOOP 0x08
#define JCORE_SPI_CTRL_CS_BITS 0x15
#define JCORE_SPI_WAIT_RDY_MAX_LOOP 2000000
struct jcore_spi {
struct spi_controller *host;
void __iomem *base;
unsigned int cs_reg;
unsigned int speed_reg;
unsigned int speed_hz;
unsigned int clock_freq;
};
static int jcore_spi_wait(void __iomem *ctrl_reg)
{
unsigned timeout = JCORE_SPI_WAIT_RDY_MAX_LOOP;
do {
if (!(readl(ctrl_reg) & JCORE_SPI_STAT_BUSY))
return 0;
cpu_relax();
} while (--timeout);
return -EBUSY;
}
static void jcore_spi_program(struct jcore_spi *hw)
{
void __iomem *ctrl_reg = hw->base + CTRL_REG;
if (jcore_spi_wait(ctrl_reg))
dev_err(hw->host->dev.parent,
"timeout waiting to program ctrl reg.\n");
writel(hw->cs_reg | hw->speed_reg, ctrl_reg);
}
static void jcore_spi_chipsel(struct spi_device *spi, bool value)
{
struct jcore_spi *hw = spi_controller_get_devdata(spi->controller);
u32 csbit = 1U << (2 * spi_get_chipselect(spi, 0));
dev_dbg(hw->host->dev.parent, "chipselect %d\n", spi_get_chipselect(spi, 0));
if (value)
hw->cs_reg |= csbit;
else
hw->cs_reg &= ~csbit;
jcore_spi_program(hw);
}
static void jcore_spi_baudrate(struct jcore_spi *hw, int speed)
{
if (speed == hw->speed_hz)
return;
hw->speed_hz = speed;
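	/*
	 * The clock divider lives in the top bits of the control word: with
	 * divider value N the SPI clock is clock_freq / (2 * (N + 1)), so
	 * N = 0 selects the maximum rate of clock_freq / 2.
	 */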
if (speed >= hw->clock_freq / 2)
hw->speed_reg = 0;
else
hw->speed_reg = ((hw->clock_freq / 2 / speed) - 1) << 27;
jcore_spi_program(hw);
dev_dbg(hw->host->dev.parent, "speed=%d reg=0x%x\n",
speed, hw->speed_reg);
}
static int jcore_spi_txrx(struct spi_controller *host, struct spi_device *spi,
struct spi_transfer *t)
{
struct jcore_spi *hw = spi_controller_get_devdata(host);
void __iomem *ctrl_reg = hw->base + CTRL_REG;
void __iomem *data_reg = hw->base + DATA_REG;
u32 xmit;
/* data buffers */
const unsigned char *tx;
unsigned char *rx;
unsigned int len;
unsigned int count;
jcore_spi_baudrate(hw, t->speed_hz);
xmit = hw->cs_reg | hw->speed_reg | JCORE_SPI_CTRL_XMIT;
tx = t->tx_buf;
rx = t->rx_buf;
len = t->len;
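	/*
	 * Transfer one byte at a time: wait until the controller is idle,
	 * write the outgoing byte (or 0 for RX-only transfers), trigger the
	 * exchange via the XMIT bit, then wait again before reading back the
	 * received byte.
	 */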
for (count = 0; count < len; count++) {
if (jcore_spi_wait(ctrl_reg))
break;
writel(tx ? *tx++ : 0, data_reg);
writel(xmit, ctrl_reg);
if (jcore_spi_wait(ctrl_reg))
break;
if (rx)
*rx++ = readl(data_reg);
}
spi_finalize_current_transfer(host);
if (count < len)
return -EREMOTEIO;
return 0;
}
static int jcore_spi_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
struct jcore_spi *hw;
struct spi_controller *host;
struct resource *res;
u32 clock_freq;
struct clk *clk;
int err = -ENODEV;
host = spi_alloc_host(&pdev->dev, sizeof(struct jcore_spi));
if (!host)
return err;
/* Setup the host state. */
host->num_chipselect = 3;
host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
host->transfer_one = jcore_spi_txrx;
host->set_cs = jcore_spi_chipsel;
host->dev.of_node = node;
host->bus_num = pdev->id;
hw = spi_controller_get_devdata(host);
hw->host = host;
platform_set_drvdata(pdev, hw);
/* Find and map our resources */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
goto exit_busy;
if (!devm_request_mem_region(&pdev->dev, res->start,
resource_size(res), pdev->name))
goto exit_busy;
hw->base = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!hw->base)
goto exit_busy;
/*
	 * The SPI clock rate is controlled via a configurable clock divider
* which is applied to the reference clock. A 50 MHz reference is
* most suitable for obtaining standard SPI clock rates, but some
* designs may have a different reference clock, and the DT must
* make the driver aware so that it can properly program the
* requested rate. If the clock is omitted, 50 MHz is assumed.
*/
clock_freq = 50000000;
clk = devm_clk_get(&pdev->dev, "ref_clk");
if (!IS_ERR(clk)) {
if (clk_prepare_enable(clk) == 0) {
clock_freq = clk_get_rate(clk);
clk_disable_unprepare(clk);
} else
dev_warn(&pdev->dev, "could not enable ref_clk\n");
}
hw->clock_freq = clock_freq;
/* Initialize all CS bits to high. */
hw->cs_reg = JCORE_SPI_CTRL_CS_BITS;
jcore_spi_baudrate(hw, 400000);
/* Register our spi controller */
err = devm_spi_register_controller(&pdev->dev, host);
if (err)
goto exit;
return 0;
exit_busy:
err = -EBUSY;
exit:
spi_controller_put(host);
return err;
}
static const struct of_device_id jcore_spi_of_match[] = {
{ .compatible = "jcore,spi2" },
{},
};
MODULE_DEVICE_TABLE(of, jcore_spi_of_match);
static struct platform_driver jcore_spi_driver = {
.probe = jcore_spi_probe,
.driver = {
.name = DRV_NAME,
.of_match_table = jcore_spi_of_match,
},
};
module_platform_driver(jcore_spi_driver);
MODULE_DESCRIPTION("J-Core SPI driver");
MODULE_AUTHOR("Rich Felker <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
| linux-master | drivers/spi/spi-jcore.c |
// SPDX-License-Identifier: GPL-2.0
//
// DFL bus driver for Altera SPI Master
//
// Copyright (C) 2020 Intel Corporation, Inc.
//
// Authors:
// Matthew Gerlach <[email protected]>
//
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/bitfield.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
#include <linux/spi/altera.h>
#include <linux/dfl.h>
#define FME_FEATURE_ID_MAX10_SPI 0xe
#define FME_FEATURE_REV_MAX10_SPI_N5010 0x1
#define SPI_CORE_PARAMETER 0x8
#define SHIFT_MODE BIT_ULL(1)
#define SHIFT_MODE_MSB 0
#define SHIFT_MODE_LSB 1
#define DATA_WIDTH GENMASK_ULL(7, 2)
#define NUM_CHIPSELECT GENMASK_ULL(13, 8)
#define CLK_POLARITY BIT_ULL(14)
#define CLK_PHASE BIT_ULL(15)
#define PERIPHERAL_ID GENMASK_ULL(47, 32)
#define SPI_CLK GENMASK_ULL(31, 22)
#define SPI_INDIRECT_ACC_OFST 0x10
#define INDIRECT_ADDR (SPI_INDIRECT_ACC_OFST+0x0)
#define INDIRECT_WR BIT_ULL(8)
#define INDIRECT_RD BIT_ULL(9)
#define INDIRECT_RD_DATA (SPI_INDIRECT_ACC_OFST+0x8)
#define INDIRECT_DATA_MASK GENMASK_ULL(31, 0)
#define INDIRECT_DEBUG BIT_ULL(32)
#define INDIRECT_WR_DATA (SPI_INDIRECT_ACC_OFST+0x10)
#define INDIRECT_TIMEOUT 10000
static int indirect_bus_reg_read(void *context, unsigned int reg,
unsigned int *val)
{
void __iomem *base = context;
int loops;
u64 v;
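	/*
	 * Indirect access handshake: post the word address with the RD bit
	 * set, poll until the controller clears the bit, then fetch the
	 * result from the read-data register.
	 */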
writeq((reg >> 2) | INDIRECT_RD, base + INDIRECT_ADDR);
loops = 0;
while ((readq(base + INDIRECT_ADDR) & INDIRECT_RD) &&
(loops++ < INDIRECT_TIMEOUT))
cpu_relax();
if (loops >= INDIRECT_TIMEOUT) {
pr_err("%s timed out %d\n", __func__, loops);
return -ETIME;
}
v = readq(base + INDIRECT_RD_DATA);
*val = v & INDIRECT_DATA_MASK;
return 0;
}
static int indirect_bus_reg_write(void *context, unsigned int reg,
unsigned int val)
{
void __iomem *base = context;
int loops;
writeq(val, base + INDIRECT_WR_DATA);
writeq((reg >> 2) | INDIRECT_WR, base + INDIRECT_ADDR);
loops = 0;
while ((readq(base + INDIRECT_ADDR) & INDIRECT_WR) &&
(loops++ < INDIRECT_TIMEOUT))
cpu_relax();
if (loops >= INDIRECT_TIMEOUT) {
pr_err("%s timed out %d\n", __func__, loops);
return -ETIME;
}
return 0;
}
static const struct regmap_config indirect_regbus_cfg = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.fast_io = true,
.max_register = 24,
.reg_write = indirect_bus_reg_write,
.reg_read = indirect_bus_reg_read,
};
static void config_spi_host(void __iomem *base, struct spi_controller *host)
{
u64 v;
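	/*
	 * The SPI core parameter register advertises the clock polarity and
	 * phase, the number of chip selects and the shift register width;
	 * translate those into the controller's capabilities.
	 */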
v = readq(base + SPI_CORE_PARAMETER);
host->mode_bits = SPI_CS_HIGH;
if (FIELD_GET(CLK_POLARITY, v))
host->mode_bits |= SPI_CPOL;
if (FIELD_GET(CLK_PHASE, v))
host->mode_bits |= SPI_CPHA;
host->num_chipselect = FIELD_GET(NUM_CHIPSELECT, v);
host->bits_per_word_mask =
SPI_BPW_RANGE_MASK(1, FIELD_GET(DATA_WIDTH, v));
}
static int dfl_spi_altera_probe(struct dfl_device *dfl_dev)
{
struct spi_board_info board_info = { 0 };
struct device *dev = &dfl_dev->dev;
struct spi_controller *host;
struct altera_spi *hw;
void __iomem *base;
int err;
host = devm_spi_alloc_host(dev, sizeof(struct altera_spi));
if (!host)
return -ENOMEM;
host->bus_num = -1;
hw = spi_controller_get_devdata(host);
hw->dev = dev;
base = devm_ioremap_resource(dev, &dfl_dev->mmio_res);
if (IS_ERR(base))
return PTR_ERR(base);
config_spi_host(base, host);
dev_dbg(dev, "%s cs %u bpm 0x%x mode 0x%x\n", __func__,
host->num_chipselect, host->bits_per_word_mask,
host->mode_bits);
hw->regmap = devm_regmap_init(dev, NULL, base, &indirect_regbus_cfg);
if (IS_ERR(hw->regmap))
return PTR_ERR(hw->regmap);
hw->irq = -EINVAL;
altera_spi_init_host(host);
err = devm_spi_register_controller(dev, host);
if (err)
return dev_err_probe(dev, err, "%s failed to register spi host\n",
__func__);
if (dfl_dev->revision == FME_FEATURE_REV_MAX10_SPI_N5010)
strscpy(board_info.modalias, "m10-n5010", SPI_NAME_SIZE);
else
strscpy(board_info.modalias, "m10-d5005", SPI_NAME_SIZE);
board_info.max_speed_hz = 12500000;
board_info.bus_num = 0;
board_info.chip_select = 0;
if (!spi_new_device(host, &board_info)) {
dev_err(dev, "%s failed to create SPI device: %s\n",
__func__, board_info.modalias);
}
return 0;
}
static const struct dfl_device_id dfl_spi_altera_ids[] = {
{ FME_ID, FME_FEATURE_ID_MAX10_SPI },
{ }
};
static struct dfl_driver dfl_spi_altera_driver = {
.drv = {
.name = "dfl-spi-altera",
},
.id_table = dfl_spi_altera_ids,
.probe = dfl_spi_altera_probe,
};
module_dfl_driver(dfl_spi_altera_driver);
MODULE_DEVICE_TABLE(dfl, dfl_spi_altera_ids);
MODULE_DESCRIPTION("DFL spi altera driver");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/spi/spi-altera-dfl.c |
/*
* Driver for Amlogic Meson SPI communication controller (SPICC)
*
* Copyright (C) BayLibre, SAS
* Author: Neil Armstrong <[email protected]>
*
* SPDX-License-Identifier: GPL-2.0+
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/reset.h>
#include <linux/pinctrl/consumer.h>
/*
 * The Meson SPICC controller could support DMA-based transfers, but DMA is
 * not implemented by the vendor code, and despite the register documentation
 * being available it has never worked on the GXL hardware.
 * PIO is the only mode implemented, and because of hardware limitations:
 * - all transfers are cut into 16-word bursts, because the FIFO hangs on
 *   TX underflow and there is no TX "Half-Empty" interrupt, so we can only
 *   go by FIFO-sized chunks
 * - CS management is simplistic and goes high between every burst, so it is
 *   really a "Data Valid" signal rather than a Chip Select; a GPIO should be
 *   used instead to keep CS asserted over the full transfer
*/
#define SPICC_MAX_BURST 128
/* Register Map */
#define SPICC_RXDATA 0x00
#define SPICC_TXDATA 0x04
#define SPICC_CONREG 0x08
#define SPICC_ENABLE BIT(0)
#define SPICC_MODE_MASTER BIT(1)
#define SPICC_XCH BIT(2)
#define SPICC_SMC BIT(3)
#define SPICC_POL BIT(4)
#define SPICC_PHA BIT(5)
#define SPICC_SSCTL BIT(6)
#define SPICC_SSPOL BIT(7)
#define SPICC_DRCTL_MASK GENMASK(9, 8)
#define SPICC_DRCTL_IGNORE 0
#define SPICC_DRCTL_FALLING 1
#define SPICC_DRCTL_LOWLEVEL 2
#define SPICC_CS_MASK GENMASK(13, 12)
#define SPICC_DATARATE_MASK GENMASK(18, 16)
#define SPICC_DATARATE_DIV4 0
#define SPICC_DATARATE_DIV8 1
#define SPICC_DATARATE_DIV16 2
#define SPICC_DATARATE_DIV32 3
#define SPICC_BITLENGTH_MASK GENMASK(24, 19)
#define SPICC_BURSTLENGTH_MASK GENMASK(31, 25)
#define SPICC_INTREG 0x0c
#define SPICC_TE_EN BIT(0) /* TX FIFO Empty Interrupt */
#define SPICC_TH_EN BIT(1) /* TX FIFO Half-Full Interrupt */
#define SPICC_TF_EN BIT(2) /* TX FIFO Full Interrupt */
#define SPICC_RR_EN BIT(3) /* RX FIFO Ready Interrupt */
#define SPICC_RH_EN BIT(4) /* RX FIFO Half-Full Interrupt */
#define SPICC_RF_EN BIT(5) /* RX FIFO Full Interrupt */
#define SPICC_RO_EN BIT(6) /* RX FIFO Overflow Interrupt */
#define SPICC_TC_EN			BIT(7) /* Transfer Complete Interrupt */
#define SPICC_DMAREG 0x10
#define SPICC_DMA_ENABLE BIT(0)
#define SPICC_TXFIFO_THRESHOLD_MASK GENMASK(5, 1)
#define SPICC_RXFIFO_THRESHOLD_MASK GENMASK(10, 6)
#define SPICC_READ_BURST_MASK GENMASK(14, 11)
#define SPICC_WRITE_BURST_MASK GENMASK(18, 15)
#define SPICC_DMA_URGENT BIT(19)
#define SPICC_DMA_THREADID_MASK GENMASK(25, 20)
#define SPICC_DMA_BURSTNUM_MASK GENMASK(31, 26)
#define SPICC_STATREG 0x14
#define SPICC_TE BIT(0) /* TX FIFO Empty Interrupt */
#define SPICC_TH BIT(1) /* TX FIFO Half-Full Interrupt */
#define SPICC_TF BIT(2) /* TX FIFO Full Interrupt */
#define SPICC_RR BIT(3) /* RX FIFO Ready Interrupt */
#define SPICC_RH BIT(4) /* RX FIFO Half-Full Interrupt */
#define SPICC_RF BIT(5) /* RX FIFO Full Interrupt */
#define SPICC_RO BIT(6) /* RX FIFO Overflow Interrupt */
#define SPICC_TC			BIT(7) /* Transfer Complete Interrupt */
#define SPICC_PERIODREG 0x18
#define SPICC_PERIOD GENMASK(14, 0) /* Wait cycles */
#define SPICC_TESTREG 0x1c
#define SPICC_TXCNT_MASK GENMASK(4, 0) /* TX FIFO Counter */
#define SPICC_RXCNT_MASK GENMASK(9, 5) /* RX FIFO Counter */
#define SPICC_SMSTATUS_MASK GENMASK(12, 10) /* State Machine Status */
#define SPICC_LBC_RO BIT(13) /* Loop Back Control Read-Only */
#define SPICC_LBC_W1 BIT(14) /* Loop Back Control Write-Only */
#define SPICC_SWAP_RO BIT(14) /* RX FIFO Data Swap Read-Only */
#define SPICC_SWAP_W1 BIT(15) /* RX FIFO Data Swap Write-Only */
#define SPICC_DLYCTL_RO_MASK GENMASK(20, 15) /* Delay Control Read-Only */
#define SPICC_MO_DELAY_MASK GENMASK(17, 16) /* Master Output Delay */
#define SPICC_MO_NO_DELAY 0
#define SPICC_MO_DELAY_1_CYCLE 1
#define SPICC_MO_DELAY_2_CYCLE 2
#define SPICC_MO_DELAY_3_CYCLE 3
#define SPICC_MI_DELAY_MASK GENMASK(19, 18) /* Master Input Delay */
#define SPICC_MI_NO_DELAY 0
#define SPICC_MI_DELAY_1_CYCLE 1
#define SPICC_MI_DELAY_2_CYCLE 2
#define SPICC_MI_DELAY_3_CYCLE 3
#define SPICC_MI_CAP_DELAY_MASK GENMASK(21, 20) /* Master Capture Delay */
#define SPICC_CAP_AHEAD_2_CYCLE 0
#define SPICC_CAP_AHEAD_1_CYCLE 1
#define SPICC_CAP_NO_DELAY 2
#define SPICC_CAP_DELAY_1_CYCLE 3
#define SPICC_FIFORST_RO_MASK GENMASK(22, 21) /* FIFO Softreset Read-Only */
#define SPICC_FIFORST_W1_MASK GENMASK(23, 22) /* FIFO Softreset Write-Only */
#define SPICC_DRADDR 0x20 /* Read Address of DMA */
#define SPICC_DWADDR 0x24 /* Write Address of DMA */
#define SPICC_ENH_CTL0 0x38 /* Enhanced Feature */
#define SPICC_ENH_CLK_CS_DELAY_MASK GENMASK(15, 0)
#define SPICC_ENH_DATARATE_MASK GENMASK(23, 16)
#define SPICC_ENH_DATARATE_EN BIT(24)
#define SPICC_ENH_MOSI_OEN BIT(25)
#define SPICC_ENH_CLK_OEN BIT(26)
#define SPICC_ENH_CS_OEN BIT(27)
#define SPICC_ENH_CLK_CS_DELAY_EN BIT(28)
#define SPICC_ENH_MAIN_CLK_AO BIT(29)
#define writel_bits_relaxed(mask, val, addr) \
writel_relaxed((readl_relaxed(addr) & ~(mask)) | (val), addr)
struct meson_spicc_data {
unsigned int max_speed_hz;
unsigned int min_speed_hz;
unsigned int fifo_size;
bool has_oen;
bool has_enhance_clk_div;
bool has_pclk;
};
struct meson_spicc_device {
struct spi_master *master;
struct platform_device *pdev;
void __iomem *base;
struct clk *core;
struct clk *pclk;
struct clk_divider pow2_div;
struct clk *clk;
struct spi_message *message;
struct spi_transfer *xfer;
struct completion done;
const struct meson_spicc_data *data;
u8 *tx_buf;
u8 *rx_buf;
unsigned int bytes_per_word;
unsigned long tx_remain;
unsigned long rx_remain;
unsigned long xfer_remain;
struct pinctrl *pinctrl;
struct pinctrl_state *pins_idle_high;
struct pinctrl_state *pins_idle_low;
};
#define pow2_clk_to_spicc(_div) container_of(_div, struct meson_spicc_device, pow2_div)
static void meson_spicc_oen_enable(struct meson_spicc_device *spicc)
{
u32 conf;
if (!spicc->data->has_oen) {
/* Try to get pinctrl states for idle high/low */
spicc->pins_idle_high = pinctrl_lookup_state(spicc->pinctrl,
"idle-high");
if (IS_ERR(spicc->pins_idle_high)) {
dev_warn(&spicc->pdev->dev, "can't get idle-high pinctrl\n");
spicc->pins_idle_high = NULL;
}
spicc->pins_idle_low = pinctrl_lookup_state(spicc->pinctrl,
"idle-low");
if (IS_ERR(spicc->pins_idle_low)) {
dev_warn(&spicc->pdev->dev, "can't get idle-low pinctrl\n");
spicc->pins_idle_low = NULL;
}
return;
}
conf = readl_relaxed(spicc->base + SPICC_ENH_CTL0) |
SPICC_ENH_MOSI_OEN | SPICC_ENH_CLK_OEN | SPICC_ENH_CS_OEN;
writel_relaxed(conf, spicc->base + SPICC_ENH_CTL0);
}
static inline bool meson_spicc_txfull(struct meson_spicc_device *spicc)
{
return !!FIELD_GET(SPICC_TF,
readl_relaxed(spicc->base + SPICC_STATREG));
}
static inline bool meson_spicc_rxready(struct meson_spicc_device *spicc)
{
return FIELD_GET(SPICC_RH | SPICC_RR | SPICC_RF,
readl_relaxed(spicc->base + SPICC_STATREG));
}
static inline u32 meson_spicc_pull_data(struct meson_spicc_device *spicc)
{
unsigned int bytes = spicc->bytes_per_word;
unsigned int byte_shift = 0;
u32 data = 0;
u8 byte;
while (bytes--) {
byte = *spicc->tx_buf++;
data |= (byte & 0xff) << byte_shift;
byte_shift += 8;
}
spicc->tx_remain--;
return data;
}
static inline void meson_spicc_push_data(struct meson_spicc_device *spicc,
u32 data)
{
unsigned int bytes = spicc->bytes_per_word;
unsigned int byte_shift = 0;
u8 byte;
while (bytes--) {
byte = (data >> byte_shift) & 0xff;
*spicc->rx_buf++ = byte;
byte_shift += 8;
}
spicc->rx_remain--;
}
static inline void meson_spicc_rx(struct meson_spicc_device *spicc)
{
/* Empty RX FIFO */
while (spicc->rx_remain &&
meson_spicc_rxready(spicc))
meson_spicc_push_data(spicc,
readl_relaxed(spicc->base + SPICC_RXDATA));
}
static inline void meson_spicc_tx(struct meson_spicc_device *spicc)
{
/* Fill Up TX FIFO */
while (spicc->tx_remain &&
!meson_spicc_txfull(spicc))
writel_relaxed(meson_spicc_pull_data(spicc),
spicc->base + SPICC_TXDATA);
}
static inline void meson_spicc_setup_burst(struct meson_spicc_device *spicc)
{
unsigned int burst_len = min_t(unsigned int,
spicc->xfer_remain /
spicc->bytes_per_word,
spicc->data->fifo_size);
/* Setup Xfer variables */
spicc->tx_remain = burst_len;
spicc->rx_remain = burst_len;
spicc->xfer_remain -= burst_len * spicc->bytes_per_word;
/* Setup burst length */
writel_bits_relaxed(SPICC_BURSTLENGTH_MASK,
FIELD_PREP(SPICC_BURSTLENGTH_MASK,
burst_len - 1),
spicc->base + SPICC_CONREG);
/* Fill TX FIFO */
meson_spicc_tx(spicc);
}
static irqreturn_t meson_spicc_irq(int irq, void *data)
{
struct meson_spicc_device *spicc = (void *) data;
writel_bits_relaxed(SPICC_TC, SPICC_TC, spicc->base + SPICC_STATREG);
/* Empty RX FIFO */
meson_spicc_rx(spicc);
if (!spicc->xfer_remain) {
/* Disable all IRQs */
writel(0, spicc->base + SPICC_INTREG);
complete(&spicc->done);
return IRQ_HANDLED;
}
/* Setup burst */
meson_spicc_setup_burst(spicc);
/* Start burst */
writel_bits_relaxed(SPICC_XCH, SPICC_XCH, spicc->base + SPICC_CONREG);
return IRQ_HANDLED;
}
static void meson_spicc_auto_io_delay(struct meson_spicc_device *spicc)
{
u32 div, hz;
u32 mi_delay, cap_delay;
u32 conf;
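	/*
	 * Recover the effective clock divider from whichever divider block
	 * is in use, then pick the input and capture delays based on the
	 * resulting SPI clock rate and divider thresholds below.
	 */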
if (spicc->data->has_enhance_clk_div) {
div = FIELD_GET(SPICC_ENH_DATARATE_MASK,
readl_relaxed(spicc->base + SPICC_ENH_CTL0));
div++;
div <<= 1;
} else {
div = FIELD_GET(SPICC_DATARATE_MASK,
readl_relaxed(spicc->base + SPICC_CONREG));
div += 2;
div = 1 << div;
}
mi_delay = SPICC_MI_NO_DELAY;
cap_delay = SPICC_CAP_AHEAD_2_CYCLE;
hz = clk_get_rate(spicc->clk);
if (hz >= 100000000)
cap_delay = SPICC_CAP_DELAY_1_CYCLE;
else if (hz >= 80000000)
cap_delay = SPICC_CAP_NO_DELAY;
else if (hz >= 40000000)
cap_delay = SPICC_CAP_AHEAD_1_CYCLE;
else if (div >= 16)
mi_delay = SPICC_MI_DELAY_3_CYCLE;
else if (div >= 8)
mi_delay = SPICC_MI_DELAY_2_CYCLE;
else if (div >= 6)
mi_delay = SPICC_MI_DELAY_1_CYCLE;
conf = readl_relaxed(spicc->base + SPICC_TESTREG);
conf &= ~(SPICC_MO_DELAY_MASK | SPICC_MI_DELAY_MASK
| SPICC_MI_CAP_DELAY_MASK);
conf |= FIELD_PREP(SPICC_MI_DELAY_MASK, mi_delay);
conf |= FIELD_PREP(SPICC_MI_CAP_DELAY_MASK, cap_delay);
writel_relaxed(conf, spicc->base + SPICC_TESTREG);
}
static void meson_spicc_setup_xfer(struct meson_spicc_device *spicc,
struct spi_transfer *xfer)
{
u32 conf, conf_orig;
/* Read original configuration */
conf = conf_orig = readl_relaxed(spicc->base + SPICC_CONREG);
/* Setup word width */
conf &= ~SPICC_BITLENGTH_MASK;
conf |= FIELD_PREP(SPICC_BITLENGTH_MASK,
(spicc->bytes_per_word << 3) - 1);
/* Ignore if unchanged */
if (conf != conf_orig)
writel_relaxed(conf, spicc->base + SPICC_CONREG);
clk_set_rate(spicc->clk, xfer->speed_hz);
meson_spicc_auto_io_delay(spicc);
writel_relaxed(0, spicc->base + SPICC_DMAREG);
}
static void meson_spicc_reset_fifo(struct meson_spicc_device *spicc)
{
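	/*
	 * Keep the controller clock running (MAIN_CLK_AO) across the FIFO
	 * soft reset on controllers that support it, and drain any stale
	 * data left in the RX FIFO.
	 */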
if (spicc->data->has_oen)
writel_bits_relaxed(SPICC_ENH_MAIN_CLK_AO,
SPICC_ENH_MAIN_CLK_AO,
spicc->base + SPICC_ENH_CTL0);
writel_bits_relaxed(SPICC_FIFORST_W1_MASK, SPICC_FIFORST_W1_MASK,
spicc->base + SPICC_TESTREG);
while (meson_spicc_rxready(spicc))
readl_relaxed(spicc->base + SPICC_RXDATA);
if (spicc->data->has_oen)
writel_bits_relaxed(SPICC_ENH_MAIN_CLK_AO, 0,
spicc->base + SPICC_ENH_CTL0);
}
static int meson_spicc_transfer_one(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct meson_spicc_device *spicc = spi_master_get_devdata(master);
uint64_t timeout;
/* Store current transfer */
spicc->xfer = xfer;
/* Setup transfer parameters */
spicc->tx_buf = (u8 *)xfer->tx_buf;
spicc->rx_buf = (u8 *)xfer->rx_buf;
spicc->xfer_remain = xfer->len;
/* Pre-calculate word size */
spicc->bytes_per_word =
DIV_ROUND_UP(spicc->xfer->bits_per_word, 8);
if (xfer->len % spicc->bytes_per_word)
return -EINVAL;
/* Setup transfer parameters */
meson_spicc_setup_xfer(spicc, xfer);
meson_spicc_reset_fifo(spicc);
/* Setup burst */
meson_spicc_setup_burst(spicc);
/* Setup wait for completion */
reinit_completion(&spicc->done);
/* For each byte we wait for 8 cycles of the SPI clock */
timeout = 8LL * MSEC_PER_SEC * xfer->len;
do_div(timeout, xfer->speed_hz);
/* Add 10us delay between each fifo bursts */
timeout += ((xfer->len >> 4) * 10) / MSEC_PER_SEC;
/* Increase it twice and add 200 ms tolerance */
timeout += timeout + 200;
/* Start burst */
writel_bits_relaxed(SPICC_XCH, SPICC_XCH, spicc->base + SPICC_CONREG);
/* Enable interrupts */
writel_relaxed(SPICC_TC_EN, spicc->base + SPICC_INTREG);
if (!wait_for_completion_timeout(&spicc->done, msecs_to_jiffies(timeout)))
return -ETIMEDOUT;
return 0;
}
static int meson_spicc_prepare_message(struct spi_master *master,
struct spi_message *message)
{
struct meson_spicc_device *spicc = spi_master_get_devdata(master);
struct spi_device *spi = message->spi;
u32 conf = readl_relaxed(spicc->base + SPICC_CONREG) & SPICC_DATARATE_MASK;
/* Store current message */
spicc->message = message;
/* Enable Master */
conf |= SPICC_ENABLE;
conf |= SPICC_MODE_MASTER;
/* SMC = 0 */
/* Setup transfer mode */
if (spi->mode & SPI_CPOL)
conf |= SPICC_POL;
else
conf &= ~SPICC_POL;
if (!spicc->data->has_oen) {
if (spi->mode & SPI_CPOL) {
if (spicc->pins_idle_high)
pinctrl_select_state(spicc->pinctrl, spicc->pins_idle_high);
} else {
if (spicc->pins_idle_low)
pinctrl_select_state(spicc->pinctrl, spicc->pins_idle_low);
}
}
if (spi->mode & SPI_CPHA)
conf |= SPICC_PHA;
else
conf &= ~SPICC_PHA;
/* SSCTL = 0 */
if (spi->mode & SPI_CS_HIGH)
conf |= SPICC_SSPOL;
else
conf &= ~SPICC_SSPOL;
if (spi->mode & SPI_READY)
conf |= FIELD_PREP(SPICC_DRCTL_MASK, SPICC_DRCTL_LOWLEVEL);
else
conf |= FIELD_PREP(SPICC_DRCTL_MASK, SPICC_DRCTL_IGNORE);
/* Select CS */
conf |= FIELD_PREP(SPICC_CS_MASK, spi_get_chipselect(spi, 0));
/* Default 8bit word */
conf |= FIELD_PREP(SPICC_BITLENGTH_MASK, 8 - 1);
writel_relaxed(conf, spicc->base + SPICC_CONREG);
/* Setup no wait cycles by default */
writel_relaxed(0, spicc->base + SPICC_PERIODREG);
writel_bits_relaxed(SPICC_LBC_W1, 0, spicc->base + SPICC_TESTREG);
return 0;
}
static int meson_spicc_unprepare_transfer(struct spi_master *master)
{
struct meson_spicc_device *spicc = spi_master_get_devdata(master);
u32 conf = readl_relaxed(spicc->base + SPICC_CONREG) & SPICC_DATARATE_MASK;
/* Disable all IRQs */
writel(0, spicc->base + SPICC_INTREG);
device_reset_optional(&spicc->pdev->dev);
/* Set default configuration, keeping datarate field */
writel_relaxed(conf, spicc->base + SPICC_CONREG);
if (!spicc->data->has_oen)
pinctrl_select_default_state(&spicc->pdev->dev);
return 0;
}
static int meson_spicc_setup(struct spi_device *spi)
{
if (!spi->controller_state)
spi->controller_state = spi_master_get_devdata(spi->master);
return 0;
}
static void meson_spicc_cleanup(struct spi_device *spi)
{
spi->controller_state = NULL;
}
/*
* The Clock Mux
* x-----------------x x------------x x------\
* |---| pow2 fixed div |---| pow2 div |----| |
* | x-----------------x x------------x | |
* src ---| | mux |-- out
* | x-----------------x x------------x | |
* |---| enh fixed div |---| enh div |0---| |
* x-----------------x x------------x x------/
*
* Clk path for GX series:
* src -> pow2 fixed div -> pow2 div -> out
*
* Clk path for AXG series:
* src -> pow2 fixed div -> pow2 div -> mux -> out
* src -> enh fixed div -> enh div -> mux -> out
*
* Clk path for G12A series:
* pclk -> pow2 fixed div -> pow2 div -> mux -> out
* pclk -> enh fixed div -> enh div -> mux -> out
*
* The pow2 divider is tied to the controller HW state, and the
* divider is only valid when the controller is initialized.
*
* A set of clock ops is added to make sure we don't read/set this
* clock rate while the controller is in an unknown state.
*/
static unsigned long meson_spicc_pow2_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_divider *divider = to_clk_divider(hw);
struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider);
if (!spicc->master->cur_msg)
return 0;
return clk_divider_ops.recalc_rate(hw, parent_rate);
}
static int meson_spicc_pow2_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
struct clk_divider *divider = to_clk_divider(hw);
struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider);
if (!spicc->master->cur_msg)
return -EINVAL;
return clk_divider_ops.determine_rate(hw, req);
}
static int meson_spicc_pow2_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_divider *divider = to_clk_divider(hw);
struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider);
if (!spicc->master->cur_msg)
return -EINVAL;
return clk_divider_ops.set_rate(hw, rate, parent_rate);
}
static const struct clk_ops meson_spicc_pow2_clk_ops = {
.recalc_rate = meson_spicc_pow2_recalc_rate,
.determine_rate = meson_spicc_pow2_determine_rate,
.set_rate = meson_spicc_pow2_set_rate,
};
static int meson_spicc_pow2_clk_init(struct meson_spicc_device *spicc)
{
struct device *dev = &spicc->pdev->dev;
struct clk_fixed_factor *pow2_fixed_div;
struct clk_init_data init;
struct clk *clk;
struct clk_parent_data parent_data[2];
char name[64];
memset(&init, 0, sizeof(init));
memset(&parent_data, 0, sizeof(parent_data));
init.parent_data = parent_data;
/* algorithm for pow2 div: rate = freq / 4 / (2 ^ N) */
pow2_fixed_div = devm_kzalloc(dev, sizeof(*pow2_fixed_div), GFP_KERNEL);
if (!pow2_fixed_div)
return -ENOMEM;
snprintf(name, sizeof(name), "%s#pow2_fixed_div", dev_name(dev));
init.name = name;
init.ops = &clk_fixed_factor_ops;
init.flags = 0;
if (spicc->data->has_pclk)
parent_data[0].hw = __clk_get_hw(spicc->pclk);
else
parent_data[0].hw = __clk_get_hw(spicc->core);
init.num_parents = 1;
pow2_fixed_div->mult = 1,
pow2_fixed_div->div = 4,
pow2_fixed_div->hw.init = &init;
clk = devm_clk_register(dev, &pow2_fixed_div->hw);
if (WARN_ON(IS_ERR(clk)))
return PTR_ERR(clk);
snprintf(name, sizeof(name), "%s#pow2_div", dev_name(dev));
init.name = name;
init.ops = &meson_spicc_pow2_clk_ops;
/*
* Set NOCACHE here to make sure we read the actual HW value
* since we reset the HW after each transfer.
*/
init.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE;
parent_data[0].hw = &pow2_fixed_div->hw;
init.num_parents = 1;
spicc->pow2_div.shift = 16,
spicc->pow2_div.width = 3,
spicc->pow2_div.flags = CLK_DIVIDER_POWER_OF_TWO,
spicc->pow2_div.reg = spicc->base + SPICC_CONREG;
spicc->pow2_div.hw.init = &init;
spicc->clk = devm_clk_register(dev, &spicc->pow2_div.hw);
if (WARN_ON(IS_ERR(spicc->clk)))
return PTR_ERR(spicc->clk);
return 0;
}
static int meson_spicc_enh_clk_init(struct meson_spicc_device *spicc)
{
struct device *dev = &spicc->pdev->dev;
struct clk_fixed_factor *enh_fixed_div;
struct clk_divider *enh_div;
struct clk_mux *mux;
struct clk_init_data init;
struct clk *clk;
struct clk_parent_data parent_data[2];
char name[64];
memset(&init, 0, sizeof(init));
memset(&parent_data, 0, sizeof(parent_data));
init.parent_data = parent_data;
/* algorithm for enh div: rate = freq / 2 / (N + 1) */
enh_fixed_div = devm_kzalloc(dev, sizeof(*enh_fixed_div), GFP_KERNEL);
if (!enh_fixed_div)
return -ENOMEM;
snprintf(name, sizeof(name), "%s#enh_fixed_div", dev_name(dev));
init.name = name;
init.ops = &clk_fixed_factor_ops;
init.flags = 0;
if (spicc->data->has_pclk)
parent_data[0].hw = __clk_get_hw(spicc->pclk);
else
parent_data[0].hw = __clk_get_hw(spicc->core);
init.num_parents = 1;
enh_fixed_div->mult = 1,
enh_fixed_div->div = 2,
enh_fixed_div->hw.init = &init;
clk = devm_clk_register(dev, &enh_fixed_div->hw);
if (WARN_ON(IS_ERR(clk)))
return PTR_ERR(clk);
enh_div = devm_kzalloc(dev, sizeof(*enh_div), GFP_KERNEL);
if (!enh_div)
return -ENOMEM;
snprintf(name, sizeof(name), "%s#enh_div", dev_name(dev));
init.name = name;
init.ops = &clk_divider_ops;
init.flags = CLK_SET_RATE_PARENT;
parent_data[0].hw = &enh_fixed_div->hw;
init.num_parents = 1;
enh_div->shift = 16,
enh_div->width = 8,
enh_div->reg = spicc->base + SPICC_ENH_CTL0;
enh_div->hw.init = &init;
clk = devm_clk_register(dev, &enh_div->hw);
if (WARN_ON(IS_ERR(clk)))
return PTR_ERR(clk);
mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
if (!mux)
return -ENOMEM;
snprintf(name, sizeof(name), "%s#sel", dev_name(dev));
init.name = name;
init.ops = &clk_mux_ops;
parent_data[0].hw = &spicc->pow2_div.hw;
parent_data[1].hw = &enh_div->hw;
init.num_parents = 2;
init.flags = CLK_SET_RATE_PARENT;
mux->mask = 0x1,
mux->shift = 24,
mux->reg = spicc->base + SPICC_ENH_CTL0;
mux->hw.init = &init;
spicc->clk = devm_clk_register(dev, &mux->hw);
if (WARN_ON(IS_ERR(spicc->clk)))
return PTR_ERR(spicc->clk);
return 0;
}
static int meson_spicc_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct meson_spicc_device *spicc;
int ret, irq;
master = spi_alloc_master(&pdev->dev, sizeof(*spicc));
if (!master) {
dev_err(&pdev->dev, "master allocation failed\n");
return -ENOMEM;
}
spicc = spi_master_get_devdata(master);
spicc->master = master;
spicc->data = of_device_get_match_data(&pdev->dev);
if (!spicc->data) {
dev_err(&pdev->dev, "failed to get match data\n");
ret = -EINVAL;
goto out_master;
}
spicc->pdev = pdev;
platform_set_drvdata(pdev, spicc);
init_completion(&spicc->done);
spicc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(spicc->base)) {
dev_err(&pdev->dev, "io resource mapping failed\n");
ret = PTR_ERR(spicc->base);
goto out_master;
}
/* Set master mode and enable controller */
writel_relaxed(SPICC_ENABLE | SPICC_MODE_MASTER,
spicc->base + SPICC_CONREG);
/* Disable all IRQs */
writel_relaxed(0, spicc->base + SPICC_INTREG);
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
goto out_master;
}
ret = devm_request_irq(&pdev->dev, irq, meson_spicc_irq,
0, NULL, spicc);
if (ret) {
dev_err(&pdev->dev, "irq request failed\n");
goto out_master;
}
spicc->core = devm_clk_get(&pdev->dev, "core");
if (IS_ERR(spicc->core)) {
dev_err(&pdev->dev, "core clock request failed\n");
ret = PTR_ERR(spicc->core);
goto out_master;
}
if (spicc->data->has_pclk) {
spicc->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(spicc->pclk)) {
dev_err(&pdev->dev, "pclk clock request failed\n");
ret = PTR_ERR(spicc->pclk);
goto out_master;
}
}
ret = clk_prepare_enable(spicc->core);
if (ret) {
dev_err(&pdev->dev, "core clock enable failed\n");
goto out_master;
}
ret = clk_prepare_enable(spicc->pclk);
if (ret) {
dev_err(&pdev->dev, "pclk clock enable failed\n");
goto out_core_clk;
}
spicc->pinctrl = devm_pinctrl_get(&pdev->dev);
if (IS_ERR(spicc->pinctrl)) {
ret = PTR_ERR(spicc->pinctrl);
goto out_clk;
}
device_reset_optional(&pdev->dev);
master->num_chipselect = 4;
master->dev.of_node = pdev->dev.of_node;
master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH;
master->bits_per_word_mask = SPI_BPW_MASK(32) |
SPI_BPW_MASK(24) |
SPI_BPW_MASK(16) |
SPI_BPW_MASK(8);
master->flags = (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX);
master->min_speed_hz = spicc->data->min_speed_hz;
master->max_speed_hz = spicc->data->max_speed_hz;
master->setup = meson_spicc_setup;
master->cleanup = meson_spicc_cleanup;
master->prepare_message = meson_spicc_prepare_message;
master->unprepare_transfer_hardware = meson_spicc_unprepare_transfer;
master->transfer_one = meson_spicc_transfer_one;
master->use_gpio_descriptors = true;
meson_spicc_oen_enable(spicc);
ret = meson_spicc_pow2_clk_init(spicc);
if (ret) {
dev_err(&pdev->dev, "pow2 clock registration failed\n");
goto out_clk;
}
if (spicc->data->has_enhance_clk_div) {
ret = meson_spicc_enh_clk_init(spicc);
if (ret) {
dev_err(&pdev->dev, "clock registration failed\n");
goto out_clk;
}
}
ret = devm_spi_register_master(&pdev->dev, master);
if (ret) {
dev_err(&pdev->dev, "spi master registration failed\n");
goto out_clk;
}
return 0;
out_clk:
clk_disable_unprepare(spicc->pclk);
out_core_clk:
clk_disable_unprepare(spicc->core);
out_master:
spi_master_put(master);
return ret;
}
static void meson_spicc_remove(struct platform_device *pdev)
{
struct meson_spicc_device *spicc = platform_get_drvdata(pdev);
/* Disable SPI */
writel(0, spicc->base + SPICC_CONREG);
clk_disable_unprepare(spicc->core);
clk_disable_unprepare(spicc->pclk);
spi_master_put(spicc->master);
}
static const struct meson_spicc_data meson_spicc_gx_data = {
.max_speed_hz = 30000000,
.min_speed_hz = 325000,
.fifo_size = 16,
};
static const struct meson_spicc_data meson_spicc_axg_data = {
.max_speed_hz = 80000000,
.min_speed_hz = 325000,
.fifo_size = 16,
.has_oen = true,
.has_enhance_clk_div = true,
};
static const struct meson_spicc_data meson_spicc_g12a_data = {
.max_speed_hz = 166666666,
.min_speed_hz = 50000,
.fifo_size = 15,
.has_oen = true,
.has_enhance_clk_div = true,
.has_pclk = true,
};
static const struct of_device_id meson_spicc_of_match[] = {
{
.compatible = "amlogic,meson-gx-spicc",
.data = &meson_spicc_gx_data,
},
{
.compatible = "amlogic,meson-axg-spicc",
.data = &meson_spicc_axg_data,
},
{
.compatible = "amlogic,meson-g12a-spicc",
.data = &meson_spicc_g12a_data,
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, meson_spicc_of_match);
static struct platform_driver meson_spicc_driver = {
.probe = meson_spicc_probe,
.remove_new = meson_spicc_remove,
.driver = {
.name = "meson-spicc",
.of_match_table = of_match_ptr(meson_spicc_of_match),
},
};
module_platform_driver(meson_spicc_driver);
MODULE_DESCRIPTION("Meson SPI Communication Controller driver");
MODULE_AUTHOR("Neil Armstrong <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-meson-spicc.c |
// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (c) 2009 Samsung Electronics Co., Ltd.
// Jaswinder Singh <[email protected]>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/of.h>
#include <linux/platform_data/spi-s3c64xx.h>
#define MAX_SPI_PORTS 12
#define S3C64XX_SPI_QUIRK_CS_AUTO (1 << 1)
#define AUTOSUSPEND_TIMEOUT 2000
/* Registers and bit-fields */
#define S3C64XX_SPI_CH_CFG 0x00
#define S3C64XX_SPI_CLK_CFG 0x04
#define S3C64XX_SPI_MODE_CFG 0x08
#define S3C64XX_SPI_CS_REG 0x0C
#define S3C64XX_SPI_INT_EN 0x10
#define S3C64XX_SPI_STATUS 0x14
#define S3C64XX_SPI_TX_DATA 0x18
#define S3C64XX_SPI_RX_DATA 0x1C
#define S3C64XX_SPI_PACKET_CNT 0x20
#define S3C64XX_SPI_PENDING_CLR 0x24
#define S3C64XX_SPI_SWAP_CFG 0x28
#define S3C64XX_SPI_FB_CLK 0x2C
#define S3C64XX_SPI_CH_HS_EN (1<<6) /* High Speed Enable */
#define S3C64XX_SPI_CH_SW_RST (1<<5)
#define S3C64XX_SPI_CH_SLAVE (1<<4)
#define S3C64XX_SPI_CPOL_L (1<<3)
#define S3C64XX_SPI_CPHA_B (1<<2)
#define S3C64XX_SPI_CH_RXCH_ON (1<<1)
#define S3C64XX_SPI_CH_TXCH_ON (1<<0)
#define S3C64XX_SPI_CLKSEL_SRCMSK (3<<9)
#define S3C64XX_SPI_CLKSEL_SRCSHFT 9
#define S3C64XX_SPI_ENCLK_ENABLE (1<<8)
#define S3C64XX_SPI_PSR_MASK 0xff
#define S3C64XX_SPI_MODE_CH_TSZ_BYTE (0<<29)
#define S3C64XX_SPI_MODE_CH_TSZ_HALFWORD (1<<29)
#define S3C64XX_SPI_MODE_CH_TSZ_WORD (2<<29)
#define S3C64XX_SPI_MODE_CH_TSZ_MASK (3<<29)
#define S3C64XX_SPI_MODE_BUS_TSZ_BYTE (0<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD (1<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_WORD (2<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_MASK (3<<17)
#define S3C64XX_SPI_MODE_RX_RDY_LVL GENMASK(16, 11)
#define S3C64XX_SPI_MODE_RX_RDY_LVL_SHIFT 11
#define S3C64XX_SPI_MODE_SELF_LOOPBACK (1<<3)
#define S3C64XX_SPI_MODE_RXDMA_ON (1<<2)
#define S3C64XX_SPI_MODE_TXDMA_ON (1<<1)
#define S3C64XX_SPI_MODE_4BURST (1<<0)
#define S3C64XX_SPI_CS_NSC_CNT_2 (2<<4)
#define S3C64XX_SPI_CS_AUTO (1<<1)
#define S3C64XX_SPI_CS_SIG_INACT (1<<0)
#define S3C64XX_SPI_INT_TRAILING_EN (1<<6)
#define S3C64XX_SPI_INT_RX_OVERRUN_EN (1<<5)
#define S3C64XX_SPI_INT_RX_UNDERRUN_EN (1<<4)
#define S3C64XX_SPI_INT_TX_OVERRUN_EN (1<<3)
#define S3C64XX_SPI_INT_TX_UNDERRUN_EN (1<<2)
#define S3C64XX_SPI_INT_RX_FIFORDY_EN (1<<1)
#define S3C64XX_SPI_INT_TX_FIFORDY_EN (1<<0)
#define S3C64XX_SPI_ST_RX_OVERRUN_ERR (1<<5)
#define S3C64XX_SPI_ST_RX_UNDERRUN_ERR (1<<4)
#define S3C64XX_SPI_ST_TX_OVERRUN_ERR (1<<3)
#define S3C64XX_SPI_ST_TX_UNDERRUN_ERR (1<<2)
#define S3C64XX_SPI_ST_RX_FIFORDY (1<<1)
#define S3C64XX_SPI_ST_TX_FIFORDY (1<<0)
#define S3C64XX_SPI_PACKET_CNT_EN (1<<16)
#define S3C64XX_SPI_PACKET_CNT_MASK GENMASK(15, 0)
#define S3C64XX_SPI_PND_TX_UNDERRUN_CLR (1<<4)
#define S3C64XX_SPI_PND_TX_OVERRUN_CLR (1<<3)
#define S3C64XX_SPI_PND_RX_UNDERRUN_CLR (1<<2)
#define S3C64XX_SPI_PND_RX_OVERRUN_CLR (1<<1)
#define S3C64XX_SPI_PND_TRAILING_CLR (1<<0)
#define S3C64XX_SPI_SWAP_RX_HALF_WORD (1<<7)
#define S3C64XX_SPI_SWAP_RX_BYTE (1<<6)
#define S3C64XX_SPI_SWAP_RX_BIT (1<<5)
#define S3C64XX_SPI_SWAP_RX_EN (1<<4)
#define S3C64XX_SPI_SWAP_TX_HALF_WORD (1<<3)
#define S3C64XX_SPI_SWAP_TX_BYTE (1<<2)
#define S3C64XX_SPI_SWAP_TX_BIT (1<<1)
#define S3C64XX_SPI_SWAP_TX_EN (1<<0)
#define S3C64XX_SPI_FBCLK_MSK (3<<0)
#define FIFO_LVL_MASK(i) ((i)->port_conf->fifo_lvl_mask[i->port_id])
#define S3C64XX_SPI_ST_TX_DONE(v, i) (((v) & \
(1 << (i)->port_conf->tx_st_done)) ? 1 : 0)
#define TX_FIFO_LVL(v, i) (((v) >> 6) & FIFO_LVL_MASK(i))
#define RX_FIFO_LVL(v, i) (((v) >> (i)->port_conf->rx_lvl_offset) & \
FIFO_LVL_MASK(i))
#define S3C64XX_SPI_MAX_TRAILCNT 0x3ff
#define S3C64XX_SPI_TRAILCNT_OFF 19
#define S3C64XX_SPI_TRAILCNT S3C64XX_SPI_MAX_TRAILCNT
#define S3C64XX_SPI_POLLING_SIZE 32
#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
#define is_polling(x) (x->cntrlr_info->polling)
#define RXBUSY (1<<2)
#define TXBUSY (1<<3)
struct s3c64xx_spi_dma_data {
struct dma_chan *ch;
dma_cookie_t cookie;
enum dma_transfer_direction direction;
};
/**
* struct s3c64xx_spi_port_config - SPI Controller hardware info
* @fifo_lvl_mask: Bit-mask for {TX|RX}_FIFO_LVL bits in SPI_STATUS register.
 * @rx_lvl_offset: Bit offset of RX_FIFO_LVL bits in SPI_STATUS register.
 * @tx_st_done: Bit offset of TX_DONE bit in SPI_STATUS register.
* @clk_div: Internal clock divider
* @quirks: Bitmask of known quirks
* @high_speed: True, if the controller supports HIGH_SPEED_EN bit.
* @clk_from_cmu: True, if the controller does not include a clock mux and
* prescaler unit.
* @clk_ioclk: True if clock is present on this device
* @has_loopback: True if loopback mode can be supported
*
 * The Samsung s3c64xx SPI controllers are used on various Samsung SoCs but
 * differ in some aspects such as the size of the FIFO and the SPI bus clock
 * setup. Such differences are specified to the driver using this structure,
 * which is provided as driver data to the driver.
*/
struct s3c64xx_spi_port_config {
int fifo_lvl_mask[MAX_SPI_PORTS];
int rx_lvl_offset;
int tx_st_done;
int quirks;
int clk_div;
bool high_speed;
bool clk_from_cmu;
bool clk_ioclk;
bool has_loopback;
};
/**
* struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver.
* @clk: Pointer to the spi clock.
* @src_clk: Pointer to the clock used to generate SPI signals.
* @ioclk: Pointer to the i/o clock between host and target
* @pdev: Pointer to device's platform device data
* @host: Pointer to the SPI Protocol host.
* @cntrlr_info: Platform specific data for the controller this driver manages.
* @lock: Controller specific lock.
* @state: Set of FLAGS to indicate status.
* @sfr_start: BUS address of SPI controller regs.
* @regs: Pointer to ioremap'ed controller registers.
* @xfer_completion: To indicate completion of xfer task.
* @cur_mode: Stores the active configuration of the controller.
* @cur_bpw: Stores the active bits per word settings.
* @cur_speed: Current clock speed
* @rx_dma: Local receive DMA data (e.g. chan and direction)
* @tx_dma: Local transmit DMA data (e.g. chan and direction)
 * @port_conf: Local SPI port configuration data
* @port_id: Port identification number
*/
struct s3c64xx_spi_driver_data {
void __iomem *regs;
struct clk *clk;
struct clk *src_clk;
struct clk *ioclk;
struct platform_device *pdev;
struct spi_controller *host;
struct s3c64xx_spi_info *cntrlr_info;
spinlock_t lock;
unsigned long sfr_start;
struct completion xfer_completion;
unsigned state;
unsigned cur_mode, cur_bpw;
unsigned cur_speed;
struct s3c64xx_spi_dma_data rx_dma;
struct s3c64xx_spi_dma_data tx_dma;
const struct s3c64xx_spi_port_config *port_conf;
unsigned int port_id;
};
static void s3c64xx_flush_fifo(struct s3c64xx_spi_driver_data *sdd)
{
void __iomem *regs = sdd->regs;
unsigned long loops;
u32 val;
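	/*
	 * Stop the packet counter and the RX/TX channels, assert the
	 * software reset and drain both FIFOs (bounded to roughly 1 ms
	 * each) before releasing the reset and clearing the DMA mode bits.
	 */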
writel(0, regs + S3C64XX_SPI_PACKET_CNT);
val = readl(regs + S3C64XX_SPI_CH_CFG);
val &= ~(S3C64XX_SPI_CH_RXCH_ON | S3C64XX_SPI_CH_TXCH_ON);
writel(val, regs + S3C64XX_SPI_CH_CFG);
val = readl(regs + S3C64XX_SPI_CH_CFG);
val |= S3C64XX_SPI_CH_SW_RST;
val &= ~S3C64XX_SPI_CH_HS_EN;
writel(val, regs + S3C64XX_SPI_CH_CFG);
/* Flush TxFIFO*/
loops = msecs_to_loops(1);
do {
val = readl(regs + S3C64XX_SPI_STATUS);
} while (TX_FIFO_LVL(val, sdd) && loops--);
if (loops == 0)
dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n");
/* Flush RxFIFO*/
loops = msecs_to_loops(1);
do {
val = readl(regs + S3C64XX_SPI_STATUS);
if (RX_FIFO_LVL(val, sdd))
readl(regs + S3C64XX_SPI_RX_DATA);
else
break;
} while (loops--);
if (loops == 0)
dev_warn(&sdd->pdev->dev, "Timed out flushing RX FIFO\n");
val = readl(regs + S3C64XX_SPI_CH_CFG);
val &= ~S3C64XX_SPI_CH_SW_RST;
writel(val, regs + S3C64XX_SPI_CH_CFG);
val = readl(regs + S3C64XX_SPI_MODE_CFG);
val &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
writel(val, regs + S3C64XX_SPI_MODE_CFG);
}
static void s3c64xx_spi_dmacb(void *data)
{
struct s3c64xx_spi_driver_data *sdd;
struct s3c64xx_spi_dma_data *dma = data;
unsigned long flags;
if (dma->direction == DMA_DEV_TO_MEM)
sdd = container_of(data,
struct s3c64xx_spi_driver_data, rx_dma);
else
sdd = container_of(data,
struct s3c64xx_spi_driver_data, tx_dma);
spin_lock_irqsave(&sdd->lock, flags);
if (dma->direction == DMA_DEV_TO_MEM) {
sdd->state &= ~RXBUSY;
if (!(sdd->state & TXBUSY))
complete(&sdd->xfer_completion);
} else {
sdd->state &= ~TXBUSY;
if (!(sdd->state & RXBUSY))
complete(&sdd->xfer_completion);
}
spin_unlock_irqrestore(&sdd->lock, flags);
}
static int prepare_dma(struct s3c64xx_spi_dma_data *dma,
struct sg_table *sgt)
{
struct s3c64xx_spi_driver_data *sdd;
struct dma_slave_config config;
struct dma_async_tx_descriptor *desc;
int ret;
memset(&config, 0, sizeof(config));
if (dma->direction == DMA_DEV_TO_MEM) {
sdd = container_of((void *)dma,
struct s3c64xx_spi_driver_data, rx_dma);
config.direction = dma->direction;
config.src_addr = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
config.src_addr_width = sdd->cur_bpw / 8;
config.src_maxburst = 1;
dmaengine_slave_config(dma->ch, &config);
} else {
sdd = container_of((void *)dma,
struct s3c64xx_spi_driver_data, tx_dma);
config.direction = dma->direction;
config.dst_addr = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
config.dst_addr_width = sdd->cur_bpw / 8;
config.dst_maxburst = 1;
dmaengine_slave_config(dma->ch, &config);
}
desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents,
dma->direction, DMA_PREP_INTERRUPT);
if (!desc) {
dev_err(&sdd->pdev->dev, "unable to prepare %s scatterlist",
dma->direction == DMA_DEV_TO_MEM ? "rx" : "tx");
return -ENOMEM;
}
desc->callback = s3c64xx_spi_dmacb;
desc->callback_param = dma;
dma->cookie = dmaengine_submit(desc);
ret = dma_submit_error(dma->cookie);
if (ret) {
dev_err(&sdd->pdev->dev, "DMA submission failed");
return -EIO;
}
dma_async_issue_pending(dma->ch);
return 0;
}
static void s3c64xx_spi_set_cs(struct spi_device *spi, bool enable)
{
struct s3c64xx_spi_driver_data *sdd =
spi_controller_get_devdata(spi->controller);
if (sdd->cntrlr_info->no_cs)
return;
if (enable) {
if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO)) {
writel(0, sdd->regs + S3C64XX_SPI_CS_REG);
} else {
u32 ssel = readl(sdd->regs + S3C64XX_SPI_CS_REG);
ssel |= (S3C64XX_SPI_CS_AUTO |
S3C64XX_SPI_CS_NSC_CNT_2);
writel(ssel, sdd->regs + S3C64XX_SPI_CS_REG);
}
} else {
if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
writel(S3C64XX_SPI_CS_SIG_INACT,
sdd->regs + S3C64XX_SPI_CS_REG);
}
}
static int s3c64xx_spi_prepare_transfer(struct spi_controller *spi)
{
struct s3c64xx_spi_driver_data *sdd = spi_controller_get_devdata(spi);
if (is_polling(sdd))
return 0;
/* Requests DMA channels */
sdd->rx_dma.ch = dma_request_chan(&sdd->pdev->dev, "rx");
if (IS_ERR(sdd->rx_dma.ch)) {
dev_err(&sdd->pdev->dev, "Failed to get RX DMA channel\n");
sdd->rx_dma.ch = NULL;
return 0;
}
sdd->tx_dma.ch = dma_request_chan(&sdd->pdev->dev, "tx");
if (IS_ERR(sdd->tx_dma.ch)) {
dev_err(&sdd->pdev->dev, "Failed to get TX DMA channel\n");
dma_release_channel(sdd->rx_dma.ch);
sdd->tx_dma.ch = NULL;
sdd->rx_dma.ch = NULL;
return 0;
}
spi->dma_rx = sdd->rx_dma.ch;
spi->dma_tx = sdd->tx_dma.ch;
return 0;
}
static int s3c64xx_spi_unprepare_transfer(struct spi_controller *spi)
{
struct s3c64xx_spi_driver_data *sdd = spi_controller_get_devdata(spi);
if (is_polling(sdd))
return 0;
/* Releases DMA channels if they are allocated */
if (sdd->rx_dma.ch && sdd->tx_dma.ch) {
dma_release_channel(sdd->rx_dma.ch);
dma_release_channel(sdd->tx_dma.ch);
sdd->rx_dma.ch = NULL;
sdd->tx_dma.ch = NULL;
}
return 0;
}
static bool s3c64xx_spi_can_dma(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct s3c64xx_spi_driver_data *sdd = spi_controller_get_devdata(host);
if (sdd->rx_dma.ch && sdd->tx_dma.ch) {
return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
} else {
return false;
}
}
static int s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
struct spi_transfer *xfer, int dma_mode)
{
void __iomem *regs = sdd->regs;
u32 modecfg, chcfg;
int ret = 0;
modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
chcfg = readl(regs + S3C64XX_SPI_CH_CFG);
chcfg &= ~S3C64XX_SPI_CH_TXCH_ON;
if (dma_mode) {
chcfg &= ~S3C64XX_SPI_CH_RXCH_ON;
} else {
		/* Always shift data into the RX FIFO, even if the xfer is
		 * Tx only; this helps set the PCKT_CNT value so that exactly
		 * the needed number of clocks is generated.
*/
chcfg |= S3C64XX_SPI_CH_RXCH_ON;
writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
| S3C64XX_SPI_PACKET_CNT_EN,
regs + S3C64XX_SPI_PACKET_CNT);
}
if (xfer->tx_buf != NULL) {
sdd->state |= TXBUSY;
chcfg |= S3C64XX_SPI_CH_TXCH_ON;
if (dma_mode) {
modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
ret = prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
} else {
switch (sdd->cur_bpw) {
case 32:
iowrite32_rep(regs + S3C64XX_SPI_TX_DATA,
xfer->tx_buf, xfer->len / 4);
break;
case 16:
iowrite16_rep(regs + S3C64XX_SPI_TX_DATA,
xfer->tx_buf, xfer->len / 2);
break;
default:
iowrite8_rep(regs + S3C64XX_SPI_TX_DATA,
xfer->tx_buf, xfer->len);
break;
}
}
}
if (xfer->rx_buf != NULL) {
sdd->state |= RXBUSY;
if (sdd->port_conf->high_speed && sdd->cur_speed >= 30000000UL
&& !(sdd->cur_mode & SPI_CPHA))
chcfg |= S3C64XX_SPI_CH_HS_EN;
if (dma_mode) {
modecfg |= S3C64XX_SPI_MODE_RXDMA_ON;
chcfg |= S3C64XX_SPI_CH_RXCH_ON;
writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
| S3C64XX_SPI_PACKET_CNT_EN,
regs + S3C64XX_SPI_PACKET_CNT);
ret = prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
}
}
if (ret)
return ret;
writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
return 0;
}
static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
int timeout_ms)
{
void __iomem *regs = sdd->regs;
unsigned long val = 1;
u32 status;
/* max fifo depth available */
u32 max_fifo = (FIFO_LVL_MASK(sdd) >> 1) + 1;
if (timeout_ms)
val = msecs_to_loops(timeout_ms);
do {
status = readl(regs + S3C64XX_SPI_STATUS);
} while (RX_FIFO_LVL(status, sdd) < max_fifo && --val);
/* return the actual received data length */
return RX_FIFO_LVL(status, sdd);
}
static int s3c64xx_wait_for_dma(struct s3c64xx_spi_driver_data *sdd,
struct spi_transfer *xfer)
{
void __iomem *regs = sdd->regs;
unsigned long val;
u32 status;
int ms;
/* millisecs to xfer 'len' bytes @ 'cur_speed' */
ms = xfer->len * 8 * 1000 / sdd->cur_speed;
ms += 30; /* some tolerance */
ms = max(ms, 100); /* minimum timeout */
val = msecs_to_jiffies(ms) + 10;
val = wait_for_completion_timeout(&sdd->xfer_completion, val);
/*
	 * If the previous xfer completed within the timeout, proceed;
	 * otherwise return -EIO.
	 * DMA TX completes after merely writing data into the FIFO,
	 * without waiting for the actual transmission on the bus to finish.
	 * DMA RX completes only after the DMA has read data from the FIFO,
	 * which requires the bus transmission to finish, so no extra wait
	 * is needed when the xfer involved RX (with or without TX).
*/
if (val && !xfer->rx_buf) {
val = msecs_to_loops(10);
status = readl(regs + S3C64XX_SPI_STATUS);
while ((TX_FIFO_LVL(status, sdd)
|| !S3C64XX_SPI_ST_TX_DONE(status, sdd))
&& --val) {
cpu_relax();
status = readl(regs + S3C64XX_SPI_STATUS);
}
}
/* If timed out while checking rx/tx status return error */
if (!val)
return -EIO;
return 0;
}
static int s3c64xx_wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
struct spi_transfer *xfer, bool use_irq)
{
void __iomem *regs = sdd->regs;
unsigned long val;
u32 status;
int loops;
u32 cpy_len;
u8 *buf;
int ms;
unsigned long time_us;
/* microsecs to xfer 'len' bytes @ 'cur_speed' */
time_us = (xfer->len * 8 * 1000 * 1000) / sdd->cur_speed;
ms = (time_us / 1000);
ms += 10; /* some tolerance */
/* sleep during signal transfer time */
status = readl(regs + S3C64XX_SPI_STATUS);
if (RX_FIFO_LVL(status, sdd) < xfer->len)
usleep_range(time_us / 2, time_us);
if (use_irq) {
val = msecs_to_jiffies(ms);
if (!wait_for_completion_timeout(&sdd->xfer_completion, val))
return -EIO;
}
val = msecs_to_loops(ms);
do {
status = readl(regs + S3C64XX_SPI_STATUS);
} while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);
if (!val)
return -EIO;
/* If it was only Tx */
if (!xfer->rx_buf) {
sdd->state &= ~TXBUSY;
return 0;
}
/*
	 * If the receive length is bigger than the controller FIFO
	 * size, calculate how many times the FIFO must be read:
	 * loops = length / max FIFO size (derived from the FIFO mask).
	 * For any length smaller than the FIFO size the code below is
	 * executed at least once.
*/
loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
buf = xfer->rx_buf;
do {
/* wait for data to be received in the fifo */
cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
(loops ? ms : 0));
switch (sdd->cur_bpw) {
case 32:
ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
buf, cpy_len / 4);
break;
case 16:
ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
buf, cpy_len / 2);
break;
default:
ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
buf, cpy_len);
break;
}
buf = buf + cpy_len;
} while (loops--);
sdd->state &= ~RXBUSY;
return 0;
}
static int s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
{
void __iomem *regs = sdd->regs;
int ret;
u32 val;
int div = sdd->port_conf->clk_div;
/* Disable Clock */
if (!sdd->port_conf->clk_from_cmu) {
val = readl(regs + S3C64XX_SPI_CLK_CFG);
val &= ~S3C64XX_SPI_ENCLK_ENABLE;
writel(val, regs + S3C64XX_SPI_CLK_CFG);
}
/* Set Polarity and Phase */
val = readl(regs + S3C64XX_SPI_CH_CFG);
val &= ~(S3C64XX_SPI_CH_SLAVE |
S3C64XX_SPI_CPOL_L |
S3C64XX_SPI_CPHA_B);
if (sdd->cur_mode & SPI_CPOL)
val |= S3C64XX_SPI_CPOL_L;
if (sdd->cur_mode & SPI_CPHA)
val |= S3C64XX_SPI_CPHA_B;
writel(val, regs + S3C64XX_SPI_CH_CFG);
/* Set Channel & DMA Mode */
val = readl(regs + S3C64XX_SPI_MODE_CFG);
val &= ~(S3C64XX_SPI_MODE_BUS_TSZ_MASK
| S3C64XX_SPI_MODE_CH_TSZ_MASK);
switch (sdd->cur_bpw) {
case 32:
val |= S3C64XX_SPI_MODE_BUS_TSZ_WORD;
val |= S3C64XX_SPI_MODE_CH_TSZ_WORD;
break;
case 16:
val |= S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD;
val |= S3C64XX_SPI_MODE_CH_TSZ_HALFWORD;
break;
default:
val |= S3C64XX_SPI_MODE_BUS_TSZ_BYTE;
val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE;
break;
}
if ((sdd->cur_mode & SPI_LOOP) && sdd->port_conf->has_loopback)
val |= S3C64XX_SPI_MODE_SELF_LOOPBACK;
else
val &= ~S3C64XX_SPI_MODE_SELF_LOOPBACK;
writel(val, regs + S3C64XX_SPI_MODE_CFG);
if (sdd->port_conf->clk_from_cmu) {
ret = clk_set_rate(sdd->src_clk, sdd->cur_speed * div);
if (ret)
return ret;
sdd->cur_speed = clk_get_rate(sdd->src_clk) / div;
} else {
/* Configure Clock */
val = readl(regs + S3C64XX_SPI_CLK_CFG);
val &= ~S3C64XX_SPI_PSR_MASK;
val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / div - 1)
& S3C64XX_SPI_PSR_MASK);
writel(val, regs + S3C64XX_SPI_CLK_CFG);
/* Enable Clock */
val = readl(regs + S3C64XX_SPI_CLK_CFG);
val |= S3C64XX_SPI_ENCLK_ENABLE;
writel(val, regs + S3C64XX_SPI_CLK_CFG);
}
return 0;
}
#define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
static int s3c64xx_spi_prepare_message(struct spi_controller *host,
struct spi_message *msg)
{
struct s3c64xx_spi_driver_data *sdd = spi_controller_get_devdata(host);
struct spi_device *spi = msg->spi;
struct s3c64xx_spi_csinfo *cs = spi->controller_data;
/* Configure feedback delay */
if (!cs)
/* No delay if not defined */
writel(0, sdd->regs + S3C64XX_SPI_FB_CLK);
else
writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);
return 0;
}
static size_t s3c64xx_spi_max_transfer_size(struct spi_device *spi)
{
struct spi_controller *ctlr = spi->controller;
return ctlr->can_dma ? S3C64XX_SPI_PACKET_CNT_MASK : SIZE_MAX;
}
static int s3c64xx_spi_transfer_one(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct s3c64xx_spi_driver_data *sdd = spi_controller_get_devdata(host);
const unsigned int fifo_len = (FIFO_LVL_MASK(sdd) >> 1) + 1;
const void *tx_buf = NULL;
void *rx_buf = NULL;
int target_len = 0, origin_len = 0;
int use_dma = 0;
bool use_irq = false;
int status;
u32 speed;
u8 bpw;
unsigned long flags;
u32 rdy_lv;
u32 val;
reinit_completion(&sdd->xfer_completion);
/* Only BPW and Speed may change across transfers */
bpw = xfer->bits_per_word;
speed = xfer->speed_hz;
if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
sdd->cur_bpw = bpw;
sdd->cur_speed = speed;
sdd->cur_mode = spi->mode;
status = s3c64xx_spi_config(sdd);
if (status)
return status;
}
if (!is_polling(sdd) && (xfer->len > fifo_len) &&
sdd->rx_dma.ch && sdd->tx_dma.ch) {
use_dma = 1;
} else if (xfer->len >= fifo_len) {
tx_buf = xfer->tx_buf;
rx_buf = xfer->rx_buf;
origin_len = xfer->len;
target_len = xfer->len;
xfer->len = fifo_len - 1;
}
do {
/* if the transfer size exceeds the polling size (32 bytes), switch to IRQ mode */
if (!use_dma && xfer->len > S3C64XX_SPI_POLLING_SIZE)
use_irq = true;
if (use_irq) {
reinit_completion(&sdd->xfer_completion);
rdy_lv = xfer->len;
/* Setup RDY_FIFO trigger Level
* RDY_LVL =
* fifo_lvl up to 64 byte -> N bytes
* 128 byte -> RDY_LVL * 2 bytes
* 256 byte -> RDY_LVL * 4 bytes
*/
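/*
* For example (hypothetical transfer, following the table above): with a
* 128 byte FIFO and a 100 byte transfer, rdy_lv becomes 100 / 2 = 50 and
* the FIFO-ready interrupt triggers once 50 * 2 = 100 bytes have arrived.
*/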
if (fifo_len == 128)
rdy_lv /= 2;
else if (fifo_len == 256)
rdy_lv /= 4;
val = readl(sdd->regs + S3C64XX_SPI_MODE_CFG);
val &= ~S3C64XX_SPI_MODE_RX_RDY_LVL;
val |= (rdy_lv << S3C64XX_SPI_MODE_RX_RDY_LVL_SHIFT);
writel(val, sdd->regs + S3C64XX_SPI_MODE_CFG);
/* Enable FIFO_RDY_EN IRQ */
val = readl(sdd->regs + S3C64XX_SPI_INT_EN);
writel((val | S3C64XX_SPI_INT_RX_FIFORDY_EN),
sdd->regs + S3C64XX_SPI_INT_EN);
}
spin_lock_irqsave(&sdd->lock, flags);
/* Clear stale busy flags; only the work started below should be pending */
sdd->state &= ~RXBUSY;
sdd->state &= ~TXBUSY;
/* Start the signals */
s3c64xx_spi_set_cs(spi, true);
status = s3c64xx_enable_datapath(sdd, xfer, use_dma);
spin_unlock_irqrestore(&sdd->lock, flags);
if (status) {
dev_err(&spi->dev, "failed to enable data path for transfer: %d\n", status);
break;
}
if (use_dma)
status = s3c64xx_wait_for_dma(sdd, xfer);
else
status = s3c64xx_wait_for_pio(sdd, xfer, use_irq);
if (status) {
dev_err(&spi->dev,
"I/O Error: rx-%d tx-%d rx-%c tx-%c len-%d dma-%d res-(%d)\n",
xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
(sdd->state & RXBUSY) ? 'f' : 'p',
(sdd->state & TXBUSY) ? 'f' : 'p',
xfer->len, use_dma ? 1 : 0, status);
if (use_dma) {
struct dma_tx_state s;
if (xfer->tx_buf && (sdd->state & TXBUSY)) {
dmaengine_pause(sdd->tx_dma.ch);
dmaengine_tx_status(sdd->tx_dma.ch, sdd->tx_dma.cookie, &s);
dmaengine_terminate_all(sdd->tx_dma.ch);
dev_err(&spi->dev, "TX residue: %d\n", s.residue);
}
if (xfer->rx_buf && (sdd->state & RXBUSY)) {
dmaengine_pause(sdd->rx_dma.ch);
dmaengine_tx_status(sdd->rx_dma.ch, sdd->rx_dma.cookie, &s);
dmaengine_terminate_all(sdd->rx_dma.ch);
dev_err(&spi->dev, "RX residue: %d\n", s.residue);
}
}
} else {
s3c64xx_flush_fifo(sdd);
}
if (target_len > 0) {
target_len -= xfer->len;
if (xfer->tx_buf)
xfer->tx_buf += xfer->len;
if (xfer->rx_buf)
xfer->rx_buf += xfer->len;
if (target_len >= fifo_len)
xfer->len = fifo_len - 1;
else
xfer->len = target_len;
}
} while (target_len > 0);
if (origin_len) {
/* Restore original xfer buffers and length */
xfer->tx_buf = tx_buf;
xfer->rx_buf = rx_buf;
xfer->len = origin_len;
}
return status;
}
static struct s3c64xx_spi_csinfo *s3c64xx_get_target_ctrldata(
struct spi_device *spi)
{
struct s3c64xx_spi_csinfo *cs;
struct device_node *target_np, *data_np = NULL;
u32 fb_delay = 0;
target_np = spi->dev.of_node;
if (!target_np) {
dev_err(&spi->dev, "device node not found\n");
return ERR_PTR(-EINVAL);
}
cs = kzalloc(sizeof(*cs), GFP_KERNEL);
if (!cs)
return ERR_PTR(-ENOMEM);
data_np = of_get_child_by_name(target_np, "controller-data");
if (!data_np) {
dev_info(&spi->dev, "feedback delay set to default (0)\n");
return cs;
}
of_property_read_u32(data_np, "samsung,spi-feedback-delay", &fb_delay);
cs->fb_delay = fb_delay;
of_node_put(data_np);
return cs;
}
/*
* Here we only check the validity of requested configuration
* and save the configuration in a local data-structure.
* The controller is actually configured only just before we
* get a message to transfer.
*/
static int s3c64xx_spi_setup(struct spi_device *spi)
{
struct s3c64xx_spi_csinfo *cs = spi->controller_data;
struct s3c64xx_spi_driver_data *sdd;
int err;
int div;
sdd = spi_controller_get_devdata(spi->controller);
if (spi->dev.of_node) {
cs = s3c64xx_get_target_ctrldata(spi);
spi->controller_data = cs;
}
/* NULL is fine, we just avoid using the FB delay (=0) */
if (IS_ERR(cs)) {
dev_err(&spi->dev, "No CS for SPI(%d)\n", spi_get_chipselect(spi, 0));
return -ENODEV;
}
if (!spi_get_ctldata(spi))
spi_set_ctldata(spi, cs);
pm_runtime_get_sync(&sdd->pdev->dev);
div = sdd->port_conf->clk_div;
/* Check if we can provide the requested rate */
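/*
* Worked example (hypothetical clock rates): with src_clk = 100 MHz,
* clk_div = 2 and a requested 10 MHz, psr = 100 MHz / 2 / 10 MHz - 1 = 4
* and the resulting rate is 100 MHz / 2 / (4 + 1) = 10 MHz exactly.
*/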
if (!sdd->port_conf->clk_from_cmu) {
u32 psr, speed;
/* Max possible */
speed = clk_get_rate(sdd->src_clk) / div / (0 + 1);
if (spi->max_speed_hz > speed)
spi->max_speed_hz = speed;
psr = clk_get_rate(sdd->src_clk) / div / spi->max_speed_hz - 1;
psr &= S3C64XX_SPI_PSR_MASK;
if (psr == S3C64XX_SPI_PSR_MASK)
psr--;
speed = clk_get_rate(sdd->src_clk) / div / (psr + 1);
if (spi->max_speed_hz < speed) {
if (psr+1 < S3C64XX_SPI_PSR_MASK) {
psr++;
} else {
err = -EINVAL;
goto setup_exit;
}
}
speed = clk_get_rate(sdd->src_clk) / div / (psr + 1);
if (spi->max_speed_hz >= speed) {
spi->max_speed_hz = speed;
} else {
dev_err(&spi->dev, "Can't set %dHz transfer speed\n",
spi->max_speed_hz);
err = -EINVAL;
goto setup_exit;
}
}
pm_runtime_mark_last_busy(&sdd->pdev->dev);
pm_runtime_put_autosuspend(&sdd->pdev->dev);
s3c64xx_spi_set_cs(spi, false);
return 0;
setup_exit:
pm_runtime_mark_last_busy(&sdd->pdev->dev);
pm_runtime_put_autosuspend(&sdd->pdev->dev);
/* setup() returns with device de-selected */
s3c64xx_spi_set_cs(spi, false);
spi_set_ctldata(spi, NULL);
/* This was dynamically allocated on the DT path */
if (spi->dev.of_node)
kfree(cs);
return err;
}
static void s3c64xx_spi_cleanup(struct spi_device *spi)
{
struct s3c64xx_spi_csinfo *cs = spi_get_ctldata(spi);
/* This was dynamically allocated on the DT path */
if (spi->dev.of_node)
kfree(cs);
spi_set_ctldata(spi, NULL);
}
static irqreturn_t s3c64xx_spi_irq(int irq, void *data)
{
struct s3c64xx_spi_driver_data *sdd = data;
struct spi_controller *spi = sdd->host;
unsigned int val, clr = 0;
val = readl(sdd->regs + S3C64XX_SPI_STATUS);
if (val & S3C64XX_SPI_ST_RX_OVERRUN_ERR) {
clr = S3C64XX_SPI_PND_RX_OVERRUN_CLR;
dev_err(&spi->dev, "RX overrun\n");
}
if (val & S3C64XX_SPI_ST_RX_UNDERRUN_ERR) {
clr |= S3C64XX_SPI_PND_RX_UNDERRUN_CLR;
dev_err(&spi->dev, "RX underrun\n");
}
if (val & S3C64XX_SPI_ST_TX_OVERRUN_ERR) {
clr |= S3C64XX_SPI_PND_TX_OVERRUN_CLR;
dev_err(&spi->dev, "TX overrun\n");
}
if (val & S3C64XX_SPI_ST_TX_UNDERRUN_ERR) {
clr |= S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
dev_err(&spi->dev, "TX underrun\n");
}
if (val & S3C64XX_SPI_ST_RX_FIFORDY) {
complete(&sdd->xfer_completion);
/* No pending clear irq, turn-off INT_EN_RX_FIFO_RDY */
val = readl(sdd->regs + S3C64XX_SPI_INT_EN);
writel((val & ~S3C64XX_SPI_INT_RX_FIFORDY_EN),
sdd->regs + S3C64XX_SPI_INT_EN);
}
/* Clear the pending irq by setting and then clearing it */
writel(clr, sdd->regs + S3C64XX_SPI_PENDING_CLR);
writel(0, sdd->regs + S3C64XX_SPI_PENDING_CLR);
return IRQ_HANDLED;
}
static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd)
{
struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
void __iomem *regs = sdd->regs;
unsigned int val;
sdd->cur_speed = 0;
if (sci->no_cs)
writel(0, sdd->regs + S3C64XX_SPI_CS_REG);
else if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
writel(S3C64XX_SPI_CS_SIG_INACT, sdd->regs + S3C64XX_SPI_CS_REG);
/* Disable Interrupts - we use Polling if not DMA mode */
writel(0, regs + S3C64XX_SPI_INT_EN);
if (!sdd->port_conf->clk_from_cmu)
writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT,
regs + S3C64XX_SPI_CLK_CFG);
writel(0, regs + S3C64XX_SPI_MODE_CFG);
writel(0, regs + S3C64XX_SPI_PACKET_CNT);
/* Clear any pending irq bits; to clear them, the bits must be set and then cleared */
val = S3C64XX_SPI_PND_RX_OVERRUN_CLR |
S3C64XX_SPI_PND_RX_UNDERRUN_CLR |
S3C64XX_SPI_PND_TX_OVERRUN_CLR |
S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
writel(val, regs + S3C64XX_SPI_PENDING_CLR);
writel(0, regs + S3C64XX_SPI_PENDING_CLR);
writel(0, regs + S3C64XX_SPI_SWAP_CFG);
val = readl(regs + S3C64XX_SPI_MODE_CFG);
val &= ~S3C64XX_SPI_MODE_4BURST;
val &= ~(S3C64XX_SPI_MAX_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
val |= (S3C64XX_SPI_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
writel(val, regs + S3C64XX_SPI_MODE_CFG);
s3c64xx_flush_fifo(sdd);
}
#ifdef CONFIG_OF
static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
{
struct s3c64xx_spi_info *sci;
u32 temp;
sci = devm_kzalloc(dev, sizeof(*sci), GFP_KERNEL);
if (!sci)
return ERR_PTR(-ENOMEM);
if (of_property_read_u32(dev->of_node, "samsung,spi-src-clk", &temp)) {
dev_warn(dev, "spi bus clock parent not specified, using clock at index 0 as parent\n");
sci->src_clk_nr = 0;
} else {
sci->src_clk_nr = temp;
}
if (of_property_read_u32(dev->of_node, "num-cs", &temp)) {
dev_warn(dev, "number of chip select lines not specified, assuming 1 chip select line\n");
sci->num_cs = 1;
} else {
sci->num_cs = temp;
}
sci->no_cs = of_property_read_bool(dev->of_node, "no-cs-readback");
sci->polling = !of_property_present(dev->of_node, "dmas");
return sci;
}
#else
static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
{
return dev_get_platdata(dev);
}
#endif
static inline const struct s3c64xx_spi_port_config *s3c64xx_spi_get_port_config(
struct platform_device *pdev)
{
#ifdef CONFIG_OF
if (pdev->dev.of_node)
return of_device_get_match_data(&pdev->dev);
#endif
return (const struct s3c64xx_spi_port_config *)platform_get_device_id(pdev)->driver_data;
}
static int s3c64xx_spi_probe(struct platform_device *pdev)
{
struct resource *mem_res;
struct s3c64xx_spi_driver_data *sdd;
struct s3c64xx_spi_info *sci = dev_get_platdata(&pdev->dev);
struct spi_controller *host;
int ret, irq;
char clk_name[16];
if (!sci && pdev->dev.of_node) {
sci = s3c64xx_spi_parse_dt(&pdev->dev);
if (IS_ERR(sci))
return PTR_ERR(sci);
}
if (!sci)
return dev_err_probe(&pdev->dev, -ENODEV,
"Platform_data missing!\n");
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
host = devm_spi_alloc_host(&pdev->dev, sizeof(*sdd));
if (!host)
return dev_err_probe(&pdev->dev, -ENOMEM,
"Unable to allocate SPI Host\n");
platform_set_drvdata(pdev, host);
sdd = spi_controller_get_devdata(host);
sdd->port_conf = s3c64xx_spi_get_port_config(pdev);
sdd->host = host;
sdd->cntrlr_info = sci;
sdd->pdev = pdev;
if (pdev->dev.of_node) {
ret = of_alias_get_id(pdev->dev.of_node, "spi");
if (ret < 0)
return dev_err_probe(&pdev->dev, ret,
"Failed to get alias id\n");
sdd->port_id = ret;
} else {
sdd->port_id = pdev->id;
}
sdd->cur_bpw = 8;
sdd->tx_dma.direction = DMA_MEM_TO_DEV;
sdd->rx_dma.direction = DMA_DEV_TO_MEM;
host->dev.of_node = pdev->dev.of_node;
host->bus_num = sdd->port_id;
host->setup = s3c64xx_spi_setup;
host->cleanup = s3c64xx_spi_cleanup;
host->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer;
host->unprepare_transfer_hardware = s3c64xx_spi_unprepare_transfer;
host->prepare_message = s3c64xx_spi_prepare_message;
host->transfer_one = s3c64xx_spi_transfer_one;
host->max_transfer_size = s3c64xx_spi_max_transfer_size;
host->num_chipselect = sci->num_cs;
host->use_gpio_descriptors = true;
host->dma_alignment = 8;
host->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
SPI_BPW_MASK(8);
/* the spi->mode bits understood by this driver: */
host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
if (sdd->port_conf->has_loopback)
host->mode_bits |= SPI_LOOP;
host->auto_runtime_pm = true;
if (!is_polling(sdd))
host->can_dma = s3c64xx_spi_can_dma;
sdd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res);
if (IS_ERR(sdd->regs))
return PTR_ERR(sdd->regs);
sdd->sfr_start = mem_res->start;
if (sci->cfg_gpio && sci->cfg_gpio())
return dev_err_probe(&pdev->dev, -EBUSY,
"Unable to config gpio\n");
/* Setup clocks */
sdd->clk = devm_clk_get_enabled(&pdev->dev, "spi");
if (IS_ERR(sdd->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(sdd->clk),
"Unable to acquire clock 'spi'\n");
sprintf(clk_name, "spi_busclk%d", sci->src_clk_nr);
sdd->src_clk = devm_clk_get_enabled(&pdev->dev, clk_name);
if (IS_ERR(sdd->src_clk))
return dev_err_probe(&pdev->dev, PTR_ERR(sdd->src_clk),
"Unable to acquire clock '%s'\n",
clk_name);
if (sdd->port_conf->clk_ioclk) {
sdd->ioclk = devm_clk_get_enabled(&pdev->dev, "spi_ioclk");
if (IS_ERR(sdd->ioclk))
return dev_err_probe(&pdev->dev, PTR_ERR(sdd->ioclk),
"Unable to acquire 'ioclk'\n");
}
pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
/* Set up default mode */
s3c64xx_spi_hwinit(sdd);
spin_lock_init(&sdd->lock);
init_completion(&sdd->xfer_completion);
ret = devm_request_irq(&pdev->dev, irq, s3c64xx_spi_irq, 0,
"spi-s3c64xx", sdd);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request IRQ %d: %d\n",
irq, ret);
goto err_pm_put;
}
writel(S3C64XX_SPI_INT_RX_OVERRUN_EN | S3C64XX_SPI_INT_RX_UNDERRUN_EN |
S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN,
sdd->regs + S3C64XX_SPI_INT_EN);
ret = devm_spi_register_controller(&pdev->dev, host);
if (ret != 0) {
dev_err(&pdev->dev, "cannot register SPI host: %d\n", ret);
goto err_pm_put;
}
dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Targets attached\n",
sdd->port_id, host->num_chipselect);
dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tFIFO %dbytes\n",
mem_res, (FIFO_LVL_MASK(sdd) >> 1) + 1);
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
return 0;
err_pm_put:
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
return ret;
}
static void s3c64xx_spi_remove(struct platform_device *pdev)
{
struct spi_controller *host = platform_get_drvdata(pdev);
struct s3c64xx_spi_driver_data *sdd = spi_controller_get_devdata(host);
pm_runtime_get_sync(&pdev->dev);
writel(0, sdd->regs + S3C64XX_SPI_INT_EN);
if (!is_polling(sdd)) {
dma_release_channel(sdd->rx_dma.ch);
dma_release_channel(sdd->tx_dma.ch);
}
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
}
#ifdef CONFIG_PM_SLEEP
static int s3c64xx_spi_suspend(struct device *dev)
{
struct spi_controller *host = dev_get_drvdata(dev);
struct s3c64xx_spi_driver_data *sdd = spi_controller_get_devdata(host);
int ret = spi_controller_suspend(host);
if (ret)
return ret;
ret = pm_runtime_force_suspend(dev);
if (ret < 0)
return ret;
sdd->cur_speed = 0; /* Output Clock is stopped */
return 0;
}
static int s3c64xx_spi_resume(struct device *dev)
{
struct spi_controller *host = dev_get_drvdata(dev);
struct s3c64xx_spi_driver_data *sdd = spi_controller_get_devdata(host);
struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
int ret;
if (sci->cfg_gpio)
sci->cfg_gpio();
ret = pm_runtime_force_resume(dev);
if (ret < 0)
return ret;
return spi_controller_resume(host);
}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
static int s3c64xx_spi_runtime_suspend(struct device *dev)
{
struct spi_controller *host = dev_get_drvdata(dev);
struct s3c64xx_spi_driver_data *sdd = spi_controller_get_devdata(host);
clk_disable_unprepare(sdd->clk);
clk_disable_unprepare(sdd->src_clk);
clk_disable_unprepare(sdd->ioclk);
return 0;
}
static int s3c64xx_spi_runtime_resume(struct device *dev)
{
struct spi_controller *host = dev_get_drvdata(dev);
struct s3c64xx_spi_driver_data *sdd = spi_controller_get_devdata(host);
int ret;
if (sdd->port_conf->clk_ioclk) {
ret = clk_prepare_enable(sdd->ioclk);
if (ret != 0)
return ret;
}
ret = clk_prepare_enable(sdd->src_clk);
if (ret != 0)
goto err_disable_ioclk;
ret = clk_prepare_enable(sdd->clk);
if (ret != 0)
goto err_disable_src_clk;
s3c64xx_spi_hwinit(sdd);
writel(S3C64XX_SPI_INT_RX_OVERRUN_EN | S3C64XX_SPI_INT_RX_UNDERRUN_EN |
S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN,
sdd->regs + S3C64XX_SPI_INT_EN);
return 0;
err_disable_src_clk:
clk_disable_unprepare(sdd->src_clk);
err_disable_ioclk:
clk_disable_unprepare(sdd->ioclk);
return ret;
}
#endif /* CONFIG_PM */
static const struct dev_pm_ops s3c64xx_spi_pm = {
SET_SYSTEM_SLEEP_PM_OPS(s3c64xx_spi_suspend, s3c64xx_spi_resume)
SET_RUNTIME_PM_OPS(s3c64xx_spi_runtime_suspend,
s3c64xx_spi_runtime_resume, NULL)
};
static const struct s3c64xx_spi_port_config s3c2443_spi_port_config = {
.fifo_lvl_mask = { 0x7f },
.rx_lvl_offset = 13,
.tx_st_done = 21,
.clk_div = 2,
.high_speed = true,
};
static const struct s3c64xx_spi_port_config s3c6410_spi_port_config = {
.fifo_lvl_mask = { 0x7f, 0x7F },
.rx_lvl_offset = 13,
.tx_st_done = 21,
.clk_div = 2,
};
static const struct s3c64xx_spi_port_config s5pv210_spi_port_config = {
.fifo_lvl_mask = { 0x1ff, 0x7F },
.rx_lvl_offset = 15,
.tx_st_done = 25,
.clk_div = 2,
.high_speed = true,
};
static const struct s3c64xx_spi_port_config exynos4_spi_port_config = {
.fifo_lvl_mask = { 0x1ff, 0x7F, 0x7F },
.rx_lvl_offset = 15,
.tx_st_done = 25,
.clk_div = 2,
.high_speed = true,
.clk_from_cmu = true,
.quirks = S3C64XX_SPI_QUIRK_CS_AUTO,
};
static const struct s3c64xx_spi_port_config exynos7_spi_port_config = {
.fifo_lvl_mask = { 0x1ff, 0x7F, 0x7F, 0x7F, 0x7F, 0x1ff},
.rx_lvl_offset = 15,
.tx_st_done = 25,
.clk_div = 2,
.high_speed = true,
.clk_from_cmu = true,
.quirks = S3C64XX_SPI_QUIRK_CS_AUTO,
};
static const struct s3c64xx_spi_port_config exynos5433_spi_port_config = {
.fifo_lvl_mask = { 0x1ff, 0x7f, 0x7f, 0x7f, 0x7f, 0x1ff},
.rx_lvl_offset = 15,
.tx_st_done = 25,
.clk_div = 2,
.high_speed = true,
.clk_from_cmu = true,
.clk_ioclk = true,
.quirks = S3C64XX_SPI_QUIRK_CS_AUTO,
};
static const struct s3c64xx_spi_port_config exynosautov9_spi_port_config = {
.fifo_lvl_mask = { 0x1ff, 0x1ff, 0x7f, 0x7f, 0x7f, 0x7f, 0x1ff, 0x7f,
0x7f, 0x7f, 0x7f, 0x7f},
.rx_lvl_offset = 15,
.tx_st_done = 25,
.clk_div = 4,
.high_speed = true,
.clk_from_cmu = true,
.clk_ioclk = true,
.has_loopback = true,
.quirks = S3C64XX_SPI_QUIRK_CS_AUTO,
};
static const struct s3c64xx_spi_port_config fsd_spi_port_config = {
.fifo_lvl_mask = { 0x7f, 0x7f, 0x7f, 0x7f, 0x7f},
.rx_lvl_offset = 15,
.tx_st_done = 25,
.clk_div = 2,
.high_speed = true,
.clk_from_cmu = true,
.clk_ioclk = false,
.quirks = S3C64XX_SPI_QUIRK_CS_AUTO,
};
static const struct platform_device_id s3c64xx_spi_driver_ids[] = {
{
.name = "s3c2443-spi",
.driver_data = (kernel_ulong_t)&s3c2443_spi_port_config,
}, {
.name = "s3c6410-spi",
.driver_data = (kernel_ulong_t)&s3c6410_spi_port_config,
},
{ },
};
static const struct of_device_id s3c64xx_spi_dt_match[] = {
{ .compatible = "samsung,s3c2443-spi",
.data = (void *)&s3c2443_spi_port_config,
},
{ .compatible = "samsung,s3c6410-spi",
.data = (void *)&s3c6410_spi_port_config,
},
{ .compatible = "samsung,s5pv210-spi",
.data = (void *)&s5pv210_spi_port_config,
},
{ .compatible = "samsung,exynos4210-spi",
.data = (void *)&exynos4_spi_port_config,
},
{ .compatible = "samsung,exynos7-spi",
.data = (void *)&exynos7_spi_port_config,
},
{ .compatible = "samsung,exynos5433-spi",
.data = (void *)&exynos5433_spi_port_config,
},
{ .compatible = "samsung,exynosautov9-spi",
.data = (void *)&exynosautov9_spi_port_config,
},
{ .compatible = "tesla,fsd-spi",
.data = (void *)&fsd_spi_port_config,
},
{ },
};
MODULE_DEVICE_TABLE(of, s3c64xx_spi_dt_match);
static struct platform_driver s3c64xx_spi_driver = {
.driver = {
.name = "s3c64xx-spi",
.pm = &s3c64xx_spi_pm,
.of_match_table = of_match_ptr(s3c64xx_spi_dt_match),
},
.probe = s3c64xx_spi_probe,
.remove_new = s3c64xx_spi_remove,
.id_table = s3c64xx_spi_driver_ids,
};
MODULE_ALIAS("platform:s3c64xx-spi");
module_platform_driver(s3c64xx_spi_driver);
MODULE_AUTHOR("Jaswinder Singh <[email protected]>");
MODULE_DESCRIPTION("S3C64XX SPI Controller Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-s3c64xx.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* au1550 psc spi controller driver
* may work also with au1200, au1210, au1250
* will not work on au1000, au1100 and au1500 (no full spi controller there)
*
* Copyright (c) 2006 ATRON electronic GmbH
* Author: Jan Nikitenko <[email protected]>
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/resource.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1xxx_psc.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>
#include <asm/mach-au1x00/au1550_spi.h>
static unsigned int usedma = 1;
module_param(usedma, uint, 0644);
/*
#define AU1550_SPI_DEBUG_LOOPBACK
*/
#define AU1550_SPI_DBDMA_DESCRIPTORS 1
#define AU1550_SPI_DMA_RXTMP_MINSIZE 2048U
struct au1550_spi {
struct spi_bitbang bitbang;
volatile psc_spi_t __iomem *regs;
int irq;
unsigned int len;
unsigned int tx_count;
unsigned int rx_count;
const u8 *tx;
u8 *rx;
void (*rx_word)(struct au1550_spi *hw);
void (*tx_word)(struct au1550_spi *hw);
int (*txrx_bufs)(struct spi_device *spi, struct spi_transfer *t);
irqreturn_t (*irq_callback)(struct au1550_spi *hw);
struct completion host_done;
unsigned int usedma;
u32 dma_tx_id;
u32 dma_rx_id;
u32 dma_tx_ch;
u32 dma_rx_ch;
u8 *dma_rx_tmpbuf;
unsigned int dma_rx_tmpbuf_size;
u32 dma_rx_tmpbuf_addr;
struct spi_controller *host;
struct device *dev;
struct au1550_spi_info *pdata;
struct resource *ioarea;
};
/* we use an 8-bit memory device for dma transfers to/from spi fifo */
static dbdev_tab_t au1550_spi_mem_dbdev = {
.dev_id = DBDMA_MEM_CHAN,
.dev_flags = DEV_FLAGS_ANYUSE|DEV_FLAGS_SYNC,
.dev_tsize = 0,
.dev_devwidth = 8,
.dev_physaddr = 0x00000000,
.dev_intlevel = 0,
.dev_intpolarity = 0
};
static int ddma_memid; /* id to above mem dma device */
static void au1550_spi_bits_handlers_set(struct au1550_spi *hw, int bpw);
/*
* compute BRG and DIV bits to setup spi clock based on main input clock rate
* that was specified in platform data structure
* according to au1550 datasheet:
* psc_tempclk = psc_mainclk / (2 << DIV)
* spiclk = psc_tempclk / (2 * (BRG + 1))
* BRG valid range is 4..63
* DIV valid range is 0..3
*/
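/*
* Worked example (hypothetical mainclk, not from platform data): with
* mainclk_hz = 96 MHz and speed_hz = 1 MHz, the div = 0 iteration gives
* brg = 96 MHz / 1 MHz / 4 = 24 (i.e. BRG + 1), which is in range, so
* BRG = 23 and DIV = 0; check: 96 MHz / (2 << 0) / (2 * (23 + 1)) = 1 MHz.
*/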
static u32 au1550_spi_baudcfg(struct au1550_spi *hw, unsigned int speed_hz)
{
u32 mainclk_hz = hw->pdata->mainclk_hz;
u32 div, brg;
for (div = 0; div < 4; div++) {
brg = mainclk_hz / speed_hz / (4 << div);
/* now we have BRG+1 in brg, so count with that */
if (brg < (4 + 1)) {
brg = (4 + 1); /* speed_hz too big */
break; /* set lowest brg (div is == 0) */
}
if (brg <= (63 + 1))
break; /* we have valid brg and div */
}
if (div == 4) {
div = 3; /* speed_hz too small */
brg = (63 + 1); /* set highest brg and div */
}
brg--;
return PSC_SPICFG_SET_BAUD(brg) | PSC_SPICFG_SET_DIV(div);
}
static inline void au1550_spi_mask_ack_all(struct au1550_spi *hw)
{
hw->regs->psc_spimsk =
PSC_SPIMSK_MM | PSC_SPIMSK_RR | PSC_SPIMSK_RO
| PSC_SPIMSK_RU | PSC_SPIMSK_TR | PSC_SPIMSK_TO
| PSC_SPIMSK_TU | PSC_SPIMSK_SD | PSC_SPIMSK_MD;
wmb(); /* drain writebuffer */
hw->regs->psc_spievent =
PSC_SPIEVNT_MM | PSC_SPIEVNT_RR | PSC_SPIEVNT_RO
| PSC_SPIEVNT_RU | PSC_SPIEVNT_TR | PSC_SPIEVNT_TO
| PSC_SPIEVNT_TU | PSC_SPIEVNT_SD | PSC_SPIEVNT_MD;
wmb(); /* drain writebuffer */
}
static void au1550_spi_reset_fifos(struct au1550_spi *hw)
{
u32 pcr;
hw->regs->psc_spipcr = PSC_SPIPCR_RC | PSC_SPIPCR_TC;
wmb(); /* drain writebuffer */
do {
pcr = hw->regs->psc_spipcr;
wmb(); /* drain writebuffer */
} while (pcr != 0);
}
/*
* dma transfers are used for the most common spi word size of 8 bits.
* We cannot easily change the width of already set up dma channels, so if
* we wanted dma support for more than 8-bit words (up to 24 bits), we would
* need to set up the dma channels from scratch on each spi transfer, based
* on bits_per_word. Instead we have pre-set-up 8-bit dma channels supporting
* 4- to 8-bit spi transfers, while 9- to 24-bit spi transfers are done in
* pio irq based mode.
* Callbacks to handle dma or pio are set up in au1550_spi_bits_handlers_set()
*/
static void au1550_spi_chipsel(struct spi_device *spi, int value)
{
struct au1550_spi *hw = spi_controller_get_devdata(spi->controller);
unsigned int cspol = spi->mode & SPI_CS_HIGH ? 1 : 0;
u32 cfg, stat;
switch (value) {
case BITBANG_CS_INACTIVE:
if (hw->pdata->deactivate_cs)
hw->pdata->deactivate_cs(hw->pdata, spi_get_chipselect(spi, 0),
cspol);
break;
case BITBANG_CS_ACTIVE:
au1550_spi_bits_handlers_set(hw, spi->bits_per_word);
cfg = hw->regs->psc_spicfg;
wmb(); /* drain writebuffer */
hw->regs->psc_spicfg = cfg & ~PSC_SPICFG_DE_ENABLE;
wmb(); /* drain writebuffer */
if (spi->mode & SPI_CPOL)
cfg |= PSC_SPICFG_BI;
else
cfg &= ~PSC_SPICFG_BI;
if (spi->mode & SPI_CPHA)
cfg &= ~PSC_SPICFG_CDE;
else
cfg |= PSC_SPICFG_CDE;
if (spi->mode & SPI_LSB_FIRST)
cfg |= PSC_SPICFG_MLF;
else
cfg &= ~PSC_SPICFG_MLF;
if (hw->usedma && spi->bits_per_word <= 8)
cfg &= ~PSC_SPICFG_DD_DISABLE;
else
cfg |= PSC_SPICFG_DD_DISABLE;
cfg = PSC_SPICFG_CLR_LEN(cfg);
cfg |= PSC_SPICFG_SET_LEN(spi->bits_per_word);
cfg = PSC_SPICFG_CLR_BAUD(cfg);
cfg &= ~PSC_SPICFG_SET_DIV(3);
cfg |= au1550_spi_baudcfg(hw, spi->max_speed_hz);
hw->regs->psc_spicfg = cfg | PSC_SPICFG_DE_ENABLE;
wmb(); /* drain writebuffer */
do {
stat = hw->regs->psc_spistat;
wmb(); /* drain writebuffer */
} while ((stat & PSC_SPISTAT_DR) == 0);
if (hw->pdata->activate_cs)
hw->pdata->activate_cs(hw->pdata, spi_get_chipselect(spi, 0),
cspol);
break;
}
}
static int au1550_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t)
{
struct au1550_spi *hw = spi_controller_get_devdata(spi->controller);
unsigned int bpw, hz;
u32 cfg, stat;
if (t) {
bpw = t->bits_per_word;
hz = t->speed_hz;
} else {
bpw = spi->bits_per_word;
hz = spi->max_speed_hz;
}
if (!hz)
return -EINVAL;
au1550_spi_bits_handlers_set(hw, spi->bits_per_word);
cfg = hw->regs->psc_spicfg;
wmb(); /* drain writebuffer */
hw->regs->psc_spicfg = cfg & ~PSC_SPICFG_DE_ENABLE;
wmb(); /* drain writebuffer */
if (hw->usedma && bpw <= 8)
cfg &= ~PSC_SPICFG_DD_DISABLE;
else
cfg |= PSC_SPICFG_DD_DISABLE;
cfg = PSC_SPICFG_CLR_LEN(cfg);
cfg |= PSC_SPICFG_SET_LEN(bpw);
cfg = PSC_SPICFG_CLR_BAUD(cfg);
cfg &= ~PSC_SPICFG_SET_DIV(3);
cfg |= au1550_spi_baudcfg(hw, hz);
hw->regs->psc_spicfg = cfg;
wmb(); /* drain writebuffer */
if (cfg & PSC_SPICFG_DE_ENABLE) {
do {
stat = hw->regs->psc_spistat;
wmb(); /* drain writebuffer */
} while ((stat & PSC_SPISTAT_DR) == 0);
}
au1550_spi_reset_fifos(hw);
au1550_spi_mask_ack_all(hw);
return 0;
}
/*
* for dma spi transfers we have to set up the rx channel, otherwise there is
* no reliable way to recognize that the spi transfer is done:
* dma complete callbacks are called before the real spi transfer is finished,
* and if only the tx dma channel is set up (and the rx fifo overflow event is
* masked), the spi host done event irq is not generated unless the rx fifo is
* emptied. So we need a temporary rx buffer for rx dma if the user does not
* provide one.
*/
static int au1550_spi_dma_rxtmp_alloc(struct au1550_spi *hw, unsigned int size)
{
hw->dma_rx_tmpbuf = kmalloc(size, GFP_KERNEL);
if (!hw->dma_rx_tmpbuf)
return -ENOMEM;
hw->dma_rx_tmpbuf_size = size;
hw->dma_rx_tmpbuf_addr = dma_map_single(hw->dev, hw->dma_rx_tmpbuf,
size, DMA_FROM_DEVICE);
if (dma_mapping_error(hw->dev, hw->dma_rx_tmpbuf_addr)) {
kfree(hw->dma_rx_tmpbuf);
hw->dma_rx_tmpbuf = 0;
hw->dma_rx_tmpbuf_size = 0;
return -EFAULT;
}
return 0;
}
static void au1550_spi_dma_rxtmp_free(struct au1550_spi *hw)
{
dma_unmap_single(hw->dev, hw->dma_rx_tmpbuf_addr,
hw->dma_rx_tmpbuf_size, DMA_FROM_DEVICE);
kfree(hw->dma_rx_tmpbuf);
hw->dma_rx_tmpbuf = 0;
hw->dma_rx_tmpbuf_size = 0;
}
static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t)
{
struct au1550_spi *hw = spi_controller_get_devdata(spi->controller);
dma_addr_t dma_tx_addr;
dma_addr_t dma_rx_addr;
u32 res;
hw->len = t->len;
hw->tx_count = 0;
hw->rx_count = 0;
hw->tx = t->tx_buf;
hw->rx = t->rx_buf;
dma_tx_addr = t->tx_dma;
dma_rx_addr = t->rx_dma;
/*
* check if buffers are already dma mapped, map them otherwise:
* - first map the TX buffer, so cache data gets written to memory
* - then map the RX buffer, so that cache entries (with
* soon-to-be-stale data) get removed
* use rx buffer in place of tx if tx buffer was not provided
* use temp rx buffer (preallocated or realloc to fit) for rx dma
*/
if (t->tx_buf) {
if (t->tx_dma == 0) { /* if DMA_ADDR_INVALID, map it */
dma_tx_addr = dma_map_single(hw->dev,
(void *)t->tx_buf,
t->len, DMA_TO_DEVICE);
if (dma_mapping_error(hw->dev, dma_tx_addr))
dev_err(hw->dev, "tx dma map error\n");
}
}
if (t->rx_buf) {
if (t->rx_dma == 0) { /* if DMA_ADDR_INVALID, map it */
dma_rx_addr = dma_map_single(hw->dev,
(void *)t->rx_buf,
t->len, DMA_FROM_DEVICE);
if (dma_mapping_error(hw->dev, dma_rx_addr))
dev_err(hw->dev, "rx dma map error\n");
}
} else {
if (t->len > hw->dma_rx_tmpbuf_size) {
int ret;
au1550_spi_dma_rxtmp_free(hw);
ret = au1550_spi_dma_rxtmp_alloc(hw, max(t->len,
AU1550_SPI_DMA_RXTMP_MINSIZE));
if (ret < 0)
return ret;
}
hw->rx = hw->dma_rx_tmpbuf;
dma_rx_addr = hw->dma_rx_tmpbuf_addr;
dma_sync_single_for_device(hw->dev, dma_rx_addr,
t->len, DMA_FROM_DEVICE);
}
if (!t->tx_buf) {
dma_sync_single_for_device(hw->dev, dma_rx_addr,
t->len, DMA_BIDIRECTIONAL);
hw->tx = hw->rx;
}
/* put buffers on the ring */
res = au1xxx_dbdma_put_dest(hw->dma_rx_ch, virt_to_phys(hw->rx),
t->len, DDMA_FLAGS_IE);
if (!res)
dev_err(hw->dev, "rx dma put dest error\n");
res = au1xxx_dbdma_put_source(hw->dma_tx_ch, virt_to_phys(hw->tx),
t->len, DDMA_FLAGS_IE);
if (!res)
dev_err(hw->dev, "tx dma put source error\n");
au1xxx_dbdma_start(hw->dma_rx_ch);
au1xxx_dbdma_start(hw->dma_tx_ch);
/* by default enable nearly all event interrupts */
hw->regs->psc_spimsk = PSC_SPIMSK_SD;
wmb(); /* drain writebuffer */
/* start the transfer */
hw->regs->psc_spipcr = PSC_SPIPCR_MS;
wmb(); /* drain writebuffer */
wait_for_completion(&hw->host_done);
au1xxx_dbdma_stop(hw->dma_tx_ch);
au1xxx_dbdma_stop(hw->dma_rx_ch);
if (!t->rx_buf) {
/* using the temporal preallocated and premapped buffer */
dma_sync_single_for_cpu(hw->dev, dma_rx_addr, t->len,
DMA_FROM_DEVICE);
}
/* unmap buffers if mapped above */
if (t->rx_buf && t->rx_dma == 0)
dma_unmap_single(hw->dev, dma_rx_addr, t->len,
DMA_FROM_DEVICE);
if (t->tx_buf && t->tx_dma == 0)
dma_unmap_single(hw->dev, dma_tx_addr, t->len,
DMA_TO_DEVICE);
return min(hw->rx_count, hw->tx_count);
}
static irqreturn_t au1550_spi_dma_irq_callback(struct au1550_spi *hw)
{
u32 stat, evnt;
stat = hw->regs->psc_spistat;
evnt = hw->regs->psc_spievent;
wmb(); /* drain writebuffer */
if ((stat & PSC_SPISTAT_DI) == 0) {
dev_err(hw->dev, "Unexpected IRQ!\n");
return IRQ_NONE;
}
if ((evnt & (PSC_SPIEVNT_MM | PSC_SPIEVNT_RO
| PSC_SPIEVNT_RU | PSC_SPIEVNT_TO
| PSC_SPIEVNT_TU | PSC_SPIEVNT_SD))
!= 0) {
/*
* due to an spi error we consider transfer as done,
* so mask all events until before next transfer start
* and stop the possibly running dma immediately
*/
au1550_spi_mask_ack_all(hw);
au1xxx_dbdma_stop(hw->dma_rx_ch);
au1xxx_dbdma_stop(hw->dma_tx_ch);
/* get number of transferred bytes */
hw->rx_count = hw->len - au1xxx_get_dma_residue(hw->dma_rx_ch);
hw->tx_count = hw->len - au1xxx_get_dma_residue(hw->dma_tx_ch);
au1xxx_dbdma_reset(hw->dma_rx_ch);
au1xxx_dbdma_reset(hw->dma_tx_ch);
au1550_spi_reset_fifos(hw);
if (evnt == PSC_SPIEVNT_RO)
dev_err(hw->dev,
"dma transfer: receive FIFO overflow!\n");
else
dev_err(hw->dev,
"dma transfer: unexpected SPI error (event=0x%x stat=0x%x)!\n",
evnt, stat);
complete(&hw->host_done);
return IRQ_HANDLED;
}
if ((evnt & PSC_SPIEVNT_MD) != 0) {
/* transfer completed successfully */
au1550_spi_mask_ack_all(hw);
hw->rx_count = hw->len;
hw->tx_count = hw->len;
complete(&hw->host_done);
}
return IRQ_HANDLED;
}
/* routines to handle different word sizes in pio mode */
#define AU1550_SPI_RX_WORD(size, mask) \
static void au1550_spi_rx_word_##size(struct au1550_spi *hw) \
{ \
u32 fifoword = hw->regs->psc_spitxrx & (u32)(mask); \
wmb(); /* drain writebuffer */ \
if (hw->rx) { \
*(u##size *)hw->rx = (u##size)fifoword; \
hw->rx += (size) / 8; \
} \
hw->rx_count += (size) / 8; \
}
#define AU1550_SPI_TX_WORD(size, mask) \
static void au1550_spi_tx_word_##size(struct au1550_spi *hw) \
{ \
u32 fifoword = 0; \
if (hw->tx) { \
fifoword = *(u##size *)hw->tx & (u32)(mask); \
hw->tx += (size) / 8; \
} \
hw->tx_count += (size) / 8; \
if (hw->tx_count >= hw->len) \
fifoword |= PSC_SPITXRX_LC; \
hw->regs->psc_spitxrx = fifoword; \
wmb(); /* drain writebuffer */ \
}
AU1550_SPI_RX_WORD(8, 0xff)
AU1550_SPI_RX_WORD(16, 0xffff)
AU1550_SPI_RX_WORD(32, 0xffffff)
AU1550_SPI_TX_WORD(8, 0xff)
AU1550_SPI_TX_WORD(16, 0xffff)
AU1550_SPI_TX_WORD(32, 0xffffff)
static int au1550_spi_pio_txrxb(struct spi_device *spi, struct spi_transfer *t)
{
u32 stat, mask;
struct au1550_spi *hw = spi_controller_get_devdata(spi->controller);
hw->tx = t->tx_buf;
hw->rx = t->rx_buf;
hw->len = t->len;
hw->tx_count = 0;
hw->rx_count = 0;
/* by default enable nearly all events after filling tx fifo */
mask = PSC_SPIMSK_SD;
/* fill the transmit FIFO */
while (hw->tx_count < hw->len) {
hw->tx_word(hw);
if (hw->tx_count >= hw->len) {
/* mask tx fifo request interrupt as we are done */
mask |= PSC_SPIMSK_TR;
}
stat = hw->regs->psc_spistat;
wmb(); /* drain writebuffer */
if (stat & PSC_SPISTAT_TF)
break;
}
/* enable event interrupts */
hw->regs->psc_spimsk = mask;
wmb(); /* drain writebuffer */
/* start the transfer */
hw->regs->psc_spipcr = PSC_SPIPCR_MS;
wmb(); /* drain writebuffer */
wait_for_completion(&hw->host_done);
return min(hw->rx_count, hw->tx_count);
}
static irqreturn_t au1550_spi_pio_irq_callback(struct au1550_spi *hw)
{
int busy;
u32 stat, evnt;
stat = hw->regs->psc_spistat;
evnt = hw->regs->psc_spievent;
wmb(); /* drain writebuffer */
if ((stat & PSC_SPISTAT_DI) == 0) {
dev_err(hw->dev, "Unexpected IRQ!\n");
return IRQ_NONE;
}
if ((evnt & (PSC_SPIEVNT_MM | PSC_SPIEVNT_RO
| PSC_SPIEVNT_RU | PSC_SPIEVNT_TO
| PSC_SPIEVNT_SD))
!= 0) {
/*
* due to an error we consider transfer as done,
* so mask all events until before next transfer start
*/
au1550_spi_mask_ack_all(hw);
au1550_spi_reset_fifos(hw);
dev_err(hw->dev,
"pio transfer: unexpected SPI error (event=0x%x stat=0x%x)!\n",
evnt, stat);
complete(&hw->host_done);
return IRQ_HANDLED;
}
/*
* while there is something to read from rx fifo
* or there is a space to write to tx fifo:
*/
do {
busy = 0;
stat = hw->regs->psc_spistat;
wmb(); /* drain writebuffer */
/*
* Take care to not let the Rx FIFO overflow.
*
* We only write a byte if we have read one at least. Initially,
* the write fifo is full, so we should read from the read fifo
* first.
* In case we miss a word from the read fifo, we should get a
* RO event and should back out.
*/
if (!(stat & PSC_SPISTAT_RE) && hw->rx_count < hw->len) {
hw->rx_word(hw);
busy = 1;
if (!(stat & PSC_SPISTAT_TF) && hw->tx_count < hw->len)
hw->tx_word(hw);
}
} while (busy);
hw->regs->psc_spievent = PSC_SPIEVNT_RR | PSC_SPIEVNT_TR;
wmb(); /* drain writebuffer */
/*
* Restart the SPI transmission in case of a transmit underflow.
* This seems to work despite the notes in the Au1550 data book
* of Figure 8-4 with flowchart for SPI host operation:
*
* """Note 1: An XFR Error Interrupt occurs, unless masked,
* for any of the following events: Tx FIFO Underflow,
* Rx FIFO Overflow, or Multiple-host Error
* Note 2: In case of a Tx Underflow Error, all zeroes are
* transmitted."""
*
* By simply restarting the spi transfer on Tx Underflow Error,
* we assume that the spi transfer was paused rather than zeroes being
* transmitted, as mentioned in Note 2 of the Au1550 data book.
*/
if (evnt & PSC_SPIEVNT_TU) {
hw->regs->psc_spievent = PSC_SPIEVNT_TU | PSC_SPIEVNT_MD;
wmb(); /* drain writebuffer */
hw->regs->psc_spipcr = PSC_SPIPCR_MS;
wmb(); /* drain writebuffer */
}
if (hw->rx_count >= hw->len) {
/* transfer completed successfully */
au1550_spi_mask_ack_all(hw);
complete(&hw->host_done);
}
return IRQ_HANDLED;
}
static int au1550_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
{
struct au1550_spi *hw = spi_controller_get_devdata(spi->controller);
return hw->txrx_bufs(spi, t);
}
static irqreturn_t au1550_spi_irq(int irq, void *dev)
{
struct au1550_spi *hw = dev;
return hw->irq_callback(hw);
}
static void au1550_spi_bits_handlers_set(struct au1550_spi *hw, int bpw)
{
if (bpw <= 8) {
if (hw->usedma) {
hw->txrx_bufs = &au1550_spi_dma_txrxb;
hw->irq_callback = &au1550_spi_dma_irq_callback;
} else {
hw->rx_word = &au1550_spi_rx_word_8;
hw->tx_word = &au1550_spi_tx_word_8;
hw->txrx_bufs = &au1550_spi_pio_txrxb;
hw->irq_callback = &au1550_spi_pio_irq_callback;
}
} else if (bpw <= 16) {
hw->rx_word = &au1550_spi_rx_word_16;
hw->tx_word = &au1550_spi_tx_word_16;
hw->txrx_bufs = &au1550_spi_pio_txrxb;
hw->irq_callback = &au1550_spi_pio_irq_callback;
} else {
hw->rx_word = &au1550_spi_rx_word_32;
hw->tx_word = &au1550_spi_tx_word_32;
hw->txrx_bufs = &au1550_spi_pio_txrxb;
hw->irq_callback = &au1550_spi_pio_irq_callback;
}
}
static void au1550_spi_setup_psc_as_spi(struct au1550_spi *hw)
{
u32 stat, cfg;
/* set up the PSC for SPI mode */
hw->regs->psc_ctrl = PSC_CTRL_DISABLE;
wmb(); /* drain writebuffer */
hw->regs->psc_sel = PSC_SEL_PS_SPIMODE;
wmb(); /* drain writebuffer */
hw->regs->psc_spicfg = 0;
wmb(); /* drain writebuffer */
hw->regs->psc_ctrl = PSC_CTRL_ENABLE;
wmb(); /* drain writebuffer */
do {
stat = hw->regs->psc_spistat;
wmb(); /* drain writebuffer */
} while ((stat & PSC_SPISTAT_SR) == 0);
cfg = hw->usedma ? 0 : PSC_SPICFG_DD_DISABLE;
cfg |= PSC_SPICFG_SET_LEN(8);
cfg |= PSC_SPICFG_RT_FIFO8 | PSC_SPICFG_TT_FIFO8;
/* use minimal allowed brg and div values as initial setting: */
cfg |= PSC_SPICFG_SET_BAUD(4) | PSC_SPICFG_SET_DIV(0);
#ifdef AU1550_SPI_DEBUG_LOOPBACK
cfg |= PSC_SPICFG_LB;
#endif
hw->regs->psc_spicfg = cfg;
wmb(); /* drain writebuffer */
au1550_spi_mask_ack_all(hw);
hw->regs->psc_spicfg |= PSC_SPICFG_DE_ENABLE;
wmb(); /* drain writebuffer */
do {
stat = hw->regs->psc_spistat;
wmb(); /* drain writebuffer */
} while ((stat & PSC_SPISTAT_DR) == 0);
au1550_spi_reset_fifos(hw);
}
static int au1550_spi_probe(struct platform_device *pdev)
{
struct au1550_spi *hw;
struct spi_controller *host;
struct resource *r;
int err = 0;
host = spi_alloc_host(&pdev->dev, sizeof(struct au1550_spi));
if (host == NULL) {
dev_err(&pdev->dev, "No memory for spi_controller\n");
err = -ENOMEM;
goto err_nomem;
}
/* the spi->mode bits understood by this driver: */
host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
host->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 24);
hw = spi_controller_get_devdata(host);
hw->host = host;
hw->pdata = dev_get_platdata(&pdev->dev);
hw->dev = &pdev->dev;
if (hw->pdata == NULL) {
dev_err(&pdev->dev, "No platform data supplied\n");
err = -ENOENT;
goto err_no_pdata;
}
r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!r) {
dev_err(&pdev->dev, "no IRQ\n");
err = -ENODEV;
goto err_no_iores;
}
hw->irq = r->start;
hw->usedma = 0;
r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
if (r) {
hw->dma_tx_id = r->start;
r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
if (r) {
hw->dma_rx_id = r->start;
if (usedma && ddma_memid) {
if (pdev->dev.dma_mask == NULL)
dev_warn(&pdev->dev, "no dma mask\n");
else
hw->usedma = 1;
}
}
}
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
dev_err(&pdev->dev, "no mmio resource\n");
err = -ENODEV;
goto err_no_iores;
}
hw->ioarea = request_mem_region(r->start, sizeof(psc_spi_t),
pdev->name);
if (!hw->ioarea) {
dev_err(&pdev->dev, "Cannot reserve iomem region\n");
err = -ENXIO;
goto err_no_iores;
}
hw->regs = (psc_spi_t __iomem *)ioremap(r->start, sizeof(psc_spi_t));
if (!hw->regs) {
dev_err(&pdev->dev, "cannot ioremap\n");
err = -ENXIO;
goto err_ioremap;
}
platform_set_drvdata(pdev, hw);
init_completion(&hw->host_done);
hw->bitbang.master = hw->host;
hw->bitbang.setup_transfer = au1550_spi_setupxfer;
hw->bitbang.chipselect = au1550_spi_chipsel;
hw->bitbang.txrx_bufs = au1550_spi_txrx_bufs;
if (hw->usedma) {
hw->dma_tx_ch = au1xxx_dbdma_chan_alloc(ddma_memid,
hw->dma_tx_id, NULL, (void *)hw);
if (hw->dma_tx_ch == 0) {
dev_err(&pdev->dev,
"Cannot allocate tx dma channel\n");
err = -ENXIO;
goto err_no_txdma;
}
au1xxx_dbdma_set_devwidth(hw->dma_tx_ch, 8);
if (au1xxx_dbdma_ring_alloc(hw->dma_tx_ch,
AU1550_SPI_DBDMA_DESCRIPTORS) == 0) {
dev_err(&pdev->dev,
"Cannot allocate tx dma descriptors\n");
err = -ENXIO;
goto err_no_txdma_descr;
}
hw->dma_rx_ch = au1xxx_dbdma_chan_alloc(hw->dma_rx_id,
ddma_memid, NULL, (void *)hw);
if (hw->dma_rx_ch == 0) {
dev_err(&pdev->dev,
"Cannot allocate rx dma channel\n");
err = -ENXIO;
goto err_no_rxdma;
}
au1xxx_dbdma_set_devwidth(hw->dma_rx_ch, 8);
if (au1xxx_dbdma_ring_alloc(hw->dma_rx_ch,
AU1550_SPI_DBDMA_DESCRIPTORS) == 0) {
dev_err(&pdev->dev,
"Cannot allocate rx dma descriptors\n");
err = -ENXIO;
goto err_no_rxdma_descr;
}
err = au1550_spi_dma_rxtmp_alloc(hw,
AU1550_SPI_DMA_RXTMP_MINSIZE);
if (err < 0) {
dev_err(&pdev->dev,
"Cannot allocate initial rx dma tmp buffer\n");
goto err_dma_rxtmp_alloc;
}
}
au1550_spi_bits_handlers_set(hw, 8);
err = request_irq(hw->irq, au1550_spi_irq, 0, pdev->name, hw);
if (err) {
dev_err(&pdev->dev, "Cannot claim IRQ\n");
goto err_no_irq;
}
host->bus_num = pdev->id;
host->num_chipselect = hw->pdata->num_chipselect;
/*
* precompute valid range for spi freq - from au1550 datasheet:
* psc_tempclk = psc_mainclk / (2 << DIV)
* spiclk = psc_tempclk / (2 * (BRG + 1))
* BRG valid range is 4..63
* DIV valid range is 0..3
* round the min and max frequencies to values that would still
* produce valid brg and div
*/
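/*
* Numeric illustration (hypothetical mainclk_hz = 48 MHz): min_div = 20
* and max_div = 2048 below, giving max_speed_hz = 48 MHz / 20 = 2.4 MHz
* and min_speed_hz = 48 MHz / 2049 + 1, roughly 23.4 kHz.
*/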
{
int min_div = (2 << 0) * (2 * (4 + 1));
int max_div = (2 << 3) * (2 * (63 + 1));
host->max_speed_hz = hw->pdata->mainclk_hz / min_div;
host->min_speed_hz =
hw->pdata->mainclk_hz / (max_div + 1) + 1;
}
au1550_spi_setup_psc_as_spi(hw);
err = spi_bitbang_start(&hw->bitbang);
if (err) {
dev_err(&pdev->dev, "Failed to register SPI host\n");
goto err_register;
}
dev_info(&pdev->dev,
"spi host registered: bus_num=%d num_chipselect=%d\n",
host->bus_num, host->num_chipselect);
return 0;
err_register:
free_irq(hw->irq, hw);
err_no_irq:
au1550_spi_dma_rxtmp_free(hw);
err_dma_rxtmp_alloc:
err_no_rxdma_descr:
if (hw->usedma)
au1xxx_dbdma_chan_free(hw->dma_rx_ch);
err_no_rxdma:
err_no_txdma_descr:
if (hw->usedma)
au1xxx_dbdma_chan_free(hw->dma_tx_ch);
err_no_txdma:
iounmap((void __iomem *)hw->regs);
err_ioremap:
release_mem_region(r->start, sizeof(psc_spi_t));
err_no_iores:
err_no_pdata:
spi_controller_put(hw->host);
err_nomem:
return err;
}
static void au1550_spi_remove(struct platform_device *pdev)
{
struct au1550_spi *hw = platform_get_drvdata(pdev);
dev_info(&pdev->dev, "spi host remove: bus_num=%d\n",
hw->host->bus_num);
spi_bitbang_stop(&hw->bitbang);
free_irq(hw->irq, hw);
iounmap((void __iomem *)hw->regs);
release_mem_region(hw->ioarea->start, sizeof(psc_spi_t));
if (hw->usedma) {
au1550_spi_dma_rxtmp_free(hw);
au1xxx_dbdma_chan_free(hw->dma_rx_ch);
au1xxx_dbdma_chan_free(hw->dma_tx_ch);
}
spi_controller_put(hw->host);
}
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:au1550-spi");
static struct platform_driver au1550_spi_drv = {
.probe = au1550_spi_probe,
.remove_new = au1550_spi_remove,
.driver = {
.name = "au1550-spi",
},
};
static int __init au1550_spi_init(void)
{
/*
* create memory device with 8 bits dev_devwidth
* needed for proper byte ordering to spi fifo
*/
switch (alchemy_get_cputype()) {
case ALCHEMY_CPU_AU1550:
case ALCHEMY_CPU_AU1200:
case ALCHEMY_CPU_AU1300:
break;
default:
return -ENODEV;
}
if (usedma) {
ddma_memid = au1xxx_ddma_add_device(&au1550_spi_mem_dbdev);
if (!ddma_memid)
printk(KERN_ERR "au1550-spi: cannot add memory dbdma device\n");
}
return platform_driver_register(&au1550_spi_drv);
}
module_init(au1550_spi_init);
static void __exit au1550_spi_exit(void)
{
if (usedma && ddma_memid)
au1xxx_ddma_del_device(ddma_memid);
platform_driver_unregister(&au1550_spi_drv);
}
module_exit(au1550_spi_exit);
MODULE_DESCRIPTION("Au1550 PSC SPI Driver");
MODULE_AUTHOR("Jan Nikitenko <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-au1550.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics 2018 - All Rights Reserved
* Author: Ludovic Barre <[email protected]> for STMicroelectronics.
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/spi/spi-mem.h>
#define QSPI_CR 0x00
#define CR_EN BIT(0)
#define CR_ABORT BIT(1)
#define CR_DMAEN BIT(2)
#define CR_TCEN BIT(3)
#define CR_SSHIFT BIT(4)
#define CR_DFM BIT(6)
#define CR_FSEL BIT(7)
#define CR_FTHRES_SHIFT 8
#define CR_TEIE BIT(16)
#define CR_TCIE BIT(17)
#define CR_FTIE BIT(18)
#define CR_SMIE BIT(19)
#define CR_TOIE BIT(20)
#define CR_APMS BIT(22)
#define CR_PRESC_MASK GENMASK(31, 24)
#define QSPI_DCR 0x04
#define DCR_FSIZE_MASK GENMASK(20, 16)
#define QSPI_SR 0x08
#define SR_TEF BIT(0)
#define SR_TCF BIT(1)
#define SR_FTF BIT(2)
#define SR_SMF BIT(3)
#define SR_TOF BIT(4)
#define SR_BUSY BIT(5)
#define SR_FLEVEL_MASK GENMASK(13, 8)
#define QSPI_FCR 0x0c
#define FCR_CTEF BIT(0)
#define FCR_CTCF BIT(1)
#define FCR_CSMF BIT(3)
#define QSPI_DLR 0x10
#define QSPI_CCR 0x14
#define CCR_INST_MASK GENMASK(7, 0)
#define CCR_IMODE_MASK GENMASK(9, 8)
#define CCR_ADMODE_MASK GENMASK(11, 10)
#define CCR_ADSIZE_MASK GENMASK(13, 12)
#define CCR_DCYC_MASK GENMASK(22, 18)
#define CCR_DMODE_MASK GENMASK(25, 24)
#define CCR_FMODE_MASK GENMASK(27, 26)
#define CCR_FMODE_INDW (0U << 26)
#define CCR_FMODE_INDR (1U << 26)
#define CCR_FMODE_APM (2U << 26)
#define CCR_FMODE_MM (3U << 26)
#define CCR_BUSWIDTH_0 0x0
#define CCR_BUSWIDTH_1 0x1
#define CCR_BUSWIDTH_2 0x2
#define CCR_BUSWIDTH_4 0x3
#define QSPI_AR 0x18
#define QSPI_ABR 0x1c
#define QSPI_DR 0x20
#define QSPI_PSMKR 0x24
#define QSPI_PSMAR 0x28
#define QSPI_PIR 0x2c
#define QSPI_LPTR 0x30
#define STM32_QSPI_MAX_MMAP_SZ SZ_256M
#define STM32_QSPI_MAX_NORCHIP 2
#define STM32_FIFO_TIMEOUT_US 30000
#define STM32_BUSY_TIMEOUT_US 100000
#define STM32_ABT_TIMEOUT_US 100000
#define STM32_COMP_TIMEOUT_MS 1000
#define STM32_AUTOSUSPEND_DELAY -1
struct stm32_qspi_flash {
u32 cs;
u32 presc;
};
struct stm32_qspi {
struct device *dev;
struct spi_controller *ctrl;
phys_addr_t phys_base;
void __iomem *io_base;
void __iomem *mm_base;
resource_size_t mm_size;
struct clk *clk;
u32 clk_rate;
struct stm32_qspi_flash flash[STM32_QSPI_MAX_NORCHIP];
struct completion data_completion;
struct completion match_completion;
u32 fmode;
struct dma_chan *dma_chtx;
struct dma_chan *dma_chrx;
struct completion dma_completion;
u32 cr_reg;
u32 dcr_reg;
unsigned long status_timeout;
/*
* to protect device configuration, could be different between
* 2 flash access (bk1, bk2)
*/
struct mutex lock;
};
static irqreturn_t stm32_qspi_irq(int irq, void *dev_id)
{
struct stm32_qspi *qspi = (struct stm32_qspi *)dev_id;
u32 cr, sr;
cr = readl_relaxed(qspi->io_base + QSPI_CR);
sr = readl_relaxed(qspi->io_base + QSPI_SR);
if (cr & CR_SMIE && sr & SR_SMF) {
/* disable irq */
cr &= ~CR_SMIE;
writel_relaxed(cr, qspi->io_base + QSPI_CR);
complete(&qspi->match_completion);
return IRQ_HANDLED;
}
if (sr & (SR_TEF | SR_TCF)) {
/* disable irq */
cr &= ~CR_TCIE & ~CR_TEIE;
writel_relaxed(cr, qspi->io_base + QSPI_CR);
complete(&qspi->data_completion);
}
return IRQ_HANDLED;
}
static void stm32_qspi_read_fifo(u8 *val, void __iomem *addr)
{
*val = readb_relaxed(addr);
}
static void stm32_qspi_write_fifo(u8 *val, void __iomem *addr)
{
writeb_relaxed(*val, addr);
}
static int stm32_qspi_tx_poll(struct stm32_qspi *qspi,
const struct spi_mem_op *op)
{
void (*tx_fifo)(u8 *val, void __iomem *addr);
u32 len = op->data.nbytes, sr;
u8 *buf;
int ret;
if (op->data.dir == SPI_MEM_DATA_IN) {
tx_fifo = stm32_qspi_read_fifo;
buf = op->data.buf.in;
} else {
tx_fifo = stm32_qspi_write_fifo;
buf = (u8 *)op->data.buf.out;
}
while (len--) {
ret = readl_relaxed_poll_timeout_atomic(qspi->io_base + QSPI_SR,
sr, (sr & SR_FTF), 1,
STM32_FIFO_TIMEOUT_US);
if (ret) {
dev_err(qspi->dev, "fifo timeout (len:%d stat:%#x)\n",
len, sr);
return ret;
}
tx_fifo(buf++, qspi->io_base + QSPI_DR);
}
return 0;
}
static int stm32_qspi_tx_mm(struct stm32_qspi *qspi,
const struct spi_mem_op *op)
{
memcpy_fromio(op->data.buf.in, qspi->mm_base + op->addr.val,
op->data.nbytes);
return 0;
}
static void stm32_qspi_dma_callback(void *arg)
{
struct completion *dma_completion = arg;
complete(dma_completion);
}
static int stm32_qspi_tx_dma(struct stm32_qspi *qspi,
const struct spi_mem_op *op)
{
struct dma_async_tx_descriptor *desc;
enum dma_transfer_direction dma_dir;
struct dma_chan *dma_ch;
struct sg_table sgt;
dma_cookie_t cookie;
u32 cr, t_out;
int err;
if (op->data.dir == SPI_MEM_DATA_IN) {
dma_dir = DMA_DEV_TO_MEM;
dma_ch = qspi->dma_chrx;
} else {
dma_dir = DMA_MEM_TO_DEV;
dma_ch = qspi->dma_chtx;
}
/*
* spi_map_buf returns -EINVAL if the buffer is not DMA-able
* (DMA-able: in vmalloc | kmap | virt_addr_valid)
*/
err = spi_controller_dma_map_mem_op_data(qspi->ctrl, op, &sgt);
if (err)
return err;
desc = dmaengine_prep_slave_sg(dma_ch, sgt.sgl, sgt.nents,
dma_dir, DMA_PREP_INTERRUPT);
if (!desc) {
err = -ENOMEM;
goto out_unmap;
}
cr = readl_relaxed(qspi->io_base + QSPI_CR);
reinit_completion(&qspi->dma_completion);
desc->callback = stm32_qspi_dma_callback;
desc->callback_param = &qspi->dma_completion;
cookie = dmaengine_submit(desc);
err = dma_submit_error(cookie);
if (err)
goto out;
dma_async_issue_pending(dma_ch);
writel_relaxed(cr | CR_DMAEN, qspi->io_base + QSPI_CR);
t_out = sgt.nents * STM32_COMP_TIMEOUT_MS;
if (!wait_for_completion_timeout(&qspi->dma_completion,
msecs_to_jiffies(t_out)))
err = -ETIMEDOUT;
if (err)
dmaengine_terminate_all(dma_ch);
out:
writel_relaxed(cr & ~CR_DMAEN, qspi->io_base + QSPI_CR);
out_unmap:
spi_controller_dma_unmap_mem_op_data(qspi->ctrl, op, &sgt);
return err;
}
static int stm32_qspi_tx(struct stm32_qspi *qspi, const struct spi_mem_op *op)
{
if (!op->data.nbytes)
return 0;
if (qspi->fmode == CCR_FMODE_MM)
return stm32_qspi_tx_mm(qspi, op);
else if (((op->data.dir == SPI_MEM_DATA_IN && qspi->dma_chrx) ||
(op->data.dir == SPI_MEM_DATA_OUT && qspi->dma_chtx)) &&
op->data.nbytes > 4)
if (!stm32_qspi_tx_dma(qspi, op))
return 0;
return stm32_qspi_tx_poll(qspi, op);
}
static int stm32_qspi_wait_nobusy(struct stm32_qspi *qspi)
{
u32 sr;
return readl_relaxed_poll_timeout_atomic(qspi->io_base + QSPI_SR, sr,
!(sr & SR_BUSY), 1,
STM32_BUSY_TIMEOUT_US);
}
static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi)
{
u32 cr, sr;
int err = 0;
if ((readl_relaxed(qspi->io_base + QSPI_SR) & SR_TCF) ||
qspi->fmode == CCR_FMODE_APM)
goto out;
reinit_completion(&qspi->data_completion);
cr = readl_relaxed(qspi->io_base + QSPI_CR);
writel_relaxed(cr | CR_TCIE | CR_TEIE, qspi->io_base + QSPI_CR);
if (!wait_for_completion_timeout(&qspi->data_completion,
msecs_to_jiffies(STM32_COMP_TIMEOUT_MS))) {
err = -ETIMEDOUT;
} else {
sr = readl_relaxed(qspi->io_base + QSPI_SR);
if (sr & SR_TEF)
err = -EIO;
}
out:
/* clear flags */
writel_relaxed(FCR_CTCF | FCR_CTEF, qspi->io_base + QSPI_FCR);
if (!err)
err = stm32_qspi_wait_nobusy(qspi);
return err;
}
static int stm32_qspi_wait_poll_status(struct stm32_qspi *qspi)
{
u32 cr;
reinit_completion(&qspi->match_completion);
cr = readl_relaxed(qspi->io_base + QSPI_CR);
writel_relaxed(cr | CR_SMIE, qspi->io_base + QSPI_CR);
if (!wait_for_completion_timeout(&qspi->match_completion,
msecs_to_jiffies(qspi->status_timeout)))
return -ETIMEDOUT;
writel_relaxed(FCR_CSMF, qspi->io_base + QSPI_FCR);
return 0;
}
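/*
* Map an spi-mem bus width onto the CCR mode field encoding: widths of
* 0, 1 and 2 lines translate directly, while a 4-line (quad) width maps
* to CCR_BUSWIDTH_4 (0x3).
*/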
static int stm32_qspi_get_mode(u8 buswidth)
{
if (buswidth == 4)
return CCR_BUSWIDTH_4;
return buswidth;
}
static int stm32_qspi_send(struct spi_device *spi, const struct spi_mem_op *op)
{
struct stm32_qspi *qspi = spi_controller_get_devdata(spi->master);
struct stm32_qspi_flash *flash = &qspi->flash[spi_get_chipselect(spi, 0)];
u32 ccr, cr;
int timeout, err = 0, err_poll_status = 0;
dev_dbg(qspi->dev, "cmd:%#x mode:%d.%d.%d.%d addr:%#llx len:%#x\n",
op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
op->dummy.buswidth, op->data.buswidth,
op->addr.val, op->data.nbytes);
cr = readl_relaxed(qspi->io_base + QSPI_CR);
cr &= ~CR_PRESC_MASK & ~CR_FSEL;
cr |= FIELD_PREP(CR_PRESC_MASK, flash->presc);
cr |= FIELD_PREP(CR_FSEL, flash->cs);
writel_relaxed(cr, qspi->io_base + QSPI_CR);
if (op->data.nbytes)
writel_relaxed(op->data.nbytes - 1,
qspi->io_base + QSPI_DLR);
ccr = qspi->fmode;
ccr |= FIELD_PREP(CCR_INST_MASK, op->cmd.opcode);
ccr |= FIELD_PREP(CCR_IMODE_MASK,
stm32_qspi_get_mode(op->cmd.buswidth));
if (op->addr.nbytes) {
ccr |= FIELD_PREP(CCR_ADMODE_MASK,
stm32_qspi_get_mode(op->addr.buswidth));
ccr |= FIELD_PREP(CCR_ADSIZE_MASK, op->addr.nbytes - 1);
}
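/*
* Convert dummy bytes into dummy clock cycles for the DCYC field: for
* example (hypothetical transfer), 3 dummy bytes on a quad (4-line) bus
* become 3 * 8 / 4 = 6 cycles.
*/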
if (op->dummy.nbytes)
ccr |= FIELD_PREP(CCR_DCYC_MASK,
op->dummy.nbytes * 8 / op->dummy.buswidth);
if (op->data.nbytes) {
ccr |= FIELD_PREP(CCR_DMODE_MASK,
stm32_qspi_get_mode(op->data.buswidth));
}
writel_relaxed(ccr, qspi->io_base + QSPI_CCR);
if (op->addr.nbytes && qspi->fmode != CCR_FMODE_MM)
writel_relaxed(op->addr.val, qspi->io_base + QSPI_AR);
if (qspi->fmode == CCR_FMODE_APM)
err_poll_status = stm32_qspi_wait_poll_status(qspi);
err = stm32_qspi_tx(qspi, op);
/*
* Abort in:
* -error case
* -memory-mapped read: prefetching must be stopped if we read the last
* byte of the device (device size - fifo size). Since the device size is
* not known, prefetching is always stopped.
*/
if (err || err_poll_status || qspi->fmode == CCR_FMODE_MM)
goto abort;
/* wait end of tx in indirect mode */
err = stm32_qspi_wait_cmd(qspi);
if (err)
goto abort;
return 0;
abort:
cr = readl_relaxed(qspi->io_base + QSPI_CR) | CR_ABORT;
writel_relaxed(cr, qspi->io_base + QSPI_CR);
/* wait clear of abort bit by hw */
timeout = readl_relaxed_poll_timeout_atomic(qspi->io_base + QSPI_CR,
cr, !(cr & CR_ABORT), 1,
STM32_ABT_TIMEOUT_US);
writel_relaxed(FCR_CTCF | FCR_CSMF, qspi->io_base + QSPI_FCR);
if (err || err_poll_status || timeout)
dev_err(qspi->dev, "%s err:%d err_poll_status:%d abort timeout:%d\n",
__func__, err, err_poll_status, timeout);
return err;
}
static int stm32_qspi_poll_status(struct spi_mem *mem, const struct spi_mem_op *op,
u16 mask, u16 match,
unsigned long initial_delay_us,
unsigned long polling_rate_us,
unsigned long timeout_ms)
{
struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
int ret;
if (!spi_mem_supports_op(mem, op))
return -EOPNOTSUPP;
ret = pm_runtime_resume_and_get(qspi->dev);
if (ret < 0)
return ret;
mutex_lock(&qspi->lock);
writel_relaxed(mask, qspi->io_base + QSPI_PSMKR);
writel_relaxed(match, qspi->io_base + QSPI_PSMAR);
qspi->fmode = CCR_FMODE_APM;
qspi->status_timeout = timeout_ms;
ret = stm32_qspi_send(mem->spi, op);
mutex_unlock(&qspi->lock);
pm_runtime_mark_last_busy(qspi->dev);
pm_runtime_put_autosuspend(qspi->dev);
return ret;
}
static int stm32_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
int ret;
ret = pm_runtime_resume_and_get(qspi->dev);
if (ret < 0)
return ret;
mutex_lock(&qspi->lock);
if (op->data.dir == SPI_MEM_DATA_IN && op->data.nbytes)
qspi->fmode = CCR_FMODE_INDR;
else
qspi->fmode = CCR_FMODE_INDW;
ret = stm32_qspi_send(mem->spi, op);
mutex_unlock(&qspi->lock);
pm_runtime_mark_last_busy(qspi->dev);
pm_runtime_put_autosuspend(qspi->dev);
return ret;
}
static int stm32_qspi_dirmap_create(struct spi_mem_dirmap_desc *desc)
{
struct stm32_qspi *qspi = spi_controller_get_devdata(desc->mem->spi->master);
if (desc->info.op_tmpl.data.dir == SPI_MEM_DATA_OUT)
return -EOPNOTSUPP;
/* should never happen, as mm_base == null is an error probe exit condition */
if (!qspi->mm_base && desc->info.op_tmpl.data.dir == SPI_MEM_DATA_IN)
return -EOPNOTSUPP;
if (!qspi->mm_size)
return -EOPNOTSUPP;
return 0;
}
static ssize_t stm32_qspi_dirmap_read(struct spi_mem_dirmap_desc *desc,
u64 offs, size_t len, void *buf)
{
struct stm32_qspi *qspi = spi_controller_get_devdata(desc->mem->spi->master);
struct spi_mem_op op;
u32 addr_max;
int ret;
ret = pm_runtime_resume_and_get(qspi->dev);
if (ret < 0)
return ret;
mutex_lock(&qspi->lock);
/* Make a local copy of desc->info.op_tmpl and complete the dirmap
* spi_mem_op template with offs, len and *buf so that all the needed
* transfer information ends up in one struct spi_mem_op
*/
memcpy(&op, &desc->info.op_tmpl, sizeof(struct spi_mem_op));
dev_dbg(qspi->dev, "%s len = 0x%zx offs = 0x%llx buf = 0x%p\n", __func__, len, offs, buf);
op.data.nbytes = len;
op.addr.val = desc->info.offset + offs;
op.data.buf.in = buf;
addr_max = op.addr.val + op.data.nbytes + 1;
if (addr_max < qspi->mm_size && op.addr.buswidth)
qspi->fmode = CCR_FMODE_MM;
else
qspi->fmode = CCR_FMODE_INDR;
ret = stm32_qspi_send(desc->mem->spi, &op);
mutex_unlock(&qspi->lock);
pm_runtime_mark_last_busy(qspi->dev);
pm_runtime_put_autosuspend(qspi->dev);
return ret ?: len;
}
static int stm32_qspi_transfer_one_message(struct spi_controller *ctrl,
struct spi_message *msg)
{
struct stm32_qspi *qspi = spi_controller_get_devdata(ctrl);
struct spi_transfer *transfer;
struct spi_device *spi = msg->spi;
struct spi_mem_op op;
int ret = 0;
if (!spi_get_csgpiod(spi, 0))
return -EOPNOTSUPP;
ret = pm_runtime_resume_and_get(qspi->dev);
if (ret < 0)
return ret;
mutex_lock(&qspi->lock);
gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), true);
list_for_each_entry(transfer, &msg->transfers, transfer_list) {
u8 dummy_bytes = 0;
memset(&op, 0, sizeof(op));
dev_dbg(qspi->dev, "tx_buf:%p tx_nbits:%d rx_buf:%p rx_nbits:%d len:%d dummy_data:%d\n",
transfer->tx_buf, transfer->tx_nbits,
transfer->rx_buf, transfer->rx_nbits,
transfer->len, transfer->dummy_data);
/*
 * The QSPI hardware supports dummy-byte transfers.
 * If the current transfer carries only dummy bytes, merge it with the
 * next transfer in order to satisfy the QSPI block constraint.
 */
if (transfer->dummy_data) {
op.dummy.buswidth = transfer->tx_nbits;
op.dummy.nbytes = transfer->len;
dummy_bytes = transfer->len;
/* if this happens, the message was not correctly built */
if (list_is_last(&transfer->transfer_list, &msg->transfers)) {
ret = -EINVAL;
goto end_of_transfer;
}
transfer = list_next_entry(transfer, transfer_list);
}
op.data.nbytes = transfer->len;
if (transfer->rx_buf) {
qspi->fmode = CCR_FMODE_INDR;
op.data.buswidth = transfer->rx_nbits;
op.data.dir = SPI_MEM_DATA_IN;
op.data.buf.in = transfer->rx_buf;
} else {
qspi->fmode = CCR_FMODE_INDW;
op.data.buswidth = transfer->tx_nbits;
op.data.dir = SPI_MEM_DATA_OUT;
op.data.buf.out = transfer->tx_buf;
}
ret = stm32_qspi_send(spi, &op);
if (ret)
goto end_of_transfer;
msg->actual_length += transfer->len + dummy_bytes;
}
end_of_transfer:
gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), false);
mutex_unlock(&qspi->lock);
msg->status = ret;
spi_finalize_current_message(ctrl);
pm_runtime_mark_last_busy(qspi->dev);
pm_runtime_put_autosuspend(qspi->dev);
return ret;
}
static int stm32_qspi_setup(struct spi_device *spi)
{
struct spi_controller *ctrl = spi->master;
struct stm32_qspi *qspi = spi_controller_get_devdata(ctrl);
struct stm32_qspi_flash *flash;
u32 presc, mode;
int ret;
if (ctrl->busy)
return -EBUSY;
if (!spi->max_speed_hz)
return -EINVAL;
mode = spi->mode & (SPI_TX_OCTAL | SPI_RX_OCTAL);
if ((mode == SPI_TX_OCTAL || mode == SPI_RX_OCTAL) ||
((mode == (SPI_TX_OCTAL | SPI_RX_OCTAL)) &&
gpiod_count(qspi->dev, "cs") == -ENOENT)) {
dev_err(qspi->dev, "spi-rx-bus-width\\/spi-tx-bus-width\\/cs-gpios\n");
dev_err(qspi->dev, "configuration not supported\n");
return -EINVAL;
}
ret = pm_runtime_resume_and_get(qspi->dev);
if (ret < 0)
return ret;
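/*
 * Illustrative arithmetic, not part of the original driver (the clock
 * values below are hypothetical): with qspi->clk_rate at 200 MHz and
 * spi->max_speed_hz at 50 MHz, DIV_ROUND_UP(200 MHz, 50 MHz) - 1 = 3,
 * so a prescaler register value of 3 yields 200 MHz / (3 + 1) = 50 MHz
 * on the QSPI bus.
 */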
presc = DIV_ROUND_UP(qspi->clk_rate, spi->max_speed_hz) - 1;
flash = &qspi->flash[spi_get_chipselect(spi, 0)];
flash->cs = spi_get_chipselect(spi, 0);
flash->presc = presc;
mutex_lock(&qspi->lock);
qspi->cr_reg = CR_APMS | 3 << CR_FTHRES_SHIFT | CR_SSHIFT | CR_EN;
/*
 * Dual flash mode is only enabled when SPI_TX_OCTAL and SPI_RX_OCTAL
 * are both set in spi->mode and the "cs-gpios" property is found in DT
 */
if (mode == (SPI_TX_OCTAL | SPI_RX_OCTAL)) {
qspi->cr_reg |= CR_DFM;
dev_dbg(qspi->dev, "Dual flash mode enable");
}
writel_relaxed(qspi->cr_reg, qspi->io_base + QSPI_CR);
/* set dcr fsize to max address */
qspi->dcr_reg = DCR_FSIZE_MASK;
writel_relaxed(qspi->dcr_reg, qspi->io_base + QSPI_DCR);
mutex_unlock(&qspi->lock);
pm_runtime_mark_last_busy(qspi->dev);
pm_runtime_put_autosuspend(qspi->dev);
return 0;
}
static int stm32_qspi_dma_setup(struct stm32_qspi *qspi)
{
struct dma_slave_config dma_cfg;
struct device *dev = qspi->dev;
int ret = 0;
memset(&dma_cfg, 0, sizeof(dma_cfg));
dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
dma_cfg.src_addr = qspi->phys_base + QSPI_DR;
dma_cfg.dst_addr = qspi->phys_base + QSPI_DR;
dma_cfg.src_maxburst = 4;
dma_cfg.dst_maxburst = 4;
qspi->dma_chrx = dma_request_chan(dev, "rx");
if (IS_ERR(qspi->dma_chrx)) {
ret = PTR_ERR(qspi->dma_chrx);
qspi->dma_chrx = NULL;
if (ret == -EPROBE_DEFER)
goto out;
} else {
if (dmaengine_slave_config(qspi->dma_chrx, &dma_cfg)) {
dev_err(dev, "dma rx config failed\n");
dma_release_channel(qspi->dma_chrx);
qspi->dma_chrx = NULL;
}
}
qspi->dma_chtx = dma_request_chan(dev, "tx");
if (IS_ERR(qspi->dma_chtx)) {
ret = PTR_ERR(qspi->dma_chtx);
qspi->dma_chtx = NULL;
} else {
if (dmaengine_slave_config(qspi->dma_chtx, &dma_cfg)) {
dev_err(dev, "dma tx config failed\n");
dma_release_channel(qspi->dma_chtx);
qspi->dma_chtx = NULL;
}
}
out:
init_completion(&qspi->dma_completion);
if (ret != -EPROBE_DEFER)
ret = 0;
return ret;
}
static void stm32_qspi_dma_free(struct stm32_qspi *qspi)
{
if (qspi->dma_chtx)
dma_release_channel(qspi->dma_chtx);
if (qspi->dma_chrx)
dma_release_channel(qspi->dma_chrx);
}
/*
 * No special host constraints, so use the default spi_mem_default_supports_op
 * to check the supported modes.
 */
static const struct spi_controller_mem_ops stm32_qspi_mem_ops = {
.exec_op = stm32_qspi_exec_op,
.dirmap_create = stm32_qspi_dirmap_create,
.dirmap_read = stm32_qspi_dirmap_read,
.poll_status = stm32_qspi_poll_status,
};
static int stm32_qspi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct spi_controller *ctrl;
struct reset_control *rstc;
struct stm32_qspi *qspi;
struct resource *res;
int ret, irq;
ctrl = devm_spi_alloc_master(dev, sizeof(*qspi));
if (!ctrl)
return -ENOMEM;
qspi = spi_controller_get_devdata(ctrl);
qspi->ctrl = ctrl;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi");
qspi->io_base = devm_ioremap_resource(dev, res);
if (IS_ERR(qspi->io_base))
return PTR_ERR(qspi->io_base);
qspi->phys_base = res->start;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mm");
qspi->mm_base = devm_ioremap_resource(dev, res);
if (IS_ERR(qspi->mm_base))
return PTR_ERR(qspi->mm_base);
qspi->mm_size = resource_size(res);
if (qspi->mm_size > STM32_QSPI_MAX_MMAP_SZ)
return -EINVAL;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
ret = devm_request_irq(dev, irq, stm32_qspi_irq, 0,
dev_name(dev), qspi);
if (ret) {
dev_err(dev, "failed to request irq\n");
return ret;
}
init_completion(&qspi->data_completion);
init_completion(&qspi->match_completion);
qspi->clk = devm_clk_get(dev, NULL);
if (IS_ERR(qspi->clk))
return PTR_ERR(qspi->clk);
qspi->clk_rate = clk_get_rate(qspi->clk);
if (!qspi->clk_rate)
return -EINVAL;
ret = clk_prepare_enable(qspi->clk);
if (ret) {
dev_err(dev, "can not enable the clock\n");
return ret;
}
rstc = devm_reset_control_get_exclusive(dev, NULL);
if (IS_ERR(rstc)) {
ret = PTR_ERR(rstc);
if (ret == -EPROBE_DEFER)
goto err_clk_disable;
} else {
reset_control_assert(rstc);
udelay(2);
reset_control_deassert(rstc);
}
qspi->dev = dev;
platform_set_drvdata(pdev, qspi);
ret = stm32_qspi_dma_setup(qspi);
if (ret)
goto err_dma_free;
mutex_init(&qspi->lock);
ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_OCTAL
| SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_OCTAL;
ctrl->setup = stm32_qspi_setup;
ctrl->bus_num = -1;
ctrl->mem_ops = &stm32_qspi_mem_ops;
ctrl->use_gpio_descriptors = true;
ctrl->transfer_one_message = stm32_qspi_transfer_one_message;
ctrl->num_chipselect = STM32_QSPI_MAX_NORCHIP;
ctrl->dev.of_node = dev->of_node;
pm_runtime_set_autosuspend_delay(dev, STM32_AUTOSUSPEND_DELAY);
pm_runtime_use_autosuspend(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
pm_runtime_get_noresume(dev);
ret = spi_register_master(ctrl);
if (ret)
goto err_pm_runtime_free;
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
err_pm_runtime_free:
pm_runtime_get_sync(qspi->dev);
/* disable qspi */
writel_relaxed(0, qspi->io_base + QSPI_CR);
mutex_destroy(&qspi->lock);
pm_runtime_put_noidle(qspi->dev);
pm_runtime_disable(qspi->dev);
pm_runtime_set_suspended(qspi->dev);
pm_runtime_dont_use_autosuspend(qspi->dev);
err_dma_free:
stm32_qspi_dma_free(qspi);
err_clk_disable:
clk_disable_unprepare(qspi->clk);
return ret;
}
static void stm32_qspi_remove(struct platform_device *pdev)
{
struct stm32_qspi *qspi = platform_get_drvdata(pdev);
pm_runtime_get_sync(qspi->dev);
spi_unregister_master(qspi->ctrl);
/* disable qspi */
writel_relaxed(0, qspi->io_base + QSPI_CR);
stm32_qspi_dma_free(qspi);
mutex_destroy(&qspi->lock);
pm_runtime_put_noidle(qspi->dev);
pm_runtime_disable(qspi->dev);
pm_runtime_set_suspended(qspi->dev);
pm_runtime_dont_use_autosuspend(qspi->dev);
clk_disable_unprepare(qspi->clk);
}
static int __maybe_unused stm32_qspi_runtime_suspend(struct device *dev)
{
struct stm32_qspi *qspi = dev_get_drvdata(dev);
clk_disable_unprepare(qspi->clk);
return 0;
}
static int __maybe_unused stm32_qspi_runtime_resume(struct device *dev)
{
struct stm32_qspi *qspi = dev_get_drvdata(dev);
return clk_prepare_enable(qspi->clk);
}
static int __maybe_unused stm32_qspi_suspend(struct device *dev)
{
pinctrl_pm_select_sleep_state(dev);
return pm_runtime_force_suspend(dev);
}
static int __maybe_unused stm32_qspi_resume(struct device *dev)
{
struct stm32_qspi *qspi = dev_get_drvdata(dev);
int ret;
ret = pm_runtime_force_resume(dev);
if (ret < 0)
return ret;
pinctrl_pm_select_default_state(dev);
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return ret;
writel_relaxed(qspi->cr_reg, qspi->io_base + QSPI_CR);
writel_relaxed(qspi->dcr_reg, qspi->io_base + QSPI_DCR);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
}
static const struct dev_pm_ops stm32_qspi_pm_ops = {
SET_RUNTIME_PM_OPS(stm32_qspi_runtime_suspend,
stm32_qspi_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(stm32_qspi_suspend, stm32_qspi_resume)
};
static const struct of_device_id stm32_qspi_match[] = {
{.compatible = "st,stm32f469-qspi"},
{}
};
MODULE_DEVICE_TABLE(of, stm32_qspi_match);
static struct platform_driver stm32_qspi_driver = {
.probe = stm32_qspi_probe,
.remove_new = stm32_qspi_remove,
.driver = {
.name = "stm32-qspi",
.of_match_table = stm32_qspi_match,
.pm = &stm32_qspi_pm_ops,
},
};
module_platform_driver(stm32_qspi_driver);
MODULE_AUTHOR("Ludovic Barre <[email protected]>");
MODULE_DESCRIPTION("STMicroelectronics STM32 quad spi driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/spi/spi-stm32-qspi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* SPI controller driver for the Mikrotik RB4xx boards
*
* Copyright (C) 2010 Gabor Juhos <[email protected]>
* Copyright (C) 2015 Bert Vermeulen <[email protected]>
*
* This file was based on the patches for Linux 2.6.27.39 published by
* MikroTik for their RouterBoard 4xx series devices.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/spi/spi.h>
#include <linux/of.h>
#include <asm/mach-ath79/ar71xx_regs.h>
struct rb4xx_spi {
void __iomem *base;
struct clk *clk;
};
static inline u32 rb4xx_read(struct rb4xx_spi *rbspi, u32 reg)
{
return __raw_readl(rbspi->base + reg);
}
static inline void rb4xx_write(struct rb4xx_spi *rbspi, u32 reg, u32 value)
{
__raw_writel(value, rbspi->base + reg);
}
static inline void do_spi_clk(struct rb4xx_spi *rbspi, u32 spi_ioc, int value)
{
u32 regval;
regval = spi_ioc;
if (value & BIT(0))
regval |= AR71XX_SPI_IOC_DO;
rb4xx_write(rbspi, AR71XX_SPI_REG_IOC, regval);
rb4xx_write(rbspi, AR71XX_SPI_REG_IOC, regval | AR71XX_SPI_IOC_CLK);
}
static void do_spi_byte(struct rb4xx_spi *rbspi, u32 spi_ioc, u8 byte)
{
int i;
for (i = 7; i >= 0; i--)
do_spi_clk(rbspi, spi_ioc, byte >> i);
}
/* The CS2 pin is used to clock in a second bit per clock cycle. */
static inline void do_spi_clk_two(struct rb4xx_spi *rbspi, u32 spi_ioc,
u8 value)
{
u32 regval;
regval = spi_ioc;
if (value & BIT(1))
regval |= AR71XX_SPI_IOC_DO;
if (value & BIT(0))
regval |= AR71XX_SPI_IOC_CS2;
rb4xx_write(rbspi, AR71XX_SPI_REG_IOC, regval);
rb4xx_write(rbspi, AR71XX_SPI_REG_IOC, regval | AR71XX_SPI_IOC_CLK);
}
/* Two bits at a time, msb first */
static void do_spi_byte_two(struct rb4xx_spi *rbspi, u32 spi_ioc, u8 byte)
{
do_spi_clk_two(rbspi, spi_ioc, byte >> 6);
do_spi_clk_two(rbspi, spi_ioc, byte >> 4);
do_spi_clk_two(rbspi, spi_ioc, byte >> 2);
do_spi_clk_two(rbspi, spi_ioc, byte >> 0);
}
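/*
 * Illustrative example, not part of the original driver: for the byte
 * 0b10110100, do_spi_byte_two() shifts it right by 6, 4, 2 and 0, so
 * do_spi_clk_two() sees the bit pairs 10, 11, 01 and 00 in that order.
 * Bit 1 of each pair drives the DO line and bit 0 drives the CS2 line,
 * clocking two bits per SPI clock cycle, msb first.
 */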
static void rb4xx_set_cs(struct spi_device *spi, bool enable)
{
struct rb4xx_spi *rbspi = spi_controller_get_devdata(spi->controller);
/*
* Setting CS is done along with bitbanging the actual values,
* since it's all on the same hardware register. However the
* CPLD needs CS deselected after every command.
*/
if (enable)
rb4xx_write(rbspi, AR71XX_SPI_REG_IOC,
AR71XX_SPI_IOC_CS0 | AR71XX_SPI_IOC_CS1);
}
static int rb4xx_transfer_one(struct spi_controller *host,
struct spi_device *spi, struct spi_transfer *t)
{
struct rb4xx_spi *rbspi = spi_controller_get_devdata(host);
int i;
u32 spi_ioc;
u8 *rx_buf;
const u8 *tx_buf;
/*
* Prime the SPI register with the SPI device selected. The m25p80 boot
* flash and CPLD share the CS0 pin. This works because the CPLD's
* command set was designed to almost not clash with that of the
* boot flash.
*/
if (spi_get_chipselect(spi, 0) == 2)
/* MMC */
spi_ioc = AR71XX_SPI_IOC_CS0;
else
/* Boot flash and CPLD */
spi_ioc = AR71XX_SPI_IOC_CS1;
tx_buf = t->tx_buf;
rx_buf = t->rx_buf;
for (i = 0; i < t->len; ++i) {
if (t->tx_nbits == SPI_NBITS_DUAL)
/* CPLD can use two-wire transfers */
do_spi_byte_two(rbspi, spi_ioc, tx_buf[i]);
else
do_spi_byte(rbspi, spi_ioc, tx_buf[i]);
if (!rx_buf)
continue;
rx_buf[i] = rb4xx_read(rbspi, AR71XX_SPI_REG_RDS);
}
spi_finalize_current_transfer(host);
return 0;
}
static int rb4xx_spi_probe(struct platform_device *pdev)
{
struct spi_controller *host;
struct clk *ahb_clk;
struct rb4xx_spi *rbspi;
int err;
void __iomem *spi_base;
spi_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(spi_base))
return PTR_ERR(spi_base);
host = devm_spi_alloc_host(&pdev->dev, sizeof(*rbspi));
if (!host)
return -ENOMEM;
ahb_clk = devm_clk_get(&pdev->dev, "ahb");
if (IS_ERR(ahb_clk))
return PTR_ERR(ahb_clk);
host->dev.of_node = pdev->dev.of_node;
host->bus_num = 0;
host->num_chipselect = 3;
host->mode_bits = SPI_TX_DUAL;
host->bits_per_word_mask = SPI_BPW_MASK(8);
host->flags = SPI_CONTROLLER_MUST_TX;
host->transfer_one = rb4xx_transfer_one;
host->set_cs = rb4xx_set_cs;
rbspi = spi_controller_get_devdata(host);
rbspi->base = spi_base;
rbspi->clk = ahb_clk;
platform_set_drvdata(pdev, rbspi);
err = devm_spi_register_controller(&pdev->dev, host);
if (err) {
dev_err(&pdev->dev, "failed to register SPI host\n");
return err;
}
err = clk_prepare_enable(ahb_clk);
if (err)
return err;
/* Enable SPI */
rb4xx_write(rbspi, AR71XX_SPI_REG_FS, AR71XX_SPI_FS_GPIO);
return 0;
}
static void rb4xx_spi_remove(struct platform_device *pdev)
{
struct rb4xx_spi *rbspi = platform_get_drvdata(pdev);
clk_disable_unprepare(rbspi->clk);
}
static const struct of_device_id rb4xx_spi_dt_match[] = {
{ .compatible = "mikrotik,rb4xx-spi" },
{ },
};
MODULE_DEVICE_TABLE(of, rb4xx_spi_dt_match);
static struct platform_driver rb4xx_spi_drv = {
.probe = rb4xx_spi_probe,
.remove_new = rb4xx_spi_remove,
.driver = {
.name = "rb4xx-spi",
.of_match_table = of_match_ptr(rb4xx_spi_dt_match),
},
};
module_platform_driver(rb4xx_spi_drv);
MODULE_DESCRIPTION("Mikrotik RB4xx SPI controller driver");
MODULE_AUTHOR("Gabor Juhos <[email protected]>");
MODULE_AUTHOR("Bert Vermeulen <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/spi/spi-rb4xx.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Designware SPI core controller driver (refer pxa2xx_spi.c)
*
* Copyright (c) 2009, Intel Corporation.
*/
#include <linux/bitfield.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/highmem.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/string.h>
#include <linux/of.h>
#include "spi-dw.h"
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#endif
/* Slave spi_device related */
struct dw_spi_chip_data {
u32 cr0;
u32 rx_sample_dly; /* RX sample delay */
};
#ifdef CONFIG_DEBUG_FS
#define DW_SPI_DBGFS_REG(_name, _off) \
{ \
.name = _name, \
.offset = _off, \
}
static const struct debugfs_reg32 dw_spi_dbgfs_regs[] = {
DW_SPI_DBGFS_REG("CTRLR0", DW_SPI_CTRLR0),
DW_SPI_DBGFS_REG("CTRLR1", DW_SPI_CTRLR1),
DW_SPI_DBGFS_REG("SSIENR", DW_SPI_SSIENR),
DW_SPI_DBGFS_REG("SER", DW_SPI_SER),
DW_SPI_DBGFS_REG("BAUDR", DW_SPI_BAUDR),
DW_SPI_DBGFS_REG("TXFTLR", DW_SPI_TXFTLR),
DW_SPI_DBGFS_REG("RXFTLR", DW_SPI_RXFTLR),
DW_SPI_DBGFS_REG("TXFLR", DW_SPI_TXFLR),
DW_SPI_DBGFS_REG("RXFLR", DW_SPI_RXFLR),
DW_SPI_DBGFS_REG("SR", DW_SPI_SR),
DW_SPI_DBGFS_REG("IMR", DW_SPI_IMR),
DW_SPI_DBGFS_REG("ISR", DW_SPI_ISR),
DW_SPI_DBGFS_REG("DMACR", DW_SPI_DMACR),
DW_SPI_DBGFS_REG("DMATDLR", DW_SPI_DMATDLR),
DW_SPI_DBGFS_REG("DMARDLR", DW_SPI_DMARDLR),
DW_SPI_DBGFS_REG("RX_SAMPLE_DLY", DW_SPI_RX_SAMPLE_DLY),
};
static void dw_spi_debugfs_init(struct dw_spi *dws)
{
char name[32];
snprintf(name, 32, "dw_spi%d", dws->host->bus_num);
dws->debugfs = debugfs_create_dir(name, NULL);
dws->regset.regs = dw_spi_dbgfs_regs;
dws->regset.nregs = ARRAY_SIZE(dw_spi_dbgfs_regs);
dws->regset.base = dws->regs;
debugfs_create_regset32("registers", 0400, dws->debugfs, &dws->regset);
}
static void dw_spi_debugfs_remove(struct dw_spi *dws)
{
debugfs_remove_recursive(dws->debugfs);
}
#else
static inline void dw_spi_debugfs_init(struct dw_spi *dws)
{
}
static inline void dw_spi_debugfs_remove(struct dw_spi *dws)
{
}
#endif /* CONFIG_DEBUG_FS */
void dw_spi_set_cs(struct spi_device *spi, bool enable)
{
struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
bool cs_high = !!(spi->mode & SPI_CS_HIGH);
/*
* DW SPI controller demands any native CS being set in order to
* proceed with data transfer. So in order to activate the SPI
* communications we must set a corresponding bit in the Slave
* Enable register no matter whether the SPI core is configured to
* support active-high or active-low CS level.
*/
if (cs_high == enable)
dw_writel(dws, DW_SPI_SER, BIT(spi_get_chipselect(spi, 0)));
else
dw_writel(dws, DW_SPI_SER, 0);
}
EXPORT_SYMBOL_NS_GPL(dw_spi_set_cs, SPI_DW_CORE);
/* Return the max entries we can fill into tx fifo */
static inline u32 dw_spi_tx_max(struct dw_spi *dws)
{
u32 tx_room, rxtx_gap;
tx_room = dws->fifo_len - dw_readl(dws, DW_SPI_TXFLR);
/*
 * Another concern is the tx/rx mismatch: we thought about using
 * (dws->fifo_len - rxflr - txflr) as the maximum value for tx, but
 * it doesn't cover the data that is outside the tx/rx FIFOs and
 * still inside the shift registers. So the limit is enforced from
 * the software point of view instead.
 */
rxtx_gap = dws->fifo_len - (dws->rx_len - dws->tx_len);
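/*
 * Worked example with hypothetical values, not from the original code:
 * with fifo_len = 16, rx_len = 10 and tx_len = 4, six words are still
 * in flight, so rxtx_gap = 16 - (10 - 4) = 10 and the result below is
 * min3(4, tx_room, 10), i.e. the pending tx length still dominates.
 */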
return min3((u32)dws->tx_len, tx_room, rxtx_gap);
}
/* Return the max entries we should read out of rx fifo */
static inline u32 dw_spi_rx_max(struct dw_spi *dws)
{
return min_t(u32, dws->rx_len, dw_readl(dws, DW_SPI_RXFLR));
}
static void dw_writer(struct dw_spi *dws)
{
u32 max = dw_spi_tx_max(dws);
u32 txw = 0;
while (max--) {
if (dws->tx) {
if (dws->n_bytes == 1)
txw = *(u8 *)(dws->tx);
else if (dws->n_bytes == 2)
txw = *(u16 *)(dws->tx);
else
txw = *(u32 *)(dws->tx);
dws->tx += dws->n_bytes;
}
dw_write_io_reg(dws, DW_SPI_DR, txw);
--dws->tx_len;
}
}
static void dw_reader(struct dw_spi *dws)
{
u32 max = dw_spi_rx_max(dws);
u32 rxw;
while (max--) {
rxw = dw_read_io_reg(dws, DW_SPI_DR);
if (dws->rx) {
if (dws->n_bytes == 1)
*(u8 *)(dws->rx) = rxw;
else if (dws->n_bytes == 2)
*(u16 *)(dws->rx) = rxw;
else
*(u32 *)(dws->rx) = rxw;
dws->rx += dws->n_bytes;
}
--dws->rx_len;
}
}
int dw_spi_check_status(struct dw_spi *dws, bool raw)
{
u32 irq_status;
int ret = 0;
if (raw)
irq_status = dw_readl(dws, DW_SPI_RISR);
else
irq_status = dw_readl(dws, DW_SPI_ISR);
if (irq_status & DW_SPI_INT_RXOI) {
dev_err(&dws->host->dev, "RX FIFO overflow detected\n");
ret = -EIO;
}
if (irq_status & DW_SPI_INT_RXUI) {
dev_err(&dws->host->dev, "RX FIFO underflow detected\n");
ret = -EIO;
}
if (irq_status & DW_SPI_INT_TXOI) {
dev_err(&dws->host->dev, "TX FIFO overflow detected\n");
ret = -EIO;
}
/* Generically handle the erroneous situation */
if (ret) {
dw_spi_reset_chip(dws);
if (dws->host->cur_msg)
dws->host->cur_msg->status = ret;
}
return ret;
}
EXPORT_SYMBOL_NS_GPL(dw_spi_check_status, SPI_DW_CORE);
static irqreturn_t dw_spi_transfer_handler(struct dw_spi *dws)
{
u16 irq_status = dw_readl(dws, DW_SPI_ISR);
if (dw_spi_check_status(dws, false)) {
spi_finalize_current_transfer(dws->host);
return IRQ_HANDLED;
}
/*
* Read data from the Rx FIFO every time we've got a chance executing
* this method. If there is nothing left to receive, terminate the
* procedure. Otherwise adjust the Rx FIFO Threshold level if it's a
* final stage of the transfer. By doing so we'll get the next IRQ
* right when the leftover incoming data is received.
*/
dw_reader(dws);
if (!dws->rx_len) {
dw_spi_mask_intr(dws, 0xff);
spi_finalize_current_transfer(dws->host);
} else if (dws->rx_len <= dw_readl(dws, DW_SPI_RXFTLR)) {
dw_writel(dws, DW_SPI_RXFTLR, dws->rx_len - 1);
}
/*
 * Send data out if the Tx FIFO Empty IRQ is received. The IRQ will be
 * disabled after the data transmission is finished so as not to
 * have the TXE IRQ flooding at the final stage of the transfer.
*/
if (irq_status & DW_SPI_INT_TXEI) {
dw_writer(dws);
if (!dws->tx_len)
dw_spi_mask_intr(dws, DW_SPI_INT_TXEI);
}
return IRQ_HANDLED;
}
static irqreturn_t dw_spi_irq(int irq, void *dev_id)
{
struct spi_controller *host = dev_id;
struct dw_spi *dws = spi_controller_get_devdata(host);
u16 irq_status = dw_readl(dws, DW_SPI_ISR) & DW_SPI_INT_MASK;
if (!irq_status)
return IRQ_NONE;
if (!host->cur_msg) {
dw_spi_mask_intr(dws, 0xff);
return IRQ_HANDLED;
}
return dws->transfer_handler(dws);
}
static u32 dw_spi_prepare_cr0(struct dw_spi *dws, struct spi_device *spi)
{
u32 cr0 = 0;
if (dw_spi_ip_is(dws, PSSI)) {
/* CTRLR0[ 5: 4] Frame Format */
cr0 |= FIELD_PREP(DW_PSSI_CTRLR0_FRF_MASK, DW_SPI_CTRLR0_FRF_MOTO_SPI);
/*
* SPI mode (SCPOL|SCPH)
* CTRLR0[ 6] Serial Clock Phase
* CTRLR0[ 7] Serial Clock Polarity
*/
if (spi->mode & SPI_CPOL)
cr0 |= DW_PSSI_CTRLR0_SCPOL;
if (spi->mode & SPI_CPHA)
cr0 |= DW_PSSI_CTRLR0_SCPHA;
/* CTRLR0[11] Shift Register Loop */
if (spi->mode & SPI_LOOP)
cr0 |= DW_PSSI_CTRLR0_SRL;
} else {
/* CTRLR0[ 7: 6] Frame Format */
cr0 |= FIELD_PREP(DW_HSSI_CTRLR0_FRF_MASK, DW_SPI_CTRLR0_FRF_MOTO_SPI);
/*
* SPI mode (SCPOL|SCPH)
* CTRLR0[ 8] Serial Clock Phase
* CTRLR0[ 9] Serial Clock Polarity
*/
if (spi->mode & SPI_CPOL)
cr0 |= DW_HSSI_CTRLR0_SCPOL;
if (spi->mode & SPI_CPHA)
cr0 |= DW_HSSI_CTRLR0_SCPHA;
/* CTRLR0[13] Shift Register Loop */
if (spi->mode & SPI_LOOP)
cr0 |= DW_HSSI_CTRLR0_SRL;
/* CTRLR0[31] MST */
if (dw_spi_ver_is_ge(dws, HSSI, 102A))
cr0 |= DW_HSSI_CTRLR0_MST;
}
return cr0;
}
void dw_spi_update_config(struct dw_spi *dws, struct spi_device *spi,
struct dw_spi_cfg *cfg)
{
struct dw_spi_chip_data *chip = spi_get_ctldata(spi);
u32 cr0 = chip->cr0;
u32 speed_hz;
u16 clk_div;
/* CTRLR0[ 4/3: 0] or CTRLR0[ 20: 16] Data Frame Size */
cr0 |= (cfg->dfs - 1) << dws->dfs_offset;
if (dw_spi_ip_is(dws, PSSI))
/* CTRLR0[ 9:8] Transfer Mode */
cr0 |= FIELD_PREP(DW_PSSI_CTRLR0_TMOD_MASK, cfg->tmode);
else
/* CTRLR0[11:10] Transfer Mode */
cr0 |= FIELD_PREP(DW_HSSI_CTRLR0_TMOD_MASK, cfg->tmode);
dw_writel(dws, DW_SPI_CTRLR0, cr0);
if (cfg->tmode == DW_SPI_CTRLR0_TMOD_EPROMREAD ||
cfg->tmode == DW_SPI_CTRLR0_TMOD_RO)
dw_writel(dws, DW_SPI_CTRLR1, cfg->ndf ? cfg->ndf - 1 : 0);
/* Note DW APB SSI clock divider doesn't support odd numbers */
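/*
 * Illustrative arithmetic with hypothetical values, not part of the
 * original code: with max_freq at 100 MHz and a requested cfg->freq of
 * 7 MHz, DIV_ROUND_UP() gives 15; adding 1 and masking with 0xfffe
 * rounds the divider up to the even value 16, so the effective speed
 * becomes 100 MHz / 16 = 6.25 MHz.
 */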
clk_div = (DIV_ROUND_UP(dws->max_freq, cfg->freq) + 1) & 0xfffe;
speed_hz = dws->max_freq / clk_div;
if (dws->current_freq != speed_hz) {
dw_spi_set_clk(dws, clk_div);
dws->current_freq = speed_hz;
}
/* Update RX sample delay if required */
if (dws->cur_rx_sample_dly != chip->rx_sample_dly) {
dw_writel(dws, DW_SPI_RX_SAMPLE_DLY, chip->rx_sample_dly);
dws->cur_rx_sample_dly = chip->rx_sample_dly;
}
}
EXPORT_SYMBOL_NS_GPL(dw_spi_update_config, SPI_DW_CORE);
static void dw_spi_irq_setup(struct dw_spi *dws)
{
u16 level;
u8 imask;
/*
 * Originally the Tx and Rx data lengths match. The Rx FIFO Threshold level
 * will be adjusted at the final stage of the IRQ-based SPI transfer
 * execution so as not to lose the leftover incoming data.
*/
level = min_t(unsigned int, dws->fifo_len / 2, dws->tx_len);
dw_writel(dws, DW_SPI_TXFTLR, level);
dw_writel(dws, DW_SPI_RXFTLR, level - 1);
dws->transfer_handler = dw_spi_transfer_handler;
imask = DW_SPI_INT_TXEI | DW_SPI_INT_TXOI |
DW_SPI_INT_RXUI | DW_SPI_INT_RXOI | DW_SPI_INT_RXFI;
dw_spi_umask_intr(dws, imask);
}
/*
 * The iterative procedure of the poll-based transfer is simple: write as
 * much as possible to the Tx FIFO, wait until the pending receive data is
 * ready to be read, read it from the Rx FIFO and check whether the
 * performed procedure has been successful.
 *
 * Note that this method, the same way as the IRQ-based transfer, won't work
 * well for SPI devices connected to the controller with native CS due to
 * the automatic CS assertion/de-assertion.
*/
static int dw_spi_poll_transfer(struct dw_spi *dws,
struct spi_transfer *transfer)
{
struct spi_delay delay;
u16 nbits;
int ret;
delay.unit = SPI_DELAY_UNIT_SCK;
nbits = dws->n_bytes * BITS_PER_BYTE;
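/*
 * Illustrative note with hypothetical numbers, not part of the original
 * code: with 8-bit frames and four words written but not yet read back
 * (rx_len - tx_len == 4), the loop below waits 8 * 4 = 32 SCK cycles
 * before draining the Rx FIFO, i.e. roughly the time the controller
 * needs to clock those words out.
 */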
do {
dw_writer(dws);
delay.value = nbits * (dws->rx_len - dws->tx_len);
spi_delay_exec(&delay, transfer);
dw_reader(dws);
ret = dw_spi_check_status(dws, true);
if (ret)
return ret;
} while (dws->rx_len);
return 0;
}
static int dw_spi_transfer_one(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *transfer)
{
struct dw_spi *dws = spi_controller_get_devdata(host);
struct dw_spi_cfg cfg = {
.tmode = DW_SPI_CTRLR0_TMOD_TR,
.dfs = transfer->bits_per_word,
.freq = transfer->speed_hz,
};
int ret;
dws->dma_mapped = 0;
dws->n_bytes =
roundup_pow_of_two(DIV_ROUND_UP(transfer->bits_per_word,
BITS_PER_BYTE));
dws->tx = (void *)transfer->tx_buf;
dws->tx_len = transfer->len / dws->n_bytes;
dws->rx = transfer->rx_buf;
dws->rx_len = dws->tx_len;
/* Ensure the data above is visible for all CPUs */
smp_mb();
dw_spi_enable_chip(dws, 0);
dw_spi_update_config(dws, spi, &cfg);
transfer->effective_speed_hz = dws->current_freq;
/* Check if current transfer is a DMA transaction */
if (host->can_dma && host->can_dma(host, spi, transfer))
dws->dma_mapped = host->cur_msg_mapped;
/* For poll mode just disable all interrupts */
dw_spi_mask_intr(dws, 0xff);
if (dws->dma_mapped) {
ret = dws->dma_ops->dma_setup(dws, transfer);
if (ret)
return ret;
}
dw_spi_enable_chip(dws, 1);
if (dws->dma_mapped)
return dws->dma_ops->dma_transfer(dws, transfer);
else if (dws->irq == IRQ_NOTCONNECTED)
return dw_spi_poll_transfer(dws, transfer);
dw_spi_irq_setup(dws);
return 1;
}
static void dw_spi_handle_err(struct spi_controller *host,
struct spi_message *msg)
{
struct dw_spi *dws = spi_controller_get_devdata(host);
if (dws->dma_mapped)
dws->dma_ops->dma_stop(dws);
dw_spi_reset_chip(dws);
}
static int dw_spi_adjust_mem_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
if (op->data.dir == SPI_MEM_DATA_IN)
op->data.nbytes = clamp_val(op->data.nbytes, 0, DW_SPI_NDF_MASK + 1);
return 0;
}
static bool dw_spi_supports_mem_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
if (op->data.buswidth > 1 || op->addr.buswidth > 1 ||
op->dummy.buswidth > 1 || op->cmd.buswidth > 1)
return false;
return spi_mem_default_supports_op(mem, op);
}
static int dw_spi_init_mem_buf(struct dw_spi *dws, const struct spi_mem_op *op)
{
unsigned int i, j, len;
u8 *out;
/*
* Calculate the total length of the EEPROM command transfer and
* either use the pre-allocated buffer or create a temporary one.
*/
len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
if (op->data.dir == SPI_MEM_DATA_OUT)
len += op->data.nbytes;
if (len <= DW_SPI_BUF_SIZE) {
out = dws->buf;
} else {
out = kzalloc(len, GFP_KERNEL);
if (!out)
return -ENOMEM;
}
/*
* Collect the operation code, address and dummy bytes into the single
* buffer. If it's a transfer with data to be sent, also copy it into the
* single buffer in order to speed the data transmission up.
*/
for (i = 0; i < op->cmd.nbytes; ++i)
out[i] = DW_SPI_GET_BYTE(op->cmd.opcode, op->cmd.nbytes - i - 1);
for (j = 0; j < op->addr.nbytes; ++i, ++j)
out[i] = DW_SPI_GET_BYTE(op->addr.val, op->addr.nbytes - j - 1);
for (j = 0; j < op->dummy.nbytes; ++i, ++j)
out[i] = 0x0;
if (op->data.dir == SPI_MEM_DATA_OUT)
memcpy(&out[i], op->data.buf.out, op->data.nbytes);
dws->n_bytes = 1;
dws->tx = out;
dws->tx_len = len;
if (op->data.dir == SPI_MEM_DATA_IN) {
dws->rx = op->data.buf.in;
dws->rx_len = op->data.nbytes;
} else {
dws->rx = NULL;
dws->rx_len = 0;
}
return 0;
}
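/*
 * Illustrative layout for a hypothetical operation, not part of the
 * original code: a 1-byte opcode 0x03, a 3-byte address 0x012345 and no
 * dummy bytes are packed by dw_spi_init_mem_buf() as
 * {0x03, 0x01, 0x23, 0x45}, most significant address byte first, with
 * any Data-OUT payload appended after that.
 */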
static void dw_spi_free_mem_buf(struct dw_spi *dws)
{
if (dws->tx != dws->buf)
kfree(dws->tx);
}
static int dw_spi_write_then_read(struct dw_spi *dws, struct spi_device *spi)
{
u32 room, entries, sts;
unsigned int len;
u8 *buf;
/*
 * At the initial stage we just pre-fill the Tx FIFO with no rush, since
 * the native CS hasn't been enabled yet and the automatic data
 * transmission won't start until we do that.
*/
len = min(dws->fifo_len, dws->tx_len);
buf = dws->tx;
while (len--)
dw_write_io_reg(dws, DW_SPI_DR, *buf++);
/*
* After setting any bit in the SER register the transmission will
* start automatically. We have to keep up with that procedure
* otherwise the CS de-assertion will happen whereupon the memory
* operation will be pre-terminated.
*/
len = dws->tx_len - ((void *)buf - dws->tx);
dw_spi_set_cs(spi, false);
while (len) {
entries = readl_relaxed(dws->regs + DW_SPI_TXFLR);
if (!entries) {
dev_err(&dws->host->dev, "CS de-assertion on Tx\n");
return -EIO;
}
room = min(dws->fifo_len - entries, len);
for (; room; --room, --len)
dw_write_io_reg(dws, DW_SPI_DR, *buf++);
}
/*
* Data fetching will start automatically if the EEPROM-read mode is
* activated. We have to keep up with the incoming data pace to
* prevent the Rx FIFO overflow causing the inbound data loss.
*/
len = dws->rx_len;
buf = dws->rx;
while (len) {
entries = readl_relaxed(dws->regs + DW_SPI_RXFLR);
if (!entries) {
sts = readl_relaxed(dws->regs + DW_SPI_RISR);
if (sts & DW_SPI_INT_RXOI) {
dev_err(&dws->host->dev, "FIFO overflow on Rx\n");
return -EIO;
}
continue;
}
entries = min(entries, len);
for (; entries; --entries, --len)
*buf++ = dw_read_io_reg(dws, DW_SPI_DR);
}
return 0;
}
static inline bool dw_spi_ctlr_busy(struct dw_spi *dws)
{
return dw_readl(dws, DW_SPI_SR) & DW_SPI_SR_BUSY;
}
static int dw_spi_wait_mem_op_done(struct dw_spi *dws)
{
int retry = DW_SPI_WAIT_RETRIES;
struct spi_delay delay;
unsigned long ns, us;
u32 nents;
nents = dw_readl(dws, DW_SPI_TXFLR);
ns = NSEC_PER_SEC / dws->current_freq * nents;
ns *= dws->n_bytes * BITS_PER_BYTE;
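/*
 * Illustrative example with hypothetical numbers, not part of the
 * original code: with current_freq at 1 MHz, 4 entries left in the
 * Tx FIFO and 1-byte frames, ns = (1e9 / 1e6) * 4 * 8 = 32000 ns,
 * so the polling delay below ends up expressed as 32 us per retry.
 */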
if (ns <= NSEC_PER_USEC) {
delay.unit = SPI_DELAY_UNIT_NSECS;
delay.value = ns;
} else {
us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
delay.unit = SPI_DELAY_UNIT_USECS;
delay.value = clamp_val(us, 0, USHRT_MAX);
}
while (dw_spi_ctlr_busy(dws) && retry--)
spi_delay_exec(&delay, NULL);
if (retry < 0) {
dev_err(&dws->host->dev, "Mem op hanged up\n");
return -EIO;
}
return 0;
}
static void dw_spi_stop_mem_op(struct dw_spi *dws, struct spi_device *spi)
{
dw_spi_enable_chip(dws, 0);
dw_spi_set_cs(spi, true);
dw_spi_enable_chip(dws, 1);
}
/*
 * The SPI memory operation implementation below is the best choice for
 * devices that are selected by the native chip-select lane. It was
 * specifically developed to work around the problem of the automatic
 * chip-select lane toggling when there is no data in the Tx FIFO buffer.
 * Luckily the current SPI-mem core calls the exec_op() callback only if
 * the GPIO-based CS is unavailable.
*/
static int dw_spi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
struct dw_spi *dws = spi_controller_get_devdata(mem->spi->controller);
struct dw_spi_cfg cfg;
unsigned long flags;
int ret;
/*
* Collect the outbound data into a single buffer to speed the
* transmission up at least on the initial stage.
*/
ret = dw_spi_init_mem_buf(dws, op);
if (ret)
return ret;
/*
* DW SPI EEPROM-read mode is required only for the SPI memory Data-IN
* operation. Transmit-only mode is suitable for the rest of them.
*/
cfg.dfs = 8;
cfg.freq = clamp(mem->spi->max_speed_hz, 0U, dws->max_mem_freq);
if (op->data.dir == SPI_MEM_DATA_IN) {
cfg.tmode = DW_SPI_CTRLR0_TMOD_EPROMREAD;
cfg.ndf = op->data.nbytes;
} else {
cfg.tmode = DW_SPI_CTRLR0_TMOD_TO;
}
dw_spi_enable_chip(dws, 0);
dw_spi_update_config(dws, mem->spi, &cfg);
dw_spi_mask_intr(dws, 0xff);
dw_spi_enable_chip(dws, 1);
/*
* DW APB SSI controller has very nasty peculiarities. First originally
* (without any vendor-specific modifications) it doesn't provide a
* direct way to set and clear the native chip-select signal. Instead
* the controller asserts the CS lane if Tx FIFO isn't empty and a
* transmission is going on, and automatically de-asserts it back to
* the high level if the Tx FIFO doesn't have anything to be pushed
 * out. Due to that, multi-tasking or heavy IRQ activity might be
 * fatal, since preemption of the transfer procedure may cause the Tx FIFO
* getting empty and sudden CS de-assertion, which in the middle of the
* transfer will most likely cause the data loss. Secondly the
* EEPROM-read or Read-only DW SPI transfer modes imply the incoming
 * data being automatically pulled into the Rx FIFO. So if the
* driver software is late in fetching the data from the FIFO before
* it's overflown, new incoming data will be lost. In order to make
* sure the executed memory operations are CS-atomic and to prevent the
* Rx FIFO overflow we have to disable the local interrupts so to block
* any preemption during the subsequent IO operations.
*
* Note. At some circumstances disabling IRQs may not help to prevent
* the problems described above. The CS de-assertion and Rx FIFO
* overflow may still happen due to the relatively slow system bus or
* CPU not working fast enough, so the write-then-read algo implemented
* here just won't keep up with the SPI bus data transfer. Such
* situation is highly platform specific and is supposed to be fixed by
* manually restricting the SPI bus frequency using the
* dws->max_mem_freq parameter.
*/
local_irq_save(flags);
preempt_disable();
ret = dw_spi_write_then_read(dws, mem->spi);
local_irq_restore(flags);
preempt_enable();
/*
 * Wait for the operation to finish and check the controller status only
 * if no run-time error has been detected so far. Otherwise waiting is
 * just pointless, and checking the status would only print an additional
 * error message, since any hw error flag being set would be due to the
 * error already detected on the data transfer.
*/
if (!ret) {
ret = dw_spi_wait_mem_op_done(dws);
if (!ret)
ret = dw_spi_check_status(dws, true);
}
dw_spi_stop_mem_op(dws, mem->spi);
dw_spi_free_mem_buf(dws);
return ret;
}
/*
* Initialize the default memory operations if a glue layer hasn't specified
* custom ones. Direct mapping operations will be preserved anyway since DW SPI
* controller doesn't have an embedded dirmap interface. Note the memory
 * operations implemented in this driver are the best choice only for the DW APB
* SSI controller with standard native CS functionality. If a hardware vendor
* has fixed the automatic CS assertion/de-assertion peculiarity, then it will
* be safer to use the normal SPI-messages-based transfers implementation.
*/
static void dw_spi_init_mem_ops(struct dw_spi *dws)
{
if (!dws->mem_ops.exec_op && !(dws->caps & DW_SPI_CAP_CS_OVERRIDE) &&
!dws->set_cs) {
dws->mem_ops.adjust_op_size = dw_spi_adjust_mem_op_size;
dws->mem_ops.supports_op = dw_spi_supports_mem_op;
dws->mem_ops.exec_op = dw_spi_exec_mem_op;
if (!dws->max_mem_freq)
dws->max_mem_freq = dws->max_freq;
}
}
/* This may be called twice for each spi dev */
static int dw_spi_setup(struct spi_device *spi)
{
struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
struct dw_spi_chip_data *chip;
/* Only alloc on first setup */
chip = spi_get_ctldata(spi);
if (!chip) {
struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
u32 rx_sample_dly_ns;
chip = kzalloc(sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
spi_set_ctldata(spi, chip);
/* Get specific / default rx-sample-delay */
if (device_property_read_u32(&spi->dev,
"rx-sample-delay-ns",
&rx_sample_dly_ns) != 0)
/* Use default controller value */
rx_sample_dly_ns = dws->def_rx_sample_dly_ns;
chip->rx_sample_dly = DIV_ROUND_CLOSEST(rx_sample_dly_ns,
NSEC_PER_SEC /
dws->max_freq);
}
/*
* Update CR0 data each time the setup callback is invoked since
* the device parameters could have been changed, for instance, by
* the MMC SPI driver or something else.
*/
chip->cr0 = dw_spi_prepare_cr0(dws, spi);
return 0;
}
static void dw_spi_cleanup(struct spi_device *spi)
{
struct dw_spi_chip_data *chip = spi_get_ctldata(spi);
kfree(chip);
spi_set_ctldata(spi, NULL);
}
/* Restart the controller, disable all interrupts, clean rx fifo */
static void dw_spi_hw_init(struct device *dev, struct dw_spi *dws)
{
dw_spi_reset_chip(dws);
/*
* Retrieve the Synopsys component version if it hasn't been specified
* by the platform. CoreKit version ID is encoded as a 3-chars ASCII
* code enclosed with '*' (typical for the most of Synopsys IP-cores).
*/
if (!dws->ver) {
dws->ver = dw_readl(dws, DW_SPI_VERSION);
dev_dbg(dev, "Synopsys DWC%sSSI v%c.%c%c\n",
dw_spi_ip_is(dws, PSSI) ? " APB " : " ",
DW_SPI_GET_BYTE(dws->ver, 3), DW_SPI_GET_BYTE(dws->ver, 2),
DW_SPI_GET_BYTE(dws->ver, 1));
}
/*
 * Try to detect the FIFO depth if it has not been set by the interface
 * driver; per the HW spec the depth can range from 2 to 256.
*/
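/*
 * Illustrative example for a hypothetical depth, not part of the
 * original code: if the FIFO is 16 entries deep, TXFTLR accepts the
 * values 1..15, the write of 16 in the loop below no longer reads back
 * unchanged, so the loop breaks with fifo = 16 and that value is taken
 * as the detected FIFO length.
 */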
if (!dws->fifo_len) {
u32 fifo;
for (fifo = 1; fifo < 256; fifo++) {
dw_writel(dws, DW_SPI_TXFTLR, fifo);
if (fifo != dw_readl(dws, DW_SPI_TXFTLR))
break;
}
dw_writel(dws, DW_SPI_TXFTLR, 0);
dws->fifo_len = (fifo == 1) ? 0 : fifo;
dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len);
}
/*
* Detect CTRLR0.DFS field size and offset by testing the lowest bits
* writability. Note DWC SSI controller also has the extended DFS, but
* with zero offset.
*/
if (dw_spi_ip_is(dws, PSSI)) {
u32 cr0, tmp = dw_readl(dws, DW_SPI_CTRLR0);
dw_spi_enable_chip(dws, 0);
dw_writel(dws, DW_SPI_CTRLR0, 0xffffffff);
cr0 = dw_readl(dws, DW_SPI_CTRLR0);
dw_writel(dws, DW_SPI_CTRLR0, tmp);
dw_spi_enable_chip(dws, 1);
if (!(cr0 & DW_PSSI_CTRLR0_DFS_MASK)) {
dws->caps |= DW_SPI_CAP_DFS32;
dws->dfs_offset = __bf_shf(DW_PSSI_CTRLR0_DFS32_MASK);
dev_dbg(dev, "Detected 32-bits max data frame size\n");
}
} else {
dws->caps |= DW_SPI_CAP_DFS32;
}
/* enable HW fixup for explicit CS deselect for Amazon's alpine chip */
if (dws->caps & DW_SPI_CAP_CS_OVERRIDE)
dw_writel(dws, DW_SPI_CS_OVERRIDE, 0xF);
}
int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
{
struct spi_controller *host;
int ret;
if (!dws)
return -EINVAL;
host = spi_alloc_host(dev, 0);
if (!host)
return -ENOMEM;
device_set_node(&host->dev, dev_fwnode(dev));
dws->host = host;
dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR);
spi_controller_set_devdata(host, dws);
/* Basic HW init */
dw_spi_hw_init(dev, dws);
ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, dev_name(dev),
host);
if (ret < 0 && ret != -ENOTCONN) {
dev_err(dev, "can not get IRQ\n");
goto err_free_host;
}
dw_spi_init_mem_ops(dws);
host->use_gpio_descriptors = true;
host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP;
if (dws->caps & DW_SPI_CAP_DFS32)
host->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
else
host->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
host->bus_num = dws->bus_num;
host->num_chipselect = dws->num_cs;
host->setup = dw_spi_setup;
host->cleanup = dw_spi_cleanup;
if (dws->set_cs)
host->set_cs = dws->set_cs;
else
host->set_cs = dw_spi_set_cs;
host->transfer_one = dw_spi_transfer_one;
host->handle_err = dw_spi_handle_err;
if (dws->mem_ops.exec_op)
host->mem_ops = &dws->mem_ops;
host->max_speed_hz = dws->max_freq;
host->flags = SPI_CONTROLLER_GPIO_SS;
host->auto_runtime_pm = true;
/* Get default rx sample delay */
device_property_read_u32(dev, "rx-sample-delay-ns",
&dws->def_rx_sample_dly_ns);
if (dws->dma_ops && dws->dma_ops->dma_init) {
ret = dws->dma_ops->dma_init(dev, dws);
if (ret == -EPROBE_DEFER) {
goto err_free_irq;
} else if (ret) {
dev_warn(dev, "DMA init failed\n");
} else {
host->can_dma = dws->dma_ops->can_dma;
host->flags |= SPI_CONTROLLER_MUST_TX;
}
}
ret = spi_register_controller(host);
if (ret) {
dev_err_probe(dev, ret, "problem registering spi host\n");
goto err_dma_exit;
}
dw_spi_debugfs_init(dws);
return 0;
err_dma_exit:
if (dws->dma_ops && dws->dma_ops->dma_exit)
dws->dma_ops->dma_exit(dws);
dw_spi_enable_chip(dws, 0);
err_free_irq:
free_irq(dws->irq, host);
err_free_host:
spi_controller_put(host);
return ret;
}
EXPORT_SYMBOL_NS_GPL(dw_spi_add_host, SPI_DW_CORE);
void dw_spi_remove_host(struct dw_spi *dws)
{
dw_spi_debugfs_remove(dws);
spi_unregister_controller(dws->host);
if (dws->dma_ops && dws->dma_ops->dma_exit)
dws->dma_ops->dma_exit(dws);
dw_spi_shutdown_chip(dws);
free_irq(dws->irq, dws->host);
}
EXPORT_SYMBOL_NS_GPL(dw_spi_remove_host, SPI_DW_CORE);
int dw_spi_suspend_host(struct dw_spi *dws)
{
int ret;
ret = spi_controller_suspend(dws->host);
if (ret)
return ret;
dw_spi_shutdown_chip(dws);
return 0;
}
EXPORT_SYMBOL_NS_GPL(dw_spi_suspend_host, SPI_DW_CORE);
int dw_spi_resume_host(struct dw_spi *dws)
{
dw_spi_hw_init(&dws->host->dev, dws);
return spi_controller_resume(dws->host);
}
EXPORT_SYMBOL_NS_GPL(dw_spi_resume_host, SPI_DW_CORE);
MODULE_AUTHOR("Feng Tang <[email protected]>");
MODULE_DESCRIPTION("Driver for DesignWare SPI controller core");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/spi/spi-dw-core.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Support Infineon TLE62x0 driver chips
*
* Copyright (c) 2007 Simtec Electronics
* Ben Dooks, <[email protected]>
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/tle62x0.h>
#define CMD_READ 0x00
#define CMD_SET 0xff
#define DIAG_NORMAL 0x03
#define DIAG_OVERLOAD 0x02
#define DIAG_OPEN 0x01
#define DIAG_SHORTGND 0x00
struct tle62x0_state {
struct spi_device *us;
struct mutex lock;
unsigned int nr_gpio;
unsigned int gpio_state;
unsigned char tx_buff[4];
unsigned char rx_buff[4];
};
static int to_gpio_num(struct device_attribute *attr);
static inline int tle62x0_write(struct tle62x0_state *st)
{
unsigned char *buff = st->tx_buff;
unsigned int gpio_state = st->gpio_state;
buff[0] = CMD_SET;
if (st->nr_gpio == 16) {
buff[1] = gpio_state >> 8;
buff[2] = gpio_state;
} else {
buff[1] = gpio_state;
}
dev_dbg(&st->us->dev, "buff %3ph\n", buff);
return spi_write(st->us, buff, (st->nr_gpio == 16) ? 3 : 2);
}
static inline int tle62x0_read(struct tle62x0_state *st)
{
unsigned char *txbuff = st->tx_buff;
struct spi_transfer xfer = {
.tx_buf = txbuff,
.rx_buf = st->rx_buff,
.len = (st->nr_gpio * 2) / 8,
};
struct spi_message msg;
txbuff[0] = CMD_READ;
txbuff[1] = 0x00;
txbuff[2] = 0x00;
txbuff[3] = 0x00;
spi_message_init(&msg);
spi_message_add_tail(&xfer, &msg);
return spi_sync(st->us, &msg);
}
static unsigned char *decode_fault(unsigned int fault_code)
{
fault_code &= 3;
switch (fault_code) {
case DIAG_NORMAL:
return "N";
case DIAG_OVERLOAD:
return "V";
case DIAG_OPEN:
return "O";
case DIAG_SHORTGND:
return "G";
}
return "?";
}
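/*
 * Illustrative decoding of a hypothetical readback, not part of the
 * original driver: with 8 GPIOs the chip returns two status bytes; a
 * fault word of 0xFFFE, for instance, means channel 0 reports
 * DIAG_OVERLOAD ("V") while the remaining channels report DIAG_NORMAL
 * ("N"), since each channel occupies two bits with channel 0 in the
 * least significant pair.
 */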
static ssize_t tle62x0_status_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct tle62x0_state *st = dev_get_drvdata(dev);
char *bp = buf;
unsigned char *buff = st->rx_buff;
unsigned long fault = 0;
int ptr;
int ret;
mutex_lock(&st->lock);
ret = tle62x0_read(st);
dev_dbg(dev, "tle62x0_read() returned %d\n", ret);
if (ret < 0) {
mutex_unlock(&st->lock);
return ret;
}
for (ptr = 0; ptr < (st->nr_gpio * 2)/8; ptr += 1) {
fault <<= 8;
fault |= ((unsigned long)buff[ptr]);
dev_dbg(dev, "byte %d is %02x\n", ptr, buff[ptr]);
}
for (ptr = 0; ptr < st->nr_gpio; ptr++) {
bp += sprintf(bp, "%s ", decode_fault(fault >> (ptr * 2)));
}
*bp++ = '\n';
mutex_unlock(&st->lock);
return bp - buf;
}
static DEVICE_ATTR(status_show, S_IRUGO, tle62x0_status_show, NULL);
static ssize_t tle62x0_gpio_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct tle62x0_state *st = dev_get_drvdata(dev);
int gpio_num = to_gpio_num(attr);
int value;
mutex_lock(&st->lock);
value = (st->gpio_state >> gpio_num) & 1;
mutex_unlock(&st->lock);
return sysfs_emit(buf, "%d", value);
}
static ssize_t tle62x0_gpio_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct tle62x0_state *st = dev_get_drvdata(dev);
int gpio_num = to_gpio_num(attr);
unsigned long val;
char *endp;
val = simple_strtoul(buf, &endp, 0);
if (buf == endp)
return -EINVAL;
dev_dbg(dev, "setting gpio %d to %ld\n", gpio_num, val);
mutex_lock(&st->lock);
if (val)
st->gpio_state |= 1 << gpio_num;
else
st->gpio_state &= ~(1 << gpio_num);
tle62x0_write(st);
mutex_unlock(&st->lock);
return len;
}
static DEVICE_ATTR(gpio1, S_IWUSR|S_IRUGO,
tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio2, S_IWUSR|S_IRUGO,
tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio3, S_IWUSR|S_IRUGO,
tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio4, S_IWUSR|S_IRUGO,
tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio5, S_IWUSR|S_IRUGO,
tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio6, S_IWUSR|S_IRUGO,
tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio7, S_IWUSR|S_IRUGO,
tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio8, S_IWUSR|S_IRUGO,
tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio9, S_IWUSR|S_IRUGO,
tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio10, S_IWUSR|S_IRUGO,
tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio11, S_IWUSR|S_IRUGO,
tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio12, S_IWUSR|S_IRUGO,
tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio13, S_IWUSR|S_IRUGO,
tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio14, S_IWUSR|S_IRUGO,
tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio15, S_IWUSR|S_IRUGO,
tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio16, S_IWUSR|S_IRUGO,
tle62x0_gpio_show, tle62x0_gpio_store);
static struct device_attribute *gpio_attrs[] = {
[0] = &dev_attr_gpio1,
[1] = &dev_attr_gpio2,
[2] = &dev_attr_gpio3,
[3] = &dev_attr_gpio4,
[4] = &dev_attr_gpio5,
[5] = &dev_attr_gpio6,
[6] = &dev_attr_gpio7,
[7] = &dev_attr_gpio8,
[8] = &dev_attr_gpio9,
[9] = &dev_attr_gpio10,
[10] = &dev_attr_gpio11,
[11] = &dev_attr_gpio12,
[12] = &dev_attr_gpio13,
[13] = &dev_attr_gpio14,
[14] = &dev_attr_gpio15,
[15] = &dev_attr_gpio16
};
static int to_gpio_num(struct device_attribute *attr)
{
int ptr;
for (ptr = 0; ptr < ARRAY_SIZE(gpio_attrs); ptr++) {
if (gpio_attrs[ptr] == attr)
return ptr;
}
return -1;
}
static int tle62x0_probe(struct spi_device *spi)
{
struct tle62x0_state *st;
struct tle62x0_pdata *pdata;
int ptr;
int ret;
pdata = dev_get_platdata(&spi->dev);
if (pdata == NULL) {
dev_err(&spi->dev, "no device data specified\n");
return -EINVAL;
}
st = kzalloc(sizeof(struct tle62x0_state), GFP_KERNEL);
if (st == NULL)
return -ENOMEM;
st->us = spi;
st->nr_gpio = pdata->gpio_count;
st->gpio_state = pdata->init_state;
mutex_init(&st->lock);
ret = device_create_file(&spi->dev, &dev_attr_status_show);
if (ret) {
dev_err(&spi->dev, "cannot create status attribute\n");
goto err_status;
}
for (ptr = 0; ptr < pdata->gpio_count; ptr++) {
ret = device_create_file(&spi->dev, gpio_attrs[ptr]);
if (ret) {
dev_err(&spi->dev, "cannot create gpio attribute\n");
goto err_gpios;
}
}
/* tle62x0_write(st); */
spi_set_drvdata(spi, st);
return 0;
err_gpios:
while (--ptr >= 0)
device_remove_file(&spi->dev, gpio_attrs[ptr]);
device_remove_file(&spi->dev, &dev_attr_status_show);
err_status:
kfree(st);
return ret;
}
static void tle62x0_remove(struct spi_device *spi)
{
struct tle62x0_state *st = spi_get_drvdata(spi);
int ptr;
for (ptr = 0; ptr < st->nr_gpio; ptr++)
device_remove_file(&spi->dev, gpio_attrs[ptr]);
device_remove_file(&spi->dev, &dev_attr_status_show);
kfree(st);
}
static struct spi_driver tle62x0_driver = {
.driver = {
.name = "tle62x0",
},
.probe = tle62x0_probe,
.remove = tle62x0_remove,
};
module_spi_driver(tle62x0_driver);
MODULE_AUTHOR("Ben Dooks <[email protected]>");
MODULE_DESCRIPTION("TLE62x0 SPI driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("spi:tle62x0");
| linux-master | drivers/spi/spi-tle62x0.c |
// SPDX-License-Identifier: GPL-2.0-only
//
// HiSilicon SPI Controller Driver for Kunpeng SoCs
//
// Copyright (c) 2021 HiSilicon Technologies Co., Ltd.
// Author: Jay Fang <[email protected]>
//
// This code is based on spi-dw-core.c.
#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/property.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
/* Register offsets */
#define HISI_SPI_CSCR 0x00 /* cs control register */
#define HISI_SPI_CR 0x04 /* spi common control register */
#define HISI_SPI_ENR 0x08 /* spi enable register */
#define HISI_SPI_FIFOC 0x0c /* fifo level control register */
#define HISI_SPI_IMR 0x10 /* interrupt mask register */
#define HISI_SPI_DIN 0x14 /* data in register */
#define HISI_SPI_DOUT 0x18 /* data out register */
#define HISI_SPI_SR 0x1c /* status register */
#define HISI_SPI_RISR 0x20 /* raw interrupt status register */
#define HISI_SPI_ISR 0x24 /* interrupt status register */
#define HISI_SPI_ICR 0x28 /* interrupt clear register */
#define HISI_SPI_VERSION 0xe0 /* version register */
/* Bit fields in HISI_SPI_CR */
#define CR_LOOP_MASK GENMASK(1, 1)
#define CR_CPOL_MASK GENMASK(2, 2)
#define CR_CPHA_MASK GENMASK(3, 3)
#define CR_DIV_PRE_MASK GENMASK(11, 4)
#define CR_DIV_POST_MASK GENMASK(19, 12)
#define CR_BPW_MASK GENMASK(24, 20)
#define CR_SPD_MODE_MASK GENMASK(25, 25)
/* Bit fields in HISI_SPI_FIFOC */
#define FIFOC_TX_MASK GENMASK(5, 3)
#define FIFOC_RX_MASK GENMASK(11, 9)
/* Bit fields in HISI_SPI_IMR, 4 bits */
#define IMR_RXOF BIT(0) /* Receive Overflow */
#define IMR_RXTO BIT(1) /* Receive Timeout */
#define IMR_RX BIT(2) /* Receive */
#define IMR_TX BIT(3) /* Transmit */
#define IMR_MASK (IMR_RXOF | IMR_RXTO | IMR_RX | IMR_TX)
/* Bit fields in HISI_SPI_SR, 5 bits */
#define SR_TXE BIT(0) /* Transmit FIFO empty */
#define SR_TXNF BIT(1) /* Transmit FIFO not full */
#define SR_RXNE BIT(2) /* Receive FIFO not empty */
#define SR_RXF BIT(3) /* Receive FIFO full */
#define SR_BUSY BIT(4) /* Busy Flag */
/* Bit fields in HISI_SPI_ISR, 4 bits */
#define ISR_RXOF BIT(0) /* Receive Overflow */
#define ISR_RXTO BIT(1) /* Receive Timeout */
#define ISR_RX BIT(2) /* Receive */
#define ISR_TX BIT(3) /* Transmit */
#define ISR_MASK (ISR_RXOF | ISR_RXTO | ISR_RX | ISR_TX)
/* Bit fields in HISI_SPI_ICR, 2 bits */
#define ICR_RXOF BIT(0) /* Receive Overflow */
#define ICR_RXTO BIT(1) /* Receive Timeout */
#define ICR_MASK (ICR_RXOF | ICR_RXTO)
#define DIV_POST_MAX 0xFF
#define DIV_POST_MIN 0x00
#define DIV_PRE_MAX 0xFE
#define DIV_PRE_MIN 0x02
#define CLK_DIV_MAX ((1 + DIV_POST_MAX) * DIV_PRE_MAX)
#define CLK_DIV_MIN ((1 + DIV_POST_MIN) * DIV_PRE_MIN)
#define DEFAULT_NUM_CS 1
#define HISI_SPI_WAIT_TIMEOUT_MS 10UL
enum hisi_spi_rx_level_trig {
HISI_SPI_RX_1,
HISI_SPI_RX_4,
HISI_SPI_RX_8,
HISI_SPI_RX_16,
HISI_SPI_RX_32,
HISI_SPI_RX_64,
HISI_SPI_RX_128
};
enum hisi_spi_tx_level_trig {
HISI_SPI_TX_1_OR_LESS,
HISI_SPI_TX_4_OR_LESS,
HISI_SPI_TX_8_OR_LESS,
HISI_SPI_TX_16_OR_LESS,
HISI_SPI_TX_32_OR_LESS,
HISI_SPI_TX_64_OR_LESS,
HISI_SPI_TX_128_OR_LESS
};
enum hisi_spi_frame_n_bytes {
HISI_SPI_N_BYTES_NULL,
HISI_SPI_N_BYTES_U8,
HISI_SPI_N_BYTES_U16,
HISI_SPI_N_BYTES_U32 = 4
};
/* Slave spi_dev related */
struct hisi_chip_data {
u32 cr;
u32 speed_hz; /* baud rate */
u16 clk_div; /* baud rate divider */
/* clk_div = (1 + div_post) * div_pre */
u8 div_post; /* value from 0 to 255 */
u8 div_pre; /* value from 2 to 254 (even only!) */
};
struct hisi_spi {
struct device *dev;
void __iomem *regs;
int irq;
u32 fifo_len; /* depth of the FIFO buffer */
/* Current message transfer state info */
const void *tx;
unsigned int tx_len;
void *rx;
unsigned int rx_len;
u8 n_bytes; /* current is a 1/2/4 bytes op */
struct dentry *debugfs;
struct debugfs_regset32 regset;
};
#define HISI_SPI_DBGFS_REG(_name, _off) \
{ \
.name = _name, \
.offset = _off, \
}
static const struct debugfs_reg32 hisi_spi_regs[] = {
HISI_SPI_DBGFS_REG("CSCR", HISI_SPI_CSCR),
HISI_SPI_DBGFS_REG("CR", HISI_SPI_CR),
HISI_SPI_DBGFS_REG("ENR", HISI_SPI_ENR),
HISI_SPI_DBGFS_REG("FIFOC", HISI_SPI_FIFOC),
HISI_SPI_DBGFS_REG("IMR", HISI_SPI_IMR),
HISI_SPI_DBGFS_REG("DIN", HISI_SPI_DIN),
HISI_SPI_DBGFS_REG("DOUT", HISI_SPI_DOUT),
HISI_SPI_DBGFS_REG("SR", HISI_SPI_SR),
HISI_SPI_DBGFS_REG("RISR", HISI_SPI_RISR),
HISI_SPI_DBGFS_REG("ISR", HISI_SPI_ISR),
HISI_SPI_DBGFS_REG("ICR", HISI_SPI_ICR),
HISI_SPI_DBGFS_REG("VERSION", HISI_SPI_VERSION),
};
static int hisi_spi_debugfs_init(struct hisi_spi *hs)
{
char name[32];
struct spi_controller *host;
host = container_of(hs->dev, struct spi_controller, dev);
snprintf(name, 32, "hisi_spi%d", host->bus_num);
hs->debugfs = debugfs_create_dir(name, NULL);
if (IS_ERR(hs->debugfs))
return -ENOMEM;
hs->regset.regs = hisi_spi_regs;
hs->regset.nregs = ARRAY_SIZE(hisi_spi_regs);
hs->regset.base = hs->regs;
debugfs_create_regset32("registers", 0400, hs->debugfs, &hs->regset);
return 0;
}
static u32 hisi_spi_busy(struct hisi_spi *hs)
{
return readl(hs->regs + HISI_SPI_SR) & SR_BUSY;
}
static u32 hisi_spi_rx_not_empty(struct hisi_spi *hs)
{
return readl(hs->regs + HISI_SPI_SR) & SR_RXNE;
}
static u32 hisi_spi_tx_not_full(struct hisi_spi *hs)
{
return readl(hs->regs + HISI_SPI_SR) & SR_TXNF;
}
static void hisi_spi_flush_fifo(struct hisi_spi *hs)
{
unsigned long limit = loops_per_jiffy << 1;
do {
while (hisi_spi_rx_not_empty(hs))
readl(hs->regs + HISI_SPI_DOUT);
} while (hisi_spi_busy(hs) && limit--);
}
/* Disable the controller and all interrupts */
static void hisi_spi_disable(struct hisi_spi *hs)
{
writel(0, hs->regs + HISI_SPI_ENR);
writel(IMR_MASK, hs->regs + HISI_SPI_IMR);
writel(ICR_MASK, hs->regs + HISI_SPI_ICR);
}
static u8 hisi_spi_n_bytes(struct spi_transfer *transfer)
{
if (transfer->bits_per_word <= 8)
return HISI_SPI_N_BYTES_U8;
else if (transfer->bits_per_word <= 16)
return HISI_SPI_N_BYTES_U16;
else
return HISI_SPI_N_BYTES_U32;
}
static void hisi_spi_reader(struct hisi_spi *hs)
{
u32 max = min_t(u32, hs->rx_len, hs->fifo_len);
u32 rxw;
while (hisi_spi_rx_not_empty(hs) && max--) {
rxw = readl(hs->regs + HISI_SPI_DOUT);
/* Check the transfer's original "rx" is not null */
if (hs->rx) {
switch (hs->n_bytes) {
case HISI_SPI_N_BYTES_U8:
*(u8 *)(hs->rx) = rxw;
break;
case HISI_SPI_N_BYTES_U16:
*(u16 *)(hs->rx) = rxw;
break;
case HISI_SPI_N_BYTES_U32:
*(u32 *)(hs->rx) = rxw;
break;
}
hs->rx += hs->n_bytes;
}
--hs->rx_len;
}
}
static void hisi_spi_writer(struct hisi_spi *hs)
{
u32 max = min_t(u32, hs->tx_len, hs->fifo_len);
u32 txw = 0;
while (hisi_spi_tx_not_full(hs) && max--) {
/* Check the transfer's original "tx" is not null */
if (hs->tx) {
switch (hs->n_bytes) {
case HISI_SPI_N_BYTES_U8:
txw = *(u8 *)(hs->tx);
break;
case HISI_SPI_N_BYTES_U16:
txw = *(u16 *)(hs->tx);
break;
case HISI_SPI_N_BYTES_U32:
txw = *(u32 *)(hs->tx);
break;
}
hs->tx += hs->n_bytes;
}
writel(txw, hs->regs + HISI_SPI_DIN);
--hs->tx_len;
}
}
static void __hisi_calc_div_reg(struct hisi_chip_data *chip)
{
chip->div_pre = DIV_PRE_MAX;
while (chip->div_pre >= DIV_PRE_MIN) {
if (chip->clk_div % chip->div_pre == 0)
break;
chip->div_pre -= 2;
}
if (chip->div_pre > chip->clk_div)
chip->div_pre = chip->clk_div;
chip->div_post = (chip->clk_div / chip->div_pre) - 1;
}
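/*
 * Worked example with a hypothetical divider, not part of the original
 * driver: for clk_div = 48 the loop above walks div_pre down from 254
 * in steps of two until 48 % div_pre == 0, i.e. div_pre = 48, and then
 * div_post = 48 / 48 - 1 = 0, which satisfies
 * clk_div == (1 + div_post) * div_pre.
 */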
static u32 hisi_calc_effective_speed(struct spi_controller *host,
struct hisi_chip_data *chip, u32 speed_hz)
{
u32 effective_speed;
/* Note clock divider doesn't support odd numbers */
chip->clk_div = DIV_ROUND_UP(host->max_speed_hz, speed_hz) + 1;
chip->clk_div &= 0xfffe;
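/*
 * Illustrative arithmetic with hypothetical values, not part of the
 * original code: with max_speed_hz at 200 MHz and a requested speed
 * of 9 MHz, DIV_ROUND_UP() + 1 gives 24 and the 0xfffe mask keeps it
 * even, so the effective speed reported back is 200 MHz / 24, i.e.
 * about 8.33 MHz.
 */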
if (chip->clk_div > CLK_DIV_MAX)
chip->clk_div = CLK_DIV_MAX;
effective_speed = host->max_speed_hz / chip->clk_div;
if (chip->speed_hz != effective_speed) {
__hisi_calc_div_reg(chip);
chip->speed_hz = effective_speed;
}
return effective_speed;
}
static u32 hisi_spi_prepare_cr(struct spi_device *spi)
{
u32 cr = FIELD_PREP(CR_SPD_MODE_MASK, 1);
cr |= FIELD_PREP(CR_CPHA_MASK, (spi->mode & SPI_CPHA) ? 1 : 0);
cr |= FIELD_PREP(CR_CPOL_MASK, (spi->mode & SPI_CPOL) ? 1 : 0);
cr |= FIELD_PREP(CR_LOOP_MASK, (spi->mode & SPI_LOOP) ? 1 : 0);
return cr;
}
static void hisi_spi_hw_init(struct hisi_spi *hs)
{
hisi_spi_disable(hs);
/* FIFO default config */
writel(FIELD_PREP(FIFOC_TX_MASK, HISI_SPI_TX_64_OR_LESS) |
FIELD_PREP(FIFOC_RX_MASK, HISI_SPI_RX_16),
hs->regs + HISI_SPI_FIFOC);
hs->fifo_len = 256;
}
static irqreturn_t hisi_spi_irq(int irq, void *dev_id)
{
struct spi_controller *host = dev_id;
struct hisi_spi *hs = spi_controller_get_devdata(host);
u32 irq_status = readl(hs->regs + HISI_SPI_ISR) & ISR_MASK;
if (!irq_status)
return IRQ_NONE;
if (!host->cur_msg)
return IRQ_HANDLED;
/* Error handling */
if (irq_status & ISR_RXOF) {
dev_err(hs->dev, "interrupt_transfer: fifo overflow\n");
host->cur_msg->status = -EIO;
goto finalize_transfer;
}
/*
* Read data from the Rx FIFO every time. If there is
* nothing left to receive, finalize the transfer.
*/
hisi_spi_reader(hs);
if (!hs->rx_len)
goto finalize_transfer;
/* Send data out when Tx FIFO IRQ triggered */
if (irq_status & ISR_TX)
hisi_spi_writer(hs);
return IRQ_HANDLED;
finalize_transfer:
hisi_spi_disable(hs);
spi_finalize_current_transfer(host);
return IRQ_HANDLED;
}
static int hisi_spi_transfer_one(struct spi_controller *host,
struct spi_device *spi, struct spi_transfer *transfer)
{
struct hisi_spi *hs = spi_controller_get_devdata(host);
struct hisi_chip_data *chip = spi_get_ctldata(spi);
u32 cr = chip->cr;
/* Update per transfer options for speed and bpw */
transfer->effective_speed_hz =
hisi_calc_effective_speed(host, chip, transfer->speed_hz);
cr |= FIELD_PREP(CR_DIV_PRE_MASK, chip->div_pre);
cr |= FIELD_PREP(CR_DIV_POST_MASK, chip->div_post);
cr |= FIELD_PREP(CR_BPW_MASK, transfer->bits_per_word - 1);
writel(cr, hs->regs + HISI_SPI_CR);
hisi_spi_flush_fifo(hs);
hs->n_bytes = hisi_spi_n_bytes(transfer);
hs->tx = transfer->tx_buf;
hs->tx_len = transfer->len / hs->n_bytes;
hs->rx = transfer->rx_buf;
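/* SPI is full duplex: one word is received for every word transmitted */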
hs->rx_len = hs->tx_len;
/*
* Ensure that the transfer data above has been updated
 * before interrupts are enabled and the transfer starts.
*/
smp_mb();
/* Enable all interrupts and the controller */
writel(~(u32)IMR_MASK, hs->regs + HISI_SPI_IMR);
writel(1, hs->regs + HISI_SPI_ENR);
return 1;
}
static void hisi_spi_handle_err(struct spi_controller *host,
struct spi_message *msg)
{
struct hisi_spi *hs = spi_controller_get_devdata(host);
hisi_spi_disable(hs);
/*
 * Wait for any interrupt handler that is
 * still running after the timeout to complete.
*/
msleep(HISI_SPI_WAIT_TIMEOUT_MS);
}
static int hisi_spi_setup(struct spi_device *spi)
{
struct hisi_chip_data *chip;
/* Only alloc on first setup */
chip = spi_get_ctldata(spi);
if (!chip) {
chip = kzalloc(sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
spi_set_ctldata(spi, chip);
}
chip->cr = hisi_spi_prepare_cr(spi);
return 0;
}
static void hisi_spi_cleanup(struct spi_device *spi)
{
struct hisi_chip_data *chip = spi_get_ctldata(spi);
kfree(chip);
spi_set_ctldata(spi, NULL);
}
static int hisi_spi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct spi_controller *host;
struct hisi_spi *hs;
int ret, irq;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
host = devm_spi_alloc_host(dev, sizeof(*hs));
if (!host)
return -ENOMEM;
platform_set_drvdata(pdev, host);
hs = spi_controller_get_devdata(host);
hs->dev = dev;
hs->irq = irq;
hs->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hs->regs))
return PTR_ERR(hs->regs);
/* Maximum SPI clock speed (host only) is specified by firmware */
ret = device_property_read_u32(dev, "spi-max-frequency",
&host->max_speed_hz);
if (ret) {
dev_err(dev, "failed to get max SPI clocking speed, ret=%d\n",
ret);
return -EINVAL;
}
ret = device_property_read_u16(dev, "num-cs",
&host->num_chipselect);
if (ret)
host->num_chipselect = DEFAULT_NUM_CS;
host->use_gpio_descriptors = true;
host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
host->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
host->bus_num = pdev->id;
host->setup = hisi_spi_setup;
host->cleanup = hisi_spi_cleanup;
host->transfer_one = hisi_spi_transfer_one;
host->handle_err = hisi_spi_handle_err;
host->dev.fwnode = dev->fwnode;
hisi_spi_hw_init(hs);
ret = devm_request_irq(dev, hs->irq, hisi_spi_irq, 0, dev_name(dev),
host);
if (ret < 0) {
dev_err(dev, "failed to get IRQ=%d, ret=%d\n", hs->irq, ret);
return ret;
}
ret = spi_register_controller(host);
if (ret) {
dev_err(dev, "failed to register spi host, ret=%d\n", ret);
return ret;
}
if (hisi_spi_debugfs_init(hs))
dev_info(dev, "failed to create debugfs dir\n");
dev_info(dev, "hw version:0x%x max-freq:%u kHz\n",
readl(hs->regs + HISI_SPI_VERSION),
host->max_speed_hz / 1000);
return 0;
}
static void hisi_spi_remove(struct platform_device *pdev)
{
struct spi_controller *host = platform_get_drvdata(pdev);
struct hisi_spi *hs = spi_controller_get_devdata(host);
debugfs_remove_recursive(hs->debugfs);
spi_unregister_controller(host);
}
static const struct acpi_device_id hisi_spi_acpi_match[] = {
{"HISI03E1", 0},
{}
};
MODULE_DEVICE_TABLE(acpi, hisi_spi_acpi_match);
static struct platform_driver hisi_spi_driver = {
.probe = hisi_spi_probe,
.remove_new = hisi_spi_remove,
.driver = {
.name = "hisi-kunpeng-spi",
.acpi_match_table = hisi_spi_acpi_match,
},
};
module_platform_driver(hisi_spi_driver);
MODULE_AUTHOR("Jay Fang <[email protected]>");
MODULE_DESCRIPTION("HiSilicon SPI Controller Driver for Kunpeng SoCs");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/spi/spi-hisi-kunpeng.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Altera SPI driver
*
* Copyright (C) 2008 Thomas Chou <[email protected]>
*
* Based on spi_s3c24xx.c, which is:
* Copyright (c) 2006 Ben Dooks
* Copyright (c) 2006 Simtec Electronics
* Ben Dooks <[email protected]>
*/
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/altera.h>
#include <linux/spi/spi.h>
#include <linux/io.h>
#include <linux/of.h>
#define DRV_NAME "spi_altera"
enum altera_spi_type {
ALTERA_SPI_TYPE_UNKNOWN,
ALTERA_SPI_TYPE_SUBDEV,
};
static const struct regmap_config spi_altera_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.fast_io = true,
};
static int altera_spi_probe(struct platform_device *pdev)
{
const struct platform_device_id *platid = platform_get_device_id(pdev);
struct altera_spi_platform_data *pdata = dev_get_platdata(&pdev->dev);
enum altera_spi_type type = ALTERA_SPI_TYPE_UNKNOWN;
struct altera_spi *hw;
struct spi_controller *host;
int err = -ENODEV;
u16 i;
host = spi_alloc_host(&pdev->dev, sizeof(struct altera_spi));
if (!host)
return err;
/* setup the host state. */
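/* a negative bus number lets the SPI core assign one dynamically */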
host->bus_num = -1;
if (pdata) {
if (pdata->num_chipselect > ALTERA_SPI_MAX_CS) {
dev_err(&pdev->dev,
"Invalid number of chipselect: %u\n",
pdata->num_chipselect);
err = -EINVAL;
goto exit;
}
host->num_chipselect = pdata->num_chipselect;
host->mode_bits = pdata->mode_bits;
host->bits_per_word_mask = pdata->bits_per_word_mask;
} else {
host->num_chipselect = 16;
host->mode_bits = SPI_CS_HIGH;
host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16);
}
host->dev.of_node = pdev->dev.of_node;
hw = spi_controller_get_devdata(host);
hw->dev = &pdev->dev;
if (platid)
type = platid->driver_data;
/* find and map our resources */
if (type == ALTERA_SPI_TYPE_SUBDEV) {
struct resource *regoff;
hw->regmap = dev_get_regmap(pdev->dev.parent, NULL);
if (!hw->regmap) {
dev_err(&pdev->dev, "get regmap failed\n");
goto exit;
}
regoff = platform_get_resource(pdev, IORESOURCE_REG, 0);
if (regoff)
hw->regoff = regoff->start;
} else {
void __iomem *res;
res = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(res)) {
err = PTR_ERR(res);
goto exit;
}
hw->regmap = devm_regmap_init_mmio(&pdev->dev, res,
&spi_altera_config);
if (IS_ERR(hw->regmap)) {
dev_err(&pdev->dev, "regmap mmio init failed\n");
err = PTR_ERR(hw->regmap);
goto exit;
}
}
altera_spi_init_host(host);
/* irq is optional */
hw->irq = platform_get_irq(pdev, 0);
if (hw->irq >= 0) {
err = devm_request_irq(&pdev->dev, hw->irq, altera_spi_irq, 0,
pdev->name, host);
if (err)
goto exit;
}
err = devm_spi_register_controller(&pdev->dev, host);
if (err)
goto exit;
if (pdata) {
for (i = 0; i < pdata->num_devices; i++) {
if (!spi_new_device(host, pdata->devices + i))
dev_warn(&pdev->dev,
"unable to create SPI device: %s\n",
pdata->devices[i].modalias);
}
}
dev_info(&pdev->dev, "regoff %u, irq %d\n", hw->regoff, hw->irq);
return 0;
exit:
spi_controller_put(host);
return err;
}
#ifdef CONFIG_OF
static const struct of_device_id altera_spi_match[] = {
{ .compatible = "ALTR,spi-1.0", },
{ .compatible = "altr,spi-1.0", },
{},
};
MODULE_DEVICE_TABLE(of, altera_spi_match);
#endif /* CONFIG_OF */
static const struct platform_device_id altera_spi_ids[] = {
{ DRV_NAME, ALTERA_SPI_TYPE_UNKNOWN },
{ "subdev_spi_altera", ALTERA_SPI_TYPE_SUBDEV },
{ }
};
MODULE_DEVICE_TABLE(platform, altera_spi_ids);
static struct platform_driver altera_spi_driver = {
.probe = altera_spi_probe,
.driver = {
.name = DRV_NAME,
.pm = NULL,
.of_match_table = of_match_ptr(altera_spi_match),
},
.id_table = altera_spi_ids,
};
module_platform_driver(altera_spi_driver);
MODULE_DESCRIPTION("Altera SPI driver");
MODULE_AUTHOR("Thomas Chou <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
| linux-master | drivers/spi/spi-altera-platform.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* linux/drivers/spi/spi-loopback-test.c
*
* (c) Martin Sperl <[email protected]>
*
* Loopback test driver to test several typical spi_message conditions
 * that a spi_master driver may encounter;
 * this can also be used for regression testing
*/
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/vmalloc.h>
#include <linux/spi/spi.h>
#include "spi-test.h"
/* flag to only simulate transfers */
static int simulate_only;
module_param(simulate_only, int, 0);
MODULE_PARM_DESC(simulate_only, "if not 0 do not execute the spi message");
/* dump spi messages */
static int dump_messages;
module_param(dump_messages, int, 0);
MODULE_PARM_DESC(dump_messages,
"=1 dump the basic spi_message_structure, " \
"=2 dump the spi_message_structure including data, " \
"=3 dump the spi_message structure before and after execution");
/* the device is jumpered for loopback - enabling some rx_buf tests */
static int loopback;
module_param(loopback, int, 0);
MODULE_PARM_DESC(loopback,
"if set enable loopback mode, where the rx_buf " \
"is checked to match tx_buf after the spi_message " \
"is executed");
static int loop_req;
module_param(loop_req, int, 0);
MODULE_PARM_DESC(loop_req,
"if set controller will be asked to enable test loop mode. " \
"If controller supported it, MISO and MOSI will be connected");
static int no_cs;
module_param(no_cs, int, 0);
MODULE_PARM_DESC(no_cs,
"if set Chip Select (CS) will not be used");
/* run tests only for a specific length */
static int run_only_iter_len = -1;
module_param(run_only_iter_len, int, 0);
MODULE_PARM_DESC(run_only_iter_len,
"only run tests for a length of this number in iterate_len list");
/* run only a specific test */
static int run_only_test = -1;
module_param(run_only_test, int, 0);
MODULE_PARM_DESC(run_only_test,
"only run the test with this number (0-based !)");
/* use vmalloc'ed buffers */
static int use_vmalloc;
module_param(use_vmalloc, int, 0644);
MODULE_PARM_DESC(use_vmalloc,
"use vmalloc'ed buffers instead of kmalloc'ed");
/* check rx ranges */
static int check_ranges = 1;
module_param(check_ranges, int, 0644);
MODULE_PARM_DESC(check_ranges,
"checks rx_buffer pattern are valid");
static unsigned int delay_ms = 100;
module_param(delay_ms, uint, 0644);
MODULE_PARM_DESC(delay_ms,
"delay between tests, in milliseconds (default: 100)");
/* the actual tests to execute */
static struct spi_test spi_tests[] = {
{
.description = "tx/rx-transfer - start of page",
.fill_option = FILL_COUNT_8,
.iterate_len = { ITERATE_MAX_LEN },
.iterate_tx_align = ITERATE_ALIGN,
.iterate_rx_align = ITERATE_ALIGN,
.transfer_count = 1,
.transfers = {
{
.tx_buf = TX(0),
.rx_buf = RX(0),
},
},
},
{
.description = "tx/rx-transfer - crossing PAGE_SIZE",
.fill_option = FILL_COUNT_8,
.iterate_len = { ITERATE_LEN },
.iterate_tx_align = ITERATE_ALIGN,
.iterate_rx_align = ITERATE_ALIGN,
.transfer_count = 1,
.transfers = {
{
.tx_buf = TX(PAGE_SIZE - 4),
.rx_buf = RX(PAGE_SIZE - 4),
},
},
},
{
.description = "tx-transfer - only",
.fill_option = FILL_COUNT_8,
.iterate_len = { ITERATE_MAX_LEN },
.iterate_tx_align = ITERATE_ALIGN,
.transfer_count = 1,
.transfers = {
{
.tx_buf = TX(0),
},
},
},
{
.description = "rx-transfer - only",
.fill_option = FILL_COUNT_8,
.iterate_len = { ITERATE_MAX_LEN },
.iterate_rx_align = ITERATE_ALIGN,
.transfer_count = 1,
.transfers = {
{
.rx_buf = RX(0),
},
},
},
{
.description = "two tx-transfers - alter both",
.fill_option = FILL_COUNT_8,
.iterate_len = { ITERATE_LEN },
.iterate_tx_align = ITERATE_ALIGN,
.iterate_transfer_mask = BIT(0) | BIT(1),
.transfer_count = 2,
.transfers = {
{
.tx_buf = TX(0),
},
{
/* this is why we can't use ITERATE_MAX_LEN */
.tx_buf = TX(SPI_TEST_MAX_SIZE_HALF),
},
},
},
{
.description = "two tx-transfers - alter first",
.fill_option = FILL_COUNT_8,
.iterate_len = { ITERATE_MAX_LEN },
.iterate_tx_align = ITERATE_ALIGN,
.iterate_transfer_mask = BIT(0),
.transfer_count = 2,
.transfers = {
{
.tx_buf = TX(64),
},
{
.len = 1,
.tx_buf = TX(0),
},
},
},
{
.description = "two tx-transfers - alter second",
.fill_option = FILL_COUNT_8,
.iterate_len = { ITERATE_MAX_LEN },
.iterate_tx_align = ITERATE_ALIGN,
.iterate_transfer_mask = BIT(1),
.transfer_count = 2,
.transfers = {
{
.len = 16,
.tx_buf = TX(0),
},
{
.tx_buf = TX(64),
},
},
},
{
.description = "two transfers tx then rx - alter both",
.fill_option = FILL_COUNT_8,
.iterate_len = { ITERATE_MAX_LEN },
.iterate_tx_align = ITERATE_ALIGN,
.iterate_transfer_mask = BIT(0) | BIT(1),
.transfer_count = 2,
.transfers = {
{
.tx_buf = TX(0),
},
{
.rx_buf = RX(0),
},
},
},
{
.description = "two transfers tx then rx - alter tx",
.fill_option = FILL_COUNT_8,
.iterate_len = { ITERATE_MAX_LEN },
.iterate_tx_align = ITERATE_ALIGN,
.iterate_transfer_mask = BIT(0),
.transfer_count = 2,
.transfers = {
{
.tx_buf = TX(0),
},
{
.len = 1,
.rx_buf = RX(0),
},
},
},
{
.description = "two transfers tx then rx - alter rx",
.fill_option = FILL_COUNT_8,
.iterate_len = { ITERATE_MAX_LEN },
.iterate_tx_align = ITERATE_ALIGN,
.iterate_transfer_mask = BIT(1),
.transfer_count = 2,
.transfers = {
{
.len = 1,
.tx_buf = TX(0),
},
{
.rx_buf = RX(0),
},
},
},
{
.description = "two tx+rx transfers - alter both",
.fill_option = FILL_COUNT_8,
.iterate_len = { ITERATE_LEN },
.iterate_tx_align = ITERATE_ALIGN,
.iterate_transfer_mask = BIT(0) | BIT(1),
.transfer_count = 2,
.transfers = {
{
.tx_buf = TX(0),
.rx_buf = RX(0),
},
{
/* make sure we align without overwriting;
 * this is the reason we cannot use ITERATE_MAX_LEN
*/
.tx_buf = TX(SPI_TEST_MAX_SIZE_HALF),
.rx_buf = RX(SPI_TEST_MAX_SIZE_HALF),
},
},
},
{
.description = "two tx+rx transfers - alter first",
.fill_option = FILL_COUNT_8,
.iterate_len = { ITERATE_MAX_LEN },
.iterate_tx_align = ITERATE_ALIGN,
.iterate_transfer_mask = BIT(0),
.transfer_count = 2,
.transfers = {
{
/* make sure we align without overwriting */
.tx_buf = TX(1024),
.rx_buf = RX(1024),
},
{
.len = 1,
/* make sure we align without overwriting */
.tx_buf = TX(0),
.rx_buf = RX(0),
},
},
},
{
.description = "two tx+rx transfers - alter second",
.fill_option = FILL_COUNT_8,
.iterate_len = { ITERATE_MAX_LEN },
.iterate_tx_align = ITERATE_ALIGN,
.iterate_transfer_mask = BIT(1),
.transfer_count = 2,
.transfers = {
{
.len = 1,
.tx_buf = TX(0),
.rx_buf = RX(0),
},
{
/* make sure we align without overwriting */
.tx_buf = TX(1024),
.rx_buf = RX(1024),
},
},
},
{
.description = "two tx+rx transfers - delay after transfer",
.fill_option = FILL_COUNT_8,
.iterate_len = { ITERATE_MAX_LEN },
.iterate_transfer_mask = BIT(0) | BIT(1),
.transfer_count = 2,
.transfers = {
{
.tx_buf = TX(0),
.rx_buf = RX(0),
.delay = {
.value = 1000,
.unit = SPI_DELAY_UNIT_USECS,
},
},
{
.tx_buf = TX(0),
.rx_buf = RX(0),
.delay = {
.value = 1000,
.unit = SPI_DELAY_UNIT_USECS,
},
},
},
},
{
.description = "three tx+rx transfers with overlapping cache lines",
.fill_option = FILL_COUNT_8,
/*
* This should be large enough for the controller driver to
* choose to transfer it with DMA.
*/
.iterate_len = { 512, -1 },
.iterate_transfer_mask = BIT(1),
.transfer_count = 3,
.transfers = {
{
.len = 1,
.tx_buf = TX(0),
.rx_buf = RX(0),
},
{
.tx_buf = TX(1),
.rx_buf = RX(1),
},
{
.len = 1,
.tx_buf = TX(513),
.rx_buf = RX(513),
},
},
},
{ /* end of tests sequence */ }
};
static int spi_loopback_test_probe(struct spi_device *spi)
{
int ret;
if (loop_req || no_cs) {
spi->mode |= loop_req ? SPI_LOOP : 0;
spi->mode |= no_cs ? SPI_NO_CS : 0;
ret = spi_setup(spi);
if (ret) {
dev_err(&spi->dev, "SPI setup with SPI_LOOP or SPI_NO_CS failed (%d)\n",
ret);
return ret;
}
}
dev_info(&spi->dev, "Executing spi-loopback-tests\n");
ret = spi_test_run_tests(spi, spi_tests);
dev_info(&spi->dev, "Finished spi-loopback-tests with return: %i\n",
ret);
return ret;
}
/* non const match table to permit to change via a module parameter */
static struct of_device_id spi_loopback_test_of_match[] = {
{ .compatible = "linux,spi-loopback-test", },
{ }
};
/* allow to override the compatible string via a module_parameter */
module_param_string(compatible, spi_loopback_test_of_match[0].compatible,
sizeof(spi_loopback_test_of_match[0].compatible),
0000);
MODULE_DEVICE_TABLE(of, spi_loopback_test_of_match);
static struct spi_driver spi_loopback_test_driver = {
.driver = {
.name = "spi-loopback-test",
.owner = THIS_MODULE,
.of_match_table = spi_loopback_test_of_match,
},
.probe = spi_loopback_test_probe,
};
module_spi_driver(spi_loopback_test_driver);
MODULE_AUTHOR("Martin Sperl <[email protected]>");
MODULE_DESCRIPTION("test spi_driver to check core functionality");
MODULE_LICENSE("GPL");
/*-------------------------------------------------------------------------*/
/* spi_test implementation */
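/* true if the buffer [ptr, ptr + plen) lies entirely within [start, start + slen) */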
#define RANGE_CHECK(ptr, plen, start, slen) \
((ptr >= start) && (ptr + plen <= start + slen))
/* we allocate one page more, to allow for offsets */
#define SPI_TEST_MAX_SIZE_PLUS (SPI_TEST_MAX_SIZE + PAGE_SIZE)
static void spi_test_print_hex_dump(char *pre, const void *ptr, size_t len)
{
/* limit the hex_dump */
if (len < 1024) {
print_hex_dump(KERN_INFO, pre,
DUMP_PREFIX_OFFSET, 16, 1,
ptr, len, 0);
return;
}
/* print head */
print_hex_dump(KERN_INFO, pre,
DUMP_PREFIX_OFFSET, 16, 1,
ptr, 512, 0);
/* print tail */
pr_info("%s truncated - continuing at offset %04zx\n",
pre, len - 512);
print_hex_dump(KERN_INFO, pre,
DUMP_PREFIX_OFFSET, 16, 1,
ptr + (len - 512), 512, 0);
}
static void spi_test_dump_message(struct spi_device *spi,
struct spi_message *msg,
bool dump_data)
{
struct spi_transfer *xfer;
int i;
u8 b;
dev_info(&spi->dev, " spi_msg@%pK\n", msg);
if (msg->status)
dev_info(&spi->dev, " status: %i\n",
msg->status);
dev_info(&spi->dev, " frame_length: %i\n",
msg->frame_length);
dev_info(&spi->dev, " actual_length: %i\n",
msg->actual_length);
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
dev_info(&spi->dev, " spi_transfer@%pK\n", xfer);
dev_info(&spi->dev, " len: %i\n", xfer->len);
dev_info(&spi->dev, " tx_buf: %pK\n", xfer->tx_buf);
if (dump_data && xfer->tx_buf)
spi_test_print_hex_dump(" TX: ",
xfer->tx_buf,
xfer->len);
dev_info(&spi->dev, " rx_buf: %pK\n", xfer->rx_buf);
if (dump_data && xfer->rx_buf)
spi_test_print_hex_dump(" RX: ",
xfer->rx_buf,
xfer->len);
/* check for unwritten test pattern on rx_buf */
if (xfer->rx_buf) {
for (i = 0 ; i < xfer->len ; i++) {
b = ((u8 *)xfer->rx_buf)[xfer->len - 1 - i];
if (b != SPI_TEST_PATTERN_UNWRITTEN)
break;
}
if (i)
dev_info(&spi->dev,
" rx_buf filled with %02x starts at offset: %i\n",
SPI_TEST_PATTERN_UNWRITTEN,
xfer->len - i);
}
}
}
struct rx_ranges {
struct list_head list;
u8 *start;
u8 *end;
};
static int rx_ranges_cmp(void *priv, const struct list_head *a,
const struct list_head *b)
{
struct rx_ranges *rx_a = list_entry(a, struct rx_ranges, list);
struct rx_ranges *rx_b = list_entry(b, struct rx_ranges, list);
if (rx_a->start > rx_b->start)
return 1;
if (rx_a->start < rx_b->start)
return -1;
return 0;
}
static int spi_check_rx_ranges(struct spi_device *spi,
struct spi_message *msg,
void *rx)
{
struct spi_transfer *xfer;
struct rx_ranges ranges[SPI_TEST_MAX_TRANSFERS], *r;
int i = 0;
LIST_HEAD(ranges_list);
u8 *addr;
int ret = 0;
/* loop over all transfers to fill in the rx_ranges */
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
/* if there is no rx, then no check is needed */
if (!xfer->rx_buf)
continue;
/* fill in the rx_range */
if (RANGE_CHECK(xfer->rx_buf, xfer->len,
rx, SPI_TEST_MAX_SIZE_PLUS)) {
ranges[i].start = xfer->rx_buf;
ranges[i].end = xfer->rx_buf + xfer->len;
list_add(&ranges[i].list, &ranges_list);
i++;
}
}
/* if no ranges, then we can return and avoid the checks...*/
if (!i)
return 0;
/* sort the list */
list_sort(NULL, &ranges_list, rx_ranges_cmp);
/* and iterate over all the rx addresses */
for (addr = rx; addr < (u8 *)rx + SPI_TEST_MAX_SIZE_PLUS; addr++) {
/* if the byte still holds the DO_NOT_WRITE pattern,
 * then continue with the loop...
*/
if (*addr == SPI_TEST_PATTERN_DO_NOT_WRITE)
continue;
/* check if we are inside a range */
list_for_each_entry(r, &ranges_list, list) {
/* if so then set to end... */
if ((addr >= r->start) && (addr < r->end))
addr = r->end;
}
/* second test after a (hopefully successful) translation */
if (*addr == SPI_TEST_PATTERN_DO_NOT_WRITE)
continue;
/* if still not found then something has modified too much */
/* we could list the "closest" transfer here... */
dev_err(&spi->dev,
"loopback strangeness - rx changed outside of allowed range at: %pK\n",
addr);
/* do not return, only set ret,
* so that we list all addresses
*/
ret = -ERANGE;
}
return ret;
}
static int spi_test_check_elapsed_time(struct spi_device *spi,
struct spi_test *test)
{
int i;
unsigned long long estimated_time = 0;
unsigned long long delay_usecs = 0;
for (i = 0; i < test->transfer_count; i++) {
struct spi_transfer *xfer = test->transfers + i;
unsigned long long nbits = (unsigned long long)BITS_PER_BYTE *
xfer->len;
delay_usecs += xfer->delay.value;
if (!xfer->speed_hz)
continue;
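/* minimum time in ns needed to clock this transfer out at the configured speed */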
estimated_time += div_u64(nbits * NSEC_PER_SEC, xfer->speed_hz);
}
estimated_time += delay_usecs * NSEC_PER_USEC;
if (test->elapsed_time < estimated_time) {
dev_err(&spi->dev,
"elapsed time %lld ns is shorter than minimum estimated time %lld ns\n",
test->elapsed_time, estimated_time);
return -EINVAL;
}
return 0;
}
static int spi_test_check_loopback_result(struct spi_device *spi,
struct spi_message *msg,
void *tx, void *rx)
{
struct spi_transfer *xfer;
u8 rxb, txb;
size_t i;
int ret;
/* checks rx_buffer pattern are valid with loopback or without */
if (check_ranges) {
ret = spi_check_rx_ranges(spi, msg, rx);
if (ret)
return ret;
}
/* if we run without loopback, then return now */
if (!loopback)
return 0;
/* if applicable to transfer check that rx_buf is equal to tx_buf */
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
/* if there is no rx, then no check is needed */
if (!xfer->len || !xfer->rx_buf)
continue;
/* so depending on tx_buf we need to handle things */
if (xfer->tx_buf) {
for (i = 0; i < xfer->len; i++) {
txb = ((u8 *)xfer->tx_buf)[i];
rxb = ((u8 *)xfer->rx_buf)[i];
if (txb != rxb)
goto mismatch_error;
}
} else {
/* first byte received */
txb = ((u8 *)xfer->rx_buf)[0];
/* first byte may be 0x00 or 0xff */
if (!((txb == 0) || (txb == 0xff))) {
dev_err(&spi->dev,
"loopback strangeness - we expect 0x00 or 0xff, but not 0x%02x\n",
txb);
return -EINVAL;
}
/* check that all bytes are identical */
for (i = 1; i < xfer->len; i++) {
rxb = ((u8 *)xfer->rx_buf)[i];
if (rxb != txb)
goto mismatch_error;
}
}
}
return 0;
mismatch_error:
dev_err(&spi->dev,
"loopback strangeness - transfer mismatch on byte %04zx - expected 0x%02x, but got 0x%02x\n",
i, txb, rxb);
return -EINVAL;
}
static int spi_test_translate(struct spi_device *spi,
void **ptr, size_t len,
void *tx, void *rx)
{
size_t off;
/* return on null */
if (!*ptr)
return 0;
/* in the MAX_SIZE_HALF case modify the pointer */
if (((size_t)*ptr) & SPI_TEST_MAX_SIZE_HALF)
/* move the pointer to the correct range */
*ptr += (SPI_TEST_MAX_SIZE_PLUS / 2) -
SPI_TEST_MAX_SIZE_HALF;
/* RX range
* - we check against MAX_SIZE_PLUS to allow for automated alignment
*/
if (RANGE_CHECK(*ptr, len, RX(0), SPI_TEST_MAX_SIZE_PLUS)) {
off = *ptr - RX(0);
*ptr = rx + off;
return 0;
}
/* TX range */
if (RANGE_CHECK(*ptr, len, TX(0), SPI_TEST_MAX_SIZE_PLUS)) {
off = *ptr - TX(0);
*ptr = tx + off;
return 0;
}
dev_err(&spi->dev,
"PointerRange [%pK:%pK[ not in range [%pK:%pK[ or [%pK:%pK[\n",
*ptr, *ptr + len,
RX(0), RX(SPI_TEST_MAX_SIZE),
TX(0), TX(SPI_TEST_MAX_SIZE));
return -EINVAL;
}
static int spi_test_fill_pattern(struct spi_device *spi,
struct spi_test *test)
{
struct spi_transfer *xfers = test->transfers;
u8 *tx_buf;
size_t count = 0;
int i, j;
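/*
 * GET_VALUE_BYTE() extracts one byte of @value selected by the running
 * byte counter; the shift direction depends on host endianness.
 */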
#ifdef __BIG_ENDIAN
#define GET_VALUE_BYTE(value, index, bytes) \
(value >> (8 * (bytes - 1 - count % bytes)))
#else
#define GET_VALUE_BYTE(value, index, bytes) \
(value >> (8 * (count % bytes)))
#endif
/* fill all transfers with the pattern requested */
for (i = 0; i < test->transfer_count; i++) {
/* fill rx_buf with SPI_TEST_PATTERN_UNWRITTEN */
if (xfers[i].rx_buf)
memset(xfers[i].rx_buf, SPI_TEST_PATTERN_UNWRITTEN,
xfers[i].len);
/* if tx_buf is NULL then skip */
tx_buf = (u8 *)xfers[i].tx_buf;
if (!tx_buf)
continue;
/* modify all the transfers */
for (j = 0; j < xfers[i].len; j++, tx_buf++, count++) {
/* fill tx */
switch (test->fill_option) {
case FILL_MEMSET_8:
*tx_buf = test->fill_pattern;
break;
case FILL_MEMSET_16:
*tx_buf = GET_VALUE_BYTE(test->fill_pattern,
count, 2);
break;
case FILL_MEMSET_24:
*tx_buf = GET_VALUE_BYTE(test->fill_pattern,
count, 3);
break;
case FILL_MEMSET_32:
*tx_buf = GET_VALUE_BYTE(test->fill_pattern,
count, 4);
break;
case FILL_COUNT_8:
*tx_buf = count;
break;
case FILL_COUNT_16:
*tx_buf = GET_VALUE_BYTE(count, count, 2);
break;
case FILL_COUNT_24:
*tx_buf = GET_VALUE_BYTE(count, count, 3);
break;
case FILL_COUNT_32:
*tx_buf = GET_VALUE_BYTE(count, count, 4);
break;
case FILL_TRANSFER_BYTE_8:
*tx_buf = j;
break;
case FILL_TRANSFER_BYTE_16:
*tx_buf = GET_VALUE_BYTE(j, j, 2);
break;
case FILL_TRANSFER_BYTE_24:
*tx_buf = GET_VALUE_BYTE(j, j, 3);
break;
case FILL_TRANSFER_BYTE_32:
*tx_buf = GET_VALUE_BYTE(j, j, 4);
break;
case FILL_TRANSFER_NUM:
*tx_buf = i;
break;
default:
dev_err(&spi->dev,
"unsupported fill_option: %i\n",
test->fill_option);
return -EINVAL;
}
}
}
return 0;
}
static int _spi_test_run_iter(struct spi_device *spi,
struct spi_test *test,
void *tx, void *rx)
{
struct spi_message *msg = &test->msg;
struct spi_transfer *x;
int i, ret;
/* initialize message - zero-filled via static initialization */
spi_message_init_no_memset(msg);
/* fill rx with the DO_NOT_WRITE pattern */
memset(rx, SPI_TEST_PATTERN_DO_NOT_WRITE, SPI_TEST_MAX_SIZE_PLUS);
/* add the individual transfers */
for (i = 0; i < test->transfer_count; i++) {
x = &test->transfers[i];
/* patch the values of tx_buf */
ret = spi_test_translate(spi, (void **)&x->tx_buf, x->len,
(void *)tx, rx);
if (ret)
return ret;
/* patch the values of rx_buf */
ret = spi_test_translate(spi, &x->rx_buf, x->len,
(void *)tx, rx);
if (ret)
return ret;
/* and add it to the list */
spi_message_add_tail(x, msg);
}
/* fill in the transfer buffers with pattern */
ret = spi_test_fill_pattern(spi, test);
if (ret)
return ret;
/* and execute */
if (test->execute_msg)
ret = test->execute_msg(spi, test, tx, rx);
else
ret = spi_test_execute_msg(spi, test, tx, rx);
/* handle result */
if (ret == test->expected_return)
return 0;
dev_err(&spi->dev,
"test failed - test returned %i, but we expect %i\n",
ret, test->expected_return);
if (ret)
return ret;
/* if it is 0, as we expected something else,
* then return something special
*/
return -EFAULT;
}
static int spi_test_run_iter(struct spi_device *spi,
const struct spi_test *testtemplate,
void *tx, void *rx,
size_t len,
size_t tx_off,
size_t rx_off
)
{
struct spi_test test;
int i, tx_count, rx_count;
/* copy the test template to test */
memcpy(&test, testtemplate, sizeof(test));
/* if iterate_transfer_mask is not set,
* then set it to first transfer only
*/
if (!(test.iterate_transfer_mask & (BIT(test.transfer_count) - 1)))
test.iterate_transfer_mask = 1;
/* count number of transfers with tx/rx_buf != NULL */
rx_count = tx_count = 0;
for (i = 0; i < test.transfer_count; i++) {
if (test.transfers[i].tx_buf)
tx_count++;
if (test.transfers[i].rx_buf)
rx_count++;
}
/* in some iteration cases warn and exit early,
 * as there is nothing to do that has not been tested already...
*/
if (tx_off && (!tx_count)) {
dev_warn_once(&spi->dev,
"%s: iterate_tx_off configured with tx_buf==NULL - ignoring\n",
test.description);
return 0;
}
if (rx_off && (!rx_count)) {
dev_warn_once(&spi->dev,
"%s: iterate_rx_off configured with rx_buf==NULL - ignoring\n",
test.description);
return 0;
}
/* write out info */
if (!(len || tx_off || rx_off)) {
dev_info(&spi->dev, "Running test %s\n", test.description);
} else {
dev_info(&spi->dev,
" with iteration values: len = %zu, tx_off = %zu, rx_off = %zu\n",
len, tx_off, rx_off);
}
/* update in the values from iteration values */
for (i = 0; i < test.transfer_count; i++) {
/* only when bit in transfer mask is set */
if (!(test.iterate_transfer_mask & BIT(i)))
continue;
test.transfers[i].len = len;
if (test.transfers[i].tx_buf)
test.transfers[i].tx_buf += tx_off;
if (test.transfers[i].rx_buf)
test.transfers[i].rx_buf += rx_off;
}
/* and execute */
return _spi_test_run_iter(spi, &test, tx, rx);
}
/**
* spi_test_execute_msg - default implementation to run a test
*
* @spi: @spi_device on which to run the @spi_message
* @test: the test to execute, which already contains @msg
* @tx: the tx buffer allocated for the test sequence
* @rx: the rx buffer allocated for the test sequence
*
* Returns: error code of spi_sync as well as basic error checking
*/
int spi_test_execute_msg(struct spi_device *spi, struct spi_test *test,
void *tx, void *rx)
{
struct spi_message *msg = &test->msg;
int ret = 0;
int i;
/* only if we do not simulate */
if (!simulate_only) {
ktime_t start;
/* dump the complete message before and after the transfer */
if (dump_messages == 3)
spi_test_dump_message(spi, msg, true);
start = ktime_get();
/* run spi message */
ret = spi_sync(spi, msg);
test->elapsed_time = ktime_to_ns(ktime_sub(ktime_get(), start));
if (ret == -ETIMEDOUT) {
dev_info(&spi->dev,
"spi-message timed out - rerunning...\n");
/* rerun after a few explicit schedules */
for (i = 0; i < 16; i++)
schedule();
ret = spi_sync(spi, msg);
}
if (ret) {
dev_err(&spi->dev,
"Failed to execute spi_message: %i\n",
ret);
goto exit;
}
/* do some extra error checks */
if (msg->frame_length != msg->actual_length) {
dev_err(&spi->dev,
"actual length differs from expected\n");
ret = -EIO;
goto exit;
}
/* run rx-buffer tests */
ret = spi_test_check_loopback_result(spi, msg, tx, rx);
if (ret)
goto exit;
ret = spi_test_check_elapsed_time(spi, test);
}
/* if requested or on error dump message (including data) */
exit:
if (dump_messages || ret)
spi_test_dump_message(spi, msg,
(dump_messages >= 2) || (ret));
return ret;
}
EXPORT_SYMBOL_GPL(spi_test_execute_msg);
/**
* spi_test_run_test - run an individual spi_test
* including all the relevant iterations on:
* length and buffer alignment
*
* @spi: the spi_device to send the messages to
* @test: the test which we need to execute
* @tx: the tx buffer allocated for the test sequence
* @rx: the rx buffer allocated for the test sequence
*
* Returns: status code of spi_sync or other failures
*/
int spi_test_run_test(struct spi_device *spi, const struct spi_test *test,
void *tx, void *rx)
{
int idx_len;
size_t len;
size_t tx_align, rx_align;
int ret;
/* test for transfer limits */
if (test->transfer_count >= SPI_TEST_MAX_TRANSFERS) {
dev_err(&spi->dev,
"%s: Exceeded max number of transfers with %i\n",
test->description, test->transfer_count);
return -E2BIG;
}
/* setting up some values in spi_message
* based on some settings in spi_master
* some of this can also get done in the run() method
*/
/* iterate over all the iterable values using macros
 * (to make it a bit more readable...)
*/
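/*
 * FOR_EACH_ALIGNMENT() iterates the alignment offset from 0 up to the
 * controller's dma_alignment (or the test's own limit when no DMA alignment
 * is declared); it runs a single pass when iteration is disabled for @var.
 */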
#define FOR_EACH_ALIGNMENT(var) \
for (var = 0; \
var < (test->iterate_##var ? \
(spi->master->dma_alignment ? \
spi->master->dma_alignment : \
test->iterate_##var) : \
1); \
var++)
for (idx_len = 0; idx_len < SPI_TEST_MAX_ITERATE &&
(len = test->iterate_len[idx_len]) != -1; idx_len++) {
if ((run_only_iter_len > -1) && len != run_only_iter_len)
continue;
FOR_EACH_ALIGNMENT(tx_align) {
FOR_EACH_ALIGNMENT(rx_align) {
/* and run the iteration */
ret = spi_test_run_iter(spi, test,
tx, rx,
len,
tx_align,
rx_align);
if (ret)
return ret;
}
}
}
return 0;
}
EXPORT_SYMBOL_GPL(spi_test_run_test);
/**
* spi_test_run_tests - run an array of spi_messages tests
* @spi: the spi device on which to run the tests
* @tests: NULL-terminated array of @spi_test
*
* Returns: status errors as per @spi_test_run_test()
*/
int spi_test_run_tests(struct spi_device *spi,
struct spi_test *tests)
{
char *rx = NULL, *tx = NULL;
int ret = 0, count = 0;
struct spi_test *test;
/* allocate rx/tx buffers of 128kB size without devm
 * in the hope that it is on a page boundary
*/
if (use_vmalloc)
rx = vmalloc(SPI_TEST_MAX_SIZE_PLUS);
else
rx = kzalloc(SPI_TEST_MAX_SIZE_PLUS, GFP_KERNEL);
if (!rx)
return -ENOMEM;
if (use_vmalloc)
tx = vmalloc(SPI_TEST_MAX_SIZE_PLUS);
else
tx = kzalloc(SPI_TEST_MAX_SIZE_PLUS, GFP_KERNEL);
if (!tx) {
ret = -ENOMEM;
goto err_tx;
}
/* now run the individual tests in the table */
for (test = tests, count = 0; test->description[0];
test++, count++) {
/* only run test if requested */
if ((run_only_test > -1) && (count != run_only_test))
continue;
/* run custom implementation */
if (test->run_test)
ret = test->run_test(spi, test, tx, rx);
else
ret = spi_test_run_test(spi, test, tx, rx);
if (ret)
goto out;
/* add some delays so that we can easily
* detect the individual tests when using a logic analyzer
* we also add scheduling to avoid potential spi_timeouts...
*/
if (delay_ms)
mdelay(delay_ms);
schedule();
}
out:
kvfree(tx);
err_tx:
kvfree(rx);
return ret;
}
EXPORT_SYMBOL_GPL(spi_test_run_tests);
| linux-master | drivers/spi/spi-loopback-test.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2019 Xilinx, Inc.
*
* Author: Naga Sureshkumar Relli <[email protected]>
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/workqueue.h>
#include <linux/spi/spi-mem.h>
/* Register offset definitions */
#define ZYNQ_QSPI_CONFIG_OFFSET 0x00 /* Configuration Register, RW */
#define ZYNQ_QSPI_STATUS_OFFSET 0x04 /* Interrupt Status Register, RO */
#define ZYNQ_QSPI_IEN_OFFSET 0x08 /* Interrupt Enable Register, WO */
#define ZYNQ_QSPI_IDIS_OFFSET 0x0C /* Interrupt Disable Reg, WO */
#define ZYNQ_QSPI_IMASK_OFFSET 0x10 /* Interrupt Enabled Mask Reg,RO */
#define ZYNQ_QSPI_ENABLE_OFFSET 0x14 /* Enable/Disable Register, RW */
#define ZYNQ_QSPI_DELAY_OFFSET 0x18 /* Delay Register, RW */
#define ZYNQ_QSPI_TXD_00_00_OFFSET 0x1C /* Transmit 4-byte inst, WO */
#define ZYNQ_QSPI_TXD_00_01_OFFSET 0x80 /* Transmit 1-byte inst, WO */
#define ZYNQ_QSPI_TXD_00_10_OFFSET 0x84 /* Transmit 2-byte inst, WO */
#define ZYNQ_QSPI_TXD_00_11_OFFSET 0x88 /* Transmit 3-byte inst, WO */
#define ZYNQ_QSPI_RXD_OFFSET 0x20 /* Data Receive Register, RO */
#define ZYNQ_QSPI_SIC_OFFSET 0x24 /* Slave Idle Count Register, RW */
#define ZYNQ_QSPI_TX_THRESH_OFFSET 0x28 /* TX FIFO Watermark Reg, RW */
#define ZYNQ_QSPI_RX_THRESH_OFFSET 0x2C /* RX FIFO Watermark Reg, RW */
#define ZYNQ_QSPI_GPIO_OFFSET 0x30 /* GPIO Register, RW */
#define ZYNQ_QSPI_LINEAR_CFG_OFFSET 0xA0 /* Linear Adapter Config Ref, RW */
#define ZYNQ_QSPI_MOD_ID_OFFSET 0xFC /* Module ID Register, RO */
/*
* QSPI Configuration Register bit Masks
*
 * This register contains various control bits that affect the operation
* of the QSPI controller
*/
#define ZYNQ_QSPI_CONFIG_IFMODE_MASK BIT(31) /* Flash Memory Interface */
#define ZYNQ_QSPI_CONFIG_MANSRT_MASK BIT(16) /* Manual TX Start */
#define ZYNQ_QSPI_CONFIG_MANSRTEN_MASK BIT(15) /* Enable Manual TX Mode */
#define ZYNQ_QSPI_CONFIG_SSFORCE_MASK BIT(14) /* Manual Chip Select */
#define ZYNQ_QSPI_CONFIG_BDRATE_MASK GENMASK(5, 3) /* Baud Rate Mask */
#define ZYNQ_QSPI_CONFIG_CPHA_MASK BIT(2) /* Clock Phase Control */
#define ZYNQ_QSPI_CONFIG_CPOL_MASK BIT(1) /* Clock Polarity Control */
#define ZYNQ_QSPI_CONFIG_FWIDTH_MASK GENMASK(7, 6) /* FIFO width */
#define ZYNQ_QSPI_CONFIG_MSTREN_MASK BIT(0) /* Master Mode */
/*
* QSPI Configuration Register - Baud rate and slave select
*
* These are the values used in the calculation of baud rate divisor and
* setting the slave select.
*/
#define ZYNQ_QSPI_CONFIG_BAUD_DIV_MAX GENMASK(2, 0) /* Baud rate maximum */
#define ZYNQ_QSPI_CONFIG_BAUD_DIV_SHIFT 3 /* Baud rate divisor shift */
#define ZYNQ_QSPI_CONFIG_PCS BIT(10) /* Peripheral Chip Select */
/*
* QSPI Interrupt Registers bit Masks
*
* All the four interrupt registers (Status/Mask/Enable/Disable) have the same
* bit definitions.
*/
#define ZYNQ_QSPI_IXR_RX_OVERFLOW_MASK BIT(0) /* QSPI RX FIFO Overflow */
#define ZYNQ_QSPI_IXR_TXNFULL_MASK BIT(2) /* QSPI TX FIFO not full */
#define ZYNQ_QSPI_IXR_TXFULL_MASK BIT(3) /* QSPI TX FIFO is full */
#define ZYNQ_QSPI_IXR_RXNEMTY_MASK BIT(4) /* QSPI RX FIFO Not Empty */
#define ZYNQ_QSPI_IXR_RXF_FULL_MASK BIT(5) /* QSPI RX FIFO is full */
#define ZYNQ_QSPI_IXR_TXF_UNDRFLOW_MASK BIT(6) /* QSPI TX FIFO Underflow */
#define ZYNQ_QSPI_IXR_ALL_MASK (ZYNQ_QSPI_IXR_RX_OVERFLOW_MASK | \
ZYNQ_QSPI_IXR_TXNFULL_MASK | \
ZYNQ_QSPI_IXR_TXFULL_MASK | \
ZYNQ_QSPI_IXR_RXNEMTY_MASK | \
ZYNQ_QSPI_IXR_RXF_FULL_MASK | \
ZYNQ_QSPI_IXR_TXF_UNDRFLOW_MASK)
#define ZYNQ_QSPI_IXR_RXTX_MASK (ZYNQ_QSPI_IXR_TXNFULL_MASK | \
ZYNQ_QSPI_IXR_RXNEMTY_MASK)
/*
* QSPI Enable Register bit Masks
*
* This register is used to enable or disable the QSPI controller
*/
#define ZYNQ_QSPI_ENABLE_ENABLE_MASK BIT(0) /* QSPI Enable Bit Mask */
/*
* QSPI Linear Configuration Register
*
* It is named Linear Configuration but it controls other modes when not in
* linear mode also.
*/
#define ZYNQ_QSPI_LCFG_TWO_MEM BIT(30) /* LQSPI Two memories */
#define ZYNQ_QSPI_LCFG_SEP_BUS BIT(29) /* LQSPI Separate bus */
#define ZYNQ_QSPI_LCFG_U_PAGE BIT(28) /* LQSPI Upper Page */
#define ZYNQ_QSPI_LCFG_DUMMY_SHIFT 8
#define ZYNQ_QSPI_FAST_READ_QOUT_CODE 0x6B /* read instruction code */
#define ZYNQ_QSPI_FIFO_DEPTH 63 /* FIFO depth in words */
#define ZYNQ_QSPI_RX_THRESHOLD 32 /* Rx FIFO threshold level */
#define ZYNQ_QSPI_TX_THRESHOLD 1 /* Tx FIFO threshold level */
/*
* The modebits configurable by the driver to make the SPI support different
* data formats
*/
#define ZYNQ_QSPI_MODEBITS (SPI_CPOL | SPI_CPHA)
/* Maximum number of chip selects */
#define ZYNQ_QSPI_MAX_NUM_CS 2
/**
* struct zynq_qspi - Defines qspi driver instance
 * @dev: Pointer to this device's information
* @regs: Virtual address of the QSPI controller registers
* @refclk: Pointer to the peripheral clock
* @pclk: Pointer to the APB clock
* @irq: IRQ number
* @txbuf: Pointer to the TX buffer
* @rxbuf: Pointer to the RX buffer
* @tx_bytes: Number of bytes left to transfer
* @rx_bytes: Number of bytes left to receive
* @data_completion: completion structure
*/
struct zynq_qspi {
struct device *dev;
void __iomem *regs;
struct clk *refclk;
struct clk *pclk;
int irq;
u8 *txbuf;
u8 *rxbuf;
int tx_bytes;
int rx_bytes;
struct completion data_completion;
};
/*
* Inline functions for the QSPI controller read/write
*/
static inline u32 zynq_qspi_read(struct zynq_qspi *xqspi, u32 offset)
{
return readl_relaxed(xqspi->regs + offset);
}
static inline void zynq_qspi_write(struct zynq_qspi *xqspi, u32 offset,
u32 val)
{
writel_relaxed(val, xqspi->regs + offset);
}
/**
* zynq_qspi_init_hw - Initialize the hardware
* @xqspi: Pointer to the zynq_qspi structure
* @num_cs: Number of connected CS (to enable dual memories if needed)
*
* The default settings of the QSPI controller's configurable parameters on
* reset are
* - Master mode
* - Baud rate divisor is set to 2
 * - Tx threshold set to 1, Rx threshold set to 32
* - Flash memory interface mode enabled
* - Size of the word to be transferred as 8 bit
* This function performs the following actions
* - Disable and clear all the interrupts
* - Enable manual slave select
* - Enable manual start
* - Deselect all the chip select lines
* - Set the size of the word to be transferred as 32 bit
* - Set the little endian mode of TX FIFO and
* - Enable the QSPI controller
*/
static void zynq_qspi_init_hw(struct zynq_qspi *xqspi, unsigned int num_cs)
{
u32 config_reg;
zynq_qspi_write(xqspi, ZYNQ_QSPI_ENABLE_OFFSET, 0);
zynq_qspi_write(xqspi, ZYNQ_QSPI_IDIS_OFFSET, ZYNQ_QSPI_IXR_ALL_MASK);
/* Disable linear mode as the boot loader may have used it */
config_reg = 0;
/* At the same time, enable dual mode if more than 1 CS is available */
if (num_cs > 1)
config_reg |= ZYNQ_QSPI_LCFG_TWO_MEM;
zynq_qspi_write(xqspi, ZYNQ_QSPI_LINEAR_CFG_OFFSET, config_reg);
/* Clear the RX FIFO */
while (zynq_qspi_read(xqspi, ZYNQ_QSPI_STATUS_OFFSET) &
ZYNQ_QSPI_IXR_RXNEMTY_MASK)
zynq_qspi_read(xqspi, ZYNQ_QSPI_RXD_OFFSET);
zynq_qspi_write(xqspi, ZYNQ_QSPI_STATUS_OFFSET, ZYNQ_QSPI_IXR_ALL_MASK);
config_reg = zynq_qspi_read(xqspi, ZYNQ_QSPI_CONFIG_OFFSET);
config_reg &= ~(ZYNQ_QSPI_CONFIG_MSTREN_MASK |
ZYNQ_QSPI_CONFIG_CPOL_MASK |
ZYNQ_QSPI_CONFIG_CPHA_MASK |
ZYNQ_QSPI_CONFIG_BDRATE_MASK |
ZYNQ_QSPI_CONFIG_SSFORCE_MASK |
ZYNQ_QSPI_CONFIG_MANSRTEN_MASK |
ZYNQ_QSPI_CONFIG_MANSRT_MASK);
config_reg |= (ZYNQ_QSPI_CONFIG_MSTREN_MASK |
ZYNQ_QSPI_CONFIG_SSFORCE_MASK |
ZYNQ_QSPI_CONFIG_FWIDTH_MASK |
ZYNQ_QSPI_CONFIG_IFMODE_MASK);
zynq_qspi_write(xqspi, ZYNQ_QSPI_CONFIG_OFFSET, config_reg);
zynq_qspi_write(xqspi, ZYNQ_QSPI_RX_THRESH_OFFSET,
ZYNQ_QSPI_RX_THRESHOLD);
zynq_qspi_write(xqspi, ZYNQ_QSPI_TX_THRESH_OFFSET,
ZYNQ_QSPI_TX_THRESHOLD);
zynq_qspi_write(xqspi, ZYNQ_QSPI_ENABLE_OFFSET,
ZYNQ_QSPI_ENABLE_ENABLE_MASK);
}
static bool zynq_qspi_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
if (!spi_mem_default_supports_op(mem, op))
return false;
/*
* The number of address bytes should be equal to or less than 3 bytes.
*/
if (op->addr.nbytes > 3)
return false;
return true;
}
/**
* zynq_qspi_rxfifo_op - Read 1..4 bytes from RxFIFO to RX buffer
* @xqspi: Pointer to the zynq_qspi structure
* @size: Number of bytes to be read (1..4)
*/
static void zynq_qspi_rxfifo_op(struct zynq_qspi *xqspi, unsigned int size)
{
u32 data;
data = zynq_qspi_read(xqspi, ZYNQ_QSPI_RXD_OFFSET);
if (xqspi->rxbuf) {
memcpy(xqspi->rxbuf, ((u8 *)&data) + 4 - size, size);
xqspi->rxbuf += size;
}
xqspi->rx_bytes -= size;
if (xqspi->rx_bytes < 0)
xqspi->rx_bytes = 0;
}
/**
* zynq_qspi_txfifo_op - Write 1..4 bytes from TX buffer to TxFIFO
* @xqspi: Pointer to the zynq_qspi structure
* @size: Number of bytes to be written (1..4)
*/
static void zynq_qspi_txfifo_op(struct zynq_qspi *xqspi, unsigned int size)
{
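/* TXD register to use for a 1-, 2-, 3- or 4-byte write, indexed by size - 1 */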
static const unsigned int offset[4] = {
ZYNQ_QSPI_TXD_00_01_OFFSET, ZYNQ_QSPI_TXD_00_10_OFFSET,
ZYNQ_QSPI_TXD_00_11_OFFSET, ZYNQ_QSPI_TXD_00_00_OFFSET };
u32 data;
if (xqspi->txbuf) {
data = 0xffffffff;
memcpy(&data, xqspi->txbuf, size);
xqspi->txbuf += size;
} else {
data = 0;
}
xqspi->tx_bytes -= size;
zynq_qspi_write(xqspi, offset[size - 1], data);
}
/**
* zynq_qspi_chipselect - Select or deselect the chip select line
* @spi: Pointer to the spi_device structure
* @assert: 1 for select or 0 for deselect the chip select line
*/
static void zynq_qspi_chipselect(struct spi_device *spi, bool assert)
{
struct spi_controller *ctlr = spi->master;
struct zynq_qspi *xqspi = spi_controller_get_devdata(ctlr);
u32 config_reg;
/* Select the lower (CS0) or upper (CS1) memory */
if (ctlr->num_chipselect > 1) {
config_reg = zynq_qspi_read(xqspi, ZYNQ_QSPI_LINEAR_CFG_OFFSET);
if (!spi_get_chipselect(spi, 0))
config_reg &= ~ZYNQ_QSPI_LCFG_U_PAGE;
else
config_reg |= ZYNQ_QSPI_LCFG_U_PAGE;
zynq_qspi_write(xqspi, ZYNQ_QSPI_LINEAR_CFG_OFFSET, config_reg);
}
/* Ground the line to assert the CS */
config_reg = zynq_qspi_read(xqspi, ZYNQ_QSPI_CONFIG_OFFSET);
if (assert)
config_reg &= ~ZYNQ_QSPI_CONFIG_PCS;
else
config_reg |= ZYNQ_QSPI_CONFIG_PCS;
zynq_qspi_write(xqspi, ZYNQ_QSPI_CONFIG_OFFSET, config_reg);
}
/**
* zynq_qspi_config_op - Configure QSPI controller for specified transfer
* @xqspi: Pointer to the zynq_qspi structure
* @spi: Pointer to the spi_device structure
*
* Sets the operational mode of QSPI controller for the next QSPI transfer and
* sets the requested clock frequency.
*
* Return: 0 on success and -EINVAL on invalid input parameter
*
 * Note: If the requested frequency cannot be matched exactly with the
 * available prescaler values, the driver sets the highest clock frequency
 * that is still lower than the requested frequency for the transfer. If the
 * requested frequency is above or below the range supported by the QSPI
 * controller, the driver sets the highest or lowest frequency supported by
 * the controller, respectively.
*/
static int zynq_qspi_config_op(struct zynq_qspi *xqspi, struct spi_device *spi)
{
u32 config_reg, baud_rate_val = 0;
/*
* Set the clock frequency
* The baud rate divisor is not a direct mapping to the value written
* into the configuration register (config_reg[5:3])
* i.e. 000 - divide by 2
* 001 - divide by 4
* ----------------
* 111 - divide by 256
*/
while ((baud_rate_val < ZYNQ_QSPI_CONFIG_BAUD_DIV_MAX) &&
(clk_get_rate(xqspi->refclk) / (2 << baud_rate_val)) >
spi->max_speed_hz)
baud_rate_val++;
config_reg = zynq_qspi_read(xqspi, ZYNQ_QSPI_CONFIG_OFFSET);
/* Set the QSPI clock phase and clock polarity */
config_reg &= (~ZYNQ_QSPI_CONFIG_CPHA_MASK) &
(~ZYNQ_QSPI_CONFIG_CPOL_MASK);
if (spi->mode & SPI_CPHA)
config_reg |= ZYNQ_QSPI_CONFIG_CPHA_MASK;
if (spi->mode & SPI_CPOL)
config_reg |= ZYNQ_QSPI_CONFIG_CPOL_MASK;
config_reg &= ~ZYNQ_QSPI_CONFIG_BDRATE_MASK;
config_reg |= (baud_rate_val << ZYNQ_QSPI_CONFIG_BAUD_DIV_SHIFT);
zynq_qspi_write(xqspi, ZYNQ_QSPI_CONFIG_OFFSET, config_reg);
return 0;
}
/**
* zynq_qspi_setup_op - Configure the QSPI controller
* @spi: Pointer to the spi_device structure
*
 * Sets the operational mode of the QSPI controller for the next QSPI transfer
 * and the baud rate divisor used to generate the requested QSPI clock.
*
* Return: 0 on success and error value on failure
*/
static int zynq_qspi_setup_op(struct spi_device *spi)
{
struct spi_controller *ctlr = spi->master;
struct zynq_qspi *qspi = spi_controller_get_devdata(ctlr);
if (ctlr->busy)
return -EBUSY;
clk_enable(qspi->refclk);
clk_enable(qspi->pclk);
zynq_qspi_write(qspi, ZYNQ_QSPI_ENABLE_OFFSET,
ZYNQ_QSPI_ENABLE_ENABLE_MASK);
return 0;
}
/**
* zynq_qspi_write_op - Fills the TX FIFO with as many bytes as possible
* @xqspi: Pointer to the zynq_qspi structure
* @txcount: Maximum number of words to write
* @txempty: Indicates that TxFIFO is empty
*/
static void zynq_qspi_write_op(struct zynq_qspi *xqspi, int txcount,
bool txempty)
{
int count, len, k;
len = xqspi->tx_bytes;
if (len && len < 4) {
/*
* We must empty the TxFIFO between accesses to TXD0,
* TXD1, TXD2, TXD3.
*/
if (txempty)
zynq_qspi_txfifo_op(xqspi, len);
return;
}
count = len / 4;
if (count > txcount)
count = txcount;
if (xqspi->txbuf) {
iowrite32_rep(xqspi->regs + ZYNQ_QSPI_TXD_00_00_OFFSET,
xqspi->txbuf, count);
xqspi->txbuf += count * 4;
} else {
for (k = 0; k < count; k++)
writel_relaxed(0, xqspi->regs +
ZYNQ_QSPI_TXD_00_00_OFFSET);
}
xqspi->tx_bytes -= count * 4;
}
/**
* zynq_qspi_read_op - Drains the RX FIFO by as many bytes as possible
* @xqspi: Pointer to the zynq_qspi structure
* @rxcount: Maximum number of words to read
*/
static void zynq_qspi_read_op(struct zynq_qspi *xqspi, int rxcount)
{
int count, len, k;
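/* Bytes already queued for transmit but not yet read back from the RX FIFO */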
len = xqspi->rx_bytes - xqspi->tx_bytes;
count = len / 4;
if (count > rxcount)
count = rxcount;
if (xqspi->rxbuf) {
ioread32_rep(xqspi->regs + ZYNQ_QSPI_RXD_OFFSET,
xqspi->rxbuf, count);
xqspi->rxbuf += count * 4;
} else {
for (k = 0; k < count; k++)
readl_relaxed(xqspi->regs + ZYNQ_QSPI_RXD_OFFSET);
}
xqspi->rx_bytes -= count * 4;
len -= count * 4;
if (len && len < 4 && count < rxcount)
zynq_qspi_rxfifo_op(xqspi, len);
}
/**
* zynq_qspi_irq - Interrupt service routine of the QSPI controller
* @irq: IRQ number
* @dev_id: Pointer to the xqspi structure
*
* This function handles TX empty only.
* On TX empty interrupt this function reads the received data from RX FIFO and
* fills the TX FIFO if there is any data remaining to be transferred.
*
* Return: IRQ_HANDLED when interrupt is handled; IRQ_NONE otherwise.
*/
static irqreturn_t zynq_qspi_irq(int irq, void *dev_id)
{
u32 intr_status;
bool txempty;
struct zynq_qspi *xqspi = (struct zynq_qspi *)dev_id;
intr_status = zynq_qspi_read(xqspi, ZYNQ_QSPI_STATUS_OFFSET);
zynq_qspi_write(xqspi, ZYNQ_QSPI_STATUS_OFFSET, intr_status);
if ((intr_status & ZYNQ_QSPI_IXR_TXNFULL_MASK) ||
(intr_status & ZYNQ_QSPI_IXR_RXNEMTY_MASK)) {
/*
* This bit is set when Tx FIFO has < THRESHOLD entries.
* We have the THRESHOLD value set to 1,
* so this bit indicates Tx FIFO is empty.
*/
txempty = !!(intr_status & ZYNQ_QSPI_IXR_TXNFULL_MASK);
/* Read out the data from the RX FIFO */
zynq_qspi_read_op(xqspi, ZYNQ_QSPI_RX_THRESHOLD);
if (xqspi->tx_bytes) {
/* There is more data to send */
zynq_qspi_write_op(xqspi, ZYNQ_QSPI_RX_THRESHOLD,
txempty);
} else {
/*
 * Signal completion only when both transmit and
 * receive have finished.
*/
if (!xqspi->rx_bytes) {
zynq_qspi_write(xqspi,
ZYNQ_QSPI_IDIS_OFFSET,
ZYNQ_QSPI_IXR_RXTX_MASK);
complete(&xqspi->data_completion);
}
}
return IRQ_HANDLED;
}
return IRQ_NONE;
}
/**
* zynq_qspi_exec_mem_op() - Initiates the QSPI transfer
* @mem: the SPI memory
* @op: the memory operation to execute
*
* Executes a memory operation.
*
* This function first selects the chip and starts the memory operation.
*
* Return: 0 in case of success, a negative error code otherwise.
*/
static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
struct zynq_qspi *xqspi = spi_controller_get_devdata(mem->spi->master);
int err = 0, i;
u8 *tmpbuf;
dev_dbg(xqspi->dev, "cmd:%#x mode:%d.%d.%d.%d\n",
op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
op->dummy.buswidth, op->data.buswidth);
zynq_qspi_chipselect(mem->spi, true);
zynq_qspi_config_op(xqspi, mem->spi);
if (op->cmd.opcode) {
reinit_completion(&xqspi->data_completion);
xqspi->txbuf = (u8 *)&op->cmd.opcode;
xqspi->rxbuf = NULL;
xqspi->tx_bytes = op->cmd.nbytes;
xqspi->rx_bytes = op->cmd.nbytes;
zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
ZYNQ_QSPI_IXR_RXTX_MASK);
if (!wait_for_completion_timeout(&xqspi->data_completion,
msecs_to_jiffies(1000)))
err = -ETIMEDOUT;
}
if (op->addr.nbytes) {
for (i = 0; i < op->addr.nbytes; i++) {
xqspi->txbuf[i] = op->addr.val >>
(8 * (op->addr.nbytes - i - 1));
}
reinit_completion(&xqspi->data_completion);
xqspi->rxbuf = NULL;
xqspi->tx_bytes = op->addr.nbytes;
xqspi->rx_bytes = op->addr.nbytes;
zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
ZYNQ_QSPI_IXR_RXTX_MASK);
if (!wait_for_completion_timeout(&xqspi->data_completion,
msecs_to_jiffies(1000)))
err = -ETIMEDOUT;
}
if (op->dummy.nbytes) {
tmpbuf = kzalloc(op->dummy.nbytes, GFP_KERNEL);
if (!tmpbuf)
return -ENOMEM;
memset(tmpbuf, 0xff, op->dummy.nbytes);
reinit_completion(&xqspi->data_completion);
xqspi->txbuf = tmpbuf;
xqspi->rxbuf = NULL;
xqspi->tx_bytes = op->dummy.nbytes;
xqspi->rx_bytes = op->dummy.nbytes;
zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
ZYNQ_QSPI_IXR_RXTX_MASK);
if (!wait_for_completion_timeout(&xqspi->data_completion,
msecs_to_jiffies(1000)))
err = -ETIMEDOUT;
kfree(tmpbuf);
}
if (op->data.nbytes) {
reinit_completion(&xqspi->data_completion);
if (op->data.dir == SPI_MEM_DATA_OUT) {
xqspi->txbuf = (u8 *)op->data.buf.out;
xqspi->tx_bytes = op->data.nbytes;
xqspi->rxbuf = NULL;
xqspi->rx_bytes = op->data.nbytes;
} else {
xqspi->txbuf = NULL;
xqspi->rxbuf = (u8 *)op->data.buf.in;
xqspi->rx_bytes = op->data.nbytes;
xqspi->tx_bytes = op->data.nbytes;
}
zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
ZYNQ_QSPI_IXR_RXTX_MASK);
if (!wait_for_completion_timeout(&xqspi->data_completion,
msecs_to_jiffies(1000)))
err = -ETIMEDOUT;
}
zynq_qspi_chipselect(mem->spi, false);
return err;
}
static const struct spi_controller_mem_ops zynq_qspi_mem_ops = {
.supports_op = zynq_qspi_supports_op,
.exec_op = zynq_qspi_exec_mem_op,
};
/**
* zynq_qspi_probe - Probe method for the QSPI driver
* @pdev: Pointer to the platform_device structure
*
* This function initializes the driver data structures and the hardware.
*
* Return: 0 on success and error value on failure
*/
static int zynq_qspi_probe(struct platform_device *pdev)
{
int ret = 0;
struct spi_controller *ctlr;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct zynq_qspi *xqspi;
u32 num_cs;
ctlr = spi_alloc_master(&pdev->dev, sizeof(*xqspi));
if (!ctlr)
return -ENOMEM;
xqspi = spi_controller_get_devdata(ctlr);
xqspi->dev = dev;
platform_set_drvdata(pdev, xqspi);
xqspi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xqspi->regs)) {
ret = PTR_ERR(xqspi->regs);
goto remove_master;
}
xqspi->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(xqspi->pclk)) {
dev_err(&pdev->dev, "pclk clock not found.\n");
ret = PTR_ERR(xqspi->pclk);
goto remove_master;
}
init_completion(&xqspi->data_completion);
xqspi->refclk = devm_clk_get(&pdev->dev, "ref_clk");
if (IS_ERR(xqspi->refclk)) {
dev_err(&pdev->dev, "ref_clk clock not found.\n");
ret = PTR_ERR(xqspi->refclk);
goto remove_master;
}
ret = clk_prepare_enable(xqspi->pclk);
if (ret) {
dev_err(&pdev->dev, "Unable to enable APB clock.\n");
goto remove_master;
}
ret = clk_prepare_enable(xqspi->refclk);
if (ret) {
dev_err(&pdev->dev, "Unable to enable device clock.\n");
goto clk_dis_pclk;
}
xqspi->irq = platform_get_irq(pdev, 0);
if (xqspi->irq < 0) {
ret = xqspi->irq;
goto clk_dis_all;
}
ret = devm_request_irq(&pdev->dev, xqspi->irq, zynq_qspi_irq,
0, pdev->name, xqspi);
if (ret != 0) {
ret = -ENXIO;
dev_err(&pdev->dev, "request_irq failed\n");
goto clk_dis_all;
}
ret = of_property_read_u32(np, "num-cs",
&num_cs);
if (ret < 0) {
ctlr->num_chipselect = 1;
} else if (num_cs > ZYNQ_QSPI_MAX_NUM_CS) {
ret = -EINVAL;
dev_err(&pdev->dev, "only 2 chip selects are available\n");
goto clk_dis_all;
} else {
ctlr->num_chipselect = num_cs;
}
ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD |
SPI_TX_DUAL | SPI_TX_QUAD;
ctlr->mem_ops = &zynq_qspi_mem_ops;
ctlr->setup = zynq_qspi_setup_op;
ctlr->max_speed_hz = clk_get_rate(xqspi->refclk) / 2;
ctlr->dev.of_node = np;
/* QSPI controller initializations */
zynq_qspi_init_hw(xqspi, ctlr->num_chipselect);
ret = devm_spi_register_controller(&pdev->dev, ctlr);
if (ret) {
dev_err(&pdev->dev, "spi_register_master failed\n");
goto clk_dis_all;
}
return ret;
clk_dis_all:
clk_disable_unprepare(xqspi->refclk);
clk_dis_pclk:
clk_disable_unprepare(xqspi->pclk);
remove_master:
spi_controller_put(ctlr);
return ret;
}
/**
* zynq_qspi_remove - Remove method for the QSPI driver
* @pdev: Pointer to the platform_device structure
*
* This function is called if a device is physically removed from the system or
* if the driver module is being unloaded. It frees all resources allocated to
* the device.
*/
static void zynq_qspi_remove(struct platform_device *pdev)
{
struct zynq_qspi *xqspi = platform_get_drvdata(pdev);
zynq_qspi_write(xqspi, ZYNQ_QSPI_ENABLE_OFFSET, 0);
clk_disable_unprepare(xqspi->refclk);
clk_disable_unprepare(xqspi->pclk);
}
static const struct of_device_id zynq_qspi_of_match[] = {
{ .compatible = "xlnx,zynq-qspi-1.0", },
{ /* end of table */ }
};
MODULE_DEVICE_TABLE(of, zynq_qspi_of_match);
/*
* zynq_qspi_driver - This structure defines the QSPI platform driver
*/
static struct platform_driver zynq_qspi_driver = {
.probe = zynq_qspi_probe,
.remove_new = zynq_qspi_remove,
.driver = {
.name = "zynq-qspi",
.of_match_table = zynq_qspi_of_match,
},
};
module_platform_driver(zynq_qspi_driver);
MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx Zynq QSPI driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-zynq-qspi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
// SPI init/core code
//
// Copyright (C) 2005 David Brownell
// Copyright (C) 2008 Secret Lab Technologies Ltd.
#include <linux/acpi.h>
#include <linux/cache.h>
#include <linux/clk/clk-conf.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/sched/rt.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <uapi/linux/sched/types.h>
#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);
#include "internals.h"
static DEFINE_IDR(spi_master_idr);
static void spidev_release(struct device *dev)
{
struct spi_device *spi = to_spi_device(dev);
spi_controller_put(spi->controller);
kfree(spi->driver_override);
free_percpu(spi->pcpu_statistics);
kfree(spi);
}
static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
const struct spi_device *spi = to_spi_device(dev);
int len;
len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
if (len != -ENODEV)
return len;
return sysfs_emit(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);
static ssize_t driver_override_store(struct device *dev,
struct device_attribute *a,
const char *buf, size_t count)
{
struct spi_device *spi = to_spi_device(dev);
int ret;
ret = driver_set_override(dev, &spi->driver_override, buf, count);
if (ret)
return ret;
return count;
}
static ssize_t driver_override_show(struct device *dev,
struct device_attribute *a, char *buf)
{
const struct spi_device *spi = to_spi_device(dev);
ssize_t len;
device_lock(dev);
len = sysfs_emit(buf, "%s\n", spi->driver_override ? : "");
device_unlock(dev);
return len;
}
static DEVICE_ATTR_RW(driver_override);
static struct spi_statistics __percpu *spi_alloc_pcpu_stats(struct device *dev)
{
struct spi_statistics __percpu *pcpu_stats;
if (dev)
pcpu_stats = devm_alloc_percpu(dev, struct spi_statistics);
else
pcpu_stats = alloc_percpu_gfp(struct spi_statistics, GFP_KERNEL);
if (pcpu_stats) {
int cpu;
for_each_possible_cpu(cpu) {
struct spi_statistics *stat;
stat = per_cpu_ptr(pcpu_stats, cpu);
u64_stats_init(&stat->syncp);
}
}
return pcpu_stats;
}
static ssize_t spi_emit_pcpu_stats(struct spi_statistics __percpu *stat,
char *buf, size_t offset)
{
u64 val = 0;
int i;
for_each_possible_cpu(i) {
const struct spi_statistics *pcpu_stats;
u64_stats_t *field;
unsigned int start;
u64 inc;
pcpu_stats = per_cpu_ptr(stat, i);
field = (void *)pcpu_stats + offset;
do {
start = u64_stats_fetch_begin(&pcpu_stats->syncp);
inc = u64_stats_read(field);
} while (u64_stats_fetch_retry(&pcpu_stats->syncp, start));
val += inc;
}
return sysfs_emit(buf, "%llu\n", val);
}
#define SPI_STATISTICS_ATTRS(field, file) \
static ssize_t spi_controller_##field##_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
struct spi_controller *ctlr = container_of(dev, \
struct spi_controller, dev); \
return spi_statistics_##field##_show(ctlr->pcpu_statistics, buf); \
} \
static struct device_attribute dev_attr_spi_controller_##field = { \
.attr = { .name = file, .mode = 0444 }, \
.show = spi_controller_##field##_show, \
}; \
static ssize_t spi_device_##field##_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
struct spi_device *spi = to_spi_device(dev); \
return spi_statistics_##field##_show(spi->pcpu_statistics, buf); \
} \
static struct device_attribute dev_attr_spi_device_##field = { \
.attr = { .name = file, .mode = 0444 }, \
.show = spi_device_##field##_show, \
}
#define SPI_STATISTICS_SHOW_NAME(name, file, field) \
static ssize_t spi_statistics_##name##_show(struct spi_statistics __percpu *stat, \
char *buf) \
{ \
return spi_emit_pcpu_stats(stat, buf, \
offsetof(struct spi_statistics, field)); \
} \
SPI_STATISTICS_ATTRS(name, file)
#define SPI_STATISTICS_SHOW(field) \
SPI_STATISTICS_SHOW_NAME(field, __stringify(field), \
field)
SPI_STATISTICS_SHOW(messages);
SPI_STATISTICS_SHOW(transfers);
SPI_STATISTICS_SHOW(errors);
SPI_STATISTICS_SHOW(timedout);
SPI_STATISTICS_SHOW(spi_sync);
SPI_STATISTICS_SHOW(spi_sync_immediate);
SPI_STATISTICS_SHOW(spi_async);
SPI_STATISTICS_SHOW(bytes);
SPI_STATISTICS_SHOW(bytes_rx);
SPI_STATISTICS_SHOW(bytes_tx);
#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number) \
SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index, \
"transfer_bytes_histo_" number, \
transfer_bytes_histo[index])
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");
SPI_STATISTICS_SHOW(transfers_split_maxsize);
static struct attribute *spi_dev_attrs[] = {
&dev_attr_modalias.attr,
&dev_attr_driver_override.attr,
NULL,
};
static const struct attribute_group spi_dev_group = {
.attrs = spi_dev_attrs,
};
static struct attribute *spi_device_statistics_attrs[] = {
&dev_attr_spi_device_messages.attr,
&dev_attr_spi_device_transfers.attr,
&dev_attr_spi_device_errors.attr,
&dev_attr_spi_device_timedout.attr,
&dev_attr_spi_device_spi_sync.attr,
&dev_attr_spi_device_spi_sync_immediate.attr,
&dev_attr_spi_device_spi_async.attr,
&dev_attr_spi_device_bytes.attr,
&dev_attr_spi_device_bytes_rx.attr,
&dev_attr_spi_device_bytes_tx.attr,
&dev_attr_spi_device_transfer_bytes_histo0.attr,
&dev_attr_spi_device_transfer_bytes_histo1.attr,
&dev_attr_spi_device_transfer_bytes_histo2.attr,
&dev_attr_spi_device_transfer_bytes_histo3.attr,
&dev_attr_spi_device_transfer_bytes_histo4.attr,
&dev_attr_spi_device_transfer_bytes_histo5.attr,
&dev_attr_spi_device_transfer_bytes_histo6.attr,
&dev_attr_spi_device_transfer_bytes_histo7.attr,
&dev_attr_spi_device_transfer_bytes_histo8.attr,
&dev_attr_spi_device_transfer_bytes_histo9.attr,
&dev_attr_spi_device_transfer_bytes_histo10.attr,
&dev_attr_spi_device_transfer_bytes_histo11.attr,
&dev_attr_spi_device_transfer_bytes_histo12.attr,
&dev_attr_spi_device_transfer_bytes_histo13.attr,
&dev_attr_spi_device_transfer_bytes_histo14.attr,
&dev_attr_spi_device_transfer_bytes_histo15.attr,
&dev_attr_spi_device_transfer_bytes_histo16.attr,
&dev_attr_spi_device_transfers_split_maxsize.attr,
NULL,
};
static const struct attribute_group spi_device_statistics_group = {
.name = "statistics",
.attrs = spi_device_statistics_attrs,
};
static const struct attribute_group *spi_dev_groups[] = {
&spi_dev_group,
&spi_device_statistics_group,
NULL,
};
static struct attribute *spi_controller_statistics_attrs[] = {
&dev_attr_spi_controller_messages.attr,
&dev_attr_spi_controller_transfers.attr,
&dev_attr_spi_controller_errors.attr,
&dev_attr_spi_controller_timedout.attr,
&dev_attr_spi_controller_spi_sync.attr,
&dev_attr_spi_controller_spi_sync_immediate.attr,
&dev_attr_spi_controller_spi_async.attr,
&dev_attr_spi_controller_bytes.attr,
&dev_attr_spi_controller_bytes_rx.attr,
&dev_attr_spi_controller_bytes_tx.attr,
&dev_attr_spi_controller_transfer_bytes_histo0.attr,
&dev_attr_spi_controller_transfer_bytes_histo1.attr,
&dev_attr_spi_controller_transfer_bytes_histo2.attr,
&dev_attr_spi_controller_transfer_bytes_histo3.attr,
&dev_attr_spi_controller_transfer_bytes_histo4.attr,
&dev_attr_spi_controller_transfer_bytes_histo5.attr,
&dev_attr_spi_controller_transfer_bytes_histo6.attr,
&dev_attr_spi_controller_transfer_bytes_histo7.attr,
&dev_attr_spi_controller_transfer_bytes_histo8.attr,
&dev_attr_spi_controller_transfer_bytes_histo9.attr,
&dev_attr_spi_controller_transfer_bytes_histo10.attr,
&dev_attr_spi_controller_transfer_bytes_histo11.attr,
&dev_attr_spi_controller_transfer_bytes_histo12.attr,
&dev_attr_spi_controller_transfer_bytes_histo13.attr,
&dev_attr_spi_controller_transfer_bytes_histo14.attr,
&dev_attr_spi_controller_transfer_bytes_histo15.attr,
&dev_attr_spi_controller_transfer_bytes_histo16.attr,
&dev_attr_spi_controller_transfers_split_maxsize.attr,
NULL,
};
static const struct attribute_group spi_controller_statistics_group = {
.name = "statistics",
.attrs = spi_controller_statistics_attrs,
};
static const struct attribute_group *spi_master_groups[] = {
&spi_controller_statistics_group,
NULL,
};
static void spi_statistics_add_transfer_stats(struct spi_statistics __percpu *pcpu_stats,
struct spi_transfer *xfer,
struct spi_controller *ctlr)
{
int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
struct spi_statistics *stats;
if (l2len < 0)
l2len = 0;
get_cpu();
stats = this_cpu_ptr(pcpu_stats);
u64_stats_update_begin(&stats->syncp);
u64_stats_inc(&stats->transfers);
u64_stats_inc(&stats->transfer_bytes_histo[l2len]);
u64_stats_add(&stats->bytes, xfer->len);
if ((xfer->tx_buf) &&
(xfer->tx_buf != ctlr->dummy_tx))
u64_stats_add(&stats->bytes_tx, xfer->len);
if ((xfer->rx_buf) &&
(xfer->rx_buf != ctlr->dummy_rx))
u64_stats_add(&stats->bytes_rx, xfer->len);
u64_stats_update_end(&stats->syncp);
put_cpu();
}
/*
* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
* and the sysfs version makes coldplug work too.
*/
static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, const char *name)
{
while (id->name[0]) {
if (!strcmp(name, id->name))
return id;
id++;
}
return NULL;
}
const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);
return spi_match_id(sdrv->id_table, sdev->modalias);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);
const void *spi_get_device_match_data(const struct spi_device *sdev)
{
const void *match;
match = device_get_match_data(&sdev->dev);
if (match)
return match;
return (const void *)spi_get_device_id(sdev)->driver_data;
}
EXPORT_SYMBOL_GPL(spi_get_device_match_data);
static int spi_match_device(struct device *dev, struct device_driver *drv)
{
const struct spi_device *spi = to_spi_device(dev);
const struct spi_driver *sdrv = to_spi_driver(drv);
/* Check override first, and if set, only use the named driver */
if (spi->driver_override)
return strcmp(spi->driver_override, drv->name) == 0;
/* Attempt an OF style match */
if (of_driver_match_device(dev, drv))
return 1;
/* Then try ACPI */
if (acpi_driver_match_device(dev, drv))
return 1;
if (sdrv->id_table)
return !!spi_match_id(sdrv->id_table, spi->modalias);
return strcmp(spi->modalias, drv->name) == 0;
}
static int spi_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const struct spi_device *spi = to_spi_device(dev);
int rc;
rc = acpi_device_uevent_modalias(dev, env);
if (rc != -ENODEV)
return rc;
return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
}
static int spi_probe(struct device *dev)
{
const struct spi_driver *sdrv = to_spi_driver(dev->driver);
struct spi_device *spi = to_spi_device(dev);
int ret;
ret = of_clk_set_defaults(dev->of_node, false);
if (ret)
return ret;
if (dev->of_node) {
spi->irq = of_irq_get(dev->of_node, 0);
if (spi->irq == -EPROBE_DEFER)
return -EPROBE_DEFER;
if (spi->irq < 0)
spi->irq = 0;
}
ret = dev_pm_domain_attach(dev, true);
if (ret)
return ret;
if (sdrv->probe) {
ret = sdrv->probe(spi);
if (ret)
dev_pm_domain_detach(dev, true);
}
return ret;
}
static void spi_remove(struct device *dev)
{
const struct spi_driver *sdrv = to_spi_driver(dev->driver);
if (sdrv->remove)
sdrv->remove(to_spi_device(dev));
dev_pm_domain_detach(dev, true);
}
static void spi_shutdown(struct device *dev)
{
if (dev->driver) {
const struct spi_driver *sdrv = to_spi_driver(dev->driver);
if (sdrv->shutdown)
sdrv->shutdown(to_spi_device(dev));
}
}
struct bus_type spi_bus_type = {
.name = "spi",
.dev_groups = spi_dev_groups,
.match = spi_match_device,
.uevent = spi_uevent,
.probe = spi_probe,
.remove = spi_remove,
.shutdown = spi_shutdown,
};
EXPORT_SYMBOL_GPL(spi_bus_type);
/**
* __spi_register_driver - register a SPI driver
* @owner: owner module of the driver to register
* @sdrv: the driver to register
* Context: can sleep
*
* Return: zero on success, else a negative error code.
*/
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
sdrv->driver.owner = owner;
sdrv->driver.bus = &spi_bus_type;
/*
* For Really Good Reasons we use spi: modaliases not of:
* modaliases for DT so module autoloading won't work if we
* don't have a spi_device_id as well as a compatible string.
*/
if (sdrv->driver.of_match_table) {
const struct of_device_id *of_id;
for (of_id = sdrv->driver.of_match_table; of_id->compatible[0];
of_id++) {
const char *of_name;
/* Strip off any vendor prefix */
of_name = strnchr(of_id->compatible,
sizeof(of_id->compatible), ',');
if (of_name)
of_name++;
else
of_name = of_id->compatible;
if (sdrv->id_table) {
const struct spi_device_id *spi_id;
spi_id = spi_match_id(sdrv->id_table, of_name);
if (spi_id)
continue;
} else {
if (strcmp(sdrv->driver.name, of_name) == 0)
continue;
}
pr_warn("SPI driver %s has no spi_device_id for %s\n",
sdrv->driver.name, of_id->compatible);
}
}
return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
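/*
 * Illustrative sketch (not part of the original file; all "foo"/"acme"
 * names are hypothetical): a client driver registration showing why the
 * warning above matters.  Module autoloading relies on spi: modaliases,
 * so each OF compatible wants a matching spi_device_id entry with the
 * vendor prefix stripped:
 *
 *	static const struct of_device_id foo_of_match[] = {
 *		{ .compatible = "acme,foo" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(of, foo_of_match);
 *
 *	static const struct spi_device_id foo_spi_ids[] = {
 *		{ "foo" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(spi, foo_spi_ids);
 *
 *	static struct spi_driver foo_spi_driver = {
 *		.driver = {
 *			.name		= "foo",
 *			.of_match_table	= foo_of_match,
 *		},
 *		.id_table = foo_spi_ids,
 *		.probe	  = foo_probe,
 *		.remove	  = foo_remove,
 *	};
 *	module_spi_driver(foo_spi_driver);
 */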
/*-------------------------------------------------------------------------*/
/*
* SPI devices should normally not be created by SPI device drivers; that
* would make them board-specific. Similarly with SPI controller drivers.
 * Device registration normally goes into files like arch/.../mach.../board-YYY.c
* with other readonly (flashable) information about mainboard devices.
*/
struct boardinfo {
struct list_head list;
struct spi_board_info board_info;
};
static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);
/*
 * Used to protect add/del operations on the board_info list and
 * the spi_controller list and their matching process, and also
 * to protect the struct idr objects.
*/
static DEFINE_MUTEX(board_lock);
/**
* spi_alloc_device - Allocate a new SPI device
* @ctlr: Controller to which device is connected
* Context: can sleep
*
* Allows a driver to allocate and initialize a spi_device without
* registering it immediately. This allows a driver to directly
* fill the spi_device with device parameters before calling
* spi_add_device() on it.
*
* Caller is responsible to call spi_add_device() on the returned
* spi_device structure to add it to the SPI controller. If the caller
* needs to discard the spi_device without adding it, then it should
* call spi_dev_put() on it.
*
* Return: a pointer to the new device, or NULL.
*/
struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
struct spi_device *spi;
if (!spi_controller_get(ctlr))
return NULL;
spi = kzalloc(sizeof(*spi), GFP_KERNEL);
if (!spi) {
spi_controller_put(ctlr);
return NULL;
}
spi->pcpu_statistics = spi_alloc_pcpu_stats(NULL);
if (!spi->pcpu_statistics) {
kfree(spi);
spi_controller_put(ctlr);
return NULL;
}
spi->master = spi->controller = ctlr;
spi->dev.parent = &ctlr->dev;
spi->dev.bus = &spi_bus_type;
spi->dev.release = spidev_release;
spi->mode = ctlr->buswidth_override_bits;
device_initialize(&spi->dev);
return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);
static void spi_dev_set_name(struct spi_device *spi)
{
struct acpi_device *adev = ACPI_COMPANION(&spi->dev);
if (adev) {
dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
return;
}
dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
spi_get_chipselect(spi, 0));
}
static int spi_dev_check(struct device *dev, void *data)
{
struct spi_device *spi = to_spi_device(dev);
struct spi_device *new_spi = data;
if (spi->controller == new_spi->controller &&
spi_get_chipselect(spi, 0) == spi_get_chipselect(new_spi, 0))
return -EBUSY;
return 0;
}
static void spi_cleanup(struct spi_device *spi)
{
if (spi->controller->cleanup)
spi->controller->cleanup(spi);
}
static int __spi_add_device(struct spi_device *spi)
{
struct spi_controller *ctlr = spi->controller;
struct device *dev = ctlr->dev.parent;
int status;
/* Chipselects are numbered 0..max; validate. */
if (spi_get_chipselect(spi, 0) >= ctlr->num_chipselect) {
dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, 0),
ctlr->num_chipselect);
return -EINVAL;
}
/* Set the bus ID string */
spi_dev_set_name(spi);
/*
* We need to make sure there's no other device with this
* chipselect **BEFORE** we call setup(), else we'll trash
* its configuration.
*/
status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
if (status) {
dev_err(dev, "chipselect %d already in use\n",
spi_get_chipselect(spi, 0));
return status;
}
/* Controller may unregister concurrently */
if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
!device_is_registered(&ctlr->dev)) {
return -ENODEV;
}
if (ctlr->cs_gpiods)
spi_set_csgpiod(spi, 0, ctlr->cs_gpiods[spi_get_chipselect(spi, 0)]);
/*
* Drivers may modify this initial i/o setup, but will
* normally rely on the device being setup. Devices
* using SPI_CS_HIGH can't coexist well otherwise...
*/
status = spi_setup(spi);
if (status < 0) {
dev_err(dev, "can't setup %s, status %d\n",
dev_name(&spi->dev), status);
return status;
}
/* Device may be bound to an active driver when this returns */
status = device_add(&spi->dev);
if (status < 0) {
dev_err(dev, "can't add %s, status %d\n",
dev_name(&spi->dev), status);
spi_cleanup(spi);
} else {
dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
}
return status;
}
/**
* spi_add_device - Add spi_device allocated with spi_alloc_device
* @spi: spi_device to register
*
* Companion function to spi_alloc_device. Devices allocated with
* spi_alloc_device can be added onto the SPI bus with this function.
*
* Return: 0 on success; negative errno on failure
*/
int spi_add_device(struct spi_device *spi)
{
struct spi_controller *ctlr = spi->controller;
int status;
mutex_lock(&ctlr->add_lock);
status = __spi_add_device(spi);
mutex_unlock(&ctlr->add_lock);
return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
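/*
 * Illustrative sketch (not part of the original file; the modalias and
 * numbers are hypothetical): typical pairing of spi_alloc_device() and
 * spi_add_device() in an adapter driver that learns about a child
 * device out-of-band:
 *
 *	struct spi_device *spi;
 *	int ret;
 *
 *	spi = spi_alloc_device(ctlr);
 *	if (!spi)
 *		return -ENOMEM;
 *	strscpy(spi->modalias, "foo-sensor", sizeof(spi->modalias));
 *	spi_set_chipselect(spi, 0, 2);
 *	spi->max_speed_hz = 1000000;
 *	spi->mode = SPI_MODE_3;
 *	ret = spi_add_device(spi);
 *	if (ret)
 *		spi_dev_put(spi);
 *
 * On failure, spi_dev_put() discards the device without registering it,
 * as described in the spi_alloc_device() kernel-doc above.
 */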
/**
* spi_new_device - instantiate one new SPI device
* @ctlr: Controller to which device is connected
* @chip: Describes the SPI device
* Context: can sleep
*
* On typical mainboards, this is purely internal; and it's not needed
* after board init creates the hard-wired devices. Some development
* platforms may not be able to use spi_register_board_info though, and
* this is exported so that for example a USB or parport based adapter
* driver could add devices (which it would learn about out-of-band).
*
* Return: the new device, or NULL.
*/
struct spi_device *spi_new_device(struct spi_controller *ctlr,
struct spi_board_info *chip)
{
struct spi_device *proxy;
int status;
/*
* NOTE: caller did any chip->bus_num checks necessary.
*
* Also, unless we change the return value convention to use
* error-or-pointer (not NULL-or-pointer), troubleshootability
* suggests syslogged diagnostics are best here (ugh).
*/
proxy = spi_alloc_device(ctlr);
if (!proxy)
return NULL;
WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
spi_set_chipselect(proxy, 0, chip->chip_select);
proxy->max_speed_hz = chip->max_speed_hz;
proxy->mode = chip->mode;
proxy->irq = chip->irq;
strscpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
proxy->dev.platform_data = (void *) chip->platform_data;
proxy->controller_data = chip->controller_data;
proxy->controller_state = NULL;
if (chip->swnode) {
status = device_add_software_node(&proxy->dev, chip->swnode);
if (status) {
dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n",
chip->modalias, status);
goto err_dev_put;
}
}
status = spi_add_device(proxy);
if (status < 0)
goto err_dev_put;
return proxy;
err_dev_put:
device_remove_software_node(&proxy->dev);
spi_dev_put(proxy);
return NULL;
}
EXPORT_SYMBOL_GPL(spi_new_device);
/**
* spi_unregister_device - unregister a single SPI device
* @spi: spi_device to unregister
*
* Start making the passed SPI device vanish. Normally this would be handled
* by spi_unregister_controller().
*/
void spi_unregister_device(struct spi_device *spi)
{
if (!spi)
return;
if (spi->dev.of_node) {
of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
of_node_put(spi->dev.of_node);
}
if (ACPI_COMPANION(&spi->dev))
acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
device_remove_software_node(&spi->dev);
device_del(&spi->dev);
spi_cleanup(spi);
put_device(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);
static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
struct spi_board_info *bi)
{
struct spi_device *dev;
if (ctlr->bus_num != bi->bus_num)
return;
dev = spi_new_device(ctlr, bi);
if (!dev)
dev_err(ctlr->dev.parent, "can't create new device for %s\n",
bi->modalias);
}
/**
* spi_register_board_info - register SPI devices for a given board
* @info: array of chip descriptors
* @n: how many descriptors are provided
* Context: can sleep
*
* Board-specific early init code calls this (probably during arch_initcall)
* with segments of the SPI device table. Any device nodes are created later,
* after the relevant parent SPI controller (bus_num) is defined. We keep
* this table of devices forever, so that reloading a controller driver will
* not make Linux forget about these hard-wired devices.
*
* Other code can also call this, e.g. a particular add-on board might provide
* SPI devices through its expansion connector, so code initializing that board
* would naturally declare its SPI devices.
*
* The board info passed can safely be __initdata ... but be careful of
* any embedded pointers (platform_data, etc), they're copied as-is.
*
* Return: zero on success, else a negative error code.
*/
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
struct boardinfo *bi;
int i;
if (!n)
return 0;
bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
if (!bi)
return -ENOMEM;
for (i = 0; i < n; i++, bi++, info++) {
struct spi_controller *ctlr;
memcpy(&bi->board_info, info, sizeof(*info));
mutex_lock(&board_lock);
list_add_tail(&bi->list, &board_list);
list_for_each_entry(ctlr, &spi_controller_list, list)
spi_match_controller_to_boardinfo(ctlr,
&bi->board_info);
mutex_unlock(&board_lock);
}
return 0;
}
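/*
 * Illustrative sketch (not part of the original file; device and bus
 * numbers are hypothetical): board init code usually declares a table
 * like the one below and registers it once, e.g. from an arch_initcall:
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "m25p80",
 *			.max_speed_hz	= 20000000,
 *			.bus_num	= 0,
 *			.chip_select	= 0,
 *			.mode		= SPI_MODE_0,
 *		},
 *	};
 *
 *	spi_register_board_info(board_spi_devices,
 *				ARRAY_SIZE(board_spi_devices));
 */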
/*-------------------------------------------------------------------------*/
/* Core methods for SPI resource management */
/**
* spi_res_alloc - allocate a spi resource that is life-cycle managed
* during the processing of a spi_message while using
* spi_transfer_one
* @spi: the SPI device for which we allocate memory
* @release: the release code to execute for this resource
* @size: size to alloc and return
* @gfp: GFP allocation flags
*
* Return: the pointer to the allocated data
*
* This may get enhanced in the future to allocate from a memory pool
* of the @spi_device or @spi_controller to avoid repeated allocations.
*/
static void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release,
size_t size, gfp_t gfp)
{
struct spi_res *sres;
sres = kzalloc(sizeof(*sres) + size, gfp);
if (!sres)
return NULL;
INIT_LIST_HEAD(&sres->entry);
sres->release = release;
return sres->data;
}
/**
* spi_res_free - free an SPI resource
* @res: pointer to the custom data of a resource
*/
static void spi_res_free(void *res)
{
struct spi_res *sres = container_of(res, struct spi_res, data);
if (!res)
return;
WARN_ON(!list_empty(&sres->entry));
kfree(sres);
}
/**
* spi_res_add - add a spi_res to the spi_message
* @message: the SPI message
* @res: the spi_resource
*/
static void spi_res_add(struct spi_message *message, void *res)
{
struct spi_res *sres = container_of(res, struct spi_res, data);
WARN_ON(!list_empty(&sres->entry));
list_add_tail(&sres->entry, &message->resources);
}
/**
* spi_res_release - release all SPI resources for this message
* @ctlr: the @spi_controller
* @message: the @spi_message
*/
static void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
{
struct spi_res *res, *tmp;
list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
if (res->release)
res->release(ctlr, message, res->data);
list_del(&res->entry);
kfree(res);
}
}
/*-------------------------------------------------------------------------*/
static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
{
bool activate = enable;
/*
* Avoid calling into the driver (or doing delays) if the chip select
* isn't actually changing from the last time this was called.
*/
if (!force && ((enable && spi->controller->last_cs == spi_get_chipselect(spi, 0)) ||
(!enable && spi->controller->last_cs != spi_get_chipselect(spi, 0))) &&
(spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
return;
trace_spi_set_cs(spi, activate);
spi->controller->last_cs = enable ? spi_get_chipselect(spi, 0) : -1;
spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
if ((spi_get_csgpiod(spi, 0) || !spi->controller->set_cs_timing) && !activate)
spi_delay_exec(&spi->cs_hold, NULL);
if (spi->mode & SPI_CS_HIGH)
enable = !enable;
if (spi_get_csgpiod(spi, 0)) {
if (!(spi->mode & SPI_NO_CS)) {
/*
 * Historically ACPI has had no means of expressing the GPIO
 * polarity, so the SPISerialBus() resource defines it on a
 * per-chip basis. In order to avoid a chain of negations, the
 * GPIO polarity is considered to be Active High. Even for the
 * cases when _DSD() is involved (in updated versions of ACPI),
 * the GPIO CS polarity must be defined Active High to avoid
 * ambiguity. That is why we use "enable" here, which takes
 * SPI_CS_HIGH into account.
*/
if (has_acpi_companion(&spi->dev))
gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), !enable);
else
/* Polarity handled by GPIO library */
gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), activate);
}
/* Some SPI masters need both GPIO CS & slave_select */
if ((spi->controller->flags & SPI_CONTROLLER_GPIO_SS) &&
spi->controller->set_cs)
spi->controller->set_cs(spi, !enable);
} else if (spi->controller->set_cs) {
spi->controller->set_cs(spi, !enable);
}
if (spi_get_csgpiod(spi, 0) || !spi->controller->set_cs_timing) {
if (activate)
spi_delay_exec(&spi->cs_setup, NULL);
else
spi_delay_exec(&spi->cs_inactive, NULL);
}
}
#ifdef CONFIG_HAS_DMA
static int spi_map_buf_attrs(struct spi_controller *ctlr, struct device *dev,
struct sg_table *sgt, void *buf, size_t len,
enum dma_data_direction dir, unsigned long attrs)
{
const bool vmalloced_buf = is_vmalloc_addr(buf);
unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
(unsigned long)buf < (PKMAP_BASE +
(LAST_PKMAP * PAGE_SIZE)));
#else
const bool kmap_buf = false;
#endif
int desc_len;
int sgs;
struct page *vm_page;
struct scatterlist *sg;
void *sg_buf;
size_t min;
int i, ret;
if (vmalloced_buf || kmap_buf) {
desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE);
sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
} else if (virt_addr_valid(buf)) {
desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
sgs = DIV_ROUND_UP(len, desc_len);
} else {
return -EINVAL;
}
ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
if (ret != 0)
return ret;
sg = &sgt->sgl[0];
for (i = 0; i < sgs; i++) {
if (vmalloced_buf || kmap_buf) {
/*
* Next scatterlist entry size is the minimum between
* the desc_len and the remaining buffer length that
* fits in a page.
*/
min = min_t(size_t, desc_len,
min_t(size_t, len,
PAGE_SIZE - offset_in_page(buf)));
if (vmalloced_buf)
vm_page = vmalloc_to_page(buf);
else
vm_page = kmap_to_page(buf);
if (!vm_page) {
sg_free_table(sgt);
return -ENOMEM;
}
sg_set_page(sg, vm_page,
min, offset_in_page(buf));
} else {
min = min_t(size_t, len, desc_len);
sg_buf = buf;
sg_set_buf(sg, sg_buf, min);
}
buf += min;
len -= min;
sg = sg_next(sg);
}
ret = dma_map_sgtable(dev, sgt, dir, attrs);
if (ret < 0) {
sg_free_table(sgt);
return ret;
}
return 0;
}
int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
struct sg_table *sgt, void *buf, size_t len,
enum dma_data_direction dir)
{
return spi_map_buf_attrs(ctlr, dev, sgt, buf, len, dir, 0);
}
static void spi_unmap_buf_attrs(struct spi_controller *ctlr,
struct device *dev, struct sg_table *sgt,
enum dma_data_direction dir,
unsigned long attrs)
{
if (sgt->orig_nents) {
dma_unmap_sgtable(dev, sgt, dir, attrs);
sg_free_table(sgt);
sgt->orig_nents = 0;
sgt->nents = 0;
}
}
void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
struct sg_table *sgt, enum dma_data_direction dir)
{
spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0);
}
static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
struct device *tx_dev, *rx_dev;
struct spi_transfer *xfer;
int ret;
if (!ctlr->can_dma)
return 0;
if (ctlr->dma_tx)
tx_dev = ctlr->dma_tx->device->dev;
else if (ctlr->dma_map_dev)
tx_dev = ctlr->dma_map_dev;
else
tx_dev = ctlr->dev.parent;
if (ctlr->dma_rx)
rx_dev = ctlr->dma_rx->device->dev;
else if (ctlr->dma_map_dev)
rx_dev = ctlr->dma_map_dev;
else
rx_dev = ctlr->dev.parent;
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
/* The sync is done before each transfer. */
unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
if (!ctlr->can_dma(ctlr, msg->spi, xfer))
continue;
if (xfer->tx_buf != NULL) {
ret = spi_map_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
(void *)xfer->tx_buf,
xfer->len, DMA_TO_DEVICE,
attrs);
if (ret != 0)
return ret;
}
if (xfer->rx_buf != NULL) {
ret = spi_map_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
xfer->rx_buf, xfer->len,
DMA_FROM_DEVICE, attrs);
if (ret != 0) {
spi_unmap_buf_attrs(ctlr, tx_dev,
&xfer->tx_sg, DMA_TO_DEVICE,
attrs);
return ret;
}
}
}
ctlr->cur_rx_dma_dev = rx_dev;
ctlr->cur_tx_dma_dev = tx_dev;
ctlr->cur_msg_mapped = true;
return 0;
}
static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
struct device *rx_dev = ctlr->cur_rx_dma_dev;
struct device *tx_dev = ctlr->cur_tx_dma_dev;
struct spi_transfer *xfer;
if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
return 0;
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
/* The sync has already been done after each transfer. */
unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
if (!ctlr->can_dma(ctlr, msg->spi, xfer))
continue;
spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
DMA_FROM_DEVICE, attrs);
spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
DMA_TO_DEVICE, attrs);
}
ctlr->cur_msg_mapped = false;
return 0;
}
static void spi_dma_sync_for_device(struct spi_controller *ctlr,
struct spi_transfer *xfer)
{
struct device *rx_dev = ctlr->cur_rx_dma_dev;
struct device *tx_dev = ctlr->cur_tx_dma_dev;
if (!ctlr->cur_msg_mapped)
return;
if (xfer->tx_sg.orig_nents)
dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
if (xfer->rx_sg.orig_nents)
dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
}
static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
struct spi_transfer *xfer)
{
struct device *rx_dev = ctlr->cur_rx_dma_dev;
struct device *tx_dev = ctlr->cur_tx_dma_dev;
if (!ctlr->cur_msg_mapped)
return;
if (xfer->rx_sg.orig_nents)
dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
if (xfer->tx_sg.orig_nents)
dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_controller *ctlr,
struct spi_message *msg)
{
return 0;
}
static inline int __spi_unmap_msg(struct spi_controller *ctlr,
struct spi_message *msg)
{
return 0;
}
static void spi_dma_sync_for_device(struct spi_controller *ctrl,
struct spi_transfer *xfer)
{
}
static void spi_dma_sync_for_cpu(struct spi_controller *ctrl,
struct spi_transfer *xfer)
{
}
#endif /* !CONFIG_HAS_DMA */
static inline int spi_unmap_msg(struct spi_controller *ctlr,
struct spi_message *msg)
{
struct spi_transfer *xfer;
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
/*
 * Restore the original NULL value of tx_buf or rx_buf if they
 * were pointed at the dummy buffers.
*/
if (xfer->tx_buf == ctlr->dummy_tx)
xfer->tx_buf = NULL;
if (xfer->rx_buf == ctlr->dummy_rx)
xfer->rx_buf = NULL;
}
return __spi_unmap_msg(ctlr, msg);
}
static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
struct spi_transfer *xfer;
void *tmp;
unsigned int max_tx, max_rx;
if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
&& !(msg->spi->mode & SPI_3WIRE)) {
max_tx = 0;
max_rx = 0;
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
!xfer->tx_buf)
max_tx = max(xfer->len, max_tx);
if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
!xfer->rx_buf)
max_rx = max(xfer->len, max_rx);
}
if (max_tx) {
tmp = krealloc(ctlr->dummy_tx, max_tx,
GFP_KERNEL | GFP_DMA | __GFP_ZERO);
if (!tmp)
return -ENOMEM;
ctlr->dummy_tx = tmp;
}
if (max_rx) {
tmp = krealloc(ctlr->dummy_rx, max_rx,
GFP_KERNEL | GFP_DMA);
if (!tmp)
return -ENOMEM;
ctlr->dummy_rx = tmp;
}
if (max_tx || max_rx) {
list_for_each_entry(xfer, &msg->transfers,
transfer_list) {
if (!xfer->len)
continue;
if (!xfer->tx_buf)
xfer->tx_buf = ctlr->dummy_tx;
if (!xfer->rx_buf)
xfer->rx_buf = ctlr->dummy_rx;
}
}
}
return __spi_map_msg(ctlr, msg);
}
static int spi_transfer_wait(struct spi_controller *ctlr,
struct spi_message *msg,
struct spi_transfer *xfer)
{
struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
u32 speed_hz = xfer->speed_hz;
unsigned long long ms;
if (spi_controller_is_slave(ctlr)) {
if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
return -EINTR;
}
} else {
if (!speed_hz)
speed_hz = 100000;
/*
* For each byte we wait for 8 cycles of the SPI clock.
* Since speed is defined in Hz and we want milliseconds,
 * apply the multiplier before the division,
* otherwise we may get 0 for short transfers.
*/
ms = 8LL * MSEC_PER_SEC * xfer->len;
do_div(ms, speed_hz);
/*
 * Double it and add 200 ms of tolerance; use the
 * predefined maximum in case of overflow.
*/
ms += ms + 200;
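/*
 * Worked example (illustrative): a 1024 byte transfer at the
 * 100 kHz fallback rate gives 8 * 1000 * 1024 / 100000 = 81 ms,
 * so after doubling and adding the tolerance we wait ~362 ms.
 */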
if (ms > UINT_MAX)
ms = UINT_MAX;
ms = wait_for_completion_timeout(&ctlr->xfer_completion,
msecs_to_jiffies(ms));
if (ms == 0) {
SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
dev_err(&msg->spi->dev,
"SPI transfer timed out\n");
return -ETIMEDOUT;
}
}
return 0;
}
static void _spi_transfer_delay_ns(u32 ns)
{
if (!ns)
return;
if (ns <= NSEC_PER_USEC) {
ndelay(ns);
} else {
u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
if (us <= 10)
udelay(us);
else
usleep_range(us, us + DIV_ROUND_UP(us, 10));
}
}
int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
{
u32 delay = _delay->value;
u32 unit = _delay->unit;
u32 hz;
if (!delay)
return 0;
switch (unit) {
case SPI_DELAY_UNIT_USECS:
delay *= NSEC_PER_USEC;
break;
case SPI_DELAY_UNIT_NSECS:
/* Nothing to do here */
break;
case SPI_DELAY_UNIT_SCK:
/* Clock cycles need to be obtained from spi_transfer */
if (!xfer)
return -EINVAL;
/*
 * If the effective speed is unknown, approximate it
 * by underestimating with half of the requested Hz.
*/
hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
if (!hz)
return -EINVAL;
/* Convert delay to nanoseconds */
delay *= DIV_ROUND_UP(NSEC_PER_SEC, hz);
break;
default:
return -EINVAL;
}
return delay;
}
EXPORT_SYMBOL_GPL(spi_delay_to_ns);
int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
{
int delay;
might_sleep();
if (!_delay)
return -EINVAL;
delay = spi_delay_to_ns(_delay, xfer);
if (delay < 0)
return delay;
_spi_transfer_delay_ns(delay);
return 0;
}
EXPORT_SYMBOL_GPL(spi_delay_exec);
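/*
 * Worked example (illustrative): a delay of
 *
 *	struct spi_delay d = { .value = 8, .unit = SPI_DELAY_UNIT_SCK };
 *	spi_delay_exec(&d, xfer);
 *
 * with an effective transfer speed of 1 MHz converts to
 * 8 * DIV_ROUND_UP(NSEC_PER_SEC, 1000000) = 8000 ns, which
 * _spi_transfer_delay_ns() then executes with udelay(8).
 */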
static void _spi_transfer_cs_change_delay(struct spi_message *msg,
struct spi_transfer *xfer)
{
u32 default_delay_ns = 10 * NSEC_PER_USEC;
u32 delay = xfer->cs_change_delay.value;
u32 unit = xfer->cs_change_delay.unit;
int ret;
/* Return early on "fast" mode - for everything but USECS */
if (!delay) {
if (unit == SPI_DELAY_UNIT_USECS)
_spi_transfer_delay_ns(default_delay_ns);
return;
}
ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
if (ret) {
dev_err_once(&msg->spi->dev,
"Use of unsupported delay unit %i, using default of %luus\n",
unit, default_delay_ns / NSEC_PER_USEC);
_spi_transfer_delay_ns(default_delay_ns);
}
}
void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
struct spi_transfer *xfer)
{
_spi_transfer_cs_change_delay(msg, xfer);
}
EXPORT_SYMBOL_GPL(spi_transfer_cs_change_delay_exec);
/*
* spi_transfer_one_message - Default implementation of transfer_one_message()
*
* This is a standard implementation of transfer_one_message() for
* drivers which implement a transfer_one() operation. It provides
* standard handling of delays and chip select management.
*/
static int spi_transfer_one_message(struct spi_controller *ctlr,
struct spi_message *msg)
{
struct spi_transfer *xfer;
bool keep_cs = false;
int ret = 0;
struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
spi_set_cs(msg->spi, !xfer->cs_off, false);
SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
SPI_STATISTICS_INCREMENT_FIELD(stats, messages);
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
trace_spi_transfer_start(msg, xfer);
spi_statistics_add_transfer_stats(statm, xfer, ctlr);
spi_statistics_add_transfer_stats(stats, xfer, ctlr);
if (!ctlr->ptp_sts_supported) {
xfer->ptp_sts_word_pre = 0;
ptp_read_system_prets(xfer->ptp_sts);
}
if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
reinit_completion(&ctlr->xfer_completion);
fallback_pio:
spi_dma_sync_for_device(ctlr, xfer);
ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
if (ret < 0) {
spi_dma_sync_for_cpu(ctlr, xfer);
if (ctlr->cur_msg_mapped &&
(xfer->error & SPI_TRANS_FAIL_NO_START)) {
__spi_unmap_msg(ctlr, msg);
ctlr->fallback = true;
xfer->error &= ~SPI_TRANS_FAIL_NO_START;
goto fallback_pio;
}
SPI_STATISTICS_INCREMENT_FIELD(statm,
errors);
SPI_STATISTICS_INCREMENT_FIELD(stats,
errors);
dev_err(&msg->spi->dev,
"SPI transfer failed: %d\n", ret);
goto out;
}
if (ret > 0) {
ret = spi_transfer_wait(ctlr, msg, xfer);
if (ret < 0)
msg->status = ret;
}
spi_dma_sync_for_cpu(ctlr, xfer);
} else {
if (xfer->len)
dev_err(&msg->spi->dev,
"Bufferless transfer has length %u\n",
xfer->len);
}
if (!ctlr->ptp_sts_supported) {
ptp_read_system_postts(xfer->ptp_sts);
xfer->ptp_sts_word_post = xfer->len;
}
trace_spi_transfer_stop(msg, xfer);
if (msg->status != -EINPROGRESS)
goto out;
spi_transfer_delay_exec(xfer);
if (xfer->cs_change) {
if (list_is_last(&xfer->transfer_list,
&msg->transfers)) {
keep_cs = true;
} else {
if (!xfer->cs_off)
spi_set_cs(msg->spi, false, false);
_spi_transfer_cs_change_delay(msg, xfer);
if (!list_next_entry(xfer, transfer_list)->cs_off)
spi_set_cs(msg->spi, true, false);
}
} else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
spi_set_cs(msg->spi, xfer->cs_off, false);
}
msg->actual_length += xfer->len;
}
out:
if (ret != 0 || !keep_cs)
spi_set_cs(msg->spi, false, false);
if (msg->status == -EINPROGRESS)
msg->status = ret;
if (msg->status && ctlr->handle_err)
ctlr->handle_err(ctlr, msg);
spi_finalize_current_message(ctlr);
return ret;
}
/**
* spi_finalize_current_transfer - report completion of a transfer
* @ctlr: the controller reporting completion
*
* Called by SPI drivers using the core transfer_one_message()
* implementation to notify it that the current interrupt driven
* transfer has finished and the next one may be scheduled.
*/
void spi_finalize_current_transfer(struct spi_controller *ctlr)
{
complete(&ctlr->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
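/*
 * Illustrative sketch (not part of the original file; the foo_* names
 * are hypothetical): how an interrupt driven controller driver pairs
 * transfer_one() with spi_finalize_current_transfer().  Returning a
 * positive value from transfer_one() tells the core to wait on
 * xfer_completion, which the IRQ handler then completes:
 *
 *	static int foo_transfer_one(struct spi_controller *ctlr,
 *				    struct spi_device *spi,
 *				    struct spi_transfer *xfer)
 *	{
 *		foo_start_transfer_irq(ctlr, xfer);
 *		return 1;
 *	}
 *
 *	static irqreturn_t foo_irq(int irq, void *dev_id)
 *	{
 *		struct spi_controller *ctlr = dev_id;
 *
 *		spi_finalize_current_transfer(ctlr);
 *		return IRQ_HANDLED;
 *	}
 */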
static void spi_idle_runtime_pm(struct spi_controller *ctlr)
{
if (ctlr->auto_runtime_pm) {
pm_runtime_mark_last_busy(ctlr->dev.parent);
pm_runtime_put_autosuspend(ctlr->dev.parent);
}
}
static int __spi_pump_transfer_message(struct spi_controller *ctlr,
struct spi_message *msg, bool was_busy)
{
struct spi_transfer *xfer;
int ret;
if (!was_busy && ctlr->auto_runtime_pm) {
ret = pm_runtime_get_sync(ctlr->dev.parent);
if (ret < 0) {
pm_runtime_put_noidle(ctlr->dev.parent);
dev_err(&ctlr->dev, "Failed to power device: %d\n",
ret);
return ret;
}
}
if (!was_busy)
trace_spi_controller_busy(ctlr);
if (!was_busy && ctlr->prepare_transfer_hardware) {
ret = ctlr->prepare_transfer_hardware(ctlr);
if (ret) {
dev_err(&ctlr->dev,
"failed to prepare transfer hardware: %d\n",
ret);
if (ctlr->auto_runtime_pm)
pm_runtime_put(ctlr->dev.parent);
msg->status = ret;
spi_finalize_current_message(ctlr);
return ret;
}
}
trace_spi_message_start(msg);
ret = spi_split_transfers_maxsize(ctlr, msg,
spi_max_transfer_size(msg->spi),
GFP_KERNEL | GFP_DMA);
if (ret) {
msg->status = ret;
spi_finalize_current_message(ctlr);
return ret;
}
if (ctlr->prepare_message) {
ret = ctlr->prepare_message(ctlr, msg);
if (ret) {
dev_err(&ctlr->dev, "failed to prepare message: %d\n",
ret);
msg->status = ret;
spi_finalize_current_message(ctlr);
return ret;
}
msg->prepared = true;
}
ret = spi_map_msg(ctlr, msg);
if (ret) {
msg->status = ret;
spi_finalize_current_message(ctlr);
return ret;
}
if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
xfer->ptp_sts_word_pre = 0;
ptp_read_system_prets(xfer->ptp_sts);
}
}
/*
 * A driver's implementation of transfer_one_message() must arrange for
* spi_finalize_current_message() to get called. Most drivers will do
* this in the calling context, but some don't. For those cases, a
* completion is used to guarantee that this function does not return
* until spi_finalize_current_message() is done accessing
* ctlr->cur_msg.
 * The following two flags make it possible to opportunistically skip
 * the completion, since waiting on it involves expensive spin locks.
* In case of a race with the context that calls
 * spi_finalize_current_message(), the completion will always be used,
 * due to the strict ordering of these flags enforced by barriers.
*/
WRITE_ONCE(ctlr->cur_msg_incomplete, true);
WRITE_ONCE(ctlr->cur_msg_need_completion, false);
reinit_completion(&ctlr->cur_msg_completion);
smp_wmb(); /* Make these available to spi_finalize_current_message() */
ret = ctlr->transfer_one_message(ctlr, msg);
if (ret) {
dev_err(&ctlr->dev,
"failed to transfer one message from queue\n");
return ret;
}
WRITE_ONCE(ctlr->cur_msg_need_completion, true);
smp_mb(); /* See spi_finalize_current_message()... */
if (READ_ONCE(ctlr->cur_msg_incomplete))
wait_for_completion(&ctlr->cur_msg_completion);
return 0;
}
/**
* __spi_pump_messages - function which processes SPI message queue
* @ctlr: controller to process queue for
* @in_kthread: true if we are in the context of the message pump thread
*
* This function checks if there is any SPI message in the queue that
* needs processing and if so call out to the driver to initialize hardware
* and transfer each message.
*
* Note that it is called both from the kthread itself and also from
* inside spi_sync(); the queue extraction handling at the top of the
* function should deal with this safely.
*/
static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
{
struct spi_message *msg;
bool was_busy = false;
unsigned long flags;
int ret;
/* Take the I/O mutex */
mutex_lock(&ctlr->io_mutex);
/* Lock queue */
spin_lock_irqsave(&ctlr->queue_lock, flags);
/* Make sure we are not already running a message */
if (ctlr->cur_msg)
goto out_unlock;
/* Check if the queue is idle */
if (list_empty(&ctlr->queue) || !ctlr->running) {
if (!ctlr->busy)
goto out_unlock;
/* Defer any non-atomic teardown to the thread */
if (!in_kthread) {
if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
!ctlr->unprepare_transfer_hardware) {
spi_idle_runtime_pm(ctlr);
ctlr->busy = false;
ctlr->queue_empty = true;
trace_spi_controller_idle(ctlr);
} else {
kthread_queue_work(ctlr->kworker,
&ctlr->pump_messages);
}
goto out_unlock;
}
ctlr->busy = false;
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
kfree(ctlr->dummy_rx);
ctlr->dummy_rx = NULL;
kfree(ctlr->dummy_tx);
ctlr->dummy_tx = NULL;
if (ctlr->unprepare_transfer_hardware &&
ctlr->unprepare_transfer_hardware(ctlr))
dev_err(&ctlr->dev,
"failed to unprepare transfer hardware\n");
spi_idle_runtime_pm(ctlr);
trace_spi_controller_idle(ctlr);
spin_lock_irqsave(&ctlr->queue_lock, flags);
ctlr->queue_empty = true;
goto out_unlock;
}
/* Extract head of queue */
msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
ctlr->cur_msg = msg;
list_del_init(&msg->queue);
if (ctlr->busy)
was_busy = true;
else
ctlr->busy = true;
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
ctlr->cur_msg = NULL;
ctlr->fallback = false;
mutex_unlock(&ctlr->io_mutex);
/* Prod the scheduler in case transfer_one() was busy waiting */
if (!ret)
cond_resched();
return;
out_unlock:
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
mutex_unlock(&ctlr->io_mutex);
}
/**
* spi_pump_messages - kthread work function which processes spi message queue
* @work: pointer to kthread work struct contained in the controller struct
*/
static void spi_pump_messages(struct kthread_work *work)
{
struct spi_controller *ctlr =
container_of(work, struct spi_controller, pump_messages);
__spi_pump_messages(ctlr, true);
}
/**
* spi_take_timestamp_pre - helper to collect the beginning of the TX timestamp
* @ctlr: Pointer to the spi_controller structure of the driver
* @xfer: Pointer to the transfer being timestamped
* @progress: How many words (not bytes) have been transferred so far
* @irqs_off: If true, will disable IRQs and preemption for the duration of the
* transfer, for less jitter in time measurement. Only compatible
 * with PIO drivers. If true, the call must be followed up with
 * spi_take_timestamp_post() or the system will crash.
* WARNING: for fully predictable results, the CPU frequency must
* also be under control (governor).
*
* This is a helper for drivers to collect the beginning of the TX timestamp
* for the requested byte from the SPI transfer. The frequency with which this
* function must be called (once per word, once for the whole transfer, once
* per batch of words etc) is arbitrary as long as the @tx buffer offset is
* greater than or equal to the requested byte at the time of the call. The
* timestamp is only taken once, at the first such call. It is assumed that
* the driver advances its @tx buffer pointer monotonically.
*/
void spi_take_timestamp_pre(struct spi_controller *ctlr,
struct spi_transfer *xfer,
size_t progress, bool irqs_off)
{
if (!xfer->ptp_sts)
return;
if (xfer->timestamped)
return;
if (progress > xfer->ptp_sts_word_pre)
return;
/* Capture the resolution of the timestamp */
xfer->ptp_sts_word_pre = progress;
if (irqs_off) {
local_irq_save(ctlr->irq_flags);
preempt_disable();
}
ptp_read_system_prets(xfer->ptp_sts);
}
EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);
/**
* spi_take_timestamp_post - helper to collect the end of the TX timestamp
* @ctlr: Pointer to the spi_controller structure of the driver
* @xfer: Pointer to the transfer being timestamped
* @progress: How many words (not bytes) have been transferred so far
* @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
*
* This is a helper for drivers to collect the end of the TX timestamp for
* the requested byte from the SPI transfer. Can be called with an arbitrary
* frequency: only the first call where @tx exceeds or is equal to the
* requested word will be timestamped.
*/
void spi_take_timestamp_post(struct spi_controller *ctlr,
struct spi_transfer *xfer,
size_t progress, bool irqs_off)
{
if (!xfer->ptp_sts)
return;
if (xfer->timestamped)
return;
if (progress < xfer->ptp_sts_word_post)
return;
ptp_read_system_postts(xfer->ptp_sts);
if (irqs_off) {
local_irq_restore(ctlr->irq_flags);
preempt_enable();
}
/* Capture the resolution of the timestamp */
xfer->ptp_sts_word_post = progress;
xfer->timestamped = 1;
}
EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
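/*
 * Illustrative sketch (not part of the original file; priv and
 * foo_push_word() are hypothetical): a PIO driver bracketing the word
 * it is about to push with the two helpers above, counting progress in
 * words:
 *
 *	spi_take_timestamp_pre(ctlr, xfer, priv->tx_words, false);
 *	foo_push_word(priv);
 *	priv->tx_words++;
 *	spi_take_timestamp_post(ctlr, xfer, priv->tx_words, false);
 */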
/**
* spi_set_thread_rt - set the controller to pump at realtime priority
* @ctlr: controller to boost priority of
*
* This can be called because the controller requested realtime priority
* (by setting the ->rt value before calling spi_register_controller()) or
* because a device on the bus said that its transfers needed realtime
* priority.
*
* NOTE: at the moment if any device on a bus says it needs realtime then
* the thread will be at realtime priority for all transfers on that
* controller. If this eventually becomes a problem we may see if we can
* find a way to boost the priority only temporarily during relevant
* transfers.
*/
static void spi_set_thread_rt(struct spi_controller *ctlr)
{
dev_info(&ctlr->dev,
"will run message pump with realtime priority\n");
sched_set_fifo(ctlr->kworker->task);
}
static int spi_init_queue(struct spi_controller *ctlr)
{
ctlr->running = false;
ctlr->busy = false;
ctlr->queue_empty = true;
ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
if (IS_ERR(ctlr->kworker)) {
dev_err(&ctlr->dev, "failed to create message pump kworker\n");
return PTR_ERR(ctlr->kworker);
}
kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
/*
* Controller config will indicate if this controller should run the
* message pump with high (realtime) priority to reduce the transfer
* latency on the bus by minimising the delay between a transfer
* request and the scheduling of the message pump thread. Without this
* setting the message pump thread will remain at default priority.
*/
if (ctlr->rt)
spi_set_thread_rt(ctlr);
return 0;
}
/**
* spi_get_next_queued_message() - called by driver to check for queued
* messages
* @ctlr: the controller to check for queued messages
*
* If there are more messages in the queue, the next message is returned from
* this call.
*
* Return: the next message in the queue, else NULL if the queue is empty.
*/
struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
{
struct spi_message *next;
unsigned long flags;
/* Get a pointer to the next message, if any */
spin_lock_irqsave(&ctlr->queue_lock, flags);
next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
queue);
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
/**
* spi_finalize_current_message() - the current message is complete
* @ctlr: the controller to return the message to
*
* Called by the driver to notify the core that the message in the front of the
* queue is complete and can be removed from the queue.
*/
void spi_finalize_current_message(struct spi_controller *ctlr)
{
struct spi_transfer *xfer;
struct spi_message *mesg;
int ret;
mesg = ctlr->cur_msg;
if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
ptp_read_system_postts(xfer->ptp_sts);
xfer->ptp_sts_word_post = xfer->len;
}
}
if (unlikely(ctlr->ptp_sts_supported))
list_for_each_entry(xfer, &mesg->transfers, transfer_list)
WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);
spi_unmap_msg(ctlr, mesg);
/*
 * In the prepare_message callback the SPI bus has the opportunity
 * to split a transfer into smaller chunks.
*
* Release the split transfers here since spi_map_msg() is done on
* the split transfers.
*/
spi_res_release(ctlr, mesg);
if (mesg->prepared && ctlr->unprepare_message) {
ret = ctlr->unprepare_message(ctlr, mesg);
if (ret) {
dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
ret);
}
}
mesg->prepared = false;
WRITE_ONCE(ctlr->cur_msg_incomplete, false);
smp_mb(); /* See __spi_pump_transfer_message()... */
if (READ_ONCE(ctlr->cur_msg_need_completion))
complete(&ctlr->cur_msg_completion);
trace_spi_message_done(mesg);
mesg->state = NULL;
if (mesg->complete)
mesg->complete(mesg->context);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);
static int spi_start_queue(struct spi_controller *ctlr)
{
unsigned long flags;
spin_lock_irqsave(&ctlr->queue_lock, flags);
if (ctlr->running || ctlr->busy) {
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
return -EBUSY;
}
ctlr->running = true;
ctlr->cur_msg = NULL;
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
return 0;
}
static int spi_stop_queue(struct spi_controller *ctlr)
{
unsigned long flags;
unsigned limit = 500;
int ret = 0;
spin_lock_irqsave(&ctlr->queue_lock, flags);
/*
* This is a bit lame, but is optimized for the common execution path.
* A wait_queue on the ctlr->busy could be used, but then the common
* execution path (pump_messages) would be required to call wake_up or
* friends on every SPI message. Do this instead.
*/
while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
usleep_range(10000, 11000);
spin_lock_irqsave(&ctlr->queue_lock, flags);
}
if (!list_empty(&ctlr->queue) || ctlr->busy)
ret = -EBUSY;
else
ctlr->running = false;
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
if (ret) {
dev_warn(&ctlr->dev, "could not stop message queue\n");
return ret;
}
return ret;
}
static int spi_destroy_queue(struct spi_controller *ctlr)
{
int ret;
ret = spi_stop_queue(ctlr);
/*
* kthread_flush_worker will block until all work is done.
* If the reason that stop_queue timed out is that the work will never
* finish, then it does no good to call flush/stop thread, so
* return anyway.
*/
if (ret) {
dev_err(&ctlr->dev, "problem destroying queue\n");
return ret;
}
kthread_destroy_worker(ctlr->kworker);
return 0;
}
static int __spi_queued_transfer(struct spi_device *spi,
struct spi_message *msg,
bool need_pump)
{
struct spi_controller *ctlr = spi->controller;
unsigned long flags;
spin_lock_irqsave(&ctlr->queue_lock, flags);
if (!ctlr->running) {
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
return -ESHUTDOWN;
}
msg->actual_length = 0;
msg->status = -EINPROGRESS;
list_add_tail(&msg->queue, &ctlr->queue);
ctlr->queue_empty = false;
if (!ctlr->busy && need_pump)
kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
return 0;
}
/**
* spi_queued_transfer - transfer function for queued transfers
* @spi: SPI device which is requesting transfer
 * @msg: SPI message to be handled; it is queued to the driver queue
*
* Return: zero on success, else a negative error code.
*/
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
return __spi_queued_transfer(spi, msg, true);
}
static int spi_controller_initialize_queue(struct spi_controller *ctlr)
{
int ret;
ctlr->transfer = spi_queued_transfer;
if (!ctlr->transfer_one_message)
ctlr->transfer_one_message = spi_transfer_one_message;
/* Initialize and start queue */
ret = spi_init_queue(ctlr);
if (ret) {
dev_err(&ctlr->dev, "problem initializing queue\n");
goto err_init_queue;
}
ctlr->queued = true;
ret = spi_start_queue(ctlr);
if (ret) {
dev_err(&ctlr->dev, "problem starting queue\n");
goto err_start_queue;
}
return 0;
err_start_queue:
spi_destroy_queue(ctlr);
err_init_queue:
return ret;
}
/**
 * spi_flush_queue - Send all pending messages in the queue from the caller's
* context
* @ctlr: controller to process queue for
*
* This should be used when one wants to ensure all pending messages have been
 * sent before doing something. It is used by the spi-mem code to make sure SPI
* memory operations do not preempt regular SPI transfers that have been queued
* before the spi-mem operation.
*/
void spi_flush_queue(struct spi_controller *ctlr)
{
if (ctlr->transfer == spi_queued_transfer)
__spi_pump_messages(ctlr, false);
}
/*-------------------------------------------------------------------------*/
#if defined(CONFIG_OF)
static void of_spi_parse_dt_cs_delay(struct device_node *nc,
struct spi_delay *delay, const char *prop)
{
u32 value;
if (!of_property_read_u32(nc, prop, &value)) {
if (value > U16_MAX) {
delay->value = DIV_ROUND_UP(value, 1000);
delay->unit = SPI_DELAY_UNIT_USECS;
} else {
delay->value = value;
delay->unit = SPI_DELAY_UNIT_NSECS;
}
}
}
static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
struct device_node *nc)
{
u32 value;
int rc;
/* Mode (clock phase/polarity/etc.) */
if (of_property_read_bool(nc, "spi-cpha"))
spi->mode |= SPI_CPHA;
if (of_property_read_bool(nc, "spi-cpol"))
spi->mode |= SPI_CPOL;
if (of_property_read_bool(nc, "spi-3wire"))
spi->mode |= SPI_3WIRE;
if (of_property_read_bool(nc, "spi-lsb-first"))
spi->mode |= SPI_LSB_FIRST;
if (of_property_read_bool(nc, "spi-cs-high"))
spi->mode |= SPI_CS_HIGH;
/* Device DUAL/QUAD mode */
if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
switch (value) {
case 0:
spi->mode |= SPI_NO_TX;
break;
case 1:
break;
case 2:
spi->mode |= SPI_TX_DUAL;
break;
case 4:
spi->mode |= SPI_TX_QUAD;
break;
case 8:
spi->mode |= SPI_TX_OCTAL;
break;
default:
dev_warn(&ctlr->dev,
"spi-tx-bus-width %d not supported\n",
value);
break;
}
}
if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
switch (value) {
case 0:
spi->mode |= SPI_NO_RX;
break;
case 1:
break;
case 2:
spi->mode |= SPI_RX_DUAL;
break;
case 4:
spi->mode |= SPI_RX_QUAD;
break;
case 8:
spi->mode |= SPI_RX_OCTAL;
break;
default:
dev_warn(&ctlr->dev,
"spi-rx-bus-width %d not supported\n",
value);
break;
}
}
if (spi_controller_is_slave(ctlr)) {
if (!of_node_name_eq(nc, "slave")) {
dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
nc);
return -EINVAL;
}
return 0;
}
/* Device address */
rc = of_property_read_u32(nc, "reg", &value);
if (rc) {
dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
nc, rc);
return rc;
}
spi_set_chipselect(spi, 0, value);
/* Device speed */
if (!of_property_read_u32(nc, "spi-max-frequency", &value))
spi->max_speed_hz = value;
/* Device CS delays */
of_spi_parse_dt_cs_delay(nc, &spi->cs_setup, "spi-cs-setup-delay-ns");
of_spi_parse_dt_cs_delay(nc, &spi->cs_hold, "spi-cs-hold-delay-ns");
of_spi_parse_dt_cs_delay(nc, &spi->cs_inactive, "spi-cs-inactive-delay-ns");
return 0;
}
static struct spi_device *
of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
{
struct spi_device *spi;
int rc;
/* Alloc an spi_device */
spi = spi_alloc_device(ctlr);
if (!spi) {
dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
rc = -ENOMEM;
goto err_out;
}
/* Select device driver */
rc = of_alias_from_compatible(nc, spi->modalias,
sizeof(spi->modalias));
if (rc < 0) {
dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
goto err_out;
}
rc = of_spi_parse_dt(ctlr, spi, nc);
if (rc)
goto err_out;
/* Store a pointer to the node in the device structure */
of_node_get(nc);
device_set_node(&spi->dev, of_fwnode_handle(nc));
/* Register the new device */
rc = spi_add_device(spi);
if (rc) {
dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
goto err_of_node_put;
}
return spi;
err_of_node_put:
of_node_put(nc);
err_out:
spi_dev_put(spi);
return ERR_PTR(rc);
}
/**
* of_register_spi_devices() - Register child devices onto the SPI bus
* @ctlr: Pointer to spi_controller device
*
* Registers an spi_device for each child node of controller node which
* represents a valid SPI slave.
*/
static void of_register_spi_devices(struct spi_controller *ctlr)
{
struct spi_device *spi;
struct device_node *nc;
for_each_available_child_of_node(ctlr->dev.of_node, nc) {
if (of_node_test_and_set_flag(nc, OF_POPULATED))
continue;
spi = of_register_spi_device(ctlr, nc);
if (IS_ERR(spi)) {
dev_warn(&ctlr->dev,
"Failed to create SPI device for %pOF\n", nc);
of_node_clear_flag(nc, OF_POPULATED);
}
}
}
#else
static void of_register_spi_devices(struct spi_controller *ctlr) { }
#endif
/**
* spi_new_ancillary_device() - Register ancillary SPI device
* @spi: Pointer to the main SPI device registering the ancillary device
* @chip_select: Chip Select of the ancillary device
*
* Register an ancillary SPI device; for example some chips have a chip-select
* for normal device usage and another one for setup/firmware upload.
*
 * This may only be called from the main SPI device's probe routine.
 *
 * Return: pointer to the new ancillary SPI device on success, or ERR_PTR() on failure
*/
struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
u8 chip_select)
{
struct spi_controller *ctlr = spi->controller;
struct spi_device *ancillary;
int rc = 0;
/* Alloc an spi_device */
ancillary = spi_alloc_device(ctlr);
if (!ancillary) {
rc = -ENOMEM;
goto err_out;
}
strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));
/* Use provided chip-select for ancillary device */
spi_set_chipselect(ancillary, 0, chip_select);
/* Take over SPI mode/speed from SPI main device */
ancillary->max_speed_hz = spi->max_speed_hz;
ancillary->mode = spi->mode;
WARN_ON(!mutex_is_locked(&ctlr->add_lock));
/* Register the new device */
rc = __spi_add_device(ancillary);
if (rc) {
dev_err(&spi->dev, "failed to register ancillary device\n");
goto err_out;
}
return ancillary;
err_out:
spi_dev_put(ancillary);
return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(spi_new_ancillary_device);
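/*
 * Usage sketch (illustrative only, not part of the core): a client driver
 * whose chip exposes a second chip select for setup/firmware upload might
 * register the ancillary device from its probe routine. The foo_* names and
 * the chip-select number are assumptions for the example.
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		struct spi_device *aux;
 *
 *		aux = spi_new_ancillary_device(spi, 1);
 *		if (IS_ERR(aux))
 *			return PTR_ERR(aux);
 *
 *		return 0;
 *	}
 */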
#ifdef CONFIG_ACPI
struct acpi_spi_lookup {
struct spi_controller *ctlr;
u32 max_speed_hz;
u32 mode;
int irq;
u8 bits_per_word;
u8 chip_select;
int n;
int index;
};
static int acpi_spi_count(struct acpi_resource *ares, void *data)
{
struct acpi_resource_spi_serialbus *sb;
int *count = data;
if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
return 1;
sb = &ares->data.spi_serial_bus;
if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_SPI)
return 1;
*count = *count + 1;
return 1;
}
/**
* acpi_spi_count_resources - Count the number of SpiSerialBus resources
* @adev: ACPI device
*
* Return: the number of SpiSerialBus resources in the ACPI-device's
* resource-list; or a negative error code.
*/
int acpi_spi_count_resources(struct acpi_device *adev)
{
LIST_HEAD(r);
int count = 0;
int ret;
ret = acpi_dev_get_resources(adev, &r, acpi_spi_count, &count);
if (ret < 0)
return ret;
acpi_dev_free_resource_list(&r);
return count;
}
EXPORT_SYMBOL_GPL(acpi_spi_count_resources);
static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
struct acpi_spi_lookup *lookup)
{
const union acpi_object *obj;
if (!x86_apple_machine)
return;
if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
&& obj->buffer.length >= 4)
lookup->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
&& obj->buffer.length == 8)
lookup->bits_per_word = *(u64 *)obj->buffer.pointer;
if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
&& obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
lookup->mode |= SPI_LSB_FIRST;
if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
&& obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
lookup->mode |= SPI_CPOL;
if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
&& obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
lookup->mode |= SPI_CPHA;
}
static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev);
static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
struct acpi_spi_lookup *lookup = data;
struct spi_controller *ctlr = lookup->ctlr;
if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
struct acpi_resource_spi_serialbus *sb;
acpi_handle parent_handle;
acpi_status status;
sb = &ares->data.spi_serial_bus;
if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
if (lookup->index != -1 && lookup->n++ != lookup->index)
return 1;
status = acpi_get_handle(NULL,
sb->resource_source.string_ptr,
&parent_handle);
if (ACPI_FAILURE(status))
return -ENODEV;
if (ctlr) {
if (ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
return -ENODEV;
} else {
struct acpi_device *adev;
adev = acpi_fetch_acpi_dev(parent_handle);
if (!adev)
return -ENODEV;
ctlr = acpi_spi_find_controller_by_adev(adev);
if (!ctlr)
return -EPROBE_DEFER;
lookup->ctlr = ctlr;
}
/*
* ACPI DeviceSelection numbering is handled by the
* host controller driver in Windows and can vary
* from driver to driver. In Linux we always expect
* 0 .. max - 1 so we need to ask the driver to
* translate between the two schemes.
*/
if (ctlr->fw_translate_cs) {
int cs = ctlr->fw_translate_cs(ctlr,
sb->device_selection);
if (cs < 0)
return cs;
lookup->chip_select = cs;
} else {
lookup->chip_select = sb->device_selection;
}
lookup->max_speed_hz = sb->connection_speed;
lookup->bits_per_word = sb->data_bit_length;
if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
lookup->mode |= SPI_CPHA;
if (sb->clock_polarity == ACPI_SPI_START_HIGH)
lookup->mode |= SPI_CPOL;
if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
lookup->mode |= SPI_CS_HIGH;
}
} else if (lookup->irq < 0) {
struct resource r;
if (acpi_dev_resource_interrupt(ares, 0, &r))
lookup->irq = r.start;
}
/* Always tell the ACPI core to skip this resource */
return 1;
}
/**
* acpi_spi_device_alloc - Allocate a spi device, and fill it in with ACPI information
* @ctlr: controller to which the spi device belongs
* @adev: ACPI Device for the spi device
* @index: Index of the spi resource inside the ACPI Node
*
 * This should be used to allocate a new SPI device from an ACPI Device node.
* The caller is responsible for calling spi_add_device to register the SPI device.
*
* If ctlr is set to NULL, the Controller for the SPI device will be looked up
* using the resource.
* If index is set to -1, index is not used.
* Note: If index is -1, ctlr must be set.
*
* Return: a pointer to the new device, or ERR_PTR on error.
*/
struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
struct acpi_device *adev,
int index)
{
acpi_handle parent_handle = NULL;
struct list_head resource_list;
struct acpi_spi_lookup lookup = {};
struct spi_device *spi;
int ret;
if (!ctlr && index == -1)
return ERR_PTR(-EINVAL);
lookup.ctlr = ctlr;
lookup.irq = -1;
lookup.index = index;
lookup.n = 0;
INIT_LIST_HEAD(&resource_list);
ret = acpi_dev_get_resources(adev, &resource_list,
acpi_spi_add_resource, &lookup);
acpi_dev_free_resource_list(&resource_list);
if (ret < 0)
/* Found SPI in _CRS but it points to another controller */
return ERR_PTR(ret);
if (!lookup.max_speed_hz &&
ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
ACPI_HANDLE(lookup.ctlr->dev.parent) == parent_handle) {
/* Apple does not use _CRS but nested devices for SPI slaves */
acpi_spi_parse_apple_properties(adev, &lookup);
}
if (!lookup.max_speed_hz)
return ERR_PTR(-ENODEV);
spi = spi_alloc_device(lookup.ctlr);
if (!spi) {
dev_err(&lookup.ctlr->dev, "failed to allocate SPI device for %s\n",
dev_name(&adev->dev));
return ERR_PTR(-ENOMEM);
}
ACPI_COMPANION_SET(&spi->dev, adev);
spi->max_speed_hz = lookup.max_speed_hz;
spi->mode |= lookup.mode;
spi->irq = lookup.irq;
spi->bits_per_word = lookup.bits_per_word;
spi_set_chipselect(spi, 0, lookup.chip_select);
return spi;
}
EXPORT_SYMBOL_GPL(acpi_spi_device_alloc);
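/*
 * Usage sketch (illustrative only): a driver enumerating an ACPI device that
 * carries several SpiSerialBus resources could allocate and register the
 * device behind resource index 0 like this. The foo names and the "foo"
 * modalias are assumptions.
 *
 *	static int foo_instantiate(struct spi_controller *ctlr,
 *				   struct acpi_device *adev)
 *	{
 *		struct spi_device *spi;
 *		int ret;
 *
 *		spi = acpi_spi_device_alloc(ctlr, adev, 0);
 *		if (IS_ERR(spi))
 *			return PTR_ERR(spi);
 *
 *		strscpy(spi->modalias, "foo", sizeof(spi->modalias));
 *		ret = spi_add_device(spi);
 *		if (ret)
 *			spi_dev_put(spi);
 *		return ret;
 *	}
 */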
static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
struct acpi_device *adev)
{
struct spi_device *spi;
if (acpi_bus_get_status(adev) || !adev->status.present ||
acpi_device_enumerated(adev))
return AE_OK;
spi = acpi_spi_device_alloc(ctlr, adev, -1);
if (IS_ERR(spi)) {
if (PTR_ERR(spi) == -ENOMEM)
return AE_NO_MEMORY;
else
return AE_OK;
}
acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
sizeof(spi->modalias));
if (spi->irq < 0)
spi->irq = acpi_dev_gpio_irq_get(adev, 0);
acpi_device_set_enumerated(adev);
adev->power.flags.ignore_parent = true;
if (spi_add_device(spi)) {
adev->power.flags.ignore_parent = false;
dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
dev_name(&adev->dev));
spi_dev_put(spi);
}
return AE_OK;
}
static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
void *data, void **return_value)
{
struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
struct spi_controller *ctlr = data;
if (!adev)
return AE_OK;
return acpi_register_spi_device(ctlr, adev);
}
#define SPI_ACPI_ENUMERATE_MAX_DEPTH 32
static void acpi_register_spi_devices(struct spi_controller *ctlr)
{
acpi_status status;
acpi_handle handle;
handle = ACPI_HANDLE(ctlr->dev.parent);
if (!handle)
return;
status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
SPI_ACPI_ENUMERATE_MAX_DEPTH,
acpi_spi_add_device, NULL, ctlr, NULL);
if (ACPI_FAILURE(status))
dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
#endif /* CONFIG_ACPI */
static void spi_controller_release(struct device *dev)
{
struct spi_controller *ctlr;
ctlr = container_of(dev, struct spi_controller, dev);
kfree(ctlr);
}
static struct class spi_master_class = {
.name = "spi_master",
.dev_release = spi_controller_release,
.dev_groups = spi_master_groups,
};
#ifdef CONFIG_SPI_SLAVE
/**
* spi_slave_abort - abort the ongoing transfer request on an SPI slave
* controller
* @spi: device used for the current transfer
*/
int spi_slave_abort(struct spi_device *spi)
{
struct spi_controller *ctlr = spi->controller;
if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
return ctlr->slave_abort(ctlr);
return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(spi_slave_abort);
int spi_target_abort(struct spi_device *spi)
{
struct spi_controller *ctlr = spi->controller;
if (spi_controller_is_target(ctlr) && ctlr->target_abort)
return ctlr->target_abort(ctlr);
return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(spi_target_abort);
static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct spi_controller *ctlr = container_of(dev, struct spi_controller,
dev);
struct device *child;
child = device_find_any_child(&ctlr->dev);
return sysfs_emit(buf, "%s\n", child ? to_spi_device(child)->modalias : NULL);
}
static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct spi_controller *ctlr = container_of(dev, struct spi_controller,
dev);
struct spi_device *spi;
struct device *child;
char name[32];
int rc;
rc = sscanf(buf, "%31s", name);
if (rc != 1 || !name[0])
return -EINVAL;
child = device_find_any_child(&ctlr->dev);
if (child) {
/* Remove registered slave */
device_unregister(child);
put_device(child);
}
if (strcmp(name, "(null)")) {
/* Register new slave */
spi = spi_alloc_device(ctlr);
if (!spi)
return -ENOMEM;
strscpy(spi->modalias, name, sizeof(spi->modalias));
rc = spi_add_device(spi);
if (rc) {
spi_dev_put(spi);
return rc;
}
}
return count;
}
static DEVICE_ATTR_RW(slave);
static struct attribute *spi_slave_attrs[] = {
&dev_attr_slave.attr,
NULL,
};
static const struct attribute_group spi_slave_group = {
.attrs = spi_slave_attrs,
};
static const struct attribute_group *spi_slave_groups[] = {
&spi_controller_statistics_group,
&spi_slave_group,
NULL,
};
static struct class spi_slave_class = {
.name = "spi_slave",
.dev_release = spi_controller_release,
.dev_groups = spi_slave_groups,
};
#else
extern struct class spi_slave_class; /* dummy */
#endif
/**
* __spi_alloc_controller - allocate an SPI master or slave controller
* @dev: the controller, possibly using the platform_bus
* @size: how much zeroed driver-private data to allocate; the pointer to this
* memory is in the driver_data field of the returned device, accessible
* with spi_controller_get_devdata(); the memory is cacheline aligned;
* drivers granting DMA access to portions of their private data need to
* round up @size using ALIGN(size, dma_get_cache_alignment()).
* @slave: flag indicating whether to allocate an SPI master (false) or SPI
* slave (true) controller
* Context: can sleep
*
* This call is used only by SPI controller drivers, which are the
* only ones directly touching chip registers. It's how they allocate
* an spi_controller structure, prior to calling spi_register_controller().
*
* This must be called from context that can sleep.
*
* The caller is responsible for assigning the bus number and initializing the
* controller's methods before calling spi_register_controller(); and (after
* errors adding the device) calling spi_controller_put() to prevent a memory
* leak.
*
* Return: the SPI controller structure on success, else NULL.
*/
struct spi_controller *__spi_alloc_controller(struct device *dev,
unsigned int size, bool slave)
{
struct spi_controller *ctlr;
size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());
if (!dev)
return NULL;
ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
if (!ctlr)
return NULL;
device_initialize(&ctlr->dev);
INIT_LIST_HEAD(&ctlr->queue);
spin_lock_init(&ctlr->queue_lock);
spin_lock_init(&ctlr->bus_lock_spinlock);
mutex_init(&ctlr->bus_lock_mutex);
mutex_init(&ctlr->io_mutex);
mutex_init(&ctlr->add_lock);
ctlr->bus_num = -1;
ctlr->num_chipselect = 1;
ctlr->slave = slave;
if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
ctlr->dev.class = &spi_slave_class;
else
ctlr->dev.class = &spi_master_class;
ctlr->dev.parent = dev;
pm_suspend_ignore_children(&ctlr->dev, true);
spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);
return ctlr;
}
EXPORT_SYMBOL_GPL(__spi_alloc_controller);
static void devm_spi_release_controller(struct device *dev, void *ctlr)
{
spi_controller_put(*(struct spi_controller **)ctlr);
}
/**
* __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
* @dev: physical device of SPI controller
* @size: how much zeroed driver-private data to allocate
* @slave: whether to allocate an SPI master (false) or SPI slave (true)
* Context: can sleep
*
* Allocate an SPI controller and automatically release a reference on it
* when @dev is unbound from its driver. Drivers are thus relieved from
* having to call spi_controller_put().
*
* The arguments to this function are identical to __spi_alloc_controller().
*
* Return: the SPI controller structure on success, else NULL.
*/
struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
unsigned int size,
bool slave)
{
struct spi_controller **ptr, *ctlr;
ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr),
GFP_KERNEL);
if (!ptr)
return NULL;
ctlr = __spi_alloc_controller(dev, size, slave);
if (ctlr) {
ctlr->devm_allocated = true;
*ptr = ctlr;
devres_add(dev, ptr);
} else {
devres_free(ptr);
}
return ctlr;
}
EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
/**
* spi_get_gpio_descs() - grab chip select GPIOs for the master
* @ctlr: The SPI master to grab GPIO descriptors for
*/
static int spi_get_gpio_descs(struct spi_controller *ctlr)
{
int nb, i;
struct gpio_desc **cs;
struct device *dev = &ctlr->dev;
unsigned long native_cs_mask = 0;
unsigned int num_cs_gpios = 0;
nb = gpiod_count(dev, "cs");
if (nb < 0) {
/* No GPIOs at all is fine, else return the error */
if (nb == -ENOENT)
return 0;
return nb;
}
ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
GFP_KERNEL);
if (!cs)
return -ENOMEM;
ctlr->cs_gpiods = cs;
for (i = 0; i < nb; i++) {
/*
* Most chipselects are active low, the inverted
* semantics are handled by special quirks in gpiolib,
		 * so initializing them to GPIOD_OUT_LOW here means
		 * "unasserted"; in most cases this will drive the physical
		 * line high.
*/
cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
GPIOD_OUT_LOW);
if (IS_ERR(cs[i]))
return PTR_ERR(cs[i]);
if (cs[i]) {
/*
* If we find a CS GPIO, name it after the device and
* chip select line.
*/
char *gpioname;
gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
dev_name(dev), i);
if (!gpioname)
return -ENOMEM;
gpiod_set_consumer_name(cs[i], gpioname);
num_cs_gpios++;
continue;
}
if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
dev_err(dev, "Invalid native chip select %d\n", i);
return -EINVAL;
}
native_cs_mask |= BIT(i);
}
ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;
if ((ctlr->flags & SPI_CONTROLLER_GPIO_SS) && num_cs_gpios &&
ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
dev_err(dev, "No unused native chip select available\n");
return -EINVAL;
}
return 0;
}
static int spi_controller_check_ops(struct spi_controller *ctlr)
{
/*
	 * The controller may implement only the high-level SPI-memory-like
	 * operations if it does not support regular SPI transfers, and this is
	 * a valid use case.
	 * If ->mem_ops or ->mem_ops->exec_op is NULL, we request that at least
	 * one of the ->transfer_xxx() methods be implemented.
*/
if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
if (!ctlr->transfer && !ctlr->transfer_one &&
!ctlr->transfer_one_message) {
return -EINVAL;
}
}
return 0;
}
/* Allocate dynamic bus number using Linux idr */
static int spi_controller_id_alloc(struct spi_controller *ctlr, int start, int end)
{
int id;
mutex_lock(&board_lock);
id = idr_alloc(&spi_master_idr, ctlr, start, end, GFP_KERNEL);
mutex_unlock(&board_lock);
if (WARN(id < 0, "couldn't get idr"))
return id == -ENOSPC ? -EBUSY : id;
ctlr->bus_num = id;
return 0;
}
/**
* spi_register_controller - register SPI master or slave controller
 * @ctlr: initialized controller, originally from spi_alloc_master() or
* spi_alloc_slave()
* Context: can sleep
*
* SPI controllers connect to their drivers using some non-SPI bus,
* such as the platform bus. The final stage of probe() in that code
* includes calling spi_register_controller() to hook up to this SPI bus glue.
*
* SPI controllers use board specific (often SOC specific) bus numbers,
* and board-specific addressing for SPI devices combines those numbers
* with chip select numbers. Since SPI does not directly support dynamic
* device identification, boards need configuration tables telling which
* chip is at which address.
*
* This must be called from context that can sleep. It returns zero on
* success, else a negative error code (dropping the controller's refcount).
* After a successful return, the caller is responsible for calling
* spi_unregister_controller().
*
* Return: zero on success, else a negative error code.
*/
int spi_register_controller(struct spi_controller *ctlr)
{
struct device *dev = ctlr->dev.parent;
struct boardinfo *bi;
int first_dynamic;
int status;
if (!dev)
return -ENODEV;
/*
* Make sure all necessary hooks are implemented before registering
* the SPI controller.
*/
status = spi_controller_check_ops(ctlr);
if (status)
return status;
if (ctlr->bus_num < 0)
ctlr->bus_num = of_alias_get_id(ctlr->dev.of_node, "spi");
if (ctlr->bus_num >= 0) {
/* Devices with a fixed bus num must check-in with the num */
status = spi_controller_id_alloc(ctlr, ctlr->bus_num, ctlr->bus_num + 1);
if (status)
return status;
}
if (ctlr->bus_num < 0) {
first_dynamic = of_alias_get_highest_id("spi");
if (first_dynamic < 0)
first_dynamic = 0;
else
first_dynamic++;
status = spi_controller_id_alloc(ctlr, first_dynamic, 0);
if (status)
return status;
}
ctlr->bus_lock_flag = 0;
init_completion(&ctlr->xfer_completion);
init_completion(&ctlr->cur_msg_completion);
if (!ctlr->max_dma_len)
ctlr->max_dma_len = INT_MAX;
/*
* Register the device, then userspace will see it.
* Registration fails if the bus ID is in use.
*/
dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
if (!spi_controller_is_slave(ctlr) && ctlr->use_gpio_descriptors) {
status = spi_get_gpio_descs(ctlr);
if (status)
goto free_bus_id;
/*
* A controller using GPIO descriptors always
* supports SPI_CS_HIGH if need be.
*/
ctlr->mode_bits |= SPI_CS_HIGH;
}
/*
* Even if it's just one always-selected device, there must
* be at least one chipselect.
*/
if (!ctlr->num_chipselect) {
status = -EINVAL;
goto free_bus_id;
}
/* Setting last_cs to -1 means no chip selected */
ctlr->last_cs = -1;
status = device_add(&ctlr->dev);
if (status < 0)
goto free_bus_id;
dev_dbg(dev, "registered %s %s\n",
spi_controller_is_slave(ctlr) ? "slave" : "master",
dev_name(&ctlr->dev));
/*
* If we're using a queued driver, start the queue. Note that we don't
* need the queueing logic if the driver is only supporting high-level
* memory operations.
*/
if (ctlr->transfer) {
dev_info(dev, "controller is unqueued, this is deprecated\n");
} else if (ctlr->transfer_one || ctlr->transfer_one_message) {
status = spi_controller_initialize_queue(ctlr);
if (status) {
device_del(&ctlr->dev);
goto free_bus_id;
}
}
/* Add statistics */
ctlr->pcpu_statistics = spi_alloc_pcpu_stats(dev);
if (!ctlr->pcpu_statistics) {
dev_err(dev, "Error allocating per-cpu statistics\n");
status = -ENOMEM;
goto destroy_queue;
}
mutex_lock(&board_lock);
list_add_tail(&ctlr->list, &spi_controller_list);
list_for_each_entry(bi, &board_list, list)
spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
mutex_unlock(&board_lock);
/* Register devices from the device tree and ACPI */
of_register_spi_devices(ctlr);
acpi_register_spi_devices(ctlr);
return status;
destroy_queue:
spi_destroy_queue(ctlr);
free_bus_id:
mutex_lock(&board_lock);
idr_remove(&spi_master_idr, ctlr->bus_num);
mutex_unlock(&board_lock);
return status;
}
EXPORT_SYMBOL_GPL(spi_register_controller);
static void devm_spi_unregister(struct device *dev, void *res)
{
spi_unregister_controller(*(struct spi_controller **)res);
}
/**
* devm_spi_register_controller - register managed SPI master or slave
* controller
* @dev: device managing SPI controller
* @ctlr: initialized controller, originally from spi_alloc_master() or
* spi_alloc_slave()
* Context: can sleep
*
 * Register an SPI controller as with spi_register_controller(); it will
 * automatically be unregistered and freed when @dev is unbound from its driver.
*
* Return: zero on success, else a negative error code.
*/
int devm_spi_register_controller(struct device *dev,
struct spi_controller *ctlr)
{
struct spi_controller **ptr;
int ret;
ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return -ENOMEM;
ret = spi_register_controller(ctlr);
if (!ret) {
*ptr = ctlr;
devres_add(dev, ptr);
} else {
devres_free(ptr);
}
return ret;
}
EXPORT_SYMBOL_GPL(devm_spi_register_controller);
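/*
 * Usage sketch (illustrative only): a typical controller driver allocates the
 * controller with the devm_ helpers, fills in its capabilities and callbacks,
 * and registers it from probe. Everything prefixed foo_ (including the
 * private struct and the callbacks) is a hypothetical driver symbol, not part
 * of the core.
 *
 *	static int foo_spi_probe(struct platform_device *pdev)
 *	{
 *		struct spi_controller *ctlr;
 *
 *		ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(struct foo_priv));
 *		if (!ctlr)
 *			return -ENOMEM;
 *
 *		ctlr->num_chipselect = 4;
 *		ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
 *		ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
 *		ctlr->transfer_one = foo_transfer_one;
 *		ctlr->set_cs = foo_set_cs;
 *		ctlr->dev.of_node = pdev->dev.of_node;
 *
 *		return devm_spi_register_controller(&pdev->dev, ctlr);
 *	}
 */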
static int __unregister(struct device *dev, void *null)
{
spi_unregister_device(to_spi_device(dev));
return 0;
}
/**
* spi_unregister_controller - unregister SPI master or slave controller
* @ctlr: the controller being unregistered
* Context: can sleep
*
* This call is used only by SPI controller drivers, which are the
* only ones directly touching chip registers.
*
* This must be called from context that can sleep.
*
* Note that this function also drops a reference to the controller.
*/
void spi_unregister_controller(struct spi_controller *ctlr)
{
struct spi_controller *found;
int id = ctlr->bus_num;
/* Prevent addition of new devices, unregister existing ones */
if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
mutex_lock(&ctlr->add_lock);
device_for_each_child(&ctlr->dev, NULL, __unregister);
/* First make sure that this controller was ever added */
mutex_lock(&board_lock);
found = idr_find(&spi_master_idr, id);
mutex_unlock(&board_lock);
if (ctlr->queued) {
if (spi_destroy_queue(ctlr))
dev_err(&ctlr->dev, "queue remove failed\n");
}
mutex_lock(&board_lock);
list_del(&ctlr->list);
mutex_unlock(&board_lock);
device_del(&ctlr->dev);
/* Free bus id */
mutex_lock(&board_lock);
if (found == ctlr)
idr_remove(&spi_master_idr, id);
mutex_unlock(&board_lock);
if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
mutex_unlock(&ctlr->add_lock);
/*
* Release the last reference on the controller if its driver
* has not yet been converted to devm_spi_alloc_master/slave().
*/
if (!ctlr->devm_allocated)
put_device(&ctlr->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_controller);
int spi_controller_suspend(struct spi_controller *ctlr)
{
int ret;
/* Basically no-ops for non-queued controllers */
if (!ctlr->queued)
return 0;
ret = spi_stop_queue(ctlr);
if (ret)
dev_err(&ctlr->dev, "queue stop failed\n");
return ret;
}
EXPORT_SYMBOL_GPL(spi_controller_suspend);
int spi_controller_resume(struct spi_controller *ctlr)
{
int ret;
if (!ctlr->queued)
return 0;
ret = spi_start_queue(ctlr);
if (ret)
dev_err(&ctlr->dev, "queue restart failed\n");
return ret;
}
EXPORT_SYMBOL_GPL(spi_controller_resume);
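/*
 * Usage sketch (illustrative only): controller drivers typically call
 * spi_controller_suspend()/spi_controller_resume() from their system PM
 * callbacks to quiesce and restart the message queue. This assumes the
 * driver stored the controller as driver data; the foo_* names are
 * hypothetical.
 *
 *	static int foo_spi_suspend(struct device *dev)
 *	{
 *		struct spi_controller *ctlr = dev_get_drvdata(dev);
 *
 *		return spi_controller_suspend(ctlr);
 *	}
 *
 *	static int foo_spi_resume(struct device *dev)
 *	{
 *		struct spi_controller *ctlr = dev_get_drvdata(dev);
 *
 *		return spi_controller_resume(ctlr);
 *	}
 *
 *	static SIMPLE_DEV_PM_OPS(foo_spi_pm_ops, foo_spi_suspend, foo_spi_resume);
 */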
/*-------------------------------------------------------------------------*/
/* Core methods for spi_message alterations */
static void __spi_replace_transfers_release(struct spi_controller *ctlr,
struct spi_message *msg,
void *res)
{
struct spi_replaced_transfers *rxfer = res;
size_t i;
/* Call extra callback if requested */
if (rxfer->release)
rxfer->release(ctlr, msg, res);
/* Insert replaced transfers back into the message */
list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
/* Remove the formerly inserted entries */
for (i = 0; i < rxfer->inserted; i++)
list_del(&rxfer->inserted_transfers[i].transfer_list);
}
/**
* spi_replace_transfers - replace transfers with several transfers
* and register change with spi_message.resources
* @msg: the spi_message we work upon
* @xfer_first: the first spi_transfer we want to replace
* @remove: number of transfers to remove
* @insert: the number of transfers we want to insert instead
* @release: extra release code necessary in some circumstances
* @extradatasize: extra data to allocate (with alignment guarantees
* of struct @spi_transfer)
* @gfp: gfp flags
*
* Returns: pointer to @spi_replaced_transfers,
* PTR_ERR(...) in case of errors.
*/
static struct spi_replaced_transfers *spi_replace_transfers(
struct spi_message *msg,
struct spi_transfer *xfer_first,
size_t remove,
size_t insert,
spi_replaced_release_t release,
size_t extradatasize,
gfp_t gfp)
{
struct spi_replaced_transfers *rxfer;
struct spi_transfer *xfer;
size_t i;
/* Allocate the structure using spi_res */
rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
struct_size(rxfer, inserted_transfers, insert)
+ extradatasize,
gfp);
if (!rxfer)
return ERR_PTR(-ENOMEM);
/* The release code to invoke before running the generic release */
rxfer->release = release;
/* Assign extradata */
if (extradatasize)
rxfer->extradata =
&rxfer->inserted_transfers[insert];
/* Init the replaced_transfers list */
INIT_LIST_HEAD(&rxfer->replaced_transfers);
/*
* Assign the list_entry after which we should reinsert
	 * the @replaced_transfers - it may be spi_message.transfers!
*/
rxfer->replaced_after = xfer_first->transfer_list.prev;
/* Remove the requested number of transfers */
for (i = 0; i < remove; i++) {
/*
		 * If the entry after replaced_after is msg->transfers
* then we have been requested to remove more transfers
* than are in the list.
*/
if (rxfer->replaced_after->next == &msg->transfers) {
dev_err(&msg->spi->dev,
"requested to remove more spi_transfers than are available\n");
/* Insert replaced transfers back into the message */
list_splice(&rxfer->replaced_transfers,
rxfer->replaced_after);
/* Free the spi_replace_transfer structure... */
spi_res_free(rxfer);
/* ...and return with an error */
return ERR_PTR(-EINVAL);
}
/*
* Remove the entry after replaced_after from list of
* transfers and add it to list of replaced_transfers.
*/
list_move_tail(rxfer->replaced_after->next,
&rxfer->replaced_transfers);
}
/*
* Create copy of the given xfer with identical settings
* based on the first transfer to get removed.
*/
for (i = 0; i < insert; i++) {
/* We need to run in reverse order */
xfer = &rxfer->inserted_transfers[insert - 1 - i];
/* Copy all spi_transfer data */
memcpy(xfer, xfer_first, sizeof(*xfer));
/* Add to list */
list_add(&xfer->transfer_list, rxfer->replaced_after);
/* Clear cs_change and delay for all but the last */
if (i) {
xfer->cs_change = false;
xfer->delay.value = 0;
}
}
/* Set up inserted... */
rxfer->inserted = insert;
/* ...and register it with spi_res/spi_message */
spi_res_add(msg, rxfer);
return rxfer;
}
static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
struct spi_message *msg,
struct spi_transfer **xferp,
size_t maxsize,
gfp_t gfp)
{
struct spi_transfer *xfer = *xferp, *xfers;
struct spi_replaced_transfers *srt;
size_t offset;
size_t count, i;
/* Calculate how many we have to replace */
count = DIV_ROUND_UP(xfer->len, maxsize);
/* Create replacement */
srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
if (IS_ERR(srt))
return PTR_ERR(srt);
xfers = srt->inserted_transfers;
/*
* Now handle each of those newly inserted spi_transfers.
	 * Note that the replacement spi_transfers all are preset
* to the same values as *xferp, so tx_buf, rx_buf and len
* are all identical (as well as most others)
* so we just have to fix up len and the pointers.
*
	 * This also includes support for the deprecated
* spi_message.is_dma_mapped interface.
*/
/*
* The first transfer just needs the length modified, so we
* run it outside the loop.
*/
xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
/* All the others need rx_buf/tx_buf also set */
for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
/* Update rx_buf, tx_buf and DMA */
if (xfers[i].rx_buf)
xfers[i].rx_buf += offset;
if (xfers[i].rx_dma)
xfers[i].rx_dma += offset;
if (xfers[i].tx_buf)
xfers[i].tx_buf += offset;
if (xfers[i].tx_dma)
xfers[i].tx_dma += offset;
/* Update length */
xfers[i].len = min(maxsize, xfers[i].len - offset);
}
/*
* We set up xferp to the last entry we have inserted,
* so that we skip those already split transfers.
*/
*xferp = &xfers[count - 1];
/* Increment statistics counters */
SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics,
transfers_split_maxsize);
SPI_STATISTICS_INCREMENT_FIELD(msg->spi->pcpu_statistics,
transfers_split_maxsize);
return 0;
}
/**
* spi_split_transfers_maxsize - split spi transfers into multiple transfers
* when an individual transfer exceeds a
* certain size
* @ctlr: the @spi_controller for this transfer
* @msg: the @spi_message to transform
 * @maxsize: the maximum length (in bytes) a transfer may have before it is split
* @gfp: GFP allocation flags
*
* Return: status of transformation
*/
int spi_split_transfers_maxsize(struct spi_controller *ctlr,
struct spi_message *msg,
size_t maxsize,
gfp_t gfp)
{
struct spi_transfer *xfer;
int ret;
/*
* Iterate over the transfer_list,
* but note that xfer is advanced to the last transfer inserted
* to avoid checking sizes again unnecessarily (also xfer does
* potentially belong to a different list by the time the
* replacement has happened).
*/
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
if (xfer->len > maxsize) {
ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
maxsize, gfp);
if (ret)
return ret;
}
}
return 0;
}
EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
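/*
 * Usage sketch (illustrative only): a controller whose FIFO can move at most
 * 256 bytes per transfer (an assumed hardware limit) could split oversized
 * transfers from its prepare_message() callback. foo_prepare_message is a
 * hypothetical driver callback.
 *
 *	static int foo_prepare_message(struct spi_controller *ctlr,
 *				       struct spi_message *msg)
 *	{
 *		return spi_split_transfers_maxsize(ctlr, msg, 256, GFP_KERNEL);
 *	}
 */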
/**
* spi_split_transfers_maxwords - split SPI transfers into multiple transfers
* when an individual transfer exceeds a
* certain number of SPI words
* @ctlr: the @spi_controller for this transfer
* @msg: the @spi_message to transform
* @maxwords: the number of words to limit each transfer to
* @gfp: GFP allocation flags
*
* Return: status of transformation
*/
int spi_split_transfers_maxwords(struct spi_controller *ctlr,
struct spi_message *msg,
size_t maxwords,
gfp_t gfp)
{
struct spi_transfer *xfer;
/*
* Iterate over the transfer_list,
* but note that xfer is advanced to the last transfer inserted
* to avoid checking sizes again unnecessarily (also xfer does
* potentially belong to a different list by the time the
* replacement has happened).
*/
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
size_t maxsize;
int ret;
maxsize = maxwords * roundup_pow_of_two(BITS_TO_BYTES(xfer->bits_per_word));
if (xfer->len > maxsize) {
ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
maxsize, gfp);
if (ret)
return ret;
}
}
return 0;
}
EXPORT_SYMBOL_GPL(spi_split_transfers_maxwords);
/*-------------------------------------------------------------------------*/
/*
* Core methods for SPI controller protocol drivers. Some of the
* other core methods are currently defined as inline functions.
*/
static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
u8 bits_per_word)
{
if (ctlr->bits_per_word_mask) {
/* Only 32 bits fit in the mask */
if (bits_per_word > 32)
return -EINVAL;
if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
return -EINVAL;
}
return 0;
}
/**
* spi_set_cs_timing - configure CS setup, hold, and inactive delays
* @spi: the device that requires specific CS timing configuration
*
* Return: zero on success, else a negative error code.
*/
static int spi_set_cs_timing(struct spi_device *spi)
{
struct device *parent = spi->controller->dev.parent;
int status = 0;
if (spi->controller->set_cs_timing && !spi_get_csgpiod(spi, 0)) {
if (spi->controller->auto_runtime_pm) {
status = pm_runtime_get_sync(parent);
if (status < 0) {
pm_runtime_put_noidle(parent);
dev_err(&spi->controller->dev, "Failed to power device: %d\n",
status);
return status;
}
status = spi->controller->set_cs_timing(spi);
pm_runtime_mark_last_busy(parent);
pm_runtime_put_autosuspend(parent);
} else {
status = spi->controller->set_cs_timing(spi);
}
}
return status;
}
/**
* spi_setup - setup SPI mode and clock rate
* @spi: the device whose settings are being modified
* Context: can sleep, and no requests are queued to the device
*
* SPI protocol drivers may need to update the transfer mode if the
* device doesn't work with its default. They may likewise need
* to update clock rates or word sizes from initial values. This function
* changes those settings, and must be called from a context that can sleep.
* Except for SPI_CS_HIGH, which takes effect immediately, the changes take
* effect the next time the device is selected and data is transferred to
* or from it. When this function returns, the SPI device is deselected.
*
* Note that this call will fail if the protocol driver specifies an option
* that the underlying controller or its driver does not support. For
* example, not all hardware supports wire transfers using nine bit words,
* LSB-first wire encoding, or active-high chipselects.
*
* Return: zero on success, else a negative error code.
*/
int spi_setup(struct spi_device *spi)
{
unsigned bad_bits, ugly_bits;
int status = 0;
/*
	 * Check the mode to prevent any two of DUAL, QUAD and NO_MOSI/MISO
	 * from being set at the same time.
*/
if ((hweight_long(spi->mode &
(SPI_TX_DUAL | SPI_TX_QUAD | SPI_NO_TX)) > 1) ||
(hweight_long(spi->mode &
(SPI_RX_DUAL | SPI_RX_QUAD | SPI_NO_RX)) > 1)) {
dev_err(&spi->dev,
"setup: can not select any two of dual, quad and no-rx/tx at the same time\n");
return -EINVAL;
}
/* If it is SPI_3WIRE mode, DUAL and QUAD should be forbidden */
if ((spi->mode & SPI_3WIRE) && (spi->mode &
(SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
return -EINVAL;
/*
* Help drivers fail *cleanly* when they need options
* that aren't supported with their current controller.
* SPI_CS_WORD has a fallback software implementation,
* so it is ignored here.
*/
bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD |
SPI_NO_TX | SPI_NO_RX);
ugly_bits = bad_bits &
(SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
if (ugly_bits) {
dev_warn(&spi->dev,
"setup: ignoring unsupported mode bits %x\n",
ugly_bits);
spi->mode &= ~ugly_bits;
bad_bits &= ~ugly_bits;
}
if (bad_bits) {
dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
bad_bits);
return -EINVAL;
}
if (!spi->bits_per_word) {
spi->bits_per_word = 8;
} else {
/*
* Some controllers may not support the default 8 bits-per-word
* so only perform the check when this is explicitly provided.
*/
status = __spi_validate_bits_per_word(spi->controller,
spi->bits_per_word);
if (status)
return status;
}
if (spi->controller->max_speed_hz &&
(!spi->max_speed_hz ||
spi->max_speed_hz > spi->controller->max_speed_hz))
spi->max_speed_hz = spi->controller->max_speed_hz;
mutex_lock(&spi->controller->io_mutex);
if (spi->controller->setup) {
status = spi->controller->setup(spi);
if (status) {
mutex_unlock(&spi->controller->io_mutex);
dev_err(&spi->controller->dev, "Failed to setup device: %d\n",
status);
return status;
}
}
status = spi_set_cs_timing(spi);
if (status) {
mutex_unlock(&spi->controller->io_mutex);
return status;
}
if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
status = pm_runtime_resume_and_get(spi->controller->dev.parent);
if (status < 0) {
mutex_unlock(&spi->controller->io_mutex);
dev_err(&spi->controller->dev, "Failed to power device: %d\n",
status);
return status;
}
/*
		 * We do not want to return a positive value from pm_runtime_get;
		 * there are many instances of devices calling spi_setup() and
* checking for a non-zero return value instead of a negative
* return value.
*/
status = 0;
spi_set_cs(spi, false, true);
pm_runtime_mark_last_busy(spi->controller->dev.parent);
pm_runtime_put_autosuspend(spi->controller->dev.parent);
} else {
spi_set_cs(spi, false, true);
}
mutex_unlock(&spi->controller->io_mutex);
if (spi->rt && !spi->controller->rt) {
spi->controller->rt = true;
spi_set_thread_rt(spi->controller);
}
trace_spi_setup(spi, status);
dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
spi->mode & SPI_MODE_X_MASK,
(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
(spi->mode & SPI_3WIRE) ? "3wire, " : "",
(spi->mode & SPI_LOOP) ? "loopback, " : "",
spi->bits_per_word, spi->max_speed_hz,
status);
return status;
}
EXPORT_SYMBOL_GPL(spi_setup);
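/*
 * Usage sketch (illustrative only): a protocol driver that needs a
 * non-default configuration updates the spi_device fields and then calls
 * spi_setup() from a sleepable context, typically its probe routine. The
 * values below are arbitrary examples, not requirements.
 *
 *	static int foo_client_probe(struct spi_device *spi)
 *	{
 *		spi->mode = SPI_MODE_3;
 *		spi->bits_per_word = 16;
 *		spi->max_speed_hz = 1000000;
 *
 *		return spi_setup(spi);
 *	}
 */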
static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
struct spi_device *spi)
{
int delay1, delay2;
delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
if (delay1 < 0)
return delay1;
delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
if (delay2 < 0)
return delay2;
if (delay1 < delay2)
memcpy(&xfer->word_delay, &spi->word_delay,
sizeof(xfer->word_delay));
return 0;
}
static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
struct spi_controller *ctlr = spi->controller;
struct spi_transfer *xfer;
int w_size;
if (list_empty(&message->transfers))
return -EINVAL;
/*
* If an SPI controller does not support toggling the CS line on each
* transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
* for the CS line, we can emulate the CS-per-word hardware function by
* splitting transfers into one-word transfers and ensuring that
* cs_change is set for each transfer.
*/
if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
spi_get_csgpiod(spi, 0))) {
size_t maxsize = BITS_TO_BYTES(spi->bits_per_word);
int ret;
/* spi_split_transfers_maxsize() requires message->spi */
message->spi = spi;
ret = spi_split_transfers_maxsize(ctlr, message, maxsize,
GFP_KERNEL);
if (ret)
return ret;
list_for_each_entry(xfer, &message->transfers, transfer_list) {
/* Don't change cs_change on the last entry in the list */
if (list_is_last(&xfer->transfer_list, &message->transfers))
break;
xfer->cs_change = 1;
}
}
/*
* Half-duplex links include original MicroWire, and ones with
* only one data pin like SPI_3WIRE (switches direction) or where
* either MOSI or MISO is missing. They can also be caused by
* software limitations.
*/
if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
(spi->mode & SPI_3WIRE)) {
unsigned flags = ctlr->flags;
list_for_each_entry(xfer, &message->transfers, transfer_list) {
if (xfer->rx_buf && xfer->tx_buf)
return -EINVAL;
if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
return -EINVAL;
if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
return -EINVAL;
}
}
/*
* Set transfer bits_per_word and max speed as spi device default if
* it is not set for this transfer.
* Set transfer tx_nbits and rx_nbits as single transfer default
* (SPI_NBITS_SINGLE) if it is not set for this transfer.
* Ensure transfer word_delay is at least as long as that required by
* device itself.
*/
message->frame_length = 0;
list_for_each_entry(xfer, &message->transfers, transfer_list) {
xfer->effective_speed_hz = 0;
message->frame_length += xfer->len;
if (!xfer->bits_per_word)
xfer->bits_per_word = spi->bits_per_word;
if (!xfer->speed_hz)
xfer->speed_hz = spi->max_speed_hz;
if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
xfer->speed_hz = ctlr->max_speed_hz;
if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
return -EINVAL;
/*
* SPI transfer length should be multiple of SPI word size
* where SPI word size should be power-of-two multiple.
*/
if (xfer->bits_per_word <= 8)
w_size = 1;
else if (xfer->bits_per_word <= 16)
w_size = 2;
else
w_size = 4;
/* No partial transfers accepted */
if (xfer->len % w_size)
return -EINVAL;
if (xfer->speed_hz && ctlr->min_speed_hz &&
xfer->speed_hz < ctlr->min_speed_hz)
return -EINVAL;
if (xfer->tx_buf && !xfer->tx_nbits)
xfer->tx_nbits = SPI_NBITS_SINGLE;
if (xfer->rx_buf && !xfer->rx_nbits)
xfer->rx_nbits = SPI_NBITS_SINGLE;
/*
* Check transfer tx/rx_nbits:
* 1. check the value matches one of single, dual and quad
* 2. check tx/rx_nbits match the mode in spi_device
*/
if (xfer->tx_buf) {
if (spi->mode & SPI_NO_TX)
return -EINVAL;
if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
xfer->tx_nbits != SPI_NBITS_DUAL &&
xfer->tx_nbits != SPI_NBITS_QUAD)
return -EINVAL;
if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
return -EINVAL;
if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
!(spi->mode & SPI_TX_QUAD))
return -EINVAL;
}
/* Check transfer rx_nbits */
if (xfer->rx_buf) {
if (spi->mode & SPI_NO_RX)
return -EINVAL;
if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
xfer->rx_nbits != SPI_NBITS_DUAL &&
xfer->rx_nbits != SPI_NBITS_QUAD)
return -EINVAL;
if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
return -EINVAL;
if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
!(spi->mode & SPI_RX_QUAD))
return -EINVAL;
}
if (_spi_xfer_word_delay_update(xfer, spi))
return -EINVAL;
}
message->status = -EINPROGRESS;
return 0;
}
static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
struct spi_controller *ctlr = spi->controller;
struct spi_transfer *xfer;
/*
* Some controllers do not support doing regular SPI transfers. Return
* ENOTSUPP when this is the case.
*/
if (!ctlr->transfer)
return -ENOTSUPP;
message->spi = spi;
SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_async);
SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_async);
trace_spi_message_submit(message);
if (!ctlr->ptp_sts_supported) {
list_for_each_entry(xfer, &message->transfers, transfer_list) {
xfer->ptp_sts_word_pre = 0;
ptp_read_system_prets(xfer->ptp_sts);
}
}
return ctlr->transfer(spi, message);
}
/**
* spi_async - asynchronous SPI transfer
* @spi: device with which data will be exchanged
* @message: describes the data transfers, including completion callback
* Context: any (IRQs may be blocked, etc)
*
* This call may be used in_irq and other contexts which can't sleep,
* as well as from task contexts which can sleep.
*
* The completion callback is invoked in a context which can't sleep.
* Before that invocation, the value of message->status is undefined.
* When the callback is issued, message->status holds either zero (to
* indicate complete success) or a negative error code. After that
* callback returns, the driver which issued the transfer request may
* deallocate the associated memory; it's no longer in use by any SPI
* core or controller driver code.
*
* Note that although all messages to a spi_device are handled in
* FIFO order, messages may go to different devices in other orders.
* Some device might be higher priority, or have various "hard" access
* time requirements, for example.
*
* On detection of any fault during the transfer, processing of
* the entire message is aborted, and the device is deselected.
* Until returning from the associated message completion callback,
* no other spi_message queued to that device will be processed.
* (This rule applies equally to all the synchronous transfer calls,
* which are wrappers around this core asynchronous primitive.)
*
* Return: zero on success, else a negative error code.
*/
int spi_async(struct spi_device *spi, struct spi_message *message)
{
struct spi_controller *ctlr = spi->controller;
int ret;
unsigned long flags;
ret = __spi_validate(spi, message);
if (ret != 0)
return ret;
spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
if (ctlr->bus_lock_flag)
ret = -EBUSY;
else
ret = __spi_async(spi, message);
spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(spi_async);
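/*
 * Usage sketch (illustrative only): spi_async() callers provide a completion
 * callback; the example below simply signals a completion from it. A real
 * user would typically do follow-up work in the callback instead of waiting.
 * rxbuf must stay valid (and should be DMA-safe) until the callback runs.
 * The foo_* names are hypothetical.
 *
 *	static void foo_complete(void *context)
 *	{
 *		complete(context);
 *	}
 *
 *	static int foo_read_async(struct spi_device *spi, void *rxbuf, size_t len)
 *	{
 *		DECLARE_COMPLETION_ONSTACK(done);
 *		struct spi_transfer xfer = { .rx_buf = rxbuf, .len = len };
 *		struct spi_message msg;
 *		int ret;
 *
 *		spi_message_init_with_transfers(&msg, &xfer, 1);
 *		msg.complete = foo_complete;
 *		msg.context = &done;
 *
 *		ret = spi_async(spi, &msg);
 *		if (ret)
 *			return ret;
 *
 *		wait_for_completion(&done);
 *		return msg.status;
 *	}
 */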
/**
* spi_async_locked - version of spi_async with exclusive bus usage
* @spi: device with which data will be exchanged
* @message: describes the data transfers, including completion callback
* Context: any (IRQs may be blocked, etc)
*
* This call may be used in_irq and other contexts which can't sleep,
* as well as from task contexts which can sleep.
*
* The completion callback is invoked in a context which can't sleep.
* Before that invocation, the value of message->status is undefined.
* When the callback is issued, message->status holds either zero (to
* indicate complete success) or a negative error code. After that
* callback returns, the driver which issued the transfer request may
* deallocate the associated memory; it's no longer in use by any SPI
* core or controller driver code.
*
* Note that although all messages to a spi_device are handled in
* FIFO order, messages may go to different devices in other orders.
* Some device might be higher priority, or have various "hard" access
* time requirements, for example.
*
* On detection of any fault during the transfer, processing of
* the entire message is aborted, and the device is deselected.
* Until returning from the associated message completion callback,
* no other spi_message queued to that device will be processed.
* (This rule applies equally to all the synchronous transfer calls,
* which are wrappers around this core asynchronous primitive.)
*
* Return: zero on success, else a negative error code.
*/
static int spi_async_locked(struct spi_device *spi, struct spi_message *message)
{
struct spi_controller *ctlr = spi->controller;
int ret;
unsigned long flags;
ret = __spi_validate(spi, message);
if (ret != 0)
return ret;
spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
ret = __spi_async(spi, message);
spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
return ret;
}
static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct spi_message *msg)
{
bool was_busy;
int ret;
mutex_lock(&ctlr->io_mutex);
was_busy = ctlr->busy;
ctlr->cur_msg = msg;
ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
if (ret)
goto out;
ctlr->cur_msg = NULL;
ctlr->fallback = false;
if (!was_busy) {
kfree(ctlr->dummy_rx);
ctlr->dummy_rx = NULL;
kfree(ctlr->dummy_tx);
ctlr->dummy_tx = NULL;
if (ctlr->unprepare_transfer_hardware &&
ctlr->unprepare_transfer_hardware(ctlr))
dev_err(&ctlr->dev,
"failed to unprepare transfer hardware\n");
spi_idle_runtime_pm(ctlr);
}
out:
mutex_unlock(&ctlr->io_mutex);
}
/*-------------------------------------------------------------------------*/
/*
* Utility methods for SPI protocol drivers, layered on
* top of the core. Some other utility methods are defined as
* inline functions.
*/
static void spi_complete(void *arg)
{
complete(arg);
}
static int __spi_sync(struct spi_device *spi, struct spi_message *message)
{
DECLARE_COMPLETION_ONSTACK(done);
int status;
struct spi_controller *ctlr = spi->controller;
status = __spi_validate(spi, message);
if (status != 0)
return status;
message->spi = spi;
SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync);
SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync);
/*
* Checking queue_empty here only guarantees async/sync message
* ordering when coming from the same context. It does not need to
* guard against reentrancy from a different context. The io_mutex
* will catch those cases.
*/
if (READ_ONCE(ctlr->queue_empty) && !ctlr->must_async) {
message->actual_length = 0;
message->status = -EINPROGRESS;
trace_spi_message_submit(message);
SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync_immediate);
SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync_immediate);
__spi_transfer_message_noqueue(ctlr, message);
return message->status;
}
/*
* There are messages in the async queue that could have originated
* from the same context, so we need to preserve ordering.
	 * Therefore we send the message to the async queue and wait until it
	 * is completed.
*/
message->complete = spi_complete;
message->context = &done;
status = spi_async_locked(spi, message);
if (status == 0) {
wait_for_completion(&done);
status = message->status;
}
message->context = NULL;
return status;
}
/**
* spi_sync - blocking/synchronous SPI data transfers
* @spi: device with which data will be exchanged
* @message: describes the data transfers
* Context: can sleep
*
* This call may only be used from a context that may sleep. The sleep
* is non-interruptible, and has no timeout. Low-overhead controller
* drivers may DMA directly into and out of the message buffers.
*
* Note that the SPI device's chip select is active during the message,
* and then is normally disabled between messages. Drivers for some
* frequently-used devices may want to minimize costs of selecting a chip,
* by leaving it selected in anticipation that the next message will go
* to the same chip. (That may increase power usage.)
*
* Also, the caller is guaranteeing that the memory associated with the
* message will not be freed before this call returns.
*
* Return: zero on success, else a negative error code.
*/
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
int ret;
mutex_lock(&spi->controller->bus_lock_mutex);
ret = __spi_sync(spi, message);
mutex_unlock(&spi->controller->bus_lock_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(spi_sync);
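/*
 * Usage sketch (illustrative only): a protocol driver issuing a simple write
 * followed by a read with spi_sync(). The register layout (one command byte,
 * one data byte) is a made-up example; buffers handed to spi_sync() should be
 * DMA-safe when the controller uses DMA, which the stack variables here are
 * not.
 *
 *	static int foo_read_reg(struct spi_device *spi, u8 reg, u8 *val)
 *	{
 *		struct spi_transfer xfers[2] = {
 *			{ .tx_buf = &reg, .len = 1 },
 *			{ .rx_buf = val, .len = 1 },
 *		};
 *		struct spi_message msg;
 *
 *		spi_message_init_with_transfers(&msg, xfers, 2);
 *		return spi_sync(spi, &msg);
 *	}
 */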
/**
* spi_sync_locked - version of spi_sync with exclusive bus usage
* @spi: device with which data will be exchanged
* @message: describes the data transfers
* Context: can sleep
*
* This call may only be used from a context that may sleep. The sleep
* is non-interruptible, and has no timeout. Low-overhead controller
* drivers may DMA directly into and out of the message buffers.
*
* This call should be used by drivers that require exclusive access to the
* SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
* be released by a spi_bus_unlock call when the exclusive access is over.
*
* Return: zero on success, else a negative error code.
*/
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
return __spi_sync(spi, message);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);
/**
* spi_bus_lock - obtain a lock for exclusive SPI bus usage
* @ctlr: SPI bus master that should be locked for exclusive bus access
* Context: can sleep
*
* This call may only be used from a context that may sleep. The sleep
* is non-interruptible, and has no timeout.
*
* This call should be used by drivers that require exclusive access to the
* SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
* exclusive access is over. Data transfer must be done by spi_sync_locked
* and spi_async_locked calls when the SPI bus lock is held.
*
* Return: always zero.
*/
int spi_bus_lock(struct spi_controller *ctlr)
{
unsigned long flags;
mutex_lock(&ctlr->bus_lock_mutex);
spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
ctlr->bus_lock_flag = 1;
spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
/* Mutex remains locked until spi_bus_unlock() is called */
return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);
/**
* spi_bus_unlock - release the lock for exclusive SPI bus usage
* @ctlr: SPI bus master that was locked for exclusive bus access
* Context: can sleep
*
* This call may only be used from a context that may sleep. The sleep
* is non-interruptible, and has no timeout.
*
* This call releases an SPI bus lock previously obtained by an spi_bus_lock
* call.
*
* Return: always zero.
*/
int spi_bus_unlock(struct spi_controller *ctlr)
{
ctlr->bus_lock_flag = 0;
mutex_unlock(&ctlr->bus_lock_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
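/*
 * Usage sketch (illustrative only): a driver that must issue two messages
 * back to back without any other client getting onto the bus in between
 * brackets them with spi_bus_lock()/spi_bus_unlock() and uses the _locked
 * transfer variants while the lock is held. foo_atomic_sequence is a
 * hypothetical helper.
 *
 *	static int foo_atomic_sequence(struct spi_device *spi,
 *				       struct spi_message *first,
 *				       struct spi_message *second)
 *	{
 *		struct spi_controller *ctlr = spi->controller;
 *		int ret;
 *
 *		spi_bus_lock(ctlr);
 *		ret = spi_sync_locked(spi, first);
 *		if (!ret)
 *			ret = spi_sync_locked(spi, second);
 *		spi_bus_unlock(ctlr);
 *
 *		return ret;
 *	}
 */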
/* Portable code must never pass more than 32 bytes */
#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
static u8 *buf;
/**
* spi_write_then_read - SPI synchronous write followed by read
* @spi: device with which data will be exchanged
* @txbuf: data to be written (need not be DMA-safe)
* @n_tx: size of txbuf, in bytes
* @rxbuf: buffer into which data will be read (need not be DMA-safe)
* @n_rx: size of rxbuf, in bytes
* Context: can sleep
*
 * This performs a half-duplex MicroWire-style transaction with the
* device, sending txbuf and then reading rxbuf. The return value
* is zero for success, else a negative errno status code.
* This call may only be used from a context that may sleep.
*
* Parameters to this routine are always copied using a small buffer.
* Performance-sensitive or bulk transfer code should instead use
* spi_{async,sync}() calls with DMA-safe buffers.
*
* Return: zero on success, else a negative error code.
*/
int spi_write_then_read(struct spi_device *spi,
const void *txbuf, unsigned n_tx,
void *rxbuf, unsigned n_rx)
{
static DEFINE_MUTEX(lock);
int status;
struct spi_message message;
struct spi_transfer x[2];
u8 *local_buf;
/*
	 * Use the preallocated DMA-safe buffer if we can. We can't avoid
	 * copying here (this is a pure convenience thing), but we can
* keep heap costs out of the hot path unless someone else is
* using the pre-allocated buffer or the transfer is too large.
*/
if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
GFP_KERNEL | GFP_DMA);
if (!local_buf)
return -ENOMEM;
} else {
local_buf = buf;
}
spi_message_init(&message);
memset(x, 0, sizeof(x));
if (n_tx) {
x[0].len = n_tx;
spi_message_add_tail(&x[0], &message);
}
if (n_rx) {
x[1].len = n_rx;
spi_message_add_tail(&x[1], &message);
}
memcpy(local_buf, txbuf, n_tx);
x[0].tx_buf = local_buf;
x[1].rx_buf = local_buf + n_tx;
/* Do the I/O */
status = spi_sync(spi, &message);
if (status == 0)
memcpy(rxbuf, x[1].rx_buf, n_rx);
if (x[0].tx_buf == buf)
mutex_unlock(&lock);
else
kfree(local_buf);
return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);
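/*
 * Usage sketch (illustrative only): reading one 8-bit register from a
 * hypothetical device whose protocol is "send a command byte, read a
 * data byte back". The command value 0x05 is made up for the example;
 * note that both buffers may live on the stack since they need not be
 * DMA-safe.
 *
 *	u8 cmd = 0x05;
 *	u8 val;
 *	int ret = spi_write_then_read(spi, &cmd, 1, &val, 1);
 *	if (ret)
 *		return ret;
 */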
/*-------------------------------------------------------------------------*/
#if IS_ENABLED(CONFIG_OF_DYNAMIC)
/* Must call put_device() when done with the returned spi_device */
static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
{
struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);
return dev ? to_spi_device(dev) : NULL;
}
/* The SPI controllers are not using the spi_bus, so we find them another way */
static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
{
struct device *dev;
dev = class_find_device_by_of_node(&spi_master_class, node);
if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
dev = class_find_device_by_of_node(&spi_slave_class, node);
if (!dev)
return NULL;
/* Reference got in class_find_device */
return container_of(dev, struct spi_controller, dev);
}
static int of_spi_notify(struct notifier_block *nb, unsigned long action,
void *arg)
{
struct of_reconfig_data *rd = arg;
struct spi_controller *ctlr;
struct spi_device *spi;
switch (of_reconfig_get_state_change(action, arg)) {
case OF_RECONFIG_CHANGE_ADD:
ctlr = of_find_spi_controller_by_node(rd->dn->parent);
if (ctlr == NULL)
return NOTIFY_OK; /* Not for us */
if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
put_device(&ctlr->dev);
return NOTIFY_OK;
}
/*
* Clear the flag before adding the device so that fw_devlink
* doesn't skip adding consumers to this device.
*/
rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
spi = of_register_spi_device(ctlr, rd->dn);
put_device(&ctlr->dev);
if (IS_ERR(spi)) {
pr_err("%s: failed to create for '%pOF'\n",
__func__, rd->dn);
of_node_clear_flag(rd->dn, OF_POPULATED);
return notifier_from_errno(PTR_ERR(spi));
}
break;
case OF_RECONFIG_CHANGE_REMOVE:
/* Already depopulated? */
if (!of_node_check_flag(rd->dn, OF_POPULATED))
return NOTIFY_OK;
/* Find our device by node */
spi = of_find_spi_device_by_node(rd->dn);
if (spi == NULL)
return NOTIFY_OK; /* No? not meant for us */
/* Unregister takes one ref away */
spi_unregister_device(spi);
/* And put the reference of the find */
put_device(&spi->dev);
break;
}
return NOTIFY_OK;
}
static struct notifier_block spi_of_notifier = {
.notifier_call = of_spi_notify,
};
#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
extern struct notifier_block spi_of_notifier;
#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
#if IS_ENABLED(CONFIG_ACPI)
static int spi_acpi_controller_match(struct device *dev, const void *data)
{
return ACPI_COMPANION(dev->parent) == data;
}
static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
{
struct device *dev;
dev = class_find_device(&spi_master_class, NULL, adev,
spi_acpi_controller_match);
if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
dev = class_find_device(&spi_slave_class, NULL, adev,
spi_acpi_controller_match);
if (!dev)
return NULL;
return container_of(dev, struct spi_controller, dev);
}
static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
{
struct device *dev;
dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
return to_spi_device(dev);
}
static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
void *arg)
{
struct acpi_device *adev = arg;
struct spi_controller *ctlr;
struct spi_device *spi;
switch (value) {
case ACPI_RECONFIG_DEVICE_ADD:
ctlr = acpi_spi_find_controller_by_adev(acpi_dev_parent(adev));
if (!ctlr)
break;
acpi_register_spi_device(ctlr, adev);
put_device(&ctlr->dev);
break;
case ACPI_RECONFIG_DEVICE_REMOVE:
if (!acpi_device_enumerated(adev))
break;
spi = acpi_spi_find_device_by_adev(adev);
if (!spi)
break;
spi_unregister_device(spi);
put_device(&spi->dev);
break;
}
return NOTIFY_OK;
}
static struct notifier_block spi_acpi_notifier = {
.notifier_call = acpi_spi_notify,
};
#else
extern struct notifier_block spi_acpi_notifier;
#endif
static int __init spi_init(void)
{
int status;
buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
if (!buf) {
status = -ENOMEM;
goto err0;
}
status = bus_register(&spi_bus_type);
if (status < 0)
goto err1;
status = class_register(&spi_master_class);
if (status < 0)
goto err2;
if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
status = class_register(&spi_slave_class);
if (status < 0)
goto err3;
}
if (IS_ENABLED(CONFIG_OF_DYNAMIC))
WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
if (IS_ENABLED(CONFIG_ACPI))
WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));
return 0;
err3:
class_unregister(&spi_master_class);
err2:
bus_unregister(&spi_bus_type);
err1:
kfree(buf);
buf = NULL;
err0:
return status;
}
/*
* A board_info is normally registered in arch_initcall(),
* but even essential drivers wait till later.
*
* REVISIT only boardinfo really needs static linking. The rest (device and
* driver registration) _could_ be dynamically linked (modular) ... Costs
* include needing to have boardinfo data structures be much more public.
*/
postcore_initcall(spi_init);
| linux-master | drivers/spi/spi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Marvell Orion SPI controller driver
*
* Author: Shadi Ammouri <[email protected]>
* Copyright (C) 2007-2008 Marvell Ltd.
*/
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spi/spi.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk.h>
#include <linux/sizes.h>
#include <asm/unaligned.h>
#define DRIVER_NAME "orion_spi"
/* Runtime PM autosuspend timeout: PM is fairly light on this driver */
#define SPI_AUTOSUSPEND_TIMEOUT 200
/* Some SoCs using this driver support up to 8 chip selects.
* It is up to the implementer to only use the chip selects
* that are available.
*/
#define ORION_NUM_CHIPSELECTS 8
#define ORION_SPI_WAIT_RDY_MAX_LOOP 2000 /* in usec */
#define ORION_SPI_IF_CTRL_REG 0x00
#define ORION_SPI_IF_CONFIG_REG 0x04
#define ORION_SPI_IF_RXLSBF BIT(14)
#define ORION_SPI_IF_TXLSBF BIT(13)
#define ORION_SPI_DATA_OUT_REG 0x08
#define ORION_SPI_DATA_IN_REG 0x0c
#define ORION_SPI_INT_CAUSE_REG 0x10
#define ORION_SPI_TIMING_PARAMS_REG 0x18
/* Register for the "Direct Mode" */
#define SPI_DIRECT_WRITE_CONFIG_REG 0x20
#define ORION_SPI_TMISO_SAMPLE_MASK (0x3 << 6)
#define ORION_SPI_TMISO_SAMPLE_1 (1 << 6)
#define ORION_SPI_TMISO_SAMPLE_2 (2 << 6)
#define ORION_SPI_MODE_CPOL (1 << 11)
#define ORION_SPI_MODE_CPHA (1 << 12)
#define ORION_SPI_IF_8_16_BIT_MODE (1 << 5)
#define ORION_SPI_CLK_PRESCALE_MASK 0x1F
#define ARMADA_SPI_CLK_PRESCALE_MASK 0xDF
#define ORION_SPI_MODE_MASK (ORION_SPI_MODE_CPOL | \
ORION_SPI_MODE_CPHA)
#define ORION_SPI_CS_MASK 0x1C
#define ORION_SPI_CS_SHIFT 2
#define ORION_SPI_CS(cs) ((cs << ORION_SPI_CS_SHIFT) & \
ORION_SPI_CS_MASK)
enum orion_spi_type {
ORION_SPI,
ARMADA_SPI,
};
struct orion_spi_dev {
enum orion_spi_type typ;
/*
	 * min_divisor and max_hz should be mutually exclusive; the only
	 * case where both are set is to handle armada-370-spi with an
	 * old device tree
*/
unsigned long max_hz;
unsigned int min_divisor;
unsigned int max_divisor;
u32 prescale_mask;
bool is_errata_50mhz_ac;
};
struct orion_direct_acc {
void __iomem *vaddr;
u32 size;
};
struct orion_child_options {
struct orion_direct_acc direct_access;
};
struct orion_spi {
struct spi_controller *host;
void __iomem *base;
struct clk *clk;
struct clk *axi_clk;
const struct orion_spi_dev *devdata;
struct device *dev;
struct orion_child_options child[ORION_NUM_CHIPSELECTS];
};
#ifdef CONFIG_PM
static int orion_spi_runtime_suspend(struct device *dev);
static int orion_spi_runtime_resume(struct device *dev);
#endif
static inline void __iomem *spi_reg(struct orion_spi *orion_spi, u32 reg)
{
return orion_spi->base + reg;
}
static inline void
orion_spi_setbits(struct orion_spi *orion_spi, u32 reg, u32 mask)
{
void __iomem *reg_addr = spi_reg(orion_spi, reg);
u32 val;
val = readl(reg_addr);
val |= mask;
writel(val, reg_addr);
}
static inline void
orion_spi_clrbits(struct orion_spi *orion_spi, u32 reg, u32 mask)
{
void __iomem *reg_addr = spi_reg(orion_spi, reg);
u32 val;
val = readl(reg_addr);
val &= ~mask;
writel(val, reg_addr);
}
static int orion_spi_baudrate_set(struct spi_device *spi, unsigned int speed)
{
u32 tclk_hz;
u32 rate;
u32 prescale;
u32 reg;
struct orion_spi *orion_spi;
const struct orion_spi_dev *devdata;
orion_spi = spi_controller_get_devdata(spi->controller);
devdata = orion_spi->devdata;
tclk_hz = clk_get_rate(orion_spi->clk);
if (devdata->typ == ARMADA_SPI) {
/*
* Given the core_clk (tclk_hz) and the target rate (speed) we
* determine the best values for SPR (in [0 .. 15]) and SPPR (in
* [0..7]) such that
*
* core_clk / (SPR * 2 ** SPPR)
*
* is as big as possible but not bigger than speed.
*/
/* best integer divider: */
unsigned divider = DIV_ROUND_UP(tclk_hz, speed);
unsigned spr, sppr;
if (divider < 16) {
/* This is the easy case, divider is less than 16 */
spr = divider;
sppr = 0;
} else {
unsigned two_pow_sppr;
/*
* Find the highest bit set in divider. This and the
* three next bits define SPR (apart from rounding).
* SPPR is then the number of zero bits that must be
* appended:
*/
sppr = fls(divider) - 4;
/*
* As SPR only has 4 bits, we have to round divider up
* to the next multiple of 2 ** sppr.
*/
two_pow_sppr = 1 << sppr;
divider = (divider + two_pow_sppr - 1) & -two_pow_sppr;
/*
* recalculate sppr as rounding up divider might have
* increased it enough to change the position of the
* highest set bit. In this case the bit that now
* doesn't make it into SPR is 0, so there is no need to
* round again.
*/
sppr = fls(divider) - 4;
spr = divider >> sppr;
/*
* Now do range checking. SPR is constructed to have a
* width of 4 bits, so this is fine for sure. So we
* still need to check for sppr to fit into 3 bits:
*/
if (sppr > 7)
return -EINVAL;
}
prescale = ((sppr & 0x6) << 5) | ((sppr & 0x1) << 4) | spr;
} else {
/*
		 * The supported divider values are 4, 6, 8, ... 30.
		 * Round up, as we look for a speed equal to or lower
		 * than the one requested.
*/
rate = DIV_ROUND_UP(tclk_hz, speed);
rate = roundup(rate, 2);
/* check if requested speed is too small */
if (rate > 30)
return -EINVAL;
if (rate < 4)
rate = 4;
/* Convert the rate to SPI clock divisor value. */
prescale = 0x10 + rate/2;
}
reg = readl(spi_reg(orion_spi, ORION_SPI_IF_CONFIG_REG));
reg = ((reg & ~devdata->prescale_mask) | prescale);
writel(reg, spi_reg(orion_spi, ORION_SPI_IF_CONFIG_REG));
return 0;
}
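/*
 * Worked example for the SPR/SPPR computation in orion_spi_baudrate_set()
 * (values chosen only for illustration): with tclk_hz = 250 MHz and a
 * requested speed of 10 MHz, divider = DIV_ROUND_UP(250000000, 10000000)
 * = 25, which is >= 16, so sppr = fls(25) - 4 = 1. Rounding the divider
 * up to a multiple of 2^sppr gives 26, sppr stays 1 and spr = 26 >> 1
 * = 13, for an SPI clock of 250 MHz / (13 * 2^1) ~= 9.6 MHz, the highest
 * rate that does not exceed the request.
 */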
static void
orion_spi_mode_set(struct spi_device *spi)
{
u32 reg;
struct orion_spi *orion_spi;
orion_spi = spi_controller_get_devdata(spi->controller);
reg = readl(spi_reg(orion_spi, ORION_SPI_IF_CONFIG_REG));
reg &= ~ORION_SPI_MODE_MASK;
if (spi->mode & SPI_CPOL)
reg |= ORION_SPI_MODE_CPOL;
if (spi->mode & SPI_CPHA)
reg |= ORION_SPI_MODE_CPHA;
if (spi->mode & SPI_LSB_FIRST)
reg |= ORION_SPI_IF_RXLSBF | ORION_SPI_IF_TXLSBF;
else
reg &= ~(ORION_SPI_IF_RXLSBF | ORION_SPI_IF_TXLSBF);
writel(reg, spi_reg(orion_spi, ORION_SPI_IF_CONFIG_REG));
}
static void
orion_spi_50mhz_ac_timing_erratum(struct spi_device *spi, unsigned int speed)
{
u32 reg;
struct orion_spi *orion_spi;
orion_spi = spi_controller_get_devdata(spi->controller);
/*
* Erratum description: (Erratum NO. FE-9144572) The device
* SPI interface supports frequencies of up to 50 MHz.
* However, due to this erratum, when the device core clock is
	 * 250 MHz and the SPI interface is configured for a 50 MHz SPI
	 * clock with CPOL=CPHA=1, data corruption may occur on reads
	 * from the SPI device.
* Erratum Workaround:
* Work in one of the following configurations:
* 1. Set CPOL=CPHA=0 in "SPI Interface Configuration
* Register".
* 2. Set TMISO_SAMPLE value to 0x2 in "SPI Timing Parameters 1
* Register" before setting the interface.
*/
reg = readl(spi_reg(orion_spi, ORION_SPI_TIMING_PARAMS_REG));
reg &= ~ORION_SPI_TMISO_SAMPLE_MASK;
if (clk_get_rate(orion_spi->clk) == 250000000 &&
speed == 50000000 && spi->mode & SPI_CPOL &&
spi->mode & SPI_CPHA)
reg |= ORION_SPI_TMISO_SAMPLE_2;
else
reg |= ORION_SPI_TMISO_SAMPLE_1; /* This is the default value */
writel(reg, spi_reg(orion_spi, ORION_SPI_TIMING_PARAMS_REG));
}
/*
* called only when no transfer is active on the bus
*/
static int
orion_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
{
struct orion_spi *orion_spi;
unsigned int speed = spi->max_speed_hz;
unsigned int bits_per_word = spi->bits_per_word;
int rc;
orion_spi = spi_controller_get_devdata(spi->controller);
if ((t != NULL) && t->speed_hz)
speed = t->speed_hz;
if ((t != NULL) && t->bits_per_word)
bits_per_word = t->bits_per_word;
orion_spi_mode_set(spi);
if (orion_spi->devdata->is_errata_50mhz_ac)
orion_spi_50mhz_ac_timing_erratum(spi, speed);
rc = orion_spi_baudrate_set(spi, speed);
if (rc)
return rc;
if (bits_per_word == 16)
orion_spi_setbits(orion_spi, ORION_SPI_IF_CONFIG_REG,
ORION_SPI_IF_8_16_BIT_MODE);
else
orion_spi_clrbits(orion_spi, ORION_SPI_IF_CONFIG_REG,
ORION_SPI_IF_8_16_BIT_MODE);
return 0;
}
static void orion_spi_set_cs(struct spi_device *spi, bool enable)
{
struct orion_spi *orion_spi;
void __iomem *ctrl_reg;
u32 val;
orion_spi = spi_controller_get_devdata(spi->controller);
ctrl_reg = spi_reg(orion_spi, ORION_SPI_IF_CTRL_REG);
val = readl(ctrl_reg);
/* Clear existing chip-select and assertion state */
val &= ~(ORION_SPI_CS_MASK | 0x1);
/*
* If this line is using a GPIO to control chip select, this internal
* .set_cs() function will still be called, so we clear any previous
	 * chip select. The CS we activate will not have any electrical effect,
* as it is handled by a GPIO, but that doesn't matter. What we need
* is to deassert the old chip select and assert some other chip select.
*/
val |= ORION_SPI_CS(spi_get_chipselect(spi, 0));
/*
	 * Chip select logic is inverted from spi_set_cs(). For lines using a
	 * GPIO to do chip select, SPI_CS_HIGH is enforced and the inversion
	 * happens in the GPIO library, but we don't care about that, because
	 * in those cases we are dealing with an unused native CS anyway, so
	 * the polarity doesn't matter.
*/
if (!enable)
val |= 0x1;
/*
* To avoid toggling unwanted chip selects update the register
* with a single write.
*/
writel(val, ctrl_reg);
}
static inline int orion_spi_wait_till_ready(struct orion_spi *orion_spi)
{
int i;
for (i = 0; i < ORION_SPI_WAIT_RDY_MAX_LOOP; i++) {
if (readl(spi_reg(orion_spi, ORION_SPI_INT_CAUSE_REG)))
return 1;
udelay(1);
}
return -1;
}
static inline int
orion_spi_write_read_8bit(struct spi_device *spi,
const u8 **tx_buf, u8 **rx_buf)
{
void __iomem *tx_reg, *rx_reg, *int_reg;
struct orion_spi *orion_spi;
bool cs_single_byte;
cs_single_byte = spi->mode & SPI_CS_WORD;
orion_spi = spi_controller_get_devdata(spi->controller);
if (cs_single_byte)
orion_spi_set_cs(spi, 0);
tx_reg = spi_reg(orion_spi, ORION_SPI_DATA_OUT_REG);
rx_reg = spi_reg(orion_spi, ORION_SPI_DATA_IN_REG);
int_reg = spi_reg(orion_spi, ORION_SPI_INT_CAUSE_REG);
/* clear the interrupt cause register */
writel(0x0, int_reg);
if (tx_buf && *tx_buf)
writel(*(*tx_buf)++, tx_reg);
else
writel(0, tx_reg);
if (orion_spi_wait_till_ready(orion_spi) < 0) {
if (cs_single_byte) {
orion_spi_set_cs(spi, 1);
			/* Satisfy some SLIC devices' requirements */
udelay(4);
}
dev_err(&spi->dev, "TXS timed out\n");
return -1;
}
if (rx_buf && *rx_buf)
*(*rx_buf)++ = readl(rx_reg);
if (cs_single_byte) {
orion_spi_set_cs(spi, 1);
		/* Satisfy some SLIC devices' requirements */
udelay(4);
}
return 1;
}
static inline int
orion_spi_write_read_16bit(struct spi_device *spi,
const u16 **tx_buf, u16 **rx_buf)
{
void __iomem *tx_reg, *rx_reg, *int_reg;
struct orion_spi *orion_spi;
if (spi->mode & SPI_CS_WORD) {
dev_err(&spi->dev, "SPI_CS_WORD is only supported for 8 bit words\n");
return -1;
}
orion_spi = spi_controller_get_devdata(spi->controller);
tx_reg = spi_reg(orion_spi, ORION_SPI_DATA_OUT_REG);
rx_reg = spi_reg(orion_spi, ORION_SPI_DATA_IN_REG);
int_reg = spi_reg(orion_spi, ORION_SPI_INT_CAUSE_REG);
/* clear the interrupt cause register */
writel(0x0, int_reg);
if (tx_buf && *tx_buf)
writel(__cpu_to_le16(get_unaligned((*tx_buf)++)), tx_reg);
else
writel(0, tx_reg);
if (orion_spi_wait_till_ready(orion_spi) < 0) {
dev_err(&spi->dev, "TXS timed out\n");
return -1;
}
if (rx_buf && *rx_buf)
put_unaligned(__le16_to_cpu(readl(rx_reg)), (*rx_buf)++);
return 1;
}
static unsigned int
orion_spi_write_read(struct spi_device *spi, struct spi_transfer *xfer)
{
unsigned int count;
int word_len;
struct orion_spi *orion_spi;
int cs = spi_get_chipselect(spi, 0);
void __iomem *vaddr;
word_len = spi->bits_per_word;
count = xfer->len;
orion_spi = spi_controller_get_devdata(spi->controller);
/*
* Use SPI direct write mode if base address is available
* and SPI_CS_WORD flag is not set.
* Otherwise fall back to PIO mode for this transfer.
*/
vaddr = orion_spi->child[cs].direct_access.vaddr;
if (vaddr && xfer->tx_buf && word_len == 8 && (spi->mode & SPI_CS_WORD) == 0) {
unsigned int cnt = count / 4;
unsigned int rem = count % 4;
/*
* Send the TX-data to the SPI device via the direct
* mapped address window
*/
iowrite32_rep(vaddr, xfer->tx_buf, cnt);
if (rem) {
u32 *buf = (u32 *)xfer->tx_buf;
iowrite8_rep(vaddr, &buf[cnt], rem);
}
return count;
}
if (word_len == 8) {
const u8 *tx = xfer->tx_buf;
u8 *rx = xfer->rx_buf;
do {
if (orion_spi_write_read_8bit(spi, &tx, &rx) < 0)
goto out;
count--;
spi_delay_exec(&xfer->word_delay, xfer);
} while (count);
} else if (word_len == 16) {
const u16 *tx = xfer->tx_buf;
u16 *rx = xfer->rx_buf;
do {
if (orion_spi_write_read_16bit(spi, &tx, &rx) < 0)
goto out;
count -= 2;
spi_delay_exec(&xfer->word_delay, xfer);
} while (count);
}
out:
return xfer->len - count;
}
static int orion_spi_transfer_one(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *t)
{
int status = 0;
status = orion_spi_setup_transfer(spi, t);
if (status < 0)
return status;
if (t->len)
orion_spi_write_read(spi, t);
return status;
}
static int orion_spi_setup(struct spi_device *spi)
{
int ret;
#ifdef CONFIG_PM
struct orion_spi *orion_spi = spi_controller_get_devdata(spi->controller);
struct device *dev = orion_spi->dev;
orion_spi_runtime_resume(dev);
#endif
ret = orion_spi_setup_transfer(spi, NULL);
#ifdef CONFIG_PM
orion_spi_runtime_suspend(dev);
#endif
return ret;
}
static int orion_spi_reset(struct orion_spi *orion_spi)
{
	/* Ensure that the CS is deasserted */
orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1);
/* Don't deassert CS between the direct mapped SPI transfers */
writel(0, spi_reg(orion_spi, SPI_DIRECT_WRITE_CONFIG_REG));
return 0;
}
static const struct orion_spi_dev orion_spi_dev_data = {
.typ = ORION_SPI,
.min_divisor = 4,
.max_divisor = 30,
.prescale_mask = ORION_SPI_CLK_PRESCALE_MASK,
};
static const struct orion_spi_dev armada_370_spi_dev_data = {
.typ = ARMADA_SPI,
.min_divisor = 4,
.max_divisor = 1920,
.max_hz = 50000000,
.prescale_mask = ARMADA_SPI_CLK_PRESCALE_MASK,
};
static const struct orion_spi_dev armada_xp_spi_dev_data = {
.typ = ARMADA_SPI,
.max_hz = 50000000,
.max_divisor = 1920,
.prescale_mask = ARMADA_SPI_CLK_PRESCALE_MASK,
};
static const struct orion_spi_dev armada_375_spi_dev_data = {
.typ = ARMADA_SPI,
.min_divisor = 15,
.max_divisor = 1920,
.prescale_mask = ARMADA_SPI_CLK_PRESCALE_MASK,
};
static const struct orion_spi_dev armada_380_spi_dev_data = {
.typ = ARMADA_SPI,
.max_hz = 50000000,
.max_divisor = 1920,
.prescale_mask = ARMADA_SPI_CLK_PRESCALE_MASK,
.is_errata_50mhz_ac = true,
};
static const struct of_device_id orion_spi_of_match_table[] = {
{
.compatible = "marvell,orion-spi",
.data = &orion_spi_dev_data,
},
{
.compatible = "marvell,armada-370-spi",
.data = &armada_370_spi_dev_data,
},
{
.compatible = "marvell,armada-375-spi",
.data = &armada_375_spi_dev_data,
},
{
.compatible = "marvell,armada-380-spi",
.data = &armada_380_spi_dev_data,
},
{
.compatible = "marvell,armada-390-spi",
.data = &armada_xp_spi_dev_data,
},
{
.compatible = "marvell,armada-xp-spi",
.data = &armada_xp_spi_dev_data,
},
{}
};
MODULE_DEVICE_TABLE(of, orion_spi_of_match_table);
static int orion_spi_probe(struct platform_device *pdev)
{
const struct orion_spi_dev *devdata;
struct spi_controller *host;
struct orion_spi *spi;
struct resource *r;
unsigned long tclk_hz;
int status = 0;
struct device_node *np;
host = spi_alloc_host(&pdev->dev, sizeof(*spi));
if (host == NULL) {
dev_dbg(&pdev->dev, "host allocation failed\n");
return -ENOMEM;
}
if (pdev->id != -1)
host->bus_num = pdev->id;
if (pdev->dev.of_node) {
u32 cell_index;
if (!of_property_read_u32(pdev->dev.of_node, "cell-index",
&cell_index))
host->bus_num = cell_index;
}
	/* we support all 4 SPI modes, LSB-first and per-word chip select toggling */
host->mode_bits = SPI_CPHA | SPI_CPOL | SPI_LSB_FIRST | SPI_CS_WORD;
host->set_cs = orion_spi_set_cs;
host->transfer_one = orion_spi_transfer_one;
host->num_chipselect = ORION_NUM_CHIPSELECTS;
host->setup = orion_spi_setup;
host->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
host->auto_runtime_pm = true;
host->use_gpio_descriptors = true;
host->flags = SPI_CONTROLLER_GPIO_SS;
platform_set_drvdata(pdev, host);
spi = spi_controller_get_devdata(host);
spi->host = host;
spi->dev = &pdev->dev;
devdata = device_get_match_data(&pdev->dev);
devdata = devdata ? devdata : &orion_spi_dev_data;
spi->devdata = devdata;
spi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(spi->clk)) {
status = PTR_ERR(spi->clk);
goto out;
}
status = clk_prepare_enable(spi->clk);
if (status)
goto out;
/* The following clock is only used by some SoCs */
spi->axi_clk = devm_clk_get(&pdev->dev, "axi");
if (PTR_ERR(spi->axi_clk) == -EPROBE_DEFER) {
status = -EPROBE_DEFER;
goto out_rel_clk;
}
if (!IS_ERR(spi->axi_clk))
clk_prepare_enable(spi->axi_clk);
tclk_hz = clk_get_rate(spi->clk);
/*
* With old device tree, armada-370-spi could be used with
* Armada XP, however for this SoC the maximum frequency is
* 50MHz instead of tclk/4. On Armada 370, tclk cannot be
* higher than 200MHz. So, in order to be able to handle both
* SoCs, we can take the minimum of 50MHz and tclk/4.
*/
if (of_device_is_compatible(pdev->dev.of_node,
"marvell,armada-370-spi"))
host->max_speed_hz = min(devdata->max_hz,
DIV_ROUND_UP(tclk_hz, devdata->min_divisor));
else if (devdata->min_divisor)
host->max_speed_hz =
DIV_ROUND_UP(tclk_hz, devdata->min_divisor);
else
host->max_speed_hz = devdata->max_hz;
host->min_speed_hz = DIV_ROUND_UP(tclk_hz, devdata->max_divisor);
spi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
if (IS_ERR(spi->base)) {
status = PTR_ERR(spi->base);
goto out_rel_axi_clk;
}
for_each_available_child_of_node(pdev->dev.of_node, np) {
struct orion_direct_acc *dir_acc;
u32 cs;
/* Get chip-select number from the "reg" property */
status = of_property_read_u32(np, "reg", &cs);
if (status) {
dev_err(&pdev->dev,
"%pOF has no valid 'reg' property (%d)\n",
np, status);
continue;
}
/*
* Check if an address is configured for this SPI device. If
* not, the MBus mapping via the 'ranges' property in the 'soc'
* node is not configured and this device should not use the
* direct mode. In this case, just continue with the next
* device.
*/
status = of_address_to_resource(pdev->dev.of_node, cs + 1, r);
if (status)
continue;
/*
* Only map one page for direct access. This is enough for the
* simple TX transfer which only writes to the first word.
* This needs to get extended for the direct SPI NOR / SPI NAND
* support, once this gets implemented.
*/
dir_acc = &spi->child[cs].direct_access;
dir_acc->vaddr = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
if (!dir_acc->vaddr) {
status = -ENOMEM;
of_node_put(np);
goto out_rel_axi_clk;
}
dir_acc->size = PAGE_SIZE;
dev_info(&pdev->dev, "CS%d configured for direct access\n", cs);
}
pm_runtime_set_active(&pdev->dev);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
pm_runtime_enable(&pdev->dev);
status = orion_spi_reset(spi);
if (status < 0)
goto out_rel_pm;
host->dev.of_node = pdev->dev.of_node;
status = spi_register_controller(host);
if (status < 0)
goto out_rel_pm;
return status;
out_rel_pm:
pm_runtime_disable(&pdev->dev);
out_rel_axi_clk:
clk_disable_unprepare(spi->axi_clk);
out_rel_clk:
clk_disable_unprepare(spi->clk);
out:
spi_controller_put(host);
return status;
}
static void orion_spi_remove(struct platform_device *pdev)
{
struct spi_controller *host = platform_get_drvdata(pdev);
struct orion_spi *spi = spi_controller_get_devdata(host);
pm_runtime_get_sync(&pdev->dev);
clk_disable_unprepare(spi->axi_clk);
clk_disable_unprepare(spi->clk);
spi_unregister_controller(host);
pm_runtime_disable(&pdev->dev);
}
MODULE_ALIAS("platform:" DRIVER_NAME);
#ifdef CONFIG_PM
static int orion_spi_runtime_suspend(struct device *dev)
{
struct spi_controller *host = dev_get_drvdata(dev);
struct orion_spi *spi = spi_controller_get_devdata(host);
clk_disable_unprepare(spi->axi_clk);
clk_disable_unprepare(spi->clk);
return 0;
}
static int orion_spi_runtime_resume(struct device *dev)
{
struct spi_controller *host = dev_get_drvdata(dev);
struct orion_spi *spi = spi_controller_get_devdata(host);
if (!IS_ERR(spi->axi_clk))
clk_prepare_enable(spi->axi_clk);
return clk_prepare_enable(spi->clk);
}
#endif
static const struct dev_pm_ops orion_spi_pm_ops = {
SET_RUNTIME_PM_OPS(orion_spi_runtime_suspend,
orion_spi_runtime_resume,
NULL)
};
static struct platform_driver orion_spi_driver = {
.driver = {
.name = DRIVER_NAME,
.pm = &orion_spi_pm_ops,
.of_match_table = of_match_ptr(orion_spi_of_match_table),
},
.probe = orion_spi_probe,
.remove_new = orion_spi_remove,
};
module_platform_driver(orion_spi_driver);
MODULE_DESCRIPTION("Orion SPI driver");
MODULE_AUTHOR("Shadi Ammouri <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-orion.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
struct rtspi {
void __iomem *base;
};
/* SPI Flash Configuration Register */
#define RTL_SPI_SFCR 0x00
#define RTL_SPI_SFCR_RBO BIT(28)
#define RTL_SPI_SFCR_WBO BIT(27)
/* SPI Flash Control and Status Register */
#define RTL_SPI_SFCSR 0x08
#define RTL_SPI_SFCSR_CSB0 BIT(31)
#define RTL_SPI_SFCSR_CSB1 BIT(30)
#define RTL_SPI_SFCSR_RDY BIT(27)
#define RTL_SPI_SFCSR_CS BIT(24)
#define RTL_SPI_SFCSR_LEN_MASK ~(0x03 << 28)
#define RTL_SPI_SFCSR_LEN1 (0x00 << 28)
#define RTL_SPI_SFCSR_LEN4 (0x03 << 28)
/* SPI Flash Data Register */
#define RTL_SPI_SFDR 0x0c
#define REG(x) (rtspi->base + x)
static void rt_set_cs(struct spi_device *spi, bool active)
{
struct rtspi *rtspi = spi_controller_get_devdata(spi->controller);
u32 value;
/* CS0 bit is active low */
value = readl(REG(RTL_SPI_SFCSR));
if (active)
value |= RTL_SPI_SFCSR_CSB0;
else
value &= ~RTL_SPI_SFCSR_CSB0;
writel(value, REG(RTL_SPI_SFCSR));
}
static void set_size(struct rtspi *rtspi, int size)
{
u32 value;
value = readl(REG(RTL_SPI_SFCSR));
value &= RTL_SPI_SFCSR_LEN_MASK;
if (size == 4)
value |= RTL_SPI_SFCSR_LEN4;
else if (size == 1)
value |= RTL_SPI_SFCSR_LEN1;
writel(value, REG(RTL_SPI_SFCSR));
}
static inline void wait_ready(struct rtspi *rtspi)
{
while (!(readl(REG(RTL_SPI_SFCSR)) & RTL_SPI_SFCSR_RDY))
cpu_relax();
}
static void send4(struct rtspi *rtspi, const u32 *buf)
{
wait_ready(rtspi);
set_size(rtspi, 4);
writel(*buf, REG(RTL_SPI_SFDR));
}
static void send1(struct rtspi *rtspi, const u8 *buf)
{
wait_ready(rtspi);
set_size(rtspi, 1);
writel(buf[0] << 24, REG(RTL_SPI_SFDR));
}
static void rcv4(struct rtspi *rtspi, u32 *buf)
{
wait_ready(rtspi);
set_size(rtspi, 4);
*buf = readl(REG(RTL_SPI_SFDR));
}
static void rcv1(struct rtspi *rtspi, u8 *buf)
{
wait_ready(rtspi);
set_size(rtspi, 1);
*buf = readl(REG(RTL_SPI_SFDR)) >> 24;
}
static int transfer_one(struct spi_controller *ctrl, struct spi_device *spi,
struct spi_transfer *xfer)
{
struct rtspi *rtspi = spi_controller_get_devdata(ctrl);
void *rx_buf;
const void *tx_buf;
int cnt;
tx_buf = xfer->tx_buf;
rx_buf = xfer->rx_buf;
cnt = xfer->len;
if (tx_buf) {
while (cnt >= 4) {
send4(rtspi, tx_buf);
tx_buf += 4;
cnt -= 4;
}
while (cnt) {
send1(rtspi, tx_buf);
tx_buf++;
cnt--;
}
} else if (rx_buf) {
while (cnt >= 4) {
rcv4(rtspi, rx_buf);
rx_buf += 4;
cnt -= 4;
}
while (cnt) {
rcv1(rtspi, rx_buf);
rx_buf++;
cnt--;
}
}
spi_finalize_current_transfer(ctrl);
return 0;
}
static void init_hw(struct rtspi *rtspi)
{
u32 value;
/* Turn on big-endian byte ordering */
value = readl(REG(RTL_SPI_SFCR));
value |= RTL_SPI_SFCR_RBO | RTL_SPI_SFCR_WBO;
writel(value, REG(RTL_SPI_SFCR));
value = readl(REG(RTL_SPI_SFCSR));
/* Permanently disable CS1, since it's never used */
value |= RTL_SPI_SFCSR_CSB1;
/* Select CS0 for use */
value &= RTL_SPI_SFCSR_CS;
writel(value, REG(RTL_SPI_SFCSR));
}
static int realtek_rtl_spi_probe(struct platform_device *pdev)
{
struct spi_controller *ctrl;
struct rtspi *rtspi;
int err;
ctrl = devm_spi_alloc_host(&pdev->dev, sizeof(*rtspi));
if (!ctrl) {
dev_err(&pdev->dev, "Error allocating SPI controller\n");
return -ENOMEM;
}
platform_set_drvdata(pdev, ctrl);
rtspi = spi_controller_get_devdata(ctrl);
rtspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(rtspi->base)) {
dev_err(&pdev->dev, "Could not map SPI register address");
return -ENOMEM;
}
init_hw(rtspi);
ctrl->dev.of_node = pdev->dev.of_node;
ctrl->flags = SPI_CONTROLLER_HALF_DUPLEX;
ctrl->set_cs = rt_set_cs;
ctrl->transfer_one = transfer_one;
err = devm_spi_register_controller(&pdev->dev, ctrl);
if (err) {
dev_err(&pdev->dev, "Could not register SPI controller\n");
return -ENODEV;
}
return 0;
}
static const struct of_device_id realtek_rtl_spi_of_ids[] = {
{ .compatible = "realtek,rtl8380-spi" },
{ .compatible = "realtek,rtl8382-spi" },
{ .compatible = "realtek,rtl8391-spi" },
{ .compatible = "realtek,rtl8392-spi" },
{ .compatible = "realtek,rtl8393-spi" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, realtek_rtl_spi_of_ids);
static struct platform_driver realtek_rtl_spi_driver = {
.probe = realtek_rtl_spi_probe,
.driver = {
.name = "realtek-rtl-spi",
.of_match_table = realtek_rtl_spi_of_ids,
},
};
module_platform_driver(realtek_rtl_spi_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Bert Vermeulen <[email protected]>");
MODULE_DESCRIPTION("Realtek RTL SPI driver");
| linux-master | drivers/spi/spi-realtek-rtl.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* SPI-Engine SPI controller driver
* Copyright 2015 Analog Devices Inc.
* Author: Lars-Peter Clausen <[email protected]>
*/
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#define SPI_ENGINE_VERSION_MAJOR(x) ((x >> 16) & 0xff)
#define SPI_ENGINE_VERSION_MINOR(x) ((x >> 8) & 0xff)
#define SPI_ENGINE_VERSION_PATCH(x) (x & 0xff)
#define SPI_ENGINE_REG_VERSION 0x00
#define SPI_ENGINE_REG_RESET 0x40
#define SPI_ENGINE_REG_INT_ENABLE 0x80
#define SPI_ENGINE_REG_INT_PENDING 0x84
#define SPI_ENGINE_REG_INT_SOURCE 0x88
#define SPI_ENGINE_REG_SYNC_ID 0xc0
#define SPI_ENGINE_REG_CMD_FIFO_ROOM 0xd0
#define SPI_ENGINE_REG_SDO_FIFO_ROOM 0xd4
#define SPI_ENGINE_REG_SDI_FIFO_LEVEL 0xd8
#define SPI_ENGINE_REG_CMD_FIFO 0xe0
#define SPI_ENGINE_REG_SDO_DATA_FIFO 0xe4
#define SPI_ENGINE_REG_SDI_DATA_FIFO 0xe8
#define SPI_ENGINE_REG_SDI_DATA_FIFO_PEEK 0xec
#define SPI_ENGINE_INT_CMD_ALMOST_EMPTY BIT(0)
#define SPI_ENGINE_INT_SDO_ALMOST_EMPTY BIT(1)
#define SPI_ENGINE_INT_SDI_ALMOST_FULL BIT(2)
#define SPI_ENGINE_INT_SYNC BIT(3)
#define SPI_ENGINE_CONFIG_CPHA BIT(0)
#define SPI_ENGINE_CONFIG_CPOL BIT(1)
#define SPI_ENGINE_CONFIG_3WIRE BIT(2)
#define SPI_ENGINE_INST_TRANSFER 0x0
#define SPI_ENGINE_INST_ASSERT 0x1
#define SPI_ENGINE_INST_WRITE 0x2
#define SPI_ENGINE_INST_MISC 0x3
#define SPI_ENGINE_CMD_REG_CLK_DIV 0x0
#define SPI_ENGINE_CMD_REG_CONFIG 0x1
#define SPI_ENGINE_MISC_SYNC 0x0
#define SPI_ENGINE_MISC_SLEEP 0x1
#define SPI_ENGINE_TRANSFER_WRITE 0x1
#define SPI_ENGINE_TRANSFER_READ 0x2
#define SPI_ENGINE_CMD(inst, arg1, arg2) \
(((inst) << 12) | ((arg1) << 8) | (arg2))
#define SPI_ENGINE_CMD_TRANSFER(flags, n) \
SPI_ENGINE_CMD(SPI_ENGINE_INST_TRANSFER, (flags), (n))
#define SPI_ENGINE_CMD_ASSERT(delay, cs) \
SPI_ENGINE_CMD(SPI_ENGINE_INST_ASSERT, (delay), (cs))
#define SPI_ENGINE_CMD_WRITE(reg, val) \
SPI_ENGINE_CMD(SPI_ENGINE_INST_WRITE, (reg), (val))
#define SPI_ENGINE_CMD_SLEEP(delay) \
SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SLEEP, (delay))
#define SPI_ENGINE_CMD_SYNC(id) \
SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SYNC, (id))
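/*
 * Illustrative instruction stream (not generated verbatim anywhere in this
 * file): a 4-byte write on chip select 0 could be encoded with the macros
 * above as shown below, where config, clk_div and id stand for values
 * computed elsewhere.
 *
 *	SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG, config)
 *	SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV, clk_div)
 *	SPI_ENGINE_CMD_ASSERT(1, 0xfe)			- drive CS0 low, others high
 *	SPI_ENGINE_CMD_TRANSFER(SPI_ENGINE_TRANSFER_WRITE, 3)	- 4 bytes (n - 1 encoding)
 *	SPI_ENGINE_CMD_ASSERT(1, 0xff)			- deassert all chip selects
 *	SPI_ENGINE_CMD_SYNC(id)
 *
 * spi_engine_compile_message() below builds sequences of this form from a
 * struct spi_message.
 */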
struct spi_engine_program {
unsigned int length;
uint16_t instructions[];
};
struct spi_engine {
struct clk *clk;
struct clk *ref_clk;
spinlock_t lock;
void __iomem *base;
struct spi_message *msg;
struct spi_engine_program *p;
unsigned cmd_length;
const uint16_t *cmd_buf;
struct spi_transfer *tx_xfer;
unsigned int tx_length;
const uint8_t *tx_buf;
struct spi_transfer *rx_xfer;
unsigned int rx_length;
uint8_t *rx_buf;
unsigned int sync_id;
unsigned int completed_id;
unsigned int int_enable;
};
static void spi_engine_program_add_cmd(struct spi_engine_program *p,
bool dry, uint16_t cmd)
{
if (!dry)
p->instructions[p->length] = cmd;
p->length++;
}
static unsigned int spi_engine_get_config(struct spi_device *spi)
{
unsigned int config = 0;
if (spi->mode & SPI_CPOL)
config |= SPI_ENGINE_CONFIG_CPOL;
if (spi->mode & SPI_CPHA)
config |= SPI_ENGINE_CONFIG_CPHA;
if (spi->mode & SPI_3WIRE)
config |= SPI_ENGINE_CONFIG_3WIRE;
return config;
}
static unsigned int spi_engine_get_clk_div(struct spi_engine *spi_engine,
struct spi_device *spi, struct spi_transfer *xfer)
{
unsigned int clk_div;
clk_div = DIV_ROUND_UP(clk_get_rate(spi_engine->ref_clk),
xfer->speed_hz * 2);
if (clk_div > 255)
clk_div = 255;
else if (clk_div > 0)
clk_div -= 1;
return clk_div;
}
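/*
 * Worked example (illustrative values): with a 100 MHz reference clock
 * and xfer->speed_hz = 10 MHz, DIV_ROUND_UP(100000000, 20000000) = 5,
 * so clk_div becomes 4 and SCLK = ref_clk / ((clk_div + 1) * 2) = 10 MHz.
 * Requests at or above ref_clk / 2 end up with clk_div = 0.
 */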
static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry,
struct spi_transfer *xfer)
{
unsigned int len = xfer->len;
while (len) {
unsigned int n = min(len, 256U);
unsigned int flags = 0;
if (xfer->tx_buf)
flags |= SPI_ENGINE_TRANSFER_WRITE;
if (xfer->rx_buf)
flags |= SPI_ENGINE_TRANSFER_READ;
spi_engine_program_add_cmd(p, dry,
SPI_ENGINE_CMD_TRANSFER(flags, n - 1));
len -= n;
}
}
static void spi_engine_gen_sleep(struct spi_engine_program *p, bool dry,
struct spi_engine *spi_engine, unsigned int clk_div,
struct spi_transfer *xfer)
{
unsigned int spi_clk = clk_get_rate(spi_engine->ref_clk);
unsigned int t;
int delay;
delay = spi_delay_to_ns(&xfer->delay, xfer);
if (delay < 0)
return;
delay /= 1000;
if (delay == 0)
return;
t = DIV_ROUND_UP(delay * spi_clk, (clk_div + 1) * 2);
while (t) {
unsigned int n = min(t, 256U);
spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_SLEEP(n - 1));
t -= n;
}
}
static void spi_engine_gen_cs(struct spi_engine_program *p, bool dry,
struct spi_device *spi, bool assert)
{
unsigned int mask = 0xff;
if (assert)
mask ^= BIT(spi_get_chipselect(spi, 0));
spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_ASSERT(1, mask));
}
static int spi_engine_compile_message(struct spi_engine *spi_engine,
struct spi_message *msg, bool dry, struct spi_engine_program *p)
{
struct spi_device *spi = msg->spi;
struct spi_transfer *xfer;
int clk_div, new_clk_div;
bool cs_change = true;
clk_div = -1;
spi_engine_program_add_cmd(p, dry,
SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG,
spi_engine_get_config(spi)));
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
new_clk_div = spi_engine_get_clk_div(spi_engine, spi, xfer);
if (new_clk_div != clk_div) {
clk_div = new_clk_div;
spi_engine_program_add_cmd(p, dry,
SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV,
clk_div));
}
if (cs_change)
spi_engine_gen_cs(p, dry, spi, true);
spi_engine_gen_xfer(p, dry, xfer);
spi_engine_gen_sleep(p, dry, spi_engine, clk_div, xfer);
cs_change = xfer->cs_change;
if (list_is_last(&xfer->transfer_list, &msg->transfers))
cs_change = !cs_change;
if (cs_change)
spi_engine_gen_cs(p, dry, spi, false);
}
return 0;
}
static void spi_engine_xfer_next(struct spi_engine *spi_engine,
struct spi_transfer **_xfer)
{
struct spi_message *msg = spi_engine->msg;
struct spi_transfer *xfer = *_xfer;
if (!xfer) {
xfer = list_first_entry(&msg->transfers,
struct spi_transfer, transfer_list);
} else if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
xfer = NULL;
} else {
xfer = list_next_entry(xfer, transfer_list);
}
*_xfer = xfer;
}
static void spi_engine_tx_next(struct spi_engine *spi_engine)
{
struct spi_transfer *xfer = spi_engine->tx_xfer;
do {
spi_engine_xfer_next(spi_engine, &xfer);
} while (xfer && !xfer->tx_buf);
spi_engine->tx_xfer = xfer;
if (xfer) {
spi_engine->tx_length = xfer->len;
spi_engine->tx_buf = xfer->tx_buf;
} else {
spi_engine->tx_buf = NULL;
}
}
static void spi_engine_rx_next(struct spi_engine *spi_engine)
{
struct spi_transfer *xfer = spi_engine->rx_xfer;
do {
spi_engine_xfer_next(spi_engine, &xfer);
} while (xfer && !xfer->rx_buf);
spi_engine->rx_xfer = xfer;
if (xfer) {
spi_engine->rx_length = xfer->len;
spi_engine->rx_buf = xfer->rx_buf;
} else {
spi_engine->rx_buf = NULL;
}
}
static bool spi_engine_write_cmd_fifo(struct spi_engine *spi_engine)
{
void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_CMD_FIFO;
unsigned int n, m, i;
const uint16_t *buf;
n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_CMD_FIFO_ROOM);
while (n && spi_engine->cmd_length) {
m = min(n, spi_engine->cmd_length);
buf = spi_engine->cmd_buf;
for (i = 0; i < m; i++)
writel_relaxed(buf[i], addr);
spi_engine->cmd_buf += m;
spi_engine->cmd_length -= m;
n -= m;
}
return spi_engine->cmd_length != 0;
}
static bool spi_engine_write_tx_fifo(struct spi_engine *spi_engine)
{
void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDO_DATA_FIFO;
unsigned int n, m, i;
const uint8_t *buf;
n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDO_FIFO_ROOM);
while (n && spi_engine->tx_length) {
m = min(n, spi_engine->tx_length);
buf = spi_engine->tx_buf;
for (i = 0; i < m; i++)
writel_relaxed(buf[i], addr);
spi_engine->tx_buf += m;
spi_engine->tx_length -= m;
n -= m;
if (spi_engine->tx_length == 0)
spi_engine_tx_next(spi_engine);
}
return spi_engine->tx_length != 0;
}
static bool spi_engine_read_rx_fifo(struct spi_engine *spi_engine)
{
void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDI_DATA_FIFO;
unsigned int n, m, i;
uint8_t *buf;
n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDI_FIFO_LEVEL);
while (n && spi_engine->rx_length) {
m = min(n, spi_engine->rx_length);
buf = spi_engine->rx_buf;
for (i = 0; i < m; i++)
buf[i] = readl_relaxed(addr);
spi_engine->rx_buf += m;
spi_engine->rx_length -= m;
n -= m;
if (spi_engine->rx_length == 0)
spi_engine_rx_next(spi_engine);
}
return spi_engine->rx_length != 0;
}
static irqreturn_t spi_engine_irq(int irq, void *devid)
{
struct spi_controller *host = devid;
struct spi_engine *spi_engine = spi_controller_get_devdata(host);
unsigned int disable_int = 0;
unsigned int pending;
pending = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
if (pending & SPI_ENGINE_INT_SYNC) {
writel_relaxed(SPI_ENGINE_INT_SYNC,
spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
spi_engine->completed_id = readl_relaxed(
spi_engine->base + SPI_ENGINE_REG_SYNC_ID);
}
spin_lock(&spi_engine->lock);
if (pending & SPI_ENGINE_INT_CMD_ALMOST_EMPTY) {
if (!spi_engine_write_cmd_fifo(spi_engine))
disable_int |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;
}
if (pending & SPI_ENGINE_INT_SDO_ALMOST_EMPTY) {
if (!spi_engine_write_tx_fifo(spi_engine))
disable_int |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;
}
if (pending & (SPI_ENGINE_INT_SDI_ALMOST_FULL | SPI_ENGINE_INT_SYNC)) {
if (!spi_engine_read_rx_fifo(spi_engine))
disable_int |= SPI_ENGINE_INT_SDI_ALMOST_FULL;
}
if (pending & SPI_ENGINE_INT_SYNC) {
if (spi_engine->msg &&
spi_engine->completed_id == spi_engine->sync_id) {
struct spi_message *msg = spi_engine->msg;
kfree(spi_engine->p);
msg->status = 0;
msg->actual_length = msg->frame_length;
spi_engine->msg = NULL;
spi_finalize_current_message(host);
disable_int |= SPI_ENGINE_INT_SYNC;
}
}
if (disable_int) {
spi_engine->int_enable &= ~disable_int;
writel_relaxed(spi_engine->int_enable,
spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
}
spin_unlock(&spi_engine->lock);
return IRQ_HANDLED;
}
static int spi_engine_transfer_one_message(struct spi_controller *host,
struct spi_message *msg)
{
struct spi_engine_program p_dry, *p;
struct spi_engine *spi_engine = spi_controller_get_devdata(host);
unsigned int int_enable = 0;
unsigned long flags;
size_t size;
p_dry.length = 0;
spi_engine_compile_message(spi_engine, msg, true, &p_dry);
size = sizeof(*p->instructions) * (p_dry.length + 1);
p = kzalloc(sizeof(*p) + size, GFP_KERNEL);
if (!p)
return -ENOMEM;
spi_engine_compile_message(spi_engine, msg, false, p);
spin_lock_irqsave(&spi_engine->lock, flags);
spi_engine->sync_id = (spi_engine->sync_id + 1) & 0xff;
spi_engine_program_add_cmd(p, false,
SPI_ENGINE_CMD_SYNC(spi_engine->sync_id));
spi_engine->msg = msg;
spi_engine->p = p;
spi_engine->cmd_buf = p->instructions;
spi_engine->cmd_length = p->length;
if (spi_engine_write_cmd_fifo(spi_engine))
int_enable |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;
spi_engine_tx_next(spi_engine);
if (spi_engine_write_tx_fifo(spi_engine))
int_enable |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;
spi_engine_rx_next(spi_engine);
if (spi_engine->rx_length != 0)
int_enable |= SPI_ENGINE_INT_SDI_ALMOST_FULL;
int_enable |= SPI_ENGINE_INT_SYNC;
writel_relaxed(int_enable,
spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
spi_engine->int_enable = int_enable;
spin_unlock_irqrestore(&spi_engine->lock, flags);
return 0;
}
static int spi_engine_probe(struct platform_device *pdev)
{
struct spi_engine *spi_engine;
struct spi_controller *host;
unsigned int version;
int irq;
int ret;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
spi_engine = devm_kzalloc(&pdev->dev, sizeof(*spi_engine), GFP_KERNEL);
if (!spi_engine)
return -ENOMEM;
host = spi_alloc_host(&pdev->dev, 0);
if (!host)
return -ENOMEM;
spi_controller_set_devdata(host, spi_engine);
spin_lock_init(&spi_engine->lock);
spi_engine->clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
if (IS_ERR(spi_engine->clk)) {
ret = PTR_ERR(spi_engine->clk);
goto err_put_host;
}
spi_engine->ref_clk = devm_clk_get(&pdev->dev, "spi_clk");
if (IS_ERR(spi_engine->ref_clk)) {
ret = PTR_ERR(spi_engine->ref_clk);
goto err_put_host;
}
ret = clk_prepare_enable(spi_engine->clk);
if (ret)
goto err_put_host;
ret = clk_prepare_enable(spi_engine->ref_clk);
if (ret)
goto err_clk_disable;
spi_engine->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(spi_engine->base)) {
ret = PTR_ERR(spi_engine->base);
goto err_ref_clk_disable;
}
version = readl(spi_engine->base + SPI_ENGINE_REG_VERSION);
if (SPI_ENGINE_VERSION_MAJOR(version) != 1) {
dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%c\n",
SPI_ENGINE_VERSION_MAJOR(version),
SPI_ENGINE_VERSION_MINOR(version),
SPI_ENGINE_VERSION_PATCH(version));
ret = -ENODEV;
goto err_ref_clk_disable;
}
writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_RESET);
writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
ret = request_irq(irq, spi_engine_irq, 0, pdev->name, host);
if (ret)
goto err_ref_clk_disable;
host->dev.of_node = pdev->dev.of_node;
host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE;
host->bits_per_word_mask = SPI_BPW_MASK(8);
host->max_speed_hz = clk_get_rate(spi_engine->ref_clk) / 2;
host->transfer_one_message = spi_engine_transfer_one_message;
host->num_chipselect = 8;
ret = spi_register_controller(host);
if (ret)
goto err_free_irq;
platform_set_drvdata(pdev, host);
return 0;
err_free_irq:
free_irq(irq, host);
err_ref_clk_disable:
clk_disable_unprepare(spi_engine->ref_clk);
err_clk_disable:
clk_disable_unprepare(spi_engine->clk);
err_put_host:
spi_controller_put(host);
return ret;
}
static void spi_engine_remove(struct platform_device *pdev)
{
struct spi_controller *host = spi_controller_get(platform_get_drvdata(pdev));
struct spi_engine *spi_engine = spi_controller_get_devdata(host);
int irq = platform_get_irq(pdev, 0);
spi_unregister_controller(host);
free_irq(irq, host);
spi_controller_put(host);
writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET);
clk_disable_unprepare(spi_engine->ref_clk);
clk_disable_unprepare(spi_engine->clk);
}
static const struct of_device_id spi_engine_match_table[] = {
{ .compatible = "adi,axi-spi-engine-1.00.a" },
{ },
};
MODULE_DEVICE_TABLE(of, spi_engine_match_table);
static struct platform_driver spi_engine_driver = {
.probe = spi_engine_probe,
.remove_new = spi_engine_remove,
.driver = {
.name = "spi-engine",
.of_match_table = spi_engine_match_table,
},
};
module_platform_driver(spi_engine_driver);
MODULE_AUTHOR("Lars-Peter Clausen <[email protected]>");
MODULE_DESCRIPTION("Analog Devices SPI engine peripheral driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-axi-spi-engine.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* SPI bus driver for the Topcliff PCH used by Intel SoCs
*
* Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
*/
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/spi/spidev.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/pch_dma.h>
/* Register offsets */
#define PCH_SPCR 0x00 /* SPI control register */
#define PCH_SPBRR 0x04 /* SPI baud rate register */
#define PCH_SPSR 0x08 /* SPI status register */
#define PCH_SPDWR 0x0C /* SPI write data register */
#define PCH_SPDRR 0x10 /* SPI read data register */
#define PCH_SSNXCR 0x18 /* SSN Expand Control Register */
#define PCH_SRST 0x1C /* SPI reset register */
#define PCH_ADDRESS_SIZE 0x20
#define PCH_SPSR_TFD 0x000007C0
#define PCH_SPSR_RFD 0x0000F800
#define PCH_READABLE(x) (((x) & PCH_SPSR_RFD)>>11)
#define PCH_WRITABLE(x) (((x) & PCH_SPSR_TFD)>>6)
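/*
 * Example (illustrative value): an SPSR readout of 0x00002800 gives
 * PCH_READABLE(0x2800) = (0x2800 & PCH_SPSR_RFD) >> 11 = 5, i.e. five
 * entries are waiting in the receive FIFO.
 */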
#define PCH_RX_THOLD 7
#define PCH_RX_THOLD_MAX 15
#define PCH_TX_THOLD 2
#define PCH_MAX_BAUDRATE 5000000
#define PCH_MAX_FIFO_DEPTH 16
#define STATUS_RUNNING 1
#define STATUS_EXITING 2
#define PCH_SLEEP_TIME 10
#define SSN_LOW 0x02U
#define SSN_HIGH 0x03U
#define SSN_NO_CONTROL 0x00U
#define PCH_MAX_CS 0xFF
#define PCI_DEVICE_ID_GE_SPI 0x8816
#define SPCR_SPE_BIT (1 << 0)
#define SPCR_MSTR_BIT (1 << 1)
#define SPCR_LSBF_BIT (1 << 4)
#define SPCR_CPHA_BIT (1 << 5)
#define SPCR_CPOL_BIT (1 << 6)
#define SPCR_TFIE_BIT (1 << 8)
#define SPCR_RFIE_BIT (1 << 9)
#define SPCR_FIE_BIT (1 << 10)
#define SPCR_ORIE_BIT (1 << 11)
#define SPCR_MDFIE_BIT (1 << 12)
#define SPCR_FICLR_BIT (1 << 24)
#define SPSR_TFI_BIT (1 << 0)
#define SPSR_RFI_BIT (1 << 1)
#define SPSR_FI_BIT (1 << 2)
#define SPSR_ORF_BIT (1 << 3)
#define SPBRR_SIZE_BIT (1 << 10)
#define PCH_ALL (SPCR_TFIE_BIT|SPCR_RFIE_BIT|SPCR_FIE_BIT|\
SPCR_ORIE_BIT|SPCR_MDFIE_BIT)
#define SPCR_RFIC_FIELD 20
#define SPCR_TFIC_FIELD 16
#define MASK_SPBRR_SPBR_BITS ((1 << 10) - 1)
#define MASK_RFIC_SPCR_BITS (0xf << SPCR_RFIC_FIELD)
#define MASK_TFIC_SPCR_BITS (0xf << SPCR_TFIC_FIELD)
#define PCH_CLOCK_HZ 50000000
#define PCH_MAX_SPBR 1023
/* Definition for ML7213/ML7223/ML7831 by LAPIS Semiconductor */
#define PCI_DEVICE_ID_ML7213_SPI 0x802c
#define PCI_DEVICE_ID_ML7223_SPI 0x800F
#define PCI_DEVICE_ID_ML7831_SPI 0x8816
/*
 * Maximum number of SPI channels per device:
* Intel EG20T PCH : 1ch
* LAPIS Semiconductor ML7213 IOH : 2ch
* LAPIS Semiconductor ML7223 IOH : 1ch
* LAPIS Semiconductor ML7831 IOH : 1ch
*/
#define PCH_SPI_MAX_DEV 2
#define PCH_BUF_SIZE 4096
#define PCH_DMA_TRANS_SIZE 12
static int use_dma = 1;
struct pch_spi_dma_ctrl {
struct pci_dev *dma_dev;
struct dma_async_tx_descriptor *desc_tx;
struct dma_async_tx_descriptor *desc_rx;
struct pch_dma_slave param_tx;
struct pch_dma_slave param_rx;
struct dma_chan *chan_tx;
struct dma_chan *chan_rx;
struct scatterlist *sg_tx_p;
struct scatterlist *sg_rx_p;
struct scatterlist sg_tx;
struct scatterlist sg_rx;
int nent;
void *tx_buf_virt;
void *rx_buf_virt;
dma_addr_t tx_buf_dma;
dma_addr_t rx_buf_dma;
};
/**
* struct pch_spi_data - Holds the SPI channel specific details
* @io_remap_addr: The remapped PCI base address
* @io_base_addr: Base address
* @master: Pointer to the SPI master structure
* @work: Reference to work queue handler
* @wait: Wait queue for waking up upon receiving an
* interrupt.
* @transfer_complete: Status of SPI Transfer
* @bcurrent_msg_processing: Status flag for message processing
* @lock: Lock for protecting this structure
* @queue: SPI Message queue
* @status: Status of the SPI driver
* @bpw_len: Length of data to be transferred in bits per
* word
* @transfer_active: Flag showing active transfer
* @tx_index: Transmit data count; for bookkeeping during
* transfer
* @rx_index: Receive data count; for bookkeeping during
* transfer
* @pkt_tx_buff: Buffer for data to be transmitted
* @pkt_rx_buff: Buffer for received data
* @n_curnt_chip: The chip number that this SPI driver currently
* operates on
* @current_chip: Reference to the current chip that this SPI
* driver currently operates on
* @current_msg: The current message that this SPI driver is
* handling
* @cur_trans: The current transfer that this SPI driver is
* handling
* @board_dat: Reference to the SPI device data structure
* @plat_dev: platform_device structure
* @ch: SPI channel number
* @dma: Local DMA information
* @use_dma: True if DMA is to be used
* @irq_reg_sts: Status of IRQ registration
* @save_total_len: Save length while data is being transferred
*/
struct pch_spi_data {
void __iomem *io_remap_addr;
unsigned long io_base_addr;
struct spi_master *master;
struct work_struct work;
wait_queue_head_t wait;
u8 transfer_complete;
u8 bcurrent_msg_processing;
spinlock_t lock;
struct list_head queue;
u8 status;
u32 bpw_len;
u8 transfer_active;
u32 tx_index;
u32 rx_index;
u16 *pkt_tx_buff;
u16 *pkt_rx_buff;
u8 n_curnt_chip;
struct spi_device *current_chip;
struct spi_message *current_msg;
struct spi_transfer *cur_trans;
struct pch_spi_board_data *board_dat;
struct platform_device *plat_dev;
int ch;
struct pch_spi_dma_ctrl dma;
int use_dma;
u8 irq_reg_sts;
int save_total_len;
};
/**
* struct pch_spi_board_data - Holds the SPI device specific details
* @pdev: Pointer to the PCI device
* @suspend_sts: Status of suspend
* @num: The number of SPI device instance
*/
struct pch_spi_board_data {
struct pci_dev *pdev;
u8 suspend_sts;
int num;
};
struct pch_pd_dev_save {
int num;
struct platform_device *pd_save[PCH_SPI_MAX_DEV];
struct pch_spi_board_data *board_dat;
};
static const struct pci_device_id pch_spi_pcidev_id[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_GE_SPI), 1, },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_SPI), 2, },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_SPI), 1, },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_SPI), 1, },
{ }
};
/**
* pch_spi_writereg() - Performs register writes
* @master: Pointer to struct spi_master.
* @idx: Register offset.
* @val: Value to be written to register.
*/
static inline void pch_spi_writereg(struct spi_master *master, int idx, u32 val)
{
struct pch_spi_data *data = spi_master_get_devdata(master);
iowrite32(val, (data->io_remap_addr + idx));
}
/**
* pch_spi_readreg() - Performs register reads
* @master: Pointer to struct spi_master.
* @idx: Register offset.
*/
static inline u32 pch_spi_readreg(struct spi_master *master, int idx)
{
struct pch_spi_data *data = spi_master_get_devdata(master);
return ioread32(data->io_remap_addr + idx);
}
static inline void pch_spi_setclr_reg(struct spi_master *master, int idx,
u32 set, u32 clr)
{
u32 tmp = pch_spi_readreg(master, idx);
tmp = (tmp & ~clr) | set;
pch_spi_writereg(master, idx, tmp);
}
static void pch_spi_set_master_mode(struct spi_master *master)
{
pch_spi_setclr_reg(master, PCH_SPCR, SPCR_MSTR_BIT, 0);
}
/**
* pch_spi_clear_fifo() - Clears the Transmit and Receive FIFOs
* @master: Pointer to struct spi_master.
*/
static void pch_spi_clear_fifo(struct spi_master *master)
{
pch_spi_setclr_reg(master, PCH_SPCR, SPCR_FICLR_BIT, 0);
pch_spi_setclr_reg(master, PCH_SPCR, 0, SPCR_FICLR_BIT);
}
static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
void __iomem *io_remap_addr)
{
u32 n_read, tx_index, rx_index, bpw_len;
u16 *pkt_rx_buffer, *pkt_tx_buff;
int read_cnt;
u32 reg_spcr_val;
void __iomem *spsr;
void __iomem *spdrr;
void __iomem *spdwr;
spsr = io_remap_addr + PCH_SPSR;
iowrite32(reg_spsr_val, spsr);
if (data->transfer_active) {
rx_index = data->rx_index;
tx_index = data->tx_index;
bpw_len = data->bpw_len;
pkt_rx_buffer = data->pkt_rx_buff;
pkt_tx_buff = data->pkt_tx_buff;
spdrr = io_remap_addr + PCH_SPDRR;
spdwr = io_remap_addr + PCH_SPDWR;
n_read = PCH_READABLE(reg_spsr_val);
for (read_cnt = 0; (read_cnt < n_read); read_cnt++) {
pkt_rx_buffer[rx_index++] = ioread32(spdrr);
if (tx_index < bpw_len)
iowrite32(pkt_tx_buff[tx_index++], spdwr);
}
/* disable RFI if not needed */
if ((bpw_len - rx_index) <= PCH_MAX_FIFO_DEPTH) {
reg_spcr_val = ioread32(io_remap_addr + PCH_SPCR);
reg_spcr_val &= ~SPCR_RFIE_BIT; /* disable RFI */
/* reset rx threshold */
reg_spcr_val &= ~MASK_RFIC_SPCR_BITS;
reg_spcr_val |= (PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD);
iowrite32(reg_spcr_val, (io_remap_addr + PCH_SPCR));
}
/* update counts */
data->tx_index = tx_index;
data->rx_index = rx_index;
/* if transfer complete interrupt */
if (reg_spsr_val & SPSR_FI_BIT) {
if ((tx_index == bpw_len) && (rx_index == tx_index)) {
/* disable interrupts */
pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
PCH_ALL);
/* transfer is completed;
inform pch_spi_process_messages */
data->transfer_complete = true;
data->transfer_active = false;
wake_up(&data->wait);
} else {
dev_vdbg(&data->master->dev,
"%s : Transfer is not completed",
__func__);
}
}
}
}
/**
* pch_spi_handler() - Interrupt handler
* @irq: The interrupt number.
* @dev_id: Pointer to struct pch_spi_board_data.
*/
static irqreturn_t pch_spi_handler(int irq, void *dev_id)
{
u32 reg_spsr_val;
void __iomem *spsr;
void __iomem *io_remap_addr;
irqreturn_t ret = IRQ_NONE;
struct pch_spi_data *data = dev_id;
struct pch_spi_board_data *board_dat = data->board_dat;
if (board_dat->suspend_sts) {
dev_dbg(&board_dat->pdev->dev,
"%s returning due to suspend\n", __func__);
return IRQ_NONE;
}
io_remap_addr = data->io_remap_addr;
spsr = io_remap_addr + PCH_SPSR;
reg_spsr_val = ioread32(spsr);
if (reg_spsr_val & SPSR_ORF_BIT) {
dev_err(&board_dat->pdev->dev, "%s Over run error\n", __func__);
if (data->current_msg->complete) {
data->transfer_complete = true;
data->current_msg->status = -EIO;
data->current_msg->complete(data->current_msg->context);
data->bcurrent_msg_processing = false;
data->current_msg = NULL;
data->cur_trans = NULL;
}
}
if (data->use_dma)
return IRQ_NONE;
/* Check if the interrupt is for SPI device */
if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) {
pch_spi_handler_sub(data, reg_spsr_val, io_remap_addr);
ret = IRQ_HANDLED;
}
dev_dbg(&board_dat->pdev->dev, "%s EXIT return value=%d\n",
__func__, ret);
return ret;
}
/**
* pch_spi_set_baud_rate() - Sets SPBR field in SPBRR
* @master: Pointer to struct spi_master.
* @speed_hz: Baud rate.
*/
static void pch_spi_set_baud_rate(struct spi_master *master, u32 speed_hz)
{
u32 n_spbr = PCH_CLOCK_HZ / (speed_hz * 2);
	/* if the requested baud rate is lower than we can support, limit the divisor */
if (n_spbr > PCH_MAX_SPBR)
n_spbr = PCH_MAX_SPBR;
pch_spi_setclr_reg(master, PCH_SPBRR, n_spbr, MASK_SPBRR_SPBR_BITS);
}
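/*
 * Worked example (illustrative values): with PCH_CLOCK_HZ = 50 MHz and
 * a requested speed of 1 MHz, n_spbr = 50000000 / (2 * 1000000) = 25 and
 * the resulting SCLK is PCH_CLOCK_HZ / (2 * n_spbr) = 1 MHz. Requests
 * below roughly 24.4 kHz would need n_spbr > PCH_MAX_SPBR and are
 * therefore clamped to the slowest supported rate.
 */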
/**
* pch_spi_set_bits_per_word() - Sets SIZE field in SPBRR
* @master: Pointer to struct spi_master.
* @bits_per_word: Bits per word for SPI transfer.
*/
static void pch_spi_set_bits_per_word(struct spi_master *master,
u8 bits_per_word)
{
if (bits_per_word == 8)
pch_spi_setclr_reg(master, PCH_SPBRR, 0, SPBRR_SIZE_BIT);
else
pch_spi_setclr_reg(master, PCH_SPBRR, SPBRR_SIZE_BIT, 0);
}
/**
* pch_spi_setup_transfer() - Configures the PCH SPI hardware for transfer
* @spi: Pointer to struct spi_device.
*/
static void pch_spi_setup_transfer(struct spi_device *spi)
{
u32 flags = 0;
dev_dbg(&spi->dev, "%s SPBRR content =%x setting baud rate=%d\n",
__func__, pch_spi_readreg(spi->master, PCH_SPBRR),
spi->max_speed_hz);
pch_spi_set_baud_rate(spi->master, spi->max_speed_hz);
/* set bits per word */
pch_spi_set_bits_per_word(spi->master, spi->bits_per_word);
if (!(spi->mode & SPI_LSB_FIRST))
flags |= SPCR_LSBF_BIT;
if (spi->mode & SPI_CPOL)
flags |= SPCR_CPOL_BIT;
if (spi->mode & SPI_CPHA)
flags |= SPCR_CPHA_BIT;
pch_spi_setclr_reg(spi->master, PCH_SPCR, flags,
(SPCR_LSBF_BIT | SPCR_CPOL_BIT | SPCR_CPHA_BIT));
/* Clear the FIFO by toggling FICLR to 1 and back to 0 */
pch_spi_clear_fifo(spi->master);
}
/**
* pch_spi_reset() - Clears SPI registers
* @master: Pointer to struct spi_master.
*/
static void pch_spi_reset(struct spi_master *master)
{
/* write 1 to reset SPI */
pch_spi_writereg(master, PCH_SRST, 0x1);
/* clear reset */
pch_spi_writereg(master, PCH_SRST, 0x0);
}
static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
{
struct pch_spi_data *data = spi_master_get_devdata(pspi->master);
int retval;
unsigned long flags;
/* We won't process any messages if we have been asked to terminate */
if (data->status == STATUS_EXITING) {
dev_err(&pspi->dev, "%s status = STATUS_EXITING.\n", __func__);
retval = -ESHUTDOWN;
goto err_out;
}
/* If suspended, return -EINVAL */
if (data->board_dat->suspend_sts) {
dev_err(&pspi->dev, "%s suspend; returning EINVAL\n", __func__);
retval = -EINVAL;
goto err_out;
}
/* set status of message */
pmsg->actual_length = 0;
dev_dbg(&pspi->dev, "%s - pmsg->status =%d\n", __func__, pmsg->status);
pmsg->status = -EINPROGRESS;
spin_lock_irqsave(&data->lock, flags);
/* add message to queue */
list_add_tail(&pmsg->queue, &data->queue);
spin_unlock_irqrestore(&data->lock, flags);
dev_dbg(&pspi->dev, "%s - Invoked list_add_tail\n", __func__);
schedule_work(&data->work);
dev_dbg(&pspi->dev, "%s - Invoked queue work\n", __func__);
retval = 0;
err_out:
dev_dbg(&pspi->dev, "%s RETURN=%d\n", __func__, retval);
return retval;
}
static inline void pch_spi_select_chip(struct pch_spi_data *data,
struct spi_device *pspi)
{
if (data->current_chip != NULL) {
if (spi_get_chipselect(pspi, 0) != data->n_curnt_chip) {
dev_dbg(&pspi->dev, "%s : different slave\n", __func__);
data->current_chip = NULL;
}
}
data->current_chip = pspi;
data->n_curnt_chip = spi_get_chipselect(data->current_chip, 0);
dev_dbg(&pspi->dev, "%s :Invoking pch_spi_setup_transfer\n", __func__);
pch_spi_setup_transfer(pspi);
}
static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw)
{
int size;
u32 n_writes;
int j;
struct spi_message *pmsg, *tmp;
const u8 *tx_buf;
const u16 *tx_sbuf;
/* set baud rate if needed */
if (data->cur_trans->speed_hz) {
dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__);
pch_spi_set_baud_rate(data->master, data->cur_trans->speed_hz);
}
/* set bits per word if needed */
if (data->cur_trans->bits_per_word &&
(data->current_msg->spi->bits_per_word != data->cur_trans->bits_per_word)) {
dev_dbg(&data->master->dev, "%s:set bits per word\n", __func__);
pch_spi_set_bits_per_word(data->master,
data->cur_trans->bits_per_word);
*bpw = data->cur_trans->bits_per_word;
} else {
*bpw = data->current_msg->spi->bits_per_word;
}
/* reset Tx/Rx index */
data->tx_index = 0;
data->rx_index = 0;
data->bpw_len = data->cur_trans->len / (*bpw / 8);
/* find alloc size */
size = data->cur_trans->len * sizeof(*data->pkt_tx_buff);
/* allocate memory for pkt_tx_buff & pkt_rx_buffer */
data->pkt_tx_buff = kzalloc(size, GFP_KERNEL);
if (data->pkt_tx_buff != NULL) {
data->pkt_rx_buff = kzalloc(size, GFP_KERNEL);
if (!data->pkt_rx_buff) {
kfree(data->pkt_tx_buff);
data->pkt_tx_buff = NULL;
}
}
if (!data->pkt_rx_buff) {
/* flush queue and set status of all transfers to -ENOMEM */
list_for_each_entry_safe(pmsg, tmp, data->queue.next, queue) {
pmsg->status = -ENOMEM;
if (pmsg->complete)
pmsg->complete(pmsg->context);
/* delete from queue */
list_del_init(&pmsg->queue);
}
return;
}
/* copy Tx Data */
if (data->cur_trans->tx_buf != NULL) {
if (*bpw == 8) {
tx_buf = data->cur_trans->tx_buf;
for (j = 0; j < data->bpw_len; j++)
data->pkt_tx_buff[j] = *tx_buf++;
} else {
tx_sbuf = data->cur_trans->tx_buf;
for (j = 0; j < data->bpw_len; j++)
data->pkt_tx_buff[j] = *tx_sbuf++;
}
}
/* write at most PCH_MAX_FIFO_DEPTH words; if len is smaller, write len words */
n_writes = data->bpw_len;
if (n_writes > PCH_MAX_FIFO_DEPTH)
n_writes = PCH_MAX_FIFO_DEPTH;
dev_dbg(&data->master->dev,
"\n%s:Pulling down SSN low - writing 0x2 to SSNXCR\n",
__func__);
pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW);
for (j = 0; j < n_writes; j++)
pch_spi_writereg(data->master, PCH_SPDWR, data->pkt_tx_buff[j]);
/* update tx_index */
data->tx_index = j;
/* reset transfer complete flag */
data->transfer_complete = false;
data->transfer_active = true;
}
static void pch_spi_nomore_transfer(struct pch_spi_data *data)
{
struct spi_message *pmsg, *tmp;
dev_dbg(&data->master->dev, "%s called\n", __func__);
/* Invoke the complete callback
* to notify the SPI core that the transfer has ended */
data->current_msg->status = 0;
if (data->current_msg->complete) {
dev_dbg(&data->master->dev,
"%s:Invoking callback of SPI core\n", __func__);
data->current_msg->complete(data->current_msg->context);
}
/* update status in global variable */
data->bcurrent_msg_processing = false;
dev_dbg(&data->master->dev,
"%s:data->bcurrent_msg_processing = false\n", __func__);
data->current_msg = NULL;
data->cur_trans = NULL;
/* Check whether the queue still has items and we are neither
* suspending nor exiting (list_empty() returns 1 on an empty list) */
if ((list_empty(&data->queue) == 0) &&
(!data->board_dat->suspend_sts) &&
(data->status != STATUS_EXITING)) {
/* We have some more work to do (either there are more transfer
* requests in the current message or there are more messages).
*/
dev_dbg(&data->master->dev, "%s:Invoke queue_work\n", __func__);
schedule_work(&data->work);
} else if (data->board_dat->suspend_sts ||
data->status == STATUS_EXITING) {
dev_dbg(&data->master->dev,
"%s suspend/remove initiated, flushing queue\n",
__func__);
list_for_each_entry_safe(pmsg, tmp, data->queue.next, queue) {
pmsg->status = -EIO;
if (pmsg->complete)
pmsg->complete(pmsg->context);
/* delete from queue */
list_del_init(&pmsg->queue);
}
}
}
static void pch_spi_set_ir(struct pch_spi_data *data)
{
/* enable interrupts, set threshold, enable SPI */
if ((data->bpw_len) > PCH_MAX_FIFO_DEPTH)
/* set receive threshold to PCH_RX_THOLD */
pch_spi_setclr_reg(data->master, PCH_SPCR,
PCH_RX_THOLD << SPCR_RFIC_FIELD |
SPCR_FIE_BIT | SPCR_RFIE_BIT |
SPCR_ORIE_BIT | SPCR_SPE_BIT,
MASK_RFIC_SPCR_BITS | PCH_ALL);
else
/* set receive threshold to maximum */
pch_spi_setclr_reg(data->master, PCH_SPCR,
PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD |
SPCR_FIE_BIT | SPCR_ORIE_BIT |
SPCR_SPE_BIT,
MASK_RFIC_SPCR_BITS | PCH_ALL);
/* Wait until the transfer completes; go to sleep after
* initiating the transfer. */
dev_dbg(&data->master->dev,
"%s:waiting for transfer to get over\n", __func__);
wait_event_interruptible(data->wait, data->transfer_complete);
/* clear all interrupts */
pch_spi_writereg(data->master, PCH_SPSR,
pch_spi_readreg(data->master, PCH_SPSR));
/* Disable interrupts and SPI transfer */
pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL | SPCR_SPE_BIT);
/* clear FIFO */
pch_spi_clear_fifo(data->master);
}
static void pch_spi_copy_rx_data(struct pch_spi_data *data, int bpw)
{
int j;
u8 *rx_buf;
u16 *rx_sbuf;
/* copy Rx Data */
if (!data->cur_trans->rx_buf)
return;
if (bpw == 8) {
rx_buf = data->cur_trans->rx_buf;
for (j = 0; j < data->bpw_len; j++)
*rx_buf++ = data->pkt_rx_buff[j] & 0xFF;
} else {
rx_sbuf = data->cur_trans->rx_buf;
for (j = 0; j < data->bpw_len; j++)
*rx_sbuf++ = data->pkt_rx_buff[j];
}
}
static void pch_spi_copy_rx_data_for_dma(struct pch_spi_data *data, int bpw)
{
int j;
u8 *rx_buf;
u16 *rx_sbuf;
const u8 *rx_dma_buf;
const u16 *rx_dma_sbuf;
/* copy Rx Data */
if (!data->cur_trans->rx_buf)
return;
if (bpw == 8) {
rx_buf = data->cur_trans->rx_buf;
rx_dma_buf = data->dma.rx_buf_virt;
for (j = 0; j < data->bpw_len; j++)
*rx_buf++ = *rx_dma_buf++ & 0xFF;
data->cur_trans->rx_buf = rx_buf;
} else {
rx_sbuf = data->cur_trans->rx_buf;
rx_dma_sbuf = data->dma.rx_buf_virt;
for (j = 0; j < data->bpw_len; j++)
*rx_sbuf++ = *rx_dma_sbuf++;
data->cur_trans->rx_buf = rx_sbuf;
}
}
static int pch_spi_start_transfer(struct pch_spi_data *data)
{
struct pch_spi_dma_ctrl *dma;
unsigned long flags;
int rtn;
dma = &data->dma;
spin_lock_irqsave(&data->lock, flags);
/* disable interrupts, SPI set enable */
pch_spi_setclr_reg(data->master, PCH_SPCR, SPCR_SPE_BIT, PCH_ALL);
spin_unlock_irqrestore(&data->lock, flags);
/* Wait until the transfer completes; go to sleep after
* initiating the transfer. */
dev_dbg(&data->master->dev,
"%s:waiting for transfer to get over\n", __func__);
rtn = wait_event_interruptible_timeout(data->wait,
data->transfer_complete,
msecs_to_jiffies(2000));
if (!rtn)
dev_err(&data->master->dev,
"%s wait-event timeout\n", __func__);
dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent,
DMA_FROM_DEVICE);
dma_sync_sg_for_cpu(&data->master->dev, dma->sg_tx_p, dma->nent,
DMA_TO_DEVICE);
memset(data->dma.tx_buf_virt, 0, PAGE_SIZE);
async_tx_ack(dma->desc_rx);
async_tx_ack(dma->desc_tx);
kfree(dma->sg_tx_p);
kfree(dma->sg_rx_p);
spin_lock_irqsave(&data->lock, flags);
/* clear fifo threshold, disable interrupts, disable SPI transfer */
pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS | PCH_ALL |
SPCR_SPE_BIT);
/* clear all interrupts */
pch_spi_writereg(data->master, PCH_SPSR,
pch_spi_readreg(data->master, PCH_SPSR));
/* clear FIFO */
pch_spi_clear_fifo(data->master);
spin_unlock_irqrestore(&data->lock, flags);
return rtn;
}
static void pch_dma_rx_complete(void *arg)
{
struct pch_spi_data *data = arg;
/* transfer is completed; inform pch_spi_process_messages_dma */
data->transfer_complete = true;
wake_up_interruptible(&data->wait);
}
static bool pch_spi_filter(struct dma_chan *chan, void *slave)
{
struct pch_dma_slave *param = slave;
if ((chan->chan_id == param->chan_id) &&
(param->dma_dev == chan->device->dev)) {
chan->private = param;
return true;
} else {
return false;
}
}
static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
{
dma_cap_mask_t mask;
struct dma_chan *chan;
struct pci_dev *dma_dev;
struct pch_dma_slave *param;
struct pch_spi_dma_ctrl *dma;
unsigned int width;
if (bpw == 8)
width = PCH_DMA_WIDTH_1_BYTE;
else
width = PCH_DMA_WIDTH_2_BYTES;
dma = &data->dma;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
/* Get DMA's dev information */
dma_dev = pci_get_slot(data->board_dat->pdev->bus,
PCI_DEVFN(PCI_SLOT(data->board_dat->pdev->devfn), 0));
/* Set Tx DMA */
param = &dma->param_tx;
param->dma_dev = &dma_dev->dev;
param->chan_id = data->ch * 2; /* Tx = 0, 2 */
param->tx_reg = data->io_base_addr + PCH_SPDWR;
param->width = width;
chan = dma_request_channel(mask, pch_spi_filter, param);
if (!chan) {
dev_err(&data->master->dev,
"ERROR: dma_request_channel FAILS(Tx)\n");
goto out;
}
dma->chan_tx = chan;
/* Set Rx DMA */
param = &dma->param_rx;
param->dma_dev = &dma_dev->dev;
param->chan_id = data->ch * 2 + 1; /* Rx = Tx + 1 */
param->rx_reg = data->io_base_addr + PCH_SPDRR;
param->width = width;
chan = dma_request_channel(mask, pch_spi_filter, param);
if (!chan) {
dev_err(&data->master->dev,
"ERROR: dma_request_channel FAILS(Rx)\n");
dma_release_channel(dma->chan_tx);
dma->chan_tx = NULL;
goto out;
}
dma->chan_rx = chan;
dma->dma_dev = dma_dev;
return;
out:
pci_dev_put(dma_dev);
data->use_dma = 0;
}
static void pch_spi_release_dma(struct pch_spi_data *data)
{
struct pch_spi_dma_ctrl *dma;
dma = &data->dma;
if (dma->chan_tx) {
dma_release_channel(dma->chan_tx);
dma->chan_tx = NULL;
}
if (dma->chan_rx) {
dma_release_channel(dma->chan_rx);
dma->chan_rx = NULL;
}
pci_dev_put(dma->dma_dev);
}
static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
{
const u8 *tx_buf;
const u16 *tx_sbuf;
u8 *tx_dma_buf;
u16 *tx_dma_sbuf;
struct scatterlist *sg;
struct dma_async_tx_descriptor *desc_tx;
struct dma_async_tx_descriptor *desc_rx;
int num;
int i;
int size;
int rem;
int head;
unsigned long flags;
struct pch_spi_dma_ctrl *dma;
dma = &data->dma;
/* set baud rate if needed */
if (data->cur_trans->speed_hz) {
dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__);
spin_lock_irqsave(&data->lock, flags);
pch_spi_set_baud_rate(data->master, data->cur_trans->speed_hz);
spin_unlock_irqrestore(&data->lock, flags);
}
/* set bits per word if needed */
if (data->cur_trans->bits_per_word &&
(data->current_msg->spi->bits_per_word !=
data->cur_trans->bits_per_word)) {
dev_dbg(&data->master->dev, "%s:set bits per word\n", __func__);
spin_lock_irqsave(&data->lock, flags);
pch_spi_set_bits_per_word(data->master,
data->cur_trans->bits_per_word);
spin_unlock_irqrestore(&data->lock, flags);
*bpw = data->cur_trans->bits_per_word;
} else {
*bpw = data->current_msg->spi->bits_per_word;
}
data->bpw_len = data->cur_trans->len / (*bpw / 8);
if (data->bpw_len > PCH_BUF_SIZE) {
data->bpw_len = PCH_BUF_SIZE;
data->cur_trans->len -= PCH_BUF_SIZE;
}
/* copy Tx Data */
if (data->cur_trans->tx_buf != NULL) {
if (*bpw == 8) {
tx_buf = data->cur_trans->tx_buf;
tx_dma_buf = dma->tx_buf_virt;
for (i = 0; i < data->bpw_len; i++)
*tx_dma_buf++ = *tx_buf++;
} else {
tx_sbuf = data->cur_trans->tx_buf;
tx_dma_sbuf = dma->tx_buf_virt;
for (i = 0; i < data->bpw_len; i++)
*tx_dma_sbuf++ = *tx_sbuf++;
}
}
/* Calculate Rx parameter for DMA transmitting */
if (data->bpw_len > PCH_DMA_TRANS_SIZE) {
if (data->bpw_len % PCH_DMA_TRANS_SIZE) {
num = data->bpw_len / PCH_DMA_TRANS_SIZE + 1;
rem = data->bpw_len % PCH_DMA_TRANS_SIZE;
} else {
num = data->bpw_len / PCH_DMA_TRANS_SIZE;
rem = PCH_DMA_TRANS_SIZE;
}
size = PCH_DMA_TRANS_SIZE;
} else {
num = 1;
size = data->bpw_len;
rem = data->bpw_len;
}
dev_dbg(&data->master->dev, "%s num=%d size=%d rem=%d\n",
__func__, num, size, rem);
spin_lock_irqsave(&data->lock, flags);
/* set receive fifo threshold and transmit fifo threshold */
pch_spi_setclr_reg(data->master, PCH_SPCR,
((size - 1) << SPCR_RFIC_FIELD) |
(PCH_TX_THOLD << SPCR_TFIC_FIELD),
MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS);
spin_unlock_irqrestore(&data->lock, flags);
/* RX */
dma->sg_rx_p = kmalloc_array(num, sizeof(*dma->sg_rx_p), GFP_ATOMIC);
if (!dma->sg_rx_p)
return;
sg_init_table(dma->sg_rx_p, num); /* Initialize SG table */
/* offset, length setting */
sg = dma->sg_rx_p;
for (i = 0; i < num; i++, sg++) {
if (i == (num - 2)) {
sg->offset = size * i;
sg->offset = sg->offset * (*bpw / 8);
sg_set_page(sg, virt_to_page(dma->rx_buf_virt), rem,
sg->offset);
sg_dma_len(sg) = rem;
} else if (i == (num - 1)) {
sg->offset = size * (i - 1) + rem;
sg->offset = sg->offset * (*bpw / 8);
sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size,
sg->offset);
sg_dma_len(sg) = size;
} else {
sg->offset = size * i;
sg->offset = sg->offset * (*bpw / 8);
sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size,
sg->offset);
sg_dma_len(sg) = size;
}
sg_dma_address(sg) = dma->rx_buf_dma + sg->offset;
}
sg = dma->sg_rx_p;
desc_rx = dmaengine_prep_slave_sg(dma->chan_rx, sg,
num, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_rx) {
dev_err(&data->master->dev,
"%s:dmaengine_prep_slave_sg Failed\n", __func__);
return;
}
dma_sync_sg_for_device(&data->master->dev, sg, num, DMA_FROM_DEVICE);
desc_rx->callback = pch_dma_rx_complete;
desc_rx->callback_param = data;
dma->nent = num;
dma->desc_rx = desc_rx;
/* Calculate Tx parameter for DMA transmitting */
if (data->bpw_len > PCH_MAX_FIFO_DEPTH) {
head = PCH_MAX_FIFO_DEPTH - PCH_DMA_TRANS_SIZE;
if (data->bpw_len % PCH_DMA_TRANS_SIZE > 4) {
num = data->bpw_len / PCH_DMA_TRANS_SIZE + 1;
rem = data->bpw_len % PCH_DMA_TRANS_SIZE - head;
} else {
num = data->bpw_len / PCH_DMA_TRANS_SIZE;
rem = data->bpw_len % PCH_DMA_TRANS_SIZE +
PCH_DMA_TRANS_SIZE - head;
}
size = PCH_DMA_TRANS_SIZE;
} else {
num = 1;
size = data->bpw_len;
rem = data->bpw_len;
head = 0;
}
dma->sg_tx_p = kmalloc_array(num, sizeof(*dma->sg_tx_p), GFP_ATOMIC);
if (!dma->sg_tx_p)
return;
sg_init_table(dma->sg_tx_p, num); /* Initialize SG table */
/* offset, length setting */
sg = dma->sg_tx_p;
for (i = 0; i < num; i++, sg++) {
if (i == 0) {
sg->offset = 0;
sg_set_page(sg, virt_to_page(dma->tx_buf_virt), size + head,
sg->offset);
sg_dma_len(sg) = size + head;
} else if (i == (num - 1)) {
sg->offset = head + size * i;
sg->offset = sg->offset * (*bpw / 8);
sg_set_page(sg, virt_to_page(dma->tx_buf_virt), rem,
sg->offset);
sg_dma_len(sg) = rem;
} else {
sg->offset = head + size * i;
sg->offset = sg->offset * (*bpw / 8);
sg_set_page(sg, virt_to_page(dma->tx_buf_virt), size,
sg->offset);
sg_dma_len(sg) = size;
}
sg_dma_address(sg) = dma->tx_buf_dma + sg->offset;
}
sg = dma->sg_tx_p;
desc_tx = dmaengine_prep_slave_sg(dma->chan_tx,
sg, num, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_tx) {
dev_err(&data->master->dev,
"%s:dmaengine_prep_slave_sg Failed\n", __func__);
return;
}
dma_sync_sg_for_device(&data->master->dev, sg, num, DMA_TO_DEVICE);
desc_tx->callback = NULL;
desc_tx->callback_param = data;
dma->nent = num;
dma->desc_tx = desc_tx;
dev_dbg(&data->master->dev, "%s:Pulling down SSN low - writing 0x2 to SSNXCR\n", __func__);
spin_lock_irqsave(&data->lock, flags);
pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW);
desc_rx->tx_submit(desc_rx);
desc_tx->tx_submit(desc_tx);
spin_unlock_irqrestore(&data->lock, flags);
/* reset transfer complete flag */
data->transfer_complete = false;
}
static void pch_spi_process_messages(struct work_struct *pwork)
{
struct spi_message *pmsg, *tmp;
struct pch_spi_data *data;
int bpw;
data = container_of(pwork, struct pch_spi_data, work);
dev_dbg(&data->master->dev, "%s data initialized\n", __func__);
spin_lock(&data->lock);
/* check if suspend has been initiated; if yes, flush the queue */
if (data->board_dat->suspend_sts || (data->status == STATUS_EXITING)) {
dev_dbg(&data->master->dev,
"%s suspend/remove initiated, flushing queue\n", __func__);
list_for_each_entry_safe(pmsg, tmp, data->queue.next, queue) {
pmsg->status = -EIO;
if (pmsg->complete) {
spin_unlock(&data->lock);
pmsg->complete(pmsg->context);
spin_lock(&data->lock);
}
/* delete from queue */
list_del_init(&pmsg->queue);
}
spin_unlock(&data->lock);
return;
}
data->bcurrent_msg_processing = true;
dev_dbg(&data->master->dev,
"%s Set data->bcurrent_msg_processing= true\n", __func__);
/* Get the message from the queue and delete it from there. */
data->current_msg = list_entry(data->queue.next, struct spi_message,
queue);
list_del_init(&data->current_msg->queue);
data->current_msg->status = 0;
pch_spi_select_chip(data, data->current_msg->spi);
spin_unlock(&data->lock);
if (data->use_dma)
pch_spi_request_dma(data,
data->current_msg->spi->bits_per_word);
pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL);
do {
int cnt;
/* If we are already processing a message, get the next
* transfer structure from the message; otherwise retrieve
* the 1st transfer request from the message. */
spin_lock(&data->lock);
if (data->cur_trans == NULL) {
data->cur_trans =
list_entry(data->current_msg->transfers.next,
struct spi_transfer, transfer_list);
dev_dbg(&data->master->dev,
"%s :Getting 1st transfer message\n",
__func__);
} else {
data->cur_trans =
list_entry(data->cur_trans->transfer_list.next,
struct spi_transfer, transfer_list);
dev_dbg(&data->master->dev,
"%s :Getting next transfer message\n",
__func__);
}
spin_unlock(&data->lock);
if (!data->cur_trans->len)
goto out;
cnt = (data->cur_trans->len - 1) / PCH_BUF_SIZE + 1;
data->save_total_len = data->cur_trans->len;
if (data->use_dma) {
int i;
char *save_rx_buf = data->cur_trans->rx_buf;
for (i = 0; i < cnt; i++) {
pch_spi_handle_dma(data, &bpw);
if (!pch_spi_start_transfer(data)) {
data->transfer_complete = true;
data->current_msg->status = -EIO;
data->current_msg->complete
(data->current_msg->context);
data->bcurrent_msg_processing = false;
data->current_msg = NULL;
data->cur_trans = NULL;
goto out;
}
pch_spi_copy_rx_data_for_dma(data, bpw);
}
data->cur_trans->rx_buf = save_rx_buf;
} else {
pch_spi_set_tx(data, &bpw);
pch_spi_set_ir(data);
pch_spi_copy_rx_data(data, bpw);
kfree(data->pkt_rx_buff);
data->pkt_rx_buff = NULL;
kfree(data->pkt_tx_buff);
data->pkt_tx_buff = NULL;
}
/* increment message count */
data->cur_trans->len = data->save_total_len;
data->current_msg->actual_length += data->cur_trans->len;
dev_dbg(&data->master->dev,
"%s:data->current_msg->actual_length=%d\n",
__func__, data->current_msg->actual_length);
spi_transfer_delay_exec(data->cur_trans);
spin_lock(&data->lock);
/* No more transfers in this message. */
if ((data->cur_trans->transfer_list.next) ==
&(data->current_msg->transfers)) {
pch_spi_nomore_transfer(data);
}
spin_unlock(&data->lock);
} while (data->cur_trans != NULL);
out:
pch_spi_writereg(data->master, PCH_SSNXCR, SSN_HIGH);
if (data->use_dma)
pch_spi_release_dma(data);
}
static void pch_spi_free_resources(struct pch_spi_board_data *board_dat,
struct pch_spi_data *data)
{
dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__);
flush_work(&data->work);
}
static int pch_spi_get_resources(struct pch_spi_board_data *board_dat,
struct pch_spi_data *data)
{
dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__);
/* reset PCH SPI h/w */
pch_spi_reset(data->master);
dev_dbg(&board_dat->pdev->dev,
"%s pch_spi_reset invoked successfully\n", __func__);
dev_dbg(&board_dat->pdev->dev, "%s data->irq_reg_sts=true\n", __func__);
return 0;
}
static void pch_free_dma_buf(struct pch_spi_board_data *board_dat,
struct pch_spi_data *data)
{
struct pch_spi_dma_ctrl *dma;
dma = &data->dma;
if (dma->tx_buf_dma)
dma_free_coherent(&board_dat->pdev->dev, PCH_BUF_SIZE,
dma->tx_buf_virt, dma->tx_buf_dma);
if (dma->rx_buf_dma)
dma_free_coherent(&board_dat->pdev->dev, PCH_BUF_SIZE,
dma->rx_buf_virt, dma->rx_buf_dma);
}
static int pch_alloc_dma_buf(struct pch_spi_board_data *board_dat,
struct pch_spi_data *data)
{
struct pch_spi_dma_ctrl *dma;
int ret;
dma = &data->dma;
ret = 0;
/* Get Consistent memory for Tx DMA */
dma->tx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev,
PCH_BUF_SIZE, &dma->tx_buf_dma, GFP_KERNEL);
if (!dma->tx_buf_virt)
ret = -ENOMEM;
/* Get Consistent memory for Rx DMA */
dma->rx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev,
PCH_BUF_SIZE, &dma->rx_buf_dma, GFP_KERNEL);
if (!dma->rx_buf_virt)
ret = -ENOMEM;
return ret;
}
static int pch_spi_pd_probe(struct platform_device *plat_dev)
{
int ret;
struct spi_master *master;
struct pch_spi_board_data *board_dat = dev_get_platdata(&plat_dev->dev);
struct pch_spi_data *data;
dev_dbg(&plat_dev->dev, "%s:debug\n", __func__);
master = spi_alloc_master(&board_dat->pdev->dev,
sizeof(struct pch_spi_data));
if (!master) {
dev_err(&plat_dev->dev, "spi_alloc_master[%d] failed.\n",
plat_dev->id);
return -ENOMEM;
}
data = spi_master_get_devdata(master);
data->master = master;
platform_set_drvdata(plat_dev, data);
/* base address + address offset */
data->io_base_addr = pci_resource_start(board_dat->pdev, 1) +
PCH_ADDRESS_SIZE * plat_dev->id;
data->io_remap_addr = pci_iomap(board_dat->pdev, 1, 0);
if (!data->io_remap_addr) {
dev_err(&plat_dev->dev, "%s pci_iomap failed\n", __func__);
ret = -ENOMEM;
goto err_pci_iomap;
}
data->io_remap_addr += PCH_ADDRESS_SIZE * plat_dev->id;
dev_dbg(&plat_dev->dev, "[ch%d] remap_addr=%p\n",
plat_dev->id, data->io_remap_addr);
/* initialize members of SPI master */
master->num_chipselect = PCH_MAX_CS;
master->transfer = pch_spi_transfer;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
master->max_speed_hz = PCH_MAX_BAUDRATE;
master->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
data->board_dat = board_dat;
data->plat_dev = plat_dev;
data->n_curnt_chip = 255;
data->status = STATUS_RUNNING;
data->ch = plat_dev->id;
data->use_dma = use_dma;
INIT_LIST_HEAD(&data->queue);
spin_lock_init(&data->lock);
INIT_WORK(&data->work, pch_spi_process_messages);
init_waitqueue_head(&data->wait);
ret = pch_spi_get_resources(board_dat, data);
if (ret) {
dev_err(&plat_dev->dev, "%s fail(retval=%d)\n", __func__, ret);
goto err_spi_get_resources;
}
ret = request_irq(board_dat->pdev->irq, pch_spi_handler,
IRQF_SHARED, KBUILD_MODNAME, data);
if (ret) {
dev_err(&plat_dev->dev,
"%s request_irq failed\n", __func__);
goto err_request_irq;
}
data->irq_reg_sts = true;
pch_spi_set_master_mode(master);
if (use_dma) {
dev_info(&plat_dev->dev, "Use DMA for data transfers\n");
ret = pch_alloc_dma_buf(board_dat, data);
if (ret)
goto err_spi_register_master;
}
ret = spi_register_master(master);
if (ret != 0) {
dev_err(&plat_dev->dev,
"%s spi_register_master FAILED\n", __func__);
goto err_spi_register_master;
}
return 0;
err_spi_register_master:
pch_free_dma_buf(board_dat, data);
free_irq(board_dat->pdev->irq, data);
err_request_irq:
pch_spi_free_resources(board_dat, data);
err_spi_get_resources:
pci_iounmap(board_dat->pdev, data->io_remap_addr);
err_pci_iomap:
spi_master_put(master);
return ret;
}
static void pch_spi_pd_remove(struct platform_device *plat_dev)
{
struct pch_spi_board_data *board_dat = dev_get_platdata(&plat_dev->dev);
struct pch_spi_data *data = platform_get_drvdata(plat_dev);
int count;
unsigned long flags;
dev_dbg(&plat_dev->dev, "%s:[ch%d] irq=%d\n",
__func__, plat_dev->id, board_dat->pdev->irq);
if (use_dma)
pch_free_dma_buf(board_dat, data);
/* check for any pending messages; no action is taken if the queue
* does not drain, but at least we tried. Unload anyway */
count = 500;
spin_lock_irqsave(&data->lock, flags);
data->status = STATUS_EXITING;
while ((list_empty(&data->queue) == 0) && --count) {
dev_dbg(&board_dat->pdev->dev, "%s :queue not empty\n",
__func__);
spin_unlock_irqrestore(&data->lock, flags);
msleep(PCH_SLEEP_TIME);
spin_lock_irqsave(&data->lock, flags);
}
spin_unlock_irqrestore(&data->lock, flags);
pch_spi_free_resources(board_dat, data);
/* disable interrupts & free IRQ */
if (data->irq_reg_sts) {
/* disable interrupts */
pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
data->irq_reg_sts = false;
free_irq(board_dat->pdev->irq, data);
}
pci_iounmap(board_dat->pdev, data->io_remap_addr);
spi_unregister_master(data->master);
}
#ifdef CONFIG_PM
static int pch_spi_pd_suspend(struct platform_device *pd_dev,
pm_message_t state)
{
u8 count;
struct pch_spi_board_data *board_dat = dev_get_platdata(&pd_dev->dev);
struct pch_spi_data *data = platform_get_drvdata(pd_dev);
dev_dbg(&pd_dev->dev, "%s ENTRY\n", __func__);
if (!board_dat) {
dev_err(&pd_dev->dev,
"%s pci_get_drvdata returned NULL\n", __func__);
return -EFAULT;
}
/* check if the current message has been processed:
* only after that is done will the transfer be suspended */
count = 255;
while ((--count) > 0) {
if (!(data->bcurrent_msg_processing))
break;
msleep(PCH_SLEEP_TIME);
}
/* Free IRQ */
if (data->irq_reg_sts) {
/* disable all interrupts */
pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
pch_spi_reset(data->master);
free_irq(board_dat->pdev->irq, data);
data->irq_reg_sts = false;
dev_dbg(&pd_dev->dev,
"%s free_irq invoked successfully.\n", __func__);
}
return 0;
}
static int pch_spi_pd_resume(struct platform_device *pd_dev)
{
struct pch_spi_board_data *board_dat = dev_get_platdata(&pd_dev->dev);
struct pch_spi_data *data = platform_get_drvdata(pd_dev);
int retval;
if (!board_dat) {
dev_err(&pd_dev->dev,
"%s pci_get_drvdata returned NULL\n", __func__);
return -EFAULT;
}
if (!data->irq_reg_sts) {
/* register IRQ */
retval = request_irq(board_dat->pdev->irq, pch_spi_handler,
IRQF_SHARED, KBUILD_MODNAME, data);
if (retval < 0) {
dev_err(&pd_dev->dev,
"%s request_irq failed\n", __func__);
return retval;
}
/* reset PCH SPI h/w */
pch_spi_reset(data->master);
pch_spi_set_master_mode(data->master);
data->irq_reg_sts = true;
}
return 0;
}
#else
#define pch_spi_pd_suspend NULL
#define pch_spi_pd_resume NULL
#endif
static struct platform_driver pch_spi_pd_driver = {
.driver = {
.name = "pch-spi",
},
.probe = pch_spi_pd_probe,
.remove_new = pch_spi_pd_remove,
.suspend = pch_spi_pd_suspend,
.resume = pch_spi_pd_resume
};
static int pch_spi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct pch_spi_board_data *board_dat;
struct platform_device *pd_dev = NULL;
int retval;
int i;
struct pch_pd_dev_save *pd_dev_save;
pd_dev_save = kzalloc(sizeof(*pd_dev_save), GFP_KERNEL);
if (!pd_dev_save)
return -ENOMEM;
board_dat = kzalloc(sizeof(*board_dat), GFP_KERNEL);
if (!board_dat) {
retval = -ENOMEM;
goto err_no_mem;
}
retval = pci_request_regions(pdev, KBUILD_MODNAME);
if (retval) {
dev_err(&pdev->dev, "%s request_region failed\n", __func__);
goto pci_request_regions;
}
board_dat->pdev = pdev;
board_dat->num = id->driver_data;
pd_dev_save->num = id->driver_data;
pd_dev_save->board_dat = board_dat;
retval = pci_enable_device(pdev);
if (retval) {
dev_err(&pdev->dev, "%s pci_enable_device failed\n", __func__);
goto pci_enable_device;
}
for (i = 0; i < board_dat->num; i++) {
pd_dev = platform_device_alloc("pch-spi", i);
if (!pd_dev) {
dev_err(&pdev->dev, "platform_device_alloc failed\n");
retval = -ENOMEM;
goto err_platform_device;
}
pd_dev_save->pd_save[i] = pd_dev;
pd_dev->dev.parent = &pdev->dev;
retval = platform_device_add_data(pd_dev, board_dat,
sizeof(*board_dat));
if (retval) {
dev_err(&pdev->dev,
"platform_device_add_data failed\n");
platform_device_put(pd_dev);
goto err_platform_device;
}
retval = platform_device_add(pd_dev);
if (retval) {
dev_err(&pdev->dev, "platform_device_add failed\n");
platform_device_put(pd_dev);
goto err_platform_device;
}
}
pci_set_drvdata(pdev, pd_dev_save);
return 0;
err_platform_device:
while (--i >= 0)
platform_device_unregister(pd_dev_save->pd_save[i]);
pci_disable_device(pdev);
pci_enable_device:
pci_release_regions(pdev);
pci_request_regions:
kfree(board_dat);
err_no_mem:
kfree(pd_dev_save);
return retval;
}
static void pch_spi_remove(struct pci_dev *pdev)
{
int i;
struct pch_pd_dev_save *pd_dev_save = pci_get_drvdata(pdev);
dev_dbg(&pdev->dev, "%s ENTRY:pdev=%p\n", __func__, pdev);
for (i = 0; i < pd_dev_save->num; i++)
platform_device_unregister(pd_dev_save->pd_save[i]);
pci_disable_device(pdev);
pci_release_regions(pdev);
kfree(pd_dev_save->board_dat);
kfree(pd_dev_save);
}
static int __maybe_unused pch_spi_suspend(struct device *dev)
{
struct pch_pd_dev_save *pd_dev_save = dev_get_drvdata(dev);
dev_dbg(dev, "%s ENTRY\n", __func__);
pd_dev_save->board_dat->suspend_sts = true;
return 0;
}
static int __maybe_unused pch_spi_resume(struct device *dev)
{
struct pch_pd_dev_save *pd_dev_save = dev_get_drvdata(dev);
dev_dbg(dev, "%s ENTRY\n", __func__);
/* set suspend status to false */
pd_dev_save->board_dat->suspend_sts = false;
return 0;
}
static SIMPLE_DEV_PM_OPS(pch_spi_pm_ops, pch_spi_suspend, pch_spi_resume);
static struct pci_driver pch_spi_pcidev_driver = {
.name = "pch_spi",
.id_table = pch_spi_pcidev_id,
.probe = pch_spi_probe,
.remove = pch_spi_remove,
.driver.pm = &pch_spi_pm_ops,
};
static int __init pch_spi_init(void)
{
int ret;
ret = platform_driver_register(&pch_spi_pd_driver);
if (ret)
return ret;
ret = pci_register_driver(&pch_spi_pcidev_driver);
if (ret) {
platform_driver_unregister(&pch_spi_pd_driver);
return ret;
}
return 0;
}
module_init(pch_spi_init);
static void __exit pch_spi_exit(void)
{
pci_unregister_driver(&pch_spi_pcidev_driver);
platform_driver_unregister(&pch_spi_pd_driver);
}
module_exit(pch_spi_exit);
module_param(use_dma, int, 0644);
MODULE_PARM_DESC(use_dma,
"to use DMA for data transfers pass 1 else 0; default 1");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semiconductor ML7xxx IOH SPI Driver");
MODULE_DEVICE_TABLE(pci, pch_spi_pcidev_id);
| linux-master | drivers/spi/spi-topcliff-pch.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2018 Exceet Electronics GmbH
* Copyright (C) 2018 Bootlin
*
* Author: Boris Brezillon <[email protected]>
*/
#include <linux/dmaengine.h>
#include <linux/iopoll.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/sched/task_stack.h>
#include "internals.h"
#define SPI_MEM_MAX_BUSWIDTH 8
/**
* spi_controller_dma_map_mem_op_data() - DMA-map the buffer attached to a
* memory operation
* @ctlr: the SPI controller requesting this dma_map()
* @op: the memory operation containing the buffer to map
* @sgt: a pointer to a non-initialized sg_table that will be filled by this
* function
*
* Some controllers might want to do DMA on the data buffer embedded in @op.
* This helper prepares everything for you and provides a ready-to-use
* sg_table. This function is not intended to be called from SPI drivers.
* Only SPI controller drivers should use it.
* Note that the caller must ensure the memory region pointed to by
* op->data.buf.{in,out} is DMA-able before calling this function.
*
* Return: 0 in case of success, a negative error code otherwise.
*/
int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
const struct spi_mem_op *op,
struct sg_table *sgt)
{
struct device *dmadev;
if (!op->data.nbytes)
return -EINVAL;
if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
dmadev = ctlr->dma_tx->device->dev;
else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
dmadev = ctlr->dma_rx->device->dev;
else
dmadev = ctlr->dev.parent;
if (!dmadev)
return -EINVAL;
return spi_map_buf(ctlr, dmadev, sgt, op->data.buf.in, op->data.nbytes,
op->data.dir == SPI_MEM_DATA_IN ?
DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_map_mem_op_data);
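/*
 * Illustrative sketch (not part of the original file) of how a controller
 * driver's exec_op() might use the map/unmap helpers above; my_hw_issue_dma()
 * is a hypothetical driver-specific routine:
 *
 *	struct sg_table sgt;
 *	int ret;
 *
 *	ret = spi_controller_dma_map_mem_op_data(ctlr, op, &sgt);
 *	if (ret)
 *		return ret;
 *	ret = my_hw_issue_dma(ctlr, op, &sgt);
 *	spi_controller_dma_unmap_mem_op_data(ctlr, op, &sgt);
 *	return ret;
 */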
/**
* spi_controller_dma_unmap_mem_op_data() - DMA-unmap the buffer attached to a
* memory operation
* @ctlr: the SPI controller requesting this dma_unmap()
* @op: the memory operation containing the buffer to unmap
* @sgt: a pointer to an sg_table previously initialized by
* spi_controller_dma_map_mem_op_data()
*
* Some controllers might want to do DMA on the data buffer embedded in @op.
* This helper prepares things so that the CPU can access the
* op->data.buf.{in,out} buffer again.
*
* This function is not intended to be called from SPI drivers. Only SPI
* controller drivers should use it.
*
* This function should be called after the DMA operation has finished and is
* only valid if the previous spi_controller_dma_map_mem_op_data() call
* returned 0.
*/
void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
const struct spi_mem_op *op,
struct sg_table *sgt)
{
struct device *dmadev;
if (!op->data.nbytes)
return;
if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
dmadev = ctlr->dma_tx->device->dev;
else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
dmadev = ctlr->dma_rx->device->dev;
else
dmadev = ctlr->dev.parent;
spi_unmap_buf(ctlr, dmadev, sgt,
op->data.dir == SPI_MEM_DATA_IN ?
DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_unmap_mem_op_data);
static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx)
{
u32 mode = mem->spi->mode;
switch (buswidth) {
case 1:
return 0;
case 2:
if ((tx &&
(mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL))) ||
(!tx &&
(mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL))))
return 0;
break;
case 4:
if ((tx && (mode & (SPI_TX_QUAD | SPI_TX_OCTAL))) ||
(!tx && (mode & (SPI_RX_QUAD | SPI_RX_OCTAL))))
return 0;
break;
case 8:
if ((tx && (mode & SPI_TX_OCTAL)) ||
(!tx && (mode & SPI_RX_OCTAL)))
return 0;
break;
default:
break;
}
return -ENOTSUPP;
}
static bool spi_mem_check_buswidth(struct spi_mem *mem,
const struct spi_mem_op *op)
{
if (spi_check_buswidth_req(mem, op->cmd.buswidth, true))
return false;
if (op->addr.nbytes &&
spi_check_buswidth_req(mem, op->addr.buswidth, true))
return false;
if (op->dummy.nbytes &&
spi_check_buswidth_req(mem, op->dummy.buswidth, true))
return false;
if (op->data.dir != SPI_MEM_NO_DATA &&
spi_check_buswidth_req(mem, op->data.buswidth,
op->data.dir == SPI_MEM_DATA_OUT))
return false;
return true;
}
bool spi_mem_default_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
struct spi_controller *ctlr = mem->spi->controller;
bool op_is_dtr =
op->cmd.dtr || op->addr.dtr || op->dummy.dtr || op->data.dtr;
if (op_is_dtr) {
if (!spi_mem_controller_is_capable(ctlr, dtr))
return false;
if (op->cmd.nbytes != 2)
return false;
} else {
if (op->cmd.nbytes != 1)
return false;
}
if (op->data.ecc) {
if (!spi_mem_controller_is_capable(ctlr, ecc))
return false;
}
return spi_mem_check_buswidth(mem, op);
}
EXPORT_SYMBOL_GPL(spi_mem_default_supports_op);
static bool spi_mem_buswidth_is_valid(u8 buswidth)
{
if (hweight8(buswidth) > 1 || buswidth > SPI_MEM_MAX_BUSWIDTH)
return false;
return true;
}
static int spi_mem_check_op(const struct spi_mem_op *op)
{
if (!op->cmd.buswidth || !op->cmd.nbytes)
return -EINVAL;
if ((op->addr.nbytes && !op->addr.buswidth) ||
(op->dummy.nbytes && !op->dummy.buswidth) ||
(op->data.nbytes && !op->data.buswidth))
return -EINVAL;
if (!spi_mem_buswidth_is_valid(op->cmd.buswidth) ||
!spi_mem_buswidth_is_valid(op->addr.buswidth) ||
!spi_mem_buswidth_is_valid(op->dummy.buswidth) ||
!spi_mem_buswidth_is_valid(op->data.buswidth))
return -EINVAL;
/* Buffers must be DMA-able. */
if (WARN_ON_ONCE(op->data.dir == SPI_MEM_DATA_IN &&
object_is_on_stack(op->data.buf.in)))
return -EINVAL;
if (WARN_ON_ONCE(op->data.dir == SPI_MEM_DATA_OUT &&
object_is_on_stack(op->data.buf.out)))
return -EINVAL;
return 0;
}
static bool spi_mem_internal_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
struct spi_controller *ctlr = mem->spi->controller;
if (ctlr->mem_ops && ctlr->mem_ops->supports_op)
return ctlr->mem_ops->supports_op(mem, op);
return spi_mem_default_supports_op(mem, op);
}
/**
* spi_mem_supports_op() - Check if a memory device and the controller it is
* connected to support a specific memory operation
* @mem: the SPI memory
* @op: the memory operation to check
*
* Some controllers only support Single or Dual I/O, others might only
* support specific opcodes, or it can even be that the controller and device
* both support Quad IOs but the hardware prevents you from using it because
* only 2 IO lines are connected.
*
* This function checks whether a specific operation is supported.
*
* Return: true if @op is supported, false otherwise.
*/
bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
if (spi_mem_check_op(op))
return false;
return spi_mem_internal_supports_op(mem, op);
}
EXPORT_SYMBOL_GPL(spi_mem_supports_op);
static int spi_mem_access_start(struct spi_mem *mem)
{
struct spi_controller *ctlr = mem->spi->controller;
/*
* Flush the message queue before executing our SPI memory
* operation to prevent preemption of regular SPI transfers.
*/
spi_flush_queue(ctlr);
if (ctlr->auto_runtime_pm) {
int ret;
ret = pm_runtime_resume_and_get(ctlr->dev.parent);
if (ret < 0) {
dev_err(&ctlr->dev, "Failed to power device: %d\n",
ret);
return ret;
}
}
mutex_lock(&ctlr->bus_lock_mutex);
mutex_lock(&ctlr->io_mutex);
return 0;
}
static void spi_mem_access_end(struct spi_mem *mem)
{
struct spi_controller *ctlr = mem->spi->controller;
mutex_unlock(&ctlr->io_mutex);
mutex_unlock(&ctlr->bus_lock_mutex);
if (ctlr->auto_runtime_pm)
pm_runtime_put(ctlr->dev.parent);
}
/**
* spi_mem_exec_op() - Execute a memory operation
* @mem: the SPI memory
* @op: the memory operation to execute
*
* Executes a memory operation.
*
* This function first checks that @op is supported and then tries to execute
* it.
*
* Return: 0 in case of success, a negative error code otherwise.
*/
int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
unsigned int tmpbufsize, xferpos = 0, totalxferlen = 0;
struct spi_controller *ctlr = mem->spi->controller;
struct spi_transfer xfers[4] = { };
struct spi_message msg;
u8 *tmpbuf;
int ret;
ret = spi_mem_check_op(op);
if (ret)
return ret;
if (!spi_mem_internal_supports_op(mem, op))
return -ENOTSUPP;
if (ctlr->mem_ops && ctlr->mem_ops->exec_op && !spi_get_csgpiod(mem->spi, 0)) {
ret = spi_mem_access_start(mem);
if (ret)
return ret;
ret = ctlr->mem_ops->exec_op(mem, op);
spi_mem_access_end(mem);
/*
* Some controllers only optimize specific paths (typically the
* read path) and expect the core to use the regular SPI
* interface in other cases.
*/
if (!ret || ret != -ENOTSUPP)
return ret;
}
tmpbufsize = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
/*
* Allocate a buffer to transmit the CMD, ADDR cycles with kmalloc() so
* we're guaranteed that this buffer is DMA-able, as required by the
* SPI layer.
*/
tmpbuf = kzalloc(tmpbufsize, GFP_KERNEL | GFP_DMA);
if (!tmpbuf)
return -ENOMEM;
spi_message_init(&msg);
tmpbuf[0] = op->cmd.opcode;
xfers[xferpos].tx_buf = tmpbuf;
xfers[xferpos].len = op->cmd.nbytes;
xfers[xferpos].tx_nbits = op->cmd.buswidth;
spi_message_add_tail(&xfers[xferpos], &msg);
xferpos++;
totalxferlen++;
if (op->addr.nbytes) {
int i;
for (i = 0; i < op->addr.nbytes; i++)
tmpbuf[i + 1] = op->addr.val >>
(8 * (op->addr.nbytes - i - 1));
xfers[xferpos].tx_buf = tmpbuf + 1;
xfers[xferpos].len = op->addr.nbytes;
xfers[xferpos].tx_nbits = op->addr.buswidth;
spi_message_add_tail(&xfers[xferpos], &msg);
xferpos++;
totalxferlen += op->addr.nbytes;
}
if (op->dummy.nbytes) {
memset(tmpbuf + op->addr.nbytes + 1, 0xff, op->dummy.nbytes);
xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
xfers[xferpos].len = op->dummy.nbytes;
xfers[xferpos].tx_nbits = op->dummy.buswidth;
xfers[xferpos].dummy_data = 1;
spi_message_add_tail(&xfers[xferpos], &msg);
xferpos++;
totalxferlen += op->dummy.nbytes;
}
if (op->data.nbytes) {
if (op->data.dir == SPI_MEM_DATA_IN) {
xfers[xferpos].rx_buf = op->data.buf.in;
xfers[xferpos].rx_nbits = op->data.buswidth;
} else {
xfers[xferpos].tx_buf = op->data.buf.out;
xfers[xferpos].tx_nbits = op->data.buswidth;
}
xfers[xferpos].len = op->data.nbytes;
spi_message_add_tail(&xfers[xferpos], &msg);
xferpos++;
totalxferlen += op->data.nbytes;
}
ret = spi_sync(mem->spi, &msg);
kfree(tmpbuf);
if (ret)
return ret;
if (msg.actual_length != totalxferlen)
return -EIO;
return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_exec_op);
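/*
 * Illustrative example (a sketch, not part of the original file): reading a
 * JEDEC-style ID with spi_mem_exec_op().  The 0x9f opcode and 3-byte length
 * are conventional example values; the buffer is kmalloc()ed because, as
 * checked above, data buffers must be DMA-able and must not live on the stack.
 *
 *	u8 *id = kmalloc(3, GFP_KERNEL);
 *	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
 *					  SPI_MEM_OP_NO_ADDR,
 *					  SPI_MEM_OP_NO_DUMMY,
 *					  SPI_MEM_OP_DATA_IN(3, id, 1));
 *	int ret = spi_mem_exec_op(mem, &op);
 */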
/**
* spi_mem_get_name() - Return the SPI mem device name to be used by the
* upper layer if necessary
* @mem: the SPI memory
*
* This function allows SPI mem users to retrieve the SPI mem device name.
* It is useful if the upper layer needs to expose a custom name for
* compatibility reasons.
*
* Return: a string containing the name of the memory device to be used
* by the SPI mem user
*/
const char *spi_mem_get_name(struct spi_mem *mem)
{
return mem->name;
}
EXPORT_SYMBOL_GPL(spi_mem_get_name);
/**
* spi_mem_adjust_op_size() - Adjust the data size of a SPI mem operation to
* match controller limitations
* @mem: the SPI memory
* @op: the operation to adjust
*
* Some controllers have FIFO limitations and must split a data transfer
* operation into multiple ones, others require a specific alignment for
* optimized accesses. This function allows SPI mem drivers to split a single
* operation into multiple sub-operations when required.
*
* Return: a negative error code if the controller can't properly adjust @op,
* 0 otherwise. Note that @op->data.nbytes will be updated if @op
* can't be handled in a single step.
*/
int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
struct spi_controller *ctlr = mem->spi->controller;
size_t len;
if (ctlr->mem_ops && ctlr->mem_ops->adjust_op_size)
return ctlr->mem_ops->adjust_op_size(mem, op);
if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
if (len > spi_max_transfer_size(mem->spi))
return -EINVAL;
op->data.nbytes = min3((size_t)op->data.nbytes,
spi_max_transfer_size(mem->spi),
spi_max_message_size(mem->spi) -
len);
if (!op->data.nbytes)
return -EINVAL;
}
return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
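/*
 * Typical calling pattern (illustrative sketch, not part of the original
 * file): callers loop until the whole request is done, letting the controller
 * shrink op.data.nbytes on each iteration.
 *
 *	while (len) {
 *		op.addr.val = addr;
 *		op.data.buf.in = buf;
 *		op.data.nbytes = len;
 *		ret = spi_mem_adjust_op_size(mem, &op);
 *		if (ret)
 *			return ret;
 *		ret = spi_mem_exec_op(mem, &op);
 *		if (ret)
 *			return ret;
 *		addr += op.data.nbytes;
 *		buf += op.data.nbytes;
 *		len -= op.data.nbytes;
 *	}
 */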
static ssize_t spi_mem_no_dirmap_read(struct spi_mem_dirmap_desc *desc,
u64 offs, size_t len, void *buf)
{
struct spi_mem_op op = desc->info.op_tmpl;
int ret;
op.addr.val = desc->info.offset + offs;
op.data.buf.in = buf;
op.data.nbytes = len;
ret = spi_mem_adjust_op_size(desc->mem, &op);
if (ret)
return ret;
ret = spi_mem_exec_op(desc->mem, &op);
if (ret)
return ret;
return op.data.nbytes;
}
static ssize_t spi_mem_no_dirmap_write(struct spi_mem_dirmap_desc *desc,
u64 offs, size_t len, const void *buf)
{
struct spi_mem_op op = desc->info.op_tmpl;
int ret;
op.addr.val = desc->info.offset + offs;
op.data.buf.out = buf;
op.data.nbytes = len;
ret = spi_mem_adjust_op_size(desc->mem, &op);
if (ret)
return ret;
ret = spi_mem_exec_op(desc->mem, &op);
if (ret)
return ret;
return op.data.nbytes;
}
/**
* spi_mem_dirmap_create() - Create a direct mapping descriptor
* @mem: SPI mem device this direct mapping should be created for
* @info: direct mapping information
*
* This function is creating a direct mapping descriptor which can then be used
* to access the memory using spi_mem_dirmap_read() or spi_mem_dirmap_write().
* If the SPI controller driver does not support direct mapping, this function
* falls back to an implementation using spi_mem_exec_op(), so that the caller
* doesn't have to bother implementing a fallback on their own.
*
* Return: a valid pointer in case of success, and ERR_PTR() otherwise.
*/
struct spi_mem_dirmap_desc *
spi_mem_dirmap_create(struct spi_mem *mem,
const struct spi_mem_dirmap_info *info)
{
struct spi_controller *ctlr = mem->spi->controller;
struct spi_mem_dirmap_desc *desc;
int ret = -ENOTSUPP;
/* Make sure the number of address cycles is between 1 and 8 bytes. */
if (!info->op_tmpl.addr.nbytes || info->op_tmpl.addr.nbytes > 8)
return ERR_PTR(-EINVAL);
/* data.dir should either be SPI_MEM_DATA_IN or SPI_MEM_DATA_OUT. */
if (info->op_tmpl.data.dir == SPI_MEM_NO_DATA)
return ERR_PTR(-EINVAL);
desc = kzalloc(sizeof(*desc), GFP_KERNEL);
if (!desc)
return ERR_PTR(-ENOMEM);
desc->mem = mem;
desc->info = *info;
if (ctlr->mem_ops && ctlr->mem_ops->dirmap_create)
ret = ctlr->mem_ops->dirmap_create(desc);
if (ret) {
desc->nodirmap = true;
if (!spi_mem_supports_op(desc->mem, &desc->info.op_tmpl))
ret = -ENOTSUPP;
else
ret = 0;
}
if (ret) {
kfree(desc);
return ERR_PTR(ret);
}
return desc;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_create);
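/*
 * Illustrative sketch (not part of the original file): creating a read
 * direct mapping and using it.  The 0x0b fast-read opcode, 3-byte address,
 * single dummy byte and 1 MiB length are made-up example values.
 *
 *	struct spi_mem_dirmap_info info = {
 *		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(0x0b, 1),
 *				      SPI_MEM_OP_ADDR(3, 0, 1),
 *				      SPI_MEM_OP_DUMMY(1, 1),
 *				      SPI_MEM_OP_DATA_IN(0, NULL, 1)),
 *		.offset = 0,
 *		.length = SZ_1M,
 *	};
 *	struct spi_mem_dirmap_desc *desc;
 *
 *	desc = spi_mem_dirmap_create(mem, &info);
 *	if (IS_ERR(desc))
 *		return PTR_ERR(desc);
 *	ret = spi_mem_dirmap_read(desc, 0, len, buf);
 */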
/**
* spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor
* @desc: the direct mapping descriptor to destroy
*
* This function destroys a direct mapping descriptor previously created by
* spi_mem_dirmap_create().
*/
void spi_mem_dirmap_destroy(struct spi_mem_dirmap_desc *desc)
{
struct spi_controller *ctlr = desc->mem->spi->controller;
if (!desc->nodirmap && ctlr->mem_ops && ctlr->mem_ops->dirmap_destroy)
ctlr->mem_ops->dirmap_destroy(desc);
kfree(desc);
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_destroy);
static void devm_spi_mem_dirmap_release(struct device *dev, void *res)
{
struct spi_mem_dirmap_desc *desc = *(struct spi_mem_dirmap_desc **)res;
spi_mem_dirmap_destroy(desc);
}
/**
* devm_spi_mem_dirmap_create() - Create a direct mapping descriptor and attach
* it to a device
* @dev: device the dirmap desc will be attached to
* @mem: SPI mem device this direct mapping should be created for
* @info: direct mapping information
*
* devm_ variant of the spi_mem_dirmap_create() function. See
* spi_mem_dirmap_create() for more details.
*
* Return: a valid pointer in case of success, and ERR_PTR() otherwise.
*/
struct spi_mem_dirmap_desc *
devm_spi_mem_dirmap_create(struct device *dev, struct spi_mem *mem,
const struct spi_mem_dirmap_info *info)
{
struct spi_mem_dirmap_desc **ptr, *desc;
ptr = devres_alloc(devm_spi_mem_dirmap_release, sizeof(*ptr),
GFP_KERNEL);
if (!ptr)
return ERR_PTR(-ENOMEM);
desc = spi_mem_dirmap_create(mem, info);
if (IS_ERR(desc)) {
devres_free(ptr);
} else {
*ptr = desc;
devres_add(dev, ptr);
}
return desc;
}
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_create);
static int devm_spi_mem_dirmap_match(struct device *dev, void *res, void *data)
{
struct spi_mem_dirmap_desc **ptr = res;
if (WARN_ON(!ptr || !*ptr))
return 0;
return *ptr == data;
}
/**
* devm_spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor attached
* to a device
* @dev: device the dirmap desc is attached to
* @desc: the direct mapping descriptor to destroy
*
* devm_ variant of the spi_mem_dirmap_destroy() function. See
* spi_mem_dirmap_destroy() for more details.
*/
void devm_spi_mem_dirmap_destroy(struct device *dev,
struct spi_mem_dirmap_desc *desc)
{
devres_release(dev, devm_spi_mem_dirmap_release,
devm_spi_mem_dirmap_match, desc);
}
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_destroy);
/**
* spi_mem_dirmap_read() - Read data through a direct mapping
* @desc: direct mapping descriptor
* @offs: offset to start reading from. Note that this is not an absolute
* offset, but the offset within the direct mapping which already has
* its own offset
* @len: length in bytes
* @buf: destination buffer. This buffer must be DMA-able
*
* This function reads data from a memory device using a direct mapping
* previously instantiated with spi_mem_dirmap_create().
*
* Return: the amount of data read from the memory device or a negative error
* code. Note that the returned size might be smaller than @len, and the caller
* is responsible for calling spi_mem_dirmap_read() again when that happens.
*/
ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
u64 offs, size_t len, void *buf)
{
struct spi_controller *ctlr = desc->mem->spi->controller;
ssize_t ret;
if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN)
return -EINVAL;
if (!len)
return 0;
if (desc->nodirmap) {
ret = spi_mem_no_dirmap_read(desc, offs, len, buf);
} else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_read) {
ret = spi_mem_access_start(desc->mem);
if (ret)
return ret;
ret = ctlr->mem_ops->dirmap_read(desc, offs, len, buf);
spi_mem_access_end(desc->mem);
} else {
ret = -ENOTSUPP;
}
return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_read);
/**
* spi_mem_dirmap_write() - Write data through a direct mapping
* @desc: direct mapping descriptor
* @offs: offset to start writing from. Note that this is not an absolute
* offset, but the offset within the direct mapping which already has
* its own offset
* @len: length in bytes
* @buf: source buffer. This buffer must be DMA-able
*
* This function writes data to a memory device using a direct mapping
* previously instantiated with spi_mem_dirmap_create().
*
* Return: the amount of data written to the memory device or a negative error
* code. Note that the returned size might be smaller than @len, and the caller
* is responsible for calling spi_mem_dirmap_write() again when that happens.
*/
ssize_t spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc,
u64 offs, size_t len, const void *buf)
{
struct spi_controller *ctlr = desc->mem->spi->controller;
ssize_t ret;
if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_OUT)
return -EINVAL;
if (!len)
return 0;
if (desc->nodirmap) {
ret = spi_mem_no_dirmap_write(desc, offs, len, buf);
} else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_write) {
ret = spi_mem_access_start(desc->mem);
if (ret)
return ret;
ret = ctlr->mem_ops->dirmap_write(desc, offs, len, buf);
spi_mem_access_end(desc->mem);
} else {
ret = -ENOTSUPP;
}
return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_write);
static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
{
return container_of(drv, struct spi_mem_driver, spidrv.driver);
}
static int spi_mem_read_status(struct spi_mem *mem,
const struct spi_mem_op *op,
u16 *status)
{
const u8 *bytes = (u8 *)op->data.buf.in;
int ret;
ret = spi_mem_exec_op(mem, op);
if (ret)
return ret;
if (op->data.nbytes > 1)
*status = ((u16)bytes[0] << 8) | bytes[1];
else
*status = bytes[0];
return 0;
}
/**
* spi_mem_poll_status() - Poll memory device status
* @mem: SPI memory device
* @op: the memory operation to execute
* @mask: status bitmask to check
* @match: (status & mask) expected value
* @initial_delay_us: delay in us before starting to poll
* @polling_delay_us: time to sleep between reads in us
* @timeout_ms: timeout in milliseconds
*
* This function polls a status register and returns when
* (status & mask) == match or when the timeout has expired.
*
* Return: 0 in case of success, -ETIMEDOUT in case of error,
* -EOPNOTSUPP if not supported.
*/
int spi_mem_poll_status(struct spi_mem *mem,
const struct spi_mem_op *op,
u16 mask, u16 match,
unsigned long initial_delay_us,
unsigned long polling_delay_us,
u16 timeout_ms)
{
struct spi_controller *ctlr = mem->spi->controller;
int ret = -EOPNOTSUPP;
int read_status_ret;
u16 status;
if (op->data.nbytes < 1 || op->data.nbytes > 2 ||
op->data.dir != SPI_MEM_DATA_IN)
return -EINVAL;
if (ctlr->mem_ops && ctlr->mem_ops->poll_status && !spi_get_csgpiod(mem->spi, 0)) {
ret = spi_mem_access_start(mem);
if (ret)
return ret;
ret = ctlr->mem_ops->poll_status(mem, op, mask, match,
initial_delay_us, polling_delay_us,
timeout_ms);
spi_mem_access_end(mem);
}
if (ret == -EOPNOTSUPP) {
if (!spi_mem_supports_op(mem, op))
return ret;
if (initial_delay_us < 10)
udelay(initial_delay_us);
else
usleep_range((initial_delay_us >> 2) + 1,
initial_delay_us);
ret = read_poll_timeout(spi_mem_read_status, read_status_ret,
(read_status_ret || ((status) & mask) == match),
polling_delay_us, timeout_ms * 1000, false, mem,
op, &status);
if (read_status_ret)
return read_status_ret;
}
return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_poll_status);
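/*
 * Illustrative sketch (not part of the original file): waiting for a NOR
 * style write-in-progress bit to clear.  The 0x05 read-status opcode and the
 * BIT(0) busy flag are common conventions used here only as example values;
 * the status buffer must be DMA-able (i.e. not on the stack).
 *
 *	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x05, 1),
 *					  SPI_MEM_OP_NO_ADDR,
 *					  SPI_MEM_OP_NO_DUMMY,
 *					  SPI_MEM_OP_DATA_IN(1, status, 1));
 *	ret = spi_mem_poll_status(mem, &op, BIT(0), 0, 10, 100, 1000);
 */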
static int spi_mem_probe(struct spi_device *spi)
{
struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
struct spi_controller *ctlr = spi->controller;
struct spi_mem *mem;
mem = devm_kzalloc(&spi->dev, sizeof(*mem), GFP_KERNEL);
if (!mem)
return -ENOMEM;
mem->spi = spi;
if (ctlr->mem_ops && ctlr->mem_ops->get_name)
mem->name = ctlr->mem_ops->get_name(mem);
else
mem->name = dev_name(&spi->dev);
if (IS_ERR_OR_NULL(mem->name))
return PTR_ERR_OR_ZERO(mem->name);
spi_set_drvdata(spi, mem);
return memdrv->probe(mem);
}
static void spi_mem_remove(struct spi_device *spi)
{
struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
struct spi_mem *mem = spi_get_drvdata(spi);
if (memdrv->remove)
memdrv->remove(mem);
}
static void spi_mem_shutdown(struct spi_device *spi)
{
struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
struct spi_mem *mem = spi_get_drvdata(spi);
if (memdrv->shutdown)
memdrv->shutdown(mem);
}
/**
* spi_mem_driver_register_with_owner() - Register a SPI memory driver
* @memdrv: the SPI memory driver to register
* @owner: the owner of this driver
*
* Registers a SPI memory driver.
*
* Return: 0 in case of success, a negative error code otherwise.
*/
int spi_mem_driver_register_with_owner(struct spi_mem_driver *memdrv,
struct module *owner)
{
memdrv->spidrv.probe = spi_mem_probe;
memdrv->spidrv.remove = spi_mem_remove;
memdrv->spidrv.shutdown = spi_mem_shutdown;
return __spi_register_driver(owner, &memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_register_with_owner);
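/*
 * Illustrative sketch (not part of the original file): a minimal SPI memory
 * driver using the registration helper above.  "my_flash" and the probe and
 * remove callbacks are hypothetical names.
 *
 *	static struct spi_mem_driver my_flash_driver = {
 *		.spidrv = {
 *			.driver = {
 *				.name = "my_flash",
 *			},
 *		},
 *		.probe = my_flash_probe,
 *		.remove = my_flash_remove,
 *	};
 *	module_spi_mem_driver(my_flash_driver);
 */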
/**
* spi_mem_driver_unregister() - Unregister a SPI memory driver
* @memdrv: the SPI memory driver to unregister
*
* Unregisters a SPI memory driver.
*/
void spi_mem_driver_unregister(struct spi_mem_driver *memdrv)
{
spi_unregister_driver(&memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_unregister);
| linux-master | drivers/spi/spi-mem.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Nuvoton Technology corporation.
#include <linux/kernel.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/reset.h>
#include <asm/unaligned.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
struct npcm_pspi {
struct completion xfer_done;
struct reset_control *reset;
struct spi_master *master;
unsigned int tx_bytes;
unsigned int rx_bytes;
void __iomem *base;
bool is_save_param;
u8 bits_per_word;
const u8 *tx_buf;
struct clk *clk;
u32 speed_hz;
u8 *rx_buf;
u16 mode;
u32 id;
};
#define DRIVER_NAME "npcm-pspi"
#define NPCM_PSPI_DATA 0x00
#define NPCM_PSPI_CTL1 0x02
#define NPCM_PSPI_STAT 0x04
/* definitions for control and status register */
#define NPCM_PSPI_CTL1_SPIEN BIT(0)
#define NPCM_PSPI_CTL1_MOD BIT(2)
#define NPCM_PSPI_CTL1_EIR BIT(5)
#define NPCM_PSPI_CTL1_EIW BIT(6)
#define NPCM_PSPI_CTL1_SCM BIT(7)
#define NPCM_PSPI_CTL1_SCIDL BIT(8)
#define NPCM_PSPI_CTL1_SCDV6_0 GENMASK(15, 9)
#define NPCM_PSPI_STAT_BSY BIT(0)
#define NPCM_PSPI_STAT_RBF BIT(1)
/* general definitions */
#define NPCM_PSPI_TIMEOUT_MS 2000
#define NPCM_PSPI_MAX_CLK_DIVIDER 256
#define NPCM_PSPI_MIN_CLK_DIVIDER 4
#define NPCM_PSPI_DEFAULT_CLK 25000000
static inline unsigned int bytes_per_word(unsigned int bits)
{
return bits <= 8 ? 1 : 2;
}
static inline void npcm_pspi_irq_enable(struct npcm_pspi *priv, u16 mask)
{
u16 val;
val = ioread16(priv->base + NPCM_PSPI_CTL1);
val |= mask;
iowrite16(val, priv->base + NPCM_PSPI_CTL1);
}
static inline void npcm_pspi_irq_disable(struct npcm_pspi *priv, u16 mask)
{
u16 val;
val = ioread16(priv->base + NPCM_PSPI_CTL1);
val &= ~mask;
iowrite16(val, priv->base + NPCM_PSPI_CTL1);
}
static inline void npcm_pspi_enable(struct npcm_pspi *priv)
{
u16 val;
val = ioread16(priv->base + NPCM_PSPI_CTL1);
val |= NPCM_PSPI_CTL1_SPIEN;
iowrite16(val, priv->base + NPCM_PSPI_CTL1);
}
static inline void npcm_pspi_disable(struct npcm_pspi *priv)
{
u16 val;
val = ioread16(priv->base + NPCM_PSPI_CTL1);
val &= ~NPCM_PSPI_CTL1_SPIEN;
iowrite16(val, priv->base + NPCM_PSPI_CTL1);
}
static void npcm_pspi_set_mode(struct spi_device *spi)
{
struct npcm_pspi *priv = spi_master_get_devdata(spi->master);
u16 regtemp;
u16 mode_val;
switch (spi->mode & SPI_MODE_X_MASK) {
case SPI_MODE_0:
mode_val = 0;
break;
case SPI_MODE_1:
mode_val = NPCM_PSPI_CTL1_SCIDL;
break;
case SPI_MODE_2:
mode_val = NPCM_PSPI_CTL1_SCM;
break;
case SPI_MODE_3:
mode_val = NPCM_PSPI_CTL1_SCIDL | NPCM_PSPI_CTL1_SCM;
break;
}
regtemp = ioread16(priv->base + NPCM_PSPI_CTL1);
regtemp &= ~(NPCM_PSPI_CTL1_SCM | NPCM_PSPI_CTL1_SCIDL);
iowrite16(regtemp | mode_val, priv->base + NPCM_PSPI_CTL1);
}
static void npcm_pspi_set_transfer_size(struct npcm_pspi *priv, int size)
{
u16 regtemp;
regtemp = ioread16(NPCM_PSPI_CTL1 + priv->base);
switch (size) {
case 8:
regtemp &= ~NPCM_PSPI_CTL1_MOD;
break;
case 16:
regtemp |= NPCM_PSPI_CTL1_MOD;
break;
}
iowrite16(regtemp, NPCM_PSPI_CTL1 + priv->base);
}
static void npcm_pspi_set_baudrate(struct npcm_pspi *priv, unsigned int speed)
{
u32 ckdiv;
u16 regtemp;
/* the supported clock dividers range from 4 to 256. */
ckdiv = DIV_ROUND_CLOSEST(clk_get_rate(priv->clk), (2 * speed)) - 1;
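/*
 * Illustrative example (source clock assumed purely for illustration):
 * with a 40 MHz input clock and a 1 MHz request,
 * ckdiv = round(40000000 / 2000000) - 1 = 19, giving an SCK of
 * 40 MHz / (2 * (19 + 1)) = 1 MHz.
 */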
regtemp = ioread16(NPCM_PSPI_CTL1 + priv->base);
regtemp &= ~NPCM_PSPI_CTL1_SCDV6_0;
iowrite16(regtemp | (ckdiv << 9), NPCM_PSPI_CTL1 + priv->base);
}
static void npcm_pspi_setup_transfer(struct spi_device *spi,
struct spi_transfer *t)
{
struct npcm_pspi *priv = spi_master_get_devdata(spi->master);
priv->tx_buf = t->tx_buf;
priv->rx_buf = t->rx_buf;
priv->tx_bytes = t->len;
priv->rx_bytes = t->len;
if (!priv->is_save_param || priv->mode != spi->mode) {
npcm_pspi_set_mode(spi);
priv->mode = spi->mode;
}
/*
* If the transfer length is even and the transfer uses 8 bits per word,
* run it as a 16 bits-per-word transfer instead.
*/
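/* For instance, a 10-byte, 8 bits-per-word transfer is clocked out as five 16-bit frames. */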
if (priv->bits_per_word == 8 && !(t->len & 0x1))
t->bits_per_word = 16;
if (!priv->is_save_param || priv->bits_per_word != t->bits_per_word) {
npcm_pspi_set_transfer_size(priv, t->bits_per_word);
priv->bits_per_word = t->bits_per_word;
}
if (!priv->is_save_param || priv->speed_hz != t->speed_hz) {
npcm_pspi_set_baudrate(priv, t->speed_hz);
priv->speed_hz = t->speed_hz;
}
if (!priv->is_save_param)
priv->is_save_param = true;
}
static void npcm_pspi_send(struct npcm_pspi *priv)
{
int wsize;
u16 val;
wsize = min(bytes_per_word(priv->bits_per_word), priv->tx_bytes);
priv->tx_bytes -= wsize;
if (!priv->tx_buf)
return;
switch (wsize) {
case 1:
val = *priv->tx_buf++;
iowrite8(val, NPCM_PSPI_DATA + priv->base);
break;
case 2:
val = *priv->tx_buf++;
val = *priv->tx_buf++ | (val << 8);
iowrite16(val, NPCM_PSPI_DATA + priv->base);
break;
default:
WARN_ON_ONCE(1);
return;
}
}
static void npcm_pspi_recv(struct npcm_pspi *priv)
{
int rsize;
u16 val;
rsize = min(bytes_per_word(priv->bits_per_word), priv->rx_bytes);
priv->rx_bytes -= rsize;
if (!priv->rx_buf)
return;
switch (rsize) {
case 1:
*priv->rx_buf++ = ioread8(priv->base + NPCM_PSPI_DATA);
break;
case 2:
val = ioread16(priv->base + NPCM_PSPI_DATA);
*priv->rx_buf++ = (val >> 8);
*priv->rx_buf++ = val & 0xff;
break;
default:
WARN_ON_ONCE(1);
return;
}
}
static int npcm_pspi_transfer_one(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *t)
{
struct npcm_pspi *priv = spi_master_get_devdata(master);
int status;
npcm_pspi_setup_transfer(spi, t);
reinit_completion(&priv->xfer_done);
npcm_pspi_enable(priv);
status = wait_for_completion_timeout(&priv->xfer_done,
msecs_to_jiffies
(NPCM_PSPI_TIMEOUT_MS));
if (status == 0) {
npcm_pspi_disable(priv);
return -ETIMEDOUT;
}
return 0;
}
static int npcm_pspi_prepare_transfer_hardware(struct spi_master *master)
{
struct npcm_pspi *priv = spi_master_get_devdata(master);
npcm_pspi_irq_enable(priv, NPCM_PSPI_CTL1_EIR | NPCM_PSPI_CTL1_EIW);
return 0;
}
static int npcm_pspi_unprepare_transfer_hardware(struct spi_master *master)
{
struct npcm_pspi *priv = spi_master_get_devdata(master);
npcm_pspi_irq_disable(priv, NPCM_PSPI_CTL1_EIR | NPCM_PSPI_CTL1_EIW);
return 0;
}
static void npcm_pspi_reset_hw(struct npcm_pspi *priv)
{
reset_control_assert(priv->reset);
udelay(5);
reset_control_deassert(priv->reset);
}
static irqreturn_t npcm_pspi_handler(int irq, void *dev_id)
{
struct npcm_pspi *priv = dev_id;
u8 stat;
stat = ioread8(priv->base + NPCM_PSPI_STAT);
if (!priv->tx_buf && !priv->rx_buf)
return IRQ_NONE;
if (priv->tx_buf) {
if (stat & NPCM_PSPI_STAT_RBF) {
ioread8(NPCM_PSPI_DATA + priv->base);
if (priv->tx_bytes == 0) {
npcm_pspi_disable(priv);
complete(&priv->xfer_done);
return IRQ_HANDLED;
}
}
if ((stat & NPCM_PSPI_STAT_BSY) == 0)
if (priv->tx_bytes)
npcm_pspi_send(priv);
}
if (priv->rx_buf) {
if (stat & NPCM_PSPI_STAT_RBF) {
if (!priv->rx_bytes)
return IRQ_NONE;
npcm_pspi_recv(priv);
if (!priv->rx_bytes) {
npcm_pspi_disable(priv);
complete(&priv->xfer_done);
return IRQ_HANDLED;
}
}
if (((stat & NPCM_PSPI_STAT_BSY) == 0) && !priv->tx_buf)
iowrite8(0x0, NPCM_PSPI_DATA + priv->base);
}
return IRQ_HANDLED;
}
static int npcm_pspi_probe(struct platform_device *pdev)
{
struct npcm_pspi *priv;
struct spi_master *master;
unsigned long clk_hz;
int irq;
int ret;
master = spi_alloc_master(&pdev->dev, sizeof(*priv));
if (!master)
return -ENOMEM;
platform_set_drvdata(pdev, master);
priv = spi_master_get_devdata(master);
priv->master = master;
priv->is_save_param = false;
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
ret = PTR_ERR(priv->base);
goto out_master_put;
}
priv->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(priv->clk)) {
dev_err(&pdev->dev, "failed to get clock\n");
ret = PTR_ERR(priv->clk);
goto out_master_put;
}
ret = clk_prepare_enable(priv->clk);
if (ret)
goto out_master_put;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
goto out_disable_clk;
}
priv->reset = devm_reset_control_get(&pdev->dev, NULL);
if (IS_ERR(priv->reset)) {
ret = PTR_ERR(priv->reset);
goto out_disable_clk;
}
/* reset SPI-HW block */
npcm_pspi_reset_hw(priv);
ret = devm_request_irq(&pdev->dev, irq, npcm_pspi_handler, 0,
"npcm-pspi", priv);
if (ret) {
dev_err(&pdev->dev, "failed to request IRQ\n");
goto out_disable_clk;
}
init_completion(&priv->xfer_done);
clk_hz = clk_get_rate(priv->clk);
master->max_speed_hz = DIV_ROUND_UP(clk_hz, NPCM_PSPI_MIN_CLK_DIVIDER);
master->min_speed_hz = DIV_ROUND_UP(clk_hz, NPCM_PSPI_MAX_CLK_DIVIDER);
master->mode_bits = SPI_CPHA | SPI_CPOL;
master->dev.of_node = pdev->dev.of_node;
master->bus_num = -1;
master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
master->transfer_one = npcm_pspi_transfer_one;
master->prepare_transfer_hardware =
npcm_pspi_prepare_transfer_hardware;
master->unprepare_transfer_hardware =
npcm_pspi_unprepare_transfer_hardware;
master->use_gpio_descriptors = true;
/* set to default clock rate */
npcm_pspi_set_baudrate(priv, NPCM_PSPI_DEFAULT_CLK);
ret = devm_spi_register_master(&pdev->dev, master);
if (ret)
goto out_disable_clk;
pr_info("NPCM Peripheral SPI %d probed\n", master->bus_num);
return 0;
out_disable_clk:
clk_disable_unprepare(priv->clk);
out_master_put:
spi_master_put(master);
return ret;
}
static void npcm_pspi_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct npcm_pspi *priv = spi_master_get_devdata(master);
npcm_pspi_reset_hw(priv);
clk_disable_unprepare(priv->clk);
}
static const struct of_device_id npcm_pspi_match[] = {
{ .compatible = "nuvoton,npcm750-pspi", .data = NULL },
{ .compatible = "nuvoton,npcm845-pspi", .data = NULL },
{}
};
MODULE_DEVICE_TABLE(of, npcm_pspi_match);
static struct platform_driver npcm_pspi_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = npcm_pspi_match,
},
.probe = npcm_pspi_probe,
.remove_new = npcm_pspi_remove,
};
module_platform_driver(npcm_pspi_driver);
MODULE_DESCRIPTION("NPCM peripheral SPI Controller driver");
MODULE_AUTHOR("Tomer Maimon <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/spi/spi-npcm-pspi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Driver for Cirrus Logic EP93xx SPI controller.
*
* Copyright (C) 2010-2011 Mika Westerberg
*
* Explicit FIFO handling code was inspired by amba-pl022 driver.
*
* Chip select support using other than built-in GPIOs by H. Hartley Sweeten.
*
* For more information about the SPI controller see documentation on Cirrus
* Logic web site:
* https://www.cirrus.com/en/pubs/manual/EP93xx_Users_Guide_UM1.pdf
*/
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/spi/spi.h>
#include <linux/platform_data/dma-ep93xx.h>
#include <linux/platform_data/spi-ep93xx.h>
#define SSPCR0 0x0000
#define SSPCR0_SPO BIT(6)
#define SSPCR0_SPH BIT(7)
#define SSPCR0_SCR_SHIFT 8
#define SSPCR1 0x0004
#define SSPCR1_RIE BIT(0)
#define SSPCR1_TIE BIT(1)
#define SSPCR1_RORIE BIT(2)
#define SSPCR1_LBM BIT(3)
#define SSPCR1_SSE BIT(4)
#define SSPCR1_MS BIT(5)
#define SSPCR1_SOD BIT(6)
#define SSPDR 0x0008
#define SSPSR 0x000c
#define SSPSR_TFE BIT(0)
#define SSPSR_TNF BIT(1)
#define SSPSR_RNE BIT(2)
#define SSPSR_RFF BIT(3)
#define SSPSR_BSY BIT(4)
#define SSPCPSR 0x0010
#define SSPIIR 0x0014
#define SSPIIR_RIS BIT(0)
#define SSPIIR_TIS BIT(1)
#define SSPIIR_RORIS BIT(2)
#define SSPICR SSPIIR
/* timeout in milliseconds */
#define SPI_TIMEOUT 5
/* maximum depth of RX/TX FIFO */
#define SPI_FIFO_SIZE 8
/**
* struct ep93xx_spi - EP93xx SPI controller structure
* @clk: clock for the controller
* @mmio: pointer to ioremap()'d registers
* @sspdr_phys: physical address of the SSPDR register
* @tx: current byte in transfer to transmit
* @rx: current byte in transfer to receive
* @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one
* frame decreases this level and sending one frame increases it.
* @dma_rx: RX DMA channel
* @dma_tx: TX DMA channel
* @dma_rx_data: RX parameters passed to the DMA engine
* @dma_tx_data: TX parameters passed to the DMA engine
* @rx_sgt: sg table for RX transfers
* @tx_sgt: sg table for TX transfers
* @zeropage: dummy page used as RX buffer when only TX buffer is passed in by
* the client
*/
struct ep93xx_spi {
struct clk *clk;
void __iomem *mmio;
unsigned long sspdr_phys;
size_t tx;
size_t rx;
size_t fifo_level;
struct dma_chan *dma_rx;
struct dma_chan *dma_tx;
struct ep93xx_dma_data dma_rx_data;
struct ep93xx_dma_data dma_tx_data;
struct sg_table rx_sgt;
struct sg_table tx_sgt;
void *zeropage;
};
/* converts bits per word to CR0.DSS value */
#define bits_per_word_to_dss(bpw) ((bpw) - 1)
/**
* ep93xx_spi_calc_divisors() - calculates SPI clock divisors
* @host: SPI host
* @rate: desired SPI output clock rate
* @div_cpsr: pointer to return the cpsr (pre-scaler) divider
* @div_scr: pointer to return the scr divider
*/
static int ep93xx_spi_calc_divisors(struct spi_controller *host,
u32 rate, u8 *div_cpsr, u8 *div_scr)
{
struct ep93xx_spi *espi = spi_controller_get_devdata(host);
unsigned long spi_clk_rate = clk_get_rate(espi->clk);
int cpsr, scr;
/*
* Make sure that max value is between values supported by the
* controller.
*/
rate = clamp(rate, host->min_speed_hz, host->max_speed_hz);
/*
* Calculate divisors so that we can get speed according to the
* following formula:
* rate = spi_clock_rate / (cpsr * (1 + scr))
*
* cpsr must be an even number starting from 2, scr can be any number
* between 0 and 255.
*/
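/*
 * Worked example (clock rate assumed purely for illustration): with a
 * 14.7456 MHz SSP clock and a requested rate of 1 MHz, the first match
 * found by the loops below is cpsr = 2, scr = 7, i.e.
 * 14745600 / (2 * 8) = 921600 Hz, the fastest rate not exceeding the request.
 */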
for (cpsr = 2; cpsr <= 254; cpsr += 2) {
for (scr = 0; scr <= 255; scr++) {
if ((spi_clk_rate / (cpsr * (scr + 1))) <= rate) {
*div_scr = (u8)scr;
*div_cpsr = (u8)cpsr;
return 0;
}
}
}
return -EINVAL;
}
static int ep93xx_spi_chip_setup(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct ep93xx_spi *espi = spi_controller_get_devdata(host);
u8 dss = bits_per_word_to_dss(xfer->bits_per_word);
u8 div_cpsr = 0;
u8 div_scr = 0;
u16 cr0;
int err;
err = ep93xx_spi_calc_divisors(host, xfer->speed_hz,
&div_cpsr, &div_scr);
if (err)
return err;
cr0 = div_scr << SSPCR0_SCR_SHIFT;
if (spi->mode & SPI_CPOL)
cr0 |= SSPCR0_SPO;
if (spi->mode & SPI_CPHA)
cr0 |= SSPCR0_SPH;
cr0 |= dss;
dev_dbg(&host->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n",
spi->mode, div_cpsr, div_scr, dss);
dev_dbg(&host->dev, "setup: cr0 %#x\n", cr0);
writel(div_cpsr, espi->mmio + SSPCPSR);
writel(cr0, espi->mmio + SSPCR0);
return 0;
}
static void ep93xx_do_write(struct spi_controller *host)
{
struct ep93xx_spi *espi = spi_controller_get_devdata(host);
struct spi_transfer *xfer = host->cur_msg->state;
u32 val = 0;
if (xfer->bits_per_word > 8) {
if (xfer->tx_buf)
val = ((u16 *)xfer->tx_buf)[espi->tx];
espi->tx += 2;
} else {
if (xfer->tx_buf)
val = ((u8 *)xfer->tx_buf)[espi->tx];
espi->tx += 1;
}
writel(val, espi->mmio + SSPDR);
}
static void ep93xx_do_read(struct spi_controller *host)
{
struct ep93xx_spi *espi = spi_controller_get_devdata(host);
struct spi_transfer *xfer = host->cur_msg->state;
u32 val;
val = readl(espi->mmio + SSPDR);
if (xfer->bits_per_word > 8) {
if (xfer->rx_buf)
((u16 *)xfer->rx_buf)[espi->rx] = val;
espi->rx += 2;
} else {
if (xfer->rx_buf)
((u8 *)xfer->rx_buf)[espi->rx] = val;
espi->rx += 1;
}
}
/**
* ep93xx_spi_read_write() - perform next RX/TX transfer
* @host: SPI host
*
* This function transfers the next bytes (or half-words) to/from the RX/TX
* FIFOs. When called repeatedly it eventually completes the whole transfer.
* Returns %-EINPROGRESS if the current transfer has not yet completed,
* otherwise %0.
*
* When this function is finished, RX FIFO should be empty and TX FIFO should be
* full.
*/
static int ep93xx_spi_read_write(struct spi_controller *host)
{
struct ep93xx_spi *espi = spi_controller_get_devdata(host);
struct spi_transfer *xfer = host->cur_msg->state;
/* read as long as RX FIFO has frames in it */
while ((readl(espi->mmio + SSPSR) & SSPSR_RNE)) {
ep93xx_do_read(host);
espi->fifo_level--;
}
/* write as long as TX FIFO has room */
while (espi->fifo_level < SPI_FIFO_SIZE && espi->tx < xfer->len) {
ep93xx_do_write(host);
espi->fifo_level++;
}
if (espi->rx == xfer->len)
return 0;
return -EINPROGRESS;
}
static enum dma_transfer_direction
ep93xx_dma_data_to_trans_dir(enum dma_data_direction dir)
{
switch (dir) {
case DMA_TO_DEVICE:
return DMA_MEM_TO_DEV;
case DMA_FROM_DEVICE:
return DMA_DEV_TO_MEM;
default:
return DMA_TRANS_NONE;
}
}
/**
* ep93xx_spi_dma_prepare() - prepares a DMA transfer
* @host: SPI host
* @dir: DMA transfer direction
*
* Function configures the DMA, maps the buffer and prepares the DMA
* descriptor. Returns a valid DMA descriptor in case of success and ERR_PTR
* in case of failure.
*/
static struct dma_async_tx_descriptor *
ep93xx_spi_dma_prepare(struct spi_controller *host,
enum dma_data_direction dir)
{
struct ep93xx_spi *espi = spi_controller_get_devdata(host);
struct spi_transfer *xfer = host->cur_msg->state;
struct dma_async_tx_descriptor *txd;
enum dma_slave_buswidth buswidth;
struct dma_slave_config conf;
struct scatterlist *sg;
struct sg_table *sgt;
struct dma_chan *chan;
const void *buf, *pbuf;
size_t len = xfer->len;
int i, ret, nents;
if (xfer->bits_per_word > 8)
buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
else
buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
memset(&conf, 0, sizeof(conf));
conf.direction = ep93xx_dma_data_to_trans_dir(dir);
if (dir == DMA_FROM_DEVICE) {
chan = espi->dma_rx;
buf = xfer->rx_buf;
sgt = &espi->rx_sgt;
conf.src_addr = espi->sspdr_phys;
conf.src_addr_width = buswidth;
} else {
chan = espi->dma_tx;
buf = xfer->tx_buf;
sgt = &espi->tx_sgt;
conf.dst_addr = espi->sspdr_phys;
conf.dst_addr_width = buswidth;
}
ret = dmaengine_slave_config(chan, &conf);
if (ret)
return ERR_PTR(ret);
/*
* We need to split the transfer into PAGE_SIZE'd chunks. This is
* because we are using @espi->zeropage to provide a zero RX buffer
* for the TX transfers and we have only allocated one page for that.
*
* For performance reasons we allocate a new sg_table only when
* needed. Otherwise we will re-use the current one. Eventually the
* last sg_table is released in ep93xx_spi_release_dma().
*/
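/*
 * For example (assuming 4 KiB pages), a 10 KiB transfer is split into
 * three scatterlist entries of 4096, 4096 and 2048 bytes.
 */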
nents = DIV_ROUND_UP(len, PAGE_SIZE);
if (nents != sgt->nents) {
sg_free_table(sgt);
ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
if (ret)
return ERR_PTR(ret);
}
pbuf = buf;
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
size_t bytes = min_t(size_t, len, PAGE_SIZE);
if (buf) {
sg_set_page(sg, virt_to_page(pbuf), bytes,
offset_in_page(pbuf));
} else {
sg_set_page(sg, virt_to_page(espi->zeropage),
bytes, 0);
}
pbuf += bytes;
len -= bytes;
}
if (WARN_ON(len)) {
dev_warn(&host->dev, "len = %zu expected 0!\n", len);
return ERR_PTR(-EINVAL);
}
nents = dma_map_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
if (!nents)
return ERR_PTR(-ENOMEM);
txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, conf.direction,
DMA_CTRL_ACK);
if (!txd) {
dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
return ERR_PTR(-ENOMEM);
}
return txd;
}
/**
* ep93xx_spi_dma_finish() - finishes with a DMA transfer
* @host: SPI host
* @dir: DMA transfer direction
*
* Function finishes with the DMA transfer. After this, the DMA buffer is
* unmapped.
*/
static void ep93xx_spi_dma_finish(struct spi_controller *host,
enum dma_data_direction dir)
{
struct ep93xx_spi *espi = spi_controller_get_devdata(host);
struct dma_chan *chan;
struct sg_table *sgt;
if (dir == DMA_FROM_DEVICE) {
chan = espi->dma_rx;
sgt = &espi->rx_sgt;
} else {
chan = espi->dma_tx;
sgt = &espi->tx_sgt;
}
dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
}
static void ep93xx_spi_dma_callback(void *callback_param)
{
struct spi_controller *host = callback_param;
ep93xx_spi_dma_finish(host, DMA_TO_DEVICE);
ep93xx_spi_dma_finish(host, DMA_FROM_DEVICE);
spi_finalize_current_transfer(host);
}
static int ep93xx_spi_dma_transfer(struct spi_controller *host)
{
struct ep93xx_spi *espi = spi_controller_get_devdata(host);
struct dma_async_tx_descriptor *rxd, *txd;
rxd = ep93xx_spi_dma_prepare(host, DMA_FROM_DEVICE);
if (IS_ERR(rxd)) {
dev_err(&host->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd));
return PTR_ERR(rxd);
}
txd = ep93xx_spi_dma_prepare(host, DMA_TO_DEVICE);
if (IS_ERR(txd)) {
ep93xx_spi_dma_finish(host, DMA_FROM_DEVICE);
dev_err(&host->dev, "DMA TX failed: %ld\n", PTR_ERR(txd));
return PTR_ERR(txd);
}
/* We are ready when RX is done */
rxd->callback = ep93xx_spi_dma_callback;
rxd->callback_param = host;
/* Now submit both descriptors and start DMA */
dmaengine_submit(rxd);
dmaengine_submit(txd);
dma_async_issue_pending(espi->dma_rx);
dma_async_issue_pending(espi->dma_tx);
/* signal that we need to wait for completion */
return 1;
}
static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id)
{
struct spi_controller *host = dev_id;
struct ep93xx_spi *espi = spi_controller_get_devdata(host);
u32 val;
/*
* If we got ROR (receive overrun) interrupt we know that something is
* wrong. Just abort the message.
*/
if (readl(espi->mmio + SSPIIR) & SSPIIR_RORIS) {
/* clear the overrun interrupt */
writel(0, espi->mmio + SSPICR);
dev_warn(&host->dev,
"receive overrun, aborting the message\n");
host->cur_msg->status = -EIO;
} else {
/*
* Interrupt is either RX (RIS) or TX (TIS). For both cases we
* simply execute next data transfer.
*/
if (ep93xx_spi_read_write(host)) {
/*
* In normal case, there still is some processing left
* for current transfer. Let's wait for the next
* interrupt then.
*/
return IRQ_HANDLED;
}
}
/*
* Current transfer is finished, either with error or with success. In
* any case we disable interrupts and notify the worker to handle
* any post-processing of the message.
*/
val = readl(espi->mmio + SSPCR1);
val &= ~(SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
writel(val, espi->mmio + SSPCR1);
spi_finalize_current_transfer(host);
return IRQ_HANDLED;
}
static int ep93xx_spi_transfer_one(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct ep93xx_spi *espi = spi_controller_get_devdata(host);
u32 val;
int ret;
ret = ep93xx_spi_chip_setup(host, spi, xfer);
if (ret) {
dev_err(&host->dev, "failed to setup chip for transfer\n");
return ret;
}
host->cur_msg->state = xfer;
espi->rx = 0;
espi->tx = 0;
/*
* There is no point in setting up DMA for transfers which fit into the
* FIFO and can be completed with a single interrupt, so those cases use
* PIO instead.
*/
if (espi->dma_rx && xfer->len > SPI_FIFO_SIZE)
return ep93xx_spi_dma_transfer(host);
/* Using PIO so prime the TX FIFO and enable interrupts */
ep93xx_spi_read_write(host);
val = readl(espi->mmio + SSPCR1);
val |= (SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
writel(val, espi->mmio + SSPCR1);
/* signal that we need to wait for completion */
return 1;
}
static int ep93xx_spi_prepare_message(struct spi_controller *host,
struct spi_message *msg)
{
struct ep93xx_spi *espi = spi_controller_get_devdata(host);
unsigned long timeout;
/*
* Just to be sure: flush any data from RX FIFO.
*/
timeout = jiffies + msecs_to_jiffies(SPI_TIMEOUT);
while (readl(espi->mmio + SSPSR) & SSPSR_RNE) {
if (time_after(jiffies, timeout)) {
dev_warn(&host->dev,
"timeout while flushing RX FIFO\n");
return -ETIMEDOUT;
}
readl(espi->mmio + SSPDR);
}
/*
* We explicitly handle FIFO level. This way we don't have to check TX
* FIFO status using %SSPSR_TNF bit which may cause RX FIFO overruns.
*/
espi->fifo_level = 0;
return 0;
}
static int ep93xx_spi_prepare_hardware(struct spi_controller *host)
{
struct ep93xx_spi *espi = spi_controller_get_devdata(host);
u32 val;
int ret;
ret = clk_prepare_enable(espi->clk);
if (ret)
return ret;
val = readl(espi->mmio + SSPCR1);
val |= SSPCR1_SSE;
writel(val, espi->mmio + SSPCR1);
return 0;
}
static int ep93xx_spi_unprepare_hardware(struct spi_controller *host)
{
struct ep93xx_spi *espi = spi_controller_get_devdata(host);
u32 val;
val = readl(espi->mmio + SSPCR1);
val &= ~SSPCR1_SSE;
writel(val, espi->mmio + SSPCR1);
clk_disable_unprepare(espi->clk);
return 0;
}
static bool ep93xx_spi_dma_filter(struct dma_chan *chan, void *filter_param)
{
if (ep93xx_dma_chan_is_m2p(chan))
return false;
chan->private = filter_param;
return true;
}
static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
{
dma_cap_mask_t mask;
int ret;
espi->zeropage = (void *)get_zeroed_page(GFP_KERNEL);
if (!espi->zeropage)
return -ENOMEM;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
espi->dma_rx_data.port = EP93XX_DMA_SSP;
espi->dma_rx_data.direction = DMA_DEV_TO_MEM;
espi->dma_rx_data.name = "ep93xx-spi-rx";
espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter,
&espi->dma_rx_data);
if (!espi->dma_rx) {
ret = -ENODEV;
goto fail_free_page;
}
espi->dma_tx_data.port = EP93XX_DMA_SSP;
espi->dma_tx_data.direction = DMA_MEM_TO_DEV;
espi->dma_tx_data.name = "ep93xx-spi-tx";
espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter,
&espi->dma_tx_data);
if (!espi->dma_tx) {
ret = -ENODEV;
goto fail_release_rx;
}
return 0;
fail_release_rx:
dma_release_channel(espi->dma_rx);
espi->dma_rx = NULL;
fail_free_page:
free_page((unsigned long)espi->zeropage);
return ret;
}
static void ep93xx_spi_release_dma(struct ep93xx_spi *espi)
{
if (espi->dma_rx) {
dma_release_channel(espi->dma_rx);
sg_free_table(&espi->rx_sgt);
}
if (espi->dma_tx) {
dma_release_channel(espi->dma_tx);
sg_free_table(&espi->tx_sgt);
}
if (espi->zeropage)
free_page((unsigned long)espi->zeropage);
}
static int ep93xx_spi_probe(struct platform_device *pdev)
{
struct spi_controller *host;
struct ep93xx_spi_info *info;
struct ep93xx_spi *espi;
struct resource *res;
int irq;
int error;
info = dev_get_platdata(&pdev->dev);
if (!info) {
dev_err(&pdev->dev, "missing platform data\n");
return -EINVAL;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
host = spi_alloc_host(&pdev->dev, sizeof(*espi));
if (!host)
return -ENOMEM;
host->use_gpio_descriptors = true;
host->prepare_transfer_hardware = ep93xx_spi_prepare_hardware;
host->unprepare_transfer_hardware = ep93xx_spi_unprepare_hardware;
host->prepare_message = ep93xx_spi_prepare_message;
host->transfer_one = ep93xx_spi_transfer_one;
host->bus_num = pdev->id;
host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
host->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
/*
* The SPI core will count the number of GPIO descriptors to figure
* out the number of chip selects available on the platform.
*/
host->num_chipselect = 0;
platform_set_drvdata(pdev, host);
espi = spi_controller_get_devdata(host);
espi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(espi->clk)) {
dev_err(&pdev->dev, "unable to get spi clock\n");
error = PTR_ERR(espi->clk);
goto fail_release_host;
}
/*
* Calculate maximum and minimum supported clock rates
* for the controller.
*/
host->max_speed_hz = clk_get_rate(espi->clk) / 2;
host->min_speed_hz = clk_get_rate(espi->clk) / (254 * 256);
espi->mmio = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(espi->mmio)) {
error = PTR_ERR(espi->mmio);
goto fail_release_host;
}
espi->sspdr_phys = res->start + SSPDR;
error = devm_request_irq(&pdev->dev, irq, ep93xx_spi_interrupt,
0, "ep93xx-spi", host);
if (error) {
dev_err(&pdev->dev, "failed to request irq\n");
goto fail_release_host;
}
if (info->use_dma && ep93xx_spi_setup_dma(espi))
dev_warn(&pdev->dev, "DMA setup failed. Falling back to PIO\n");
/* make sure that the hardware is disabled */
writel(0, espi->mmio + SSPCR1);
error = devm_spi_register_controller(&pdev->dev, host);
if (error) {
dev_err(&pdev->dev, "failed to register SPI host\n");
goto fail_free_dma;
}
dev_info(&pdev->dev, "EP93xx SPI Controller at 0x%08lx irq %d\n",
(unsigned long)res->start, irq);
return 0;
fail_free_dma:
ep93xx_spi_release_dma(espi);
fail_release_host:
spi_controller_put(host);
return error;
}
static void ep93xx_spi_remove(struct platform_device *pdev)
{
struct spi_controller *host = platform_get_drvdata(pdev);
struct ep93xx_spi *espi = spi_controller_get_devdata(host);
ep93xx_spi_release_dma(espi);
}
static struct platform_driver ep93xx_spi_driver = {
.driver = {
.name = "ep93xx-spi",
},
.probe = ep93xx_spi_probe,
.remove_new = ep93xx_spi_remove,
};
module_platform_driver(ep93xx_spi_driver);
MODULE_DESCRIPTION("EP93xx SPI Controller driver");
MODULE_AUTHOR("Mika Westerberg <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ep93xx-spi");
| linux-master | drivers/spi/spi-ep93xx.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Intel PCH/PCU SPI flash PCI driver.
*
* Copyright (C) 2016 - 2022, Intel Corporation
* Author: Mika Westerberg <[email protected]>
*/
#include <linux/module.h>
#include <linux/pci.h>
#include "spi-intel.h"
#define BCR 0xdc
#define BCR_WPD BIT(0)
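/*
 * BCR is the BIOS Control register in PCI config space; its BCR_WPD
 * ("Write Protect Disable") bit must be set before the SPI flash
 * controller accepts write cycles.
 */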
static bool intel_spi_pci_set_writeable(void __iomem *base, void *data)
{
struct pci_dev *pdev = data;
u32 bcr;
/* Try to make the chip read/write */
pci_read_config_dword(pdev, BCR, &bcr);
if (!(bcr & BCR_WPD)) {
bcr |= BCR_WPD;
pci_write_config_dword(pdev, BCR, bcr);
pci_read_config_dword(pdev, BCR, &bcr);
}
return bcr & BCR_WPD;
}
static const struct intel_spi_boardinfo bxt_info = {
.type = INTEL_SPI_BXT,
.set_writeable = intel_spi_pci_set_writeable,
};
static const struct intel_spi_boardinfo cnl_info = {
.type = INTEL_SPI_CNL,
.set_writeable = intel_spi_pci_set_writeable,
};
static int intel_spi_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct intel_spi_boardinfo *info;
int ret;
ret = pcim_enable_device(pdev);
if (ret)
return ret;
info = devm_kmemdup(&pdev->dev, (void *)id->driver_data, sizeof(*info),
GFP_KERNEL);
if (!info)
return -ENOMEM;
info->data = pdev;
return intel_spi_probe(&pdev->dev, &pdev->resource[0], info);
}
static const struct pci_device_id intel_spi_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x02a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x06a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x18e0), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x1bca), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x34a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x38a4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x43a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x4b24), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x4da4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x51a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x54a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x5794), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x7a24), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x7aa4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x7e23), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x9d24), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x9da4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0xa0a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0xa224), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0xa2a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0xa324), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0xa3a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0xae23), (unsigned long)&cnl_info },
{ },
};
MODULE_DEVICE_TABLE(pci, intel_spi_pci_ids);
static struct pci_driver intel_spi_pci_driver = {
.name = "intel-spi",
.id_table = intel_spi_pci_ids,
.probe = intel_spi_pci_probe,
};
module_pci_driver(intel_spi_pci_driver);
MODULE_DESCRIPTION("Intel PCH/PCU SPI flash PCI driver");
MODULE_AUTHOR("Mika Westerberg <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/spi/spi-intel-pci.c |
/*
* SPI slave handler reporting uptime at reception of previous SPI message
*
* This SPI slave handler sends the time of reception of the last SPI message
* as two 32-bit unsigned integers in binary format and in network byte order,
* representing the number of seconds and fractional seconds (in microseconds)
* since boot up.
*
* Copyright (C) 2016-2017 Glider bvba
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Usage (assuming /dev/spidev2.0 corresponds to the SPI master on the remote
* system):
*
* # spidev_test -D /dev/spidev2.0 -p dummy-8B
* spi mode: 0x0
* bits per word: 8
* max speed: 500000 Hz (500 KHz)
* RX | 00 00 04 6D 00 09 5B BB ...
* ^^^^^ ^^^^^^^^
* seconds microseconds
*/
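/*
 * Decoding the sample output above: 0x0000046D = 1133 seconds and
 * 0x00095BBB = 613307 microseconds since boot.
 */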
#include <linux/completion.h>
#include <linux/module.h>
#include <linux/sched/clock.h>
#include <linux/spi/spi.h>
struct spi_slave_time_priv {
struct spi_device *spi;
struct completion finished;
struct spi_transfer xfer;
struct spi_message msg;
__be32 buf[2];
};
static int spi_slave_time_submit(struct spi_slave_time_priv *priv);
static void spi_slave_time_complete(void *arg)
{
struct spi_slave_time_priv *priv = arg;
int ret;
ret = priv->msg.status;
if (ret)
goto terminate;
ret = spi_slave_time_submit(priv);
if (ret)
goto terminate;
return;
terminate:
dev_info(&priv->spi->dev, "Terminating\n");
complete(&priv->finished);
}
static int spi_slave_time_submit(struct spi_slave_time_priv *priv)
{
u32 rem_us;
int ret;
u64 ts;
ts = local_clock();
rem_us = do_div(ts, 1000000000) / 1000;
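/*
 * do_div() divides @ts by NSEC_PER_SEC in place (leaving whole seconds)
 * and returns the remainder in nanoseconds, which is then scaled down to
 * microseconds.
 */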
priv->buf[0] = cpu_to_be32(ts);
priv->buf[1] = cpu_to_be32(rem_us);
spi_message_init_with_transfers(&priv->msg, &priv->xfer, 1);
priv->msg.complete = spi_slave_time_complete;
priv->msg.context = priv;
ret = spi_async(priv->spi, &priv->msg);
if (ret)
dev_err(&priv->spi->dev, "spi_async() failed %d\n", ret);
return ret;
}
static int spi_slave_time_probe(struct spi_device *spi)
{
struct spi_slave_time_priv *priv;
int ret;
priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->spi = spi;
init_completion(&priv->finished);
priv->xfer.tx_buf = priv->buf;
priv->xfer.len = sizeof(priv->buf);
ret = spi_slave_time_submit(priv);
if (ret)
return ret;
spi_set_drvdata(spi, priv);
return 0;
}
static void spi_slave_time_remove(struct spi_device *spi)
{
struct spi_slave_time_priv *priv = spi_get_drvdata(spi);
spi_slave_abort(spi);
wait_for_completion(&priv->finished);
}
static struct spi_driver spi_slave_time_driver = {
.driver = {
.name = "spi-slave-time",
},
.probe = spi_slave_time_probe,
.remove = spi_slave_time_remove,
};
module_spi_driver(spi_slave_time_driver);
MODULE_AUTHOR("Geert Uytterhoeven <[email protected]>");
MODULE_DESCRIPTION("SPI slave reporting uptime at previous SPI message");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/spi/spi-slave-time.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright (C) 2022 Hewlett-Packard Development Company, L.P. */
#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#define GXP_SPI0_MAX_CHIPSELECT 2
#define GXP_SPI_SLEEP_TIME 1
#define GXP_SPI_TIMEOUT (130 * 1000000 / GXP_SPI_SLEEP_TIME)
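/* Polling budget for readb_poll_timeout(): ~130 seconds, expressed in microseconds. */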
#define MANUAL_MODE 0
#define DIRECT_MODE 1
#define SPILDAT_LEN 256
#define OFFSET_SPIMCFG 0x0
#define OFFSET_SPIMCTRL 0x4
#define OFFSET_SPICMD 0x5
#define OFFSET_SPIDCNT 0x6
#define OFFSET_SPIADDR 0x8
#define OFFSET_SPIINTSTS 0xc
#define SPIMCTRL_START 0x01
#define SPIMCTRL_BUSY 0x02
#define SPIMCTRL_DIR 0x08
struct gxp_spi;
struct gxp_spi_chip {
struct gxp_spi *spifi;
u32 cs;
};
struct gxp_spi_data {
u32 max_cs;
u32 mode_bits;
};
struct gxp_spi {
const struct gxp_spi_data *data;
void __iomem *reg_base;
void __iomem *dat_base;
void __iomem *dir_base;
struct device *dev;
struct gxp_spi_chip chips[GXP_SPI0_MAX_CHIPSELECT];
};
static void gxp_spi_set_mode(struct gxp_spi *spifi, int mode)
{
u8 value;
void __iomem *reg_base = spifi->reg_base;
value = readb(reg_base + OFFSET_SPIMCTRL);
if (mode == MANUAL_MODE) {
writeb(0x55, reg_base + OFFSET_SPICMD);
writeb(0xaa, reg_base + OFFSET_SPICMD);
value &= ~0x30;
} else {
value |= 0x30;
}
writeb(value, reg_base + OFFSET_SPIMCTRL);
}
static int gxp_spi_read_reg(struct gxp_spi_chip *chip, const struct spi_mem_op *op)
{
int ret;
struct gxp_spi *spifi = chip->spifi;
void __iomem *reg_base = spifi->reg_base;
u32 value;
value = readl(reg_base + OFFSET_SPIMCFG);
value &= ~(1 << 24);
value |= (chip->cs << 24);
value &= ~(0x07 << 16);
value &= ~(0x1f << 19);
writel(value, reg_base + OFFSET_SPIMCFG);
writel(0, reg_base + OFFSET_SPIADDR);
writeb(op->cmd.opcode, reg_base + OFFSET_SPICMD);
writew(op->data.nbytes, reg_base + OFFSET_SPIDCNT);
value = readb(reg_base + OFFSET_SPIMCTRL);
value &= ~SPIMCTRL_DIR;
value |= SPIMCTRL_START;
writeb(value, reg_base + OFFSET_SPIMCTRL);
ret = readb_poll_timeout(reg_base + OFFSET_SPIMCTRL, value,
!(value & SPIMCTRL_BUSY),
GXP_SPI_SLEEP_TIME, GXP_SPI_TIMEOUT);
if (ret) {
dev_warn(spifi->dev, "read reg busy time out\n");
return ret;
}
memcpy_fromio(op->data.buf.in, spifi->dat_base, op->data.nbytes);
return ret;
}
static int gxp_spi_write_reg(struct gxp_spi_chip *chip, const struct spi_mem_op *op)
{
int ret;
struct gxp_spi *spifi = chip->spifi;
void __iomem *reg_base = spifi->reg_base;
u32 value;
value = readl(reg_base + OFFSET_SPIMCFG);
value &= ~(1 << 24);
value |= (chip->cs << 24);
value &= ~(0x07 << 16);
value &= ~(0x1f << 19);
writel(value, reg_base + OFFSET_SPIMCFG);
writel(0, reg_base + OFFSET_SPIADDR);
writeb(op->cmd.opcode, reg_base + OFFSET_SPICMD);
memcpy_toio(spifi->dat_base, op->data.buf.out, op->data.nbytes);
writew(op->data.nbytes, reg_base + OFFSET_SPIDCNT);
value = readb(reg_base + OFFSET_SPIMCTRL);
value |= SPIMCTRL_DIR;
value |= SPIMCTRL_START;
writeb(value, reg_base + OFFSET_SPIMCTRL);
ret = readb_poll_timeout(reg_base + OFFSET_SPIMCTRL, value,
!(value & SPIMCTRL_BUSY),
GXP_SPI_SLEEP_TIME, GXP_SPI_TIMEOUT);
if (ret)
dev_warn(spifi->dev, "write reg busy time out\n");
return ret;
}
static ssize_t gxp_spi_read(struct gxp_spi_chip *chip, const struct spi_mem_op *op)
{
struct gxp_spi *spifi = chip->spifi;
u32 offset = op->addr.val;
if (chip->cs == 0)
offset += 0x4000000;
memcpy_fromio(op->data.buf.in, spifi->dir_base + offset, op->data.nbytes);
return 0;
}
static ssize_t gxp_spi_write(struct gxp_spi_chip *chip, const struct spi_mem_op *op)
{
struct gxp_spi *spifi = chip->spifi;
void __iomem *reg_base = spifi->reg_base;
u32 write_len;
u32 value;
int ret;
write_len = op->data.nbytes;
if (write_len > SPILDAT_LEN)
write_len = SPILDAT_LEN;
value = readl(reg_base + OFFSET_SPIMCFG);
value &= ~(1 << 24);
value |= (chip->cs << 24);
value &= ~(0x07 << 16);
value |= (op->addr.nbytes << 16);
value &= ~(0x1f << 19);
writel(value, reg_base + OFFSET_SPIMCFG);
writel(op->addr.val, reg_base + OFFSET_SPIADDR);
writeb(op->cmd.opcode, reg_base + OFFSET_SPICMD);
writew(write_len, reg_base + OFFSET_SPIDCNT);
memcpy_toio(spifi->dat_base, op->data.buf.out, write_len);
value = readb(reg_base + OFFSET_SPIMCTRL);
value |= SPIMCTRL_DIR;
value |= SPIMCTRL_START;
writeb(value, reg_base + OFFSET_SPIMCTRL);
ret = readb_poll_timeout(reg_base + OFFSET_SPIMCTRL, value,
!(value & SPIMCTRL_BUSY),
GXP_SPI_SLEEP_TIME, GXP_SPI_TIMEOUT);
if (ret) {
dev_warn(spifi->dev, "write busy time out\n");
return ret;
}
return write_len;
}
static int do_gxp_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
struct gxp_spi *spifi = spi_controller_get_devdata(mem->spi->controller);
struct gxp_spi_chip *chip = &spifi->chips[spi_get_chipselect(mem->spi, 0)];
int ret;
if (op->data.dir == SPI_MEM_DATA_IN) {
if (!op->addr.nbytes)
ret = gxp_spi_read_reg(chip, op);
else
ret = gxp_spi_read(chip, op);
} else {
if (!op->addr.nbytes)
ret = gxp_spi_write_reg(chip, op);
else
ret = gxp_spi_write(chip, op);
}
return ret;
}
static int gxp_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
int ret;
ret = do_gxp_exec_mem_op(mem, op);
if (ret)
dev_err(&mem->spi->dev, "operation failed: %d", ret);
return ret;
}
static const struct spi_controller_mem_ops gxp_spi_mem_ops = {
.exec_op = gxp_exec_mem_op,
};
static int gxp_spi_setup(struct spi_device *spi)
{
struct gxp_spi *spifi = spi_controller_get_devdata(spi->controller);
unsigned int cs = spi_get_chipselect(spi, 0);
struct gxp_spi_chip *chip = &spifi->chips[cs];
chip->spifi = spifi;
chip->cs = cs;
gxp_spi_set_mode(spifi, MANUAL_MODE);
return 0;
}
static int gxp_spifi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct gxp_spi_data *data;
struct spi_controller *ctlr;
struct gxp_spi *spifi;
int ret;
data = of_device_get_match_data(&pdev->dev);
ctlr = devm_spi_alloc_host(dev, sizeof(*spifi));
if (!ctlr)
return -ENOMEM;
spifi = spi_controller_get_devdata(ctlr);
platform_set_drvdata(pdev, spifi);
spifi->data = data;
spifi->dev = dev;
spifi->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(spifi->reg_base))
return PTR_ERR(spifi->reg_base);
spifi->dat_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(spifi->dat_base))
return PTR_ERR(spifi->dat_base);
spifi->dir_base = devm_platform_ioremap_resource(pdev, 2);
if (IS_ERR(spifi->dir_base))
return PTR_ERR(spifi->dir_base);
ctlr->mode_bits = data->mode_bits;
ctlr->bus_num = pdev->id;
ctlr->mem_ops = &gxp_spi_mem_ops;
ctlr->setup = gxp_spi_setup;
ctlr->num_chipselect = data->max_cs;
ctlr->dev.of_node = dev->of_node;
ret = devm_spi_register_controller(dev, ctlr);
if (ret) {
return dev_err_probe(&pdev->dev, ret,
"failed to register spi controller\n");
}
return 0;
}
static const struct gxp_spi_data gxp_spifi_data = {
.max_cs = 2,
.mode_bits = 0,
};
static const struct of_device_id gxp_spifi_match[] = {
{.compatible = "hpe,gxp-spifi", .data = &gxp_spifi_data },
{ /* null */ }
};
MODULE_DEVICE_TABLE(of, gxp_spifi_match);
static struct platform_driver gxp_spifi_driver = {
.probe = gxp_spifi_probe,
.driver = {
.name = "gxp-spifi",
.of_match_table = gxp_spifi_match,
},
};
module_platform_driver(gxp_spifi_driver);
MODULE_DESCRIPTION("HPE GXP SPI Flash Interface driver");
MODULE_AUTHOR("Nick Hawkins <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-gxp.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Driver for LM70EVAL-LLP board for the LM70 sensor
*
* Copyright (C) 2006 Kaiwan N Billimoria <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/parport.h>
#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
/*
* The LM70 communicates with a host processor using a 3-wire variant of
* the SPI/Microwire bus interface. This driver specifically supports an
* NS LM70 LLP Evaluation Board, interfacing to a PC using its parallel
* port to bitbang an SPI-parport bridge. Accordingly, this is an SPI
* master controller driver. The hwmon/lm70 driver is a "SPI protocol
* driver", layered on top of this one and usable without the lm70llp.
*
* Datasheet and Schematic:
* The LM70 is a temperature sensor chip from National Semiconductor; its
* datasheet is available at http://www.national.com/pf/LM/LM70.html
* The schematic for this particular board (the LM70EVAL-LLP) is
* available (on page 4) here:
* http://www.national.com/appinfo/tempsensors/files/LM70LLPEVALmanual.pdf
*
* Also see Documentation/spi/spi-lm70llp.rst. The SPI<->parport code here is
* (heavily) based on spi-butterfly by David Brownell.
*
* The LM70 LLP connects to the PC parallel port in the following manner:
*
* Parallel LM70 LLP
* Port Direction JP2 Header
* ----------- --------- ------------
* D0 2 - -
* D1 3 --> V+ 5
* D2 4 --> V+ 5
* D3 5 --> V+ 5
* D4 6 --> V+ 5
* D5 7 --> nCS 8
* D6 8 --> SCLK 3
* D7 9 --> SI/O 5
* GND 25 - GND 7
* Select 13 <-- SI/O 1
*
* Note that parport pin 13 actually gets inverted by the transistor
* arrangement which lets either the parport or the LM70 drive the
* SI/SO signal (see the schematic for details).
*/
#define DRVNAME "spi-lm70llp"
#define lm70_INIT 0xBE
#define SIO 0x10
#define nCS 0x20
#define SCLK 0x40
/*-------------------------------------------------------------------------*/
struct spi_lm70llp {
struct spi_bitbang bitbang;
struct parport *port;
struct pardevice *pd;
struct spi_device *spidev_lm70;
struct spi_board_info info;
//struct device *dev;
};
/* REVISIT : ugly global ; provides "exclusive open" facility */
static struct spi_lm70llp *lm70llp;
/*-------------------------------------------------------------------*/
static inline struct spi_lm70llp *spidev_to_pp(struct spi_device *spi)
{
return spi->controller_data;
}
/*---------------------- LM70 LLP eval board-specific inlines follow */
/* NOTE: we don't actually need to reread the output values, since they'll
* still be what we wrote before. Plus, going through parport builds in
* a ~1ms/operation delay; these SPI transfers could easily be faster.
*/
static inline void deassertCS(struct spi_lm70llp *pp)
{
u8 data = parport_read_data(pp->port);
data &= ~0x80; /* pull D7/SI-out low while de-asserted */
parport_write_data(pp->port, data | nCS);
}
static inline void assertCS(struct spi_lm70llp *pp)
{
u8 data = parport_read_data(pp->port);
data |= 0x80; /* pull D7/SI-out high so lm70 drives SO-in */
parport_write_data(pp->port, data & ~nCS);
}
static inline void clkHigh(struct spi_lm70llp *pp)
{
u8 data = parport_read_data(pp->port);
parport_write_data(pp->port, data | SCLK);
}
static inline void clkLow(struct spi_lm70llp *pp)
{
u8 data = parport_read_data(pp->port);
parport_write_data(pp->port, data & ~SCLK);
}
/*------------------------- SPI-LM70-specific inlines ----------------------*/
static inline void spidelay(unsigned d)
{
udelay(d);
}
static inline void setsck(struct spi_device *s, int is_on)
{
struct spi_lm70llp *pp = spidev_to_pp(s);
if (is_on)
clkHigh(pp);
else
clkLow(pp);
}
static inline void setmosi(struct spi_device *s, int is_on)
{
/* FIXME update D7 ... this way we can put the chip
* into shutdown mode and read the manufacturer ID,
* but we can't put it back into operational mode.
*/
}
/*
* getmiso:
* Why do we return 0 when the SIO line is high and vice-versa?
* The LM70 eval board from NS (which this driver drives) is wired so that
* when the LM70's SIO line goes high, a transistor pulls parport pin 13
* low, and vice-versa; hence the inverted return value.
*/
static inline int getmiso(struct spi_device *s)
{
struct spi_lm70llp *pp = spidev_to_pp(s);
return ((SIO == (parport_read_status(pp->port) & SIO)) ? 0 : 1);
}
/*--------------------------------------------------------------------*/
#include "spi-bitbang-txrx.h"
static void lm70_chipselect(struct spi_device *spi, int value)
{
struct spi_lm70llp *pp = spidev_to_pp(spi);
if (value)
assertCS(pp);
else
deassertCS(pp);
}
/*
* Our actual bitbanger routine.
*/
static u32 lm70_txrx(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits,
unsigned flags)
{
return bitbang_txrx_be_cpha0(spi, nsecs, 0, flags, word, bits);
}
static void spi_lm70llp_attach(struct parport *p)
{
struct pardevice *pd;
struct spi_lm70llp *pp;
struct spi_master *master;
int status;
struct pardev_cb lm70llp_cb;
if (lm70llp) {
pr_warn("spi_lm70llp instance already loaded. Aborting.\n");
return;
}
/* TODO: this just _assumes_ a lm70 is there ... no probe;
* the lm70 driver could verify it, reading the manf ID.
*/
master = spi_alloc_master(p->physport->dev, sizeof(*pp));
if (!master) {
status = -ENOMEM;
goto out_fail;
}
pp = spi_master_get_devdata(master);
/*
* SPI and bitbang hookup.
*/
pp->bitbang.master = master;
pp->bitbang.chipselect = lm70_chipselect;
pp->bitbang.txrx_word[SPI_MODE_0] = lm70_txrx;
pp->bitbang.flags = SPI_3WIRE;
/*
* Parport hookup
*/
pp->port = p;
memset(&lm70llp_cb, 0, sizeof(lm70llp_cb));
lm70llp_cb.private = pp;
lm70llp_cb.flags = PARPORT_FLAG_EXCL;
pd = parport_register_dev_model(p, DRVNAME, &lm70llp_cb, 0);
if (!pd) {
status = -ENOMEM;
goto out_free_master;
}
pp->pd = pd;
status = parport_claim(pd);
if (status < 0)
goto out_parport_unreg;
/*
* Start SPI ...
*/
status = spi_bitbang_start(&pp->bitbang);
if (status < 0) {
dev_warn(&pd->dev, "spi_bitbang_start failed with status %d\n",
status);
goto out_off_and_release;
}
/*
* The modalias name MUST match the device_driver name
* for the bus glue code to match and subsequently bind them.
* We are binding to the generic drivers/hwmon/lm70.c device
* driver.
*/
strcpy(pp->info.modalias, "lm70");
pp->info.max_speed_hz = 6 * 1000 * 1000;
pp->info.chip_select = 0;
pp->info.mode = SPI_3WIRE | SPI_MODE_0;
/* power up the chip, and let the LM70 control SI/SO */
parport_write_data(pp->port, lm70_INIT);
/* Enable access to our primary data structure via
* the board info's (void *)controller_data.
*/
pp->info.controller_data = pp;
pp->spidev_lm70 = spi_new_device(pp->bitbang.master, &pp->info);
if (pp->spidev_lm70)
dev_dbg(&pp->spidev_lm70->dev, "spidev_lm70 at %s\n",
dev_name(&pp->spidev_lm70->dev));
else {
dev_warn(&pd->dev, "spi_new_device failed\n");
status = -ENODEV;
goto out_bitbang_stop;
}
pp->spidev_lm70->bits_per_word = 8;
lm70llp = pp;
return;
out_bitbang_stop:
spi_bitbang_stop(&pp->bitbang);
out_off_and_release:
/* power down */
parport_write_data(pp->port, 0);
mdelay(10);
parport_release(pp->pd);
out_parport_unreg:
parport_unregister_device(pd);
out_free_master:
spi_master_put(master);
out_fail:
pr_info("spi_lm70llp probe fail, status %d\n", status);
}
static void spi_lm70llp_detach(struct parport *p)
{
struct spi_lm70llp *pp;
if (!lm70llp || lm70llp->port != p)
return;
pp = lm70llp;
spi_bitbang_stop(&pp->bitbang);
/* power down */
parport_write_data(pp->port, 0);
parport_release(pp->pd);
parport_unregister_device(pp->pd);
spi_master_put(pp->bitbang.master);
lm70llp = NULL;
}
static struct parport_driver spi_lm70llp_drv = {
.name = DRVNAME,
.match_port = spi_lm70llp_attach,
.detach = spi_lm70llp_detach,
.devmodel = true,
};
module_parport_driver(spi_lm70llp_drv);
MODULE_AUTHOR("Kaiwan N Billimoria <[email protected]>");
MODULE_DESCRIPTION(
"Parport adapter for the National Semiconductor LM70 LLP eval board");
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-lm70llp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Driver for Atmel QSPI Controller
*
* Copyright (C) 2015 Atmel Corporation
* Copyright (C) 2018 Cryptera A/S
*
* Author: Cyrille Pitchen <[email protected]>
* Author: Piotr Bugalski <[email protected]>
*
* This driver is based on drivers/mtd/spi-nor/fsl-quadspi.c from Freescale.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi-mem.h>
/* QSPI register offsets */
#define QSPI_CR 0x0000 /* Control Register */
#define QSPI_MR 0x0004 /* Mode Register */
#define QSPI_RD 0x0008 /* Receive Data Register */
#define QSPI_TD 0x000c /* Transmit Data Register */
#define QSPI_SR 0x0010 /* Status Register */
#define QSPI_IER 0x0014 /* Interrupt Enable Register */
#define QSPI_IDR 0x0018 /* Interrupt Disable Register */
#define QSPI_IMR 0x001c /* Interrupt Mask Register */
#define QSPI_SCR 0x0020 /* Serial Clock Register */
#define QSPI_IAR 0x0030 /* Instruction Address Register */
#define QSPI_ICR 0x0034 /* Instruction Code Register */
#define QSPI_WICR 0x0034 /* Write Instruction Code Register */
#define QSPI_IFR 0x0038 /* Instruction Frame Register */
#define QSPI_RICR 0x003C /* Read Instruction Code Register */
#define QSPI_SMR 0x0040 /* Scrambling Mode Register */
#define QSPI_SKR 0x0044 /* Scrambling Key Register */
#define QSPI_WPMR 0x00E4 /* Write Protection Mode Register */
#define QSPI_WPSR 0x00E8 /* Write Protection Status Register */
#define QSPI_VERSION 0x00FC /* Version Register */
/* Bitfields in QSPI_CR (Control Register) */
#define QSPI_CR_QSPIEN BIT(0)
#define QSPI_CR_QSPIDIS BIT(1)
#define QSPI_CR_SWRST BIT(7)
#define QSPI_CR_LASTXFER BIT(24)
/* Bitfields in QSPI_MR (Mode Register) */
#define QSPI_MR_SMM BIT(0)
#define QSPI_MR_LLB BIT(1)
#define QSPI_MR_WDRBT BIT(2)
#define QSPI_MR_SMRM BIT(3)
#define QSPI_MR_CSMODE_MASK GENMASK(5, 4)
#define QSPI_MR_CSMODE_NOT_RELOADED (0 << 4)
#define QSPI_MR_CSMODE_LASTXFER (1 << 4)
#define QSPI_MR_CSMODE_SYSTEMATICALLY (2 << 4)
#define QSPI_MR_NBBITS_MASK GENMASK(11, 8)
#define QSPI_MR_NBBITS(n) ((((n) - 8) << 8) & QSPI_MR_NBBITS_MASK)
#define QSPI_MR_DLYBCT_MASK GENMASK(23, 16)
#define QSPI_MR_DLYBCT(n) (((n) << 16) & QSPI_MR_DLYBCT_MASK)
#define QSPI_MR_DLYCS_MASK GENMASK(31, 24)
#define QSPI_MR_DLYCS(n) (((n) << 24) & QSPI_MR_DLYCS_MASK)
/* Bitfields in QSPI_SR/QSPI_IER/QSPI_IDR/QSPI_IMR */
#define QSPI_SR_RDRF BIT(0)
#define QSPI_SR_TDRE BIT(1)
#define QSPI_SR_TXEMPTY BIT(2)
#define QSPI_SR_OVRES BIT(3)
#define QSPI_SR_CSR BIT(8)
#define QSPI_SR_CSS BIT(9)
#define QSPI_SR_INSTRE BIT(10)
#define QSPI_SR_QSPIENS BIT(24)
#define QSPI_SR_CMD_COMPLETED (QSPI_SR_INSTRE | QSPI_SR_CSR)
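/*
 * A command is considered complete only when both the instruction-end
 * (INSTRE) and chip-select-rise (CSR) status flags are set.
 */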
/* Bitfields in QSPI_SCR (Serial Clock Register) */
#define QSPI_SCR_CPOL BIT(0)
#define QSPI_SCR_CPHA BIT(1)
#define QSPI_SCR_SCBR_MASK GENMASK(15, 8)
#define QSPI_SCR_SCBR(n) (((n) << 8) & QSPI_SCR_SCBR_MASK)
#define QSPI_SCR_DLYBS_MASK GENMASK(23, 16)
#define QSPI_SCR_DLYBS(n) (((n) << 16) & QSPI_SCR_DLYBS_MASK)
/* Bitfields in QSPI_ICR (Read/Write Instruction Code Register) */
#define QSPI_ICR_INST_MASK GENMASK(7, 0)
#define QSPI_ICR_INST(inst) (((inst) << 0) & QSPI_ICR_INST_MASK)
#define QSPI_ICR_OPT_MASK GENMASK(23, 16)
#define QSPI_ICR_OPT(opt) (((opt) << 16) & QSPI_ICR_OPT_MASK)
/* Bitfields in QSPI_IFR (Instruction Frame Register) */
#define QSPI_IFR_WIDTH_MASK GENMASK(2, 0)
#define QSPI_IFR_WIDTH_SINGLE_BIT_SPI (0 << 0)
#define QSPI_IFR_WIDTH_DUAL_OUTPUT (1 << 0)
#define QSPI_IFR_WIDTH_QUAD_OUTPUT (2 << 0)
#define QSPI_IFR_WIDTH_DUAL_IO (3 << 0)
#define QSPI_IFR_WIDTH_QUAD_IO (4 << 0)
#define QSPI_IFR_WIDTH_DUAL_CMD (5 << 0)
#define QSPI_IFR_WIDTH_QUAD_CMD (6 << 0)
#define QSPI_IFR_INSTEN BIT(4)
#define QSPI_IFR_ADDREN BIT(5)
#define QSPI_IFR_OPTEN BIT(6)
#define QSPI_IFR_DATAEN BIT(7)
#define QSPI_IFR_OPTL_MASK GENMASK(9, 8)
#define QSPI_IFR_OPTL_1BIT (0 << 8)
#define QSPI_IFR_OPTL_2BIT (1 << 8)
#define QSPI_IFR_OPTL_4BIT (2 << 8)
#define QSPI_IFR_OPTL_8BIT (3 << 8)
#define QSPI_IFR_ADDRL BIT(10)
#define QSPI_IFR_TFRTYP_MEM BIT(12)
#define QSPI_IFR_SAMA5D2_WRITE_TRSFR BIT(13)
#define QSPI_IFR_CRM BIT(14)
#define QSPI_IFR_NBDUM_MASK GENMASK(20, 16)
#define QSPI_IFR_NBDUM(n) (((n) << 16) & QSPI_IFR_NBDUM_MASK)
#define QSPI_IFR_APBTFRTYP_READ BIT(24) /* Defined in SAM9X60 */
/* Bitfields in QSPI_SMR (Scrambling Mode Register) */
#define QSPI_SMR_SCREN BIT(0)
#define QSPI_SMR_RVDIS BIT(1)
/* Bitfields in QSPI_WPMR (Write Protection Mode Register) */
#define QSPI_WPMR_WPEN BIT(0)
#define QSPI_WPMR_WPKEY_MASK GENMASK(31, 8)
#define QSPI_WPMR_WPKEY(wpkey) (((wpkey) << 8) & QSPI_WPMR_WPKEY_MASK)
/* Bitfields in QSPI_WPSR (Write Protection Status Register) */
#define QSPI_WPSR_WPVS BIT(0)
#define QSPI_WPSR_WPVSRC_MASK GENMASK(15, 8)
#define QSPI_WPSR_WPVSRC(src) (((src) << 8) & QSPI_WPSR_WPVSRC)
struct atmel_qspi_caps {
bool has_qspick;
bool has_ricr;
};
struct atmel_qspi {
void __iomem *regs;
void __iomem *mem;
struct clk *pclk;
struct clk *qspick;
struct platform_device *pdev;
const struct atmel_qspi_caps *caps;
resource_size_t mmap_size;
u32 pending;
u32 mr;
u32 scr;
struct completion cmd_completion;
};
struct atmel_qspi_mode {
u8 cmd_buswidth;
u8 addr_buswidth;
u8 data_buswidth;
u32 config;
};
static const struct atmel_qspi_mode atmel_qspi_modes[] = {
{ 1, 1, 1, QSPI_IFR_WIDTH_SINGLE_BIT_SPI },
{ 1, 1, 2, QSPI_IFR_WIDTH_DUAL_OUTPUT },
{ 1, 1, 4, QSPI_IFR_WIDTH_QUAD_OUTPUT },
{ 1, 2, 2, QSPI_IFR_WIDTH_DUAL_IO },
{ 1, 4, 4, QSPI_IFR_WIDTH_QUAD_IO },
{ 2, 2, 2, QSPI_IFR_WIDTH_DUAL_CMD },
{ 4, 4, 4, QSPI_IFR_WIDTH_QUAD_CMD },
};
#ifdef VERBOSE_DEBUG
static const char *atmel_qspi_reg_name(u32 offset, char *tmp, size_t sz)
{
switch (offset) {
case QSPI_CR:
return "CR";
case QSPI_MR:
return "MR";
case QSPI_RD:
return "MR";
case QSPI_TD:
return "TD";
case QSPI_SR:
return "SR";
case QSPI_IER:
return "IER";
case QSPI_IDR:
return "IDR";
case QSPI_IMR:
return "IMR";
case QSPI_SCR:
return "SCR";
case QSPI_IAR:
return "IAR";
case QSPI_ICR:
return "ICR/WICR";
case QSPI_IFR:
return "IFR";
case QSPI_RICR:
return "RICR";
case QSPI_SMR:
return "SMR";
case QSPI_SKR:
return "SKR";
case QSPI_WPMR:
return "WPMR";
case QSPI_WPSR:
return "WPSR";
case QSPI_VERSION:
return "VERSION";
default:
snprintf(tmp, sz, "0x%02x", offset);
break;
}
return tmp;
}
#endif /* VERBOSE_DEBUG */
static u32 atmel_qspi_read(struct atmel_qspi *aq, u32 offset)
{
u32 value = readl_relaxed(aq->regs + offset);
#ifdef VERBOSE_DEBUG
char tmp[8];
dev_vdbg(&aq->pdev->dev, "read 0x%08x from %s\n", value,
atmel_qspi_reg_name(offset, tmp, sizeof(tmp)));
#endif /* VERBOSE_DEBUG */
return value;
}
static void atmel_qspi_write(u32 value, struct atmel_qspi *aq, u32 offset)
{
#ifdef VERBOSE_DEBUG
char tmp[8];
dev_vdbg(&aq->pdev->dev, "write 0x%08x into %s\n", value,
atmel_qspi_reg_name(offset, tmp, sizeof(tmp)));
#endif /* VERBOSE_DEBUG */
writel_relaxed(value, aq->regs + offset);
}
static inline bool atmel_qspi_is_compatible(const struct spi_mem_op *op,
const struct atmel_qspi_mode *mode)
{
if (op->cmd.buswidth != mode->cmd_buswidth)
return false;
if (op->addr.nbytes && op->addr.buswidth != mode->addr_buswidth)
return false;
if (op->data.nbytes && op->data.buswidth != mode->data_buswidth)
return false;
return true;
}
static int atmel_qspi_find_mode(const struct spi_mem_op *op)
{
u32 i;
for (i = 0; i < ARRAY_SIZE(atmel_qspi_modes); i++)
if (atmel_qspi_is_compatible(op, &atmel_qspi_modes[i]))
return i;
return -ENOTSUPP;
}
static bool atmel_qspi_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
if (!spi_mem_default_supports_op(mem, op))
return false;
if (atmel_qspi_find_mode(op) < 0)
return false;
/* special case not supported by hardware */
if (op->addr.nbytes == 2 && op->cmd.buswidth != op->addr.buswidth &&
op->dummy.nbytes == 0)
return false;
return true;
}
static int atmel_qspi_set_cfg(struct atmel_qspi *aq,
const struct spi_mem_op *op, u32 *offset)
{
u32 iar, icr, ifr;
u32 dummy_cycles = 0;
int mode;
iar = 0;
icr = QSPI_ICR_INST(op->cmd.opcode);
ifr = QSPI_IFR_INSTEN;
mode = atmel_qspi_find_mode(op);
if (mode < 0)
return mode;
ifr |= atmel_qspi_modes[mode].config;
if (op->dummy.nbytes)
dummy_cycles = op->dummy.nbytes * 8 / op->dummy.buswidth;
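/*
 * e.g. one dummy byte clocked on four data lines amounts to
 * 1 * 8 / 4 = 2 dummy cycles.
 */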
/*
* The controller allows 24- and 32-bit addressing, while NAND flash
* requires 16-bit addresses. 8-bit addresses are handled through the
* option field. For 16-bit addresses the workaround depends on the number
* of requested dummy cycles: if there are 8 or more dummy cycles, the
* address is shifted up and sent together with the first dummy byte.
* Otherwise the opcode field is disabled and the first byte of the address
* carries the command opcode (this works only if the opcode and address
* use the same buswidth). The remaining limitation is a 16-bit address
* without enough dummy cycles when the opcode uses a different buswidth
* than the address.
*/
if (op->addr.buswidth) {
switch (op->addr.nbytes) {
case 0:
break;
case 1:
ifr |= QSPI_IFR_OPTEN | QSPI_IFR_OPTL_8BIT;
icr |= QSPI_ICR_OPT(op->addr.val & 0xff);
break;
case 2:
if (dummy_cycles < 8 / op->addr.buswidth) {
ifr &= ~QSPI_IFR_INSTEN;
ifr |= QSPI_IFR_ADDREN;
iar = (op->cmd.opcode << 16) |
(op->addr.val & 0xffff);
} else {
ifr |= QSPI_IFR_ADDREN;
iar = (op->addr.val << 8) & 0xffffff;
dummy_cycles -= 8 / op->addr.buswidth;
}
break;
case 3:
ifr |= QSPI_IFR_ADDREN;
iar = op->addr.val & 0xffffff;
break;
case 4:
ifr |= QSPI_IFR_ADDREN | QSPI_IFR_ADDRL;
iar = op->addr.val & 0x7ffffff;
break;
default:
return -ENOTSUPP;
}
}
/* offset of the data access in the QSPI memory space */
*offset = iar;
/* Set number of dummy cycles */
if (dummy_cycles)
ifr |= QSPI_IFR_NBDUM(dummy_cycles);
/* Set data enable and data transfer type. */
if (op->data.nbytes) {
ifr |= QSPI_IFR_DATAEN;
if (op->addr.nbytes)
ifr |= QSPI_IFR_TFRTYP_MEM;
}
/*
* If the QSPI controller is set in regular SPI mode, set it in
* Serial Memory Mode (SMM).
*/
if (aq->mr != QSPI_MR_SMM) {
atmel_qspi_write(QSPI_MR_SMM, aq, QSPI_MR);
aq->mr = QSPI_MR_SMM;
}
/* Clear pending interrupts */
(void)atmel_qspi_read(aq, QSPI_SR);
/* Set QSPI Instruction Frame registers. */
if (op->addr.nbytes && !op->data.nbytes)
atmel_qspi_write(iar, aq, QSPI_IAR);
if (aq->caps->has_ricr) {
if (op->data.dir == SPI_MEM_DATA_IN)
atmel_qspi_write(icr, aq, QSPI_RICR);
else
atmel_qspi_write(icr, aq, QSPI_WICR);
} else {
if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
ifr |= QSPI_IFR_SAMA5D2_WRITE_TRSFR;
atmel_qspi_write(icr, aq, QSPI_ICR);
}
atmel_qspi_write(ifr, aq, QSPI_IFR);
return 0;
}
static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->controller);
u32 sr, offset;
int err;
/*
* Check if the address exceeds the MMIO window size. An improvement
* would be to add support for regular SPI mode and fall back to it
* when the flash memories overrun the controller's memory space.
*/
if (op->addr.val + op->data.nbytes > aq->mmap_size)
return -ENOTSUPP;
err = pm_runtime_resume_and_get(&aq->pdev->dev);
if (err < 0)
return err;
err = atmel_qspi_set_cfg(aq, op, &offset);
if (err)
goto pm_runtime_put;
/* Skip to the final steps if there is no data */
if (op->data.nbytes) {
/* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */
(void)atmel_qspi_read(aq, QSPI_IFR);
/* Send/Receive data */
if (op->data.dir == SPI_MEM_DATA_IN)
memcpy_fromio(op->data.buf.in, aq->mem + offset,
op->data.nbytes);
else
memcpy_toio(aq->mem + offset, op->data.buf.out,
op->data.nbytes);
/* Release the chip-select */
atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR);
}
/* Poll INSTRuction End status */
sr = atmel_qspi_read(aq, QSPI_SR);
if ((sr & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED)
goto pm_runtime_put;
/* Wait for INSTRuction End interrupt */
reinit_completion(&aq->cmd_completion);
aq->pending = sr & QSPI_SR_CMD_COMPLETED;
atmel_qspi_write(QSPI_SR_CMD_COMPLETED, aq, QSPI_IER);
if (!wait_for_completion_timeout(&aq->cmd_completion,
msecs_to_jiffies(1000)))
err = -ETIMEDOUT;
atmel_qspi_write(QSPI_SR_CMD_COMPLETED, aq, QSPI_IDR);
pm_runtime_put:
pm_runtime_mark_last_busy(&aq->pdev->dev);
pm_runtime_put_autosuspend(&aq->pdev->dev);
return err;
}
static const char *atmel_qspi_get_name(struct spi_mem *spimem)
{
return dev_name(spimem->spi->dev.parent);
}
static const struct spi_controller_mem_ops atmel_qspi_mem_ops = {
.supports_op = atmel_qspi_supports_op,
.exec_op = atmel_qspi_exec_op,
.get_name = atmel_qspi_get_name
};
static int atmel_qspi_setup(struct spi_device *spi)
{
struct spi_controller *ctrl = spi->controller;
struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
unsigned long src_rate;
u32 scbr;
int ret;
if (ctrl->busy)
return -EBUSY;
if (!spi->max_speed_hz)
return -EINVAL;
src_rate = clk_get_rate(aq->pclk);
if (!src_rate)
return -EINVAL;
/* Compute the QSPI baudrate */
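	/*
	 * SCBR acts as a divider: the serial clock runs at src_rate / (SCBR + 1),
	 * hence the decrement after the round-up division.
	 */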
scbr = DIV_ROUND_UP(src_rate, spi->max_speed_hz);
if (scbr > 0)
scbr--;
ret = pm_runtime_resume_and_get(ctrl->dev.parent);
if (ret < 0)
return ret;
aq->scr = QSPI_SCR_SCBR(scbr);
atmel_qspi_write(aq->scr, aq, QSPI_SCR);
pm_runtime_mark_last_busy(ctrl->dev.parent);
pm_runtime_put_autosuspend(ctrl->dev.parent);
return 0;
}
static int atmel_qspi_set_cs_timing(struct spi_device *spi)
{
struct spi_controller *ctrl = spi->controller;
struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
unsigned long clk_rate;
u32 cs_setup;
int delay;
int ret;
delay = spi_delay_to_ns(&spi->cs_setup, NULL);
if (delay <= 0)
return delay;
clk_rate = clk_get_rate(aq->pclk);
if (!clk_rate)
return -EINVAL;
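	/*
	 * Convert the requested setup delay from nanoseconds to peripheral
	 * clock cycles, rounding up at each step.
	 */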
cs_setup = DIV_ROUND_UP((delay * DIV_ROUND_UP(clk_rate, 1000000)),
1000);
ret = pm_runtime_resume_and_get(ctrl->dev.parent);
if (ret < 0)
return ret;
aq->scr |= QSPI_SCR_DLYBS(cs_setup);
atmel_qspi_write(aq->scr, aq, QSPI_SCR);
pm_runtime_mark_last_busy(ctrl->dev.parent);
pm_runtime_put_autosuspend(ctrl->dev.parent);
return 0;
}
static void atmel_qspi_init(struct atmel_qspi *aq)
{
/* Reset the QSPI controller */
atmel_qspi_write(QSPI_CR_SWRST, aq, QSPI_CR);
/* Set the QSPI controller by default in Serial Memory Mode */
atmel_qspi_write(QSPI_MR_SMM, aq, QSPI_MR);
aq->mr = QSPI_MR_SMM;
/* Enable the QSPI controller */
atmel_qspi_write(QSPI_CR_QSPIEN, aq, QSPI_CR);
}
static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id)
{
struct atmel_qspi *aq = dev_id;
u32 status, mask, pending;
status = atmel_qspi_read(aq, QSPI_SR);
mask = atmel_qspi_read(aq, QSPI_IMR);
pending = status & mask;
if (!pending)
return IRQ_NONE;
aq->pending |= pending;
if ((aq->pending & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED)
complete(&aq->cmd_completion);
return IRQ_HANDLED;
}
static int atmel_qspi_probe(struct platform_device *pdev)
{
struct spi_controller *ctrl;
struct atmel_qspi *aq;
struct resource *res;
int irq, err = 0;
ctrl = devm_spi_alloc_host(&pdev->dev, sizeof(*aq));
if (!ctrl)
return -ENOMEM;
ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD;
ctrl->setup = atmel_qspi_setup;
ctrl->set_cs_timing = atmel_qspi_set_cs_timing;
ctrl->bus_num = -1;
ctrl->mem_ops = &atmel_qspi_mem_ops;
ctrl->num_chipselect = 1;
ctrl->dev.of_node = pdev->dev.of_node;
platform_set_drvdata(pdev, ctrl);
aq = spi_controller_get_devdata(ctrl);
init_completion(&aq->cmd_completion);
aq->pdev = pdev;
/* Map the registers */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_base");
aq->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(aq->regs)) {
dev_err(&pdev->dev, "missing registers\n");
return PTR_ERR(aq->regs);
}
/* Map the AHB memory */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mmap");
aq->mem = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(aq->mem)) {
dev_err(&pdev->dev, "missing AHB memory\n");
return PTR_ERR(aq->mem);
}
aq->mmap_size = resource_size(res);
/* Get the peripheral clock */
aq->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(aq->pclk))
aq->pclk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(aq->pclk)) {
dev_err(&pdev->dev, "missing peripheral clock\n");
return PTR_ERR(aq->pclk);
}
/* Enable the peripheral clock */
err = clk_prepare_enable(aq->pclk);
if (err) {
dev_err(&pdev->dev, "failed to enable the peripheral clock\n");
return err;
}
aq->caps = of_device_get_match_data(&pdev->dev);
if (!aq->caps) {
dev_err(&pdev->dev, "Could not retrieve QSPI caps\n");
err = -EINVAL;
goto disable_pclk;
}
if (aq->caps->has_qspick) {
/* Get the QSPI system clock */
aq->qspick = devm_clk_get(&pdev->dev, "qspick");
if (IS_ERR(aq->qspick)) {
dev_err(&pdev->dev, "missing system clock\n");
err = PTR_ERR(aq->qspick);
goto disable_pclk;
}
/* Enable the QSPI system clock */
err = clk_prepare_enable(aq->qspick);
if (err) {
dev_err(&pdev->dev,
"failed to enable the QSPI system clock\n");
goto disable_pclk;
}
}
/* Request the IRQ */
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
err = irq;
goto disable_qspick;
}
err = devm_request_irq(&pdev->dev, irq, atmel_qspi_interrupt,
0, dev_name(&pdev->dev), aq);
if (err)
goto disable_qspick;
pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
pm_runtime_get_noresume(&pdev->dev);
atmel_qspi_init(aq);
err = spi_register_controller(ctrl);
if (err) {
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
goto disable_qspick;
}
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
return 0;
disable_qspick:
clk_disable_unprepare(aq->qspick);
disable_pclk:
clk_disable_unprepare(aq->pclk);
return err;
}
static void atmel_qspi_remove(struct platform_device *pdev)
{
struct spi_controller *ctrl = platform_get_drvdata(pdev);
struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
int ret;
spi_unregister_controller(ctrl);
ret = pm_runtime_get_sync(&pdev->dev);
if (ret >= 0) {
atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
clk_disable(aq->qspick);
clk_disable(aq->pclk);
} else {
/*
* atmel_qspi_runtime_{suspend,resume} just disable and enable
		 * the two clks respectively. So after a failed resume the clocks
		 * are already off, and we skip the hardware access and the
		 * redundant clock disable.
*/
dev_warn(&pdev->dev, "Failed to resume device on remove\n");
}
clk_unprepare(aq->qspick);
clk_unprepare(aq->pclk);
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
}
static int __maybe_unused atmel_qspi_suspend(struct device *dev)
{
struct spi_controller *ctrl = dev_get_drvdata(dev);
struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
int ret;
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return ret;
atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
pm_runtime_mark_last_busy(dev);
pm_runtime_force_suspend(dev);
clk_unprepare(aq->qspick);
clk_unprepare(aq->pclk);
return 0;
}
static int __maybe_unused atmel_qspi_resume(struct device *dev)
{
struct spi_controller *ctrl = dev_get_drvdata(dev);
struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
int ret;
clk_prepare(aq->pclk);
clk_prepare(aq->qspick);
ret = pm_runtime_force_resume(dev);
if (ret < 0)
return ret;
atmel_qspi_init(aq);
atmel_qspi_write(aq->scr, aq, QSPI_SCR);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
}
static int __maybe_unused atmel_qspi_runtime_suspend(struct device *dev)
{
struct spi_controller *ctrl = dev_get_drvdata(dev);
struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
clk_disable(aq->qspick);
clk_disable(aq->pclk);
return 0;
}
static int __maybe_unused atmel_qspi_runtime_resume(struct device *dev)
{
struct spi_controller *ctrl = dev_get_drvdata(dev);
struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
int ret;
ret = clk_enable(aq->pclk);
if (ret)
return ret;
ret = clk_enable(aq->qspick);
if (ret)
clk_disable(aq->pclk);
return ret;
}
static const struct dev_pm_ops __maybe_unused atmel_qspi_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(atmel_qspi_suspend, atmel_qspi_resume)
SET_RUNTIME_PM_OPS(atmel_qspi_runtime_suspend,
atmel_qspi_runtime_resume, NULL)
};
static const struct atmel_qspi_caps atmel_sama5d2_qspi_caps = {};
static const struct atmel_qspi_caps atmel_sam9x60_qspi_caps = {
.has_qspick = true,
.has_ricr = true,
};
static const struct of_device_id atmel_qspi_dt_ids[] = {
{
.compatible = "atmel,sama5d2-qspi",
.data = &atmel_sama5d2_qspi_caps,
},
{
.compatible = "microchip,sam9x60-qspi",
.data = &atmel_sam9x60_qspi_caps,
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_qspi_dt_ids);
static struct platform_driver atmel_qspi_driver = {
.driver = {
.name = "atmel_qspi",
.of_match_table = atmel_qspi_dt_ids,
.pm = pm_ptr(&atmel_qspi_pm_ops),
},
.probe = atmel_qspi_probe,
.remove_new = atmel_qspi_remove,
};
module_platform_driver(atmel_qspi_driver);
MODULE_AUTHOR("Cyrille Pitchen <[email protected]>");
MODULE_AUTHOR("Piotr Bugalski <[email protected]");
MODULE_DESCRIPTION("Atmel QSPI Controller driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/spi/atmel-quadspi.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SH SPI bus driver
*
* Copyright (C) 2011 Renesas Solutions Corp.
*
* Based on pxa2xx_spi.c:
* Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/spi/spi.h>
#define SPI_SH_TBR 0x00
#define SPI_SH_RBR 0x00
#define SPI_SH_CR1 0x08
#define SPI_SH_CR2 0x10
#define SPI_SH_CR3 0x18
#define SPI_SH_CR4 0x20
#define SPI_SH_CR5 0x28
/* CR1 */
#define SPI_SH_TBE 0x80
#define SPI_SH_TBF 0x40
#define SPI_SH_RBE 0x20
#define SPI_SH_RBF 0x10
#define SPI_SH_PFONRD 0x08
#define SPI_SH_SSDB 0x04
#define SPI_SH_SSD 0x02
#define SPI_SH_SSA 0x01
/* CR2 */
#define SPI_SH_RSTF 0x80
#define SPI_SH_LOOPBK 0x40
#define SPI_SH_CPOL 0x20
#define SPI_SH_CPHA 0x10
#define SPI_SH_L1M0 0x08
/* CR3 */
#define SPI_SH_MAX_BYTE 0xFF
/* CR4 */
#define SPI_SH_TBEI 0x80
#define SPI_SH_TBFI 0x40
#define SPI_SH_RBEI 0x20
#define SPI_SH_RBFI 0x10
#define SPI_SH_WPABRT 0x04
#define SPI_SH_SSS 0x01
/* CR8 */
#define SPI_SH_P1L0 0x80
#define SPI_SH_PP1L0 0x40
#define SPI_SH_MUXI 0x20
#define SPI_SH_MUXIRQ 0x10
#define SPI_SH_FIFO_SIZE 32
#define SPI_SH_SEND_TIMEOUT (3 * HZ)
#define SPI_SH_RECEIVE_TIMEOUT (HZ >> 3)
#undef DEBUG
struct spi_sh_data {
void __iomem *addr;
int irq;
struct spi_controller *host;
unsigned long cr1;
wait_queue_head_t wait;
int width;
};
static void spi_sh_write(struct spi_sh_data *ss, unsigned long data,
unsigned long offset)
{
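	/*
	 * The register offsets above assume the 32-bit-wide layout; on the
	 * 8-bit-wide variant the registers are packed more tightly, so the
	 * offset is scaled down by four.
	 */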
if (ss->width == 8)
iowrite8(data, ss->addr + (offset >> 2));
else if (ss->width == 32)
iowrite32(data, ss->addr + offset);
}
static unsigned long spi_sh_read(struct spi_sh_data *ss, unsigned long offset)
{
if (ss->width == 8)
return ioread8(ss->addr + (offset >> 2));
else if (ss->width == 32)
return ioread32(ss->addr + offset);
else
return 0;
}
static void spi_sh_set_bit(struct spi_sh_data *ss, unsigned long val,
unsigned long offset)
{
unsigned long tmp;
tmp = spi_sh_read(ss, offset);
tmp |= val;
spi_sh_write(ss, tmp, offset);
}
static void spi_sh_clear_bit(struct spi_sh_data *ss, unsigned long val,
unsigned long offset)
{
unsigned long tmp;
tmp = spi_sh_read(ss, offset);
tmp &= ~val;
spi_sh_write(ss, tmp, offset);
}
static void clear_fifo(struct spi_sh_data *ss)
{
spi_sh_set_bit(ss, SPI_SH_RSTF, SPI_SH_CR2);
spi_sh_clear_bit(ss, SPI_SH_RSTF, SPI_SH_CR2);
}
static int spi_sh_wait_receive_buffer(struct spi_sh_data *ss)
{
int timeout = 100000;
while (spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_RBE) {
udelay(10);
if (timeout-- < 0)
return -ETIMEDOUT;
}
return 0;
}
static int spi_sh_wait_write_buffer_empty(struct spi_sh_data *ss)
{
int timeout = 100000;
while (!(spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_TBE)) {
udelay(10);
if (timeout-- < 0)
return -ETIMEDOUT;
}
return 0;
}
static int spi_sh_send(struct spi_sh_data *ss, struct spi_message *mesg,
struct spi_transfer *t)
{
int i, retval = 0;
int remain = t->len;
int cur_len;
unsigned char *data;
long ret;
if (t->len)
spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1);
data = (unsigned char *)t->tx_buf;
while (remain > 0) {
cur_len = min(SPI_SH_FIFO_SIZE, remain);
for (i = 0; i < cur_len &&
!(spi_sh_read(ss, SPI_SH_CR4) &
SPI_SH_WPABRT) &&
!(spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_TBF);
i++)
spi_sh_write(ss, (unsigned long)data[i], SPI_SH_TBR);
if (spi_sh_read(ss, SPI_SH_CR4) & SPI_SH_WPABRT) {
/* Abort SPI operation */
spi_sh_set_bit(ss, SPI_SH_WPABRT, SPI_SH_CR4);
retval = -EIO;
break;
}
cur_len = i;
remain -= cur_len;
data += cur_len;
if (remain > 0) {
ss->cr1 &= ~SPI_SH_TBE;
spi_sh_set_bit(ss, SPI_SH_TBE, SPI_SH_CR4);
ret = wait_event_interruptible_timeout(ss->wait,
ss->cr1 & SPI_SH_TBE,
SPI_SH_SEND_TIMEOUT);
if (ret == 0 && !(ss->cr1 & SPI_SH_TBE)) {
printk(KERN_ERR "%s: timeout\n", __func__);
return -ETIMEDOUT;
}
}
}
if (list_is_last(&t->transfer_list, &mesg->transfers)) {
spi_sh_clear_bit(ss, SPI_SH_SSD | SPI_SH_SSDB, SPI_SH_CR1);
spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1);
ss->cr1 &= ~SPI_SH_TBE;
spi_sh_set_bit(ss, SPI_SH_TBE, SPI_SH_CR4);
ret = wait_event_interruptible_timeout(ss->wait,
ss->cr1 & SPI_SH_TBE,
SPI_SH_SEND_TIMEOUT);
		if (ret == 0 && !(ss->cr1 & SPI_SH_TBE)) {
printk(KERN_ERR "%s: timeout\n", __func__);
return -ETIMEDOUT;
}
}
return retval;
}
static int spi_sh_receive(struct spi_sh_data *ss, struct spi_message *mesg,
struct spi_transfer *t)
{
int i;
int remain = t->len;
int cur_len;
unsigned char *data;
long ret;
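	/*
	 * CR3 appears to program the number of bytes to clock in; cap it at
	 * the hardware maximum for longer transfers.
	 */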
if (t->len > SPI_SH_MAX_BYTE)
spi_sh_write(ss, SPI_SH_MAX_BYTE, SPI_SH_CR3);
else
spi_sh_write(ss, t->len, SPI_SH_CR3);
spi_sh_clear_bit(ss, SPI_SH_SSD | SPI_SH_SSDB, SPI_SH_CR1);
spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1);
spi_sh_wait_write_buffer_empty(ss);
data = (unsigned char *)t->rx_buf;
while (remain > 0) {
if (remain >= SPI_SH_FIFO_SIZE) {
ss->cr1 &= ~SPI_SH_RBF;
spi_sh_set_bit(ss, SPI_SH_RBF, SPI_SH_CR4);
ret = wait_event_interruptible_timeout(ss->wait,
ss->cr1 & SPI_SH_RBF,
SPI_SH_RECEIVE_TIMEOUT);
if (ret == 0 &&
spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_RBE) {
printk(KERN_ERR "%s: timeout\n", __func__);
return -ETIMEDOUT;
}
}
cur_len = min(SPI_SH_FIFO_SIZE, remain);
for (i = 0; i < cur_len; i++) {
if (spi_sh_wait_receive_buffer(ss))
break;
data[i] = (unsigned char)spi_sh_read(ss, SPI_SH_RBR);
}
remain -= cur_len;
data += cur_len;
}
/* deassert CS when SPI is receiving. */
if (t->len > SPI_SH_MAX_BYTE) {
clear_fifo(ss);
spi_sh_write(ss, 1, SPI_SH_CR3);
} else {
spi_sh_write(ss, 0, SPI_SH_CR3);
}
return 0;
}
static int spi_sh_transfer_one_message(struct spi_controller *ctlr,
struct spi_message *mesg)
{
struct spi_sh_data *ss = spi_controller_get_devdata(ctlr);
struct spi_transfer *t;
int ret;
pr_debug("%s: enter\n", __func__);
spi_sh_clear_bit(ss, SPI_SH_SSA, SPI_SH_CR1);
list_for_each_entry(t, &mesg->transfers, transfer_list) {
pr_debug("tx_buf = %p, rx_buf = %p\n",
t->tx_buf, t->rx_buf);
pr_debug("len = %d, delay.value = %d\n",
t->len, t->delay.value);
if (t->tx_buf) {
ret = spi_sh_send(ss, mesg, t);
if (ret < 0)
goto error;
}
if (t->rx_buf) {
ret = spi_sh_receive(ss, mesg, t);
if (ret < 0)
goto error;
}
mesg->actual_length += t->len;
}
mesg->status = 0;
spi_finalize_current_message(ctlr);
clear_fifo(ss);
spi_sh_set_bit(ss, SPI_SH_SSD, SPI_SH_CR1);
udelay(100);
spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD,
SPI_SH_CR1);
clear_fifo(ss);
return 0;
error:
mesg->status = ret;
spi_finalize_current_message(ctlr);
if (mesg->complete)
mesg->complete(mesg->context);
spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD,
SPI_SH_CR1);
clear_fifo(ss);
return ret;
}
static int spi_sh_setup(struct spi_device *spi)
{
struct spi_sh_data *ss = spi_controller_get_devdata(spi->controller);
pr_debug("%s: enter\n", __func__);
	spi_sh_write(ss, 0xfe, SPI_SH_CR1);	/* SPI cycle stop */
spi_sh_write(ss, 0x00, SPI_SH_CR1); /* CR1 init */
spi_sh_write(ss, 0x00, SPI_SH_CR3); /* CR3 init */
clear_fifo(ss);
/* 1/8 clock */
spi_sh_write(ss, spi_sh_read(ss, SPI_SH_CR2) | 0x07, SPI_SH_CR2);
udelay(10);
return 0;
}
static void spi_sh_cleanup(struct spi_device *spi)
{
struct spi_sh_data *ss = spi_controller_get_devdata(spi->controller);
pr_debug("%s: enter\n", __func__);
spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD,
SPI_SH_CR1);
}
static irqreturn_t spi_sh_irq(int irq, void *_ss)
{
struct spi_sh_data *ss = (struct spi_sh_data *)_ss;
unsigned long cr1;
cr1 = spi_sh_read(ss, SPI_SH_CR1);
if (cr1 & SPI_SH_TBE)
ss->cr1 |= SPI_SH_TBE;
if (cr1 & SPI_SH_TBF)
ss->cr1 |= SPI_SH_TBF;
if (cr1 & SPI_SH_RBE)
ss->cr1 |= SPI_SH_RBE;
if (cr1 & SPI_SH_RBF)
ss->cr1 |= SPI_SH_RBF;
if (ss->cr1) {
spi_sh_clear_bit(ss, ss->cr1, SPI_SH_CR4);
wake_up(&ss->wait);
}
return IRQ_HANDLED;
}
static void spi_sh_remove(struct platform_device *pdev)
{
struct spi_sh_data *ss = platform_get_drvdata(pdev);
spi_unregister_controller(ss->host);
free_irq(ss->irq, ss);
}
static int spi_sh_probe(struct platform_device *pdev)
{
struct resource *res;
struct spi_controller *host;
struct spi_sh_data *ss;
int ret, irq;
/* get base addr */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (unlikely(res == NULL)) {
dev_err(&pdev->dev, "invalid resource\n");
return -EINVAL;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
host = devm_spi_alloc_host(&pdev->dev, sizeof(struct spi_sh_data));
if (host == NULL) {
dev_err(&pdev->dev, "devm_spi_alloc_host error.\n");
return -ENOMEM;
}
ss = spi_controller_get_devdata(host);
platform_set_drvdata(pdev, ss);
switch (res->flags & IORESOURCE_MEM_TYPE_MASK) {
case IORESOURCE_MEM_8BIT:
ss->width = 8;
break;
case IORESOURCE_MEM_32BIT:
ss->width = 32;
break;
default:
dev_err(&pdev->dev, "No support width\n");
return -ENODEV;
}
ss->irq = irq;
ss->host = host;
ss->addr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (ss->addr == NULL) {
dev_err(&pdev->dev, "ioremap error.\n");
return -ENOMEM;
}
init_waitqueue_head(&ss->wait);
ret = request_irq(irq, spi_sh_irq, 0, "spi_sh", ss);
if (ret < 0) {
dev_err(&pdev->dev, "request_irq error\n");
return ret;
}
host->num_chipselect = 2;
host->bus_num = pdev->id;
host->setup = spi_sh_setup;
host->transfer_one_message = spi_sh_transfer_one_message;
host->cleanup = spi_sh_cleanup;
ret = spi_register_controller(host);
if (ret < 0) {
printk(KERN_ERR "spi_register_controller error.\n");
goto error3;
}
return 0;
error3:
free_irq(irq, ss);
return ret;
}
static struct platform_driver spi_sh_driver = {
.probe = spi_sh_probe,
.remove_new = spi_sh_remove,
.driver = {
.name = "sh_spi",
},
};
module_platform_driver(spi_sh_driver);
MODULE_DESCRIPTION("SH SPI bus driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_ALIAS("platform:sh_spi");
| linux-master | drivers/spi/spi-sh.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* MPC52xx PSC in SPI mode driver.
*
* Maintainer: Dragos Carp
*
* Copyright (C) 2006 TOPTICA Photonics AG.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <asm/mpc52xx.h>
#include <asm/mpc52xx_psc.h>
#define MCLK 20000000 /* PSC port MClk in hz */
struct mpc52xx_psc_spi {
/* driver internal data */
struct mpc52xx_psc __iomem *psc;
struct mpc52xx_psc_fifo __iomem *fifo;
int irq;
u8 bits_per_word;
struct completion done;
};
/* controller state */
struct mpc52xx_psc_spi_cs {
int bits_per_word;
int speed_hz;
};
/* set clock freq, clock ramp, bits per word
* if t is NULL then reset the values to the default values
*/
static int mpc52xx_psc_spi_transfer_setup(struct spi_device *spi,
struct spi_transfer *t)
{
struct mpc52xx_psc_spi_cs *cs = spi->controller_state;
cs->speed_hz = (t && t->speed_hz)
? t->speed_hz : spi->max_speed_hz;
cs->bits_per_word = (t && t->bits_per_word)
? t->bits_per_word : spi->bits_per_word;
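	/* The PSC moves data a byte at a time, so round up to a multiple of 8 bits. */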
cs->bits_per_word = ((cs->bits_per_word + 7) / 8) * 8;
return 0;
}
static void mpc52xx_psc_spi_activate_cs(struct spi_device *spi)
{
struct mpc52xx_psc_spi_cs *cs = spi->controller_state;
struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master);
struct mpc52xx_psc __iomem *psc = mps->psc;
u32 sicr;
u16 ccr;
sicr = in_be32(&psc->sicr);
/* Set clock phase and polarity */
if (spi->mode & SPI_CPHA)
sicr |= 0x00001000;
else
sicr &= ~0x00001000;
if (spi->mode & SPI_CPOL)
sicr |= 0x00002000;
else
sicr &= ~0x00002000;
if (spi->mode & SPI_LSB_FIRST)
sicr |= 0x10000000;
else
sicr &= ~0x10000000;
out_be32(&psc->sicr, sicr);
/* Set clock frequency and bits per word
* Because psc->ccr is defined as 16bit register instead of 32bit
* just set the lower byte of BitClkDiv
*/
ccr = in_be16((u16 __iomem *)&psc->ccr);
ccr &= 0xFF00;
if (cs->speed_hz)
ccr |= (MCLK / cs->speed_hz - 1) & 0xFF;
else /* by default SPI Clk 1MHz */
ccr |= (MCLK / 1000000 - 1) & 0xFF;
out_be16((u16 __iomem *)&psc->ccr, ccr);
mps->bits_per_word = cs->bits_per_word;
}
#define MPC52xx_PSC_BUFSIZE (MPC52xx_PSC_RFNUM_MASK + 1)
/* wake up when 80% fifo full */
#define MPC52xx_PSC_RFALARM (MPC52xx_PSC_BUFSIZE * 20 / 100)
static int mpc52xx_psc_spi_transfer_rxtx(struct spi_device *spi,
struct spi_transfer *t)
{
struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master);
struct mpc52xx_psc __iomem *psc = mps->psc;
struct mpc52xx_psc_fifo __iomem *fifo = mps->fifo;
	unsigned rb = 0;	/* number of bytes received */
unsigned sb = 0; /* number of bytes sent */
unsigned char *rx_buf = (unsigned char *)t->rx_buf;
unsigned char *tx_buf = (unsigned char *)t->tx_buf;
unsigned rfalarm;
unsigned send_at_once = MPC52xx_PSC_BUFSIZE;
unsigned recv_at_once;
int last_block = 0;
if (!t->tx_buf && !t->rx_buf && t->len)
return -EINVAL;
	/* enable transmitter/receiver */
out_8(&psc->command, MPC52xx_PSC_TX_ENABLE | MPC52xx_PSC_RX_ENABLE);
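	/*
	 * Run the transfer in FIFO-sized chunks: intermediate chunks wake us
	 * at the Rx alarm level, while the last chunk sets the alarm so the
	 * interrupt fires once every remaining byte has been received.
	 */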
while (rb < t->len) {
if (t->len - rb > MPC52xx_PSC_BUFSIZE) {
rfalarm = MPC52xx_PSC_RFALARM;
last_block = 0;
} else {
send_at_once = t->len - sb;
rfalarm = MPC52xx_PSC_BUFSIZE - (t->len - rb);
last_block = 1;
}
dev_dbg(&spi->dev, "send %d bytes...\n", send_at_once);
for (; send_at_once; sb++, send_at_once--) {
/* set EOF flag before the last word is sent */
if (send_at_once == 1 && last_block)
out_8(&psc->ircr2, 0x01);
if (tx_buf)
out_8(&psc->mpc52xx_psc_buffer_8, tx_buf[sb]);
else
out_8(&psc->mpc52xx_psc_buffer_8, 0);
}
/* enable interrupts and wait for wake up
		 * if just one byte is expected the Rx FIFO generates no
* FFULL interrupt, so activate the RxRDY interrupt
*/
out_8(&psc->command, MPC52xx_PSC_SEL_MODE_REG_1);
if (t->len - rb == 1) {
out_8(&psc->mode, 0);
} else {
out_8(&psc->mode, MPC52xx_PSC_MODE_FFULL);
out_be16(&fifo->rfalarm, rfalarm);
}
out_be16(&psc->mpc52xx_psc_imr, MPC52xx_PSC_IMR_RXRDY);
wait_for_completion(&mps->done);
recv_at_once = in_be16(&fifo->rfnum);
dev_dbg(&spi->dev, "%d bytes received\n", recv_at_once);
send_at_once = recv_at_once;
if (rx_buf) {
for (; recv_at_once; rb++, recv_at_once--)
rx_buf[rb] = in_8(&psc->mpc52xx_psc_buffer_8);
} else {
for (; recv_at_once; rb++, recv_at_once--)
in_8(&psc->mpc52xx_psc_buffer_8);
}
}
	/* disable transmitter/receiver */
out_8(&psc->command, MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE);
return 0;
}
static int mpc52xx_psc_spi_transfer_one_message(struct spi_controller *ctlr,
struct spi_message *m)
{
struct spi_device *spi;
struct spi_transfer *t = NULL;
unsigned cs_change;
int status;
spi = m->spi;
cs_change = 1;
status = 0;
list_for_each_entry (t, &m->transfers, transfer_list) {
if (t->bits_per_word || t->speed_hz) {
status = mpc52xx_psc_spi_transfer_setup(spi, t);
if (status < 0)
break;
}
if (cs_change)
mpc52xx_psc_spi_activate_cs(spi);
cs_change = t->cs_change;
status = mpc52xx_psc_spi_transfer_rxtx(spi, t);
if (status)
break;
m->actual_length += t->len;
spi_transfer_delay_exec(t);
}
m->status = status;
mpc52xx_psc_spi_transfer_setup(spi, NULL);
spi_finalize_current_message(ctlr);
return 0;
}
static int mpc52xx_psc_spi_setup(struct spi_device *spi)
{
struct mpc52xx_psc_spi_cs *cs = spi->controller_state;
if (spi->bits_per_word%8)
return -EINVAL;
if (!cs) {
cs = kzalloc(sizeof(*cs), GFP_KERNEL);
if (!cs)
return -ENOMEM;
spi->controller_state = cs;
}
cs->bits_per_word = spi->bits_per_word;
cs->speed_hz = spi->max_speed_hz;
return 0;
}
static void mpc52xx_psc_spi_cleanup(struct spi_device *spi)
{
kfree(spi->controller_state);
}
static int mpc52xx_psc_spi_port_config(int psc_id, struct mpc52xx_psc_spi *mps)
{
struct mpc52xx_psc __iomem *psc = mps->psc;
struct mpc52xx_psc_fifo __iomem *fifo = mps->fifo;
u32 mclken_div;
int ret;
/* default sysclk is 512MHz */
mclken_div = 512000000 / MCLK;
ret = mpc52xx_set_psc_clkdiv(psc_id, mclken_div);
if (ret)
return ret;
/* Reset the PSC into a known state */
out_8(&psc->command, MPC52xx_PSC_RST_RX);
out_8(&psc->command, MPC52xx_PSC_RST_TX);
out_8(&psc->command, MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE);
/* Disable interrupts, interrupts are based on alarm level */
out_be16(&psc->mpc52xx_psc_imr, 0);
out_8(&psc->command, MPC52xx_PSC_SEL_MODE_REG_1);
out_8(&fifo->rfcntl, 0);
out_8(&psc->mode, MPC52xx_PSC_MODE_FFULL);
/* Configure 8bit codec mode as a SPI master and use EOF flags */
/* SICR_SIM_CODEC8|SICR_GENCLK|SICR_SPI|SICR_MSTR|SICR_USEEOF */
out_be32(&psc->sicr, 0x0180C800);
out_be16((u16 __iomem *)&psc->ccr, 0x070F); /* default SPI Clk 1MHz */
/* Set 2ms DTL delay */
out_8(&psc->ctur, 0x00);
out_8(&psc->ctlr, 0x84);
mps->bits_per_word = 8;
return 0;
}
static irqreturn_t mpc52xx_psc_spi_isr(int irq, void *dev_id)
{
struct mpc52xx_psc_spi *mps = (struct mpc52xx_psc_spi *)dev_id;
struct mpc52xx_psc __iomem *psc = mps->psc;
/* disable interrupt and wake up the work queue */
if (in_be16(&psc->mpc52xx_psc_isr) & MPC52xx_PSC_IMR_RXRDY) {
out_be16(&psc->mpc52xx_psc_imr, 0);
complete(&mps->done);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
static int mpc52xx_psc_spi_of_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mpc52xx_psc_spi *mps;
struct spi_master *master;
u32 bus_num;
int ret;
master = devm_spi_alloc_master(dev, sizeof(*mps));
if (master == NULL)
return -ENOMEM;
dev_set_drvdata(dev, master);
mps = spi_master_get_devdata(master);
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
ret = device_property_read_u32(dev, "cell-index", &bus_num);
if (ret || bus_num > 5)
return dev_err_probe(dev, ret ? : -EINVAL, "Invalid cell-index property\n");
master->bus_num = bus_num + 1;
master->num_chipselect = 255;
master->setup = mpc52xx_psc_spi_setup;
master->transfer_one_message = mpc52xx_psc_spi_transfer_one_message;
master->cleanup = mpc52xx_psc_spi_cleanup;
device_set_node(&master->dev, dev_fwnode(dev));
mps->psc = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(mps->psc))
return dev_err_probe(dev, PTR_ERR(mps->psc), "could not ioremap I/O port range\n");
	/* On the 5200, fifo regs are immediately adjacent to the psc regs */
mps->fifo = ((void __iomem *)mps->psc) + sizeof(struct mpc52xx_psc);
mps->irq = platform_get_irq(pdev, 0);
if (mps->irq < 0)
return mps->irq;
ret = devm_request_irq(dev, mps->irq, mpc52xx_psc_spi_isr, 0,
"mpc52xx-psc-spi", mps);
if (ret)
return ret;
ret = mpc52xx_psc_spi_port_config(master->bus_num, mps);
if (ret < 0)
return dev_err_probe(dev, ret, "can't configure PSC! Is it capable of SPI?\n");
init_completion(&mps->done);
return devm_spi_register_master(dev, master);
}
static const struct of_device_id mpc52xx_psc_spi_of_match[] = {
{ .compatible = "fsl,mpc5200-psc-spi", },
{ .compatible = "mpc5200-psc-spi", }, /* old */
{}
};
MODULE_DEVICE_TABLE(of, mpc52xx_psc_spi_of_match);
static struct platform_driver mpc52xx_psc_spi_of_driver = {
.probe = mpc52xx_psc_spi_of_probe,
.driver = {
.name = "mpc52xx-psc-spi",
.of_match_table = mpc52xx_psc_spi_of_match,
},
};
module_platform_driver(mpc52xx_psc_spi_of_driver);
MODULE_AUTHOR("Dragos Carp");
MODULE_DESCRIPTION("MPC52xx PSC SPI Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-mpc52xx-psc.c |
// SPDX-License-Identifier: GPL-2.0
//
// Mediatek SPI NOR controller driver
//
// Copyright (C) 2020 Chuanhong Guo <[email protected]>
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/string.h>
#define DRIVER_NAME "mtk-spi-nor"
#define MTK_NOR_REG_CMD 0x00
#define MTK_NOR_CMD_WRITE BIT(4)
#define MTK_NOR_CMD_PROGRAM BIT(2)
#define MTK_NOR_CMD_READ BIT(0)
#define MTK_NOR_CMD_MASK GENMASK(5, 0)
#define MTK_NOR_REG_PRG_CNT 0x04
#define MTK_NOR_PRG_CNT_MAX 56
#define MTK_NOR_REG_RDATA 0x0c
#define MTK_NOR_REG_RADR0 0x10
#define MTK_NOR_REG_RADR(n) (MTK_NOR_REG_RADR0 + 4 * (n))
#define MTK_NOR_REG_RADR3 0xc8
#define MTK_NOR_REG_WDATA 0x1c
#define MTK_NOR_REG_PRGDATA0 0x20
#define MTK_NOR_REG_PRGDATA(n) (MTK_NOR_REG_PRGDATA0 + 4 * (n))
#define MTK_NOR_REG_PRGDATA_MAX 5
#define MTK_NOR_REG_SHIFT0 0x38
#define MTK_NOR_REG_SHIFT(n) (MTK_NOR_REG_SHIFT0 + 4 * (n))
#define MTK_NOR_REG_SHIFT_MAX 9
#define MTK_NOR_REG_CFG1 0x60
#define MTK_NOR_FAST_READ BIT(0)
#define MTK_NOR_REG_CFG2 0x64
#define MTK_NOR_WR_CUSTOM_OP_EN BIT(4)
#define MTK_NOR_WR_BUF_EN BIT(0)
#define MTK_NOR_REG_PP_DATA 0x98
#define MTK_NOR_REG_IRQ_STAT 0xa8
#define MTK_NOR_REG_IRQ_EN 0xac
#define MTK_NOR_IRQ_DMA BIT(7)
#define MTK_NOR_IRQ_MASK GENMASK(7, 0)
#define MTK_NOR_REG_CFG3 0xb4
#define MTK_NOR_DISABLE_WREN BIT(7)
#define MTK_NOR_DISABLE_SR_POLL BIT(5)
#define MTK_NOR_REG_WP 0xc4
#define MTK_NOR_ENABLE_SF_CMD 0x30
#define MTK_NOR_REG_BUSCFG 0xcc
#define MTK_NOR_4B_ADDR BIT(4)
#define MTK_NOR_QUAD_ADDR BIT(3)
#define MTK_NOR_QUAD_READ BIT(2)
#define MTK_NOR_DUAL_ADDR BIT(1)
#define MTK_NOR_DUAL_READ BIT(0)
#define MTK_NOR_BUS_MODE_MASK GENMASK(4, 0)
#define MTK_NOR_REG_DMA_CTL 0x718
#define MTK_NOR_DMA_START BIT(0)
#define MTK_NOR_REG_DMA_FADR 0x71c
#define MTK_NOR_REG_DMA_DADR 0x720
#define MTK_NOR_REG_DMA_END_DADR 0x724
#define MTK_NOR_REG_CG_DIS 0x728
#define MTK_NOR_SFC_SW_RST BIT(2)
#define MTK_NOR_REG_DMA_DADR_HB 0x738
#define MTK_NOR_REG_DMA_END_DADR_HB 0x73c
#define MTK_NOR_PRG_MAX_SIZE 6
// Reading DMA src/dst addresses have to be 16-byte aligned
#define MTK_NOR_DMA_ALIGN 16
#define MTK_NOR_DMA_ALIGN_MASK (MTK_NOR_DMA_ALIGN - 1)
// and we allocate a bounce buffer if destination address isn't aligned.
#define MTK_NOR_BOUNCE_BUF_SIZE PAGE_SIZE
// Buffered page program can do one 128-byte transfer
#define MTK_NOR_PP_SIZE 128
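// Convert a clock-cycle count into microseconds at the current SPI frequency,
// rounded up.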
#define CLK_TO_US(sp, clkcnt) DIV_ROUND_UP(clkcnt, sp->spi_freq / 1000000)
struct mtk_nor_caps {
u8 dma_bits;
	/* extra_dummy_bit is added for the IP in newer SoCs.
	 * Some newer SoCs change the timing of fetching register values
	 * and NOR flash IDs, so they need an extra_dummy_bit to add
	 * more clock cycles when fetching data.
	 */
u8 extra_dummy_bit;
};
struct mtk_nor {
struct spi_controller *ctlr;
struct device *dev;
void __iomem *base;
u8 *buffer;
dma_addr_t buffer_dma;
struct clk *spi_clk;
struct clk *ctlr_clk;
struct clk *axi_clk;
struct clk *axi_s_clk;
unsigned int spi_freq;
bool wbuf_en;
bool has_irq;
bool high_dma;
struct completion op_done;
const struct mtk_nor_caps *caps;
};
static inline void mtk_nor_rmw(struct mtk_nor *sp, u32 reg, u32 set, u32 clr)
{
u32 val = readl(sp->base + reg);
val &= ~clr;
val |= set;
writel(val, sp->base + reg);
}
static inline int mtk_nor_cmd_exec(struct mtk_nor *sp, u32 cmd, ulong clk)
{
ulong delay = CLK_TO_US(sp, clk);
u32 reg;
int ret;
writel(cmd, sp->base + MTK_NOR_REG_CMD);
ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CMD, reg, !(reg & cmd),
delay / 3, (delay + 1) * 200);
if (ret < 0)
dev_err(sp->dev, "command %u timeout.\n", cmd);
return ret;
}
static void mtk_nor_reset(struct mtk_nor *sp)
{
mtk_nor_rmw(sp, MTK_NOR_REG_CG_DIS, 0, MTK_NOR_SFC_SW_RST);
mb(); /* flush previous writes */
mtk_nor_rmw(sp, MTK_NOR_REG_CG_DIS, MTK_NOR_SFC_SW_RST, 0);
mb(); /* flush previous writes */
writel(MTK_NOR_ENABLE_SF_CMD, sp->base + MTK_NOR_REG_WP);
}
static void mtk_nor_set_addr(struct mtk_nor *sp, const struct spi_mem_op *op)
{
u32 addr = op->addr.val;
int i;
for (i = 0; i < 3; i++) {
writeb(addr & 0xff, sp->base + MTK_NOR_REG_RADR(i));
addr >>= 8;
}
if (op->addr.nbytes == 4) {
writeb(addr & 0xff, sp->base + MTK_NOR_REG_RADR3);
mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, MTK_NOR_4B_ADDR, 0);
} else {
mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, 0, MTK_NOR_4B_ADDR);
}
}
static bool need_bounce(struct mtk_nor *sp, const struct spi_mem_op *op)
{
return ((uintptr_t)op->data.buf.in & MTK_NOR_DMA_ALIGN_MASK);
}
static bool mtk_nor_match_read(const struct spi_mem_op *op)
{
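	// The controller produces a fixed number of dummy cycles for each
	// read mode, so only ops whose dummy count matches can use this path.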
int dummy = 0;
if (op->dummy.nbytes)
dummy = op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth;
if ((op->data.buswidth == 2) || (op->data.buswidth == 4)) {
if (op->addr.buswidth == 1)
return dummy == 8;
else if (op->addr.buswidth == 2)
return dummy == 4;
else if (op->addr.buswidth == 4)
return dummy == 6;
} else if ((op->addr.buswidth == 1) && (op->data.buswidth == 1)) {
if (op->cmd.opcode == 0x03)
return dummy == 0;
else if (op->cmd.opcode == 0x0b)
return dummy == 8;
}
return false;
}
static bool mtk_nor_match_prg(const struct spi_mem_op *op)
{
int tx_len, rx_len, prg_len, prg_left;
// prg mode is spi-only.
if ((op->cmd.buswidth > 1) || (op->addr.buswidth > 1) ||
(op->dummy.buswidth > 1) || (op->data.buswidth > 1))
return false;
tx_len = op->cmd.nbytes + op->addr.nbytes;
if (op->data.dir == SPI_MEM_DATA_OUT) {
// count dummy bytes only if we need to write data after it
tx_len += op->dummy.nbytes;
// leave at least one byte for data
if (tx_len > MTK_NOR_REG_PRGDATA_MAX)
return false;
// if there's no addr, meaning adjust_op_size is impossible,
// check data length as well.
if ((!op->addr.nbytes) &&
(tx_len + op->data.nbytes > MTK_NOR_REG_PRGDATA_MAX + 1))
return false;
} else if (op->data.dir == SPI_MEM_DATA_IN) {
if (tx_len > MTK_NOR_REG_PRGDATA_MAX + 1)
return false;
rx_len = op->data.nbytes;
prg_left = MTK_NOR_PRG_CNT_MAX / 8 - tx_len - op->dummy.nbytes;
if (prg_left > MTK_NOR_REG_SHIFT_MAX + 1)
prg_left = MTK_NOR_REG_SHIFT_MAX + 1;
if (rx_len > prg_left) {
if (!op->addr.nbytes)
return false;
rx_len = prg_left;
}
prg_len = tx_len + op->dummy.nbytes + rx_len;
if (prg_len > MTK_NOR_PRG_CNT_MAX / 8)
return false;
} else {
prg_len = tx_len + op->dummy.nbytes;
if (prg_len > MTK_NOR_PRG_CNT_MAX / 8)
return false;
}
return true;
}
static void mtk_nor_adj_prg_size(struct spi_mem_op *op)
{
int tx_len, tx_left, prg_left;
tx_len = op->cmd.nbytes + op->addr.nbytes;
if (op->data.dir == SPI_MEM_DATA_OUT) {
tx_len += op->dummy.nbytes;
tx_left = MTK_NOR_REG_PRGDATA_MAX + 1 - tx_len;
if (op->data.nbytes > tx_left)
op->data.nbytes = tx_left;
} else if (op->data.dir == SPI_MEM_DATA_IN) {
prg_left = MTK_NOR_PRG_CNT_MAX / 8 - tx_len - op->dummy.nbytes;
if (prg_left > MTK_NOR_REG_SHIFT_MAX + 1)
prg_left = MTK_NOR_REG_SHIFT_MAX + 1;
if (op->data.nbytes > prg_left)
op->data.nbytes = prg_left;
}
}
static int mtk_nor_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
struct mtk_nor *sp = spi_controller_get_devdata(mem->spi->master);
if (!op->data.nbytes)
return 0;
if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) {
if ((op->data.dir == SPI_MEM_DATA_IN) &&
mtk_nor_match_read(op)) {
// limit size to prevent timeout calculation overflow
if (op->data.nbytes > 0x400000)
op->data.nbytes = 0x400000;
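			// Unaligned flash addresses or reads shorter than the
			// DMA alignment cannot use DMA; fall back to a
			// single-byte PIO read.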
if ((op->addr.val & MTK_NOR_DMA_ALIGN_MASK) ||
(op->data.nbytes < MTK_NOR_DMA_ALIGN))
op->data.nbytes = 1;
else if (!need_bounce(sp, op))
op->data.nbytes &= ~MTK_NOR_DMA_ALIGN_MASK;
else if (op->data.nbytes > MTK_NOR_BOUNCE_BUF_SIZE)
op->data.nbytes = MTK_NOR_BOUNCE_BUF_SIZE;
return 0;
} else if (op->data.dir == SPI_MEM_DATA_OUT) {
if (op->data.nbytes >= MTK_NOR_PP_SIZE)
op->data.nbytes = MTK_NOR_PP_SIZE;
else
op->data.nbytes = 1;
return 0;
}
}
mtk_nor_adj_prg_size(op);
return 0;
}
static bool mtk_nor_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
if (!spi_mem_default_supports_op(mem, op))
return false;
if (op->cmd.buswidth != 1)
return false;
if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) {
switch (op->data.dir) {
case SPI_MEM_DATA_IN:
if (mtk_nor_match_read(op))
return true;
break;
case SPI_MEM_DATA_OUT:
if ((op->addr.buswidth == 1) &&
(op->dummy.nbytes == 0) &&
(op->data.buswidth == 1))
return true;
break;
default:
break;
}
}
return mtk_nor_match_prg(op);
}
static void mtk_nor_setup_bus(struct mtk_nor *sp, const struct spi_mem_op *op)
{
u32 reg = 0;
if (op->addr.nbytes == 4)
reg |= MTK_NOR_4B_ADDR;
if (op->data.buswidth == 4) {
reg |= MTK_NOR_QUAD_READ;
writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA(4));
if (op->addr.buswidth == 4)
reg |= MTK_NOR_QUAD_ADDR;
} else if (op->data.buswidth == 2) {
reg |= MTK_NOR_DUAL_READ;
writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA(3));
if (op->addr.buswidth == 2)
reg |= MTK_NOR_DUAL_ADDR;
} else {
if (op->cmd.opcode == 0x0b)
mtk_nor_rmw(sp, MTK_NOR_REG_CFG1, MTK_NOR_FAST_READ, 0);
else
mtk_nor_rmw(sp, MTK_NOR_REG_CFG1, 0, MTK_NOR_FAST_READ);
}
mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, reg, MTK_NOR_BUS_MODE_MASK);
}
static int mtk_nor_dma_exec(struct mtk_nor *sp, u32 from, unsigned int length,
dma_addr_t dma_addr)
{
int ret = 0;
u32 delay, timeout;
u32 reg;
writel(from, sp->base + MTK_NOR_REG_DMA_FADR);
writel(dma_addr, sp->base + MTK_NOR_REG_DMA_DADR);
writel(dma_addr + length, sp->base + MTK_NOR_REG_DMA_END_DADR);
if (sp->high_dma) {
writel(upper_32_bits(dma_addr),
sp->base + MTK_NOR_REG_DMA_DADR_HB);
writel(upper_32_bits(dma_addr + length),
sp->base + MTK_NOR_REG_DMA_END_DADR_HB);
}
if (sp->has_irq) {
reinit_completion(&sp->op_done);
mtk_nor_rmw(sp, MTK_NOR_REG_IRQ_EN, MTK_NOR_IRQ_DMA, 0);
}
mtk_nor_rmw(sp, MTK_NOR_REG_DMA_CTL, MTK_NOR_DMA_START, 0);
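	// Rough completion estimate: the payload plus five bytes (presumably
	// opcode and address overhead) at the SPI clock rate; allow a much
	// longer timeout before giving up.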
delay = CLK_TO_US(sp, (length + 5) * BITS_PER_BYTE);
timeout = (delay + 1) * 100;
if (sp->has_irq) {
if (!wait_for_completion_timeout(&sp->op_done,
usecs_to_jiffies(max(timeout, 10000U))))
ret = -ETIMEDOUT;
} else {
ret = readl_poll_timeout(sp->base + MTK_NOR_REG_DMA_CTL, reg,
!(reg & MTK_NOR_DMA_START), delay / 3,
timeout);
}
if (ret < 0)
dev_err(sp->dev, "dma read timeout.\n");
return ret;
}
static int mtk_nor_read_bounce(struct mtk_nor *sp, const struct spi_mem_op *op)
{
unsigned int rdlen;
int ret;
if (op->data.nbytes & MTK_NOR_DMA_ALIGN_MASK)
rdlen = (op->data.nbytes + MTK_NOR_DMA_ALIGN) & ~MTK_NOR_DMA_ALIGN_MASK;
else
rdlen = op->data.nbytes;
ret = mtk_nor_dma_exec(sp, op->addr.val, rdlen, sp->buffer_dma);
if (!ret)
memcpy(op->data.buf.in, sp->buffer, op->data.nbytes);
return ret;
}
static int mtk_nor_read_dma(struct mtk_nor *sp, const struct spi_mem_op *op)
{
int ret;
dma_addr_t dma_addr;
if (need_bounce(sp, op))
return mtk_nor_read_bounce(sp, op);
dma_addr = dma_map_single(sp->dev, op->data.buf.in,
op->data.nbytes, DMA_FROM_DEVICE);
if (dma_mapping_error(sp->dev, dma_addr))
return -EINVAL;
ret = mtk_nor_dma_exec(sp, op->addr.val, op->data.nbytes, dma_addr);
dma_unmap_single(sp->dev, dma_addr, op->data.nbytes, DMA_FROM_DEVICE);
return ret;
}
static int mtk_nor_read_pio(struct mtk_nor *sp, const struct spi_mem_op *op)
{
u8 *buf = op->data.buf.in;
int ret;
ret = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_READ, 6 * BITS_PER_BYTE);
if (!ret)
buf[0] = readb(sp->base + MTK_NOR_REG_RDATA);
return ret;
}
static int mtk_nor_setup_write_buffer(struct mtk_nor *sp, bool on)
{
int ret;
u32 val;
if (!(sp->wbuf_en ^ on))
return 0;
val = readl(sp->base + MTK_NOR_REG_CFG2);
if (on) {
writel(val | MTK_NOR_WR_BUF_EN, sp->base + MTK_NOR_REG_CFG2);
ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CFG2, val,
val & MTK_NOR_WR_BUF_EN, 0, 10000);
} else {
writel(val & ~MTK_NOR_WR_BUF_EN, sp->base + MTK_NOR_REG_CFG2);
ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CFG2, val,
!(val & MTK_NOR_WR_BUF_EN), 0, 10000);
}
if (!ret)
sp->wbuf_en = on;
return ret;
}
static int mtk_nor_pp_buffered(struct mtk_nor *sp, const struct spi_mem_op *op)
{
const u8 *buf = op->data.buf.out;
u32 val;
int ret, i;
ret = mtk_nor_setup_write_buffer(sp, true);
if (ret < 0)
return ret;
for (i = 0; i < op->data.nbytes; i += 4) {
val = buf[i + 3] << 24 | buf[i + 2] << 16 | buf[i + 1] << 8 |
buf[i];
writel(val, sp->base + MTK_NOR_REG_PP_DATA);
}
return mtk_nor_cmd_exec(sp, MTK_NOR_CMD_WRITE,
(op->data.nbytes + 5) * BITS_PER_BYTE);
}
static int mtk_nor_pp_unbuffered(struct mtk_nor *sp,
const struct spi_mem_op *op)
{
const u8 *buf = op->data.buf.out;
int ret;
ret = mtk_nor_setup_write_buffer(sp, false);
if (ret < 0)
return ret;
writeb(buf[0], sp->base + MTK_NOR_REG_WDATA);
return mtk_nor_cmd_exec(sp, MTK_NOR_CMD_WRITE, 6 * BITS_PER_BYTE);
}
static int mtk_nor_spi_mem_prg(struct mtk_nor *sp, const struct spi_mem_op *op)
{
int rx_len = 0;
int reg_offset = MTK_NOR_REG_PRGDATA_MAX;
int tx_len, prg_len;
int i, ret;
void __iomem *reg;
u8 bufbyte;
tx_len = op->cmd.nbytes + op->addr.nbytes;
// count dummy bytes only if we need to write data after it
if (op->data.dir == SPI_MEM_DATA_OUT)
tx_len += op->dummy.nbytes + op->data.nbytes;
else if (op->data.dir == SPI_MEM_DATA_IN)
rx_len = op->data.nbytes;
prg_len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes +
op->data.nbytes;
// an invalid op may reach here if the caller calls exec_op without
// adjust_op_size. return -EINVAL instead of -ENOTSUPP so that
// spi-mem won't try this op again with generic spi transfers.
if ((tx_len > MTK_NOR_REG_PRGDATA_MAX + 1) ||
(rx_len > MTK_NOR_REG_SHIFT_MAX + 1) ||
(prg_len > MTK_NOR_PRG_CNT_MAX / 8))
return -EINVAL;
// fill tx data
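	// The opcode goes into the highest PRGDATA register and the remaining
	// TX bytes follow in decreasing register order.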
for (i = op->cmd.nbytes; i > 0; i--, reg_offset--) {
reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
bufbyte = (op->cmd.opcode >> ((i - 1) * BITS_PER_BYTE)) & 0xff;
writeb(bufbyte, reg);
}
for (i = op->addr.nbytes; i > 0; i--, reg_offset--) {
reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
bufbyte = (op->addr.val >> ((i - 1) * BITS_PER_BYTE)) & 0xff;
writeb(bufbyte, reg);
}
if (op->data.dir == SPI_MEM_DATA_OUT) {
for (i = 0; i < op->dummy.nbytes; i++, reg_offset--) {
reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
writeb(0, reg);
}
for (i = 0; i < op->data.nbytes; i++, reg_offset--) {
reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
writeb(((const u8 *)(op->data.buf.out))[i], reg);
}
}
for (; reg_offset >= 0; reg_offset--) {
reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
writeb(0, reg);
}
// trigger op
if (rx_len)
writel(prg_len * BITS_PER_BYTE + sp->caps->extra_dummy_bit,
sp->base + MTK_NOR_REG_PRG_CNT);
else
writel(prg_len * BITS_PER_BYTE, sp->base + MTK_NOR_REG_PRG_CNT);
ret = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_PROGRAM,
prg_len * BITS_PER_BYTE);
if (ret)
return ret;
// fetch read data
reg_offset = 0;
if (op->data.dir == SPI_MEM_DATA_IN) {
for (i = op->data.nbytes - 1; i >= 0; i--, reg_offset++) {
reg = sp->base + MTK_NOR_REG_SHIFT(reg_offset);
((u8 *)(op->data.buf.in))[i] = readb(reg);
}
}
return 0;
}
static int mtk_nor_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
struct mtk_nor *sp = spi_controller_get_devdata(mem->spi->master);
int ret;
if ((op->data.nbytes == 0) ||
((op->addr.nbytes != 3) && (op->addr.nbytes != 4)))
return mtk_nor_spi_mem_prg(sp, op);
if (op->data.dir == SPI_MEM_DATA_OUT) {
mtk_nor_set_addr(sp, op);
writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA0);
if (op->data.nbytes == MTK_NOR_PP_SIZE)
return mtk_nor_pp_buffered(sp, op);
return mtk_nor_pp_unbuffered(sp, op);
}
if ((op->data.dir == SPI_MEM_DATA_IN) && mtk_nor_match_read(op)) {
ret = mtk_nor_setup_write_buffer(sp, false);
if (ret < 0)
return ret;
mtk_nor_setup_bus(sp, op);
if (op->data.nbytes == 1) {
mtk_nor_set_addr(sp, op);
return mtk_nor_read_pio(sp, op);
} else {
ret = mtk_nor_read_dma(sp, op);
if (unlikely(ret)) {
/* Handle rare bus glitch */
mtk_nor_reset(sp);
mtk_nor_setup_bus(sp, op);
return mtk_nor_read_dma(sp, op);
}
return ret;
}
}
return mtk_nor_spi_mem_prg(sp, op);
}
static int mtk_nor_setup(struct spi_device *spi)
{
struct mtk_nor *sp = spi_controller_get_devdata(spi->master);
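	// The bus clock is fixed by the "spi" clock rate: reject devices that
	// cannot run that fast and report the actual rate back to the core.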
if (spi->max_speed_hz && (spi->max_speed_hz < sp->spi_freq)) {
dev_err(&spi->dev, "spi clock should be %u Hz.\n",
sp->spi_freq);
return -EINVAL;
}
spi->max_speed_hz = sp->spi_freq;
return 0;
}
static int mtk_nor_transfer_one_message(struct spi_controller *master,
struct spi_message *m)
{
struct mtk_nor *sp = spi_controller_get_devdata(master);
struct spi_transfer *t = NULL;
unsigned long trx_len = 0;
int stat = 0;
int reg_offset = MTK_NOR_REG_PRGDATA_MAX;
void __iomem *reg;
const u8 *txbuf;
u8 *rxbuf;
int i;
list_for_each_entry(t, &m->transfers, transfer_list) {
txbuf = t->tx_buf;
for (i = 0; i < t->len; i++, reg_offset--) {
reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
if (txbuf)
writeb(txbuf[i], reg);
else
writeb(0, reg);
}
trx_len += t->len;
}
writel(trx_len * BITS_PER_BYTE, sp->base + MTK_NOR_REG_PRG_CNT);
stat = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_PROGRAM,
trx_len * BITS_PER_BYTE);
if (stat < 0)
goto msg_done;
reg_offset = trx_len - 1;
list_for_each_entry(t, &m->transfers, transfer_list) {
rxbuf = t->rx_buf;
for (i = 0; i < t->len; i++, reg_offset--) {
reg = sp->base + MTK_NOR_REG_SHIFT(reg_offset);
if (rxbuf)
rxbuf[i] = readb(reg);
}
}
m->actual_length = trx_len;
msg_done:
m->status = stat;
spi_finalize_current_message(master);
return 0;
}
static void mtk_nor_disable_clk(struct mtk_nor *sp)
{
clk_disable_unprepare(sp->spi_clk);
clk_disable_unprepare(sp->ctlr_clk);
clk_disable_unprepare(sp->axi_clk);
clk_disable_unprepare(sp->axi_s_clk);
}
static int mtk_nor_enable_clk(struct mtk_nor *sp)
{
int ret;
ret = clk_prepare_enable(sp->spi_clk);
if (ret)
return ret;
ret = clk_prepare_enable(sp->ctlr_clk);
if (ret) {
clk_disable_unprepare(sp->spi_clk);
return ret;
}
ret = clk_prepare_enable(sp->axi_clk);
if (ret) {
clk_disable_unprepare(sp->spi_clk);
clk_disable_unprepare(sp->ctlr_clk);
return ret;
}
ret = clk_prepare_enable(sp->axi_s_clk);
if (ret) {
clk_disable_unprepare(sp->spi_clk);
clk_disable_unprepare(sp->ctlr_clk);
clk_disable_unprepare(sp->axi_clk);
return ret;
}
return 0;
}
static void mtk_nor_init(struct mtk_nor *sp)
{
writel(0, sp->base + MTK_NOR_REG_IRQ_EN);
writel(MTK_NOR_IRQ_MASK, sp->base + MTK_NOR_REG_IRQ_STAT);
writel(MTK_NOR_ENABLE_SF_CMD, sp->base + MTK_NOR_REG_WP);
mtk_nor_rmw(sp, MTK_NOR_REG_CFG2, MTK_NOR_WR_CUSTOM_OP_EN, 0);
mtk_nor_rmw(sp, MTK_NOR_REG_CFG3,
MTK_NOR_DISABLE_WREN | MTK_NOR_DISABLE_SR_POLL, 0);
}
static irqreturn_t mtk_nor_irq_handler(int irq, void *data)
{
struct mtk_nor *sp = data;
u32 irq_status, irq_enabled;
irq_status = readl(sp->base + MTK_NOR_REG_IRQ_STAT);
irq_enabled = readl(sp->base + MTK_NOR_REG_IRQ_EN);
// write status back to clear interrupt
writel(irq_status, sp->base + MTK_NOR_REG_IRQ_STAT);
if (!(irq_status & irq_enabled))
return IRQ_NONE;
if (irq_status & MTK_NOR_IRQ_DMA) {
complete(&sp->op_done);
writel(0, sp->base + MTK_NOR_REG_IRQ_EN);
}
return IRQ_HANDLED;
}
static size_t mtk_max_msg_size(struct spi_device *spi)
{
return MTK_NOR_PRG_MAX_SIZE;
}
static const struct spi_controller_mem_ops mtk_nor_mem_ops = {
.adjust_op_size = mtk_nor_adjust_op_size,
.supports_op = mtk_nor_supports_op,
.exec_op = mtk_nor_exec_op
};
static const struct mtk_nor_caps mtk_nor_caps_mt8173 = {
.dma_bits = 32,
.extra_dummy_bit = 0,
};
static const struct mtk_nor_caps mtk_nor_caps_mt8186 = {
.dma_bits = 32,
.extra_dummy_bit = 1,
};
static const struct mtk_nor_caps mtk_nor_caps_mt8192 = {
.dma_bits = 36,
.extra_dummy_bit = 0,
};
static const struct of_device_id mtk_nor_match[] = {
{ .compatible = "mediatek,mt8173-nor", .data = &mtk_nor_caps_mt8173 },
{ .compatible = "mediatek,mt8186-nor", .data = &mtk_nor_caps_mt8186 },
{ .compatible = "mediatek,mt8192-nor", .data = &mtk_nor_caps_mt8192 },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mtk_nor_match);
static int mtk_nor_probe(struct platform_device *pdev)
{
struct spi_controller *ctlr;
struct mtk_nor *sp;
struct mtk_nor_caps *caps;
void __iomem *base;
struct clk *spi_clk, *ctlr_clk, *axi_clk, *axi_s_clk;
int ret, irq;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
spi_clk = devm_clk_get(&pdev->dev, "spi");
if (IS_ERR(spi_clk))
return PTR_ERR(spi_clk);
ctlr_clk = devm_clk_get(&pdev->dev, "sf");
if (IS_ERR(ctlr_clk))
return PTR_ERR(ctlr_clk);
axi_clk = devm_clk_get_optional(&pdev->dev, "axi");
if (IS_ERR(axi_clk))
return PTR_ERR(axi_clk);
axi_s_clk = devm_clk_get_optional(&pdev->dev, "axi_s");
if (IS_ERR(axi_s_clk))
return PTR_ERR(axi_s_clk);
caps = (struct mtk_nor_caps *)of_device_get_match_data(&pdev->dev);
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(caps->dma_bits));
if (ret) {
dev_err(&pdev->dev, "failed to set dma mask(%u)\n", caps->dma_bits);
return ret;
}
ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*sp));
if (!ctlr) {
dev_err(&pdev->dev, "failed to allocate spi controller\n");
return -ENOMEM;
}
ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
ctlr->dev.of_node = pdev->dev.of_node;
ctlr->max_message_size = mtk_max_msg_size;
ctlr->mem_ops = &mtk_nor_mem_ops;
ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD;
ctlr->num_chipselect = 1;
ctlr->setup = mtk_nor_setup;
ctlr->transfer_one_message = mtk_nor_transfer_one_message;
ctlr->auto_runtime_pm = true;
dev_set_drvdata(&pdev->dev, ctlr);
sp = spi_controller_get_devdata(ctlr);
sp->base = base;
sp->has_irq = false;
sp->wbuf_en = false;
sp->ctlr = ctlr;
sp->dev = &pdev->dev;
sp->spi_clk = spi_clk;
sp->ctlr_clk = ctlr_clk;
sp->axi_clk = axi_clk;
sp->axi_s_clk = axi_s_clk;
sp->caps = caps;
sp->high_dma = caps->dma_bits > 32;
sp->buffer = dmam_alloc_coherent(&pdev->dev,
MTK_NOR_BOUNCE_BUF_SIZE + MTK_NOR_DMA_ALIGN,
&sp->buffer_dma, GFP_KERNEL);
if (!sp->buffer)
return -ENOMEM;
if ((uintptr_t)sp->buffer & MTK_NOR_DMA_ALIGN_MASK) {
dev_err(sp->dev, "misaligned allocation of internal buffer.\n");
return -ENOMEM;
}
ret = mtk_nor_enable_clk(sp);
if (ret < 0)
return ret;
sp->spi_freq = clk_get_rate(sp->spi_clk);
mtk_nor_init(sp);
irq = platform_get_irq_optional(pdev, 0);
if (irq < 0) {
dev_warn(sp->dev, "IRQ not available.");
} else {
ret = devm_request_irq(sp->dev, irq, mtk_nor_irq_handler, 0,
pdev->name, sp);
if (ret < 0) {
dev_warn(sp->dev, "failed to request IRQ.");
} else {
init_completion(&sp->op_done);
sp->has_irq = true;
}
}
pm_runtime_set_autosuspend_delay(&pdev->dev, -1);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
pm_runtime_get_noresume(&pdev->dev);
ret = devm_spi_register_controller(&pdev->dev, ctlr);
if (ret < 0)
goto err_probe;
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
dev_info(&pdev->dev, "spi frequency: %d Hz\n", sp->spi_freq);
return 0;
err_probe:
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
mtk_nor_disable_clk(sp);
return ret;
}
static void mtk_nor_remove(struct platform_device *pdev)
{
struct spi_controller *ctlr = dev_get_drvdata(&pdev->dev);
struct mtk_nor *sp = spi_controller_get_devdata(ctlr);
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
mtk_nor_disable_clk(sp);
}
static int __maybe_unused mtk_nor_runtime_suspend(struct device *dev)
{
struct spi_controller *ctlr = dev_get_drvdata(dev);
struct mtk_nor *sp = spi_controller_get_devdata(ctlr);
mtk_nor_disable_clk(sp);
return 0;
}
static int __maybe_unused mtk_nor_runtime_resume(struct device *dev)
{
struct spi_controller *ctlr = dev_get_drvdata(dev);
struct mtk_nor *sp = spi_controller_get_devdata(ctlr);
return mtk_nor_enable_clk(sp);
}
static int __maybe_unused mtk_nor_suspend(struct device *dev)
{
return pm_runtime_force_suspend(dev);
}
static int __maybe_unused mtk_nor_resume(struct device *dev)
{
struct spi_controller *ctlr = dev_get_drvdata(dev);
struct mtk_nor *sp = spi_controller_get_devdata(ctlr);
int ret;
ret = pm_runtime_force_resume(dev);
if (ret)
return ret;
mtk_nor_init(sp);
return 0;
}
static const struct dev_pm_ops mtk_nor_pm_ops = {
SET_RUNTIME_PM_OPS(mtk_nor_runtime_suspend,
mtk_nor_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(mtk_nor_suspend, mtk_nor_resume)
};
static struct platform_driver mtk_nor_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = mtk_nor_match,
.pm = &mtk_nor_pm_ops,
},
.probe = mtk_nor_probe,
.remove_new = mtk_nor_remove,
};
module_platform_driver(mtk_nor_driver);
MODULE_DESCRIPTION("Mediatek SPI NOR controller driver");
MODULE_AUTHOR("Chuanhong Guo <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);
| linux-master | drivers/spi/spi-mtk-nor.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2009 Texas Instruments.
* Copyright (C) 2010 EF Johnson Technologies
*/
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/slab.h>
#include <linux/platform_data/spi-davinci.h>
#define CS_DEFAULT 0xFF
#define SPIFMT_PHASE_MASK BIT(16)
#define SPIFMT_POLARITY_MASK BIT(17)
#define SPIFMT_DISTIMER_MASK BIT(18)
#define SPIFMT_SHIFTDIR_MASK BIT(20)
#define SPIFMT_WAITENA_MASK BIT(21)
#define SPIFMT_PARITYENA_MASK BIT(22)
#define SPIFMT_ODD_PARITY_MASK BIT(23)
#define SPIFMT_WDELAY_MASK 0x3f000000u
#define SPIFMT_WDELAY_SHIFT 24
#define SPIFMT_PRESCALE_SHIFT 8
/* SPIPC0 */
#define SPIPC0_DIFUN_MASK BIT(11) /* MISO */
#define SPIPC0_DOFUN_MASK BIT(10) /* MOSI */
#define SPIPC0_CLKFUN_MASK BIT(9) /* CLK */
#define SPIPC0_SPIENA_MASK BIT(8) /* nREADY */
#define SPIINT_MASKALL 0x0101035F
#define SPIINT_MASKINT 0x0000015F
#define SPI_INTLVL_1 0x000001FF
#define SPI_INTLVL_0 0x00000000
/* SPIDAT1 (upper 16 bit defines) */
#define SPIDAT1_CSHOLD_MASK BIT(12)
#define SPIDAT1_WDEL BIT(10)
/* SPIGCR1 */
#define SPIGCR1_CLKMOD_MASK BIT(1)
#define SPIGCR1_MASTER_MASK BIT(0)
#define SPIGCR1_POWERDOWN_MASK BIT(8)
#define SPIGCR1_LOOPBACK_MASK BIT(16)
#define SPIGCR1_SPIENA_MASK BIT(24)
/* SPIBUF */
#define SPIBUF_TXFULL_MASK BIT(29)
#define SPIBUF_RXEMPTY_MASK BIT(31)
/* SPIDELAY */
#define SPIDELAY_C2TDELAY_SHIFT 24
#define SPIDELAY_C2TDELAY_MASK (0xFF << SPIDELAY_C2TDELAY_SHIFT)
#define SPIDELAY_T2CDELAY_SHIFT 16
#define SPIDELAY_T2CDELAY_MASK (0xFF << SPIDELAY_T2CDELAY_SHIFT)
#define SPIDELAY_T2EDELAY_SHIFT 8
#define SPIDELAY_T2EDELAY_MASK (0xFF << SPIDELAY_T2EDELAY_SHIFT)
#define SPIDELAY_C2EDELAY_SHIFT 0
#define SPIDELAY_C2EDELAY_MASK 0xFF
/* Error Masks */
#define SPIFLG_DLEN_ERR_MASK BIT(0)
#define SPIFLG_TIMEOUT_MASK BIT(1)
#define SPIFLG_PARERR_MASK BIT(2)
#define SPIFLG_DESYNC_MASK BIT(3)
#define SPIFLG_BITERR_MASK BIT(4)
#define SPIFLG_OVRRUN_MASK BIT(6)
#define SPIFLG_BUF_INIT_ACTIVE_MASK BIT(24)
#define SPIFLG_ERROR_MASK (SPIFLG_DLEN_ERR_MASK \
| SPIFLG_TIMEOUT_MASK | SPIFLG_PARERR_MASK \
| SPIFLG_DESYNC_MASK | SPIFLG_BITERR_MASK \
| SPIFLG_OVRRUN_MASK)
#define SPIINT_DMA_REQ_EN BIT(16)
/* SPI Controller registers */
#define SPIGCR0 0x00
#define SPIGCR1 0x04
#define SPIINT 0x08
#define SPILVL 0x0c
#define SPIFLG 0x10
#define SPIPC0 0x14
#define SPIDAT1 0x3c
#define SPIBUF 0x40
#define SPIDELAY 0x48
#define SPIDEF 0x4c
#define SPIFMT0 0x50
#define DMA_MIN_BYTES 16
/* SPI Controller driver's private data. */
struct davinci_spi {
struct spi_bitbang bitbang;
struct clk *clk;
u8 version;
resource_size_t pbase;
void __iomem *base;
u32 irq;
struct completion done;
const void *tx;
void *rx;
int rcount;
int wcount;
struct dma_chan *dma_rx;
struct dma_chan *dma_tx;
struct davinci_spi_platform_data pdata;
void (*get_rx)(u32 rx_data, struct davinci_spi *);
u32 (*get_tx)(struct davinci_spi *);
u8 *bytes_per_word;
u8 prescaler_limit;
};
static struct davinci_spi_config davinci_spi_default_cfg;
static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *dspi)
{
if (dspi->rx) {
u8 *rx = dspi->rx;
*rx++ = (u8)data;
dspi->rx = rx;
}
}
static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *dspi)
{
if (dspi->rx) {
u16 *rx = dspi->rx;
*rx++ = (u16)data;
dspi->rx = rx;
}
}
static u32 davinci_spi_tx_buf_u8(struct davinci_spi *dspi)
{
u32 data = 0;
if (dspi->tx) {
const u8 *tx = dspi->tx;
data = *tx++;
dspi->tx = tx;
}
return data;
}
static u32 davinci_spi_tx_buf_u16(struct davinci_spi *dspi)
{
u32 data = 0;
if (dspi->tx) {
const u16 *tx = dspi->tx;
data = *tx++;
dspi->tx = tx;
}
return data;
}
static inline void set_io_bits(void __iomem *addr, u32 bits)
{
u32 v = ioread32(addr);
v |= bits;
iowrite32(v, addr);
}
static inline void clear_io_bits(void __iomem *addr, u32 bits)
{
u32 v = ioread32(addr);
v &= ~bits;
iowrite32(v, addr);
}
/*
* Interface to control the chip select signal
*/
static void davinci_spi_chipselect(struct spi_device *spi, int value)
{
struct davinci_spi *dspi;
struct davinci_spi_config *spicfg = spi->controller_data;
u8 chip_sel = spi_get_chipselect(spi, 0);
u16 spidat1 = CS_DEFAULT;
dspi = spi_controller_get_devdata(spi->controller);
/* enable the delay between transfers if wdelay is non-zero */
if (spicfg && spicfg->wdelay)
spidat1 |= SPIDAT1_WDEL;
/*
* Board specific chip select logic decides the polarity and cs
* line for the controller
*/
if (spi_get_csgpiod(spi, 0)) {
if (value == BITBANG_CS_ACTIVE)
gpiod_set_value(spi_get_csgpiod(spi, 0), 1);
else
gpiod_set_value(spi_get_csgpiod(spi, 0), 0);
} else {
if (value == BITBANG_CS_ACTIVE) {
if (!(spi->mode & SPI_CS_WORD))
spidat1 |= SPIDAT1_CSHOLD_MASK;
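/*
 * CS_DEFAULT leaves all chip-select lines deasserted (high);
 * clearing this device's bit drives its active-low CS line low.
 */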
spidat1 &= ~(0x1 << chip_sel);
}
}
iowrite16(spidat1, dspi->base + SPIDAT1 + 2);
}
/**
* davinci_spi_get_prescale - Calculates the correct prescale value
* @dspi: the controller data
* @max_speed_hz: the maximum rate the SPI clock can run at
*
* This function calculates the prescale value that generates a clock rate
* less than or equal to the specified maximum.
*
* Returns: the calculated prescale value, ready for programming into the SPI
* registers, or a negative error number if a valid prescaler cannot be derived.
*/
static inline int davinci_spi_get_prescale(struct davinci_spi *dspi,
u32 max_speed_hz)
{
int ret;
/* Subtract 1 to match what will be programmed into SPI register. */
ret = DIV_ROUND_UP(clk_get_rate(dspi->clk), max_speed_hz) - 1;
if (ret < dspi->prescaler_limit || ret > 255)
return -EINVAL;
return ret;
}
/**
* davinci_spi_setup_transfer - This function will determine the transfer method
* @spi: spi device on which data transfer to be done
* @t: spi transfer in which transfer info is filled
*
* This function determines the data transfer method (8- or 16-bit transfer).
* It also sets the SPI clock control register according to the
* SPI slave device frequency.
*/
static int davinci_spi_setup_transfer(struct spi_device *spi,
struct spi_transfer *t)
{
struct davinci_spi *dspi;
struct davinci_spi_config *spicfg;
u8 bits_per_word = 0;
u32 hz = 0, spifmt = 0;
int prescale;
dspi = spi_controller_get_devdata(spi->controller);
spicfg = spi->controller_data;
if (!spicfg)
spicfg = &davinci_spi_default_cfg;
if (t) {
bits_per_word = t->bits_per_word;
hz = t->speed_hz;
}
/* if bits_per_word is not set then set it default */
if (!bits_per_word)
bits_per_word = spi->bits_per_word;
/*
* Assign function pointer to appropriate transfer method
* 8bit, 16bit or 32bit transfer
*/
if (bits_per_word <= 8) {
dspi->get_rx = davinci_spi_rx_buf_u8;
dspi->get_tx = davinci_spi_tx_buf_u8;
dspi->bytes_per_word[spi_get_chipselect(spi, 0)] = 1;
} else {
dspi->get_rx = davinci_spi_rx_buf_u16;
dspi->get_tx = davinci_spi_tx_buf_u16;
dspi->bytes_per_word[spi_get_chipselect(spi, 0)] = 2;
}
if (!hz)
hz = spi->max_speed_hz;
/* Set up SPIFMTn register, unique to this chipselect. */
prescale = davinci_spi_get_prescale(dspi, hz);
if (prescale < 0)
return prescale;
spifmt = (prescale << SPIFMT_PRESCALE_SHIFT) | (bits_per_word & 0x1f);
if (spi->mode & SPI_LSB_FIRST)
spifmt |= SPIFMT_SHIFTDIR_MASK;
if (spi->mode & SPI_CPOL)
spifmt |= SPIFMT_POLARITY_MASK;
if (!(spi->mode & SPI_CPHA))
spifmt |= SPIFMT_PHASE_MASK;
/*
* Assume wdelay is used only on SPI peripherals that have this field
* in the SPIFMTn register and when it is configured from the board file or DT.
*/
if (spicfg->wdelay)
spifmt |= ((spicfg->wdelay << SPIFMT_WDELAY_SHIFT)
& SPIFMT_WDELAY_MASK);
/*
* Version 1 hardware supports two basic SPI modes:
* - Standard SPI mode uses 4 pins, with chipselect
* - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS)
* (distinct from SPI_3WIRE, with just one data wire;
* or similar variants without MOSI or without MISO)
*
* Version 2 hardware supports an optional handshaking signal,
* so it can support two more modes:
* - 5 pin SPI variant is standard SPI plus SPI_READY
* - 4 pin with enable is (SPI_READY | SPI_NO_CS)
*/
if (dspi->version == SPI_VERSION_2) {
u32 delay = 0;
if (spicfg->odd_parity)
spifmt |= SPIFMT_ODD_PARITY_MASK;
if (spicfg->parity_enable)
spifmt |= SPIFMT_PARITYENA_MASK;
if (spicfg->timer_disable) {
spifmt |= SPIFMT_DISTIMER_MASK;
} else {
delay |= (spicfg->c2tdelay << SPIDELAY_C2TDELAY_SHIFT)
& SPIDELAY_C2TDELAY_MASK;
delay |= (spicfg->t2cdelay << SPIDELAY_T2CDELAY_SHIFT)
& SPIDELAY_T2CDELAY_MASK;
}
if (spi->mode & SPI_READY) {
spifmt |= SPIFMT_WAITENA_MASK;
delay |= (spicfg->t2edelay << SPIDELAY_T2EDELAY_SHIFT)
& SPIDELAY_T2EDELAY_MASK;
delay |= (spicfg->c2edelay << SPIDELAY_C2EDELAY_SHIFT)
& SPIDELAY_C2EDELAY_MASK;
}
iowrite32(delay, dspi->base + SPIDELAY);
}
iowrite32(spifmt, dspi->base + SPIFMT0);
return 0;
}
static int davinci_spi_of_setup(struct spi_device *spi)
{
struct davinci_spi_config *spicfg = spi->controller_data;
struct device_node *np = spi->dev.of_node;
struct davinci_spi *dspi = spi_controller_get_devdata(spi->controller);
u32 prop;
if (spicfg == NULL && np) {
spicfg = kzalloc(sizeof(*spicfg), GFP_KERNEL);
if (!spicfg)
return -ENOMEM;
*spicfg = davinci_spi_default_cfg;
/* override with dt configured values */
if (!of_property_read_u32(np, "ti,spi-wdelay", &prop))
spicfg->wdelay = (u8)prop;
spi->controller_data = spicfg;
if (dspi->dma_rx && dspi->dma_tx)
spicfg->io_type = SPI_IO_TYPE_DMA;
}
return 0;
}
/**
* davinci_spi_setup - This function will set the default transfer method
* @spi: spi device on which data transfer to be done
*
* This function sets the default transfer method.
*/
static int davinci_spi_setup(struct spi_device *spi)
{
struct davinci_spi *dspi;
struct device_node *np = spi->dev.of_node;
bool internal_cs = true;
dspi = spi_controller_get_devdata(spi->controller);
if (!(spi->mode & SPI_NO_CS)) {
if (np && spi_get_csgpiod(spi, 0))
internal_cs = false;
if (internal_cs)
set_io_bits(dspi->base + SPIPC0, 1 << spi_get_chipselect(spi, 0));
}
if (spi->mode & SPI_READY)
set_io_bits(dspi->base + SPIPC0, SPIPC0_SPIENA_MASK);
if (spi->mode & SPI_LOOP)
set_io_bits(dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK);
else
clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK);
return davinci_spi_of_setup(spi);
}
static void davinci_spi_cleanup(struct spi_device *spi)
{
struct davinci_spi_config *spicfg = spi->controller_data;
spi->controller_data = NULL;
if (spi->dev.of_node)
kfree(spicfg);
}
static bool davinci_spi_can_dma(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct davinci_spi_config *spicfg = spi->controller_data;
bool can_dma = false;
if (spicfg)
can_dma = (spicfg->io_type == SPI_IO_TYPE_DMA) &&
(xfer->len >= DMA_MIN_BYTES) &&
!is_vmalloc_addr(xfer->rx_buf) &&
!is_vmalloc_addr(xfer->tx_buf);
return can_dma;
}
static int davinci_spi_check_error(struct davinci_spi *dspi, int int_status)
{
struct device *sdev = dspi->bitbang.master->dev.parent;
if (int_status & SPIFLG_TIMEOUT_MASK) {
dev_err(sdev, "SPI Time-out Error\n");
return -ETIMEDOUT;
}
if (int_status & SPIFLG_DESYNC_MASK) {
dev_err(sdev, "SPI Desynchronization Error\n");
return -EIO;
}
if (int_status & SPIFLG_BITERR_MASK) {
dev_err(sdev, "SPI Bit error\n");
return -EIO;
}
if (dspi->version == SPI_VERSION_2) {
if (int_status & SPIFLG_DLEN_ERR_MASK) {
dev_err(sdev, "SPI Data Length Error\n");
return -EIO;
}
if (int_status & SPIFLG_PARERR_MASK) {
dev_err(sdev, "SPI Parity Error\n");
return -EIO;
}
if (int_status & SPIFLG_OVRRUN_MASK) {
dev_err(sdev, "SPI Data Overrun error\n");
return -EIO;
}
if (int_status & SPIFLG_BUF_INIT_ACTIVE_MASK) {
dev_err(sdev, "SPI Buffer Init Active\n");
return -EBUSY;
}
}
return 0;
}
/**
* davinci_spi_process_events - check for and handle any SPI controller events
* @dspi: the controller data
*
* This function will check the SPIFLG register and handle any events that are
* detected there
*/
static int davinci_spi_process_events(struct davinci_spi *dspi)
{
u32 buf, status, errors = 0, spidat1;
buf = ioread32(dspi->base + SPIBUF);
if (dspi->rcount > 0 && !(buf & SPIBUF_RXEMPTY_MASK)) {
dspi->get_rx(buf & 0xFFFF, dspi);
dspi->rcount--;
}
status = ioread32(dspi->base + SPIFLG);
if (unlikely(status & SPIFLG_ERROR_MASK)) {
errors = status & SPIFLG_ERROR_MASK;
goto out;
}
if (dspi->wcount > 0 && !(buf & SPIBUF_TXFULL_MASK)) {
spidat1 = ioread32(dspi->base + SPIDAT1);
dspi->wcount--;
spidat1 &= ~0xFFFF;
spidat1 |= 0xFFFF & dspi->get_tx(dspi);
iowrite32(spidat1, dspi->base + SPIDAT1);
}
out:
return errors;
}
static void davinci_spi_dma_rx_callback(void *data)
{
struct davinci_spi *dspi = (struct davinci_spi *)data;
dspi->rcount = 0;
if (!dspi->wcount && !dspi->rcount)
complete(&dspi->done);
}
static void davinci_spi_dma_tx_callback(void *data)
{
struct davinci_spi *dspi = (struct davinci_spi *)data;
dspi->wcount = 0;
if (!dspi->wcount && !dspi->rcount)
complete(&dspi->done);
}
/**
* davinci_spi_bufs - function which handles data transfer
* @spi: spi device on which data transfer to be done
* @t: spi transfer in which transfer info is filled
*
* This function puts the data to be transferred into the data register
* of the SPI controller and then waits until completion is signalled
* by the IRQ handler.
*/
static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
{
struct davinci_spi *dspi;
int data_type, ret = -ENOMEM;
u32 tx_data, spidat1;
u32 errors = 0;
struct davinci_spi_config *spicfg;
struct davinci_spi_platform_data *pdata;
dspi = spi_controller_get_devdata(spi->controller);
pdata = &dspi->pdata;
spicfg = (struct davinci_spi_config *)spi->controller_data;
if (!spicfg)
spicfg = &davinci_spi_default_cfg;
/* convert len to words based on bits_per_word */
data_type = dspi->bytes_per_word[spi_get_chipselect(spi, 0)];
dspi->tx = t->tx_buf;
dspi->rx = t->rx_buf;
dspi->wcount = t->len / data_type;
dspi->rcount = dspi->wcount;
spidat1 = ioread32(dspi->base + SPIDAT1);
clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);
set_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
reinit_completion(&dspi->done);
if (!davinci_spi_can_dma(spi->controller, spi, t)) {
if (spicfg->io_type != SPI_IO_TYPE_POLL)
set_io_bits(dspi->base + SPIINT, SPIINT_MASKINT);
/* start the transfer */
dspi->wcount--;
tx_data = dspi->get_tx(dspi);
spidat1 &= 0xFFFF0000;
spidat1 |= tx_data & 0xFFFF;
iowrite32(spidat1, dspi->base + SPIDAT1);
} else {
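/*
 * DMA path: the slave-config bus width follows the per-word size
 * (1 or 2 bytes), with single-word bursts to/from the SPI FIFO
 * registers.
 */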
struct dma_slave_config dma_rx_conf = {
.direction = DMA_DEV_TO_MEM,
.src_addr = (unsigned long)dspi->pbase + SPIBUF,
.src_addr_width = data_type,
.src_maxburst = 1,
};
struct dma_slave_config dma_tx_conf = {
.direction = DMA_MEM_TO_DEV,
.dst_addr = (unsigned long)dspi->pbase + SPIDAT1,
.dst_addr_width = data_type,
.dst_maxburst = 1,
};
struct dma_async_tx_descriptor *rxdesc;
struct dma_async_tx_descriptor *txdesc;
dmaengine_slave_config(dspi->dma_rx, &dma_rx_conf);
dmaengine_slave_config(dspi->dma_tx, &dma_tx_conf);
rxdesc = dmaengine_prep_slave_sg(dspi->dma_rx,
t->rx_sg.sgl, t->rx_sg.nents, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!rxdesc)
goto err_desc;
if (!t->tx_buf) {
/* To avoid errors when doing rx-only transfers with
* many SG entries (> 20), use the rx buffer as the
* dummy tx buffer so that dma reloads are done at the
* same time for rx and tx.
*/
t->tx_sg.sgl = t->rx_sg.sgl;
t->tx_sg.nents = t->rx_sg.nents;
}
txdesc = dmaengine_prep_slave_sg(dspi->dma_tx,
t->tx_sg.sgl, t->tx_sg.nents, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!txdesc)
goto err_desc;
rxdesc->callback = davinci_spi_dma_rx_callback;
rxdesc->callback_param = (void *)dspi;
txdesc->callback = davinci_spi_dma_tx_callback;
txdesc->callback_param = (void *)dspi;
if (pdata->cshold_bug)
iowrite16(spidat1 >> 16, dspi->base + SPIDAT1 + 2);
dmaengine_submit(rxdesc);
dmaengine_submit(txdesc);
dma_async_issue_pending(dspi->dma_rx);
dma_async_issue_pending(dspi->dma_tx);
set_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN);
}
/* Wait for the transfer to complete */
if (spicfg->io_type != SPI_IO_TYPE_POLL) {
if (wait_for_completion_timeout(&dspi->done, HZ) == 0)
errors = SPIFLG_TIMEOUT_MASK;
} else {
while (dspi->rcount > 0 || dspi->wcount > 0) {
errors = davinci_spi_process_events(dspi);
if (errors)
break;
cpu_relax();
}
}
clear_io_bits(dspi->base + SPIINT, SPIINT_MASKALL);
if (davinci_spi_can_dma(spi->controller, spi, t))
clear_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN);
clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
set_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);
/*
* Check for bit error, desync error, parity error, timeout error and
* receive overflow errors
*/
if (errors) {
ret = davinci_spi_check_error(dspi, errors);
WARN(!ret, "%s: error reported but no error found!\n",
dev_name(&spi->dev));
return ret;
}
if (dspi->rcount != 0 || dspi->wcount != 0) {
dev_err(&spi->dev, "SPI data transfer error\n");
return -EIO;
}
return t->len;
err_desc:
return ret;
}
/**
* dummy_thread_fn - dummy thread function
* @irq: IRQ number for this SPI Master
* @data: structure for SPI Master controller davinci_spi
*
* This is to satisfy the request_threaded_irq() API so that the irq
* handler is called in interrupt context.
*/
static irqreturn_t dummy_thread_fn(s32 irq, void *data)
{
return IRQ_HANDLED;
}
/**
* davinci_spi_irq - Interrupt handler for SPI Master Controller
* @irq: IRQ number for this SPI Master
* @data: structure for SPI Master controller davinci_spi
*
* The ISR determines whether the interrupt arrived for a read or a write
* and takes the appropriate action. It checks the remaining transfer length
* and, if it is not zero, dispatches the next transfer word. When the
* transfer length reaches zero it signals COMPLETION so that the
* davinci_spi_bufs() function can go ahead.
*/
static irqreturn_t davinci_spi_irq(s32 irq, void *data)
{
struct davinci_spi *dspi = data;
int status;
status = davinci_spi_process_events(dspi);
if (unlikely(status != 0))
clear_io_bits(dspi->base + SPIINT, SPIINT_MASKINT);
if ((!dspi->rcount && !dspi->wcount) || status)
complete(&dspi->done);
return IRQ_HANDLED;
}
static int davinci_spi_request_dma(struct davinci_spi *dspi)
{
struct device *sdev = dspi->bitbang.master->dev.parent;
dspi->dma_rx = dma_request_chan(sdev, "rx");
if (IS_ERR(dspi->dma_rx))
return PTR_ERR(dspi->dma_rx);
dspi->dma_tx = dma_request_chan(sdev, "tx");
if (IS_ERR(dspi->dma_tx)) {
dma_release_channel(dspi->dma_rx);
return PTR_ERR(dspi->dma_tx);
}
return 0;
}
#if defined(CONFIG_OF)
/* OF SPI data structure */
struct davinci_spi_of_data {
u8 version;
u8 prescaler_limit;
};
static const struct davinci_spi_of_data dm6441_spi_data = {
.version = SPI_VERSION_1,
.prescaler_limit = 2,
};
static const struct davinci_spi_of_data da830_spi_data = {
.version = SPI_VERSION_2,
.prescaler_limit = 2,
};
static const struct davinci_spi_of_data keystone_spi_data = {
.version = SPI_VERSION_1,
.prescaler_limit = 0,
};
static const struct of_device_id davinci_spi_of_match[] = {
{
.compatible = "ti,dm6441-spi",
.data = &dm6441_spi_data,
},
{
.compatible = "ti,da830-spi",
.data = &da830_spi_data,
},
{
.compatible = "ti,keystone-spi",
.data = &keystone_spi_data,
},
{ },
};
MODULE_DEVICE_TABLE(of, davinci_spi_of_match);
/**
* spi_davinci_get_pdata - Get platform data from DTS binding
* @pdev: ptr to platform data
* @dspi: ptr to driver data
*
* Parses and populates pdata in dspi from device tree bindings.
*
* NOTE: Not all platform data params are supported currently.
*/
static int spi_davinci_get_pdata(struct platform_device *pdev,
struct davinci_spi *dspi)
{
struct device_node *node = pdev->dev.of_node;
const struct davinci_spi_of_data *spi_data;
struct davinci_spi_platform_data *pdata;
unsigned int num_cs, intr_line = 0;
pdata = &dspi->pdata;
spi_data = device_get_match_data(&pdev->dev);
pdata->version = spi_data->version;
pdata->prescaler_limit = spi_data->prescaler_limit;
/*
* The default num_cs is 1, and all chip selects are internal to the
* chip, indicated by chip_sel being NULL or cs_gpios being NULL or
* set to -ENOENT. num-cs includes internal as well as GPIO chip
* selects. GPIO based CS is not supported yet in the DT bindings.
*/
num_cs = 1;
of_property_read_u32(node, "num-cs", &num_cs);
pdata->num_chipselect = num_cs;
of_property_read_u32(node, "ti,davinci-spi-intr-line", &intr_line);
pdata->intr_line = intr_line;
return 0;
}
#else
static int spi_davinci_get_pdata(struct platform_device *pdev,
struct davinci_spi *dspi)
{
return -ENODEV;
}
#endif
/**
* davinci_spi_probe - probe function for SPI Master Controller
* @pdev: platform_device structure which contains platform specific data
*
* According to the Linux device model, this function is invoked by the core
* with a platform_device struct which contains the device specific info.
* This function maps the SPI controller's memory, registers the IRQ,
* resets the SPI controller and sets its registers to default values.
* It then invokes spi_bitbang_start() so that client drivers can submit
* transfers to the controller.
*/
static int davinci_spi_probe(struct platform_device *pdev)
{
struct spi_controller *host;
struct davinci_spi *dspi;
struct davinci_spi_platform_data *pdata;
struct resource *r;
int ret = 0;
u32 spipc0;
host = spi_alloc_host(&pdev->dev, sizeof(struct davinci_spi));
if (host == NULL) {
ret = -ENOMEM;
goto err;
}
platform_set_drvdata(pdev, host);
dspi = spi_controller_get_devdata(host);
if (dev_get_platdata(&pdev->dev)) {
pdata = dev_get_platdata(&pdev->dev);
dspi->pdata = *pdata;
} else {
/* update dspi pdata with that from the DT */
ret = spi_davinci_get_pdata(pdev, dspi);
if (ret < 0)
goto free_host;
}
/* pdata in dspi is now updated; point the local pdata at it */
pdata = &dspi->pdata;
dspi->bytes_per_word = devm_kcalloc(&pdev->dev,
pdata->num_chipselect,
sizeof(*dspi->bytes_per_word),
GFP_KERNEL);
if (dspi->bytes_per_word == NULL) {
ret = -ENOMEM;
goto free_host;
}
dspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
if (IS_ERR(dspi->base)) {
ret = PTR_ERR(dspi->base);
goto free_host;
}
dspi->pbase = r->start;
init_completion(&dspi->done);
ret = platform_get_irq(pdev, 0);
if (ret < 0)
goto free_host;
dspi->irq = ret;
ret = devm_request_threaded_irq(&pdev->dev, dspi->irq, davinci_spi_irq,
dummy_thread_fn, 0, dev_name(&pdev->dev), dspi);
if (ret)
goto free_host;
dspi->bitbang.master = host;
dspi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(dspi->clk)) {
ret = -ENODEV;
goto free_host;
}
ret = clk_prepare_enable(dspi->clk);
if (ret)
goto free_host;
host->use_gpio_descriptors = true;
host->dev.of_node = pdev->dev.of_node;
host->bus_num = pdev->id;
host->num_chipselect = pdata->num_chipselect;
host->bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 16);
host->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_GPIO_SS;
host->setup = davinci_spi_setup;
host->cleanup = davinci_spi_cleanup;
host->can_dma = davinci_spi_can_dma;
dspi->bitbang.chipselect = davinci_spi_chipselect;
dspi->bitbang.setup_transfer = davinci_spi_setup_transfer;
dspi->prescaler_limit = pdata->prescaler_limit;
dspi->version = pdata->version;
dspi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP | SPI_CS_WORD;
if (dspi->version == SPI_VERSION_2)
dspi->bitbang.flags |= SPI_READY;
dspi->bitbang.txrx_bufs = davinci_spi_bufs;
ret = davinci_spi_request_dma(dspi);
if (ret == -EPROBE_DEFER) {
goto free_clk;
} else if (ret) {
dev_info(&pdev->dev, "DMA is not supported (%d)\n", ret);
dspi->dma_rx = NULL;
dspi->dma_tx = NULL;
}
dspi->get_rx = davinci_spi_rx_buf_u8;
dspi->get_tx = davinci_spi_tx_buf_u8;
/* Reset In/OUT SPI module */
iowrite32(0, dspi->base + SPIGCR0);
udelay(100);
iowrite32(1, dspi->base + SPIGCR0);
/* Set up SPIPC0. CS and ENA init is done in davinci_spi_setup */
spipc0 = SPIPC0_DIFUN_MASK | SPIPC0_DOFUN_MASK | SPIPC0_CLKFUN_MASK;
iowrite32(spipc0, dspi->base + SPIPC0);
if (pdata->intr_line)
iowrite32(SPI_INTLVL_1, dspi->base + SPILVL);
else
iowrite32(SPI_INTLVL_0, dspi->base + SPILVL);
iowrite32(CS_DEFAULT, dspi->base + SPIDEF);
/* host mode default */
set_io_bits(dspi->base + SPIGCR1, SPIGCR1_CLKMOD_MASK);
set_io_bits(dspi->base + SPIGCR1, SPIGCR1_MASTER_MASK);
set_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);
ret = spi_bitbang_start(&dspi->bitbang);
if (ret)
goto free_dma;
dev_info(&pdev->dev, "Controller at 0x%p\n", dspi->base);
return ret;
free_dma:
if (dspi->dma_rx) {
dma_release_channel(dspi->dma_rx);
dma_release_channel(dspi->dma_tx);
}
free_clk:
clk_disable_unprepare(dspi->clk);
free_host:
spi_controller_put(host);
err:
return ret;
}
/**
* davinci_spi_remove - remove function for SPI Master Controller
* @pdev: platform_device structure which contains platform specific data
*
* This function reverses the actions of davinci_spi_probe().
* It frees the IRQ and the SPI controller's memory region.
* It also calls spi_bitbang_stop() to undo the registration done by
* spi_bitbang_start().
*/
static void davinci_spi_remove(struct platform_device *pdev)
{
struct davinci_spi *dspi;
struct spi_controller *host;
host = platform_get_drvdata(pdev);
dspi = spi_controller_get_devdata(host);
spi_bitbang_stop(&dspi->bitbang);
clk_disable_unprepare(dspi->clk);
if (dspi->dma_rx) {
dma_release_channel(dspi->dma_rx);
dma_release_channel(dspi->dma_tx);
}
spi_controller_put(host);
}
static struct platform_driver davinci_spi_driver = {
.driver = {
.name = "spi_davinci",
.of_match_table = of_match_ptr(davinci_spi_of_match),
},
.probe = davinci_spi_probe,
.remove_new = davinci_spi_remove,
};
module_platform_driver(davinci_spi_driver);
MODULE_DESCRIPTION("TI DaVinci SPI Master Controller Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-davinci.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* IMG SPFI controller driver
*
* Copyright (C) 2007,2008,2013 Imagination Technologies Ltd.
* Copyright (C) 2014 Google, Inc.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spinlock.h>
#define SPFI_DEVICE_PARAMETER(x) (0x00 + 0x4 * (x))
#define SPFI_DEVICE_PARAMETER_BITCLK_SHIFT 24
#define SPFI_DEVICE_PARAMETER_BITCLK_MASK 0xff
#define SPFI_DEVICE_PARAMETER_CSSETUP_SHIFT 16
#define SPFI_DEVICE_PARAMETER_CSSETUP_MASK 0xff
#define SPFI_DEVICE_PARAMETER_CSHOLD_SHIFT 8
#define SPFI_DEVICE_PARAMETER_CSHOLD_MASK 0xff
#define SPFI_DEVICE_PARAMETER_CSDELAY_SHIFT 0
#define SPFI_DEVICE_PARAMETER_CSDELAY_MASK 0xff
#define SPFI_CONTROL 0x14
#define SPFI_CONTROL_CONTINUE BIT(12)
#define SPFI_CONTROL_SOFT_RESET BIT(11)
#define SPFI_CONTROL_SEND_DMA BIT(10)
#define SPFI_CONTROL_GET_DMA BIT(9)
#define SPFI_CONTROL_SE BIT(8)
#define SPFI_CONTROL_TMODE_SHIFT 5
#define SPFI_CONTROL_TMODE_MASK 0x7
#define SPFI_CONTROL_TMODE_SINGLE 0
#define SPFI_CONTROL_TMODE_DUAL 1
#define SPFI_CONTROL_TMODE_QUAD 2
#define SPFI_CONTROL_SPFI_EN BIT(0)
#define SPFI_TRANSACTION 0x18
#define SPFI_TRANSACTION_TSIZE_SHIFT 16
#define SPFI_TRANSACTION_TSIZE_MASK 0xffff
#define SPFI_PORT_STATE 0x1c
#define SPFI_PORT_STATE_DEV_SEL_SHIFT 20
#define SPFI_PORT_STATE_DEV_SEL_MASK 0x7
#define SPFI_PORT_STATE_CK_POL(x) BIT(19 - (x))
#define SPFI_PORT_STATE_CK_PHASE(x) BIT(14 - (x))
#define SPFI_TX_32BIT_VALID_DATA 0x20
#define SPFI_TX_8BIT_VALID_DATA 0x24
#define SPFI_RX_32BIT_VALID_DATA 0x28
#define SPFI_RX_8BIT_VALID_DATA 0x2c
#define SPFI_INTERRUPT_STATUS 0x30
#define SPFI_INTERRUPT_ENABLE 0x34
#define SPFI_INTERRUPT_CLEAR 0x38
#define SPFI_INTERRUPT_IACCESS BIT(12)
#define SPFI_INTERRUPT_GDEX8BIT BIT(11)
#define SPFI_INTERRUPT_ALLDONETRIG BIT(9)
#define SPFI_INTERRUPT_GDFUL BIT(8)
#define SPFI_INTERRUPT_GDHF BIT(7)
#define SPFI_INTERRUPT_GDEX32BIT BIT(6)
#define SPFI_INTERRUPT_GDTRIG BIT(5)
#define SPFI_INTERRUPT_SDFUL BIT(3)
#define SPFI_INTERRUPT_SDHF BIT(2)
#define SPFI_INTERRUPT_SDE BIT(1)
#define SPFI_INTERRUPT_SDTRIG BIT(0)
/*
* There are four parallel FIFOs of 16 bytes each. The word buffer
* (*_32BIT_VALID_DATA) accesses all four FIFOs at once, resulting in an
* effective FIFO size of 64 bytes. The byte buffer (*_8BIT_VALID_DATA)
* accesses only a single FIFO, resulting in an effective FIFO size of
* 16 bytes.
*/
#define SPFI_32BIT_FIFO_SIZE 64
#define SPFI_8BIT_FIFO_SIZE 16
struct img_spfi {
struct device *dev;
struct spi_controller *host;
spinlock_t lock;
void __iomem *regs;
phys_addr_t phys;
int irq;
struct clk *spfi_clk;
struct clk *sys_clk;
struct dma_chan *rx_ch;
struct dma_chan *tx_ch;
bool tx_dma_busy;
bool rx_dma_busy;
};
static inline u32 spfi_readl(struct img_spfi *spfi, u32 reg)
{
return readl(spfi->regs + reg);
}
static inline void spfi_writel(struct img_spfi *spfi, u32 val, u32 reg)
{
writel(val, spfi->regs + reg);
}
static inline void spfi_start(struct img_spfi *spfi)
{
u32 val;
val = spfi_readl(spfi, SPFI_CONTROL);
val |= SPFI_CONTROL_SPFI_EN;
spfi_writel(spfi, val, SPFI_CONTROL);
}
static inline void spfi_reset(struct img_spfi *spfi)
{
spfi_writel(spfi, SPFI_CONTROL_SOFT_RESET, SPFI_CONTROL);
spfi_writel(spfi, 0, SPFI_CONTROL);
}
static int spfi_wait_all_done(struct img_spfi *spfi)
{
unsigned long timeout = jiffies + msecs_to_jiffies(50);
while (time_before(jiffies, timeout)) {
u32 status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
if (status & SPFI_INTERRUPT_ALLDONETRIG) {
spfi_writel(spfi, SPFI_INTERRUPT_ALLDONETRIG,
SPFI_INTERRUPT_CLEAR);
return 0;
}
cpu_relax();
}
dev_err(spfi->dev, "Timed out waiting for transaction to complete\n");
spfi_reset(spfi);
return -ETIMEDOUT;
}
static unsigned int spfi_pio_write32(struct img_spfi *spfi, const u32 *buf,
unsigned int max)
{
unsigned int count = 0;
u32 status;
while (count < max / 4) {
spfi_writel(spfi, SPFI_INTERRUPT_SDFUL, SPFI_INTERRUPT_CLEAR);
status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
if (status & SPFI_INTERRUPT_SDFUL)
break;
spfi_writel(spfi, buf[count], SPFI_TX_32BIT_VALID_DATA);
count++;
}
return count * 4;
}
static unsigned int spfi_pio_write8(struct img_spfi *spfi, const u8 *buf,
unsigned int max)
{
unsigned int count = 0;
u32 status;
while (count < max) {
spfi_writel(spfi, SPFI_INTERRUPT_SDFUL, SPFI_INTERRUPT_CLEAR);
status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
if (status & SPFI_INTERRUPT_SDFUL)
break;
spfi_writel(spfi, buf[count], SPFI_TX_8BIT_VALID_DATA);
count++;
}
return count;
}
static unsigned int spfi_pio_read32(struct img_spfi *spfi, u32 *buf,
unsigned int max)
{
unsigned int count = 0;
u32 status;
while (count < max / 4) {
spfi_writel(spfi, SPFI_INTERRUPT_GDEX32BIT,
SPFI_INTERRUPT_CLEAR);
status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
if (!(status & SPFI_INTERRUPT_GDEX32BIT))
break;
buf[count] = spfi_readl(spfi, SPFI_RX_32BIT_VALID_DATA);
count++;
}
return count * 4;
}
static unsigned int spfi_pio_read8(struct img_spfi *spfi, u8 *buf,
unsigned int max)
{
unsigned int count = 0;
u32 status;
while (count < max) {
spfi_writel(spfi, SPFI_INTERRUPT_GDEX8BIT,
SPFI_INTERRUPT_CLEAR);
status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
if (!(status & SPFI_INTERRUPT_GDEX8BIT))
break;
buf[count] = spfi_readl(spfi, SPFI_RX_8BIT_VALID_DATA);
count++;
}
return count;
}
static int img_spfi_start_pio(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct img_spfi *spfi = spi_controller_get_devdata(spi->controller);
unsigned int tx_bytes = 0, rx_bytes = 0;
const void *tx_buf = xfer->tx_buf;
void *rx_buf = xfer->rx_buf;
unsigned long timeout;
int ret;
if (tx_buf)
tx_bytes = xfer->len;
if (rx_buf)
rx_bytes = xfer->len;
spfi_start(spfi);
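/* Budget roughly 8 bit-times per byte at the transfer speed, plus a 100 ms margin. */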
timeout = jiffies +
msecs_to_jiffies(xfer->len * 8 * 1000 / xfer->speed_hz + 100);
while ((tx_bytes > 0 || rx_bytes > 0) &&
time_before(jiffies, timeout)) {
unsigned int tx_count, rx_count;
if (tx_bytes >= 4)
tx_count = spfi_pio_write32(spfi, tx_buf, tx_bytes);
else
tx_count = spfi_pio_write8(spfi, tx_buf, tx_bytes);
if (rx_bytes >= 4)
rx_count = spfi_pio_read32(spfi, rx_buf, rx_bytes);
else
rx_count = spfi_pio_read8(spfi, rx_buf, rx_bytes);
tx_buf += tx_count;
rx_buf += rx_count;
tx_bytes -= tx_count;
rx_bytes -= rx_count;
cpu_relax();
}
if (rx_bytes > 0 || tx_bytes > 0) {
dev_err(spfi->dev, "PIO transfer timed out\n");
return -ETIMEDOUT;
}
ret = spfi_wait_all_done(spfi);
if (ret < 0)
return ret;
return 0;
}
static void img_spfi_dma_rx_cb(void *data)
{
struct img_spfi *spfi = data;
unsigned long flags;
spfi_wait_all_done(spfi);
spin_lock_irqsave(&spfi->lock, flags);
spfi->rx_dma_busy = false;
if (!spfi->tx_dma_busy)
spi_finalize_current_transfer(spfi->host);
spin_unlock_irqrestore(&spfi->lock, flags);
}
static void img_spfi_dma_tx_cb(void *data)
{
struct img_spfi *spfi = data;
unsigned long flags;
spfi_wait_all_done(spfi);
spin_lock_irqsave(&spfi->lock, flags);
spfi->tx_dma_busy = false;
if (!spfi->rx_dma_busy)
spi_finalize_current_transfer(spfi->host);
spin_unlock_irqrestore(&spfi->lock, flags);
}
static int img_spfi_start_dma(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct img_spfi *spfi = spi_controller_get_devdata(spi->controller);
struct dma_async_tx_descriptor *rxdesc = NULL, *txdesc = NULL;
struct dma_slave_config rxconf, txconf;
spfi->rx_dma_busy = false;
spfi->tx_dma_busy = false;
if (xfer->rx_buf) {
rxconf.direction = DMA_DEV_TO_MEM;
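/*
 * Word-aligned transfer lengths use the 32-bit wide FIFO register;
 * other lengths fall back to the byte-wide register.
 */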
if (xfer->len % 4 == 0) {
rxconf.src_addr = spfi->phys + SPFI_RX_32BIT_VALID_DATA;
rxconf.src_addr_width = 4;
rxconf.src_maxburst = 4;
} else {
rxconf.src_addr = spfi->phys + SPFI_RX_8BIT_VALID_DATA;
rxconf.src_addr_width = 1;
rxconf.src_maxburst = 4;
}
dmaengine_slave_config(spfi->rx_ch, &rxconf);
rxdesc = dmaengine_prep_slave_sg(spfi->rx_ch, xfer->rx_sg.sgl,
xfer->rx_sg.nents,
DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT);
if (!rxdesc)
goto stop_dma;
rxdesc->callback = img_spfi_dma_rx_cb;
rxdesc->callback_param = spfi;
}
if (xfer->tx_buf) {
txconf.direction = DMA_MEM_TO_DEV;
if (xfer->len % 4 == 0) {
txconf.dst_addr = spfi->phys + SPFI_TX_32BIT_VALID_DATA;
txconf.dst_addr_width = 4;
txconf.dst_maxburst = 4;
} else {
txconf.dst_addr = spfi->phys + SPFI_TX_8BIT_VALID_DATA;
txconf.dst_addr_width = 1;
txconf.dst_maxburst = 4;
}
dmaengine_slave_config(spfi->tx_ch, &txconf);
txdesc = dmaengine_prep_slave_sg(spfi->tx_ch, xfer->tx_sg.sgl,
xfer->tx_sg.nents,
DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT);
if (!txdesc)
goto stop_dma;
txdesc->callback = img_spfi_dma_tx_cb;
txdesc->callback_param = spfi;
}
if (xfer->rx_buf) {
spfi->rx_dma_busy = true;
dmaengine_submit(rxdesc);
dma_async_issue_pending(spfi->rx_ch);
}
spfi_start(spfi);
if (xfer->tx_buf) {
spfi->tx_dma_busy = true;
dmaengine_submit(txdesc);
dma_async_issue_pending(spfi->tx_ch);
}
return 1;
stop_dma:
dmaengine_terminate_all(spfi->rx_ch);
dmaengine_terminate_all(spfi->tx_ch);
return -EIO;
}
static void img_spfi_handle_err(struct spi_controller *host,
struct spi_message *msg)
{
struct img_spfi *spfi = spi_controller_get_devdata(host);
unsigned long flags;
/*
* Stop all DMA and reset the controller if the previous transaction
* timed out and never completed its DMA.
*/
spin_lock_irqsave(&spfi->lock, flags);
if (spfi->tx_dma_busy || spfi->rx_dma_busy) {
spfi->tx_dma_busy = false;
spfi->rx_dma_busy = false;
dmaengine_terminate_all(spfi->tx_ch);
dmaengine_terminate_all(spfi->rx_ch);
}
spin_unlock_irqrestore(&spfi->lock, flags);
}
static int img_spfi_prepare(struct spi_controller *host, struct spi_message *msg)
{
struct img_spfi *spfi = spi_controller_get_devdata(host);
u32 val;
val = spfi_readl(spfi, SPFI_PORT_STATE);
val &= ~(SPFI_PORT_STATE_DEV_SEL_MASK <<
SPFI_PORT_STATE_DEV_SEL_SHIFT);
val |= spi_get_chipselect(msg->spi, 0) << SPFI_PORT_STATE_DEV_SEL_SHIFT;
if (msg->spi->mode & SPI_CPHA)
val |= SPFI_PORT_STATE_CK_PHASE(spi_get_chipselect(msg->spi, 0));
else
val &= ~SPFI_PORT_STATE_CK_PHASE(spi_get_chipselect(msg->spi, 0));
if (msg->spi->mode & SPI_CPOL)
val |= SPFI_PORT_STATE_CK_POL(spi_get_chipselect(msg->spi, 0));
else
val &= ~SPFI_PORT_STATE_CK_POL(spi_get_chipselect(msg->spi, 0));
spfi_writel(spfi, val, SPFI_PORT_STATE);
return 0;
}
static int img_spfi_unprepare(struct spi_controller *host,
struct spi_message *msg)
{
struct img_spfi *spfi = spi_controller_get_devdata(host);
spfi_reset(spfi);
return 0;
}
static void img_spfi_config(struct spi_controller *host, struct spi_device *spi,
struct spi_transfer *xfer)
{
struct img_spfi *spfi = spi_controller_get_devdata(spi->controller);
u32 val, div;
/*
* output = spfi_clk * (BITCLK / 512), where BITCLK must be a
* power of 2 up to 128
*/
div = DIV_ROUND_UP(clk_get_rate(spfi->spfi_clk), xfer->speed_hz);
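/*
 * Round the divider up to the next power of two and convert it to the
 * BITCLK field value (512 / 2^n), clamped to the valid 1..128 range.
 */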
div = clamp(512 / (1 << get_count_order(div)), 1, 128);
val = spfi_readl(spfi, SPFI_DEVICE_PARAMETER(spi_get_chipselect(spi, 0)));
val &= ~(SPFI_DEVICE_PARAMETER_BITCLK_MASK <<
SPFI_DEVICE_PARAMETER_BITCLK_SHIFT);
val |= div << SPFI_DEVICE_PARAMETER_BITCLK_SHIFT;
spfi_writel(spfi, val, SPFI_DEVICE_PARAMETER(spi_get_chipselect(spi, 0)));
spfi_writel(spfi, xfer->len << SPFI_TRANSACTION_TSIZE_SHIFT,
SPFI_TRANSACTION);
val = spfi_readl(spfi, SPFI_CONTROL);
val &= ~(SPFI_CONTROL_SEND_DMA | SPFI_CONTROL_GET_DMA);
if (xfer->tx_buf)
val |= SPFI_CONTROL_SEND_DMA;
if (xfer->rx_buf)
val |= SPFI_CONTROL_GET_DMA;
val &= ~(SPFI_CONTROL_TMODE_MASK << SPFI_CONTROL_TMODE_SHIFT);
if (xfer->tx_nbits == SPI_NBITS_DUAL &&
xfer->rx_nbits == SPI_NBITS_DUAL)
val |= SPFI_CONTROL_TMODE_DUAL << SPFI_CONTROL_TMODE_SHIFT;
else if (xfer->tx_nbits == SPI_NBITS_QUAD &&
xfer->rx_nbits == SPI_NBITS_QUAD)
val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT;
val |= SPFI_CONTROL_SE;
spfi_writel(spfi, val, SPFI_CONTROL);
}
static int img_spfi_transfer_one(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct img_spfi *spfi = spi_controller_get_devdata(spi->controller);
int ret;
if (xfer->len > SPFI_TRANSACTION_TSIZE_MASK) {
dev_err(spfi->dev,
"Transfer length (%d) is greater than the max supported (%d)",
xfer->len, SPFI_TRANSACTION_TSIZE_MASK);
return -EINVAL;
}
img_spfi_config(host, spi, xfer);
if (host->can_dma && host->can_dma(host, spi, xfer))
ret = img_spfi_start_dma(host, spi, xfer);
else
ret = img_spfi_start_pio(host, spi, xfer);
return ret;
}
static bool img_spfi_can_dma(struct spi_controller *host, struct spi_device *spi,
struct spi_transfer *xfer)
{
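/* Only use DMA when the transfer does not fit in the 64-byte FIFO; smaller transfers are done via PIO. */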
if (xfer->len > SPFI_32BIT_FIFO_SIZE)
return true;
return false;
}
static irqreturn_t img_spfi_irq(int irq, void *dev_id)
{
struct img_spfi *spfi = (struct img_spfi *)dev_id;
u32 status;
status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
if (status & SPFI_INTERRUPT_IACCESS) {
spfi_writel(spfi, SPFI_INTERRUPT_IACCESS, SPFI_INTERRUPT_CLEAR);
dev_err(spfi->dev, "Illegal access interrupt");
return IRQ_HANDLED;
}
return IRQ_NONE;
}
static int img_spfi_probe(struct platform_device *pdev)
{
struct spi_controller *host;
struct img_spfi *spfi;
struct resource *res;
int ret;
u32 max_speed_hz;
host = spi_alloc_host(&pdev->dev, sizeof(*spfi));
if (!host)
return -ENOMEM;
platform_set_drvdata(pdev, host);
spfi = spi_controller_get_devdata(host);
spfi->dev = &pdev->dev;
spfi->host = host;
spin_lock_init(&spfi->lock);
spfi->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(spfi->regs)) {
ret = PTR_ERR(spfi->regs);
goto put_spi;
}
spfi->phys = res->start;
spfi->irq = platform_get_irq(pdev, 0);
if (spfi->irq < 0) {
ret = spfi->irq;
goto put_spi;
}
ret = devm_request_irq(spfi->dev, spfi->irq, img_spfi_irq,
IRQ_TYPE_LEVEL_HIGH, dev_name(spfi->dev), spfi);
if (ret)
goto put_spi;
spfi->sys_clk = devm_clk_get(spfi->dev, "sys");
if (IS_ERR(spfi->sys_clk)) {
ret = PTR_ERR(spfi->sys_clk);
goto put_spi;
}
spfi->spfi_clk = devm_clk_get(spfi->dev, "spfi");
if (IS_ERR(spfi->spfi_clk)) {
ret = PTR_ERR(spfi->spfi_clk);
goto put_spi;
}
ret = clk_prepare_enable(spfi->sys_clk);
if (ret)
goto put_spi;
ret = clk_prepare_enable(spfi->spfi_clk);
if (ret)
goto disable_pclk;
spfi_reset(spfi);
/*
* Only enable the error (IACCESS) interrupt. In PIO mode we'll
* poll the status of the FIFOs.
*/
spfi_writel(spfi, SPFI_INTERRUPT_IACCESS, SPFI_INTERRUPT_ENABLE);
host->auto_runtime_pm = true;
host->bus_num = pdev->id;
host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_DUAL | SPI_RX_DUAL;
if (of_property_read_bool(spfi->dev->of_node, "img,supports-quad-mode"))
host->mode_bits |= SPI_TX_QUAD | SPI_RX_QUAD;
host->dev.of_node = pdev->dev.of_node;
host->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(8);
host->max_speed_hz = clk_get_rate(spfi->spfi_clk) / 4;
host->min_speed_hz = clk_get_rate(spfi->spfi_clk) / 512;
/*
* The maximum speed supported by spfi is limited to the lower of
* 1/4 of the SPFI clock and the "spfi-max-frequency" value
* defined in the device tree.
* If no value is defined in the device tree, assume the maximum
* supported speed to be 1/4 of the SPFI clock.
*/
if (!of_property_read_u32(spfi->dev->of_node, "spfi-max-frequency",
&max_speed_hz)) {
if (host->max_speed_hz > max_speed_hz)
host->max_speed_hz = max_speed_hz;
}
host->transfer_one = img_spfi_transfer_one;
host->prepare_message = img_spfi_prepare;
host->unprepare_message = img_spfi_unprepare;
host->handle_err = img_spfi_handle_err;
host->use_gpio_descriptors = true;
spfi->tx_ch = dma_request_chan(spfi->dev, "tx");
if (IS_ERR(spfi->tx_ch)) {
ret = PTR_ERR(spfi->tx_ch);
spfi->tx_ch = NULL;
if (ret == -EPROBE_DEFER)
goto disable_pm;
}
spfi->rx_ch = dma_request_chan(spfi->dev, "rx");
if (IS_ERR(spfi->rx_ch)) {
ret = PTR_ERR(spfi->rx_ch);
spfi->rx_ch = NULL;
if (ret == -EPROBE_DEFER)
goto disable_pm;
}
if (!spfi->tx_ch || !spfi->rx_ch) {
if (spfi->tx_ch)
dma_release_channel(spfi->tx_ch);
if (spfi->rx_ch)
dma_release_channel(spfi->rx_ch);
spfi->tx_ch = NULL;
spfi->rx_ch = NULL;
dev_warn(spfi->dev, "Failed to get DMA channels, falling back to PIO mode\n");
} else {
host->dma_tx = spfi->tx_ch;
host->dma_rx = spfi->rx_ch;
host->can_dma = img_spfi_can_dma;
}
pm_runtime_set_active(spfi->dev);
pm_runtime_enable(spfi->dev);
ret = devm_spi_register_controller(spfi->dev, host);
if (ret)
goto disable_pm;
return 0;
disable_pm:
pm_runtime_disable(spfi->dev);
if (spfi->rx_ch)
dma_release_channel(spfi->rx_ch);
if (spfi->tx_ch)
dma_release_channel(spfi->tx_ch);
clk_disable_unprepare(spfi->spfi_clk);
disable_pclk:
clk_disable_unprepare(spfi->sys_clk);
put_spi:
spi_controller_put(host);
return ret;
}
static void img_spfi_remove(struct platform_device *pdev)
{
struct spi_controller *host = platform_get_drvdata(pdev);
struct img_spfi *spfi = spi_controller_get_devdata(host);
if (spfi->tx_ch)
dma_release_channel(spfi->tx_ch);
if (spfi->rx_ch)
dma_release_channel(spfi->rx_ch);
pm_runtime_disable(spfi->dev);
if (!pm_runtime_status_suspended(spfi->dev)) {
clk_disable_unprepare(spfi->spfi_clk);
clk_disable_unprepare(spfi->sys_clk);
}
}
#ifdef CONFIG_PM
static int img_spfi_runtime_suspend(struct device *dev)
{
struct spi_controller *host = dev_get_drvdata(dev);
struct img_spfi *spfi = spi_controller_get_devdata(host);
clk_disable_unprepare(spfi->spfi_clk);
clk_disable_unprepare(spfi->sys_clk);
return 0;
}
static int img_spfi_runtime_resume(struct device *dev)
{
struct spi_controller *host = dev_get_drvdata(dev);
struct img_spfi *spfi = spi_controller_get_devdata(host);
int ret;
ret = clk_prepare_enable(spfi->sys_clk);
if (ret)
return ret;
ret = clk_prepare_enable(spfi->spfi_clk);
if (ret) {
clk_disable_unprepare(spfi->sys_clk);
return ret;
}
return 0;
}
#endif /* CONFIG_PM */
#ifdef CONFIG_PM_SLEEP
static int img_spfi_suspend(struct device *dev)
{
struct spi_controller *host = dev_get_drvdata(dev);
return spi_controller_suspend(host);
}
static int img_spfi_resume(struct device *dev)
{
struct spi_controller *host = dev_get_drvdata(dev);
struct img_spfi *spfi = spi_controller_get_devdata(host);
int ret;
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return ret;
spfi_reset(spfi);
pm_runtime_put(dev);
return spi_controller_resume(host);
}
#endif /* CONFIG_PM_SLEEP */
static const struct dev_pm_ops img_spfi_pm_ops = {
SET_RUNTIME_PM_OPS(img_spfi_runtime_suspend, img_spfi_runtime_resume,
NULL)
SET_SYSTEM_SLEEP_PM_OPS(img_spfi_suspend, img_spfi_resume)
};
static const struct of_device_id img_spfi_of_match[] = {
{ .compatible = "img,spfi", },
{ },
};
MODULE_DEVICE_TABLE(of, img_spfi_of_match);
static struct platform_driver img_spfi_driver = {
.driver = {
.name = "img-spfi",
.pm = &img_spfi_pm_ops,
.of_match_table = of_match_ptr(img_spfi_of_match),
},
.probe = img_spfi_probe,
.remove_new = img_spfi_remove,
};
module_platform_driver(img_spfi_driver);
MODULE_DESCRIPTION("IMG SPFI controller driver");
MODULE_AUTHOR("Andrew Bresticker <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/spi/spi-img-spfi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
* Author: Addy Ke <[email protected]>
*/
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#define DRIVER_NAME "rockchip-spi"
#define ROCKCHIP_SPI_CLR_BITS(reg, bits) \
writel_relaxed(readl_relaxed(reg) & ~(bits), reg)
#define ROCKCHIP_SPI_SET_BITS(reg, bits) \
writel_relaxed(readl_relaxed(reg) | (bits), reg)
/* SPI register offsets */
#define ROCKCHIP_SPI_CTRLR0 0x0000
#define ROCKCHIP_SPI_CTRLR1 0x0004
#define ROCKCHIP_SPI_SSIENR 0x0008
#define ROCKCHIP_SPI_SER 0x000c
#define ROCKCHIP_SPI_BAUDR 0x0010
#define ROCKCHIP_SPI_TXFTLR 0x0014
#define ROCKCHIP_SPI_RXFTLR 0x0018
#define ROCKCHIP_SPI_TXFLR 0x001c
#define ROCKCHIP_SPI_RXFLR 0x0020
#define ROCKCHIP_SPI_SR 0x0024
#define ROCKCHIP_SPI_IPR 0x0028
#define ROCKCHIP_SPI_IMR 0x002c
#define ROCKCHIP_SPI_ISR 0x0030
#define ROCKCHIP_SPI_RISR 0x0034
#define ROCKCHIP_SPI_ICR 0x0038
#define ROCKCHIP_SPI_DMACR 0x003c
#define ROCKCHIP_SPI_DMATDLR 0x0040
#define ROCKCHIP_SPI_DMARDLR 0x0044
#define ROCKCHIP_SPI_VERSION 0x0048
#define ROCKCHIP_SPI_TXDR 0x0400
#define ROCKCHIP_SPI_RXDR 0x0800
/* Bit fields in CTRLR0 */
#define CR0_DFS_OFFSET 0
#define CR0_DFS_4BIT 0x0
#define CR0_DFS_8BIT 0x1
#define CR0_DFS_16BIT 0x2
#define CR0_CFS_OFFSET 2
#define CR0_SCPH_OFFSET 6
#define CR0_SCPOL_OFFSET 7
#define CR0_CSM_OFFSET 8
#define CR0_CSM_KEEP 0x0
/* ss_n is held high for half an sclk_out cycle */
#define CR0_CSM_HALF 0x1
/* ss_n is held high for one sclk_out cycle */
#define CR0_CSM_ONE 0x2
/* ss_n to sclk_out delay */
#define CR0_SSD_OFFSET 10
/*
* The period between ss_n active and
* sclk_out active is half an sclk_out cycle
*/
#define CR0_SSD_HALF 0x0
/*
* The period between ss_n active and
* sclk_out active is one sclk_out cycle
*/
#define CR0_SSD_ONE 0x1
#define CR0_EM_OFFSET 11
#define CR0_EM_LITTLE 0x0
#define CR0_EM_BIG 0x1
#define CR0_FBM_OFFSET 12
#define CR0_FBM_MSB 0x0
#define CR0_FBM_LSB 0x1
#define CR0_BHT_OFFSET 13
#define CR0_BHT_16BIT 0x0
#define CR0_BHT_8BIT 0x1
#define CR0_RSD_OFFSET 14
#define CR0_RSD_MAX 0x3
#define CR0_FRF_OFFSET 16
#define CR0_FRF_SPI 0x0
#define CR0_FRF_SSP 0x1
#define CR0_FRF_MICROWIRE 0x2
#define CR0_XFM_OFFSET 18
#define CR0_XFM_MASK (0x03 << CR0_XFM_OFFSET)
#define CR0_XFM_TR 0x0
#define CR0_XFM_TO 0x1
#define CR0_XFM_RO 0x2
#define CR0_OPM_OFFSET 20
#define CR0_OPM_HOST 0x0
#define CR0_OPM_TARGET 0x1
#define CR0_SOI_OFFSET 23
#define CR0_MTM_OFFSET 0x21
/* Bit fields in SER, 2bit */
#define SER_MASK 0x3
/* Bit fields in BAUDR */
#define BAUDR_SCKDV_MIN 2
#define BAUDR_SCKDV_MAX 65534
/* Bit fields in SR, 6bit */
#define SR_MASK 0x3f
#define SR_BUSY (1 << 0)
#define SR_TF_FULL (1 << 1)
#define SR_TF_EMPTY (1 << 2)
#define SR_RF_EMPTY (1 << 3)
#define SR_RF_FULL (1 << 4)
#define SR_TARGET_TX_BUSY (1 << 5)
/* Bit fields in ISR, IMR, ISR, RISR, 5bit */
#define INT_MASK 0x1f
#define INT_TF_EMPTY (1 << 0)
#define INT_TF_OVERFLOW (1 << 1)
#define INT_RF_UNDERFLOW (1 << 2)
#define INT_RF_OVERFLOW (1 << 3)
#define INT_RF_FULL (1 << 4)
#define INT_CS_INACTIVE (1 << 6)
/* Bit fields in ICR, 4bit */
#define ICR_MASK 0x0f
#define ICR_ALL (1 << 0)
#define ICR_RF_UNDERFLOW (1 << 1)
#define ICR_RF_OVERFLOW (1 << 2)
#define ICR_TF_OVERFLOW (1 << 3)
/* Bit fields in DMACR */
#define RF_DMA_EN (1 << 0)
#define TF_DMA_EN (1 << 1)
/* Driver state flags */
#define RXDMA (1 << 0)
#define TXDMA (1 << 1)
/* sclk_out: spi host internal logic in rk3x can support 50 MHz */
#define MAX_SCLK_OUT 50000000U
/*
* SPI_CTRLR1 is 16-bits, so we should support lengths of 0xffff + 1. However,
* the controller seems to hang when given 0x10000, so stick with this for now.
*/
#define ROCKCHIP_SPI_MAX_TRANLEN 0xffff
/* 2 for native cs, 2 for cs-gpio */
#define ROCKCHIP_SPI_MAX_CS_NUM 4
#define ROCKCHIP_SPI_VER2_TYPE1 0x05EC0002
#define ROCKCHIP_SPI_VER2_TYPE2 0x00110002
#define ROCKCHIP_AUTOSUSPEND_TIMEOUT 2000
struct rockchip_spi {
struct device *dev;
struct clk *spiclk;
struct clk *apb_pclk;
void __iomem *regs;
dma_addr_t dma_addr_rx;
dma_addr_t dma_addr_tx;
const void *tx;
void *rx;
unsigned int tx_left;
unsigned int rx_left;
atomic_t state;
/* depth of the FIFO buffer */
u32 fifo_len;
/* frequency of spiclk */
u32 freq;
u8 n_bytes;
u8 rsd;
bool cs_asserted[ROCKCHIP_SPI_MAX_CS_NUM];
bool target_abort;
bool cs_inactive; /* spi target transmission stops when cs goes inactive */
bool cs_high_supported; /* native CS supports active-high polarity */
struct spi_transfer *xfer; /* Store xfer temporarily */
};
static inline void spi_enable_chip(struct rockchip_spi *rs, bool enable)
{
writel_relaxed((enable ? 1U : 0U), rs->regs + ROCKCHIP_SPI_SSIENR);
}
static inline void wait_for_tx_idle(struct rockchip_spi *rs, bool target_mode)
{
unsigned long timeout = jiffies + msecs_to_jiffies(5);
do {
if (target_mode) {
if (!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_TARGET_TX_BUSY) &&
!((readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY)))
return;
} else {
if (!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY))
return;
}
} while (!time_after(jiffies, timeout));
dev_warn(rs->dev, "spi controller is in busy state!\n");
}
static u32 get_fifo_len(struct rockchip_spi *rs)
{
u32 ver;
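/* Version 2 IP (either type) has 64-entry FIFOs; older versions have 32 */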
ver = readl_relaxed(rs->regs + ROCKCHIP_SPI_VERSION);
switch (ver) {
case ROCKCHIP_SPI_VER2_TYPE1:
case ROCKCHIP_SPI_VER2_TYPE2:
return 64;
default:
return 32;
}
}
static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
{
struct spi_controller *ctlr = spi->controller;
struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
bool cs_asserted = spi->mode & SPI_CS_HIGH ? enable : !enable;
/* Return immediately for no-op */
if (cs_asserted == rs->cs_asserted[spi_get_chipselect(spi, 0)])
return;
if (cs_asserted) {
/* Keep things powered as long as CS is asserted */
pm_runtime_get_sync(rs->dev);
if (spi_get_csgpiod(spi, 0))
ROCKCHIP_SPI_SET_BITS(rs->regs + ROCKCHIP_SPI_SER, 1);
else
ROCKCHIP_SPI_SET_BITS(rs->regs + ROCKCHIP_SPI_SER,
BIT(spi_get_chipselect(spi, 0)));
} else {
if (spi_get_csgpiod(spi, 0))
ROCKCHIP_SPI_CLR_BITS(rs->regs + ROCKCHIP_SPI_SER, 1);
else
ROCKCHIP_SPI_CLR_BITS(rs->regs + ROCKCHIP_SPI_SER,
BIT(spi_get_chipselect(spi, 0)));
/* Drop reference from when we first asserted CS */
pm_runtime_put(rs->dev);
}
rs->cs_asserted[spi_get_chipselect(spi, 0)] = cs_asserted;
}
static void rockchip_spi_handle_err(struct spi_controller *ctlr,
struct spi_message *msg)
{
struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
/* stop running spi transfer
* this also flushes both rx and tx fifos
*/
spi_enable_chip(rs, false);
/* make sure all interrupts are masked and status cleared */
writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR);
if (atomic_read(&rs->state) & TXDMA)
dmaengine_terminate_async(ctlr->dma_tx);
if (atomic_read(&rs->state) & RXDMA)
dmaengine_terminate_async(ctlr->dma_rx);
}
static void rockchip_spi_pio_writer(struct rockchip_spi *rs)
{
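/* Fill the TX FIFO with as many words as will fit without overflowing it */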
u32 tx_free = rs->fifo_len - readl_relaxed(rs->regs + ROCKCHIP_SPI_TXFLR);
u32 words = min(rs->tx_left, tx_free);
rs->tx_left -= words;
for (; words; words--) {
u32 txw;
if (rs->n_bytes == 1)
txw = *(u8 *)rs->tx;
else
txw = *(u16 *)rs->tx;
writel_relaxed(txw, rs->regs + ROCKCHIP_SPI_TXDR);
rs->tx += rs->n_bytes;
}
}
static void rockchip_spi_pio_reader(struct rockchip_spi *rs)
{
u32 words = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR);
u32 rx_left = (rs->rx_left > words) ? rs->rx_left - words : 0;
/* the hardware doesn't allow us to change fifo threshold
* level while spi is enabled, so instead make sure to leave
* enough words in the rx fifo to get the last interrupt
* exactly when all words have been received
*/
if (rx_left) {
u32 ftl = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFTLR) + 1;
if (rx_left < ftl) {
rx_left = ftl;
words = rs->rx_left - rx_left;
}
}
rs->rx_left = rx_left;
for (; words; words--) {
u32 rxw = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);
if (!rs->rx)
continue;
if (rs->n_bytes == 1)
*(u8 *)rs->rx = (u8)rxw;
else
*(u16 *)rs->rx = (u16)rxw;
rs->rx += rs->n_bytes;
}
}
static irqreturn_t rockchip_spi_isr(int irq, void *dev_id)
{
struct spi_controller *ctlr = dev_id;
struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
/* When int_cs_inactive comes, spi target abort */
if (rs->cs_inactive && readl_relaxed(rs->regs + ROCKCHIP_SPI_IMR) & INT_CS_INACTIVE) {
ctlr->target_abort(ctlr);
writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR);
return IRQ_HANDLED;
}
if (rs->tx_left)
rockchip_spi_pio_writer(rs);
rockchip_spi_pio_reader(rs);
if (!rs->rx_left) {
spi_enable_chip(rs, false);
writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR);
spi_finalize_current_transfer(ctlr);
}
return IRQ_HANDLED;
}
static int rockchip_spi_prepare_irq(struct rockchip_spi *rs,
struct spi_controller *ctlr,
struct spi_transfer *xfer)
{
rs->tx = xfer->tx_buf;
rs->rx = xfer->rx_buf;
rs->tx_left = rs->tx ? xfer->len / rs->n_bytes : 0;
rs->rx_left = xfer->len / rs->n_bytes;
writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR);
spi_enable_chip(rs, true);
if (rs->tx_left)
rockchip_spi_pio_writer(rs);
if (rs->cs_inactive)
writel_relaxed(INT_RF_FULL | INT_CS_INACTIVE, rs->regs + ROCKCHIP_SPI_IMR);
else
writel_relaxed(INT_RF_FULL, rs->regs + ROCKCHIP_SPI_IMR);
/* 1 means the transfer is in progress */
return 1;
}
static void rockchip_spi_dma_rxcb(void *data)
{
struct spi_controller *ctlr = data;
struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
int state = atomic_fetch_andnot(RXDMA, &rs->state);
if (state & TXDMA && !rs->target_abort)
return;
if (rs->cs_inactive)
writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
spi_enable_chip(rs, false);
spi_finalize_current_transfer(ctlr);
}
static void rockchip_spi_dma_txcb(void *data)
{
struct spi_controller *ctlr = data;
struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
int state = atomic_fetch_andnot(TXDMA, &rs->state);
if (state & RXDMA && !rs->target_abort)
return;
/* Wait until the data in the TX FIFO has been transmitted completely. */
wait_for_tx_idle(rs, ctlr->target);
spi_enable_chip(rs, false);
spi_finalize_current_transfer(ctlr);
}
static u32 rockchip_spi_calc_burst_size(u32 data_len)
{
u32 i;
/* burst size: 1, 2, 4, 8 */
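/*
 * Pick the largest burst size (up to 8) that evenly divides the
 * transfer length, i.e. the lowest set bit of data_len capped at 8.
 */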
for (i = 1; i < 8; i <<= 1) {
if (data_len & i)
break;
}
return i;
}
static int rockchip_spi_prepare_dma(struct rockchip_spi *rs,
struct spi_controller *ctlr, struct spi_transfer *xfer)
{
struct dma_async_tx_descriptor *rxdesc, *txdesc;
atomic_set(&rs->state, 0);
rs->tx = xfer->tx_buf;
rs->rx = xfer->rx_buf;
rxdesc = NULL;
if (xfer->rx_buf) {
struct dma_slave_config rxconf = {
.direction = DMA_DEV_TO_MEM,
.src_addr = rs->dma_addr_rx,
.src_addr_width = rs->n_bytes,
.src_maxburst = rockchip_spi_calc_burst_size(xfer->len / rs->n_bytes),
};
dmaengine_slave_config(ctlr->dma_rx, &rxconf);
rxdesc = dmaengine_prep_slave_sg(
ctlr->dma_rx,
xfer->rx_sg.sgl, xfer->rx_sg.nents,
DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
if (!rxdesc)
return -EINVAL;
rxdesc->callback = rockchip_spi_dma_rxcb;
rxdesc->callback_param = ctlr;
}
txdesc = NULL;
if (xfer->tx_buf) {
struct dma_slave_config txconf = {
.direction = DMA_MEM_TO_DEV,
.dst_addr = rs->dma_addr_tx,
.dst_addr_width = rs->n_bytes,
.dst_maxburst = rs->fifo_len / 4,
};
dmaengine_slave_config(ctlr->dma_tx, &txconf);
txdesc = dmaengine_prep_slave_sg(
ctlr->dma_tx,
xfer->tx_sg.sgl, xfer->tx_sg.nents,
DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
if (!txdesc) {
if (rxdesc)
dmaengine_terminate_sync(ctlr->dma_rx);
return -EINVAL;
}
txdesc->callback = rockchip_spi_dma_txcb;
txdesc->callback_param = ctlr;
}
/* rx must be started before tx because the host clocks rx data in while shifting tx data out */
if (rxdesc) {
atomic_or(RXDMA, &rs->state);
ctlr->dma_rx->cookie = dmaengine_submit(rxdesc);
dma_async_issue_pending(ctlr->dma_rx);
}
if (rs->cs_inactive)
writel_relaxed(INT_CS_INACTIVE, rs->regs + ROCKCHIP_SPI_IMR);
spi_enable_chip(rs, true);
if (txdesc) {
atomic_or(TXDMA, &rs->state);
dmaengine_submit(txdesc);
dma_async_issue_pending(ctlr->dma_tx);
}
/* 1 means the transfer is in progress */
return 1;
}
static int rockchip_spi_config(struct rockchip_spi *rs,
struct spi_device *spi, struct spi_transfer *xfer,
bool use_dma, bool target_mode)
{
u32 cr0 = CR0_FRF_SPI << CR0_FRF_OFFSET
| CR0_BHT_8BIT << CR0_BHT_OFFSET
| CR0_SSD_ONE << CR0_SSD_OFFSET
| CR0_EM_BIG << CR0_EM_OFFSET;
u32 cr1;
u32 dmacr = 0;
if (target_mode)
cr0 |= CR0_OPM_TARGET << CR0_OPM_OFFSET;
rs->target_abort = false;
cr0 |= rs->rsd << CR0_RSD_OFFSET;
cr0 |= (spi->mode & 0x3U) << CR0_SCPH_OFFSET;
if (spi->mode & SPI_LSB_FIRST)
cr0 |= CR0_FBM_LSB << CR0_FBM_OFFSET;
if (spi->mode & SPI_CS_HIGH)
cr0 |= BIT(spi_get_chipselect(spi, 0)) << CR0_SOI_OFFSET;
if (xfer->rx_buf && xfer->tx_buf)
cr0 |= CR0_XFM_TR << CR0_XFM_OFFSET;
else if (xfer->rx_buf)
cr0 |= CR0_XFM_RO << CR0_XFM_OFFSET;
else if (use_dma)
cr0 |= CR0_XFM_TO << CR0_XFM_OFFSET;
switch (xfer->bits_per_word) {
case 4:
cr0 |= CR0_DFS_4BIT << CR0_DFS_OFFSET;
cr1 = xfer->len - 1;
break;
case 8:
cr0 |= CR0_DFS_8BIT << CR0_DFS_OFFSET;
cr1 = xfer->len - 1;
break;
case 16:
cr0 |= CR0_DFS_16BIT << CR0_DFS_OFFSET;
cr1 = xfer->len / 2 - 1;
break;
default:
/* we only whitelist 4, 8 and 16 bit words in
* ctlr->bits_per_word_mask, so this shouldn't
* happen
*/
dev_err(rs->dev, "unknown bits per word: %d\n",
xfer->bits_per_word);
return -EINVAL;
}
if (use_dma) {
if (xfer->tx_buf)
dmacr |= TF_DMA_EN;
if (xfer->rx_buf)
dmacr |= RF_DMA_EN;
}
writel_relaxed(cr0, rs->regs + ROCKCHIP_SPI_CTRLR0);
writel_relaxed(cr1, rs->regs + ROCKCHIP_SPI_CTRLR1);
/* unfortunately setting the fifo threshold level to generate an
* interrupt exactly when the fifo is full doesn't seem to work,
* so we need the strict inequality here
*/
if ((xfer->len / rs->n_bytes) < rs->fifo_len)
writel_relaxed(xfer->len / rs->n_bytes - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);
else
writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);
writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_DMATDLR);
writel_relaxed(rockchip_spi_calc_burst_size(xfer->len / rs->n_bytes) - 1,
rs->regs + ROCKCHIP_SPI_DMARDLR);
writel_relaxed(dmacr, rs->regs + ROCKCHIP_SPI_DMACR);
/* the hardware only supports an even clock divisor, so
* round divisor = spiclk / speed up to nearest even number
* so that the resulting speed is <= the requested speed
*/
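/*
 * Worked example: with rs->freq = 100 MHz and xfer->speed_hz = 3 MHz,
 * DIV_ROUND_UP(100000000, 6000000) = 17, so 2 * 17 = 34 is written to
 * BAUDR and the effective SCLK is ~2.94 MHz, just under the request.
 */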
writel_relaxed(2 * DIV_ROUND_UP(rs->freq, 2 * xfer->speed_hz),
rs->regs + ROCKCHIP_SPI_BAUDR);
return 0;
}
static size_t rockchip_spi_max_transfer_size(struct spi_device *spi)
{
return ROCKCHIP_SPI_MAX_TRANLEN;
}
static int rockchip_spi_target_abort(struct spi_controller *ctlr)
{
struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
u32 rx_fifo_left;
struct dma_tx_state state;
enum dma_status status;
/* Get current dma rx point */
if (atomic_read(&rs->state) & RXDMA) {
dmaengine_pause(ctlr->dma_rx);
status = dmaengine_tx_status(ctlr->dma_rx, ctlr->dma_rx->cookie, &state);
if (status == DMA_ERROR) {
rs->rx = rs->xfer->rx_buf;
rs->xfer->len = 0;
rx_fifo_left = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR);
for (; rx_fifo_left; rx_fifo_left--)
readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);
goto out;
} else {
rs->rx += rs->xfer->len - rs->n_bytes * state.residue;
}
}
/* Drain the valid data left in the rx fifo and set rs->xfer->len to the real rx size */
if (rs->rx) {
rx_fifo_left = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR);
for (; rx_fifo_left; rx_fifo_left--) {
u32 rxw = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);
if (rs->n_bytes == 1)
*(u8 *)rs->rx = (u8)rxw;
else
*(u16 *)rs->rx = (u16)rxw;
rs->rx += rs->n_bytes;
}
rs->xfer->len = (unsigned int)(rs->rx - rs->xfer->rx_buf);
}
out:
if (atomic_read(&rs->state) & RXDMA)
dmaengine_terminate_sync(ctlr->dma_rx);
if (atomic_read(&rs->state) & TXDMA)
dmaengine_terminate_sync(ctlr->dma_tx);
atomic_set(&rs->state, 0);
spi_enable_chip(rs, false);
rs->target_abort = true;
spi_finalize_current_transfer(ctlr);
return 0;
}
static int rockchip_spi_transfer_one(
struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
int ret;
bool use_dma;
/* Zero length transfers won't trigger an interrupt on completion */
if (!xfer->len) {
spi_finalize_current_transfer(ctlr);
return 1;
}
WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) &&
(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY));
if (!xfer->tx_buf && !xfer->rx_buf) {
dev_err(rs->dev, "No buffer for transfer\n");
return -EINVAL;
}
if (xfer->len > ROCKCHIP_SPI_MAX_TRANLEN) {
dev_err(rs->dev, "Transfer is too long (%d)\n", xfer->len);
return -EINVAL;
}
rs->n_bytes = xfer->bits_per_word <= 8 ? 1 : 2;
rs->xfer = xfer;
use_dma = ctlr->can_dma ? ctlr->can_dma(ctlr, spi, xfer) : false;
ret = rockchip_spi_config(rs, spi, xfer, use_dma, ctlr->target);
if (ret)
return ret;
if (use_dma)
return rockchip_spi_prepare_dma(rs, ctlr, xfer);
return rockchip_spi_prepare_irq(rs, ctlr, xfer);
}
static bool rockchip_spi_can_dma(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
unsigned int bytes_per_word = xfer->bits_per_word <= 8 ? 1 : 2;
/* if the number of spi words to transfer is less than the fifo
* length we can just fill the fifo and wait for a single irq,
* so don't bother setting up dma
*/
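/*
 * Example, assuming a 32-entry FIFO: an 8-bit, 16-byte transfer
 * (16 words) fits in the FIFO and is done by PIO, while a 64-byte
 * transfer (64 words) is handed to the DMA engine.
 */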
return xfer->len / bytes_per_word >= rs->fifo_len;
}
static int rockchip_spi_setup(struct spi_device *spi)
{
struct rockchip_spi *rs = spi_controller_get_devdata(spi->controller);
u32 cr0;
if (!spi_get_csgpiod(spi, 0) && (spi->mode & SPI_CS_HIGH) && !rs->cs_high_supported) {
dev_warn(&spi->dev, "setup: non GPIO CS can't be active-high\n");
return -EINVAL;
}
pm_runtime_get_sync(rs->dev);
cr0 = readl_relaxed(rs->regs + ROCKCHIP_SPI_CTRLR0);
cr0 &= ~(0x3 << CR0_SCPH_OFFSET);
cr0 |= ((spi->mode & 0x3) << CR0_SCPH_OFFSET);
if (spi->mode & SPI_CS_HIGH && spi_get_chipselect(spi, 0) <= 1)
cr0 |= BIT(spi_get_chipselect(spi, 0)) << CR0_SOI_OFFSET;
else if (spi_get_chipselect(spi, 0) <= 1)
cr0 &= ~(BIT(spi_get_chipselect(spi, 0)) << CR0_SOI_OFFSET);
writel_relaxed(cr0, rs->regs + ROCKCHIP_SPI_CTRLR0);
pm_runtime_put(rs->dev);
return 0;
}
static int rockchip_spi_probe(struct platform_device *pdev)
{
int ret;
struct rockchip_spi *rs;
struct spi_controller *ctlr;
struct resource *mem;
struct device_node *np = pdev->dev.of_node;
u32 rsd_nsecs, num_cs;
bool target_mode;
target_mode = of_property_read_bool(np, "spi-slave");
if (target_mode)
ctlr = spi_alloc_target(&pdev->dev,
sizeof(struct rockchip_spi));
else
ctlr = spi_alloc_host(&pdev->dev,
sizeof(struct rockchip_spi));
if (!ctlr)
return -ENOMEM;
platform_set_drvdata(pdev, ctlr);
rs = spi_controller_get_devdata(ctlr);
/* Get basic io resource and map it */
rs->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
if (IS_ERR(rs->regs)) {
ret = PTR_ERR(rs->regs);
goto err_put_ctlr;
}
rs->apb_pclk = devm_clk_get(&pdev->dev, "apb_pclk");
if (IS_ERR(rs->apb_pclk)) {
dev_err(&pdev->dev, "Failed to get apb_pclk\n");
ret = PTR_ERR(rs->apb_pclk);
goto err_put_ctlr;
}
rs->spiclk = devm_clk_get(&pdev->dev, "spiclk");
if (IS_ERR(rs->spiclk)) {
dev_err(&pdev->dev, "Failed to get spi_pclk\n");
ret = PTR_ERR(rs->spiclk);
goto err_put_ctlr;
}
ret = clk_prepare_enable(rs->apb_pclk);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to enable apb_pclk\n");
goto err_put_ctlr;
}
ret = clk_prepare_enable(rs->spiclk);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to enable spi_clk\n");
goto err_disable_apbclk;
}
spi_enable_chip(rs, false);
ret = platform_get_irq(pdev, 0);
if (ret < 0)
goto err_disable_spiclk;
ret = devm_request_threaded_irq(&pdev->dev, ret, rockchip_spi_isr, NULL,
IRQF_ONESHOT, dev_name(&pdev->dev), ctlr);
if (ret)
goto err_disable_spiclk;
rs->dev = &pdev->dev;
rs->freq = clk_get_rate(rs->spiclk);
if (!of_property_read_u32(pdev->dev.of_node, "rx-sample-delay-ns",
&rsd_nsecs)) {
/* rx sample delay is expressed in parent clock cycles (max 3) */
u32 rsd = DIV_ROUND_CLOSEST(rsd_nsecs * (rs->freq >> 8),
1000000000 >> 8);
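/*
 * Worked example: with rs->freq = 100 MHz (10 ns per spiclk cycle) and
 * rx-sample-delay-ns = 25, this is DIV_ROUND_CLOSEST(25 * 390625,
 * 3906250) = 3, i.e. a delay of three spiclk cycles.
 */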
if (!rsd) {
dev_warn(rs->dev, "%u Hz are too slow to express %u ns delay\n",
rs->freq, rsd_nsecs);
} else if (rsd > CR0_RSD_MAX) {
rsd = CR0_RSD_MAX;
dev_warn(rs->dev, "%u Hz are too fast to express %u ns delay, clamping at %u ns\n",
rs->freq, rsd_nsecs,
CR0_RSD_MAX * 1000000000U / rs->freq);
}
rs->rsd = rsd;
}
rs->fifo_len = get_fifo_len(rs);
if (!rs->fifo_len) {
dev_err(&pdev->dev, "Failed to get fifo length\n");
ret = -EINVAL;
goto err_disable_spiclk;
}
pm_runtime_set_autosuspend_delay(&pdev->dev, ROCKCHIP_AUTOSUSPEND_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
ctlr->auto_runtime_pm = true;
ctlr->bus_num = pdev->id;
ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_LSB_FIRST;
if (target_mode) {
ctlr->mode_bits |= SPI_NO_CS;
ctlr->target_abort = rockchip_spi_target_abort;
} else {
ctlr->flags = SPI_CONTROLLER_GPIO_SS;
ctlr->max_native_cs = ROCKCHIP_SPI_MAX_CS_NUM;
/*
* rk spi0 has two native cs, spi1..5 one cs only
* if num-cs is missing in the dts, default to 1
*/
if (of_property_read_u32(np, "num-cs", &num_cs))
num_cs = 1;
ctlr->num_chipselect = num_cs;
ctlr->use_gpio_descriptors = true;
}
ctlr->dev.of_node = pdev->dev.of_node;
ctlr->bits_per_word_mask = SPI_BPW_MASK(16) | SPI_BPW_MASK(8) | SPI_BPW_MASK(4);
ctlr->min_speed_hz = rs->freq / BAUDR_SCKDV_MAX;
ctlr->max_speed_hz = min(rs->freq / BAUDR_SCKDV_MIN, MAX_SCLK_OUT);
ctlr->setup = rockchip_spi_setup;
ctlr->set_cs = rockchip_spi_set_cs;
ctlr->transfer_one = rockchip_spi_transfer_one;
ctlr->max_transfer_size = rockchip_spi_max_transfer_size;
ctlr->handle_err = rockchip_spi_handle_err;
ctlr->dma_tx = dma_request_chan(rs->dev, "tx");
if (IS_ERR(ctlr->dma_tx)) {
/* Check tx to see if we need defer probing driver */
if (PTR_ERR(ctlr->dma_tx) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
goto err_disable_pm_runtime;
}
dev_warn(rs->dev, "Failed to request TX DMA channel\n");
ctlr->dma_tx = NULL;
}
ctlr->dma_rx = dma_request_chan(rs->dev, "rx");
if (IS_ERR(ctlr->dma_rx)) {
if (PTR_ERR(ctlr->dma_rx) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
goto err_free_dma_tx;
}
dev_warn(rs->dev, "Failed to request RX DMA channel\n");
ctlr->dma_rx = NULL;
}
if (ctlr->dma_tx && ctlr->dma_rx) {
rs->dma_addr_tx = mem->start + ROCKCHIP_SPI_TXDR;
rs->dma_addr_rx = mem->start + ROCKCHIP_SPI_RXDR;
ctlr->can_dma = rockchip_spi_can_dma;
}
switch (readl_relaxed(rs->regs + ROCKCHIP_SPI_VERSION)) {
case ROCKCHIP_SPI_VER2_TYPE2:
rs->cs_high_supported = true;
ctlr->mode_bits |= SPI_CS_HIGH;
if (ctlr->can_dma && target_mode)
rs->cs_inactive = true;
else
rs->cs_inactive = false;
break;
default:
rs->cs_inactive = false;
break;
}
ret = devm_spi_register_controller(&pdev->dev, ctlr);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register controller\n");
goto err_free_dma_rx;
}
return 0;
err_free_dma_rx:
if (ctlr->dma_rx)
dma_release_channel(ctlr->dma_rx);
err_free_dma_tx:
if (ctlr->dma_tx)
dma_release_channel(ctlr->dma_tx);
err_disable_pm_runtime:
pm_runtime_disable(&pdev->dev);
err_disable_spiclk:
clk_disable_unprepare(rs->spiclk);
err_disable_apbclk:
clk_disable_unprepare(rs->apb_pclk);
err_put_ctlr:
spi_controller_put(ctlr);
return ret;
}
static void rockchip_spi_remove(struct platform_device *pdev)
{
struct spi_controller *ctlr = spi_controller_get(platform_get_drvdata(pdev));
struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
pm_runtime_get_sync(&pdev->dev);
clk_disable_unprepare(rs->spiclk);
clk_disable_unprepare(rs->apb_pclk);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
if (ctlr->dma_tx)
dma_release_channel(ctlr->dma_tx);
if (ctlr->dma_rx)
dma_release_channel(ctlr->dma_rx);
spi_controller_put(ctlr);
}
#ifdef CONFIG_PM_SLEEP
static int rockchip_spi_suspend(struct device *dev)
{
int ret;
struct spi_controller *ctlr = dev_get_drvdata(dev);
struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
ret = spi_controller_suspend(ctlr);
if (ret < 0)
return ret;
clk_disable_unprepare(rs->spiclk);
clk_disable_unprepare(rs->apb_pclk);
pinctrl_pm_select_sleep_state(dev);
return 0;
}
static int rockchip_spi_resume(struct device *dev)
{
int ret;
struct spi_controller *ctlr = dev_get_drvdata(dev);
struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
pinctrl_pm_select_default_state(dev);
ret = clk_prepare_enable(rs->apb_pclk);
if (ret < 0)
return ret;
ret = clk_prepare_enable(rs->spiclk);
if (ret < 0)
clk_disable_unprepare(rs->apb_pclk);
ret = spi_controller_resume(ctlr);
if (ret < 0) {
clk_disable_unprepare(rs->spiclk);
clk_disable_unprepare(rs->apb_pclk);
}
return 0;
}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
static int rockchip_spi_runtime_suspend(struct device *dev)
{
struct spi_controller *ctlr = dev_get_drvdata(dev);
struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
clk_disable_unprepare(rs->spiclk);
clk_disable_unprepare(rs->apb_pclk);
return 0;
}
static int rockchip_spi_runtime_resume(struct device *dev)
{
int ret;
struct spi_controller *ctlr = dev_get_drvdata(dev);
struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
ret = clk_prepare_enable(rs->apb_pclk);
if (ret < 0)
return ret;
ret = clk_prepare_enable(rs->spiclk);
if (ret < 0)
clk_disable_unprepare(rs->apb_pclk);
return 0;
}
#endif /* CONFIG_PM */
static const struct dev_pm_ops rockchip_spi_pm = {
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_spi_suspend, rockchip_spi_resume)
SET_RUNTIME_PM_OPS(rockchip_spi_runtime_suspend,
rockchip_spi_runtime_resume, NULL)
};
static const struct of_device_id rockchip_spi_dt_match[] = {
{ .compatible = "rockchip,px30-spi", },
{ .compatible = "rockchip,rk3036-spi", },
{ .compatible = "rockchip,rk3066-spi", },
{ .compatible = "rockchip,rk3188-spi", },
{ .compatible = "rockchip,rk3228-spi", },
{ .compatible = "rockchip,rk3288-spi", },
{ .compatible = "rockchip,rk3308-spi", },
{ .compatible = "rockchip,rk3328-spi", },
{ .compatible = "rockchip,rk3368-spi", },
{ .compatible = "rockchip,rk3399-spi", },
{ .compatible = "rockchip,rv1108-spi", },
{ .compatible = "rockchip,rv1126-spi", },
{ },
};
MODULE_DEVICE_TABLE(of, rockchip_spi_dt_match);
static struct platform_driver rockchip_spi_driver = {
.driver = {
.name = DRIVER_NAME,
.pm = &rockchip_spi_pm,
.of_match_table = of_match_ptr(rockchip_spi_dt_match),
},
.probe = rockchip_spi_probe,
.remove_new = rockchip_spi_remove,
};
module_platform_driver(rockchip_spi_driver);
MODULE_AUTHOR("Addy Ke <[email protected]>");
MODULE_DESCRIPTION("ROCKCHIP SPI Controller Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/spi/spi-rockchip.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* MPC52xx SPI bus driver.
*
* Copyright (C) 2008 Secret Lab Technologies Ltd.
*
* This is the driver for the MPC5200's dedicated SPI controller.
*
* Note: this driver does not support the MPC5200 PSC in SPI mode. For
* that driver see drivers/spi/mpc52xx_psc_spi.c
*/
#include <linux/module.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <asm/time.h>
#include <asm/mpc52xx.h>
MODULE_AUTHOR("Grant Likely <[email protected]>");
MODULE_DESCRIPTION("MPC52xx SPI (non-PSC) Driver");
MODULE_LICENSE("GPL");
/* Register offsets */
#define SPI_CTRL1 0x00
#define SPI_CTRL1_SPIE (1 << 7)
#define SPI_CTRL1_SPE (1 << 6)
#define SPI_CTRL1_MSTR (1 << 4)
#define SPI_CTRL1_CPOL (1 << 3)
#define SPI_CTRL1_CPHA (1 << 2)
#define SPI_CTRL1_SSOE (1 << 1)
#define SPI_CTRL1_LSBFE (1 << 0)
#define SPI_CTRL2 0x01
#define SPI_BRR 0x04
#define SPI_STATUS 0x05
#define SPI_STATUS_SPIF (1 << 7)
#define SPI_STATUS_WCOL (1 << 6)
#define SPI_STATUS_MODF (1 << 4)
#define SPI_DATA 0x09
#define SPI_PORTDATA 0x0d
#define SPI_DATADIR 0x10
/* FSM state return values */
#define FSM_STOP 0 /* Nothing more for the state machine to */
/* do. If something interesting happens */
/* then an IRQ will be received */
#define FSM_POLL 1 /* need to poll for completion, an IRQ is */
/* not expected */
#define FSM_CONTINUE 2 /* Keep iterating the state machine */
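/*
 * Typical flow through the states below: the idle state dequeues a
 * message and returns FSM_CONTINUE, the transfer state shuttles one
 * byte per iteration until the current transfer is exhausted, and the
 * wait state either kicks off the next transfer or completes the
 * message and drops back to idle. FSM_POLL is only returned while
 * waiting out an inter-transfer delay or when running without IRQs.
 */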
/* Driver internal data */
struct mpc52xx_spi {
struct spi_master *master;
void __iomem *regs;
int irq0; /* MODF irq */
int irq1; /* SPIF irq */
unsigned int ipb_freq;
/* Statistics; not used now, but will be reintroduced for debugfs */
int msg_count;
int wcol_count;
int wcol_ticks;
u32 wcol_tx_timestamp;
int modf_count;
int byte_count;
struct list_head queue; /* queue of pending messages */
spinlock_t lock;
struct work_struct work;
/* Details of current transfer (length, and buffer pointers) */
struct spi_message *message; /* current message */
struct spi_transfer *transfer; /* current transfer */
int (*state)(int irq, struct mpc52xx_spi *ms, u8 status, u8 data);
int len;
int timestamp;
u8 *rx_buf;
const u8 *tx_buf;
int cs_change;
int gpio_cs_count;
struct gpio_desc **gpio_cs;
};
/*
* CS control function
*/
static void mpc52xx_spi_chipsel(struct mpc52xx_spi *ms, int value)
{
int cs;
if (ms->gpio_cs_count > 0) {
cs = spi_get_chipselect(ms->message->spi, 0);
gpiod_set_value(ms->gpio_cs[cs], value);
} else {
out_8(ms->regs + SPI_PORTDATA, value ? 0 : 0x08);
}
}
/*
* Start a new transfer. This is called both by the idle state
* for the first transfer in a message, and by the wait state when the
* previous transfer in a message is complete.
*/
static void mpc52xx_spi_start_transfer(struct mpc52xx_spi *ms)
{
ms->rx_buf = ms->transfer->rx_buf;
ms->tx_buf = ms->transfer->tx_buf;
ms->len = ms->transfer->len;
/* Activate the chip select */
if (ms->cs_change)
mpc52xx_spi_chipsel(ms, 1);
ms->cs_change = ms->transfer->cs_change;
/* Write out the first byte */
ms->wcol_tx_timestamp = mftb();
if (ms->tx_buf)
out_8(ms->regs + SPI_DATA, *ms->tx_buf++);
else
out_8(ms->regs + SPI_DATA, 0);
}
/* Forward declaration of state handlers */
static int mpc52xx_spi_fsmstate_transfer(int irq, struct mpc52xx_spi *ms,
u8 status, u8 data);
static int mpc52xx_spi_fsmstate_wait(int irq, struct mpc52xx_spi *ms,
u8 status, u8 data);
/*
* IDLE state
*
* No transfers are in progress; if another transfer is pending then retrieve
* it and kick it off. Otherwise, stop processing the state machine
*/
static int
mpc52xx_spi_fsmstate_idle(int irq, struct mpc52xx_spi *ms, u8 status, u8 data)
{
struct spi_device *spi;
int spr, sppr;
u8 ctrl1;
if (status && irq)
dev_err(&ms->master->dev, "spurious irq, status=0x%.2x\n",
status);
/* Check if there is another transfer waiting. */
if (list_empty(&ms->queue))
return FSM_STOP;
/* get the head of the queue */
ms->message = list_first_entry(&ms->queue, struct spi_message, queue);
list_del_init(&ms->message->queue);
/* Setup the controller parameters */
ctrl1 = SPI_CTRL1_SPIE | SPI_CTRL1_SPE | SPI_CTRL1_MSTR;
spi = ms->message->spi;
if (spi->mode & SPI_CPHA)
ctrl1 |= SPI_CTRL1_CPHA;
if (spi->mode & SPI_CPOL)
ctrl1 |= SPI_CTRL1_CPOL;
if (spi->mode & SPI_LSB_FIRST)
ctrl1 |= SPI_CTRL1_LSBFE;
out_8(ms->regs + SPI_CTRL1, ctrl1);
/* Setup the controller speed */
/* minimum divider is '2'. Also, add '1' to force rounding the
* divider up. */
sppr = ((ms->ipb_freq / ms->message->spi->max_speed_hz) + 1) >> 1;
spr = 0;
if (sppr < 1)
sppr = 1;
while (((sppr - 1) & ~0x7) != 0) {
sppr = (sppr + 1) >> 1; /* add '1' to force rounding up */
spr++;
}
sppr--; /* sppr quantity in register is offset by 1 */
if (spr > 7) {
/* Don't overrun limits of SPI baudrate register */
spr = 7;
sppr = 7;
}
out_8(ms->regs + SPI_BRR, sppr << 4 | spr); /* Set speed */
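/*
 * Worked example: with ipb_freq = 33 MHz and max_speed_hz = 1 MHz the
 * initial sppr is (33 + 1) / 2 = 17; the loop halves it to 9 and then
 * to 5 while incrementing spr to 2, and after the final decrement the
 * register is written with sppr = 4, spr = 2. Assuming the MPC5200's
 * documented divisor of (SPPR + 1) * 2^(SPR + 1), that gives
 * 33 MHz / 40 = 825 kHz, safely below the requested speed.
 */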
ms->cs_change = 1;
ms->transfer = container_of(ms->message->transfers.next,
struct spi_transfer, transfer_list);
mpc52xx_spi_start_transfer(ms);
ms->state = mpc52xx_spi_fsmstate_transfer;
return FSM_CONTINUE;
}
/*
* TRANSFER state
*
* In the middle of a transfer. If the SPI core has completed processing
* a byte, then read out the received data and write out the next byte
* (unless this transfer is finished; in which case go on to the wait
* state)
*/
static int mpc52xx_spi_fsmstate_transfer(int irq, struct mpc52xx_spi *ms,
u8 status, u8 data)
{
if (!status)
return ms->irq0 ? FSM_STOP : FSM_POLL;
if (status & SPI_STATUS_WCOL) {
/* The SPI controller misbehaves here. At slower speeds, it may
* raise the SPIF flag before the state machine is actually
* finished, which causes a collision (internal to the state
* machine only). The manual recommends inserting a delay
* between receiving the interrupt and sending the next byte,
* but it can also be worked around simply by retrying the
* transfer which is what we do here. */
ms->wcol_count++;
ms->wcol_ticks += mftb() - ms->wcol_tx_timestamp;
ms->wcol_tx_timestamp = mftb();
data = 0;
if (ms->tx_buf)
data = *(ms->tx_buf - 1);
out_8(ms->regs + SPI_DATA, data); /* try again */
return FSM_CONTINUE;
} else if (status & SPI_STATUS_MODF) {
ms->modf_count++;
dev_err(&ms->master->dev, "mode fault\n");
mpc52xx_spi_chipsel(ms, 0);
ms->message->status = -EIO;
if (ms->message->complete)
ms->message->complete(ms->message->context);
ms->state = mpc52xx_spi_fsmstate_idle;
return FSM_CONTINUE;
}
/* Read data out of the spi device */
ms->byte_count++;
if (ms->rx_buf)
*ms->rx_buf++ = data;
/* Is the transfer complete? */
ms->len--;
if (ms->len == 0) {
ms->timestamp = mftb();
if (ms->transfer->delay.unit == SPI_DELAY_UNIT_USECS)
ms->timestamp += ms->transfer->delay.value *
tb_ticks_per_usec;
ms->state = mpc52xx_spi_fsmstate_wait;
return FSM_CONTINUE;
}
/* Write out the next byte */
ms->wcol_tx_timestamp = mftb();
if (ms->tx_buf)
out_8(ms->regs + SPI_DATA, *ms->tx_buf++);
else
out_8(ms->regs + SPI_DATA, 0);
return FSM_CONTINUE;
}
/*
* WAIT state
*
* A transfer has completed; need to wait for the delay period to complete
* before starting the next transfer
*/
static int
mpc52xx_spi_fsmstate_wait(int irq, struct mpc52xx_spi *ms, u8 status, u8 data)
{
if (status && irq)
dev_err(&ms->master->dev, "spurious irq, status=0x%.2x\n",
status);
if (((int)mftb()) - ms->timestamp < 0)
return FSM_POLL;
ms->message->actual_length += ms->transfer->len;
/* Check if there is another transfer in this message. If there
* aren't then deactivate CS, notify sender, and drop back to idle
* to start the next message. */
if (ms->transfer->transfer_list.next == &ms->message->transfers) {
ms->msg_count++;
mpc52xx_spi_chipsel(ms, 0);
ms->message->status = 0;
if (ms->message->complete)
ms->message->complete(ms->message->context);
ms->state = mpc52xx_spi_fsmstate_idle;
return FSM_CONTINUE;
}
/* There is another transfer; kick it off */
if (ms->cs_change)
mpc52xx_spi_chipsel(ms, 0);
ms->transfer = container_of(ms->transfer->transfer_list.next,
struct spi_transfer, transfer_list);
mpc52xx_spi_start_transfer(ms);
ms->state = mpc52xx_spi_fsmstate_transfer;
return FSM_CONTINUE;
}
/**
* mpc52xx_spi_fsm_process - Finite State Machine iteration function
* @irq: irq number that triggered the FSM or 0 for polling
* @ms: pointer to mpc52xx_spi driver data
*/
static void mpc52xx_spi_fsm_process(int irq, struct mpc52xx_spi *ms)
{
int rc = FSM_CONTINUE;
u8 status, data;
while (rc == FSM_CONTINUE) {
/* Interrupt cleared by read of STATUS followed by
* read of DATA registers */
status = in_8(ms->regs + SPI_STATUS);
data = in_8(ms->regs + SPI_DATA);
rc = ms->state(irq, ms, status, data);
}
if (rc == FSM_POLL)
schedule_work(&ms->work);
}
/**
* mpc52xx_spi_irq - IRQ handler
*/
static irqreturn_t mpc52xx_spi_irq(int irq, void *_ms)
{
struct mpc52xx_spi *ms = _ms;
spin_lock(&ms->lock);
mpc52xx_spi_fsm_process(irq, ms);
spin_unlock(&ms->lock);
return IRQ_HANDLED;
}
/**
* mpc52xx_spi_wq - Workqueue function for polling the state machine
*/
static void mpc52xx_spi_wq(struct work_struct *work)
{
struct mpc52xx_spi *ms = container_of(work, struct mpc52xx_spi, work);
unsigned long flags;
spin_lock_irqsave(&ms->lock, flags);
mpc52xx_spi_fsm_process(0, ms);
spin_unlock_irqrestore(&ms->lock, flags);
}
/*
* spi_master ops
*/
static int mpc52xx_spi_transfer(struct spi_device *spi, struct spi_message *m)
{
struct mpc52xx_spi *ms = spi_master_get_devdata(spi->master);
unsigned long flags;
m->actual_length = 0;
m->status = -EINPROGRESS;
spin_lock_irqsave(&ms->lock, flags);
list_add_tail(&m->queue, &ms->queue);
spin_unlock_irqrestore(&ms->lock, flags);
schedule_work(&ms->work);
return 0;
}
/*
* OF Platform Bus Binding
*/
static int mpc52xx_spi_probe(struct platform_device *op)
{
struct spi_master *master;
struct mpc52xx_spi *ms;
struct gpio_desc *gpio_cs;
void __iomem *regs;
u8 ctrl1;
int rc, i = 0;
/* MMIO registers */
dev_dbg(&op->dev, "probing mpc5200 SPI device\n");
regs = of_iomap(op->dev.of_node, 0);
if (!regs)
return -ENODEV;
/* initialize the device */
ctrl1 = SPI_CTRL1_SPIE | SPI_CTRL1_SPE | SPI_CTRL1_MSTR;
out_8(regs + SPI_CTRL1, ctrl1);
out_8(regs + SPI_CTRL2, 0x0);
out_8(regs + SPI_DATADIR, 0xe); /* Set output pins */
out_8(regs + SPI_PORTDATA, 0x8); /* Deassert /SS signal */
/* Clear the status register and re-read it to check for a MODF
* failure. This driver cannot currently handle multiple masters
* on the SPI bus. This fault will also occur if the SPI signals
* are not connected to any pins (port_config setting) */
in_8(regs + SPI_STATUS);
out_8(regs + SPI_CTRL1, ctrl1);
in_8(regs + SPI_DATA);
if (in_8(regs + SPI_STATUS) & SPI_STATUS_MODF) {
dev_err(&op->dev, "mode fault; is port_config correct?\n");
rc = -EIO;
goto err_init;
}
dev_dbg(&op->dev, "allocating spi_master struct\n");
master = spi_alloc_master(&op->dev, sizeof(*ms));
if (!master) {
rc = -ENOMEM;
goto err_alloc;
}
master->transfer = mpc52xx_spi_transfer;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
master->bits_per_word_mask = SPI_BPW_MASK(8);
master->dev.of_node = op->dev.of_node;
platform_set_drvdata(op, master);
ms = spi_master_get_devdata(master);
ms->master = master;
ms->regs = regs;
ms->irq0 = irq_of_parse_and_map(op->dev.of_node, 0);
ms->irq1 = irq_of_parse_and_map(op->dev.of_node, 1);
ms->state = mpc52xx_spi_fsmstate_idle;
ms->ipb_freq = mpc5xxx_get_bus_frequency(&op->dev);
ms->gpio_cs_count = gpiod_count(&op->dev, NULL);
if (ms->gpio_cs_count > 0) {
master->num_chipselect = ms->gpio_cs_count;
ms->gpio_cs = kmalloc_array(ms->gpio_cs_count,
sizeof(*ms->gpio_cs),
GFP_KERNEL);
if (!ms->gpio_cs) {
rc = -ENOMEM;
goto err_alloc_gpio;
}
for (i = 0; i < ms->gpio_cs_count; i++) {
gpio_cs = gpiod_get_index(&op->dev,
NULL, i, GPIOD_OUT_LOW);
rc = PTR_ERR_OR_ZERO(gpio_cs);
if (rc) {
dev_err(&op->dev,
"failed to get spi cs gpio #%d: %d\n",
i, rc);
goto err_gpio;
}
ms->gpio_cs[i] = gpio_cs;
}
}
spin_lock_init(&ms->lock);
INIT_LIST_HEAD(&ms->queue);
INIT_WORK(&ms->work, mpc52xx_spi_wq);
/* Decide if interrupts can be used */
if (ms->irq0 && ms->irq1) {
rc = request_irq(ms->irq0, mpc52xx_spi_irq, 0,
"mpc5200-spi-modf", ms);
rc |= request_irq(ms->irq1, mpc52xx_spi_irq, 0,
"mpc5200-spi-spif", ms);
if (rc) {
free_irq(ms->irq0, ms);
free_irq(ms->irq1, ms);
ms->irq0 = ms->irq1 = 0;
}
} else {
/* operate in polled mode */
ms->irq0 = ms->irq1 = 0;
}
if (!ms->irq0)
dev_info(&op->dev, "using polled mode\n");
dev_dbg(&op->dev, "registering spi_master struct\n");
rc = spi_register_master(master);
if (rc)
goto err_register;
dev_info(&ms->master->dev, "registered MPC5200 SPI bus\n");
return rc;
err_register:
dev_err(&ms->master->dev, "initialization failed\n");
err_gpio:
while (i-- > 0)
gpiod_put(ms->gpio_cs[i]);
kfree(ms->gpio_cs);
err_alloc_gpio:
spi_master_put(master);
err_alloc:
err_init:
iounmap(regs);
return rc;
}
static void mpc52xx_spi_remove(struct platform_device *op)
{
struct spi_master *master = spi_master_get(platform_get_drvdata(op));
struct mpc52xx_spi *ms = spi_master_get_devdata(master);
int i;
free_irq(ms->irq0, ms);
free_irq(ms->irq1, ms);
for (i = 0; i < ms->gpio_cs_count; i++)
gpiod_put(ms->gpio_cs[i]);
kfree(ms->gpio_cs);
spi_unregister_master(master);
iounmap(ms->regs);
spi_master_put(master);
}
static const struct of_device_id mpc52xx_spi_match[] = {
{ .compatible = "fsl,mpc5200-spi", },
{}
};
MODULE_DEVICE_TABLE(of, mpc52xx_spi_match);
static struct platform_driver mpc52xx_spi_of_driver = {
.driver = {
.name = "mpc52xx-spi",
.of_match_table = mpc52xx_spi_match,
},
.probe = mpc52xx_spi_probe,
.remove_new = mpc52xx_spi_remove,
};
module_platform_driver(mpc52xx_spi_of_driver);
| linux-master | drivers/spi/spi-mpc52xx.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SuperH HSPI bus driver
*
* Copyright (C) 2011 Kuninori Morimoto
*
* Based on spi-sh.c:
* Based on pxa2xx_spi.c:
* Copyright (C) 2011 Renesas Solutions Corp.
* Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
*/
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/io.h>
#include <linux/spi/spi.h>
#include <linux/spi/sh_hspi.h>
#define SPCR 0x00
#define SPSR 0x04
#define SPSCR 0x08
#define SPTBR 0x0C
#define SPRBR 0x10
#define SPCR2 0x14
/* SPSR */
#define RXFL (1 << 2)
struct hspi_priv {
void __iomem *addr;
struct spi_controller *ctlr;
struct device *dev;
struct clk *clk;
};
/*
* basic function
*/
static void hspi_write(struct hspi_priv *hspi, int reg, u32 val)
{
iowrite32(val, hspi->addr + reg);
}
static u32 hspi_read(struct hspi_priv *hspi, int reg)
{
return ioread32(hspi->addr + reg);
}
static void hspi_bit_set(struct hspi_priv *hspi, int reg, u32 mask, u32 set)
{
u32 val = hspi_read(hspi, reg);
val &= ~mask;
val |= set & mask;
hspi_write(hspi, reg, val);
}
/*
* transfer function
*/
static int hspi_status_check_timeout(struct hspi_priv *hspi, u32 mask, u32 val)
{
int t = 256;
while (t--) {
if ((mask & hspi_read(hspi, SPSR)) == val)
return 0;
udelay(10);
}
dev_err(hspi->dev, "timeout\n");
return -ETIMEDOUT;
}
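/*
 * The polling loop above gives a worst-case wait of 256 * 10 us,
 * i.e. roughly 2.5 ms, before a transfer is reported as timed out.
 */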
/*
* spi host function
*/
#define hspi_hw_cs_enable(hspi) hspi_hw_cs_ctrl(hspi, 0)
#define hspi_hw_cs_disable(hspi) hspi_hw_cs_ctrl(hspi, 1)
static void hspi_hw_cs_ctrl(struct hspi_priv *hspi, int hi)
{
hspi_bit_set(hspi, SPSCR, (1 << 6), (hi) << 6);
}
static void hspi_hw_setup(struct hspi_priv *hspi,
struct spi_message *msg,
struct spi_transfer *t)
{
struct spi_device *spi = msg->spi;
struct device *dev = hspi->dev;
u32 spcr, idiv_clk;
u32 rate, best_rate, min, tmp;
/*
* find best IDIV/CLKCx settings
*/
min = ~0;
best_rate = 0;
spcr = 0;
for (idiv_clk = 0x00; idiv_clk <= 0x3F; idiv_clk++) {
rate = clk_get_rate(hspi->clk);
/* IDIV calculation */
if (idiv_clk & (1 << 5))
rate /= 128;
else
rate /= 16;
/* CLKCx calculation */
rate /= (((idiv_clk & 0x1F) + 1) * 2);
/* save best settings */
tmp = abs(t->speed_hz - rate);
if (tmp < min) {
min = tmp;
spcr = idiv_clk;
best_rate = rate;
}
}
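/*
 * Worked example, assuming a 100 MHz parent clock and t->speed_hz of
 * 1 MHz: the /16 path gives 6.25 MHz, and idiv_clk = 0x02 divides that
 * by 2 * (2 + 1) = 6 for ~1.042 MHz, the smallest absolute error of
 * all settings. Note the search minimizes |error|, so the chosen rate
 * may end up slightly above the requested one.
 */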
if (spi->mode & SPI_CPHA)
spcr |= 1 << 7;
if (spi->mode & SPI_CPOL)
spcr |= 1 << 6;
dev_dbg(dev, "speed %d/%d\n", t->speed_hz, best_rate);
hspi_write(hspi, SPCR, spcr);
hspi_write(hspi, SPSR, 0x0);
hspi_write(hspi, SPSCR, 0x21); /* master mode / CS control */
}
static int hspi_transfer_one_message(struct spi_controller *ctlr,
struct spi_message *msg)
{
struct hspi_priv *hspi = spi_controller_get_devdata(ctlr);
struct spi_transfer *t;
u32 tx;
u32 rx;
int ret, i;
unsigned int cs_change;
const int nsecs = 50;
dev_dbg(hspi->dev, "%s\n", __func__);
cs_change = 1;
ret = 0;
list_for_each_entry(t, &msg->transfers, transfer_list) {
if (cs_change) {
hspi_hw_setup(hspi, msg, t);
hspi_hw_cs_enable(hspi);
ndelay(nsecs);
}
cs_change = t->cs_change;
for (i = 0; i < t->len; i++) {
/* wait until the transmitter can accept the next byte */
ret = hspi_status_check_timeout(hspi, 0x1, 0);
if (ret < 0)
break;
tx = 0;
if (t->tx_buf)
tx = (u32)((u8 *)t->tx_buf)[i];
hspi_write(hspi, SPTBR, tx);
/* wait for the received byte */
ret = hspi_status_check_timeout(hspi, 0x4, 0x4);
if (ret < 0)
break;
rx = hspi_read(hspi, SPRBR);
if (t->rx_buf)
((u8 *)t->rx_buf)[i] = (u8)rx;
}
msg->actual_length += t->len;
spi_transfer_delay_exec(t);
if (cs_change) {
ndelay(nsecs);
hspi_hw_cs_disable(hspi);
ndelay(nsecs);
}
}
msg->status = ret;
if (!cs_change) {
ndelay(nsecs);
hspi_hw_cs_disable(hspi);
}
spi_finalize_current_message(ctlr);
return ret;
}
static int hspi_probe(struct platform_device *pdev)
{
struct resource *res;
struct spi_controller *ctlr;
struct hspi_priv *hspi;
struct clk *clk;
int ret;
/* get base addr */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "invalid resource\n");
return -EINVAL;
}
ctlr = spi_alloc_host(&pdev->dev, sizeof(*hspi));
if (!ctlr)
return -ENOMEM;
clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "couldn't get clock\n");
ret = -EINVAL;
goto error0;
}
hspi = spi_controller_get_devdata(ctlr);
platform_set_drvdata(pdev, hspi);
/* init hspi */
hspi->ctlr = ctlr;
hspi->dev = &pdev->dev;
hspi->clk = clk;
hspi->addr = devm_ioremap(hspi->dev,
res->start, resource_size(res));
if (!hspi->addr) {
ret = -ENOMEM;
goto error1;
}
pm_runtime_enable(&pdev->dev);
ctlr->bus_num = pdev->id;
ctlr->mode_bits = SPI_CPOL | SPI_CPHA;
ctlr->dev.of_node = pdev->dev.of_node;
ctlr->auto_runtime_pm = true;
ctlr->transfer_one_message = hspi_transfer_one_message;
ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
ret = devm_spi_register_controller(&pdev->dev, ctlr);
if (ret < 0) {
dev_err(&pdev->dev, "devm_spi_register_controller error.\n");
goto error2;
}
return 0;
error2:
pm_runtime_disable(&pdev->dev);
error1:
clk_put(clk);
error0:
spi_controller_put(ctlr);
return ret;
}
static void hspi_remove(struct platform_device *pdev)
{
struct hspi_priv *hspi = platform_get_drvdata(pdev);
pm_runtime_disable(&pdev->dev);
clk_put(hspi->clk);
}
static const struct of_device_id hspi_of_match[] = {
{ .compatible = "renesas,hspi", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, hspi_of_match);
static struct platform_driver hspi_driver = {
.probe = hspi_probe,
.remove_new = hspi_remove,
.driver = {
.name = "sh-hspi",
.of_match_table = hspi_of_match,
},
};
module_platform_driver(hspi_driver);
MODULE_DESCRIPTION("SuperH HSPI bus driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Kuninori Morimoto <[email protected]>");
MODULE_ALIAS("platform:sh-hspi");
| linux-master | drivers/spi/spi-sh-hspi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Simple synchronous userspace interface to SPI devices
*
* Copyright (C) 2006 SWAPP
* Andrea Paterniani <[email protected]>
* Copyright (C) 2007 David Brownell (simplification, cleanup)
*/
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/spi/spi.h>
#include <linux/spi/spidev.h>
#include <linux/uaccess.h>
/*
* This supports access to SPI devices using normal userspace I/O calls.
* Note that while traditional UNIX/POSIX I/O semantics are half duplex,
* and often mask message boundaries, full SPI support requires full duplex
* transfers. There are several kinds of internal message boundaries to
* handle chipselect management and other protocol options.
*
* SPI has a character major number assigned. We allocate minor numbers
* dynamically using a bitmask. You must use hotplug tools, such as udev
* (or mdev with busybox) to create and destroy the /dev/spidevB.C device
* nodes, since there is no fixed association of minor numbers with any
* particular SPI bus or device.
*/
#define SPIDEV_MAJOR 153 /* assigned */
#define N_SPI_MINORS 32 /* ... up to 256 */
static DECLARE_BITMAP(minors, N_SPI_MINORS);
static_assert(N_SPI_MINORS > 0 && N_SPI_MINORS <= 256);
/* Bit masks for spi_device.mode management. Note that incorrect
* values for some of these settings can cause *lots* of trouble for other
* devices on a shared bus:
*
* - CS_HIGH ... this device will be active when it shouldn't be
* - 3WIRE ... when active, it won't behave as it should
* - NO_CS ... there will be no explicit message boundaries; this
* is completely incompatible with the shared bus model
* - READY ... transfers may proceed when they shouldn't.
*
* REVISIT should changing those flags be privileged?
*/
#define SPI_MODE_MASK (SPI_MODE_X_MASK | SPI_CS_HIGH \
| SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP \
| SPI_NO_CS | SPI_READY | SPI_TX_DUAL \
| SPI_TX_QUAD | SPI_TX_OCTAL | SPI_RX_DUAL \
| SPI_RX_QUAD | SPI_RX_OCTAL \
| SPI_RX_CPHA_FLIP | SPI_3WIRE_HIZ \
| SPI_MOSI_IDLE_LOW)
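/*
 * For example, a userspace SPI_IOC_WR_MODE32 request of
 * SPI_MODE_3 | SPI_LSB_FIRST passes this mask unchanged, while a flag
 * outside it (such as SPI_CS_WORD) is rejected with -EINVAL by the
 * ioctl handler below.
 */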
struct spidev_data {
dev_t devt;
struct mutex spi_lock;
struct spi_device *spi;
struct list_head device_entry;
/* TX/RX buffers are NULL unless this device is open (users > 0) */
struct mutex buf_lock;
unsigned users;
u8 *tx_buffer;
u8 *rx_buffer;
u32 speed_hz;
};
static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_list_lock);
static unsigned bufsiz = 4096;
module_param(bufsiz, uint, S_IRUGO);
MODULE_PARM_DESC(bufsiz, "data bytes in biggest supported SPI message");
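/*
 * Illustrative userspace sketch (not part of the driver): a minimal
 * full-duplex transfer through the ioctl API handled below, using a
 * hypothetical /dev/spidev0.0 node. Error handling is omitted for
 * brevity.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/spi/spidev.h>
 *
 *	int fd = open("/dev/spidev0.0", O_RDWR);
 *	uint8_t mode = SPI_MODE_0;
 *	uint32_t speed = 1000000;
 *	ioctl(fd, SPI_IOC_WR_MODE, &mode);
 *	ioctl(fd, SPI_IOC_WR_MAX_SPEED_HZ, &speed);
 *
 *	uint8_t tx[4] = { 0x9f, 0, 0, 0 }, rx[4];
 *	struct spi_ioc_transfer xfer;
 *	memset(&xfer, 0, sizeof(xfer));
 *	xfer.tx_buf = (unsigned long)tx;
 *	xfer.rx_buf = (unsigned long)rx;
 *	xfer.len = sizeof(tx);
 *	ioctl(fd, SPI_IOC_MESSAGE(1), &xfer);
 */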
/*-------------------------------------------------------------------------*/
static ssize_t
spidev_sync_unlocked(struct spi_device *spi, struct spi_message *message)
{
ssize_t status;
status = spi_sync(spi, message);
if (status == 0)
status = message->actual_length;
return status;
}
static ssize_t
spidev_sync(struct spidev_data *spidev, struct spi_message *message)
{
ssize_t status;
struct spi_device *spi;
mutex_lock(&spidev->spi_lock);
spi = spidev->spi;
if (spi == NULL)
status = -ESHUTDOWN;
else
status = spidev_sync_unlocked(spi, message);
mutex_unlock(&spidev->spi_lock);
return status;
}
static inline ssize_t
spidev_sync_write(struct spidev_data *spidev, size_t len)
{
struct spi_transfer t = {
.tx_buf = spidev->tx_buffer,
.len = len,
.speed_hz = spidev->speed_hz,
};
struct spi_message m;
spi_message_init(&m);
spi_message_add_tail(&t, &m);
return spidev_sync(spidev, &m);
}
static inline ssize_t
spidev_sync_read(struct spidev_data *spidev, size_t len)
{
struct spi_transfer t = {
.rx_buf = spidev->rx_buffer,
.len = len,
.speed_hz = spidev->speed_hz,
};
struct spi_message m;
spi_message_init(&m);
spi_message_add_tail(&t, &m);
return spidev_sync(spidev, &m);
}
/*-------------------------------------------------------------------------*/
/* Read-only message with current device setup */
static ssize_t
spidev_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
{
struct spidev_data *spidev;
ssize_t status;
/* chipselect only toggles at start or end of operation */
if (count > bufsiz)
return -EMSGSIZE;
spidev = filp->private_data;
mutex_lock(&spidev->buf_lock);
status = spidev_sync_read(spidev, count);
if (status > 0) {
unsigned long missing;
missing = copy_to_user(buf, spidev->rx_buffer, status);
if (missing == status)
status = -EFAULT;
else
status = status - missing;
}
mutex_unlock(&spidev->buf_lock);
return status;
}
/* Write-only message with current device setup */
static ssize_t
spidev_write(struct file *filp, const char __user *buf,
size_t count, loff_t *f_pos)
{
struct spidev_data *spidev;
ssize_t status;
unsigned long missing;
/* chipselect only toggles at start or end of operation */
if (count > bufsiz)
return -EMSGSIZE;
spidev = filp->private_data;
mutex_lock(&spidev->buf_lock);
missing = copy_from_user(spidev->tx_buffer, buf, count);
if (missing == 0)
status = spidev_sync_write(spidev, count);
else
status = -EFAULT;
mutex_unlock(&spidev->buf_lock);
return status;
}
static int spidev_message(struct spidev_data *spidev,
struct spi_ioc_transfer *u_xfers, unsigned n_xfers)
{
struct spi_message msg;
struct spi_transfer *k_xfers;
struct spi_transfer *k_tmp;
struct spi_ioc_transfer *u_tmp;
unsigned n, total, tx_total, rx_total;
u8 *tx_buf, *rx_buf;
int status = -EFAULT;
spi_message_init(&msg);
k_xfers = kcalloc(n_xfers, sizeof(*k_tmp), GFP_KERNEL);
if (k_xfers == NULL)
return -ENOMEM;
/* Construct spi_message, copying any tx data to bounce buffer.
* We walk the array of user-provided transfers, using each one
* to initialize a kernel version of the same transfer.
*/
tx_buf = spidev->tx_buffer;
rx_buf = spidev->rx_buffer;
total = 0;
tx_total = 0;
rx_total = 0;
for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
n;
n--, k_tmp++, u_tmp++) {
/* Ensure that subsequent allocations from rx_buf/tx_buf also meet
* DMA alignment requirements.
*/
unsigned int len_aligned = ALIGN(u_tmp->len, ARCH_DMA_MINALIGN);
k_tmp->len = u_tmp->len;
total += k_tmp->len;
/* Since the function returns the total length of transfers
* on success, restrict the total to positive int values to
* avoid the return value looking like an error. Also check
* each transfer length to avoid arithmetic overflow.
*/
if (total > INT_MAX || k_tmp->len > INT_MAX) {
status = -EMSGSIZE;
goto done;
}
if (u_tmp->rx_buf) {
/* this transfer needs space in RX bounce buffer */
rx_total += len_aligned;
if (rx_total > bufsiz) {
status = -EMSGSIZE;
goto done;
}
k_tmp->rx_buf = rx_buf;
rx_buf += len_aligned;
}
if (u_tmp->tx_buf) {
/* this transfer needs space in TX bounce buffer */
tx_total += len_aligned;
if (tx_total > bufsiz) {
status = -EMSGSIZE;
goto done;
}
k_tmp->tx_buf = tx_buf;
if (copy_from_user(tx_buf, (const u8 __user *)
(uintptr_t) u_tmp->tx_buf,
u_tmp->len))
goto done;
tx_buf += len_aligned;
}
k_tmp->cs_change = !!u_tmp->cs_change;
k_tmp->tx_nbits = u_tmp->tx_nbits;
k_tmp->rx_nbits = u_tmp->rx_nbits;
k_tmp->bits_per_word = u_tmp->bits_per_word;
k_tmp->delay.value = u_tmp->delay_usecs;
k_tmp->delay.unit = SPI_DELAY_UNIT_USECS;
k_tmp->speed_hz = u_tmp->speed_hz;
k_tmp->word_delay.value = u_tmp->word_delay_usecs;
k_tmp->word_delay.unit = SPI_DELAY_UNIT_USECS;
if (!k_tmp->speed_hz)
k_tmp->speed_hz = spidev->speed_hz;
#ifdef VERBOSE
dev_dbg(&spidev->spi->dev,
" xfer len %u %s%s%s%dbits %u usec %u usec %uHz\n",
k_tmp->len,
k_tmp->rx_buf ? "rx " : "",
k_tmp->tx_buf ? "tx " : "",
k_tmp->cs_change ? "cs " : "",
k_tmp->bits_per_word ? : spidev->spi->bits_per_word,
k_tmp->delay.value,
k_tmp->word_delay.value,
k_tmp->speed_hz ? : spidev->spi->max_speed_hz);
#endif
spi_message_add_tail(k_tmp, &msg);
}
status = spidev_sync_unlocked(spidev->spi, &msg);
if (status < 0)
goto done;
/* copy any rx data out of bounce buffer */
for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
n;
n--, k_tmp++, u_tmp++) {
if (u_tmp->rx_buf) {
if (copy_to_user((u8 __user *)
(uintptr_t) u_tmp->rx_buf, k_tmp->rx_buf,
u_tmp->len)) {
status = -EFAULT;
goto done;
}
}
}
status = total;
done:
kfree(k_xfers);
return status;
}
static struct spi_ioc_transfer *
spidev_get_ioc_message(unsigned int cmd, struct spi_ioc_transfer __user *u_ioc,
unsigned *n_ioc)
{
u32 tmp;
/* Check type, command number and direction */
if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC
|| _IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0))
|| _IOC_DIR(cmd) != _IOC_WRITE)
return ERR_PTR(-ENOTTY);
tmp = _IOC_SIZE(cmd);
if ((tmp % sizeof(struct spi_ioc_transfer)) != 0)
return ERR_PTR(-EINVAL);
*n_ioc = tmp / sizeof(struct spi_ioc_transfer);
if (*n_ioc == 0)
return NULL;
/* copy into scratch area */
return memdup_user(u_ioc, tmp);
}
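/*
 * Example of the size-based encoding checked above: SPI_IOC_MESSAGE(2)
 * carries 2 * sizeof(struct spi_ioc_transfer) in _IOC_SIZE(cmd), so
 * *n_ioc comes back as 2 and memdup_user() copies exactly two transfer
 * descriptors from userspace in one go.
 */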
static long
spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int retval = 0;
struct spidev_data *spidev;
struct spi_device *spi;
u32 tmp;
unsigned n_ioc;
struct spi_ioc_transfer *ioc;
/* Check type and command number */
if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC)
return -ENOTTY;
/* guard against device removal before, or while,
* we issue this ioctl.
*/
spidev = filp->private_data;
mutex_lock(&spidev->spi_lock);
spi = spi_dev_get(spidev->spi);
if (spi == NULL) {
mutex_unlock(&spidev->spi_lock);
return -ESHUTDOWN;
}
/* use the buffer lock here for triple duty:
* - prevent I/O (from us) so calling spi_setup() is safe;
* - prevent concurrent SPI_IOC_WR_* from morphing
* data fields while SPI_IOC_RD_* reads them;
* - SPI_IOC_MESSAGE needs the buffer locked "normally".
*/
mutex_lock(&spidev->buf_lock);
switch (cmd) {
/* read requests */
case SPI_IOC_RD_MODE:
case SPI_IOC_RD_MODE32:
tmp = spi->mode;
{
struct spi_controller *ctlr = spi->controller;
if (ctlr->use_gpio_descriptors && ctlr->cs_gpiods &&
ctlr->cs_gpiods[spi_get_chipselect(spi, 0)])
tmp &= ~SPI_CS_HIGH;
}
if (cmd == SPI_IOC_RD_MODE)
retval = put_user(tmp & SPI_MODE_MASK,
(__u8 __user *)arg);
else
retval = put_user(tmp & SPI_MODE_MASK,
(__u32 __user *)arg);
break;
case SPI_IOC_RD_LSB_FIRST:
retval = put_user((spi->mode & SPI_LSB_FIRST) ? 1 : 0,
(__u8 __user *)arg);
break;
case SPI_IOC_RD_BITS_PER_WORD:
retval = put_user(spi->bits_per_word, (__u8 __user *)arg);
break;
case SPI_IOC_RD_MAX_SPEED_HZ:
retval = put_user(spidev->speed_hz, (__u32 __user *)arg);
break;
/* write requests */
case SPI_IOC_WR_MODE:
case SPI_IOC_WR_MODE32:
if (cmd == SPI_IOC_WR_MODE)
retval = get_user(tmp, (u8 __user *)arg);
else
retval = get_user(tmp, (u32 __user *)arg);
if (retval == 0) {
struct spi_controller *ctlr = spi->controller;
u32 save = spi->mode;
if (tmp & ~SPI_MODE_MASK) {
retval = -EINVAL;
break;
}
if (ctlr->use_gpio_descriptors && ctlr->cs_gpiods &&
ctlr->cs_gpiods[spi_get_chipselect(spi, 0)])
tmp |= SPI_CS_HIGH;
tmp |= spi->mode & ~SPI_MODE_MASK;
spi->mode = tmp & SPI_MODE_USER_MASK;
retval = spi_setup(spi);
if (retval < 0)
spi->mode = save;
else
dev_dbg(&spi->dev, "spi mode %x\n", tmp);
}
break;
case SPI_IOC_WR_LSB_FIRST:
retval = get_user(tmp, (__u8 __user *)arg);
if (retval == 0) {
u32 save = spi->mode;
if (tmp)
spi->mode |= SPI_LSB_FIRST;
else
spi->mode &= ~SPI_LSB_FIRST;
retval = spi_setup(spi);
if (retval < 0)
spi->mode = save;
else
dev_dbg(&spi->dev, "%csb first\n",
tmp ? 'l' : 'm');
}
break;
case SPI_IOC_WR_BITS_PER_WORD:
retval = get_user(tmp, (__u8 __user *)arg);
if (retval == 0) {
u8 save = spi->bits_per_word;
spi->bits_per_word = tmp;
retval = spi_setup(spi);
if (retval < 0)
spi->bits_per_word = save;
else
dev_dbg(&spi->dev, "%d bits per word\n", tmp);
}
break;
case SPI_IOC_WR_MAX_SPEED_HZ: {
u32 save;
retval = get_user(tmp, (__u32 __user *)arg);
if (retval)
break;
if (tmp == 0) {
retval = -EINVAL;
break;
}
save = spi->max_speed_hz;
spi->max_speed_hz = tmp;
retval = spi_setup(spi);
if (retval == 0) {
spidev->speed_hz = tmp;
dev_dbg(&spi->dev, "%d Hz (max)\n", spidev->speed_hz);
}
spi->max_speed_hz = save;
break;
}
default:
/* segmented and/or full-duplex I/O request */
/* Check message and copy into scratch area */
ioc = spidev_get_ioc_message(cmd,
(struct spi_ioc_transfer __user *)arg, &n_ioc);
if (IS_ERR(ioc)) {
retval = PTR_ERR(ioc);
break;
}
if (!ioc)
break; /* n_ioc is also 0 */
/* translate to spi_message, execute */
retval = spidev_message(spidev, ioc, n_ioc);
kfree(ioc);
break;
}
mutex_unlock(&spidev->buf_lock);
spi_dev_put(spi);
mutex_unlock(&spidev->spi_lock);
return retval;
}
#ifdef CONFIG_COMPAT
static long
spidev_compat_ioc_message(struct file *filp, unsigned int cmd,
unsigned long arg)
{
struct spi_ioc_transfer __user *u_ioc;
int retval = 0;
struct spidev_data *spidev;
struct spi_device *spi;
unsigned n_ioc, n;
struct spi_ioc_transfer *ioc;
u_ioc = (struct spi_ioc_transfer __user *) compat_ptr(arg);
/* guard against device removal before, or while,
* we issue this ioctl.
*/
spidev = filp->private_data;
mutex_lock(&spidev->spi_lock);
spi = spi_dev_get(spidev->spi);
if (spi == NULL) {
mutex_unlock(&spidev->spi_lock);
return -ESHUTDOWN;
}
/* SPI_IOC_MESSAGE needs the buffer locked "normally" */
mutex_lock(&spidev->buf_lock);
/* Check message and copy into scratch area */
ioc = spidev_get_ioc_message(cmd, u_ioc, &n_ioc);
if (IS_ERR(ioc)) {
retval = PTR_ERR(ioc);
goto done;
}
if (!ioc)
goto done; /* n_ioc is also 0 */
/* Convert buffer pointers */
for (n = 0; n < n_ioc; n++) {
ioc[n].rx_buf = (uintptr_t) compat_ptr(ioc[n].rx_buf);
ioc[n].tx_buf = (uintptr_t) compat_ptr(ioc[n].tx_buf);
}
/* translate to spi_message, execute */
retval = spidev_message(spidev, ioc, n_ioc);
kfree(ioc);
done:
mutex_unlock(&spidev->buf_lock);
spi_dev_put(spi);
mutex_unlock(&spidev->spi_lock);
return retval;
}
static long
spidev_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
if (_IOC_TYPE(cmd) == SPI_IOC_MAGIC
&& _IOC_NR(cmd) == _IOC_NR(SPI_IOC_MESSAGE(0))
&& _IOC_DIR(cmd) == _IOC_WRITE)
return spidev_compat_ioc_message(filp, cmd, arg);
return spidev_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#else
#define spidev_compat_ioctl NULL
#endif /* CONFIG_COMPAT */
static int spidev_open(struct inode *inode, struct file *filp)
{
struct spidev_data *spidev = NULL, *iter;
int status = -ENXIO;
mutex_lock(&device_list_lock);
list_for_each_entry(iter, &device_list, device_entry) {
if (iter->devt == inode->i_rdev) {
status = 0;
spidev = iter;
break;
}
}
if (!spidev) {
pr_debug("spidev: nothing for minor %d\n", iminor(inode));
goto err_find_dev;
}
if (!spidev->tx_buffer) {
spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
if (!spidev->tx_buffer) {
status = -ENOMEM;
goto err_find_dev;
}
}
if (!spidev->rx_buffer) {
spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
if (!spidev->rx_buffer) {
status = -ENOMEM;
goto err_alloc_rx_buf;
}
}
spidev->users++;
filp->private_data = spidev;
stream_open(inode, filp);
mutex_unlock(&device_list_lock);
return 0;
err_alloc_rx_buf:
kfree(spidev->tx_buffer);
spidev->tx_buffer = NULL;
err_find_dev:
mutex_unlock(&device_list_lock);
return status;
}
static int spidev_release(struct inode *inode, struct file *filp)
{
struct spidev_data *spidev;
int dofree;
mutex_lock(&device_list_lock);
spidev = filp->private_data;
filp->private_data = NULL;
mutex_lock(&spidev->spi_lock);
/* ... after we unbound from the underlying device? */
dofree = (spidev->spi == NULL);
mutex_unlock(&spidev->spi_lock);
/* last close? */
spidev->users--;
if (!spidev->users) {
kfree(spidev->tx_buffer);
spidev->tx_buffer = NULL;
kfree(spidev->rx_buffer);
spidev->rx_buffer = NULL;
if (dofree)
kfree(spidev);
else
spidev->speed_hz = spidev->spi->max_speed_hz;
}
#ifdef CONFIG_SPI_SLAVE
if (!dofree)
spi_slave_abort(spidev->spi);
#endif
mutex_unlock(&device_list_lock);
return 0;
}
static const struct file_operations spidev_fops = {
.owner = THIS_MODULE,
/* REVISIT switch to aio primitives, so that userspace
* gets more complete API coverage. It'll simplify things
* too, except for the locking.
*/
.write = spidev_write,
.read = spidev_read,
.unlocked_ioctl = spidev_ioctl,
.compat_ioctl = spidev_compat_ioctl,
.open = spidev_open,
.release = spidev_release,
.llseek = no_llseek,
};
/*-------------------------------------------------------------------------*/
/* The main reason to have this class is to make mdev/udev create the
* /dev/spidevB.C character device nodes exposing our userspace API.
* It also simplifies memory management.
*/
static struct class *spidev_class;
static const struct spi_device_id spidev_spi_ids[] = {
{ .name = "dh2228fv" },
{ .name = "ltc2488" },
{ .name = "sx1301" },
{ .name = "bk4" },
{ .name = "dhcom-board" },
{ .name = "m53cpld" },
{ .name = "spi-petra" },
{ .name = "spi-authenta" },
{ .name = "em3581" },
{ .name = "si3210" },
{},
};
MODULE_DEVICE_TABLE(spi, spidev_spi_ids);
/*
* spidev should never be referenced in DT without a specific compatible string,
* it is a Linux implementation thing rather than a description of the hardware.
*/
static int spidev_of_check(struct device *dev)
{
if (device_property_match_string(dev, "compatible", "spidev") < 0)
return 0;
dev_err(dev, "spidev listed directly in DT is not supported\n");
return -EINVAL;
}
static const struct of_device_id spidev_dt_ids[] = {
{ .compatible = "cisco,spi-petra", .data = &spidev_of_check },
{ .compatible = "dh,dhcom-board", .data = &spidev_of_check },
{ .compatible = "lineartechnology,ltc2488", .data = &spidev_of_check },
{ .compatible = "lwn,bk4", .data = &spidev_of_check },
{ .compatible = "menlo,m53cpld", .data = &spidev_of_check },
{ .compatible = "micron,spi-authenta", .data = &spidev_of_check },
{ .compatible = "rohm,dh2228fv", .data = &spidev_of_check },
{ .compatible = "semtech,sx1301", .data = &spidev_of_check },
{ .compatible = "silabs,em3581", .data = &spidev_of_check },
{ .compatible = "silabs,si3210", .data = &spidev_of_check },
{},
};
MODULE_DEVICE_TABLE(of, spidev_dt_ids);
/* Dummy SPI devices not to be used in production systems */
static int spidev_acpi_check(struct device *dev)
{
dev_warn(dev, "do not use this driver in production systems!\n");
return 0;
}
static const struct acpi_device_id spidev_acpi_ids[] = {
/*
* The ACPI SPT000* devices are only meant for development and
* testing. Systems used in production should have a proper ACPI
* description of the connected peripheral and they should also use
* a proper driver instead of poking directly to the SPI bus.
*/
{ "SPT0001", (kernel_ulong_t)&spidev_acpi_check },
{ "SPT0002", (kernel_ulong_t)&spidev_acpi_check },
{ "SPT0003", (kernel_ulong_t)&spidev_acpi_check },
{},
};
MODULE_DEVICE_TABLE(acpi, spidev_acpi_ids);
/*-------------------------------------------------------------------------*/
static int spidev_probe(struct spi_device *spi)
{
int (*match)(struct device *dev);
struct spidev_data *spidev;
int status;
unsigned long minor;
match = device_get_match_data(&spi->dev);
if (match) {
status = match(&spi->dev);
if (status)
return status;
}
/* Allocate driver data */
spidev = kzalloc(sizeof(*spidev), GFP_KERNEL);
if (!spidev)
return -ENOMEM;
/* Initialize the driver data */
spidev->spi = spi;
mutex_init(&spidev->spi_lock);
mutex_init(&spidev->buf_lock);
INIT_LIST_HEAD(&spidev->device_entry);
/* If we can allocate a minor number, hook up this device.
* Reusing minors is fine so long as udev or mdev is working.
*/
mutex_lock(&device_list_lock);
minor = find_first_zero_bit(minors, N_SPI_MINORS);
if (minor < N_SPI_MINORS) {
struct device *dev;
spidev->devt = MKDEV(SPIDEV_MAJOR, minor);
dev = device_create(spidev_class, &spi->dev, spidev->devt,
spidev, "spidev%d.%d",
spi->master->bus_num, spi_get_chipselect(spi, 0));
status = PTR_ERR_OR_ZERO(dev);
} else {
dev_dbg(&spi->dev, "no minor number available!\n");
status = -ENODEV;
}
if (status == 0) {
set_bit(minor, minors);
list_add(&spidev->device_entry, &device_list);
}
mutex_unlock(&device_list_lock);
spidev->speed_hz = spi->max_speed_hz;
if (status == 0)
spi_set_drvdata(spi, spidev);
else
kfree(spidev);
return status;
}
static void spidev_remove(struct spi_device *spi)
{
struct spidev_data *spidev = spi_get_drvdata(spi);
/* prevent new opens */
mutex_lock(&device_list_lock);
/* make sure ops on existing fds can abort cleanly */
mutex_lock(&spidev->spi_lock);
spidev->spi = NULL;
mutex_unlock(&spidev->spi_lock);
list_del(&spidev->device_entry);
device_destroy(spidev_class, spidev->devt);
clear_bit(MINOR(spidev->devt), minors);
if (spidev->users == 0)
kfree(spidev);
mutex_unlock(&device_list_lock);
}
static struct spi_driver spidev_spi_driver = {
.driver = {
.name = "spidev",
.of_match_table = spidev_dt_ids,
.acpi_match_table = spidev_acpi_ids,
},
.probe = spidev_probe,
.remove = spidev_remove,
.id_table = spidev_spi_ids,
/* NOTE: suspend/resume methods are not necessary here.
* We don't do anything except pass the requests to/from
* the underlying controller. The refrigerator handles
* most issues; the controller driver handles the rest.
*/
};
/*-------------------------------------------------------------------------*/
static int __init spidev_init(void)
{
int status;
/* Claim our 256 reserved device numbers. Then register a class
* that will key udev/mdev to add/remove /dev nodes. Last, register
* the driver which manages those device numbers.
*/
status = register_chrdev(SPIDEV_MAJOR, "spi", &spidev_fops);
if (status < 0)
return status;
spidev_class = class_create("spidev");
if (IS_ERR(spidev_class)) {
unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
return PTR_ERR(spidev_class);
}
status = spi_register_driver(&spidev_spi_driver);
if (status < 0) {
class_destroy(spidev_class);
unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
}
return status;
}
module_init(spidev_init);
static void __exit spidev_exit(void)
{
spi_unregister_driver(&spidev_spi_driver);
class_destroy(spidev_class);
unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
}
module_exit(spidev_exit);
MODULE_AUTHOR("Andrea Paterniani, <[email protected]>");
MODULE_DESCRIPTION("User mode SPI device interface");
MODULE_LICENSE("GPL");
MODULE_ALIAS("spi:spidev");
| linux-master | drivers/spi/spidev.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* A driver for the ARM PL022 PrimeCell SSP/SPI bus master.
*
* Copyright (C) 2008-2012 ST-Ericsson AB
* Copyright (C) 2006 STMicroelectronics Pvt. Ltd.
*
* Author: Linus Walleij <[email protected]>
*
* Initial version inspired by:
* linux-2.6.17-rc3-mm1/drivers/spi/pxa2xx_spi.c
* Initial adoption to PL022 by:
* Sachin Verma <[email protected]>
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl022.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
/*
* This macro is used to define some register default values.
* reg is masked with mask, then OR:ed with an (again masked)
* val shifted sb steps to the left.
*/
#define SSP_WRITE_BITS(reg, val, mask, sb) \
((reg) = (((reg) & ~(mask)) | (((val)<<(sb)) & (mask))))
/*
* This macro is also used to define some default values.
* It will just shift val by sb steps to the left and mask
* the result with mask.
*/
#define GEN_MASK_BITS(val, mask, sb) \
(((val)<<(sb)) & (mask))
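/*
 * Example: GEN_MASK_BITS(3, SSP_CR0_MASK_FRF, 4) evaluates to
 * (3 << 4) & 0x30 = 0x30, i.e. the value lands in the FRF field and
 * anything that would spill outside the field is masked away;
 * SSP_WRITE_BITS() does the same but read-modify-writes an existing
 * register image.
 */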
#define DRIVE_TX 0
#define DO_NOT_DRIVE_TX 1
#define DO_NOT_QUEUE_DMA 0
#define QUEUE_DMA 1
#define RX_TRANSFER 1
#define TX_TRANSFER 2
/*
* Macros to access SSP Registers with their offsets
*/
#define SSP_CR0(r) (r + 0x000)
#define SSP_CR1(r) (r + 0x004)
#define SSP_DR(r) (r + 0x008)
#define SSP_SR(r) (r + 0x00C)
#define SSP_CPSR(r) (r + 0x010)
#define SSP_IMSC(r) (r + 0x014)
#define SSP_RIS(r) (r + 0x018)
#define SSP_MIS(r) (r + 0x01C)
#define SSP_ICR(r) (r + 0x020)
#define SSP_DMACR(r) (r + 0x024)
#define SSP_CSR(r) (r + 0x030) /* vendor extension */
#define SSP_ITCR(r) (r + 0x080)
#define SSP_ITIP(r) (r + 0x084)
#define SSP_ITOP(r) (r + 0x088)
#define SSP_TDR(r) (r + 0x08C)
#define SSP_PID0(r) (r + 0xFE0)
#define SSP_PID1(r) (r + 0xFE4)
#define SSP_PID2(r) (r + 0xFE8)
#define SSP_PID3(r) (r + 0xFEC)
#define SSP_CID0(r) (r + 0xFF0)
#define SSP_CID1(r) (r + 0xFF4)
#define SSP_CID2(r) (r + 0xFF8)
#define SSP_CID3(r) (r + 0xFFC)
/*
* SSP Control Register 0 - SSP_CR0
*/
#define SSP_CR0_MASK_DSS (0x0FUL << 0)
#define SSP_CR0_MASK_FRF (0x3UL << 4)
#define SSP_CR0_MASK_SPO (0x1UL << 6)
#define SSP_CR0_MASK_SPH (0x1UL << 7)
#define SSP_CR0_MASK_SCR (0xFFUL << 8)
/*
 * The ST version of this block moves some bits
* in SSP_CR0 and extends it to 32 bits
*/
#define SSP_CR0_MASK_DSS_ST (0x1FUL << 0)
#define SSP_CR0_MASK_HALFDUP_ST (0x1UL << 5)
#define SSP_CR0_MASK_CSS_ST (0x1FUL << 16)
#define SSP_CR0_MASK_FRF_ST (0x3UL << 21)
/*
 * SSP Control Register 1 - SSP_CR1
*/
#define SSP_CR1_MASK_LBM (0x1UL << 0)
#define SSP_CR1_MASK_SSE (0x1UL << 1)
#define SSP_CR1_MASK_MS (0x1UL << 2)
#define SSP_CR1_MASK_SOD (0x1UL << 3)
/*
* The ST version of this block adds some bits
* in SSP_CR1
*/
#define SSP_CR1_MASK_RENDN_ST (0x1UL << 4)
#define SSP_CR1_MASK_TENDN_ST (0x1UL << 5)
#define SSP_CR1_MASK_MWAIT_ST (0x1UL << 6)
#define SSP_CR1_MASK_RXIFLSEL_ST (0x7UL << 7)
#define SSP_CR1_MASK_TXIFLSEL_ST (0x7UL << 10)
/* This one is only in the PL023 variant */
#define SSP_CR1_MASK_FBCLKDEL_ST (0x7UL << 13)
/*
* SSP Status Register - SSP_SR
*/
#define SSP_SR_MASK_TFE (0x1UL << 0) /* Transmit FIFO empty */
#define SSP_SR_MASK_TNF (0x1UL << 1) /* Transmit FIFO not full */
#define SSP_SR_MASK_RNE (0x1UL << 2) /* Receive FIFO not empty */
#define SSP_SR_MASK_RFF (0x1UL << 3) /* Receive FIFO full */
#define SSP_SR_MASK_BSY (0x1UL << 4) /* Busy Flag */
/*
* SSP Clock Prescale Register - SSP_CPSR
*/
#define SSP_CPSR_MASK_CPSDVSR (0xFFUL << 0)
/*
* SSP Interrupt Mask Set/Clear Register - SSP_IMSC
*/
#define SSP_IMSC_MASK_RORIM (0x1UL << 0) /* Receive Overrun Interrupt mask */
#define SSP_IMSC_MASK_RTIM (0x1UL << 1) /* Receive timeout Interrupt mask */
#define SSP_IMSC_MASK_RXIM (0x1UL << 2) /* Receive FIFO Interrupt mask */
#define SSP_IMSC_MASK_TXIM (0x1UL << 3) /* Transmit FIFO Interrupt mask */
/*
* SSP Raw Interrupt Status Register - SSP_RIS
*/
/* Receive Overrun Raw Interrupt status */
#define SSP_RIS_MASK_RORRIS (0x1UL << 0)
/* Receive Timeout Raw Interrupt status */
#define SSP_RIS_MASK_RTRIS (0x1UL << 1)
/* Receive FIFO Raw Interrupt status */
#define SSP_RIS_MASK_RXRIS (0x1UL << 2)
/* Transmit FIFO Raw Interrupt status */
#define SSP_RIS_MASK_TXRIS (0x1UL << 3)
/*
* SSP Masked Interrupt Status Register - SSP_MIS
*/
/* Receive Overrun Masked Interrupt status */
#define SSP_MIS_MASK_RORMIS (0x1UL << 0)
/* Receive Timeout Masked Interrupt status */
#define SSP_MIS_MASK_RTMIS (0x1UL << 1)
/* Receive FIFO Masked Interrupt status */
#define SSP_MIS_MASK_RXMIS (0x1UL << 2)
/* Transmit FIFO Masked Interrupt status */
#define SSP_MIS_MASK_TXMIS (0x1UL << 3)
/*
* SSP Interrupt Clear Register - SSP_ICR
*/
/* Receive Overrun Raw Clear Interrupt bit */
#define SSP_ICR_MASK_RORIC (0x1UL << 0)
/* Receive Timeout Clear Interrupt bit */
#define SSP_ICR_MASK_RTIC (0x1UL << 1)
/*
* SSP DMA Control Register - SSP_DMACR
*/
/* Receive DMA Enable bit */
#define SSP_DMACR_MASK_RXDMAE (0x1UL << 0)
/* Transmit DMA Enable bit */
#define SSP_DMACR_MASK_TXDMAE (0x1UL << 1)
/*
* SSP Chip Select Control Register - SSP_CSR
* (vendor extension)
*/
#define SSP_CSR_CSVALUE_MASK (0x1FUL << 0)
/*
* SSP Integration Test control Register - SSP_ITCR
*/
#define SSP_ITCR_MASK_ITEN (0x1UL << 0)
#define SSP_ITCR_MASK_TESTFIFO (0x1UL << 1)
/*
* SSP Integration Test Input Register - SSP_ITIP
*/
#define ITIP_MASK_SSPRXD (0x1UL << 0)
#define ITIP_MASK_SSPFSSIN (0x1UL << 1)
#define ITIP_MASK_SSPCLKIN (0x1UL << 2)
#define ITIP_MASK_RXDMAC (0x1UL << 3)
#define ITIP_MASK_TXDMAC (0x1UL << 4)
#define ITIP_MASK_SSPTXDIN (0x1UL << 5)
/*
* SSP Integration Test output Register - SSP_ITOP
*/
#define ITOP_MASK_SSPTXD (0x1UL << 0)
#define ITOP_MASK_SSPFSSOUT (0x1UL << 1)
#define ITOP_MASK_SSPCLKOUT (0x1UL << 2)
#define ITOP_MASK_SSPOEn (0x1UL << 3)
#define ITOP_MASK_SSPCTLOEn (0x1UL << 4)
#define ITOP_MASK_RORINTR (0x1UL << 5)
#define ITOP_MASK_RTINTR (0x1UL << 6)
#define ITOP_MASK_RXINTR (0x1UL << 7)
#define ITOP_MASK_TXINTR (0x1UL << 8)
#define ITOP_MASK_INTR (0x1UL << 9)
#define ITOP_MASK_RXDMABREQ (0x1UL << 10)
#define ITOP_MASK_RXDMASREQ (0x1UL << 11)
#define ITOP_MASK_TXDMABREQ (0x1UL << 12)
#define ITOP_MASK_TXDMASREQ (0x1UL << 13)
/*
* SSP Test Data Register - SSP_TDR
*/
#define TDR_MASK_TESTDATA (0xFFFFFFFF)
/*
* Message State
* we use the spi_message.state (void *) pointer to
 * hold a single state value; that's why all this
* (void *) casting is done here.
*/
#define STATE_START ((void *) 0)
#define STATE_RUNNING ((void *) 1)
#define STATE_DONE ((void *) 2)
#define STATE_ERROR ((void *) -1)
#define STATE_TIMEOUT ((void *) -2)
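/*
 * Illustrative sketch (not part of the driver logic itself): the state is
 * simply stored in and compared against spi_message.state, e.g.
 *
 *	msg->state = STATE_RUNNING;
 *	...
 *	if (msg->state == STATE_DONE)
 *		msg->status = 0;
 *
 * so these (void *) constants are only ever compared, never dereferenced.
 */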
/*
* SSP State - Whether Enabled or Disabled
*/
#define SSP_DISABLED (0)
#define SSP_ENABLED (1)
/*
* SSP DMA State - Whether DMA Enabled or Disabled
*/
#define SSP_DMA_DISABLED (0)
#define SSP_DMA_ENABLED (1)
/*
* SSP Clock Defaults
*/
#define SSP_DEFAULT_CLKRATE 0x2
#define SSP_DEFAULT_PRESCALE 0x40
/*
* SSP Clock Parameter ranges
*/
#define CPSDVR_MIN 0x02
#define CPSDVR_MAX 0xFE
#define SCR_MIN 0x00
#define SCR_MAX 0xFF
/*
* SSP Interrupt related Macros
*/
#define DEFAULT_SSP_REG_IMSC 0x0UL
#define DISABLE_ALL_INTERRUPTS DEFAULT_SSP_REG_IMSC
#define ENABLE_ALL_INTERRUPTS ( \
SSP_IMSC_MASK_RORIM | \
SSP_IMSC_MASK_RTIM | \
SSP_IMSC_MASK_RXIM | \
SSP_IMSC_MASK_TXIM \
)
#define CLEAR_ALL_INTERRUPTS 0x3
#define SPI_POLLING_TIMEOUT 1000
/*
* The type of reading going on this chip
*/
enum ssp_reading {
READING_NULL,
READING_U8,
READING_U16,
READING_U32
};
/*
* The type of writing going on this chip
*/
enum ssp_writing {
WRITING_NULL,
WRITING_U8,
WRITING_U16,
WRITING_U32
};
/**
* struct vendor_data - vendor-specific config parameters
* for PL022 derivates
* @fifodepth: depth of FIFOs (both)
* @max_bpw: maximum number of bits per word
 * @unidir: supports unidirectional transfers
 * @extended_cr: 32 bit wide control register 0 with extra
 * features and extra bits in CR1 as found in the ST variants
* @pl023: supports a subset of the ST extensions called "PL023"
* @loopback: supports loopback mode
* @internal_cs_ctrl: supports chip select control register
*/
struct vendor_data {
int fifodepth;
int max_bpw;
bool unidir;
bool extended_cr;
bool pl023;
bool loopback;
bool internal_cs_ctrl;
};
/**
* struct pl022 - This is the private SSP driver data structure
* @adev: AMBA device model hookup
* @vendor: vendor data for the IP block
* @phybase: the physical memory where the SSP device resides
* @virtbase: the virtual memory where the SSP is mapped
* @clk: outgoing clock "SPICLK" for the SPI bus
* @host: SPI framework hookup
* @host_info: controller-specific data from machine setup
* @pump_transfers: Tasklet used in Interrupt Transfer mode
* @cur_msg: Pointer to current spi_message being processed
* @cur_transfer: Pointer to current spi_transfer
 * @cur_chip: pointer to the current client's chip (assigned from controller_state)
* @next_msg_cs_active: the next message in the queue has been examined
* and it was found that it uses the same chip select as the previous
* message, so we left it active after the previous transfer, and it's
* active already.
* @tx: current position in TX buffer to be read
* @tx_end: end position in TX buffer to be read
* @rx: current position in RX buffer to be written
* @rx_end: end position in RX buffer to be written
* @read: the type of read currently going on
* @write: the type of write currently going on
* @exp_fifo_level: expected FIFO level
* @rx_lev_trig: receive FIFO watermark level which triggers IRQ
* @tx_lev_trig: transmit FIFO watermark level which triggers IRQ
* @dma_rx_channel: optional channel for RX DMA
* @dma_tx_channel: optional channel for TX DMA
* @sgt_rx: scattertable for the RX transfer
* @sgt_tx: scattertable for the TX transfer
* @dummypage: a dummy page used for driving data on the bus with DMA
* @dma_running: indicates whether DMA is in operation
* @cur_cs: current chip select index
* @cur_gpiod: current chip select GPIO descriptor
*/
struct pl022 {
struct amba_device *adev;
struct vendor_data *vendor;
resource_size_t phybase;
void __iomem *virtbase;
struct clk *clk;
struct spi_controller *host;
struct pl022_ssp_controller *host_info;
/* Message per-transfer pump */
struct tasklet_struct pump_transfers;
struct spi_message *cur_msg;
struct spi_transfer *cur_transfer;
struct chip_data *cur_chip;
bool next_msg_cs_active;
void *tx;
void *tx_end;
void *rx;
void *rx_end;
enum ssp_reading read;
enum ssp_writing write;
u32 exp_fifo_level;
enum ssp_rx_level_trig rx_lev_trig;
enum ssp_tx_level_trig tx_lev_trig;
/* DMA settings */
#ifdef CONFIG_DMA_ENGINE
struct dma_chan *dma_rx_channel;
struct dma_chan *dma_tx_channel;
struct sg_table sgt_rx;
struct sg_table sgt_tx;
char *dummypage;
bool dma_running;
#endif
int cur_cs;
struct gpio_desc *cur_gpiod;
};
/**
* struct chip_data - To maintain runtime state of SSP for each client chip
* @cr0: Value of control register CR0 of SSP - on later ST variants this
* register is 32 bits wide rather than just 16
* @cr1: Value of control register CR1 of SSP
* @dmacr: Value of DMA control Register of SSP
* @cpsr: Value of Clock prescale register
 * @n_bytes: how many bytes (a power of 2) are required for a given data width of the client
* @enable_dma: Whether to enable DMA or not
* @read: function ptr to be used to read when doing xfer for this chip
* @write: function ptr to be used to write when doing xfer for this chip
* @xfer_type: polling/interrupt/DMA
*
 * Runtime state of the SSP controller, maintained per chip.
 * This is set according to the current message being served.
*/
struct chip_data {
u32 cr0;
u16 cr1;
u16 dmacr;
u16 cpsr;
u8 n_bytes;
bool enable_dma;
enum ssp_reading read;
enum ssp_writing write;
int xfer_type;
};
/**
* internal_cs_control - Control chip select signals via SSP_CSR.
* @pl022: SSP driver private data structure
 * @command: select/deselect the chip
*
* Used on controller with internal chip select control via SSP_CSR register
* (vendor extension). Each of the 5 LSB in the register controls one chip
* select signal.
*/
static void internal_cs_control(struct pl022 *pl022, u32 command)
{
u32 tmp;
tmp = readw(SSP_CSR(pl022->virtbase));
if (command == SSP_CHIP_SELECT)
tmp &= ~BIT(pl022->cur_cs);
else
tmp |= BIT(pl022->cur_cs);
writew(tmp, SSP_CSR(pl022->virtbase));
}
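/*
 * Worked example (illustrative): with all five chip selects deasserted the
 * CSVALUE field reads 0x1F. Selecting chip select 2 (SSP_CHIP_SELECT) clears
 * bit 2 and leaves 0x1B; deselecting it sets the bit again and restores
 * 0x1F, since the lines are active low.
 */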
static void pl022_cs_control(struct pl022 *pl022, u32 command)
{
if (pl022->vendor->internal_cs_ctrl)
internal_cs_control(pl022, command);
else if (pl022->cur_gpiod)
/*
* This needs to be inverted since with GPIOLIB in
* control, the inversion will be handled by
* GPIOLIB's active low handling. The "command"
* passed into this function will be SSP_CHIP_SELECT
* which is enum:ed to 0, so we need the inverse
* (1) to activate chip select.
*/
gpiod_set_value(pl022->cur_gpiod, !command);
}
/**
* giveback - current spi_message is over, schedule next message and call
* callback of this message. Assumes that caller already
* set message->status; dma and pio irqs are blocked
* @pl022: SSP driver private data structure
*/
static void giveback(struct pl022 *pl022)
{
struct spi_transfer *last_transfer;
pl022->next_msg_cs_active = false;
last_transfer = list_last_entry(&pl022->cur_msg->transfers,
struct spi_transfer, transfer_list);
/* Delay if requested before any change in chip select */
/*
* FIXME: This runs in interrupt context.
* Is this really smart?
*/
spi_transfer_delay_exec(last_transfer);
if (!last_transfer->cs_change) {
struct spi_message *next_msg;
/*
* cs_change was not set. We can keep the chip select
 * enabled if there is a message in the queue and it is
* for the same spi device.
*
* We cannot postpone this until pump_messages, because
* after calling msg->complete (below) the driver that
* sent the current message could be unloaded, which
* could invalidate the cs_control() callback...
*/
/* get a pointer to the next message, if any */
next_msg = spi_get_next_queued_message(pl022->host);
/*
* see if the next and current messages point
* to the same spi device.
*/
if (next_msg && next_msg->spi != pl022->cur_msg->spi)
next_msg = NULL;
if (!next_msg || pl022->cur_msg->state == STATE_ERROR)
pl022_cs_control(pl022, SSP_CHIP_DESELECT);
else
pl022->next_msg_cs_active = true;
}
pl022->cur_msg = NULL;
pl022->cur_transfer = NULL;
pl022->cur_chip = NULL;
/* disable the SPI/SSP operation */
writew((readw(SSP_CR1(pl022->virtbase)) &
(~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
spi_finalize_current_message(pl022->host);
}
/**
* flush - flush the FIFO to reach a clean state
* @pl022: SSP driver private data structure
*/
static int flush(struct pl022 *pl022)
{
unsigned long limit = loops_per_jiffy << 1;
dev_dbg(&pl022->adev->dev, "flush\n");
do {
while (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
readw(SSP_DR(pl022->virtbase));
} while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_BSY) && limit--);
pl022->exp_fifo_level = 0;
return limit;
}
/**
* restore_state - Load configuration of current chip
* @pl022: SSP driver private data structure
*/
static void restore_state(struct pl022 *pl022)
{
struct chip_data *chip = pl022->cur_chip;
if (pl022->vendor->extended_cr)
writel(chip->cr0, SSP_CR0(pl022->virtbase));
else
writew(chip->cr0, SSP_CR0(pl022->virtbase));
writew(chip->cr1, SSP_CR1(pl022->virtbase));
writew(chip->dmacr, SSP_DMACR(pl022->virtbase));
writew(chip->cpsr, SSP_CPSR(pl022->virtbase));
writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
}
/*
* Default SSP Register Values
*/
#define DEFAULT_SSP_REG_CR0 ( \
GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0) | \
GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 4) | \
GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \
)
/* ST versions have slightly different bit layout */
#define DEFAULT_SSP_REG_CR0_ST ( \
GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS_ST, 0) | \
GEN_MASK_BITS(SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, SSP_CR0_MASK_HALFDUP_ST, 5) | \
GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) | \
GEN_MASK_BITS(SSP_BITS_8, SSP_CR0_MASK_CSS_ST, 16) | \
GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF_ST, 21) \
)
/* The PL023 version is slightly different again */
#define DEFAULT_SSP_REG_CR0_ST_PL023 ( \
GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS_ST, 0) | \
GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \
)
#define DEFAULT_SSP_REG_CR1 ( \
GEN_MASK_BITS(LOOPBACK_DISABLED, SSP_CR1_MASK_LBM, 0) | \
GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \
GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \
GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) \
)
/* ST versions extend this register to use all 16 bits */
#define DEFAULT_SSP_REG_CR1_ST ( \
DEFAULT_SSP_REG_CR1 | \
GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN_ST, 4) | \
GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN_ST, 5) | \
GEN_MASK_BITS(SSP_MWIRE_WAIT_ZERO, SSP_CR1_MASK_MWAIT_ST, 6) |\
GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL_ST, 7) | \
GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL_ST, 10) \
)
/*
* The PL023 variant has further differences: no loopback mode, no microwire
* support, and a new clock feedback delay setting.
*/
#define DEFAULT_SSP_REG_CR1_ST_PL023 ( \
GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \
GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \
GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) | \
GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN_ST, 4) | \
GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN_ST, 5) | \
GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL_ST, 7) | \
GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL_ST, 10) | \
GEN_MASK_BITS(SSP_FEEDBACK_CLK_DELAY_NONE, SSP_CR1_MASK_FBCLKDEL_ST, 13) \
)
#define DEFAULT_SSP_REG_CPSR ( \
GEN_MASK_BITS(SSP_DEFAULT_PRESCALE, SSP_CPSR_MASK_CPSDVSR, 0) \
)
#define DEFAULT_SSP_REG_DMACR (\
GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_RXDMAE, 0) | \
GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_TXDMAE, 1) \
)
/**
* load_ssp_default_config - Load default configuration for SSP
* @pl022: SSP driver private data structure
*/
static void load_ssp_default_config(struct pl022 *pl022)
{
if (pl022->vendor->pl023) {
writel(DEFAULT_SSP_REG_CR0_ST_PL023, SSP_CR0(pl022->virtbase));
writew(DEFAULT_SSP_REG_CR1_ST_PL023, SSP_CR1(pl022->virtbase));
} else if (pl022->vendor->extended_cr) {
writel(DEFAULT_SSP_REG_CR0_ST, SSP_CR0(pl022->virtbase));
writew(DEFAULT_SSP_REG_CR1_ST, SSP_CR1(pl022->virtbase));
} else {
writew(DEFAULT_SSP_REG_CR0, SSP_CR0(pl022->virtbase));
writew(DEFAULT_SSP_REG_CR1, SSP_CR1(pl022->virtbase));
}
writew(DEFAULT_SSP_REG_DMACR, SSP_DMACR(pl022->virtbase));
writew(DEFAULT_SSP_REG_CPSR, SSP_CPSR(pl022->virtbase));
writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
}
/*
* This will write to TX and read from RX according to the parameters
* set in pl022.
*/
static void readwriter(struct pl022 *pl022)
{
/*
* The FIFO depth is different between primecell variants.
 * I believe filling in too much in the FIFO might cause
 * errors in 8bit wide transfers on ARM variants (just 8 words
 * FIFO, meaning only 8x8 = 64 bits in FIFO) at least.
*
* To prevent this issue, the TX FIFO is only filled to the
* unused RX FIFO fill length, regardless of what the TX
* FIFO status flag indicates.
*/
dev_dbg(&pl022->adev->dev,
"%s, rx: %p, rxend: %p, tx: %p, txend: %p\n",
__func__, pl022->rx, pl022->rx_end, pl022->tx, pl022->tx_end);
/* Read as much as you can */
while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
&& (pl022->rx < pl022->rx_end)) {
switch (pl022->read) {
case READING_NULL:
readw(SSP_DR(pl022->virtbase));
break;
case READING_U8:
*(u8 *) (pl022->rx) =
readw(SSP_DR(pl022->virtbase)) & 0xFFU;
break;
case READING_U16:
*(u16 *) (pl022->rx) =
(u16) readw(SSP_DR(pl022->virtbase));
break;
case READING_U32:
*(u32 *) (pl022->rx) =
readl(SSP_DR(pl022->virtbase));
break;
}
pl022->rx += (pl022->cur_chip->n_bytes);
pl022->exp_fifo_level--;
}
/*
* Write as much as possible up to the RX FIFO size
*/
while ((pl022->exp_fifo_level < pl022->vendor->fifodepth)
&& (pl022->tx < pl022->tx_end)) {
switch (pl022->write) {
case WRITING_NULL:
writew(0x0, SSP_DR(pl022->virtbase));
break;
case WRITING_U8:
writew(*(u8 *) (pl022->tx), SSP_DR(pl022->virtbase));
break;
case WRITING_U16:
writew((*(u16 *) (pl022->tx)), SSP_DR(pl022->virtbase));
break;
case WRITING_U32:
writel(*(u32 *) (pl022->tx), SSP_DR(pl022->virtbase));
break;
}
pl022->tx += (pl022->cur_chip->n_bytes);
pl022->exp_fifo_level++;
/*
* This inner reader takes care of things appearing in the RX
* FIFO as we're transmitting. This will happen a lot since the
* clock starts running when you put things into the TX FIFO,
* and then things are continuously clocked into the RX FIFO.
*/
while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
&& (pl022->rx < pl022->rx_end)) {
switch (pl022->read) {
case READING_NULL:
readw(SSP_DR(pl022->virtbase));
break;
case READING_U8:
*(u8 *) (pl022->rx) =
readw(SSP_DR(pl022->virtbase)) & 0xFFU;
break;
case READING_U16:
*(u16 *) (pl022->rx) =
(u16) readw(SSP_DR(pl022->virtbase));
break;
case READING_U32:
*(u32 *) (pl022->rx) =
readl(SSP_DR(pl022->virtbase));
break;
}
pl022->rx += (pl022->cur_chip->n_bytes);
pl022->exp_fifo_level--;
}
}
/*
* When we exit here the TX FIFO should be full and the RX FIFO
* should be empty
*/
}
/**
* next_transfer - Move to the Next transfer in the current spi message
* @pl022: SSP driver private data structure
*
 * This function moves through the linked list of spi transfers in the
 * current spi message and returns the state of the current spi
 * message, i.e. whether its last transfer is done (STATE_DONE) or the
 * next transfer is ready (STATE_RUNNING)
*/
static void *next_transfer(struct pl022 *pl022)
{
struct spi_message *msg = pl022->cur_msg;
struct spi_transfer *trans = pl022->cur_transfer;
/* Move to next transfer */
if (trans->transfer_list.next != &msg->transfers) {
pl022->cur_transfer =
list_entry(trans->transfer_list.next,
struct spi_transfer, transfer_list);
return STATE_RUNNING;
}
return STATE_DONE;
}
/*
* This DMA functionality is only compiled in if we have
* access to the generic DMA devices/DMA engine.
*/
#ifdef CONFIG_DMA_ENGINE
static void unmap_free_dma_scatter(struct pl022 *pl022)
{
/* Unmap and free the SG tables */
dma_unmap_sg(pl022->dma_tx_channel->device->dev, pl022->sgt_tx.sgl,
pl022->sgt_tx.nents, DMA_TO_DEVICE);
dma_unmap_sg(pl022->dma_rx_channel->device->dev, pl022->sgt_rx.sgl,
pl022->sgt_rx.nents, DMA_FROM_DEVICE);
sg_free_table(&pl022->sgt_rx);
sg_free_table(&pl022->sgt_tx);
}
static void dma_callback(void *data)
{
struct pl022 *pl022 = data;
struct spi_message *msg = pl022->cur_msg;
BUG_ON(!pl022->sgt_rx.sgl);
#ifdef VERBOSE_DEBUG
/*
* Optionally dump out buffers to inspect contents, this is
* good if you want to convince yourself that the loopback
 * read/write contents are the same, when adapting to a new
* DMA engine.
*/
{
struct scatterlist *sg;
unsigned int i;
dma_sync_sg_for_cpu(&pl022->adev->dev,
pl022->sgt_rx.sgl,
pl022->sgt_rx.nents,
DMA_FROM_DEVICE);
for_each_sg(pl022->sgt_rx.sgl, sg, pl022->sgt_rx.nents, i) {
dev_dbg(&pl022->adev->dev, "SPI RX SG ENTRY: %d", i);
print_hex_dump(KERN_ERR, "SPI RX: ",
DUMP_PREFIX_OFFSET,
16,
1,
sg_virt(sg),
sg_dma_len(sg),
1);
}
for_each_sg(pl022->sgt_tx.sgl, sg, pl022->sgt_tx.nents, i) {
dev_dbg(&pl022->adev->dev, "SPI TX SG ENTRY: %d", i);
print_hex_dump(KERN_ERR, "SPI TX: ",
DUMP_PREFIX_OFFSET,
16,
1,
sg_virt(sg),
sg_dma_len(sg),
1);
}
}
#endif
unmap_free_dma_scatter(pl022);
/* Update total bytes transferred */
msg->actual_length += pl022->cur_transfer->len;
/* Move to next transfer */
msg->state = next_transfer(pl022);
if (msg->state != STATE_DONE && pl022->cur_transfer->cs_change)
pl022_cs_control(pl022, SSP_CHIP_DESELECT);
tasklet_schedule(&pl022->pump_transfers);
}
static void setup_dma_scatter(struct pl022 *pl022,
void *buffer,
unsigned int length,
struct sg_table *sgtab)
{
struct scatterlist *sg;
int bytesleft = length;
void *bufp = buffer;
int mapbytes;
int i;
if (buffer) {
for_each_sg(sgtab->sgl, sg, sgtab->nents, i) {
/*
 * If there are fewer bytes left than what fits
 * in the remainder of the current page (accounting for the
 * page alignment offset) we just feed in this, else we
 * stuff in as much as we can.
*/
if (bytesleft < (PAGE_SIZE - offset_in_page(bufp)))
mapbytes = bytesleft;
else
mapbytes = PAGE_SIZE - offset_in_page(bufp);
sg_set_page(sg, virt_to_page(bufp),
mapbytes, offset_in_page(bufp));
bufp += mapbytes;
bytesleft -= mapbytes;
dev_dbg(&pl022->adev->dev,
"set RX/TX target page @ %p, %d bytes, %d left\n",
bufp, mapbytes, bytesleft);
}
} else {
/* Map the dummy buffer on every page */
for_each_sg(sgtab->sgl, sg, sgtab->nents, i) {
if (bytesleft < PAGE_SIZE)
mapbytes = bytesleft;
else
mapbytes = PAGE_SIZE;
sg_set_page(sg, virt_to_page(pl022->dummypage),
mapbytes, 0);
bytesleft -= mapbytes;
dev_dbg(&pl022->adev->dev,
"set RX/TX to dummy page %d bytes, %d left\n",
mapbytes, bytesleft);
}
}
BUG_ON(bytesleft);
}
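/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096): a 6000 byte
 * buffer that starts 256 bytes into a page is mapped as a first chunk of
 * 4096 - 256 = 3840 bytes followed by a chunk of 2160 bytes, so no sg
 * entry ever crosses a page boundary.
 */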
/**
* configure_dma - configures the channels for the next transfer
* @pl022: SSP driver's private data structure
*/
static int configure_dma(struct pl022 *pl022)
{
struct dma_slave_config rx_conf = {
.src_addr = SSP_DR(pl022->phybase),
.direction = DMA_DEV_TO_MEM,
.device_fc = false,
};
struct dma_slave_config tx_conf = {
.dst_addr = SSP_DR(pl022->phybase),
.direction = DMA_MEM_TO_DEV,
.device_fc = false,
};
unsigned int pages;
int ret;
int rx_sglen, tx_sglen;
struct dma_chan *rxchan = pl022->dma_rx_channel;
struct dma_chan *txchan = pl022->dma_tx_channel;
struct dma_async_tx_descriptor *rxdesc;
struct dma_async_tx_descriptor *txdesc;
/* Check that the channels are available */
if (!rxchan || !txchan)
return -ENODEV;
/*
* If supplied, the DMA burstsize should equal the FIFO trigger level.
* Notice that the DMA engine uses one-to-one mapping. Since we can
* not trigger on 2 elements this needs explicit mapping rather than
* calculation.
*/
switch (pl022->rx_lev_trig) {
case SSP_RX_1_OR_MORE_ELEM:
rx_conf.src_maxburst = 1;
break;
case SSP_RX_4_OR_MORE_ELEM:
rx_conf.src_maxburst = 4;
break;
case SSP_RX_8_OR_MORE_ELEM:
rx_conf.src_maxburst = 8;
break;
case SSP_RX_16_OR_MORE_ELEM:
rx_conf.src_maxburst = 16;
break;
case SSP_RX_32_OR_MORE_ELEM:
rx_conf.src_maxburst = 32;
break;
default:
rx_conf.src_maxburst = pl022->vendor->fifodepth >> 1;
break;
}
switch (pl022->tx_lev_trig) {
case SSP_TX_1_OR_MORE_EMPTY_LOC:
tx_conf.dst_maxburst = 1;
break;
case SSP_TX_4_OR_MORE_EMPTY_LOC:
tx_conf.dst_maxburst = 4;
break;
case SSP_TX_8_OR_MORE_EMPTY_LOC:
tx_conf.dst_maxburst = 8;
break;
case SSP_TX_16_OR_MORE_EMPTY_LOC:
tx_conf.dst_maxburst = 16;
break;
case SSP_TX_32_OR_MORE_EMPTY_LOC:
tx_conf.dst_maxburst = 32;
break;
default:
tx_conf.dst_maxburst = pl022->vendor->fifodepth >> 1;
break;
}
switch (pl022->read) {
case READING_NULL:
/* Use the same as for writing */
rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
break;
case READING_U8:
rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
break;
case READING_U16:
rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
break;
case READING_U32:
rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
break;
}
switch (pl022->write) {
case WRITING_NULL:
/* Use the same as for reading */
tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
break;
case WRITING_U8:
tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
break;
case WRITING_U16:
tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
break;
case WRITING_U32:
tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
break;
}
	/* SPI peculiarity: we need to read and write the same width */
if (rx_conf.src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
rx_conf.src_addr_width = tx_conf.dst_addr_width;
if (tx_conf.dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
tx_conf.dst_addr_width = rx_conf.src_addr_width;
BUG_ON(rx_conf.src_addr_width != tx_conf.dst_addr_width);
dmaengine_slave_config(rxchan, &rx_conf);
dmaengine_slave_config(txchan, &tx_conf);
/* Create sglists for the transfers */
pages = DIV_ROUND_UP(pl022->cur_transfer->len, PAGE_SIZE);
dev_dbg(&pl022->adev->dev, "using %d pages for transfer\n", pages);
ret = sg_alloc_table(&pl022->sgt_rx, pages, GFP_ATOMIC);
if (ret)
goto err_alloc_rx_sg;
ret = sg_alloc_table(&pl022->sgt_tx, pages, GFP_ATOMIC);
if (ret)
goto err_alloc_tx_sg;
/* Fill in the scatterlists for the RX+TX buffers */
setup_dma_scatter(pl022, pl022->rx,
pl022->cur_transfer->len, &pl022->sgt_rx);
setup_dma_scatter(pl022, pl022->tx,
pl022->cur_transfer->len, &pl022->sgt_tx);
/* Map DMA buffers */
rx_sglen = dma_map_sg(rxchan->device->dev, pl022->sgt_rx.sgl,
pl022->sgt_rx.nents, DMA_FROM_DEVICE);
if (!rx_sglen)
goto err_rx_sgmap;
tx_sglen = dma_map_sg(txchan->device->dev, pl022->sgt_tx.sgl,
pl022->sgt_tx.nents, DMA_TO_DEVICE);
if (!tx_sglen)
goto err_tx_sgmap;
/* Send both scatterlists */
rxdesc = dmaengine_prep_slave_sg(rxchan,
pl022->sgt_rx.sgl,
rx_sglen,
DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!rxdesc)
goto err_rxdesc;
txdesc = dmaengine_prep_slave_sg(txchan,
pl022->sgt_tx.sgl,
tx_sglen,
DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!txdesc)
goto err_txdesc;
/* Put the callback on the RX transfer only, that should finish last */
rxdesc->callback = dma_callback;
rxdesc->callback_param = pl022;
/* Submit and fire RX and TX with TX last so we're ready to read! */
dmaengine_submit(rxdesc);
dmaengine_submit(txdesc);
dma_async_issue_pending(rxchan);
dma_async_issue_pending(txchan);
pl022->dma_running = true;
return 0;
err_txdesc:
dmaengine_terminate_all(txchan);
err_rxdesc:
dmaengine_terminate_all(rxchan);
dma_unmap_sg(txchan->device->dev, pl022->sgt_tx.sgl,
pl022->sgt_tx.nents, DMA_TO_DEVICE);
err_tx_sgmap:
dma_unmap_sg(rxchan->device->dev, pl022->sgt_rx.sgl,
pl022->sgt_rx.nents, DMA_FROM_DEVICE);
err_rx_sgmap:
sg_free_table(&pl022->sgt_tx);
err_alloc_tx_sg:
sg_free_table(&pl022->sgt_rx);
err_alloc_rx_sg:
return -ENOMEM;
}
static int pl022_dma_probe(struct pl022 *pl022)
{
dma_cap_mask_t mask;
/* Try to acquire a generic DMA engine slave channel */
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
/*
* We need both RX and TX channels to do DMA, else do none
* of them.
*/
pl022->dma_rx_channel = dma_request_channel(mask,
pl022->host_info->dma_filter,
pl022->host_info->dma_rx_param);
if (!pl022->dma_rx_channel) {
dev_dbg(&pl022->adev->dev, "no RX DMA channel!\n");
goto err_no_rxchan;
}
pl022->dma_tx_channel = dma_request_channel(mask,
pl022->host_info->dma_filter,
pl022->host_info->dma_tx_param);
if (!pl022->dma_tx_channel) {
dev_dbg(&pl022->adev->dev, "no TX DMA channel!\n");
goto err_no_txchan;
}
pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!pl022->dummypage)
goto err_no_dummypage;
dev_info(&pl022->adev->dev, "setup for DMA on RX %s, TX %s\n",
dma_chan_name(pl022->dma_rx_channel),
dma_chan_name(pl022->dma_tx_channel));
return 0;
err_no_dummypage:
dma_release_channel(pl022->dma_tx_channel);
err_no_txchan:
dma_release_channel(pl022->dma_rx_channel);
pl022->dma_rx_channel = NULL;
err_no_rxchan:
	dev_err(&pl022->adev->dev,
		"Failed to set up DMA, working without DMA!\n");
return -ENODEV;
}
static int pl022_dma_autoprobe(struct pl022 *pl022)
{
struct device *dev = &pl022->adev->dev;
struct dma_chan *chan;
int err;
/* automatically configure DMA channels from platform, normally using DT */
chan = dma_request_chan(dev, "rx");
if (IS_ERR(chan)) {
err = PTR_ERR(chan);
goto err_no_rxchan;
}
pl022->dma_rx_channel = chan;
chan = dma_request_chan(dev, "tx");
if (IS_ERR(chan)) {
err = PTR_ERR(chan);
goto err_no_txchan;
}
pl022->dma_tx_channel = chan;
pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!pl022->dummypage) {
err = -ENOMEM;
goto err_no_dummypage;
}
return 0;
err_no_dummypage:
dma_release_channel(pl022->dma_tx_channel);
pl022->dma_tx_channel = NULL;
err_no_txchan:
dma_release_channel(pl022->dma_rx_channel);
pl022->dma_rx_channel = NULL;
err_no_rxchan:
return err;
}
static void terminate_dma(struct pl022 *pl022)
{
struct dma_chan *rxchan = pl022->dma_rx_channel;
struct dma_chan *txchan = pl022->dma_tx_channel;
dmaengine_terminate_all(rxchan);
dmaengine_terminate_all(txchan);
unmap_free_dma_scatter(pl022);
pl022->dma_running = false;
}
static void pl022_dma_remove(struct pl022 *pl022)
{
if (pl022->dma_running)
terminate_dma(pl022);
if (pl022->dma_tx_channel)
dma_release_channel(pl022->dma_tx_channel);
if (pl022->dma_rx_channel)
dma_release_channel(pl022->dma_rx_channel);
kfree(pl022->dummypage);
}
#else
static inline int configure_dma(struct pl022 *pl022)
{
return -ENODEV;
}
static inline int pl022_dma_autoprobe(struct pl022 *pl022)
{
return 0;
}
static inline int pl022_dma_probe(struct pl022 *pl022)
{
return 0;
}
static inline void pl022_dma_remove(struct pl022 *pl022)
{
}
#endif
/**
* pl022_interrupt_handler - Interrupt handler for SSP controller
* @irq: IRQ number
* @dev_id: Local device data
*
* This function handles interrupts generated for an interrupt based transfer.
* If a receive overrun (ROR) interrupt is there then we disable SSP, flag the
* current message's state as STATE_ERROR and schedule the tasklet
* pump_transfers which will do the postprocessing of the current message by
 * calling giveback(). Otherwise it reads data from the RX FIFO until there is
 * no more data, and writes data into the TX FIFO while it is not full. If we
 * complete the transfer we move to the next transfer and schedule the tasklet.
*/
static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id)
{
struct pl022 *pl022 = dev_id;
struct spi_message *msg = pl022->cur_msg;
u16 irq_status = 0;
if (unlikely(!msg)) {
dev_err(&pl022->adev->dev,
"bad message state in interrupt handler");
/* Never fail */
return IRQ_HANDLED;
}
/* Read the Interrupt Status Register */
irq_status = readw(SSP_MIS(pl022->virtbase));
if (unlikely(!irq_status))
return IRQ_NONE;
/*
* This handles the FIFO interrupts, the timeout
 * interrupts are flatly ignored; they cannot be
* trusted.
*/
if (unlikely(irq_status & SSP_MIS_MASK_RORMIS)) {
/*
* Overrun interrupt - bail out since our Data has been
* corrupted
*/
dev_err(&pl022->adev->dev, "FIFO overrun\n");
if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RFF)
dev_err(&pl022->adev->dev,
"RXFIFO is full\n");
/*
* Disable and clear interrupts, disable SSP,
* mark message with bad status so it can be
* retried.
*/
writew(DISABLE_ALL_INTERRUPTS,
SSP_IMSC(pl022->virtbase));
writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
writew((readw(SSP_CR1(pl022->virtbase)) &
(~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
msg->state = STATE_ERROR;
/* Schedule message queue handler */
tasklet_schedule(&pl022->pump_transfers);
return IRQ_HANDLED;
}
readwriter(pl022);
if (pl022->tx == pl022->tx_end) {
/* Disable Transmit interrupt, enable receive interrupt */
writew((readw(SSP_IMSC(pl022->virtbase)) &
~SSP_IMSC_MASK_TXIM) | SSP_IMSC_MASK_RXIM,
SSP_IMSC(pl022->virtbase));
}
/*
* Since all transactions must write as much as shall be read,
* we can conclude the entire transaction once RX is complete.
* At this point, all TX will always be finished.
*/
if (pl022->rx >= pl022->rx_end) {
writew(DISABLE_ALL_INTERRUPTS,
SSP_IMSC(pl022->virtbase));
writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
if (unlikely(pl022->rx > pl022->rx_end)) {
dev_warn(&pl022->adev->dev, "read %u surplus "
"bytes (did you request an odd "
"number of bytes on a 16bit bus?)\n",
(u32) (pl022->rx - pl022->rx_end));
}
/* Update total bytes transferred */
msg->actual_length += pl022->cur_transfer->len;
/* Move to next transfer */
msg->state = next_transfer(pl022);
if (msg->state != STATE_DONE && pl022->cur_transfer->cs_change)
pl022_cs_control(pl022, SSP_CHIP_DESELECT);
tasklet_schedule(&pl022->pump_transfers);
return IRQ_HANDLED;
}
return IRQ_HANDLED;
}
/*
* This sets up the pointers to memory for the next message to
* send out on the SPI bus.
*/
static int set_up_next_transfer(struct pl022 *pl022,
struct spi_transfer *transfer)
{
int residue;
/* Sanity check the message for this bus width */
residue = pl022->cur_transfer->len % pl022->cur_chip->n_bytes;
if (unlikely(residue != 0)) {
dev_err(&pl022->adev->dev,
"message of %u bytes to transmit but the current "
"chip bus has a data width of %u bytes!\n",
pl022->cur_transfer->len,
pl022->cur_chip->n_bytes);
dev_err(&pl022->adev->dev, "skipping this message\n");
return -EIO;
}
pl022->tx = (void *)transfer->tx_buf;
pl022->tx_end = pl022->tx + pl022->cur_transfer->len;
pl022->rx = (void *)transfer->rx_buf;
pl022->rx_end = pl022->rx + pl022->cur_transfer->len;
pl022->write =
pl022->tx ? pl022->cur_chip->write : WRITING_NULL;
pl022->read = pl022->rx ? pl022->cur_chip->read : READING_NULL;
return 0;
}
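/*
 * Worked example (illustrative): a 7 byte transfer on a chip configured for
 * 16 bit words (n_bytes == 2) leaves a residue of 1 and is rejected with
 * -EIO above, while an 8 byte transfer divides evenly and is accepted.
 */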
/**
* pump_transfers - Tasklet function which schedules next transfer
* when running in interrupt or DMA transfer mode.
* @data: SSP driver private data structure
*
*/
static void pump_transfers(unsigned long data)
{
struct pl022 *pl022 = (struct pl022 *) data;
struct spi_message *message = NULL;
struct spi_transfer *transfer = NULL;
struct spi_transfer *previous = NULL;
/* Get current state information */
message = pl022->cur_msg;
transfer = pl022->cur_transfer;
/* Handle for abort */
if (message->state == STATE_ERROR) {
message->status = -EIO;
giveback(pl022);
return;
}
/* Handle end of message */
if (message->state == STATE_DONE) {
message->status = 0;
giveback(pl022);
return;
}
/* Delay if requested at end of transfer before CS change */
if (message->state == STATE_RUNNING) {
previous = list_entry(transfer->transfer_list.prev,
struct spi_transfer,
transfer_list);
/*
* FIXME: This runs in interrupt context.
* Is this really smart?
*/
spi_transfer_delay_exec(previous);
/* Reselect chip select only if cs_change was requested */
if (previous->cs_change)
pl022_cs_control(pl022, SSP_CHIP_SELECT);
} else {
/* STATE_START */
message->state = STATE_RUNNING;
}
if (set_up_next_transfer(pl022, transfer)) {
message->state = STATE_ERROR;
message->status = -EIO;
giveback(pl022);
return;
}
/* Flush the FIFOs and let's go! */
flush(pl022);
if (pl022->cur_chip->enable_dma) {
if (configure_dma(pl022)) {
dev_dbg(&pl022->adev->dev,
"configuration of DMA failed, fall back to interrupt mode\n");
goto err_config_dma;
}
return;
}
err_config_dma:
/* enable all interrupts except RX */
writew(ENABLE_ALL_INTERRUPTS & ~SSP_IMSC_MASK_RXIM, SSP_IMSC(pl022->virtbase));
}
static void do_interrupt_dma_transfer(struct pl022 *pl022)
{
/*
* Default is to enable all interrupts except RX -
* this will be enabled once TX is complete
*/
u32 irqflags = (u32)(ENABLE_ALL_INTERRUPTS & ~SSP_IMSC_MASK_RXIM);
/* Enable target chip, if not already active */
if (!pl022->next_msg_cs_active)
pl022_cs_control(pl022, SSP_CHIP_SELECT);
if (set_up_next_transfer(pl022, pl022->cur_transfer)) {
/* Error path */
pl022->cur_msg->state = STATE_ERROR;
pl022->cur_msg->status = -EIO;
giveback(pl022);
return;
}
/* If we're using DMA, set up DMA here */
if (pl022->cur_chip->enable_dma) {
/* Configure DMA transfer */
if (configure_dma(pl022)) {
dev_dbg(&pl022->adev->dev,
"configuration of DMA failed, fall back to interrupt mode\n");
goto err_config_dma;
}
/* Disable interrupts in DMA mode, IRQ from DMA controller */
irqflags = DISABLE_ALL_INTERRUPTS;
}
err_config_dma:
/* Enable SSP, turn on interrupts */
writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
SSP_CR1(pl022->virtbase));
writew(irqflags, SSP_IMSC(pl022->virtbase));
}
static void print_current_status(struct pl022 *pl022)
{
u32 read_cr0;
u16 read_cr1, read_dmacr, read_sr;
if (pl022->vendor->extended_cr)
read_cr0 = readl(SSP_CR0(pl022->virtbase));
else
read_cr0 = readw(SSP_CR0(pl022->virtbase));
read_cr1 = readw(SSP_CR1(pl022->virtbase));
read_dmacr = readw(SSP_DMACR(pl022->virtbase));
read_sr = readw(SSP_SR(pl022->virtbase));
dev_warn(&pl022->adev->dev, "spi-pl022 CR0: %x\n", read_cr0);
dev_warn(&pl022->adev->dev, "spi-pl022 CR1: %x\n", read_cr1);
dev_warn(&pl022->adev->dev, "spi-pl022 DMACR: %x\n", read_dmacr);
dev_warn(&pl022->adev->dev, "spi-pl022 SR: %x\n", read_sr);
dev_warn(&pl022->adev->dev,
"spi-pl022 exp_fifo_level/fifodepth: %u/%d\n",
pl022->exp_fifo_level,
pl022->vendor->fifodepth);
}
static void do_polling_transfer(struct pl022 *pl022)
{
struct spi_message *message = NULL;
struct spi_transfer *transfer = NULL;
struct spi_transfer *previous = NULL;
unsigned long time, timeout;
message = pl022->cur_msg;
while (message->state != STATE_DONE) {
/* Handle for abort */
if (message->state == STATE_ERROR)
break;
transfer = pl022->cur_transfer;
/* Delay if requested at end of transfer */
if (message->state == STATE_RUNNING) {
previous =
list_entry(transfer->transfer_list.prev,
struct spi_transfer, transfer_list);
spi_transfer_delay_exec(previous);
if (previous->cs_change)
pl022_cs_control(pl022, SSP_CHIP_SELECT);
} else {
/* STATE_START */
message->state = STATE_RUNNING;
if (!pl022->next_msg_cs_active)
pl022_cs_control(pl022, SSP_CHIP_SELECT);
}
/* Configuration Changing Per Transfer */
if (set_up_next_transfer(pl022, transfer)) {
/* Error path */
message->state = STATE_ERROR;
break;
}
/* Flush FIFOs and enable SSP */
flush(pl022);
writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
SSP_CR1(pl022->virtbase));
dev_dbg(&pl022->adev->dev, "polling transfer ongoing ...\n");
timeout = jiffies + msecs_to_jiffies(SPI_POLLING_TIMEOUT);
while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end) {
time = jiffies;
readwriter(pl022);
if (time_after(time, timeout)) {
dev_warn(&pl022->adev->dev,
"%s: timeout!\n", __func__);
message->state = STATE_TIMEOUT;
print_current_status(pl022);
goto out;
}
cpu_relax();
}
/* Update total byte transferred */
message->actual_length += pl022->cur_transfer->len;
/* Move to next transfer */
message->state = next_transfer(pl022);
if (message->state != STATE_DONE
&& pl022->cur_transfer->cs_change)
pl022_cs_control(pl022, SSP_CHIP_DESELECT);
}
out:
/* Handle end of message */
if (message->state == STATE_DONE)
message->status = 0;
else if (message->state == STATE_TIMEOUT)
message->status = -EAGAIN;
else
message->status = -EIO;
giveback(pl022);
return;
}
static int pl022_transfer_one_message(struct spi_controller *host,
struct spi_message *msg)
{
struct pl022 *pl022 = spi_controller_get_devdata(host);
/* Initial message state */
pl022->cur_msg = msg;
msg->state = STATE_START;
pl022->cur_transfer = list_entry(msg->transfers.next,
struct spi_transfer, transfer_list);
/* Setup the SPI using the per chip configuration */
pl022->cur_chip = spi_get_ctldata(msg->spi);
pl022->cur_cs = spi_get_chipselect(msg->spi, 0);
/* This is always available but may be set to -ENOENT */
pl022->cur_gpiod = spi_get_csgpiod(msg->spi, 0);
restore_state(pl022);
flush(pl022);
if (pl022->cur_chip->xfer_type == POLLING_TRANSFER)
do_polling_transfer(pl022);
else
do_interrupt_dma_transfer(pl022);
return 0;
}
static int pl022_unprepare_transfer_hardware(struct spi_controller *host)
{
struct pl022 *pl022 = spi_controller_get_devdata(host);
/* nothing more to do - disable spi/ssp and power off */
writew((readw(SSP_CR1(pl022->virtbase)) &
(~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
return 0;
}
static int verify_controller_parameters(struct pl022 *pl022,
struct pl022_config_chip const *chip_info)
{
if ((chip_info->iface < SSP_INTERFACE_MOTOROLA_SPI)
|| (chip_info->iface > SSP_INTERFACE_UNIDIRECTIONAL)) {
dev_err(&pl022->adev->dev,
"interface is configured incorrectly\n");
return -EINVAL;
}
if ((chip_info->iface == SSP_INTERFACE_UNIDIRECTIONAL) &&
(!pl022->vendor->unidir)) {
dev_err(&pl022->adev->dev,
"unidirectional mode not supported in this "
"hardware version\n");
return -EINVAL;
}
if ((chip_info->hierarchy != SSP_MASTER)
&& (chip_info->hierarchy != SSP_SLAVE)) {
dev_err(&pl022->adev->dev,
"hierarchy is configured incorrectly\n");
return -EINVAL;
}
if ((chip_info->com_mode != INTERRUPT_TRANSFER)
&& (chip_info->com_mode != DMA_TRANSFER)
&& (chip_info->com_mode != POLLING_TRANSFER)) {
dev_err(&pl022->adev->dev,
"Communication mode is configured incorrectly\n");
return -EINVAL;
}
switch (chip_info->rx_lev_trig) {
case SSP_RX_1_OR_MORE_ELEM:
case SSP_RX_4_OR_MORE_ELEM:
case SSP_RX_8_OR_MORE_ELEM:
/* These are always OK, all variants can handle this */
break;
case SSP_RX_16_OR_MORE_ELEM:
if (pl022->vendor->fifodepth < 16) {
dev_err(&pl022->adev->dev,
"RX FIFO Trigger Level is configured incorrectly\n");
return -EINVAL;
}
break;
case SSP_RX_32_OR_MORE_ELEM:
if (pl022->vendor->fifodepth < 32) {
dev_err(&pl022->adev->dev,
"RX FIFO Trigger Level is configured incorrectly\n");
return -EINVAL;
}
break;
default:
dev_err(&pl022->adev->dev,
"RX FIFO Trigger Level is configured incorrectly\n");
return -EINVAL;
}
switch (chip_info->tx_lev_trig) {
case SSP_TX_1_OR_MORE_EMPTY_LOC:
case SSP_TX_4_OR_MORE_EMPTY_LOC:
case SSP_TX_8_OR_MORE_EMPTY_LOC:
/* These are always OK, all variants can handle this */
break;
case SSP_TX_16_OR_MORE_EMPTY_LOC:
if (pl022->vendor->fifodepth < 16) {
dev_err(&pl022->adev->dev,
"TX FIFO Trigger Level is configured incorrectly\n");
return -EINVAL;
}
break;
case SSP_TX_32_OR_MORE_EMPTY_LOC:
if (pl022->vendor->fifodepth < 32) {
dev_err(&pl022->adev->dev,
"TX FIFO Trigger Level is configured incorrectly\n");
return -EINVAL;
}
break;
default:
dev_err(&pl022->adev->dev,
"TX FIFO Trigger Level is configured incorrectly\n");
return -EINVAL;
}
if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) {
if ((chip_info->ctrl_len < SSP_BITS_4)
|| (chip_info->ctrl_len > SSP_BITS_32)) {
dev_err(&pl022->adev->dev,
"CTRL LEN is configured incorrectly\n");
return -EINVAL;
}
if ((chip_info->wait_state != SSP_MWIRE_WAIT_ZERO)
&& (chip_info->wait_state != SSP_MWIRE_WAIT_ONE)) {
dev_err(&pl022->adev->dev,
"Wait State is configured incorrectly\n");
return -EINVAL;
}
/* Half duplex is only available in the ST Micro version */
if (pl022->vendor->extended_cr) {
if ((chip_info->duplex !=
SSP_MICROWIRE_CHANNEL_FULL_DUPLEX)
&& (chip_info->duplex !=
SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) {
dev_err(&pl022->adev->dev,
"Microwire duplex mode is configured incorrectly\n");
return -EINVAL;
}
} else {
if (chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) {
dev_err(&pl022->adev->dev,
"Microwire half duplex mode requested,"
" but this is only available in the"
" ST version of PL022\n");
return -EINVAL;
}
}
}
return 0;
}
static inline u32 spi_rate(u32 rate, u16 cpsdvsr, u16 scr)
{
return rate / (cpsdvsr * (1 + scr));
}
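/*
 * Worked example (illustrative, assuming a 24 MHz SSP clock): with
 * cpsdvsr = 2 and scr = 11 the effective rate is
 * 24000000 / (2 * (1 + 11)) = 1000000 Hz, which is exactly what
 * calculate_effective_freq() below arrives at when asked for 1 MHz.
 */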
static int calculate_effective_freq(struct pl022 *pl022, int freq, struct
ssp_clock_params * clk_freq)
{
/* Lets calculate the frequency parameters */
u16 cpsdvsr = CPSDVR_MIN, scr = SCR_MIN;
u32 rate, max_tclk, min_tclk, best_freq = 0, best_cpsdvsr = 0,
best_scr = 0, tmp, found = 0;
rate = clk_get_rate(pl022->clk);
	/* cpsdvsr = 2 & scr = 0 */
max_tclk = spi_rate(rate, CPSDVR_MIN, SCR_MIN);
/* cpsdvsr = 254 & scr = 255 */
min_tclk = spi_rate(rate, CPSDVR_MAX, SCR_MAX);
if (freq > max_tclk)
dev_warn(&pl022->adev->dev,
"Max speed that can be programmed is %d Hz, you requested %d\n",
max_tclk, freq);
if (freq < min_tclk) {
dev_err(&pl022->adev->dev,
"Requested frequency: %d Hz is less than minimum possible %d Hz\n",
freq, min_tclk);
return -EINVAL;
}
/*
* best_freq will give closest possible available rate (<= requested
* freq) for all values of scr & cpsdvsr.
*/
while ((cpsdvsr <= CPSDVR_MAX) && !found) {
while (scr <= SCR_MAX) {
tmp = spi_rate(rate, cpsdvsr, scr);
if (tmp > freq) {
/* we need lower freq */
scr++;
continue;
}
/*
			 * If the exact value is found, mark it found and break.
			 * If a closer value is found, update and break.
*/
if (tmp > best_freq) {
best_freq = tmp;
best_cpsdvsr = cpsdvsr;
best_scr = scr;
if (tmp == freq)
found = 1;
}
/*
* increased scr will give lower rates, which are not
* required
*/
break;
}
cpsdvsr += 2;
scr = SCR_MIN;
}
	WARN(!best_freq, "pl022: Matching cpsdvsr and scr not found for %d Hz rate\n",
freq);
clk_freq->cpsdvsr = (u8) (best_cpsdvsr & 0xFF);
clk_freq->scr = (u8) (best_scr & 0xFF);
dev_dbg(&pl022->adev->dev,
"SSP Target Frequency is: %u, Effective Frequency is %u\n",
freq, best_freq);
dev_dbg(&pl022->adev->dev, "SSP cpsdvsr = %d, scr = %d\n",
clk_freq->cpsdvsr, clk_freq->scr);
return 0;
}
/*
* A piece of default chip info unless the platform
* supplies it.
*/
static const struct pl022_config_chip pl022_default_chip_info = {
.com_mode = INTERRUPT_TRANSFER,
.iface = SSP_INTERFACE_MOTOROLA_SPI,
.hierarchy = SSP_MASTER,
.slave_tx_disable = DO_NOT_DRIVE_TX,
.rx_lev_trig = SSP_RX_1_OR_MORE_ELEM,
.tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC,
.ctrl_len = SSP_BITS_8,
.wait_state = SSP_MWIRE_WAIT_ZERO,
.duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX,
};
/**
* pl022_setup - setup function registered to SPI host framework
* @spi: spi device which is requesting setup
*
* This function is registered to the SPI framework for this SPI host
 * controller. If this is the first time setup is called for this device,
 * this function will initialize the runtime state for this chip and save
 * it in the device structure. Otherwise it will update the runtime info
 * with the updated chip info. Nothing is actually written to the
 * controller hardware here; that is not done until the actual transfers
 * commence.
*/
static int pl022_setup(struct spi_device *spi)
{
struct pl022_config_chip const *chip_info;
struct pl022_config_chip chip_info_dt;
struct chip_data *chip;
struct ssp_clock_params clk_freq = { .cpsdvsr = 0, .scr = 0};
int status = 0;
struct pl022 *pl022 = spi_controller_get_devdata(spi->controller);
unsigned int bits = spi->bits_per_word;
u32 tmp;
struct device_node *np = spi->dev.of_node;
if (!spi->max_speed_hz)
return -EINVAL;
/* Get controller_state if one is supplied */
chip = spi_get_ctldata(spi);
if (chip == NULL) {
chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
if (!chip)
return -ENOMEM;
dev_dbg(&spi->dev,
"allocated memory for controller's runtime state\n");
}
/* Get controller data if one is supplied */
chip_info = spi->controller_data;
if (chip_info == NULL) {
if (np) {
chip_info_dt = pl022_default_chip_info;
chip_info_dt.hierarchy = SSP_MASTER;
of_property_read_u32(np, "pl022,interface",
&chip_info_dt.iface);
of_property_read_u32(np, "pl022,com-mode",
&chip_info_dt.com_mode);
of_property_read_u32(np, "pl022,rx-level-trig",
&chip_info_dt.rx_lev_trig);
of_property_read_u32(np, "pl022,tx-level-trig",
&chip_info_dt.tx_lev_trig);
of_property_read_u32(np, "pl022,ctrl-len",
&chip_info_dt.ctrl_len);
of_property_read_u32(np, "pl022,wait-state",
&chip_info_dt.wait_state);
of_property_read_u32(np, "pl022,duplex",
&chip_info_dt.duplex);
chip_info = &chip_info_dt;
} else {
chip_info = &pl022_default_chip_info;
			/* spi_board_info.controller_data was not supplied */
dev_dbg(&spi->dev,
"using default controller_data settings\n");
}
} else
dev_dbg(&spi->dev,
"using user supplied controller_data settings\n");
/*
* We can override with custom divisors, else we use the board
* frequency setting
*/
if ((0 == chip_info->clk_freq.cpsdvsr)
&& (0 == chip_info->clk_freq.scr)) {
status = calculate_effective_freq(pl022,
spi->max_speed_hz,
&clk_freq);
if (status < 0)
goto err_config_params;
} else {
memcpy(&clk_freq, &chip_info->clk_freq, sizeof(clk_freq));
if ((clk_freq.cpsdvsr % 2) != 0)
clk_freq.cpsdvsr =
clk_freq.cpsdvsr - 1;
}
if ((clk_freq.cpsdvsr < CPSDVR_MIN)
|| (clk_freq.cpsdvsr > CPSDVR_MAX)) {
status = -EINVAL;
dev_err(&spi->dev,
"cpsdvsr is configured incorrectly\n");
goto err_config_params;
}
status = verify_controller_parameters(pl022, chip_info);
if (status) {
dev_err(&spi->dev, "controller data is incorrect");
goto err_config_params;
}
pl022->rx_lev_trig = chip_info->rx_lev_trig;
pl022->tx_lev_trig = chip_info->tx_lev_trig;
/* Now set controller state based on controller data */
chip->xfer_type = chip_info->com_mode;
/* Check bits per word with vendor specific range */
if ((bits <= 3) || (bits > pl022->vendor->max_bpw)) {
status = -ENOTSUPP;
dev_err(&spi->dev, "illegal data size for this controller!\n");
dev_err(&spi->dev, "This controller can only handle 4 <= n <= %d bit words\n",
pl022->vendor->max_bpw);
goto err_config_params;
} else if (bits <= 8) {
		dev_dbg(&spi->dev, "4 <= n <= 8 bits per word\n");
chip->n_bytes = 1;
chip->read = READING_U8;
chip->write = WRITING_U8;
} else if (bits <= 16) {
dev_dbg(&spi->dev, "9 <= n <= 16 bits per word\n");
chip->n_bytes = 2;
chip->read = READING_U16;
chip->write = WRITING_U16;
} else {
dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n");
chip->n_bytes = 4;
chip->read = READING_U32;
chip->write = WRITING_U32;
}
/* Now Initialize all register settings required for this chip */
chip->cr0 = 0;
chip->cr1 = 0;
chip->dmacr = 0;
chip->cpsr = 0;
if ((chip_info->com_mode == DMA_TRANSFER)
&& ((pl022->host_info)->enable_dma)) {
chip->enable_dma = true;
dev_dbg(&spi->dev, "DMA mode set in controller state\n");
SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
SSP_DMACR_MASK_RXDMAE, 0);
SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
SSP_DMACR_MASK_TXDMAE, 1);
} else {
chip->enable_dma = false;
dev_dbg(&spi->dev, "DMA mode NOT set in controller state\n");
SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED,
SSP_DMACR_MASK_RXDMAE, 0);
SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED,
SSP_DMACR_MASK_TXDMAE, 1);
}
chip->cpsr = clk_freq.cpsdvsr;
/* Special setup for the ST micro extended control registers */
if (pl022->vendor->extended_cr) {
u32 etx;
if (pl022->vendor->pl023) {
/* These bits are only in the PL023 */
SSP_WRITE_BITS(chip->cr1, chip_info->clkdelay,
SSP_CR1_MASK_FBCLKDEL_ST, 13);
} else {
/* These bits are in the PL022 but not PL023 */
SSP_WRITE_BITS(chip->cr0, chip_info->duplex,
SSP_CR0_MASK_HALFDUP_ST, 5);
SSP_WRITE_BITS(chip->cr0, chip_info->ctrl_len,
SSP_CR0_MASK_CSS_ST, 16);
SSP_WRITE_BITS(chip->cr0, chip_info->iface,
SSP_CR0_MASK_FRF_ST, 21);
SSP_WRITE_BITS(chip->cr1, chip_info->wait_state,
SSP_CR1_MASK_MWAIT_ST, 6);
}
SSP_WRITE_BITS(chip->cr0, bits - 1,
SSP_CR0_MASK_DSS_ST, 0);
if (spi->mode & SPI_LSB_FIRST) {
tmp = SSP_RX_LSB;
etx = SSP_TX_LSB;
} else {
tmp = SSP_RX_MSB;
etx = SSP_TX_MSB;
}
SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_RENDN_ST, 4);
SSP_WRITE_BITS(chip->cr1, etx, SSP_CR1_MASK_TENDN_ST, 5);
SSP_WRITE_BITS(chip->cr1, chip_info->rx_lev_trig,
SSP_CR1_MASK_RXIFLSEL_ST, 7);
SSP_WRITE_BITS(chip->cr1, chip_info->tx_lev_trig,
SSP_CR1_MASK_TXIFLSEL_ST, 10);
} else {
SSP_WRITE_BITS(chip->cr0, bits - 1,
SSP_CR0_MASK_DSS, 0);
SSP_WRITE_BITS(chip->cr0, chip_info->iface,
SSP_CR0_MASK_FRF, 4);
}
/* Stuff that is common for all versions */
if (spi->mode & SPI_CPOL)
tmp = SSP_CLK_POL_IDLE_HIGH;
else
tmp = SSP_CLK_POL_IDLE_LOW;
SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPO, 6);
if (spi->mode & SPI_CPHA)
tmp = SSP_CLK_SECOND_EDGE;
else
tmp = SSP_CLK_FIRST_EDGE;
SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPH, 7);
SSP_WRITE_BITS(chip->cr0, clk_freq.scr, SSP_CR0_MASK_SCR, 8);
/* Loopback is available on all versions except PL023 */
if (pl022->vendor->loopback) {
if (spi->mode & SPI_LOOP)
tmp = LOOPBACK_ENABLED;
else
tmp = LOOPBACK_DISABLED;
SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_LBM, 0);
}
SSP_WRITE_BITS(chip->cr1, SSP_DISABLED, SSP_CR1_MASK_SSE, 1);
SSP_WRITE_BITS(chip->cr1, chip_info->hierarchy, SSP_CR1_MASK_MS, 2);
SSP_WRITE_BITS(chip->cr1, chip_info->slave_tx_disable, SSP_CR1_MASK_SOD,
3);
/* Save controller_state */
spi_set_ctldata(spi, chip);
return status;
err_config_params:
spi_set_ctldata(spi, NULL);
kfree(chip);
return status;
}
/**
* pl022_cleanup - cleanup function registered to SPI host framework
* @spi: spi device which is requesting cleanup
*
* This function is registered to the SPI framework for this SPI host
* controller. It will free the runtime state of chip.
*/
static void pl022_cleanup(struct spi_device *spi)
{
struct chip_data *chip = spi_get_ctldata(spi);
spi_set_ctldata(spi, NULL);
kfree(chip);
}
static struct pl022_ssp_controller *
pl022_platform_data_dt_get(struct device *dev)
{
struct device_node *np = dev->of_node;
struct pl022_ssp_controller *pd;
if (!np) {
dev_err(dev, "no dt node defined\n");
return NULL;
}
pd = devm_kzalloc(dev, sizeof(struct pl022_ssp_controller), GFP_KERNEL);
if (!pd)
return NULL;
pd->bus_id = -1;
of_property_read_u32(np, "pl022,autosuspend-delay",
&pd->autosuspend_delay);
pd->rt = of_property_read_bool(np, "pl022,rt");
return pd;
}
static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
{
struct device *dev = &adev->dev;
struct pl022_ssp_controller *platform_info =
dev_get_platdata(&adev->dev);
struct spi_controller *host;
struct pl022 *pl022 = NULL; /*Data for this driver */
int status = 0;
dev_info(&adev->dev,
"ARM PL022 driver, device ID: 0x%08x\n", adev->periphid);
if (!platform_info && IS_ENABLED(CONFIG_OF))
platform_info = pl022_platform_data_dt_get(dev);
if (!platform_info) {
dev_err(dev, "probe: no platform data defined\n");
return -ENODEV;
}
/* Allocate host with space for data */
host = spi_alloc_host(dev, sizeof(struct pl022));
if (host == NULL) {
dev_err(&adev->dev, "probe - cannot alloc SPI host\n");
return -ENOMEM;
}
pl022 = spi_controller_get_devdata(host);
pl022->host = host;
pl022->host_info = platform_info;
pl022->adev = adev;
pl022->vendor = id->data;
/*
	 * Bus number which has been assigned to this SSP controller
	 * on this board
*/
host->bus_num = platform_info->bus_id;
host->cleanup = pl022_cleanup;
host->setup = pl022_setup;
host->auto_runtime_pm = true;
host->transfer_one_message = pl022_transfer_one_message;
host->unprepare_transfer_hardware = pl022_unprepare_transfer_hardware;
host->rt = platform_info->rt;
host->dev.of_node = dev->of_node;
host->use_gpio_descriptors = true;
/*
* Supports mode 0-3, loopback, and active low CS. Transfers are
* always MS bit first on the original pl022.
*/
host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
if (pl022->vendor->extended_cr)
host->mode_bits |= SPI_LSB_FIRST;
dev_dbg(&adev->dev, "BUSNO: %d\n", host->bus_num);
status = amba_request_regions(adev, NULL);
if (status)
goto err_no_ioregion;
pl022->phybase = adev->res.start;
pl022->virtbase = devm_ioremap(dev, adev->res.start,
resource_size(&adev->res));
if (pl022->virtbase == NULL) {
status = -ENOMEM;
goto err_no_ioremap;
}
dev_info(&adev->dev, "mapped registers from %pa to %p\n",
&adev->res.start, pl022->virtbase);
pl022->clk = devm_clk_get(&adev->dev, NULL);
if (IS_ERR(pl022->clk)) {
status = PTR_ERR(pl022->clk);
dev_err(&adev->dev, "could not retrieve SSP/SPI bus clock\n");
goto err_no_clk;
}
status = clk_prepare_enable(pl022->clk);
if (status) {
dev_err(&adev->dev, "could not enable SSP/SPI bus clock\n");
goto err_no_clk_en;
}
/* Initialize transfer pump */
tasklet_init(&pl022->pump_transfers, pump_transfers,
(unsigned long)pl022);
/* Disable SSP */
writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)),
SSP_CR1(pl022->virtbase));
load_ssp_default_config(pl022);
status = devm_request_irq(dev, adev->irq[0], pl022_interrupt_handler,
0, "pl022", pl022);
if (status < 0) {
dev_err(&adev->dev, "probe - cannot get IRQ (%d)\n", status);
goto err_no_irq;
}
/* Get DMA channels, try autoconfiguration first */
status = pl022_dma_autoprobe(pl022);
if (status == -EPROBE_DEFER) {
dev_dbg(dev, "deferring probe to get DMA channel\n");
goto err_no_irq;
}
/* If that failed, use channels from platform_info */
if (status == 0)
platform_info->enable_dma = 1;
else if (platform_info->enable_dma) {
status = pl022_dma_probe(pl022);
if (status != 0)
platform_info->enable_dma = 0;
}
/* Register with the SPI framework */
amba_set_drvdata(adev, pl022);
status = devm_spi_register_controller(&adev->dev, host);
if (status != 0) {
dev_err_probe(&adev->dev, status,
"problem registering spi host\n");
goto err_spi_register;
}
dev_dbg(dev, "probe succeeded\n");
/* let runtime PM put the device into suspend */
if (platform_info->autosuspend_delay > 0) {
dev_info(&adev->dev,
"will use autosuspend for runtime pm, delay %dms\n",
platform_info->autosuspend_delay);
pm_runtime_set_autosuspend_delay(dev,
platform_info->autosuspend_delay);
pm_runtime_use_autosuspend(dev);
}
pm_runtime_put(dev);
return 0;
err_spi_register:
if (platform_info->enable_dma)
pl022_dma_remove(pl022);
err_no_irq:
clk_disable_unprepare(pl022->clk);
err_no_clk_en:
err_no_clk:
err_no_ioremap:
amba_release_regions(adev);
err_no_ioregion:
spi_controller_put(host);
return status;
}
static void
pl022_remove(struct amba_device *adev)
{
struct pl022 *pl022 = amba_get_drvdata(adev);
if (!pl022)
return;
/*
* undo pm_runtime_put() in probe. I assume that we're not
* accessing the primecell here.
*/
pm_runtime_get_noresume(&adev->dev);
load_ssp_default_config(pl022);
if (pl022->host_info->enable_dma)
pl022_dma_remove(pl022);
clk_disable_unprepare(pl022->clk);
amba_release_regions(adev);
tasklet_disable(&pl022->pump_transfers);
}
#ifdef CONFIG_PM_SLEEP
static int pl022_suspend(struct device *dev)
{
struct pl022 *pl022 = dev_get_drvdata(dev);
int ret;
ret = spi_controller_suspend(pl022->host);
if (ret)
return ret;
ret = pm_runtime_force_suspend(dev);
if (ret) {
spi_controller_resume(pl022->host);
return ret;
}
pinctrl_pm_select_sleep_state(dev);
dev_dbg(dev, "suspended\n");
return 0;
}
static int pl022_resume(struct device *dev)
{
struct pl022 *pl022 = dev_get_drvdata(dev);
int ret;
ret = pm_runtime_force_resume(dev);
if (ret)
dev_err(dev, "problem resuming\n");
/* Start the queue running */
ret = spi_controller_resume(pl022->host);
if (!ret)
dev_dbg(dev, "resumed\n");
return ret;
}
#endif
#ifdef CONFIG_PM
static int pl022_runtime_suspend(struct device *dev)
{
struct pl022 *pl022 = dev_get_drvdata(dev);
clk_disable_unprepare(pl022->clk);
pinctrl_pm_select_idle_state(dev);
return 0;
}
static int pl022_runtime_resume(struct device *dev)
{
struct pl022 *pl022 = dev_get_drvdata(dev);
pinctrl_pm_select_default_state(dev);
clk_prepare_enable(pl022->clk);
return 0;
}
#endif
static const struct dev_pm_ops pl022_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(pl022_suspend, pl022_resume)
SET_RUNTIME_PM_OPS(pl022_runtime_suspend, pl022_runtime_resume, NULL)
};
static struct vendor_data vendor_arm = {
.fifodepth = 8,
.max_bpw = 16,
.unidir = false,
.extended_cr = false,
.pl023 = false,
.loopback = true,
.internal_cs_ctrl = false,
};
static struct vendor_data vendor_st = {
.fifodepth = 32,
.max_bpw = 32,
.unidir = false,
.extended_cr = true,
.pl023 = false,
.loopback = true,
.internal_cs_ctrl = false,
};
static struct vendor_data vendor_st_pl023 = {
.fifodepth = 32,
.max_bpw = 32,
.unidir = false,
.extended_cr = true,
.pl023 = true,
.loopback = false,
.internal_cs_ctrl = false,
};
static struct vendor_data vendor_lsi = {
.fifodepth = 8,
.max_bpw = 16,
.unidir = false,
.extended_cr = false,
.pl023 = false,
.loopback = true,
.internal_cs_ctrl = true,
};
static const struct amba_id pl022_ids[] = {
{
/*
* ARM PL022 variant, this has a 16bit wide
* and 8 locations deep TX/RX FIFO
*/
.id = 0x00041022,
.mask = 0x000fffff,
.data = &vendor_arm,
},
{
/*
* ST Micro derivative, this has 32bit wide
* and 32 locations deep TX/RX FIFO
*/
.id = 0x01080022,
.mask = 0xffffffff,
.data = &vendor_st,
},
{
/*
* ST-Ericsson derivative "PL023" (this is not
* an official ARM number), this is a PL022 SSP block
* stripped to SPI mode only, it has 32bit wide
* and 32 locations deep TX/RX FIFO but no extended
* CR0/CR1 register
*/
.id = 0x00080023,
.mask = 0xffffffff,
.data = &vendor_st_pl023,
},
{
/*
* PL022 variant that has a chip select control register which
* allows control of 5 output signals nCS[0:4].
*/
.id = 0x000b6022,
.mask = 0x000fffff,
.data = &vendor_lsi,
},
{ 0, 0 },
};
MODULE_DEVICE_TABLE(amba, pl022_ids);
static struct amba_driver pl022_driver = {
.drv = {
.name = "ssp-pl022",
.pm = &pl022_dev_pm_ops,
},
.id_table = pl022_ids,
.probe = pl022_probe,
.remove = pl022_remove,
};
static int __init pl022_init(void)
{
return amba_driver_register(&pl022_driver);
}
subsys_initcall(pl022_init);
static void __exit pl022_exit(void)
{
amba_driver_unregister(&pl022_driver);
}
module_exit(pl022_exit);
MODULE_AUTHOR("Linus Walleij <[email protected]>");
MODULE_DESCRIPTION("PL022 SSP Controller Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-pl022.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Cavium ThunderX SPI driver.
*
* Copyright (C) 2016 Cavium Inc.
* Authors: Jan Glauber <[email protected]>
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/spi/spi.h>
#include "spi-cavium.h"
#define DRV_NAME "spi-thunderx"
#define SYS_FREQ_DEFAULT 700000000 /* 700 MHz */
static int thunderx_spi_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct device *dev = &pdev->dev;
struct spi_controller *host;
struct octeon_spi *p;
int ret;
host = spi_alloc_host(dev, sizeof(struct octeon_spi));
if (!host)
return -ENOMEM;
p = spi_controller_get_devdata(host);
ret = pcim_enable_device(pdev);
if (ret)
goto error;
ret = pci_request_regions(pdev, DRV_NAME);
if (ret)
goto error;
p->register_base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
if (!p->register_base) {
ret = -EINVAL;
goto error;
}
p->regs.config = 0x1000;
p->regs.status = 0x1008;
p->regs.tx = 0x1010;
p->regs.data = 0x1080;
p->clk = devm_clk_get(dev, NULL);
if (IS_ERR(p->clk)) {
ret = PTR_ERR(p->clk);
goto error;
}
ret = clk_prepare_enable(p->clk);
if (ret)
goto error;
p->sys_freq = clk_get_rate(p->clk);
if (!p->sys_freq)
p->sys_freq = SYS_FREQ_DEFAULT;
dev_info(dev, "Set system clock to %u\n", p->sys_freq);
host->flags = SPI_CONTROLLER_HALF_DUPLEX;
host->num_chipselect = 4;
host->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH |
SPI_LSB_FIRST | SPI_3WIRE;
host->transfer_one_message = octeon_spi_transfer_one_message;
host->bits_per_word_mask = SPI_BPW_MASK(8);
host->max_speed_hz = OCTEON_SPI_MAX_CLOCK_HZ;
host->dev.of_node = pdev->dev.of_node;
pci_set_drvdata(pdev, host);
ret = devm_spi_register_controller(dev, host);
if (ret)
goto error;
return 0;
error:
clk_disable_unprepare(p->clk);
pci_release_regions(pdev);
spi_controller_put(host);
return ret;
}
static void thunderx_spi_remove(struct pci_dev *pdev)
{
struct spi_controller *host = pci_get_drvdata(pdev);
struct octeon_spi *p;
p = spi_controller_get_devdata(host);
if (!p)
return;
clk_disable_unprepare(p->clk);
pci_release_regions(pdev);
/* Put everything in a known state. */
writeq(0, p->register_base + OCTEON_SPI_CFG(p));
}
static const struct pci_device_id thunderx_spi_pci_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa00b) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, thunderx_spi_pci_id_table);
static struct pci_driver thunderx_spi_driver = {
.name = DRV_NAME,
.id_table = thunderx_spi_pci_id_table,
.probe = thunderx_spi_probe,
.remove = thunderx_spi_remove,
};
module_pci_driver(thunderx_spi_driver);
MODULE_DESCRIPTION("Cavium, Inc. ThunderX SPI bus driver");
MODULE_AUTHOR("Jan Glauber");
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-cavium-thunderx.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* PIC32 Quad SPI controller driver.
*
* Purna Chandra Mandal <[email protected]>
* Copyright (c) 2016, Microchip Technology Inc.
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
/* SQI registers */
#define PESQI_XIP_CONF1_REG 0x00
#define PESQI_XIP_CONF2_REG 0x04
#define PESQI_CONF_REG 0x08
#define PESQI_CTRL_REG 0x0C
#define PESQI_CLK_CTRL_REG 0x10
#define PESQI_CMD_THRES_REG 0x14
#define PESQI_INT_THRES_REG 0x18
#define PESQI_INT_ENABLE_REG 0x1C
#define PESQI_INT_STAT_REG 0x20
#define PESQI_TX_DATA_REG 0x24
#define PESQI_RX_DATA_REG 0x28
#define PESQI_STAT1_REG 0x2C
#define PESQI_STAT2_REG 0x30
#define PESQI_BD_CTRL_REG 0x34
#define PESQI_BD_CUR_ADDR_REG 0x38
#define PESQI_BD_BASE_ADDR_REG 0x40
#define PESQI_BD_STAT_REG 0x44
#define PESQI_BD_POLL_CTRL_REG 0x48
#define PESQI_BD_TX_DMA_STAT_REG 0x4C
#define PESQI_BD_RX_DMA_STAT_REG 0x50
#define PESQI_THRES_REG 0x54
#define PESQI_INT_SIGEN_REG 0x58
/* PESQI_CONF_REG fields */
#define PESQI_MODE 0x7
#define PESQI_MODE_BOOT 0
#define PESQI_MODE_PIO 1
#define PESQI_MODE_DMA 2
#define PESQI_MODE_XIP 3
#define PESQI_MODE_SHIFT 0
#define PESQI_CPHA BIT(3)
#define PESQI_CPOL BIT(4)
#define PESQI_LSBF BIT(5)
#define PESQI_RXLATCH BIT(7)
#define PESQI_SERMODE BIT(8)
#define PESQI_WP_EN BIT(9)
#define PESQI_HOLD_EN BIT(10)
#define PESQI_BURST_EN BIT(12)
#define PESQI_CS_CTRL_HW BIT(15)
#define PESQI_SOFT_RESET BIT(16)
#define PESQI_LANES_SHIFT 20
#define PESQI_SINGLE_LANE 0
#define PESQI_DUAL_LANE 1
#define PESQI_QUAD_LANE 2
#define PESQI_CSEN_SHIFT 24
#define PESQI_EN BIT(23)
/* PESQI_CLK_CTRL_REG fields */
#define PESQI_CLK_EN BIT(0)
#define PESQI_CLK_STABLE BIT(1)
#define PESQI_CLKDIV_SHIFT 8
#define PESQI_CLKDIV 0xff
/* PESQI_INT_THR/CMD_THR_REG */
#define PESQI_TXTHR_MASK 0x1f
#define PESQI_TXTHR_SHIFT 8
#define PESQI_RXTHR_MASK 0x1f
#define PESQI_RXTHR_SHIFT 0
/* PESQI_INT_EN/INT_STAT/INT_SIG_EN_REG */
#define PESQI_TXEMPTY BIT(0)
#define PESQI_TXFULL BIT(1)
#define PESQI_TXTHR BIT(2)
#define PESQI_RXEMPTY BIT(3)
#define PESQI_RXFULL BIT(4)
#define PESQI_RXTHR BIT(5)
#define PESQI_BDDONE BIT(9) /* BD processing complete */
#define PESQI_PKTCOMP BIT(10) /* packet processing complete */
#define PESQI_DMAERR BIT(11) /* error */
/* PESQI_BD_CTRL_REG */
#define PESQI_DMA_EN BIT(0) /* enable DMA engine */
#define PESQI_POLL_EN BIT(1) /* enable polling */
#define PESQI_BDP_START BIT(2) /* start BD processor */
/* PESQI controller buffer descriptor */
struct buf_desc {
u32 bd_ctrl; /* control */
u32 bd_status; /* reserved */
u32 bd_addr; /* DMA buffer addr */
u32 bd_nextp; /* next item in chain */
};
/* bd_ctrl */
#define BD_BUFLEN 0x1ff
#define BD_CBD_INT_EN BIT(16) /* Current BD is processed */
#define BD_PKT_INT_EN BIT(17) /* All BDs of PKT processed */
#define BD_LIFM BIT(18) /* last data of pkt */
#define BD_LAST BIT(19) /* end of list */
#define BD_DATA_RECV BIT(20) /* receive data */
#define BD_DDR BIT(21) /* DDR mode */
#define BD_DUAL BIT(22) /* Dual SPI */
#define BD_QUAD BIT(23) /* Quad SPI */
#define BD_LSBF BIT(25) /* LSB First */
#define BD_STAT_CHECK BIT(27) /* Status poll */
#define BD_DEVSEL_SHIFT 28 /* CS */
#define BD_CS_DEASSERT BIT(30) /* de-assert CS after current BD */
#define BD_EN BIT(31) /* BD owned by H/W */
/**
* struct ring_desc - Representation of SQI ring descriptor
* @list: list element to add to free or used list.
* @bd: PESQI controller buffer descriptor
* @bd_dma: DMA address of PESQI controller buffer descriptor
* @xfer_len: transfer length
*/
struct ring_desc {
struct list_head list;
struct buf_desc *bd;
dma_addr_t bd_dma;
u32 xfer_len;
};
/* Global constants */
#define PESQI_BD_BUF_LEN_MAX 256
#define PESQI_BD_COUNT 256 /* max 64KB data per spi message */
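/* Illustrative arithmetic based on the two defines above:
* 256 descriptors * 256 bytes per descriptor buffer = 64KB per message.
*/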
struct pic32_sqi {
void __iomem *regs;
struct clk *sys_clk;
struct clk *base_clk; /* drives spi clock */
struct spi_controller *host;
int irq;
struct completion xfer_done;
struct ring_desc *ring;
void *bd;
dma_addr_t bd_dma;
struct list_head bd_list_free; /* free */
struct list_head bd_list_used; /* allocated */
struct spi_device *cur_spi;
u32 cur_speed;
u8 cur_mode;
};
static inline void pic32_setbits(void __iomem *reg, u32 set)
{
writel(readl(reg) | set, reg);
}
static inline void pic32_clrbits(void __iomem *reg, u32 clr)
{
writel(readl(reg) & ~clr, reg);
}
static int pic32_sqi_set_clk_rate(struct pic32_sqi *sqi, u32 sck)
{
u32 val, div;
/* div = base_clk / (2 * spi_clk) */
div = clk_get_rate(sqi->base_clk) / (2 * sck);
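/* Worked example with hypothetical rates: base_clk = 200MHz and
* sck = 25MHz give div = 200 / (2 * 25) = 4, i.e. an effective SPI
* clock of base_clk / (2 * div) = 25MHz.
*/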
div &= PESQI_CLKDIV;
val = readl(sqi->regs + PESQI_CLK_CTRL_REG);
/* apply new divider */
val &= ~(PESQI_CLK_STABLE | (PESQI_CLKDIV << PESQI_CLKDIV_SHIFT));
val |= div << PESQI_CLKDIV_SHIFT;
writel(val, sqi->regs + PESQI_CLK_CTRL_REG);
/* wait for stability */
return readl_poll_timeout(sqi->regs + PESQI_CLK_CTRL_REG, val,
val & PESQI_CLK_STABLE, 1, 5000);
}
static inline void pic32_sqi_enable_int(struct pic32_sqi *sqi)
{
u32 mask = PESQI_DMAERR | PESQI_BDDONE | PESQI_PKTCOMP;
writel(mask, sqi->regs + PESQI_INT_ENABLE_REG);
/* INT_SIGEN works as interrupt-gate to INTR line */
writel(mask, sqi->regs + PESQI_INT_SIGEN_REG);
}
static inline void pic32_sqi_disable_int(struct pic32_sqi *sqi)
{
writel(0, sqi->regs + PESQI_INT_ENABLE_REG);
writel(0, sqi->regs + PESQI_INT_SIGEN_REG);
}
static irqreturn_t pic32_sqi_isr(int irq, void *dev_id)
{
struct pic32_sqi *sqi = dev_id;
u32 enable, status;
enable = readl(sqi->regs + PESQI_INT_ENABLE_REG);
status = readl(sqi->regs + PESQI_INT_STAT_REG);
/* check spurious interrupt */
if (!status)
return IRQ_NONE;
if (status & PESQI_DMAERR) {
enable = 0;
goto irq_done;
}
if (status & PESQI_TXTHR)
enable &= ~(PESQI_TXTHR | PESQI_TXFULL | PESQI_TXEMPTY);
if (status & PESQI_RXTHR)
enable &= ~(PESQI_RXTHR | PESQI_RXFULL | PESQI_RXEMPTY);
if (status & PESQI_BDDONE)
enable &= ~PESQI_BDDONE;
/* packet processing completed */
if (status & PESQI_PKTCOMP) {
/* mask all interrupts */
enable = 0;
/* complete the transaction */
complete(&sqi->xfer_done);
}
irq_done:
/* interrupts are sticky, so mask when handled */
writel(enable, sqi->regs + PESQI_INT_ENABLE_REG);
return IRQ_HANDLED;
}
static struct ring_desc *ring_desc_get(struct pic32_sqi *sqi)
{
struct ring_desc *rdesc;
if (list_empty(&sqi->bd_list_free))
return NULL;
rdesc = list_first_entry(&sqi->bd_list_free, struct ring_desc, list);
list_move_tail(&rdesc->list, &sqi->bd_list_used);
return rdesc;
}
static void ring_desc_put(struct pic32_sqi *sqi, struct ring_desc *rdesc)
{
list_move(&rdesc->list, &sqi->bd_list_free);
}
static int pic32_sqi_one_transfer(struct pic32_sqi *sqi,
struct spi_message *mesg,
struct spi_transfer *xfer)
{
struct spi_device *spi = mesg->spi;
struct scatterlist *sg, *sgl;
struct ring_desc *rdesc;
struct buf_desc *bd;
int nents, i;
u32 bd_ctrl;
u32 nbits;
/* Device selection */
bd_ctrl = spi_get_chipselect(spi, 0) << BD_DEVSEL_SHIFT;
/* half-duplex: select transfer buffer, direction and lane */
if (xfer->rx_buf) {
bd_ctrl |= BD_DATA_RECV;
nbits = xfer->rx_nbits;
sgl = xfer->rx_sg.sgl;
nents = xfer->rx_sg.nents;
} else {
nbits = xfer->tx_nbits;
sgl = xfer->tx_sg.sgl;
nents = xfer->tx_sg.nents;
}
if (nbits & SPI_NBITS_QUAD)
bd_ctrl |= BD_QUAD;
else if (nbits & SPI_NBITS_DUAL)
bd_ctrl |= BD_DUAL;
/* LSB first */
if (spi->mode & SPI_LSB_FIRST)
bd_ctrl |= BD_LSBF;
/* ownership to hardware */
bd_ctrl |= BD_EN;
for_each_sg(sgl, sg, nents, i) {
/* get ring descriptor */
rdesc = ring_desc_get(sqi);
if (!rdesc)
break;
bd = rdesc->bd;
/* BD CTRL: length */
rdesc->xfer_len = sg_dma_len(sg);
bd->bd_ctrl = bd_ctrl;
bd->bd_ctrl |= rdesc->xfer_len;
/* BD STAT */
bd->bd_status = 0;
/* BD BUFFER ADDRESS */
bd->bd_addr = sg->dma_address;
}
return 0;
}
static int pic32_sqi_prepare_hardware(struct spi_controller *host)
{
struct pic32_sqi *sqi = spi_controller_get_devdata(host);
/* enable spi interface */
pic32_setbits(sqi->regs + PESQI_CONF_REG, PESQI_EN);
/* enable spi clk */
pic32_setbits(sqi->regs + PESQI_CLK_CTRL_REG, PESQI_CLK_EN);
return 0;
}
static bool pic32_sqi_can_dma(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *x)
{
/* Do DMA irrespective of transfer size */
return true;
}
static int pic32_sqi_one_message(struct spi_controller *host,
struct spi_message *msg)
{
struct spi_device *spi = msg->spi;
struct ring_desc *rdesc, *next;
struct spi_transfer *xfer;
struct pic32_sqi *sqi;
int ret = 0, mode;
unsigned long timeout;
u32 val;
sqi = spi_controller_get_devdata(host);
reinit_completion(&sqi->xfer_done);
msg->actual_length = 0;
/* We can't handle spi_transfer specific "speed_hz", "bits_per_word"
* and "delay_usecs". But spi_device specific speed and mode change
* can be handled at best during spi chip-select switch.
*/
if (sqi->cur_spi != spi) {
/* set spi speed */
if (sqi->cur_speed != spi->max_speed_hz) {
sqi->cur_speed = spi->max_speed_hz;
ret = pic32_sqi_set_clk_rate(sqi, spi->max_speed_hz);
if (ret)
dev_warn(&spi->dev, "set_clk, %d\n", ret);
}
/* set spi mode */
mode = spi->mode & (SPI_MODE_3 | SPI_LSB_FIRST);
if (sqi->cur_mode != mode) {
val = readl(sqi->regs + PESQI_CONF_REG);
val &= ~(PESQI_CPOL | PESQI_CPHA | PESQI_LSBF);
if (mode & SPI_CPOL)
val |= PESQI_CPOL;
if (mode & SPI_LSB_FIRST)
val |= PESQI_LSBF;
val |= PESQI_CPHA;
writel(val, sqi->regs + PESQI_CONF_REG);
sqi->cur_mode = mode;
}
sqi->cur_spi = spi;
}
/* prepare hardware desc-list(BD) for transfer(s) */
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
ret = pic32_sqi_one_transfer(sqi, msg, xfer);
if (ret) {
dev_err(&spi->dev, "xfer %p err\n", xfer);
goto xfer_out;
}
}
/* BDs are prepared and chained. Now mark LAST_BD, CS_DEASSERT at last
* element of the list.
*/
rdesc = list_last_entry(&sqi->bd_list_used, struct ring_desc, list);
rdesc->bd->bd_ctrl |= BD_LAST | BD_CS_DEASSERT |
BD_LIFM | BD_PKT_INT_EN;
/* set base address BD list for DMA engine */
rdesc = list_first_entry(&sqi->bd_list_used, struct ring_desc, list);
writel(rdesc->bd_dma, sqi->regs + PESQI_BD_BASE_ADDR_REG);
/* enable interrupt */
pic32_sqi_enable_int(sqi);
/* enable DMA engine */
val = PESQI_DMA_EN | PESQI_POLL_EN | PESQI_BDP_START;
writel(val, sqi->regs + PESQI_BD_CTRL_REG);
/* wait for xfer completion */
timeout = wait_for_completion_timeout(&sqi->xfer_done, 5 * HZ);
if (timeout == 0) {
dev_err(&sqi->host->dev, "wait timedout/interrupted\n");
ret = -ETIMEDOUT;
msg->status = ret;
} else {
/* success */
msg->status = 0;
ret = 0;
}
/* disable DMA */
writel(0, sqi->regs + PESQI_BD_CTRL_REG);
pic32_sqi_disable_int(sqi);
xfer_out:
list_for_each_entry_safe_reverse(rdesc, next,
&sqi->bd_list_used, list) {
/* Update total byte transferred */
msg->actual_length += rdesc->xfer_len;
/* release ring descr */
ring_desc_put(sqi, rdesc);
}
spi_finalize_current_message(spi->controller);
return ret;
}
static int pic32_sqi_unprepare_hardware(struct spi_controller *host)
{
struct pic32_sqi *sqi = spi_controller_get_devdata(host);
/* disable clk */
pic32_clrbits(sqi->regs + PESQI_CLK_CTRL_REG, PESQI_CLK_EN);
/* disable spi */
pic32_clrbits(sqi->regs + PESQI_CONF_REG, PESQI_EN);
return 0;
}
static int ring_desc_ring_alloc(struct pic32_sqi *sqi)
{
struct ring_desc *rdesc;
struct buf_desc *bd;
int i;
/* allocate coherent DMAable memory for hardware buffer descriptors. */
sqi->bd = dma_alloc_coherent(&sqi->host->dev,
sizeof(*bd) * PESQI_BD_COUNT,
&sqi->bd_dma, GFP_KERNEL);
if (!sqi->bd) {
dev_err(&sqi->host->dev, "failed allocating dma buffer\n");
return -ENOMEM;
}
/* allocate software ring descriptors */
sqi->ring = kcalloc(PESQI_BD_COUNT, sizeof(*rdesc), GFP_KERNEL);
if (!sqi->ring) {
dma_free_coherent(&sqi->host->dev,
sizeof(*bd) * PESQI_BD_COUNT,
sqi->bd, sqi->bd_dma);
return -ENOMEM;
}
bd = (struct buf_desc *)sqi->bd;
INIT_LIST_HEAD(&sqi->bd_list_free);
INIT_LIST_HEAD(&sqi->bd_list_used);
/* initialize ring-desc */
for (i = 0, rdesc = sqi->ring; i < PESQI_BD_COUNT; i++, rdesc++) {
INIT_LIST_HEAD(&rdesc->list);
rdesc->bd = &bd[i];
rdesc->bd_dma = sqi->bd_dma + (void *)&bd[i] - (void *)bd;
list_add_tail(&rdesc->list, &sqi->bd_list_free);
}
/* Prepare BD: chain to next BD(s) */
for (i = 0, rdesc = sqi->ring; i < PESQI_BD_COUNT - 1; i++)
bd[i].bd_nextp = rdesc[i + 1].bd_dma;
bd[PESQI_BD_COUNT - 1].bd_nextp = 0;
return 0;
}
static void ring_desc_ring_free(struct pic32_sqi *sqi)
{
dma_free_coherent(&sqi->host->dev,
sizeof(struct buf_desc) * PESQI_BD_COUNT,
sqi->bd, sqi->bd_dma);
kfree(sqi->ring);
}
static void pic32_sqi_hw_init(struct pic32_sqi *sqi)
{
unsigned long flags;
u32 val;
/* Soft-reset of the PESQI controller triggers interrupts.
* We are not yet ready to handle them, so disable CPU
* interrupts for the time being.
*/
local_irq_save(flags);
/* assert soft-reset */
writel(PESQI_SOFT_RESET, sqi->regs + PESQI_CONF_REG);
/* wait until clear */
readl_poll_timeout_atomic(sqi->regs + PESQI_CONF_REG, val,
!(val & PESQI_SOFT_RESET), 1, 5000);
/* disable all interrupts */
pic32_sqi_disable_int(sqi);
/* Now it is safe to re-enable CPU interrupts */
local_irq_restore(flags);
/* tx and rx fifo interrupt threshold */
val = readl(sqi->regs + PESQI_CMD_THRES_REG);
val &= ~(PESQI_TXTHR_MASK << PESQI_TXTHR_SHIFT);
val &= ~(PESQI_RXTHR_MASK << PESQI_RXTHR_SHIFT);
val |= (1U << PESQI_TXTHR_SHIFT) | (1U << PESQI_RXTHR_SHIFT);
writel(val, sqi->regs + PESQI_CMD_THRES_REG);
val = readl(sqi->regs + PESQI_INT_THRES_REG);
val &= ~(PESQI_TXTHR_MASK << PESQI_TXTHR_SHIFT);
val &= ~(PESQI_RXTHR_MASK << PESQI_RXTHR_SHIFT);
val |= (1U << PESQI_TXTHR_SHIFT) | (1U << PESQI_RXTHR_SHIFT);
writel(val, sqi->regs + PESQI_INT_THRES_REG);
/* default configuration */
val = readl(sqi->regs + PESQI_CONF_REG);
/* set mode: DMA */
val &= ~PESQI_MODE;
val |= PESQI_MODE_DMA << PESQI_MODE_SHIFT;
writel(val, sqi->regs + PESQI_CONF_REG);
/* DATAEN - SQIID0-ID3 */
val |= PESQI_QUAD_LANE << PESQI_LANES_SHIFT;
/* burst/INCR4 enable */
val |= PESQI_BURST_EN;
/* CSEN - all CS */
val |= 3U << PESQI_CSEN_SHIFT;
writel(val, sqi->regs + PESQI_CONF_REG);
/* write poll count */
writel(0, sqi->regs + PESQI_BD_POLL_CTRL_REG);
sqi->cur_speed = 0;
sqi->cur_mode = -1;
}
static int pic32_sqi_probe(struct platform_device *pdev)
{
struct spi_controller *host;
struct pic32_sqi *sqi;
int ret;
host = spi_alloc_host(&pdev->dev, sizeof(*sqi));
if (!host)
return -ENOMEM;
sqi = spi_controller_get_devdata(host);
sqi->host = host;
sqi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sqi->regs)) {
ret = PTR_ERR(sqi->regs);
goto err_free_host;
}
/* irq */
sqi->irq = platform_get_irq(pdev, 0);
if (sqi->irq < 0) {
ret = sqi->irq;
goto err_free_host;
}
/* clocks */
sqi->sys_clk = devm_clk_get(&pdev->dev, "reg_ck");
if (IS_ERR(sqi->sys_clk)) {
ret = PTR_ERR(sqi->sys_clk);
dev_err(&pdev->dev, "no sys_clk ?\n");
goto err_free_host;
}
sqi->base_clk = devm_clk_get(&pdev->dev, "spi_ck");
if (IS_ERR(sqi->base_clk)) {
ret = PTR_ERR(sqi->base_clk);
dev_err(&pdev->dev, "no base clk ?\n");
goto err_free_host;
}
ret = clk_prepare_enable(sqi->sys_clk);
if (ret) {
dev_err(&pdev->dev, "sys clk enable failed\n");
goto err_free_host;
}
ret = clk_prepare_enable(sqi->base_clk);
if (ret) {
dev_err(&pdev->dev, "base clk enable failed\n");
clk_disable_unprepare(sqi->sys_clk);
goto err_free_host;
}
init_completion(&sqi->xfer_done);
/* initialize hardware */
pic32_sqi_hw_init(sqi);
/* allocate buffers & descriptors */
ret = ring_desc_ring_alloc(sqi);
if (ret) {
dev_err(&pdev->dev, "ring alloc failed\n");
goto err_disable_clk;
}
/* install irq handlers */
ret = request_irq(sqi->irq, pic32_sqi_isr, 0,
dev_name(&pdev->dev), sqi);
if (ret < 0) {
dev_err(&pdev->dev, "request_irq(%d), failed\n", sqi->irq);
goto err_free_ring;
}
/* register host */
host->num_chipselect = 2;
host->max_speed_hz = clk_get_rate(sqi->base_clk);
host->dma_alignment = 32;
host->max_dma_len = PESQI_BD_BUF_LEN_MAX;
host->dev.of_node = pdev->dev.of_node;
host->mode_bits = SPI_MODE_3 | SPI_MODE_0 | SPI_TX_DUAL |
SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD;
host->flags = SPI_CONTROLLER_HALF_DUPLEX;
host->can_dma = pic32_sqi_can_dma;
host->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
host->transfer_one_message = pic32_sqi_one_message;
host->prepare_transfer_hardware = pic32_sqi_prepare_hardware;
host->unprepare_transfer_hardware = pic32_sqi_unprepare_hardware;
ret = devm_spi_register_controller(&pdev->dev, host);
if (ret) {
dev_err(&host->dev, "failed registering spi host\n");
free_irq(sqi->irq, sqi);
goto err_free_ring;
}
platform_set_drvdata(pdev, sqi);
return 0;
err_free_ring:
ring_desc_ring_free(sqi);
err_disable_clk:
clk_disable_unprepare(sqi->base_clk);
clk_disable_unprepare(sqi->sys_clk);
err_free_host:
spi_controller_put(host);
return ret;
}
static void pic32_sqi_remove(struct platform_device *pdev)
{
struct pic32_sqi *sqi = platform_get_drvdata(pdev);
/* release resources */
free_irq(sqi->irq, sqi);
ring_desc_ring_free(sqi);
/* disable clk */
clk_disable_unprepare(sqi->base_clk);
clk_disable_unprepare(sqi->sys_clk);
}
static const struct of_device_id pic32_sqi_of_ids[] = {
{.compatible = "microchip,pic32mzda-sqi",},
{},
};
MODULE_DEVICE_TABLE(of, pic32_sqi_of_ids);
static struct platform_driver pic32_sqi_driver = {
.driver = {
.name = "sqi-pic32",
.of_match_table = of_match_ptr(pic32_sqi_of_ids),
},
.probe = pic32_sqi_probe,
.remove_new = pic32_sqi_remove,
};
module_platform_driver(pic32_sqi_driver);
MODULE_AUTHOR("Purna Chandra Mandal <[email protected]>");
MODULE_DESCRIPTION("Microchip SPI driver for PIC32 SQI controller.");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/spi/spi-pic32-sqi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Driver for Atmel AT32 and AT91 SPI Controllers
*
* Copyright (C) 2006 Atmel Corporation
*/
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/gpio/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <trace/events/spi.h>
/* SPI register offsets */
#define SPI_CR 0x0000
#define SPI_MR 0x0004
#define SPI_RDR 0x0008
#define SPI_TDR 0x000c
#define SPI_SR 0x0010
#define SPI_IER 0x0014
#define SPI_IDR 0x0018
#define SPI_IMR 0x001c
#define SPI_CSR0 0x0030
#define SPI_CSR1 0x0034
#define SPI_CSR2 0x0038
#define SPI_CSR3 0x003c
#define SPI_FMR 0x0040
#define SPI_FLR 0x0044
#define SPI_VERSION 0x00fc
#define SPI_RPR 0x0100
#define SPI_RCR 0x0104
#define SPI_TPR 0x0108
#define SPI_TCR 0x010c
#define SPI_RNPR 0x0110
#define SPI_RNCR 0x0114
#define SPI_TNPR 0x0118
#define SPI_TNCR 0x011c
#define SPI_PTCR 0x0120
#define SPI_PTSR 0x0124
/* Bitfields in CR */
#define SPI_SPIEN_OFFSET 0
#define SPI_SPIEN_SIZE 1
#define SPI_SPIDIS_OFFSET 1
#define SPI_SPIDIS_SIZE 1
#define SPI_SWRST_OFFSET 7
#define SPI_SWRST_SIZE 1
#define SPI_LASTXFER_OFFSET 24
#define SPI_LASTXFER_SIZE 1
#define SPI_TXFCLR_OFFSET 16
#define SPI_TXFCLR_SIZE 1
#define SPI_RXFCLR_OFFSET 17
#define SPI_RXFCLR_SIZE 1
#define SPI_FIFOEN_OFFSET 30
#define SPI_FIFOEN_SIZE 1
#define SPI_FIFODIS_OFFSET 31
#define SPI_FIFODIS_SIZE 1
/* Bitfields in MR */
#define SPI_MSTR_OFFSET 0
#define SPI_MSTR_SIZE 1
#define SPI_PS_OFFSET 1
#define SPI_PS_SIZE 1
#define SPI_PCSDEC_OFFSET 2
#define SPI_PCSDEC_SIZE 1
#define SPI_FDIV_OFFSET 3
#define SPI_FDIV_SIZE 1
#define SPI_MODFDIS_OFFSET 4
#define SPI_MODFDIS_SIZE 1
#define SPI_WDRBT_OFFSET 5
#define SPI_WDRBT_SIZE 1
#define SPI_LLB_OFFSET 7
#define SPI_LLB_SIZE 1
#define SPI_PCS_OFFSET 16
#define SPI_PCS_SIZE 4
#define SPI_DLYBCS_OFFSET 24
#define SPI_DLYBCS_SIZE 8
/* Bitfields in RDR */
#define SPI_RD_OFFSET 0
#define SPI_RD_SIZE 16
/* Bitfields in TDR */
#define SPI_TD_OFFSET 0
#define SPI_TD_SIZE 16
/* Bitfields in SR */
#define SPI_RDRF_OFFSET 0
#define SPI_RDRF_SIZE 1
#define SPI_TDRE_OFFSET 1
#define SPI_TDRE_SIZE 1
#define SPI_MODF_OFFSET 2
#define SPI_MODF_SIZE 1
#define SPI_OVRES_OFFSET 3
#define SPI_OVRES_SIZE 1
#define SPI_ENDRX_OFFSET 4
#define SPI_ENDRX_SIZE 1
#define SPI_ENDTX_OFFSET 5
#define SPI_ENDTX_SIZE 1
#define SPI_RXBUFF_OFFSET 6
#define SPI_RXBUFF_SIZE 1
#define SPI_TXBUFE_OFFSET 7
#define SPI_TXBUFE_SIZE 1
#define SPI_NSSR_OFFSET 8
#define SPI_NSSR_SIZE 1
#define SPI_TXEMPTY_OFFSET 9
#define SPI_TXEMPTY_SIZE 1
#define SPI_SPIENS_OFFSET 16
#define SPI_SPIENS_SIZE 1
#define SPI_TXFEF_OFFSET 24
#define SPI_TXFEF_SIZE 1
#define SPI_TXFFF_OFFSET 25
#define SPI_TXFFF_SIZE 1
#define SPI_TXFTHF_OFFSET 26
#define SPI_TXFTHF_SIZE 1
#define SPI_RXFEF_OFFSET 27
#define SPI_RXFEF_SIZE 1
#define SPI_RXFFF_OFFSET 28
#define SPI_RXFFF_SIZE 1
#define SPI_RXFTHF_OFFSET 29
#define SPI_RXFTHF_SIZE 1
#define SPI_TXFPTEF_OFFSET 30
#define SPI_TXFPTEF_SIZE 1
#define SPI_RXFPTEF_OFFSET 31
#define SPI_RXFPTEF_SIZE 1
/* Bitfields in CSR0 */
#define SPI_CPOL_OFFSET 0
#define SPI_CPOL_SIZE 1
#define SPI_NCPHA_OFFSET 1
#define SPI_NCPHA_SIZE 1
#define SPI_CSAAT_OFFSET 3
#define SPI_CSAAT_SIZE 1
#define SPI_BITS_OFFSET 4
#define SPI_BITS_SIZE 4
#define SPI_SCBR_OFFSET 8
#define SPI_SCBR_SIZE 8
#define SPI_DLYBS_OFFSET 16
#define SPI_DLYBS_SIZE 8
#define SPI_DLYBCT_OFFSET 24
#define SPI_DLYBCT_SIZE 8
/* Bitfields in RCR */
#define SPI_RXCTR_OFFSET 0
#define SPI_RXCTR_SIZE 16
/* Bitfields in TCR */
#define SPI_TXCTR_OFFSET 0
#define SPI_TXCTR_SIZE 16
/* Bitfields in RNCR */
#define SPI_RXNCR_OFFSET 0
#define SPI_RXNCR_SIZE 16
/* Bitfields in TNCR */
#define SPI_TXNCR_OFFSET 0
#define SPI_TXNCR_SIZE 16
/* Bitfields in PTCR */
#define SPI_RXTEN_OFFSET 0
#define SPI_RXTEN_SIZE 1
#define SPI_RXTDIS_OFFSET 1
#define SPI_RXTDIS_SIZE 1
#define SPI_TXTEN_OFFSET 8
#define SPI_TXTEN_SIZE 1
#define SPI_TXTDIS_OFFSET 9
#define SPI_TXTDIS_SIZE 1
/* Bitfields in FMR */
#define SPI_TXRDYM_OFFSET 0
#define SPI_TXRDYM_SIZE 2
#define SPI_RXRDYM_OFFSET 4
#define SPI_RXRDYM_SIZE 2
#define SPI_TXFTHRES_OFFSET 16
#define SPI_TXFTHRES_SIZE 6
#define SPI_RXFTHRES_OFFSET 24
#define SPI_RXFTHRES_SIZE 6
/* Bitfields in FLR */
#define SPI_TXFL_OFFSET 0
#define SPI_TXFL_SIZE 6
#define SPI_RXFL_OFFSET 16
#define SPI_RXFL_SIZE 6
/* Constants for BITS */
#define SPI_BITS_8_BPT 0
#define SPI_BITS_9_BPT 1
#define SPI_BITS_10_BPT 2
#define SPI_BITS_11_BPT 3
#define SPI_BITS_12_BPT 4
#define SPI_BITS_13_BPT 5
#define SPI_BITS_14_BPT 6
#define SPI_BITS_15_BPT 7
#define SPI_BITS_16_BPT 8
#define SPI_ONE_DATA 0
#define SPI_TWO_DATA 1
#define SPI_FOUR_DATA 2
/* Bit manipulation macros */
#define SPI_BIT(name) \
(1 << SPI_##name##_OFFSET)
#define SPI_BF(name, value) \
(((value) & ((1 << SPI_##name##_SIZE) - 1)) << SPI_##name##_OFFSET)
#define SPI_BFEXT(name, value) \
(((value) >> SPI_##name##_OFFSET) & ((1 << SPI_##name##_SIZE) - 1))
#define SPI_BFINS(name, value, old) \
(((old) & ~(((1 << SPI_##name##_SIZE) - 1) << SPI_##name##_OFFSET)) \
| SPI_BF(name, value))
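/*
* Illustrative use of the helpers above: SPI_BF(SCBR, 4) builds the SCBR
* field value, SPI_BFEXT(SCBR, csr) extracts the field from a CSR value,
* and SPI_BFINS(SCBR, 4, csr) replaces the field while preserving the
* other bits of csr.
*/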
/* Register access macros */
#define spi_readl(port, reg) \
readl_relaxed((port)->regs + SPI_##reg)
#define spi_writel(port, reg, value) \
writel_relaxed((value), (port)->regs + SPI_##reg)
#define spi_writew(port, reg, value) \
writew_relaxed((value), (port)->regs + SPI_##reg)
/* use PIO for small transfers, avoiding DMA setup/teardown overhead and
* cache operations; better heuristics consider wordsize and bitrate.
*/
#define DMA_MIN_BYTES 16
#define SPI_DMA_MIN_TIMEOUT (msecs_to_jiffies(1000))
#define SPI_DMA_TIMEOUT_PER_10K (msecs_to_jiffies(4))
#define AUTOSUSPEND_TIMEOUT 2000
struct atmel_spi_caps {
bool is_spi2;
bool has_wdrbt;
bool has_dma_support;
bool has_pdc_support;
};
/*
* The core SPI transfer engine just talks to a register bank to set up
* DMA transfers; transfer queue progress is driven by IRQs. The clock
* framework provides the base clock, subdivided for each spi_device.
*/
struct atmel_spi {
spinlock_t lock;
unsigned long flags;
phys_addr_t phybase;
void __iomem *regs;
int irq;
struct clk *clk;
struct platform_device *pdev;
unsigned long spi_clk;
struct spi_transfer *current_transfer;
int current_remaining_bytes;
int done_status;
dma_addr_t dma_addr_rx_bbuf;
dma_addr_t dma_addr_tx_bbuf;
void *addr_rx_bbuf;
void *addr_tx_bbuf;
struct completion xfer_completion;
struct atmel_spi_caps caps;
bool use_dma;
bool use_pdc;
bool keep_cs;
u32 fifo_size;
u8 native_cs_free;
u8 native_cs_for_gpio;
};
/* Controller-specific per-slave state */
struct atmel_spi_device {
u32 csr;
};
#define SPI_MAX_DMA_XFER 65535 /* true for both PDC and DMA */
#define INVALID_DMA_ADDRESS 0xffffffff
/*
* Version 2 of the SPI controller has
* - CR.LASTXFER
* - SPI_MR.DIV32 may become FDIV or must-be-zero (here: always zero)
* - SPI_SR.TXEMPTY, SPI_SR.NSSR (and corresponding irqs)
* - SPI_CSRx.CSAAT
* - SPI_CSRx.SBCR allows faster clocking
*/
static bool atmel_spi_is_v2(struct atmel_spi *as)
{
return as->caps.is_spi2;
}
/*
* Earlier SPI controllers (e.g. on at91rm9200) have a design bug whereby
* they assume that spi slave device state will not change on deselect, so
* that automagic deselection is OK. ("NPCSx rises if no data is to be
* transmitted") Not so! Workaround uses nCSx pins as GPIOs; or newer
* controllers have CSAAT and friends.
*
* Even on controllers newer than at91rm9200, using GPIOs can make sense as
* it lets us support active-high chipselects despite the controller's
* belief that only active-low devices/systems exist.
*
* However, at91rm9200 has a second erratum whereby nCS0 doesn't work
* right when driven with GPIO. ("Mode Fault does not allow more than one
* Master on Chip Select 0.") No workaround exists for that ... so for
* nCS0 on that chip, we (a) don't use the GPIO, (b) can't support CS_HIGH,
* and (c) will trigger that first erratum in some cases.
*/
static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
{
struct atmel_spi_device *asd = spi->controller_state;
int chip_select;
u32 mr;
if (spi_get_csgpiod(spi, 0))
chip_select = as->native_cs_for_gpio;
else
chip_select = spi_get_chipselect(spi, 0);
if (atmel_spi_is_v2(as)) {
spi_writel(as, CSR0 + 4 * chip_select, asd->csr);
/* For the low SPI version, there is an issue where a PDC transfer
* on CS1,2,3 needs SPI_CSR0.BITS configured the same as SPI_CSR1,2,3.BITS
*/
spi_writel(as, CSR0, asd->csr);
if (as->caps.has_wdrbt) {
spi_writel(as, MR,
SPI_BF(PCS, ~(0x01 << chip_select))
| SPI_BIT(WDRBT)
| SPI_BIT(MODFDIS)
| SPI_BIT(MSTR));
} else {
spi_writel(as, MR,
SPI_BF(PCS, ~(0x01 << chip_select))
| SPI_BIT(MODFDIS)
| SPI_BIT(MSTR));
}
mr = spi_readl(as, MR);
} else {
u32 cpol = (spi->mode & SPI_CPOL) ? SPI_BIT(CPOL) : 0;
int i;
u32 csr;
/* Make sure clock polarity is correct */
for (i = 0; i < spi->controller->num_chipselect; i++) {
csr = spi_readl(as, CSR0 + 4 * i);
if ((csr ^ cpol) & SPI_BIT(CPOL))
spi_writel(as, CSR0 + 4 * i,
csr ^ SPI_BIT(CPOL));
}
mr = spi_readl(as, MR);
mr = SPI_BFINS(PCS, ~(1 << chip_select), mr);
spi_writel(as, MR, mr);
}
dev_dbg(&spi->dev, "activate NPCS, mr %08x\n", mr);
}
static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi)
{
int chip_select;
u32 mr;
if (spi_get_csgpiod(spi, 0))
chip_select = as->native_cs_for_gpio;
else
chip_select = spi_get_chipselect(spi, 0);
/* only deactivate *this* device; sometimes transfers to
* another device may be active when this routine is called.
*/
mr = spi_readl(as, MR);
if (~SPI_BFEXT(PCS, mr) & (1 << chip_select)) {
mr = SPI_BFINS(PCS, 0xf, mr);
spi_writel(as, MR, mr);
}
dev_dbg(&spi->dev, "DEactivate NPCS, mr %08x\n", mr);
if (!spi_get_csgpiod(spi, 0))
spi_writel(as, CR, SPI_BIT(LASTXFER));
}
static void atmel_spi_lock(struct atmel_spi *as) __acquires(&as->lock)
{
spin_lock_irqsave(&as->lock, as->flags);
}
static void atmel_spi_unlock(struct atmel_spi *as) __releases(&as->lock)
{
spin_unlock_irqrestore(&as->lock, as->flags);
}
static inline bool atmel_spi_is_vmalloc_xfer(struct spi_transfer *xfer)
{
return is_vmalloc_addr(xfer->tx_buf) || is_vmalloc_addr(xfer->rx_buf);
}
static inline bool atmel_spi_use_dma(struct atmel_spi *as,
struct spi_transfer *xfer)
{
return as->use_dma && xfer->len >= DMA_MIN_BYTES;
}
static bool atmel_spi_can_dma(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct atmel_spi *as = spi_controller_get_devdata(host);
if (IS_ENABLED(CONFIG_SOC_SAM_V4_V5))
return atmel_spi_use_dma(as, xfer) &&
!atmel_spi_is_vmalloc_xfer(xfer);
else
return atmel_spi_use_dma(as, xfer);
}
static int atmel_spi_dma_slave_config(struct atmel_spi *as, u8 bits_per_word)
{
struct spi_controller *host = platform_get_drvdata(as->pdev);
struct dma_slave_config slave_config;
int err = 0;
if (bits_per_word > 8) {
slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
} else {
slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
}
slave_config.dst_addr = (dma_addr_t)as->phybase + SPI_TDR;
slave_config.src_addr = (dma_addr_t)as->phybase + SPI_RDR;
slave_config.src_maxburst = 1;
slave_config.dst_maxburst = 1;
slave_config.device_fc = false;
/*
* This driver uses fixed peripheral select mode (PS bit set to '0' in
* the Mode Register).
* So according to the datasheet, when FIFOs are available (and
* enabled), the Transmit FIFO operates in Multiple Data Mode.
* In this mode, up to 2 data, not 4, can be written into the Transmit
* Data Register in a single access.
* However, the first data has to be written into the lowest 16 bits and
* the second data into the highest 16 bits of the Transmit
* Data Register. For 8bit data (the most frequent case), it would
* require to rework tx_buf so each data would actually fit 16 bits.
* So we'd rather write only one data at a time. Hence the transmit
* path works the same whether FIFOs are available (and enabled) or not.
*/
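/* Illustrative packing for the Multiple Data Mode described above: two
* 8-bit data 0xAA then 0xBB would be written as the single 32-bit word
* 0x00BB00AA (first data in the low 16 bits, second in the high 16 bits).
*/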
if (dmaengine_slave_config(host->dma_tx, &slave_config)) {
dev_err(&as->pdev->dev,
"failed to configure tx dma channel\n");
err = -EINVAL;
}
/*
* This driver configures the spi controller for host mode (MSTR bit
* set to '1' in the Mode Register).
* So according to the datasheet, when FIFOs are available (and
* enabled), the Receive FIFO operates in Single Data Mode.
* So the receive path works the same whether FIFOs are available (and
* enabled) or not.
*/
if (dmaengine_slave_config(host->dma_rx, &slave_config)) {
dev_err(&as->pdev->dev,
"failed to configure rx dma channel\n");
err = -EINVAL;
}
return err;
}
static int atmel_spi_configure_dma(struct spi_controller *host,
struct atmel_spi *as)
{
struct device *dev = &as->pdev->dev;
int err;
host->dma_tx = dma_request_chan(dev, "tx");
if (IS_ERR(host->dma_tx)) {
err = PTR_ERR(host->dma_tx);
dev_dbg(dev, "No TX DMA channel, DMA is disabled\n");
goto error_clear;
}
host->dma_rx = dma_request_chan(dev, "rx");
if (IS_ERR(host->dma_rx)) {
err = PTR_ERR(host->dma_rx);
/*
* No reason to check EPROBE_DEFER here since we have already
* requested tx channel.
*/
dev_dbg(dev, "No RX DMA channel, DMA is disabled\n");
goto error;
}
err = atmel_spi_dma_slave_config(as, 8);
if (err)
goto error;
dev_info(&as->pdev->dev,
"Using %s (tx) and %s (rx) for DMA transfers\n",
dma_chan_name(host->dma_tx),
dma_chan_name(host->dma_rx));
return 0;
error:
if (!IS_ERR(host->dma_rx))
dma_release_channel(host->dma_rx);
if (!IS_ERR(host->dma_tx))
dma_release_channel(host->dma_tx);
error_clear:
host->dma_tx = host->dma_rx = NULL;
return err;
}
static void atmel_spi_stop_dma(struct spi_controller *host)
{
if (host->dma_rx)
dmaengine_terminate_all(host->dma_rx);
if (host->dma_tx)
dmaengine_terminate_all(host->dma_tx);
}
static void atmel_spi_release_dma(struct spi_controller *host)
{
if (host->dma_rx) {
dma_release_channel(host->dma_rx);
host->dma_rx = NULL;
}
if (host->dma_tx) {
dma_release_channel(host->dma_tx);
host->dma_tx = NULL;
}
}
/* This function is called by the DMA driver from tasklet context */
static void dma_callback(void *data)
{
struct spi_controller *host = data;
struct atmel_spi *as = spi_controller_get_devdata(host);
if (is_vmalloc_addr(as->current_transfer->rx_buf) &&
IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
memcpy(as->current_transfer->rx_buf, as->addr_rx_bbuf,
as->current_transfer->len);
}
complete(&as->xfer_completion);
}
/*
* Next transfer using PIO without FIFO.
*/
static void atmel_spi_next_xfer_single(struct spi_controller *host,
struct spi_transfer *xfer)
{
struct atmel_spi *as = spi_controller_get_devdata(host);
unsigned long xfer_pos = xfer->len - as->current_remaining_bytes;
dev_vdbg(host->dev.parent, "atmel_spi_next_xfer_pio\n");
/* Make sure data is not remaining in RDR */
spi_readl(as, RDR);
while (spi_readl(as, SR) & SPI_BIT(RDRF)) {
spi_readl(as, RDR);
cpu_relax();
}
if (xfer->bits_per_word > 8)
spi_writel(as, TDR, *(u16 *)(xfer->tx_buf + xfer_pos));
else
spi_writel(as, TDR, *(u8 *)(xfer->tx_buf + xfer_pos));
dev_dbg(host->dev.parent,
" start pio xfer %p: len %u tx %p rx %p bitpw %d\n",
xfer, xfer->len, xfer->tx_buf, xfer->rx_buf,
xfer->bits_per_word);
/* Enable relevant interrupts */
spi_writel(as, IER, SPI_BIT(RDRF) | SPI_BIT(OVRES));
}
/*
* Next transfer using PIO with FIFO.
*/
static void atmel_spi_next_xfer_fifo(struct spi_controller *host,
struct spi_transfer *xfer)
{
struct atmel_spi *as = spi_controller_get_devdata(host);
u32 current_remaining_data, num_data;
u32 offset = xfer->len - as->current_remaining_bytes;
const u16 *words = (const u16 *)((u8 *)xfer->tx_buf + offset);
const u8 *bytes = (const u8 *)((u8 *)xfer->tx_buf + offset);
u16 td0, td1;
u32 fifomr;
dev_vdbg(host->dev.parent, "atmel_spi_next_xfer_fifo\n");
/* Compute the number of data to transfer in the current iteration */
current_remaining_data = ((xfer->bits_per_word > 8) ?
((u32)as->current_remaining_bytes >> 1) :
(u32)as->current_remaining_bytes);
num_data = min(current_remaining_data, as->fifo_size);
/* Flush RX and TX FIFOs */
spi_writel(as, CR, SPI_BIT(RXFCLR) | SPI_BIT(TXFCLR));
while (spi_readl(as, FLR))
cpu_relax();
/* Set RX FIFO Threshold to the number of data to transfer */
fifomr = spi_readl(as, FMR);
spi_writel(as, FMR, SPI_BFINS(RXFTHRES, num_data, fifomr));
/* Clear FIFO flags in the Status Register, especially RXFTHF */
(void)spi_readl(as, SR);
/* Fill TX FIFO */
while (num_data >= 2) {
if (xfer->bits_per_word > 8) {
td0 = *words++;
td1 = *words++;
} else {
td0 = *bytes++;
td1 = *bytes++;
}
spi_writel(as, TDR, (td1 << 16) | td0);
num_data -= 2;
}
if (num_data) {
if (xfer->bits_per_word > 8)
td0 = *words++;
else
td0 = *bytes++;
spi_writew(as, TDR, td0);
num_data--;
}
dev_dbg(host->dev.parent,
" start fifo xfer %p: len %u tx %p rx %p bitpw %d\n",
xfer, xfer->len, xfer->tx_buf, xfer->rx_buf,
xfer->bits_per_word);
/*
* Enable RX FIFO Threshold Flag interrupt to be notified about
* transfer completion.
*/
spi_writel(as, IER, SPI_BIT(RXFTHF) | SPI_BIT(OVRES));
}
/*
* Next transfer using PIO.
*/
static void atmel_spi_next_xfer_pio(struct spi_controller *host,
struct spi_transfer *xfer)
{
struct atmel_spi *as = spi_controller_get_devdata(host);
if (as->fifo_size)
atmel_spi_next_xfer_fifo(host, xfer);
else
atmel_spi_next_xfer_single(host, xfer);
}
/*
* Submit next transfer for DMA.
*/
static int atmel_spi_next_xfer_dma_submit(struct spi_controller *host,
struct spi_transfer *xfer,
u32 *plen)
{
struct atmel_spi *as = spi_controller_get_devdata(host);
struct dma_chan *rxchan = host->dma_rx;
struct dma_chan *txchan = host->dma_tx;
struct dma_async_tx_descriptor *rxdesc;
struct dma_async_tx_descriptor *txdesc;
dma_cookie_t cookie;
dev_vdbg(host->dev.parent, "atmel_spi_next_xfer_dma_submit\n");
/* Check that the channels are available */
if (!rxchan || !txchan)
return -ENODEV;
*plen = xfer->len;
if (atmel_spi_dma_slave_config(as, xfer->bits_per_word))
goto err_exit;
/* Send both scatterlists */
if (atmel_spi_is_vmalloc_xfer(xfer) &&
IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
rxdesc = dmaengine_prep_slave_single(rxchan,
as->dma_addr_rx_bbuf,
xfer->len,
DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT |
DMA_CTRL_ACK);
} else {
rxdesc = dmaengine_prep_slave_sg(rxchan,
xfer->rx_sg.sgl,
xfer->rx_sg.nents,
DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT |
DMA_CTRL_ACK);
}
if (!rxdesc)
goto err_dma;
if (atmel_spi_is_vmalloc_xfer(xfer) &&
IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
memcpy(as->addr_tx_bbuf, xfer->tx_buf, xfer->len);
txdesc = dmaengine_prep_slave_single(txchan,
as->dma_addr_tx_bbuf,
xfer->len, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT |
DMA_CTRL_ACK);
} else {
txdesc = dmaengine_prep_slave_sg(txchan,
xfer->tx_sg.sgl,
xfer->tx_sg.nents,
DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT |
DMA_CTRL_ACK);
}
if (!txdesc)
goto err_dma;
dev_dbg(host->dev.parent,
" start dma xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
xfer, xfer->len, xfer->tx_buf, (unsigned long long)xfer->tx_dma,
xfer->rx_buf, (unsigned long long)xfer->rx_dma);
/* Enable relevant interrupts */
spi_writel(as, IER, SPI_BIT(OVRES));
/* Put the callback on the RX transfer only, that should finish last */
rxdesc->callback = dma_callback;
rxdesc->callback_param = host;
/* Submit and fire RX and TX with TX last so we're ready to read! */
cookie = rxdesc->tx_submit(rxdesc);
if (dma_submit_error(cookie))
goto err_dma;
cookie = txdesc->tx_submit(txdesc);
if (dma_submit_error(cookie))
goto err_dma;
rxchan->device->device_issue_pending(rxchan);
txchan->device->device_issue_pending(txchan);
return 0;
err_dma:
spi_writel(as, IDR, SPI_BIT(OVRES));
atmel_spi_stop_dma(host);
err_exit:
return -ENOMEM;
}
static void atmel_spi_next_xfer_data(struct spi_controller *host,
struct spi_transfer *xfer,
dma_addr_t *tx_dma,
dma_addr_t *rx_dma,
u32 *plen)
{
*rx_dma = xfer->rx_dma + xfer->len - *plen;
*tx_dma = xfer->tx_dma + xfer->len - *plen;
if (*plen > host->max_dma_len)
*plen = host->max_dma_len;
}
static int atmel_spi_set_xfer_speed(struct atmel_spi *as,
struct spi_device *spi,
struct spi_transfer *xfer)
{
u32 scbr, csr;
unsigned long bus_hz;
int chip_select;
if (spi_get_csgpiod(spi, 0))
chip_select = as->native_cs_for_gpio;
else
chip_select = spi_get_chipselect(spi, 0);
/* v1 chips start out at half the peripheral bus speed. */
bus_hz = as->spi_clk;
if (!atmel_spi_is_v2(as))
bus_hz /= 2;
/*
* Calculate the lowest divider that satisfies the
* constraint, assuming div32/fdiv/mbz == 0.
*/
scbr = DIV_ROUND_UP(bus_hz, xfer->speed_hz);
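/* Worked example with hypothetical clocks: bus_hz = 84MHz and a
* requested 1MHz transfer give scbr = 84 (exactly 1MHz); a 2.5MHz
* request rounds up to scbr = 34, i.e. roughly 2.47MHz on the wire.
*/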
/*
* If the resulting divider doesn't fit into the
* register bitfield, we can't satisfy the constraint.
*/
if (scbr >= (1 << SPI_SCBR_SIZE)) {
dev_err(&spi->dev,
"setup: %d Hz too slow, scbr %u; min %ld Hz\n",
xfer->speed_hz, scbr, bus_hz/255);
return -EINVAL;
}
if (scbr == 0) {
dev_err(&spi->dev,
"setup: %d Hz too high, scbr %u; max %ld Hz\n",
xfer->speed_hz, scbr, bus_hz);
return -EINVAL;
}
csr = spi_readl(as, CSR0 + 4 * chip_select);
csr = SPI_BFINS(SCBR, scbr, csr);
spi_writel(as, CSR0 + 4 * chip_select, csr);
xfer->effective_speed_hz = bus_hz / scbr;
return 0;
}
/*
* Submit next transfer for PDC.
* lock is held, spi irq is blocked
*/
static void atmel_spi_pdc_next_xfer(struct spi_controller *host,
struct spi_transfer *xfer)
{
struct atmel_spi *as = spi_controller_get_devdata(host);
u32 len;
dma_addr_t tx_dma, rx_dma;
spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
len = as->current_remaining_bytes;
atmel_spi_next_xfer_data(host, xfer, &tx_dma, &rx_dma, &len);
as->current_remaining_bytes -= len;
spi_writel(as, RPR, rx_dma);
spi_writel(as, TPR, tx_dma);
if (xfer->bits_per_word > 8)
len >>= 1;
spi_writel(as, RCR, len);
spi_writel(as, TCR, len);
dev_dbg(&host->dev,
" start xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
xfer, xfer->len, xfer->tx_buf,
(unsigned long long)xfer->tx_dma, xfer->rx_buf,
(unsigned long long)xfer->rx_dma);
if (as->current_remaining_bytes) {
len = as->current_remaining_bytes;
atmel_spi_next_xfer_data(host, xfer, &tx_dma, &rx_dma, &len);
as->current_remaining_bytes -= len;
spi_writel(as, RNPR, rx_dma);
spi_writel(as, TNPR, tx_dma);
if (xfer->bits_per_word > 8)
len >>= 1;
spi_writel(as, RNCR, len);
spi_writel(as, TNCR, len);
dev_dbg(&host->dev,
" next xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
xfer, xfer->len, xfer->tx_buf,
(unsigned long long)xfer->tx_dma, xfer->rx_buf,
(unsigned long long)xfer->rx_dma);
}
/* REVISIT: We're waiting for RXBUFF before we start the next
* transfer because we need to handle some difficult timing
* issues otherwise. If we wait for TXBUFE in one transfer and
* then start waiting for RXBUFF in the next, it's difficult
* to tell the difference between the RXBUFF interrupt we're
* actually waiting for and the RXBUFF interrupt of the
* previous transfer.
*
* It should be doable, though. Just not now...
*/
spi_writel(as, IER, SPI_BIT(RXBUFF) | SPI_BIT(OVRES));
spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN));
}
/*
* For DMA, tx_buf/tx_dma have the same relationship as rx_buf/rx_dma:
* - The buffer is either valid for CPU access, else NULL
* - If the buffer is valid, so is its DMA address
*
* This driver manages the dma address unless message->is_dma_mapped.
*/
static int
atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer)
{
struct device *dev = &as->pdev->dev;
xfer->tx_dma = xfer->rx_dma = INVALID_DMA_ADDRESS;
if (xfer->tx_buf) {
/* tx_buf is a const void* where we need a void * for the dma
* mapping */
void *nonconst_tx = (void *)xfer->tx_buf;
xfer->tx_dma = dma_map_single(dev,
nonconst_tx, xfer->len,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, xfer->tx_dma))
return -ENOMEM;
}
if (xfer->rx_buf) {
xfer->rx_dma = dma_map_single(dev,
xfer->rx_buf, xfer->len,
DMA_FROM_DEVICE);
if (dma_mapping_error(dev, xfer->rx_dma)) {
if (xfer->tx_buf)
dma_unmap_single(dev,
xfer->tx_dma, xfer->len,
DMA_TO_DEVICE);
return -ENOMEM;
}
}
return 0;
}
static void atmel_spi_dma_unmap_xfer(struct spi_controller *host,
struct spi_transfer *xfer)
{
if (xfer->tx_dma != INVALID_DMA_ADDRESS)
dma_unmap_single(host->dev.parent, xfer->tx_dma,
xfer->len, DMA_TO_DEVICE);
if (xfer->rx_dma != INVALID_DMA_ADDRESS)
dma_unmap_single(host->dev.parent, xfer->rx_dma,
xfer->len, DMA_FROM_DEVICE);
}
static void atmel_spi_disable_pdc_transfer(struct atmel_spi *as)
{
spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
}
static void
atmel_spi_pump_single_data(struct atmel_spi *as, struct spi_transfer *xfer)
{
u8 *rxp;
u16 *rxp16;
unsigned long xfer_pos = xfer->len - as->current_remaining_bytes;
if (xfer->bits_per_word > 8) {
rxp16 = (u16 *)(((u8 *)xfer->rx_buf) + xfer_pos);
*rxp16 = spi_readl(as, RDR);
} else {
rxp = ((u8 *)xfer->rx_buf) + xfer_pos;
*rxp = spi_readl(as, RDR);
}
if (xfer->bits_per_word > 8) {
if (as->current_remaining_bytes > 2)
as->current_remaining_bytes -= 2;
else
as->current_remaining_bytes = 0;
} else {
as->current_remaining_bytes--;
}
}
static void
atmel_spi_pump_fifo_data(struct atmel_spi *as, struct spi_transfer *xfer)
{
u32 fifolr = spi_readl(as, FLR);
u32 num_bytes, num_data = SPI_BFEXT(RXFL, fifolr);
u32 offset = xfer->len - as->current_remaining_bytes;
u16 *words = (u16 *)((u8 *)xfer->rx_buf + offset);
u8 *bytes = (u8 *)((u8 *)xfer->rx_buf + offset);
u16 rd; /* RD field is the lowest 16 bits of RDR */
/* Update the number of remaining bytes to transfer */
num_bytes = ((xfer->bits_per_word > 8) ?
(num_data << 1) :
num_data);
if (as->current_remaining_bytes > num_bytes)
as->current_remaining_bytes -= num_bytes;
else
as->current_remaining_bytes = 0;
/* Handle odd number of bytes when data are more than 8bit width */
if (xfer->bits_per_word > 8)
as->current_remaining_bytes &= ~0x1;
/* Read data */
while (num_data) {
rd = spi_readl(as, RDR);
if (xfer->bits_per_word > 8)
*words++ = rd;
else
*bytes++ = rd;
num_data--;
}
}
/* Called from IRQ
*
* Must update "current_remaining_bytes" to keep track of data
* to transfer.
*/
static void
atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer)
{
if (as->fifo_size)
atmel_spi_pump_fifo_data(as, xfer);
else
atmel_spi_pump_single_data(as, xfer);
}
/* Interrupt
*
*/
static irqreturn_t
atmel_spi_pio_interrupt(int irq, void *dev_id)
{
struct spi_controller *host = dev_id;
struct atmel_spi *as = spi_controller_get_devdata(host);
u32 status, pending, imr;
struct spi_transfer *xfer;
int ret = IRQ_NONE;
imr = spi_readl(as, IMR);
status = spi_readl(as, SR);
pending = status & imr;
if (pending & SPI_BIT(OVRES)) {
ret = IRQ_HANDLED;
spi_writel(as, IDR, SPI_BIT(OVRES));
dev_warn(host->dev.parent, "overrun\n");
/*
* When we get an overrun, we disregard the current
* transfer. Data will not be copied back from any
* bounce buffer and msg->actual_length will not be
* updated with the last xfer.
*
* We will also not process any remaining transfers in
* the message.
*/
as->done_status = -EIO;
smp_wmb();
/* Clear any overrun happening while cleaning up */
spi_readl(as, SR);
complete(&as->xfer_completion);
} else if (pending & (SPI_BIT(RDRF) | SPI_BIT(RXFTHF))) {
atmel_spi_lock(as);
if (as->current_remaining_bytes) {
ret = IRQ_HANDLED;
xfer = as->current_transfer;
atmel_spi_pump_pio_data(as, xfer);
if (!as->current_remaining_bytes)
spi_writel(as, IDR, pending);
complete(&as->xfer_completion);
}
atmel_spi_unlock(as);
} else {
WARN_ONCE(pending, "IRQ not handled, pending = %x\n", pending);
ret = IRQ_HANDLED;
spi_writel(as, IDR, pending);
}
return ret;
}
static irqreturn_t
atmel_spi_pdc_interrupt(int irq, void *dev_id)
{
struct spi_controller *host = dev_id;
struct atmel_spi *as = spi_controller_get_devdata(host);
u32 status, pending, imr;
int ret = IRQ_NONE;
imr = spi_readl(as, IMR);
status = spi_readl(as, SR);
pending = status & imr;
if (pending & SPI_BIT(OVRES)) {
ret = IRQ_HANDLED;
spi_writel(as, IDR, (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX)
| SPI_BIT(OVRES)));
/* Clear any overrun happening while cleaning up */
spi_readl(as, SR);
as->done_status = -EIO;
complete(&as->xfer_completion);
} else if (pending & (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX))) {
ret = IRQ_HANDLED;
spi_writel(as, IDR, pending);
complete(&as->xfer_completion);
}
return ret;
}
static int atmel_word_delay_csr(struct spi_device *spi, struct atmel_spi *as)
{
struct spi_delay *delay = &spi->word_delay;
u32 value = delay->value;
switch (delay->unit) {
case SPI_DELAY_UNIT_NSECS:
value /= 1000;
break;
case SPI_DELAY_UNIT_USECS:
break;
default:
return -EINVAL;
}
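/* DLYBCT is programmed in units of 32 peripheral clock periods, hence
* the final ">> 5": spi_clk / 1000000 * value converts the microsecond
* delay into clock cycles, which is then divided by 32.
*/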
return (as->spi_clk / 1000000 * value) >> 5;
}
static void initialize_native_cs_for_gpio(struct atmel_spi *as)
{
int i;
struct spi_controller *host = platform_get_drvdata(as->pdev);
if (!as->native_cs_free)
return; /* already initialized */
if (!host->cs_gpiods)
return; /* No CS GPIO */
/*
* On the first version of the controller (AT91RM9200), CS0
* can't be used associated with GPIO
*/
if (atmel_spi_is_v2(as))
i = 0;
else
i = 1;
for (; i < 4; i++)
if (host->cs_gpiods[i])
as->native_cs_free |= BIT(i);
if (as->native_cs_free)
as->native_cs_for_gpio = ffs(as->native_cs_free);
}
static int atmel_spi_setup(struct spi_device *spi)
{
struct atmel_spi *as;
struct atmel_spi_device *asd;
u32 csr;
unsigned int bits = spi->bits_per_word;
int chip_select;
int word_delay_csr;
as = spi_controller_get_devdata(spi->controller);
/* see notes above re chipselect */
if (!spi_get_csgpiod(spi, 0) && (spi->mode & SPI_CS_HIGH)) {
dev_warn(&spi->dev, "setup: non GPIO CS can't be active-high\n");
return -EINVAL;
}
/* Setup() is called during spi_register_controller(aka
* spi_register_master) but after all members of the cs_gpiod
* array have been filled, so we can look for which native
* CS will be free for use with a GPIO
*/
initialize_native_cs_for_gpio(as);
if (spi_get_csgpiod(spi, 0) && as->native_cs_free) {
dev_err(&spi->dev,
"No native CS available to support this GPIO CS\n");
return -EBUSY;
}
if (spi_get_csgpiod(spi, 0))
chip_select = as->native_cs_for_gpio;
else
chip_select = spi_get_chipselect(spi, 0);
csr = SPI_BF(BITS, bits - 8);
if (spi->mode & SPI_CPOL)
csr |= SPI_BIT(CPOL);
if (!(spi->mode & SPI_CPHA))
csr |= SPI_BIT(NCPHA);
if (!spi_get_csgpiod(spi, 0))
csr |= SPI_BIT(CSAAT);
csr |= SPI_BF(DLYBS, 0);
word_delay_csr = atmel_word_delay_csr(spi, as);
if (word_delay_csr < 0)
return word_delay_csr;
/* DLYBCT adds delays between words. This is useful for slow devices
* that need a bit of time to setup the next transfer.
*/
csr |= SPI_BF(DLYBCT, word_delay_csr);
asd = spi->controller_state;
if (!asd) {
asd = kzalloc(sizeof(struct atmel_spi_device), GFP_KERNEL);
if (!asd)
return -ENOMEM;
spi->controller_state = asd;
}
asd->csr = csr;
dev_dbg(&spi->dev,
"setup: bpw %u mode 0x%x -> csr%d %08x\n",
bits, spi->mode, spi_get_chipselect(spi, 0), csr);
if (!atmel_spi_is_v2(as))
spi_writel(as, CSR0 + 4 * chip_select, csr);
return 0;
}
static void atmel_spi_set_cs(struct spi_device *spi, bool enable)
{
struct atmel_spi *as = spi_controller_get_devdata(spi->controller);
/* the core doesn't really pass us enable/disable, but CS HIGH vs CS LOW;
 * since we already have routines for activate/deactivate, translate
 * high/low to active/inactive
*/
enable = (!!(spi->mode & SPI_CS_HIGH) == enable);
if (enable) {
cs_activate(as, spi);
} else {
cs_deactivate(as, spi);
}
}
static int atmel_spi_one_transfer(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct atmel_spi *as;
u8 bits;
u32 len;
struct atmel_spi_device *asd;
int timeout;
int ret;
unsigned int dma_timeout;
long ret_timeout;
as = spi_controller_get_devdata(host);
asd = spi->controller_state;
bits = (asd->csr >> 4) & 0xf;
if (bits != xfer->bits_per_word - 8) {
dev_dbg(&spi->dev,
"you can't yet change bits_per_word in transfers\n");
return -ENOPROTOOPT;
}
/*
* DMA map early, for performance (empties dcache ASAP) and
* better fault reporting.
*/
if ((!host->cur_msg->is_dma_mapped)
&& as->use_pdc) {
if (atmel_spi_dma_map_xfer(as, xfer) < 0)
return -ENOMEM;
}
atmel_spi_set_xfer_speed(as, spi, xfer);
as->done_status = 0;
as->current_transfer = xfer;
as->current_remaining_bytes = xfer->len;
while (as->current_remaining_bytes) {
reinit_completion(&as->xfer_completion);
if (as->use_pdc) {
atmel_spi_lock(as);
atmel_spi_pdc_next_xfer(host, xfer);
atmel_spi_unlock(as);
} else if (atmel_spi_use_dma(as, xfer)) {
len = as->current_remaining_bytes;
ret = atmel_spi_next_xfer_dma_submit(host,
xfer, &len);
if (ret) {
dev_err(&spi->dev,
"unable to use DMA, fallback to PIO\n");
as->done_status = ret;
break;
} else {
as->current_remaining_bytes -= len;
if (as->current_remaining_bytes < 0)
as->current_remaining_bytes = 0;
}
} else {
atmel_spi_lock(as);
atmel_spi_next_xfer_pio(host, xfer);
atmel_spi_unlock(as);
}
dma_timeout = msecs_to_jiffies(spi_controller_xfer_timeout(host, xfer));
ret_timeout = wait_for_completion_interruptible_timeout(&as->xfer_completion,
dma_timeout);
if (ret_timeout <= 0) {
dev_err(&spi->dev, "spi transfer %s\n",
!ret_timeout ? "timeout" : "canceled");
as->done_status = ret_timeout < 0 ? ret_timeout : -EIO;
}
if (as->done_status)
break;
}
if (as->done_status) {
if (as->use_pdc) {
dev_warn(host->dev.parent,
"overrun (%u/%u remaining)\n",
spi_readl(as, TCR), spi_readl(as, RCR));
/*
* Clean up DMA registers and make sure the data
* registers are empty.
*/
spi_writel(as, RNCR, 0);
spi_writel(as, TNCR, 0);
spi_writel(as, RCR, 0);
spi_writel(as, TCR, 0);
for (timeout = 1000; timeout; timeout--)
if (spi_readl(as, SR) & SPI_BIT(TXEMPTY))
break;
if (!timeout)
dev_warn(host->dev.parent,
"timeout waiting for TXEMPTY");
while (spi_readl(as, SR) & SPI_BIT(RDRF))
spi_readl(as, RDR);
/* Clear any overrun happening while cleaning up */
spi_readl(as, SR);
} else if (atmel_spi_use_dma(as, xfer)) {
atmel_spi_stop_dma(host);
}
}
if (!host->cur_msg->is_dma_mapped
&& as->use_pdc)
atmel_spi_dma_unmap_xfer(host, xfer);
if (as->use_pdc)
atmel_spi_disable_pdc_transfer(as);
return as->done_status;
}
static void atmel_spi_cleanup(struct spi_device *spi)
{
struct atmel_spi_device *asd = spi->controller_state;
if (!asd)
return;
spi->controller_state = NULL;
kfree(asd);
}
static inline unsigned int atmel_get_version(struct atmel_spi *as)
{
return spi_readl(as, VERSION) & 0x00000fff;
}
static void atmel_get_caps(struct atmel_spi *as)
{
unsigned int version;
version = atmel_get_version(as);
as->caps.is_spi2 = version > 0x121;
as->caps.has_wdrbt = version >= 0x210;
as->caps.has_dma_support = version >= 0x212;
as->caps.has_pdc_support = version < 0x212;
}
static void atmel_spi_init(struct atmel_spi *as)
{
spi_writel(as, CR, SPI_BIT(SWRST));
spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
/* It is recommended to enable FIFOs first thing after reset */
if (as->fifo_size)
spi_writel(as, CR, SPI_BIT(FIFOEN));
if (as->caps.has_wdrbt) {
spi_writel(as, MR, SPI_BIT(WDRBT) | SPI_BIT(MODFDIS)
| SPI_BIT(MSTR));
} else {
spi_writel(as, MR, SPI_BIT(MSTR) | SPI_BIT(MODFDIS));
}
if (as->use_pdc)
spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
spi_writel(as, CR, SPI_BIT(SPIEN));
}
static int atmel_spi_probe(struct platform_device *pdev)
{
struct resource *regs;
int irq;
struct clk *clk;
int ret;
struct spi_controller *host;
struct atmel_spi *as;
/* Select default pin state */
pinctrl_pm_select_default_state(&pdev->dev);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
clk = devm_clk_get(&pdev->dev, "spi_clk");
if (IS_ERR(clk))
return PTR_ERR(clk);
/* setup spi core then atmel-specific driver state */
host = spi_alloc_host(&pdev->dev, sizeof(*as));
if (!host)
return -ENOMEM;
/* the spi->mode bits understood by this driver: */
host->use_gpio_descriptors = true;
host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
host->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16);
host->dev.of_node = pdev->dev.of_node;
host->bus_num = pdev->id;
host->num_chipselect = 4;
host->setup = atmel_spi_setup;
host->flags = (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX |
SPI_CONTROLLER_GPIO_SS);
host->transfer_one = atmel_spi_one_transfer;
host->set_cs = atmel_spi_set_cs;
host->cleanup = atmel_spi_cleanup;
host->auto_runtime_pm = true;
host->max_dma_len = SPI_MAX_DMA_XFER;
host->can_dma = atmel_spi_can_dma;
platform_set_drvdata(pdev, host);
as = spi_controller_get_devdata(host);
spin_lock_init(&as->lock);
as->pdev = pdev;
as->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &regs);
if (IS_ERR(as->regs)) {
ret = PTR_ERR(as->regs);
goto out_unmap_regs;
}
as->phybase = regs->start;
as->irq = irq;
as->clk = clk;
init_completion(&as->xfer_completion);
atmel_get_caps(as);
as->use_dma = false;
as->use_pdc = false;
if (as->caps.has_dma_support) {
ret = atmel_spi_configure_dma(host, as);
if (ret == 0) {
as->use_dma = true;
} else if (ret == -EPROBE_DEFER) {
goto out_unmap_regs;
}
} else if (as->caps.has_pdc_support) {
as->use_pdc = true;
}
if (IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
as->addr_rx_bbuf = dma_alloc_coherent(&pdev->dev,
SPI_MAX_DMA_XFER,
&as->dma_addr_rx_bbuf,
GFP_KERNEL | GFP_DMA);
if (!as->addr_rx_bbuf) {
as->use_dma = false;
} else {
as->addr_tx_bbuf = dma_alloc_coherent(&pdev->dev,
SPI_MAX_DMA_XFER,
&as->dma_addr_tx_bbuf,
GFP_KERNEL | GFP_DMA);
if (!as->addr_tx_bbuf) {
as->use_dma = false;
dma_free_coherent(&pdev->dev, SPI_MAX_DMA_XFER,
as->addr_rx_bbuf,
as->dma_addr_rx_bbuf);
}
}
if (!as->use_dma)
dev_info(host->dev.parent,
" can not allocate dma coherent memory\n");
}
if (as->caps.has_dma_support && !as->use_dma)
dev_info(&pdev->dev, "Atmel SPI Controller using PIO only\n");
if (as->use_pdc) {
ret = devm_request_irq(&pdev->dev, irq, atmel_spi_pdc_interrupt,
0, dev_name(&pdev->dev), host);
} else {
ret = devm_request_irq(&pdev->dev, irq, atmel_spi_pio_interrupt,
0, dev_name(&pdev->dev), host);
}
if (ret)
goto out_unmap_regs;
/* Initialize the hardware */
ret = clk_prepare_enable(clk);
if (ret)
goto out_free_irq;
as->spi_clk = clk_get_rate(clk);
as->fifo_size = 0;
if (!of_property_read_u32(pdev->dev.of_node, "atmel,fifo-size",
&as->fifo_size)) {
dev_info(&pdev->dev, "Using FIFO (%u data)\n", as->fifo_size);
}
atmel_spi_init(as);
pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
ret = devm_spi_register_controller(&pdev->dev, host);
if (ret)
goto out_free_dma;
/* go! */
dev_info(&pdev->dev, "Atmel SPI Controller version 0x%x at 0x%08lx (irq %d)\n",
atmel_get_version(as), (unsigned long)regs->start,
irq);
return 0;
out_free_dma:
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
if (as->use_dma)
atmel_spi_release_dma(host);
spi_writel(as, CR, SPI_BIT(SWRST));
spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
clk_disable_unprepare(clk);
out_free_irq:
out_unmap_regs:
spi_controller_put(host);
return ret;
}
static void atmel_spi_remove(struct platform_device *pdev)
{
struct spi_controller *host = platform_get_drvdata(pdev);
struct atmel_spi *as = spi_controller_get_devdata(host);
pm_runtime_get_sync(&pdev->dev);
/* reset the hardware and block queue progress */
if (as->use_dma) {
atmel_spi_stop_dma(host);
atmel_spi_release_dma(host);
if (IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
dma_free_coherent(&pdev->dev, SPI_MAX_DMA_XFER,
as->addr_tx_bbuf,
as->dma_addr_tx_bbuf);
dma_free_coherent(&pdev->dev, SPI_MAX_DMA_XFER,
as->addr_rx_bbuf,
as->dma_addr_rx_bbuf);
}
}
spin_lock_irq(&as->lock);
spi_writel(as, CR, SPI_BIT(SWRST));
spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
spi_readl(as, SR);
spin_unlock_irq(&as->lock);
clk_disable_unprepare(as->clk);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
}
static int atmel_spi_runtime_suspend(struct device *dev)
{
struct spi_controller *host = dev_get_drvdata(dev);
struct atmel_spi *as = spi_controller_get_devdata(host);
clk_disable_unprepare(as->clk);
pinctrl_pm_select_sleep_state(dev);
return 0;
}
static int atmel_spi_runtime_resume(struct device *dev)
{
struct spi_controller *host = dev_get_drvdata(dev);
struct atmel_spi *as = spi_controller_get_devdata(host);
pinctrl_pm_select_default_state(dev);
return clk_prepare_enable(as->clk);
}
static int atmel_spi_suspend(struct device *dev)
{
struct spi_controller *host = dev_get_drvdata(dev);
int ret;
/* Stop the queue running */
ret = spi_controller_suspend(host);
if (ret)
return ret;
if (!pm_runtime_suspended(dev))
atmel_spi_runtime_suspend(dev);
return 0;
}
static int atmel_spi_resume(struct device *dev)
{
struct spi_controller *host = dev_get_drvdata(dev);
struct atmel_spi *as = spi_controller_get_devdata(host);
int ret;
ret = clk_prepare_enable(as->clk);
if (ret)
return ret;
atmel_spi_init(as);
clk_disable_unprepare(as->clk);
if (!pm_runtime_suspended(dev)) {
ret = atmel_spi_runtime_resume(dev);
if (ret)
return ret;
}
/* Start the queue running */
return spi_controller_resume(host);
}
static const struct dev_pm_ops atmel_spi_pm_ops = {
SYSTEM_SLEEP_PM_OPS(atmel_spi_suspend, atmel_spi_resume)
RUNTIME_PM_OPS(atmel_spi_runtime_suspend,
atmel_spi_runtime_resume, NULL)
};
static const struct of_device_id atmel_spi_dt_ids[] = {
{ .compatible = "atmel,at91rm9200-spi" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_spi_dt_ids);
static struct platform_driver atmel_spi_driver = {
.driver = {
.name = "atmel_spi",
.pm = pm_ptr(&atmel_spi_pm_ops),
.of_match_table = atmel_spi_dt_ids,
},
.probe = atmel_spi_probe,
.remove_new = atmel_spi_remove,
};
module_platform_driver(atmel_spi_driver);
MODULE_DESCRIPTION("Atmel AT32/AT91 SPI Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:atmel_spi");
| linux-master | drivers/spi/spi-atmel.c |
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
//
// AMD SPI controller driver
//
// Copyright (c) 2020, Advanced Micro Devices, Inc.
//
// Author: Sanjay R Mehta <[email protected]>
#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/spi/spi.h>
#include <linux/iopoll.h>
#define AMD_SPI_CTRL0_REG 0x00
#define AMD_SPI_EXEC_CMD BIT(16)
#define AMD_SPI_FIFO_CLEAR BIT(20)
#define AMD_SPI_BUSY BIT(31)
#define AMD_SPI_OPCODE_REG 0x45
#define AMD_SPI_CMD_TRIGGER_REG 0x47
#define AMD_SPI_TRIGGER_CMD BIT(7)
#define AMD_SPI_OPCODE_MASK 0xFF
#define AMD_SPI_ALT_CS_REG 0x1D
#define AMD_SPI_ALT_CS_MASK 0x3
#define AMD_SPI_FIFO_BASE 0x80
#define AMD_SPI_TX_COUNT_REG 0x48
#define AMD_SPI_RX_COUNT_REG 0x4B
#define AMD_SPI_STATUS_REG 0x4C
#define AMD_SPI_FIFO_SIZE 70
#define AMD_SPI_MEM_SIZE 200
#define AMD_SPI_ENA_REG 0x20
#define AMD_SPI_ALT_SPD_SHIFT 20
#define AMD_SPI_ALT_SPD_MASK GENMASK(23, AMD_SPI_ALT_SPD_SHIFT)
#define AMD_SPI_SPI100_SHIFT 0
#define AMD_SPI_SPI100_MASK GENMASK(AMD_SPI_SPI100_SHIFT, AMD_SPI_SPI100_SHIFT)
#define AMD_SPI_SPEED_REG 0x6C
#define AMD_SPI_SPD7_SHIFT 8
#define AMD_SPI_SPD7_MASK GENMASK(13, AMD_SPI_SPD7_SHIFT)
#define AMD_SPI_MAX_HZ 100000000
#define AMD_SPI_MIN_HZ 800000
/**
* enum amd_spi_versions - SPI controller versions
* @AMD_SPI_V1: AMDI0061 hardware version
* @AMD_SPI_V2: AMDI0062 hardware version
*/
enum amd_spi_versions {
AMD_SPI_V1 = 1,
AMD_SPI_V2,
};
enum amd_spi_speed {
F_66_66MHz,
F_33_33MHz,
F_22_22MHz,
F_16_66MHz,
F_100MHz,
F_800KHz,
SPI_SPD7 = 0x7,
F_50MHz = 0x4,
F_4MHz = 0x32,
F_3_17MHz = 0x3F
};
/**
* struct amd_spi_freq - Matches device speed with values to write in regs
* @speed_hz: Device frequency
* @enable_val: Value to be written to "enable register"
* @spd7_val: Some frequencies require a value to be written to the SPISPEED register
*/
struct amd_spi_freq {
u32 speed_hz;
u32 enable_val;
u32 spd7_val;
};
/**
* struct amd_spi - SPI driver instance
* @io_remap_addr: Start address of the SPI controller registers
* @version: SPI controller hardware version
* @speed_hz: Device frequency
*/
struct amd_spi {
void __iomem *io_remap_addr;
enum amd_spi_versions version;
unsigned int speed_hz;
};
static inline u8 amd_spi_readreg8(struct amd_spi *amd_spi, int idx)
{
return ioread8((u8 __iomem *)amd_spi->io_remap_addr + idx);
}
static inline void amd_spi_writereg8(struct amd_spi *amd_spi, int idx, u8 val)
{
iowrite8(val, ((u8 __iomem *)amd_spi->io_remap_addr + idx));
}
static void amd_spi_setclear_reg8(struct amd_spi *amd_spi, int idx, u8 set, u8 clear)
{
u8 tmp = amd_spi_readreg8(amd_spi, idx);
tmp = (tmp & ~clear) | set;
amd_spi_writereg8(amd_spi, idx, tmp);
}
static inline u32 amd_spi_readreg32(struct amd_spi *amd_spi, int idx)
{
return ioread32((u8 __iomem *)amd_spi->io_remap_addr + idx);
}
static inline void amd_spi_writereg32(struct amd_spi *amd_spi, int idx, u32 val)
{
iowrite32(val, ((u8 __iomem *)amd_spi->io_remap_addr + idx));
}
static inline void amd_spi_setclear_reg32(struct amd_spi *amd_spi, int idx, u32 set, u32 clear)
{
u32 tmp = amd_spi_readreg32(amd_spi, idx);
tmp = (tmp & ~clear) | set;
amd_spi_writereg32(amd_spi, idx, tmp);
}
static void amd_spi_select_chip(struct amd_spi *amd_spi, u8 cs)
{
amd_spi_setclear_reg8(amd_spi, AMD_SPI_ALT_CS_REG, cs, AMD_SPI_ALT_CS_MASK);
}
static inline void amd_spi_clear_chip(struct amd_spi *amd_spi, u8 chip_select)
{
amd_spi_writereg8(amd_spi, AMD_SPI_ALT_CS_REG, chip_select & ~AMD_SPI_ALT_CS_MASK);
}
static void amd_spi_clear_fifo_ptr(struct amd_spi *amd_spi)
{
amd_spi_setclear_reg32(amd_spi, AMD_SPI_CTRL0_REG, AMD_SPI_FIFO_CLEAR, AMD_SPI_FIFO_CLEAR);
}
static int amd_spi_set_opcode(struct amd_spi *amd_spi, u8 cmd_opcode)
{
switch (amd_spi->version) {
case AMD_SPI_V1:
amd_spi_setclear_reg32(amd_spi, AMD_SPI_CTRL0_REG, cmd_opcode,
AMD_SPI_OPCODE_MASK);
return 0;
case AMD_SPI_V2:
amd_spi_writereg8(amd_spi, AMD_SPI_OPCODE_REG, cmd_opcode);
return 0;
default:
return -ENODEV;
}
}
static inline void amd_spi_set_rx_count(struct amd_spi *amd_spi, u8 rx_count)
{
amd_spi_setclear_reg8(amd_spi, AMD_SPI_RX_COUNT_REG, rx_count, 0xff);
}
static inline void amd_spi_set_tx_count(struct amd_spi *amd_spi, u8 tx_count)
{
amd_spi_setclear_reg8(amd_spi, AMD_SPI_TX_COUNT_REG, tx_count, 0xff);
}
static int amd_spi_busy_wait(struct amd_spi *amd_spi)
{
u32 val;
int reg;
switch (amd_spi->version) {
case AMD_SPI_V1:
reg = AMD_SPI_CTRL0_REG;
break;
case AMD_SPI_V2:
reg = AMD_SPI_STATUS_REG;
break;
default:
return -ENODEV;
}
return readl_poll_timeout(amd_spi->io_remap_addr + reg, val,
!(val & AMD_SPI_BUSY), 20, 2000000);
}
static int amd_spi_execute_opcode(struct amd_spi *amd_spi)
{
int ret;
ret = amd_spi_busy_wait(amd_spi);
if (ret)
return ret;
switch (amd_spi->version) {
case AMD_SPI_V1:
/* Set ExecuteOpCode bit in the CTRL0 register */
amd_spi_setclear_reg32(amd_spi, AMD_SPI_CTRL0_REG, AMD_SPI_EXEC_CMD,
AMD_SPI_EXEC_CMD);
return 0;
case AMD_SPI_V2:
/* Trigger the command execution */
amd_spi_setclear_reg8(amd_spi, AMD_SPI_CMD_TRIGGER_REG,
AMD_SPI_TRIGGER_CMD, AMD_SPI_TRIGGER_CMD);
return 0;
default:
return -ENODEV;
}
}
static int amd_spi_host_setup(struct spi_device *spi)
{
struct amd_spi *amd_spi = spi_controller_get_devdata(spi->controller);
amd_spi_clear_fifo_ptr(amd_spi);
return 0;
}
static const struct amd_spi_freq amd_spi_freq[] = {
{ AMD_SPI_MAX_HZ, F_100MHz, 0},
{ 66660000, F_66_66MHz, 0},
{ 50000000, SPI_SPD7, F_50MHz},
{ 33330000, F_33_33MHz, 0},
{ 22220000, F_22_22MHz, 0},
{ 16660000, F_16_66MHz, 0},
{ 4000000, SPI_SPD7, F_4MHz},
{ 3170000, SPI_SPD7, F_3_17MHz},
{ AMD_SPI_MIN_HZ, F_800KHz, 0},
};
static int amd_set_spi_freq(struct amd_spi *amd_spi, u32 speed_hz)
{
unsigned int i, spd7_val, alt_spd;
if (speed_hz < AMD_SPI_MIN_HZ)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(amd_spi_freq); i++)
if (speed_hz >= amd_spi_freq[i].speed_hz)
break;
if (amd_spi->speed_hz == amd_spi_freq[i].speed_hz)
return 0;
amd_spi->speed_hz = amd_spi_freq[i].speed_hz;
alt_spd = (amd_spi_freq[i].enable_val << AMD_SPI_ALT_SPD_SHIFT)
& AMD_SPI_ALT_SPD_MASK;
amd_spi_setclear_reg32(amd_spi, AMD_SPI_ENA_REG, alt_spd,
AMD_SPI_ALT_SPD_MASK);
if (amd_spi->speed_hz == AMD_SPI_MAX_HZ)
amd_spi_setclear_reg32(amd_spi, AMD_SPI_ENA_REG, 1,
AMD_SPI_SPI100_MASK);
if (amd_spi_freq[i].spd7_val) {
spd7_val = (amd_spi_freq[i].spd7_val << AMD_SPI_SPD7_SHIFT)
& AMD_SPI_SPD7_MASK;
amd_spi_setclear_reg32(amd_spi, AMD_SPI_SPEED_REG, spd7_val,
AMD_SPI_SPD7_MASK);
}
return 0;
}
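/*
 * Example of the table lookup above (illustrative request, not from the
 * datasheet): a requested speed of 20 MHz falls through the 100, 66.66,
 * 50, 33.33 and 22.22 MHz entries and matches the 16.66 MHz row, so
 * F_16_66MHz is written to the enable register field and no SPISPEED
 * (SPD7) value is needed. Requests below AMD_SPI_MIN_HZ return -EINVAL.
 */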
static inline int amd_spi_fifo_xfer(struct amd_spi *amd_spi,
struct spi_controller *host,
struct spi_message *message)
{
struct spi_transfer *xfer = NULL;
struct spi_device *spi = message->spi;
u8 cmd_opcode = 0, fifo_pos = AMD_SPI_FIFO_BASE;
u8 *buf = NULL;
u32 i = 0;
u32 tx_len = 0, rx_len = 0;
list_for_each_entry(xfer, &message->transfers,
transfer_list) {
if (xfer->speed_hz)
amd_set_spi_freq(amd_spi, xfer->speed_hz);
else
amd_set_spi_freq(amd_spi, spi->max_speed_hz);
if (xfer->tx_buf) {
buf = (u8 *)xfer->tx_buf;
if (!tx_len) {
cmd_opcode = *(u8 *)xfer->tx_buf;
buf++;
xfer->len--;
}
tx_len += xfer->len;
/* Write data into the FIFO. */
for (i = 0; i < xfer->len; i++)
amd_spi_writereg8(amd_spi, fifo_pos + i, buf[i]);
fifo_pos += xfer->len;
}
/* Store no. of bytes to be received from FIFO */
if (xfer->rx_buf)
rx_len += xfer->len;
}
if (!buf) {
message->status = -EINVAL;
goto fin_msg;
}
amd_spi_set_opcode(amd_spi, cmd_opcode);
amd_spi_set_tx_count(amd_spi, tx_len);
amd_spi_set_rx_count(amd_spi, rx_len);
/* Execute command */
message->status = amd_spi_execute_opcode(amd_spi);
if (message->status)
goto fin_msg;
if (rx_len) {
message->status = amd_spi_busy_wait(amd_spi);
if (message->status)
goto fin_msg;
list_for_each_entry(xfer, &message->transfers, transfer_list)
if (xfer->rx_buf) {
buf = (u8 *)xfer->rx_buf;
/* Read data from FIFO to receive buffer */
for (i = 0; i < xfer->len; i++)
buf[i] = amd_spi_readreg8(amd_spi, fifo_pos + i);
fifo_pos += xfer->len;
}
}
/* Update statistics */
message->actual_length = tx_len + rx_len + 1;
fin_msg:
switch (amd_spi->version) {
case AMD_SPI_V1:
break;
case AMD_SPI_V2:
amd_spi_clear_chip(amd_spi, spi_get_chipselect(message->spi, 0));
break;
default:
return -ENODEV;
}
spi_finalize_current_message(host);
return message->status;
}
static int amd_spi_host_transfer(struct spi_controller *host,
struct spi_message *msg)
{
struct amd_spi *amd_spi = spi_controller_get_devdata(host);
struct spi_device *spi = msg->spi;
amd_spi_select_chip(amd_spi, spi_get_chipselect(spi, 0));
/*
* Extract spi_transfers from the spi message and
* program the controller.
*/
return amd_spi_fifo_xfer(amd_spi, host, msg);
}
static size_t amd_spi_max_transfer_size(struct spi_device *spi)
{
return AMD_SPI_FIFO_SIZE;
}
static int amd_spi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct spi_controller *host;
struct amd_spi *amd_spi;
int err;
/* Allocate storage for host and driver private data */
host = devm_spi_alloc_host(dev, sizeof(struct amd_spi));
if (!host)
return dev_err_probe(dev, -ENOMEM, "Error allocating SPI host\n");
amd_spi = spi_controller_get_devdata(host);
amd_spi->io_remap_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(amd_spi->io_remap_addr))
return dev_err_probe(dev, PTR_ERR(amd_spi->io_remap_addr),
"ioremap of SPI registers failed\n");
dev_dbg(dev, "io_remap_address: %p\n", amd_spi->io_remap_addr);
amd_spi->version = (uintptr_t) device_get_match_data(dev);
/* Initialize the spi_controller fields */
host->bus_num = 0;
host->num_chipselect = 4;
host->mode_bits = 0;
host->flags = SPI_CONTROLLER_HALF_DUPLEX;
host->max_speed_hz = AMD_SPI_MAX_HZ;
host->min_speed_hz = AMD_SPI_MIN_HZ;
host->setup = amd_spi_host_setup;
host->transfer_one_message = amd_spi_host_transfer;
host->max_transfer_size = amd_spi_max_transfer_size;
host->max_message_size = amd_spi_max_transfer_size;
/* Register the controller with SPI framework */
err = devm_spi_register_controller(dev, host);
if (err)
return dev_err_probe(dev, err, "error registering SPI controller\n");
return 0;
}
#ifdef CONFIG_ACPI
static const struct acpi_device_id spi_acpi_match[] = {
{ "AMDI0061", AMD_SPI_V1 },
{ "AMDI0062", AMD_SPI_V2 },
{},
};
MODULE_DEVICE_TABLE(acpi, spi_acpi_match);
#endif
static struct platform_driver amd_spi_driver = {
.driver = {
.name = "amd_spi",
.acpi_match_table = ACPI_PTR(spi_acpi_match),
},
.probe = amd_spi_probe,
};
module_platform_driver(amd_spi_driver);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Sanjay Mehta <[email protected]>");
MODULE_DESCRIPTION("AMD SPI Master Controller Driver");
| linux-master | drivers/spi/spi-amd.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Socionext SPI flash controller F_OSPI driver
* Copyright (C) 2021 Socionext Inc.
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
/* Registers */
#define OSPI_PROT_CTL_INDIR 0x00
#define OSPI_PROT_MODE_DATA_MASK GENMASK(31, 30)
#define OSPI_PROT_MODE_ALT_MASK GENMASK(29, 28)
#define OSPI_PROT_MODE_ADDR_MASK GENMASK(27, 26)
#define OSPI_PROT_MODE_CODE_MASK GENMASK(25, 24)
#define OSPI_PROT_MODE_SINGLE 0
#define OSPI_PROT_MODE_DUAL 1
#define OSPI_PROT_MODE_QUAD 2
#define OSPI_PROT_MODE_OCTAL 3
#define OSPI_PROT_DATA_RATE_DATA BIT(23)
#define OSPI_PROT_DATA_RATE_ALT BIT(22)
#define OSPI_PROT_DATA_RATE_ADDR BIT(21)
#define OSPI_PROT_DATA_RATE_CODE BIT(20)
#define OSPI_PROT_SDR 0
#define OSPI_PROT_DDR 1
#define OSPI_PROT_BIT_POS_DATA BIT(19)
#define OSPI_PROT_BIT_POS_ALT BIT(18)
#define OSPI_PROT_BIT_POS_ADDR BIT(17)
#define OSPI_PROT_BIT_POS_CODE BIT(16)
#define OSPI_PROT_SAMP_EDGE BIT(12)
#define OSPI_PROT_DATA_UNIT_MASK GENMASK(11, 10)
#define OSPI_PROT_DATA_UNIT_1B 0
#define OSPI_PROT_DATA_UNIT_2B 1
#define OSPI_PROT_DATA_UNIT_4B 3
#define OSPI_PROT_TRANS_DIR_WRITE BIT(9)
#define OSPI_PROT_DATA_EN BIT(8)
#define OSPI_PROT_ALT_SIZE_MASK GENMASK(7, 5)
#define OSPI_PROT_ADDR_SIZE_MASK GENMASK(4, 2)
#define OSPI_PROT_CODE_SIZE_MASK GENMASK(1, 0)
#define OSPI_CLK_CTL 0x10
#define OSPI_CLK_CTL_BOOT_INT_CLK_EN BIT(16)
#define OSPI_CLK_CTL_PHA BIT(12)
#define OSPI_CLK_CTL_PHA_180 0
#define OSPI_CLK_CTL_PHA_90 1
#define OSPI_CLK_CTL_DIV GENMASK(9, 8)
#define OSPI_CLK_CTL_DIV_1 0
#define OSPI_CLK_CTL_DIV_2 1
#define OSPI_CLK_CTL_DIV_4 2
#define OSPI_CLK_CTL_DIV_8 3
#define OSPI_CLK_CTL_INT_CLK_EN BIT(0)
#define OSPI_CS_CTL1 0x14
#define OSPI_CS_CTL2 0x18
#define OSPI_SSEL 0x20
#define OSPI_CMD_IDX_INDIR 0x40
#define OSPI_ADDR 0x50
#define OSPI_ALT_INDIR 0x60
#define OSPI_DMY_INDIR 0x70
#define OSPI_DAT 0x80
#define OSPI_DAT_SWP_INDIR 0x90
#define OSPI_DAT_SIZE_INDIR 0xA0
#define OSPI_DAT_SIZE_EN BIT(15)
#define OSPI_DAT_SIZE_MASK GENMASK(10, 0)
#define OSPI_DAT_SIZE_MAX (OSPI_DAT_SIZE_MASK + 1)
#define OSPI_TRANS_CTL 0xC0
#define OSPI_TRANS_CTL_STOP_REQ BIT(1) /* RW1AC */
#define OSPI_TRANS_CTL_START_REQ BIT(0) /* RW1AC */
#define OSPI_ACC_MODE 0xC4
#define OSPI_ACC_MODE_BOOT_DISABLE BIT(0)
#define OSPI_SWRST 0xD0
#define OSPI_SWRST_INDIR_WRITE_FIFO BIT(9) /* RW1AC */
#define OSPI_SWRST_INDIR_READ_FIFO BIT(8) /* RW1AC */
#define OSPI_STAT 0xE0
#define OSPI_STAT_IS_AXI_WRITING BIT(10)
#define OSPI_STAT_IS_AXI_READING BIT(9)
#define OSPI_STAT_IS_SPI_INT_CLK_STOP BIT(4)
#define OSPI_STAT_IS_SPI_IDLE BIT(3)
#define OSPI_IRQ 0xF0
#define OSPI_IRQ_CS_DEASSERT BIT(8)
#define OSPI_IRQ_WRITE_BUF_READY BIT(2)
#define OSPI_IRQ_READ_BUF_READY BIT(1)
#define OSPI_IRQ_CS_TRANS_COMP BIT(0)
#define OSPI_IRQ_ALL \
(OSPI_IRQ_CS_DEASSERT | OSPI_IRQ_WRITE_BUF_READY \
| OSPI_IRQ_READ_BUF_READY | OSPI_IRQ_CS_TRANS_COMP)
#define OSPI_IRQ_STAT_EN 0xF4
#define OSPI_IRQ_SIG_EN 0xF8
/* Parameters */
#define OSPI_NUM_CS 4
#define OSPI_DUMMY_CYCLE_MAX 255
#define OSPI_WAIT_MAX_MSEC 100
struct f_ospi {
void __iomem *base;
struct device *dev;
struct clk *clk;
struct mutex mlock;
};
static u32 f_ospi_get_dummy_cycle(const struct spi_mem_op *op)
{
return (op->dummy.nbytes * 8) / op->dummy.buswidth;
}
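/*
 * Example of the dummy-cycle conversion above (illustrative operation):
 * an op with 3 dummy bytes on a 4-bit wide bus gives (3 * 8) / 4 = 6
 * dummy clock cycles on the wire.
 */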
static void f_ospi_clear_irq(struct f_ospi *ospi)
{
writel(OSPI_IRQ_CS_DEASSERT | OSPI_IRQ_CS_TRANS_COMP,
ospi->base + OSPI_IRQ);
}
static void f_ospi_enable_irq_status(struct f_ospi *ospi, u32 irq_bits)
{
u32 val;
val = readl(ospi->base + OSPI_IRQ_STAT_EN);
val |= irq_bits;
writel(val, ospi->base + OSPI_IRQ_STAT_EN);
}
static void f_ospi_disable_irq_status(struct f_ospi *ospi, u32 irq_bits)
{
u32 val;
val = readl(ospi->base + OSPI_IRQ_STAT_EN);
val &= ~irq_bits;
writel(val, ospi->base + OSPI_IRQ_STAT_EN);
}
static void f_ospi_disable_irq_output(struct f_ospi *ospi, u32 irq_bits)
{
u32 val;
val = readl(ospi->base + OSPI_IRQ_SIG_EN);
val &= ~irq_bits;
writel(val, ospi->base + OSPI_IRQ_SIG_EN);
}
static int f_ospi_prepare_config(struct f_ospi *ospi)
{
u32 val, stat0, stat1;
/* G4: Disable internal clock */
val = readl(ospi->base + OSPI_CLK_CTL);
val &= ~(OSPI_CLK_CTL_BOOT_INT_CLK_EN | OSPI_CLK_CTL_INT_CLK_EN);
writel(val, ospi->base + OSPI_CLK_CTL);
/* G5: Wait for stop */
stat0 = OSPI_STAT_IS_AXI_WRITING | OSPI_STAT_IS_AXI_READING;
stat1 = OSPI_STAT_IS_SPI_IDLE | OSPI_STAT_IS_SPI_INT_CLK_STOP;
return readl_poll_timeout(ospi->base + OSPI_STAT,
val, (val & (stat0 | stat1)) == stat1,
0, OSPI_WAIT_MAX_MSEC);
}
static int f_ospi_unprepare_config(struct f_ospi *ospi)
{
u32 val;
/* G11: Enable internal clock */
val = readl(ospi->base + OSPI_CLK_CTL);
val |= OSPI_CLK_CTL_BOOT_INT_CLK_EN | OSPI_CLK_CTL_INT_CLK_EN;
writel(val, ospi->base + OSPI_CLK_CTL);
/* G12: Wait for clock to start */
return readl_poll_timeout(ospi->base + OSPI_STAT,
val, !(val & OSPI_STAT_IS_SPI_INT_CLK_STOP),
0, OSPI_WAIT_MAX_MSEC);
}
static void f_ospi_config_clk(struct f_ospi *ospi, u32 device_hz)
{
long rate_hz = clk_get_rate(ospi->clk);
u32 div = DIV_ROUND_UP(rate_hz, device_hz);
u32 div_reg;
u32 val;
if (rate_hz < device_hz) {
dev_warn(ospi->dev, "Device frequency too large: %d\n",
device_hz);
div_reg = OSPI_CLK_CTL_DIV_1;
} else {
if (div == 1) {
div_reg = OSPI_CLK_CTL_DIV_1;
} else if (div == 2) {
div_reg = OSPI_CLK_CTL_DIV_2;
} else if (div <= 4) {
div_reg = OSPI_CLK_CTL_DIV_4;
} else if (div <= 8) {
div_reg = OSPI_CLK_CTL_DIV_8;
} else {
dev_warn(ospi->dev, "Device frequency too small: %d\n",
device_hz);
div_reg = OSPI_CLK_CTL_DIV_8;
}
}
/*
* G7: Set clock mode
* clock phase is fixed at 180 degrees and configure edge direction
* instead.
*/
val = readl(ospi->base + OSPI_CLK_CTL);
val &= ~(OSPI_CLK_CTL_PHA | OSPI_CLK_CTL_DIV);
val |= FIELD_PREP(OSPI_CLK_CTL_PHA, OSPI_CLK_CTL_PHA_180)
| FIELD_PREP(OSPI_CLK_CTL_DIV, div_reg);
writel(val, ospi->base + OSPI_CLK_CTL);
}
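/*
 * Example of the divider selection above (illustrative clock rates):
 * with a 200 MHz controller clock and a 30 MHz device,
 * DIV_ROUND_UP(200, 30) = 7, so OSPI_CLK_CTL_DIV_8 is chosen and the
 * bus actually runs at 200 / 8 = 25 MHz, the fastest setting that does
 * not exceed the requested frequency.
 */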
static void f_ospi_config_dll(struct f_ospi *ospi)
{
/* G8: Configure DLL, nothing */
}
static u8 f_ospi_get_mode(struct f_ospi *ospi, int width, int data_size)
{
u8 mode = OSPI_PROT_MODE_SINGLE;
switch (width) {
case 1:
mode = OSPI_PROT_MODE_SINGLE;
break;
case 2:
mode = OSPI_PROT_MODE_DUAL;
break;
case 4:
mode = OSPI_PROT_MODE_QUAD;
break;
case 8:
mode = OSPI_PROT_MODE_OCTAL;
break;
default:
if (data_size)
dev_err(ospi->dev, "Invalid buswidth: %d\n", width);
break;
}
return mode;
}
static void f_ospi_config_indir_protocol(struct f_ospi *ospi,
struct spi_mem *mem,
const struct spi_mem_op *op)
{
struct spi_device *spi = mem->spi;
u8 mode;
u32 prot = 0, val;
int unit;
/* Set one chip select */
writel(BIT(spi_get_chipselect(spi, 0)), ospi->base + OSPI_SSEL);
mode = f_ospi_get_mode(ospi, op->cmd.buswidth, 1);
prot |= FIELD_PREP(OSPI_PROT_MODE_CODE_MASK, mode);
mode = f_ospi_get_mode(ospi, op->addr.buswidth, op->addr.nbytes);
prot |= FIELD_PREP(OSPI_PROT_MODE_ADDR_MASK, mode);
mode = f_ospi_get_mode(ospi, op->data.buswidth, op->data.nbytes);
prot |= FIELD_PREP(OSPI_PROT_MODE_DATA_MASK, mode);
prot |= FIELD_PREP(OSPI_PROT_DATA_RATE_DATA, OSPI_PROT_SDR);
prot |= FIELD_PREP(OSPI_PROT_DATA_RATE_ALT, OSPI_PROT_SDR);
prot |= FIELD_PREP(OSPI_PROT_DATA_RATE_ADDR, OSPI_PROT_SDR);
prot |= FIELD_PREP(OSPI_PROT_DATA_RATE_CODE, OSPI_PROT_SDR);
if (spi->mode & SPI_LSB_FIRST)
prot |= OSPI_PROT_BIT_POS_DATA | OSPI_PROT_BIT_POS_ALT
| OSPI_PROT_BIT_POS_ADDR | OSPI_PROT_BIT_POS_CODE;
if (spi->mode & SPI_CPHA)
prot |= OSPI_PROT_SAMP_EDGE;
/* Examine nbytes % 4 */
switch (op->data.nbytes & 0x3) {
case 0:
unit = OSPI_PROT_DATA_UNIT_4B;
val = 0;
break;
case 2:
unit = OSPI_PROT_DATA_UNIT_2B;
val = OSPI_DAT_SIZE_EN | (op->data.nbytes - 1);
break;
default:
unit = OSPI_PROT_DATA_UNIT_1B;
val = OSPI_DAT_SIZE_EN | (op->data.nbytes - 1);
break;
}
prot |= FIELD_PREP(OSPI_PROT_DATA_UNIT_MASK, unit);
switch (op->data.dir) {
case SPI_MEM_DATA_IN:
prot |= OSPI_PROT_DATA_EN;
break;
case SPI_MEM_DATA_OUT:
prot |= OSPI_PROT_TRANS_DIR_WRITE | OSPI_PROT_DATA_EN;
break;
case SPI_MEM_NO_DATA:
prot |= OSPI_PROT_TRANS_DIR_WRITE;
break;
default:
dev_warn(ospi->dev, "Unsupported direction");
break;
}
prot |= FIELD_PREP(OSPI_PROT_ADDR_SIZE_MASK, op->addr.nbytes);
prot |= FIELD_PREP(OSPI_PROT_CODE_SIZE_MASK, 1); /* 1byte */
writel(prot, ospi->base + OSPI_PROT_CTL_INDIR);
writel(val, ospi->base + OSPI_DAT_SIZE_INDIR);
}
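/*
 * Example of the data-unit selection above (illustrative transfer sizes):
 * nbytes = 8 gives 8 & 3 = 0, so the 4-byte unit is used and
 * OSPI_DAT_SIZE_INDIR stays 0; nbytes = 6 gives remainder 2, so the
 * 2-byte unit is used with OSPI_DAT_SIZE_EN | 5; an odd length such as
 * nbytes = 5 falls back to the 1-byte unit with OSPI_DAT_SIZE_EN | 4.
 */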
static int f_ospi_indir_prepare_op(struct f_ospi *ospi, struct spi_mem *mem,
const struct spi_mem_op *op)
{
struct spi_device *spi = mem->spi;
u32 irq_stat_en;
int ret;
ret = f_ospi_prepare_config(ospi);
if (ret)
return ret;
f_ospi_config_clk(ospi, spi->max_speed_hz);
f_ospi_config_indir_protocol(ospi, mem, op);
writel(f_ospi_get_dummy_cycle(op), ospi->base + OSPI_DMY_INDIR);
writel(op->addr.val, ospi->base + OSPI_ADDR);
writel(op->cmd.opcode, ospi->base + OSPI_CMD_IDX_INDIR);
f_ospi_clear_irq(ospi);
switch (op->data.dir) {
case SPI_MEM_DATA_IN:
irq_stat_en = OSPI_IRQ_READ_BUF_READY | OSPI_IRQ_CS_TRANS_COMP;
break;
case SPI_MEM_DATA_OUT:
irq_stat_en = OSPI_IRQ_WRITE_BUF_READY | OSPI_IRQ_CS_TRANS_COMP;
break;
case SPI_MEM_NO_DATA:
irq_stat_en = OSPI_IRQ_CS_TRANS_COMP;
break;
default:
dev_warn(ospi->dev, "Unsupported direction");
irq_stat_en = 0;
}
f_ospi_disable_irq_status(ospi, ~irq_stat_en);
f_ospi_enable_irq_status(ospi, irq_stat_en);
return f_ospi_unprepare_config(ospi);
}
static void f_ospi_indir_start_xfer(struct f_ospi *ospi)
{
/* Write only 1, auto cleared */
writel(OSPI_TRANS_CTL_START_REQ, ospi->base + OSPI_TRANS_CTL);
}
static void f_ospi_indir_stop_xfer(struct f_ospi *ospi)
{
/* Write only 1, auto cleared */
writel(OSPI_TRANS_CTL_STOP_REQ, ospi->base + OSPI_TRANS_CTL);
}
static int f_ospi_indir_wait_xfer_complete(struct f_ospi *ospi)
{
u32 val;
return readl_poll_timeout(ospi->base + OSPI_IRQ, val,
val & OSPI_IRQ_CS_TRANS_COMP,
0, OSPI_WAIT_MAX_MSEC);
}
static int f_ospi_indir_read(struct f_ospi *ospi, struct spi_mem *mem,
const struct spi_mem_op *op)
{
u8 *buf = op->data.buf.in;
u32 val;
int i, ret;
mutex_lock(&ospi->mlock);
/* E1-2: Prepare transfer operation */
ret = f_ospi_indir_prepare_op(ospi, mem, op);
if (ret)
goto out;
f_ospi_indir_start_xfer(ospi);
/* E3-4: Wait for ready and read data */
for (i = 0; i < op->data.nbytes; i++) {
ret = readl_poll_timeout(ospi->base + OSPI_IRQ, val,
val & OSPI_IRQ_READ_BUF_READY,
0, OSPI_WAIT_MAX_MSEC);
if (ret)
goto out;
buf[i] = readl(ospi->base + OSPI_DAT) & 0xFF;
}
/* E5-6: Stop transfer if data size is nothing */
if (!(readl(ospi->base + OSPI_DAT_SIZE_INDIR) & OSPI_DAT_SIZE_EN))
f_ospi_indir_stop_xfer(ospi);
/* E7-8: Wait for completion and clear */
ret = f_ospi_indir_wait_xfer_complete(ospi);
if (ret)
goto out;
writel(OSPI_IRQ_CS_TRANS_COMP, ospi->base + OSPI_IRQ);
/* E9: Do nothing if data size is valid */
if (readl(ospi->base + OSPI_DAT_SIZE_INDIR) & OSPI_DAT_SIZE_EN)
goto out;
/* E10-11: Reset and check read fifo */
writel(OSPI_SWRST_INDIR_READ_FIFO, ospi->base + OSPI_SWRST);
ret = readl_poll_timeout(ospi->base + OSPI_SWRST, val,
!(val & OSPI_SWRST_INDIR_READ_FIFO),
0, OSPI_WAIT_MAX_MSEC);
out:
mutex_unlock(&ospi->mlock);
return ret;
}
static int f_ospi_indir_write(struct f_ospi *ospi, struct spi_mem *mem,
const struct spi_mem_op *op)
{
u8 *buf = (u8 *)op->data.buf.out;
u32 val;
int i, ret;
mutex_lock(&ospi->mlock);
/* F1-3: Prepare transfer operation */
ret = f_ospi_indir_prepare_op(ospi, mem, op);
if (ret)
goto out;
f_ospi_indir_start_xfer(ospi);
if (!(readl(ospi->base + OSPI_PROT_CTL_INDIR) & OSPI_PROT_DATA_EN))
goto nodata;
/* F4-5: Wait for buffer ready and write data */
for (i = 0; i < op->data.nbytes; i++) {
ret = readl_poll_timeout(ospi->base + OSPI_IRQ, val,
val & OSPI_IRQ_WRITE_BUF_READY,
0, OSPI_WAIT_MAX_MSEC);
if (ret)
goto out;
writel(buf[i], ospi->base + OSPI_DAT);
}
/* F6-7: Stop transfer if data size is nothing */
if (!(readl(ospi->base + OSPI_DAT_SIZE_INDIR) & OSPI_DAT_SIZE_EN))
f_ospi_indir_stop_xfer(ospi);
nodata:
/* F8-9: Wait for completion and clear */
ret = f_ospi_indir_wait_xfer_complete(ospi);
if (ret)
goto out;
writel(OSPI_IRQ_CS_TRANS_COMP, ospi->base + OSPI_IRQ);
out:
mutex_unlock(&ospi->mlock);
return ret;
}
static int f_ospi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
struct f_ospi *ospi = spi_controller_get_devdata(mem->spi->controller);
int err = 0;
switch (op->data.dir) {
case SPI_MEM_DATA_IN:
err = f_ospi_indir_read(ospi, mem, op);
break;
case SPI_MEM_DATA_OUT:
fallthrough;
case SPI_MEM_NO_DATA:
err = f_ospi_indir_write(ospi, mem, op);
break;
default:
dev_warn(ospi->dev, "Unsupported direction");
err = -EOPNOTSUPP;
}
return err;
}
static bool f_ospi_supports_op_width(struct spi_mem *mem,
const struct spi_mem_op *op)
{
static const u8 width_available[] = { 0, 1, 2, 4, 8 };
u8 width_op[] = { op->cmd.buswidth, op->addr.buswidth,
op->dummy.buswidth, op->data.buswidth };
bool is_match_found;
int i, j;
for (i = 0; i < ARRAY_SIZE(width_op); i++) {
is_match_found = false;
for (j = 0; j < ARRAY_SIZE(width_available); j++) {
if (width_op[i] == width_available[j]) {
is_match_found = true;
break;
}
}
if (!is_match_found)
return false;
}
return true;
}
static bool f_ospi_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
if (f_ospi_get_dummy_cycle(op) > OSPI_DUMMY_CYCLE_MAX)
return false;
if (op->addr.nbytes > 4)
return false;
if (!f_ospi_supports_op_width(mem, op))
return false;
return spi_mem_default_supports_op(mem, op);
}
static int f_ospi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
op->data.nbytes = min_t(int, op->data.nbytes, OSPI_DAT_SIZE_MAX);
return 0;
}
static const struct spi_controller_mem_ops f_ospi_mem_ops = {
.adjust_op_size = f_ospi_adjust_op_size,
.supports_op = f_ospi_supports_op,
.exec_op = f_ospi_exec_op,
};
static int f_ospi_init(struct f_ospi *ospi)
{
int ret;
ret = f_ospi_prepare_config(ospi);
if (ret)
return ret;
/* Disable boot signal */
writel(OSPI_ACC_MODE_BOOT_DISABLE, ospi->base + OSPI_ACC_MODE);
f_ospi_config_dll(ospi);
/* Disable IRQ */
f_ospi_clear_irq(ospi);
f_ospi_disable_irq_status(ospi, OSPI_IRQ_ALL);
f_ospi_disable_irq_output(ospi, OSPI_IRQ_ALL);
return f_ospi_unprepare_config(ospi);
}
static int f_ospi_probe(struct platform_device *pdev)
{
struct spi_controller *ctlr;
struct device *dev = &pdev->dev;
struct f_ospi *ospi;
u32 num_cs = OSPI_NUM_CS;
int ret;
ctlr = spi_alloc_host(dev, sizeof(*ospi));
if (!ctlr)
return -ENOMEM;
ctlr->mode_bits = SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL
| SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL
| SPI_MODE_0 | SPI_MODE_1 | SPI_LSB_FIRST;
ctlr->mem_ops = &f_ospi_mem_ops;
ctlr->bus_num = -1;
of_property_read_u32(dev->of_node, "num-cs", &num_cs);
if (num_cs > OSPI_NUM_CS) {
dev_err(dev, "num-cs too large: %d\n", num_cs);
ret = -EINVAL;
goto err_put_ctlr;
}
ctlr->num_chipselect = num_cs;
ctlr->dev.of_node = dev->of_node;
ospi = spi_controller_get_devdata(ctlr);
ospi->dev = dev;
platform_set_drvdata(pdev, ospi);
ospi->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ospi->base)) {
ret = PTR_ERR(ospi->base);
goto err_put_ctlr;
}
ospi->clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(ospi->clk)) {
ret = PTR_ERR(ospi->clk);
goto err_put_ctlr;
}
mutex_init(&ospi->mlock);
ret = f_ospi_init(ospi);
if (ret)
goto err_destroy_mutex;
ret = devm_spi_register_controller(dev, ctlr);
if (ret)
goto err_destroy_mutex;
return 0;
err_destroy_mutex:
mutex_destroy(&ospi->mlock);
err_put_ctlr:
spi_controller_put(ctlr);
return ret;
}
static void f_ospi_remove(struct platform_device *pdev)
{
struct f_ospi *ospi = platform_get_drvdata(pdev);
mutex_destroy(&ospi->mlock);
}
static const struct of_device_id f_ospi_dt_ids[] = {
{ .compatible = "socionext,f-ospi" },
{}
};
MODULE_DEVICE_TABLE(of, f_ospi_dt_ids);
static struct platform_driver f_ospi_driver = {
.driver = {
.name = "socionext,f-ospi",
.of_match_table = f_ospi_dt_ids,
},
.probe = f_ospi_probe,
.remove_new = f_ospi_remove,
};
module_platform_driver(f_ospi_driver);
MODULE_DESCRIPTION("Socionext F_OSPI controller driver");
MODULE_AUTHOR("Socionext Inc.");
MODULE_AUTHOR("Kunihiko Hayashi <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-sn-f-ospi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011-2015 Daniel Schwierzeck <[email protected]>
* Copyright (C) 2016 Hauke Mehrtens <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#ifdef CONFIG_LANTIQ
#include <lantiq_soc.h>
#endif
#define LTQ_SPI_RX_IRQ_NAME "spi_rx"
#define LTQ_SPI_TX_IRQ_NAME "spi_tx"
#define LTQ_SPI_ERR_IRQ_NAME "spi_err"
#define LTQ_SPI_FRM_IRQ_NAME "spi_frm"
#define LTQ_SPI_CLC 0x00
#define LTQ_SPI_PISEL 0x04
#define LTQ_SPI_ID 0x08
#define LTQ_SPI_CON 0x10
#define LTQ_SPI_STAT 0x14
#define LTQ_SPI_WHBSTATE 0x18
#define LTQ_SPI_TB 0x20
#define LTQ_SPI_RB 0x24
#define LTQ_SPI_RXFCON 0x30
#define LTQ_SPI_TXFCON 0x34
#define LTQ_SPI_FSTAT 0x38
#define LTQ_SPI_BRT 0x40
#define LTQ_SPI_BRSTAT 0x44
#define LTQ_SPI_SFCON 0x60
#define LTQ_SPI_SFSTAT 0x64
#define LTQ_SPI_GPOCON 0x70
#define LTQ_SPI_GPOSTAT 0x74
#define LTQ_SPI_FPGO 0x78
#define LTQ_SPI_RXREQ 0x80
#define LTQ_SPI_RXCNT 0x84
#define LTQ_SPI_DMACON 0xec
#define LTQ_SPI_IRNEN 0xf4
#define LTQ_SPI_CLC_SMC_S 16 /* Clock divider for sleep mode */
#define LTQ_SPI_CLC_SMC_M (0xFF << LTQ_SPI_CLC_SMC_S)
#define LTQ_SPI_CLC_RMC_S 8 /* Clock divider for normal run mode */
#define LTQ_SPI_CLC_RMC_M (0xFF << LTQ_SPI_CLC_RMC_S)
#define LTQ_SPI_CLC_DISS BIT(1) /* Disable status bit */
#define LTQ_SPI_CLC_DISR BIT(0) /* Disable request bit */
#define LTQ_SPI_ID_TXFS_S 24 /* Implemented TX FIFO size */
#define LTQ_SPI_ID_RXFS_S 16 /* Implemented RX FIFO size */
#define LTQ_SPI_ID_MOD_S 8 /* Module ID */
#define LTQ_SPI_ID_MOD_M (0xff << LTQ_SPI_ID_MOD_S)
#define LTQ_SPI_ID_CFG_S 5 /* DMA interface support */
#define LTQ_SPI_ID_CFG_M (1 << LTQ_SPI_ID_CFG_S)
#define LTQ_SPI_ID_REV_M 0x1F /* Hardware revision number */
#define LTQ_SPI_CON_BM_S 16 /* Data width selection */
#define LTQ_SPI_CON_BM_M (0x1F << LTQ_SPI_CON_BM_S)
#define LTQ_SPI_CON_EM BIT(24) /* Echo mode */
#define LTQ_SPI_CON_IDLE BIT(23) /* Idle bit value */
#define LTQ_SPI_CON_ENBV BIT(22) /* Enable byte valid control */
#define LTQ_SPI_CON_RUEN BIT(12) /* Receive underflow error enable */
#define LTQ_SPI_CON_TUEN BIT(11) /* Transmit underflow error enable */
#define LTQ_SPI_CON_AEN BIT(10) /* Abort error enable */
#define LTQ_SPI_CON_REN BIT(9) /* Receive overflow error enable */
#define LTQ_SPI_CON_TEN BIT(8) /* Transmit overflow error enable */
#define LTQ_SPI_CON_LB BIT(7) /* Loopback control */
#define LTQ_SPI_CON_PO BIT(6) /* Clock polarity control */
#define LTQ_SPI_CON_PH BIT(5) /* Clock phase control */
#define LTQ_SPI_CON_HB BIT(4) /* Heading control */
#define LTQ_SPI_CON_RXOFF BIT(1) /* Switch receiver off */
#define LTQ_SPI_CON_TXOFF BIT(0) /* Switch transmitter off */
#define LTQ_SPI_STAT_RXBV_S 28
#define LTQ_SPI_STAT_RXBV_M (0x7 << LTQ_SPI_STAT_RXBV_S)
#define LTQ_SPI_STAT_BSY BIT(13) /* Busy flag */
#define LTQ_SPI_STAT_RUE BIT(12) /* Receive underflow error flag */
#define LTQ_SPI_STAT_TUE BIT(11) /* Transmit underflow error flag */
#define LTQ_SPI_STAT_AE BIT(10) /* Abort error flag */
#define LTQ_SPI_STAT_RE BIT(9) /* Receive error flag */
#define LTQ_SPI_STAT_TE BIT(8) /* Transmit error flag */
#define LTQ_SPI_STAT_ME BIT(7) /* Mode error flag */
#define LTQ_SPI_STAT_MS BIT(1) /* Host/target select bit */
#define LTQ_SPI_STAT_EN BIT(0) /* Enable bit */
#define LTQ_SPI_STAT_ERRORS (LTQ_SPI_STAT_ME | LTQ_SPI_STAT_TE | \
LTQ_SPI_STAT_RE | LTQ_SPI_STAT_AE | \
LTQ_SPI_STAT_TUE | LTQ_SPI_STAT_RUE)
#define LTQ_SPI_WHBSTATE_SETTUE BIT(15) /* Set transmit underflow error flag */
#define LTQ_SPI_WHBSTATE_SETAE BIT(14) /* Set abort error flag */
#define LTQ_SPI_WHBSTATE_SETRE BIT(13) /* Set receive error flag */
#define LTQ_SPI_WHBSTATE_SETTE BIT(12) /* Set transmit error flag */
#define LTQ_SPI_WHBSTATE_CLRTUE BIT(11) /* Clear transmit underflow error flag */
#define LTQ_SPI_WHBSTATE_CLRAE BIT(10) /* Clear abort error flag */
#define LTQ_SPI_WHBSTATE_CLRRE BIT(9) /* Clear receive error flag */
#define LTQ_SPI_WHBSTATE_CLRTE BIT(8) /* Clear transmit error flag */
#define LTQ_SPI_WHBSTATE_SETME BIT(7) /* Set mode error flag */
#define LTQ_SPI_WHBSTATE_CLRME BIT(6) /* Clear mode error flag */
#define LTQ_SPI_WHBSTATE_SETRUE BIT(5) /* Set receive underflow error flag */
#define LTQ_SPI_WHBSTATE_CLRRUE BIT(4) /* Clear receive underflow error flag */
#define LTQ_SPI_WHBSTATE_SETMS BIT(3) /* Set host select bit */
#define LTQ_SPI_WHBSTATE_CLRMS BIT(2) /* Clear host select bit */
#define LTQ_SPI_WHBSTATE_SETEN BIT(1) /* Set enable bit (operational mode) */
#define LTQ_SPI_WHBSTATE_CLREN BIT(0) /* Clear enable bit (config mode) */
#define LTQ_SPI_WHBSTATE_CLR_ERRORS (LTQ_SPI_WHBSTATE_CLRRUE | \
LTQ_SPI_WHBSTATE_CLRME | \
LTQ_SPI_WHBSTATE_CLRTE | \
LTQ_SPI_WHBSTATE_CLRRE | \
LTQ_SPI_WHBSTATE_CLRAE | \
LTQ_SPI_WHBSTATE_CLRTUE)
#define LTQ_SPI_RXFCON_RXFITL_S 8 /* FIFO interrupt trigger level */
#define LTQ_SPI_RXFCON_RXFLU BIT(1) /* FIFO flush */
#define LTQ_SPI_RXFCON_RXFEN BIT(0) /* FIFO enable */
#define LTQ_SPI_TXFCON_TXFITL_S 8 /* FIFO interrupt trigger level */
#define LTQ_SPI_TXFCON_TXFLU BIT(1) /* FIFO flush */
#define LTQ_SPI_TXFCON_TXFEN BIT(0) /* FIFO enable */
#define LTQ_SPI_FSTAT_RXFFL_S 0
#define LTQ_SPI_FSTAT_TXFFL_S 8
#define LTQ_SPI_GPOCON_ISCSBN_S 8
#define LTQ_SPI_GPOCON_INVOUTN_S 0
#define LTQ_SPI_FGPO_SETOUTN_S 8
#define LTQ_SPI_FGPO_CLROUTN_S 0
#define LTQ_SPI_RXREQ_RXCNT_M 0xFFFF /* Receive count value */
#define LTQ_SPI_RXCNT_TODO_M 0xFFFF /* Receive to-do value */
#define LTQ_SPI_IRNEN_TFI BIT(4) /* TX finished interrupt */
#define LTQ_SPI_IRNEN_F BIT(3) /* Frame end interrupt request */
#define LTQ_SPI_IRNEN_E BIT(2) /* Error end interrupt request */
#define LTQ_SPI_IRNEN_T_XWAY BIT(1) /* Transmit end interrupt request */
#define LTQ_SPI_IRNEN_R_XWAY BIT(0) /* Receive end interrupt request */
#define LTQ_SPI_IRNEN_R_XRX BIT(1) /* Receive end interrupt request */
#define LTQ_SPI_IRNEN_T_XRX BIT(0) /* Transmit end interrupt request */
#define LTQ_SPI_IRNEN_ALL 0x1F
struct lantiq_ssc_spi;
struct lantiq_ssc_hwcfg {
int (*cfg_irq)(struct platform_device *pdev, struct lantiq_ssc_spi *spi);
unsigned int irnen_r;
unsigned int irnen_t;
unsigned int irncr;
unsigned int irnicr;
bool irq_ack;
u32 fifo_size_mask;
};
struct lantiq_ssc_spi {
struct spi_controller *host;
struct device *dev;
void __iomem *regbase;
struct clk *spi_clk;
struct clk *fpi_clk;
const struct lantiq_ssc_hwcfg *hwcfg;
spinlock_t lock;
struct workqueue_struct *wq;
struct work_struct work;
const u8 *tx;
u8 *rx;
unsigned int tx_todo;
unsigned int rx_todo;
unsigned int bits_per_word;
unsigned int speed_hz;
unsigned int tx_fifo_size;
unsigned int rx_fifo_size;
unsigned int base_cs;
unsigned int fdx_tx_level;
};
static u32 lantiq_ssc_readl(const struct lantiq_ssc_spi *spi, u32 reg)
{
return __raw_readl(spi->regbase + reg);
}
static void lantiq_ssc_writel(const struct lantiq_ssc_spi *spi, u32 val,
u32 reg)
{
__raw_writel(val, spi->regbase + reg);
}
static void lantiq_ssc_maskl(const struct lantiq_ssc_spi *spi, u32 clr,
u32 set, u32 reg)
{
u32 val = __raw_readl(spi->regbase + reg);
val &= ~clr;
val |= set;
__raw_writel(val, spi->regbase + reg);
}
static unsigned int tx_fifo_level(const struct lantiq_ssc_spi *spi)
{
const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
u32 fstat = lantiq_ssc_readl(spi, LTQ_SPI_FSTAT);
return (fstat >> LTQ_SPI_FSTAT_TXFFL_S) & hwcfg->fifo_size_mask;
}
static unsigned int rx_fifo_level(const struct lantiq_ssc_spi *spi)
{
const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
u32 fstat = lantiq_ssc_readl(spi, LTQ_SPI_FSTAT);
return (fstat >> LTQ_SPI_FSTAT_RXFFL_S) & hwcfg->fifo_size_mask;
}
static unsigned int tx_fifo_free(const struct lantiq_ssc_spi *spi)
{
return spi->tx_fifo_size - tx_fifo_level(spi);
}
static void rx_fifo_reset(const struct lantiq_ssc_spi *spi)
{
u32 val = spi->rx_fifo_size << LTQ_SPI_RXFCON_RXFITL_S;
val |= LTQ_SPI_RXFCON_RXFEN | LTQ_SPI_RXFCON_RXFLU;
lantiq_ssc_writel(spi, val, LTQ_SPI_RXFCON);
}
static void tx_fifo_reset(const struct lantiq_ssc_spi *spi)
{
u32 val = 1 << LTQ_SPI_TXFCON_TXFITL_S;
val |= LTQ_SPI_TXFCON_TXFEN | LTQ_SPI_TXFCON_TXFLU;
lantiq_ssc_writel(spi, val, LTQ_SPI_TXFCON);
}
static void rx_fifo_flush(const struct lantiq_ssc_spi *spi)
{
lantiq_ssc_maskl(spi, 0, LTQ_SPI_RXFCON_RXFLU, LTQ_SPI_RXFCON);
}
static void tx_fifo_flush(const struct lantiq_ssc_spi *spi)
{
lantiq_ssc_maskl(spi, 0, LTQ_SPI_TXFCON_TXFLU, LTQ_SPI_TXFCON);
}
static void hw_enter_config_mode(const struct lantiq_ssc_spi *spi)
{
lantiq_ssc_writel(spi, LTQ_SPI_WHBSTATE_CLREN, LTQ_SPI_WHBSTATE);
}
static void hw_enter_active_mode(const struct lantiq_ssc_spi *spi)
{
lantiq_ssc_writel(spi, LTQ_SPI_WHBSTATE_SETEN, LTQ_SPI_WHBSTATE);
}
static void hw_setup_speed_hz(const struct lantiq_ssc_spi *spi,
unsigned int max_speed_hz)
{
u32 spi_clk, brt;
/*
* SPI module clock is derived from the FPI bus clock, depending on the
* divider value in CLC.RMC which is always set to 1.
*
* baudrate = f_SPI / (2 * (BR + 1))
*/
spi_clk = clk_get_rate(spi->fpi_clk) / 2;
if (max_speed_hz > spi_clk)
brt = 0;
else
brt = spi_clk / max_speed_hz - 1;
if (brt > 0xFFFF)
brt = 0xFFFF;
dev_dbg(spi->dev, "spi_clk %u, max_speed_hz %u, brt %u\n",
spi_clk, max_speed_hz, brt);
lantiq_ssc_writel(spi, brt, LTQ_SPI_BRT);
}
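/*
 * Worked example for the baud-rate formula above (illustrative clocks):
 * assuming an FPI bus clock of 100 MHz, f_SPI / 2 = 50 MHz; a requested
 * max_speed_hz of 2 MHz gives brt = 50 MHz / 2 MHz - 1 = 24, and the
 * resulting bit clock is 100 MHz / (2 * (24 + 1)) = 2 MHz.
 */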
static void hw_setup_bits_per_word(const struct lantiq_ssc_spi *spi,
unsigned int bits_per_word)
{
u32 bm;
/* CON.BM value = bits_per_word - 1 */
bm = (bits_per_word - 1) << LTQ_SPI_CON_BM_S;
lantiq_ssc_maskl(spi, LTQ_SPI_CON_BM_M, bm, LTQ_SPI_CON);
}
static void hw_setup_clock_mode(const struct lantiq_ssc_spi *spi,
unsigned int mode)
{
u32 con_set = 0, con_clr = 0;
/*
* SPI mode mapping in CON register:
* Mode CPOL CPHA CON.PO CON.PH
* 0 0 0 0 1
* 1 0 1 0 0
* 2 1 0 1 1
* 3 1 1 1 0
*/
if (mode & SPI_CPHA)
con_clr |= LTQ_SPI_CON_PH;
else
con_set |= LTQ_SPI_CON_PH;
if (mode & SPI_CPOL)
con_set |= LTQ_SPI_CON_PO | LTQ_SPI_CON_IDLE;
else
con_clr |= LTQ_SPI_CON_PO | LTQ_SPI_CON_IDLE;
/* Set heading control */
if (mode & SPI_LSB_FIRST)
con_clr |= LTQ_SPI_CON_HB;
else
con_set |= LTQ_SPI_CON_HB;
/* Set loopback mode */
if (mode & SPI_LOOP)
con_set |= LTQ_SPI_CON_LB;
else
con_clr |= LTQ_SPI_CON_LB;
lantiq_ssc_maskl(spi, con_clr, con_set, LTQ_SPI_CON);
}
static void lantiq_ssc_hw_init(const struct lantiq_ssc_spi *spi)
{
const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
/*
* Set clock divider for run mode to 1 to
* run at the same frequency as the FPI bus
*/
lantiq_ssc_writel(spi, 1 << LTQ_SPI_CLC_RMC_S, LTQ_SPI_CLC);
/* Put controller into config mode */
hw_enter_config_mode(spi);
/* Clear error flags */
lantiq_ssc_maskl(spi, 0, LTQ_SPI_WHBSTATE_CLR_ERRORS, LTQ_SPI_WHBSTATE);
/* Enable error checking, disable TX/RX */
lantiq_ssc_writel(spi, LTQ_SPI_CON_RUEN | LTQ_SPI_CON_AEN |
LTQ_SPI_CON_TEN | LTQ_SPI_CON_REN | LTQ_SPI_CON_TXOFF |
LTQ_SPI_CON_RXOFF, LTQ_SPI_CON);
/* Setup default SPI mode */
hw_setup_bits_per_word(spi, spi->bits_per_word);
hw_setup_clock_mode(spi, SPI_MODE_0);
/* Enable host mode and clear error flags */
lantiq_ssc_writel(spi, LTQ_SPI_WHBSTATE_SETMS |
LTQ_SPI_WHBSTATE_CLR_ERRORS,
LTQ_SPI_WHBSTATE);
/* Reset GPIO/CS registers */
lantiq_ssc_writel(spi, 0, LTQ_SPI_GPOCON);
lantiq_ssc_writel(spi, 0xFF00, LTQ_SPI_FPGO);
/* Enable and flush FIFOs */
rx_fifo_reset(spi);
tx_fifo_reset(spi);
/* Enable interrupts */
lantiq_ssc_writel(spi, hwcfg->irnen_t | hwcfg->irnen_r |
LTQ_SPI_IRNEN_E, LTQ_SPI_IRNEN);
}
static int lantiq_ssc_setup(struct spi_device *spidev)
{
struct spi_controller *host = spidev->controller;
struct lantiq_ssc_spi *spi = spi_controller_get_devdata(host);
unsigned int cs = spi_get_chipselect(spidev, 0);
u32 gpocon;
/* GPIOs are used for CS */
if (spi_get_csgpiod(spidev, 0))
return 0;
dev_dbg(spi->dev, "using internal chipselect %u\n", cs);
if (cs < spi->base_cs) {
dev_err(spi->dev,
"chipselect %i too small (min %i)\n", cs, spi->base_cs);
return -EINVAL;
}
/* set GPO pin to CS mode */
gpocon = 1 << ((cs - spi->base_cs) + LTQ_SPI_GPOCON_ISCSBN_S);
/* invert GPO pin */
if (spidev->mode & SPI_CS_HIGH)
gpocon |= 1 << (cs - spi->base_cs);
lantiq_ssc_maskl(spi, 0, gpocon, LTQ_SPI_GPOCON);
return 0;
}
static int lantiq_ssc_prepare_message(struct spi_controller *host,
struct spi_message *message)
{
struct lantiq_ssc_spi *spi = spi_controller_get_devdata(host);
hw_enter_config_mode(spi);
hw_setup_clock_mode(spi, message->spi->mode);
hw_enter_active_mode(spi);
return 0;
}
static void hw_setup_transfer(struct lantiq_ssc_spi *spi,
struct spi_device *spidev, struct spi_transfer *t)
{
unsigned int speed_hz = t->speed_hz;
unsigned int bits_per_word = t->bits_per_word;
u32 con;
if (bits_per_word != spi->bits_per_word ||
speed_hz != spi->speed_hz) {
hw_enter_config_mode(spi);
hw_setup_speed_hz(spi, speed_hz);
hw_setup_bits_per_word(spi, bits_per_word);
hw_enter_active_mode(spi);
spi->speed_hz = speed_hz;
spi->bits_per_word = bits_per_word;
}
/* Configure transmitter and receiver */
con = lantiq_ssc_readl(spi, LTQ_SPI_CON);
if (t->tx_buf)
con &= ~LTQ_SPI_CON_TXOFF;
else
con |= LTQ_SPI_CON_TXOFF;
if (t->rx_buf)
con &= ~LTQ_SPI_CON_RXOFF;
else
con |= LTQ_SPI_CON_RXOFF;
lantiq_ssc_writel(spi, con, LTQ_SPI_CON);
}
static int lantiq_ssc_unprepare_message(struct spi_controller *host,
struct spi_message *message)
{
struct lantiq_ssc_spi *spi = spi_controller_get_devdata(host);
flush_workqueue(spi->wq);
/* Disable transmitter and receiver while idle */
lantiq_ssc_maskl(spi, 0, LTQ_SPI_CON_TXOFF | LTQ_SPI_CON_RXOFF,
LTQ_SPI_CON);
return 0;
}
static void tx_fifo_write(struct lantiq_ssc_spi *spi)
{
const u8 *tx8;
const u16 *tx16;
const u32 *tx32;
u32 data;
unsigned int tx_free = tx_fifo_free(spi);
spi->fdx_tx_level = 0;
while (spi->tx_todo && tx_free) {
switch (spi->bits_per_word) {
case 2 ... 8:
tx8 = spi->tx;
data = *tx8;
spi->tx_todo--;
spi->tx++;
break;
case 16:
tx16 = (u16 *) spi->tx;
data = *tx16;
spi->tx_todo -= 2;
spi->tx += 2;
break;
case 32:
tx32 = (u32 *) spi->tx;
data = *tx32;
spi->tx_todo -= 4;
spi->tx += 4;
break;
default:
WARN_ON(1);
data = 0;
break;
}
lantiq_ssc_writel(spi, data, LTQ_SPI_TB);
tx_free--;
spi->fdx_tx_level++;
}
}
static void rx_fifo_read_full_duplex(struct lantiq_ssc_spi *spi)
{
u8 *rx8;
u16 *rx16;
u32 *rx32;
u32 data;
unsigned int rx_fill = rx_fifo_level(spi);
/*
* Wait until all expected data has been shifted in.
* Otherwise, an RX overrun may occur.
*/
while (rx_fill != spi->fdx_tx_level)
rx_fill = rx_fifo_level(spi);
while (rx_fill) {
data = lantiq_ssc_readl(spi, LTQ_SPI_RB);
switch (spi->bits_per_word) {
case 2 ... 8:
rx8 = spi->rx;
*rx8 = data;
spi->rx_todo--;
spi->rx++;
break;
case 16:
rx16 = (u16 *) spi->rx;
*rx16 = data;
spi->rx_todo -= 2;
spi->rx += 2;
break;
case 32:
rx32 = (u32 *) spi->rx;
*rx32 = data;
spi->rx_todo -= 4;
spi->rx += 4;
break;
default:
WARN_ON(1);
break;
}
rx_fill--;
}
}
static void rx_fifo_read_half_duplex(struct lantiq_ssc_spi *spi)
{
u32 data, *rx32;
u8 *rx8;
unsigned int rxbv, shift;
unsigned int rx_fill = rx_fifo_level(spi);
/*
* In RX-only mode the bits per word value is ignored by HW. A value
* of 32 is used instead. Thus all 4 bytes per FIFO entry must be read.
* If remaining RX bytes are less than 4, the FIFO must be read
* differently. The amount of received and valid bytes is indicated
* by STAT.RXBV register value.
*/
while (rx_fill) {
if (spi->rx_todo < 4) {
rxbv = (lantiq_ssc_readl(spi, LTQ_SPI_STAT) &
LTQ_SPI_STAT_RXBV_M) >> LTQ_SPI_STAT_RXBV_S;
data = lantiq_ssc_readl(spi, LTQ_SPI_RB);
shift = (rxbv - 1) * 8;
rx8 = spi->rx;
while (rxbv) {
*rx8++ = (data >> shift) & 0xFF;
rxbv--;
shift -= 8;
spi->rx_todo--;
spi->rx++;
}
} else {
data = lantiq_ssc_readl(spi, LTQ_SPI_RB);
rx32 = (u32 *) spi->rx;
*rx32++ = data;
spi->rx_todo -= 4;
spi->rx += 4;
}
rx_fill--;
}
}
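/*
 * Example of the tail handling above (illustrative values): with
 * rx_todo = 3 the hardware reports STAT.RXBV = 3, the three valid bytes
 * occupy the low 24 bits of the 32-bit FIFO word with the first-received
 * byte in the most significant of those positions, and they are peeled
 * off with shifts of 16, 8 and 0 bits respectively.
 */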
static void rx_request(struct lantiq_ssc_spi *spi)
{
unsigned int rxreq, rxreq_max;
/*
* To avoid receive overflows at high clocks it is better to request
* only the amount of bytes that fits into all FIFOs. This value
* depends on the FIFO size implemented in hardware.
*/
rxreq = spi->rx_todo;
rxreq_max = spi->rx_fifo_size * 4;
if (rxreq > rxreq_max)
rxreq = rxreq_max;
lantiq_ssc_writel(spi, rxreq, LTQ_SPI_RXREQ);
}
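/*
 * Example of the clamping above (illustrative FIFO size): assuming an
 * 8-entry RX FIFO, rxreq_max = 8 * 4 = 32 bytes; a transfer with
 * rx_todo = 100 therefore requests 32 bytes now, and the interrupt
 * handler issues further requests as the FIFO is drained.
 */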
static irqreturn_t lantiq_ssc_xmit_interrupt(int irq, void *data)
{
struct lantiq_ssc_spi *spi = data;
const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
u32 val = lantiq_ssc_readl(spi, hwcfg->irncr);
spin_lock(&spi->lock);
if (hwcfg->irq_ack)
lantiq_ssc_writel(spi, val, hwcfg->irncr);
if (spi->tx) {
if (spi->rx && spi->rx_todo)
rx_fifo_read_full_duplex(spi);
if (spi->tx_todo)
tx_fifo_write(spi);
else if (!tx_fifo_level(spi))
goto completed;
} else if (spi->rx) {
if (spi->rx_todo) {
rx_fifo_read_half_duplex(spi);
if (spi->rx_todo)
rx_request(spi);
else
goto completed;
} else {
goto completed;
}
}
spin_unlock(&spi->lock);
return IRQ_HANDLED;
completed:
queue_work(spi->wq, &spi->work);
spin_unlock(&spi->lock);
return IRQ_HANDLED;
}
static irqreturn_t lantiq_ssc_err_interrupt(int irq, void *data)
{
struct lantiq_ssc_spi *spi = data;
const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
u32 stat = lantiq_ssc_readl(spi, LTQ_SPI_STAT);
u32 val = lantiq_ssc_readl(spi, hwcfg->irncr);
if (!(stat & LTQ_SPI_STAT_ERRORS))
return IRQ_NONE;
spin_lock(&spi->lock);
if (hwcfg->irq_ack)
lantiq_ssc_writel(spi, val, hwcfg->irncr);
if (stat & LTQ_SPI_STAT_RUE)
dev_err(spi->dev, "receive underflow error\n");
if (stat & LTQ_SPI_STAT_TUE)
dev_err(spi->dev, "transmit underflow error\n");
if (stat & LTQ_SPI_STAT_AE)
dev_err(spi->dev, "abort error\n");
if (stat & LTQ_SPI_STAT_RE)
dev_err(spi->dev, "receive overflow error\n");
if (stat & LTQ_SPI_STAT_TE)
dev_err(spi->dev, "transmit overflow error\n");
if (stat & LTQ_SPI_STAT_ME)
dev_err(spi->dev, "mode error\n");
/* Clear error flags */
lantiq_ssc_maskl(spi, 0, LTQ_SPI_WHBSTATE_CLR_ERRORS, LTQ_SPI_WHBSTATE);
/* set bad status so it can be retried */
if (spi->host->cur_msg)
spi->host->cur_msg->status = -EIO;
queue_work(spi->wq, &spi->work);
spin_unlock(&spi->lock);
return IRQ_HANDLED;
}
static irqreturn_t intel_lgm_ssc_isr(int irq, void *data)
{
struct lantiq_ssc_spi *spi = data;
const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
u32 val = lantiq_ssc_readl(spi, hwcfg->irncr);
if (!(val & LTQ_SPI_IRNEN_ALL))
return IRQ_NONE;
if (val & LTQ_SPI_IRNEN_E)
return lantiq_ssc_err_interrupt(irq, data);
if ((val & hwcfg->irnen_t) || (val & hwcfg->irnen_r))
return lantiq_ssc_xmit_interrupt(irq, data);
return IRQ_HANDLED;
}
static int transfer_start(struct lantiq_ssc_spi *spi, struct spi_device *spidev,
struct spi_transfer *t)
{
unsigned long flags;
spin_lock_irqsave(&spi->lock, flags);
spi->tx = t->tx_buf;
spi->rx = t->rx_buf;
if (t->tx_buf) {
spi->tx_todo = t->len;
/* initially fill TX FIFO */
tx_fifo_write(spi);
}
if (spi->rx) {
spi->rx_todo = t->len;
/* start shift clock in RX-only mode */
if (!spi->tx)
rx_request(spi);
}
spin_unlock_irqrestore(&spi->lock, flags);
return t->len;
}
/*
* The driver only gets an interrupt when the FIFO is empty, but there
* is an additional shift register from which the data is written to
* the wire. We get the last interrupt when the controller starts to
* write the last word to the wire, not when it is finished. Do busy
* waiting till it finishes.
*/
static void lantiq_ssc_bussy_work(struct work_struct *work)
{
struct lantiq_ssc_spi *spi;
unsigned long long timeout = 8LL * 1000LL;
unsigned long end;
spi = container_of(work, typeof(*spi), work);
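/*
* Note: 8 * 1000 / speed_hz approximates, in milliseconds, the time needed
* to shift out the last bits still sitting in the shift register; it is
* doubled and padded below as tolerance.
*/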
do_div(timeout, spi->speed_hz);
timeout += timeout + 100; /* some tolerance */
end = jiffies + msecs_to_jiffies(timeout);
do {
u32 stat = lantiq_ssc_readl(spi, LTQ_SPI_STAT);
if (!(stat & LTQ_SPI_STAT_BSY)) {
spi_finalize_current_transfer(spi->host);
return;
}
cond_resched();
} while (!time_after_eq(jiffies, end));
if (spi->host->cur_msg)
spi->host->cur_msg->status = -EIO;
spi_finalize_current_transfer(spi->host);
}
static void lantiq_ssc_handle_err(struct spi_controller *host,
struct spi_message *message)
{
struct lantiq_ssc_spi *spi = spi_controller_get_devdata(host);
/* flush FIFOs on timeout */
rx_fifo_flush(spi);
tx_fifo_flush(spi);
}
static void lantiq_ssc_set_cs(struct spi_device *spidev, bool enable)
{
struct lantiq_ssc_spi *spi = spi_controller_get_devdata(spidev->controller);
unsigned int cs = spi_get_chipselect(spidev, 0);
u32 fgpo;
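/*
* Note (inferred from the LTQ_SPI_FGPO_SETOUTN_S name, not stated in the
* original source): FGPO appears to hold separate "clear output" bits in
* its low half and "set output" bits starting at SETOUTN_S, so a single
* write either asserts or deasserts the selected chip select line.
*/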
if (!!(spidev->mode & SPI_CS_HIGH) == enable)
fgpo = (1 << (cs - spi->base_cs));
else
fgpo = (1 << (cs - spi->base_cs + LTQ_SPI_FGPO_SETOUTN_S));
lantiq_ssc_writel(spi, fgpo, LTQ_SPI_FPGO);
}
static int lantiq_ssc_transfer_one(struct spi_controller *host,
struct spi_device *spidev,
struct spi_transfer *t)
{
struct lantiq_ssc_spi *spi = spi_controller_get_devdata(host);
hw_setup_transfer(spi, spidev, t);
return transfer_start(spi, spidev, t);
}
static int intel_lgm_cfg_irq(struct platform_device *pdev, struct lantiq_ssc_spi *spi)
{
int irq;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
return devm_request_irq(&pdev->dev, irq, intel_lgm_ssc_isr, 0, "spi", spi);
}
static int lantiq_cfg_irq(struct platform_device *pdev, struct lantiq_ssc_spi *spi)
{
int irq, err;
irq = platform_get_irq_byname(pdev, LTQ_SPI_RX_IRQ_NAME);
if (irq < 0)
return irq;
err = devm_request_irq(&pdev->dev, irq, lantiq_ssc_xmit_interrupt,
0, LTQ_SPI_RX_IRQ_NAME, spi);
if (err)
return err;
irq = platform_get_irq_byname(pdev, LTQ_SPI_TX_IRQ_NAME);
if (irq < 0)
return irq;
err = devm_request_irq(&pdev->dev, irq, lantiq_ssc_xmit_interrupt,
0, LTQ_SPI_TX_IRQ_NAME, spi);
if (err)
return err;
irq = platform_get_irq_byname(pdev, LTQ_SPI_ERR_IRQ_NAME);
if (irq < 0)
return irq;
err = devm_request_irq(&pdev->dev, irq, lantiq_ssc_err_interrupt,
0, LTQ_SPI_ERR_IRQ_NAME, spi);
return err;
}
static const struct lantiq_ssc_hwcfg lantiq_ssc_xway = {
.cfg_irq = lantiq_cfg_irq,
.irnen_r = LTQ_SPI_IRNEN_R_XWAY,
.irnen_t = LTQ_SPI_IRNEN_T_XWAY,
.irnicr = 0xF8,
.irncr = 0xFC,
.fifo_size_mask = GENMASK(5, 0),
.irq_ack = false,
};
static const struct lantiq_ssc_hwcfg lantiq_ssc_xrx = {
.cfg_irq = lantiq_cfg_irq,
.irnen_r = LTQ_SPI_IRNEN_R_XRX,
.irnen_t = LTQ_SPI_IRNEN_T_XRX,
.irnicr = 0xF8,
.irncr = 0xFC,
.fifo_size_mask = GENMASK(5, 0),
.irq_ack = false,
};
static const struct lantiq_ssc_hwcfg intel_ssc_lgm = {
.cfg_irq = intel_lgm_cfg_irq,
.irnen_r = LTQ_SPI_IRNEN_R_XRX,
.irnen_t = LTQ_SPI_IRNEN_T_XRX,
.irnicr = 0xFC,
.irncr = 0xF8,
.fifo_size_mask = GENMASK(7, 0),
.irq_ack = true,
};
static const struct of_device_id lantiq_ssc_match[] = {
{ .compatible = "lantiq,ase-spi", .data = &lantiq_ssc_xway, },
{ .compatible = "lantiq,falcon-spi", .data = &lantiq_ssc_xrx, },
{ .compatible = "lantiq,xrx100-spi", .data = &lantiq_ssc_xrx, },
{ .compatible = "intel,lgm-spi", .data = &intel_ssc_lgm, },
{},
};
MODULE_DEVICE_TABLE(of, lantiq_ssc_match);
static int lantiq_ssc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct spi_controller *host;
struct lantiq_ssc_spi *spi;
const struct lantiq_ssc_hwcfg *hwcfg;
u32 id, supports_dma, revision;
unsigned int num_cs;
int err;
hwcfg = of_device_get_match_data(dev);
host = spi_alloc_host(dev, sizeof(struct lantiq_ssc_spi));
if (!host)
return -ENOMEM;
spi = spi_controller_get_devdata(host);
spi->host = host;
spi->dev = dev;
spi->hwcfg = hwcfg;
platform_set_drvdata(pdev, spi);
spi->regbase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(spi->regbase)) {
err = PTR_ERR(spi->regbase);
goto err_host_put;
}
err = hwcfg->cfg_irq(pdev, spi);
if (err)
goto err_host_put;
spi->spi_clk = devm_clk_get(dev, "gate");
if (IS_ERR(spi->spi_clk)) {
err = PTR_ERR(spi->spi_clk);
goto err_host_put;
}
err = clk_prepare_enable(spi->spi_clk);
if (err)
goto err_host_put;
/*
* Use the old clk_get_fpi() function on the Lantiq platform until it
* supports the common clock framework.
*/
#if defined(CONFIG_LANTIQ) && !defined(CONFIG_COMMON_CLK)
spi->fpi_clk = clk_get_fpi();
#else
spi->fpi_clk = clk_get(dev, "freq");
#endif
if (IS_ERR(spi->fpi_clk)) {
err = PTR_ERR(spi->fpi_clk);
goto err_clk_disable;
}
num_cs = 8;
of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);
spi->base_cs = 1;
of_property_read_u32(pdev->dev.of_node, "base-cs", &spi->base_cs);
spin_lock_init(&spi->lock);
spi->bits_per_word = 8;
spi->speed_hz = 0;
host->dev.of_node = pdev->dev.of_node;
host->num_chipselect = num_cs;
host->use_gpio_descriptors = true;
host->setup = lantiq_ssc_setup;
host->set_cs = lantiq_ssc_set_cs;
host->handle_err = lantiq_ssc_handle_err;
host->prepare_message = lantiq_ssc_prepare_message;
host->unprepare_message = lantiq_ssc_unprepare_message;
host->transfer_one = lantiq_ssc_transfer_one;
host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_CS_HIGH |
SPI_LOOP;
host->bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 8) |
SPI_BPW_MASK(16) | SPI_BPW_MASK(32);
spi->wq = alloc_ordered_workqueue(dev_name(dev), WQ_MEM_RECLAIM);
if (!spi->wq) {
err = -ENOMEM;
goto err_clk_put;
}
INIT_WORK(&spi->work, lantiq_ssc_bussy_work);
id = lantiq_ssc_readl(spi, LTQ_SPI_ID);
spi->tx_fifo_size = (id >> LTQ_SPI_ID_TXFS_S) & hwcfg->fifo_size_mask;
spi->rx_fifo_size = (id >> LTQ_SPI_ID_RXFS_S) & hwcfg->fifo_size_mask;
supports_dma = (id & LTQ_SPI_ID_CFG_M) >> LTQ_SPI_ID_CFG_S;
revision = id & LTQ_SPI_ID_REV_M;
lantiq_ssc_hw_init(spi);
dev_info(dev,
"Lantiq SSC SPI controller (Rev %i, TXFS %u, RXFS %u, DMA %u)\n",
revision, spi->tx_fifo_size, spi->rx_fifo_size, supports_dma);
err = devm_spi_register_controller(dev, host);
if (err) {
dev_err(dev, "failed to register spi host\n");
goto err_wq_destroy;
}
return 0;
err_wq_destroy:
destroy_workqueue(spi->wq);
err_clk_put:
clk_put(spi->fpi_clk);
err_clk_disable:
clk_disable_unprepare(spi->spi_clk);
err_host_put:
spi_controller_put(host);
return err;
}
static void lantiq_ssc_remove(struct platform_device *pdev)
{
struct lantiq_ssc_spi *spi = platform_get_drvdata(pdev);
lantiq_ssc_writel(spi, 0, LTQ_SPI_IRNEN);
lantiq_ssc_writel(spi, 0, LTQ_SPI_CLC);
rx_fifo_flush(spi);
tx_fifo_flush(spi);
hw_enter_config_mode(spi);
destroy_workqueue(spi->wq);
clk_disable_unprepare(spi->spi_clk);
clk_put(spi->fpi_clk);
}
static struct platform_driver lantiq_ssc_driver = {
.probe = lantiq_ssc_probe,
.remove_new = lantiq_ssc_remove,
.driver = {
.name = "spi-lantiq-ssc",
.of_match_table = lantiq_ssc_match,
},
};
module_platform_driver(lantiq_ssc_driver);
MODULE_DESCRIPTION("Lantiq SSC SPI controller driver");
MODULE_AUTHOR("Daniel Schwierzeck <[email protected]>");
MODULE_AUTHOR("Hauke Mehrtens <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:spi-lantiq-ssc");
| linux-master | drivers/spi/spi-lantiq-ssc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Xilinx Zynq UltraScale+ MPSoC Quad-SPI (QSPI) controller driver
* (master mode only)
*
* Copyright (C) 2009 - 2015 Xilinx, Inc.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/firmware/xlnx-zynqmp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/spi/spi-mem.h>
/* Generic QSPI register offsets */
#define GQSPI_CONFIG_OFST 0x00000100
#define GQSPI_ISR_OFST 0x00000104
#define GQSPI_IDR_OFST 0x0000010C
#define GQSPI_IER_OFST 0x00000108
#define GQSPI_IMASK_OFST 0x00000110
#define GQSPI_EN_OFST 0x00000114
#define GQSPI_TXD_OFST 0x0000011C
#define GQSPI_RXD_OFST 0x00000120
#define GQSPI_TX_THRESHOLD_OFST 0x00000128
#define GQSPI_RX_THRESHOLD_OFST 0x0000012C
#define IOU_TAPDLY_BYPASS_OFST 0x0000003C
#define GQSPI_LPBK_DLY_ADJ_OFST 0x00000138
#define GQSPI_GEN_FIFO_OFST 0x00000140
#define GQSPI_SEL_OFST 0x00000144
#define GQSPI_GF_THRESHOLD_OFST 0x00000150
#define GQSPI_FIFO_CTRL_OFST 0x0000014C
#define GQSPI_QSPIDMA_DST_CTRL_OFST 0x0000080C
#define GQSPI_QSPIDMA_DST_SIZE_OFST 0x00000804
#define GQSPI_QSPIDMA_DST_STS_OFST 0x00000808
#define GQSPI_QSPIDMA_DST_I_STS_OFST 0x00000814
#define GQSPI_QSPIDMA_DST_I_EN_OFST 0x00000818
#define GQSPI_QSPIDMA_DST_I_DIS_OFST 0x0000081C
#define GQSPI_QSPIDMA_DST_I_MASK_OFST 0x00000820
#define GQSPI_QSPIDMA_DST_ADDR_OFST 0x00000800
#define GQSPI_QSPIDMA_DST_ADDR_MSB_OFST 0x00000828
#define GQSPI_DATA_DLY_ADJ_OFST 0x000001F8
/* GQSPI register bit masks */
#define GQSPI_SEL_MASK 0x00000001
#define GQSPI_EN_MASK 0x00000001
#define GQSPI_LPBK_DLY_ADJ_USE_LPBK_MASK 0x00000020
#define GQSPI_ISR_WR_TO_CLR_MASK 0x00000002
#define GQSPI_IDR_ALL_MASK 0x00000FBE
#define GQSPI_CFG_MODE_EN_MASK 0xC0000000
#define GQSPI_CFG_GEN_FIFO_START_MODE_MASK 0x20000000
#define GQSPI_CFG_ENDIAN_MASK 0x04000000
#define GQSPI_CFG_EN_POLL_TO_MASK 0x00100000
#define GQSPI_CFG_WP_HOLD_MASK 0x00080000
#define GQSPI_CFG_BAUD_RATE_DIV_MASK 0x00000038
#define GQSPI_CFG_CLK_PHA_MASK 0x00000004
#define GQSPI_CFG_CLK_POL_MASK 0x00000002
#define GQSPI_CFG_START_GEN_FIFO_MASK 0x10000000
#define GQSPI_GENFIFO_IMM_DATA_MASK 0x000000FF
#define GQSPI_GENFIFO_DATA_XFER 0x00000100
#define GQSPI_GENFIFO_EXP 0x00000200
#define GQSPI_GENFIFO_MODE_SPI 0x00000400
#define GQSPI_GENFIFO_MODE_DUALSPI 0x00000800
#define GQSPI_GENFIFO_MODE_QUADSPI 0x00000C00
#define GQSPI_GENFIFO_MODE_MASK 0x00000C00
#define GQSPI_GENFIFO_CS_LOWER 0x00001000
#define GQSPI_GENFIFO_CS_UPPER 0x00002000
#define GQSPI_GENFIFO_BUS_LOWER 0x00004000
#define GQSPI_GENFIFO_BUS_UPPER 0x00008000
#define GQSPI_GENFIFO_BUS_BOTH 0x0000C000
#define GQSPI_GENFIFO_BUS_MASK 0x0000C000
#define GQSPI_GENFIFO_TX 0x00010000
#define GQSPI_GENFIFO_RX 0x00020000
#define GQSPI_GENFIFO_STRIPE 0x00040000
#define GQSPI_GENFIFO_POLL 0x00080000
#define GQSPI_GENFIFO_EXP_START 0x00000100
#define GQSPI_FIFO_CTRL_RST_RX_FIFO_MASK 0x00000004
#define GQSPI_FIFO_CTRL_RST_TX_FIFO_MASK 0x00000002
#define GQSPI_FIFO_CTRL_RST_GEN_FIFO_MASK 0x00000001
#define GQSPI_ISR_RXEMPTY_MASK 0x00000800
#define GQSPI_ISR_GENFIFOFULL_MASK 0x00000400
#define GQSPI_ISR_GENFIFONOT_FULL_MASK 0x00000200
#define GQSPI_ISR_TXEMPTY_MASK 0x00000100
#define GQSPI_ISR_GENFIFOEMPTY_MASK 0x00000080
#define GQSPI_ISR_RXFULL_MASK 0x00000020
#define GQSPI_ISR_RXNEMPTY_MASK 0x00000010
#define GQSPI_ISR_TXFULL_MASK 0x00000008
#define GQSPI_ISR_TXNOT_FULL_MASK 0x00000004
#define GQSPI_ISR_POLL_TIME_EXPIRE_MASK 0x00000002
#define GQSPI_IER_TXNOT_FULL_MASK 0x00000004
#define GQSPI_IER_RXEMPTY_MASK 0x00000800
#define GQSPI_IER_POLL_TIME_EXPIRE_MASK 0x00000002
#define GQSPI_IER_RXNEMPTY_MASK 0x00000010
#define GQSPI_IER_GENFIFOEMPTY_MASK 0x00000080
#define GQSPI_IER_TXEMPTY_MASK 0x00000100
#define GQSPI_QSPIDMA_DST_INTR_ALL_MASK 0x000000FE
#define GQSPI_QSPIDMA_DST_STS_WTC 0x0000E000
#define GQSPI_CFG_MODE_EN_DMA_MASK 0x80000000
#define GQSPI_ISR_IDR_MASK 0x00000994
#define GQSPI_QSPIDMA_DST_I_EN_DONE_MASK 0x00000002
#define GQSPI_QSPIDMA_DST_I_STS_DONE_MASK 0x00000002
#define GQSPI_IRQ_MASK 0x00000980
#define GQSPI_CFG_BAUD_RATE_DIV_SHIFT 3
#define GQSPI_GENFIFO_CS_SETUP 0x4
#define GQSPI_GENFIFO_CS_HOLD 0x3
#define GQSPI_TXD_DEPTH 64
#define GQSPI_RX_FIFO_THRESHOLD 32
#define GQSPI_RX_FIFO_FILL (GQSPI_RX_FIFO_THRESHOLD * 4)
#define GQSPI_TX_FIFO_THRESHOLD_RESET_VAL 32
#define GQSPI_TX_FIFO_FILL (GQSPI_TXD_DEPTH -\
GQSPI_TX_FIFO_THRESHOLD_RESET_VAL)
#define GQSPI_GEN_FIFO_THRESHOLD_RESET_VAL 0X10
#define GQSPI_QSPIDMA_DST_CTRL_RESET_VAL 0x803FFA00
#define GQSPI_SELECT_FLASH_CS_LOWER 0x1
#define GQSPI_SELECT_FLASH_CS_UPPER 0x2
#define GQSPI_SELECT_FLASH_CS_BOTH 0x3
#define GQSPI_SELECT_FLASH_BUS_LOWER 0x1
#define GQSPI_SELECT_FLASH_BUS_UPPER 0x2
#define GQSPI_SELECT_FLASH_BUS_BOTH 0x3
#define GQSPI_BAUD_DIV_MAX 7 /* Baud rate divisor maximum */
#define GQSPI_BAUD_DIV_SHIFT 2 /* Baud rate divisor shift */
#define GQSPI_SELECT_MODE_SPI 0x1
#define GQSPI_SELECT_MODE_DUALSPI 0x2
#define GQSPI_SELECT_MODE_QUADSPI 0x4
#define GQSPI_DMA_UNALIGN 0x3
#define GQSPI_DEFAULT_NUM_CS 1 /* Default number of chip selects */
#define GQSPI_MAX_NUM_CS 2 /* Maximum number of chip selects */
#define GQSPI_USE_DATA_DLY 0x1
#define GQSPI_USE_DATA_DLY_SHIFT 31
#define GQSPI_DATA_DLY_ADJ_VALUE 0x2
#define GQSPI_DATA_DLY_ADJ_SHIFT 28
#define GQSPI_LPBK_DLY_ADJ_DLY_1 0x1
#define GQSPI_LPBK_DLY_ADJ_DLY_1_SHIFT 0x3
#define TAP_DLY_BYPASS_LQSPI_RX_VALUE 0x1
#define TAP_DLY_BYPASS_LQSPI_RX_SHIFT 0x2
/* Quirk flag: set on Versal (controller has its own tap-delay registers), clear on ZynqMP */
#define QSPI_QUIRK_HAS_TAPDELAY BIT(0)
#define GQSPI_FREQ_37_5MHZ 37500000
#define GQSPI_FREQ_40MHZ 40000000
#define GQSPI_FREQ_100MHZ 100000000
#define GQSPI_FREQ_150MHZ 150000000
#define SPI_AUTOSUSPEND_TIMEOUT 3000
enum mode_type {GQSPI_MODE_IO, GQSPI_MODE_DMA};
/**
* struct qspi_platform_data - zynqmp qspi platform data structure
* @quirks: Flags used to identify the platform
*/
struct qspi_platform_data {
u32 quirks;
};
/**
* struct zynqmp_qspi - Defines qspi driver instance
* @ctlr: Pointer to the spi controller information
* @regs: Virtual address of the QSPI controller registers
* @refclk: Pointer to the peripheral clock
* @pclk: Pointer to the APB clock
* @irq: IRQ number
* @dev: Pointer to struct device
* @txbuf: Pointer to the TX buffer
* @rxbuf: Pointer to the RX buffer
* @bytes_to_transfer: Number of bytes left to transfer
* @bytes_to_receive: Number of bytes left to receive
* @genfifocs: Used for chip select
* @genfifobus: Used to select the upper or lower bus
* @dma_rx_bytes: Remaining bytes to receive by DMA mode
* @dma_addr: DMA address after mapping the kernel buffer
* @genfifoentry: Used for storing the genfifoentry instruction.
* @mode: Defines the mode in which QSPI is operating
* @data_completion: completion structure
* @op_lock: Operational lock
* @speed_hz: Current SPI bus clock speed in hz
* @has_tapdelay: True if the controller has its own tap-delay registers
*/
struct zynqmp_qspi {
struct spi_controller *ctlr;
void __iomem *regs;
struct clk *refclk;
struct clk *pclk;
int irq;
struct device *dev;
const void *txbuf;
void *rxbuf;
int bytes_to_transfer;
int bytes_to_receive;
u32 genfifocs;
u32 genfifobus;
u32 dma_rx_bytes;
dma_addr_t dma_addr;
u32 genfifoentry;
enum mode_type mode;
struct completion data_completion;
struct mutex op_lock;
u32 speed_hz;
bool has_tapdelay;
};
/**
* zynqmp_gqspi_read - For GQSPI controller read operation
* @xqspi: Pointer to the zynqmp_qspi structure
* @offset: Offset from where to read
* Return: Value at the offset
*/
static u32 zynqmp_gqspi_read(struct zynqmp_qspi *xqspi, u32 offset)
{
return readl_relaxed(xqspi->regs + offset);
}
/**
* zynqmp_gqspi_write - For GQSPI controller write operation
* @xqspi: Pointer to the zynqmp_qspi structure
* @offset: Offset where to write
* @val: Value to be written
*/
static inline void zynqmp_gqspi_write(struct zynqmp_qspi *xqspi, u32 offset,
u32 val)
{
writel_relaxed(val, (xqspi->regs + offset));
}
/**
* zynqmp_gqspi_selectslave - For selection of slave device
* @instanceptr: Pointer to the zynqmp_qspi structure
* @slavecs: For chip select
* @slavebus: Selects which bus is used - upper or lower
*/
static void zynqmp_gqspi_selectslave(struct zynqmp_qspi *instanceptr,
u8 slavecs, u8 slavebus)
{
/*
* Bus and CS lines selected here will be updated in the instance and
* used for subsequent GENFIFO entries during transfer.
*/
/* Choose slave select line */
switch (slavecs) {
case GQSPI_SELECT_FLASH_CS_BOTH:
instanceptr->genfifocs = GQSPI_GENFIFO_CS_LOWER |
GQSPI_GENFIFO_CS_UPPER;
break;
case GQSPI_SELECT_FLASH_CS_UPPER:
instanceptr->genfifocs = GQSPI_GENFIFO_CS_UPPER;
break;
case GQSPI_SELECT_FLASH_CS_LOWER:
instanceptr->genfifocs = GQSPI_GENFIFO_CS_LOWER;
break;
default:
dev_warn(instanceptr->dev, "Invalid slave select\n");
}
/* Choose the bus */
switch (slavebus) {
case GQSPI_SELECT_FLASH_BUS_BOTH:
instanceptr->genfifobus = GQSPI_GENFIFO_BUS_LOWER |
GQSPI_GENFIFO_BUS_UPPER;
break;
case GQSPI_SELECT_FLASH_BUS_UPPER:
instanceptr->genfifobus = GQSPI_GENFIFO_BUS_UPPER;
break;
case GQSPI_SELECT_FLASH_BUS_LOWER:
instanceptr->genfifobus = GQSPI_GENFIFO_BUS_LOWER;
break;
default:
dev_warn(instanceptr->dev, "Invalid slave bus\n");
}
}
/**
* zynqmp_qspi_set_tapdelay - Configure the QSPI tap delays
* @xqspi: Pointer to the zynqmp_qspi structure
* @baudrateval: Baud rate divisor value to configure the tap delays for
*/
static void zynqmp_qspi_set_tapdelay(struct zynqmp_qspi *xqspi, u32 baudrateval)
{
u32 tapdlybypass = 0, lpbkdlyadj = 0, datadlyadj = 0, clk_rate;
u32 reqhz = 0;
clk_rate = clk_get_rate(xqspi->refclk);
reqhz = (clk_rate / (GQSPI_BAUD_DIV_SHIFT << baudrateval));
if (!xqspi->has_tapdelay) {
if (reqhz <= GQSPI_FREQ_40MHZ) {
zynqmp_pm_set_tapdelay_bypass(PM_TAPDELAY_QSPI,
PM_TAPDELAY_BYPASS_ENABLE);
} else if (reqhz <= GQSPI_FREQ_100MHZ) {
zynqmp_pm_set_tapdelay_bypass(PM_TAPDELAY_QSPI,
PM_TAPDELAY_BYPASS_ENABLE);
lpbkdlyadj |= (GQSPI_LPBK_DLY_ADJ_USE_LPBK_MASK);
datadlyadj |= ((GQSPI_USE_DATA_DLY <<
GQSPI_USE_DATA_DLY_SHIFT)
| (GQSPI_DATA_DLY_ADJ_VALUE <<
GQSPI_DATA_DLY_ADJ_SHIFT));
} else if (reqhz <= GQSPI_FREQ_150MHZ) {
lpbkdlyadj |= GQSPI_LPBK_DLY_ADJ_USE_LPBK_MASK;
}
} else {
if (reqhz <= GQSPI_FREQ_37_5MHZ) {
tapdlybypass |= (TAP_DLY_BYPASS_LQSPI_RX_VALUE <<
TAP_DLY_BYPASS_LQSPI_RX_SHIFT);
} else if (reqhz <= GQSPI_FREQ_100MHZ) {
tapdlybypass |= (TAP_DLY_BYPASS_LQSPI_RX_VALUE <<
TAP_DLY_BYPASS_LQSPI_RX_SHIFT);
lpbkdlyadj |= (GQSPI_LPBK_DLY_ADJ_USE_LPBK_MASK);
datadlyadj |= (GQSPI_USE_DATA_DLY <<
GQSPI_USE_DATA_DLY_SHIFT);
} else if (reqhz <= GQSPI_FREQ_150MHZ) {
lpbkdlyadj |= (GQSPI_LPBK_DLY_ADJ_USE_LPBK_MASK
| (GQSPI_LPBK_DLY_ADJ_DLY_1 <<
GQSPI_LPBK_DLY_ADJ_DLY_1_SHIFT));
}
zynqmp_gqspi_write(xqspi,
IOU_TAPDLY_BYPASS_OFST, tapdlybypass);
}
zynqmp_gqspi_write(xqspi, GQSPI_LPBK_DLY_ADJ_OFST, lpbkdlyadj);
zynqmp_gqspi_write(xqspi, GQSPI_DATA_DLY_ADJ_OFST, datadlyadj);
}
/**
* zynqmp_qspi_init_hw - Initialize the hardware
* @xqspi: Pointer to the zynqmp_qspi structure
*
* The default settings of the QSPI controller's configurable parameters on
* reset are
* - Master mode
* - TX threshold set to 1
* - RX threshold set to 1
* - Flash memory interface mode enabled
* This function performs the following actions
* - Disable and clear all the interrupts
* - Enable manual slave select
* - Enable manual start
* - Deselect all the chip select lines
* - Set the little endian mode of TX FIFO
* - Set clock phase
* - Set clock polarity and
* - Enable the QSPI controller
*/
static void zynqmp_qspi_init_hw(struct zynqmp_qspi *xqspi)
{
u32 config_reg, baud_rate_val = 0;
ulong clk_rate;
/* Select the GQSPI mode */
zynqmp_gqspi_write(xqspi, GQSPI_SEL_OFST, GQSPI_SEL_MASK);
/* Clear and disable interrupts */
zynqmp_gqspi_write(xqspi, GQSPI_ISR_OFST,
zynqmp_gqspi_read(xqspi, GQSPI_ISR_OFST) |
GQSPI_ISR_WR_TO_CLR_MASK);
/* Clear the DMA STS */
zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_I_STS_OFST,
zynqmp_gqspi_read(xqspi,
GQSPI_QSPIDMA_DST_I_STS_OFST));
zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_STS_OFST,
zynqmp_gqspi_read(xqspi,
GQSPI_QSPIDMA_DST_STS_OFST) |
GQSPI_QSPIDMA_DST_STS_WTC);
zynqmp_gqspi_write(xqspi, GQSPI_IDR_OFST, GQSPI_IDR_ALL_MASK);
zynqmp_gqspi_write(xqspi,
GQSPI_QSPIDMA_DST_I_DIS_OFST,
GQSPI_QSPIDMA_DST_INTR_ALL_MASK);
/* Disable the GQSPI */
zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, 0x0);
config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
/* Manual start */
config_reg |= GQSPI_CFG_GEN_FIFO_START_MODE_MASK;
/* Little endian by default */
config_reg &= ~GQSPI_CFG_ENDIAN_MASK;
/* Disable poll time out */
config_reg &= ~GQSPI_CFG_EN_POLL_TO_MASK;
/* Set hold bit */
config_reg |= GQSPI_CFG_WP_HOLD_MASK;
/* Clear pre-scaler by default */
config_reg &= ~GQSPI_CFG_BAUD_RATE_DIV_MASK;
/* Set CPHA */
if (xqspi->ctlr->mode_bits & SPI_CPHA)
config_reg |= GQSPI_CFG_CLK_PHA_MASK;
else
config_reg &= ~GQSPI_CFG_CLK_PHA_MASK;
/* Set CPOL */
if (xqspi->ctlr->mode_bits & SPI_CPOL)
config_reg |= GQSPI_CFG_CLK_POL_MASK;
else
config_reg &= ~GQSPI_CFG_CLK_POL_MASK;
/* Set the clock frequency */
clk_rate = clk_get_rate(xqspi->refclk);
while ((baud_rate_val < GQSPI_BAUD_DIV_MAX) &&
(clk_rate /
(GQSPI_BAUD_DIV_SHIFT << baud_rate_val)) > xqspi->speed_hz)
baud_rate_val++;
config_reg &= ~GQSPI_CFG_BAUD_RATE_DIV_MASK;
config_reg |= (baud_rate_val << GQSPI_CFG_BAUD_RATE_DIV_SHIFT);
zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
/* Set the tapdelay for clock frequency */
zynqmp_qspi_set_tapdelay(xqspi, baud_rate_val);
/* Clear the TX and RX FIFO */
zynqmp_gqspi_write(xqspi, GQSPI_FIFO_CTRL_OFST,
GQSPI_FIFO_CTRL_RST_RX_FIFO_MASK |
GQSPI_FIFO_CTRL_RST_TX_FIFO_MASK |
GQSPI_FIFO_CTRL_RST_GEN_FIFO_MASK);
/* Reset thresholds */
zynqmp_gqspi_write(xqspi, GQSPI_TX_THRESHOLD_OFST,
GQSPI_TX_FIFO_THRESHOLD_RESET_VAL);
zynqmp_gqspi_write(xqspi, GQSPI_RX_THRESHOLD_OFST,
GQSPI_RX_FIFO_THRESHOLD);
zynqmp_gqspi_write(xqspi, GQSPI_GF_THRESHOLD_OFST,
GQSPI_GEN_FIFO_THRESHOLD_RESET_VAL);
zynqmp_gqspi_selectslave(xqspi,
GQSPI_SELECT_FLASH_CS_LOWER,
GQSPI_SELECT_FLASH_BUS_LOWER);
/* Initialize DMA */
zynqmp_gqspi_write(xqspi,
GQSPI_QSPIDMA_DST_CTRL_OFST,
GQSPI_QSPIDMA_DST_CTRL_RESET_VAL);
/* Enable the GQSPI */
zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, GQSPI_EN_MASK);
}
/**
* zynqmp_qspi_copy_read_data - Copy data to RX buffer
* @xqspi: Pointer to the zynqmp_qspi structure
* @data: The variable where data is stored
* @size: Number of bytes to be copied from data to RX buffer
*/
static void zynqmp_qspi_copy_read_data(struct zynqmp_qspi *xqspi,
ulong data, u8 size)
{
memcpy(xqspi->rxbuf, &data, size);
xqspi->rxbuf += size;
xqspi->bytes_to_receive -= size;
}
/**
* zynqmp_qspi_chipselect - Select or deselect the chip select line
* @qspi: Pointer to the spi_device structure
* @is_high: Select (0) or deselect (1) the chip select line
*/
static void zynqmp_qspi_chipselect(struct spi_device *qspi, bool is_high)
{
struct zynqmp_qspi *xqspi = spi_master_get_devdata(qspi->master);
ulong timeout;
u32 genfifoentry = 0, statusreg;
genfifoentry |= GQSPI_GENFIFO_MODE_SPI;
if (!is_high) {
if (!spi_get_chipselect(qspi, 0)) {
xqspi->genfifobus = GQSPI_GENFIFO_BUS_LOWER;
xqspi->genfifocs = GQSPI_GENFIFO_CS_LOWER;
} else {
xqspi->genfifobus = GQSPI_GENFIFO_BUS_UPPER;
xqspi->genfifocs = GQSPI_GENFIFO_CS_UPPER;
}
genfifoentry |= xqspi->genfifobus;
genfifoentry |= xqspi->genfifocs;
genfifoentry |= GQSPI_GENFIFO_CS_SETUP;
} else {
genfifoentry |= GQSPI_GENFIFO_CS_HOLD;
}
zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, genfifoentry);
/* Manually start the generic FIFO command */
zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST) |
GQSPI_CFG_START_GEN_FIFO_MASK);
timeout = jiffies + msecs_to_jiffies(1000);
/* Wait until the generic FIFO command is empty */
do {
statusreg = zynqmp_gqspi_read(xqspi, GQSPI_ISR_OFST);
if ((statusreg & GQSPI_ISR_GENFIFOEMPTY_MASK) &&
(statusreg & GQSPI_ISR_TXEMPTY_MASK))
break;
cpu_relax();
} while (!time_after_eq(jiffies, timeout));
if (time_after_eq(jiffies, timeout))
dev_err(xqspi->dev, "Chip select timed out\n");
}
/**
* zynqmp_qspi_selectspimode - Selects SPI mode - x1 or x2 or x4.
* @xqspi: xqspi is a pointer to the GQSPI instance
* @spimode: spimode - SPI or DUAL or QUAD.
* Return: Mask to set desired SPI mode in GENFIFO entry.
*/
static inline u32 zynqmp_qspi_selectspimode(struct zynqmp_qspi *xqspi,
u8 spimode)
{
u32 mask = 0;
switch (spimode) {
case GQSPI_SELECT_MODE_DUALSPI:
mask = GQSPI_GENFIFO_MODE_DUALSPI;
break;
case GQSPI_SELECT_MODE_QUADSPI:
mask = GQSPI_GENFIFO_MODE_QUADSPI;
break;
case GQSPI_SELECT_MODE_SPI:
mask = GQSPI_GENFIFO_MODE_SPI;
break;
default:
dev_warn(xqspi->dev, "Invalid SPI mode\n");
}
return mask;
}
/**
* zynqmp_qspi_config_op - Configure QSPI controller for specified
* transfer
* @xqspi: Pointer to the zynqmp_qspi structure
* @qspi: Pointer to the spi_device structure
*
* Sets the operational mode of QSPI controller for the next QSPI transfer and
* sets the requested clock frequency.
*
* Return: Always 0
*
* Note:
* If the requested frequency cannot be matched exactly using the available
* pre-scaler values, the driver selects the highest clock frequency that is
* still below the requested frequency for the transfer.
*
* If the requested frequency is higher or lower than the range supported by
* the QSPI controller, the driver sets the highest or lowest frequency
* supported by the controller, respectively.
*/
static int zynqmp_qspi_config_op(struct zynqmp_qspi *xqspi,
struct spi_device *qspi)
{
ulong clk_rate;
u32 config_reg, req_speed_hz, baud_rate_val = 0;
req_speed_hz = qspi->max_speed_hz;
if (xqspi->speed_hz != req_speed_hz) {
xqspi->speed_hz = req_speed_hz;
/* Set the clock frequency */
/* If req_speed_hz == 0, default to lowest speed */
clk_rate = clk_get_rate(xqspi->refclk);
while ((baud_rate_val < GQSPI_BAUD_DIV_MAX) &&
(clk_rate /
(GQSPI_BAUD_DIV_SHIFT << baud_rate_val)) >
req_speed_hz)
baud_rate_val++;
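/*
* Illustrative example (assumed rates, not from the original source): with
* a 300 MHz refclk and a 50 MHz request the loop stops at baud_rate_val = 2,
* selecting 300 MHz / (2 << 2) = 37.5 MHz, the highest rate not above the
* request.
*/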
config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
config_reg &= ~GQSPI_CFG_BAUD_RATE_DIV_MASK;
config_reg |= (baud_rate_val << GQSPI_CFG_BAUD_RATE_DIV_SHIFT);
zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
zynqmp_qspi_set_tapdelay(xqspi, baud_rate_val);
}
return 0;
}
/**
* zynqmp_qspi_setup_op - Configure the QSPI controller
* @qspi: Pointer to the spi_device structure
*
* Sets the operational mode of QSPI controller for the next QSPI transfer,
* baud rate and divisor value to setup the requested qspi clock.
*
* Return: 0 on success; error value otherwise.
*/
static int zynqmp_qspi_setup_op(struct spi_device *qspi)
{
struct spi_controller *ctlr = qspi->master;
struct zynqmp_qspi *xqspi = spi_controller_get_devdata(ctlr);
if (ctlr->busy)
return -EBUSY;
zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, GQSPI_EN_MASK);
return 0;
}
/**
* zynqmp_qspi_filltxfifo - Fill the TX FIFO with data from the TX buffer,
*                          as long as there is room in the FIFO and bytes
*                          remain to be transmitted.
* @xqspi: Pointer to the zynqmp_qspi structure
* @size: Number of bytes to be copied from TX buffer to TX FIFO
*/
static void zynqmp_qspi_filltxfifo(struct zynqmp_qspi *xqspi, int size)
{
u32 count = 0, intermediate;
while ((xqspi->bytes_to_transfer > 0) && (count < size) && (xqspi->txbuf)) {
if (xqspi->bytes_to_transfer >= 4) {
memcpy(&intermediate, xqspi->txbuf, 4);
xqspi->txbuf += 4;
xqspi->bytes_to_transfer -= 4;
count += 4;
} else {
memcpy(&intermediate, xqspi->txbuf,
xqspi->bytes_to_transfer);
xqspi->txbuf += xqspi->bytes_to_transfer;
count += xqspi->bytes_to_transfer;
xqspi->bytes_to_transfer = 0;
}
zynqmp_gqspi_write(xqspi, GQSPI_TXD_OFST, intermediate);
}
}
/**
* zynqmp_qspi_readrxfifo - Drain the RX FIFO into the RX buffer, as long as
*                          data and buffer space remain.
* @xqspi: Pointer to the zynqmp_qspi structure
* @size: Maximum number of bytes to copy from the RX FIFO to the RX buffer
*/
static void zynqmp_qspi_readrxfifo(struct zynqmp_qspi *xqspi, u32 size)
{
ulong data;
int count = 0;
while ((count < size) && (xqspi->bytes_to_receive > 0)) {
if (xqspi->bytes_to_receive >= 4) {
(*(u32 *)xqspi->rxbuf) =
zynqmp_gqspi_read(xqspi, GQSPI_RXD_OFST);
xqspi->rxbuf += 4;
xqspi->bytes_to_receive -= 4;
count += 4;
} else {
data = zynqmp_gqspi_read(xqspi, GQSPI_RXD_OFST);
count += xqspi->bytes_to_receive;
zynqmp_qspi_copy_read_data(xqspi, data,
xqspi->bytes_to_receive);
xqspi->bytes_to_receive = 0;
}
}
}
/**
* zynqmp_qspi_fillgenfifo - Fills the GENFIFO.
* @xqspi: Pointer to the zynqmp_qspi structure
* @nbits: Transfer/Receive buswidth.
* @genfifoentry: Variable in which GENFIFO mask is saved
*/
static void zynqmp_qspi_fillgenfifo(struct zynqmp_qspi *xqspi, u8 nbits,
u32 genfifoentry)
{
u32 transfer_len = 0;
if (xqspi->txbuf) {
genfifoentry &= ~GQSPI_GENFIFO_RX;
genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
genfifoentry |= GQSPI_GENFIFO_TX;
transfer_len = xqspi->bytes_to_transfer;
} else if (xqspi->rxbuf) {
genfifoentry &= ~GQSPI_GENFIFO_TX;
genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
genfifoentry |= GQSPI_GENFIFO_RX;
if (xqspi->mode == GQSPI_MODE_DMA)
transfer_len = xqspi->dma_rx_bytes;
else
transfer_len = xqspi->bytes_to_receive;
} else {
/* Sending dummy cycles here */
genfifoentry &= ~(GQSPI_GENFIFO_TX | GQSPI_GENFIFO_RX);
genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
transfer_len = xqspi->bytes_to_transfer;
}
genfifoentry |= zynqmp_qspi_selectspimode(xqspi, nbits);
xqspi->genfifoentry = genfifoentry;
if ((transfer_len) < GQSPI_GENFIFO_IMM_DATA_MASK) {
genfifoentry &= ~GQSPI_GENFIFO_IMM_DATA_MASK;
genfifoentry |= transfer_len;
zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, genfifoentry);
} else {
int tempcount = transfer_len;
u32 exponent = 8; /* 2^8 = 256 */
u8 imm_data = tempcount & 0xFF;
tempcount &= ~(tempcount & 0xFF);
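/*
* Illustrative example (hypothetical length, not from the original source):
* for transfer_len = 0x1234 (4660 bytes), imm_data = 0x34 and the loop below
* emits exponent entries for 2^9 (512) and 2^12 (4096); 4096 + 512 + 0x34 =
* 4660.
*/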
if (tempcount != 0) {
/* Exponent entries */
genfifoentry |= GQSPI_GENFIFO_EXP;
while (tempcount != 0) {
if (tempcount & GQSPI_GENFIFO_EXP_START) {
genfifoentry &=
~GQSPI_GENFIFO_IMM_DATA_MASK;
genfifoentry |= exponent;
zynqmp_gqspi_write(xqspi,
GQSPI_GEN_FIFO_OFST,
genfifoentry);
}
tempcount = tempcount >> 1;
exponent++;
}
}
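/* Immediate entry for the remaining (< 256) bytes */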
if (imm_data != 0) {
genfifoentry &= ~GQSPI_GENFIFO_EXP;
genfifoentry &= ~GQSPI_GENFIFO_IMM_DATA_MASK;
genfifoentry |= (u8)(imm_data & 0xFF);
zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST,
genfifoentry);
}
}
if (xqspi->mode == GQSPI_MODE_IO && xqspi->rxbuf) {
/* Dummy generic FIFO entry */
zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, 0x0);
}
}
/**
* zynqmp_process_dma_irq - Handler for DMA done interrupt of QSPI
* controller
* @xqspi: zynqmp_qspi instance pointer
*
* This function handles DMA interrupt only.
*/
static void zynqmp_process_dma_irq(struct zynqmp_qspi *xqspi)
{
u32 config_reg, genfifoentry;
dma_unmap_single(xqspi->dev, xqspi->dma_addr,
xqspi->dma_rx_bytes, DMA_FROM_DEVICE);
xqspi->rxbuf += xqspi->dma_rx_bytes;
xqspi->bytes_to_receive -= xqspi->dma_rx_bytes;
xqspi->dma_rx_bytes = 0;
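/*
* Note: any residue left here is the bytes_to_receive % 4 remainder that
* zynqmp_qspi_setuprxdma() excluded from the DMA transfer; it is read back
* in IO mode below.
*/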
/* Disabling the DMA interrupts */
zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_I_DIS_OFST,
GQSPI_QSPIDMA_DST_I_EN_DONE_MASK);
if (xqspi->bytes_to_receive > 0) {
/* Switch to IO mode for the remaining bytes to receive */
config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
/* Initiate the transfer of remaining bytes */
genfifoentry = xqspi->genfifoentry;
genfifoentry |= xqspi->bytes_to_receive;
zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, genfifoentry);
/* Dummy generic FIFO entry */
zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, 0x0);
/* Manual start */
zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
(zynqmp_gqspi_read(xqspi,
GQSPI_CONFIG_OFST) |
GQSPI_CFG_START_GEN_FIFO_MASK));
/* Enable the RX interrupts for IO mode */
zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST,
GQSPI_IER_GENFIFOEMPTY_MASK |
GQSPI_IER_RXNEMPTY_MASK |
GQSPI_IER_RXEMPTY_MASK);
}
}
/**
* zynqmp_qspi_irq - Interrupt service routine of the QSPI controller
* @irq: IRQ number
* @dev_id: Pointer to the xqspi structure
*
* This function handles the TX, RX and DMA done interrupts: it refills the
* TX FIFO while data remains to be transmitted, drains the RX FIFO in IO
* mode, and completes the transfer once all data has been handled.
*
* Return: IRQ_HANDLED when interrupt is handled
* IRQ_NONE otherwise.
*/
static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id)
{
struct zynqmp_qspi *xqspi = (struct zynqmp_qspi *)dev_id;
irqreturn_t ret = IRQ_NONE;
u32 status, mask, dma_status = 0;
status = zynqmp_gqspi_read(xqspi, GQSPI_ISR_OFST);
zynqmp_gqspi_write(xqspi, GQSPI_ISR_OFST, status);
mask = (status & ~(zynqmp_gqspi_read(xqspi, GQSPI_IMASK_OFST)));
/* Read and clear DMA status */
if (xqspi->mode == GQSPI_MODE_DMA) {
dma_status =
zynqmp_gqspi_read(xqspi, GQSPI_QSPIDMA_DST_I_STS_OFST);
zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_I_STS_OFST,
dma_status);
}
if (mask & GQSPI_ISR_TXNOT_FULL_MASK) {
zynqmp_qspi_filltxfifo(xqspi, GQSPI_TX_FIFO_FILL);
ret = IRQ_HANDLED;
}
if (dma_status & GQSPI_QSPIDMA_DST_I_STS_DONE_MASK) {
zynqmp_process_dma_irq(xqspi);
ret = IRQ_HANDLED;
} else if (!(mask & GQSPI_IER_RXEMPTY_MASK) &&
(mask & GQSPI_IER_GENFIFOEMPTY_MASK)) {
zynqmp_qspi_readrxfifo(xqspi, GQSPI_RX_FIFO_FILL);
ret = IRQ_HANDLED;
}
if (xqspi->bytes_to_receive == 0 && xqspi->bytes_to_transfer == 0 &&
((status & GQSPI_IRQ_MASK) == GQSPI_IRQ_MASK)) {
zynqmp_gqspi_write(xqspi, GQSPI_IDR_OFST, GQSPI_ISR_IDR_MASK);
complete(&xqspi->data_completion);
ret = IRQ_HANDLED;
}
return ret;
}
/**
* zynqmp_qspi_setuprxdma - This function sets up the RX DMA operation
* @xqspi: xqspi is a pointer to the GQSPI instance.
*
* Return: 0 on success; error value otherwise.
*/
static int zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
{
u32 rx_bytes, rx_rem, config_reg;
dma_addr_t addr;
u64 dma_align = (u64)(uintptr_t)xqspi->rxbuf;
if (xqspi->bytes_to_receive < 8 ||
((dma_align & GQSPI_DMA_UNALIGN) != 0x0)) {
/* Setting to IO mode */
config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
xqspi->mode = GQSPI_MODE_IO;
xqspi->dma_rx_bytes = 0;
return 0;
}
rx_rem = xqspi->bytes_to_receive % 4;
rx_bytes = (xqspi->bytes_to_receive - rx_rem);
addr = dma_map_single(xqspi->dev, (void *)xqspi->rxbuf,
rx_bytes, DMA_FROM_DEVICE);
if (dma_mapping_error(xqspi->dev, addr)) {
dev_err(xqspi->dev, "ERR:rxdma:memory not mapped\n");
return -ENOMEM;
}
xqspi->dma_rx_bytes = rx_bytes;
xqspi->dma_addr = addr;
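/*
* Note: the destination address is split across two registers - the low
* 32 bits here and the upper bits of the 44-bit DMA address in the MSB
* register below.
*/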
zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_ADDR_OFST,
(u32)(addr & 0xffffffff));
addr = ((addr >> 16) >> 16);
zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_ADDR_MSB_OFST,
((u32)addr) & 0xfff);
/* Enabling the DMA mode */
config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
config_reg |= GQSPI_CFG_MODE_EN_DMA_MASK;
zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
/* Switch to DMA mode */
xqspi->mode = GQSPI_MODE_DMA;
/* Write the number of bytes to transfer */
zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_SIZE_OFST, rx_bytes);
return 0;
}
/**
* zynqmp_qspi_write_op - Set up the GENFIFO entries and fill the TX FIFO
*                        with as many bytes as possible.
* @xqspi: Pointer to the GQSPI instance.
* @tx_nbits: Transfer buswidth.
* @genfifoentry: GENFIFO mask to be used for this transfer
*/
static void zynqmp_qspi_write_op(struct zynqmp_qspi *xqspi, u8 tx_nbits,
u32 genfifoentry)
{
u32 config_reg;
zynqmp_qspi_fillgenfifo(xqspi, tx_nbits, genfifoentry);
zynqmp_qspi_filltxfifo(xqspi, GQSPI_TXD_DEPTH);
if (xqspi->mode == GQSPI_MODE_DMA) {
config_reg = zynqmp_gqspi_read(xqspi,
GQSPI_CONFIG_OFST);
config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
config_reg);
xqspi->mode = GQSPI_MODE_IO;
}
}
/**
* zynqmp_qspi_read_op - This function sets up the GENFIFO entries and
* RX DMA operation.
* @xqspi: xqspi is a pointer to the GQSPI instance.
* @rx_nbits: Receive buswidth.
* @genfifoentry: GENFIFO mask to be used for this transfer
*
* Return: 0 on success; error value otherwise.
*/
static int zynqmp_qspi_read_op(struct zynqmp_qspi *xqspi, u8 rx_nbits,
u32 genfifoentry)
{
int ret;
ret = zynqmp_qspi_setuprxdma(xqspi);
if (ret)
return ret;
zynqmp_qspi_fillgenfifo(xqspi, rx_nbits, genfifoentry);
return 0;
}
/**
* zynqmp_qspi_suspend - Suspend method for the QSPI driver
* @dev: Address of the platform_device structure
*
* This function stops the QSPI driver queue and disables the QSPI controller
*
* Return: Always 0
*/
static int __maybe_unused zynqmp_qspi_suspend(struct device *dev)
{
struct zynqmp_qspi *xqspi = dev_get_drvdata(dev);
struct spi_controller *ctlr = xqspi->ctlr;
int ret;
ret = spi_controller_suspend(ctlr);
if (ret)
return ret;
zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, 0x0);
return 0;
}
/**
* zynqmp_qspi_resume - Resume method for the QSPI driver
* @dev: Address of the platform_device structure
*
* The function starts the QSPI driver queue and initializes the QSPI
* controller
*
* Return: 0 on success; error value otherwise
*/
static int __maybe_unused zynqmp_qspi_resume(struct device *dev)
{
struct zynqmp_qspi *xqspi = dev_get_drvdata(dev);
struct spi_controller *ctlr = xqspi->ctlr;
zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, GQSPI_EN_MASK);
spi_controller_resume(ctlr);
return 0;
}
/**
* zynqmp_runtime_suspend - Runtime suspend method for the SPI driver
* @dev: Address of the platform_device structure
*
* This function disables the clocks
*
* Return: Always 0
*/
static int __maybe_unused zynqmp_runtime_suspend(struct device *dev)
{
struct zynqmp_qspi *xqspi = dev_get_drvdata(dev);
clk_disable_unprepare(xqspi->refclk);
clk_disable_unprepare(xqspi->pclk);
return 0;
}
/**
* zynqmp_runtime_resume - Runtime resume method for the SPI driver
* @dev: Address of the platform_device structure
*
* This function enables the clocks
*
* Return: 0 on success and error value on error
*/
static int __maybe_unused zynqmp_runtime_resume(struct device *dev)
{
struct zynqmp_qspi *xqspi = dev_get_drvdata(dev);
int ret;
ret = clk_prepare_enable(xqspi->pclk);
if (ret) {
dev_err(dev, "Cannot enable APB clock.\n");
return ret;
}
ret = clk_prepare_enable(xqspi->refclk);
if (ret) {
dev_err(dev, "Cannot enable device clock.\n");
clk_disable_unprepare(xqspi->pclk);
return ret;
}
return 0;
}
/**
* zynqmp_qspi_exec_op() - Initiates the QSPI transfer
* @mem: The SPI memory
* @op: The memory operation to execute
*
* Executes a memory operation.
*
* This function first selects the chip and starts the memory operation.
*
* Return: 0 in case of success, a negative error code otherwise.
*/
static int zynqmp_qspi_exec_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
struct zynqmp_qspi *xqspi = spi_controller_get_devdata
(mem->spi->master);
int err = 0, i;
u32 genfifoentry = 0;
u16 opcode = op->cmd.opcode;
u64 opaddr;
dev_dbg(xqspi->dev, "cmd:%#x mode:%d.%d.%d.%d\n",
op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
op->dummy.buswidth, op->data.buswidth);
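/*
* Note: the operation is issued phase by phase - opcode, address bytes,
* dummy cycles and finally the data phase - each queued as its own GENFIFO
* sequence while the chip select stays asserted.
*/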
mutex_lock(&xqspi->op_lock);
zynqmp_qspi_config_op(xqspi, mem->spi);
zynqmp_qspi_chipselect(mem->spi, false);
genfifoentry |= xqspi->genfifocs;
genfifoentry |= xqspi->genfifobus;
if (op->cmd.opcode) {
reinit_completion(&xqspi->data_completion);
xqspi->txbuf = &opcode;
xqspi->rxbuf = NULL;
xqspi->bytes_to_transfer = op->cmd.nbytes;
xqspi->bytes_to_receive = 0;
zynqmp_qspi_write_op(xqspi, op->cmd.buswidth, genfifoentry);
zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST) |
GQSPI_CFG_START_GEN_FIFO_MASK);
zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST,
GQSPI_IER_GENFIFOEMPTY_MASK |
GQSPI_IER_TXNOT_FULL_MASK);
if (!wait_for_completion_timeout
(&xqspi->data_completion, msecs_to_jiffies(1000))) {
err = -ETIMEDOUT;
goto return_err;
}
}
if (op->addr.nbytes) {
xqspi->txbuf = &opaddr;
for (i = 0; i < op->addr.nbytes; i++) {
*(((u8 *)xqspi->txbuf) + i) = op->addr.val >>
(8 * (op->addr.nbytes - i - 1));
}
reinit_completion(&xqspi->data_completion);
xqspi->rxbuf = NULL;
xqspi->bytes_to_transfer = op->addr.nbytes;
xqspi->bytes_to_receive = 0;
zynqmp_qspi_write_op(xqspi, op->addr.buswidth, genfifoentry);
zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
zynqmp_gqspi_read(xqspi,
GQSPI_CONFIG_OFST) |
GQSPI_CFG_START_GEN_FIFO_MASK);
zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST,
GQSPI_IER_TXEMPTY_MASK |
GQSPI_IER_GENFIFOEMPTY_MASK |
GQSPI_IER_TXNOT_FULL_MASK);
if (!wait_for_completion_timeout
(&xqspi->data_completion, msecs_to_jiffies(1000))) {
err = -ETIMEDOUT;
goto return_err;
}
}
if (op->dummy.nbytes) {
xqspi->txbuf = NULL;
xqspi->rxbuf = NULL;
/*
* xqspi->bytes_to_transfer here represents the dummy cycles
* that need to be sent.
*/
xqspi->bytes_to_transfer = op->dummy.nbytes * 8 / op->dummy.buswidth;
xqspi->bytes_to_receive = 0;
/*
* Using op->data.buswidth instead of op->dummy.buswidth here because
* we need to use it to configure the correct SPI mode.
*/
zynqmp_qspi_write_op(xqspi, op->data.buswidth,
genfifoentry);
zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST) |
GQSPI_CFG_START_GEN_FIFO_MASK);
}
if (op->data.nbytes) {
reinit_completion(&xqspi->data_completion);
if (op->data.dir == SPI_MEM_DATA_OUT) {
xqspi->txbuf = (u8 *)op->data.buf.out;
xqspi->rxbuf = NULL;
xqspi->bytes_to_transfer = op->data.nbytes;
xqspi->bytes_to_receive = 0;
zynqmp_qspi_write_op(xqspi, op->data.buswidth,
genfifoentry);
zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
zynqmp_gqspi_read
(xqspi, GQSPI_CONFIG_OFST) |
GQSPI_CFG_START_GEN_FIFO_MASK);
zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST,
GQSPI_IER_TXEMPTY_MASK |
GQSPI_IER_GENFIFOEMPTY_MASK |
GQSPI_IER_TXNOT_FULL_MASK);
} else {
xqspi->txbuf = NULL;
xqspi->rxbuf = (u8 *)op->data.buf.in;
xqspi->bytes_to_receive = op->data.nbytes;
xqspi->bytes_to_transfer = 0;
err = zynqmp_qspi_read_op(xqspi, op->data.buswidth,
genfifoentry);
if (err)
goto return_err;
zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
zynqmp_gqspi_read
(xqspi, GQSPI_CONFIG_OFST) |
GQSPI_CFG_START_GEN_FIFO_MASK);
if (xqspi->mode == GQSPI_MODE_DMA) {
zynqmp_gqspi_write
(xqspi, GQSPI_QSPIDMA_DST_I_EN_OFST,
GQSPI_QSPIDMA_DST_I_EN_DONE_MASK);
} else {
zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST,
GQSPI_IER_GENFIFOEMPTY_MASK |
GQSPI_IER_RXNEMPTY_MASK |
GQSPI_IER_RXEMPTY_MASK);
}
}
if (!wait_for_completion_timeout
(&xqspi->data_completion, msecs_to_jiffies(1000)))
err = -ETIMEDOUT;
}
return_err:
zynqmp_qspi_chipselect(mem->spi, true);
mutex_unlock(&xqspi->op_lock);
return err;
}
static const struct dev_pm_ops zynqmp_qspi_dev_pm_ops = {
SET_RUNTIME_PM_OPS(zynqmp_runtime_suspend,
zynqmp_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(zynqmp_qspi_suspend, zynqmp_qspi_resume)
};
static const struct qspi_platform_data versal_qspi_def = {
.quirks = QSPI_QUIRK_HAS_TAPDELAY,
};
static const struct of_device_id zynqmp_qspi_of_match[] = {
{ .compatible = "xlnx,zynqmp-qspi-1.0"},
{ .compatible = "xlnx,versal-qspi-1.0", .data = &versal_qspi_def },
{ /* End of table */ }
};
static const struct spi_controller_mem_ops zynqmp_qspi_mem_ops = {
.exec_op = zynqmp_qspi_exec_op,
};
/**
* zynqmp_qspi_probe - Probe method for the QSPI driver
* @pdev: Pointer to the platform_device structure
*
* This function initializes the driver data structures and the hardware.
*
* Return: 0 on success; error value otherwise
*/
static int zynqmp_qspi_probe(struct platform_device *pdev)
{
int ret = 0;
struct spi_controller *ctlr;
struct zynqmp_qspi *xqspi;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
u32 num_cs;
const struct qspi_platform_data *p_data;
ctlr = spi_alloc_master(&pdev->dev, sizeof(*xqspi));
if (!ctlr)
return -ENOMEM;
xqspi = spi_controller_get_devdata(ctlr);
xqspi->dev = dev;
xqspi->ctlr = ctlr;
platform_set_drvdata(pdev, xqspi);
p_data = of_device_get_match_data(&pdev->dev);
if (p_data && (p_data->quirks & QSPI_QUIRK_HAS_TAPDELAY))
xqspi->has_tapdelay = true;
xqspi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xqspi->regs)) {
ret = PTR_ERR(xqspi->regs);
goto remove_master;
}
xqspi->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(xqspi->pclk)) {
dev_err(dev, "pclk clock not found.\n");
ret = PTR_ERR(xqspi->pclk);
goto remove_master;
}
xqspi->refclk = devm_clk_get(&pdev->dev, "ref_clk");
if (IS_ERR(xqspi->refclk)) {
dev_err(dev, "ref_clk clock not found.\n");
ret = PTR_ERR(xqspi->refclk);
goto remove_master;
}
ret = clk_prepare_enable(xqspi->pclk);
if (ret) {
dev_err(dev, "Unable to enable APB clock.\n");
goto remove_master;
}
ret = clk_prepare_enable(xqspi->refclk);
if (ret) {
dev_err(dev, "Unable to enable device clock.\n");
goto clk_dis_pclk;
}
init_completion(&xqspi->data_completion);
mutex_init(&xqspi->op_lock);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to pm_runtime_get_sync: %d\n", ret);
goto clk_dis_all;
}
ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD |
SPI_TX_DUAL | SPI_TX_QUAD;
ctlr->max_speed_hz = clk_get_rate(xqspi->refclk) / 2;
xqspi->speed_hz = ctlr->max_speed_hz;
/* QSPI controller initializations */
zynqmp_qspi_init_hw(xqspi);
xqspi->irq = platform_get_irq(pdev, 0);
if (xqspi->irq < 0) {
ret = xqspi->irq;
goto clk_dis_all;
}
ret = devm_request_irq(&pdev->dev, xqspi->irq, zynqmp_qspi_irq,
0, pdev->name, xqspi);
if (ret != 0) {
ret = -ENXIO;
dev_err(dev, "request_irq failed\n");
goto clk_dis_all;
}
ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
if (ret)
goto clk_dis_all;
ret = of_property_read_u32(np, "num-cs", &num_cs);
if (ret < 0) {
ctlr->num_chipselect = GQSPI_DEFAULT_NUM_CS;
} else if (num_cs > GQSPI_MAX_NUM_CS) {
ret = -EINVAL;
dev_err(&pdev->dev, "only %d chip selects are available\n",
GQSPI_MAX_NUM_CS);
goto clk_dis_all;
} else {
ctlr->num_chipselect = num_cs;
}
ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
ctlr->mem_ops = &zynqmp_qspi_mem_ops;
ctlr->setup = zynqmp_qspi_setup_op;
ctlr->dev.of_node = np;
ctlr->auto_runtime_pm = true;
ret = devm_spi_register_controller(&pdev->dev, ctlr);
if (ret) {
dev_err(&pdev->dev, "spi_register_controller failed\n");
goto clk_dis_all;
}
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
return 0;
clk_dis_all:
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
clk_disable_unprepare(xqspi->refclk);
clk_dis_pclk:
clk_disable_unprepare(xqspi->pclk);
remove_master:
spi_controller_put(ctlr);
return ret;
}
/**
* zynqmp_qspi_remove - Remove method for the QSPI driver
* @pdev: Pointer to the platform_device structure
*
* This function is called if a device is physically removed from the system or
* if the driver module is being unloaded. It frees all resources allocated to
* the device.
*/
static void zynqmp_qspi_remove(struct platform_device *pdev)
{
struct zynqmp_qspi *xqspi = platform_get_drvdata(pdev);
pm_runtime_get_sync(&pdev->dev);
zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, 0x0);
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
clk_disable_unprepare(xqspi->refclk);
clk_disable_unprepare(xqspi->pclk);
}
MODULE_DEVICE_TABLE(of, zynqmp_qspi_of_match);
static struct platform_driver zynqmp_qspi_driver = {
.probe = zynqmp_qspi_probe,
.remove_new = zynqmp_qspi_remove,
.driver = {
.name = "zynqmp-qspi",
.of_match_table = zynqmp_qspi_of_match,
.pm = &zynqmp_qspi_dev_pm_ops,
},
};
module_platform_driver(zynqmp_qspi_driver);
MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx Zynqmp QSPI driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-zynqmp-gqspi.c |
/*
* SPI slave handler controlling system state
*
* This SPI slave handler allows remote control of system reboot, power off,
* halt, and suspend.
*
* Copyright (C) 2016-2017 Glider bvba
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Usage (assuming /dev/spidev2.0 corresponds to the SPI master on the remote
* system):
*
* # reboot='\x7c\x50'
* # poweroff='\x71\x3f'
* # halt='\x38\x76'
* # suspend='\x1b\x1b'
* # spidev_test -D /dev/spidev2.0 -p $suspend # or $reboot, $poweroff, $halt
*/
#include <linux/completion.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <linux/spi/spi.h>
/*
* The numbers are chosen to display something human-readable on two 7-segment
* displays connected to two 74HC595 shift registers
*/
#define CMD_REBOOT 0x7c50 /* rb */
#define CMD_POWEROFF 0x713f /* OF */
#define CMD_HALT 0x3876 /* HL */
#define CMD_SUSPEND 0x1b1b /* ZZ */
struct spi_slave_system_control_priv {
struct spi_device *spi;
struct completion finished;
struct spi_transfer xfer;
struct spi_message msg;
__be16 cmd;
};
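/*
* Note: each completed transfer delivers one 16-bit big-endian command; the
* completion handler acts on it and immediately resubmits the message,
* until spi_slave_abort() fails it and terminates the loop.
*/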
static
int spi_slave_system_control_submit(struct spi_slave_system_control_priv *priv);
static void spi_slave_system_control_complete(void *arg)
{
struct spi_slave_system_control_priv *priv = arg;
u16 cmd;
int ret;
if (priv->msg.status)
goto terminate;
cmd = be16_to_cpu(priv->cmd);
switch (cmd) {
case CMD_REBOOT:
dev_info(&priv->spi->dev, "Rebooting system...\n");
kernel_restart(NULL);
break;
case CMD_POWEROFF:
dev_info(&priv->spi->dev, "Powering off system...\n");
kernel_power_off();
break;
case CMD_HALT:
dev_info(&priv->spi->dev, "Halting system...\n");
kernel_halt();
break;
case CMD_SUSPEND:
dev_info(&priv->spi->dev, "Suspending system...\n");
pm_suspend(PM_SUSPEND_MEM);
break;
default:
dev_warn(&priv->spi->dev, "Unknown command 0x%x\n", cmd);
break;
}
ret = spi_slave_system_control_submit(priv);
if (ret)
goto terminate;
return;
terminate:
dev_info(&priv->spi->dev, "Terminating\n");
complete(&priv->finished);
}
static
int spi_slave_system_control_submit(struct spi_slave_system_control_priv *priv)
{
int ret;
spi_message_init_with_transfers(&priv->msg, &priv->xfer, 1);
priv->msg.complete = spi_slave_system_control_complete;
priv->msg.context = priv;
ret = spi_async(priv->spi, &priv->msg);
if (ret)
dev_err(&priv->spi->dev, "spi_async() failed %d\n", ret);
return ret;
}
static int spi_slave_system_control_probe(struct spi_device *spi)
{
struct spi_slave_system_control_priv *priv;
int ret;
priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->spi = spi;
init_completion(&priv->finished);
priv->xfer.rx_buf = &priv->cmd;
priv->xfer.len = sizeof(priv->cmd);
ret = spi_slave_system_control_submit(priv);
if (ret)
return ret;
spi_set_drvdata(spi, priv);
return 0;
}
static void spi_slave_system_control_remove(struct spi_device *spi)
{
struct spi_slave_system_control_priv *priv = spi_get_drvdata(spi);
spi_slave_abort(spi);
wait_for_completion(&priv->finished);
}
static struct spi_driver spi_slave_system_control_driver = {
.driver = {
.name = "spi-slave-system-control",
},
.probe = spi_slave_system_control_probe,
.remove = spi_slave_system_control_remove,
};
module_spi_driver(spi_slave_system_control_driver);
MODULE_AUTHOR("Geert Uytterhoeven <[email protected]>");
MODULE_DESCRIPTION("SPI slave handler controlling system state");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/spi/spi-slave-system-control.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2016 Broadcom Limited
*/
#include <linux/device.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "spi-bcm-qspi.h"
#define INTR_BASE_BIT_SHIFT 0x02
#define INTR_COUNT 0x07
struct bcm_iproc_intc {
struct bcm_qspi_soc_intc soc_intc;
struct platform_device *pdev;
void __iomem *int_reg;
void __iomem *int_status_reg;
spinlock_t soclock;
bool big_endian;
};
static u32 bcm_iproc_qspi_get_l2_int_status(struct bcm_qspi_soc_intc *soc_intc)
{
struct bcm_iproc_intc *priv =
container_of(soc_intc, struct bcm_iproc_intc, soc_intc);
void __iomem *mmio = priv->int_status_reg;
int i;
u32 val = 0, sts = 0;
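/*
* Note: each of the INTR_COUNT status words reports a single interrupt
* source; collapse them into one bitmask before mapping to the generic
* MSPI/BSPI status bits.
*/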
for (i = 0; i < INTR_COUNT; i++) {
if (bcm_qspi_readl(priv->big_endian, mmio + (i * 4)))
val |= 1UL << i;
}
if (val & INTR_MSPI_DONE_MASK)
sts |= MSPI_DONE;
if (val & BSPI_LR_INTERRUPTS_ALL)
sts |= BSPI_DONE;
if (val & BSPI_LR_INTERRUPTS_ERROR)
sts |= BSPI_ERR;
return sts;
}
static void bcm_iproc_qspi_int_ack(struct bcm_qspi_soc_intc *soc_intc, int type)
{
struct bcm_iproc_intc *priv =
container_of(soc_intc, struct bcm_iproc_intc, soc_intc);
void __iomem *mmio = priv->int_status_reg;
u32 mask = get_qspi_mask(type);
int i;
for (i = 0; i < INTR_COUNT; i++) {
if (mask & (1UL << i))
bcm_qspi_writel(priv->big_endian, 1, mmio + (i * 4));
}
}
static void bcm_iproc_qspi_int_set(struct bcm_qspi_soc_intc *soc_intc, int type,
bool en)
{
struct bcm_iproc_intc *priv =
container_of(soc_intc, struct bcm_iproc_intc, soc_intc);
void __iomem *mmio = priv->int_reg;
u32 mask = get_qspi_mask(type);
u32 val;
unsigned long flags;
spin_lock_irqsave(&priv->soclock, flags);
val = bcm_qspi_readl(priv->big_endian, mmio);
if (en)
val = val | (mask << INTR_BASE_BIT_SHIFT);
else
val = val & ~(mask << INTR_BASE_BIT_SHIFT);
bcm_qspi_writel(priv->big_endian, val, mmio);
spin_unlock_irqrestore(&priv->soclock, flags);
}
static int bcm_iproc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct bcm_iproc_intc *priv;
struct bcm_qspi_soc_intc *soc_intc;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
soc_intc = &priv->soc_intc;
priv->pdev = pdev;
spin_lock_init(&priv->soclock);
priv->int_reg = devm_platform_ioremap_resource_byname(pdev, "intr_regs");
if (IS_ERR(priv->int_reg))
return PTR_ERR(priv->int_reg);
priv->int_status_reg = devm_platform_ioremap_resource_byname(pdev,
"intr_status_reg");
if (IS_ERR(priv->int_status_reg))
return PTR_ERR(priv->int_status_reg);
priv->big_endian = of_device_is_big_endian(dev->of_node);
bcm_iproc_qspi_int_ack(soc_intc, MSPI_BSPI_DONE);
bcm_iproc_qspi_int_set(soc_intc, MSPI_BSPI_DONE, false);
soc_intc->bcm_qspi_int_ack = bcm_iproc_qspi_int_ack;
soc_intc->bcm_qspi_int_set = bcm_iproc_qspi_int_set;
soc_intc->bcm_qspi_get_int_status = bcm_iproc_qspi_get_l2_int_status;
return bcm_qspi_probe(pdev, soc_intc);
}
static void bcm_iproc_remove(struct platform_device *pdev)
{
bcm_qspi_remove(pdev);
}
static const struct of_device_id bcm_iproc_of_match[] = {
{ .compatible = "brcm,spi-nsp-qspi" },
{ .compatible = "brcm,spi-ns2-qspi" },
{},
};
MODULE_DEVICE_TABLE(of, bcm_iproc_of_match);
static struct platform_driver bcm_iproc_driver = {
.probe = bcm_iproc_probe,
.remove_new = bcm_iproc_remove,
.driver = {
.name = "bcm_iproc",
.pm = &bcm_qspi_pm_ops,
.of_match_table = bcm_iproc_of_match,
}
};
module_platform_driver(bcm_iproc_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Kamal Dasu");
MODULE_DESCRIPTION("SPI flash driver for Broadcom iProc SoCs");
| linux-master | drivers/spi/spi-iproc-qspi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* TI QSPI driver
*
* Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com
* Author: Sourav Poddar <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/sizes.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
struct ti_qspi_regs {
u32 clkctrl;
};
struct ti_qspi {
struct completion transfer_complete;
/* list synchronization */
struct mutex list_lock;
struct spi_master *master;
void __iomem *base;
void __iomem *mmap_base;
size_t mmap_size;
struct regmap *ctrl_base;
unsigned int ctrl_reg;
struct clk *fclk;
struct device *dev;
struct ti_qspi_regs ctx_reg;
dma_addr_t mmap_phys_base;
dma_addr_t rx_bb_dma_addr;
void *rx_bb_addr;
struct dma_chan *rx_chan;
u32 cmd;
u32 dc;
bool mmap_enabled;
int current_cs;
};
#define QSPI_PID (0x0)
#define QSPI_SYSCONFIG (0x10)
#define QSPI_SPI_CLOCK_CNTRL_REG (0x40)
#define QSPI_SPI_DC_REG (0x44)
#define QSPI_SPI_CMD_REG (0x48)
#define QSPI_SPI_STATUS_REG (0x4c)
#define QSPI_SPI_DATA_REG (0x50)
#define QSPI_SPI_SETUP_REG(n) ((0x54 + 4 * n))
#define QSPI_SPI_SWITCH_REG (0x64)
#define QSPI_SPI_DATA_REG_1 (0x68)
#define QSPI_SPI_DATA_REG_2 (0x6c)
#define QSPI_SPI_DATA_REG_3 (0x70)
#define QSPI_COMPLETION_TIMEOUT msecs_to_jiffies(2000)
/* Clock Control */
#define QSPI_CLK_EN (1 << 31)
#define QSPI_CLK_DIV_MAX 0xffff
/* Command */
#define QSPI_EN_CS(n) (n << 28)
#define QSPI_WLEN(n) ((n - 1) << 19)
#define QSPI_3_PIN (1 << 18)
#define QSPI_RD_SNGL (1 << 16)
#define QSPI_WR_SNGL (2 << 16)
#define QSPI_RD_DUAL (3 << 16)
#define QSPI_RD_QUAD (7 << 16)
#define QSPI_INVAL (4 << 16)
#define QSPI_FLEN(n) ((n - 1) << 0)
#define QSPI_WLEN_MAX_BITS 128
#define QSPI_WLEN_MAX_BYTES 16
#define QSPI_WLEN_MASK QSPI_WLEN(QSPI_WLEN_MAX_BITS)
/* STATUS REGISTER */
#define BUSY 0x01
#define WC 0x02
/* Device Control */
#define QSPI_DD(m, n) (m << (3 + n * 8))
#define QSPI_CKPHA(n) (1 << (2 + n * 8))
#define QSPI_CSPOL(n) (1 << (1 + n * 8))
#define QSPI_CKPOL(n) (1 << (n * 8))
#define QSPI_FRAME 4096
#define QSPI_AUTOSUSPEND_TIMEOUT 2000
#define MEM_CS_EN(n) ((n + 1) << 8)
#define MEM_CS_MASK (7 << 8)
#define MM_SWITCH 0x1
#define QSPI_SETUP_RD_NORMAL (0x0 << 12)
#define QSPI_SETUP_RD_DUAL (0x1 << 12)
#define QSPI_SETUP_RD_QUAD (0x3 << 12)
#define QSPI_SETUP_ADDR_SHIFT 8
#define QSPI_SETUP_DUMMY_SHIFT 10
#define QSPI_DMA_BUFFER_SIZE SZ_64K
static inline unsigned long ti_qspi_read(struct ti_qspi *qspi,
unsigned long reg)
{
return readl(qspi->base + reg);
}
static inline void ti_qspi_write(struct ti_qspi *qspi,
unsigned long val, unsigned long reg)
{
writel(val, qspi->base + reg);
}
static int ti_qspi_setup(struct spi_device *spi)
{
struct ti_qspi *qspi = spi_master_get_devdata(spi->master);
int ret;
if (spi->master->busy) {
dev_dbg(qspi->dev, "master busy doing other transfers\n");
return -EBUSY;
}
if (!qspi->master->max_speed_hz) {
dev_err(qspi->dev, "spi max frequency not defined\n");
return -EINVAL;
}
spi->max_speed_hz = min(spi->max_speed_hz, qspi->master->max_speed_hz);
ret = pm_runtime_resume_and_get(qspi->dev);
if (ret < 0) {
dev_err(qspi->dev, "pm_runtime_get_sync() failed\n");
return ret;
}
pm_runtime_mark_last_busy(qspi->dev);
ret = pm_runtime_put_autosuspend(qspi->dev);
if (ret < 0) {
dev_err(qspi->dev, "pm_runtime_put_autosuspend() failed\n");
return ret;
}
return 0;
}
static void ti_qspi_setup_clk(struct ti_qspi *qspi, u32 speed_hz)
{
struct ti_qspi_regs *ctx_reg = &qspi->ctx_reg;
int clk_div;
u32 clk_ctrl_reg, clk_rate, clk_ctrl_new;
clk_rate = clk_get_rate(qspi->fclk);
clk_div = DIV_ROUND_UP(clk_rate, speed_hz) - 1;
clk_div = clamp(clk_div, 0, QSPI_CLK_DIV_MAX);
dev_dbg(qspi->dev, "hz: %d, clock divider %d\n", speed_hz, clk_div);
pm_runtime_resume_and_get(qspi->dev);
clk_ctrl_new = QSPI_CLK_EN | clk_div;
if (ctx_reg->clkctrl != clk_ctrl_new) {
clk_ctrl_reg = ti_qspi_read(qspi, QSPI_SPI_CLOCK_CNTRL_REG);
clk_ctrl_reg &= ~QSPI_CLK_EN;
/* disable SCLK */
ti_qspi_write(qspi, clk_ctrl_reg, QSPI_SPI_CLOCK_CNTRL_REG);
/* enable SCLK */
ti_qspi_write(qspi, clk_ctrl_new, QSPI_SPI_CLOCK_CNTRL_REG);
ctx_reg->clkctrl = clk_ctrl_new;
}
pm_runtime_mark_last_busy(qspi->dev);
pm_runtime_put_autosuspend(qspi->dev);
}
static void ti_qspi_restore_ctx(struct ti_qspi *qspi)
{
struct ti_qspi_regs *ctx_reg = &qspi->ctx_reg;
ti_qspi_write(qspi, ctx_reg->clkctrl, QSPI_SPI_CLOCK_CNTRL_REG);
}
static inline u32 qspi_is_busy(struct ti_qspi *qspi)
{
u32 stat;
unsigned long timeout = jiffies + QSPI_COMPLETION_TIMEOUT;
stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
while ((stat & BUSY) && time_after(timeout, jiffies)) {
cpu_relax();
stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
}
WARN(stat & BUSY, "qspi busy\n");
return stat & BUSY;
}
static inline int ti_qspi_poll_wc(struct ti_qspi *qspi)
{
u32 stat;
unsigned long timeout = jiffies + QSPI_COMPLETION_TIMEOUT;
do {
stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
if (stat & WC)
return 0;
cpu_relax();
} while (time_after(timeout, jiffies));
stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
if (stat & WC)
return 0;
return -ETIMEDOUT;
}
static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t,
int count)
{
int wlen, xfer_len;
unsigned int cmd;
const u8 *txbuf;
u32 data;
txbuf = t->tx_buf;
cmd = qspi->cmd | QSPI_WR_SNGL;
wlen = t->bits_per_word >> 3; /* in bytes */
xfer_len = wlen;
while (count) {
if (qspi_is_busy(qspi))
return -EBUSY;
switch (wlen) {
case 1:
dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %02x\n",
cmd, qspi->dc, *txbuf);
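/* Burst up to 16 bytes at a time through the four 32-bit data registers */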
if (count >= QSPI_WLEN_MAX_BYTES) {
u32 *txp = (u32 *)txbuf;
data = cpu_to_be32(*txp++);
writel(data, qspi->base +
QSPI_SPI_DATA_REG_3);
data = cpu_to_be32(*txp++);
writel(data, qspi->base +
QSPI_SPI_DATA_REG_2);
data = cpu_to_be32(*txp++);
writel(data, qspi->base +
QSPI_SPI_DATA_REG_1);
data = cpu_to_be32(*txp++);
writel(data, qspi->base +
QSPI_SPI_DATA_REG);
xfer_len = QSPI_WLEN_MAX_BYTES;
cmd |= QSPI_WLEN(QSPI_WLEN_MAX_BITS);
} else {
writeb(*txbuf, qspi->base + QSPI_SPI_DATA_REG);
cmd = qspi->cmd | QSPI_WR_SNGL;
xfer_len = wlen;
cmd |= QSPI_WLEN(wlen);
}
break;
case 2:
dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %04x\n",
cmd, qspi->dc, *txbuf);
writew(*((u16 *)txbuf), qspi->base + QSPI_SPI_DATA_REG);
break;
case 4:
dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %08x\n",
cmd, qspi->dc, *txbuf);
writel(*((u32 *)txbuf), qspi->base + QSPI_SPI_DATA_REG);
break;
}
ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
if (ti_qspi_poll_wc(qspi)) {
dev_err(qspi->dev, "write timed out\n");
return -ETIMEDOUT;
}
txbuf += xfer_len;
count -= xfer_len;
}
return 0;
}
static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t,
int count)
{
int wlen;
unsigned int cmd;
u32 rx;
u8 rxlen, rx_wlen;
u8 *rxbuf;
rxbuf = t->rx_buf;
cmd = qspi->cmd;
switch (t->rx_nbits) {
case SPI_NBITS_DUAL:
cmd |= QSPI_RD_DUAL;
break;
case SPI_NBITS_QUAD:
cmd |= QSPI_RD_QUAD;
break;
default:
cmd |= QSPI_RD_SNGL;
break;
}
wlen = t->bits_per_word >> 3; /* in bytes */
rx_wlen = wlen;
while (count) {
dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc);
if (qspi_is_busy(qspi))
return -EBUSY;
switch (wlen) {
case 1:
/*
* Optimize 8-bit word transfers, as used by
* SPI flash devices.
*/
if (count >= QSPI_WLEN_MAX_BYTES) {
rxlen = QSPI_WLEN_MAX_BYTES;
} else {
rxlen = min(count, 4);
}
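/* The WLEN field is programmed in bits; widen it to cover this whole burst */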
rx_wlen = rxlen << 3;
cmd &= ~QSPI_WLEN_MASK;
cmd |= QSPI_WLEN(rx_wlen);
break;
default:
rxlen = wlen;
break;
}
ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
if (ti_qspi_poll_wc(qspi)) {
dev_err(qspi->dev, "read timed out\n");
return -ETIMEDOUT;
}
switch (wlen) {
case 1:
/*
* Optimize 8-bit word transfers, as used by
* SPI flash devices.
*/
if (count >= QSPI_WLEN_MAX_BYTES) {
u32 *rxp = (u32 *) rxbuf;
rx = readl(qspi->base + QSPI_SPI_DATA_REG_3);
*rxp++ = be32_to_cpu(rx);
rx = readl(qspi->base + QSPI_SPI_DATA_REG_2);
*rxp++ = be32_to_cpu(rx);
rx = readl(qspi->base + QSPI_SPI_DATA_REG_1);
*rxp++ = be32_to_cpu(rx);
rx = readl(qspi->base + QSPI_SPI_DATA_REG);
*rxp++ = be32_to_cpu(rx);
} else {
u8 *rxp = rxbuf;
rx = readl(qspi->base + QSPI_SPI_DATA_REG);
if (rx_wlen >= 8)
*rxp++ = rx >> (rx_wlen - 8);
if (rx_wlen >= 16)
*rxp++ = rx >> (rx_wlen - 16);
if (rx_wlen >= 24)
*rxp++ = rx >> (rx_wlen - 24);
if (rx_wlen >= 32)
*rxp++ = rx;
}
break;
case 2:
*((u16 *)rxbuf) = readw(qspi->base + QSPI_SPI_DATA_REG);
break;
case 4:
*((u32 *)rxbuf) = readl(qspi->base + QSPI_SPI_DATA_REG);
break;
}
rxbuf += rxlen;
count -= rxlen;
}
return 0;
}
static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t,
int count)
{
int ret;
if (t->tx_buf) {
ret = qspi_write_msg(qspi, t, count);
if (ret) {
dev_dbg(qspi->dev, "Error while writing\n");
return ret;
}
}
if (t->rx_buf) {
ret = qspi_read_msg(qspi, t, count);
if (ret) {
dev_dbg(qspi->dev, "Error while reading\n");
return ret;
}
}
return 0;
}
static void ti_qspi_dma_callback(void *param)
{
struct ti_qspi *qspi = param;
complete(&qspi->transfer_complete);
}
static int ti_qspi_dma_xfer(struct ti_qspi *qspi, dma_addr_t dma_dst,
dma_addr_t dma_src, size_t len)
{
struct dma_chan *chan = qspi->rx_chan;
dma_cookie_t cookie;
enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
struct dma_async_tx_descriptor *tx;
int ret;
unsigned long time_left;
tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len, flags);
if (!tx) {
dev_err(qspi->dev, "device_prep_dma_memcpy error\n");
return -EIO;
}
tx->callback = ti_qspi_dma_callback;
tx->callback_param = qspi;
cookie = tx->tx_submit(tx);
reinit_completion(&qspi->transfer_complete);
ret = dma_submit_error(cookie);
if (ret) {
dev_err(qspi->dev, "dma_submit_error %d\n", cookie);
return -EIO;
}
dma_async_issue_pending(chan);
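/* Allow roughly one millisecond per byte of transfer before timing out */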
time_left = wait_for_completion_timeout(&qspi->transfer_complete,
msecs_to_jiffies(len));
if (time_left == 0) {
dmaengine_terminate_sync(chan);
dev_err(qspi->dev, "DMA wait_for_completion_timeout\n");
return -ETIMEDOUT;
}
return 0;
}
static int ti_qspi_dma_bounce_buffer(struct ti_qspi *qspi, loff_t offs,
void *to, size_t readsize)
{
dma_addr_t dma_src = qspi->mmap_phys_base + offs;
int ret = 0;
/*
* Use a bounce buffer, as filesystems such as jffs2 and ubifs may
* pass buffers that do not belong to the kernel lowmem region.
*/
while (readsize != 0) {
size_t xfer_len = min_t(size_t, QSPI_DMA_BUFFER_SIZE,
readsize);
ret = ti_qspi_dma_xfer(qspi, qspi->rx_bb_dma_addr,
dma_src, xfer_len);
if (ret != 0)
return ret;
memcpy(to, qspi->rx_bb_addr, xfer_len);
readsize -= xfer_len;
dma_src += xfer_len;
to += xfer_len;
}
return ret;
}
static int ti_qspi_dma_xfer_sg(struct ti_qspi *qspi, struct sg_table rx_sg,
loff_t from)
{
struct scatterlist *sg;
dma_addr_t dma_src = qspi->mmap_phys_base + from;
dma_addr_t dma_dst;
int i, len, ret;
for_each_sg(rx_sg.sgl, sg, rx_sg.nents, i) {
dma_dst = sg_dma_address(sg);
len = sg_dma_len(sg);
ret = ti_qspi_dma_xfer(qspi, dma_dst, dma_src, len);
if (ret)
return ret;
dma_src += len;
}
return 0;
}
static void ti_qspi_enable_memory_map(struct spi_device *spi)
{
struct ti_qspi *qspi = spi_master_get_devdata(spi->master);
ti_qspi_write(qspi, MM_SWITCH, QSPI_SPI_SWITCH_REG);
if (qspi->ctrl_base) {
regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
MEM_CS_MASK,
MEM_CS_EN(spi_get_chipselect(spi, 0)));
}
qspi->mmap_enabled = true;
qspi->current_cs = spi_get_chipselect(spi, 0);
}
static void ti_qspi_disable_memory_map(struct spi_device *spi)
{
struct ti_qspi *qspi = spi_master_get_devdata(spi->master);
ti_qspi_write(qspi, 0, QSPI_SPI_SWITCH_REG);
if (qspi->ctrl_base)
regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
MEM_CS_MASK, 0);
qspi->mmap_enabled = false;
qspi->current_cs = -1;
}
static void ti_qspi_setup_mmap_read(struct spi_device *spi, u8 opcode,
u8 data_nbits, u8 addr_width,
u8 dummy_bytes)
{
struct ti_qspi *qspi = spi_master_get_devdata(spi->master);
u32 memval = opcode;
switch (data_nbits) {
case SPI_NBITS_QUAD:
memval |= QSPI_SETUP_RD_QUAD;
break;
case SPI_NBITS_DUAL:
memval |= QSPI_SETUP_RD_DUAL;
break;
default:
memval |= QSPI_SETUP_RD_NORMAL;
break;
}
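/* Encode the address width (in bytes, minus one) and dummy byte count alongside the opcode */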
memval |= ((addr_width - 1) << QSPI_SETUP_ADDR_SHIFT |
dummy_bytes << QSPI_SETUP_DUMMY_SHIFT);
ti_qspi_write(qspi, memval,
QSPI_SPI_SETUP_REG(spi_get_chipselect(spi, 0)));
}
static int ti_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
struct ti_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
size_t max_len;
if (op->data.dir == SPI_MEM_DATA_IN) {
if (op->addr.val < qspi->mmap_size) {
/* Limit MMIO to the mmapped region */
if (op->addr.val + op->data.nbytes > qspi->mmap_size) {
max_len = qspi->mmap_size - op->addr.val;
op->data.nbytes = min((size_t) op->data.nbytes,
max_len);
}
} else {
/*
* Use fallback mode (SW generated transfers) above the
* mmapped region.
* Adjust size to comply with the QSPI max frame length.
*/
max_len = QSPI_FRAME;
max_len -= 1 + op->addr.nbytes + op->dummy.nbytes;
op->data.nbytes = min((size_t) op->data.nbytes,
max_len);
}
}
return 0;
}
static int ti_qspi_exec_mem_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
struct ti_qspi *qspi = spi_master_get_devdata(mem->spi->master);
u32 from = 0;
int ret = 0;
/* Only optimize read path. */
if (!op->data.nbytes || op->data.dir != SPI_MEM_DATA_IN ||
!op->addr.nbytes || op->addr.nbytes > 4)
return -ENOTSUPP;
/* Address exceeds MMIO window size, fall back to regular mode. */
from = op->addr.val;
if (from + op->data.nbytes > qspi->mmap_size)
return -ENOTSUPP;
mutex_lock(&qspi->list_lock);
if (!qspi->mmap_enabled || qspi->current_cs != spi_get_chipselect(mem->spi, 0)) {
ti_qspi_setup_clk(qspi, mem->spi->max_speed_hz);
ti_qspi_enable_memory_map(mem->spi);
}
ti_qspi_setup_mmap_read(mem->spi, op->cmd.opcode, op->data.buswidth,
op->addr.nbytes, op->dummy.nbytes);
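/* Read either via the DMA engine (scatter-gather or bounce buffer) or with a plain memcpy_fromio from the mapped window */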
if (qspi->rx_chan) {
struct sg_table sgt;
if (virt_addr_valid(op->data.buf.in) &&
!spi_controller_dma_map_mem_op_data(mem->spi->master, op,
&sgt)) {
ret = ti_qspi_dma_xfer_sg(qspi, sgt, from);
spi_controller_dma_unmap_mem_op_data(mem->spi->master,
op, &sgt);
} else {
ret = ti_qspi_dma_bounce_buffer(qspi, from,
op->data.buf.in,
op->data.nbytes);
}
} else {
memcpy_fromio(op->data.buf.in, qspi->mmap_base + from,
op->data.nbytes);
}
mutex_unlock(&qspi->list_lock);
return ret;
}
static const struct spi_controller_mem_ops ti_qspi_mem_ops = {
.exec_op = ti_qspi_exec_mem_op,
.adjust_op_size = ti_qspi_adjust_op_size,
};
static int ti_qspi_start_transfer_one(struct spi_master *master,
struct spi_message *m)
{
struct ti_qspi *qspi = spi_master_get_devdata(master);
struct spi_device *spi = m->spi;
struct spi_transfer *t;
int status = 0, ret;
unsigned int frame_len_words, transfer_len_words;
int wlen;
/* setup device control reg */
qspi->dc = 0;
if (spi->mode & SPI_CPHA)
qspi->dc |= QSPI_CKPHA(spi_get_chipselect(spi, 0));
if (spi->mode & SPI_CPOL)
qspi->dc |= QSPI_CKPOL(spi_get_chipselect(spi, 0));
if (spi->mode & SPI_CS_HIGH)
qspi->dc |= QSPI_CSPOL(spi_get_chipselect(spi, 0));
frame_len_words = 0;
list_for_each_entry(t, &m->transfers, transfer_list)
frame_len_words += t->len / (t->bits_per_word >> 3);
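/* The controller's frame length field is limited to QSPI_FRAME words */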
frame_len_words = min_t(unsigned int, frame_len_words, QSPI_FRAME);
/* setup command reg */
qspi->cmd = 0;
qspi->cmd |= QSPI_EN_CS(spi_get_chipselect(spi, 0));
qspi->cmd |= QSPI_FLEN(frame_len_words);
ti_qspi_write(qspi, qspi->dc, QSPI_SPI_DC_REG);
mutex_lock(&qspi->list_lock);
if (qspi->mmap_enabled)
ti_qspi_disable_memory_map(spi);
list_for_each_entry(t, &m->transfers, transfer_list) {
qspi->cmd = ((qspi->cmd & ~QSPI_WLEN_MASK) |
QSPI_WLEN(t->bits_per_word));
wlen = t->bits_per_word >> 3;
transfer_len_words = min(t->len / wlen, frame_len_words);
ti_qspi_setup_clk(qspi, t->speed_hz);
ret = qspi_transfer_msg(qspi, t, transfer_len_words * wlen);
if (ret) {
dev_dbg(qspi->dev, "transfer message failed\n");
mutex_unlock(&qspi->list_lock);
return -EINVAL;
}
m->actual_length += transfer_len_words * wlen;
frame_len_words -= transfer_len_words;
if (frame_len_words == 0)
break;
}
mutex_unlock(&qspi->list_lock);
ti_qspi_write(qspi, qspi->cmd | QSPI_INVAL, QSPI_SPI_CMD_REG);
m->status = status;
spi_finalize_current_message(master);
return status;
}
static int ti_qspi_runtime_resume(struct device *dev)
{
struct ti_qspi *qspi;
qspi = dev_get_drvdata(dev);
ti_qspi_restore_ctx(qspi);
return 0;
}
static void ti_qspi_dma_cleanup(struct ti_qspi *qspi)
{
if (qspi->rx_bb_addr)
dma_free_coherent(qspi->dev, QSPI_DMA_BUFFER_SIZE,
qspi->rx_bb_addr,
qspi->rx_bb_dma_addr);
if (qspi->rx_chan)
dma_release_channel(qspi->rx_chan);
}
static const struct of_device_id ti_qspi_match[] = {
{.compatible = "ti,dra7xxx-qspi" },
{.compatible = "ti,am4372-qspi" },
{},
};
MODULE_DEVICE_TABLE(of, ti_qspi_match);
static int ti_qspi_probe(struct platform_device *pdev)
{
struct ti_qspi *qspi;
struct spi_master *master;
struct resource *r, *res_mmap;
struct device_node *np = pdev->dev.of_node;
u32 max_freq;
int ret = 0, num_cs, irq;
dma_cap_mask_t mask;
master = spi_alloc_master(&pdev->dev, sizeof(*qspi));
if (!master)
return -ENOMEM;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD;
master->flags = SPI_CONTROLLER_HALF_DUPLEX;
master->setup = ti_qspi_setup;
master->auto_runtime_pm = true;
master->transfer_one_message = ti_qspi_start_transfer_one;
master->dev.of_node = pdev->dev.of_node;
master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
SPI_BPW_MASK(8);
master->mem_ops = &ti_qspi_mem_ops;
if (!of_property_read_u32(np, "num-cs", &num_cs))
master->num_chipselect = num_cs;
qspi = spi_master_get_devdata(master);
qspi->master = master;
qspi->dev = &pdev->dev;
platform_set_drvdata(pdev, qspi);
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_base");
if (r == NULL) {
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (r == NULL) {
dev_err(&pdev->dev, "missing platform data\n");
ret = -ENODEV;
goto free_master;
}
}
res_mmap = platform_get_resource_byname(pdev,
IORESOURCE_MEM, "qspi_mmap");
if (res_mmap == NULL) {
res_mmap = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (res_mmap == NULL) {
dev_err(&pdev->dev,
"memory mapped resource not available\n");
}
}
if (res_mmap)
qspi->mmap_size = resource_size(res_mmap);
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
goto free_master;
}
mutex_init(&qspi->list_lock);
qspi->base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(qspi->base)) {
ret = PTR_ERR(qspi->base);
goto free_master;
}
if (of_property_read_bool(np, "syscon-chipselects")) {
qspi->ctrl_base =
syscon_regmap_lookup_by_phandle(np,
"syscon-chipselects");
if (IS_ERR(qspi->ctrl_base)) {
ret = PTR_ERR(qspi->ctrl_base);
goto free_master;
}
ret = of_property_read_u32_index(np,
"syscon-chipselects",
1, &qspi->ctrl_reg);
if (ret) {
dev_err(&pdev->dev,
"couldn't get ctrl_mod reg index\n");
goto free_master;
}
}
qspi->fclk = devm_clk_get(&pdev->dev, "fck");
if (IS_ERR(qspi->fclk)) {
ret = PTR_ERR(qspi->fclk);
dev_err(&pdev->dev, "could not get clk: %d\n", ret);
}
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, QSPI_AUTOSUSPEND_TIMEOUT);
pm_runtime_enable(&pdev->dev);
if (!of_property_read_u32(np, "spi-max-frequency", &max_freq))
master->max_speed_hz = max_freq;
dma_cap_zero(mask);
dma_cap_set(DMA_MEMCPY, mask);
qspi->rx_chan = dma_request_chan_by_mask(&mask);
if (IS_ERR(qspi->rx_chan)) {
dev_err(qspi->dev,
"No Rx DMA available, trying mmap mode\n");
qspi->rx_chan = NULL;
ret = 0;
goto no_dma;
}
qspi->rx_bb_addr = dma_alloc_coherent(qspi->dev,
QSPI_DMA_BUFFER_SIZE,
&qspi->rx_bb_dma_addr,
GFP_KERNEL | GFP_DMA);
if (!qspi->rx_bb_addr) {
dev_err(qspi->dev,
"dma_alloc_coherent failed, using PIO mode\n");
dma_release_channel(qspi->rx_chan);
goto no_dma;
}
master->dma_rx = qspi->rx_chan;
init_completion(&qspi->transfer_complete);
if (res_mmap)
qspi->mmap_phys_base = (dma_addr_t)res_mmap->start;
no_dma:
if (!qspi->rx_chan && res_mmap) {
qspi->mmap_base = devm_ioremap_resource(&pdev->dev, res_mmap);
if (IS_ERR(qspi->mmap_base)) {
dev_info(&pdev->dev,
"mmap failed with error %ld using PIO mode\n",
PTR_ERR(qspi->mmap_base));
qspi->mmap_base = NULL;
master->mem_ops = NULL;
}
}
qspi->mmap_enabled = false;
qspi->current_cs = -1;
ret = devm_spi_register_master(&pdev->dev, master);
if (!ret)
return 0;
ti_qspi_dma_cleanup(qspi);
pm_runtime_disable(&pdev->dev);
free_master:
spi_master_put(master);
return ret;
}
static int ti_qspi_remove(struct platform_device *pdev)
{
struct ti_qspi *qspi = platform_get_drvdata(pdev);
int rc;
rc = spi_master_suspend(qspi->master);
if (rc)
return rc;
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
ti_qspi_dma_cleanup(qspi);
return 0;
}
static const struct dev_pm_ops ti_qspi_pm_ops = {
.runtime_resume = ti_qspi_runtime_resume,
};
static struct platform_driver ti_qspi_driver = {
.probe = ti_qspi_probe,
.remove = ti_qspi_remove,
.driver = {
.name = "ti-qspi",
.pm = &ti_qspi_pm_ops,
.of_match_table = ti_qspi_match,
}
};
module_platform_driver(ti_qspi_driver);
MODULE_AUTHOR("Sourav Poddar <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI QSPI controller driver");
MODULE_ALIAS("platform:ti-qspi");
| linux-master | drivers/spi/spi-ti-qspi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Intel PCH/PCU SPI flash platform driver.
*
* Copyright (C) 2016 - 2022, Intel Corporation
* Author: Mika Westerberg <[email protected]>
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include "spi-intel.h"
static int intel_spi_platform_probe(struct platform_device *pdev)
{
struct intel_spi_boardinfo *info;
struct resource *mem;
info = dev_get_platdata(&pdev->dev);
if (!info)
return -EINVAL;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
return intel_spi_probe(&pdev->dev, mem, info);
}
static struct platform_driver intel_spi_platform_driver = {
.probe = intel_spi_platform_probe,
.driver = {
.name = "intel-spi",
},
};
module_platform_driver(intel_spi_platform_driver);
MODULE_DESCRIPTION("Intel PCH/PCU SPI flash platform driver");
MODULE_AUTHOR("Mika Westerberg <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:intel-spi");
| linux-master | drivers/spi/spi-intel-platform.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Rockchip Serial Flash Controller Driver
*
* Copyright (c) 2017-2021, Rockchip Inc.
* Author: Shawn Lin <[email protected]>
* Chris Morgan <[email protected]>
* Jon Lin <[email protected]>
*/
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spi/spi-mem.h>
/* System control */
#define SFC_CTRL 0x0
#define SFC_CTRL_PHASE_SEL_NEGETIVE BIT(1)
#define SFC_CTRL_CMD_BITS_SHIFT 8
#define SFC_CTRL_ADDR_BITS_SHIFT 10
#define SFC_CTRL_DATA_BITS_SHIFT 12
/* Interrupt mask */
#define SFC_IMR 0x4
#define SFC_IMR_RX_FULL BIT(0)
#define SFC_IMR_RX_UFLOW BIT(1)
#define SFC_IMR_TX_OFLOW BIT(2)
#define SFC_IMR_TX_EMPTY BIT(3)
#define SFC_IMR_TRAN_FINISH BIT(4)
#define SFC_IMR_BUS_ERR BIT(5)
#define SFC_IMR_NSPI_ERR BIT(6)
#define SFC_IMR_DMA BIT(7)
/* Interrupt clear */
#define SFC_ICLR 0x8
#define SFC_ICLR_RX_FULL BIT(0)
#define SFC_ICLR_RX_UFLOW BIT(1)
#define SFC_ICLR_TX_OFLOW BIT(2)
#define SFC_ICLR_TX_EMPTY BIT(3)
#define SFC_ICLR_TRAN_FINISH BIT(4)
#define SFC_ICLR_BUS_ERR BIT(5)
#define SFC_ICLR_NSPI_ERR BIT(6)
#define SFC_ICLR_DMA BIT(7)
/* FIFO threshold level */
#define SFC_FTLR 0xc
#define SFC_FTLR_TX_SHIFT 0
#define SFC_FTLR_TX_MASK 0x1f
#define SFC_FTLR_RX_SHIFT 8
#define SFC_FTLR_RX_MASK 0x1f
/* Reset FSM and FIFO */
#define SFC_RCVR 0x10
#define SFC_RCVR_RESET BIT(0)
/* Enhanced mode */
#define SFC_AX 0x14
/* Address Bit number */
#define SFC_ABIT 0x18
/* Interrupt status */
#define SFC_ISR 0x1c
#define SFC_ISR_RX_FULL_SHIFT BIT(0)
#define SFC_ISR_RX_UFLOW_SHIFT BIT(1)
#define SFC_ISR_TX_OFLOW_SHIFT BIT(2)
#define SFC_ISR_TX_EMPTY_SHIFT BIT(3)
#define SFC_ISR_TX_FINISH_SHIFT BIT(4)
#define SFC_ISR_BUS_ERR_SHIFT BIT(5)
#define SFC_ISR_NSPI_ERR_SHIFT BIT(6)
#define SFC_ISR_DMA_SHIFT BIT(7)
/* FIFO status */
#define SFC_FSR 0x20
#define SFC_FSR_TX_IS_FULL BIT(0)
#define SFC_FSR_TX_IS_EMPTY BIT(1)
#define SFC_FSR_RX_IS_EMPTY BIT(2)
#define SFC_FSR_RX_IS_FULL BIT(3)
#define SFC_FSR_TXLV_MASK GENMASK(12, 8)
#define SFC_FSR_TXLV_SHIFT 8
#define SFC_FSR_RXLV_MASK GENMASK(20, 16)
#define SFC_FSR_RXLV_SHIFT 16
/* FSM status */
#define SFC_SR 0x24
#define SFC_SR_IS_IDLE 0x0
#define SFC_SR_IS_BUSY 0x1
/* Raw interrupt status */
#define SFC_RISR 0x28
#define SFC_RISR_RX_FULL BIT(0)
#define SFC_RISR_RX_UNDERFLOW BIT(1)
#define SFC_RISR_TX_OVERFLOW BIT(2)
#define SFC_RISR_TX_EMPTY BIT(3)
#define SFC_RISR_TRAN_FINISH BIT(4)
#define SFC_RISR_BUS_ERR BIT(5)
#define SFC_RISR_NSPI_ERR BIT(6)
#define SFC_RISR_DMA BIT(7)
/* Version */
#define SFC_VER 0x2C
#define SFC_VER_3 0x3
#define SFC_VER_4 0x4
#define SFC_VER_5 0x5
/* Delay line controller register */
#define SFC_DLL_CTRL0 0x3C
#define SFC_DLL_CTRL0_SCLK_SMP_DLL BIT(15)
#define SFC_DLL_CTRL0_DLL_MAX_VER4 0xFFU
#define SFC_DLL_CTRL0_DLL_MAX_VER5 0x1FFU
/* Master trigger */
#define SFC_DMA_TRIGGER 0x80
#define SFC_DMA_TRIGGER_START 1
/* Src or Dst addr for master */
#define SFC_DMA_ADDR 0x84
/* Length control register extension 32GB */
#define SFC_LEN_CTRL 0x88
#define SFC_LEN_CTRL_TRB_SEL 1
#define SFC_LEN_EXT 0x8C
/* Command */
#define SFC_CMD 0x100
#define SFC_CMD_IDX_SHIFT 0
#define SFC_CMD_DUMMY_SHIFT 8
#define SFC_CMD_DIR_SHIFT 12
#define SFC_CMD_DIR_RD 0
#define SFC_CMD_DIR_WR 1
#define SFC_CMD_ADDR_SHIFT 14
#define SFC_CMD_ADDR_0BITS 0
#define SFC_CMD_ADDR_24BITS 1
#define SFC_CMD_ADDR_32BITS 2
#define SFC_CMD_ADDR_XBITS 3
#define SFC_CMD_TRAN_BYTES_SHIFT 16
#define SFC_CMD_CS_SHIFT 30
/* Address */
#define SFC_ADDR 0x104
/* Data */
#define SFC_DATA 0x108
/* The controller and documentation report support for up to 4 CS
* devices (0-3); however, only a single CS (CS 0) has been tested
* due to the configuration of my device.
*/
#define SFC_MAX_CHIPSELECT_NUM 4
/* The SFC can transfer at most 16KB - 1 at one time;
* we set it to 15.5KB here for alignment.
*/
#define SFC_MAX_IOSIZE_VER3 (512 * 31)
/* DMA is only enabled for large data transmission */
#define SFC_DMA_TRANS_THRETHOLD (0x40)
/* Maximum clock values from datasheet suggest keeping clock value under
* 150MHz. No minimum or average value is suggested.
*/
#define SFC_MAX_SPEED (150 * 1000 * 1000)
struct rockchip_sfc {
struct device *dev;
void __iomem *regbase;
struct clk *hclk;
struct clk *clk;
u32 frequency;
/* virtual mapped addr for dma_buffer */
void *buffer;
dma_addr_t dma_buffer;
struct completion cp;
bool use_dma;
u32 max_iosize;
u16 version;
};
static int rockchip_sfc_reset(struct rockchip_sfc *sfc)
{
int err;
u32 status;
writel_relaxed(SFC_RCVR_RESET, sfc->regbase + SFC_RCVR);
err = readl_poll_timeout(sfc->regbase + SFC_RCVR, status,
!(status & SFC_RCVR_RESET), 20,
jiffies_to_usecs(HZ));
if (err)
dev_err(sfc->dev, "SFC reset never finished\n");
/* Still need to clear the masked interrupt from RISR */
writel_relaxed(0xFFFFFFFF, sfc->regbase + SFC_ICLR);
dev_dbg(sfc->dev, "reset\n");
return err;
}
static u16 rockchip_sfc_get_version(struct rockchip_sfc *sfc)
{
return (u16)(readl(sfc->regbase + SFC_VER) & 0xffff);
}
static u32 rockchip_sfc_get_max_iosize(struct rockchip_sfc *sfc)
{
return SFC_MAX_IOSIZE_VER3;
}
static void rockchip_sfc_irq_unmask(struct rockchip_sfc *sfc, u32 mask)
{
u32 reg;
/* Enable transfer complete interrupt */
reg = readl(sfc->regbase + SFC_IMR);
reg &= ~mask;
writel(reg, sfc->regbase + SFC_IMR);
}
static void rockchip_sfc_irq_mask(struct rockchip_sfc *sfc, u32 mask)
{
u32 reg;
/* Disable transfer finish interrupt */
reg = readl(sfc->regbase + SFC_IMR);
reg |= mask;
writel(reg, sfc->regbase + SFC_IMR);
}
static int rockchip_sfc_init(struct rockchip_sfc *sfc)
{
writel(0, sfc->regbase + SFC_CTRL);
writel(0xFFFFFFFF, sfc->regbase + SFC_ICLR);
rockchip_sfc_irq_mask(sfc, 0xFFFFFFFF);
if (rockchip_sfc_get_version(sfc) >= SFC_VER_4)
writel(SFC_LEN_CTRL_TRB_SEL, sfc->regbase + SFC_LEN_CTRL);
return 0;
}
static int rockchip_sfc_wait_txfifo_ready(struct rockchip_sfc *sfc, u32 timeout_us)
{
int ret = 0;
u32 status;
ret = readl_poll_timeout(sfc->regbase + SFC_FSR, status,
status & SFC_FSR_TXLV_MASK, 0,
timeout_us);
if (ret) {
dev_dbg(sfc->dev, "sfc wait tx fifo timeout\n");
return -ETIMEDOUT;
}
return (status & SFC_FSR_TXLV_MASK) >> SFC_FSR_TXLV_SHIFT;
}
static int rockchip_sfc_wait_rxfifo_ready(struct rockchip_sfc *sfc, u32 timeout_us)
{
int ret = 0;
u32 status;
ret = readl_poll_timeout(sfc->regbase + SFC_FSR, status,
status & SFC_FSR_RXLV_MASK, 0,
timeout_us);
if (ret) {
dev_dbg(sfc->dev, "sfc wait rx fifo timeout\n");
return -ETIMEDOUT;
}
return (status & SFC_FSR_RXLV_MASK) >> SFC_FSR_RXLV_SHIFT;
}
static void rockchip_sfc_adjust_op_work(struct spi_mem_op *op)
{
if (unlikely(op->dummy.nbytes && !op->addr.nbytes)) {
/*
* The SFC does not support DUMMY cycles directly after the CMD
* cycles, so treat them as ADDR cycles.
*/
op->addr.nbytes = op->dummy.nbytes;
op->addr.buswidth = op->dummy.buswidth;
op->addr.val = 0xFFFFFFFFF;
op->dummy.nbytes = 0;
}
}
static int rockchip_sfc_xfer_setup(struct rockchip_sfc *sfc,
struct spi_mem *mem,
const struct spi_mem_op *op,
u32 len)
{
u32 ctrl = 0, cmd = 0;
/* set CMD */
cmd = op->cmd.opcode;
ctrl |= ((op->cmd.buswidth >> 1) << SFC_CTRL_CMD_BITS_SHIFT);
/* set ADDR */
if (op->addr.nbytes) {
if (op->addr.nbytes == 4) {
cmd |= SFC_CMD_ADDR_32BITS << SFC_CMD_ADDR_SHIFT;
} else if (op->addr.nbytes == 3) {
cmd |= SFC_CMD_ADDR_24BITS << SFC_CMD_ADDR_SHIFT;
} else {
cmd |= SFC_CMD_ADDR_XBITS << SFC_CMD_ADDR_SHIFT;
writel(op->addr.nbytes * 8 - 1, sfc->regbase + SFC_ABIT);
}
ctrl |= ((op->addr.buswidth >> 1) << SFC_CTRL_ADDR_BITS_SHIFT);
}
/* set DUMMY */
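/* The DUMMY field counts SCLK cycles, so the byte count is scaled by the dummy-phase bus width */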
if (op->dummy.nbytes) {
if (op->dummy.buswidth == 4)
cmd |= op->dummy.nbytes * 2 << SFC_CMD_DUMMY_SHIFT;
else if (op->dummy.buswidth == 2)
cmd |= op->dummy.nbytes * 4 << SFC_CMD_DUMMY_SHIFT;
else
cmd |= op->dummy.nbytes * 8 << SFC_CMD_DUMMY_SHIFT;
}
/* set DATA */
if (sfc->version >= SFC_VER_4) /* Clear it if no data to transfer */
writel(len, sfc->regbase + SFC_LEN_EXT);
else
cmd |= len << SFC_CMD_TRAN_BYTES_SHIFT;
if (len) {
if (op->data.dir == SPI_MEM_DATA_OUT)
cmd |= SFC_CMD_DIR_WR << SFC_CMD_DIR_SHIFT;
ctrl |= ((op->data.buswidth >> 1) << SFC_CTRL_DATA_BITS_SHIFT);
}
if (!len && op->addr.nbytes)
cmd |= SFC_CMD_DIR_WR << SFC_CMD_DIR_SHIFT;
/* set the Controller */
ctrl |= SFC_CTRL_PHASE_SEL_NEGETIVE;
cmd |= spi_get_chipselect(mem->spi, 0) << SFC_CMD_CS_SHIFT;
dev_dbg(sfc->dev, "sfc addr.nbytes=%x(x%d) dummy.nbytes=%x(x%d)\n",
op->addr.nbytes, op->addr.buswidth,
op->dummy.nbytes, op->dummy.buswidth);
dev_dbg(sfc->dev, "sfc ctrl=%x cmd=%x addr=%llx len=%x\n",
ctrl, cmd, op->addr.val, len);
writel(ctrl, sfc->regbase + SFC_CTRL);
writel(cmd, sfc->regbase + SFC_CMD);
if (op->addr.nbytes)
writel(op->addr.val, sfc->regbase + SFC_ADDR);
return 0;
}
static int rockchip_sfc_write_fifo(struct rockchip_sfc *sfc, const u8 *buf, int len)
{
u8 bytes = len & 0x3;
u32 dwords;
int tx_level;
u32 write_words;
u32 tmp = 0;
dwords = len >> 2;
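/* Write full 32-bit words first, limited each pass by the TX FIFO level reported by the controller */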
while (dwords) {
tx_level = rockchip_sfc_wait_txfifo_ready(sfc, 1000);
if (tx_level < 0)
return tx_level;
write_words = min_t(u32, tx_level, dwords);
iowrite32_rep(sfc->regbase + SFC_DATA, buf, write_words);
buf += write_words << 2;
dwords -= write_words;
}
/* write the rest non word aligned bytes */
if (bytes) {
tx_level = rockchip_sfc_wait_txfifo_ready(sfc, 1000);
if (tx_level < 0)
return tx_level;
memcpy(&tmp, buf, bytes);
writel(tmp, sfc->regbase + SFC_DATA);
}
return len;
}
static int rockchip_sfc_read_fifo(struct rockchip_sfc *sfc, u8 *buf, int len)
{
u8 bytes = len & 0x3;
u32 dwords;
u8 read_words;
int rx_level;
int tmp;
/* word aligned access only */
dwords = len >> 2;
while (dwords) {
rx_level = rockchip_sfc_wait_rxfifo_ready(sfc, 1000);
if (rx_level < 0)
return rx_level;
read_words = min_t(u32, rx_level, dwords);
ioread32_rep(sfc->regbase + SFC_DATA, buf, read_words);
buf += read_words << 2;
dwords -= read_words;
}
/* read the rest non word aligned bytes */
if (bytes) {
rx_level = rockchip_sfc_wait_rxfifo_ready(sfc, 1000);
if (rx_level < 0)
return rx_level;
tmp = readl(sfc->regbase + SFC_DATA);
memcpy(buf, &tmp, bytes);
}
return len;
}
static int rockchip_sfc_fifo_transfer_dma(struct rockchip_sfc *sfc, dma_addr_t dma_buf, size_t len)
{
writel(0xFFFFFFFF, sfc->regbase + SFC_ICLR);
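/* Clear stale interrupt status, then program the DMA address and trigger the controller's internal DMA master */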
writel((u32)dma_buf, sfc->regbase + SFC_DMA_ADDR);
writel(SFC_DMA_TRIGGER_START, sfc->regbase + SFC_DMA_TRIGGER);
return len;
}
static int rockchip_sfc_xfer_data_poll(struct rockchip_sfc *sfc,
const struct spi_mem_op *op, u32 len)
{
dev_dbg(sfc->dev, "sfc xfer_poll len=%x\n", len);
if (op->data.dir == SPI_MEM_DATA_OUT)
return rockchip_sfc_write_fifo(sfc, op->data.buf.out, len);
else
return rockchip_sfc_read_fifo(sfc, op->data.buf.in, len);
}
static int rockchip_sfc_xfer_data_dma(struct rockchip_sfc *sfc,
const struct spi_mem_op *op, u32 len)
{
int ret;
dev_dbg(sfc->dev, "sfc xfer_dma len=%x\n", len);
if (op->data.dir == SPI_MEM_DATA_OUT)
memcpy(sfc->buffer, op->data.buf.out, len);
ret = rockchip_sfc_fifo_transfer_dma(sfc, sfc->dma_buffer, len);
if (!wait_for_completion_timeout(&sfc->cp, msecs_to_jiffies(2000))) {
dev_err(sfc->dev, "DMA wait for transfer finish timeout\n");
ret = -ETIMEDOUT;
}
rockchip_sfc_irq_mask(sfc, SFC_IMR_DMA);
if (op->data.dir == SPI_MEM_DATA_IN)
memcpy(op->data.buf.in, sfc->buffer, len);
return ret;
}
static int rockchip_sfc_xfer_done(struct rockchip_sfc *sfc, u32 timeout_us)
{
int ret = 0;
u32 status;
ret = readl_poll_timeout(sfc->regbase + SFC_SR, status,
!(status & SFC_SR_IS_BUSY),
20, timeout_us);
if (ret) {
dev_err(sfc->dev, "wait sfc idle timeout\n");
rockchip_sfc_reset(sfc);
ret = -EIO;
}
return ret;
}
static int rockchip_sfc_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
struct rockchip_sfc *sfc = spi_controller_get_devdata(mem->spi->controller);
u32 len = op->data.nbytes;
int ret;
if (unlikely(mem->spi->max_speed_hz != sfc->frequency)) {
ret = clk_set_rate(sfc->clk, mem->spi->max_speed_hz);
if (ret)
return ret;
sfc->frequency = mem->spi->max_speed_hz;
dev_dbg(sfc->dev, "set_freq=%dHz real_freq=%ldHz\n",
sfc->frequency, clk_get_rate(sfc->clk));
}
rockchip_sfc_adjust_op_work((struct spi_mem_op *)op);
rockchip_sfc_xfer_setup(sfc, mem, op, len);
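/* Transfers below the DMA threshold use PIO through the FIFO; larger ones go through the coherent bounce buffer via the internal DMA */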
if (len) {
if (likely(sfc->use_dma) && len >= SFC_DMA_TRANS_THRETHOLD) {
init_completion(&sfc->cp);
rockchip_sfc_irq_unmask(sfc, SFC_IMR_DMA);
ret = rockchip_sfc_xfer_data_dma(sfc, op, len);
} else {
ret = rockchip_sfc_xfer_data_poll(sfc, op, len);
}
if (ret != len) {
dev_err(sfc->dev, "xfer data failed ret %d dir %d\n", ret, op->data.dir);
return -EIO;
}
}
return rockchip_sfc_xfer_done(sfc, 100000);
}
static int rockchip_sfc_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
struct rockchip_sfc *sfc = spi_controller_get_devdata(mem->spi->controller);
op->data.nbytes = min(op->data.nbytes, sfc->max_iosize);
return 0;
}
static const struct spi_controller_mem_ops rockchip_sfc_mem_ops = {
.exec_op = rockchip_sfc_exec_mem_op,
.adjust_op_size = rockchip_sfc_adjust_op_size,
};
static irqreturn_t rockchip_sfc_irq_handler(int irq, void *dev_id)
{
struct rockchip_sfc *sfc = dev_id;
u32 reg;
reg = readl(sfc->regbase + SFC_RISR);
/* Clear interrupt */
writel_relaxed(reg, sfc->regbase + SFC_ICLR);
if (reg & SFC_RISR_DMA) {
complete(&sfc->cp);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
static int rockchip_sfc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct spi_controller *host;
struct rockchip_sfc *sfc;
int ret;
host = devm_spi_alloc_host(&pdev->dev, sizeof(*sfc));
if (!host)
return -ENOMEM;
host->flags = SPI_CONTROLLER_HALF_DUPLEX;
host->mem_ops = &rockchip_sfc_mem_ops;
host->dev.of_node = pdev->dev.of_node;
host->mode_bits = SPI_TX_QUAD | SPI_TX_DUAL | SPI_RX_QUAD | SPI_RX_DUAL;
host->max_speed_hz = SFC_MAX_SPEED;
host->num_chipselect = SFC_MAX_CHIPSELECT_NUM;
sfc = spi_controller_get_devdata(host);
sfc->dev = dev;
sfc->regbase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sfc->regbase))
return PTR_ERR(sfc->regbase);
sfc->clk = devm_clk_get(&pdev->dev, "clk_sfc");
if (IS_ERR(sfc->clk)) {
dev_err(&pdev->dev, "Failed to get sfc interface clk\n");
return PTR_ERR(sfc->clk);
}
sfc->hclk = devm_clk_get(&pdev->dev, "hclk_sfc");
if (IS_ERR(sfc->hclk)) {
dev_err(&pdev->dev, "Failed to get sfc ahb clk\n");
return PTR_ERR(sfc->hclk);
}
sfc->use_dma = !of_property_read_bool(sfc->dev->of_node,
"rockchip,sfc-no-dma");
if (sfc->use_dma) {
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret) {
dev_warn(dev, "Unable to set dma mask\n");
return ret;
}
sfc->buffer = dmam_alloc_coherent(dev, SFC_MAX_IOSIZE_VER3,
&sfc->dma_buffer,
GFP_KERNEL);
if (!sfc->buffer)
return -ENOMEM;
}
ret = clk_prepare_enable(sfc->hclk);
if (ret) {
dev_err(&pdev->dev, "Failed to enable ahb clk\n");
goto err_hclk;
}
ret = clk_prepare_enable(sfc->clk);
if (ret) {
dev_err(&pdev->dev, "Failed to enable interface clk\n");
goto err_clk;
}
/* Find the irq */
ret = platform_get_irq(pdev, 0);
if (ret < 0)
goto err_irq;
ret = devm_request_irq(dev, ret, rockchip_sfc_irq_handler,
0, pdev->name, sfc);
if (ret) {
dev_err(dev, "Failed to request irq\n");
goto err_irq;
}
ret = rockchip_sfc_init(sfc);
if (ret)
goto err_irq;
sfc->max_iosize = rockchip_sfc_get_max_iosize(sfc);
sfc->version = rockchip_sfc_get_version(sfc);
ret = spi_register_controller(host);
if (ret)
goto err_irq;
return 0;
err_irq:
clk_disable_unprepare(sfc->clk);
err_clk:
clk_disable_unprepare(sfc->hclk);
err_hclk:
return ret;
}
static void rockchip_sfc_remove(struct platform_device *pdev)
{
struct spi_controller *host = platform_get_drvdata(pdev);
struct rockchip_sfc *sfc = platform_get_drvdata(pdev);
spi_unregister_controller(host);
clk_disable_unprepare(sfc->clk);
clk_disable_unprepare(sfc->hclk);
}
static const struct of_device_id rockchip_sfc_dt_ids[] = {
{ .compatible = "rockchip,sfc"},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rockchip_sfc_dt_ids);
static struct platform_driver rockchip_sfc_driver = {
.driver = {
.name = "rockchip-sfc",
.of_match_table = rockchip_sfc_dt_ids,
},
.probe = rockchip_sfc_probe,
.remove_new = rockchip_sfc_remove,
};
module_platform_driver(rockchip_sfc_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Rockchip Serial Flash Controller Driver");
MODULE_AUTHOR("Shawn Lin <[email protected]>");
MODULE_AUTHOR("Chris Morgan <[email protected]>");
MODULE_AUTHOR("Jon Lin <[email protected]>");
| linux-master | drivers/spi/spi-rockchip-sfc.c |
// SPDX-License-Identifier: GPL-2.0+
// Platform driver for Loongson SPI Support
// Copyright (C) 2023 Loongson Technology Corporation Limited
#include <linux/err.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include "spi-loongson.h"
static int loongson_spi_platform_probe(struct platform_device *pdev)
{
int ret;
void __iomem *reg_base;
struct device *dev = &pdev->dev;
reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg_base))
return PTR_ERR(reg_base);
ret = loongson_spi_init_controller(dev, reg_base);
if (ret)
return dev_err_probe(dev, ret, "failed to initialize controller\n");
return 0;
}
static const struct of_device_id loongson_spi_id_table[] = {
{ .compatible = "loongson,ls2k1000-spi" },
{ }
};
MODULE_DEVICE_TABLE(of, loongson_spi_id_table);
static struct platform_driver loongson_spi_plat_driver = {
.probe = loongson_spi_platform_probe,
.driver = {
.name = "loongson-spi",
.bus = &platform_bus_type,
.pm = &loongson_spi_dev_pm_ops,
.of_match_table = loongson_spi_id_table,
},
};
module_platform_driver(loongson_spi_plat_driver);
MODULE_DESCRIPTION("Loongson spi platform driver");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(SPI_LOONGSON_CORE);
| linux-master | drivers/spi/spi-loongson-plat.c |
// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2018 MediaTek Inc.
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/of.h>
#define SPIS_IRQ_EN_REG 0x0
#define SPIS_IRQ_CLR_REG 0x4
#define SPIS_IRQ_ST_REG 0x8
#define SPIS_IRQ_MASK_REG 0xc
#define SPIS_CFG_REG 0x10
#define SPIS_RX_DATA_REG 0x14
#define SPIS_TX_DATA_REG 0x18
#define SPIS_RX_DST_REG 0x1c
#define SPIS_TX_SRC_REG 0x20
#define SPIS_DMA_CFG_REG 0x30
#define SPIS_SOFT_RST_REG 0x40
/* SPIS_IRQ_EN_REG */
#define DMA_DONE_EN BIT(7)
#define DATA_DONE_EN BIT(2)
#define RSTA_DONE_EN BIT(1)
#define CMD_INVALID_EN BIT(0)
/* SPIS_IRQ_ST_REG */
#define DMA_DONE_ST BIT(7)
#define DATA_DONE_ST BIT(2)
#define RSTA_DONE_ST BIT(1)
#define CMD_INVALID_ST BIT(0)
/* SPIS_IRQ_MASK_REG */
#define DMA_DONE_MASK BIT(7)
#define DATA_DONE_MASK BIT(2)
#define RSTA_DONE_MASK BIT(1)
#define CMD_INVALID_MASK BIT(0)
/* SPIS_CFG_REG */
#define SPIS_TX_ENDIAN BIT(7)
#define SPIS_RX_ENDIAN BIT(6)
#define SPIS_TXMSBF BIT(5)
#define SPIS_RXMSBF BIT(4)
#define SPIS_CPHA BIT(3)
#define SPIS_CPOL BIT(2)
#define SPIS_TX_EN BIT(1)
#define SPIS_RX_EN BIT(0)
/* SPIS_DMA_CFG_REG */
#define TX_DMA_TRIG_EN BIT(31)
#define TX_DMA_EN BIT(30)
#define RX_DMA_EN BIT(29)
#define TX_DMA_LEN 0xfffff
/* SPIS_SOFT_RST_REG */
#define SPIS_DMA_ADDR_EN BIT(1)
#define SPIS_SOFT_RST BIT(0)
struct mtk_spi_slave {
struct device *dev;
void __iomem *base;
struct clk *spi_clk;
struct completion xfer_done;
struct spi_transfer *cur_transfer;
bool slave_aborted;
const struct mtk_spi_compatible *dev_comp;
};
struct mtk_spi_compatible {
const u32 max_fifo_size;
bool must_rx;
};
static const struct mtk_spi_compatible mt2712_compat = {
.max_fifo_size = 512,
};
static const struct mtk_spi_compatible mt8195_compat = {
.max_fifo_size = 128,
.must_rx = true,
};
static const struct of_device_id mtk_spi_slave_of_match[] = {
{ .compatible = "mediatek,mt2712-spi-slave",
.data = (void *)&mt2712_compat,},
{ .compatible = "mediatek,mt8195-spi-slave",
.data = (void *)&mt8195_compat,},
{}
};
MODULE_DEVICE_TABLE(of, mtk_spi_slave_of_match);
static void mtk_spi_slave_disable_dma(struct mtk_spi_slave *mdata)
{
u32 reg_val;
reg_val = readl(mdata->base + SPIS_DMA_CFG_REG);
reg_val &= ~RX_DMA_EN;
reg_val &= ~TX_DMA_EN;
writel(reg_val, mdata->base + SPIS_DMA_CFG_REG);
}
static void mtk_spi_slave_disable_xfer(struct mtk_spi_slave *mdata)
{
u32 reg_val;
reg_val = readl(mdata->base + SPIS_CFG_REG);
reg_val &= ~SPIS_TX_EN;
reg_val &= ~SPIS_RX_EN;
writel(reg_val, mdata->base + SPIS_CFG_REG);
}
static int mtk_spi_slave_wait_for_completion(struct mtk_spi_slave *mdata)
{
if (wait_for_completion_interruptible(&mdata->xfer_done) ||
mdata->slave_aborted) {
dev_err(mdata->dev, "interrupted\n");
return -EINTR;
}
return 0;
}
static int mtk_spi_slave_prepare_message(struct spi_controller *ctlr,
struct spi_message *msg)
{
struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
struct spi_device *spi = msg->spi;
bool cpha, cpol;
u32 reg_val;
cpha = spi->mode & SPI_CPHA ? 1 : 0;
cpol = spi->mode & SPI_CPOL ? 1 : 0;
reg_val = readl(mdata->base + SPIS_CFG_REG);
if (cpha)
reg_val |= SPIS_CPHA;
else
reg_val &= ~SPIS_CPHA;
if (cpol)
reg_val |= SPIS_CPOL;
else
reg_val &= ~SPIS_CPOL;
if (spi->mode & SPI_LSB_FIRST)
reg_val &= ~(SPIS_TXMSBF | SPIS_RXMSBF);
else
reg_val |= SPIS_TXMSBF | SPIS_RXMSBF;
reg_val &= ~SPIS_TX_ENDIAN;
reg_val &= ~SPIS_RX_ENDIAN;
writel(reg_val, mdata->base + SPIS_CFG_REG);
return 0;
}
static int mtk_spi_slave_fifo_transfer(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
int reg_val, cnt, remainder, ret;
writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);
reg_val = readl(mdata->base + SPIS_CFG_REG);
if (xfer->rx_buf)
reg_val |= SPIS_RX_EN;
if (xfer->tx_buf)
reg_val |= SPIS_TX_EN;
writel(reg_val, mdata->base + SPIS_CFG_REG);
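/* Preload the TX FIFO one word at a time; trailing bytes are packed into a final word */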
cnt = xfer->len / 4;
if (xfer->tx_buf)
iowrite32_rep(mdata->base + SPIS_TX_DATA_REG,
xfer->tx_buf, cnt);
remainder = xfer->len % 4;
if (xfer->tx_buf && remainder > 0) {
reg_val = 0;
memcpy(&reg_val, xfer->tx_buf + cnt * 4, remainder);
writel(reg_val, mdata->base + SPIS_TX_DATA_REG);
}
ret = mtk_spi_slave_wait_for_completion(mdata);
if (ret) {
mtk_spi_slave_disable_xfer(mdata);
writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);
}
return ret;
}
static int mtk_spi_slave_dma_transfer(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
struct device *dev = mdata->dev;
int reg_val, ret;
writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);
if (xfer->tx_buf) {
/* tx_buf is a const void* where we need a void * for
* the dma mapping
*/
void *nonconst_tx = (void *)xfer->tx_buf;
xfer->tx_dma = dma_map_single(dev, nonconst_tx,
xfer->len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, xfer->tx_dma)) {
ret = -ENOMEM;
goto disable_transfer;
}
}
if (xfer->rx_buf) {
xfer->rx_dma = dma_map_single(dev, xfer->rx_buf,
xfer->len, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, xfer->rx_dma)) {
ret = -ENOMEM;
goto unmap_txdma;
}
}
writel(xfer->tx_dma, mdata->base + SPIS_TX_SRC_REG);
writel(xfer->rx_dma, mdata->base + SPIS_RX_DST_REG);
writel(SPIS_DMA_ADDR_EN, mdata->base + SPIS_SOFT_RST_REG);
/* enable config reg tx rx_enable */
reg_val = readl(mdata->base + SPIS_CFG_REG);
if (xfer->tx_buf)
reg_val |= SPIS_TX_EN;
if (xfer->rx_buf)
reg_val |= SPIS_RX_EN;
writel(reg_val, mdata->base + SPIS_CFG_REG);
/* config dma */
reg_val = 0;
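/* The DMA length field is programmed as the transfer length minus one */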
reg_val |= (xfer->len - 1) & TX_DMA_LEN;
writel(reg_val, mdata->base + SPIS_DMA_CFG_REG);
reg_val = readl(mdata->base + SPIS_DMA_CFG_REG);
if (xfer->tx_buf)
reg_val |= TX_DMA_EN;
if (xfer->rx_buf)
reg_val |= RX_DMA_EN;
reg_val |= TX_DMA_TRIG_EN;
writel(reg_val, mdata->base + SPIS_DMA_CFG_REG);
ret = mtk_spi_slave_wait_for_completion(mdata);
if (ret)
goto unmap_rxdma;
return 0;
unmap_rxdma:
if (xfer->rx_buf)
dma_unmap_single(dev, xfer->rx_dma,
xfer->len, DMA_FROM_DEVICE);
unmap_txdma:
if (xfer->tx_buf)
dma_unmap_single(dev, xfer->tx_dma,
xfer->len, DMA_TO_DEVICE);
disable_transfer:
mtk_spi_slave_disable_dma(mdata);
mtk_spi_slave_disable_xfer(mdata);
writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);
return ret;
}
static int mtk_spi_slave_transfer_one(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
reinit_completion(&mdata->xfer_done);
mdata->slave_aborted = false;
mdata->cur_transfer = xfer;
if (xfer->len > mdata->dev_comp->max_fifo_size)
return mtk_spi_slave_dma_transfer(ctlr, spi, xfer);
else
return mtk_spi_slave_fifo_transfer(ctlr, spi, xfer);
}
static int mtk_spi_slave_setup(struct spi_device *spi)
{
struct mtk_spi_slave *mdata = spi_controller_get_devdata(spi->master);
u32 reg_val;
reg_val = DMA_DONE_EN | DATA_DONE_EN |
RSTA_DONE_EN | CMD_INVALID_EN;
writel(reg_val, mdata->base + SPIS_IRQ_EN_REG);
reg_val = DMA_DONE_MASK | DATA_DONE_MASK |
RSTA_DONE_MASK | CMD_INVALID_MASK;
writel(reg_val, mdata->base + SPIS_IRQ_MASK_REG);
mtk_spi_slave_disable_dma(mdata);
mtk_spi_slave_disable_xfer(mdata);
return 0;
}
static int mtk_slave_abort(struct spi_controller *ctlr)
{
struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
mdata->slave_aborted = true;
complete(&mdata->xfer_done);
return 0;
}
static irqreturn_t mtk_spi_slave_interrupt(int irq, void *dev_id)
{
struct spi_controller *ctlr = dev_id;
struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
struct spi_transfer *trans = mdata->cur_transfer;
u32 int_status, reg_val, cnt, remainder;
int_status = readl(mdata->base + SPIS_IRQ_ST_REG);
writel(int_status, mdata->base + SPIS_IRQ_CLR_REG);
if (!trans)
return IRQ_NONE;
if ((int_status & DMA_DONE_ST) &&
((int_status & DATA_DONE_ST) ||
(int_status & RSTA_DONE_ST))) {
writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);
if (trans->tx_buf)
dma_unmap_single(mdata->dev, trans->tx_dma,
trans->len, DMA_TO_DEVICE);
if (trans->rx_buf)
dma_unmap_single(mdata->dev, trans->rx_dma,
trans->len, DMA_FROM_DEVICE);
mtk_spi_slave_disable_dma(mdata);
mtk_spi_slave_disable_xfer(mdata);
}
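/* PIO completion: drain received data (and any partial trailing word) from the RX FIFO */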
if ((!(int_status & DMA_DONE_ST)) &&
((int_status & DATA_DONE_ST) ||
(int_status & RSTA_DONE_ST))) {
cnt = trans->len / 4;
if (trans->rx_buf)
ioread32_rep(mdata->base + SPIS_RX_DATA_REG,
trans->rx_buf, cnt);
remainder = trans->len % 4;
if (trans->rx_buf && remainder > 0) {
reg_val = readl(mdata->base + SPIS_RX_DATA_REG);
memcpy(trans->rx_buf + (cnt * 4),
&reg_val, remainder);
}
mtk_spi_slave_disable_xfer(mdata);
}
if (int_status & CMD_INVALID_ST) {
dev_warn(&ctlr->dev, "cmd invalid\n");
return IRQ_NONE;
}
mdata->cur_transfer = NULL;
complete(&mdata->xfer_done);
return IRQ_HANDLED;
}
static int mtk_spi_slave_probe(struct platform_device *pdev)
{
struct spi_controller *ctlr;
struct mtk_spi_slave *mdata;
int irq, ret;
const struct of_device_id *of_id;
ctlr = spi_alloc_slave(&pdev->dev, sizeof(*mdata));
if (!ctlr) {
dev_err(&pdev->dev, "failed to alloc spi slave\n");
return -ENOMEM;
}
ctlr->auto_runtime_pm = true;
ctlr->dev.of_node = pdev->dev.of_node;
ctlr->mode_bits = SPI_CPOL | SPI_CPHA;
ctlr->mode_bits |= SPI_LSB_FIRST;
ctlr->prepare_message = mtk_spi_slave_prepare_message;
ctlr->transfer_one = mtk_spi_slave_transfer_one;
ctlr->setup = mtk_spi_slave_setup;
ctlr->slave_abort = mtk_slave_abort;
of_id = of_match_node(mtk_spi_slave_of_match, pdev->dev.of_node);
if (!of_id) {
dev_err(&pdev->dev, "failed to probe of_node\n");
ret = -EINVAL;
goto err_put_ctlr;
}
mdata = spi_controller_get_devdata(ctlr);
mdata->dev_comp = of_id->data;
if (mdata->dev_comp->must_rx)
ctlr->flags = SPI_CONTROLLER_MUST_RX;
platform_set_drvdata(pdev, ctlr);
init_completion(&mdata->xfer_done);
mdata->dev = &pdev->dev;
mdata->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mdata->base)) {
ret = PTR_ERR(mdata->base);
goto err_put_ctlr;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
goto err_put_ctlr;
}
ret = devm_request_irq(&pdev->dev, irq, mtk_spi_slave_interrupt,
IRQF_TRIGGER_NONE, dev_name(&pdev->dev), ctlr);
if (ret) {
dev_err(&pdev->dev, "failed to register irq (%d)\n", ret);
goto err_put_ctlr;
}
mdata->spi_clk = devm_clk_get(&pdev->dev, "spi");
if (IS_ERR(mdata->spi_clk)) {
ret = PTR_ERR(mdata->spi_clk);
dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret);
goto err_put_ctlr;
}
ret = clk_prepare_enable(mdata->spi_clk);
if (ret < 0) {
dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret);
goto err_put_ctlr;
}
pm_runtime_enable(&pdev->dev);
ret = devm_spi_register_controller(&pdev->dev, ctlr);
if (ret) {
dev_err(&pdev->dev,
"failed to register slave controller(%d)\n", ret);
clk_disable_unprepare(mdata->spi_clk);
goto err_disable_runtime_pm;
}
clk_disable_unprepare(mdata->spi_clk);
return 0;
err_disable_runtime_pm:
pm_runtime_disable(&pdev->dev);
err_put_ctlr:
spi_controller_put(ctlr);
return ret;
}
static void mtk_spi_slave_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
}
#ifdef CONFIG_PM_SLEEP
static int mtk_spi_slave_suspend(struct device *dev)
{
struct spi_controller *ctlr = dev_get_drvdata(dev);
struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
int ret;
ret = spi_controller_suspend(ctlr);
if (ret)
return ret;
if (!pm_runtime_suspended(dev))
clk_disable_unprepare(mdata->spi_clk);
return ret;
}
static int mtk_spi_slave_resume(struct device *dev)
{
struct spi_controller *ctlr = dev_get_drvdata(dev);
struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
int ret;
if (!pm_runtime_suspended(dev)) {
ret = clk_prepare_enable(mdata->spi_clk);
if (ret < 0) {
dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
return ret;
}
}
ret = spi_controller_resume(ctlr);
if (ret < 0)
clk_disable_unprepare(mdata->spi_clk);
return ret;
}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
static int mtk_spi_slave_runtime_suspend(struct device *dev)
{
struct spi_controller *ctlr = dev_get_drvdata(dev);
struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
clk_disable_unprepare(mdata->spi_clk);
return 0;
}
static int mtk_spi_slave_runtime_resume(struct device *dev)
{
struct spi_controller *ctlr = dev_get_drvdata(dev);
struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
int ret;
ret = clk_prepare_enable(mdata->spi_clk);
if (ret < 0) {
dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
return ret;
}
return 0;
}
#endif /* CONFIG_PM */
static const struct dev_pm_ops mtk_spi_slave_pm = {
SET_SYSTEM_SLEEP_PM_OPS(mtk_spi_slave_suspend, mtk_spi_slave_resume)
SET_RUNTIME_PM_OPS(mtk_spi_slave_runtime_suspend,
mtk_spi_slave_runtime_resume, NULL)
};
static struct platform_driver mtk_spi_slave_driver = {
.driver = {
.name = "mtk-spi-slave",
.pm = &mtk_spi_slave_pm,
.of_match_table = mtk_spi_slave_of_match,
},
.probe = mtk_spi_slave_probe,
.remove_new = mtk_spi_slave_remove,
};
module_platform_driver(mtk_spi_slave_driver);
MODULE_DESCRIPTION("MTK SPI Slave Controller driver");
MODULE_AUTHOR("Leilk Liu <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:mtk-spi-slave");
| linux-master | drivers/spi/spi-slave-mt27xx.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* SH SCI SPI interface
*
* Copyright (c) 2008 Magnus Damm
*
* Based on S3C24XX GPIO based SPI driver, which is:
* Copyright (c) 2006 Ben Dooks
* Copyright (c) 2006 Simtec Electronics
*/
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/module.h>
#include <asm/spi.h>
#include <asm/io.h>
struct sh_sci_spi {
struct spi_bitbang bitbang;
void __iomem *membase;
unsigned char val;
struct sh_spi_info *info;
struct platform_device *dev;
};
#define SCSPTR(sp) (sp->membase + 0x1c)
#define PIN_SCK (1 << 2)
#define PIN_TXD (1 << 0)
#define PIN_RXD PIN_TXD
#define PIN_INIT ((1 << 1) | (1 << 3) | PIN_SCK | PIN_TXD)
static inline void setbits(struct sh_sci_spi *sp, int bits, int on)
{
/*
* We are the only user of SCSPTR so no locking is required.
* Reading bit 2 and 0 in SCSPTR gives pin state as input.
* Writing the same bits sets the output value.
* This makes regular read-modify-write difficult so we
* use sp->val to keep track of the latest register value.
*/
if (on)
sp->val |= bits;
else
sp->val &= ~bits;
iowrite8(sp->val, SCSPTR(sp));
}
static inline void setsck(struct spi_device *dev, int on)
{
setbits(spi_controller_get_devdata(dev->controller), PIN_SCK, on);
}
static inline void setmosi(struct spi_device *dev, int on)
{
setbits(spi_controller_get_devdata(dev->controller), PIN_TXD, on);
}
static inline u32 getmiso(struct spi_device *dev)
{
struct sh_sci_spi *sp = spi_controller_get_devdata(dev->controller);
return (ioread8(SCSPTR(sp)) & PIN_RXD) ? 1 : 0;
}
#define spidelay(x) ndelay(x)
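/* spi-bitbang-txrx.h provides the generic bit-banging helpers built on the setsck/setmosi/getmiso accessors defined above */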
#include "spi-bitbang-txrx.h"
static u32 sh_sci_spi_txrx_mode0(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits,
unsigned flags)
{
return bitbang_txrx_be_cpha0(spi, nsecs, 0, flags, word, bits);
}
static u32 sh_sci_spi_txrx_mode1(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits,
unsigned flags)
{
return bitbang_txrx_be_cpha1(spi, nsecs, 0, flags, word, bits);
}
static u32 sh_sci_spi_txrx_mode2(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits,
unsigned flags)
{
return bitbang_txrx_be_cpha0(spi, nsecs, 1, flags, word, bits);
}
static u32 sh_sci_spi_txrx_mode3(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits,
unsigned flags)
{
return bitbang_txrx_be_cpha1(spi, nsecs, 1, flags, word, bits);
}
static void sh_sci_spi_chipselect(struct spi_device *dev, int value)
{
struct sh_sci_spi *sp = spi_controller_get_devdata(dev->controller);
if (sp->info->chip_select)
(sp->info->chip_select)(sp->info, spi_get_chipselect(dev, 0), value);
}
static int sh_sci_spi_probe(struct platform_device *dev)
{
struct resource *r;
struct spi_controller *host;
struct sh_sci_spi *sp;
int ret;
host = spi_alloc_host(&dev->dev, sizeof(struct sh_sci_spi));
if (host == NULL) {
dev_err(&dev->dev, "failed to allocate spi host\n");
ret = -ENOMEM;
goto err0;
}
sp = spi_controller_get_devdata(host);
platform_set_drvdata(dev, sp);
sp->info = dev_get_platdata(&dev->dev);
if (!sp->info) {
dev_err(&dev->dev, "platform data is missing\n");
ret = -ENOENT;
goto err1;
}
/* setup spi bitbang adaptor */
sp->bitbang.master = host;
sp->bitbang.master->bus_num = sp->info->bus_num;
sp->bitbang.master->num_chipselect = sp->info->num_chipselect;
sp->bitbang.chipselect = sh_sci_spi_chipselect;
sp->bitbang.txrx_word[SPI_MODE_0] = sh_sci_spi_txrx_mode0;
sp->bitbang.txrx_word[SPI_MODE_1] = sh_sci_spi_txrx_mode1;
sp->bitbang.txrx_word[SPI_MODE_2] = sh_sci_spi_txrx_mode2;
sp->bitbang.txrx_word[SPI_MODE_3] = sh_sci_spi_txrx_mode3;
r = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (r == NULL) {
ret = -ENOENT;
goto err1;
}
sp->membase = ioremap(r->start, resource_size(r));
if (!sp->membase) {
ret = -ENXIO;
goto err1;
}
sp->val = ioread8(SCSPTR(sp));
setbits(sp, PIN_INIT, 1);
ret = spi_bitbang_start(&sp->bitbang);
if (!ret)
return 0;
setbits(sp, PIN_INIT, 0);
iounmap(sp->membase);
err1:
spi_controller_put(sp->bitbang.master);
err0:
return ret;
}
static void sh_sci_spi_remove(struct platform_device *dev)
{
struct sh_sci_spi *sp = platform_get_drvdata(dev);
spi_bitbang_stop(&sp->bitbang);
setbits(sp, PIN_INIT, 0);
iounmap(sp->membase);
spi_controller_put(sp->bitbang.master);
}
static struct platform_driver sh_sci_spi_drv = {
.probe = sh_sci_spi_probe,
.remove_new = sh_sci_spi_remove,
.driver = {
.name = "spi_sh_sci",
},
};
module_platform_driver(sh_sci_spi_drv);
MODULE_DESCRIPTION("SH SCI SPI Driver");
MODULE_AUTHOR("Magnus Damm <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:spi_sh_sci");
| linux-master | drivers/spi/spi-sh-sci.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* SPI controller driver for the Atheros AR71XX/AR724X/AR913X SoCs
*
* Copyright (C) 2009-2011 Gabor Juhos <[email protected]>
*
* This driver has been based on the spi-gpio.c:
* Copyright (C) 2006,2008 David Brownell
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/err.h>
#define DRV_NAME "ath79-spi"
#define ATH79_SPI_RRW_DELAY_FACTOR 12000
#define MHZ (1000 * 1000)
#define AR71XX_SPI_REG_FS 0x00 /* Function Select */
#define AR71XX_SPI_REG_CTRL 0x04 /* SPI Control */
#define AR71XX_SPI_REG_IOC 0x08 /* SPI I/O Control */
#define AR71XX_SPI_REG_RDS 0x0c /* Read Data Shift */
#define AR71XX_SPI_FS_GPIO BIT(0) /* Enable GPIO mode */
#define AR71XX_SPI_IOC_DO BIT(0) /* Data Out pin */
#define AR71XX_SPI_IOC_CLK BIT(8) /* CLK pin */
#define AR71XX_SPI_IOC_CS(n) BIT(16 + (n))
struct ath79_spi {
struct spi_bitbang bitbang;
u32 ioc_base;
u32 reg_ctrl;
void __iomem *base;
struct clk *clk;
unsigned int rrw_delay;
};
static inline u32 ath79_spi_rr(struct ath79_spi *sp, unsigned int reg)
{
return ioread32(sp->base + reg);
}
static inline void ath79_spi_wr(struct ath79_spi *sp, unsigned int reg, u32 val)
{
iowrite32(val, sp->base + reg);
}
static inline struct ath79_spi *ath79_spidev_to_sp(struct spi_device *spi)
{
return spi_controller_get_devdata(spi->controller);
}
static inline void ath79_spi_delay(struct ath79_spi *sp, unsigned int nsecs)
{
if (nsecs > sp->rrw_delay)
ndelay(nsecs - sp->rrw_delay);
}
static void ath79_spi_chipselect(struct spi_device *spi, int is_active)
{
struct ath79_spi *sp = ath79_spidev_to_sp(spi);
int cs_high = (spi->mode & SPI_CS_HIGH) ? is_active : !is_active;
u32 cs_bit = AR71XX_SPI_IOC_CS(spi_get_chipselect(spi, 0));
if (cs_high)
sp->ioc_base |= cs_bit;
else
sp->ioc_base &= ~cs_bit;
ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base);
}
static void ath79_spi_enable(struct ath79_spi *sp)
{
/* enable GPIO mode */
ath79_spi_wr(sp, AR71XX_SPI_REG_FS, AR71XX_SPI_FS_GPIO);
/* save CTRL register */
sp->reg_ctrl = ath79_spi_rr(sp, AR71XX_SPI_REG_CTRL);
sp->ioc_base = ath79_spi_rr(sp, AR71XX_SPI_REG_IOC);
/* clear clk and mosi in the base state */
sp->ioc_base &= ~(AR71XX_SPI_IOC_DO | AR71XX_SPI_IOC_CLK);
/* TODO: setup speed? */
ath79_spi_wr(sp, AR71XX_SPI_REG_CTRL, 0x43);
}
static void ath79_spi_disable(struct ath79_spi *sp)
{
/* restore CTRL register */
ath79_spi_wr(sp, AR71XX_SPI_REG_CTRL, sp->reg_ctrl);
/* disable GPIO mode */
ath79_spi_wr(sp, AR71XX_SPI_REG_FS, 0);
}
static u32 ath79_spi_txrx_mode0(struct spi_device *spi, unsigned int nsecs,
u32 word, u8 bits, unsigned flags)
{
struct ath79_spi *sp = ath79_spidev_to_sp(spi);
u32 ioc = sp->ioc_base;
/* clock starts at inactive polarity */
for (word <<= (32 - bits); likely(bits); bits--) {
u32 out;
if (word & (1 << 31))
out = ioc | AR71XX_SPI_IOC_DO;
else
out = ioc & ~AR71XX_SPI_IOC_DO;
/* setup MSB (to target) on trailing edge */
ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, out);
ath79_spi_delay(sp, nsecs);
ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, out | AR71XX_SPI_IOC_CLK);
ath79_spi_delay(sp, nsecs);
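		/* return the clock to its inactive level after the final bit */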
if (bits == 1)
ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, out);
word <<= 1;
}
return ath79_spi_rr(sp, AR71XX_SPI_REG_RDS);
}
static int ath79_exec_mem_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
struct ath79_spi *sp = ath79_spidev_to_sp(mem->spi);
	/* Ensure the read is performed on the device connected to hardware cs0 */
if (spi_get_chipselect(mem->spi, 0) || spi_get_csgpiod(mem->spi, 0))
return -ENOTSUPP;
/* Only use for fast-read op. */
if (op->cmd.opcode != 0x0b || op->data.dir != SPI_MEM_DATA_IN ||
op->addr.nbytes != 3 || op->dummy.nbytes != 1)
return -ENOTSUPP;
/* disable GPIO mode */
ath79_spi_wr(sp, AR71XX_SPI_REG_FS, 0);
memcpy_fromio(op->data.buf.in, sp->base + op->addr.val, op->data.nbytes);
/* enable GPIO mode */
ath79_spi_wr(sp, AR71XX_SPI_REG_FS, AR71XX_SPI_FS_GPIO);
/* restore IOC register */
ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base);
return 0;
}
static const struct spi_controller_mem_ops ath79_mem_ops = {
.exec_op = ath79_exec_mem_op,
};
static int ath79_spi_probe(struct platform_device *pdev)
{
struct spi_controller *host;
struct ath79_spi *sp;
unsigned long rate;
int ret;
host = spi_alloc_host(&pdev->dev, sizeof(*sp));
if (host == NULL) {
dev_err(&pdev->dev, "failed to allocate spi host\n");
return -ENOMEM;
}
sp = spi_controller_get_devdata(host);
host->dev.of_node = pdev->dev.of_node;
platform_set_drvdata(pdev, sp);
host->use_gpio_descriptors = true;
host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
host->flags = SPI_CONTROLLER_GPIO_SS;
host->num_chipselect = 3;
host->mem_ops = &ath79_mem_ops;
sp->bitbang.master = host;
sp->bitbang.chipselect = ath79_spi_chipselect;
sp->bitbang.txrx_word[SPI_MODE_0] = ath79_spi_txrx_mode0;
sp->bitbang.flags = SPI_CS_HIGH;
sp->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sp->base)) {
ret = PTR_ERR(sp->base);
goto err_put_host;
}
sp->clk = devm_clk_get(&pdev->dev, "ahb");
if (IS_ERR(sp->clk)) {
ret = PTR_ERR(sp->clk);
goto err_put_host;
}
ret = clk_prepare_enable(sp->clk);
if (ret)
goto err_put_host;
rate = DIV_ROUND_UP(clk_get_rate(sp->clk), MHZ);
if (!rate) {
ret = -EINVAL;
goto err_clk_disable;
}
sp->rrw_delay = ATH79_SPI_RRW_DELAY_FACTOR / rate;
dev_dbg(&pdev->dev, "register read/write delay is %u nsecs\n",
sp->rrw_delay);
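	/*
	 * Illustrative figures only (assuming a 200 MHz AHB clock, which is
	 * common but board dependent): rate = 200, so rrw_delay becomes
	 * 12000 / 200 = 60 ns of register access overhead, which is credited
	 * against each requested bit delay in ath79_spi_delay().
	 */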
ath79_spi_enable(sp);
ret = spi_bitbang_start(&sp->bitbang);
if (ret)
goto err_disable;
return 0;
err_disable:
ath79_spi_disable(sp);
err_clk_disable:
clk_disable_unprepare(sp->clk);
err_put_host:
spi_controller_put(host);
return ret;
}
static void ath79_spi_remove(struct platform_device *pdev)
{
struct ath79_spi *sp = platform_get_drvdata(pdev);
spi_bitbang_stop(&sp->bitbang);
ath79_spi_disable(sp);
clk_disable_unprepare(sp->clk);
spi_controller_put(sp->bitbang.master);
}
static void ath79_spi_shutdown(struct platform_device *pdev)
{
ath79_spi_remove(pdev);
}
static const struct of_device_id ath79_spi_of_match[] = {
{ .compatible = "qca,ar7100-spi", },
{ },
};
MODULE_DEVICE_TABLE(of, ath79_spi_of_match);
static struct platform_driver ath79_spi_driver = {
.probe = ath79_spi_probe,
.remove_new = ath79_spi_remove,
.shutdown = ath79_spi_shutdown,
.driver = {
.name = DRV_NAME,
.of_match_table = ath79_spi_of_match,
},
};
module_platform_driver(ath79_spi_driver);
MODULE_DESCRIPTION("SPI controller driver for Atheros AR71XX/AR724X/AR913X");
MODULE_AUTHOR("Gabor Juhos <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRV_NAME);
| linux-master | drivers/spi/spi-ath79.c |
// SPDX-License-Identifier: GPL-2.0
//
// Driver for AT91 USART Controllers as SPI
//
// Copyright (C) 2018 Microchip Technology Inc.
//
// Author: Radu Pirea <[email protected]>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-direction.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gpio/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#define US_CR 0x00
#define US_MR 0x04
#define US_IER 0x08
#define US_IDR 0x0C
#define US_CSR 0x14
#define US_RHR 0x18
#define US_THR 0x1C
#define US_BRGR 0x20
#define US_VERSION 0xFC
#define US_CR_RSTRX BIT(2)
#define US_CR_RSTTX BIT(3)
#define US_CR_RXEN BIT(4)
#define US_CR_RXDIS BIT(5)
#define US_CR_TXEN BIT(6)
#define US_CR_TXDIS BIT(7)
#define US_MR_SPI_HOST 0x0E
#define US_MR_CHRL GENMASK(7, 6)
#define US_MR_CPHA BIT(8)
#define US_MR_CPOL BIT(16)
#define US_MR_CLKO BIT(18)
#define US_MR_WRDBT BIT(20)
#define US_MR_LOOP BIT(15)
#define US_IR_RXRDY BIT(0)
#define US_IR_TXRDY BIT(1)
#define US_IR_OVRE BIT(5)
#define US_BRGR_SIZE BIT(16)
#define US_MIN_CLK_DIV 0x06
#define US_MAX_CLK_DIV BIT(16)
#define US_RESET (US_CR_RSTRX | US_CR_RSTTX)
#define US_DISABLE (US_CR_RXDIS | US_CR_TXDIS)
#define US_ENABLE (US_CR_RXEN | US_CR_TXEN)
#define US_OVRE_RXRDY_IRQS (US_IR_OVRE | US_IR_RXRDY)
#define US_INIT \
(US_MR_SPI_HOST | US_MR_CHRL | US_MR_CLKO | US_MR_WRDBT)
#define US_DMA_MIN_BYTES 16
#define US_DMA_TIMEOUT (msecs_to_jiffies(1000))
/* Register access macros */
#define at91_usart_spi_readl(port, reg) \
readl_relaxed((port)->regs + US_##reg)
#define at91_usart_spi_writel(port, reg, value) \
writel_relaxed((value), (port)->regs + US_##reg)
#define at91_usart_spi_readb(port, reg) \
readb_relaxed((port)->regs + US_##reg)
#define at91_usart_spi_writeb(port, reg, value) \
writeb_relaxed((value), (port)->regs + US_##reg)
struct at91_usart_spi {
struct platform_device *mpdev;
struct spi_transfer *current_transfer;
void __iomem *regs;
struct device *dev;
struct clk *clk;
struct completion xfer_completion;
	/* used in the interrupt handler to protect data reading */
spinlock_t lock;
phys_addr_t phybase;
int irq;
unsigned int current_tx_remaining_bytes;
unsigned int current_rx_remaining_bytes;
u32 spi_clk;
u32 status;
bool xfer_failed;
bool use_dma;
};
static void dma_callback(void *data)
{
struct spi_controller *ctlr = data;
struct at91_usart_spi *aus = spi_controller_get_devdata(ctlr);
at91_usart_spi_writel(aus, IER, US_IR_RXRDY);
aus->current_rx_remaining_bytes = 0;
complete(&aus->xfer_completion);
}
static bool at91_usart_spi_can_dma(struct spi_controller *ctrl,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct at91_usart_spi *aus = spi_controller_get_devdata(ctrl);
return aus->use_dma && xfer->len >= US_DMA_MIN_BYTES;
}
static int at91_usart_spi_configure_dma(struct spi_controller *ctlr,
struct at91_usart_spi *aus)
{
struct dma_slave_config slave_config;
struct device *dev = &aus->mpdev->dev;
phys_addr_t phybase = aus->phybase;
dma_cap_mask_t mask;
int err = 0;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
ctlr->dma_tx = dma_request_chan(dev, "tx");
if (IS_ERR_OR_NULL(ctlr->dma_tx)) {
if (IS_ERR(ctlr->dma_tx)) {
err = PTR_ERR(ctlr->dma_tx);
goto at91_usart_spi_error_clear;
}
dev_dbg(dev,
"DMA TX channel not available, SPI unable to use DMA\n");
err = -EBUSY;
goto at91_usart_spi_error_clear;
}
ctlr->dma_rx = dma_request_chan(dev, "rx");
if (IS_ERR_OR_NULL(ctlr->dma_rx)) {
if (IS_ERR(ctlr->dma_rx)) {
err = PTR_ERR(ctlr->dma_rx);
goto at91_usart_spi_error;
}
dev_dbg(dev,
"DMA RX channel not available, SPI unable to use DMA\n");
err = -EBUSY;
goto at91_usart_spi_error;
}
slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
slave_config.dst_addr = (dma_addr_t)phybase + US_THR;
slave_config.src_addr = (dma_addr_t)phybase + US_RHR;
slave_config.src_maxburst = 1;
slave_config.dst_maxburst = 1;
slave_config.device_fc = false;
slave_config.direction = DMA_DEV_TO_MEM;
if (dmaengine_slave_config(ctlr->dma_rx, &slave_config)) {
dev_err(&ctlr->dev,
"failed to configure rx dma channel\n");
err = -EINVAL;
goto at91_usart_spi_error;
}
slave_config.direction = DMA_MEM_TO_DEV;
if (dmaengine_slave_config(ctlr->dma_tx, &slave_config)) {
dev_err(&ctlr->dev,
"failed to configure tx dma channel\n");
err = -EINVAL;
goto at91_usart_spi_error;
}
aus->use_dma = true;
return 0;
at91_usart_spi_error:
if (!IS_ERR_OR_NULL(ctlr->dma_tx))
dma_release_channel(ctlr->dma_tx);
if (!IS_ERR_OR_NULL(ctlr->dma_rx))
dma_release_channel(ctlr->dma_rx);
ctlr->dma_tx = NULL;
ctlr->dma_rx = NULL;
at91_usart_spi_error_clear:
return err;
}
static void at91_usart_spi_release_dma(struct spi_controller *ctlr)
{
if (ctlr->dma_rx)
dma_release_channel(ctlr->dma_rx);
if (ctlr->dma_tx)
dma_release_channel(ctlr->dma_tx);
}
static void at91_usart_spi_stop_dma(struct spi_controller *ctlr)
{
if (ctlr->dma_rx)
dmaengine_terminate_all(ctlr->dma_rx);
if (ctlr->dma_tx)
dmaengine_terminate_all(ctlr->dma_tx);
}
static int at91_usart_spi_dma_transfer(struct spi_controller *ctlr,
struct spi_transfer *xfer)
{
struct at91_usart_spi *aus = spi_controller_get_devdata(ctlr);
struct dma_chan *rxchan = ctlr->dma_rx;
struct dma_chan *txchan = ctlr->dma_tx;
struct dma_async_tx_descriptor *rxdesc;
struct dma_async_tx_descriptor *txdesc;
dma_cookie_t cookie;
/* Disable RX interrupt */
at91_usart_spi_writel(aus, IDR, US_IR_RXRDY);
rxdesc = dmaengine_prep_slave_sg(rxchan,
xfer->rx_sg.sgl,
xfer->rx_sg.nents,
DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT |
DMA_CTRL_ACK);
if (!rxdesc)
goto at91_usart_spi_err_dma;
txdesc = dmaengine_prep_slave_sg(txchan,
xfer->tx_sg.sgl,
xfer->tx_sg.nents,
DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT |
DMA_CTRL_ACK);
if (!txdesc)
goto at91_usart_spi_err_dma;
rxdesc->callback = dma_callback;
rxdesc->callback_param = ctlr;
cookie = rxdesc->tx_submit(rxdesc);
if (dma_submit_error(cookie))
goto at91_usart_spi_err_dma;
cookie = txdesc->tx_submit(txdesc);
if (dma_submit_error(cookie))
goto at91_usart_spi_err_dma;
rxchan->device->device_issue_pending(rxchan);
txchan->device->device_issue_pending(txchan);
return 0;
at91_usart_spi_err_dma:
	/* Enable RX interrupt if something fails and fall back to PIO */
at91_usart_spi_writel(aus, IER, US_IR_RXRDY);
at91_usart_spi_stop_dma(ctlr);
return -ENOMEM;
}
static unsigned long at91_usart_spi_dma_timeout(struct at91_usart_spi *aus)
{
return wait_for_completion_timeout(&aus->xfer_completion,
US_DMA_TIMEOUT);
}
static inline u32 at91_usart_spi_tx_ready(struct at91_usart_spi *aus)
{
return aus->status & US_IR_TXRDY;
}
static inline u32 at91_usart_spi_rx_ready(struct at91_usart_spi *aus)
{
return aus->status & US_IR_RXRDY;
}
static inline u32 at91_usart_spi_check_overrun(struct at91_usart_spi *aus)
{
return aus->status & US_IR_OVRE;
}
static inline u32 at91_usart_spi_read_status(struct at91_usart_spi *aus)
{
aus->status = at91_usart_spi_readl(aus, CSR);
return aus->status;
}
static inline void at91_usart_spi_tx(struct at91_usart_spi *aus)
{
unsigned int len = aus->current_transfer->len;
unsigned int remaining = aus->current_tx_remaining_bytes;
const u8 *tx_buf = aus->current_transfer->tx_buf;
if (!remaining)
return;
if (at91_usart_spi_tx_ready(aus)) {
at91_usart_spi_writeb(aus, THR, tx_buf[len - remaining]);
aus->current_tx_remaining_bytes--;
}
}
static inline void at91_usart_spi_rx(struct at91_usart_spi *aus)
{
int len = aus->current_transfer->len;
int remaining = aus->current_rx_remaining_bytes;
u8 *rx_buf = aus->current_transfer->rx_buf;
if (!remaining)
return;
rx_buf[len - remaining] = at91_usart_spi_readb(aus, RHR);
aus->current_rx_remaining_bytes--;
}
static inline void
at91_usart_spi_set_xfer_speed(struct at91_usart_spi *aus,
struct spi_transfer *xfer)
{
at91_usart_spi_writel(aus, BRGR,
DIV_ROUND_UP(aus->spi_clk, xfer->speed_hz));
}
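/*
 * Worked example (figures assumed for illustration): with a 132 MHz
 * peripheral clock in aus->spi_clk and a requested xfer->speed_hz of
 * 1 MHz, BRGR is written with DIV_ROUND_UP(132000000, 1000000) = 132,
 * i.e. an SPI clock of 132 MHz / 132 = 1 MHz.
 */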
static irqreturn_t at91_usart_spi_interrupt(int irq, void *dev_id)
{
struct spi_controller *controller = dev_id;
struct at91_usart_spi *aus = spi_controller_get_devdata(controller);
spin_lock(&aus->lock);
at91_usart_spi_read_status(aus);
if (at91_usart_spi_check_overrun(aus)) {
aus->xfer_failed = true;
at91_usart_spi_writel(aus, IDR, US_IR_OVRE | US_IR_RXRDY);
spin_unlock(&aus->lock);
return IRQ_HANDLED;
}
if (at91_usart_spi_rx_ready(aus)) {
at91_usart_spi_rx(aus);
spin_unlock(&aus->lock);
return IRQ_HANDLED;
}
spin_unlock(&aus->lock);
return IRQ_NONE;
}
static int at91_usart_spi_setup(struct spi_device *spi)
{
struct at91_usart_spi *aus = spi_controller_get_devdata(spi->controller);
u32 *ausd = spi->controller_state;
unsigned int mr = at91_usart_spi_readl(aus, MR);
if (spi->mode & SPI_CPOL)
mr |= US_MR_CPOL;
else
mr &= ~US_MR_CPOL;
if (spi->mode & SPI_CPHA)
mr |= US_MR_CPHA;
else
mr &= ~US_MR_CPHA;
if (spi->mode & SPI_LOOP)
mr |= US_MR_LOOP;
else
mr &= ~US_MR_LOOP;
if (!ausd) {
ausd = kzalloc(sizeof(*ausd), GFP_KERNEL);
if (!ausd)
return -ENOMEM;
spi->controller_state = ausd;
}
*ausd = mr;
dev_dbg(&spi->dev,
"setup: bpw %u mode 0x%x -> mr %d %08x\n",
spi->bits_per_word, spi->mode, spi_get_chipselect(spi, 0), mr);
return 0;
}
static int at91_usart_spi_transfer_one(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct at91_usart_spi *aus = spi_controller_get_devdata(ctlr);
unsigned long dma_timeout = 0;
int ret = 0;
at91_usart_spi_set_xfer_speed(aus, xfer);
aus->xfer_failed = false;
aus->current_transfer = xfer;
aus->current_tx_remaining_bytes = xfer->len;
aus->current_rx_remaining_bytes = xfer->len;
while ((aus->current_tx_remaining_bytes ||
aus->current_rx_remaining_bytes) && !aus->xfer_failed) {
reinit_completion(&aus->xfer_completion);
if (at91_usart_spi_can_dma(ctlr, spi, xfer) &&
!ret) {
ret = at91_usart_spi_dma_transfer(ctlr, xfer);
if (ret)
continue;
dma_timeout = at91_usart_spi_dma_timeout(aus);
if (WARN_ON(dma_timeout == 0)) {
dev_err(&spi->dev, "DMA transfer timeout\n");
return -EIO;
}
aus->current_tx_remaining_bytes = 0;
} else {
at91_usart_spi_read_status(aus);
at91_usart_spi_tx(aus);
}
cpu_relax();
}
if (aus->xfer_failed) {
dev_err(aus->dev, "Overrun!\n");
return -EIO;
}
return 0;
}
static int at91_usart_spi_prepare_message(struct spi_controller *ctlr,
struct spi_message *message)
{
struct at91_usart_spi *aus = spi_controller_get_devdata(ctlr);
struct spi_device *spi = message->spi;
u32 *ausd = spi->controller_state;
at91_usart_spi_writel(aus, CR, US_ENABLE);
at91_usart_spi_writel(aus, IER, US_OVRE_RXRDY_IRQS);
at91_usart_spi_writel(aus, MR, *ausd);
return 0;
}
static int at91_usart_spi_unprepare_message(struct spi_controller *ctlr,
struct spi_message *message)
{
struct at91_usart_spi *aus = spi_controller_get_devdata(ctlr);
at91_usart_spi_writel(aus, CR, US_RESET | US_DISABLE);
at91_usart_spi_writel(aus, IDR, US_OVRE_RXRDY_IRQS);
return 0;
}
static void at91_usart_spi_cleanup(struct spi_device *spi)
{
struct at91_usart_spi_device *ausd = spi->controller_state;
spi->controller_state = NULL;
kfree(ausd);
}
static void at91_usart_spi_init(struct at91_usart_spi *aus)
{
at91_usart_spi_writel(aus, MR, US_INIT);
at91_usart_spi_writel(aus, CR, US_RESET | US_DISABLE);
}
static int at91_usart_gpio_setup(struct platform_device *pdev)
{
struct gpio_descs *cs_gpios;
cs_gpios = devm_gpiod_get_array_optional(&pdev->dev, "cs", GPIOD_OUT_LOW);
return PTR_ERR_OR_ZERO(cs_gpios);
}
static int at91_usart_spi_probe(struct platform_device *pdev)
{
struct resource *regs;
struct spi_controller *controller;
struct at91_usart_spi *aus;
struct clk *clk;
int irq;
int ret;
regs = platform_get_resource(to_platform_device(pdev->dev.parent),
IORESOURCE_MEM, 0);
if (!regs)
return -EINVAL;
irq = platform_get_irq(to_platform_device(pdev->dev.parent), 0);
if (irq < 0)
return irq;
clk = devm_clk_get(pdev->dev.parent, "usart");
if (IS_ERR(clk))
return PTR_ERR(clk);
ret = -ENOMEM;
controller = spi_alloc_host(&pdev->dev, sizeof(*aus));
if (!controller)
goto at91_usart_spi_probe_fail;
ret = at91_usart_gpio_setup(pdev);
if (ret)
goto at91_usart_spi_probe_fail;
controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_CS_HIGH;
controller->dev.of_node = pdev->dev.parent->of_node;
controller->bits_per_word_mask = SPI_BPW_MASK(8);
controller->setup = at91_usart_spi_setup;
controller->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
controller->transfer_one = at91_usart_spi_transfer_one;
controller->prepare_message = at91_usart_spi_prepare_message;
controller->unprepare_message = at91_usart_spi_unprepare_message;
controller->can_dma = at91_usart_spi_can_dma;
controller->cleanup = at91_usart_spi_cleanup;
controller->max_speed_hz = DIV_ROUND_UP(clk_get_rate(clk),
US_MIN_CLK_DIV);
controller->min_speed_hz = DIV_ROUND_UP(clk_get_rate(clk),
US_MAX_CLK_DIV);
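	/*
	 * Example values, assuming a 132 MHz "usart" clock (the actual rate
	 * is board specific): max_speed_hz = 132 MHz / 6 = 22 MHz and
	 * min_speed_hz = 132 MHz / 65536, roughly 2 kHz.
	 */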
platform_set_drvdata(pdev, controller);
aus = spi_controller_get_devdata(controller);
aus->dev = &pdev->dev;
aus->regs = devm_ioremap_resource(&pdev->dev, regs);
if (IS_ERR(aus->regs)) {
ret = PTR_ERR(aus->regs);
goto at91_usart_spi_probe_fail;
}
aus->irq = irq;
aus->clk = clk;
ret = devm_request_irq(&pdev->dev, irq, at91_usart_spi_interrupt, 0,
dev_name(&pdev->dev), controller);
if (ret)
goto at91_usart_spi_probe_fail;
ret = clk_prepare_enable(clk);
if (ret)
goto at91_usart_spi_probe_fail;
aus->spi_clk = clk_get_rate(clk);
at91_usart_spi_init(aus);
aus->phybase = regs->start;
aus->mpdev = to_platform_device(pdev->dev.parent);
ret = at91_usart_spi_configure_dma(controller, aus);
if (ret)
goto at91_usart_fail_dma;
spin_lock_init(&aus->lock);
init_completion(&aus->xfer_completion);
ret = devm_spi_register_controller(&pdev->dev, controller);
if (ret)
goto at91_usart_fail_register_controller;
dev_info(&pdev->dev,
"AT91 USART SPI Controller version 0x%x at %pa (irq %d)\n",
at91_usart_spi_readl(aus, VERSION),
		 &regs->start, irq);
return 0;
at91_usart_fail_register_controller:
at91_usart_spi_release_dma(controller);
at91_usart_fail_dma:
clk_disable_unprepare(clk);
at91_usart_spi_probe_fail:
spi_controller_put(controller);
return ret;
}
__maybe_unused static int at91_usart_spi_runtime_suspend(struct device *dev)
{
struct spi_controller *ctlr = dev_get_drvdata(dev);
struct at91_usart_spi *aus = spi_controller_get_devdata(ctlr);
clk_disable_unprepare(aus->clk);
pinctrl_pm_select_sleep_state(dev);
return 0;
}
__maybe_unused static int at91_usart_spi_runtime_resume(struct device *dev)
{
struct spi_controller *ctrl = dev_get_drvdata(dev);
struct at91_usart_spi *aus = spi_controller_get_devdata(ctrl);
pinctrl_pm_select_default_state(dev);
return clk_prepare_enable(aus->clk);
}
__maybe_unused static int at91_usart_spi_suspend(struct device *dev)
{
struct spi_controller *ctrl = dev_get_drvdata(dev);
int ret;
ret = spi_controller_suspend(ctrl);
if (ret)
return ret;
if (!pm_runtime_suspended(dev))
at91_usart_spi_runtime_suspend(dev);
return 0;
}
__maybe_unused static int at91_usart_spi_resume(struct device *dev)
{
struct spi_controller *ctrl = dev_get_drvdata(dev);
struct at91_usart_spi *aus = spi_controller_get_devdata(ctrl);
int ret;
if (!pm_runtime_suspended(dev)) {
ret = at91_usart_spi_runtime_resume(dev);
if (ret)
return ret;
}
at91_usart_spi_init(aus);
return spi_controller_resume(ctrl);
}
static void at91_usart_spi_remove(struct platform_device *pdev)
{
struct spi_controller *ctlr = platform_get_drvdata(pdev);
struct at91_usart_spi *aus = spi_controller_get_devdata(ctlr);
at91_usart_spi_release_dma(ctlr);
clk_disable_unprepare(aus->clk);
}
static const struct dev_pm_ops at91_usart_spi_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(at91_usart_spi_suspend, at91_usart_spi_resume)
SET_RUNTIME_PM_OPS(at91_usart_spi_runtime_suspend,
at91_usart_spi_runtime_resume, NULL)
};
static struct platform_driver at91_usart_spi_driver = {
.driver = {
.name = "at91_usart_spi",
.pm = &at91_usart_spi_pm_ops,
},
.probe = at91_usart_spi_probe,
.remove_new = at91_usart_spi_remove,
};
module_platform_driver(at91_usart_spi_driver);
MODULE_DESCRIPTION("Microchip AT91 USART SPI Controller driver");
MODULE_AUTHOR("Radu Pirea <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:at91_usart_spi");
| linux-master | drivers/spi/spi-at91-usart.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Driver for Broadcom BCM2835 SPI Controllers
*
* Copyright (C) 2012 Chris Boot
* Copyright (C) 2013 Stephen Warren
* Copyright (C) 2015 Martin Sperl
*
* This driver is inspired by:
* spi-ath79.c, Copyright (C) 2009-2011 Gabor Juhos <[email protected]>
* spi-atmel.c, Copyright (C) 2006 Atmel Corporation
*/
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/machine.h> /* FIXME: using chip internals */
#include <linux/gpio/driver.h> /* FIXME: using chip internals */
#include <linux/of_irq.h>
#include <linux/spi/spi.h>
/* SPI register offsets */
#define BCM2835_SPI_CS 0x00
#define BCM2835_SPI_FIFO 0x04
#define BCM2835_SPI_CLK 0x08
#define BCM2835_SPI_DLEN 0x0c
#define BCM2835_SPI_LTOH 0x10
#define BCM2835_SPI_DC 0x14
/* Bitfields in CS */
#define BCM2835_SPI_CS_LEN_LONG 0x02000000
#define BCM2835_SPI_CS_DMA_LEN 0x01000000
#define BCM2835_SPI_CS_CSPOL2 0x00800000
#define BCM2835_SPI_CS_CSPOL1 0x00400000
#define BCM2835_SPI_CS_CSPOL0 0x00200000
#define BCM2835_SPI_CS_RXF 0x00100000
#define BCM2835_SPI_CS_RXR 0x00080000
#define BCM2835_SPI_CS_TXD 0x00040000
#define BCM2835_SPI_CS_RXD 0x00020000
#define BCM2835_SPI_CS_DONE 0x00010000
#define BCM2835_SPI_CS_LEN 0x00002000
#define BCM2835_SPI_CS_REN 0x00001000
#define BCM2835_SPI_CS_ADCS 0x00000800
#define BCM2835_SPI_CS_INTR 0x00000400
#define BCM2835_SPI_CS_INTD 0x00000200
#define BCM2835_SPI_CS_DMAEN 0x00000100
#define BCM2835_SPI_CS_TA 0x00000080
#define BCM2835_SPI_CS_CSPOL 0x00000040
#define BCM2835_SPI_CS_CLEAR_RX 0x00000020
#define BCM2835_SPI_CS_CLEAR_TX 0x00000010
#define BCM2835_SPI_CS_CPOL 0x00000008
#define BCM2835_SPI_CS_CPHA 0x00000004
#define BCM2835_SPI_CS_CS_10 0x00000002
#define BCM2835_SPI_CS_CS_01 0x00000001
#define BCM2835_SPI_FIFO_SIZE 64
#define BCM2835_SPI_FIFO_SIZE_3_4 48
#define BCM2835_SPI_DMA_MIN_LENGTH 96
#define BCM2835_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
| SPI_NO_CS | SPI_3WIRE)
#define DRV_NAME "spi-bcm2835"
/* define polling limits */
static unsigned int polling_limit_us = 30;
module_param(polling_limit_us, uint, 0664);
MODULE_PARM_DESC(polling_limit_us,
"time in us to run a transfer in polling mode\n");
/**
* struct bcm2835_spi - BCM2835 SPI controller
* @regs: base address of register map
* @clk: core clock, divided to calculate serial clock
* @clk_hz: core clock cached speed
* @irq: interrupt, signals TX FIFO empty or RX FIFO ¾ full
* @tfr: SPI transfer currently processed
* @ctlr: SPI controller reverse lookup
* @tx_buf: pointer whence next transmitted byte is read
* @rx_buf: pointer where next received byte is written
* @tx_len: remaining bytes to transmit
* @rx_len: remaining bytes to receive
* @tx_prologue: bytes transmitted without DMA if first TX sglist entry's
* length is not a multiple of 4 (to overcome hardware limitation)
* @rx_prologue: bytes received without DMA if first RX sglist entry's
* length is not a multiple of 4 (to overcome hardware limitation)
* @tx_spillover: whether @tx_prologue spills over to second TX sglist entry
 * @debugfs_dir: the debugfs directory - needed to remove debugfs when
 * unloading the module
* @count_transfer_polling: count of how often polling mode is used
* @count_transfer_irq: count of how often interrupt mode is used
* @count_transfer_irq_after_polling: count of how often we fall back to
* interrupt mode after starting in polling mode.
* These are counted as well in @count_transfer_polling and
* @count_transfer_irq
* @count_transfer_dma: count how often dma mode is used
* @target: SPI target currently selected
* (used by bcm2835_spi_dma_tx_done() to write @clear_rx_cs)
* @tx_dma_active: whether a TX DMA descriptor is in progress
* @rx_dma_active: whether a RX DMA descriptor is in progress
* (used by bcm2835_spi_dma_tx_done() to handle a race)
* @fill_tx_desc: preallocated TX DMA descriptor used for RX-only transfers
* (cyclically copies from zero page to TX FIFO)
* @fill_tx_addr: bus address of zero page
*/
struct bcm2835_spi {
void __iomem *regs;
struct clk *clk;
unsigned long clk_hz;
int irq;
struct spi_transfer *tfr;
struct spi_controller *ctlr;
const u8 *tx_buf;
u8 *rx_buf;
int tx_len;
int rx_len;
int tx_prologue;
int rx_prologue;
unsigned int tx_spillover;
struct dentry *debugfs_dir;
u64 count_transfer_polling;
u64 count_transfer_irq;
u64 count_transfer_irq_after_polling;
u64 count_transfer_dma;
struct bcm2835_spidev *target;
unsigned int tx_dma_active;
unsigned int rx_dma_active;
struct dma_async_tx_descriptor *fill_tx_desc;
dma_addr_t fill_tx_addr;
};
/**
* struct bcm2835_spidev - BCM2835 SPI target
* @prepare_cs: precalculated CS register value for ->prepare_message()
* (uses target-specific clock polarity and phase settings)
* @clear_rx_desc: preallocated RX DMA descriptor used for TX-only transfers
* (cyclically clears RX FIFO by writing @clear_rx_cs to CS register)
* @clear_rx_addr: bus address of @clear_rx_cs
* @clear_rx_cs: precalculated CS register value to clear RX FIFO
* (uses target-specific clock polarity and phase settings)
*/
struct bcm2835_spidev {
u32 prepare_cs;
struct dma_async_tx_descriptor *clear_rx_desc;
dma_addr_t clear_rx_addr;
u32 clear_rx_cs ____cacheline_aligned;
};
#if defined(CONFIG_DEBUG_FS)
static void bcm2835_debugfs_create(struct bcm2835_spi *bs,
const char *dname)
{
char name[64];
struct dentry *dir;
/* get full name */
snprintf(name, sizeof(name), "spi-bcm2835-%s", dname);
/* the base directory */
dir = debugfs_create_dir(name, NULL);
bs->debugfs_dir = dir;
/* the counters */
debugfs_create_u64("count_transfer_polling", 0444, dir,
&bs->count_transfer_polling);
debugfs_create_u64("count_transfer_irq", 0444, dir,
&bs->count_transfer_irq);
debugfs_create_u64("count_transfer_irq_after_polling", 0444, dir,
&bs->count_transfer_irq_after_polling);
debugfs_create_u64("count_transfer_dma", 0444, dir,
&bs->count_transfer_dma);
}
static void bcm2835_debugfs_remove(struct bcm2835_spi *bs)
{
debugfs_remove_recursive(bs->debugfs_dir);
bs->debugfs_dir = NULL;
}
#else
static void bcm2835_debugfs_create(struct bcm2835_spi *bs,
const char *dname)
{
}
static void bcm2835_debugfs_remove(struct bcm2835_spi *bs)
{
}
#endif /* CONFIG_DEBUG_FS */
static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned int reg)
{
return readl(bs->regs + reg);
}
static inline void bcm2835_wr(struct bcm2835_spi *bs, unsigned int reg, u32 val)
{
writel(val, bs->regs + reg);
}
static inline void bcm2835_rd_fifo(struct bcm2835_spi *bs)
{
u8 byte;
while ((bs->rx_len) &&
(bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_RXD)) {
byte = bcm2835_rd(bs, BCM2835_SPI_FIFO);
if (bs->rx_buf)
*bs->rx_buf++ = byte;
bs->rx_len--;
}
}
static inline void bcm2835_wr_fifo(struct bcm2835_spi *bs)
{
u8 byte;
while ((bs->tx_len) &&
(bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_TXD)) {
byte = bs->tx_buf ? *bs->tx_buf++ : 0;
bcm2835_wr(bs, BCM2835_SPI_FIFO, byte);
bs->tx_len--;
}
}
/**
* bcm2835_rd_fifo_count() - blindly read exactly @count bytes from RX FIFO
* @bs: BCM2835 SPI controller
* @count: bytes to read from RX FIFO
*
* The caller must ensure that @bs->rx_len is greater than or equal to @count,
* that the RX FIFO contains at least @count bytes and that the DMA Enable flag
* in the CS register is set (such that a read from the FIFO register receives
* 32-bit instead of just 8-bit). Moreover @bs->rx_buf must not be %NULL.
*/
static inline void bcm2835_rd_fifo_count(struct bcm2835_spi *bs, int count)
{
u32 val;
int len;
bs->rx_len -= count;
do {
val = bcm2835_rd(bs, BCM2835_SPI_FIFO);
len = min(count, 4);
memcpy(bs->rx_buf, &val, len);
bs->rx_buf += len;
count -= 4;
} while (count > 0);
}
/**
* bcm2835_wr_fifo_count() - blindly write exactly @count bytes to TX FIFO
* @bs: BCM2835 SPI controller
* @count: bytes to write to TX FIFO
*
* The caller must ensure that @bs->tx_len is greater than or equal to @count,
* that the TX FIFO can accommodate @count bytes and that the DMA Enable flag
* in the CS register is set (such that a write to the FIFO register transmits
* 32-bit instead of just 8-bit).
*/
static inline void bcm2835_wr_fifo_count(struct bcm2835_spi *bs, int count)
{
u32 val;
int len;
bs->tx_len -= count;
do {
if (bs->tx_buf) {
len = min(count, 4);
memcpy(&val, bs->tx_buf, len);
bs->tx_buf += len;
} else {
val = 0;
}
bcm2835_wr(bs, BCM2835_SPI_FIFO, val);
count -= 4;
} while (count > 0);
}
/**
* bcm2835_wait_tx_fifo_empty() - busy-wait for TX FIFO to empty
* @bs: BCM2835 SPI controller
*
* The caller must ensure that the RX FIFO can accommodate as many bytes
* as have been written to the TX FIFO: Transmission is halted once the
* RX FIFO is full, causing this function to spin forever.
*/
static inline void bcm2835_wait_tx_fifo_empty(struct bcm2835_spi *bs)
{
while (!(bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE))
cpu_relax();
}
/**
* bcm2835_rd_fifo_blind() - blindly read up to @count bytes from RX FIFO
* @bs: BCM2835 SPI controller
* @count: bytes available for reading in RX FIFO
*/
static inline void bcm2835_rd_fifo_blind(struct bcm2835_spi *bs, int count)
{
u8 val;
count = min(count, bs->rx_len);
bs->rx_len -= count;
do {
val = bcm2835_rd(bs, BCM2835_SPI_FIFO);
if (bs->rx_buf)
*bs->rx_buf++ = val;
} while (--count);
}
/**
* bcm2835_wr_fifo_blind() - blindly write up to @count bytes to TX FIFO
* @bs: BCM2835 SPI controller
* @count: bytes available for writing in TX FIFO
*/
static inline void bcm2835_wr_fifo_blind(struct bcm2835_spi *bs, int count)
{
u8 val;
count = min(count, bs->tx_len);
bs->tx_len -= count;
do {
val = bs->tx_buf ? *bs->tx_buf++ : 0;
bcm2835_wr(bs, BCM2835_SPI_FIFO, val);
} while (--count);
}
static void bcm2835_spi_reset_hw(struct bcm2835_spi *bs)
{
u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
/* Disable SPI interrupts and transfer */
cs &= ~(BCM2835_SPI_CS_INTR |
BCM2835_SPI_CS_INTD |
BCM2835_SPI_CS_DMAEN |
BCM2835_SPI_CS_TA);
/*
* Transmission sometimes breaks unless the DONE bit is written at the
* end of every transfer. The spec says it's a RO bit. Either the
* spec is wrong and the bit is actually of type RW1C, or it's a
* hardware erratum.
*/
cs |= BCM2835_SPI_CS_DONE;
/* and reset RX/TX FIFOS */
cs |= BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX;
/* and reset the SPI_HW */
bcm2835_wr(bs, BCM2835_SPI_CS, cs);
/* as well as DLEN */
bcm2835_wr(bs, BCM2835_SPI_DLEN, 0);
}
static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id)
{
struct bcm2835_spi *bs = dev_id;
u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
/* Bail out early if interrupts are not enabled */
if (!(cs & BCM2835_SPI_CS_INTR))
return IRQ_NONE;
/*
* An interrupt is signaled either if DONE is set (TX FIFO empty)
* or if RXR is set (RX FIFO >= ¾ full).
*/
if (cs & BCM2835_SPI_CS_RXF)
bcm2835_rd_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);
else if (cs & BCM2835_SPI_CS_RXR)
bcm2835_rd_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE_3_4);
if (bs->tx_len && cs & BCM2835_SPI_CS_DONE)
bcm2835_wr_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);
/* Read as many bytes as possible from FIFO */
bcm2835_rd_fifo(bs);
/* Write as many bytes as possible to FIFO */
bcm2835_wr_fifo(bs);
if (!bs->rx_len) {
/* Transfer complete - reset SPI HW */
bcm2835_spi_reset_hw(bs);
/* wake up the framework */
spi_finalize_current_transfer(bs->ctlr);
}
return IRQ_HANDLED;
}
static int bcm2835_spi_transfer_one_irq(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *tfr,
u32 cs, bool fifo_empty)
{
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
/* update usage statistics */
bs->count_transfer_irq++;
/*
* Enable HW block, but with interrupts still disabled.
* Otherwise the empty TX FIFO would immediately trigger an interrupt.
*/
bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA);
/* fill TX FIFO as much as possible */
if (fifo_empty)
bcm2835_wr_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);
bcm2835_wr_fifo(bs);
/* enable interrupts */
cs |= BCM2835_SPI_CS_INTR | BCM2835_SPI_CS_INTD | BCM2835_SPI_CS_TA;
bcm2835_wr(bs, BCM2835_SPI_CS, cs);
/* signal that we need to wait for completion */
return 1;
}
/**
* bcm2835_spi_transfer_prologue() - transfer first few bytes without DMA
* @ctlr: SPI host controller
* @tfr: SPI transfer
* @bs: BCM2835 SPI controller
* @cs: CS register
*
* A limitation in DMA mode is that the FIFO must be accessed in 4 byte chunks.
* Only the final write access is permitted to transmit less than 4 bytes, the
* SPI controller deduces its intended size from the DLEN register.
*
* If a TX or RX sglist contains multiple entries, one per page, and the first
* entry starts in the middle of a page, that first entry's length may not be
* a multiple of 4. Subsequent entries are fine because they span an entire
* page, hence do have a length that's a multiple of 4.
*
* This cannot happen with kmalloc'ed buffers (which is what most clients use)
* because they are contiguous in physical memory and therefore not split on
* page boundaries by spi_map_buf(). But it *can* happen with vmalloc'ed
* buffers.
*
* The DMA engine is incapable of combining sglist entries into a continuous
* stream of 4 byte chunks, it treats every entry separately: A TX entry is
* rounded up a to a multiple of 4 bytes by transmitting surplus bytes, an RX
* entry is rounded up by throwing away received bytes.
*
* Overcome this limitation by transferring the first few bytes without DMA:
* E.g. if the first TX sglist entry's length is 23 and the first RX's is 42,
* write 3 bytes to the TX FIFO but read only 2 bytes from the RX FIFO.
* The residue of 1 byte in the RX FIFO is picked up by DMA. Together with
* the rest of the first RX sglist entry it makes up a multiple of 4 bytes.
*
* Should the RX prologue be larger, say, 3 vis-à-vis a TX prologue of 1,
* write 1 + 4 = 5 bytes to the TX FIFO and read 3 bytes from the RX FIFO.
* Caution, the additional 4 bytes spill over to the second TX sglist entry
* if the length of the first is *exactly* 1.
*
* At most 6 bytes are written and at most 3 bytes read. Do we know the
* transfer has this many bytes? Yes, see BCM2835_SPI_DMA_MIN_LENGTH.
*
* The FIFO is normally accessed with 8-bit width by the CPU and 32-bit width
* by the DMA engine. Toggling the DMA Enable flag in the CS register switches
* the width but also garbles the FIFO's contents. The prologue must therefore
* be transmitted in 32-bit width to ensure that the following DMA transfer can
* pick up the residue in the RX FIFO in ungarbled form.
*/
static void bcm2835_spi_transfer_prologue(struct spi_controller *ctlr,
struct spi_transfer *tfr,
struct bcm2835_spi *bs,
u32 cs)
{
int tx_remaining;
bs->tfr = tfr;
bs->tx_prologue = 0;
bs->rx_prologue = 0;
bs->tx_spillover = false;
if (bs->tx_buf && !sg_is_last(&tfr->tx_sg.sgl[0]))
bs->tx_prologue = sg_dma_len(&tfr->tx_sg.sgl[0]) & 3;
if (bs->rx_buf && !sg_is_last(&tfr->rx_sg.sgl[0])) {
bs->rx_prologue = sg_dma_len(&tfr->rx_sg.sgl[0]) & 3;
if (bs->rx_prologue > bs->tx_prologue) {
if (!bs->tx_buf || sg_is_last(&tfr->tx_sg.sgl[0])) {
bs->tx_prologue = bs->rx_prologue;
} else {
bs->tx_prologue += 4;
bs->tx_spillover =
!(sg_dma_len(&tfr->tx_sg.sgl[0]) & ~3);
}
}
}
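	/*
	 * Illustrative values (mirroring the example in the comment above):
	 * a first TX entry of 23 bytes and a first RX entry of 42 bytes give
	 * tx_prologue = 3 and rx_prologue = 2.  If instead the RX remainder
	 * were 3 and the TX remainder 1, tx_prologue would be bumped to
	 * 1 + 4 = 5, with tx_spillover set only if the first TX entry is
	 * exactly 1 byte long.
	 */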
/* rx_prologue > 0 implies tx_prologue > 0, so check only the latter */
if (!bs->tx_prologue)
return;
/* Write and read RX prologue. Adjust first entry in RX sglist. */
if (bs->rx_prologue) {
bcm2835_wr(bs, BCM2835_SPI_DLEN, bs->rx_prologue);
bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA
| BCM2835_SPI_CS_DMAEN);
bcm2835_wr_fifo_count(bs, bs->rx_prologue);
bcm2835_wait_tx_fifo_empty(bs);
bcm2835_rd_fifo_count(bs, bs->rx_prologue);
bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_CLEAR_RX
| BCM2835_SPI_CS_CLEAR_TX
| BCM2835_SPI_CS_DONE);
dma_sync_single_for_device(ctlr->dma_rx->device->dev,
sg_dma_address(&tfr->rx_sg.sgl[0]),
bs->rx_prologue, DMA_FROM_DEVICE);
sg_dma_address(&tfr->rx_sg.sgl[0]) += bs->rx_prologue;
sg_dma_len(&tfr->rx_sg.sgl[0]) -= bs->rx_prologue;
}
if (!bs->tx_buf)
return;
/*
* Write remaining TX prologue. Adjust first entry in TX sglist.
* Also adjust second entry if prologue spills over to it.
*/
tx_remaining = bs->tx_prologue - bs->rx_prologue;
if (tx_remaining) {
bcm2835_wr(bs, BCM2835_SPI_DLEN, tx_remaining);
bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA
| BCM2835_SPI_CS_DMAEN);
bcm2835_wr_fifo_count(bs, tx_remaining);
bcm2835_wait_tx_fifo_empty(bs);
bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_CLEAR_TX
| BCM2835_SPI_CS_DONE);
}
if (likely(!bs->tx_spillover)) {
sg_dma_address(&tfr->tx_sg.sgl[0]) += bs->tx_prologue;
sg_dma_len(&tfr->tx_sg.sgl[0]) -= bs->tx_prologue;
} else {
sg_dma_len(&tfr->tx_sg.sgl[0]) = 0;
sg_dma_address(&tfr->tx_sg.sgl[1]) += 4;
sg_dma_len(&tfr->tx_sg.sgl[1]) -= 4;
}
}
/**
* bcm2835_spi_undo_prologue() - reconstruct original sglist state
* @bs: BCM2835 SPI controller
*
* Undo changes which were made to an SPI transfer's sglist when transmitting
* the prologue. This is necessary to ensure the same memory ranges are
* unmapped that were originally mapped.
*/
static void bcm2835_spi_undo_prologue(struct bcm2835_spi *bs)
{
struct spi_transfer *tfr = bs->tfr;
if (!bs->tx_prologue)
return;
if (bs->rx_prologue) {
sg_dma_address(&tfr->rx_sg.sgl[0]) -= bs->rx_prologue;
sg_dma_len(&tfr->rx_sg.sgl[0]) += bs->rx_prologue;
}
if (!bs->tx_buf)
goto out;
if (likely(!bs->tx_spillover)) {
sg_dma_address(&tfr->tx_sg.sgl[0]) -= bs->tx_prologue;
sg_dma_len(&tfr->tx_sg.sgl[0]) += bs->tx_prologue;
} else {
sg_dma_len(&tfr->tx_sg.sgl[0]) = bs->tx_prologue - 4;
sg_dma_address(&tfr->tx_sg.sgl[1]) -= 4;
sg_dma_len(&tfr->tx_sg.sgl[1]) += 4;
}
out:
bs->tx_prologue = 0;
}
/**
* bcm2835_spi_dma_rx_done() - callback for DMA RX channel
* @data: SPI host controller
*
* Used for bidirectional and RX-only transfers.
*/
static void bcm2835_spi_dma_rx_done(void *data)
{
struct spi_controller *ctlr = data;
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
	/*
	 * Terminate TX DMA: there is no interrupt for it, but by the time
	 * the RX DMA completes and this callback runs, the TX DMA must
	 * already have finished - we cannot get here otherwise.
	 */
dmaengine_terminate_async(ctlr->dma_tx);
bs->tx_dma_active = false;
bs->rx_dma_active = false;
bcm2835_spi_undo_prologue(bs);
/* reset fifo and HW */
bcm2835_spi_reset_hw(bs);
	/* and mark as completed */
spi_finalize_current_transfer(ctlr);
}
/**
* bcm2835_spi_dma_tx_done() - callback for DMA TX channel
* @data: SPI host controller
*
* Used for TX-only transfers.
*/
static void bcm2835_spi_dma_tx_done(void *data)
{
struct spi_controller *ctlr = data;
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
/* busy-wait for TX FIFO to empty */
while (!(bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE))
bcm2835_wr(bs, BCM2835_SPI_CS, bs->target->clear_rx_cs);
bs->tx_dma_active = false;
smp_wmb();
/*
* In case of a very short transfer, RX DMA may not have been
* issued yet. The onus is then on bcm2835_spi_transfer_one_dma()
* to terminate it immediately after issuing.
*/
if (cmpxchg(&bs->rx_dma_active, true, false))
dmaengine_terminate_async(ctlr->dma_rx);
bcm2835_spi_undo_prologue(bs);
bcm2835_spi_reset_hw(bs);
spi_finalize_current_transfer(ctlr);
}
/**
* bcm2835_spi_prepare_sg() - prepare and submit DMA descriptor for sglist
* @ctlr: SPI host controller
* @tfr: SPI transfer
* @bs: BCM2835 SPI controller
* @target: BCM2835 SPI target
* @is_tx: whether to submit DMA descriptor for TX or RX sglist
*
* Prepare and submit a DMA descriptor for the TX or RX sglist of @tfr.
* Return 0 on success or a negative error number.
*/
static int bcm2835_spi_prepare_sg(struct spi_controller *ctlr,
struct spi_transfer *tfr,
struct bcm2835_spi *bs,
struct bcm2835_spidev *target,
bool is_tx)
{
struct dma_chan *chan;
struct scatterlist *sgl;
unsigned int nents;
enum dma_transfer_direction dir;
unsigned long flags;
struct dma_async_tx_descriptor *desc;
dma_cookie_t cookie;
if (is_tx) {
dir = DMA_MEM_TO_DEV;
chan = ctlr->dma_tx;
nents = tfr->tx_sg.nents;
sgl = tfr->tx_sg.sgl;
flags = tfr->rx_buf ? 0 : DMA_PREP_INTERRUPT;
} else {
dir = DMA_DEV_TO_MEM;
chan = ctlr->dma_rx;
nents = tfr->rx_sg.nents;
sgl = tfr->rx_sg.sgl;
flags = DMA_PREP_INTERRUPT;
}
/* prepare the channel */
desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
if (!desc)
return -EINVAL;
/*
* Completion is signaled by the RX channel for bidirectional and
* RX-only transfers; else by the TX channel for TX-only transfers.
*/
if (!is_tx) {
desc->callback = bcm2835_spi_dma_rx_done;
desc->callback_param = ctlr;
} else if (!tfr->rx_buf) {
desc->callback = bcm2835_spi_dma_tx_done;
desc->callback_param = ctlr;
bs->target = target;
}
/* submit it to DMA-engine */
cookie = dmaengine_submit(desc);
return dma_submit_error(cookie);
}
/**
* bcm2835_spi_transfer_one_dma() - perform SPI transfer using DMA engine
* @ctlr: SPI host controller
* @tfr: SPI transfer
* @target: BCM2835 SPI target
* @cs: CS register
*
* For *bidirectional* transfers (both tx_buf and rx_buf are non-%NULL), set up
* the TX and RX DMA channel to copy between memory and FIFO register.
*
* For *TX-only* transfers (rx_buf is %NULL), copying the RX FIFO's contents to
* memory is pointless. However not reading the RX FIFO isn't an option either
* because transmission is halted once it's full. As a workaround, cyclically
* clear the RX FIFO by setting the CLEAR_RX bit in the CS register.
*
* The CS register value is precalculated in bcm2835_spi_setup(). Normally
* this is called only once, on target registration. A DMA descriptor to write
* this value is preallocated in bcm2835_dma_init(). All that's left to do
* when performing a TX-only transfer is to submit this descriptor to the RX
* DMA channel. Latency is thereby minimized. The descriptor does not
* generate any interrupts while running. It must be terminated once the
* TX DMA channel is done.
*
* Clearing the RX FIFO is paced by the DREQ signal. The signal is asserted
* when the RX FIFO becomes half full, i.e. 32 bytes. (Tuneable with the DC
* register.) Reading 32 bytes from the RX FIFO would normally require 8 bus
* accesses, whereas clearing it requires only 1 bus access. So an 8-fold
* reduction in bus traffic and thus energy consumption is achieved.
*
* For *RX-only* transfers (tx_buf is %NULL), fill the TX FIFO by cyclically
* copying from the zero page. The DMA descriptor to do this is preallocated
* in bcm2835_dma_init(). It must be terminated once the RX DMA channel is
* done and can then be reused.
*
* The BCM2835 DMA driver autodetects when a transaction copies from the zero
* page and utilizes the DMA controller's ability to synthesize zeroes instead
* of copying them from memory. This reduces traffic on the memory bus. The
* feature is not available on so-called "lite" channels, but normally TX DMA
* is backed by a full-featured channel.
*
* Zero-filling the TX FIFO is paced by the DREQ signal. Unfortunately the
* BCM2835 SPI controller continues to assert DREQ even after the DLEN register
* has been counted down to zero (hardware erratum). Thus, when the transfer
* has finished, the DMA engine zero-fills the TX FIFO until it is half full.
* (Tuneable with the DC register.) So up to 9 gratuitous bus accesses are
* performed at the end of an RX-only transfer.
*/
static int bcm2835_spi_transfer_one_dma(struct spi_controller *ctlr,
struct spi_transfer *tfr,
struct bcm2835_spidev *target,
u32 cs)
{
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
dma_cookie_t cookie;
int ret;
/* update usage statistics */
bs->count_transfer_dma++;
/*
* Transfer first few bytes without DMA if length of first TX or RX
* sglist entry is not a multiple of 4 bytes (hardware limitation).
*/
bcm2835_spi_transfer_prologue(ctlr, tfr, bs, cs);
/* setup tx-DMA */
if (bs->tx_buf) {
ret = bcm2835_spi_prepare_sg(ctlr, tfr, bs, target, true);
} else {
cookie = dmaengine_submit(bs->fill_tx_desc);
ret = dma_submit_error(cookie);
}
if (ret)
goto err_reset_hw;
/* set the DMA length */
bcm2835_wr(bs, BCM2835_SPI_DLEN, bs->tx_len);
/* start the HW */
bcm2835_wr(bs, BCM2835_SPI_CS,
cs | BCM2835_SPI_CS_TA | BCM2835_SPI_CS_DMAEN);
bs->tx_dma_active = true;
smp_wmb();
/* start TX early */
dma_async_issue_pending(ctlr->dma_tx);
	/*
	 * Set up RX DMA late so the transfer can already run while the RX
	 * buffers are still being mapped; this saves 10us or more.
	 */
if (bs->rx_buf) {
ret = bcm2835_spi_prepare_sg(ctlr, tfr, bs, target, false);
} else {
cookie = dmaengine_submit(target->clear_rx_desc);
ret = dma_submit_error(cookie);
}
if (ret) {
/* need to reset on errors */
dmaengine_terminate_sync(ctlr->dma_tx);
bs->tx_dma_active = false;
goto err_reset_hw;
}
/* start rx dma late */
dma_async_issue_pending(ctlr->dma_rx);
bs->rx_dma_active = true;
smp_mb();
/*
* In case of a very short TX-only transfer, bcm2835_spi_dma_tx_done()
* may run before RX DMA is issued. Terminate RX DMA if so.
*/
if (!bs->rx_buf && !bs->tx_dma_active &&
cmpxchg(&bs->rx_dma_active, true, false)) {
dmaengine_terminate_async(ctlr->dma_rx);
bcm2835_spi_reset_hw(bs);
}
/* wait for wakeup in framework */
return 1;
err_reset_hw:
bcm2835_spi_reset_hw(bs);
bcm2835_spi_undo_prologue(bs);
return ret;
}
static bool bcm2835_spi_can_dma(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *tfr)
{
/* we start DMA efforts only on bigger transfers */
if (tfr->len < BCM2835_SPI_DMA_MIN_LENGTH)
return false;
/* return OK */
return true;
}
static void bcm2835_dma_release(struct spi_controller *ctlr,
struct bcm2835_spi *bs)
{
if (ctlr->dma_tx) {
dmaengine_terminate_sync(ctlr->dma_tx);
if (bs->fill_tx_desc)
dmaengine_desc_free(bs->fill_tx_desc);
if (bs->fill_tx_addr)
dma_unmap_page_attrs(ctlr->dma_tx->device->dev,
bs->fill_tx_addr, sizeof(u32),
DMA_TO_DEVICE,
DMA_ATTR_SKIP_CPU_SYNC);
dma_release_channel(ctlr->dma_tx);
ctlr->dma_tx = NULL;
}
if (ctlr->dma_rx) {
dmaengine_terminate_sync(ctlr->dma_rx);
dma_release_channel(ctlr->dma_rx);
ctlr->dma_rx = NULL;
}
}
static int bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
struct bcm2835_spi *bs)
{
struct dma_slave_config slave_config;
const __be32 *addr;
dma_addr_t dma_reg_base;
int ret;
/* base address in dma-space */
addr = of_get_address(ctlr->dev.of_node, 0, NULL, NULL);
if (!addr) {
dev_err(dev, "could not get DMA-register address - not using dma mode\n");
/* Fall back to interrupt mode */
return 0;
}
dma_reg_base = be32_to_cpup(addr);
/* get tx/rx dma */
ctlr->dma_tx = dma_request_chan(dev, "tx");
if (IS_ERR(ctlr->dma_tx)) {
ret = dev_err_probe(dev, PTR_ERR(ctlr->dma_tx),
"no tx-dma configuration found - not using dma mode\n");
ctlr->dma_tx = NULL;
goto err;
}
ctlr->dma_rx = dma_request_chan(dev, "rx");
if (IS_ERR(ctlr->dma_rx)) {
ret = dev_err_probe(dev, PTR_ERR(ctlr->dma_rx),
"no rx-dma configuration found - not using dma mode\n");
ctlr->dma_rx = NULL;
goto err_release;
}
/*
* The TX DMA channel either copies a transfer's TX buffer to the FIFO
* or, in case of an RX-only transfer, cyclically copies from the zero
* page to the FIFO using a preallocated, reusable descriptor.
*/
slave_config.dst_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
ret = dmaengine_slave_config(ctlr->dma_tx, &slave_config);
if (ret)
goto err_config;
bs->fill_tx_addr = dma_map_page_attrs(ctlr->dma_tx->device->dev,
ZERO_PAGE(0), 0, sizeof(u32),
DMA_TO_DEVICE,
DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(ctlr->dma_tx->device->dev, bs->fill_tx_addr)) {
dev_err(dev, "cannot map zero page - not using DMA mode\n");
bs->fill_tx_addr = 0;
ret = -ENOMEM;
goto err_release;
}
bs->fill_tx_desc = dmaengine_prep_dma_cyclic(ctlr->dma_tx,
bs->fill_tx_addr,
sizeof(u32), 0,
DMA_MEM_TO_DEV, 0);
if (!bs->fill_tx_desc) {
dev_err(dev, "cannot prepare fill_tx_desc - not using DMA mode\n");
ret = -ENOMEM;
goto err_release;
}
ret = dmaengine_desc_set_reuse(bs->fill_tx_desc);
if (ret) {
dev_err(dev, "cannot reuse fill_tx_desc - not using DMA mode\n");
goto err_release;
}
/*
* The RX DMA channel is used bidirectionally: It either reads the
* RX FIFO or, in case of a TX-only transfer, cyclically writes a
* precalculated value to the CS register to clear the RX FIFO.
*/
slave_config.src_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
slave_config.dst_addr = (u32)(dma_reg_base + BCM2835_SPI_CS);
slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
ret = dmaengine_slave_config(ctlr->dma_rx, &slave_config);
if (ret)
goto err_config;
/* all went well, so set can_dma */
ctlr->can_dma = bcm2835_spi_can_dma;
return 0;
err_config:
dev_err(dev, "issue configuring dma: %d - not using DMA mode\n",
ret);
err_release:
bcm2835_dma_release(ctlr, bs);
err:
/*
* Only report error for deferred probing, otherwise fall back to
* interrupt mode
*/
if (ret != -EPROBE_DEFER)
ret = 0;
return ret;
}
static int bcm2835_spi_transfer_one_poll(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *tfr,
u32 cs)
{
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
unsigned long timeout;
/* update usage statistics */
bs->count_transfer_polling++;
/* enable HW block without interrupts */
bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA);
/* fill in the fifo before timeout calculations
* if we are interrupted here, then the data is
* getting transferred by the HW while we are interrupted
*/
bcm2835_wr_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);
/* set the timeout to at least 2 jiffies */
timeout = jiffies + 2 + HZ * polling_limit_us / 1000000;
/* loop until finished the transfer */
while (bs->rx_len) {
/* fill in tx fifo with remaining data */
bcm2835_wr_fifo(bs);
/* read from fifo as much as possible */
bcm2835_rd_fifo(bs);
/* if there is still data pending to read
* then check the timeout
*/
if (bs->rx_len && time_after(jiffies, timeout)) {
dev_dbg_ratelimited(&spi->dev,
"timeout period reached: jiffies: %lu remaining tx/rx: %d/%d - falling back to interrupt mode\n",
jiffies - timeout,
bs->tx_len, bs->rx_len);
/* fall back to interrupt mode */
/* update usage statistics */
bs->count_transfer_irq_after_polling++;
return bcm2835_spi_transfer_one_irq(ctlr, spi,
tfr, cs, false);
}
}
/* Transfer complete - reset SPI HW */
bcm2835_spi_reset_hw(bs);
/* and return without waiting for completion */
return 0;
}
static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *tfr)
{
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
struct bcm2835_spidev *target = spi_get_ctldata(spi);
unsigned long spi_hz, cdiv;
unsigned long hz_per_byte, byte_limit;
u32 cs = target->prepare_cs;
/* set clock */
spi_hz = tfr->speed_hz;
if (spi_hz >= bs->clk_hz / 2) {
cdiv = 2; /* clk_hz/2 is the fastest we can go */
} else if (spi_hz) {
/* CDIV must be a multiple of two */
cdiv = DIV_ROUND_UP(bs->clk_hz, spi_hz);
cdiv += (cdiv % 2);
if (cdiv >= 65536)
cdiv = 0; /* 0 is the slowest we can go */
} else {
cdiv = 0; /* 0 is the slowest we can go */
}
tfr->effective_speed_hz = cdiv ? (bs->clk_hz / cdiv) : (bs->clk_hz / 65536);
bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);
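	/*
	 * Worked example (assuming the typical 250 MHz core clock; clk_hz is
	 * read from the controller clock at probe time): a request for
	 * 10 MHz gives cdiv = DIV_ROUND_UP(250000000, 10000000) = 25,
	 * rounded up to the even value 26, so the effective speed is
	 * 250 MHz / 26, roughly 9.6 MHz.
	 */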
/* handle all the 3-wire mode */
if (spi->mode & SPI_3WIRE && tfr->rx_buf)
cs |= BCM2835_SPI_CS_REN;
/* set transmit buffers and length */
bs->tx_buf = tfr->tx_buf;
bs->rx_buf = tfr->rx_buf;
bs->tx_len = tfr->len;
bs->rx_len = tfr->len;
	/* Calculate the estimated time in us the transfer runs. Note that
	 * there is one idle clock cycle after each byte is transferred, so
	 * we have 9 cycles/byte. This is used to find the number of Hz
	 * per byte per polling limit. E.g., we can transfer 1 byte in 30 us
	 * per 300,000 Hz of bus clock.
	 */
hz_per_byte = polling_limit_us ? (9 * 1000000) / polling_limit_us : 0;
byte_limit = hz_per_byte ? tfr->effective_speed_hz / hz_per_byte : 1;
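	/*
	 * With the default polling_limit_us of 30 this gives hz_per_byte =
	 * 9000000 / 30 = 300000.  Continuing the ~9.6 MHz example above,
	 * byte_limit works out to about 32, so transfers shorter than
	 * roughly 32 bytes are handled by polling.
	 */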
/* run in polling mode for short transfers */
if (tfr->len < byte_limit)
return bcm2835_spi_transfer_one_poll(ctlr, spi, tfr, cs);
/* run in dma mode if conditions are right
* Note that unlike poll or interrupt mode DMA mode does not have
* this 1 idle clock cycle pattern but runs the spi clock without gaps
*/
if (ctlr->can_dma && bcm2835_spi_can_dma(ctlr, spi, tfr))
return bcm2835_spi_transfer_one_dma(ctlr, tfr, target, cs);
/* run in interrupt-mode */
return bcm2835_spi_transfer_one_irq(ctlr, spi, tfr, cs, true);
}
static int bcm2835_spi_prepare_message(struct spi_controller *ctlr,
struct spi_message *msg)
{
struct spi_device *spi = msg->spi;
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
struct bcm2835_spidev *target = spi_get_ctldata(spi);
int ret;
if (ctlr->can_dma) {
/*
* DMA transfers are limited to 16 bit (0 to 65535 bytes) by
* the SPI HW due to DLEN. Split up transfers (32-bit FIFO
* aligned) if the limit is exceeded.
*/
ret = spi_split_transfers_maxsize(ctlr, msg, 65532,
GFP_KERNEL | GFP_DMA);
if (ret)
return ret;
}
/*
* Set up clock polarity before spi_transfer_one_message() asserts
* chip select to avoid a gratuitous clock signal edge.
*/
bcm2835_wr(bs, BCM2835_SPI_CS, target->prepare_cs);
return 0;
}
static void bcm2835_spi_handle_err(struct spi_controller *ctlr,
struct spi_message *msg)
{
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
/* if an error occurred and we have an active dma, then terminate */
if (ctlr->dma_tx) {
dmaengine_terminate_sync(ctlr->dma_tx);
bs->tx_dma_active = false;
}
if (ctlr->dma_rx) {
dmaengine_terminate_sync(ctlr->dma_rx);
bs->rx_dma_active = false;
}
bcm2835_spi_undo_prologue(bs);
/* and reset */
bcm2835_spi_reset_hw(bs);
}
static int chip_match_name(struct gpio_chip *chip, void *data)
{
return !strcmp(chip->label, data);
}
static void bcm2835_spi_cleanup(struct spi_device *spi)
{
struct bcm2835_spidev *target = spi_get_ctldata(spi);
struct spi_controller *ctlr = spi->controller;
if (target->clear_rx_desc)
dmaengine_desc_free(target->clear_rx_desc);
if (target->clear_rx_addr)
dma_unmap_single(ctlr->dma_rx->device->dev,
target->clear_rx_addr,
sizeof(u32),
DMA_TO_DEVICE);
kfree(target);
}
static int bcm2835_spi_setup_dma(struct spi_controller *ctlr,
struct spi_device *spi,
struct bcm2835_spi *bs,
struct bcm2835_spidev *target)
{
int ret;
if (!ctlr->dma_rx)
return 0;
target->clear_rx_addr = dma_map_single(ctlr->dma_rx->device->dev,
&target->clear_rx_cs,
sizeof(u32),
DMA_TO_DEVICE);
if (dma_mapping_error(ctlr->dma_rx->device->dev, target->clear_rx_addr)) {
dev_err(&spi->dev, "cannot map clear_rx_cs\n");
target->clear_rx_addr = 0;
return -ENOMEM;
}
target->clear_rx_desc = dmaengine_prep_dma_cyclic(ctlr->dma_rx,
target->clear_rx_addr,
sizeof(u32), 0,
DMA_MEM_TO_DEV, 0);
if (!target->clear_rx_desc) {
dev_err(&spi->dev, "cannot prepare clear_rx_desc\n");
return -ENOMEM;
}
ret = dmaengine_desc_set_reuse(target->clear_rx_desc);
if (ret) {
dev_err(&spi->dev, "cannot reuse clear_rx_desc\n");
return ret;
}
return 0;
}
static int bcm2835_spi_setup(struct spi_device *spi)
{
struct spi_controller *ctlr = spi->controller;
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
struct bcm2835_spidev *target = spi_get_ctldata(spi);
struct gpio_chip *chip;
int ret;
u32 cs;
if (!target) {
target = kzalloc(ALIGN(sizeof(*target), dma_get_cache_alignment()),
GFP_KERNEL);
if (!target)
return -ENOMEM;
spi_set_ctldata(spi, target);
ret = bcm2835_spi_setup_dma(ctlr, spi, bs, target);
if (ret)
goto err_cleanup;
}
/*
* Precalculate SPI target's CS register value for ->prepare_message():
* The driver always uses software-controlled GPIO chip select, hence
* set the hardware-controlled native chip select to an invalid value
* to prevent it from interfering.
*/
cs = BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01;
if (spi->mode & SPI_CPOL)
cs |= BCM2835_SPI_CS_CPOL;
if (spi->mode & SPI_CPHA)
cs |= BCM2835_SPI_CS_CPHA;
target->prepare_cs = cs;
/*
* Precalculate SPI target's CS register value to clear RX FIFO
* in case of a TX-only DMA transfer.
*/
if (ctlr->dma_rx) {
target->clear_rx_cs = cs | BCM2835_SPI_CS_TA |
BCM2835_SPI_CS_DMAEN |
BCM2835_SPI_CS_CLEAR_RX;
dma_sync_single_for_device(ctlr->dma_rx->device->dev,
target->clear_rx_addr,
sizeof(u32),
DMA_TO_DEVICE);
}
/*
* sanity checking the native-chipselects
*/
if (spi->mode & SPI_NO_CS)
return 0;
/*
* The SPI core has successfully requested the CS GPIO line from the
* device tree, so we are done.
*/
if (spi_get_csgpiod(spi, 0))
return 0;
if (spi_get_chipselect(spi, 0) > 1) {
/* error in the case of a native CS being requested with CS > 1:
* officially there is a CS2, but it is not documented
* which GPIO is connected to it...
*/
dev_err(&spi->dev,
"setup: only two native chip-selects are supported\n");
ret = -EINVAL;
goto err_cleanup;
}
/*
* Translate native CS to GPIO
*
* FIXME: poking around in the gpiolib internals like this is
* not very good practice. Find a way to locate the real problem
* and fix it. Why is the GPIO descriptor in spi->cs_gpiod
* sometimes not assigned correctly? Erroneous device trees?
*/
/* get the gpio chip for the base */
chip = gpiochip_find("pinctrl-bcm2835", chip_match_name);
if (!chip)
return 0;
spi_set_csgpiod(spi, 0, gpiochip_request_own_desc(chip,
8 - (spi_get_chipselect(spi, 0)),
DRV_NAME,
GPIO_LOOKUP_FLAGS_DEFAULT,
GPIOD_OUT_LOW));
if (IS_ERR(spi_get_csgpiod(spi, 0))) {
ret = PTR_ERR(spi_get_csgpiod(spi, 0));
goto err_cleanup;
}
/* and set up the "mode" and level */
dev_info(&spi->dev, "setting up native-CS%i to use GPIO\n",
spi_get_chipselect(spi, 0));
return 0;
err_cleanup:
bcm2835_spi_cleanup(spi);
return ret;
}
static int bcm2835_spi_probe(struct platform_device *pdev)
{
struct spi_controller *ctlr;
struct bcm2835_spi *bs;
int err;
ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(*bs));
if (!ctlr)
return -ENOMEM;
platform_set_drvdata(pdev, ctlr);
ctlr->use_gpio_descriptors = true;
ctlr->mode_bits = BCM2835_SPI_MODE_BITS;
ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
ctlr->num_chipselect = 3;
ctlr->setup = bcm2835_spi_setup;
ctlr->cleanup = bcm2835_spi_cleanup;
ctlr->transfer_one = bcm2835_spi_transfer_one;
ctlr->handle_err = bcm2835_spi_handle_err;
ctlr->prepare_message = bcm2835_spi_prepare_message;
ctlr->dev.of_node = pdev->dev.of_node;
bs = spi_controller_get_devdata(ctlr);
bs->ctlr = ctlr;
bs->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(bs->regs))
return PTR_ERR(bs->regs);
bs->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(bs->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(bs->clk),
"could not get clk\n");
ctlr->max_speed_hz = clk_get_rate(bs->clk) / 2;
bs->irq = platform_get_irq(pdev, 0);
if (bs->irq < 0)
return bs->irq;
err = clk_prepare_enable(bs->clk);
if (err)
return err;
bs->clk_hz = clk_get_rate(bs->clk);
err = bcm2835_dma_init(ctlr, &pdev->dev, bs);
if (err)
goto out_clk_disable;
/* initialise the hardware with the default polarities */
bcm2835_wr(bs, BCM2835_SPI_CS,
BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
err = devm_request_irq(&pdev->dev, bs->irq, bcm2835_spi_interrupt,
IRQF_SHARED, dev_name(&pdev->dev), bs);
if (err) {
dev_err(&pdev->dev, "could not request IRQ: %d\n", err);
goto out_dma_release;
}
err = spi_register_controller(ctlr);
if (err) {
dev_err(&pdev->dev, "could not register SPI controller: %d\n",
err);
goto out_dma_release;
}
bcm2835_debugfs_create(bs, dev_name(&pdev->dev));
return 0;
out_dma_release:
bcm2835_dma_release(ctlr, bs);
out_clk_disable:
clk_disable_unprepare(bs->clk);
return err;
}
static void bcm2835_spi_remove(struct platform_device *pdev)
{
struct spi_controller *ctlr = platform_get_drvdata(pdev);
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
bcm2835_debugfs_remove(bs);
spi_unregister_controller(ctlr);
bcm2835_dma_release(ctlr, bs);
/* Clear FIFOs, and disable the HW block */
bcm2835_wr(bs, BCM2835_SPI_CS,
BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
clk_disable_unprepare(bs->clk);
}
static const struct of_device_id bcm2835_spi_match[] = {
{ .compatible = "brcm,bcm2835-spi", },
{}
};
MODULE_DEVICE_TABLE(of, bcm2835_spi_match);
static struct platform_driver bcm2835_spi_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = bcm2835_spi_match,
},
.probe = bcm2835_spi_probe,
.remove_new = bcm2835_spi_remove,
.shutdown = bcm2835_spi_remove,
};
module_platform_driver(bcm2835_spi_driver);
MODULE_DESCRIPTION("SPI controller driver for Broadcom BCM2835");
MODULE_AUTHOR("Chris Boot <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-bcm2835.c |
// SPDX-License-Identifier: GPL-2.0+
// Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
// Copyright (C) 2008 Juergen Beisert
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/property.h>
#include <linux/dma/imx-dma.h>
#define DRIVER_NAME "spi_imx"
static bool use_dma = true;
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "Enable usage of DMA when available (default)");
/* define polling limits */
static unsigned int polling_limit_us = 30;
module_param(polling_limit_us, uint, 0664);
MODULE_PARM_DESC(polling_limit_us,
"time in us to run a transfer in polling mode\n");
#define MXC_RPM_TIMEOUT 2000 /* 2000ms */
#define MXC_CSPIRXDATA 0x00
#define MXC_CSPITXDATA 0x04
#define MXC_CSPICTRL 0x08
#define MXC_CSPIINT 0x0c
#define MXC_RESET 0x1c
/* generic defines to abstract from the different register layouts */
#define MXC_INT_RR (1 << 0) /* Receive data ready interrupt */
#define MXC_INT_TE (1 << 1) /* Transmit FIFO empty interrupt */
#define MXC_INT_RDR BIT(4) /* Receive data threshold interrupt */
/* The maximum bytes that an SDMA BD can transfer. */
#define MAX_SDMA_BD_BYTES (1 << 15)
#define MX51_ECSPI_CTRL_MAX_BURST 512
/* The maximum bytes that IMX53_ECSPI can transfer in target mode. */
#define MX53_MAX_TRANSFER_BYTES 512
enum spi_imx_devtype {
IMX1_CSPI,
IMX21_CSPI,
IMX27_CSPI,
IMX31_CSPI,
IMX35_CSPI, /* CSPI on all i.mx except above */
IMX51_ECSPI, /* ECSPI on i.mx51 */
IMX53_ECSPI, /* ECSPI on i.mx53 and later */
};
struct spi_imx_data;
struct spi_imx_devtype_data {
void (*intctrl)(struct spi_imx_data *spi_imx, int enable);
int (*prepare_message)(struct spi_imx_data *spi_imx, struct spi_message *msg);
int (*prepare_transfer)(struct spi_imx_data *spi_imx, struct spi_device *spi);
void (*trigger)(struct spi_imx_data *spi_imx);
int (*rx_available)(struct spi_imx_data *spi_imx);
void (*reset)(struct spi_imx_data *spi_imx);
void (*setup_wml)(struct spi_imx_data *spi_imx);
void (*disable)(struct spi_imx_data *spi_imx);
bool has_dmamode;
bool has_targetmode;
unsigned int fifo_size;
bool dynamic_burst;
/*
* ERR009165 fixed or not:
* https://www.nxp.com/docs/en/errata/IMX6DQCE.pdf
*/
bool tx_glitch_fixed;
enum spi_imx_devtype devtype;
};
struct spi_imx_data {
struct spi_controller *controller;
struct device *dev;
struct completion xfer_done;
void __iomem *base;
unsigned long base_phys;
struct clk *clk_per;
struct clk *clk_ipg;
unsigned long spi_clk;
unsigned int spi_bus_clk;
unsigned int bits_per_word;
unsigned int spi_drctl;
unsigned int count, remainder;
void (*tx)(struct spi_imx_data *spi_imx);
void (*rx)(struct spi_imx_data *spi_imx);
void *rx_buf;
const void *tx_buf;
unsigned int txfifo; /* number of words pushed in tx FIFO */
unsigned int dynamic_burst;
bool rx_only;
/* Target mode */
bool target_mode;
bool target_aborted;
unsigned int target_burst;
/* DMA */
bool usedma;
u32 wml;
struct completion dma_rx_completion;
struct completion dma_tx_completion;
const struct spi_imx_devtype_data *devtype_data;
};
static inline int is_imx27_cspi(struct spi_imx_data *d)
{
return d->devtype_data->devtype == IMX27_CSPI;
}
static inline int is_imx35_cspi(struct spi_imx_data *d)
{
return d->devtype_data->devtype == IMX35_CSPI;
}
static inline int is_imx51_ecspi(struct spi_imx_data *d)
{
return d->devtype_data->devtype == IMX51_ECSPI;
}
static inline int is_imx53_ecspi(struct spi_imx_data *d)
{
return d->devtype_data->devtype == IMX53_ECSPI;
}
#define MXC_SPI_BUF_RX(type) \
static void spi_imx_buf_rx_##type(struct spi_imx_data *spi_imx) \
{ \
unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA); \
\
if (spi_imx->rx_buf) { \
*(type *)spi_imx->rx_buf = val; \
spi_imx->rx_buf += sizeof(type); \
} \
\
spi_imx->remainder -= sizeof(type); \
}
#define MXC_SPI_BUF_TX(type) \
static void spi_imx_buf_tx_##type(struct spi_imx_data *spi_imx) \
{ \
type val = 0; \
\
if (spi_imx->tx_buf) { \
val = *(type *)spi_imx->tx_buf; \
spi_imx->tx_buf += sizeof(type); \
} \
\
spi_imx->count -= sizeof(type); \
\
writel(val, spi_imx->base + MXC_CSPITXDATA); \
}
MXC_SPI_BUF_RX(u8)
MXC_SPI_BUF_TX(u8)
MXC_SPI_BUF_RX(u16)
MXC_SPI_BUF_TX(u16)
MXC_SPI_BUF_RX(u32)
MXC_SPI_BUF_TX(u32)
/* First entry is reserved, second entry is valid only if SDHC_SPIEN is set
* (which is currently not the case in this driver)
*/
static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
256, 384, 512, 768, 1024};
/* MX21, MX27 */
static unsigned int spi_imx_clkdiv_1(unsigned int fin,
unsigned int fspi, unsigned int max, unsigned int *fres)
{
int i;
for (i = 2; i < max; i++)
if (fspi * mxc_clkdivs[i] >= fin)
break;
*fres = fin / mxc_clkdivs[i];
return i;
}
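/*
* Worked example for spi_imx_clkdiv_1() (illustrative numbers): with
* fin = 16 MHz and fspi = 1 MHz the loop stops at i = 6, because
* mxc_clkdivs[6] = 16 is the first divider with 1 MHz * 16 >= 16 MHz,
* so *fres becomes 16 MHz / 16 = 1 MHz and 6 is returned for the data
* rate field.
*/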
/* MX1, MX31, MX35, MX51 CSPI */
static unsigned int spi_imx_clkdiv_2(unsigned int fin,
unsigned int fspi, unsigned int *fres)
{
int i, div = 4;
for (i = 0; i < 7; i++) {
if (fspi * div >= fin)
goto out;
div <<= 1;
}
out:
*fres = fin / div;
return i;
}
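/*
* Worked example for spi_imx_clkdiv_2() (illustrative numbers): with
* fin = 16 MHz and fspi = 1 MHz the divider doubles from 4 until
* 1 MHz * 16 >= 16 MHz, i.e. i = 2 and div = 16, so *fres is 1 MHz
* and 2 is returned.
*/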
static int spi_imx_bytes_per_word(const int bits_per_word)
{
if (bits_per_word <= 8)
return 1;
else if (bits_per_word <= 16)
return 2;
else
return 4;
}
static bool spi_imx_can_dma(struct spi_controller *controller, struct spi_device *spi,
struct spi_transfer *transfer)
{
struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
if (!use_dma || controller->fallback)
return false;
if (!controller->dma_rx)
return false;
if (spi_imx->target_mode)
return false;
if (transfer->len < spi_imx->devtype_data->fifo_size)
return false;
spi_imx->dynamic_burst = 0;
return true;
}
/*
* Note the number of natively supported chip selects for MX51 is 4. Some
* devices may have fewer actual SS pins but the register map supports 4. When
* using gpio chip selects the cs values passed into the macros below can go
* outside the range 0 - 3. We therefore need to limit the cs value to avoid
* corrupting bits outside the allocated locations.
*
* The simplest way to do this is to just mask the cs bits to 2 bits. This
* still allows all 4 native chip selects to work as well as gpio chip selects
* (which can use any of the 4 chip select configurations).
*/
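/*
* For example (illustrative), a chip select value of 5 passed into
* MX51_ECSPI_CTRL_CS() below is masked to 5 & 3 = 1, so it can never
* touch register bits outside the four allocated CS configurations.
*/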
#define MX51_ECSPI_CTRL 0x08
#define MX51_ECSPI_CTRL_ENABLE (1 << 0)
#define MX51_ECSPI_CTRL_XCH (1 << 2)
#define MX51_ECSPI_CTRL_SMC (1 << 3)
#define MX51_ECSPI_CTRL_MODE_MASK (0xf << 4)
#define MX51_ECSPI_CTRL_DRCTL(drctl) ((drctl) << 16)
#define MX51_ECSPI_CTRL_POSTDIV_OFFSET 8
#define MX51_ECSPI_CTRL_PREDIV_OFFSET 12
#define MX51_ECSPI_CTRL_CS(cs) ((cs & 3) << 18)
#define MX51_ECSPI_CTRL_BL_OFFSET 20
#define MX51_ECSPI_CTRL_BL_MASK (0xfff << 20)
#define MX51_ECSPI_CONFIG 0x0c
#define MX51_ECSPI_CONFIG_SCLKPHA(cs) (1 << ((cs & 3) + 0))
#define MX51_ECSPI_CONFIG_SCLKPOL(cs) (1 << ((cs & 3) + 4))
#define MX51_ECSPI_CONFIG_SBBCTRL(cs) (1 << ((cs & 3) + 8))
#define MX51_ECSPI_CONFIG_SSBPOL(cs) (1 << ((cs & 3) + 12))
#define MX51_ECSPI_CONFIG_DATACTL(cs) (1 << ((cs & 3) + 16))
#define MX51_ECSPI_CONFIG_SCLKCTL(cs) (1 << ((cs & 3) + 20))
#define MX51_ECSPI_INT 0x10
#define MX51_ECSPI_INT_TEEN (1 << 0)
#define MX51_ECSPI_INT_RREN (1 << 3)
#define MX51_ECSPI_INT_RDREN (1 << 4)
#define MX51_ECSPI_DMA 0x14
#define MX51_ECSPI_DMA_TX_WML(wml) ((wml) & 0x3f)
#define MX51_ECSPI_DMA_RX_WML(wml) (((wml) & 0x3f) << 16)
#define MX51_ECSPI_DMA_RXT_WML(wml) (((wml) & 0x3f) << 24)
#define MX51_ECSPI_DMA_TEDEN (1 << 7)
#define MX51_ECSPI_DMA_RXDEN (1 << 23)
#define MX51_ECSPI_DMA_RXTDEN (1 << 31)
#define MX51_ECSPI_STAT 0x18
#define MX51_ECSPI_STAT_RR (1 << 3)
#define MX51_ECSPI_TESTREG 0x20
#define MX51_ECSPI_TESTREG_LBC BIT(31)
static void spi_imx_buf_rx_swap_u32(struct spi_imx_data *spi_imx)
{
unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA);
if (spi_imx->rx_buf) {
#ifdef __LITTLE_ENDIAN
unsigned int bytes_per_word;
bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);
if (bytes_per_word == 1)
swab32s(&val);
else if (bytes_per_word == 2)
swahw32s(&val);
#endif
*(u32 *)spi_imx->rx_buf = val;
spi_imx->rx_buf += sizeof(u32);
}
spi_imx->remainder -= sizeof(u32);
}
static void spi_imx_buf_rx_swap(struct spi_imx_data *spi_imx)
{
int unaligned;
u32 val;
unaligned = spi_imx->remainder % 4;
if (!unaligned) {
spi_imx_buf_rx_swap_u32(spi_imx);
return;
}
if (spi_imx_bytes_per_word(spi_imx->bits_per_word) == 2) {
spi_imx_buf_rx_u16(spi_imx);
return;
}
val = readl(spi_imx->base + MXC_CSPIRXDATA);
while (unaligned--) {
if (spi_imx->rx_buf) {
*(u8 *)spi_imx->rx_buf = (val >> (8 * unaligned)) & 0xff;
spi_imx->rx_buf++;
}
spi_imx->remainder--;
}
}
static void spi_imx_buf_tx_swap_u32(struct spi_imx_data *spi_imx)
{
u32 val = 0;
#ifdef __LITTLE_ENDIAN
unsigned int bytes_per_word;
#endif
if (spi_imx->tx_buf) {
val = *(u32 *)spi_imx->tx_buf;
spi_imx->tx_buf += sizeof(u32);
}
spi_imx->count -= sizeof(u32);
#ifdef __LITTLE_ENDIAN
bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);
if (bytes_per_word == 1)
swab32s(&val);
else if (bytes_per_word == 2)
swahw32s(&val);
#endif
writel(val, spi_imx->base + MXC_CSPITXDATA);
}
static void spi_imx_buf_tx_swap(struct spi_imx_data *spi_imx)
{
int unaligned;
u32 val = 0;
unaligned = spi_imx->count % 4;
if (!unaligned) {
spi_imx_buf_tx_swap_u32(spi_imx);
return;
}
if (spi_imx_bytes_per_word(spi_imx->bits_per_word) == 2) {
spi_imx_buf_tx_u16(spi_imx);
return;
}
while (unaligned--) {
if (spi_imx->tx_buf) {
val |= *(u8 *)spi_imx->tx_buf << (8 * unaligned);
spi_imx->tx_buf++;
}
spi_imx->count--;
}
writel(val, spi_imx->base + MXC_CSPITXDATA);
}
static void mx53_ecspi_rx_target(struct spi_imx_data *spi_imx)
{
u32 val = be32_to_cpu(readl(spi_imx->base + MXC_CSPIRXDATA));
if (spi_imx->rx_buf) {
int n_bytes = spi_imx->target_burst % sizeof(val);
if (!n_bytes)
n_bytes = sizeof(val);
memcpy(spi_imx->rx_buf,
((u8 *)&val) + sizeof(val) - n_bytes, n_bytes);
spi_imx->rx_buf += n_bytes;
spi_imx->target_burst -= n_bytes;
}
spi_imx->remainder -= sizeof(u32);
}
static void mx53_ecspi_tx_target(struct spi_imx_data *spi_imx)
{
u32 val = 0;
int n_bytes = spi_imx->count % sizeof(val);
if (!n_bytes)
n_bytes = sizeof(val);
if (spi_imx->tx_buf) {
memcpy(((u8 *)&val) + sizeof(val) - n_bytes,
spi_imx->tx_buf, n_bytes);
val = cpu_to_be32(val);
spi_imx->tx_buf += n_bytes;
}
spi_imx->count -= n_bytes;
writel(val, spi_imx->base + MXC_CSPITXDATA);
}
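/*
* Example (illustrative): for a 6 byte target transfer the first word
* written by mx53_ecspi_tx_target() carries 6 % 4 = 2 bytes and the
* second word carries the remaining 4 bytes; mx53_ecspi_rx_target()
* unpacks the received words the same way.
*/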
/* MX51 eCSPI */
static unsigned int mx51_ecspi_clkdiv(struct spi_imx_data *spi_imx,
unsigned int fspi, unsigned int *fres)
{
/*
* there are two 4-bit dividers, the pre-divider divides by
* $pre, the post-divider by 2^$post
*/
unsigned int pre, post;
unsigned int fin = spi_imx->spi_clk;
fspi = min(fspi, fin);
post = fls(fin) - fls(fspi);
if (fin > fspi << post)
post++;
/* now we have: (fin <= fspi << post) with post being minimal */
post = max(4U, post) - 4;
if (unlikely(post > 0xf)) {
dev_err(spi_imx->dev, "cannot set clock freq: %u (base freq: %u)\n",
fspi, fin);
return 0xff;
}
pre = DIV_ROUND_UP(fin, fspi << post) - 1;
dev_dbg(spi_imx->dev, "%s: fin: %u, fspi: %u, post: %u, pre: %u\n",
__func__, fin, fspi, post, pre);
/* Resulting frequency for the SCLK line. */
*fres = (fin / (pre + 1)) >> post;
return (pre << MX51_ECSPI_CTRL_PREDIV_OFFSET) |
(post << MX51_ECSPI_CTRL_POSTDIV_OFFSET);
}
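/*
* Worked example for mx51_ecspi_clkdiv() (illustrative numbers): with
* fin = 60 MHz and fspi = 20 MHz, post is first computed as 2 and then
* reduced to 0 by "max(4U, post) - 4", pre becomes
* DIV_ROUND_UP(60 MHz, 20 MHz << 0) - 1 = 2, and *fres ends up at
* (60 MHz / 3) >> 0 = 20 MHz, i.e. the requested rate is met exactly.
*/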
static void mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int enable)
{
unsigned int val = 0;
if (enable & MXC_INT_TE)
val |= MX51_ECSPI_INT_TEEN;
if (enable & MXC_INT_RR)
val |= MX51_ECSPI_INT_RREN;
if (enable & MXC_INT_RDR)
val |= MX51_ECSPI_INT_RDREN;
writel(val, spi_imx->base + MX51_ECSPI_INT);
}
static void mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
{
u32 reg;
reg = readl(spi_imx->base + MX51_ECSPI_CTRL);
reg |= MX51_ECSPI_CTRL_XCH;
writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
}
static void mx51_ecspi_disable(struct spi_imx_data *spi_imx)
{
u32 ctrl;
ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
ctrl &= ~MX51_ECSPI_CTRL_ENABLE;
writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
}
static int mx51_ecspi_channel(const struct spi_device *spi)
{
if (!spi_get_csgpiod(spi, 0))
return spi_get_chipselect(spi, 0);
return spi->controller->unused_native_cs;
}
static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx,
struct spi_message *msg)
{
struct spi_device *spi = msg->spi;
struct spi_transfer *xfer;
u32 ctrl = MX51_ECSPI_CTRL_ENABLE;
u32 min_speed_hz = ~0U;
u32 testreg, delay;
u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
u32 current_cfg = cfg;
int channel = mx51_ecspi_channel(spi);
/* set Host or Target mode */
if (spi_imx->target_mode)
ctrl &= ~MX51_ECSPI_CTRL_MODE_MASK;
else
ctrl |= MX51_ECSPI_CTRL_MODE_MASK;
/*
* Enable SPI_RDY handling (falling edge/level triggered).
*/
if (spi->mode & SPI_READY)
ctrl |= MX51_ECSPI_CTRL_DRCTL(spi_imx->spi_drctl);
/* set chip select to use */
ctrl |= MX51_ECSPI_CTRL_CS(channel);
/*
* The ctrl register must be written first: while the EN bit is not
* set, the other registers must not be written to.
*/
writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
testreg = readl(spi_imx->base + MX51_ECSPI_TESTREG);
if (spi->mode & SPI_LOOP)
testreg |= MX51_ECSPI_TESTREG_LBC;
else
testreg &= ~MX51_ECSPI_TESTREG_LBC;
writel(testreg, spi_imx->base + MX51_ECSPI_TESTREG);
/*
* eCSPI burst completion by the chip select signal in Target mode
* is not functional on the i.MX53 SoC; instead, configure the SPI
* burst to complete when BURST_LENGTH + 1 bits have been received
*/
if (spi_imx->target_mode && is_imx53_ecspi(spi_imx))
cfg &= ~MX51_ECSPI_CONFIG_SBBCTRL(channel);
else
cfg |= MX51_ECSPI_CONFIG_SBBCTRL(channel);
if (spi->mode & SPI_CPOL) {
cfg |= MX51_ECSPI_CONFIG_SCLKPOL(channel);
cfg |= MX51_ECSPI_CONFIG_SCLKCTL(channel);
} else {
cfg &= ~MX51_ECSPI_CONFIG_SCLKPOL(channel);
cfg &= ~MX51_ECSPI_CONFIG_SCLKCTL(channel);
}
if (spi->mode & SPI_MOSI_IDLE_LOW)
cfg |= MX51_ECSPI_CONFIG_DATACTL(channel);
else
cfg &= ~MX51_ECSPI_CONFIG_DATACTL(channel);
if (spi->mode & SPI_CS_HIGH)
cfg |= MX51_ECSPI_CONFIG_SSBPOL(channel);
else
cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(channel);
if (cfg == current_cfg)
return 0;
writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);
/*
* Wait until the changes in the configuration register CONFIGREG
* propagate into the hardware. It takes exactly one tick of the
* SCLK clock, but we will wait two SCLK cycles just to be sure. The
* effect of the delay it takes for the hardware to apply changes
* is noticeable if the SCLK clock runs very slowly. In such a case, if
* the polarity of SCLK should be inverted, the GPIO ChipSelect might
* be asserted before the SCLK polarity changes, which would disrupt
* the SPI communication as the device on the other end would consider
* the change of SCLK polarity as a clock tick already.
*
* Because spi_imx->spi_bus_clk is only set in the prepare_message
* callback, iterate over all the transfers in spi_message, find the
* one with the lowest bus frequency, and use that bus frequency for the
* delay calculation. In case all transfers have speed_hz == 0, then
* min_speed_hz is ~0 and the resulting delay is zero.
*/
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
if (!xfer->speed_hz)
continue;
min_speed_hz = min(xfer->speed_hz, min_speed_hz);
}
delay = (2 * 1000000) / min_speed_hz;
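/*
* Example (illustrative): a slowest transfer of 100 kHz gives
* delay = 2000000 / 100000 = 20 us and takes the usleep_range() branch
* below; at 1 MHz the delay is 2 us and udelay() is used instead.
*/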
if (likely(delay < 10)) /* SCLK is faster than 200 kHz */
udelay(delay);
else /* SCLK is _very_ slow */
usleep_range(delay, delay + 10);
return 0;
}
static void mx51_configure_cpha(struct spi_imx_data *spi_imx,
struct spi_device *spi)
{
bool cpha = (spi->mode & SPI_CPHA);
bool flip_cpha = (spi->mode & SPI_RX_CPHA_FLIP) && spi_imx->rx_only;
u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
int channel = mx51_ecspi_channel(spi);
/* Flip cpha logical value iff flip_cpha */
cpha ^= flip_cpha;
if (cpha)
cfg |= MX51_ECSPI_CONFIG_SCLKPHA(channel);
else
cfg &= ~MX51_ECSPI_CONFIG_SCLKPHA(channel);
writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);
}
static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
struct spi_device *spi)
{
u32 ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
u32 clk;
/* Clear BL field and set the right value */
ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
if (spi_imx->target_mode && is_imx53_ecspi(spi_imx))
ctrl |= (spi_imx->target_burst * 8 - 1)
<< MX51_ECSPI_CTRL_BL_OFFSET;
else {
if (spi_imx->count >= 512)
ctrl |= 0xFFF << MX51_ECSPI_CTRL_BL_OFFSET;
else
ctrl |= (spi_imx->count * spi_imx->bits_per_word - 1)
<< MX51_ECSPI_CTRL_BL_OFFSET;
}
/* set clock speed */
ctrl &= ~(0xf << MX51_ECSPI_CTRL_POSTDIV_OFFSET |
0xf << MX51_ECSPI_CTRL_PREDIV_OFFSET);
ctrl |= mx51_ecspi_clkdiv(spi_imx, spi_imx->spi_bus_clk, &clk);
spi_imx->spi_bus_clk = clk;
mx51_configure_cpha(spi_imx, spi);
/*
* ERR009165: work in XCH mode instead of SMC as PIO on the chips
* before i.mx6ul.
*/
if (spi_imx->usedma && spi_imx->devtype_data->tx_glitch_fixed)
ctrl |= MX51_ECSPI_CTRL_SMC;
else
ctrl &= ~MX51_ECSPI_CTRL_SMC;
writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
return 0;
}
static void mx51_setup_wml(struct spi_imx_data *spi_imx)
{
u32 tx_wml = 0;
if (spi_imx->devtype_data->tx_glitch_fixed)
tx_wml = spi_imx->wml;
/*
* Configure the DMA register: setup the watermark
* and enable DMA request.
*/
writel(MX51_ECSPI_DMA_RX_WML(spi_imx->wml - 1) |
MX51_ECSPI_DMA_TX_WML(tx_wml) |
MX51_ECSPI_DMA_RXT_WML(spi_imx->wml) |
MX51_ECSPI_DMA_TEDEN | MX51_ECSPI_DMA_RXDEN |
MX51_ECSPI_DMA_RXTDEN, spi_imx->base + MX51_ECSPI_DMA);
}
static int mx51_ecspi_rx_available(struct spi_imx_data *spi_imx)
{
return readl(spi_imx->base + MX51_ECSPI_STAT) & MX51_ECSPI_STAT_RR;
}
static void mx51_ecspi_reset(struct spi_imx_data *spi_imx)
{
/* drain receive buffer */
while (mx51_ecspi_rx_available(spi_imx))
readl(spi_imx->base + MXC_CSPIRXDATA);
}
#define MX31_INTREG_TEEN (1 << 0)
#define MX31_INTREG_RREN (1 << 3)
#define MX31_CSPICTRL_ENABLE (1 << 0)
#define MX31_CSPICTRL_HOST (1 << 1)
#define MX31_CSPICTRL_XCH (1 << 2)
#define MX31_CSPICTRL_SMC (1 << 3)
#define MX31_CSPICTRL_POL (1 << 4)
#define MX31_CSPICTRL_PHA (1 << 5)
#define MX31_CSPICTRL_SSCTL (1 << 6)
#define MX31_CSPICTRL_SSPOL (1 << 7)
#define MX31_CSPICTRL_BC_SHIFT 8
#define MX35_CSPICTRL_BL_SHIFT 20
#define MX31_CSPICTRL_CS_SHIFT 24
#define MX35_CSPICTRL_CS_SHIFT 12
#define MX31_CSPICTRL_DR_SHIFT 16
#define MX31_CSPI_DMAREG 0x10
#define MX31_DMAREG_RH_DEN (1<<4)
#define MX31_DMAREG_TH_DEN (1<<1)
#define MX31_CSPISTATUS 0x14
#define MX31_STATUS_RR (1 << 3)
#define MX31_CSPI_TESTREG 0x1C
#define MX31_TEST_LBC (1 << 14)
/* These functions also work for the i.MX35, but be aware that
* the i.MX35 has a slightly different register layout for bits
* we do not use here.
*/
static void mx31_intctrl(struct spi_imx_data *spi_imx, int enable)
{
unsigned int val = 0;
if (enable & MXC_INT_TE)
val |= MX31_INTREG_TEEN;
if (enable & MXC_INT_RR)
val |= MX31_INTREG_RREN;
writel(val, spi_imx->base + MXC_CSPIINT);
}
static void mx31_trigger(struct spi_imx_data *spi_imx)
{
unsigned int reg;
reg = readl(spi_imx->base + MXC_CSPICTRL);
reg |= MX31_CSPICTRL_XCH;
writel(reg, spi_imx->base + MXC_CSPICTRL);
}
static int mx31_prepare_message(struct spi_imx_data *spi_imx,
struct spi_message *msg)
{
return 0;
}
static int mx31_prepare_transfer(struct spi_imx_data *spi_imx,
struct spi_device *spi)
{
unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_HOST;
unsigned int clk;
reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->spi_bus_clk, &clk) <<
MX31_CSPICTRL_DR_SHIFT;
spi_imx->spi_bus_clk = clk;
if (is_imx35_cspi(spi_imx)) {
reg |= (spi_imx->bits_per_word - 1) << MX35_CSPICTRL_BL_SHIFT;
reg |= MX31_CSPICTRL_SSCTL;
} else {
reg |= (spi_imx->bits_per_word - 1) << MX31_CSPICTRL_BC_SHIFT;
}
if (spi->mode & SPI_CPHA)
reg |= MX31_CSPICTRL_PHA;
if (spi->mode & SPI_CPOL)
reg |= MX31_CSPICTRL_POL;
if (spi->mode & SPI_CS_HIGH)
reg |= MX31_CSPICTRL_SSPOL;
if (!spi_get_csgpiod(spi, 0))
reg |= (spi_get_chipselect(spi, 0)) <<
(is_imx35_cspi(spi_imx) ? MX35_CSPICTRL_CS_SHIFT :
MX31_CSPICTRL_CS_SHIFT);
if (spi_imx->usedma)
reg |= MX31_CSPICTRL_SMC;
writel(reg, spi_imx->base + MXC_CSPICTRL);
reg = readl(spi_imx->base + MX31_CSPI_TESTREG);
if (spi->mode & SPI_LOOP)
reg |= MX31_TEST_LBC;
else
reg &= ~MX31_TEST_LBC;
writel(reg, spi_imx->base + MX31_CSPI_TESTREG);
if (spi_imx->usedma) {
/*
* configure DMA requests when RXFIFO is half full and
* when TXFIFO is half empty
*/
writel(MX31_DMAREG_RH_DEN | MX31_DMAREG_TH_DEN,
spi_imx->base + MX31_CSPI_DMAREG);
}
return 0;
}
static int mx31_rx_available(struct spi_imx_data *spi_imx)
{
return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR;
}
static void mx31_reset(struct spi_imx_data *spi_imx)
{
/* drain receive buffer */
while (readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR)
readl(spi_imx->base + MXC_CSPIRXDATA);
}
#define MX21_INTREG_RR (1 << 4)
#define MX21_INTREG_TEEN (1 << 9)
#define MX21_INTREG_RREN (1 << 13)
#define MX21_CSPICTRL_POL (1 << 5)
#define MX21_CSPICTRL_PHA (1 << 6)
#define MX21_CSPICTRL_SSPOL (1 << 8)
#define MX21_CSPICTRL_XCH (1 << 9)
#define MX21_CSPICTRL_ENABLE (1 << 10)
#define MX21_CSPICTRL_HOST (1 << 11)
#define MX21_CSPICTRL_DR_SHIFT 14
#define MX21_CSPICTRL_CS_SHIFT 19
static void mx21_intctrl(struct spi_imx_data *spi_imx, int enable)
{
unsigned int val = 0;
if (enable & MXC_INT_TE)
val |= MX21_INTREG_TEEN;
if (enable & MXC_INT_RR)
val |= MX21_INTREG_RREN;
writel(val, spi_imx->base + MXC_CSPIINT);
}
static void mx21_trigger(struct spi_imx_data *spi_imx)
{
unsigned int reg;
reg = readl(spi_imx->base + MXC_CSPICTRL);
reg |= MX21_CSPICTRL_XCH;
writel(reg, spi_imx->base + MXC_CSPICTRL);
}
static int mx21_prepare_message(struct spi_imx_data *spi_imx,
struct spi_message *msg)
{
return 0;
}
static int mx21_prepare_transfer(struct spi_imx_data *spi_imx,
struct spi_device *spi)
{
unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_HOST;
unsigned int max = is_imx27_cspi(spi_imx) ? 16 : 18;
unsigned int clk;
reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, spi_imx->spi_bus_clk, max, &clk)
<< MX21_CSPICTRL_DR_SHIFT;
spi_imx->spi_bus_clk = clk;
reg |= spi_imx->bits_per_word - 1;
if (spi->mode & SPI_CPHA)
reg |= MX21_CSPICTRL_PHA;
if (spi->mode & SPI_CPOL)
reg |= MX21_CSPICTRL_POL;
if (spi->mode & SPI_CS_HIGH)
reg |= MX21_CSPICTRL_SSPOL;
if (!spi_get_csgpiod(spi, 0))
reg |= spi_get_chipselect(spi, 0) << MX21_CSPICTRL_CS_SHIFT;
writel(reg, spi_imx->base + MXC_CSPICTRL);
return 0;
}
static int mx21_rx_available(struct spi_imx_data *spi_imx)
{
return readl(spi_imx->base + MXC_CSPIINT) & MX21_INTREG_RR;
}
static void mx21_reset(struct spi_imx_data *spi_imx)
{
writel(1, spi_imx->base + MXC_RESET);
}
#define MX1_INTREG_RR (1 << 3)
#define MX1_INTREG_TEEN (1 << 8)
#define MX1_INTREG_RREN (1 << 11)
#define MX1_CSPICTRL_POL (1 << 4)
#define MX1_CSPICTRL_PHA (1 << 5)
#define MX1_CSPICTRL_XCH (1 << 8)
#define MX1_CSPICTRL_ENABLE (1 << 9)
#define MX1_CSPICTRL_HOST (1 << 10)
#define MX1_CSPICTRL_DR_SHIFT 13
static void mx1_intctrl(struct spi_imx_data *spi_imx, int enable)
{
unsigned int val = 0;
if (enable & MXC_INT_TE)
val |= MX1_INTREG_TEEN;
if (enable & MXC_INT_RR)
val |= MX1_INTREG_RREN;
writel(val, spi_imx->base + MXC_CSPIINT);
}
static void mx1_trigger(struct spi_imx_data *spi_imx)
{
unsigned int reg;
reg = readl(spi_imx->base + MXC_CSPICTRL);
reg |= MX1_CSPICTRL_XCH;
writel(reg, spi_imx->base + MXC_CSPICTRL);
}
static int mx1_prepare_message(struct spi_imx_data *spi_imx,
struct spi_message *msg)
{
return 0;
}
static int mx1_prepare_transfer(struct spi_imx_data *spi_imx,
struct spi_device *spi)
{
unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_HOST;
unsigned int clk;
reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->spi_bus_clk, &clk) <<
MX1_CSPICTRL_DR_SHIFT;
spi_imx->spi_bus_clk = clk;
reg |= spi_imx->bits_per_word - 1;
if (spi->mode & SPI_CPHA)
reg |= MX1_CSPICTRL_PHA;
if (spi->mode & SPI_CPOL)
reg |= MX1_CSPICTRL_POL;
writel(reg, spi_imx->base + MXC_CSPICTRL);
return 0;
}
static int mx1_rx_available(struct spi_imx_data *spi_imx)
{
return readl(spi_imx->base + MXC_CSPIINT) & MX1_INTREG_RR;
}
static void mx1_reset(struct spi_imx_data *spi_imx)
{
writel(1, spi_imx->base + MXC_RESET);
}
static struct spi_imx_devtype_data imx1_cspi_devtype_data = {
.intctrl = mx1_intctrl,
.prepare_message = mx1_prepare_message,
.prepare_transfer = mx1_prepare_transfer,
.trigger = mx1_trigger,
.rx_available = mx1_rx_available,
.reset = mx1_reset,
.fifo_size = 8,
.has_dmamode = false,
.dynamic_burst = false,
.has_targetmode = false,
.devtype = IMX1_CSPI,
};
static struct spi_imx_devtype_data imx21_cspi_devtype_data = {
.intctrl = mx21_intctrl,
.prepare_message = mx21_prepare_message,
.prepare_transfer = mx21_prepare_transfer,
.trigger = mx21_trigger,
.rx_available = mx21_rx_available,
.reset = mx21_reset,
.fifo_size = 8,
.has_dmamode = false,
.dynamic_burst = false,
.has_targetmode = false,
.devtype = IMX21_CSPI,
};
static struct spi_imx_devtype_data imx27_cspi_devtype_data = {
/* the i.MX27 CSPI shares its functions with the i.MX21 one */
.intctrl = mx21_intctrl,
.prepare_message = mx21_prepare_message,
.prepare_transfer = mx21_prepare_transfer,
.trigger = mx21_trigger,
.rx_available = mx21_rx_available,
.reset = mx21_reset,
.fifo_size = 8,
.has_dmamode = false,
.dynamic_burst = false,
.has_targetmode = false,
.devtype = IMX27_CSPI,
};
static struct spi_imx_devtype_data imx31_cspi_devtype_data = {
.intctrl = mx31_intctrl,
.prepare_message = mx31_prepare_message,
.prepare_transfer = mx31_prepare_transfer,
.trigger = mx31_trigger,
.rx_available = mx31_rx_available,
.reset = mx31_reset,
.fifo_size = 8,
.has_dmamode = false,
.dynamic_burst = false,
.has_targetmode = false,
.devtype = IMX31_CSPI,
};
static struct spi_imx_devtype_data imx35_cspi_devtype_data = {
/* the i.MX35 and later CSPI share their functions with the i.MX31 one */
.intctrl = mx31_intctrl,
.prepare_message = mx31_prepare_message,
.prepare_transfer = mx31_prepare_transfer,
.trigger = mx31_trigger,
.rx_available = mx31_rx_available,
.reset = mx31_reset,
.fifo_size = 8,
.has_dmamode = true,
.dynamic_burst = false,
.has_targetmode = false,
.devtype = IMX35_CSPI,
};
static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
.intctrl = mx51_ecspi_intctrl,
.prepare_message = mx51_ecspi_prepare_message,
.prepare_transfer = mx51_ecspi_prepare_transfer,
.trigger = mx51_ecspi_trigger,
.rx_available = mx51_ecspi_rx_available,
.reset = mx51_ecspi_reset,
.setup_wml = mx51_setup_wml,
.fifo_size = 64,
.has_dmamode = true,
.dynamic_burst = true,
.has_targetmode = true,
.disable = mx51_ecspi_disable,
.devtype = IMX51_ECSPI,
};
static struct spi_imx_devtype_data imx53_ecspi_devtype_data = {
.intctrl = mx51_ecspi_intctrl,
.prepare_message = mx51_ecspi_prepare_message,
.prepare_transfer = mx51_ecspi_prepare_transfer,
.trigger = mx51_ecspi_trigger,
.rx_available = mx51_ecspi_rx_available,
.reset = mx51_ecspi_reset,
.fifo_size = 64,
.has_dmamode = true,
.has_targetmode = true,
.disable = mx51_ecspi_disable,
.devtype = IMX53_ECSPI,
};
static struct spi_imx_devtype_data imx6ul_ecspi_devtype_data = {
.intctrl = mx51_ecspi_intctrl,
.prepare_message = mx51_ecspi_prepare_message,
.prepare_transfer = mx51_ecspi_prepare_transfer,
.trigger = mx51_ecspi_trigger,
.rx_available = mx51_ecspi_rx_available,
.reset = mx51_ecspi_reset,
.setup_wml = mx51_setup_wml,
.fifo_size = 64,
.has_dmamode = true,
.dynamic_burst = true,
.has_targetmode = true,
.tx_glitch_fixed = true,
.disable = mx51_ecspi_disable,
.devtype = IMX51_ECSPI,
};
static const struct of_device_id spi_imx_dt_ids[] = {
{ .compatible = "fsl,imx1-cspi", .data = &imx1_cspi_devtype_data, },
{ .compatible = "fsl,imx21-cspi", .data = &imx21_cspi_devtype_data, },
{ .compatible = "fsl,imx27-cspi", .data = &imx27_cspi_devtype_data, },
{ .compatible = "fsl,imx31-cspi", .data = &imx31_cspi_devtype_data, },
{ .compatible = "fsl,imx35-cspi", .data = &imx35_cspi_devtype_data, },
{ .compatible = "fsl,imx51-ecspi", .data = &imx51_ecspi_devtype_data, },
{ .compatible = "fsl,imx53-ecspi", .data = &imx53_ecspi_devtype_data, },
{ .compatible = "fsl,imx6ul-ecspi", .data = &imx6ul_ecspi_devtype_data, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, spi_imx_dt_ids);
static void spi_imx_set_burst_len(struct spi_imx_data *spi_imx, int n_bits)
{
u32 ctrl;
ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
ctrl |= ((n_bits - 1) << MX51_ECSPI_CTRL_BL_OFFSET);
writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
}
static void spi_imx_push(struct spi_imx_data *spi_imx)
{
unsigned int burst_len;
/*
* Reload the FIFO when the number of bytes remaining to be transferred
* in the current burst is 0. This only applies when bits_per_word is a
* multiple of 8.
*/
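/*
* Example (illustrative): for a 1000 byte transfer with dynamic bursts
* the first reload below uses 1000 % 512 = 488 bytes, so that the
* remaining 512 bytes form one maximal burst afterwards.
*/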
if (!spi_imx->remainder) {
if (spi_imx->dynamic_burst) {
/* We need to deal with unaligned data first */
burst_len = spi_imx->count % MX51_ECSPI_CTRL_MAX_BURST;
if (!burst_len)
burst_len = MX51_ECSPI_CTRL_MAX_BURST;
spi_imx_set_burst_len(spi_imx, burst_len * 8);
spi_imx->remainder = burst_len;
} else {
spi_imx->remainder = spi_imx_bytes_per_word(spi_imx->bits_per_word);
}
}
while (spi_imx->txfifo < spi_imx->devtype_data->fifo_size) {
if (!spi_imx->count)
break;
if (spi_imx->dynamic_burst &&
spi_imx->txfifo >= DIV_ROUND_UP(spi_imx->remainder, 4))
break;
spi_imx->tx(spi_imx);
spi_imx->txfifo++;
}
if (!spi_imx->target_mode)
spi_imx->devtype_data->trigger(spi_imx);
}
static irqreturn_t spi_imx_isr(int irq, void *dev_id)
{
struct spi_imx_data *spi_imx = dev_id;
while (spi_imx->txfifo &&
spi_imx->devtype_data->rx_available(spi_imx)) {
spi_imx->rx(spi_imx);
spi_imx->txfifo--;
}
if (spi_imx->count) {
spi_imx_push(spi_imx);
return IRQ_HANDLED;
}
if (spi_imx->txfifo) {
/* No data left to push, but still waiting for rx data;
* enable the receive data available interrupt.
*/
spi_imx->devtype_data->intctrl(
spi_imx, MXC_INT_RR);
return IRQ_HANDLED;
}
spi_imx->devtype_data->intctrl(spi_imx, 0);
complete(&spi_imx->xfer_done);
return IRQ_HANDLED;
}
static int spi_imx_dma_configure(struct spi_controller *controller)
{
int ret;
enum dma_slave_buswidth buswidth;
struct dma_slave_config rx = {}, tx = {};
struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
switch (spi_imx_bytes_per_word(spi_imx->bits_per_word)) {
case 4:
buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
break;
case 2:
buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
break;
case 1:
buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
break;
default:
return -EINVAL;
}
tx.direction = DMA_MEM_TO_DEV;
tx.dst_addr = spi_imx->base_phys + MXC_CSPITXDATA;
tx.dst_addr_width = buswidth;
tx.dst_maxburst = spi_imx->wml;
ret = dmaengine_slave_config(controller->dma_tx, &tx);
if (ret) {
dev_err(spi_imx->dev, "TX dma configuration failed with %d\n", ret);
return ret;
}
rx.direction = DMA_DEV_TO_MEM;
rx.src_addr = spi_imx->base_phys + MXC_CSPIRXDATA;
rx.src_addr_width = buswidth;
rx.src_maxburst = spi_imx->wml;
ret = dmaengine_slave_config(controller->dma_rx, &rx);
if (ret) {
dev_err(spi_imx->dev, "RX dma configuration failed with %d\n", ret);
return ret;
}
return 0;
}
static int spi_imx_setupxfer(struct spi_device *spi,
struct spi_transfer *t)
{
struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
if (!t)
return 0;
if (!t->speed_hz) {
if (!spi->max_speed_hz) {
dev_err(&spi->dev, "no speed_hz provided!\n");
return -EINVAL;
}
dev_dbg(&spi->dev, "using spi->max_speed_hz!\n");
spi_imx->spi_bus_clk = spi->max_speed_hz;
} else
spi_imx->spi_bus_clk = t->speed_hz;
spi_imx->bits_per_word = t->bits_per_word;
spi_imx->count = t->len;
/*
* Initialize the functions for transfer. To transfer non byte-aligned
* words, we have to use multiple word-size bursts, so we can't use
* dynamic_burst in that case.
*/
if (spi_imx->devtype_data->dynamic_burst && !spi_imx->target_mode &&
!(spi->mode & SPI_CS_WORD) &&
(spi_imx->bits_per_word == 8 ||
spi_imx->bits_per_word == 16 ||
spi_imx->bits_per_word == 32)) {
spi_imx->rx = spi_imx_buf_rx_swap;
spi_imx->tx = spi_imx_buf_tx_swap;
spi_imx->dynamic_burst = 1;
} else {
if (spi_imx->bits_per_word <= 8) {
spi_imx->rx = spi_imx_buf_rx_u8;
spi_imx->tx = spi_imx_buf_tx_u8;
} else if (spi_imx->bits_per_word <= 16) {
spi_imx->rx = spi_imx_buf_rx_u16;
spi_imx->tx = spi_imx_buf_tx_u16;
} else {
spi_imx->rx = spi_imx_buf_rx_u32;
spi_imx->tx = spi_imx_buf_tx_u32;
}
spi_imx->dynamic_burst = 0;
}
if (spi_imx_can_dma(spi_imx->controller, spi, t))
spi_imx->usedma = true;
else
spi_imx->usedma = false;
spi_imx->rx_only = ((t->tx_buf == NULL)
|| (t->tx_buf == spi->controller->dummy_tx));
if (is_imx53_ecspi(spi_imx) && spi_imx->target_mode) {
spi_imx->rx = mx53_ecspi_rx_target;
spi_imx->tx = mx53_ecspi_tx_target;
spi_imx->target_burst = t->len;
}
spi_imx->devtype_data->prepare_transfer(spi_imx, spi);
return 0;
}
static void spi_imx_sdma_exit(struct spi_imx_data *spi_imx)
{
struct spi_controller *controller = spi_imx->controller;
if (controller->dma_rx) {
dma_release_channel(controller->dma_rx);
controller->dma_rx = NULL;
}
if (controller->dma_tx) {
dma_release_channel(controller->dma_tx);
controller->dma_tx = NULL;
}
}
static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
struct spi_controller *controller)
{
int ret;
spi_imx->wml = spi_imx->devtype_data->fifo_size / 2;
/* Prepare for TX DMA: */
controller->dma_tx = dma_request_chan(dev, "tx");
if (IS_ERR(controller->dma_tx)) {
ret = PTR_ERR(controller->dma_tx);
dev_dbg(dev, "can't get the TX DMA channel, error %d!\n", ret);
controller->dma_tx = NULL;
goto err;
}
/* Prepare for RX : */
controller->dma_rx = dma_request_chan(dev, "rx");
if (IS_ERR(controller->dma_rx)) {
ret = PTR_ERR(controller->dma_rx);
dev_dbg(dev, "can't get the RX DMA channel, error %d\n", ret);
controller->dma_rx = NULL;
goto err;
}
init_completion(&spi_imx->dma_rx_completion);
init_completion(&spi_imx->dma_tx_completion);
controller->can_dma = spi_imx_can_dma;
controller->max_dma_len = MAX_SDMA_BD_BYTES;
spi_imx->controller->flags = SPI_CONTROLLER_MUST_RX |
SPI_CONTROLLER_MUST_TX;
return 0;
err:
spi_imx_sdma_exit(spi_imx);
return ret;
}
static void spi_imx_dma_rx_callback(void *cookie)
{
struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;
complete(&spi_imx->dma_rx_completion);
}
static void spi_imx_dma_tx_callback(void *cookie)
{
struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;
complete(&spi_imx->dma_tx_completion);
}
static int spi_imx_calculate_timeout(struct spi_imx_data *spi_imx, int size)
{
unsigned long timeout = 0;
/* Time for the actual data transfer plus the HW-related CS change delay */
timeout = (8 + 4) * size / spi_imx->spi_bus_clk;
/* Add an extra second for scheduler-related activities */
timeout += 1;
/* Double the calculated timeout */
return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC);
}
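/*
* Worked example for spi_imx_calculate_timeout() (illustrative numbers):
* a 4096 byte transfer at a 1 MHz bus clock contributes
* (8 + 4) * 4096 / 1000000 = 0 whole seconds of wire time, so the result
* is dominated by the one second scheduler margin and the final doubling,
* i.e. msecs_to_jiffies(2000).
*/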
static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
struct spi_transfer *transfer)
{
struct dma_async_tx_descriptor *desc_tx, *desc_rx;
unsigned long transfer_timeout;
unsigned long timeout;
struct spi_controller *controller = spi_imx->controller;
struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
struct scatterlist *last_sg = sg_last(rx->sgl, rx->nents);
unsigned int bytes_per_word, i;
int ret;
/* Get the right burst length from the last sg to ensure no tail data */
bytes_per_word = spi_imx_bytes_per_word(transfer->bits_per_word);
for (i = spi_imx->devtype_data->fifo_size / 2; i > 0; i--) {
if (!(sg_dma_len(last_sg) % (i * bytes_per_word)))
break;
}
/* Use 1 as the watermark level in case no suitable burst length was found */
if (i == 0)
i = 1;
spi_imx->wml = i;
ret = spi_imx_dma_configure(controller);
if (ret)
goto dma_failure_no_start;
if (!spi_imx->devtype_data->setup_wml) {
dev_err(spi_imx->dev, "No setup_wml()?\n");
ret = -EINVAL;
goto dma_failure_no_start;
}
spi_imx->devtype_data->setup_wml(spi_imx);
/*
* The TX DMA setup starts the transfer, so make sure RX is configured
* before TX.
*/
desc_rx = dmaengine_prep_slave_sg(controller->dma_rx,
rx->sgl, rx->nents, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_rx) {
ret = -EINVAL;
goto dma_failure_no_start;
}
desc_rx->callback = spi_imx_dma_rx_callback;
desc_rx->callback_param = (void *)spi_imx;
dmaengine_submit(desc_rx);
reinit_completion(&spi_imx->dma_rx_completion);
dma_async_issue_pending(controller->dma_rx);
desc_tx = dmaengine_prep_slave_sg(controller->dma_tx,
tx->sgl, tx->nents, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_tx) {
dmaengine_terminate_all(controller->dma_tx);
dmaengine_terminate_all(controller->dma_rx);
return -EINVAL;
}
desc_tx->callback = spi_imx_dma_tx_callback;
desc_tx->callback_param = (void *)spi_imx;
dmaengine_submit(desc_tx);
reinit_completion(&spi_imx->dma_tx_completion);
dma_async_issue_pending(controller->dma_tx);
transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);
/* Wait for the SDMA to finish the data transfer. */
timeout = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
transfer_timeout);
if (!timeout) {
dev_err(spi_imx->dev, "I/O Error in DMA TX\n");
dmaengine_terminate_all(controller->dma_tx);
dmaengine_terminate_all(controller->dma_rx);
return -ETIMEDOUT;
}
timeout = wait_for_completion_timeout(&spi_imx->dma_rx_completion,
transfer_timeout);
if (!timeout) {
dev_err(&controller->dev, "I/O Error in DMA RX\n");
spi_imx->devtype_data->reset(spi_imx);
dmaengine_terminate_all(controller->dma_rx);
return -ETIMEDOUT;
}
return 0;
/* fallback to pio */
dma_failure_no_start:
transfer->error |= SPI_TRANS_FAIL_NO_START;
return ret;
}
static int spi_imx_pio_transfer(struct spi_device *spi,
struct spi_transfer *transfer)
{
struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
unsigned long transfer_timeout;
unsigned long timeout;
spi_imx->tx_buf = transfer->tx_buf;
spi_imx->rx_buf = transfer->rx_buf;
spi_imx->count = transfer->len;
spi_imx->txfifo = 0;
spi_imx->remainder = 0;
reinit_completion(&spi_imx->xfer_done);
spi_imx_push(spi_imx);
spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE);
transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);
timeout = wait_for_completion_timeout(&spi_imx->xfer_done,
transfer_timeout);
if (!timeout) {
dev_err(&spi->dev, "I/O Error in PIO\n");
spi_imx->devtype_data->reset(spi_imx);
return -ETIMEDOUT;
}
return 0;
}
static int spi_imx_poll_transfer(struct spi_device *spi,
struct spi_transfer *transfer)
{
struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
unsigned long timeout;
spi_imx->tx_buf = transfer->tx_buf;
spi_imx->rx_buf = transfer->rx_buf;
spi_imx->count = transfer->len;
spi_imx->txfifo = 0;
spi_imx->remainder = 0;
/* Fill the FIFO before the timeout calculation: if we are
* interrupted here, the data is being transferred by
* the HW while we are interrupted.
*/
spi_imx_push(spi_imx);
timeout = spi_imx_calculate_timeout(spi_imx, transfer->len) + jiffies;
while (spi_imx->txfifo) {
/* RX */
while (spi_imx->txfifo &&
spi_imx->devtype_data->rx_available(spi_imx)) {
spi_imx->rx(spi_imx);
spi_imx->txfifo--;
}
/* TX */
if (spi_imx->count) {
spi_imx_push(spi_imx);
continue;
}
if (spi_imx->txfifo &&
time_after(jiffies, timeout)) {
dev_err_ratelimited(&spi->dev,
"timeout period reached: jiffies: %lu- falling back to interrupt mode\n",
jiffies - timeout);
/* fall back to interrupt mode */
return spi_imx_pio_transfer(spi, transfer);
}
}
return 0;
}
static int spi_imx_pio_transfer_target(struct spi_device *spi,
struct spi_transfer *transfer)
{
struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
int ret = 0;
if (is_imx53_ecspi(spi_imx) &&
transfer->len > MX53_MAX_TRANSFER_BYTES) {
dev_err(&spi->dev, "Transaction too big, max size is %d bytes\n",
MX53_MAX_TRANSFER_BYTES);
return -EMSGSIZE;
}
spi_imx->tx_buf = transfer->tx_buf;
spi_imx->rx_buf = transfer->rx_buf;
spi_imx->count = transfer->len;
spi_imx->txfifo = 0;
spi_imx->remainder = 0;
reinit_completion(&spi_imx->xfer_done);
spi_imx->target_aborted = false;
spi_imx_push(spi_imx);
spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE | MXC_INT_RDR);
if (wait_for_completion_interruptible(&spi_imx->xfer_done) ||
spi_imx->target_aborted) {
dev_dbg(&spi->dev, "interrupted\n");
ret = -EINTR;
}
/* The eCSPI has a HW issue when working in Target mode:
* after 64 words are written to the TXFIFO, even once the TXFIFO
* becomes empty, ECSPI_TXDATA keeps shifting out the last word,
* so we have to disable the eCSPI when in target mode after the
* transfer completes.
*/
if (spi_imx->devtype_data->disable)
spi_imx->devtype_data->disable(spi_imx);
return ret;
}
static int spi_imx_transfer_one(struct spi_controller *controller,
struct spi_device *spi,
struct spi_transfer *transfer)
{
struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
unsigned long hz_per_byte, byte_limit;
spi_imx_setupxfer(spi, transfer);
transfer->effective_speed_hz = spi_imx->spi_bus_clk;
/* flush rxfifo before transfer */
while (spi_imx->devtype_data->rx_available(spi_imx))
readl(spi_imx->base + MXC_CSPIRXDATA);
if (spi_imx->target_mode)
return spi_imx_pio_transfer_target(spi, transfer);
/*
* If we decided in spi_imx_can_dma() that we want to do a DMA
* transfer, the SPI transfer has already been mapped, so we
* have to do the DMA transfer here.
*/
if (spi_imx->usedma)
return spi_imx_dma_transfer(spi_imx, transfer);
/*
* Calculate the estimated time in us the transfer runs. Find
* the number of Hz per byte per polling limit.
*/
hz_per_byte = polling_limit_us ? ((8 + 4) * USEC_PER_SEC) / polling_limit_us : 0;
byte_limit = hz_per_byte ? transfer->effective_speed_hz / hz_per_byte : 1;
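/*
* Worked example: with the default polling_limit_us of 30,
* hz_per_byte = 12000000 / 30 = 400000, so a transfer running at an
* effective 4 MHz gets byte_limit = 10 and anything shorter than
* 10 bytes is handled by polling below.
*/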
/* run in polling mode for short transfers */
if (transfer->len < byte_limit)
return spi_imx_poll_transfer(spi, transfer);
return spi_imx_pio_transfer(spi, transfer);
}
static int spi_imx_setup(struct spi_device *spi)
{
dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __func__,
spi->mode, spi->bits_per_word, spi->max_speed_hz);
return 0;
}
static void spi_imx_cleanup(struct spi_device *spi)
{
}
static int
spi_imx_prepare_message(struct spi_controller *controller, struct spi_message *msg)
{
struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
int ret;
ret = pm_runtime_resume_and_get(spi_imx->dev);
if (ret < 0) {
dev_err(spi_imx->dev, "failed to enable clock\n");
return ret;
}
ret = spi_imx->devtype_data->prepare_message(spi_imx, msg);
if (ret) {
pm_runtime_mark_last_busy(spi_imx->dev);
pm_runtime_put_autosuspend(spi_imx->dev);
}
return ret;
}
static int
spi_imx_unprepare_message(struct spi_controller *controller, struct spi_message *msg)
{
struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
pm_runtime_mark_last_busy(spi_imx->dev);
pm_runtime_put_autosuspend(spi_imx->dev);
return 0;
}
static int spi_imx_target_abort(struct spi_controller *controller)
{
struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
spi_imx->target_aborted = true;
complete(&spi_imx->xfer_done);
return 0;
}
static int spi_imx_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct spi_controller *controller;
struct spi_imx_data *spi_imx;
struct resource *res;
int ret, irq, spi_drctl;
const struct spi_imx_devtype_data *devtype_data =
of_device_get_match_data(&pdev->dev);
bool target_mode;
u32 val;
target_mode = devtype_data->has_targetmode &&
of_property_read_bool(np, "spi-slave");
if (target_mode)
controller = spi_alloc_target(&pdev->dev,
sizeof(struct spi_imx_data));
else
controller = spi_alloc_host(&pdev->dev,
sizeof(struct spi_imx_data));
if (!controller)
return -ENOMEM;
ret = of_property_read_u32(np, "fsl,spi-rdy-drctl", &spi_drctl);
if ((ret < 0) || (spi_drctl >= 0x3)) {
/* '11' is reserved */
spi_drctl = 0;
}
platform_set_drvdata(pdev, controller);
controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
controller->bus_num = np ? -1 : pdev->id;
controller->use_gpio_descriptors = true;
spi_imx = spi_controller_get_devdata(controller);
spi_imx->controller = controller;
spi_imx->dev = &pdev->dev;
spi_imx->target_mode = target_mode;
spi_imx->devtype_data = devtype_data;
/*
* Get the number of chip selects from device properties. This can
* come from the device tree or board files; if it is not defined,
* a default value of 3 chip selects will be used, as all the legacy
* board files have <= 3 chip selects.
*/
if (!device_property_read_u32(&pdev->dev, "num-cs", &val))
controller->num_chipselect = val;
else
controller->num_chipselect = 3;
controller->transfer_one = spi_imx_transfer_one;
controller->setup = spi_imx_setup;
controller->cleanup = spi_imx_cleanup;
controller->prepare_message = spi_imx_prepare_message;
controller->unprepare_message = spi_imx_unprepare_message;
controller->target_abort = spi_imx_target_abort;
controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_NO_CS |
SPI_MOSI_IDLE_LOW;
if (is_imx35_cspi(spi_imx) || is_imx51_ecspi(spi_imx) ||
is_imx53_ecspi(spi_imx))
controller->mode_bits |= SPI_LOOP | SPI_READY;
if (is_imx51_ecspi(spi_imx) || is_imx53_ecspi(spi_imx))
controller->mode_bits |= SPI_RX_CPHA_FLIP;
if (is_imx51_ecspi(spi_imx) &&
device_property_read_u32(&pdev->dev, "cs-gpios", NULL))
/*
* When using HW-CS, implementing SPI_CS_WORD can be done by just
* setting the burst length to the word size. This is
* considerably faster than manually controlling the CS.
*/
controller->mode_bits |= SPI_CS_WORD;
if (is_imx51_ecspi(spi_imx) || is_imx53_ecspi(spi_imx)) {
controller->max_native_cs = 4;
controller->flags |= SPI_CONTROLLER_GPIO_SS;
}
spi_imx->spi_drctl = spi_drctl;
init_completion(&spi_imx->xfer_done);
spi_imx->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(spi_imx->base)) {
ret = PTR_ERR(spi_imx->base);
goto out_controller_put;
}
spi_imx->base_phys = res->start;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
goto out_controller_put;
}
ret = devm_request_irq(&pdev->dev, irq, spi_imx_isr, 0,
dev_name(&pdev->dev), spi_imx);
if (ret) {
dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
goto out_controller_put;
}
spi_imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(spi_imx->clk_ipg)) {
ret = PTR_ERR(spi_imx->clk_ipg);
goto out_controller_put;
}
spi_imx->clk_per = devm_clk_get(&pdev->dev, "per");
if (IS_ERR(spi_imx->clk_per)) {
ret = PTR_ERR(spi_imx->clk_per);
goto out_controller_put;
}
ret = clk_prepare_enable(spi_imx->clk_per);
if (ret)
goto out_controller_put;
ret = clk_prepare_enable(spi_imx->clk_ipg);
if (ret)
goto out_put_per;
pm_runtime_set_autosuspend_delay(spi_imx->dev, MXC_RPM_TIMEOUT);
pm_runtime_use_autosuspend(spi_imx->dev);
pm_runtime_get_noresume(spi_imx->dev);
pm_runtime_set_active(spi_imx->dev);
pm_runtime_enable(spi_imx->dev);
spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per);
/*
* Only validated on i.mx35 and i.mx6 so far; the constraint can be
* removed once validated on other chips.
*/
if (spi_imx->devtype_data->has_dmamode) {
ret = spi_imx_sdma_init(&pdev->dev, spi_imx, controller);
if (ret == -EPROBE_DEFER)
goto out_runtime_pm_put;
if (ret < 0)
dev_dbg(&pdev->dev, "dma setup error %d, use pio\n",
ret);
}
spi_imx->devtype_data->reset(spi_imx);
spi_imx->devtype_data->intctrl(spi_imx, 0);
controller->dev.of_node = pdev->dev.of_node;
ret = spi_register_controller(controller);
if (ret) {
dev_err_probe(&pdev->dev, ret, "register controller failed\n");
goto out_register_controller;
}
pm_runtime_mark_last_busy(spi_imx->dev);
pm_runtime_put_autosuspend(spi_imx->dev);
return ret;
out_register_controller:
if (spi_imx->devtype_data->has_dmamode)
spi_imx_sdma_exit(spi_imx);
out_runtime_pm_put:
pm_runtime_dont_use_autosuspend(spi_imx->dev);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_disable(spi_imx->dev);
clk_disable_unprepare(spi_imx->clk_ipg);
out_put_per:
clk_disable_unprepare(spi_imx->clk_per);
out_controller_put:
spi_controller_put(controller);
return ret;
}
static void spi_imx_remove(struct platform_device *pdev)
{
struct spi_controller *controller = platform_get_drvdata(pdev);
struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
int ret;
spi_unregister_controller(controller);
ret = pm_runtime_get_sync(spi_imx->dev);
if (ret >= 0)
writel(0, spi_imx->base + MXC_CSPICTRL);
else
dev_warn(spi_imx->dev, "failed to enable clock, skip hw disable\n");
pm_runtime_dont_use_autosuspend(spi_imx->dev);
pm_runtime_put_sync(spi_imx->dev);
pm_runtime_disable(spi_imx->dev);
spi_imx_sdma_exit(spi_imx);
}
static int __maybe_unused spi_imx_runtime_resume(struct device *dev)
{
struct spi_controller *controller = dev_get_drvdata(dev);
struct spi_imx_data *spi_imx;
int ret;
spi_imx = spi_controller_get_devdata(controller);
ret = clk_prepare_enable(spi_imx->clk_per);
if (ret)
return ret;
ret = clk_prepare_enable(spi_imx->clk_ipg);
if (ret) {
clk_disable_unprepare(spi_imx->clk_per);
return ret;
}
return 0;
}
static int __maybe_unused spi_imx_runtime_suspend(struct device *dev)
{
struct spi_controller *controller = dev_get_drvdata(dev);
struct spi_imx_data *spi_imx;
spi_imx = spi_controller_get_devdata(controller);
clk_disable_unprepare(spi_imx->clk_per);
clk_disable_unprepare(spi_imx->clk_ipg);
return 0;
}
static int __maybe_unused spi_imx_suspend(struct device *dev)
{
pinctrl_pm_select_sleep_state(dev);
return 0;
}
static int __maybe_unused spi_imx_resume(struct device *dev)
{
pinctrl_pm_select_default_state(dev);
return 0;
}
static const struct dev_pm_ops imx_spi_pm = {
SET_RUNTIME_PM_OPS(spi_imx_runtime_suspend,
spi_imx_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(spi_imx_suspend, spi_imx_resume)
};
static struct platform_driver spi_imx_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = spi_imx_dt_ids,
.pm = &imx_spi_pm,
},
.probe = spi_imx_probe,
.remove_new = spi_imx_remove,
};
module_platform_driver(spi_imx_driver);
MODULE_DESCRIPTION("i.MX SPI Controller driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
| linux-master | drivers/spi/spi-imx.c |
// SPDX-License-Identifier: GPL-2.0+
//
// Freescale i.MX7ULP LPSPI driver
//
// Copyright 2016 Freescale Semiconductor, Inc.
// Copyright 2018 NXP Semiconductors
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/dma/imx-dma.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/types.h>
#define DRIVER_NAME "fsl_lpspi"
#define FSL_LPSPI_RPM_TIMEOUT 50 /* 50ms */
/* The maximum bytes that eDMA can transfer at once. */
#define FSL_LPSPI_MAX_EDMA_BYTES ((1 << 15) - 1)
/* i.MX7ULP LPSPI registers */
#define IMX7ULP_VERID 0x0
#define IMX7ULP_PARAM 0x4
#define IMX7ULP_CR 0x10
#define IMX7ULP_SR 0x14
#define IMX7ULP_IER 0x18
#define IMX7ULP_DER 0x1c
#define IMX7ULP_CFGR0 0x20
#define IMX7ULP_CFGR1 0x24
#define IMX7ULP_DMR0 0x30
#define IMX7ULP_DMR1 0x34
#define IMX7ULP_CCR 0x40
#define IMX7ULP_FCR 0x58
#define IMX7ULP_FSR 0x5c
#define IMX7ULP_TCR 0x60
#define IMX7ULP_TDR 0x64
#define IMX7ULP_RSR 0x70
#define IMX7ULP_RDR 0x74
/* General control register field define */
#define CR_RRF BIT(9)
#define CR_RTF BIT(8)
#define CR_RST BIT(1)
#define CR_MEN BIT(0)
#define SR_MBF BIT(24)
#define SR_TCF BIT(10)
#define SR_FCF BIT(9)
#define SR_RDF BIT(1)
#define SR_TDF BIT(0)
#define IER_TCIE BIT(10)
#define IER_FCIE BIT(9)
#define IER_RDIE BIT(1)
#define IER_TDIE BIT(0)
#define DER_RDDE BIT(1)
#define DER_TDDE BIT(0)
#define CFGR1_PCSCFG BIT(27)
#define CFGR1_PINCFG (BIT(24)|BIT(25))
#define CFGR1_PCSPOL BIT(8)
#define CFGR1_NOSTALL BIT(3)
#define CFGR1_HOST BIT(0)
#define FSR_TXCOUNT (0xFF)
#define RSR_RXEMPTY BIT(1)
#define TCR_CPOL BIT(31)
#define TCR_CPHA BIT(30)
#define TCR_CONT BIT(21)
#define TCR_CONTC BIT(20)
#define TCR_RXMSK BIT(19)
#define TCR_TXMSK BIT(18)
struct lpspi_config {
u8 bpw;
u8 chip_select;
u8 prescale;
u16 mode;
u32 speed_hz;
};
struct fsl_lpspi_data {
struct device *dev;
void __iomem *base;
unsigned long base_phys;
struct clk *clk_ipg;
struct clk *clk_per;
bool is_target;
bool is_only_cs1;
bool is_first_byte;
void *rx_buf;
const void *tx_buf;
void (*tx)(struct fsl_lpspi_data *);
void (*rx)(struct fsl_lpspi_data *);
u32 remain;
u8 watermark;
u8 txfifosize;
u8 rxfifosize;
struct lpspi_config config;
struct completion xfer_done;
bool target_aborted;
/* DMA */
bool usedma;
struct completion dma_rx_completion;
struct completion dma_tx_completion;
};
static const struct of_device_id fsl_lpspi_dt_ids[] = {
{ .compatible = "fsl,imx7ulp-spi", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_lpspi_dt_ids);
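/*
 * Generate FIFO accessors for 8-, 16- and 32-bit SPI words: each TX helper
 * pops one word from tx_buf (or writes zero for receive-only transfers) into
 * TDR, each RX helper stores one word read from RDR into rx_buf if present.
 */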
#define LPSPI_BUF_RX(type) \
static void fsl_lpspi_buf_rx_##type(struct fsl_lpspi_data *fsl_lpspi) \
{ \
unsigned int val = readl(fsl_lpspi->base + IMX7ULP_RDR); \
\
if (fsl_lpspi->rx_buf) { \
*(type *)fsl_lpspi->rx_buf = val; \
fsl_lpspi->rx_buf += sizeof(type); \
} \
}
#define LPSPI_BUF_TX(type) \
static void fsl_lpspi_buf_tx_##type(struct fsl_lpspi_data *fsl_lpspi) \
{ \
type val = 0; \
\
if (fsl_lpspi->tx_buf) { \
val = *(type *)fsl_lpspi->tx_buf; \
fsl_lpspi->tx_buf += sizeof(type); \
} \
\
fsl_lpspi->remain -= sizeof(type); \
writel(val, fsl_lpspi->base + IMX7ULP_TDR); \
}
LPSPI_BUF_RX(u8)
LPSPI_BUF_TX(u8)
LPSPI_BUF_RX(u16)
LPSPI_BUF_TX(u16)
LPSPI_BUF_RX(u32)
LPSPI_BUF_TX(u32)
static void fsl_lpspi_intctrl(struct fsl_lpspi_data *fsl_lpspi,
unsigned int enable)
{
writel(enable, fsl_lpspi->base + IMX7ULP_IER);
}
static int fsl_lpspi_bytes_per_word(const int bpw)
{
return DIV_ROUND_UP(bpw, BITS_PER_BYTE);
}
static bool fsl_lpspi_can_dma(struct spi_controller *controller,
struct spi_device *spi,
struct spi_transfer *transfer)
{
unsigned int bytes_per_word;
if (!controller->dma_rx)
return false;
bytes_per_word = fsl_lpspi_bytes_per_word(transfer->bits_per_word);
switch (bytes_per_word) {
case 1:
case 2:
case 4:
break;
default:
return false;
}
return true;
}
static int lpspi_prepare_xfer_hardware(struct spi_controller *controller)
{
struct fsl_lpspi_data *fsl_lpspi =
spi_controller_get_devdata(controller);
int ret;
ret = pm_runtime_resume_and_get(fsl_lpspi->dev);
if (ret < 0) {
dev_err(fsl_lpspi->dev, "failed to enable clock\n");
return ret;
}
return 0;
}
static int lpspi_unprepare_xfer_hardware(struct spi_controller *controller)
{
struct fsl_lpspi_data *fsl_lpspi =
spi_controller_get_devdata(controller);
pm_runtime_mark_last_busy(fsl_lpspi->dev);
pm_runtime_put_autosuspend(fsl_lpspi->dev);
return 0;
}
static void fsl_lpspi_write_tx_fifo(struct fsl_lpspi_data *fsl_lpspi)
{
u8 txfifo_cnt;
u32 temp;
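/*
 * Fill the TX FIFO until it is full or no data remains. If all remaining
 * data fitted, clear TCR_CONTC in host mode to end the continuous command
 * and wait for the frame-complete interrupt; otherwise wait for the
 * TX-data interrupt to refill the FIFO.
 */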
txfifo_cnt = readl(fsl_lpspi->base + IMX7ULP_FSR) & 0xff;
while (txfifo_cnt < fsl_lpspi->txfifosize) {
if (!fsl_lpspi->remain)
break;
fsl_lpspi->tx(fsl_lpspi);
txfifo_cnt++;
}
if (txfifo_cnt < fsl_lpspi->txfifosize) {
if (!fsl_lpspi->is_target) {
temp = readl(fsl_lpspi->base + IMX7ULP_TCR);
temp &= ~TCR_CONTC;
writel(temp, fsl_lpspi->base + IMX7ULP_TCR);
}
fsl_lpspi_intctrl(fsl_lpspi, IER_FCIE);
} else
fsl_lpspi_intctrl(fsl_lpspi, IER_TDIE);
}
static void fsl_lpspi_read_rx_fifo(struct fsl_lpspi_data *fsl_lpspi)
{
while (!(readl(fsl_lpspi->base + IMX7ULP_RSR) & RSR_RXEMPTY))
fsl_lpspi->rx(fsl_lpspi);
}
static void fsl_lpspi_set_cmd(struct fsl_lpspi_data *fsl_lpspi)
{
u32 temp = 0;
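/*
 * Build the transmit command: frame size (bpw - 1) in the low bits, the
 * chip select at bit 24, CPHA/CPOL from the SPI mode at bits 30/31 and,
 * in host mode, the prescaler at bit 27.
 */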
temp |= fsl_lpspi->config.bpw - 1;
temp |= (fsl_lpspi->config.mode & 0x3) << 30;
temp |= (fsl_lpspi->config.chip_select & 0x3) << 24;
if (!fsl_lpspi->is_target) {
temp |= fsl_lpspi->config.prescale << 27;
/*
* Setting TCR_CONT keeps SS asserted after the current transfer.
* For the first transfer, clear TCR_CONTC to assert SS.
* For subsequent transfers, set TCR_CONTC to keep SS asserted.
*/
if (!fsl_lpspi->usedma) {
temp |= TCR_CONT;
if (fsl_lpspi->is_first_byte)
temp &= ~TCR_CONTC;
else
temp |= TCR_CONTC;
}
}
writel(temp, fsl_lpspi->base + IMX7ULP_TCR);
dev_dbg(fsl_lpspi->dev, "TCR=0x%x\n", temp);
}
static void fsl_lpspi_set_watermark(struct fsl_lpspi_data *fsl_lpspi)
{
u32 temp;
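/*
 * FCR takes the TX watermark in its low half-word and the RX watermark
 * starting at bit 16; in DMA mode only the TX watermark is programmed.
 */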
if (!fsl_lpspi->usedma)
temp = fsl_lpspi->watermark >> 1 |
(fsl_lpspi->watermark >> 1) << 16;
else
temp = fsl_lpspi->watermark >> 1;
writel(temp, fsl_lpspi->base + IMX7ULP_FCR);
dev_dbg(fsl_lpspi->dev, "FCR=0x%x\n", temp);
}
static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
{
struct lpspi_config config = fsl_lpspi->config;
unsigned int perclk_rate, scldiv;
u8 prescale;
perclk_rate = clk_get_rate(fsl_lpspi->clk_per);
if (!config.speed_hz) {
dev_err(fsl_lpspi->dev,
"error: the transmission speed provided is 0!\n");
return -EINVAL;
}
if (config.speed_hz > perclk_rate / 2) {
dev_err(fsl_lpspi->dev,
"per-clk should be at least two times of transfer speed");
return -EINVAL;
}
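/*
 * SCK = perclk / (2^prescale * (scldiv + 2)); pick the smallest prescaler
 * for which the divider still fits into the 8-bit field written to CCR.
 */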
for (prescale = 0; prescale < 8; prescale++) {
scldiv = perclk_rate / config.speed_hz / (1 << prescale) - 2;
if (scldiv < 256) {
fsl_lpspi->config.prescale = prescale;
break;
}
}
if (scldiv >= 256)
return -EINVAL;
writel(scldiv | (scldiv << 8) | ((scldiv >> 1) << 16),
fsl_lpspi->base + IMX7ULP_CCR);
dev_dbg(fsl_lpspi->dev, "perclk=%d, speed=%d, prescale=%d, scldiv=%d\n",
perclk_rate, config.speed_hz, prescale, scldiv);
return 0;
}
static int fsl_lpspi_dma_configure(struct spi_controller *controller)
{
int ret;
enum dma_slave_buswidth buswidth;
struct dma_slave_config rx = {}, tx = {};
struct fsl_lpspi_data *fsl_lpspi =
spi_controller_get_devdata(controller);
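/*
 * Match the DMA bus width to the SPI word size; with maxburst = 1 each
 * DMA request moves exactly one SPI word.
 */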
switch (fsl_lpspi_bytes_per_word(fsl_lpspi->config.bpw)) {
case 4:
buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
break;
case 2:
buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
break;
case 1:
buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
break;
default:
return -EINVAL;
}
tx.direction = DMA_MEM_TO_DEV;
tx.dst_addr = fsl_lpspi->base_phys + IMX7ULP_TDR;
tx.dst_addr_width = buswidth;
tx.dst_maxburst = 1;
ret = dmaengine_slave_config(controller->dma_tx, &tx);
if (ret) {
dev_err(fsl_lpspi->dev, "TX dma configuration failed with %d\n",
ret);
return ret;
}
rx.direction = DMA_DEV_TO_MEM;
rx.src_addr = fsl_lpspi->base_phys + IMX7ULP_RDR;
rx.src_addr_width = buswidth;
rx.src_maxburst = 1;
ret = dmaengine_slave_config(controller->dma_rx, &rx);
if (ret) {
dev_err(fsl_lpspi->dev, "RX dma configuration failed with %d\n",
ret);
return ret;
}
return 0;
}
static int fsl_lpspi_config(struct fsl_lpspi_data *fsl_lpspi)
{
u32 temp;
int ret;
if (!fsl_lpspi->is_target) {
ret = fsl_lpspi_set_bitrate(fsl_lpspi);
if (ret)
return ret;
}
fsl_lpspi_set_watermark(fsl_lpspi);
if (!fsl_lpspi->is_target)
temp = CFGR1_HOST;
else
temp = CFGR1_PINCFG;
if (fsl_lpspi->config.mode & SPI_CS_HIGH)
temp |= CFGR1_PCSPOL;
writel(temp, fsl_lpspi->base + IMX7ULP_CFGR1);
temp = readl(fsl_lpspi->base + IMX7ULP_CR);
temp |= CR_RRF | CR_RTF | CR_MEN;
writel(temp, fsl_lpspi->base + IMX7ULP_CR);
temp = 0;
if (fsl_lpspi->usedma)
temp = DER_TDDE | DER_RDDE;
writel(temp, fsl_lpspi->base + IMX7ULP_DER);
return 0;
}
static int fsl_lpspi_setup_transfer(struct spi_controller *controller,
struct spi_device *spi,
struct spi_transfer *t)
{
struct fsl_lpspi_data *fsl_lpspi =
spi_controller_get_devdata(spi->controller);
if (t == NULL)
return -EINVAL;
fsl_lpspi->config.mode = spi->mode;
fsl_lpspi->config.bpw = t->bits_per_word;
fsl_lpspi->config.speed_hz = t->speed_hz;
if (fsl_lpspi->is_only_cs1)
fsl_lpspi->config.chip_select = 1;
else
fsl_lpspi->config.chip_select = spi_get_chipselect(spi, 0);
if (!fsl_lpspi->config.speed_hz)
fsl_lpspi->config.speed_hz = spi->max_speed_hz;
if (!fsl_lpspi->config.bpw)
fsl_lpspi->config.bpw = spi->bits_per_word;
/* Initialize the functions for transfer */
if (fsl_lpspi->config.bpw <= 8) {
fsl_lpspi->rx = fsl_lpspi_buf_rx_u8;
fsl_lpspi->tx = fsl_lpspi_buf_tx_u8;
} else if (fsl_lpspi->config.bpw <= 16) {
fsl_lpspi->rx = fsl_lpspi_buf_rx_u16;
fsl_lpspi->tx = fsl_lpspi_buf_tx_u16;
} else {
fsl_lpspi->rx = fsl_lpspi_buf_rx_u32;
fsl_lpspi->tx = fsl_lpspi_buf_tx_u32;
}
if (t->len <= fsl_lpspi->txfifosize)
fsl_lpspi->watermark = t->len;
else
fsl_lpspi->watermark = fsl_lpspi->txfifosize;
if (fsl_lpspi_can_dma(controller, spi, t))
fsl_lpspi->usedma = true;
else
fsl_lpspi->usedma = false;
return fsl_lpspi_config(fsl_lpspi);
}
static int fsl_lpspi_target_abort(struct spi_controller *controller)
{
struct fsl_lpspi_data *fsl_lpspi =
spi_controller_get_devdata(controller);
fsl_lpspi->target_aborted = true;
if (!fsl_lpspi->usedma)
complete(&fsl_lpspi->xfer_done);
else {
complete(&fsl_lpspi->dma_tx_completion);
complete(&fsl_lpspi->dma_rx_completion);
}
return 0;
}
static int fsl_lpspi_wait_for_completion(struct spi_controller *controller)
{
struct fsl_lpspi_data *fsl_lpspi =
spi_controller_get_devdata(controller);
if (fsl_lpspi->is_target) {
if (wait_for_completion_interruptible(&fsl_lpspi->xfer_done) ||
fsl_lpspi->target_aborted) {
dev_dbg(fsl_lpspi->dev, "interrupted\n");
return -EINTR;
}
} else {
if (!wait_for_completion_timeout(&fsl_lpspi->xfer_done, HZ)) {
dev_dbg(fsl_lpspi->dev, "wait for completion timeout\n");
return -ETIMEDOUT;
}
}
return 0;
}
static int fsl_lpspi_reset(struct fsl_lpspi_data *fsl_lpspi)
{
u32 temp;
if (!fsl_lpspi->usedma) {
/* Disable all interrupt */
fsl_lpspi_intctrl(fsl_lpspi, 0);
}
/* W1C for all flags in SR */
temp = 0x3F << 8;
writel(temp, fsl_lpspi->base + IMX7ULP_SR);
/* Clear FIFO and disable module */
temp = CR_RRF | CR_RTF;
writel(temp, fsl_lpspi->base + IMX7ULP_CR);
return 0;
}
static void fsl_lpspi_dma_rx_callback(void *cookie)
{
struct fsl_lpspi_data *fsl_lpspi = (struct fsl_lpspi_data *)cookie;
complete(&fsl_lpspi->dma_rx_completion);
}
static void fsl_lpspi_dma_tx_callback(void *cookie)
{
struct fsl_lpspi_data *fsl_lpspi = (struct fsl_lpspi_data *)cookie;
complete(&fsl_lpspi->dma_tx_completion);
}
static int fsl_lpspi_calculate_timeout(struct fsl_lpspi_data *fsl_lpspi,
int size)
{
unsigned long timeout = 0;
/* Time with actual data transfer and CS change delay related to HW */
timeout = (8 + 4) * size / fsl_lpspi->config.speed_hz;
/* Add extra second for scheduler related activities */
timeout += 1;
/* Double calculated timeout */
return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC);
}
static int fsl_lpspi_dma_transfer(struct spi_controller *controller,
struct fsl_lpspi_data *fsl_lpspi,
struct spi_transfer *transfer)
{
struct dma_async_tx_descriptor *desc_tx, *desc_rx;
unsigned long transfer_timeout;
unsigned long timeout;
struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
int ret;
ret = fsl_lpspi_dma_configure(controller);
if (ret)
return ret;
desc_rx = dmaengine_prep_slave_sg(controller->dma_rx,
rx->sgl, rx->nents, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_rx)
return -EINVAL;
desc_rx->callback = fsl_lpspi_dma_rx_callback;
desc_rx->callback_param = (void *)fsl_lpspi;
dmaengine_submit(desc_rx);
reinit_completion(&fsl_lpspi->dma_rx_completion);
dma_async_issue_pending(controller->dma_rx);
desc_tx = dmaengine_prep_slave_sg(controller->dma_tx,
tx->sgl, tx->nents, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_tx) {
dmaengine_terminate_all(controller->dma_tx);
return -EINVAL;
}
desc_tx->callback = fsl_lpspi_dma_tx_callback;
desc_tx->callback_param = (void *)fsl_lpspi;
dmaengine_submit(desc_tx);
reinit_completion(&fsl_lpspi->dma_tx_completion);
dma_async_issue_pending(controller->dma_tx);
fsl_lpspi->target_aborted = false;
if (!fsl_lpspi->is_target) {
transfer_timeout = fsl_lpspi_calculate_timeout(fsl_lpspi,
transfer->len);
/* Wait for eDMA to finish the data transfer. */
timeout = wait_for_completion_timeout(&fsl_lpspi->dma_tx_completion,
transfer_timeout);
if (!timeout) {
dev_err(fsl_lpspi->dev, "I/O Error in DMA TX\n");
dmaengine_terminate_all(controller->dma_tx);
dmaengine_terminate_all(controller->dma_rx);
fsl_lpspi_reset(fsl_lpspi);
return -ETIMEDOUT;
}
timeout = wait_for_completion_timeout(&fsl_lpspi->dma_rx_completion,
transfer_timeout);
if (!timeout) {
dev_err(fsl_lpspi->dev, "I/O Error in DMA RX\n");
dmaengine_terminate_all(controller->dma_tx);
dmaengine_terminate_all(controller->dma_rx);
fsl_lpspi_reset(fsl_lpspi);
return -ETIMEDOUT;
}
} else {
if (wait_for_completion_interruptible(&fsl_lpspi->dma_tx_completion) ||
fsl_lpspi->target_aborted) {
dev_dbg(fsl_lpspi->dev,
"I/O Error in DMA TX interrupted\n");
dmaengine_terminate_all(controller->dma_tx);
dmaengine_terminate_all(controller->dma_rx);
fsl_lpspi_reset(fsl_lpspi);
return -EINTR;
}
if (wait_for_completion_interruptible(&fsl_lpspi->dma_rx_completion) ||
fsl_lpspi->target_aborted) {
dev_dbg(fsl_lpspi->dev,
"I/O Error in DMA RX interrupted\n");
dmaengine_terminate_all(controller->dma_tx);
dmaengine_terminate_all(controller->dma_rx);
fsl_lpspi_reset(fsl_lpspi);
return -EINTR;
}
}
fsl_lpspi_reset(fsl_lpspi);
return 0;
}
static void fsl_lpspi_dma_exit(struct spi_controller *controller)
{
if (controller->dma_rx) {
dma_release_channel(controller->dma_rx);
controller->dma_rx = NULL;
}
if (controller->dma_tx) {
dma_release_channel(controller->dma_tx);
controller->dma_tx = NULL;
}
}
static int fsl_lpspi_dma_init(struct device *dev,
struct fsl_lpspi_data *fsl_lpspi,
struct spi_controller *controller)
{
int ret;
/* Prepare for TX DMA: */
controller->dma_tx = dma_request_chan(dev, "tx");
if (IS_ERR(controller->dma_tx)) {
ret = PTR_ERR(controller->dma_tx);
dev_dbg(dev, "can't get the TX DMA channel, error %d!\n", ret);
controller->dma_tx = NULL;
goto err;
}
/* Prepare for RX DMA: */
controller->dma_rx = dma_request_chan(dev, "rx");
if (IS_ERR(controller->dma_rx)) {
ret = PTR_ERR(controller->dma_rx);
dev_dbg(dev, "can't get the RX DMA channel, error %d\n", ret);
controller->dma_rx = NULL;
goto err;
}
init_completion(&fsl_lpspi->dma_rx_completion);
init_completion(&fsl_lpspi->dma_tx_completion);
controller->can_dma = fsl_lpspi_can_dma;
controller->max_dma_len = FSL_LPSPI_MAX_EDMA_BYTES;
return 0;
err:
fsl_lpspi_dma_exit(controller);
return ret;
}
static int fsl_lpspi_pio_transfer(struct spi_controller *controller,
struct spi_transfer *t)
{
struct fsl_lpspi_data *fsl_lpspi =
spi_controller_get_devdata(controller);
int ret;
fsl_lpspi->tx_buf = t->tx_buf;
fsl_lpspi->rx_buf = t->rx_buf;
fsl_lpspi->remain = t->len;
reinit_completion(&fsl_lpspi->xfer_done);
fsl_lpspi->target_aborted = false;
fsl_lpspi_write_tx_fifo(fsl_lpspi);
ret = fsl_lpspi_wait_for_completion(controller);
if (ret)
return ret;
fsl_lpspi_reset(fsl_lpspi);
return 0;
}
static int fsl_lpspi_transfer_one(struct spi_controller *controller,
struct spi_device *spi,
struct spi_transfer *t)
{
struct fsl_lpspi_data *fsl_lpspi =
spi_controller_get_devdata(controller);
int ret;
fsl_lpspi->is_first_byte = true;
ret = fsl_lpspi_setup_transfer(controller, spi, t);
if (ret < 0)
return ret;
fsl_lpspi_set_cmd(fsl_lpspi);
fsl_lpspi->is_first_byte = false;
if (fsl_lpspi->usedma)
ret = fsl_lpspi_dma_transfer(controller, fsl_lpspi, t);
else
ret = fsl_lpspi_pio_transfer(controller, t);
if (ret < 0)
return ret;
return 0;
}
static irqreturn_t fsl_lpspi_isr(int irq, void *dev_id)
{
u32 temp_SR, temp_IER;
struct fsl_lpspi_data *fsl_lpspi = dev_id;
temp_IER = readl(fsl_lpspi->base + IMX7ULP_IER);
fsl_lpspi_intctrl(fsl_lpspi, 0);
temp_SR = readl(fsl_lpspi->base + IMX7ULP_SR);
fsl_lpspi_read_rx_fifo(fsl_lpspi);
if ((temp_SR & SR_TDF) && (temp_IER & IER_TDIE)) {
fsl_lpspi_write_tx_fifo(fsl_lpspi);
return IRQ_HANDLED;
}
if (temp_SR & SR_MBF ||
readl(fsl_lpspi->base + IMX7ULP_FSR) & FSR_TXCOUNT) {
writel(SR_FCF, fsl_lpspi->base + IMX7ULP_SR);
fsl_lpspi_intctrl(fsl_lpspi, IER_FCIE);
return IRQ_HANDLED;
}
if (temp_SR & SR_FCF && (temp_IER & IER_FCIE)) {
writel(SR_FCF, fsl_lpspi->base + IMX7ULP_SR);
complete(&fsl_lpspi->xfer_done);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
#ifdef CONFIG_PM
static int fsl_lpspi_runtime_resume(struct device *dev)
{
struct spi_controller *controller = dev_get_drvdata(dev);
struct fsl_lpspi_data *fsl_lpspi;
int ret;
fsl_lpspi = spi_controller_get_devdata(controller);
ret = clk_prepare_enable(fsl_lpspi->clk_per);
if (ret)
return ret;
ret = clk_prepare_enable(fsl_lpspi->clk_ipg);
if (ret) {
clk_disable_unprepare(fsl_lpspi->clk_per);
return ret;
}
return 0;
}
static int fsl_lpspi_runtime_suspend(struct device *dev)
{
struct spi_controller *controller = dev_get_drvdata(dev);
struct fsl_lpspi_data *fsl_lpspi;
fsl_lpspi = spi_controller_get_devdata(controller);
clk_disable_unprepare(fsl_lpspi->clk_per);
clk_disable_unprepare(fsl_lpspi->clk_ipg);
return 0;
}
#endif
static int fsl_lpspi_init_rpm(struct fsl_lpspi_data *fsl_lpspi)
{
struct device *dev = fsl_lpspi->dev;
pm_runtime_enable(dev);
pm_runtime_set_autosuspend_delay(dev, FSL_LPSPI_RPM_TIMEOUT);
pm_runtime_use_autosuspend(dev);
return 0;
}
static int fsl_lpspi_probe(struct platform_device *pdev)
{
struct fsl_lpspi_data *fsl_lpspi;
struct spi_controller *controller;
struct resource *res;
int ret, irq;
u32 num_cs;
u32 temp;
bool is_target;
is_target = of_property_read_bool((&pdev->dev)->of_node, "spi-slave");
if (is_target)
controller = spi_alloc_target(&pdev->dev,
sizeof(struct fsl_lpspi_data));
else
controller = spi_alloc_host(&pdev->dev,
sizeof(struct fsl_lpspi_data));
if (!controller)
return -ENOMEM;
platform_set_drvdata(pdev, controller);
fsl_lpspi = spi_controller_get_devdata(controller);
fsl_lpspi->dev = &pdev->dev;
fsl_lpspi->is_target = is_target;
fsl_lpspi->is_only_cs1 = of_property_read_bool((&pdev->dev)->of_node,
"fsl,spi-only-use-cs1-sel");
init_completion(&fsl_lpspi->xfer_done);
fsl_lpspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(fsl_lpspi->base)) {
ret = PTR_ERR(fsl_lpspi->base);
goto out_controller_put;
}
fsl_lpspi->base_phys = res->start;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
goto out_controller_put;
}
ret = devm_request_irq(&pdev->dev, irq, fsl_lpspi_isr, 0,
dev_name(&pdev->dev), fsl_lpspi);
if (ret) {
dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
goto out_controller_put;
}
fsl_lpspi->clk_per = devm_clk_get(&pdev->dev, "per");
if (IS_ERR(fsl_lpspi->clk_per)) {
ret = PTR_ERR(fsl_lpspi->clk_per);
goto out_controller_put;
}
fsl_lpspi->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(fsl_lpspi->clk_ipg)) {
ret = PTR_ERR(fsl_lpspi->clk_ipg);
goto out_controller_put;
}
/* enable the clock */
ret = fsl_lpspi_init_rpm(fsl_lpspi);
if (ret)
goto out_controller_put;
ret = pm_runtime_get_sync(fsl_lpspi->dev);
if (ret < 0) {
dev_err(fsl_lpspi->dev, "failed to enable clock\n");
goto out_pm_get;
}
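/* PARAM encodes log2 of the TX and RX FIFO depths in its two low bytes. */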
temp = readl(fsl_lpspi->base + IMX7ULP_PARAM);
fsl_lpspi->txfifosize = 1 << (temp & 0x0f);
fsl_lpspi->rxfifosize = 1 << ((temp >> 8) & 0x0f);
if (of_property_read_u32((&pdev->dev)->of_node, "num-cs",
&num_cs)) {
if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx93-spi"))
num_cs = ((temp >> 16) & 0xf);
else
num_cs = 1;
}
controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
controller->transfer_one = fsl_lpspi_transfer_one;
controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware;
controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware;
controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
controller->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
controller->dev.of_node = pdev->dev.of_node;
controller->bus_num = pdev->id;
controller->num_chipselect = num_cs;
controller->target_abort = fsl_lpspi_target_abort;
if (!fsl_lpspi->is_target)
controller->use_gpio_descriptors = true;
ret = fsl_lpspi_dma_init(&pdev->dev, fsl_lpspi, controller);
if (ret == -EPROBE_DEFER)
goto out_pm_get;
if (ret < 0)
dev_warn(&pdev->dev, "dma setup error %d, use pio\n", ret);
else
/*
* Disable the LPSPI module IRQ when DMA mode is enabled successfully,
* to prevent unexpected LPSPI module IRQ events.
*/
disable_irq(irq);
ret = devm_spi_register_controller(&pdev->dev, controller);
if (ret < 0) {
dev_err_probe(&pdev->dev, ret, "spi_register_controller error\n");
goto free_dma;
}
pm_runtime_mark_last_busy(fsl_lpspi->dev);
pm_runtime_put_autosuspend(fsl_lpspi->dev);
return 0;
free_dma:
fsl_lpspi_dma_exit(controller);
out_pm_get:
pm_runtime_dont_use_autosuspend(fsl_lpspi->dev);
pm_runtime_put_sync(fsl_lpspi->dev);
pm_runtime_disable(fsl_lpspi->dev);
out_controller_put:
spi_controller_put(controller);
return ret;
}
static void fsl_lpspi_remove(struct platform_device *pdev)
{
struct spi_controller *controller = platform_get_drvdata(pdev);
struct fsl_lpspi_data *fsl_lpspi =
spi_controller_get_devdata(controller);
fsl_lpspi_dma_exit(controller);
pm_runtime_disable(fsl_lpspi->dev);
}
static int __maybe_unused fsl_lpspi_suspend(struct device *dev)
{
pinctrl_pm_select_sleep_state(dev);
return pm_runtime_force_suspend(dev);
}
static int __maybe_unused fsl_lpspi_resume(struct device *dev)
{
int ret;
ret = pm_runtime_force_resume(dev);
if (ret) {
dev_err(dev, "Error in resume: %d\n", ret);
return ret;
}
pinctrl_pm_select_default_state(dev);
return 0;
}
static const struct dev_pm_ops fsl_lpspi_pm_ops = {
SET_RUNTIME_PM_OPS(fsl_lpspi_runtime_suspend,
fsl_lpspi_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(fsl_lpspi_suspend, fsl_lpspi_resume)
};
static struct platform_driver fsl_lpspi_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = fsl_lpspi_dt_ids,
.pm = &fsl_lpspi_pm_ops,
},
.probe = fsl_lpspi_probe,
.remove_new = fsl_lpspi_remove,
};
module_platform_driver(fsl_lpspi_driver);
MODULE_DESCRIPTION("LPSPI Controller driver");
MODULE_AUTHOR("Gao Pan <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-fsl-lpspi.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017-2018, The Linux foundation. All rights reserved.
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/qcom-gpi-dma.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/soc/qcom/geni-se.h>
#include <linux/spi/spi.h>
#include <linux/spinlock.h>
/* SPI SE specific registers and respective register fields */
#define SE_SPI_CPHA 0x224
#define CPHA BIT(0)
#define SE_SPI_LOOPBACK 0x22c
#define LOOPBACK_ENABLE 0x1
#define NORMAL_MODE 0x0
#define LOOPBACK_MSK GENMASK(1, 0)
#define SE_SPI_CPOL 0x230
#define CPOL BIT(2)
#define SE_SPI_DEMUX_OUTPUT_INV 0x24c
#define CS_DEMUX_OUTPUT_INV_MSK GENMASK(3, 0)
#define SE_SPI_DEMUX_SEL 0x250
#define CS_DEMUX_OUTPUT_SEL GENMASK(3, 0)
#define SE_SPI_TRANS_CFG 0x25c
#define CS_TOGGLE BIT(1)
#define SE_SPI_WORD_LEN 0x268
#define WORD_LEN_MSK GENMASK(9, 0)
#define MIN_WORD_LEN 4
#define SE_SPI_TX_TRANS_LEN 0x26c
#define SE_SPI_RX_TRANS_LEN 0x270
#define TRANS_LEN_MSK GENMASK(23, 0)
#define SE_SPI_PRE_POST_CMD_DLY 0x274
#define SE_SPI_DELAY_COUNTERS 0x278
#define SPI_INTER_WORDS_DELAY_MSK GENMASK(9, 0)
#define SPI_CS_CLK_DELAY_MSK GENMASK(19, 10)
#define SPI_CS_CLK_DELAY_SHFT 10
#define SE_SPI_SLAVE_EN (0x2BC)
#define SPI_SLAVE_EN BIT(0)
/* M_CMD OP codes for SPI */
#define SPI_TX_ONLY 1
#define SPI_RX_ONLY 2
#define SPI_TX_RX 7
#define SPI_CS_ASSERT 8
#define SPI_CS_DEASSERT 9
#define SPI_SCK_ONLY 10
/* M_CMD params for SPI */
#define SPI_PRE_CMD_DELAY BIT(0)
#define TIMESTAMP_BEFORE BIT(1)
#define FRAGMENTATION BIT(2)
#define TIMESTAMP_AFTER BIT(3)
#define POST_CMD_DELAY BIT(4)
#define GSI_LOOPBACK_EN BIT(0)
#define GSI_CS_TOGGLE BIT(3)
#define GSI_CPHA BIT(4)
#define GSI_CPOL BIT(5)
struct spi_geni_master {
struct geni_se se;
struct device *dev;
u32 tx_fifo_depth;
u32 fifo_width_bits;
u32 tx_wm;
u32 last_mode;
unsigned long cur_speed_hz;
unsigned long cur_sclk_hz;
unsigned int cur_bits_per_word;
unsigned int tx_rem_bytes;
unsigned int rx_rem_bytes;
const struct spi_transfer *cur_xfer;
struct completion cs_done;
struct completion cancel_done;
struct completion abort_done;
struct completion tx_reset_done;
struct completion rx_reset_done;
unsigned int oversampling;
spinlock_t lock;
int irq;
bool cs_flag;
bool abort_failed;
struct dma_chan *tx;
struct dma_chan *rx;
int cur_xfer_mode;
};
static void spi_slv_setup(struct spi_geni_master *mas)
{
struct geni_se *se = &mas->se;
writel(SPI_SLAVE_EN, se->base + SE_SPI_SLAVE_EN);
writel(GENI_IO_MUX_0_EN, se->base + GENI_OUTPUT_CTRL);
writel(START_TRIGGER, se->base + SE_GENI_CFG_SEQ_START);
dev_dbg(mas->dev, "spi slave setup done\n");
}
static int get_spi_clk_cfg(unsigned int speed_hz,
struct spi_geni_master *mas,
unsigned int *clk_idx,
unsigned int *clk_div)
{
unsigned long sclk_freq;
unsigned int actual_hz;
int ret;
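/*
 * Ask the SE clock framework for a source clock that can produce
 * speed_hz * oversampling, then derive the integer divider from it.
 */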
ret = geni_se_clk_freq_match(&mas->se,
speed_hz * mas->oversampling,
clk_idx, &sclk_freq, false);
if (ret) {
dev_err(mas->dev, "Failed(%d) to find src clk for %dHz\n",
ret, speed_hz);
return ret;
}
*clk_div = DIV_ROUND_UP(sclk_freq, mas->oversampling * speed_hz);
actual_hz = sclk_freq / (mas->oversampling * *clk_div);
dev_dbg(mas->dev, "req %u=>%u sclk %lu, idx %d, div %d\n", speed_hz,
actual_hz, sclk_freq, *clk_idx, *clk_div);
ret = dev_pm_opp_set_rate(mas->dev, sclk_freq);
if (ret)
dev_err(mas->dev, "dev_pm_opp_set_rate failed %d\n", ret);
else
mas->cur_sclk_hz = sclk_freq;
return ret;
}
static void handle_se_timeout(struct spi_master *spi,
struct spi_message *msg)
{
struct spi_geni_master *mas = spi_master_get_devdata(spi);
unsigned long time_left;
struct geni_se *se = &mas->se;
const struct spi_transfer *xfer;
spin_lock_irq(&mas->lock);
if (mas->cur_xfer_mode == GENI_SE_FIFO)
writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
xfer = mas->cur_xfer;
mas->cur_xfer = NULL;
if (spi->slave) {
/*
* Skip the CMD cancel sequence since the SPI slave
* doesn't support it.
*/
spin_unlock_irq(&mas->lock);
goto unmap_if_dma;
}
reinit_completion(&mas->cancel_done);
geni_se_cancel_m_cmd(se);
spin_unlock_irq(&mas->lock);
time_left = wait_for_completion_timeout(&mas->cancel_done, HZ);
if (time_left)
goto unmap_if_dma;
spin_lock_irq(&mas->lock);
reinit_completion(&mas->abort_done);
geni_se_abort_m_cmd(se);
spin_unlock_irq(&mas->lock);
time_left = wait_for_completion_timeout(&mas->abort_done, HZ);
if (!time_left) {
dev_err(mas->dev, "Failed to cancel/abort m_cmd\n");
/*
* No need for a lock since SPI core has a lock and we never
* access this from an interrupt.
*/
mas->abort_failed = true;
}
unmap_if_dma:
if (mas->cur_xfer_mode == GENI_SE_DMA) {
if (xfer) {
if (xfer->tx_buf) {
spin_lock_irq(&mas->lock);
reinit_completion(&mas->tx_reset_done);
writel(1, se->base + SE_DMA_TX_FSM_RST);
spin_unlock_irq(&mas->lock);
time_left = wait_for_completion_timeout(&mas->tx_reset_done, HZ);
if (!time_left)
dev_err(mas->dev, "DMA TX RESET failed\n");
}
if (xfer->rx_buf) {
spin_lock_irq(&mas->lock);
reinit_completion(&mas->rx_reset_done);
writel(1, se->base + SE_DMA_RX_FSM_RST);
spin_unlock_irq(&mas->lock);
time_left = wait_for_completion_timeout(&mas->rx_reset_done, HZ);
if (!time_left)
dev_err(mas->dev, "DMA RX RESET failed\n");
}
} else {
/*
* This can happen if a timeout occurred and we had to wait for the
* lock in this function because the ISR was holding the lock and
* handling transfer completion at that time.
*/
dev_warn(mas->dev, "Cancel/Abort on completed SPI transfer\n");
}
}
}
static void handle_gpi_timeout(struct spi_master *spi, struct spi_message *msg)
{
struct spi_geni_master *mas = spi_master_get_devdata(spi);
dmaengine_terminate_sync(mas->tx);
dmaengine_terminate_sync(mas->rx);
}
static void spi_geni_handle_err(struct spi_master *spi, struct spi_message *msg)
{
struct spi_geni_master *mas = spi_master_get_devdata(spi);
switch (mas->cur_xfer_mode) {
case GENI_SE_FIFO:
case GENI_SE_DMA:
handle_se_timeout(spi, msg);
break;
case GENI_GPI_DMA:
handle_gpi_timeout(spi, msg);
break;
default:
dev_err(mas->dev, "Abort on Mode:%d not supported", mas->cur_xfer_mode);
}
}
static bool spi_geni_is_abort_still_pending(struct spi_geni_master *mas)
{
struct geni_se *se = &mas->se;
u32 m_irq, m_irq_en;
if (!mas->abort_failed)
return false;
/*
* The only known case where a transfer times out and then a cancel
* times out then an abort times out is if something is blocking our
* interrupt handler from running. Avoid starting any new transfers
* until that sorts itself out.
*/
spin_lock_irq(&mas->lock);
m_irq = readl(se->base + SE_GENI_M_IRQ_STATUS);
m_irq_en = readl(se->base + SE_GENI_M_IRQ_EN);
spin_unlock_irq(&mas->lock);
if (m_irq & m_irq_en) {
dev_err(mas->dev, "Interrupts pending after abort: %#010x\n",
m_irq & m_irq_en);
return true;
}
/*
* If we're here the problem resolved itself so no need to check more
* on future transfers.
*/
mas->abort_failed = false;
return false;
}
static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
{
struct spi_geni_master *mas = spi_master_get_devdata(slv->master);
struct spi_master *spi = dev_get_drvdata(mas->dev);
struct geni_se *se = &mas->se;
unsigned long time_left;
if (!(slv->mode & SPI_CS_HIGH))
set_flag = !set_flag;
if (set_flag == mas->cs_flag)
return;
pm_runtime_get_sync(mas->dev);
if (spi_geni_is_abort_still_pending(mas)) {
dev_err(mas->dev, "Can't set chip select\n");
goto exit;
}
spin_lock_irq(&mas->lock);
if (mas->cur_xfer) {
dev_err(mas->dev, "Can't set CS when prev xfer running\n");
spin_unlock_irq(&mas->lock);
goto exit;
}
mas->cs_flag = set_flag;
/* set xfer_mode to FIFO to complete cs_done in isr */
mas->cur_xfer_mode = GENI_SE_FIFO;
geni_se_select_mode(se, mas->cur_xfer_mode);
reinit_completion(&mas->cs_done);
if (set_flag)
geni_se_setup_m_cmd(se, SPI_CS_ASSERT, 0);
else
geni_se_setup_m_cmd(se, SPI_CS_DEASSERT, 0);
spin_unlock_irq(&mas->lock);
time_left = wait_for_completion_timeout(&mas->cs_done, HZ);
if (!time_left) {
dev_warn(mas->dev, "Timeout setting chip select\n");
handle_se_timeout(spi, NULL);
}
exit:
pm_runtime_put(mas->dev);
}
static void spi_setup_word_len(struct spi_geni_master *mas, u16 mode,
unsigned int bits_per_word)
{
unsigned int pack_words;
bool msb_first = (mode & SPI_LSB_FIRST) ? false : true;
struct geni_se *se = &mas->se;
u32 word_len;
/*
* If SPI words don't pack evenly into a FIFO word, fall back to
* packing one SPI word per FIFO word.
*/
if (!(mas->fifo_width_bits % bits_per_word))
pack_words = mas->fifo_width_bits / bits_per_word;
else
pack_words = 1;
geni_se_config_packing(&mas->se, bits_per_word, pack_words, msb_first,
true, true);
word_len = (bits_per_word - MIN_WORD_LEN) & WORD_LEN_MSK;
writel(word_len, se->base + SE_SPI_WORD_LEN);
}
static int geni_spi_set_clock_and_bw(struct spi_geni_master *mas,
unsigned long clk_hz)
{
u32 clk_sel, m_clk_cfg, idx, div;
struct geni_se *se = &mas->se;
int ret;
if (clk_hz == mas->cur_speed_hz)
return 0;
ret = get_spi_clk_cfg(clk_hz, mas, &idx, &div);
if (ret) {
dev_err(mas->dev, "Err setting clk to %lu: %d\n", clk_hz, ret);
return ret;
}
/*
* SPI core clock gets configured with the requested frequency
* or the frequency closer to the requested frequency.
* For that reason requested frequency is stored in the
* cur_speed_hz and referred in the consecutive transfer instead
* of calling clk_get_rate() API.
*/
mas->cur_speed_hz = clk_hz;
clk_sel = idx & CLK_SEL_MSK;
m_clk_cfg = (div << CLK_DIV_SHFT) | SER_CLK_EN;
writel(clk_sel, se->base + SE_GENI_CLK_SEL);
writel(m_clk_cfg, se->base + GENI_SER_M_CLK_CFG);
/* Set BW quota for CPU as driver supports FIFO mode only. */
se->icc_paths[CPU_TO_GENI].avg_bw = Bps_to_icc(mas->cur_speed_hz);
ret = geni_icc_set_bw(se);
if (ret)
return ret;
return 0;
}
static int setup_fifo_params(struct spi_device *spi_slv,
struct spi_master *spi)
{
struct spi_geni_master *mas = spi_master_get_devdata(spi);
struct geni_se *se = &mas->se;
u32 loopback_cfg = 0, cpol = 0, cpha = 0, demux_output_inv = 0;
u32 demux_sel;
if (mas->last_mode != spi_slv->mode) {
if (spi_slv->mode & SPI_LOOP)
loopback_cfg = LOOPBACK_ENABLE;
if (spi_slv->mode & SPI_CPOL)
cpol = CPOL;
if (spi_slv->mode & SPI_CPHA)
cpha = CPHA;
if (spi_slv->mode & SPI_CS_HIGH)
demux_output_inv = BIT(spi_get_chipselect(spi_slv, 0));
demux_sel = spi_get_chipselect(spi_slv, 0);
mas->cur_bits_per_word = spi_slv->bits_per_word;
spi_setup_word_len(mas, spi_slv->mode, spi_slv->bits_per_word);
writel(loopback_cfg, se->base + SE_SPI_LOOPBACK);
writel(demux_sel, se->base + SE_SPI_DEMUX_SEL);
writel(cpha, se->base + SE_SPI_CPHA);
writel(cpol, se->base + SE_SPI_CPOL);
writel(demux_output_inv, se->base + SE_SPI_DEMUX_OUTPUT_INV);
mas->last_mode = spi_slv->mode;
}
return geni_spi_set_clock_and_bw(mas, spi_slv->max_speed_hz);
}
static void
spi_gsi_callback_result(void *cb, const struct dmaengine_result *result)
{
struct spi_master *spi = cb;
spi->cur_msg->status = -EIO;
if (result->result != DMA_TRANS_NOERROR) {
dev_err(&spi->dev, "DMA txn failed: %d\n", result->result);
spi_finalize_current_transfer(spi);
return;
}
if (!result->residue) {
spi->cur_msg->status = 0;
dev_dbg(&spi->dev, "DMA txn completed\n");
} else {
dev_err(&spi->dev, "DMA xfer has pending: %d\n", result->residue);
}
spi_finalize_current_transfer(spi);
}
static int setup_gsi_xfer(struct spi_transfer *xfer, struct spi_geni_master *mas,
struct spi_device *spi_slv, struct spi_master *spi)
{
unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
struct dma_slave_config config = {};
struct gpi_spi_config peripheral = {};
struct dma_async_tx_descriptor *tx_desc, *rx_desc;
int ret;
config.peripheral_config = &peripheral;
config.peripheral_size = sizeof(peripheral);
peripheral.set_config = true;
if (xfer->bits_per_word != mas->cur_bits_per_word ||
xfer->speed_hz != mas->cur_speed_hz) {
mas->cur_bits_per_word = xfer->bits_per_word;
mas->cur_speed_hz = xfer->speed_hz;
}
if (xfer->tx_buf && xfer->rx_buf) {
peripheral.cmd = SPI_DUPLEX;
} else if (xfer->tx_buf) {
peripheral.cmd = SPI_TX;
peripheral.rx_len = 0;
} else if (xfer->rx_buf) {
peripheral.cmd = SPI_RX;
if (!(mas->cur_bits_per_word % MIN_WORD_LEN)) {
peripheral.rx_len = ((xfer->len << 3) / mas->cur_bits_per_word);
} else {
int bytes_per_word = (mas->cur_bits_per_word / BITS_PER_BYTE) + 1;
peripheral.rx_len = (xfer->len / bytes_per_word);
}
}
peripheral.loopback_en = !!(spi_slv->mode & SPI_LOOP);
peripheral.clock_pol_high = !!(spi_slv->mode & SPI_CPOL);
peripheral.data_pol_high = !!(spi_slv->mode & SPI_CPHA);
peripheral.cs = spi_get_chipselect(spi_slv, 0);
peripheral.pack_en = true;
peripheral.word_len = xfer->bits_per_word - MIN_WORD_LEN;
ret = get_spi_clk_cfg(mas->cur_speed_hz, mas,
&peripheral.clk_src, &peripheral.clk_div);
if (ret) {
dev_err(mas->dev, "Err in get_spi_clk_cfg() :%d\n", ret);
return ret;
}
if (!xfer->cs_change) {
if (!list_is_last(&xfer->transfer_list, &spi->cur_msg->transfers))
peripheral.fragmentation = FRAGMENTATION;
}
if (peripheral.cmd & SPI_RX) {
dmaengine_slave_config(mas->rx, &config);
rx_desc = dmaengine_prep_slave_sg(mas->rx, xfer->rx_sg.sgl, xfer->rx_sg.nents,
DMA_DEV_TO_MEM, flags);
if (!rx_desc) {
dev_err(mas->dev, "Err setting up rx desc\n");
return -EIO;
}
}
/*
* Always prepare the TX descriptor, even for RX-only transfers or when
* tx_buf is NULL; the GSI spec requires TX to be set up.
*/
dmaengine_slave_config(mas->tx, &config);
tx_desc = dmaengine_prep_slave_sg(mas->tx, xfer->tx_sg.sgl, xfer->tx_sg.nents,
DMA_MEM_TO_DEV, flags);
if (!tx_desc) {
dev_err(mas->dev, "Err setting up tx desc\n");
return -EIO;
}
tx_desc->callback_result = spi_gsi_callback_result;
tx_desc->callback_param = spi;
if (peripheral.cmd & SPI_RX)
dmaengine_submit(rx_desc);
dmaengine_submit(tx_desc);
if (peripheral.cmd & SPI_RX)
dma_async_issue_pending(mas->rx);
dma_async_issue_pending(mas->tx);
return 1;
}
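/*
 * The GENI TX/RX transfer length registers are programmed in SPI words,
 * not bytes; convert the transfer byte count into words here.
 */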
static u32 get_xfer_len_in_words(struct spi_transfer *xfer,
struct spi_geni_master *mas)
{
u32 len;
if (!(mas->cur_bits_per_word % MIN_WORD_LEN))
len = xfer->len * BITS_PER_BYTE / mas->cur_bits_per_word;
else
len = xfer->len / (mas->cur_bits_per_word / BITS_PER_BYTE + 1);
len &= TRANS_LEN_MSK;
return len;
}
static bool geni_can_dma(struct spi_controller *ctlr,
struct spi_device *slv, struct spi_transfer *xfer)
{
struct spi_geni_master *mas = spi_master_get_devdata(slv->master);
u32 len, fifo_size;
if (mas->cur_xfer_mode == GENI_GPI_DMA)
return true;
/* Set SE DMA mode for SPI slave. */
if (ctlr->slave)
return true;
len = get_xfer_len_in_words(xfer, mas);
fifo_size = mas->tx_fifo_depth * mas->fifo_width_bits / mas->cur_bits_per_word;
if (len > fifo_size)
return true;
else
return false;
}
static int spi_geni_prepare_message(struct spi_master *spi,
struct spi_message *spi_msg)
{
struct spi_geni_master *mas = spi_master_get_devdata(spi);
int ret;
switch (mas->cur_xfer_mode) {
case GENI_SE_FIFO:
case GENI_SE_DMA:
if (spi_geni_is_abort_still_pending(mas))
return -EBUSY;
ret = setup_fifo_params(spi_msg->spi, spi);
if (ret)
dev_err(mas->dev, "Couldn't select mode %d\n", ret);
return ret;
case GENI_GPI_DMA:
/* nothing to do for GPI DMA */
return 0;
}
dev_err(mas->dev, "Mode not supported %d", mas->cur_xfer_mode);
return -EINVAL;
}
static int spi_geni_grab_gpi_chan(struct spi_geni_master *mas)
{
int ret;
mas->tx = dma_request_chan(mas->dev, "tx");
if (IS_ERR(mas->tx)) {
ret = dev_err_probe(mas->dev, PTR_ERR(mas->tx),
"Failed to get tx DMA ch\n");
goto err_tx;
}
mas->rx = dma_request_chan(mas->dev, "rx");
if (IS_ERR(mas->rx)) {
ret = dev_err_probe(mas->dev, PTR_ERR(mas->rx),
"Failed to get rx DMA ch\n");
goto err_rx;
}
return 0;
err_rx:
mas->rx = NULL;
dma_release_channel(mas->tx);
err_tx:
mas->tx = NULL;
return ret;
}
static void spi_geni_release_dma_chan(struct spi_geni_master *mas)
{
if (mas->rx) {
dma_release_channel(mas->rx);
mas->rx = NULL;
}
if (mas->tx) {
dma_release_channel(mas->tx);
mas->tx = NULL;
}
}
static int spi_geni_init(struct spi_geni_master *mas)
{
struct spi_master *spi = dev_get_drvdata(mas->dev);
struct geni_se *se = &mas->se;
unsigned int proto, major, minor, ver;
u32 spi_tx_cfg, fifo_disable;
int ret = -ENXIO;
pm_runtime_get_sync(mas->dev);
proto = geni_se_read_proto(se);
if (spi->slave) {
if (proto != GENI_SE_SPI_SLAVE) {
dev_err(mas->dev, "Invalid proto %d\n", proto);
goto out_pm;
}
spi_slv_setup(mas);
} else if (proto != GENI_SE_SPI) {
dev_err(mas->dev, "Invalid proto %d\n", proto);
goto out_pm;
}
mas->tx_fifo_depth = geni_se_get_tx_fifo_depth(se);
/* Width of Tx and Rx FIFO is same */
mas->fifo_width_bits = geni_se_get_tx_fifo_width(se);
/*
* The hardware programming guide suggests configuring the
* RX FIFO RFR level to fifo_depth - 2.
*/
geni_se_init(se, mas->tx_fifo_depth - 3, mas->tx_fifo_depth - 2);
/* Transmit an entire FIFO worth of data per IRQ */
mas->tx_wm = 1;
ver = geni_se_get_qup_hw_version(se);
major = GENI_SE_VERSION_MAJOR(ver);
minor = GENI_SE_VERSION_MINOR(ver);
if (major == 1 && minor == 0)
mas->oversampling = 2;
else
mas->oversampling = 1;
fifo_disable = readl(se->base + GENI_IF_DISABLE_RO) & FIFO_IF_DISABLE;
switch (fifo_disable) {
case 1:
ret = spi_geni_grab_gpi_chan(mas);
if (!ret) { /* success case */
mas->cur_xfer_mode = GENI_GPI_DMA;
geni_se_select_mode(se, GENI_GPI_DMA);
dev_dbg(mas->dev, "Using GPI DMA mode for SPI\n");
break;
} else if (ret == -EPROBE_DEFER) {
goto out_pm;
}
/*
* In case we fail to get a GPI DMA channel, we can still use
* FIFO mode, so fall through.
*/
dev_warn(mas->dev, "FIFO mode disabled, but couldn't get DMA, fall back to FIFO mode\n");
fallthrough;
case 0:
mas->cur_xfer_mode = GENI_SE_FIFO;
geni_se_select_mode(se, GENI_SE_FIFO);
ret = 0;
break;
}
/* We always control CS manually */
if (!spi->slave) {
spi_tx_cfg = readl(se->base + SE_SPI_TRANS_CFG);
spi_tx_cfg &= ~CS_TOGGLE;
writel(spi_tx_cfg, se->base + SE_SPI_TRANS_CFG);
}
out_pm:
pm_runtime_put(mas->dev);
return ret;
}
static unsigned int geni_byte_per_fifo_word(struct spi_geni_master *mas)
{
/*
* Calculate how many bytes we'll put in each FIFO word. If the
* transfer words don't pack cleanly into a FIFO word we'll just put
* one transfer word in each FIFO word. If they do pack we'll pack 'em.
*/
if (mas->fifo_width_bits % mas->cur_bits_per_word)
return roundup_pow_of_two(DIV_ROUND_UP(mas->cur_bits_per_word,
BITS_PER_BYTE));
return mas->fifo_width_bits / BITS_PER_BYTE;
}
static bool geni_spi_handle_tx(struct spi_geni_master *mas)
{
struct geni_se *se = &mas->se;
unsigned int max_bytes;
const u8 *tx_buf;
unsigned int bytes_per_fifo_word = geni_byte_per_fifo_word(mas);
unsigned int i = 0;
/* Stop the watermark IRQ if nothing to send */
if (!mas->cur_xfer) {
writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
return false;
}
max_bytes = (mas->tx_fifo_depth - mas->tx_wm) * bytes_per_fifo_word;
if (mas->tx_rem_bytes < max_bytes)
max_bytes = mas->tx_rem_bytes;
tx_buf = mas->cur_xfer->tx_buf + mas->cur_xfer->len - mas->tx_rem_bytes;
while (i < max_bytes) {
unsigned int j;
unsigned int bytes_to_write;
u32 fifo_word = 0;
u8 *fifo_byte = (u8 *)&fifo_word;
bytes_to_write = min(bytes_per_fifo_word, max_bytes - i);
for (j = 0; j < bytes_to_write; j++)
fifo_byte[j] = tx_buf[i++];
iowrite32_rep(se->base + SE_GENI_TX_FIFOn, &fifo_word, 1);
}
mas->tx_rem_bytes -= max_bytes;
if (!mas->tx_rem_bytes) {
writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
return false;
}
return true;
}
static void geni_spi_handle_rx(struct spi_geni_master *mas)
{
struct geni_se *se = &mas->se;
u32 rx_fifo_status;
unsigned int rx_bytes;
unsigned int rx_last_byte_valid;
u8 *rx_buf;
unsigned int bytes_per_fifo_word = geni_byte_per_fifo_word(mas);
unsigned int i = 0;
rx_fifo_status = readl(se->base + SE_GENI_RX_FIFO_STATUS);
rx_bytes = (rx_fifo_status & RX_FIFO_WC_MSK) * bytes_per_fifo_word;
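/*
 * RX_LAST indicates the final FIFO word is only partially valid; trim
 * rx_bytes down to the number of valid bytes in that last word.
 */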
if (rx_fifo_status & RX_LAST) {
rx_last_byte_valid = rx_fifo_status & RX_LAST_BYTE_VALID_MSK;
rx_last_byte_valid >>= RX_LAST_BYTE_VALID_SHFT;
if (rx_last_byte_valid && rx_last_byte_valid < 4)
rx_bytes -= bytes_per_fifo_word - rx_last_byte_valid;
}
/* Clear out the FIFO and bail if nowhere to put it */
if (!mas->cur_xfer) {
for (i = 0; i < DIV_ROUND_UP(rx_bytes, bytes_per_fifo_word); i++)
readl(se->base + SE_GENI_RX_FIFOn);
return;
}
if (mas->rx_rem_bytes < rx_bytes)
rx_bytes = mas->rx_rem_bytes;
rx_buf = mas->cur_xfer->rx_buf + mas->cur_xfer->len - mas->rx_rem_bytes;
while (i < rx_bytes) {
u32 fifo_word = 0;
u8 *fifo_byte = (u8 *)&fifo_word;
unsigned int bytes_to_read;
unsigned int j;
bytes_to_read = min(bytes_per_fifo_word, rx_bytes - i);
ioread32_rep(se->base + SE_GENI_RX_FIFOn, &fifo_word, 1);
for (j = 0; j < bytes_to_read; j++)
rx_buf[i++] = fifo_byte[j];
}
mas->rx_rem_bytes -= rx_bytes;
}
static int setup_se_xfer(struct spi_transfer *xfer,
struct spi_geni_master *mas,
u16 mode, struct spi_master *spi)
{
u32 m_cmd = 0;
u32 len;
struct geni_se *se = &mas->se;
int ret;
/*
* Ensure that our interrupt handler isn't still running from some
* prior command before we start messing with the hardware behind
* its back. We don't need to _keep_ the lock here since we're only
* worried about racing with our interrupt handler. The SPI core
* already handles making sure that we're not trying to do two
* transfers at once or setting a chip select and doing a transfer
* concurrently.
*
* NOTE: we actually _can't_ hold the lock here because possibly we
* might call clk_set_rate() which needs to be able to sleep.
*/
spin_lock_irq(&mas->lock);
spin_unlock_irq(&mas->lock);
if (xfer->bits_per_word != mas->cur_bits_per_word) {
spi_setup_word_len(mas, mode, xfer->bits_per_word);
mas->cur_bits_per_word = xfer->bits_per_word;
}
/* Speed and bits per word can be overridden per transfer */
ret = geni_spi_set_clock_and_bw(mas, xfer->speed_hz);
if (ret)
return ret;
mas->tx_rem_bytes = 0;
mas->rx_rem_bytes = 0;
len = get_xfer_len_in_words(xfer, mas);
mas->cur_xfer = xfer;
if (xfer->tx_buf) {
m_cmd |= SPI_TX_ONLY;
mas->tx_rem_bytes = xfer->len;
writel(len, se->base + SE_SPI_TX_TRANS_LEN);
}
if (xfer->rx_buf) {
m_cmd |= SPI_RX_ONLY;
writel(len, se->base + SE_SPI_RX_TRANS_LEN);
mas->rx_rem_bytes = xfer->len;
}
/*
* Select DMA mode if scatter-gather tables are present and have only
* one entry. This is not a serious limitation because the xfer
* buffers almost always fit into one entry, and if any doesn't for
* some reason we fall back to FIFO mode anyway.
*/
if (!xfer->tx_sg.nents && !xfer->rx_sg.nents)
mas->cur_xfer_mode = GENI_SE_FIFO;
else if (xfer->tx_sg.nents > 1 || xfer->rx_sg.nents > 1) {
dev_warn_once(mas->dev, "Doing FIFO, cannot handle tx_nents-%d, rx_nents-%d\n",
xfer->tx_sg.nents, xfer->rx_sg.nents);
mas->cur_xfer_mode = GENI_SE_FIFO;
} else
mas->cur_xfer_mode = GENI_SE_DMA;
geni_se_select_mode(se, mas->cur_xfer_mode);
/*
* Lock around right before we start the transfer since our
* interrupt could come in at any time now.
*/
spin_lock_irq(&mas->lock);
geni_se_setup_m_cmd(se, m_cmd, FRAGMENTATION);
if (mas->cur_xfer_mode == GENI_SE_DMA) {
if (m_cmd & SPI_RX_ONLY)
geni_se_rx_init_dma(se, sg_dma_address(xfer->rx_sg.sgl),
sg_dma_len(xfer->rx_sg.sgl));
if (m_cmd & SPI_TX_ONLY)
geni_se_tx_init_dma(se, sg_dma_address(xfer->tx_sg.sgl),
sg_dma_len(xfer->tx_sg.sgl));
} else if (m_cmd & SPI_TX_ONLY) {
if (geni_spi_handle_tx(mas))
writel(mas->tx_wm, se->base + SE_GENI_TX_WATERMARK_REG);
}
spin_unlock_irq(&mas->lock);
return ret;
}
static int spi_geni_transfer_one(struct spi_master *spi,
struct spi_device *slv,
struct spi_transfer *xfer)
{
struct spi_geni_master *mas = spi_master_get_devdata(spi);
int ret;
if (spi_geni_is_abort_still_pending(mas))
return -EBUSY;
/* Terminate and return success for 0 byte length transfer */
if (!xfer->len)
return 0;
if (mas->cur_xfer_mode == GENI_SE_FIFO || mas->cur_xfer_mode == GENI_SE_DMA) {
ret = setup_se_xfer(xfer, mas, slv->mode, spi);
/* SPI framework expects +ve ret code to wait for transfer complete */
if (!ret)
ret = 1;
return ret;
}
return setup_gsi_xfer(xfer, mas, slv, spi);
}
static irqreturn_t geni_spi_isr(int irq, void *data)
{
struct spi_master *spi = data;
struct spi_geni_master *mas = spi_master_get_devdata(spi);
struct geni_se *se = &mas->se;
u32 m_irq;
m_irq = readl(se->base + SE_GENI_M_IRQ_STATUS);
if (!m_irq)
return IRQ_NONE;
if (m_irq & (M_CMD_OVERRUN_EN | M_ILLEGAL_CMD_EN | M_CMD_FAILURE_EN |
M_RX_FIFO_RD_ERR_EN | M_RX_FIFO_WR_ERR_EN |
M_TX_FIFO_RD_ERR_EN | M_TX_FIFO_WR_ERR_EN))
dev_warn(mas->dev, "Unexpected IRQ err status %#010x\n", m_irq);
spin_lock(&mas->lock);
if (mas->cur_xfer_mode == GENI_SE_FIFO) {
if ((m_irq & M_RX_FIFO_WATERMARK_EN) || (m_irq & M_RX_FIFO_LAST_EN))
geni_spi_handle_rx(mas);
if (m_irq & M_TX_FIFO_WATERMARK_EN)
geni_spi_handle_tx(mas);
if (m_irq & M_CMD_DONE_EN) {
if (mas->cur_xfer) {
spi_finalize_current_transfer(spi);
mas->cur_xfer = NULL;
/*
* If this happens, then a CMD_DONE came before all the
* Tx buffer bytes were sent out. This is unusual; log
* this condition and disable the WM interrupt to
* prevent the system from stalling due to an interrupt
* storm.
*
* If this happens when all Rx bytes haven't been
* received, log the condition. The only known time
* this can happen is if bits_per_word != 8 and some
* registers that expect xfer lengths in num spi_words
* weren't written correctly.
*/
if (mas->tx_rem_bytes) {
writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
dev_err(mas->dev, "Premature done. tx_rem = %d bpw%d\n",
mas->tx_rem_bytes, mas->cur_bits_per_word);
}
if (mas->rx_rem_bytes)
dev_err(mas->dev, "Premature done. rx_rem = %d bpw%d\n",
mas->rx_rem_bytes, mas->cur_bits_per_word);
} else {
complete(&mas->cs_done);
}
}
} else if (mas->cur_xfer_mode == GENI_SE_DMA) {
const struct spi_transfer *xfer = mas->cur_xfer;
u32 dma_tx_status = readl_relaxed(se->base + SE_DMA_TX_IRQ_STAT);
u32 dma_rx_status = readl_relaxed(se->base + SE_DMA_RX_IRQ_STAT);
if (dma_tx_status)
writel(dma_tx_status, se->base + SE_DMA_TX_IRQ_CLR);
if (dma_rx_status)
writel(dma_rx_status, se->base + SE_DMA_RX_IRQ_CLR);
if (dma_tx_status & TX_DMA_DONE)
mas->tx_rem_bytes = 0;
if (dma_rx_status & RX_DMA_DONE)
mas->rx_rem_bytes = 0;
if (dma_tx_status & TX_RESET_DONE)
complete(&mas->tx_reset_done);
if (dma_rx_status & RX_RESET_DONE)
complete(&mas->rx_reset_done);
if (!mas->tx_rem_bytes && !mas->rx_rem_bytes && xfer) {
spi_finalize_current_transfer(spi);
mas->cur_xfer = NULL;
}
}
if (m_irq & M_CMD_CANCEL_EN)
complete(&mas->cancel_done);
if (m_irq & M_CMD_ABORT_EN)
complete(&mas->abort_done);
/*
* It's safe or a good idea to Ack all of our interrupts at the end
* of the function. Specifically:
* - M_CMD_DONE_EN / M_RX_FIFO_LAST_EN: Edge triggered interrupts and
* clearing Acks. Clearing at the end relies on nobody else having
* started a new transfer yet or else we could be clearing _their_
* done bit, but everyone grabs the spinlock before starting a new
* transfer.
* - M_RX_FIFO_WATERMARK_EN / M_TX_FIFO_WATERMARK_EN: These appear
* to be "latched level" interrupts so it's important to clear them
* _after_ you've handled the condition and always safe to do so
* since they'll re-assert if they're still happening.
*/
writel(m_irq, se->base + SE_GENI_M_IRQ_CLEAR);
spin_unlock(&mas->lock);
return IRQ_HANDLED;
}
static int spi_geni_probe(struct platform_device *pdev)
{
int ret, irq;
struct spi_master *spi;
struct spi_geni_master *mas;
void __iomem *base;
struct clk *clk;
struct device *dev = &pdev->dev;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (ret)
return dev_err_probe(dev, ret, "could not set DMA mask\n");
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
clk = devm_clk_get(dev, "se");
if (IS_ERR(clk))
return PTR_ERR(clk);
spi = devm_spi_alloc_master(dev, sizeof(*mas));
if (!spi)
return -ENOMEM;
platform_set_drvdata(pdev, spi);
mas = spi_master_get_devdata(spi);
mas->irq = irq;
mas->dev = dev;
mas->se.dev = dev;
mas->se.wrapper = dev_get_drvdata(dev->parent);
mas->se.base = base;
mas->se.clk = clk;
ret = devm_pm_opp_set_clkname(&pdev->dev, "se");
if (ret)
return ret;
/* OPP table is optional */
ret = devm_pm_opp_of_add_table(&pdev->dev);
if (ret && ret != -ENODEV) {
dev_err(&pdev->dev, "invalid OPP table in device tree\n");
return ret;
}
spi->bus_num = -1;
spi->dev.of_node = dev->of_node;
spi->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_CS_HIGH;
spi->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
spi->num_chipselect = 4;
spi->max_speed_hz = 50000000;
spi->max_dma_len = 0xffff0; /* 24 bits for tx/rx dma length */
spi->prepare_message = spi_geni_prepare_message;
spi->transfer_one = spi_geni_transfer_one;
spi->can_dma = geni_can_dma;
spi->dma_map_dev = dev->parent;
spi->auto_runtime_pm = true;
spi->handle_err = spi_geni_handle_err;
spi->use_gpio_descriptors = true;
init_completion(&mas->cs_done);
init_completion(&mas->cancel_done);
init_completion(&mas->abort_done);
init_completion(&mas->tx_reset_done);
init_completion(&mas->rx_reset_done);
spin_lock_init(&mas->lock);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, 250);
pm_runtime_enable(dev);
if (device_property_read_bool(&pdev->dev, "spi-slave"))
spi->slave = true;
ret = geni_icc_get(&mas->se, NULL);
if (ret)
goto spi_geni_probe_runtime_disable;
/* Set the bus quota to a reasonable value for register access */
mas->se.icc_paths[GENI_TO_CORE].avg_bw = Bps_to_icc(CORE_2X_50_MHZ);
mas->se.icc_paths[CPU_TO_GENI].avg_bw = GENI_DEFAULT_BW;
ret = geni_icc_set_bw(&mas->se);
if (ret)
goto spi_geni_probe_runtime_disable;
ret = spi_geni_init(mas);
if (ret)
goto spi_geni_probe_runtime_disable;
/*
* check the mode supported and set_cs for fifo mode only
* for dma (gsi) mode, the gsi will set cs based on params passed in
* TRE
*/
if (!spi->slave && mas->cur_xfer_mode == GENI_SE_FIFO)
spi->set_cs = spi_geni_set_cs;
/*
* TX is required per GSI spec, see setup_gsi_xfer().
*/
if (mas->cur_xfer_mode == GENI_GPI_DMA)
spi->flags = SPI_CONTROLLER_MUST_TX;
ret = request_irq(mas->irq, geni_spi_isr, 0, dev_name(dev), spi);
if (ret)
goto spi_geni_release_dma;
ret = spi_register_master(spi);
if (ret)
goto spi_geni_probe_free_irq;
return 0;
spi_geni_probe_free_irq:
free_irq(mas->irq, spi);
spi_geni_release_dma:
spi_geni_release_dma_chan(mas);
spi_geni_probe_runtime_disable:
pm_runtime_disable(dev);
return ret;
}
static void spi_geni_remove(struct platform_device *pdev)
{
struct spi_master *spi = platform_get_drvdata(pdev);
struct spi_geni_master *mas = spi_master_get_devdata(spi);
/* Unregister _before_ disabling pm_runtime() so we stop transfers */
spi_unregister_master(spi);
spi_geni_release_dma_chan(mas);
free_irq(mas->irq, spi);
pm_runtime_disable(&pdev->dev);
}
static int __maybe_unused spi_geni_runtime_suspend(struct device *dev)
{
struct spi_master *spi = dev_get_drvdata(dev);
struct spi_geni_master *mas = spi_master_get_devdata(spi);
int ret;
/* Drop the performance state vote */
dev_pm_opp_set_rate(dev, 0);
ret = geni_se_resources_off(&mas->se);
if (ret)
return ret;
return geni_icc_disable(&mas->se);
}
static int __maybe_unused spi_geni_runtime_resume(struct device *dev)
{
struct spi_master *spi = dev_get_drvdata(dev);
struct spi_geni_master *mas = spi_master_get_devdata(spi);
int ret;
ret = geni_icc_enable(&mas->se);
if (ret)
return ret;
ret = geni_se_resources_on(&mas->se);
if (ret)
return ret;
return dev_pm_opp_set_rate(mas->dev, mas->cur_sclk_hz);
}
static int __maybe_unused spi_geni_suspend(struct device *dev)
{
struct spi_master *spi = dev_get_drvdata(dev);
int ret;
ret = spi_master_suspend(spi);
if (ret)
return ret;
ret = pm_runtime_force_suspend(dev);
if (ret)
spi_master_resume(spi);
return ret;
}
static int __maybe_unused spi_geni_resume(struct device *dev)
{
struct spi_master *spi = dev_get_drvdata(dev);
int ret;
ret = pm_runtime_force_resume(dev);
if (ret)
return ret;
ret = spi_master_resume(spi);
if (ret)
pm_runtime_force_suspend(dev);
return ret;
}
static const struct dev_pm_ops spi_geni_pm_ops = {
SET_RUNTIME_PM_OPS(spi_geni_runtime_suspend,
spi_geni_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(spi_geni_suspend, spi_geni_resume)
};
static const struct of_device_id spi_geni_dt_match[] = {
{ .compatible = "qcom,geni-spi" },
{}
};
MODULE_DEVICE_TABLE(of, spi_geni_dt_match);
static struct platform_driver spi_geni_driver = {
.probe = spi_geni_probe,
.remove_new = spi_geni_remove,
.driver = {
.name = "geni_spi",
.pm = &spi_geni_pm_ops,
.of_match_table = spi_geni_dt_match,
},
};
module_platform_driver(spi_geni_driver);
MODULE_DESCRIPTION("SPI driver for GENI based QUP cores");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/spi/spi-geni-qcom.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Freescale eSPI controller driver.
*
* Copyright 2010 Freescale Semiconductor, Inc.
*/
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/fsl_devices.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/pm_runtime.h>
#include <sysdev/fsl_soc.h>
/* eSPI Controller registers */
#define ESPI_SPMODE 0x00 /* eSPI mode register */
#define ESPI_SPIE 0x04 /* eSPI event register */
#define ESPI_SPIM 0x08 /* eSPI mask register */
#define ESPI_SPCOM 0x0c /* eSPI command register */
#define ESPI_SPITF 0x10 /* eSPI transmit FIFO access register*/
#define ESPI_SPIRF 0x14 /* eSPI receive FIFO access register*/
#define ESPI_SPMODE0 0x20 /* eSPI cs0 mode register */
#define ESPI_SPMODEx(x) (ESPI_SPMODE0 + (x) * 4)
/* eSPI Controller mode register definitions */
#define SPMODE_ENABLE BIT(31)
#define SPMODE_LOOP BIT(30)
#define SPMODE_TXTHR(x) ((x) << 8)
#define SPMODE_RXTHR(x) ((x) << 0)
/* eSPI Controller CS mode register definitions */
#define CSMODE_CI_INACTIVEHIGH BIT(31)
#define CSMODE_CP_BEGIN_EDGECLK BIT(30)
#define CSMODE_REV BIT(29)
#define CSMODE_DIV16 BIT(28)
#define CSMODE_PM(x) ((x) << 24)
#define CSMODE_POL_1 BIT(20)
#define CSMODE_LEN(x) ((x) << 16)
#define CSMODE_BEF(x) ((x) << 12)
#define CSMODE_AFT(x) ((x) << 8)
#define CSMODE_CG(x) ((x) << 3)
#define FSL_ESPI_FIFO_SIZE 32
#define FSL_ESPI_RXTHR 15
/* Default mode/csmode for eSPI controller */
#define SPMODE_INIT_VAL (SPMODE_TXTHR(4) | SPMODE_RXTHR(FSL_ESPI_RXTHR))
#define CSMODE_INIT_VAL (CSMODE_POL_1 | CSMODE_BEF(0) \
| CSMODE_AFT(0) | CSMODE_CG(1))
/* SPIE register values */
#define SPIE_RXCNT(reg) ((reg >> 24) & 0x3F)
#define SPIE_TXCNT(reg) ((reg >> 16) & 0x3F)
#define SPIE_TXE BIT(15) /* TX FIFO empty */
#define SPIE_DON BIT(14) /* TX done */
#define SPIE_RXT BIT(13) /* RX FIFO threshold */
#define SPIE_RXF BIT(12) /* RX FIFO full */
#define SPIE_TXT BIT(11) /* TX FIFO threshold*/
#define SPIE_RNE BIT(9) /* RX FIFO not empty */
#define SPIE_TNF BIT(8) /* TX FIFO not full */
/* SPIM register values */
#define SPIM_TXE BIT(15) /* TX FIFO empty */
#define SPIM_DON BIT(14) /* TX done */
#define SPIM_RXT BIT(13) /* RX FIFO threshold */
#define SPIM_RXF BIT(12) /* RX FIFO full */
#define SPIM_TXT BIT(11) /* TX FIFO threshold */
#define SPIM_RNE BIT(9) /* RX FIFO not empty */
#define SPIM_TNF BIT(8) /* TX FIFO not full */
/* SPCOM register values */
#define SPCOM_CS(x) ((x) << 30)
#define SPCOM_DO BIT(28) /* Dual output */
#define SPCOM_TO BIT(27) /* TX only */
#define SPCOM_RXSKIP(x) ((x) << 16)
#define SPCOM_TRANLEN(x) ((x) << 0)
#define SPCOM_TRANLEN_MAX 0x10000 /* Max transaction length */
#define AUTOSUSPEND_TIMEOUT 2000
struct fsl_espi {
struct device *dev;
void __iomem *reg_base;
struct list_head *m_transfers;
struct spi_transfer *tx_t;
unsigned int tx_pos;
bool tx_done;
struct spi_transfer *rx_t;
unsigned int rx_pos;
bool rx_done;
bool swab;
unsigned int rxskip;
spinlock_t lock;
u32 spibrg; /* SPIBRG input clock */
struct completion done;
};
struct fsl_espi_cs {
u32 hw_mode;
};
static inline u32 fsl_espi_read_reg(struct fsl_espi *espi, int offset)
{
return ioread32be(espi->reg_base + offset);
}
static inline u16 fsl_espi_read_reg16(struct fsl_espi *espi, int offset)
{
return ioread16be(espi->reg_base + offset);
}
static inline u8 fsl_espi_read_reg8(struct fsl_espi *espi, int offset)
{
return ioread8(espi->reg_base + offset);
}
static inline void fsl_espi_write_reg(struct fsl_espi *espi, int offset,
u32 val)
{
iowrite32be(val, espi->reg_base + offset);
}
static inline void fsl_espi_write_reg16(struct fsl_espi *espi, int offset,
u16 val)
{
iowrite16be(val, espi->reg_base + offset);
}
static inline void fsl_espi_write_reg8(struct fsl_espi *espi, int offset,
u8 val)
{
iowrite8(val, espi->reg_base + offset);
}
static int fsl_espi_check_message(struct spi_message *m)
{
struct fsl_espi *espi = spi_controller_get_devdata(m->spi->controller);
struct spi_transfer *t, *first;
if (m->frame_length > SPCOM_TRANLEN_MAX) {
dev_err(espi->dev, "message too long, size is %u bytes\n",
m->frame_length);
return -EMSGSIZE;
}
first = list_first_entry(&m->transfers, struct spi_transfer,
transfer_list);
list_for_each_entry(t, &m->transfers, transfer_list) {
if (first->bits_per_word != t->bits_per_word ||
first->speed_hz != t->speed_hz) {
dev_err(espi->dev, "bits_per_word/speed_hz should be the same for all transfers\n");
return -EINVAL;
}
}
/* ESPI supports MSB-first transfers for word size 8 / 16 only */
if (!(m->spi->mode & SPI_LSB_FIRST) && first->bits_per_word != 8 &&
first->bits_per_word != 16) {
dev_err(espi->dev,
"MSB-first transfer not supported for wordsize %u\n",
first->bits_per_word);
return -EINVAL;
}
return 0;
}
static unsigned int fsl_espi_check_rxskip_mode(struct spi_message *m)
{
struct spi_transfer *t;
unsigned int i = 0, rxskip = 0;
/*
* prerequisites for ESPI rxskip mode:
* - message has two transfers
* - first transfer is a write and second is a read
*
* In addition the current low-level transfer mechanism requires
* that the rxskip bytes fit into the TX FIFO. Else the transfer
* would hang because after the first FSL_ESPI_FIFO_SIZE bytes
* the TX FIFO isn't re-filled.
*/
list_for_each_entry(t, &m->transfers, transfer_list) {
if (i == 0) {
if (!t->tx_buf || t->rx_buf ||
t->len > FSL_ESPI_FIFO_SIZE)
return 0;
rxskip = t->len;
} else if (i == 1) {
if (t->tx_buf || !t->rx_buf)
return 0;
}
i++;
}
return i == 2 ? rxskip : 0;
}
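/*
 * Illustrative note (not part of the original driver): a client message
 * that satisfies the rxskip prerequisites above is, for example, what the
 * SPI core helper spi_write_then_read() builds - one write transfer
 * followed by one read transfer, roughly:
 *
 *	u8 cmd[] = { 0x03, a2, a1, a0 };	// hypothetical read command
 *	ret = spi_write_then_read(spi, cmd, sizeof(cmd), buf, len);
 *
 * As long as the write part fits into the 32-byte TX FIFO, the driver can
 * carry it via SPCOM[RXSKIP] instead of clocking out dummy bytes.
 */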
static void fsl_espi_fill_tx_fifo(struct fsl_espi *espi, u32 events)
{
u32 tx_fifo_avail;
unsigned int tx_left;
const void *tx_buf;
/* if events is zero, the transfer has not started and the tx fifo is empty */
tx_fifo_avail = events ? SPIE_TXCNT(events) : FSL_ESPI_FIFO_SIZE;
start:
tx_left = espi->tx_t->len - espi->tx_pos;
tx_buf = espi->tx_t->tx_buf;
while (tx_fifo_avail >= min(4U, tx_left) && tx_left) {
if (tx_left >= 4) {
if (!tx_buf)
fsl_espi_write_reg(espi, ESPI_SPITF, 0);
else if (espi->swab)
fsl_espi_write_reg(espi, ESPI_SPITF,
swahb32p(tx_buf + espi->tx_pos));
else
fsl_espi_write_reg(espi, ESPI_SPITF,
*(u32 *)(tx_buf + espi->tx_pos));
espi->tx_pos += 4;
tx_left -= 4;
tx_fifo_avail -= 4;
} else if (tx_left >= 2 && tx_buf && espi->swab) {
fsl_espi_write_reg16(espi, ESPI_SPITF,
swab16p(tx_buf + espi->tx_pos));
espi->tx_pos += 2;
tx_left -= 2;
tx_fifo_avail -= 2;
} else {
if (!tx_buf)
fsl_espi_write_reg8(espi, ESPI_SPITF, 0);
else
fsl_espi_write_reg8(espi, ESPI_SPITF,
*(u8 *)(tx_buf + espi->tx_pos));
espi->tx_pos += 1;
tx_left -= 1;
tx_fifo_avail -= 1;
}
}
if (!tx_left) {
/* Last transfer finished, in rxskip mode only one is needed */
if (list_is_last(&espi->tx_t->transfer_list,
espi->m_transfers) || espi->rxskip) {
espi->tx_done = true;
return;
}
espi->tx_t = list_next_entry(espi->tx_t, transfer_list);
espi->tx_pos = 0;
/* continue with next transfer if tx fifo is not full */
if (tx_fifo_avail)
goto start;
}
}
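/*
 * Illustrative note on the swab path used above and in the read path below
 * (not part of the original driver): for LSB-first transfers with word
 * sizes above 8 bits, swahb32p() swaps the two bytes inside each 16-bit
 * half of the 32-bit FIFO word, e.g. 0x11223344 is written out as
 * 0x22114433, and swab16p() does the same for a 2-byte tail.
 */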
static void fsl_espi_read_rx_fifo(struct fsl_espi *espi, u32 events)
{
u32 rx_fifo_avail = SPIE_RXCNT(events);
unsigned int rx_left;
void *rx_buf;
start:
rx_left = espi->rx_t->len - espi->rx_pos;
rx_buf = espi->rx_t->rx_buf;
while (rx_fifo_avail >= min(4U, rx_left) && rx_left) {
if (rx_left >= 4) {
u32 val = fsl_espi_read_reg(espi, ESPI_SPIRF);
if (rx_buf && espi->swab)
*(u32 *)(rx_buf + espi->rx_pos) = swahb32(val);
else if (rx_buf)
*(u32 *)(rx_buf + espi->rx_pos) = val;
espi->rx_pos += 4;
rx_left -= 4;
rx_fifo_avail -= 4;
} else if (rx_left >= 2 && rx_buf && espi->swab) {
u16 val = fsl_espi_read_reg16(espi, ESPI_SPIRF);
*(u16 *)(rx_buf + espi->rx_pos) = swab16(val);
espi->rx_pos += 2;
rx_left -= 2;
rx_fifo_avail -= 2;
} else {
u8 val = fsl_espi_read_reg8(espi, ESPI_SPIRF);
if (rx_buf)
*(u8 *)(rx_buf + espi->rx_pos) = val;
espi->rx_pos += 1;
rx_left -= 1;
rx_fifo_avail -= 1;
}
}
if (!rx_left) {
if (list_is_last(&espi->rx_t->transfer_list,
espi->m_transfers)) {
espi->rx_done = true;
return;
}
espi->rx_t = list_next_entry(espi->rx_t, transfer_list);
espi->rx_pos = 0;
/* continue with next transfer if rx fifo is not empty */
if (rx_fifo_avail)
goto start;
}
}
static void fsl_espi_setup_transfer(struct spi_device *spi,
struct spi_transfer *t)
{
struct fsl_espi *espi = spi_controller_get_devdata(spi->controller);
int bits_per_word = t ? t->bits_per_word : spi->bits_per_word;
u32 pm, hz = t ? t->speed_hz : spi->max_speed_hz;
struct fsl_espi_cs *cs = spi_get_ctldata(spi);
u32 hw_mode_old = cs->hw_mode;
/* mask out bits we are going to set */
cs->hw_mode &= ~(CSMODE_LEN(0xF) | CSMODE_DIV16 | CSMODE_PM(0xF));
cs->hw_mode |= CSMODE_LEN(bits_per_word - 1);
pm = DIV_ROUND_UP(espi->spibrg, hz * 4) - 1;
if (pm > 15) {
cs->hw_mode |= CSMODE_DIV16;
pm = DIV_ROUND_UP(espi->spibrg, hz * 16 * 4) - 1;
}
cs->hw_mode |= CSMODE_PM(pm);
/* don't write the mode register if the mode doesn't change */
if (cs->hw_mode != hw_mode_old)
fsl_espi_write_reg(espi, ESPI_SPMODEx(spi_get_chipselect(spi, 0)),
cs->hw_mode);
}
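/*
 * Worked example for the divider math above (illustrative only): with
 * espi->spibrg = 200 MHz and a requested speed of 10 MHz,
 * pm = DIV_ROUND_UP(200000000, 10000000 * 4) - 1 = 4, which keeps the
 * resulting SPI clock (spibrg / (4 * (pm + 1)) = 10 MHz) at or below the
 * requested rate. Only if pm would exceed 15 is CSMODE_DIV16 set and pm
 * recomputed with the additional /16 factor.
 */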
static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t)
{
struct fsl_espi *espi = spi_controller_get_devdata(spi->controller);
unsigned int rx_len = t->len;
u32 mask, spcom;
int ret;
reinit_completion(&espi->done);
/* Set SPCOM[CS] and SPCOM[TRANLEN] field */
spcom = SPCOM_CS(spi_get_chipselect(spi, 0));
spcom |= SPCOM_TRANLEN(t->len - 1);
/* configure RXSKIP mode */
if (espi->rxskip) {
spcom |= SPCOM_RXSKIP(espi->rxskip);
rx_len = t->len - espi->rxskip;
if (t->rx_nbits == SPI_NBITS_DUAL)
spcom |= SPCOM_DO;
}
fsl_espi_write_reg(espi, ESPI_SPCOM, spcom);
/* enable interrupts */
mask = SPIM_DON;
if (rx_len > FSL_ESPI_FIFO_SIZE)
mask |= SPIM_RXT;
fsl_espi_write_reg(espi, ESPI_SPIM, mask);
/* Prevent filling the fifo from getting interrupted */
spin_lock_irq(&espi->lock);
fsl_espi_fill_tx_fifo(espi, 0);
spin_unlock_irq(&espi->lock);
/* Don't wait forever; the SPI bus occasionally loses interrupts... */
ret = wait_for_completion_timeout(&espi->done, 2 * HZ);
if (ret == 0)
dev_err(espi->dev, "Transfer timed out!\n");
/* disable rx ints */
fsl_espi_write_reg(espi, ESPI_SPIM, 0);
return ret == 0 ? -ETIMEDOUT : 0;
}
static int fsl_espi_trans(struct spi_message *m, struct spi_transfer *trans)
{
struct fsl_espi *espi = spi_controller_get_devdata(m->spi->controller);
struct spi_device *spi = m->spi;
int ret;
/* In case of LSB-first and bits_per_word > 8 byte-swap all words */
espi->swab = spi->mode & SPI_LSB_FIRST && trans->bits_per_word > 8;
espi->m_transfers = &m->transfers;
espi->tx_t = list_first_entry(&m->transfers, struct spi_transfer,
transfer_list);
espi->tx_pos = 0;
espi->tx_done = false;
espi->rx_t = list_first_entry(&m->transfers, struct spi_transfer,
transfer_list);
espi->rx_pos = 0;
espi->rx_done = false;
espi->rxskip = fsl_espi_check_rxskip_mode(m);
if (trans->rx_nbits == SPI_NBITS_DUAL && !espi->rxskip) {
dev_err(espi->dev, "Dual output mode requires RXSKIP mode!\n");
return -EINVAL;
}
/* In RXSKIP mode skip first transfer for reads */
if (espi->rxskip)
espi->rx_t = list_next_entry(espi->rx_t, transfer_list);
fsl_espi_setup_transfer(spi, trans);
ret = fsl_espi_bufs(spi, trans);
spi_transfer_delay_exec(trans);
return ret;
}
static int fsl_espi_do_one_msg(struct spi_controller *host,
struct spi_message *m)
{
unsigned int rx_nbits = 0, delay_nsecs = 0;
struct spi_transfer *t, trans = {};
int ret;
ret = fsl_espi_check_message(m);
if (ret)
goto out;
list_for_each_entry(t, &m->transfers, transfer_list) {
unsigned int delay = spi_delay_to_ns(&t->delay, t);
if (delay > delay_nsecs)
delay_nsecs = delay;
if (t->rx_nbits > rx_nbits)
rx_nbits = t->rx_nbits;
}
t = list_first_entry(&m->transfers, struct spi_transfer,
transfer_list);
trans.len = m->frame_length;
trans.speed_hz = t->speed_hz;
trans.bits_per_word = t->bits_per_word;
trans.delay.value = delay_nsecs;
trans.delay.unit = SPI_DELAY_UNIT_NSECS;
trans.rx_nbits = rx_nbits;
if (trans.len)
ret = fsl_espi_trans(m, &trans);
m->actual_length = ret ? 0 : trans.len;
out:
if (m->status == -EINPROGRESS)
m->status = ret;
spi_finalize_current_message(host);
return ret;
}
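/*
 * Illustrative note: the code above collapses the whole message into a
 * single synthetic transfer of m->frame_length bytes. A typical two-part
 * message (command/address write followed by a data read) therefore ends
 * up as one 'trans', with the write part carried by the RXSKIP mechanism
 * evaluated in fsl_espi_trans().
 */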
static int fsl_espi_setup(struct spi_device *spi)
{
struct fsl_espi *espi;
u32 loop_mode;
struct fsl_espi_cs *cs = spi_get_ctldata(spi);
if (!cs) {
cs = kzalloc(sizeof(*cs), GFP_KERNEL);
if (!cs)
return -ENOMEM;
spi_set_ctldata(spi, cs);
}
espi = spi_controller_get_devdata(spi->controller);
pm_runtime_get_sync(espi->dev);
cs->hw_mode = fsl_espi_read_reg(espi, ESPI_SPMODEx(spi_get_chipselect(spi, 0)));
/* mask out bits we are going to set */
cs->hw_mode &= ~(CSMODE_CP_BEGIN_EDGECLK | CSMODE_CI_INACTIVEHIGH
| CSMODE_REV);
if (spi->mode & SPI_CPHA)
cs->hw_mode |= CSMODE_CP_BEGIN_EDGECLK;
if (spi->mode & SPI_CPOL)
cs->hw_mode |= CSMODE_CI_INACTIVEHIGH;
if (!(spi->mode & SPI_LSB_FIRST))
cs->hw_mode |= CSMODE_REV;
/* Handle the loop mode */
loop_mode = fsl_espi_read_reg(espi, ESPI_SPMODE);
loop_mode &= ~SPMODE_LOOP;
if (spi->mode & SPI_LOOP)
loop_mode |= SPMODE_LOOP;
fsl_espi_write_reg(espi, ESPI_SPMODE, loop_mode);
fsl_espi_setup_transfer(spi, NULL);
pm_runtime_mark_last_busy(espi->dev);
pm_runtime_put_autosuspend(espi->dev);
return 0;
}
static void fsl_espi_cleanup(struct spi_device *spi)
{
struct fsl_espi_cs *cs = spi_get_ctldata(spi);
kfree(cs);
spi_set_ctldata(spi, NULL);
}
static void fsl_espi_cpu_irq(struct fsl_espi *espi, u32 events)
{
if (!espi->rx_done)
fsl_espi_read_rx_fifo(espi, events);
if (!espi->tx_done)
fsl_espi_fill_tx_fifo(espi, events);
if (!espi->tx_done || !espi->rx_done)
return;
/* we're done, but check for errors before returning */
events = fsl_espi_read_reg(espi, ESPI_SPIE);
if (!(events & SPIE_DON))
dev_err(espi->dev,
"Transfer done but SPIE_DON isn't set!\n");
if (SPIE_RXCNT(events) || SPIE_TXCNT(events) != FSL_ESPI_FIFO_SIZE) {
dev_err(espi->dev, "Transfer done but rx/tx fifo's aren't empty!\n");
dev_err(espi->dev, "SPIE_RXCNT = %d, SPIE_TXCNT = %d\n",
SPIE_RXCNT(events), SPIE_TXCNT(events));
}
complete(&espi->done);
}
static irqreturn_t fsl_espi_irq(s32 irq, void *context_data)
{
struct fsl_espi *espi = context_data;
u32 events, mask;
spin_lock(&espi->lock);
/* Get interrupt events(tx/rx) */
events = fsl_espi_read_reg(espi, ESPI_SPIE);
mask = fsl_espi_read_reg(espi, ESPI_SPIM);
if (!(events & mask)) {
spin_unlock(&espi->lock);
return IRQ_NONE;
}
dev_vdbg(espi->dev, "%s: events %x\n", __func__, events);
fsl_espi_cpu_irq(espi, events);
/* Clear the events */
fsl_espi_write_reg(espi, ESPI_SPIE, events);
spin_unlock(&espi->lock);
return IRQ_HANDLED;
}
#ifdef CONFIG_PM
static int fsl_espi_runtime_suspend(struct device *dev)
{
struct spi_controller *host = dev_get_drvdata(dev);
struct fsl_espi *espi = spi_controller_get_devdata(host);
u32 regval;
regval = fsl_espi_read_reg(espi, ESPI_SPMODE);
regval &= ~SPMODE_ENABLE;
fsl_espi_write_reg(espi, ESPI_SPMODE, regval);
return 0;
}
static int fsl_espi_runtime_resume(struct device *dev)
{
struct spi_controller *host = dev_get_drvdata(dev);
struct fsl_espi *espi = spi_controller_get_devdata(host);
u32 regval;
regval = fsl_espi_read_reg(espi, ESPI_SPMODE);
regval |= SPMODE_ENABLE;
fsl_espi_write_reg(espi, ESPI_SPMODE, regval);
return 0;
}
#endif
static size_t fsl_espi_max_message_size(struct spi_device *spi)
{
return SPCOM_TRANLEN_MAX;
}
static void fsl_espi_init_regs(struct device *dev, bool initial)
{
struct spi_controller *host = dev_get_drvdata(dev);
struct fsl_espi *espi = spi_controller_get_devdata(host);
struct device_node *nc;
u32 csmode, cs, prop;
int ret;
/* SPI controller initializations */
fsl_espi_write_reg(espi, ESPI_SPMODE, 0);
fsl_espi_write_reg(espi, ESPI_SPIM, 0);
fsl_espi_write_reg(espi, ESPI_SPCOM, 0);
fsl_espi_write_reg(espi, ESPI_SPIE, 0xffffffff);
/* Init eSPI CS mode register */
for_each_available_child_of_node(host->dev.of_node, nc) {
/* get chip select */
ret = of_property_read_u32(nc, "reg", &cs);
if (ret || cs >= host->num_chipselect)
continue;
csmode = CSMODE_INIT_VAL;
/* check if CSBEF is set in device tree */
ret = of_property_read_u32(nc, "fsl,csbef", &prop);
if (!ret) {
csmode &= ~(CSMODE_BEF(0xf));
csmode |= CSMODE_BEF(prop);
}
/* check if CSAFT is set in device tree */
ret = of_property_read_u32(nc, "fsl,csaft", &prop);
if (!ret) {
csmode &= ~(CSMODE_AFT(0xf));
csmode |= CSMODE_AFT(prop);
}
fsl_espi_write_reg(espi, ESPI_SPMODEx(cs), csmode);
if (initial)
dev_info(dev, "cs=%u, init_csmode=0x%x\n", cs, csmode);
}
/* Enable SPI interface */
fsl_espi_write_reg(espi, ESPI_SPMODE, SPMODE_INIT_VAL | SPMODE_ENABLE);
}
static int fsl_espi_probe(struct device *dev, struct resource *mem,
unsigned int irq, unsigned int num_cs)
{
struct spi_controller *host;
struct fsl_espi *espi;
int ret;
host = spi_alloc_host(dev, sizeof(struct fsl_espi));
if (!host)
return -ENOMEM;
dev_set_drvdata(dev, host);
host->mode_bits = SPI_RX_DUAL | SPI_CPOL | SPI_CPHA | SPI_CS_HIGH |
SPI_LSB_FIRST | SPI_LOOP;
host->dev.of_node = dev->of_node;
host->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
host->setup = fsl_espi_setup;
host->cleanup = fsl_espi_cleanup;
host->transfer_one_message = fsl_espi_do_one_msg;
host->auto_runtime_pm = true;
host->max_message_size = fsl_espi_max_message_size;
host->num_chipselect = num_cs;
espi = spi_controller_get_devdata(host);
spin_lock_init(&espi->lock);
espi->dev = dev;
espi->spibrg = fsl_get_sys_freq();
if (espi->spibrg == -1) {
dev_err(dev, "Can't get sys frequency!\n");
ret = -EINVAL;
goto err_probe;
}
/* determined by clock divider fields DIV16/PM in register SPMODEx */
host->min_speed_hz = DIV_ROUND_UP(espi->spibrg, 4 * 16 * 16);
host->max_speed_hz = DIV_ROUND_UP(espi->spibrg, 4);
init_completion(&espi->done);
espi->reg_base = devm_ioremap_resource(dev, mem);
if (IS_ERR(espi->reg_base)) {
ret = PTR_ERR(espi->reg_base);
goto err_probe;
}
/* Register for SPI Interrupt */
ret = devm_request_irq(dev, irq, fsl_espi_irq, 0, "fsl_espi", espi);
if (ret)
goto err_probe;
fsl_espi_init_regs(dev, true);
pm_runtime_set_autosuspend_delay(dev, AUTOSUSPEND_TIMEOUT);
pm_runtime_use_autosuspend(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
ret = devm_spi_register_controller(dev, host);
if (ret < 0)
goto err_pm;
dev_info(dev, "irq = %u\n", irq);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
err_pm:
pm_runtime_put_noidle(dev);
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
err_probe:
spi_controller_put(host);
return ret;
}
static int of_fsl_espi_get_chipselects(struct device *dev)
{
struct device_node *np = dev->of_node;
u32 num_cs;
int ret;
ret = of_property_read_u32(np, "fsl,espi-num-chipselects", &num_cs);
if (ret) {
dev_err(dev, "No 'fsl,espi-num-chipselects' property\n");
return 0;
}
return num_cs;
}
static int of_fsl_espi_probe(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
struct device_node *np = ofdev->dev.of_node;
struct resource mem;
unsigned int irq, num_cs;
int ret;
if (of_property_read_bool(np, "mode")) {
dev_err(dev, "mode property is not supported on ESPI!\n");
return -EINVAL;
}
num_cs = of_fsl_espi_get_chipselects(dev);
if (!num_cs)
return -EINVAL;
ret = of_address_to_resource(np, 0, &mem);
if (ret)
return ret;
irq = irq_of_parse_and_map(np, 0);
if (!irq)
return -EINVAL;
return fsl_espi_probe(dev, &mem, irq, num_cs);
}
static void of_fsl_espi_remove(struct platform_device *dev)
{
pm_runtime_disable(&dev->dev);
}
#ifdef CONFIG_PM_SLEEP
static int of_fsl_espi_suspend(struct device *dev)
{
struct spi_controller *host = dev_get_drvdata(dev);
int ret;
ret = spi_controller_suspend(host);
if (ret)
return ret;
return pm_runtime_force_suspend(dev);
}
static int of_fsl_espi_resume(struct device *dev)
{
struct spi_controller *host = dev_get_drvdata(dev);
int ret;
fsl_espi_init_regs(dev, false);
ret = pm_runtime_force_resume(dev);
if (ret < 0)
return ret;
return spi_controller_resume(host);
}
#endif /* CONFIG_PM_SLEEP */
static const struct dev_pm_ops espi_pm = {
SET_RUNTIME_PM_OPS(fsl_espi_runtime_suspend,
fsl_espi_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(of_fsl_espi_suspend, of_fsl_espi_resume)
};
static const struct of_device_id of_fsl_espi_match[] = {
{ .compatible = "fsl,mpc8536-espi" },
{}
};
MODULE_DEVICE_TABLE(of, of_fsl_espi_match);
static struct platform_driver fsl_espi_driver = {
.driver = {
.name = "fsl_espi",
.of_match_table = of_fsl_espi_match,
.pm = &espi_pm,
},
.probe = of_fsl_espi_probe,
.remove_new = of_fsl_espi_remove,
};
module_platform_driver(fsl_espi_driver);
MODULE_AUTHOR("Mingkai Hu");
MODULE_DESCRIPTION("Enhanced Freescale SPI Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-fsl-espi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2012 Thomas Langer <[email protected]>
*/
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <lantiq_soc.h>
#define DRV_NAME "sflash-falcon"
#define FALCON_SPI_XFER_BEGIN (1 << 0)
#define FALCON_SPI_XFER_END (1 << 1)
/* Bus Read Configuration Register0 */
#define BUSRCON0 0x00000010
/* Bus Write Configuration Register0 */
#define BUSWCON0 0x00000018
/* Serial Flash Configuration Register */
#define SFCON 0x00000080
/* Serial Flash Time Register */
#define SFTIME 0x00000084
/* Serial Flash Status Register */
#define SFSTAT 0x00000088
/* Serial Flash Command Register */
#define SFCMD 0x0000008C
/* Serial Flash Address Register */
#define SFADDR 0x00000090
/* Serial Flash Data Register */
#define SFDATA 0x00000094
/* Serial Flash I/O Control Register */
#define SFIO 0x00000098
/* EBU Clock Control Register */
#define EBUCC 0x000000C4
/* Dummy Phase Length */
#define SFCMD_DUMLEN_OFFSET 16
#define SFCMD_DUMLEN_MASK 0x000F0000
/* Chip Select */
#define SFCMD_CS_OFFSET 24
#define SFCMD_CS_MASK 0x07000000
/* field offset */
#define SFCMD_ALEN_OFFSET 20
#define SFCMD_ALEN_MASK 0x00700000
/* SCK Rise-edge Position */
#define SFTIME_SCKR_POS_OFFSET 8
#define SFTIME_SCKR_POS_MASK 0x00000F00
/* SCK Period */
#define SFTIME_SCK_PER_OFFSET 0
#define SFTIME_SCK_PER_MASK 0x0000000F
/* SCK Fall-edge Position */
#define SFTIME_SCKF_POS_OFFSET 12
#define SFTIME_SCKF_POS_MASK 0x0000F000
/* Device Size */
#define SFCON_DEV_SIZE_A23_0 0x03000000
#define SFCON_DEV_SIZE_MASK 0x0F000000
/* Read Data Position */
#define SFTIME_RD_POS_MASK 0x000F0000
/* Data Output */
#define SFIO_UNUSED_WD_MASK 0x0000000F
/* Command Opcode mask */
#define SFCMD_OPC_MASK 0x000000FF
/* dlen bytes of data to write */
#define SFCMD_DIR_WRITE 0x00000100
/* Data Length offset */
#define SFCMD_DLEN_OFFSET 9
/* Command Error */
#define SFSTAT_CMD_ERR 0x20000000
/* Access Command Pending */
#define SFSTAT_CMD_PEND 0x00400000
/* Frequency set to 100MHz. */
#define EBUCC_EBUDIV_SELF100 0x00000001
/* Serial Flash */
#define BUSRCON0_AGEN_SERIAL_FLASH 0xF0000000
/* 8-bit multiplexed */
#define BUSRCON0_PORTW_8_BIT_MUX 0x00000000
/* Serial Flash */
#define BUSWCON0_AGEN_SERIAL_FLASH 0xF0000000
/* Chip Select after opcode */
#define SFCMD_KEEP_CS_KEEP_SELECTED 0x00008000
#define CLOCK_100M 100000000
#define CLOCK_50M 50000000
struct falcon_sflash {
u32 sfcmd; /* for caching of opcode, direction, ... */
struct spi_controller *host;
};
int falcon_sflash_xfer(struct spi_device *spi, struct spi_transfer *t,
unsigned long flags)
{
struct device *dev = &spi->dev;
struct falcon_sflash *priv = spi_controller_get_devdata(spi->controller);
const u8 *txp = t->tx_buf;
u8 *rxp = t->rx_buf;
unsigned int bytelen = ((8 * t->len + 7) / 8);
unsigned int len, alen, dumlen;
u32 val;
enum {
state_init,
state_command_prepare,
state_write,
state_read,
state_disable_cs,
state_end
} state = state_init;
do {
switch (state) {
case state_init: /* detect phase of upper layer sequence */
{
/* initial write ? */
if (flags & FALCON_SPI_XFER_BEGIN) {
if (!txp) {
dev_err(dev,
"BEGIN without tx data!\n");
return -ENODATA;
}
/*
* Prepare the parts of the sfcmd register,
* which should not change during a sequence!
* Only exception are the length fields,
* especially alen and dumlen.
*/
priv->sfcmd = ((spi_get_chipselect(spi, 0)
<< SFCMD_CS_OFFSET)
& SFCMD_CS_MASK);
priv->sfcmd |= SFCMD_KEEP_CS_KEEP_SELECTED;
priv->sfcmd |= *txp;
txp++;
bytelen--;
if (bytelen) {
/*
* more data:
* maybe address and/or dummy
*/
state = state_command_prepare;
break;
} else {
dev_dbg(dev, "write cmd %02X\n",
priv->sfcmd & SFCMD_OPC_MASK);
}
}
/* continued write ? */
if (txp && bytelen) {
state = state_write;
break;
}
/* read data? */
if (rxp && bytelen) {
state = state_read;
break;
}
/* end of sequence? */
if (flags & FALCON_SPI_XFER_END)
state = state_disable_cs;
else
state = state_end;
break;
}
/* collect tx data for address and dummy phase */
case state_command_prepare:
{
/* txp is valid, already checked */
val = 0;
alen = 0;
dumlen = 0;
while (bytelen > 0) {
if (alen < 3) {
val = (val << 8) | (*txp++);
alen++;
} else if ((dumlen < 15) && (*txp == 0)) {
/*
* assume dummy bytes are set to 0
* from upper layer
*/
dumlen++;
txp++;
} else {
break;
}
bytelen--;
}
priv->sfcmd &= ~(SFCMD_ALEN_MASK | SFCMD_DUMLEN_MASK);
priv->sfcmd |= (alen << SFCMD_ALEN_OFFSET) |
(dumlen << SFCMD_DUMLEN_OFFSET);
if (alen > 0)
ltq_ebu_w32(val, SFADDR);
dev_dbg(dev, "wr %02X, alen=%d (addr=%06X) dlen=%d\n",
priv->sfcmd & SFCMD_OPC_MASK,
alen, val, dumlen);
if (bytelen > 0) {
/* continue with write */
state = state_write;
} else if (flags & FALCON_SPI_XFER_END) {
/* end of sequence? */
state = state_disable_cs;
} else {
/*
* go to end and expect another
* call (read or write)
*/
state = state_end;
}
break;
}
case state_write:
{
/* txp still valid */
priv->sfcmd |= SFCMD_DIR_WRITE;
len = 0;
val = 0;
do {
if (bytelen--)
val |= (*txp++) << (8 * len++);
if ((flags & FALCON_SPI_XFER_END)
&& (bytelen == 0)) {
priv->sfcmd &=
~SFCMD_KEEP_CS_KEEP_SELECTED;
}
if ((len == 4) || (bytelen == 0)) {
ltq_ebu_w32(val, SFDATA);
ltq_ebu_w32(priv->sfcmd
| (len<<SFCMD_DLEN_OFFSET),
SFCMD);
len = 0;
val = 0;
priv->sfcmd &= ~(SFCMD_ALEN_MASK
| SFCMD_DUMLEN_MASK);
}
} while (bytelen);
state = state_end;
break;
}
case state_read:
{
/* read data */
priv->sfcmd &= ~SFCMD_DIR_WRITE;
do {
if ((flags & FALCON_SPI_XFER_END)
&& (bytelen <= 4)) {
priv->sfcmd &=
~SFCMD_KEEP_CS_KEEP_SELECTED;
}
len = (bytelen > 4) ? 4 : bytelen;
bytelen -= len;
ltq_ebu_w32(priv->sfcmd
| (len << SFCMD_DLEN_OFFSET), SFCMD);
priv->sfcmd &= ~(SFCMD_ALEN_MASK
| SFCMD_DUMLEN_MASK);
do {
val = ltq_ebu_r32(SFSTAT);
if (val & SFSTAT_CMD_ERR) {
/* reset error status */
dev_err(dev, "SFSTAT: CMD_ERR");
dev_err(dev, " (%x)\n", val);
ltq_ebu_w32(SFSTAT_CMD_ERR,
SFSTAT);
return -EBADE;
}
} while (val & SFSTAT_CMD_PEND);
val = ltq_ebu_r32(SFDATA);
do {
*rxp = (val & 0xFF);
rxp++;
val >>= 8;
len--;
} while (len);
} while (bytelen);
state = state_end;
break;
}
case state_disable_cs:
{
priv->sfcmd &= ~SFCMD_KEEP_CS_KEEP_SELECTED;
ltq_ebu_w32(priv->sfcmd | (0 << SFCMD_DLEN_OFFSET),
SFCMD);
val = ltq_ebu_r32(SFSTAT);
if (val & SFSTAT_CMD_ERR) {
/* reset error status */
dev_err(dev, "SFSTAT: CMD_ERR (%x)\n", val);
ltq_ebu_w32(SFSTAT_CMD_ERR, SFSTAT);
return -EBADE;
}
state = state_end;
break;
}
case state_end:
break;
}
} while (state != state_end);
return 0;
}
static int falcon_sflash_setup(struct spi_device *spi)
{
unsigned int i;
unsigned long flags;
spin_lock_irqsave(&ebu_lock, flags);
if (spi->max_speed_hz >= CLOCK_100M) {
/* set EBU clock to 100 MHz */
ltq_sys1_w32_mask(0, EBUCC_EBUDIV_SELF100, EBUCC);
i = 1; /* divider */
} else {
/* set EBU clock to 50 MHz */
ltq_sys1_w32_mask(EBUCC_EBUDIV_SELF100, 0, EBUCC);
/* search for suitable divider */
for (i = 1; i < 7; i++) {
if (CLOCK_50M / i <= spi->max_speed_hz)
break;
}
}
/* setup period of serial clock */
ltq_ebu_w32_mask(SFTIME_SCKF_POS_MASK
| SFTIME_SCKR_POS_MASK
| SFTIME_SCK_PER_MASK,
(i << SFTIME_SCKR_POS_OFFSET)
| (i << (SFTIME_SCK_PER_OFFSET + 1)),
SFTIME);
/*
* set some bits of unused_wd so that the HOLD/WP signals are not
* triggered on non-QUAD flashes
*/
ltq_ebu_w32((SFIO_UNUSED_WD_MASK & (0x8 | 0x4)), SFIO);
ltq_ebu_w32(BUSRCON0_AGEN_SERIAL_FLASH | BUSRCON0_PORTW_8_BIT_MUX,
BUSRCON0);
ltq_ebu_w32(BUSWCON0_AGEN_SERIAL_FLASH, BUSWCON0);
/* set address wrap around to maximum for 24-bit addresses */
ltq_ebu_w32_mask(SFCON_DEV_SIZE_MASK, SFCON_DEV_SIZE_A23_0, SFCON);
spin_unlock_irqrestore(&ebu_lock, flags);
return 0;
}
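/*
 * Worked example for the divider search above (illustrative only): for
 * spi->max_speed_hz = 20 MHz the 100 MHz EBU clock is not selected, and
 * the loop stops at i = 3 because 50 MHz / 3 (about 16.7 MHz) is the
 * first value at or below the requested speed.
 */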
static int falcon_sflash_xfer_one(struct spi_controller *host,
struct spi_message *m)
{
struct falcon_sflash *priv = spi_controller_get_devdata(host);
struct spi_transfer *t;
unsigned long spi_flags;
unsigned long flags;
int ret = 0;
priv->sfcmd = 0;
m->actual_length = 0;
spi_flags = FALCON_SPI_XFER_BEGIN;
list_for_each_entry(t, &m->transfers, transfer_list) {
if (list_is_last(&t->transfer_list, &m->transfers))
spi_flags |= FALCON_SPI_XFER_END;
spin_lock_irqsave(&ebu_lock, flags);
ret = falcon_sflash_xfer(m->spi, t, spi_flags);
spin_unlock_irqrestore(&ebu_lock, flags);
if (ret)
break;
m->actual_length += t->len;
WARN_ON(t->delay.value || t->cs_change);
spi_flags = 0;
}
m->status = ret;
spi_finalize_current_message(host);
return 0;
}
static int falcon_sflash_probe(struct platform_device *pdev)
{
struct falcon_sflash *priv;
struct spi_controller *host;
int ret;
host = spi_alloc_host(&pdev->dev, sizeof(*priv));
if (!host)
return -ENOMEM;
priv = spi_controller_get_devdata(host);
priv->host = host;
host->mode_bits = SPI_MODE_3;
host->flags = SPI_CONTROLLER_HALF_DUPLEX;
host->setup = falcon_sflash_setup;
host->transfer_one_message = falcon_sflash_xfer_one;
host->dev.of_node = pdev->dev.of_node;
ret = devm_spi_register_controller(&pdev->dev, host);
if (ret)
spi_controller_put(host);
return ret;
}
static const struct of_device_id falcon_sflash_match[] = {
{ .compatible = "lantiq,sflash-falcon" },
{},
};
MODULE_DEVICE_TABLE(of, falcon_sflash_match);
static struct platform_driver falcon_sflash_driver = {
.probe = falcon_sflash_probe,
.driver = {
.name = DRV_NAME,
.of_match_table = falcon_sflash_match,
}
};
module_platform_driver(falcon_sflash_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Lantiq Falcon SPI/SFLASH controller driver");
| linux-master | drivers/spi/spi-falcon.c |
// SPDX-License-Identifier: GPL-2.0
//
// CS42L43 SPI Controller Driver
//
// Copyright (C) 2022-2023 Cirrus Logic, Inc. and
// Cirrus Logic International Semiconductor Ltd.
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/mfd/cs42l43.h>
#include <linux/mfd/cs42l43-regs.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
#include <linux/units.h>
#define CS42L43_FIFO_SIZE 16
#define CS42L43_SPI_ROOT_HZ (40 * HZ_PER_MHZ)
#define CS42L43_SPI_MAX_LENGTH 65532
enum cs42l43_spi_cmd {
CS42L43_WRITE,
CS42L43_READ
};
struct cs42l43_spi {
struct device *dev;
struct regmap *regmap;
struct spi_controller *ctlr;
};
static const unsigned int cs42l43_clock_divs[] = {
2, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
};
static int cs42l43_spi_tx(struct regmap *regmap, const u8 *buf, unsigned int len)
{
const u8 *end = buf + len;
u32 val = 0;
int ret;
while (buf < end) {
const u8 *block = min(buf + CS42L43_FIFO_SIZE, end);
while (buf < block) {
const u8 *word = min(buf + sizeof(u32), block);
int pad = (buf + sizeof(u32)) - word;
while (buf < word) {
val >>= BITS_PER_BYTE;
val |= FIELD_PREP(GENMASK(31, 24), *buf);
buf++;
}
val >>= pad * BITS_PER_BYTE;
regmap_write(regmap, CS42L43_TX_DATA, val);
}
regmap_write(regmap, CS42L43_TRAN_CONFIG8, CS42L43_SPI_TX_DONE_MASK);
ret = regmap_read_poll_timeout(regmap, CS42L43_TRAN_STATUS1,
val, (val & CS42L43_SPI_TX_REQUEST_MASK),
1000, 5000);
if (ret)
return ret;
}
return 0;
}
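/*
 * Illustrative example of the packing above (not part of the original
 * driver): a fresh transfer writing the three bytes { 0xAA, 0xBB, 0xCC }
 * builds val = 0xCCBBAA00, and the final shift by pad * BITS_PER_BYTE
 * (one byte here) yields 0x00CCBBAA, i.e. the first buffer byte ends up
 * in the least significant byte of the word written to CS42L43_TX_DATA.
 */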
static int cs42l43_spi_rx(struct regmap *regmap, u8 *buf, unsigned int len)
{
u8 *end = buf + len;
u32 val;
int ret;
while (buf < end) {
u8 *block = min(buf + CS42L43_FIFO_SIZE, end);
ret = regmap_read_poll_timeout(regmap, CS42L43_TRAN_STATUS1,
val, (val & CS42L43_SPI_RX_REQUEST_MASK),
1000, 5000);
if (ret)
return ret;
while (buf < block) {
u8 *word = min(buf + sizeof(u32), block);
ret = regmap_read(regmap, CS42L43_RX_DATA, &val);
if (ret)
return ret;
while (buf < word) {
*buf = FIELD_GET(GENMASK(7, 0), val);
val >>= BITS_PER_BYTE;
buf++;
}
}
regmap_write(regmap, CS42L43_TRAN_CONFIG8, CS42L43_SPI_RX_DONE_MASK);
}
return 0;
}
static int cs42l43_transfer_one(struct spi_controller *ctlr, struct spi_device *spi,
struct spi_transfer *tfr)
{
struct cs42l43_spi *priv = spi_controller_get_devdata(spi->controller);
int i, ret = -EINVAL;
for (i = 0; i < ARRAY_SIZE(cs42l43_clock_divs); i++) {
if (CS42L43_SPI_ROOT_HZ / cs42l43_clock_divs[i] <= tfr->speed_hz)
break;
}
if (i == ARRAY_SIZE(cs42l43_clock_divs))
return -EINVAL;
regmap_write(priv->regmap, CS42L43_SPI_CLK_CONFIG1, i);
if (tfr->tx_buf) {
regmap_write(priv->regmap, CS42L43_TRAN_CONFIG3, CS42L43_WRITE);
regmap_write(priv->regmap, CS42L43_TRAN_CONFIG4, tfr->len - 1);
} else if (tfr->rx_buf) {
regmap_write(priv->regmap, CS42L43_TRAN_CONFIG3, CS42L43_READ);
regmap_write(priv->regmap, CS42L43_TRAN_CONFIG5, tfr->len - 1);
}
regmap_write(priv->regmap, CS42L43_TRAN_CONFIG1, CS42L43_SPI_START_MASK);
if (tfr->tx_buf)
ret = cs42l43_spi_tx(priv->regmap, (const u8 *)tfr->tx_buf, tfr->len);
else if (tfr->rx_buf)
ret = cs42l43_spi_rx(priv->regmap, (u8 *)tfr->rx_buf, tfr->len);
return ret;
}
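/*
 * Illustrative example for the divider selection above: a transfer at
 * 5 MHz on the 40 MHz root clock stops at i = 4, where
 * cs42l43_clock_divs[4] = 8 (40 MHz / 8 = 5 MHz), and that index is what
 * gets written to CS42L43_SPI_CLK_CONFIG1.
 */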
static void cs42l43_set_cs(struct spi_device *spi, bool is_high)
{
struct cs42l43_spi *priv = spi_controller_get_devdata(spi->controller);
if (spi_get_chipselect(spi, 0) == 0)
regmap_write(priv->regmap, CS42L43_SPI_CONFIG2, !is_high);
}
static int cs42l43_prepare_message(struct spi_controller *ctlr, struct spi_message *msg)
{
struct cs42l43_spi *priv = spi_controller_get_devdata(ctlr);
struct spi_device *spi = msg->spi;
unsigned int spi_config1 = 0;
/* select a non-existent internal CS so that internal CS 0 is not used when the chip select is driven by a GPIO */
if (spi_get_csgpiod(spi, 0))
spi_config1 |= 1 << CS42L43_SPI_SS_SEL_SHIFT;
if (spi->mode & SPI_CPOL)
spi_config1 |= CS42L43_SPI_CPOL_MASK;
if (spi->mode & SPI_CPHA)
spi_config1 |= CS42L43_SPI_CPHA_MASK;
if (spi->mode & SPI_3WIRE)
spi_config1 |= CS42L43_SPI_THREE_WIRE_MASK;
regmap_write(priv->regmap, CS42L43_SPI_CONFIG1, spi_config1);
return 0;
}
static int cs42l43_prepare_transfer_hardware(struct spi_controller *ctlr)
{
struct cs42l43_spi *priv = spi_controller_get_devdata(ctlr);
int ret;
ret = regmap_write(priv->regmap, CS42L43_BLOCK_EN2, CS42L43_SPI_MSTR_EN_MASK);
if (ret)
dev_err(priv->dev, "Failed to enable SPI controller: %d\n", ret);
return ret;
}
static int cs42l43_unprepare_transfer_hardware(struct spi_controller *ctlr)
{
struct cs42l43_spi *priv = spi_controller_get_devdata(ctlr);
int ret;
ret = regmap_write(priv->regmap, CS42L43_BLOCK_EN2, 0);
if (ret)
dev_err(priv->dev, "Failed to disable SPI controller: %d\n", ret);
return ret;
}
static size_t cs42l43_spi_max_length(struct spi_device *spi)
{
return CS42L43_SPI_MAX_LENGTH;
}
static int cs42l43_spi_probe(struct platform_device *pdev)
{
struct cs42l43 *cs42l43 = dev_get_drvdata(pdev->dev.parent);
struct cs42l43_spi *priv;
struct fwnode_handle *fwnode = dev_fwnode(cs42l43->dev);
int ret;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*priv->ctlr));
if (!priv->ctlr)
return -ENOMEM;
spi_controller_set_devdata(priv->ctlr, priv);
priv->dev = &pdev->dev;
priv->regmap = cs42l43->regmap;
priv->ctlr->prepare_message = cs42l43_prepare_message;
priv->ctlr->prepare_transfer_hardware = cs42l43_prepare_transfer_hardware;
priv->ctlr->unprepare_transfer_hardware = cs42l43_unprepare_transfer_hardware;
priv->ctlr->transfer_one = cs42l43_transfer_one;
priv->ctlr->set_cs = cs42l43_set_cs;
priv->ctlr->max_transfer_size = cs42l43_spi_max_length;
if (is_of_node(fwnode))
fwnode = fwnode_get_named_child_node(fwnode, "spi");
device_set_node(&priv->ctlr->dev, fwnode);
priv->ctlr->mode_bits = SPI_3WIRE | SPI_MODE_X_MASK;
priv->ctlr->flags = SPI_CONTROLLER_HALF_DUPLEX;
priv->ctlr->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
SPI_BPW_MASK(32);
priv->ctlr->min_speed_hz = CS42L43_SPI_ROOT_HZ /
cs42l43_clock_divs[ARRAY_SIZE(cs42l43_clock_divs) - 1];
priv->ctlr->max_speed_hz = CS42L43_SPI_ROOT_HZ / cs42l43_clock_divs[0];
priv->ctlr->use_gpio_descriptors = true;
priv->ctlr->auto_runtime_pm = true;
devm_pm_runtime_enable(priv->dev);
pm_runtime_idle(priv->dev);
regmap_write(priv->regmap, CS42L43_TRAN_CONFIG6, CS42L43_FIFO_SIZE - 1);
regmap_write(priv->regmap, CS42L43_TRAN_CONFIG7, CS42L43_FIFO_SIZE - 1);
// Disable Watchdog timer and enable stall
regmap_write(priv->regmap, CS42L43_SPI_CONFIG3, 0);
regmap_write(priv->regmap, CS42L43_SPI_CONFIG4, CS42L43_SPI_STALL_ENA_MASK);
ret = devm_spi_register_controller(priv->dev, priv->ctlr);
if (ret) {
pm_runtime_disable(priv->dev);
dev_err(priv->dev, "Failed to register SPI controller: %d\n", ret);
}
return ret;
}
static const struct platform_device_id cs42l43_spi_id_table[] = {
{ "cs42l43-spi", },
{}
};
MODULE_DEVICE_TABLE(platform, cs42l43_spi_id_table);
static struct platform_driver cs42l43_spi_driver = {
.driver = {
.name = "cs42l43-spi",
},
.probe = cs42l43_spi_probe,
.id_table = cs42l43_spi_id_table,
};
module_platform_driver(cs42l43_spi_driver);
MODULE_DESCRIPTION("CS42L43 SPI Driver");
MODULE_AUTHOR("Lucas Tanure <[email protected]>");
MODULE_AUTHOR("Maciej Strozek <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-cs42l43.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* OpenCores tiny SPI master driver
*
* https://opencores.org/project,tiny_spi
*
* Copyright (C) 2011 Thomas Chou <[email protected]>
*
* Based on spi_s3c24xx.c, which is:
* Copyright (c) 2006 Ben Dooks
* Copyright (c) 2006 Simtec Electronics
* Ben Dooks <[email protected]>
*/
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/spi/spi_oc_tiny.h>
#include <linux/io.h>
#include <linux/of.h>
#define DRV_NAME "spi_oc_tiny"
#define TINY_SPI_RXDATA 0
#define TINY_SPI_TXDATA 4
#define TINY_SPI_STATUS 8
#define TINY_SPI_CONTROL 12
#define TINY_SPI_BAUD 16
#define TINY_SPI_STATUS_TXE 0x1
#define TINY_SPI_STATUS_TXR 0x2
struct tiny_spi {
/* bitbang has to be first */
struct spi_bitbang bitbang;
struct completion done;
void __iomem *base;
int irq;
unsigned int freq;
unsigned int baudwidth;
unsigned int baud;
unsigned int speed_hz;
unsigned int mode;
unsigned int len;
unsigned int txc, rxc;
const u8 *txp;
u8 *rxp;
};
static inline struct tiny_spi *tiny_spi_to_hw(struct spi_device *sdev)
{
return spi_master_get_devdata(sdev->master);
}
static unsigned int tiny_spi_baud(struct spi_device *spi, unsigned int hz)
{
struct tiny_spi *hw = tiny_spi_to_hw(spi);
return min(DIV_ROUND_UP(hw->freq, hz * 2), (1U << hw->baudwidth)) - 1;
}
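/*
 * Illustrative example (assuming the core divides its input clock by
 * 2 * (baud + 1)): with hw->freq = 100 MHz and a requested 10 MHz,
 * DIV_ROUND_UP(100000000, 10000000 * 2) = 5, so baud = 4 and the
 * resulting SCK would be 100 MHz / (2 * 5) = 10 MHz. The min() against
 * 1 << baudwidth clamps the divider to what the register can hold.
 */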
static int tiny_spi_setup_transfer(struct spi_device *spi,
struct spi_transfer *t)
{
struct tiny_spi *hw = tiny_spi_to_hw(spi);
unsigned int baud = hw->baud;
if (t) {
if (t->speed_hz && t->speed_hz != hw->speed_hz)
baud = tiny_spi_baud(spi, t->speed_hz);
}
writel(baud, hw->base + TINY_SPI_BAUD);
writel(hw->mode, hw->base + TINY_SPI_CONTROL);
return 0;
}
static int tiny_spi_setup(struct spi_device *spi)
{
struct tiny_spi *hw = tiny_spi_to_hw(spi);
if (spi->max_speed_hz != hw->speed_hz) {
hw->speed_hz = spi->max_speed_hz;
hw->baud = tiny_spi_baud(spi, hw->speed_hz);
}
hw->mode = spi->mode & SPI_MODE_X_MASK;
return 0;
}
static inline void tiny_spi_wait_txr(struct tiny_spi *hw)
{
while (!(readb(hw->base + TINY_SPI_STATUS) &
TINY_SPI_STATUS_TXR))
cpu_relax();
}
static inline void tiny_spi_wait_txe(struct tiny_spi *hw)
{
while (!(readb(hw->base + TINY_SPI_STATUS) &
TINY_SPI_STATUS_TXE))
cpu_relax();
}
static int tiny_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
{
struct tiny_spi *hw = tiny_spi_to_hw(spi);
const u8 *txp = t->tx_buf;
u8 *rxp = t->rx_buf;
unsigned int i;
if (hw->irq >= 0) {
/* use interrupt driven data transfer */
hw->len = t->len;
hw->txp = t->tx_buf;
hw->rxp = t->rx_buf;
hw->txc = 0;
hw->rxc = 0;
/* send the first byte */
if (t->len > 1) {
writeb(hw->txp ? *hw->txp++ : 0,
hw->base + TINY_SPI_TXDATA);
hw->txc++;
writeb(hw->txp ? *hw->txp++ : 0,
hw->base + TINY_SPI_TXDATA);
hw->txc++;
writeb(TINY_SPI_STATUS_TXR, hw->base + TINY_SPI_STATUS);
} else {
writeb(hw->txp ? *hw->txp++ : 0,
hw->base + TINY_SPI_TXDATA);
hw->txc++;
writeb(TINY_SPI_STATUS_TXE, hw->base + TINY_SPI_STATUS);
}
wait_for_completion(&hw->done);
} else {
/* we need to tighten the transfer loop */
writeb(txp ? *txp++ : 0, hw->base + TINY_SPI_TXDATA);
for (i = 1; i < t->len; i++) {
writeb(txp ? *txp++ : 0, hw->base + TINY_SPI_TXDATA);
if (rxp || (i != t->len - 1))
tiny_spi_wait_txr(hw);
if (rxp)
*rxp++ = readb(hw->base + TINY_SPI_TXDATA);
}
tiny_spi_wait_txe(hw);
if (rxp)
*rxp++ = readb(hw->base + TINY_SPI_RXDATA);
}
return t->len;
}
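/*
 * Illustrative note: in the interrupt-driven path above, two bytes are
 * queued up front (when the transfer is longer than one byte) so that
 * both the shifter and the TX holding register are busy; each TXR
 * interrupt in tiny_spi_irq() then reads one received byte and refills
 * one transmit byte until hw->len bytes have been exchanged.
 */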
static irqreturn_t tiny_spi_irq(int irq, void *dev)
{
struct tiny_spi *hw = dev;
writeb(0, hw->base + TINY_SPI_STATUS);
if (hw->rxc + 1 == hw->len) {
if (hw->rxp)
*hw->rxp++ = readb(hw->base + TINY_SPI_RXDATA);
hw->rxc++;
complete(&hw->done);
} else {
if (hw->rxp)
*hw->rxp++ = readb(hw->base + TINY_SPI_TXDATA);
hw->rxc++;
if (hw->txc < hw->len) {
writeb(hw->txp ? *hw->txp++ : 0,
hw->base + TINY_SPI_TXDATA);
hw->txc++;
writeb(TINY_SPI_STATUS_TXR,
hw->base + TINY_SPI_STATUS);
} else {
writeb(TINY_SPI_STATUS_TXE,
hw->base + TINY_SPI_STATUS);
}
}
return IRQ_HANDLED;
}
#ifdef CONFIG_OF
#include <linux/of_gpio.h>
static int tiny_spi_of_probe(struct platform_device *pdev)
{
struct tiny_spi *hw = platform_get_drvdata(pdev);
struct device_node *np = pdev->dev.of_node;
u32 val;
if (!np)
return 0;
hw->bitbang.master->dev.of_node = pdev->dev.of_node;
if (!of_property_read_u32(np, "clock-frequency", &val))
hw->freq = val;
if (!of_property_read_u32(np, "baud-width", &val))
hw->baudwidth = val;
return 0;
}
#else /* !CONFIG_OF */
static int tiny_spi_of_probe(struct platform_device *pdev)
{
return 0;
}
#endif /* CONFIG_OF */
static int tiny_spi_probe(struct platform_device *pdev)
{
struct tiny_spi_platform_data *platp = dev_get_platdata(&pdev->dev);
struct tiny_spi *hw;
struct spi_master *master;
int err = -ENODEV;
master = spi_alloc_master(&pdev->dev, sizeof(struct tiny_spi));
if (!master)
return err;
/* setup the master state. */
master->bus_num = pdev->id;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->setup = tiny_spi_setup;
master->use_gpio_descriptors = true;
hw = spi_master_get_devdata(master);
platform_set_drvdata(pdev, hw);
/* setup the state for the bitbang driver */
hw->bitbang.master = master;
hw->bitbang.setup_transfer = tiny_spi_setup_transfer;
hw->bitbang.txrx_bufs = tiny_spi_txrx_bufs;
/* find and map our resources */
hw->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hw->base)) {
err = PTR_ERR(hw->base);
goto exit;
}
/* irq is optional */
hw->irq = platform_get_irq(pdev, 0);
if (hw->irq >= 0) {
init_completion(&hw->done);
err = devm_request_irq(&pdev->dev, hw->irq, tiny_spi_irq, 0,
pdev->name, hw);
if (err)
goto exit;
}
/* find platform data */
if (platp) {
hw->freq = platp->freq;
hw->baudwidth = platp->baudwidth;
} else {
err = tiny_spi_of_probe(pdev);
if (err)
goto exit;
}
/* register our spi controller */
err = spi_bitbang_start(&hw->bitbang);
if (err)
goto exit;
dev_info(&pdev->dev, "base %p, irq %d\n", hw->base, hw->irq);
return 0;
exit:
spi_master_put(master);
return err;
}
static void tiny_spi_remove(struct platform_device *pdev)
{
struct tiny_spi *hw = platform_get_drvdata(pdev);
struct spi_master *master = hw->bitbang.master;
spi_bitbang_stop(&hw->bitbang);
spi_master_put(master);
}
#ifdef CONFIG_OF
static const struct of_device_id tiny_spi_match[] = {
{ .compatible = "opencores,tiny-spi-rtlsvn2", },
{},
};
MODULE_DEVICE_TABLE(of, tiny_spi_match);
#endif /* CONFIG_OF */
static struct platform_driver tiny_spi_driver = {
.probe = tiny_spi_probe,
.remove_new = tiny_spi_remove,
.driver = {
.name = DRV_NAME,
.pm = NULL,
.of_match_table = of_match_ptr(tiny_spi_match),
},
};
module_platform_driver(tiny_spi_driver);
MODULE_DESCRIPTION("OpenCores tiny SPI driver");
MODULE_AUTHOR("Thomas Chou <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
| linux-master | drivers/spi/spi-oc-tiny.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Marvell Armada-3700 SPI controller driver
*
* Copyright (C) 2016 Marvell Ltd.
*
* Author: Wilson Ding <[email protected]>
* Author: Romain Perier <[email protected]>
*/
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/spi/spi.h>
#define DRIVER_NAME "armada_3700_spi"
#define A3700_SPI_MAX_SPEED_HZ 100000000
#define A3700_SPI_MAX_PRESCALE 30
#define A3700_SPI_TIMEOUT 10
/* SPI Register Offset */
#define A3700_SPI_IF_CTRL_REG 0x00
#define A3700_SPI_IF_CFG_REG 0x04
#define A3700_SPI_DATA_OUT_REG 0x08
#define A3700_SPI_DATA_IN_REG 0x0C
#define A3700_SPI_IF_INST_REG 0x10
#define A3700_SPI_IF_ADDR_REG 0x14
#define A3700_SPI_IF_RMODE_REG 0x18
#define A3700_SPI_IF_HDR_CNT_REG 0x1C
#define A3700_SPI_IF_DIN_CNT_REG 0x20
#define A3700_SPI_IF_TIME_REG 0x24
#define A3700_SPI_INT_STAT_REG 0x28
#define A3700_SPI_INT_MASK_REG 0x2C
/* A3700_SPI_IF_CTRL_REG */
#define A3700_SPI_EN BIT(16)
#define A3700_SPI_ADDR_NOT_CONFIG BIT(12)
#define A3700_SPI_WFIFO_OVERFLOW BIT(11)
#define A3700_SPI_WFIFO_UNDERFLOW BIT(10)
#define A3700_SPI_RFIFO_OVERFLOW BIT(9)
#define A3700_SPI_RFIFO_UNDERFLOW BIT(8)
#define A3700_SPI_WFIFO_FULL BIT(7)
#define A3700_SPI_WFIFO_EMPTY BIT(6)
#define A3700_SPI_RFIFO_FULL BIT(5)
#define A3700_SPI_RFIFO_EMPTY BIT(4)
#define A3700_SPI_WFIFO_RDY BIT(3)
#define A3700_SPI_RFIFO_RDY BIT(2)
#define A3700_SPI_XFER_RDY BIT(1)
#define A3700_SPI_XFER_DONE BIT(0)
/* A3700_SPI_IF_CFG_REG */
#define A3700_SPI_WFIFO_THRS BIT(28)
#define A3700_SPI_RFIFO_THRS BIT(24)
#define A3700_SPI_AUTO_CS BIT(20)
#define A3700_SPI_DMA_RD_EN BIT(18)
#define A3700_SPI_FIFO_MODE BIT(17)
#define A3700_SPI_SRST BIT(16)
#define A3700_SPI_XFER_START BIT(15)
#define A3700_SPI_XFER_STOP BIT(14)
#define A3700_SPI_INST_PIN BIT(13)
#define A3700_SPI_ADDR_PIN BIT(12)
#define A3700_SPI_DATA_PIN1 BIT(11)
#define A3700_SPI_DATA_PIN0 BIT(10)
#define A3700_SPI_FIFO_FLUSH BIT(9)
#define A3700_SPI_RW_EN BIT(8)
#define A3700_SPI_CLK_POL BIT(7)
#define A3700_SPI_CLK_PHA BIT(6)
#define A3700_SPI_BYTE_LEN BIT(5)
#define A3700_SPI_CLK_PRESCALE BIT(0)
#define A3700_SPI_CLK_PRESCALE_MASK (0x1f)
#define A3700_SPI_CLK_EVEN_OFFS (0x10)
#define A3700_SPI_WFIFO_THRS_BIT 28
#define A3700_SPI_RFIFO_THRS_BIT 24
#define A3700_SPI_FIFO_THRS_MASK 0x7
#define A3700_SPI_DATA_PIN_MASK 0x3
/* A3700_SPI_IF_HDR_CNT_REG */
#define A3700_SPI_DUMMY_CNT_BIT 12
#define A3700_SPI_DUMMY_CNT_MASK 0x7
#define A3700_SPI_RMODE_CNT_BIT 8
#define A3700_SPI_RMODE_CNT_MASK 0x3
#define A3700_SPI_ADDR_CNT_BIT 4
#define A3700_SPI_ADDR_CNT_MASK 0x7
#define A3700_SPI_INSTR_CNT_BIT 0
#define A3700_SPI_INSTR_CNT_MASK 0x3
/* A3700_SPI_IF_TIME_REG */
#define A3700_SPI_CLK_CAPT_EDGE BIT(7)
struct a3700_spi {
struct spi_controller *host;
void __iomem *base;
struct clk *clk;
unsigned int irq;
unsigned int flags;
bool xmit_data;
const u8 *tx_buf;
u8 *rx_buf;
size_t buf_len;
u8 byte_len;
u32 wait_mask;
struct completion done;
};
static u32 spireg_read(struct a3700_spi *a3700_spi, u32 offset)
{
return readl(a3700_spi->base + offset);
}
static void spireg_write(struct a3700_spi *a3700_spi, u32 offset, u32 data)
{
writel(data, a3700_spi->base + offset);
}
static void a3700_spi_auto_cs_unset(struct a3700_spi *a3700_spi)
{
u32 val;
val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
val &= ~A3700_SPI_AUTO_CS;
spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
}
static void a3700_spi_activate_cs(struct a3700_spi *a3700_spi, unsigned int cs)
{
u32 val;
val = spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG);
val |= (A3700_SPI_EN << cs);
spireg_write(a3700_spi, A3700_SPI_IF_CTRL_REG, val);
}
static void a3700_spi_deactivate_cs(struct a3700_spi *a3700_spi,
unsigned int cs)
{
u32 val;
val = spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG);
val &= ~(A3700_SPI_EN << cs);
spireg_write(a3700_spi, A3700_SPI_IF_CTRL_REG, val);
}
static int a3700_spi_pin_mode_set(struct a3700_spi *a3700_spi,
unsigned int pin_mode, bool receiving)
{
u32 val;
val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
val &= ~(A3700_SPI_INST_PIN | A3700_SPI_ADDR_PIN);
val &= ~(A3700_SPI_DATA_PIN0 | A3700_SPI_DATA_PIN1);
switch (pin_mode) {
case SPI_NBITS_SINGLE:
break;
case SPI_NBITS_DUAL:
val |= A3700_SPI_DATA_PIN0;
break;
case SPI_NBITS_QUAD:
val |= A3700_SPI_DATA_PIN1;
/* RX during address reception uses 4-pin */
if (receiving)
val |= A3700_SPI_ADDR_PIN;
break;
default:
dev_err(&a3700_spi->host->dev, "wrong pin mode %u", pin_mode);
return -EINVAL;
}
spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
return 0;
}
static void a3700_spi_fifo_mode_set(struct a3700_spi *a3700_spi, bool enable)
{
u32 val;
val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
if (enable)
val |= A3700_SPI_FIFO_MODE;
else
val &= ~A3700_SPI_FIFO_MODE;
spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
}
static void a3700_spi_mode_set(struct a3700_spi *a3700_spi,
unsigned int mode_bits)
{
u32 val;
val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
if (mode_bits & SPI_CPOL)
val |= A3700_SPI_CLK_POL;
else
val &= ~A3700_SPI_CLK_POL;
if (mode_bits & SPI_CPHA)
val |= A3700_SPI_CLK_PHA;
else
val &= ~A3700_SPI_CLK_PHA;
spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
}
static void a3700_spi_clock_set(struct a3700_spi *a3700_spi,
unsigned int speed_hz)
{
u32 val;
u32 prescale;
prescale = DIV_ROUND_UP(clk_get_rate(a3700_spi->clk), speed_hz);
/* Prescaler values over 15 can only be set in steps of 2. Starting
* from A3700_SPI_CLK_EVEN_OFFS, values from 0 up to 30 can be
* encoded; only the 16 to 30 part of that range is used here.
*/
if (prescale > 15)
prescale = A3700_SPI_CLK_EVEN_OFFS + DIV_ROUND_UP(prescale, 2);
val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
val = val & ~A3700_SPI_CLK_PRESCALE_MASK;
val = val | (prescale & A3700_SPI_CLK_PRESCALE_MASK);
spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
if (prescale <= 2) {
val = spireg_read(a3700_spi, A3700_SPI_IF_TIME_REG);
val |= A3700_SPI_CLK_CAPT_EDGE;
spireg_write(a3700_spi, A3700_SPI_IF_TIME_REG, val);
}
}
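/*
 * Illustrative example for the prescaler encoding above: a requested
 * 8 MHz on a 160 MHz input clock gives prescale = 20, which is above 15,
 * so the value actually programmed is A3700_SPI_CLK_EVEN_OFFS +
 * DIV_ROUND_UP(20, 2) = 0x10 + 10 = 0x1a, i.e. the "even step" encoding
 * described in the comment above.
 */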
static void a3700_spi_bytelen_set(struct a3700_spi *a3700_spi, unsigned int len)
{
u32 val;
val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
if (len == 4)
val |= A3700_SPI_BYTE_LEN;
else
val &= ~A3700_SPI_BYTE_LEN;
spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
a3700_spi->byte_len = len;
}
static int a3700_spi_fifo_flush(struct a3700_spi *a3700_spi)
{
int timeout = A3700_SPI_TIMEOUT;
u32 val;
val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
val |= A3700_SPI_FIFO_FLUSH;
spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
while (--timeout) {
val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
if (!(val & A3700_SPI_FIFO_FLUSH))
return 0;
udelay(1);
}
return -ETIMEDOUT;
}
static void a3700_spi_init(struct a3700_spi *a3700_spi)
{
struct spi_controller *host = a3700_spi->host;
u32 val;
int i;
/* Reset SPI unit */
val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
val |= A3700_SPI_SRST;
spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
udelay(A3700_SPI_TIMEOUT);
val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
val &= ~A3700_SPI_SRST;
spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
/* Disable AUTO_CS and deactivate all chip-selects */
a3700_spi_auto_cs_unset(a3700_spi);
for (i = 0; i < host->num_chipselect; i++)
a3700_spi_deactivate_cs(a3700_spi, i);
/* Enable FIFO mode */
a3700_spi_fifo_mode_set(a3700_spi, true);
/* Set SPI mode */
a3700_spi_mode_set(a3700_spi, host->mode_bits);
/* Reset counters */
spireg_write(a3700_spi, A3700_SPI_IF_HDR_CNT_REG, 0);
spireg_write(a3700_spi, A3700_SPI_IF_DIN_CNT_REG, 0);
/* Mask the interrupts and clear cause bits */
spireg_write(a3700_spi, A3700_SPI_INT_MASK_REG, 0);
spireg_write(a3700_spi, A3700_SPI_INT_STAT_REG, ~0U);
}
static irqreturn_t a3700_spi_interrupt(int irq, void *dev_id)
{
struct spi_controller *host = dev_id;
struct a3700_spi *a3700_spi;
u32 cause;
a3700_spi = spi_controller_get_devdata(host);
/* Get interrupt causes */
cause = spireg_read(a3700_spi, A3700_SPI_INT_STAT_REG);
if (!cause || !(a3700_spi->wait_mask & cause))
return IRQ_NONE;
/* mask and acknowledge the SPI interrupts */
spireg_write(a3700_spi, A3700_SPI_INT_MASK_REG, 0);
spireg_write(a3700_spi, A3700_SPI_INT_STAT_REG, cause);
/* Wake up the transfer */
complete(&a3700_spi->done);
return IRQ_HANDLED;
}
static bool a3700_spi_wait_completion(struct spi_device *spi)
{
struct a3700_spi *a3700_spi;
unsigned int timeout;
unsigned int ctrl_reg;
unsigned long timeout_jiffies;
a3700_spi = spi_controller_get_devdata(spi->controller);
/* The SPI interrupt is edge-triggered, which means an interrupt is
* generated only when a specific status bit changes from '0' to '1'.
* So before starting to wait for an interrupt, check the status bit
* in the control register first; if it is already 1, there is no
* need to wait for the interrupt.
*/
ctrl_reg = spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG);
if (a3700_spi->wait_mask & ctrl_reg)
return true;
reinit_completion(&a3700_spi->done);
spireg_write(a3700_spi, A3700_SPI_INT_MASK_REG,
a3700_spi->wait_mask);
timeout_jiffies = msecs_to_jiffies(A3700_SPI_TIMEOUT);
timeout = wait_for_completion_timeout(&a3700_spi->done,
timeout_jiffies);
a3700_spi->wait_mask = 0;
if (timeout)
return true;
/* The interrupt may fire right after the status bits were checked
* above and before wait_for_completion_timeout() starts waiting.
* To avoid missing it, double-check the status bits in the control
* register; if the bit is already 1, treat the interrupt as having
* arrived and return true.
*/
ctrl_reg = spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG);
if (a3700_spi->wait_mask & ctrl_reg)
return true;
spireg_write(a3700_spi, A3700_SPI_INT_MASK_REG, 0);
/* Timeout was reached */
return false;
}
static bool a3700_spi_transfer_wait(struct spi_device *spi,
unsigned int bit_mask)
{
struct a3700_spi *a3700_spi;
a3700_spi = spi_controller_get_devdata(spi->controller);
a3700_spi->wait_mask = bit_mask;
return a3700_spi_wait_completion(spi);
}
static void a3700_spi_fifo_thres_set(struct a3700_spi *a3700_spi,
unsigned int bytes)
{
u32 val;
val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
val &= ~(A3700_SPI_FIFO_THRS_MASK << A3700_SPI_RFIFO_THRS_BIT);
val |= (bytes - 1) << A3700_SPI_RFIFO_THRS_BIT;
val &= ~(A3700_SPI_FIFO_THRS_MASK << A3700_SPI_WFIFO_THRS_BIT);
val |= (7 - bytes) << A3700_SPI_WFIFO_THRS_BIT;
spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
}
static void a3700_spi_transfer_setup(struct spi_device *spi,
struct spi_transfer *xfer)
{
struct a3700_spi *a3700_spi;
a3700_spi = spi_controller_get_devdata(spi->controller);
a3700_spi_clock_set(a3700_spi, xfer->speed_hz);
/* Use 4-byte-long transfers. Each transfer method has its own way of
* dealing with the remaining bytes of transfers that are not 4-byte
* aligned.
*/
a3700_spi_bytelen_set(a3700_spi, 4);
/* Initialize the working buffers */
a3700_spi->tx_buf = xfer->tx_buf;
a3700_spi->rx_buf = xfer->rx_buf;
a3700_spi->buf_len = xfer->len;
}
static void a3700_spi_set_cs(struct spi_device *spi, bool enable)
{
struct a3700_spi *a3700_spi = spi_controller_get_devdata(spi->controller);
if (!enable)
a3700_spi_activate_cs(a3700_spi, spi_get_chipselect(spi, 0));
else
a3700_spi_deactivate_cs(a3700_spi, spi_get_chipselect(spi, 0));
}
static void a3700_spi_header_set(struct a3700_spi *a3700_spi)
{
unsigned int addr_cnt;
u32 val = 0;
/* Clear the header registers */
spireg_write(a3700_spi, A3700_SPI_IF_INST_REG, 0);
spireg_write(a3700_spi, A3700_SPI_IF_ADDR_REG, 0);
spireg_write(a3700_spi, A3700_SPI_IF_RMODE_REG, 0);
spireg_write(a3700_spi, A3700_SPI_IF_HDR_CNT_REG, 0);
/* Set header counters */
if (a3700_spi->tx_buf) {
/*
* When the tx data is not 4-byte aligned, unexpected bytes would be
* shifted out of the SPI output register, since it always shifts out
* whole 4-byte words. This can corrupt the transaction with some
* devices. To avoid that, use the SPI header count feature to
* transfer up to 3 bytes of data first, so that the rest of the data
* is 4-byte aligned.
*/
addr_cnt = a3700_spi->buf_len % 4;
if (addr_cnt) {
val = (addr_cnt & A3700_SPI_ADDR_CNT_MASK)
<< A3700_SPI_ADDR_CNT_BIT;
spireg_write(a3700_spi, A3700_SPI_IF_HDR_CNT_REG, val);
/* Update the buffer length to be transferred */
a3700_spi->buf_len -= addr_cnt;
/* transfer 1~3 bytes through address count */
val = 0;
while (addr_cnt--) {
val = (val << 8) | a3700_spi->tx_buf[0];
a3700_spi->tx_buf++;
}
spireg_write(a3700_spi, A3700_SPI_IF_ADDR_REG, val);
}
}
}
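/*
 * Illustrative example for the header handling above: a 6-byte write has
 * addr_cnt = 2, so the first two tx bytes are folded into SFADDR
 * (val = (byte0 << 8) | byte1) and sent through the address phase, while
 * the remaining 4 bytes stay 4-byte aligned for the data FIFO path.
 */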
static int a3700_is_wfifo_full(struct a3700_spi *a3700_spi)
{
u32 val;
val = spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG);
return (val & A3700_SPI_WFIFO_FULL);
}
static int a3700_spi_fifo_write(struct a3700_spi *a3700_spi)
{
u32 val;
while (!a3700_is_wfifo_full(a3700_spi) && a3700_spi->buf_len) {
val = *(u32 *)a3700_spi->tx_buf;
spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, cpu_to_le32(val));
a3700_spi->buf_len -= 4;
a3700_spi->tx_buf += 4;
}
return 0;
}
static int a3700_is_rfifo_empty(struct a3700_spi *a3700_spi)
{
u32 val = spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG);
return (val & A3700_SPI_RFIFO_EMPTY);
}
static int a3700_spi_fifo_read(struct a3700_spi *a3700_spi)
{
u32 val;
while (!a3700_is_rfifo_empty(a3700_spi) && a3700_spi->buf_len) {
val = spireg_read(a3700_spi, A3700_SPI_DATA_IN_REG);
if (a3700_spi->buf_len >= 4) {
val = le32_to_cpu(val);
memcpy(a3700_spi->rx_buf, &val, 4);
a3700_spi->buf_len -= 4;
a3700_spi->rx_buf += 4;
} else {
/*
* When fewer than 4 bytes remain, avoid overwriting memory
* past the buffer and only write the remaining rx bytes.
*/
while (a3700_spi->buf_len) {
*a3700_spi->rx_buf = val & 0xff;
val >>= 8;
a3700_spi->buf_len--;
a3700_spi->rx_buf++;
}
}
}
return 0;
}
static void a3700_spi_transfer_abort_fifo(struct a3700_spi *a3700_spi)
{
int timeout = A3700_SPI_TIMEOUT;
u32 val;
val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
val |= A3700_SPI_XFER_STOP;
spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
while (--timeout) {
val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
if (!(val & A3700_SPI_XFER_START))
break;
udelay(1);
}
a3700_spi_fifo_flush(a3700_spi);
val &= ~A3700_SPI_XFER_STOP;
spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
}
static int a3700_spi_prepare_message(struct spi_controller *host,
struct spi_message *message)
{
struct a3700_spi *a3700_spi = spi_controller_get_devdata(host);
struct spi_device *spi = message->spi;
int ret;
ret = clk_enable(a3700_spi->clk);
if (ret) {
dev_err(&spi->dev, "failed to enable clk with error %d\n", ret);
return ret;
}
/* Flush the FIFOs */
ret = a3700_spi_fifo_flush(a3700_spi);
if (ret)
return ret;
a3700_spi_mode_set(a3700_spi, spi->mode);
return 0;
}
static int a3700_spi_transfer_one_fifo(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct a3700_spi *a3700_spi = spi_controller_get_devdata(host);
int ret = 0, timeout = A3700_SPI_TIMEOUT;
unsigned int nbits = 0, byte_len;
u32 val;
/* Make sure we use FIFO mode */
a3700_spi_fifo_mode_set(a3700_spi, true);
/* Configure FIFO thresholds */
byte_len = xfer->bits_per_word >> 3;
a3700_spi_fifo_thres_set(a3700_spi, byte_len);
if (xfer->tx_buf)
nbits = xfer->tx_nbits;
else if (xfer->rx_buf)
nbits = xfer->rx_nbits;
a3700_spi_pin_mode_set(a3700_spi, nbits, xfer->rx_buf ? true : false);
/* Flush the FIFOs */
a3700_spi_fifo_flush(a3700_spi);
/* Transfer first bytes of data when buffer is not 4-byte aligned */
a3700_spi_header_set(a3700_spi);
if (xfer->rx_buf) {
		/* Clear the WFIFO, since its last 2 bytes are shifted out
		 * during a read operation
		 */
spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, 0);
/* Set read data length */
spireg_write(a3700_spi, A3700_SPI_IF_DIN_CNT_REG,
a3700_spi->buf_len);
/* Start READ transfer */
val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
val &= ~A3700_SPI_RW_EN;
val |= A3700_SPI_XFER_START;
spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
} else if (xfer->tx_buf) {
/* Start Write transfer */
val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
val |= (A3700_SPI_XFER_START | A3700_SPI_RW_EN);
spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
		/*
		 * If there is data to be written to the SPI device, set the
		 * xmit_data flag; otherwise the instruction in SPI_INSTR does
		 * not require a payload and xmit_data stays false.
		 */
a3700_spi->xmit_data = (a3700_spi->buf_len != 0);
}
while (a3700_spi->buf_len) {
if (a3700_spi->tx_buf) {
/* Wait wfifo ready */
if (!a3700_spi_transfer_wait(spi,
A3700_SPI_WFIFO_RDY)) {
dev_err(&spi->dev,
"wait wfifo ready timed out\n");
ret = -ETIMEDOUT;
goto error;
}
/* Fill up the wfifo */
ret = a3700_spi_fifo_write(a3700_spi);
if (ret)
goto error;
} else if (a3700_spi->rx_buf) {
/* Wait rfifo ready */
if (!a3700_spi_transfer_wait(spi,
A3700_SPI_RFIFO_RDY)) {
dev_err(&spi->dev,
"wait rfifo ready timed out\n");
ret = -ETIMEDOUT;
goto error;
}
/* Drain out the rfifo */
ret = a3700_spi_fifo_read(a3700_spi);
if (ret)
goto error;
}
}
/*
* Stop a write transfer in fifo mode:
* - wait all the bytes in wfifo to be shifted out
* - set XFER_STOP bit
* - wait XFER_START bit clear
* - clear XFER_STOP bit
* Stop a read transfer in fifo mode:
* - the hardware is to reset the XFER_START bit
* after the number of bytes indicated in DIN_CNT
* register
* - just wait XFER_START bit clear
*/
if (a3700_spi->tx_buf) {
if (a3700_spi->xmit_data) {
			/*
			 * If data was written to the SPI device, wait until
			 * SPI_WFIFO_EMPTY is set, i.e. all data has been
			 * shifted out of the write FIFO.
			 */
if (!a3700_spi_transfer_wait(spi,
A3700_SPI_WFIFO_EMPTY)) {
dev_err(&spi->dev, "wait wfifo empty timed out\n");
return -ETIMEDOUT;
}
}
if (!a3700_spi_transfer_wait(spi, A3700_SPI_XFER_RDY)) {
dev_err(&spi->dev, "wait xfer ready timed out\n");
return -ETIMEDOUT;
}
val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
val |= A3700_SPI_XFER_STOP;
spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
}
while (--timeout) {
val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
if (!(val & A3700_SPI_XFER_START))
break;
udelay(1);
}
if (timeout == 0) {
dev_err(&spi->dev, "wait transfer start clear timed out\n");
ret = -ETIMEDOUT;
goto error;
}
val &= ~A3700_SPI_XFER_STOP;
spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
goto out;
error:
a3700_spi_transfer_abort_fifo(a3700_spi);
out:
spi_finalize_current_transfer(host);
return ret;
}
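/*
 * Full-duplex transfers are handled outside FIFO mode: shift one data
 * register word out at a time and read the input register back after
 * each word.
 */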
static int a3700_spi_transfer_one_full_duplex(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct a3700_spi *a3700_spi = spi_controller_get_devdata(host);
u32 val;
/* Disable FIFO mode */
a3700_spi_fifo_mode_set(a3700_spi, false);
while (a3700_spi->buf_len) {
		/* When fewer than 4 bytes remain, switch to 1-byte mode.
		 * This is reset after each transfer.
		 */
if (a3700_spi->buf_len < 4)
a3700_spi_bytelen_set(a3700_spi, 1);
if (a3700_spi->byte_len == 1)
val = *a3700_spi->tx_buf;
else
val = *(u32 *)a3700_spi->tx_buf;
spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, val);
/* Wait for all the data to be shifted in / out */
while (!(spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG) &
A3700_SPI_XFER_DONE))
cpu_relax();
val = spireg_read(a3700_spi, A3700_SPI_DATA_IN_REG);
memcpy(a3700_spi->rx_buf, &val, a3700_spi->byte_len);
a3700_spi->buf_len -= a3700_spi->byte_len;
a3700_spi->tx_buf += a3700_spi->byte_len;
a3700_spi->rx_buf += a3700_spi->byte_len;
}
spi_finalize_current_transfer(host);
return 0;
}
static int a3700_spi_transfer_one(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
a3700_spi_transfer_setup(spi, xfer);
if (xfer->tx_buf && xfer->rx_buf)
return a3700_spi_transfer_one_full_duplex(host, spi, xfer);
return a3700_spi_transfer_one_fifo(host, spi, xfer);
}
static int a3700_spi_unprepare_message(struct spi_controller *host,
struct spi_message *message)
{
struct a3700_spi *a3700_spi = spi_controller_get_devdata(host);
clk_disable(a3700_spi->clk);
return 0;
}
static const struct of_device_id a3700_spi_dt_ids[] = {
{ .compatible = "marvell,armada-3700-spi", .data = NULL },
{},
};
MODULE_DEVICE_TABLE(of, a3700_spi_dt_ids);
static int a3700_spi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *of_node = dev->of_node;
struct spi_controller *host;
struct a3700_spi *spi;
u32 num_cs = 0;
int irq, ret = 0;
host = spi_alloc_host(dev, sizeof(*spi));
if (!host) {
dev_err(dev, "host allocation failed\n");
ret = -ENOMEM;
goto out;
}
if (of_property_read_u32(of_node, "num-cs", &num_cs)) {
dev_err(dev, "could not find num-cs\n");
ret = -ENXIO;
goto error;
}
host->bus_num = pdev->id;
host->dev.of_node = of_node;
host->mode_bits = SPI_MODE_3;
host->num_chipselect = num_cs;
host->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(32);
host->prepare_message = a3700_spi_prepare_message;
host->transfer_one = a3700_spi_transfer_one;
host->unprepare_message = a3700_spi_unprepare_message;
host->set_cs = a3700_spi_set_cs;
host->mode_bits |= (SPI_RX_DUAL | SPI_TX_DUAL |
SPI_RX_QUAD | SPI_TX_QUAD);
platform_set_drvdata(pdev, host);
spi = spi_controller_get_devdata(host);
spi->host = host;
spi->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(spi->base)) {
ret = PTR_ERR(spi->base);
goto error;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = -ENXIO;
goto error;
}
spi->irq = irq;
init_completion(&spi->done);
spi->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(spi->clk)) {
		dev_err(dev, "could not find clk: %ld\n", PTR_ERR(spi->clk));
		ret = PTR_ERR(spi->clk);
		goto error;
	}
ret = clk_prepare(spi->clk);
if (ret) {
dev_err(dev, "could not prepare clk: %d\n", ret);
goto error;
}
host->max_speed_hz = min_t(unsigned long, A3700_SPI_MAX_SPEED_HZ,
clk_get_rate(spi->clk));
host->min_speed_hz = DIV_ROUND_UP(clk_get_rate(spi->clk),
A3700_SPI_MAX_PRESCALE);
a3700_spi_init(spi);
ret = devm_request_irq(dev, spi->irq, a3700_spi_interrupt, 0,
dev_name(dev), host);
if (ret) {
dev_err(dev, "could not request IRQ: %d\n", ret);
goto error_clk;
}
ret = devm_spi_register_controller(dev, host);
if (ret) {
dev_err(dev, "Failed to register host\n");
goto error_clk;
}
return 0;
error_clk:
clk_unprepare(spi->clk);
error:
spi_controller_put(host);
out:
return ret;
}
static void a3700_spi_remove(struct platform_device *pdev)
{
struct spi_controller *host = platform_get_drvdata(pdev);
struct a3700_spi *spi = spi_controller_get_devdata(host);
clk_unprepare(spi->clk);
}
static struct platform_driver a3700_spi_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = of_match_ptr(a3700_spi_dt_ids),
},
.probe = a3700_spi_probe,
.remove_new = a3700_spi_remove,
};
module_platform_driver(a3700_spi_driver);
MODULE_DESCRIPTION("Armada-3700 SPI driver");
MODULE_AUTHOR("Wilson Ding <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
| linux-master | drivers/spi/spi-armada-3700.c |
// SPDX-License-Identifier: GPL-2.0+
//
// Copyright 2013 Freescale Semiconductor, Inc.
// Copyright 2020 NXP
//
// Freescale DSPI driver
// This file contains a driver for the Freescale DSPI
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-fsl-dspi.h>
#define DRIVER_NAME "fsl-dspi"
#define SPI_MCR 0x00
#define SPI_MCR_HOST BIT(31)
#define SPI_MCR_PCSIS(x) ((x) << 16)
#define SPI_MCR_CLR_TXF BIT(11)
#define SPI_MCR_CLR_RXF BIT(10)
#define SPI_MCR_XSPI BIT(3)
#define SPI_MCR_DIS_TXF BIT(13)
#define SPI_MCR_DIS_RXF BIT(12)
#define SPI_MCR_HALT BIT(0)
#define SPI_TCR 0x08
#define SPI_TCR_GET_TCNT(x) (((x) & GENMASK(31, 16)) >> 16)
#define SPI_CTAR(x) (0x0c + (((x) & GENMASK(1, 0)) * 4))
#define SPI_CTAR_FMSZ(x) (((x) << 27) & GENMASK(30, 27))
#define SPI_CTAR_CPOL BIT(26)
#define SPI_CTAR_CPHA BIT(25)
#define SPI_CTAR_LSBFE BIT(24)
#define SPI_CTAR_PCSSCK(x) (((x) << 22) & GENMASK(23, 22))
#define SPI_CTAR_PASC(x) (((x) << 20) & GENMASK(21, 20))
#define SPI_CTAR_PDT(x) (((x) << 18) & GENMASK(19, 18))
#define SPI_CTAR_PBR(x) (((x) << 16) & GENMASK(17, 16))
#define SPI_CTAR_CSSCK(x) (((x) << 12) & GENMASK(15, 12))
#define SPI_CTAR_ASC(x) (((x) << 8) & GENMASK(11, 8))
#define SPI_CTAR_DT(x) (((x) << 4) & GENMASK(7, 4))
#define SPI_CTAR_BR(x) ((x) & GENMASK(3, 0))
#define SPI_CTAR_SCALE_BITS 0xf
#define SPI_CTAR0_SLAVE 0x0c
#define SPI_SR 0x2c
#define SPI_SR_TCFQF BIT(31)
#define SPI_SR_TFUF BIT(27)
#define SPI_SR_TFFF BIT(25)
#define SPI_SR_CMDTCF BIT(23)
#define SPI_SR_SPEF BIT(21)
#define SPI_SR_RFOF BIT(19)
#define SPI_SR_TFIWF BIT(18)
#define SPI_SR_RFDF BIT(17)
#define SPI_SR_CMDFFF BIT(16)
#define SPI_SR_CLEAR (SPI_SR_TCFQF | \
SPI_SR_TFUF | SPI_SR_TFFF | \
SPI_SR_CMDTCF | SPI_SR_SPEF | \
SPI_SR_RFOF | SPI_SR_TFIWF | \
SPI_SR_RFDF | SPI_SR_CMDFFF)
#define SPI_RSER_TFFFE BIT(25)
#define SPI_RSER_TFFFD BIT(24)
#define SPI_RSER_RFDFE BIT(17)
#define SPI_RSER_RFDFD BIT(16)
#define SPI_RSER 0x30
#define SPI_RSER_TCFQE BIT(31)
#define SPI_RSER_CMDTCFE BIT(23)
#define SPI_PUSHR 0x34
#define SPI_PUSHR_CMD_CONT BIT(15)
#define SPI_PUSHR_CMD_CTAS(x)		(((x) << 12) & GENMASK(14, 12))
#define SPI_PUSHR_CMD_EOQ BIT(11)
#define SPI_PUSHR_CMD_CTCNT BIT(10)
#define SPI_PUSHR_CMD_PCS(x) (BIT(x) & GENMASK(5, 0))
#define SPI_PUSHR_SLAVE 0x34
#define SPI_POPR 0x38
#define SPI_TXFR0 0x3c
#define SPI_TXFR1 0x40
#define SPI_TXFR2 0x44
#define SPI_TXFR3 0x48
#define SPI_RXFR0 0x7c
#define SPI_RXFR1 0x80
#define SPI_RXFR2 0x84
#define SPI_RXFR3 0x88
#define SPI_CTARE(x) (0x11c + (((x) & GENMASK(1, 0)) * 4))
#define SPI_CTARE_FMSZE(x) (((x) & 0x1) << 16)
#define SPI_CTARE_DTCP(x) ((x) & 0x7ff)
#define SPI_SREX 0x13c
#define SPI_FRAME_BITS(bits) SPI_CTAR_FMSZ((bits) - 1)
#define SPI_FRAME_EBITS(bits) SPI_CTARE_FMSZE(((bits) - 1) >> 4)
#define DMA_COMPLETION_TIMEOUT msecs_to_jiffies(3000)
struct chip_data {
u32 ctar_val;
};
enum dspi_trans_mode {
DSPI_XSPI_MODE,
DSPI_DMA_MODE,
};
struct fsl_dspi_devtype_data {
enum dspi_trans_mode trans_mode;
u8 max_clock_factor;
int fifo_size;
};
enum {
LS1021A,
LS1012A,
LS1028A,
LS1043A,
LS1046A,
LS2080A,
LS2085A,
LX2160A,
MCF5441X,
VF610,
};
static const struct fsl_dspi_devtype_data devtype_data[] = {
[VF610] = {
.trans_mode = DSPI_DMA_MODE,
.max_clock_factor = 2,
.fifo_size = 4,
},
[LS1021A] = {
/* Has A-011218 DMA erratum */
.trans_mode = DSPI_XSPI_MODE,
.max_clock_factor = 8,
.fifo_size = 4,
},
[LS1012A] = {
/* Has A-011218 DMA erratum */
.trans_mode = DSPI_XSPI_MODE,
.max_clock_factor = 8,
.fifo_size = 16,
},
[LS1028A] = {
.trans_mode = DSPI_XSPI_MODE,
.max_clock_factor = 8,
.fifo_size = 4,
},
[LS1043A] = {
/* Has A-011218 DMA erratum */
.trans_mode = DSPI_XSPI_MODE,
.max_clock_factor = 8,
.fifo_size = 16,
},
[LS1046A] = {
/* Has A-011218 DMA erratum */
.trans_mode = DSPI_XSPI_MODE,
.max_clock_factor = 8,
.fifo_size = 16,
},
[LS2080A] = {
.trans_mode = DSPI_XSPI_MODE,
.max_clock_factor = 8,
.fifo_size = 4,
},
[LS2085A] = {
.trans_mode = DSPI_XSPI_MODE,
.max_clock_factor = 8,
.fifo_size = 4,
},
[LX2160A] = {
.trans_mode = DSPI_XSPI_MODE,
.max_clock_factor = 8,
.fifo_size = 4,
},
[MCF5441X] = {
.trans_mode = DSPI_DMA_MODE,
.max_clock_factor = 8,
.fifo_size = 16,
},
};
struct fsl_dspi_dma {
u32 *tx_dma_buf;
struct dma_chan *chan_tx;
dma_addr_t tx_dma_phys;
struct completion cmd_tx_complete;
struct dma_async_tx_descriptor *tx_desc;
u32 *rx_dma_buf;
struct dma_chan *chan_rx;
dma_addr_t rx_dma_phys;
struct completion cmd_rx_complete;
struct dma_async_tx_descriptor *rx_desc;
};
struct fsl_dspi {
struct spi_controller *ctlr;
struct platform_device *pdev;
struct regmap *regmap;
struct regmap *regmap_pushr;
int irq;
struct clk *clk;
struct spi_transfer *cur_transfer;
struct spi_message *cur_msg;
struct chip_data *cur_chip;
size_t progress;
size_t len;
const void *tx;
void *rx;
u16 tx_cmd;
const struct fsl_dspi_devtype_data *devtype_data;
struct completion xfer_done;
struct fsl_dspi_dma *dma;
int oper_word_size;
int oper_bits_per_word;
int words_in_flight;
/*
* Offsets for CMD and TXDATA within SPI_PUSHR when accessed
* individually (in XSPI mode)
*/
int pushr_cmd;
int pushr_tx;
void (*host_to_dev)(struct fsl_dspi *dspi, u32 *txdata);
void (*dev_to_host)(struct fsl_dspi *dspi, u32 rxdata);
};
static void dspi_native_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
{
switch (dspi->oper_word_size) {
case 1:
*txdata = *(u8 *)dspi->tx;
break;
case 2:
*txdata = *(u16 *)dspi->tx;
break;
case 4:
*txdata = *(u32 *)dspi->tx;
break;
}
dspi->tx += dspi->oper_word_size;
}
static void dspi_native_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
{
switch (dspi->oper_word_size) {
case 1:
*(u8 *)dspi->rx = rxdata;
break;
case 2:
*(u16 *)dspi->rx = rxdata;
break;
case 4:
*(u32 *)dspi->rx = rxdata;
break;
}
dspi->rx += dspi->oper_word_size;
}
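/*
 * The helpers below repack several 8- or 16-bit SPI words into a single
 * 32-bit FIFO entry (and back), swapping bytes where needed so that the
 * on-wire order matches the order of the words in the buffer.
 */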
static void dspi_8on32_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
{
*txdata = cpu_to_be32(*(u32 *)dspi->tx);
dspi->tx += sizeof(u32);
}
static void dspi_8on32_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
{
*(u32 *)dspi->rx = be32_to_cpu(rxdata);
dspi->rx += sizeof(u32);
}
static void dspi_8on16_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
{
*txdata = cpu_to_be16(*(u16 *)dspi->tx);
dspi->tx += sizeof(u16);
}
static void dspi_8on16_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
{
*(u16 *)dspi->rx = be16_to_cpu(rxdata);
dspi->rx += sizeof(u16);
}
static void dspi_16on32_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
{
u16 hi = *(u16 *)dspi->tx;
u16 lo = *(u16 *)(dspi->tx + 2);
*txdata = (u32)hi << 16 | lo;
dspi->tx += sizeof(u32);
}
static void dspi_16on32_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
{
u16 hi = rxdata & 0xffff;
u16 lo = rxdata >> 16;
*(u16 *)dspi->rx = lo;
*(u16 *)(dspi->rx + 2) = hi;
dspi->rx += sizeof(u32);
}
/*
* Pop one word from the TX buffer for pushing into the
* PUSHR register (TX FIFO)
*/
static u32 dspi_pop_tx(struct fsl_dspi *dspi)
{
u32 txdata = 0;
if (dspi->tx)
dspi->host_to_dev(dspi, &txdata);
dspi->len -= dspi->oper_word_size;
return txdata;
}
/* Prepare one TX FIFO entry (txdata plus cmd) */
static u32 dspi_pop_tx_pushr(struct fsl_dspi *dspi)
{
u16 cmd = dspi->tx_cmd, data = dspi_pop_tx(dspi);
if (spi_controller_is_target(dspi->ctlr))
return data;
if (dspi->len > 0)
cmd |= SPI_PUSHR_CMD_CONT;
return cmd << 16 | data;
}
/* Push one word to the RX buffer from the POPR register (RX FIFO) */
static void dspi_push_rx(struct fsl_dspi *dspi, u32 rxdata)
{
if (!dspi->rx)
return;
dspi->dev_to_host(dspi, rxdata);
}
static void dspi_tx_dma_callback(void *arg)
{
struct fsl_dspi *dspi = arg;
struct fsl_dspi_dma *dma = dspi->dma;
complete(&dma->cmd_tx_complete);
}
static void dspi_rx_dma_callback(void *arg)
{
struct fsl_dspi *dspi = arg;
struct fsl_dspi_dma *dma = dspi->dma;
int i;
if (dspi->rx) {
for (i = 0; i < dspi->words_in_flight; i++)
dspi_push_rx(dspi, dspi->dma->rx_dma_buf[i]);
}
complete(&dma->cmd_rx_complete);
}
static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
{
struct device *dev = &dspi->pdev->dev;
struct fsl_dspi_dma *dma = dspi->dma;
int time_left;
int i;
for (i = 0; i < dspi->words_in_flight; i++)
dspi->dma->tx_dma_buf[i] = dspi_pop_tx_pushr(dspi);
dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx,
dma->tx_dma_phys,
dspi->words_in_flight *
DMA_SLAVE_BUSWIDTH_4_BYTES,
DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!dma->tx_desc) {
dev_err(dev, "Not able to get desc for DMA xfer\n");
return -EIO;
}
dma->tx_desc->callback = dspi_tx_dma_callback;
dma->tx_desc->callback_param = dspi;
if (dma_submit_error(dmaengine_submit(dma->tx_desc))) {
dev_err(dev, "DMA submit failed\n");
return -EINVAL;
}
dma->rx_desc = dmaengine_prep_slave_single(dma->chan_rx,
dma->rx_dma_phys,
dspi->words_in_flight *
DMA_SLAVE_BUSWIDTH_4_BYTES,
DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!dma->rx_desc) {
dev_err(dev, "Not able to get desc for DMA xfer\n");
return -EIO;
}
dma->rx_desc->callback = dspi_rx_dma_callback;
dma->rx_desc->callback_param = dspi;
if (dma_submit_error(dmaengine_submit(dma->rx_desc))) {
dev_err(dev, "DMA submit failed\n");
return -EINVAL;
}
reinit_completion(&dspi->dma->cmd_rx_complete);
reinit_completion(&dspi->dma->cmd_tx_complete);
dma_async_issue_pending(dma->chan_rx);
dma_async_issue_pending(dma->chan_tx);
if (spi_controller_is_target(dspi->ctlr)) {
wait_for_completion_interruptible(&dspi->dma->cmd_rx_complete);
return 0;
}
time_left = wait_for_completion_timeout(&dspi->dma->cmd_tx_complete,
DMA_COMPLETION_TIMEOUT);
if (time_left == 0) {
dev_err(dev, "DMA tx timeout\n");
dmaengine_terminate_all(dma->chan_tx);
dmaengine_terminate_all(dma->chan_rx);
return -ETIMEDOUT;
}
time_left = wait_for_completion_timeout(&dspi->dma->cmd_rx_complete,
DMA_COMPLETION_TIMEOUT);
if (time_left == 0) {
dev_err(dev, "DMA rx timeout\n");
dmaengine_terminate_all(dma->chan_tx);
dmaengine_terminate_all(dma->chan_rx);
return -ETIMEDOUT;
}
return 0;
}
static void dspi_setup_accel(struct fsl_dspi *dspi);
static int dspi_dma_xfer(struct fsl_dspi *dspi)
{
struct spi_message *message = dspi->cur_msg;
struct device *dev = &dspi->pdev->dev;
int ret = 0;
/*
* dspi->len gets decremented by dspi_pop_tx_pushr in
* dspi_next_xfer_dma_submit
*/
while (dspi->len) {
/* Figure out operational bits-per-word for this chunk */
dspi_setup_accel(dspi);
dspi->words_in_flight = dspi->len / dspi->oper_word_size;
if (dspi->words_in_flight > dspi->devtype_data->fifo_size)
dspi->words_in_flight = dspi->devtype_data->fifo_size;
message->actual_length += dspi->words_in_flight *
dspi->oper_word_size;
ret = dspi_next_xfer_dma_submit(dspi);
if (ret) {
dev_err(dev, "DMA transfer failed\n");
break;
}
}
return ret;
}
static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
{
int dma_bufsize = dspi->devtype_data->fifo_size * 2;
struct device *dev = &dspi->pdev->dev;
struct dma_slave_config cfg;
struct fsl_dspi_dma *dma;
int ret;
dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
if (!dma)
return -ENOMEM;
dma->chan_rx = dma_request_chan(dev, "rx");
if (IS_ERR(dma->chan_rx)) {
return dev_err_probe(dev, PTR_ERR(dma->chan_rx),
"rx dma channel not available\n");
}
dma->chan_tx = dma_request_chan(dev, "tx");
if (IS_ERR(dma->chan_tx)) {
ret = PTR_ERR(dma->chan_tx);
dev_err_probe(dev, ret, "tx dma channel not available\n");
goto err_tx_channel;
}
dma->tx_dma_buf = dma_alloc_coherent(dma->chan_tx->device->dev,
dma_bufsize, &dma->tx_dma_phys,
GFP_KERNEL);
if (!dma->tx_dma_buf) {
ret = -ENOMEM;
goto err_tx_dma_buf;
}
dma->rx_dma_buf = dma_alloc_coherent(dma->chan_rx->device->dev,
dma_bufsize, &dma->rx_dma_phys,
GFP_KERNEL);
if (!dma->rx_dma_buf) {
ret = -ENOMEM;
goto err_rx_dma_buf;
}
memset(&cfg, 0, sizeof(cfg));
cfg.src_addr = phy_addr + SPI_POPR;
cfg.dst_addr = phy_addr + SPI_PUSHR;
cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
cfg.src_maxburst = 1;
cfg.dst_maxburst = 1;
cfg.direction = DMA_DEV_TO_MEM;
ret = dmaengine_slave_config(dma->chan_rx, &cfg);
if (ret) {
dev_err(dev, "can't configure rx dma channel\n");
ret = -EINVAL;
goto err_slave_config;
}
cfg.direction = DMA_MEM_TO_DEV;
ret = dmaengine_slave_config(dma->chan_tx, &cfg);
if (ret) {
dev_err(dev, "can't configure tx dma channel\n");
ret = -EINVAL;
goto err_slave_config;
}
dspi->dma = dma;
init_completion(&dma->cmd_tx_complete);
init_completion(&dma->cmd_rx_complete);
return 0;
err_slave_config:
dma_free_coherent(dma->chan_rx->device->dev,
dma_bufsize, dma->rx_dma_buf, dma->rx_dma_phys);
err_rx_dma_buf:
dma_free_coherent(dma->chan_tx->device->dev,
dma_bufsize, dma->tx_dma_buf, dma->tx_dma_phys);
err_tx_dma_buf:
dma_release_channel(dma->chan_tx);
err_tx_channel:
dma_release_channel(dma->chan_rx);
devm_kfree(dev, dma);
dspi->dma = NULL;
return ret;
}
static void dspi_release_dma(struct fsl_dspi *dspi)
{
int dma_bufsize = dspi->devtype_data->fifo_size * 2;
struct fsl_dspi_dma *dma = dspi->dma;
if (!dma)
return;
if (dma->chan_tx) {
dma_free_coherent(dma->chan_tx->device->dev, dma_bufsize,
dma->tx_dma_buf, dma->tx_dma_phys);
dma_release_channel(dma->chan_tx);
}
if (dma->chan_rx) {
dma_free_coherent(dma->chan_rx->device->dev, dma_bufsize,
dma->rx_dma_buf, dma->rx_dma_phys);
dma_release_channel(dma->chan_rx);
}
}
static void hz_to_spi_baud(char *pbr, char *br, int speed_hz,
unsigned long clkrate)
{
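	/*
	 * SCK = protocol clock / (PBR * BR). Search for the {PBR, BR} pair
	 * whose combined divider is the smallest one that still keeps SCK at
	 * or below the requested speed_hz.
	 */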
/* Valid baud rate pre-scaler values */
int pbr_tbl[4] = {2, 3, 5, 7};
int brs[16] = { 2, 4, 6, 8,
16, 32, 64, 128,
256, 512, 1024, 2048,
4096, 8192, 16384, 32768 };
int scale_needed, scale, minscale = INT_MAX;
int i, j;
scale_needed = clkrate / speed_hz;
if (clkrate % speed_hz)
scale_needed++;
for (i = 0; i < ARRAY_SIZE(brs); i++)
for (j = 0; j < ARRAY_SIZE(pbr_tbl); j++) {
scale = brs[i] * pbr_tbl[j];
if (scale >= scale_needed) {
if (scale < minscale) {
minscale = scale;
*br = i;
*pbr = j;
}
break;
}
}
if (minscale == INT_MAX) {
pr_warn("Can not find valid baud rate,speed_hz is %d,clkrate is %ld, we use the max prescaler value.\n",
speed_hz, clkrate);
*pbr = ARRAY_SIZE(pbr_tbl) - 1;
*br = ARRAY_SIZE(brs) - 1;
}
}
static void ns_delay_scale(char *psc, char *sc, int delay_ns,
unsigned long clkrate)
{
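	/*
	 * The resulting delay is PSC * (2 << SC) protocol clock cycles; pick
	 * the smallest combination that meets or exceeds delay_ns.
	 */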
int scale_needed, scale, minscale = INT_MAX;
int pscale_tbl[4] = {1, 3, 5, 7};
u32 remainder;
int i, j;
scale_needed = div_u64_rem((u64)delay_ns * clkrate, NSEC_PER_SEC,
&remainder);
if (remainder)
scale_needed++;
for (i = 0; i < ARRAY_SIZE(pscale_tbl); i++)
for (j = 0; j <= SPI_CTAR_SCALE_BITS; j++) {
scale = pscale_tbl[i] * (2 << j);
if (scale >= scale_needed) {
if (scale < minscale) {
minscale = scale;
*psc = i;
*sc = j;
}
break;
}
}
if (minscale == INT_MAX) {
pr_warn("Cannot find correct scale values for %dns delay at clkrate %ld, using max prescaler value",
delay_ns, clkrate);
*psc = ARRAY_SIZE(pscale_tbl) - 1;
*sc = SPI_CTAR_SCALE_BITS;
}
}
static void dspi_pushr_cmd_write(struct fsl_dspi *dspi, u16 cmd)
{
/*
* The only time when the PCS doesn't need continuation after this word
* is when it's last. We need to look ahead, because we actually call
* dspi_pop_tx (the function that decrements dspi->len) _after_
* dspi_pushr_cmd_write with XSPI mode. As for how much in advance? One
* word is enough. If there's more to transmit than that,
* dspi_xspi_write will know to split the FIFO writes in 2, and
* generate a new PUSHR command with the final word that will have PCS
* deasserted (not continued) here.
*/
if (dspi->len > dspi->oper_word_size)
cmd |= SPI_PUSHR_CMD_CONT;
regmap_write(dspi->regmap_pushr, dspi->pushr_cmd, cmd);
}
static void dspi_pushr_txdata_write(struct fsl_dspi *dspi, u16 txdata)
{
regmap_write(dspi->regmap_pushr, dspi->pushr_tx, txdata);
}
static void dspi_xspi_fifo_write(struct fsl_dspi *dspi, int num_words)
{
int num_bytes = num_words * dspi->oper_word_size;
u16 tx_cmd = dspi->tx_cmd;
/*
* If the PCS needs to de-assert (i.e. we're at the end of the buffer
* and cs_change does not want the PCS to stay on), then we need a new
* PUSHR command, since this one (for the body of the buffer)
* necessarily has the CONT bit set.
* So send one word less during this go, to force a split and a command
* with a single word next time, when CONT will be unset.
*/
if (!(dspi->tx_cmd & SPI_PUSHR_CMD_CONT) && num_bytes == dspi->len)
tx_cmd |= SPI_PUSHR_CMD_EOQ;
/* Update CTARE */
regmap_write(dspi->regmap, SPI_CTARE(0),
SPI_FRAME_EBITS(dspi->oper_bits_per_word) |
SPI_CTARE_DTCP(num_words));
/*
* Write the CMD FIFO entry first, and then the two
* corresponding TX FIFO entries (or one...).
*/
dspi_pushr_cmd_write(dspi, tx_cmd);
/* Fill TX FIFO with as many transfers as possible */
while (num_words--) {
u32 data = dspi_pop_tx(dspi);
dspi_pushr_txdata_write(dspi, data & 0xFFFF);
if (dspi->oper_bits_per_word > 16)
dspi_pushr_txdata_write(dspi, data >> 16);
}
}
static u32 dspi_popr_read(struct fsl_dspi *dspi)
{
u32 rxdata = 0;
regmap_read(dspi->regmap, SPI_POPR, &rxdata);
return rxdata;
}
static void dspi_fifo_read(struct fsl_dspi *dspi)
{
int num_fifo_entries = dspi->words_in_flight;
/* Read one FIFO entry and push to rx buffer */
while (num_fifo_entries--)
dspi_push_rx(dspi, dspi_popr_read(dspi));
}
static void dspi_setup_accel(struct fsl_dspi *dspi)
{
struct spi_transfer *xfer = dspi->cur_transfer;
bool odd = !!(dspi->len & 1);
/* No accel for frames not multiple of 8 bits at the moment */
if (xfer->bits_per_word % 8)
goto no_accel;
if (!odd && dspi->len <= dspi->devtype_data->fifo_size * 2) {
dspi->oper_bits_per_word = 16;
} else if (odd && dspi->len <= dspi->devtype_data->fifo_size) {
dspi->oper_bits_per_word = 8;
} else {
/* Start off with maximum supported by hardware */
if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
dspi->oper_bits_per_word = 32;
else
dspi->oper_bits_per_word = 16;
/*
* And go down only if the buffer can't be sent with
* words this big
*/
do {
if (dspi->len >= DIV_ROUND_UP(dspi->oper_bits_per_word, 8))
break;
dspi->oper_bits_per_word /= 2;
} while (dspi->oper_bits_per_word > 8);
}
if (xfer->bits_per_word == 8 && dspi->oper_bits_per_word == 32) {
dspi->dev_to_host = dspi_8on32_dev_to_host;
dspi->host_to_dev = dspi_8on32_host_to_dev;
} else if (xfer->bits_per_word == 8 && dspi->oper_bits_per_word == 16) {
dspi->dev_to_host = dspi_8on16_dev_to_host;
dspi->host_to_dev = dspi_8on16_host_to_dev;
} else if (xfer->bits_per_word == 16 && dspi->oper_bits_per_word == 32) {
dspi->dev_to_host = dspi_16on32_dev_to_host;
dspi->host_to_dev = dspi_16on32_host_to_dev;
} else {
no_accel:
dspi->dev_to_host = dspi_native_dev_to_host;
dspi->host_to_dev = dspi_native_host_to_dev;
dspi->oper_bits_per_word = xfer->bits_per_word;
}
dspi->oper_word_size = DIV_ROUND_UP(dspi->oper_bits_per_word, 8);
/*
* Update CTAR here (code is common for XSPI and DMA modes).
* We will update CTARE in the portion specific to XSPI, when we
* also know the preload value (DTCP).
*/
regmap_write(dspi->regmap, SPI_CTAR(0),
dspi->cur_chip->ctar_val |
SPI_FRAME_BITS(dspi->oper_bits_per_word));
}
static void dspi_fifo_write(struct fsl_dspi *dspi)
{
int num_fifo_entries = dspi->devtype_data->fifo_size;
struct spi_transfer *xfer = dspi->cur_transfer;
struct spi_message *msg = dspi->cur_msg;
int num_words, num_bytes;
dspi_setup_accel(dspi);
/* In XSPI mode each 32-bit word occupies 2 TX FIFO entries */
if (dspi->oper_word_size == 4)
num_fifo_entries /= 2;
/*
* Integer division intentionally trims off odd (or non-multiple of 4)
* numbers of bytes at the end of the buffer, which will be sent next
* time using a smaller oper_word_size.
*/
num_words = dspi->len / dspi->oper_word_size;
if (num_words > num_fifo_entries)
num_words = num_fifo_entries;
/* Update total number of bytes that were transferred */
num_bytes = num_words * dspi->oper_word_size;
msg->actual_length += num_bytes;
dspi->progress += num_bytes / DIV_ROUND_UP(xfer->bits_per_word, 8);
/*
* Update shared variable for use in the next interrupt (both in
* dspi_fifo_read and in dspi_fifo_write).
*/
dspi->words_in_flight = num_words;
spi_take_timestamp_pre(dspi->ctlr, xfer, dspi->progress, !dspi->irq);
dspi_xspi_fifo_write(dspi, num_words);
/*
* Everything after this point is in a potential race with the next
* interrupt, so we must never use dspi->words_in_flight again since it
* might already be modified by the next dspi_fifo_write.
*/
spi_take_timestamp_post(dspi->ctlr, dspi->cur_transfer,
dspi->progress, !dspi->irq);
}
static int dspi_rxtx(struct fsl_dspi *dspi)
{
dspi_fifo_read(dspi);
if (!dspi->len)
/* Success! */
return 0;
dspi_fifo_write(dspi);
return -EINPROGRESS;
}
static int dspi_poll(struct fsl_dspi *dspi)
{
int tries = 1000;
u32 spi_sr;
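	/*
	 * Busy-wait (bounded to 1000 status reads) for the "command transfer
	 * complete" flag, acknowledging whatever status bits are already set.
	 */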
do {
regmap_read(dspi->regmap, SPI_SR, &spi_sr);
regmap_write(dspi->regmap, SPI_SR, spi_sr);
if (spi_sr & SPI_SR_CMDTCF)
break;
} while (--tries);
if (!tries)
return -ETIMEDOUT;
return dspi_rxtx(dspi);
}
static irqreturn_t dspi_interrupt(int irq, void *dev_id)
{
struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id;
u32 spi_sr;
regmap_read(dspi->regmap, SPI_SR, &spi_sr);
regmap_write(dspi->regmap, SPI_SR, spi_sr);
if (!(spi_sr & SPI_SR_CMDTCF))
return IRQ_NONE;
if (dspi_rxtx(dspi) == 0)
complete(&dspi->xfer_done);
return IRQ_HANDLED;
}
static void dspi_assert_cs(struct spi_device *spi, bool *cs)
{
if (!spi_get_csgpiod(spi, 0) || *cs)
return;
gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), true);
*cs = true;
}
static void dspi_deassert_cs(struct spi_device *spi, bool *cs)
{
if (!spi_get_csgpiod(spi, 0) || !*cs)
return;
gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), false);
*cs = false;
}
static int dspi_transfer_one_message(struct spi_controller *ctlr,
struct spi_message *message)
{
struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
struct spi_device *spi = message->spi;
struct spi_transfer *transfer;
bool cs = false;
int status = 0;
message->actual_length = 0;
list_for_each_entry(transfer, &message->transfers, transfer_list) {
dspi->cur_transfer = transfer;
dspi->cur_msg = message;
dspi->cur_chip = spi_get_ctldata(spi);
dspi_assert_cs(spi, &cs);
/* Prepare command word for CMD FIFO */
dspi->tx_cmd = SPI_PUSHR_CMD_CTAS(0);
if (!spi_get_csgpiod(spi, 0))
dspi->tx_cmd |= SPI_PUSHR_CMD_PCS(spi_get_chipselect(spi, 0));
if (list_is_last(&dspi->cur_transfer->transfer_list,
&dspi->cur_msg->transfers)) {
/* Leave PCS activated after last transfer when
* cs_change is set.
*/
if (transfer->cs_change)
dspi->tx_cmd |= SPI_PUSHR_CMD_CONT;
} else {
/* Keep PCS active between transfers in same message
* when cs_change is not set, and de-activate PCS
* between transfers in the same message when
* cs_change is set.
*/
if (!transfer->cs_change)
dspi->tx_cmd |= SPI_PUSHR_CMD_CONT;
}
dspi->tx = transfer->tx_buf;
dspi->rx = transfer->rx_buf;
dspi->len = transfer->len;
dspi->progress = 0;
regmap_update_bits(dspi->regmap, SPI_MCR,
SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);
spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer,
dspi->progress, !dspi->irq);
if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
status = dspi_dma_xfer(dspi);
} else {
dspi_fifo_write(dspi);
if (dspi->irq) {
wait_for_completion(&dspi->xfer_done);
reinit_completion(&dspi->xfer_done);
} else {
do {
status = dspi_poll(dspi);
} while (status == -EINPROGRESS);
}
}
if (status)
break;
spi_transfer_delay_exec(transfer);
if (!(dspi->tx_cmd & SPI_PUSHR_CMD_CONT))
dspi_deassert_cs(spi, &cs);
}
message->status = status;
spi_finalize_current_message(ctlr);
return status;
}
static int dspi_setup(struct spi_device *spi)
{
struct fsl_dspi *dspi = spi_controller_get_devdata(spi->controller);
u32 period_ns = DIV_ROUND_UP(NSEC_PER_SEC, spi->max_speed_hz);
unsigned char br = 0, pbr = 0, pcssck = 0, cssck = 0;
u32 quarter_period_ns = DIV_ROUND_UP(period_ns, 4);
u32 cs_sck_delay = 0, sck_cs_delay = 0;
struct fsl_dspi_platform_data *pdata;
unsigned char pasc = 0, asc = 0;
struct chip_data *chip;
unsigned long clkrate;
bool cs = true;
/* Only alloc on first setup */
chip = spi_get_ctldata(spi);
if (chip == NULL) {
chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
if (!chip)
return -ENOMEM;
}
pdata = dev_get_platdata(&dspi->pdev->dev);
if (!pdata) {
of_property_read_u32(spi->dev.of_node, "fsl,spi-cs-sck-delay",
&cs_sck_delay);
of_property_read_u32(spi->dev.of_node, "fsl,spi-sck-cs-delay",
&sck_cs_delay);
} else {
cs_sck_delay = pdata->cs_sck_delay;
sck_cs_delay = pdata->sck_cs_delay;
}
/* Since tCSC and tASC apply to continuous transfers too, avoid SCK
* glitches of half a cycle by never allowing tCSC + tASC to go below
* half a SCK period.
*/
if (cs_sck_delay < quarter_period_ns)
cs_sck_delay = quarter_period_ns;
if (sck_cs_delay < quarter_period_ns)
sck_cs_delay = quarter_period_ns;
dev_dbg(&spi->dev,
"DSPI controller timing params: CS-to-SCK delay %u ns, SCK-to-CS delay %u ns\n",
cs_sck_delay, sck_cs_delay);
clkrate = clk_get_rate(dspi->clk);
hz_to_spi_baud(&pbr, &br, spi->max_speed_hz, clkrate);
/* Set PCS to SCK delay scale values */
ns_delay_scale(&pcssck, &cssck, cs_sck_delay, clkrate);
/* Set After SCK delay scale values */
ns_delay_scale(&pasc, &asc, sck_cs_delay, clkrate);
chip->ctar_val = 0;
if (spi->mode & SPI_CPOL)
chip->ctar_val |= SPI_CTAR_CPOL;
if (spi->mode & SPI_CPHA)
chip->ctar_val |= SPI_CTAR_CPHA;
if (!spi_controller_is_target(dspi->ctlr)) {
chip->ctar_val |= SPI_CTAR_PCSSCK(pcssck) |
SPI_CTAR_CSSCK(cssck) |
SPI_CTAR_PASC(pasc) |
SPI_CTAR_ASC(asc) |
SPI_CTAR_PBR(pbr) |
SPI_CTAR_BR(br);
if (spi->mode & SPI_LSB_FIRST)
chip->ctar_val |= SPI_CTAR_LSBFE;
}
gpiod_direction_output(spi_get_csgpiod(spi, 0), false);
dspi_deassert_cs(spi, &cs);
spi_set_ctldata(spi, chip);
return 0;
}
static void dspi_cleanup(struct spi_device *spi)
{
struct chip_data *chip = spi_get_ctldata(spi);
dev_dbg(&spi->dev, "spi_device %u.%u cleanup\n",
spi->controller->bus_num, spi_get_chipselect(spi, 0));
kfree(chip);
}
static const struct of_device_id fsl_dspi_dt_ids[] = {
{
.compatible = "fsl,vf610-dspi",
.data = &devtype_data[VF610],
}, {
.compatible = "fsl,ls1021a-v1.0-dspi",
.data = &devtype_data[LS1021A],
}, {
.compatible = "fsl,ls1012a-dspi",
.data = &devtype_data[LS1012A],
}, {
.compatible = "fsl,ls1028a-dspi",
.data = &devtype_data[LS1028A],
}, {
.compatible = "fsl,ls1043a-dspi",
.data = &devtype_data[LS1043A],
}, {
.compatible = "fsl,ls1046a-dspi",
.data = &devtype_data[LS1046A],
}, {
.compatible = "fsl,ls2080a-dspi",
.data = &devtype_data[LS2080A],
}, {
.compatible = "fsl,ls2085a-dspi",
.data = &devtype_data[LS2085A],
}, {
.compatible = "fsl,lx2160a-dspi",
.data = &devtype_data[LX2160A],
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_dspi_dt_ids);
#ifdef CONFIG_PM_SLEEP
static int dspi_suspend(struct device *dev)
{
struct fsl_dspi *dspi = dev_get_drvdata(dev);
if (dspi->irq)
disable_irq(dspi->irq);
spi_controller_suspend(dspi->ctlr);
clk_disable_unprepare(dspi->clk);
pinctrl_pm_select_sleep_state(dev);
return 0;
}
static int dspi_resume(struct device *dev)
{
struct fsl_dspi *dspi = dev_get_drvdata(dev);
int ret;
pinctrl_pm_select_default_state(dev);
ret = clk_prepare_enable(dspi->clk);
if (ret)
return ret;
spi_controller_resume(dspi->ctlr);
if (dspi->irq)
enable_irq(dspi->irq);
return 0;
}
#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(dspi_pm, dspi_suspend, dspi_resume);
static const struct regmap_range dspi_volatile_ranges[] = {
regmap_reg_range(SPI_MCR, SPI_TCR),
regmap_reg_range(SPI_SR, SPI_SR),
regmap_reg_range(SPI_PUSHR, SPI_RXFR3),
};
static const struct regmap_access_table dspi_volatile_table = {
.yes_ranges = dspi_volatile_ranges,
.n_yes_ranges = ARRAY_SIZE(dspi_volatile_ranges),
};
static const struct regmap_config dspi_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.max_register = 0x88,
.volatile_table = &dspi_volatile_table,
};
static const struct regmap_range dspi_xspi_volatile_ranges[] = {
regmap_reg_range(SPI_MCR, SPI_TCR),
regmap_reg_range(SPI_SR, SPI_SR),
regmap_reg_range(SPI_PUSHR, SPI_RXFR3),
regmap_reg_range(SPI_SREX, SPI_SREX),
};
static const struct regmap_access_table dspi_xspi_volatile_table = {
.yes_ranges = dspi_xspi_volatile_ranges,
.n_yes_ranges = ARRAY_SIZE(dspi_xspi_volatile_ranges),
};
static const struct regmap_config dspi_xspi_regmap_config[] = {
{
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.max_register = 0x13c,
.volatile_table = &dspi_xspi_volatile_table,
},
{
.name = "pushr",
.reg_bits = 16,
.val_bits = 16,
.reg_stride = 2,
.max_register = 0x2,
},
};
static int dspi_init(struct fsl_dspi *dspi)
{
unsigned int mcr;
/* Set idle states for all chip select signals to high */
mcr = SPI_MCR_PCSIS(GENMASK(dspi->ctlr->max_native_cs - 1, 0));
if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
mcr |= SPI_MCR_XSPI;
if (!spi_controller_is_target(dspi->ctlr))
mcr |= SPI_MCR_HOST;
regmap_write(dspi->regmap, SPI_MCR, mcr);
regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);
switch (dspi->devtype_data->trans_mode) {
case DSPI_XSPI_MODE:
regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_CMDTCFE);
break;
case DSPI_DMA_MODE:
regmap_write(dspi->regmap, SPI_RSER,
SPI_RSER_TFFFE | SPI_RSER_TFFFD |
SPI_RSER_RFDFE | SPI_RSER_RFDFD);
break;
default:
dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
dspi->devtype_data->trans_mode);
return -EINVAL;
}
return 0;
}
static int dspi_target_abort(struct spi_controller *host)
{
struct fsl_dspi *dspi = spi_controller_get_devdata(host);
/*
* Terminate all pending DMA transactions for the SPI working
* in TARGET mode.
*/
if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
dmaengine_terminate_sync(dspi->dma->chan_rx);
dmaengine_terminate_sync(dspi->dma->chan_tx);
}
/* Clear the internal DSPI RX and TX FIFO buffers */
regmap_update_bits(dspi->regmap, SPI_MCR,
SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);
return 0;
}
static int dspi_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
const struct regmap_config *regmap_config;
struct fsl_dspi_platform_data *pdata;
struct spi_controller *ctlr;
int ret, cs_num, bus_num = -1;
struct fsl_dspi *dspi;
struct resource *res;
void __iomem *base;
bool big_endian;
dspi = devm_kzalloc(&pdev->dev, sizeof(*dspi), GFP_KERNEL);
if (!dspi)
return -ENOMEM;
ctlr = spi_alloc_host(&pdev->dev, 0);
if (!ctlr)
return -ENOMEM;
spi_controller_set_devdata(ctlr, dspi);
platform_set_drvdata(pdev, dspi);
dspi->pdev = pdev;
dspi->ctlr = ctlr;
ctlr->setup = dspi_setup;
ctlr->transfer_one_message = dspi_transfer_one_message;
ctlr->dev.of_node = pdev->dev.of_node;
ctlr->cleanup = dspi_cleanup;
ctlr->target_abort = dspi_target_abort;
ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
ctlr->use_gpio_descriptors = true;
pdata = dev_get_platdata(&pdev->dev);
if (pdata) {
ctlr->num_chipselect = ctlr->max_native_cs = pdata->cs_num;
ctlr->bus_num = pdata->bus_num;
/* Only Coldfire uses platform data */
dspi->devtype_data = &devtype_data[MCF5441X];
big_endian = true;
} else {
ret = of_property_read_u32(np, "spi-num-chipselects", &cs_num);
if (ret < 0) {
dev_err(&pdev->dev, "can't get spi-num-chipselects\n");
goto out_ctlr_put;
}
ctlr->num_chipselect = ctlr->max_native_cs = cs_num;
of_property_read_u32(np, "bus-num", &bus_num);
ctlr->bus_num = bus_num;
if (of_property_read_bool(np, "spi-slave"))
ctlr->target = true;
dspi->devtype_data = of_device_get_match_data(&pdev->dev);
if (!dspi->devtype_data) {
dev_err(&pdev->dev, "can't get devtype_data\n");
ret = -EFAULT;
goto out_ctlr_put;
}
big_endian = of_device_is_big_endian(np);
}
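	/*
	 * In XSPI mode the 32-bit PUSHR register is accessed as two 16-bit
	 * halves (the command word and the TX data word); their byte offsets
	 * within PUSHR depend on the endianness of the register mapping.
	 */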
if (big_endian) {
dspi->pushr_cmd = 0;
dspi->pushr_tx = 2;
} else {
dspi->pushr_cmd = 2;
dspi->pushr_tx = 0;
}
if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
else
ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base)) {
ret = PTR_ERR(base);
goto out_ctlr_put;
}
if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
regmap_config = &dspi_xspi_regmap_config[0];
else
regmap_config = &dspi_regmap_config;
dspi->regmap = devm_regmap_init_mmio(&pdev->dev, base, regmap_config);
if (IS_ERR(dspi->regmap)) {
dev_err(&pdev->dev, "failed to init regmap: %ld\n",
PTR_ERR(dspi->regmap));
ret = PTR_ERR(dspi->regmap);
goto out_ctlr_put;
}
if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE) {
dspi->regmap_pushr = devm_regmap_init_mmio(
&pdev->dev, base + SPI_PUSHR,
&dspi_xspi_regmap_config[1]);
if (IS_ERR(dspi->regmap_pushr)) {
dev_err(&pdev->dev,
"failed to init pushr regmap: %ld\n",
PTR_ERR(dspi->regmap_pushr));
ret = PTR_ERR(dspi->regmap_pushr);
goto out_ctlr_put;
}
}
dspi->clk = devm_clk_get(&pdev->dev, "dspi");
if (IS_ERR(dspi->clk)) {
ret = PTR_ERR(dspi->clk);
dev_err(&pdev->dev, "unable to get clock\n");
goto out_ctlr_put;
}
ret = clk_prepare_enable(dspi->clk);
if (ret)
goto out_ctlr_put;
ret = dspi_init(dspi);
if (ret)
goto out_clk_put;
dspi->irq = platform_get_irq(pdev, 0);
if (dspi->irq <= 0) {
dev_info(&pdev->dev,
"can't get platform irq, using poll mode\n");
dspi->irq = 0;
goto poll_mode;
}
init_completion(&dspi->xfer_done);
ret = request_threaded_irq(dspi->irq, dspi_interrupt, NULL,
IRQF_SHARED, pdev->name, dspi);
if (ret < 0) {
dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n");
goto out_clk_put;
}
poll_mode:
if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
ret = dspi_request_dma(dspi, res->start);
if (ret < 0) {
dev_err(&pdev->dev, "can't get dma channels\n");
goto out_free_irq;
}
}
ctlr->max_speed_hz =
clk_get_rate(dspi->clk) / dspi->devtype_data->max_clock_factor;
if (dspi->devtype_data->trans_mode != DSPI_DMA_MODE)
ctlr->ptp_sts_supported = true;
ret = spi_register_controller(ctlr);
if (ret != 0) {
dev_err(&pdev->dev, "Problem registering DSPI ctlr\n");
goto out_release_dma;
}
return ret;
out_release_dma:
dspi_release_dma(dspi);
out_free_irq:
if (dspi->irq)
free_irq(dspi->irq, dspi);
out_clk_put:
clk_disable_unprepare(dspi->clk);
out_ctlr_put:
spi_controller_put(ctlr);
return ret;
}
static void dspi_remove(struct platform_device *pdev)
{
struct fsl_dspi *dspi = platform_get_drvdata(pdev);
/* Disconnect from the SPI framework */
spi_unregister_controller(dspi->ctlr);
/* Disable RX and TX */
regmap_update_bits(dspi->regmap, SPI_MCR,
SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF,
SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF);
/* Stop Running */
regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, SPI_MCR_HALT);
dspi_release_dma(dspi);
if (dspi->irq)
free_irq(dspi->irq, dspi);
clk_disable_unprepare(dspi->clk);
}
static void dspi_shutdown(struct platform_device *pdev)
{
dspi_remove(pdev);
}
static struct platform_driver fsl_dspi_driver = {
.driver.name = DRIVER_NAME,
.driver.of_match_table = fsl_dspi_dt_ids,
.driver.owner = THIS_MODULE,
.driver.pm = &dspi_pm,
.probe = dspi_probe,
.remove_new = dspi_remove,
.shutdown = dspi_shutdown,
};
module_platform_driver(fsl_dspi_driver);
MODULE_DESCRIPTION("Freescale DSPI Controller Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
| linux-master | drivers/spi/spi-fsl-dspi.c |
// SPDX-License-Identifier: GPL-2.0
//
// Copyright 2018 SiFive, Inc.
//
// SiFive SPI controller driver (master mode only)
//
// Author: SiFive, Inc.
// [email protected]
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/io.h>
#include <linux/log2.h>
#define SIFIVE_SPI_DRIVER_NAME "sifive_spi"
#define SIFIVE_SPI_MAX_CS 32
#define SIFIVE_SPI_DEFAULT_DEPTH 8
#define SIFIVE_SPI_DEFAULT_MAX_BITS 8
/* register offsets */
#define SIFIVE_SPI_REG_SCKDIV 0x00 /* Serial clock divisor */
#define SIFIVE_SPI_REG_SCKMODE 0x04 /* Serial clock mode */
#define SIFIVE_SPI_REG_CSID 0x10 /* Chip select ID */
#define SIFIVE_SPI_REG_CSDEF 0x14 /* Chip select default */
#define SIFIVE_SPI_REG_CSMODE 0x18 /* Chip select mode */
#define SIFIVE_SPI_REG_DELAY0 0x28 /* Delay control 0 */
#define SIFIVE_SPI_REG_DELAY1 0x2c /* Delay control 1 */
#define SIFIVE_SPI_REG_FMT 0x40 /* Frame format */
#define SIFIVE_SPI_REG_TXDATA 0x48 /* Tx FIFO data */
#define SIFIVE_SPI_REG_RXDATA 0x4c /* Rx FIFO data */
#define SIFIVE_SPI_REG_TXMARK 0x50 /* Tx FIFO watermark */
#define SIFIVE_SPI_REG_RXMARK 0x54 /* Rx FIFO watermark */
#define SIFIVE_SPI_REG_FCTRL 0x60 /* SPI flash interface control */
#define SIFIVE_SPI_REG_FFMT 0x64 /* SPI flash instruction format */
#define SIFIVE_SPI_REG_IE 0x70 /* Interrupt Enable Register */
#define SIFIVE_SPI_REG_IP 0x74 /* Interrupt Pendings Register */
/* sckdiv bits */
#define SIFIVE_SPI_SCKDIV_DIV_MASK 0xfffU
/* sckmode bits */
#define SIFIVE_SPI_SCKMODE_PHA BIT(0)
#define SIFIVE_SPI_SCKMODE_POL BIT(1)
#define SIFIVE_SPI_SCKMODE_MODE_MASK (SIFIVE_SPI_SCKMODE_PHA | \
SIFIVE_SPI_SCKMODE_POL)
/* csmode bits */
#define SIFIVE_SPI_CSMODE_MODE_AUTO 0U
#define SIFIVE_SPI_CSMODE_MODE_HOLD 2U
#define SIFIVE_SPI_CSMODE_MODE_OFF 3U
/* delay0 bits */
#define SIFIVE_SPI_DELAY0_CSSCK(x) ((u32)(x))
#define SIFIVE_SPI_DELAY0_CSSCK_MASK 0xffU
#define SIFIVE_SPI_DELAY0_SCKCS(x) ((u32)(x) << 16)
#define SIFIVE_SPI_DELAY0_SCKCS_MASK (0xffU << 16)
/* delay1 bits */
#define SIFIVE_SPI_DELAY1_INTERCS(x) ((u32)(x))
#define SIFIVE_SPI_DELAY1_INTERCS_MASK 0xffU
#define SIFIVE_SPI_DELAY1_INTERXFR(x) ((u32)(x) << 16)
#define SIFIVE_SPI_DELAY1_INTERXFR_MASK (0xffU << 16)
/* fmt bits */
#define SIFIVE_SPI_FMT_PROTO_SINGLE 0U
#define SIFIVE_SPI_FMT_PROTO_DUAL 1U
#define SIFIVE_SPI_FMT_PROTO_QUAD 2U
#define SIFIVE_SPI_FMT_PROTO_MASK 3U
#define SIFIVE_SPI_FMT_ENDIAN BIT(2)
#define SIFIVE_SPI_FMT_DIR BIT(3)
#define SIFIVE_SPI_FMT_LEN(x) ((u32)(x) << 16)
#define SIFIVE_SPI_FMT_LEN_MASK (0xfU << 16)
/* txdata bits */
#define SIFIVE_SPI_TXDATA_DATA_MASK 0xffU
#define SIFIVE_SPI_TXDATA_FULL BIT(31)
/* rxdata bits */
#define SIFIVE_SPI_RXDATA_DATA_MASK 0xffU
#define SIFIVE_SPI_RXDATA_EMPTY BIT(31)
/* ie and ip bits */
#define SIFIVE_SPI_IP_TXWM BIT(0)
#define SIFIVE_SPI_IP_RXWM BIT(1)
struct sifive_spi {
void __iomem *regs; /* virt. address of control registers */
struct clk *clk; /* bus clock */
unsigned int fifo_depth; /* fifo depth in words */
u32 cs_inactive; /* level of the CS pins when inactive */
struct completion done; /* wake-up from interrupt */
};
static void sifive_spi_write(struct sifive_spi *spi, int offset, u32 value)
{
iowrite32(value, spi->regs + offset);
}
static u32 sifive_spi_read(struct sifive_spi *spi, int offset)
{
return ioread32(spi->regs + offset);
}
static void sifive_spi_init(struct sifive_spi *spi)
{
/* Watermark interrupts are disabled by default */
sifive_spi_write(spi, SIFIVE_SPI_REG_IE, 0);
/* Default watermark FIFO threshold values */
sifive_spi_write(spi, SIFIVE_SPI_REG_TXMARK, 1);
sifive_spi_write(spi, SIFIVE_SPI_REG_RXMARK, 0);
/* Set CS/SCK Delays and Inactive Time to defaults */
sifive_spi_write(spi, SIFIVE_SPI_REG_DELAY0,
SIFIVE_SPI_DELAY0_CSSCK(1) |
SIFIVE_SPI_DELAY0_SCKCS(1));
sifive_spi_write(spi, SIFIVE_SPI_REG_DELAY1,
SIFIVE_SPI_DELAY1_INTERCS(1) |
SIFIVE_SPI_DELAY1_INTERXFR(0));
/* Exit specialized memory-mapped SPI flash mode */
sifive_spi_write(spi, SIFIVE_SPI_REG_FCTRL, 0);
}
static int
sifive_spi_prepare_message(struct spi_controller *host, struct spi_message *msg)
{
struct sifive_spi *spi = spi_controller_get_devdata(host);
struct spi_device *device = msg->spi;
/* Update the chip select polarity */
if (device->mode & SPI_CS_HIGH)
spi->cs_inactive &= ~BIT(spi_get_chipselect(device, 0));
else
spi->cs_inactive |= BIT(spi_get_chipselect(device, 0));
sifive_spi_write(spi, SIFIVE_SPI_REG_CSDEF, spi->cs_inactive);
/* Select the correct device */
sifive_spi_write(spi, SIFIVE_SPI_REG_CSID, spi_get_chipselect(device, 0));
/* Set clock mode */
sifive_spi_write(spi, SIFIVE_SPI_REG_SCKMODE,
device->mode & SIFIVE_SPI_SCKMODE_MODE_MASK);
return 0;
}
static void sifive_spi_set_cs(struct spi_device *device, bool is_high)
{
struct sifive_spi *spi = spi_controller_get_devdata(device->controller);
/* Reverse polarity is handled by SCMR/CPOL. Not inverted CS. */
if (device->mode & SPI_CS_HIGH)
is_high = !is_high;
sifive_spi_write(spi, SIFIVE_SPI_REG_CSMODE, is_high ?
SIFIVE_SPI_CSMODE_MODE_AUTO :
SIFIVE_SPI_CSMODE_MODE_HOLD);
}
static int
sifive_spi_prep_transfer(struct sifive_spi *spi, struct spi_device *device,
struct spi_transfer *t)
{
u32 cr;
unsigned int mode;
/* Calculate and program the clock rate */
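	/*
	 * SCK = bus clock / (2 * (div + 1)); round the divisor up so SCK
	 * never exceeds the requested rate.
	 */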
cr = DIV_ROUND_UP(clk_get_rate(spi->clk) >> 1, t->speed_hz) - 1;
cr &= SIFIVE_SPI_SCKDIV_DIV_MASK;
sifive_spi_write(spi, SIFIVE_SPI_REG_SCKDIV, cr);
mode = max_t(unsigned int, t->rx_nbits, t->tx_nbits);
/* Set frame format */
cr = SIFIVE_SPI_FMT_LEN(t->bits_per_word);
switch (mode) {
case SPI_NBITS_QUAD:
cr |= SIFIVE_SPI_FMT_PROTO_QUAD;
break;
case SPI_NBITS_DUAL:
cr |= SIFIVE_SPI_FMT_PROTO_DUAL;
break;
default:
cr |= SIFIVE_SPI_FMT_PROTO_SINGLE;
break;
}
if (device->mode & SPI_LSB_FIRST)
cr |= SIFIVE_SPI_FMT_ENDIAN;
if (!t->rx_buf)
cr |= SIFIVE_SPI_FMT_DIR;
sifive_spi_write(spi, SIFIVE_SPI_REG_FMT, cr);
/* We will want to poll if the time we need to wait is
* less than the context switching time.
* Let's call that threshold 5us. The operation will take:
* (8/mode) * fifo_depth / hz <= 5 * 10^-6
* 1600000 * fifo_depth <= hz * mode
*/
return 1600000 * spi->fifo_depth <= t->speed_hz * mode;
}
static irqreturn_t sifive_spi_irq(int irq, void *dev_id)
{
struct sifive_spi *spi = dev_id;
u32 ip = sifive_spi_read(spi, SIFIVE_SPI_REG_IP);
if (ip & (SIFIVE_SPI_IP_TXWM | SIFIVE_SPI_IP_RXWM)) {
/* Disable interrupts until next transfer */
sifive_spi_write(spi, SIFIVE_SPI_REG_IE, 0);
complete(&spi->done);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
static void sifive_spi_wait(struct sifive_spi *spi, u32 bit, int poll)
{
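	/*
	 * Either spin on the interrupt-pending bit, or arm the matching
	 * interrupt-enable bit and sleep until the IRQ handler signals
	 * completion.
	 */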
if (poll) {
u32 cr;
do {
cr = sifive_spi_read(spi, SIFIVE_SPI_REG_IP);
} while (!(cr & bit));
} else {
reinit_completion(&spi->done);
sifive_spi_write(spi, SIFIVE_SPI_REG_IE, bit);
wait_for_completion(&spi->done);
}
}
static void sifive_spi_tx(struct sifive_spi *spi, const u8 *tx_ptr)
{
WARN_ON_ONCE((sifive_spi_read(spi, SIFIVE_SPI_REG_TXDATA)
& SIFIVE_SPI_TXDATA_FULL) != 0);
sifive_spi_write(spi, SIFIVE_SPI_REG_TXDATA,
*tx_ptr & SIFIVE_SPI_TXDATA_DATA_MASK);
}
static void sifive_spi_rx(struct sifive_spi *spi, u8 *rx_ptr)
{
u32 data = sifive_spi_read(spi, SIFIVE_SPI_REG_RXDATA);
WARN_ON_ONCE((data & SIFIVE_SPI_RXDATA_EMPTY) != 0);
*rx_ptr = data & SIFIVE_SPI_RXDATA_DATA_MASK;
}
static int
sifive_spi_transfer_one(struct spi_controller *host, struct spi_device *device,
struct spi_transfer *t)
{
struct sifive_spi *spi = spi_controller_get_devdata(host);
int poll = sifive_spi_prep_transfer(spi, device, t);
const u8 *tx_ptr = t->tx_buf;
u8 *rx_ptr = t->rx_buf;
unsigned int remaining_words = t->len;
while (remaining_words) {
unsigned int n_words = min(remaining_words, spi->fifo_depth);
unsigned int i;
/* Enqueue n_words for transmission */
for (i = 0; i < n_words; i++)
sifive_spi_tx(spi, tx_ptr++);
if (rx_ptr) {
/* Wait for transmission + reception to complete */
sifive_spi_write(spi, SIFIVE_SPI_REG_RXMARK,
n_words - 1);
sifive_spi_wait(spi, SIFIVE_SPI_IP_RXWM, poll);
/* Read out all the data from the RX FIFO */
for (i = 0; i < n_words; i++)
sifive_spi_rx(spi, rx_ptr++);
} else {
/* Wait for transmission to complete */
sifive_spi_wait(spi, SIFIVE_SPI_IP_TXWM, poll);
}
remaining_words -= n_words;
}
return 0;
}
static int sifive_spi_probe(struct platform_device *pdev)
{
struct sifive_spi *spi;
int ret, irq, num_cs;
u32 cs_bits, max_bits_per_word;
struct spi_controller *host;
host = spi_alloc_host(&pdev->dev, sizeof(struct sifive_spi));
if (!host) {
dev_err(&pdev->dev, "out of memory\n");
return -ENOMEM;
}
spi = spi_controller_get_devdata(host);
init_completion(&spi->done);
platform_set_drvdata(pdev, host);
spi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(spi->regs)) {
ret = PTR_ERR(spi->regs);
goto put_host;
}
spi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(spi->clk)) {
dev_err(&pdev->dev, "Unable to find bus clock\n");
ret = PTR_ERR(spi->clk);
goto put_host;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
goto put_host;
}
/* Optional parameters */
	ret = of_property_read_u32(pdev->dev.of_node, "sifive,fifo-depth",
				   &spi->fifo_depth);
if (ret < 0)
spi->fifo_depth = SIFIVE_SPI_DEFAULT_DEPTH;
	ret = of_property_read_u32(pdev->dev.of_node, "sifive,max-bits-per-word",
				   &max_bits_per_word);
if (!ret && max_bits_per_word < 8) {
dev_err(&pdev->dev, "Only 8bit SPI words supported by the driver\n");
ret = -EINVAL;
goto put_host;
}
/* Spin up the bus clock before hitting registers */
ret = clk_prepare_enable(spi->clk);
if (ret) {
dev_err(&pdev->dev, "Unable to enable bus clock\n");
goto put_host;
}
/* probe the number of CS lines */
spi->cs_inactive = sifive_spi_read(spi, SIFIVE_SPI_REG_CSDEF);
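	/* Only CSDEF bits backed by real chip-select lines will read back as 1 */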
sifive_spi_write(spi, SIFIVE_SPI_REG_CSDEF, 0xffffffffU);
cs_bits = sifive_spi_read(spi, SIFIVE_SPI_REG_CSDEF);
sifive_spi_write(spi, SIFIVE_SPI_REG_CSDEF, spi->cs_inactive);
if (!cs_bits) {
dev_err(&pdev->dev, "Could not auto probe CS lines\n");
ret = -EINVAL;
goto disable_clk;
}
num_cs = ilog2(cs_bits) + 1;
if (num_cs > SIFIVE_SPI_MAX_CS) {
dev_err(&pdev->dev, "Invalid number of spi targets\n");
ret = -EINVAL;
goto disable_clk;
}
/* Define our host */
host->dev.of_node = pdev->dev.of_node;
host->bus_num = pdev->id;
host->num_chipselect = num_cs;
host->mode_bits = SPI_CPHA | SPI_CPOL
| SPI_CS_HIGH | SPI_LSB_FIRST
| SPI_TX_DUAL | SPI_TX_QUAD
| SPI_RX_DUAL | SPI_RX_QUAD;
/* TODO: add driver support for bits_per_word < 8
* we need to "left-align" the bits (unless SPI_LSB_FIRST)
*/
host->bits_per_word_mask = SPI_BPW_MASK(8);
host->flags = SPI_CONTROLLER_MUST_TX | SPI_CONTROLLER_GPIO_SS;
host->prepare_message = sifive_spi_prepare_message;
host->set_cs = sifive_spi_set_cs;
host->transfer_one = sifive_spi_transfer_one;
pdev->dev.dma_mask = NULL;
/* Configure the SPI host hardware */
sifive_spi_init(spi);
/* Register for SPI Interrupt */
ret = devm_request_irq(&pdev->dev, irq, sifive_spi_irq, 0,
dev_name(&pdev->dev), spi);
if (ret) {
dev_err(&pdev->dev, "Unable to bind to interrupt\n");
goto disable_clk;
}
dev_info(&pdev->dev, "mapped; irq=%d, cs=%d\n",
irq, host->num_chipselect);
ret = devm_spi_register_controller(&pdev->dev, host);
if (ret < 0) {
dev_err(&pdev->dev, "spi_register_host failed\n");
goto disable_clk;
}
return 0;
disable_clk:
clk_disable_unprepare(spi->clk);
put_host:
spi_controller_put(host);
return ret;
}
static void sifive_spi_remove(struct platform_device *pdev)
{
struct spi_controller *host = platform_get_drvdata(pdev);
struct sifive_spi *spi = spi_controller_get_devdata(host);
/* Disable all the interrupts just in case */
sifive_spi_write(spi, SIFIVE_SPI_REG_IE, 0);
clk_disable_unprepare(spi->clk);
}
static int sifive_spi_suspend(struct device *dev)
{
struct spi_controller *host = dev_get_drvdata(dev);
struct sifive_spi *spi = spi_controller_get_devdata(host);
int ret;
ret = spi_controller_suspend(host);
if (ret)
return ret;
/* Disable all the interrupts just in case */
sifive_spi_write(spi, SIFIVE_SPI_REG_IE, 0);
clk_disable_unprepare(spi->clk);
return ret;
}
static int sifive_spi_resume(struct device *dev)
{
struct spi_controller *host = dev_get_drvdata(dev);
struct sifive_spi *spi = spi_controller_get_devdata(host);
int ret;
ret = clk_prepare_enable(spi->clk);
if (ret)
return ret;
ret = spi_controller_resume(host);
if (ret)
clk_disable_unprepare(spi->clk);
return ret;
}
static DEFINE_SIMPLE_DEV_PM_OPS(sifive_spi_pm_ops,
sifive_spi_suspend, sifive_spi_resume);
static const struct of_device_id sifive_spi_of_match[] = {
{ .compatible = "sifive,spi0", },
{}
};
MODULE_DEVICE_TABLE(of, sifive_spi_of_match);
static struct platform_driver sifive_spi_driver = {
.probe = sifive_spi_probe,
.remove_new = sifive_spi_remove,
.driver = {
.name = SIFIVE_SPI_DRIVER_NAME,
.pm = &sifive_spi_pm_ops,
.of_match_table = sifive_spi_of_match,
},
};
module_platform_driver(sifive_spi_driver);
MODULE_AUTHOR("SiFive, Inc. <[email protected]>");
MODULE_DESCRIPTION("SiFive SPI driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-sifive.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SPI bus driver for the Ingenic SoCs
* Copyright (c) 2017-2021 Artur Rojek <[email protected]>
* Copyright (c) 2017-2021 Paul Cercueil <[email protected]>
* Copyright (c) 2022 周琰杰 (Zhou Yanjie) <[email protected]>
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
#define REG_SSIDR 0x0
#define REG_SSICR0 0x4
#define REG_SSICR1 0x8
#define REG_SSISR 0xc
#define REG_SSIGR 0x18
#define REG_SSICR0_TENDIAN_LSB BIT(19)
#define REG_SSICR0_RENDIAN_LSB BIT(17)
#define REG_SSICR0_SSIE BIT(15)
#define REG_SSICR0_LOOP BIT(10)
#define REG_SSICR0_EACLRUN BIT(7)
#define REG_SSICR0_FSEL BIT(6)
#define REG_SSICR0_TFLUSH BIT(2)
#define REG_SSICR0_RFLUSH BIT(1)
#define REG_SSICR1_FRMHL_MASK (BIT(31) | BIT(30))
#define REG_SSICR1_FRMHL BIT(30)
#define REG_SSICR1_LFST BIT(25)
#define REG_SSICR1_UNFIN BIT(23)
#define REG_SSICR1_PHA BIT(1)
#define REG_SSICR1_POL BIT(0)
#define REG_SSISR_END BIT(7)
#define REG_SSISR_BUSY BIT(6)
#define REG_SSISR_TFF BIT(5)
#define REG_SSISR_RFE BIT(4)
#define REG_SSISR_RFHF BIT(2)
#define REG_SSISR_UNDR BIT(1)
#define REG_SSISR_OVER BIT(0)
#define SPI_INGENIC_FIFO_SIZE 128u
struct jz_soc_info {
u32 bits_per_word_mask;
struct reg_field flen_field;
bool has_trendian;
unsigned int max_speed_hz;
unsigned int max_native_cs;
};
struct ingenic_spi {
const struct jz_soc_info *soc_info;
struct clk *clk;
struct resource *mem_res;
struct regmap *map;
struct regmap_field *flen_field;
};
static int spi_ingenic_wait(struct ingenic_spi *priv,
unsigned long mask,
bool condition)
{
unsigned int val;
return regmap_read_poll_timeout(priv->map, REG_SSISR, val,
!!(val & mask) == condition,
100, 10000);
}
static void spi_ingenic_set_cs(struct spi_device *spi, bool disable)
{
struct ingenic_spi *priv = spi_controller_get_devdata(spi->controller);
if (disable) {
regmap_clear_bits(priv->map, REG_SSICR1, REG_SSICR1_UNFIN);
regmap_clear_bits(priv->map, REG_SSISR,
REG_SSISR_UNDR | REG_SSISR_OVER);
spi_ingenic_wait(priv, REG_SSISR_END, true);
} else {
regmap_set_bits(priv->map, REG_SSICR1, REG_SSICR1_UNFIN);
}
regmap_set_bits(priv->map, REG_SSICR0,
REG_SSICR0_RFLUSH | REG_SSICR0_TFLUSH);
}
static void spi_ingenic_prepare_transfer(struct ingenic_spi *priv,
struct spi_device *spi,
struct spi_transfer *xfer)
{
unsigned long clk_hz = clk_get_rate(priv->clk);
u32 cdiv, speed_hz = xfer->speed_hz ?: spi->max_speed_hz,
bits_per_word = xfer->bits_per_word ?: spi->bits_per_word;
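	/*
	 * Illustrative example (values assumed, not taken from a specific
	 * board): the value written to SSIGR programs a bit clock of
	 * clk_hz / (2 * (cdiv + 1)) per the calculation below. With a 48 MHz
	 * source clock and a 12 MHz transfer, 48000000 / (12000000 * 2) = 2,
	 * clamped and decremented to 1, giving 48 MHz / (2 * 2) = 12 MHz.
	 */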
cdiv = clk_hz / (speed_hz * 2);
cdiv = clamp(cdiv, 1u, 0x100u) - 1;
regmap_write(priv->map, REG_SSIGR, cdiv);
regmap_field_write(priv->flen_field, bits_per_word - 2);
}
static void spi_ingenic_finalize_transfer(void *controller)
{
spi_finalize_current_transfer(controller);
}
static struct dma_async_tx_descriptor *
spi_ingenic_prepare_dma(struct spi_controller *ctlr, struct dma_chan *chan,
struct sg_table *sg, enum dma_transfer_direction dir,
unsigned int bits)
{
struct ingenic_spi *priv = spi_controller_get_devdata(ctlr);
struct dma_slave_config cfg = {
.direction = dir,
.src_addr = priv->mem_res->start + REG_SSIDR,
.dst_addr = priv->mem_res->start + REG_SSIDR,
};
struct dma_async_tx_descriptor *desc;
dma_cookie_t cookie;
int ret;
if (bits > 16) {
cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
cfg.src_maxburst = cfg.dst_maxburst = 4;
} else if (bits > 8) {
cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
cfg.src_maxburst = cfg.dst_maxburst = 2;
} else {
cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
cfg.src_maxburst = cfg.dst_maxburst = 1;
}
ret = dmaengine_slave_config(chan, &cfg);
if (ret)
return ERR_PTR(ret);
desc = dmaengine_prep_slave_sg(chan, sg->sgl, sg->nents, dir,
DMA_PREP_INTERRUPT);
if (!desc)
return ERR_PTR(-ENOMEM);
if (dir == DMA_DEV_TO_MEM) {
desc->callback = spi_ingenic_finalize_transfer;
desc->callback_param = ctlr;
}
cookie = dmaengine_submit(desc);
ret = dma_submit_error(cookie);
if (ret) {
dmaengine_desc_free(desc);
return ERR_PTR(ret);
}
return desc;
}
static int spi_ingenic_dma_tx(struct spi_controller *ctlr,
struct spi_transfer *xfer, unsigned int bits)
{
struct dma_async_tx_descriptor *rx_desc, *tx_desc;
rx_desc = spi_ingenic_prepare_dma(ctlr, ctlr->dma_rx,
&xfer->rx_sg, DMA_DEV_TO_MEM, bits);
if (IS_ERR(rx_desc))
return PTR_ERR(rx_desc);
tx_desc = spi_ingenic_prepare_dma(ctlr, ctlr->dma_tx,
&xfer->tx_sg, DMA_MEM_TO_DEV, bits);
if (IS_ERR(tx_desc)) {
dmaengine_terminate_async(ctlr->dma_rx);
dmaengine_desc_free(rx_desc);
return PTR_ERR(tx_desc);
}
dma_async_issue_pending(ctlr->dma_rx);
dma_async_issue_pending(ctlr->dma_tx);
return 1;
}
#define SPI_INGENIC_TX(x) \
static int spi_ingenic_tx##x(struct ingenic_spi *priv, \
struct spi_transfer *xfer) \
{ \
unsigned int count = xfer->len / (x / 8); \
unsigned int prefill = min(count, SPI_INGENIC_FIFO_SIZE); \
const u##x *tx_buf = xfer->tx_buf; \
u##x *rx_buf = xfer->rx_buf; \
unsigned int i, val; \
int err; \
\
/* Fill up the TX fifo */ \
for (i = 0; i < prefill; i++) { \
val = tx_buf ? tx_buf[i] : 0; \
\
regmap_write(priv->map, REG_SSIDR, val); \
} \
\
for (i = 0; i < count; i++) { \
err = spi_ingenic_wait(priv, REG_SSISR_RFE, false); \
if (err) \
return err; \
\
regmap_read(priv->map, REG_SSIDR, &val); \
if (rx_buf) \
rx_buf[i] = val; \
\
if (i < count - prefill) { \
val = tx_buf ? tx_buf[i + prefill] : 0; \
\
regmap_write(priv->map, REG_SSIDR, val); \
} \
} \
\
return 0; \
}
SPI_INGENIC_TX(8)
SPI_INGENIC_TX(16)
SPI_INGENIC_TX(32)
#undef SPI_INGENIC_TX
static int spi_ingenic_transfer_one(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct ingenic_spi *priv = spi_controller_get_devdata(ctlr);
unsigned int bits = xfer->bits_per_word ?: spi->bits_per_word;
bool can_dma = ctlr->can_dma && ctlr->can_dma(ctlr, spi, xfer);
spi_ingenic_prepare_transfer(priv, spi, xfer);
if (ctlr->cur_msg_mapped && can_dma)
return spi_ingenic_dma_tx(ctlr, xfer, bits);
if (bits > 16)
return spi_ingenic_tx32(priv, xfer);
if (bits > 8)
return spi_ingenic_tx16(priv, xfer);
return spi_ingenic_tx8(priv, xfer);
}
static int spi_ingenic_prepare_message(struct spi_controller *ctlr,
struct spi_message *message)
{
struct ingenic_spi *priv = spi_controller_get_devdata(ctlr);
struct spi_device *spi = message->spi;
unsigned int cs = REG_SSICR1_FRMHL << spi_get_chipselect(spi, 0);
unsigned int ssicr0_mask = REG_SSICR0_LOOP | REG_SSICR0_FSEL;
unsigned int ssicr1_mask = REG_SSICR1_PHA | REG_SSICR1_POL | cs;
unsigned int ssicr0 = 0, ssicr1 = 0;
if (priv->soc_info->has_trendian) {
ssicr0_mask |= REG_SSICR0_RENDIAN_LSB | REG_SSICR0_TENDIAN_LSB;
if (spi->mode & SPI_LSB_FIRST)
ssicr0 |= REG_SSICR0_RENDIAN_LSB | REG_SSICR0_TENDIAN_LSB;
} else {
ssicr1_mask |= REG_SSICR1_LFST;
if (spi->mode & SPI_LSB_FIRST)
ssicr1 |= REG_SSICR1_LFST;
}
if (spi->mode & SPI_LOOP)
ssicr0 |= REG_SSICR0_LOOP;
if (spi_get_chipselect(spi, 0))
ssicr0 |= REG_SSICR0_FSEL;
if (spi->mode & SPI_CPHA)
ssicr1 |= REG_SSICR1_PHA;
if (spi->mode & SPI_CPOL)
ssicr1 |= REG_SSICR1_POL;
if (spi->mode & SPI_CS_HIGH)
ssicr1 |= cs;
regmap_update_bits(priv->map, REG_SSICR0, ssicr0_mask, ssicr0);
regmap_update_bits(priv->map, REG_SSICR1, ssicr1_mask, ssicr1);
return 0;
}
static int spi_ingenic_prepare_hardware(struct spi_controller *ctlr)
{
struct ingenic_spi *priv = spi_controller_get_devdata(ctlr);
int ret;
ret = clk_prepare_enable(priv->clk);
if (ret)
return ret;
regmap_write(priv->map, REG_SSICR0, REG_SSICR0_EACLRUN);
regmap_write(priv->map, REG_SSICR1, 0);
regmap_write(priv->map, REG_SSISR, 0);
regmap_set_bits(priv->map, REG_SSICR0, REG_SSICR0_SSIE);
return 0;
}
static int spi_ingenic_unprepare_hardware(struct spi_controller *ctlr)
{
struct ingenic_spi *priv = spi_controller_get_devdata(ctlr);
regmap_clear_bits(priv->map, REG_SSICR0, REG_SSICR0_SSIE);
clk_disable_unprepare(priv->clk);
return 0;
}
static bool spi_ingenic_can_dma(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct dma_slave_caps caps;
int ret;
ret = dma_get_slave_caps(ctlr->dma_tx, &caps);
if (ret) {
dev_err(&spi->dev, "Unable to get slave caps: %d\n", ret);
return false;
}
return !caps.max_sg_burst ||
xfer->len <= caps.max_sg_burst * SPI_INGENIC_FIFO_SIZE;
}
static int spi_ingenic_request_dma(struct spi_controller *ctlr,
struct device *dev)
{
ctlr->dma_tx = dma_request_slave_channel(dev, "tx");
if (!ctlr->dma_tx)
return -ENODEV;
ctlr->dma_rx = dma_request_slave_channel(dev, "rx");
if (!ctlr->dma_rx)
return -ENODEV;
ctlr->can_dma = spi_ingenic_can_dma;
return 0;
}
static void spi_ingenic_release_dma(void *data)
{
struct spi_controller *ctlr = data;
if (ctlr->dma_tx)
dma_release_channel(ctlr->dma_tx);
if (ctlr->dma_rx)
dma_release_channel(ctlr->dma_rx);
}
static const struct regmap_config spi_ingenic_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.max_register = REG_SSIGR,
};
static int spi_ingenic_probe(struct platform_device *pdev)
{
const struct jz_soc_info *pdata;
struct device *dev = &pdev->dev;
struct spi_controller *ctlr;
struct ingenic_spi *priv;
void __iomem *base;
int num_cs, ret;
pdata = of_device_get_match_data(dev);
if (!pdata) {
dev_err(dev, "Missing platform data.\n");
return -EINVAL;
}
ctlr = devm_spi_alloc_host(dev, sizeof(*priv));
if (!ctlr) {
dev_err(dev, "Unable to allocate SPI controller.\n");
return -ENOMEM;
}
priv = spi_controller_get_devdata(ctlr);
priv->soc_info = pdata;
priv->clk = devm_clk_get(dev, NULL);
if (IS_ERR(priv->clk)) {
return dev_err_probe(dev, PTR_ERR(priv->clk),
"Unable to get clock.\n");
}
base = devm_platform_get_and_ioremap_resource(pdev, 0, &priv->mem_res);
if (IS_ERR(base))
return PTR_ERR(base);
priv->map = devm_regmap_init_mmio(dev, base, &spi_ingenic_regmap_config);
if (IS_ERR(priv->map))
return PTR_ERR(priv->map);
priv->flen_field = devm_regmap_field_alloc(dev, priv->map,
pdata->flen_field);
if (IS_ERR(priv->flen_field))
return PTR_ERR(priv->flen_field);
if (device_property_read_u32(dev, "num-cs", &num_cs))
num_cs = pdata->max_native_cs;
platform_set_drvdata(pdev, ctlr);
ctlr->prepare_transfer_hardware = spi_ingenic_prepare_hardware;
ctlr->unprepare_transfer_hardware = spi_ingenic_unprepare_hardware;
ctlr->prepare_message = spi_ingenic_prepare_message;
ctlr->set_cs = spi_ingenic_set_cs;
ctlr->transfer_one = spi_ingenic_transfer_one;
ctlr->mode_bits = SPI_MODE_3 | SPI_LSB_FIRST | SPI_LOOP | SPI_CS_HIGH;
ctlr->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
ctlr->max_dma_len = SPI_INGENIC_FIFO_SIZE;
ctlr->bits_per_word_mask = pdata->bits_per_word_mask;
ctlr->min_speed_hz = 7200;
ctlr->max_speed_hz = pdata->max_speed_hz;
ctlr->use_gpio_descriptors = true;
ctlr->max_native_cs = pdata->max_native_cs;
ctlr->num_chipselect = num_cs;
ctlr->dev.of_node = pdev->dev.of_node;
if (spi_ingenic_request_dma(ctlr, dev))
dev_warn(dev, "DMA not available.\n");
ret = devm_add_action_or_reset(dev, spi_ingenic_release_dma, ctlr);
if (ret) {
dev_err(dev, "Unable to add action.\n");
return ret;
}
ret = devm_spi_register_controller(dev, ctlr);
if (ret)
dev_err(dev, "Unable to register SPI controller.\n");
return ret;
}
static const struct jz_soc_info jz4750_soc_info = {
.bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 17),
.flen_field = REG_FIELD(REG_SSICR1, 4, 7),
.has_trendian = false,
.max_speed_hz = 54000000,
.max_native_cs = 2,
};
static const struct jz_soc_info jz4780_soc_info = {
.bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 32),
.flen_field = REG_FIELD(REG_SSICR1, 3, 7),
.has_trendian = true,
.max_speed_hz = 54000000,
.max_native_cs = 2,
};
static const struct jz_soc_info x1000_soc_info = {
.bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 32),
.flen_field = REG_FIELD(REG_SSICR1, 3, 7),
.has_trendian = true,
.max_speed_hz = 50000000,
.max_native_cs = 2,
};
static const struct jz_soc_info x2000_soc_info = {
.bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 32),
.flen_field = REG_FIELD(REG_SSICR1, 3, 7),
.has_trendian = true,
.max_speed_hz = 50000000,
.max_native_cs = 1,
};
static const struct of_device_id spi_ingenic_of_match[] = {
{ .compatible = "ingenic,jz4750-spi", .data = &jz4750_soc_info },
{ .compatible = "ingenic,jz4775-spi", .data = &jz4780_soc_info },
{ .compatible = "ingenic,jz4780-spi", .data = &jz4780_soc_info },
{ .compatible = "ingenic,x1000-spi", .data = &x1000_soc_info },
{ .compatible = "ingenic,x2000-spi", .data = &x2000_soc_info },
{}
};
MODULE_DEVICE_TABLE(of, spi_ingenic_of_match);
static struct platform_driver spi_ingenic_driver = {
.driver = {
.name = "spi-ingenic",
.of_match_table = spi_ingenic_of_match,
},
.probe = spi_ingenic_probe,
};
module_platform_driver(spi_ingenic_driver);
MODULE_DESCRIPTION("SPI bus driver for the Ingenic SoCs");
MODULE_AUTHOR("Artur Rojek <[email protected]>");
MODULE_AUTHOR("Paul Cercueil <[email protected]>");
MODULE_AUTHOR("周琰杰 (Zhou Yanjie) <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-ingenic.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* parport-to-butterfly adapter
*
* Copyright (C) 2005 David Brownell
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/parport.h>
#include <linux/sched.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/spi/flash.h>
#include <linux/mtd/partitions.h>
/*
* This uses SPI to talk with an "AVR Butterfly", which is a $US20 card
* with a battery powered AVR microcontroller and lots of goodies. You
* can use GCC to develop firmware for this.
*
* See Documentation/spi/butterfly.rst for information about how to build
* and use this custom parallel port cable.
*/
/* DATA output bits (pins 2..9 == D0..D7) */
#define butterfly_nreset (1 << 1) /* pin 3 */
#define spi_sck_bit (1 << 0) /* pin 2 */
#define spi_mosi_bit (1 << 7) /* pin 9 */
#define vcc_bits ((1 << 6) | (1 << 5)) /* pins 7, 8 */
/* STATUS input bits */
#define spi_miso_bit PARPORT_STATUS_BUSY /* pin 11 */
/* CONTROL output bits */
#define spi_cs_bit PARPORT_CONTROL_SELECT /* pin 17 */
static inline struct butterfly *spidev_to_pp(struct spi_device *spi)
{
return spi->controller_data;
}
struct butterfly {
/* REVISIT ... for now, this must be first */
struct spi_bitbang bitbang;
struct parport *port;
struct pardevice *pd;
u8 lastbyte;
struct spi_device *dataflash;
struct spi_device *butterfly;
struct spi_board_info info[2];
};
/*----------------------------------------------------------------------*/
static inline void
setsck(struct spi_device *spi, int is_on)
{
struct butterfly *pp = spidev_to_pp(spi);
u8 bit, byte = pp->lastbyte;
bit = spi_sck_bit;
if (is_on)
byte |= bit;
else
byte &= ~bit;
parport_write_data(pp->port, byte);
pp->lastbyte = byte;
}
static inline void
setmosi(struct spi_device *spi, int is_on)
{
struct butterfly *pp = spidev_to_pp(spi);
u8 bit, byte = pp->lastbyte;
bit = spi_mosi_bit;
if (is_on)
byte |= bit;
else
byte &= ~bit;
parport_write_data(pp->port, byte);
pp->lastbyte = byte;
}
static inline int getmiso(struct spi_device *spi)
{
struct butterfly *pp = spidev_to_pp(spi);
int value;
u8 bit;
bit = spi_miso_bit;
/* only STATUS_BUSY is NOT negated */
value = !(parport_read_status(pp->port) & bit);
return (bit == PARPORT_STATUS_BUSY) ? value : !value;
}
static void butterfly_chipselect(struct spi_device *spi, int value)
{
struct butterfly *pp = spidev_to_pp(spi);
/* set default clock polarity */
if (value != BITBANG_CS_INACTIVE)
setsck(spi, spi->mode & SPI_CPOL);
/* here, value == "activate or not";
* most PARPORT_CONTROL_* bits are negated, so we must
* morph it to value == "bit value to write in control register"
*/
if (spi_cs_bit == PARPORT_CONTROL_INIT)
value = !value;
parport_frob_control(pp->port, spi_cs_bit, value ? spi_cs_bit : 0);
}
/* we only needed to implement one mode here, and choose SPI_MODE_0 */
#define spidelay(X) do { } while (0)
/* #define spidelay ndelay */
#include "spi-bitbang-txrx.h"
static u32
butterfly_txrx_word_mode0(struct spi_device *spi, unsigned nsecs, u32 word,
u8 bits, unsigned flags)
{
return bitbang_txrx_be_cpha0(spi, nsecs, 0, flags, word, bits);
}
/*----------------------------------------------------------------------*/
/* override default partitioning with cmdlinepart */
static struct mtd_partition partitions[] = { {
/* JFFS2 wants partitions of 4*N blocks for this device,
* so sectors 0 and 1 can't be partitions by themselves.
*/
/* sector 0 = 8 pages * 264 bytes/page (1 block)
* sector 1 = 248 pages * 264 bytes/page
*/
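	/* i.e. (8 + 248) pages * 264 bytes/page = 67584 bytes, the "66 KB" below */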
.name = "bookkeeping", /* 66 KB */
.offset = 0,
.size = (8 + 248) * 264,
/* .mask_flags = MTD_WRITEABLE, */
}, {
/* sector 2 = 256 pages * 264 bytes/page
* sectors 3-5 = 512 pages * 264 bytes/page
*/
.name = "filesystem", /* 462 KB */
.offset = MTDPART_OFS_APPEND,
.size = MTDPART_SIZ_FULL,
} };
static struct flash_platform_data flash = {
.name = "butterflash",
.parts = partitions,
.nr_parts = ARRAY_SIZE(partitions),
};
/* REVISIT remove this ugly global and its "only one" limitation */
static struct butterfly *butterfly;
static void butterfly_attach(struct parport *p)
{
struct pardevice *pd;
int status;
struct butterfly *pp;
struct spi_controller *host;
struct device *dev = p->physport->dev;
struct pardev_cb butterfly_cb;
if (butterfly || !dev)
return;
/* REVISIT: this just _assumes_ a butterfly is there ... no probe,
* and no way to be selective about what it binds to.
*/
host = spi_alloc_host(dev, sizeof(*pp));
if (!host) {
status = -ENOMEM;
goto done;
}
pp = spi_controller_get_devdata(host);
/*
* SPI and bitbang hookup
*
* use default setup(), cleanup(), and transfer() methods; and
* only bother implementing mode 0. Start it later.
*/
host->bus_num = 42;
host->num_chipselect = 2;
pp->bitbang.master = host;
pp->bitbang.chipselect = butterfly_chipselect;
pp->bitbang.txrx_word[SPI_MODE_0] = butterfly_txrx_word_mode0;
/*
* parport hookup
*/
pp->port = p;
memset(&butterfly_cb, 0, sizeof(butterfly_cb));
butterfly_cb.private = pp;
pd = parport_register_dev_model(p, "spi_butterfly", &butterfly_cb, 0);
if (!pd) {
status = -ENOMEM;
goto clean0;
}
pp->pd = pd;
status = parport_claim(pd);
if (status < 0)
goto clean1;
/*
* Butterfly reset, powerup, run firmware
*/
pr_debug("%s: powerup/reset Butterfly\n", p->name);
/* nCS for dataflash (this bit is inverted on output) */
parport_frob_control(pp->port, spi_cs_bit, 0);
/* stabilize power with chip in reset (nRESET), and
* spi_sck_bit clear (CPOL=0)
*/
pp->lastbyte |= vcc_bits;
parport_write_data(pp->port, pp->lastbyte);
msleep(5);
/* take it out of reset; assume long reset delay */
pp->lastbyte |= butterfly_nreset;
parport_write_data(pp->port, pp->lastbyte);
msleep(100);
/*
	 * Start SPI ... for now, hide that we're two physical buses.
*/
status = spi_bitbang_start(&pp->bitbang);
if (status < 0)
goto clean2;
/* Bus 1 lets us talk to at45db041b (firmware disables AVR SPI), AVR
* (firmware resets at45, acts as spi slave) or neither (we ignore
* both, AVR uses AT45). Here we expect firmware for the first option.
*/
pp->info[0].max_speed_hz = 15 * 1000 * 1000;
strcpy(pp->info[0].modalias, "mtd_dataflash");
pp->info[0].platform_data = &flash;
pp->info[0].chip_select = 1;
pp->info[0].controller_data = pp;
pp->dataflash = spi_new_device(pp->bitbang.master, &pp->info[0]);
if (pp->dataflash)
pr_debug("%s: dataflash at %s\n", p->name,
dev_name(&pp->dataflash->dev));
pr_info("%s: AVR Butterfly\n", p->name);
butterfly = pp;
return;
clean2:
/* turn off VCC */
parport_write_data(pp->port, 0);
parport_release(pp->pd);
clean1:
parport_unregister_device(pd);
clean0:
spi_controller_put(host);
done:
pr_debug("%s: butterfly probe, fail %d\n", p->name, status);
}
static void butterfly_detach(struct parport *p)
{
struct butterfly *pp;
/* FIXME this global is ugly ... but, how to quickly get from
* the parport to the "struct butterfly" associated with it?
* "old school" driver-internal device lists?
*/
if (!butterfly || butterfly->port != p)
return;
pp = butterfly;
butterfly = NULL;
/* stop() unregisters child devices too */
spi_bitbang_stop(&pp->bitbang);
/* turn off VCC */
parport_write_data(pp->port, 0);
msleep(10);
parport_release(pp->pd);
parport_unregister_device(pp->pd);
spi_controller_put(pp->bitbang.master);
}
static struct parport_driver butterfly_driver = {
.name = "spi_butterfly",
.match_port = butterfly_attach,
.detach = butterfly_detach,
.devmodel = true,
};
module_parport_driver(butterfly_driver);
MODULE_DESCRIPTION("Parport Adapter driver for AVR Butterfly");
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-butterfly.c |
// SPDX-License-Identifier: GPL-2.0
//
// General Purpose SPI multiplexer
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mux/consumer.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#define SPI_MUX_NO_CS ((unsigned int)-1)
/**
* DOC: Driver description
*
* This driver supports a MUX on an SPI bus. This can be useful when you need
* more chip selects than the hardware peripherals support, or than are
* available in a particular board setup.
*
* The driver will create an additional SPI controller. Devices added under the
* mux will be handled as 'chip selects' on this controller.
*/
/**
* struct spi_mux_priv - the basic spi_mux structure
* @spi: pointer to the device struct attached to the parent
* spi controller
* @current_cs: The current chip select set in the mux
* @child_msg_complete: The mux replaces the complete callback in the child's
* message to its own callback; this field is used by the
* driver to store the child's callback during a transfer
* @child_msg_context: Used to store the child's context to the callback
* @child_msg_dev: Used to store the spi_device pointer to the child
* @mux: mux_control structure used to provide chip selects for
* downstream spi devices
*/
struct spi_mux_priv {
struct spi_device *spi;
unsigned int current_cs;
void (*child_msg_complete)(void *context);
void *child_msg_context;
struct spi_device *child_msg_dev;
struct mux_control *mux;
};
/* should not get called when the parent controller is doing a transfer */
static int spi_mux_select(struct spi_device *spi)
{
struct spi_mux_priv *priv = spi_controller_get_devdata(spi->controller);
int ret;
ret = mux_control_select(priv->mux, spi_get_chipselect(spi, 0));
if (ret)
return ret;
if (priv->current_cs == spi_get_chipselect(spi, 0))
return 0;
dev_dbg(&priv->spi->dev, "setting up the mux for cs %d\n",
spi_get_chipselect(spi, 0));
/* copy the child device's settings except for the cs */
priv->spi->max_speed_hz = spi->max_speed_hz;
priv->spi->mode = spi->mode;
priv->spi->bits_per_word = spi->bits_per_word;
priv->current_cs = spi_get_chipselect(spi, 0);
return 0;
}
static int spi_mux_setup(struct spi_device *spi)
{
struct spi_mux_priv *priv = spi_controller_get_devdata(spi->controller);
/*
	 * This can be called multiple times. We don't apply a full setup here;
	 * the settings are copied over when a transfer happens, because we
	 * can't predict which child device the next transfer will come from.
*/
return spi_setup(priv->spi);
}
static void spi_mux_complete_cb(void *context)
{
struct spi_mux_priv *priv = (struct spi_mux_priv *)context;
struct spi_controller *ctlr = spi_get_drvdata(priv->spi);
struct spi_message *m = ctlr->cur_msg;
m->complete = priv->child_msg_complete;
m->context = priv->child_msg_context;
m->spi = priv->child_msg_dev;
spi_finalize_current_message(ctlr);
mux_control_deselect(priv->mux);
}
static int spi_mux_transfer_one_message(struct spi_controller *ctlr,
struct spi_message *m)
{
struct spi_mux_priv *priv = spi_controller_get_devdata(ctlr);
struct spi_device *spi = m->spi;
int ret;
ret = spi_mux_select(spi);
if (ret)
return ret;
/*
* Replace the complete callback, context and spi_device with our own
* pointers. Save originals
*/
priv->child_msg_complete = m->complete;
priv->child_msg_context = m->context;
priv->child_msg_dev = m->spi;
m->complete = spi_mux_complete_cb;
m->context = priv;
m->spi = priv->spi;
/* do the transfer */
return spi_async(priv->spi, m);
}
static int spi_mux_probe(struct spi_device *spi)
{
struct spi_controller *ctlr;
struct spi_mux_priv *priv;
int ret;
ctlr = spi_alloc_master(&spi->dev, sizeof(*priv));
if (!ctlr)
return -ENOMEM;
spi_set_drvdata(spi, ctlr);
priv = spi_controller_get_devdata(ctlr);
priv->spi = spi;
/*
* Increase lockdep class as these lock are taken while the parent bus
* already holds their instance's lock.
*/
lockdep_set_subclass(&ctlr->io_mutex, 1);
lockdep_set_subclass(&ctlr->add_lock, 1);
priv->mux = devm_mux_control_get(&spi->dev, NULL);
if (IS_ERR(priv->mux)) {
ret = dev_err_probe(&spi->dev, PTR_ERR(priv->mux),
"failed to get control-mux\n");
goto err_put_ctlr;
}
priv->current_cs = SPI_MUX_NO_CS;
/* supported modes are the same as our parent's */
ctlr->mode_bits = spi->controller->mode_bits;
ctlr->flags = spi->controller->flags;
ctlr->transfer_one_message = spi_mux_transfer_one_message;
ctlr->setup = spi_mux_setup;
ctlr->num_chipselect = mux_control_states(priv->mux);
ctlr->bus_num = -1;
ctlr->dev.of_node = spi->dev.of_node;
ctlr->must_async = true;
ret = devm_spi_register_controller(&spi->dev, ctlr);
if (ret)
goto err_put_ctlr;
return 0;
err_put_ctlr:
spi_controller_put(ctlr);
return ret;
}
static const struct spi_device_id spi_mux_id[] = {
{ "spi-mux" },
{ }
};
MODULE_DEVICE_TABLE(spi, spi_mux_id);
static const struct of_device_id spi_mux_of_match[] = {
{ .compatible = "spi-mux" },
{ }
};
MODULE_DEVICE_TABLE(of, spi_mux_of_match);
static struct spi_driver spi_mux_driver = {
.probe = spi_mux_probe,
.driver = {
.name = "spi-mux",
.of_match_table = spi_mux_of_match,
},
.id_table = spi_mux_id,
};
module_spi_driver(spi_mux_driver);
MODULE_DESCRIPTION("SPI multiplexer");
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-mux.c |
// SPDX-License-Identifier: GPL-2.0
//
// Driver for the SPI-NAND mode of Mediatek NAND Flash Interface
//
// Copyright (c) 2022 Chuanhong Guo <[email protected]>
//
// This driver is based on the SPI-NAND mtd driver from Mediatek SDK:
//
// Copyright (C) 2020 MediaTek Inc.
// Author: Weijie Gao <[email protected]>
//
// This controller organizes the page data as several interleaved sectors
// like the following: (sizeof(FDM + ECC) = snf->nfi_cfg.spare_size)
// +---------+------+------+---------+------+------+-----+
// | Sector1 | FDM1 | ECC1 | Sector2 | FDM2 | ECC2 | ... |
// +---------+------+------+---------+------+------+-----+
// With auto-format turned on, DMA only returns this part:
// +---------+---------+-----+
// | Sector1 | Sector2 | ... |
// +---------+---------+-----+
// The FDM data will be filled to the registers, and ECC parity data isn't
// accessible.
// With auto-format off, all ((Sector+FDM+ECC)*nsectors) will be read over DMA
// in its original order shown in the first table. ECC can't be turned on when
// auto-format is off.
//
// However, Linux SPI-NAND driver expects the data returned as:
// +------+-----+
// | Page | OOB |
// +------+-----+
// where the page data is continuously stored instead of interleaved.
// So we assume all instructions matching the page_op template between ECC
// prepare_io_req and finish_io_req are for page cache r/w.
// Here's how this spi-mem driver operates when reading:
// 1. Always set snf->autofmt = true in prepare_io_req (even when ECC is off).
// 2. Perform page ops and let the controller fill the DMA bounce buffer with
// de-interleaved sector data and set FDM registers.
// 3. Return the data as:
// +---------+---------+-----+------+------+-----+
// | Sector1 | Sector2 | ... | FDM1 | FDM2 | ... |
// +---------+---------+-----+------+------+-----+
// 4. For other matching spi_mem ops outside a prepare/finish_io_req pair,
// read the data with auto-format off into the bounce buffer and copy
// needed data to the buffer specified in the request.
//
// Write requests operate in a similar manner.
// As a limitation of this strategy, we won't be able to access any ECC parity
// data at all in Linux.
//
// Here's the bad block mark situation on MTK chips:
// In older chips like mt7622, MTK uses the first FDM byte in the first sector
// as the bad block mark. After de-interleaving, this byte appears at [pagesize]
// in the returned data, which is the BBM position expected by kernel. However,
// the conventional bad block mark is the first byte of the OOB, which is part
// of the last sector data in the interleaved layout. Instead of fixing their
// hardware, MTK decided to address this inconsistency in software. On these
// later chips, the BootROM expects the following:
// 1. The [pagesize] byte on a nand page is used as BBM, which will appear at
// (page_size - (nsectors - 1) * spare_size) in the DMA buffer.
// 2. The original byte stored at that position in the DMA buffer will be stored
// as the first byte of the FDM section in the last sector.
// We can't disagree with the BootROM, so after de-interleaving, we need to
// perform the following swaps in read:
// 1. Store the BBM at [page_size - (nsectors - 1) * spare_size] to [page_size],
// which is the expected BBM position by kernel.
// 2. Store the page data byte at [pagesize + (nsectors-1) * fdm] back to
// [page_size - (nsectors - 1) * spare_size]
// Similarly, when writing, we need to perform swaps in the other direction.
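// Worked example (illustrative geometry, not tied to a specific chip): with a
// 2048+64 page split into four 512-byte sectors, an 8-byte FDM and a 16-byte
// spare area per sector, the kernel-visible BBM ends up at buf[2048], while in
// the DMA bounce buffer it sits at 2048 - 3 * 16 = 2000 and the displaced page
// data byte is kept at 2048 + 3 * 8 = 2072 (first FDM byte of the last sector).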
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/mtd/nand-ecc-mtk.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/mtd/nand.h>
// NFI registers
#define NFI_CNFG 0x000
#define CNFG_OP_MODE_S 12
#define CNFG_OP_MODE_CUST 6
#define CNFG_OP_MODE_PROGRAM 3
#define CNFG_AUTO_FMT_EN BIT(9)
#define CNFG_HW_ECC_EN BIT(8)
#define CNFG_DMA_BURST_EN BIT(2)
#define CNFG_READ_MODE BIT(1)
#define CNFG_DMA_MODE BIT(0)
#define NFI_PAGEFMT 0x0004
#define NFI_SPARE_SIZE_LS_S 16
#define NFI_FDM_ECC_NUM_S 12
#define NFI_FDM_NUM_S 8
#define NFI_SPARE_SIZE_S 4
#define NFI_SEC_SEL_512 BIT(2)
#define NFI_PAGE_SIZE_S 0
#define NFI_PAGE_SIZE_512_2K 0
#define NFI_PAGE_SIZE_2K_4K 1
#define NFI_PAGE_SIZE_4K_8K 2
#define NFI_PAGE_SIZE_8K_16K 3
#define NFI_CON 0x008
#define CON_SEC_NUM_S 12
#define CON_BWR BIT(9)
#define CON_BRD BIT(8)
#define CON_NFI_RST BIT(1)
#define CON_FIFO_FLUSH BIT(0)
#define NFI_INTR_EN 0x010
#define NFI_INTR_STA 0x014
#define NFI_IRQ_INTR_EN BIT(31)
#define NFI_IRQ_CUS_READ BIT(8)
#define NFI_IRQ_CUS_PG BIT(7)
#define NFI_CMD 0x020
#define NFI_CMD_DUMMY_READ 0x00
#define NFI_CMD_DUMMY_WRITE 0x80
#define NFI_STRDATA 0x040
#define STR_DATA BIT(0)
#define NFI_STA 0x060
#define NFI_NAND_FSM_7622 GENMASK(28, 24)
#define NFI_NAND_FSM_7986 GENMASK(29, 23)
#define NFI_FSM GENMASK(19, 16)
#define READ_EMPTY BIT(12)
#define NFI_FIFOSTA 0x064
#define FIFO_WR_REMAIN_S 8
#define FIFO_RD_REMAIN_S 0
#define NFI_ADDRCNTR 0x070
#define SEC_CNTR GENMASK(16, 12)
#define SEC_CNTR_S 12
#define NFI_SEC_CNTR(val) (((val)&SEC_CNTR) >> SEC_CNTR_S)
#define NFI_STRADDR 0x080
#define NFI_BYTELEN 0x084
#define BUS_SEC_CNTR(val) (((val)&SEC_CNTR) >> SEC_CNTR_S)
#define NFI_FDM0L 0x0a0
#define NFI_FDM0M 0x0a4
#define NFI_FDML(n) (NFI_FDM0L + (n)*8)
#define NFI_FDMM(n) (NFI_FDM0M + (n)*8)
#define NFI_DEBUG_CON1 0x220
#define WBUF_EN BIT(2)
#define NFI_MASTERSTA 0x224
#define MAS_ADDR GENMASK(11, 9)
#define MAS_RD GENMASK(8, 6)
#define MAS_WR GENMASK(5, 3)
#define MAS_RDDLY GENMASK(2, 0)
#define NFI_MASTERSTA_MASK_7622 (MAS_ADDR | MAS_RD | MAS_WR | MAS_RDDLY)
#define NFI_MASTERSTA_MASK_7986 3
// SNFI registers
#define SNF_MAC_CTL 0x500
#define MAC_XIO_SEL BIT(4)
#define SF_MAC_EN BIT(3)
#define SF_TRIG BIT(2)
#define WIP_READY BIT(1)
#define WIP BIT(0)
#define SNF_MAC_OUTL 0x504
#define SNF_MAC_INL 0x508
#define SNF_RD_CTL2 0x510
#define DATA_READ_DUMMY_S 8
#define DATA_READ_MAX_DUMMY 0xf
#define DATA_READ_CMD_S 0
#define SNF_RD_CTL3 0x514
#define SNF_PG_CTL1 0x524
#define PG_LOAD_CMD_S 8
#define SNF_PG_CTL2 0x528
#define SNF_MISC_CTL 0x538
#define SW_RST BIT(28)
#define FIFO_RD_LTC_S 25
#define PG_LOAD_X4_EN BIT(20)
#define DATA_READ_MODE_S 16
#define DATA_READ_MODE GENMASK(18, 16)
#define DATA_READ_MODE_X1 0
#define DATA_READ_MODE_X2 1
#define DATA_READ_MODE_X4 2
#define DATA_READ_MODE_DUAL 5
#define DATA_READ_MODE_QUAD 6
#define DATA_READ_LATCH_LAT GENMASK(9, 8)
#define DATA_READ_LATCH_LAT_S 8
#define PG_LOAD_CUSTOM_EN BIT(7)
#define DATARD_CUSTOM_EN BIT(6)
#define CS_DESELECT_CYC_S 0
#define SNF_MISC_CTL2 0x53c
#define PROGRAM_LOAD_BYTE_NUM_S 16
#define READ_DATA_BYTE_NUM_S 11
#define SNF_DLY_CTL3 0x548
#define SFCK_SAM_DLY_S 0
#define SFCK_SAM_DLY GENMASK(5, 0)
#define SFCK_SAM_DLY_TOTAL 9
#define SFCK_SAM_DLY_RANGE 47
#define SNF_STA_CTL1 0x550
#define CUS_PG_DONE BIT(28)
#define CUS_READ_DONE BIT(27)
#define SPI_STATE_S 0
#define SPI_STATE GENMASK(3, 0)
#define SNF_CFG 0x55c
#define SPI_MODE BIT(0)
#define SNF_GPRAM 0x800
#define SNF_GPRAM_SIZE 0xa0
#define SNFI_POLL_INTERVAL 1000000
static const u8 mt7622_spare_sizes[] = { 16, 26, 27, 28 };
static const u8 mt7986_spare_sizes[] = {
16, 26, 27, 28, 32, 36, 40, 44, 48, 49, 50, 51, 52, 62, 61, 63, 64, 67,
74
};
struct mtk_snand_caps {
u16 sector_size;
u16 max_sectors;
u16 fdm_size;
u16 fdm_ecc_size;
u16 fifo_size;
bool bbm_swap;
bool empty_page_check;
u32 mastersta_mask;
u32 nandfsm_mask;
const u8 *spare_sizes;
u32 num_spare_size;
};
static const struct mtk_snand_caps mt7622_snand_caps = {
.sector_size = 512,
.max_sectors = 8,
.fdm_size = 8,
.fdm_ecc_size = 1,
.fifo_size = 32,
.bbm_swap = false,
.empty_page_check = false,
.mastersta_mask = NFI_MASTERSTA_MASK_7622,
.nandfsm_mask = NFI_NAND_FSM_7622,
.spare_sizes = mt7622_spare_sizes,
.num_spare_size = ARRAY_SIZE(mt7622_spare_sizes)
};
static const struct mtk_snand_caps mt7629_snand_caps = {
.sector_size = 512,
.max_sectors = 8,
.fdm_size = 8,
.fdm_ecc_size = 1,
.fifo_size = 32,
.bbm_swap = true,
.empty_page_check = false,
.mastersta_mask = NFI_MASTERSTA_MASK_7622,
.nandfsm_mask = NFI_NAND_FSM_7622,
.spare_sizes = mt7622_spare_sizes,
.num_spare_size = ARRAY_SIZE(mt7622_spare_sizes)
};
static const struct mtk_snand_caps mt7986_snand_caps = {
.sector_size = 1024,
.max_sectors = 8,
.fdm_size = 8,
.fdm_ecc_size = 1,
.fifo_size = 64,
.bbm_swap = true,
.empty_page_check = true,
.mastersta_mask = NFI_MASTERSTA_MASK_7986,
.nandfsm_mask = NFI_NAND_FSM_7986,
.spare_sizes = mt7986_spare_sizes,
.num_spare_size = ARRAY_SIZE(mt7986_spare_sizes)
};
struct mtk_snand_conf {
size_t page_size;
size_t oob_size;
u8 nsectors;
u8 spare_size;
};
struct mtk_snand {
struct spi_controller *ctlr;
struct device *dev;
struct clk *nfi_clk;
struct clk *pad_clk;
struct clk *nfi_hclk;
void __iomem *nfi_base;
int irq;
struct completion op_done;
const struct mtk_snand_caps *caps;
struct mtk_ecc_config *ecc_cfg;
struct mtk_ecc *ecc;
struct mtk_snand_conf nfi_cfg;
struct mtk_ecc_stats ecc_stats;
struct nand_ecc_engine ecc_eng;
bool autofmt;
u8 *buf;
size_t buf_len;
};
static struct mtk_snand *nand_to_mtk_snand(struct nand_device *nand)
{
struct nand_ecc_engine *eng = nand->ecc.engine;
return container_of(eng, struct mtk_snand, ecc_eng);
}
static inline int snand_prepare_bouncebuf(struct mtk_snand *snf, size_t size)
{
if (snf->buf_len >= size)
return 0;
kfree(snf->buf);
snf->buf = kmalloc(size, GFP_KERNEL);
if (!snf->buf)
return -ENOMEM;
snf->buf_len = size;
memset(snf->buf, 0xff, snf->buf_len);
return 0;
}
static inline u32 nfi_read32(struct mtk_snand *snf, u32 reg)
{
return readl(snf->nfi_base + reg);
}
static inline void nfi_write32(struct mtk_snand *snf, u32 reg, u32 val)
{
writel(val, snf->nfi_base + reg);
}
static inline void nfi_write16(struct mtk_snand *snf, u32 reg, u16 val)
{
writew(val, snf->nfi_base + reg);
}
static inline void nfi_rmw32(struct mtk_snand *snf, u32 reg, u32 clr, u32 set)
{
u32 val;
val = readl(snf->nfi_base + reg);
val &= ~clr;
val |= set;
writel(val, snf->nfi_base + reg);
}
static void nfi_read_data(struct mtk_snand *snf, u32 reg, u8 *data, u32 len)
{
u32 i, val = 0, es = sizeof(u32);
for (i = reg; i < reg + len; i++) {
if (i == reg || i % es == 0)
val = nfi_read32(snf, i & ~(es - 1));
*data++ = (u8)(val >> (8 * (i % es)));
}
}
static int mtk_nfi_reset(struct mtk_snand *snf)
{
u32 val, fifo_mask;
int ret;
nfi_write32(snf, NFI_CON, CON_FIFO_FLUSH | CON_NFI_RST);
ret = readw_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
!(val & snf->caps->mastersta_mask), 0,
SNFI_POLL_INTERVAL);
if (ret) {
dev_err(snf->dev, "NFI master is still busy after reset\n");
return ret;
}
ret = readl_poll_timeout(snf->nfi_base + NFI_STA, val,
!(val & (NFI_FSM | snf->caps->nandfsm_mask)), 0,
SNFI_POLL_INTERVAL);
if (ret) {
dev_err(snf->dev, "Failed to reset NFI\n");
return ret;
}
fifo_mask = ((snf->caps->fifo_size - 1) << FIFO_RD_REMAIN_S) |
((snf->caps->fifo_size - 1) << FIFO_WR_REMAIN_S);
ret = readw_poll_timeout(snf->nfi_base + NFI_FIFOSTA, val,
!(val & fifo_mask), 0, SNFI_POLL_INTERVAL);
if (ret) {
dev_err(snf->dev, "NFI FIFOs are not empty\n");
return ret;
}
return 0;
}
static int mtk_snand_mac_reset(struct mtk_snand *snf)
{
int ret;
u32 val;
nfi_rmw32(snf, SNF_MISC_CTL, 0, SW_RST);
ret = readl_poll_timeout(snf->nfi_base + SNF_STA_CTL1, val,
!(val & SPI_STATE), 0, SNFI_POLL_INTERVAL);
if (ret)
dev_err(snf->dev, "Failed to reset SNFI MAC\n");
nfi_write32(snf, SNF_MISC_CTL,
(2 << FIFO_RD_LTC_S) | (10 << CS_DESELECT_CYC_S));
return ret;
}
static int mtk_snand_mac_trigger(struct mtk_snand *snf, u32 outlen, u32 inlen)
{
int ret;
u32 val;
nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN);
nfi_write32(snf, SNF_MAC_OUTL, outlen);
nfi_write32(snf, SNF_MAC_INL, inlen);
nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN | SF_TRIG);
ret = readl_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val,
val & WIP_READY, 0, SNFI_POLL_INTERVAL);
if (ret) {
dev_err(snf->dev, "Timed out waiting for WIP_READY\n");
goto cleanup;
}
ret = readl_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val, !(val & WIP),
0, SNFI_POLL_INTERVAL);
if (ret)
dev_err(snf->dev, "Timed out waiting for WIP cleared\n");
cleanup:
nfi_write32(snf, SNF_MAC_CTL, 0);
return ret;
}
static int mtk_snand_mac_io(struct mtk_snand *snf, const struct spi_mem_op *op)
{
u32 rx_len = 0;
u32 reg_offs = 0;
u32 val = 0;
const u8 *tx_buf = NULL;
u8 *rx_buf = NULL;
int i, ret;
u8 b;
if (op->data.dir == SPI_MEM_DATA_IN) {
rx_len = op->data.nbytes;
rx_buf = op->data.buf.in;
} else {
tx_buf = op->data.buf.out;
}
mtk_snand_mac_reset(snf);
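	/*
	 * Serialize the op byte by byte into the 32-bit GPRAM words below:
	 * command first, then address (most significant byte first), dummy
	 * bytes and finally any TX data; bytes are packed little-endian
	 * within each word.
	 */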
for (i = 0; i < op->cmd.nbytes; i++, reg_offs++) {
b = (op->cmd.opcode >> ((op->cmd.nbytes - i - 1) * 8)) & 0xff;
val |= b << (8 * (reg_offs % 4));
if (reg_offs % 4 == 3) {
nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val);
val = 0;
}
}
for (i = 0; i < op->addr.nbytes; i++, reg_offs++) {
b = (op->addr.val >> ((op->addr.nbytes - i - 1) * 8)) & 0xff;
val |= b << (8 * (reg_offs % 4));
if (reg_offs % 4 == 3) {
nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val);
val = 0;
}
}
for (i = 0; i < op->dummy.nbytes; i++, reg_offs++) {
if (reg_offs % 4 == 3) {
nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val);
val = 0;
}
}
if (op->data.dir == SPI_MEM_DATA_OUT) {
for (i = 0; i < op->data.nbytes; i++, reg_offs++) {
val |= tx_buf[i] << (8 * (reg_offs % 4));
if (reg_offs % 4 == 3) {
nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val);
val = 0;
}
}
}
if (reg_offs % 4)
nfi_write32(snf, SNF_GPRAM + (reg_offs & ~3), val);
for (i = 0; i < reg_offs; i += 4)
dev_dbg(snf->dev, "%d: %08X", i,
nfi_read32(snf, SNF_GPRAM + i));
dev_dbg(snf->dev, "SNF TX: %u RX: %u", reg_offs, rx_len);
ret = mtk_snand_mac_trigger(snf, reg_offs, rx_len);
if (ret)
return ret;
if (!rx_len)
return 0;
nfi_read_data(snf, SNF_GPRAM + reg_offs, rx_buf, rx_len);
return 0;
}
static int mtk_snand_setup_pagefmt(struct mtk_snand *snf, u32 page_size,
u32 oob_size)
{
int spare_idx = -1;
u32 spare_size, spare_size_shift, pagesize_idx;
u32 sector_size_512;
u8 nsectors;
int i;
// skip if it's already configured as required.
if (snf->nfi_cfg.page_size == page_size &&
snf->nfi_cfg.oob_size == oob_size)
return 0;
nsectors = page_size / snf->caps->sector_size;
if (nsectors > snf->caps->max_sectors) {
dev_err(snf->dev, "too many sectors required.\n");
goto err;
}
if (snf->caps->sector_size == 512) {
sector_size_512 = NFI_SEC_SEL_512;
spare_size_shift = NFI_SPARE_SIZE_S;
} else {
sector_size_512 = 0;
spare_size_shift = NFI_SPARE_SIZE_LS_S;
}
switch (page_size) {
case SZ_512:
pagesize_idx = NFI_PAGE_SIZE_512_2K;
break;
case SZ_2K:
if (snf->caps->sector_size == 512)
pagesize_idx = NFI_PAGE_SIZE_2K_4K;
else
pagesize_idx = NFI_PAGE_SIZE_512_2K;
break;
case SZ_4K:
if (snf->caps->sector_size == 512)
pagesize_idx = NFI_PAGE_SIZE_4K_8K;
else
pagesize_idx = NFI_PAGE_SIZE_2K_4K;
break;
case SZ_8K:
if (snf->caps->sector_size == 512)
pagesize_idx = NFI_PAGE_SIZE_8K_16K;
else
pagesize_idx = NFI_PAGE_SIZE_4K_8K;
break;
case SZ_16K:
pagesize_idx = NFI_PAGE_SIZE_8K_16K;
break;
default:
dev_err(snf->dev, "unsupported page size.\n");
goto err;
}
spare_size = oob_size / nsectors;
// If we're using the 1KB sector size, HW will automatically double the
// spare size. We should only use half of the value in this case.
if (snf->caps->sector_size == 1024)
spare_size /= 2;
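	// e.g. (illustrative): a 2048 + 64 page with 512-byte sectors yields
	// 64 / 4 = 16 bytes of spare per sector, which matches the smallest
	// entry of the spare size table directly.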
for (i = snf->caps->num_spare_size - 1; i >= 0; i--) {
if (snf->caps->spare_sizes[i] <= spare_size) {
spare_size = snf->caps->spare_sizes[i];
if (snf->caps->sector_size == 1024)
spare_size *= 2;
spare_idx = i;
break;
}
}
if (spare_idx < 0) {
dev_err(snf->dev, "unsupported spare size: %u\n", spare_size);
goto err;
}
nfi_write32(snf, NFI_PAGEFMT,
(snf->caps->fdm_ecc_size << NFI_FDM_ECC_NUM_S) |
(snf->caps->fdm_size << NFI_FDM_NUM_S) |
(spare_idx << spare_size_shift) |
(pagesize_idx << NFI_PAGE_SIZE_S) |
sector_size_512);
snf->nfi_cfg.page_size = page_size;
snf->nfi_cfg.oob_size = oob_size;
snf->nfi_cfg.nsectors = nsectors;
snf->nfi_cfg.spare_size = spare_size;
dev_dbg(snf->dev, "page format: (%u + %u) * %u\n",
snf->caps->sector_size, spare_size, nsectors);
return snand_prepare_bouncebuf(snf, page_size + oob_size);
err:
dev_err(snf->dev, "page size %u + %u is not supported\n", page_size,
oob_size);
return -EOPNOTSUPP;
}
static int mtk_snand_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobecc)
{
// ECC area is not accessible
return -ERANGE;
}
static int mtk_snand_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobfree)
{
struct nand_device *nand = mtd_to_nanddev(mtd);
struct mtk_snand *ms = nand_to_mtk_snand(nand);
if (section >= ms->nfi_cfg.nsectors)
return -ERANGE;
oobfree->length = ms->caps->fdm_size - 1;
oobfree->offset = section * ms->caps->fdm_size + 1;
return 0;
}
static const struct mtd_ooblayout_ops mtk_snand_ooblayout = {
.ecc = mtk_snand_ooblayout_ecc,
.free = mtk_snand_ooblayout_free,
};
static int mtk_snand_ecc_init_ctx(struct nand_device *nand)
{
struct mtk_snand *snf = nand_to_mtk_snand(nand);
struct nand_ecc_props *conf = &nand->ecc.ctx.conf;
struct nand_ecc_props *reqs = &nand->ecc.requirements;
struct nand_ecc_props *user = &nand->ecc.user_conf;
struct mtd_info *mtd = nanddev_to_mtd(nand);
int step_size = 0, strength = 0, desired_correction = 0, steps;
bool ecc_user = false;
int ret;
u32 parity_bits, max_ecc_bytes;
struct mtk_ecc_config *ecc_cfg;
ret = mtk_snand_setup_pagefmt(snf, nand->memorg.pagesize,
nand->memorg.oobsize);
if (ret)
return ret;
ecc_cfg = kzalloc(sizeof(*ecc_cfg), GFP_KERNEL);
if (!ecc_cfg)
return -ENOMEM;
nand->ecc.ctx.priv = ecc_cfg;
if (user->step_size && user->strength) {
step_size = user->step_size;
strength = user->strength;
ecc_user = true;
} else if (reqs->step_size && reqs->strength) {
step_size = reqs->step_size;
strength = reqs->strength;
}
if (step_size && strength) {
steps = mtd->writesize / step_size;
desired_correction = steps * strength;
strength = desired_correction / snf->nfi_cfg.nsectors;
}
ecc_cfg->mode = ECC_NFI_MODE;
ecc_cfg->sectors = snf->nfi_cfg.nsectors;
ecc_cfg->len = snf->caps->sector_size + snf->caps->fdm_ecc_size;
// calculate the max possible strength under current page format
parity_bits = mtk_ecc_get_parity_bits(snf->ecc);
max_ecc_bytes = snf->nfi_cfg.spare_size - snf->caps->fdm_size;
ecc_cfg->strength = max_ecc_bytes * 8 / parity_bits;
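	// e.g. (illustrative, SoC-dependent numbers): with 28 bytes of spare
	// per sector, an 8-byte FDM and assuming 14 parity bits per bit of
	// correction strength, this gives (28 - 8) * 8 / 14 = 11 bits of
	// correction before being rounded to a strength the engine supports.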
mtk_ecc_adjust_strength(snf->ecc, &ecc_cfg->strength);
// if there's a user requested strength, find the minimum strength that
// meets the requirement. Otherwise use the maximum strength which is
// expected by BootROM.
if (ecc_user && strength) {
u32 s_next = ecc_cfg->strength - 1;
while (1) {
mtk_ecc_adjust_strength(snf->ecc, &s_next);
if (s_next >= ecc_cfg->strength)
break;
if (s_next < strength)
break;
s_next = ecc_cfg->strength - 1;
}
}
mtd_set_ooblayout(mtd, &mtk_snand_ooblayout);
conf->step_size = snf->caps->sector_size;
conf->strength = ecc_cfg->strength;
if (ecc_cfg->strength < strength)
dev_warn(snf->dev, "unable to fulfill ECC of %u bits.\n",
strength);
dev_info(snf->dev, "ECC strength: %u bits per %u bytes\n",
ecc_cfg->strength, snf->caps->sector_size);
return 0;
}
static void mtk_snand_ecc_cleanup_ctx(struct nand_device *nand)
{
struct mtk_ecc_config *ecc_cfg = nand_to_ecc_ctx(nand);
kfree(ecc_cfg);
}
static int mtk_snand_ecc_prepare_io_req(struct nand_device *nand,
struct nand_page_io_req *req)
{
struct mtk_snand *snf = nand_to_mtk_snand(nand);
struct mtk_ecc_config *ecc_cfg = nand_to_ecc_ctx(nand);
int ret;
ret = mtk_snand_setup_pagefmt(snf, nand->memorg.pagesize,
nand->memorg.oobsize);
if (ret)
return ret;
snf->autofmt = true;
snf->ecc_cfg = ecc_cfg;
return 0;
}
static int mtk_snand_ecc_finish_io_req(struct nand_device *nand,
struct nand_page_io_req *req)
{
struct mtk_snand *snf = nand_to_mtk_snand(nand);
struct mtd_info *mtd = nanddev_to_mtd(nand);
snf->ecc_cfg = NULL;
snf->autofmt = false;
if ((req->mode == MTD_OPS_RAW) || (req->type != NAND_PAGE_READ))
return 0;
if (snf->ecc_stats.failed)
mtd->ecc_stats.failed += snf->ecc_stats.failed;
mtd->ecc_stats.corrected += snf->ecc_stats.corrected;
return snf->ecc_stats.failed ? -EBADMSG : snf->ecc_stats.bitflips;
}
static struct nand_ecc_engine_ops mtk_snfi_ecc_engine_ops = {
.init_ctx = mtk_snand_ecc_init_ctx,
.cleanup_ctx = mtk_snand_ecc_cleanup_ctx,
.prepare_io_req = mtk_snand_ecc_prepare_io_req,
.finish_io_req = mtk_snand_ecc_finish_io_req,
};
static void mtk_snand_read_fdm(struct mtk_snand *snf, u8 *buf)
{
u32 vall, valm;
u8 *oobptr = buf;
int i, j;
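	/*
	 * Each sector's FDM bytes are spread across two 32-bit registers:
	 * bytes 0-3 come from FDML and bytes 4-7 from FDMM, least
	 * significant byte first, as unpacked below.
	 */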
for (i = 0; i < snf->nfi_cfg.nsectors; i++) {
vall = nfi_read32(snf, NFI_FDML(i));
valm = nfi_read32(snf, NFI_FDMM(i));
for (j = 0; j < snf->caps->fdm_size; j++)
oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8);
oobptr += snf->caps->fdm_size;
}
}
static void mtk_snand_write_fdm(struct mtk_snand *snf, const u8 *buf)
{
u32 fdm_size = snf->caps->fdm_size;
const u8 *oobptr = buf;
u32 vall, valm;
int i, j;
for (i = 0; i < snf->nfi_cfg.nsectors; i++) {
vall = 0;
valm = 0;
for (j = 0; j < 8; j++) {
if (j < 4)
vall |= (j < fdm_size ? oobptr[j] : 0xff)
<< (j * 8);
else
valm |= (j < fdm_size ? oobptr[j] : 0xff)
<< ((j - 4) * 8);
}
nfi_write32(snf, NFI_FDML(i), vall);
nfi_write32(snf, NFI_FDMM(i), valm);
oobptr += fdm_size;
}
}
static void mtk_snand_bm_swap(struct mtk_snand *snf, u8 *buf)
{
u32 buf_bbm_pos, fdm_bbm_pos;
if (!snf->caps->bbm_swap || snf->nfi_cfg.nsectors == 1)
return;
// swap [pagesize] byte on nand with the first fdm byte
// in the last sector.
buf_bbm_pos = snf->nfi_cfg.page_size -
(snf->nfi_cfg.nsectors - 1) * snf->nfi_cfg.spare_size;
fdm_bbm_pos = snf->nfi_cfg.page_size +
(snf->nfi_cfg.nsectors - 1) * snf->caps->fdm_size;
swap(snf->buf[fdm_bbm_pos], buf[buf_bbm_pos]);
}
static void mtk_snand_fdm_bm_swap(struct mtk_snand *snf)
{
u32 fdm_bbm_pos1, fdm_bbm_pos2;
if (!snf->caps->bbm_swap || snf->nfi_cfg.nsectors == 1)
return;
// swap the first fdm byte in the first and the last sector.
fdm_bbm_pos1 = snf->nfi_cfg.page_size;
fdm_bbm_pos2 = snf->nfi_cfg.page_size +
(snf->nfi_cfg.nsectors - 1) * snf->caps->fdm_size;
swap(snf->buf[fdm_bbm_pos1], snf->buf[fdm_bbm_pos2]);
}
static int mtk_snand_read_page_cache(struct mtk_snand *snf,
const struct spi_mem_op *op)
{
u8 *buf = snf->buf;
u8 *buf_fdm = buf + snf->nfi_cfg.page_size;
// the address part to be sent by the controller
u32 op_addr = op->addr.val;
// where to start copying data from bounce buffer
u32 rd_offset = 0;
u32 dummy_clk = (op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth);
u32 op_mode = 0;
u32 dma_len = snf->buf_len;
int ret = 0;
u32 rd_mode, rd_bytes, val;
dma_addr_t buf_dma;
if (snf->autofmt) {
u32 last_bit;
u32 mask;
dma_len = snf->nfi_cfg.page_size;
op_mode = CNFG_AUTO_FMT_EN;
if (op->data.ecc)
op_mode |= CNFG_HW_ECC_EN;
// extract the plane bit:
// Find the highest bit set in (pagesize+oobsize).
		// Bits higher than that in op->addr are kept and sent over SPI.
// Lower bits are used as an offset for copying data from DMA
// bounce buffer.
last_bit = fls(snf->nfi_cfg.page_size + snf->nfi_cfg.oob_size);
mask = (1 << last_bit) - 1;
rd_offset = op_addr & mask;
op_addr &= ~mask;
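		// e.g. (illustrative): for a 2048 + 64 page, fls(2112) = 12 so
		// mask = 0xfff; an op address of 0x1040 keeps 0x1000 as the
		// address sent over SPI (the plane bit) and uses 0x40 as the
		// copy offset into the bounce buffer.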
// check if we can dma to the caller memory
if (rd_offset == 0 && op->data.nbytes >= snf->nfi_cfg.page_size)
buf = op->data.buf.in;
}
mtk_snand_mac_reset(snf);
mtk_nfi_reset(snf);
// command and dummy cycles
nfi_write32(snf, SNF_RD_CTL2,
(dummy_clk << DATA_READ_DUMMY_S) |
(op->cmd.opcode << DATA_READ_CMD_S));
// read address
nfi_write32(snf, SNF_RD_CTL3, op_addr);
// Set read op_mode
if (op->data.buswidth == 4)
rd_mode = op->addr.buswidth == 4 ? DATA_READ_MODE_QUAD :
DATA_READ_MODE_X4;
else if (op->data.buswidth == 2)
rd_mode = op->addr.buswidth == 2 ? DATA_READ_MODE_DUAL :
DATA_READ_MODE_X2;
else
rd_mode = DATA_READ_MODE_X1;
rd_mode <<= DATA_READ_MODE_S;
nfi_rmw32(snf, SNF_MISC_CTL, DATA_READ_MODE,
rd_mode | DATARD_CUSTOM_EN);
// Set bytes to read
rd_bytes = (snf->nfi_cfg.spare_size + snf->caps->sector_size) *
snf->nfi_cfg.nsectors;
nfi_write32(snf, SNF_MISC_CTL2,
(rd_bytes << PROGRAM_LOAD_BYTE_NUM_S) | rd_bytes);
// NFI read prepare
nfi_write16(snf, NFI_CNFG,
(CNFG_OP_MODE_CUST << CNFG_OP_MODE_S) | CNFG_DMA_BURST_EN |
CNFG_READ_MODE | CNFG_DMA_MODE | op_mode);
nfi_write32(snf, NFI_CON, (snf->nfi_cfg.nsectors << CON_SEC_NUM_S));
buf_dma = dma_map_single(snf->dev, buf, dma_len, DMA_FROM_DEVICE);
ret = dma_mapping_error(snf->dev, buf_dma);
if (ret) {
dev_err(snf->dev, "DMA mapping failed.\n");
goto cleanup;
}
nfi_write32(snf, NFI_STRADDR, buf_dma);
if (op->data.ecc) {
snf->ecc_cfg->op = ECC_DECODE;
ret = mtk_ecc_enable(snf->ecc, snf->ecc_cfg);
if (ret)
goto cleanup_dma;
}
// Prepare for custom read interrupt
nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_READ);
reinit_completion(&snf->op_done);
// Trigger NFI into custom mode
nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_READ);
// Start DMA read
nfi_rmw32(snf, NFI_CON, 0, CON_BRD);
nfi_write16(snf, NFI_STRDATA, STR_DATA);
if (!wait_for_completion_timeout(
&snf->op_done, usecs_to_jiffies(SNFI_POLL_INTERVAL))) {
dev_err(snf->dev, "DMA timed out for reading from cache.\n");
ret = -ETIMEDOUT;
goto cleanup;
}
// Wait for BUS_SEC_CNTR returning expected value
ret = readl_poll_timeout(snf->nfi_base + NFI_BYTELEN, val,
BUS_SEC_CNTR(val) >= snf->nfi_cfg.nsectors, 0,
SNFI_POLL_INTERVAL);
if (ret) {
dev_err(snf->dev, "Timed out waiting for BUS_SEC_CNTR\n");
goto cleanup2;
}
// Wait for bus becoming idle
ret = readl_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
!(val & snf->caps->mastersta_mask), 0,
SNFI_POLL_INTERVAL);
if (ret) {
dev_err(snf->dev, "Timed out waiting for bus becoming idle\n");
goto cleanup2;
}
if (op->data.ecc) {
ret = mtk_ecc_wait_done(snf->ecc, ECC_DECODE);
if (ret) {
dev_err(snf->dev, "wait ecc done timeout\n");
goto cleanup2;
}
// save status before disabling ecc
mtk_ecc_get_stats(snf->ecc, &snf->ecc_stats,
snf->nfi_cfg.nsectors);
}
dma_unmap_single(snf->dev, buf_dma, dma_len, DMA_FROM_DEVICE);
if (snf->autofmt) {
mtk_snand_read_fdm(snf, buf_fdm);
if (snf->caps->bbm_swap) {
mtk_snand_bm_swap(snf, buf);
mtk_snand_fdm_bm_swap(snf);
}
}
// copy data back
if (nfi_read32(snf, NFI_STA) & READ_EMPTY) {
memset(op->data.buf.in, 0xff, op->data.nbytes);
snf->ecc_stats.bitflips = 0;
snf->ecc_stats.failed = 0;
snf->ecc_stats.corrected = 0;
} else {
if (buf == op->data.buf.in) {
u32 cap_len = snf->buf_len - snf->nfi_cfg.page_size;
u32 req_left = op->data.nbytes - snf->nfi_cfg.page_size;
if (req_left)
memcpy(op->data.buf.in + snf->nfi_cfg.page_size,
buf_fdm,
cap_len < req_left ? cap_len : req_left);
} else if (rd_offset < snf->buf_len) {
u32 cap_len = snf->buf_len - rd_offset;
if (op->data.nbytes < cap_len)
cap_len = op->data.nbytes;
memcpy(op->data.buf.in, snf->buf + rd_offset, cap_len);
}
}
cleanup2:
if (op->data.ecc)
mtk_ecc_disable(snf->ecc);
cleanup_dma:
// unmap dma only if any error happens. (otherwise it's done before
// data copying)
if (ret)
dma_unmap_single(snf->dev, buf_dma, dma_len, DMA_FROM_DEVICE);
cleanup:
// Stop read
nfi_write32(snf, NFI_CON, 0);
nfi_write16(snf, NFI_CNFG, 0);
// Clear SNF done flag
nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_READ_DONE);
nfi_write32(snf, SNF_STA_CTL1, 0);
// Disable interrupt
nfi_read32(snf, NFI_INTR_STA);
nfi_write32(snf, NFI_INTR_EN, 0);
nfi_rmw32(snf, SNF_MISC_CTL, DATARD_CUSTOM_EN, 0);
return ret;
}
static int mtk_snand_write_page_cache(struct mtk_snand *snf,
const struct spi_mem_op *op)
{
// the address part to be sent by the controller
u32 op_addr = op->addr.val;
// where to start copying data from bounce buffer
u32 wr_offset = 0;
u32 op_mode = 0;
int ret = 0;
u32 wr_mode = 0;
u32 dma_len = snf->buf_len;
u32 wr_bytes, val;
size_t cap_len;
dma_addr_t buf_dma;
if (snf->autofmt) {
u32 last_bit;
u32 mask;
dma_len = snf->nfi_cfg.page_size;
op_mode = CNFG_AUTO_FMT_EN;
if (op->data.ecc)
op_mode |= CNFG_HW_ECC_EN;
last_bit = fls(snf->nfi_cfg.page_size + snf->nfi_cfg.oob_size);
mask = (1 << last_bit) - 1;
wr_offset = op_addr & mask;
op_addr &= ~mask;
}
mtk_snand_mac_reset(snf);
mtk_nfi_reset(snf);
if (wr_offset)
memset(snf->buf, 0xff, wr_offset);
cap_len = snf->buf_len - wr_offset;
if (op->data.nbytes < cap_len)
cap_len = op->data.nbytes;
memcpy(snf->buf + wr_offset, op->data.buf.out, cap_len);
if (snf->autofmt) {
if (snf->caps->bbm_swap) {
mtk_snand_fdm_bm_swap(snf);
mtk_snand_bm_swap(snf, snf->buf);
}
mtk_snand_write_fdm(snf, snf->buf + snf->nfi_cfg.page_size);
}
// Command
nfi_write32(snf, SNF_PG_CTL1, (op->cmd.opcode << PG_LOAD_CMD_S));
// write address
nfi_write32(snf, SNF_PG_CTL2, op_addr);
// Set read op_mode
if (op->data.buswidth == 4)
wr_mode = PG_LOAD_X4_EN;
nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_X4_EN,
wr_mode | PG_LOAD_CUSTOM_EN);
// Set bytes to write
wr_bytes = (snf->nfi_cfg.spare_size + snf->caps->sector_size) *
snf->nfi_cfg.nsectors;
nfi_write32(snf, SNF_MISC_CTL2,
(wr_bytes << PROGRAM_LOAD_BYTE_NUM_S) | wr_bytes);
// NFI write prepare
nfi_write16(snf, NFI_CNFG,
(CNFG_OP_MODE_PROGRAM << CNFG_OP_MODE_S) |
CNFG_DMA_BURST_EN | CNFG_DMA_MODE | op_mode);
nfi_write32(snf, NFI_CON, (snf->nfi_cfg.nsectors << CON_SEC_NUM_S));
buf_dma = dma_map_single(snf->dev, snf->buf, dma_len, DMA_TO_DEVICE);
ret = dma_mapping_error(snf->dev, buf_dma);
if (ret) {
dev_err(snf->dev, "DMA mapping failed.\n");
goto cleanup;
}
nfi_write32(snf, NFI_STRADDR, buf_dma);
if (op->data.ecc) {
snf->ecc_cfg->op = ECC_ENCODE;
ret = mtk_ecc_enable(snf->ecc, snf->ecc_cfg);
if (ret)
goto cleanup_dma;
}
// Prepare for custom write interrupt
nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_PG);
reinit_completion(&snf->op_done);
// Trigger NFI into custom mode
nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_WRITE);
// Start DMA write
nfi_rmw32(snf, NFI_CON, 0, CON_BWR);
nfi_write16(snf, NFI_STRDATA, STR_DATA);
if (!wait_for_completion_timeout(
&snf->op_done, usecs_to_jiffies(SNFI_POLL_INTERVAL))) {
dev_err(snf->dev, "DMA timed out for program load.\n");
ret = -ETIMEDOUT;
goto cleanup_ecc;
}
// Wait for NFI_SEC_CNTR returning expected value
ret = readl_poll_timeout(snf->nfi_base + NFI_ADDRCNTR, val,
NFI_SEC_CNTR(val) >= snf->nfi_cfg.nsectors, 0,
SNFI_POLL_INTERVAL);
if (ret)
dev_err(snf->dev, "Timed out waiting for NFI_SEC_CNTR\n");
cleanup_ecc:
if (op->data.ecc)
mtk_ecc_disable(snf->ecc);
cleanup_dma:
dma_unmap_single(snf->dev, buf_dma, dma_len, DMA_TO_DEVICE);
cleanup:
// Stop write
nfi_write32(snf, NFI_CON, 0);
nfi_write16(snf, NFI_CNFG, 0);
// Clear SNF done flag
nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_PG_DONE);
nfi_write32(snf, SNF_STA_CTL1, 0);
// Disable interrupt
nfi_read32(snf, NFI_INTR_STA);
nfi_write32(snf, NFI_INTR_EN, 0);
nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_CUSTOM_EN, 0);
return ret;
}
/**
* mtk_snand_is_page_ops() - check if the op is a controller supported page op.
 * @op: spi-mem op to check
*
* Check whether op can be executed with read_from_cache or program_load
* mode in the controller.
* This controller can execute typical Read From Cache and Program Load
* instructions found on SPI-NAND with 2-byte address.
* DTR and cmd buswidth & nbytes should be checked before calling this.
*
* Return: true if the op matches the instruction template
*/
static bool mtk_snand_is_page_ops(const struct spi_mem_op *op)
{
if (op->addr.nbytes != 2)
return false;
if (op->addr.buswidth != 1 && op->addr.buswidth != 2 &&
op->addr.buswidth != 4)
return false;
// match read from page instructions
if (op->data.dir == SPI_MEM_DATA_IN) {
// check dummy cycle first
if (op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth >
DATA_READ_MAX_DUMMY)
return false;
// quad io / quad out
if ((op->addr.buswidth == 4 || op->addr.buswidth == 1) &&
op->data.buswidth == 4)
return true;
// dual io / dual out
if ((op->addr.buswidth == 2 || op->addr.buswidth == 1) &&
op->data.buswidth == 2)
return true;
// standard spi
if (op->addr.buswidth == 1 && op->data.buswidth == 1)
return true;
} else if (op->data.dir == SPI_MEM_DATA_OUT) {
// check dummy cycle first
if (op->dummy.nbytes)
return false;
// program load quad out
if (op->addr.buswidth == 1 && op->data.buswidth == 4)
return true;
// standard spi
if (op->addr.buswidth == 1 && op->data.buswidth == 1)
return true;
}
return false;
}
static bool mtk_snand_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
if (!spi_mem_default_supports_op(mem, op))
return false;
if (op->cmd.nbytes != 1 || op->cmd.buswidth != 1)
return false;
if (mtk_snand_is_page_ops(op))
return true;
return ((op->addr.nbytes == 0 || op->addr.buswidth == 1) &&
(op->dummy.nbytes == 0 || op->dummy.buswidth == 1) &&
(op->data.nbytes == 0 || op->data.buswidth == 1));
}
static int mtk_snand_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
struct mtk_snand *ms = spi_controller_get_devdata(mem->spi->master);
// page ops transfer size must be exactly ((sector_size + spare_size) *
// nsectors). Limit the op size if the caller requests more than that.
// exec_op will read more than needed and discard the leftover if the
// caller requests less data.
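// Illustrative example (numbers assumed, not from a specific chip):
// with 512-byte sectors, 16 spare bytes per sector and 4 sectors per
// page, the fixed page-op length is (512 + 16) * 4 = 2112 bytes, and
// larger requests are clamped to that value here.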
if (mtk_snand_is_page_ops(op)) {
size_t l;
// skip adjust_op_size for page ops
if (ms->autofmt)
return 0;
l = ms->caps->sector_size + ms->nfi_cfg.spare_size;
l *= ms->nfi_cfg.nsectors;
if (op->data.nbytes > l)
op->data.nbytes = l;
} else {
size_t hl = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
if (hl >= SNF_GPRAM_SIZE)
return -EOPNOTSUPP;
if (op->data.nbytes > SNF_GPRAM_SIZE - hl)
op->data.nbytes = SNF_GPRAM_SIZE - hl;
}
return 0;
}
static int mtk_snand_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
struct mtk_snand *ms = spi_controller_get_devdata(mem->spi->master);
dev_dbg(ms->dev, "OP %02x ADDR %08llX@%d:%u DATA %d:%u", op->cmd.opcode,
op->addr.val, op->addr.buswidth, op->addr.nbytes,
op->data.buswidth, op->data.nbytes);
if (mtk_snand_is_page_ops(op)) {
if (op->data.dir == SPI_MEM_DATA_IN)
return mtk_snand_read_page_cache(ms, op);
else
return mtk_snand_write_page_cache(ms, op);
} else {
return mtk_snand_mac_io(ms, op);
}
}
static const struct spi_controller_mem_ops mtk_snand_mem_ops = {
.adjust_op_size = mtk_snand_adjust_op_size,
.supports_op = mtk_snand_supports_op,
.exec_op = mtk_snand_exec_op,
};
static const struct spi_controller_mem_caps mtk_snand_mem_caps = {
.ecc = true,
};
static irqreturn_t mtk_snand_irq(int irq, void *id)
{
struct mtk_snand *snf = id;
u32 sta, ien;
sta = nfi_read32(snf, NFI_INTR_STA);
ien = nfi_read32(snf, NFI_INTR_EN);
if (!(sta & ien))
return IRQ_NONE;
nfi_write32(snf, NFI_INTR_EN, 0);
complete(&snf->op_done);
return IRQ_HANDLED;
}
static const struct of_device_id mtk_snand_ids[] = {
{ .compatible = "mediatek,mt7622-snand", .data = &mt7622_snand_caps },
{ .compatible = "mediatek,mt7629-snand", .data = &mt7629_snand_caps },
{ .compatible = "mediatek,mt7986-snand", .data = &mt7986_snand_caps },
{},
};
MODULE_DEVICE_TABLE(of, mtk_snand_ids);
static int mtk_snand_enable_clk(struct mtk_snand *ms)
{
int ret;
ret = clk_prepare_enable(ms->nfi_clk);
if (ret) {
dev_err(ms->dev, "unable to enable nfi clk\n");
return ret;
}
ret = clk_prepare_enable(ms->pad_clk);
if (ret) {
dev_err(ms->dev, "unable to enable pad clk\n");
goto err1;
}
ret = clk_prepare_enable(ms->nfi_hclk);
if (ret) {
dev_err(ms->dev, "unable to enable nfi hclk\n");
goto err2;
}
return 0;
err2:
clk_disable_unprepare(ms->pad_clk);
err1:
clk_disable_unprepare(ms->nfi_clk);
return ret;
}
static void mtk_snand_disable_clk(struct mtk_snand *ms)
{
clk_disable_unprepare(ms->nfi_hclk);
clk_disable_unprepare(ms->pad_clk);
clk_disable_unprepare(ms->nfi_clk);
}
static int mtk_snand_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
const struct of_device_id *dev_id;
struct spi_controller *ctlr;
struct mtk_snand *ms;
unsigned long spi_freq;
u32 val = 0;
int ret;
dev_id = of_match_node(mtk_snand_ids, np);
if (!dev_id)
return -EINVAL;
ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*ms));
if (!ctlr)
return -ENOMEM;
platform_set_drvdata(pdev, ctlr);
ms = spi_controller_get_devdata(ctlr);
ms->ctlr = ctlr;
ms->caps = dev_id->data;
ms->ecc = of_mtk_ecc_get(np);
if (IS_ERR(ms->ecc))
return PTR_ERR(ms->ecc);
else if (!ms->ecc)
return -ENODEV;
ms->nfi_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ms->nfi_base)) {
ret = PTR_ERR(ms->nfi_base);
goto release_ecc;
}
ms->dev = &pdev->dev;
ms->nfi_clk = devm_clk_get(&pdev->dev, "nfi_clk");
if (IS_ERR(ms->nfi_clk)) {
ret = PTR_ERR(ms->nfi_clk);
dev_err(&pdev->dev, "unable to get nfi_clk, err = %d\n", ret);
goto release_ecc;
}
ms->pad_clk = devm_clk_get(&pdev->dev, "pad_clk");
if (IS_ERR(ms->pad_clk)) {
ret = PTR_ERR(ms->pad_clk);
dev_err(&pdev->dev, "unable to get pad_clk, err = %d\n", ret);
goto release_ecc;
}
ms->nfi_hclk = devm_clk_get_optional(&pdev->dev, "nfi_hclk");
if (IS_ERR(ms->nfi_hclk)) {
ret = PTR_ERR(ms->nfi_hclk);
dev_err(&pdev->dev, "unable to get nfi_hclk, err = %d\n", ret);
goto release_ecc;
}
ret = mtk_snand_enable_clk(ms);
if (ret)
goto release_ecc;
init_completion(&ms->op_done);
ms->irq = platform_get_irq(pdev, 0);
if (ms->irq < 0) {
ret = ms->irq;
goto disable_clk;
}
ret = devm_request_irq(ms->dev, ms->irq, mtk_snand_irq, 0x0,
"mtk-snand", ms);
if (ret) {
dev_err(ms->dev, "failed to request snfi irq\n");
goto disable_clk;
}
ret = dma_set_mask(ms->dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(ms->dev, "failed to set dma mask\n");
goto disable_clk;
}
// switch to SNFI mode
nfi_write32(ms, SNF_CFG, SPI_MODE);
ret = of_property_read_u32(np, "rx-sample-delay-ns", &val);
if (!ret)
nfi_rmw32(ms, SNF_DLY_CTL3, SFCK_SAM_DLY,
val * SFCK_SAM_DLY_RANGE / SFCK_SAM_DLY_TOTAL);
ret = of_property_read_u32(np, "mediatek,rx-latch-latency-ns", &val);
if (!ret) {
spi_freq = clk_get_rate(ms->pad_clk);
val = DIV_ROUND_CLOSEST(val, NSEC_PER_SEC / spi_freq);
nfi_rmw32(ms, SNF_MISC_CTL, DATA_READ_LATCH_LAT,
val << DATA_READ_LATCH_LAT_S);
}
// setup an initial page format for ops matching page_cache_op template
// before ECC is called.
ret = mtk_snand_setup_pagefmt(ms, SZ_2K, SZ_64);
if (ret) {
dev_err(ms->dev, "failed to set initial page format\n");
goto disable_clk;
}
// setup ECC engine
ms->ecc_eng.dev = &pdev->dev;
ms->ecc_eng.integration = NAND_ECC_ENGINE_INTEGRATION_PIPELINED;
ms->ecc_eng.ops = &mtk_snfi_ecc_engine_ops;
ms->ecc_eng.priv = ms;
ret = nand_ecc_register_on_host_hw_engine(&ms->ecc_eng);
if (ret) {
dev_err(&pdev->dev, "failed to register ecc engine.\n");
goto disable_clk;
}
ctlr->num_chipselect = 1;
ctlr->mem_ops = &mtk_snand_mem_ops;
ctlr->mem_caps = &mtk_snand_mem_caps;
ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD;
ctlr->dev.of_node = pdev->dev.of_node;
ret = spi_register_controller(ctlr);
if (ret) {
dev_err(&pdev->dev, "spi_register_controller failed.\n");
goto disable_clk;
}
return 0;
disable_clk:
mtk_snand_disable_clk(ms);
release_ecc:
mtk_ecc_release(ms->ecc);
return ret;
}
static void mtk_snand_remove(struct platform_device *pdev)
{
struct spi_controller *ctlr = platform_get_drvdata(pdev);
struct mtk_snand *ms = spi_controller_get_devdata(ctlr);
spi_unregister_controller(ctlr);
mtk_snand_disable_clk(ms);
mtk_ecc_release(ms->ecc);
kfree(ms->buf);
}
static struct platform_driver mtk_snand_driver = {
.probe = mtk_snand_probe,
.remove_new = mtk_snand_remove,
.driver = {
.name = "mtk-snand",
.of_match_table = mtk_snand_ids,
},
};
module_platform_driver(mtk_snand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chuanhong Guo <[email protected]>");
MODULE_DESCRIPTION("MeidaTek SPI-NAND Flash Controller Driver");
| linux-master | drivers/spi/spi-mtk-snfi.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale QuadSPI driver.
*
* Copyright (C) 2013 Freescale Semiconductor, Inc.
* Copyright (C) 2018 Bootlin
* Copyright (C) 2018 exceet electronics GmbH
* Copyright (C) 2018 Kontron Electronics GmbH
*
* Transition to SPI MEM interface:
* Authors:
* Boris Brezillon <[email protected]>
* Frieder Schrempf <[email protected]>
* Yogesh Gaur <[email protected]>
* Suresh Gupta <[email protected]>
*
* Based on the original fsl-quadspi.c SPI NOR driver:
* Author: Freescale Semiconductor, Inc.
*
*/
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/sizes.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
/*
* The driver only uses one single LUT entry, that is updated on
* each call of exec_op(). Index 0 is preset at boot with a basic
* read operation, so let's use the last entry (15).
*/
#define SEQID_LUT 15
/* Registers used by the driver */
#define QUADSPI_MCR 0x00
#define QUADSPI_MCR_RESERVED_MASK GENMASK(19, 16)
#define QUADSPI_MCR_MDIS_MASK BIT(14)
#define QUADSPI_MCR_CLR_TXF_MASK BIT(11)
#define QUADSPI_MCR_CLR_RXF_MASK BIT(10)
#define QUADSPI_MCR_DDR_EN_MASK BIT(7)
#define QUADSPI_MCR_END_CFG_MASK GENMASK(3, 2)
#define QUADSPI_MCR_SWRSTHD_MASK BIT(1)
#define QUADSPI_MCR_SWRSTSD_MASK BIT(0)
#define QUADSPI_IPCR 0x08
#define QUADSPI_IPCR_SEQID(x) ((x) << 24)
#define QUADSPI_FLSHCR 0x0c
#define QUADSPI_FLSHCR_TCSS_MASK GENMASK(3, 0)
#define QUADSPI_FLSHCR_TCSH_MASK GENMASK(11, 8)
#define QUADSPI_FLSHCR_TDH_MASK GENMASK(17, 16)
#define QUADSPI_BUF0CR 0x10
#define QUADSPI_BUF1CR 0x14
#define QUADSPI_BUF2CR 0x18
#define QUADSPI_BUFXCR_INVALID_MSTRID 0xe
#define QUADSPI_BUF3CR 0x1c
#define QUADSPI_BUF3CR_ALLMST_MASK BIT(31)
#define QUADSPI_BUF3CR_ADATSZ(x) ((x) << 8)
#define QUADSPI_BUF3CR_ADATSZ_MASK GENMASK(15, 8)
#define QUADSPI_BFGENCR 0x20
#define QUADSPI_BFGENCR_SEQID(x) ((x) << 12)
#define QUADSPI_BUF0IND 0x30
#define QUADSPI_BUF1IND 0x34
#define QUADSPI_BUF2IND 0x38
#define QUADSPI_SFAR 0x100
#define QUADSPI_SMPR 0x108
#define QUADSPI_SMPR_DDRSMP_MASK GENMASK(18, 16)
#define QUADSPI_SMPR_FSDLY_MASK BIT(6)
#define QUADSPI_SMPR_FSPHS_MASK BIT(5)
#define QUADSPI_SMPR_HSENA_MASK BIT(0)
#define QUADSPI_RBCT 0x110
#define QUADSPI_RBCT_WMRK_MASK GENMASK(4, 0)
#define QUADSPI_RBCT_RXBRD_USEIPS BIT(8)
#define QUADSPI_TBDR 0x154
#define QUADSPI_SR 0x15c
#define QUADSPI_SR_IP_ACC_MASK BIT(1)
#define QUADSPI_SR_AHB_ACC_MASK BIT(2)
#define QUADSPI_FR 0x160
#define QUADSPI_FR_TFF_MASK BIT(0)
#define QUADSPI_RSER 0x164
#define QUADSPI_RSER_TFIE BIT(0)
#define QUADSPI_SPTRCLR 0x16c
#define QUADSPI_SPTRCLR_IPPTRC BIT(8)
#define QUADSPI_SPTRCLR_BFPTRC BIT(0)
#define QUADSPI_SFA1AD 0x180
#define QUADSPI_SFA2AD 0x184
#define QUADSPI_SFB1AD 0x188
#define QUADSPI_SFB2AD 0x18c
#define QUADSPI_RBDR(x) (0x200 + ((x) * 4))
#define QUADSPI_LUTKEY 0x300
#define QUADSPI_LUTKEY_VALUE 0x5AF05AF0
#define QUADSPI_LCKCR 0x304
#define QUADSPI_LCKER_LOCK BIT(0)
#define QUADSPI_LCKER_UNLOCK BIT(1)
#define QUADSPI_LUT_BASE 0x310
#define QUADSPI_LUT_OFFSET (SEQID_LUT * 4 * 4)
#define QUADSPI_LUT_REG(idx) \
(QUADSPI_LUT_BASE + QUADSPI_LUT_OFFSET + (idx) * 4)
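/*
 * Worked example of the resulting layout: with SEQID_LUT = 15 the
 * offset is 15 * 4 * 4 = 0xf0, so the four words of the sequence sit
 * at QUADSPI_LUT_REG(0..3) = 0x400, 0x404, 0x408 and 0x40c.
 */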
/* Instruction set for the LUT register */
#define LUT_STOP 0
#define LUT_CMD 1
#define LUT_ADDR 2
#define LUT_DUMMY 3
#define LUT_MODE 4
#define LUT_MODE2 5
#define LUT_MODE4 6
#define LUT_FSL_READ 7
#define LUT_FSL_WRITE 8
#define LUT_JMP_ON_CS 9
#define LUT_ADDR_DDR 10
#define LUT_MODE_DDR 11
#define LUT_MODE2_DDR 12
#define LUT_MODE4_DDR 13
#define LUT_FSL_READ_DDR 14
#define LUT_FSL_WRITE_DDR 15
#define LUT_DATA_LEARN 16
/*
* The PAD definitions for LUT register.
*
* The pad stands for the number of IO lines [0:3].
* For example, the quad read needs four IO lines,
* so you should use LUT_PAD(4).
*/
#define LUT_PAD(x) (fls(x) - 1)
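/*
 * The fls()-based mapping yields LUT_PAD(1) = 0, LUT_PAD(2) = 1 and
 * LUT_PAD(4) = 2 for single, dual and quad I/O respectively.
 */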
/*
* Macro for constructing the LUT entries with the following
* register layout:
*
* ---------------------------------------------------
* | INSTR1 | PAD1 | OPRND1 | INSTR0 | PAD0 | OPRND0 |
* ---------------------------------------------------
*/
#define LUT_DEF(idx, ins, pad, opr) \
((((ins) << 10) | ((pad) << 8) | (opr)) << (((idx) % 2) * 16))
/* Controller needs driver to swap endianness */
#define QUADSPI_QUIRK_SWAP_ENDIAN BIT(0)
/* Controller needs 4x internal clock */
#define QUADSPI_QUIRK_4X_INT_CLK BIT(1)
/*
* TKT253890, the controller needs the driver to fill the txfifo with
* at least 16 bytes to trigger a data transfer, even though the extra
* data won't be transferred.
*/
#define QUADSPI_QUIRK_TKT253890 BIT(2)
/* TKT245618, the controller cannot wake up from wait mode */
#define QUADSPI_QUIRK_TKT245618 BIT(3)
/*
* Controller adds QSPI_AMBA_BASE (base address of the mapped memory)
* internally. No need to add it when setting SFXXAD and SFAR registers
*/
#define QUADSPI_QUIRK_BASE_INTERNAL BIT(4)
/*
* Controller uses TDH bits in register QUADSPI_FLSHCR.
* They need to be set in accordance with the DDR/SDR mode.
*/
#define QUADSPI_QUIRK_USE_TDH_SETTING BIT(5)
struct fsl_qspi_devtype_data {
unsigned int rxfifo;
unsigned int txfifo;
int invalid_mstrid;
unsigned int ahb_buf_size;
unsigned int quirks;
bool little_endian;
};
static const struct fsl_qspi_devtype_data vybrid_data = {
.rxfifo = SZ_128,
.txfifo = SZ_64,
.invalid_mstrid = QUADSPI_BUFXCR_INVALID_MSTRID,
.ahb_buf_size = SZ_1K,
.quirks = QUADSPI_QUIRK_SWAP_ENDIAN,
.little_endian = true,
};
static const struct fsl_qspi_devtype_data imx6sx_data = {
.rxfifo = SZ_128,
.txfifo = SZ_512,
.invalid_mstrid = QUADSPI_BUFXCR_INVALID_MSTRID,
.ahb_buf_size = SZ_1K,
.quirks = QUADSPI_QUIRK_4X_INT_CLK | QUADSPI_QUIRK_TKT245618,
.little_endian = true,
};
static const struct fsl_qspi_devtype_data imx7d_data = {
.rxfifo = SZ_128,
.txfifo = SZ_512,
.invalid_mstrid = QUADSPI_BUFXCR_INVALID_MSTRID,
.ahb_buf_size = SZ_1K,
.quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK |
QUADSPI_QUIRK_USE_TDH_SETTING,
.little_endian = true,
};
static const struct fsl_qspi_devtype_data imx6ul_data = {
.rxfifo = SZ_128,
.txfifo = SZ_512,
.invalid_mstrid = QUADSPI_BUFXCR_INVALID_MSTRID,
.ahb_buf_size = SZ_1K,
.quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK |
QUADSPI_QUIRK_USE_TDH_SETTING,
.little_endian = true,
};
static const struct fsl_qspi_devtype_data ls1021a_data = {
.rxfifo = SZ_128,
.txfifo = SZ_64,
.invalid_mstrid = QUADSPI_BUFXCR_INVALID_MSTRID,
.ahb_buf_size = SZ_1K,
.quirks = 0,
.little_endian = false,
};
static const struct fsl_qspi_devtype_data ls2080a_data = {
.rxfifo = SZ_128,
.txfifo = SZ_64,
.ahb_buf_size = SZ_1K,
.invalid_mstrid = 0x0,
.quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_BASE_INTERNAL,
.little_endian = true,
};
struct fsl_qspi {
void __iomem *iobase;
void __iomem *ahb_addr;
u32 memmap_phy;
struct clk *clk, *clk_en;
struct device *dev;
struct completion c;
const struct fsl_qspi_devtype_data *devtype_data;
struct mutex lock;
struct pm_qos_request pm_qos_req;
int selected;
};
static inline int needs_swap_endian(struct fsl_qspi *q)
{
return q->devtype_data->quirks & QUADSPI_QUIRK_SWAP_ENDIAN;
}
static inline int needs_4x_clock(struct fsl_qspi *q)
{
return q->devtype_data->quirks & QUADSPI_QUIRK_4X_INT_CLK;
}
static inline int needs_fill_txfifo(struct fsl_qspi *q)
{
return q->devtype_data->quirks & QUADSPI_QUIRK_TKT253890;
}
static inline int needs_wakeup_wait_mode(struct fsl_qspi *q)
{
return q->devtype_data->quirks & QUADSPI_QUIRK_TKT245618;
}
static inline int needs_amba_base_offset(struct fsl_qspi *q)
{
return !(q->devtype_data->quirks & QUADSPI_QUIRK_BASE_INTERNAL);
}
static inline int needs_tdh_setting(struct fsl_qspi *q)
{
return q->devtype_data->quirks & QUADSPI_QUIRK_USE_TDH_SETTING;
}
/*
* An IC bug makes it necessary to rearrange the 32-bit data.
* Later chips, such as IMX6SLX, have fixed this bug.
*/
static inline u32 fsl_qspi_endian_xchg(struct fsl_qspi *q, u32 a)
{
return needs_swap_endian(q) ? __swab32(a) : a;
}
/*
* R/W functions for big- or little-endian registers:
* The QSPI controller's endianness is independent of
* the CPU core's endianness. On the SoCs seen so far the CPU
* core is little-endian, while the QSPI controller can be either
* big-endian or little-endian.
*/
static void qspi_writel(struct fsl_qspi *q, u32 val, void __iomem *addr)
{
if (q->devtype_data->little_endian)
iowrite32(val, addr);
else
iowrite32be(val, addr);
}
static u32 qspi_readl(struct fsl_qspi *q, void __iomem *addr)
{
if (q->devtype_data->little_endian)
return ioread32(addr);
return ioread32be(addr);
}
static irqreturn_t fsl_qspi_irq_handler(int irq, void *dev_id)
{
struct fsl_qspi *q = dev_id;
u32 reg;
/* clear interrupt */
reg = qspi_readl(q, q->iobase + QUADSPI_FR);
qspi_writel(q, reg, q->iobase + QUADSPI_FR);
if (reg & QUADSPI_FR_TFF_MASK)
complete(&q->c);
dev_dbg(q->dev, "QUADSPI_FR : 0x%.8x:0x%.8x\n", 0, reg);
return IRQ_HANDLED;
}
static int fsl_qspi_check_buswidth(struct fsl_qspi *q, u8 width)
{
switch (width) {
case 1:
case 2:
case 4:
return 0;
}
return -ENOTSUPP;
}
static bool fsl_qspi_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->controller);
int ret;
ret = fsl_qspi_check_buswidth(q, op->cmd.buswidth);
if (op->addr.nbytes)
ret |= fsl_qspi_check_buswidth(q, op->addr.buswidth);
if (op->dummy.nbytes)
ret |= fsl_qspi_check_buswidth(q, op->dummy.buswidth);
if (op->data.nbytes)
ret |= fsl_qspi_check_buswidth(q, op->data.buswidth);
if (ret)
return false;
/*
* The number of instructions needed for the op must fit into a
* single LUT entry.
*/
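/*
 * Example: an op with a 4-byte address, dummy cycles and a data
 * phase needs 1 (cmd) + 4 (addr bytes) + 1 (dummy) + 1 (data) +
 * 1 (stop) = 8 instructions, exactly filling the four words of one
 * LUT sequence; anything longer is rejected below.
 */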
if (op->addr.nbytes +
(op->dummy.nbytes ? 1:0) +
(op->data.nbytes ? 1:0) > 6)
return false;
/* Max 64 dummy clock cycles supported */
if (op->dummy.nbytes &&
(op->dummy.nbytes * 8 / op->dummy.buswidth > 64))
return false;
/* Max data length, check controller limits and alignment */
if (op->data.dir == SPI_MEM_DATA_IN &&
(op->data.nbytes > q->devtype_data->ahb_buf_size ||
(op->data.nbytes > q->devtype_data->rxfifo - 4 &&
!IS_ALIGNED(op->data.nbytes, 8))))
return false;
if (op->data.dir == SPI_MEM_DATA_OUT &&
op->data.nbytes > q->devtype_data->txfifo)
return false;
return spi_mem_default_supports_op(mem, op);
}
static void fsl_qspi_prepare_lut(struct fsl_qspi *q,
const struct spi_mem_op *op)
{
void __iomem *base = q->iobase;
u32 lutval[4] = {};
int lutidx = 1, i;
lutval[0] |= LUT_DEF(0, LUT_CMD, LUT_PAD(op->cmd.buswidth),
op->cmd.opcode);
/*
* For some unknown reason, using LUT_ADDR doesn't work in some
* cases (at least with only one byte long addresses), so
* let's use LUT_MODE to write the address bytes one by one
*/
for (i = 0; i < op->addr.nbytes; i++) {
u8 addrbyte = op->addr.val >> (8 * (op->addr.nbytes - i - 1));
lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_MODE,
LUT_PAD(op->addr.buswidth),
addrbyte);
lutidx++;
}
if (op->dummy.nbytes) {
lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_DUMMY,
LUT_PAD(op->dummy.buswidth),
op->dummy.nbytes * 8 /
op->dummy.buswidth);
lutidx++;
}
if (op->data.nbytes) {
lutval[lutidx / 2] |= LUT_DEF(lutidx,
op->data.dir == SPI_MEM_DATA_IN ?
LUT_FSL_READ : LUT_FSL_WRITE,
LUT_PAD(op->data.buswidth),
0);
lutidx++;
}
lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_STOP, 0, 0);
/* unlock LUT */
qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY);
qspi_writel(q, QUADSPI_LCKER_UNLOCK, q->iobase + QUADSPI_LCKCR);
/* fill LUT */
for (i = 0; i < ARRAY_SIZE(lutval); i++)
qspi_writel(q, lutval[i], base + QUADSPI_LUT_REG(i));
/* lock LUT */
qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY);
qspi_writel(q, QUADSPI_LCKER_LOCK, q->iobase + QUADSPI_LCKCR);
}
static int fsl_qspi_clk_prep_enable(struct fsl_qspi *q)
{
int ret;
ret = clk_prepare_enable(q->clk_en);
if (ret)
return ret;
ret = clk_prepare_enable(q->clk);
if (ret) {
clk_disable_unprepare(q->clk_en);
return ret;
}
if (needs_wakeup_wait_mode(q))
cpu_latency_qos_add_request(&q->pm_qos_req, 0);
return 0;
}
static void fsl_qspi_clk_disable_unprep(struct fsl_qspi *q)
{
if (needs_wakeup_wait_mode(q))
cpu_latency_qos_remove_request(&q->pm_qos_req);
clk_disable_unprepare(q->clk);
clk_disable_unprepare(q->clk_en);
}
/*
* If we have changed the content of the flash by writing or erasing, or if we
* read from flash with a different offset into the page buffer, we need to
* invalidate the AHB buffer. If we do not do so, we may read out the wrong
* data. The spec tells us to reset the AHB domain and Serial Flash domain at
* the same time.
*/
static void fsl_qspi_invalidate(struct fsl_qspi *q)
{
u32 reg;
reg = qspi_readl(q, q->iobase + QUADSPI_MCR);
reg |= QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK;
qspi_writel(q, reg, q->iobase + QUADSPI_MCR);
/*
* The minimum delay : 1 AHB + 2 SFCK clocks.
* Delay 1 us is enough.
*/
udelay(1);
reg &= ~(QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK);
qspi_writel(q, reg, q->iobase + QUADSPI_MCR);
}
static void fsl_qspi_select_mem(struct fsl_qspi *q, struct spi_device *spi)
{
unsigned long rate = spi->max_speed_hz;
int ret;
if (q->selected == spi_get_chipselect(spi, 0))
return;
if (needs_4x_clock(q))
rate *= 4;
fsl_qspi_clk_disable_unprep(q);
ret = clk_set_rate(q->clk, rate);
if (ret)
return;
ret = fsl_qspi_clk_prep_enable(q);
if (ret)
return;
q->selected = spi_get_chipselect(spi, 0);
fsl_qspi_invalidate(q);
}
static void fsl_qspi_read_ahb(struct fsl_qspi *q, const struct spi_mem_op *op)
{
memcpy_fromio(op->data.buf.in,
q->ahb_addr + q->selected * q->devtype_data->ahb_buf_size,
op->data.nbytes);
}
static void fsl_qspi_fill_txfifo(struct fsl_qspi *q,
const struct spi_mem_op *op)
{
void __iomem *base = q->iobase;
int i;
u32 val;
for (i = 0; i < ALIGN_DOWN(op->data.nbytes, 4); i += 4) {
memcpy(&val, op->data.buf.out + i, 4);
val = fsl_qspi_endian_xchg(q, val);
qspi_writel(q, val, base + QUADSPI_TBDR);
}
if (i < op->data.nbytes) {
memcpy(&val, op->data.buf.out + i, op->data.nbytes - i);
val = fsl_qspi_endian_xchg(q, val);
qspi_writel(q, val, base + QUADSPI_TBDR);
}
if (needs_fill_txfifo(q)) {
for (i = op->data.nbytes; i < 16; i += 4)
qspi_writel(q, 0, base + QUADSPI_TBDR);
}
}
static void fsl_qspi_read_rxfifo(struct fsl_qspi *q,
const struct spi_mem_op *op)
{
void __iomem *base = q->iobase;
int i;
u8 *buf = op->data.buf.in;
u32 val;
for (i = 0; i < ALIGN_DOWN(op->data.nbytes, 4); i += 4) {
val = qspi_readl(q, base + QUADSPI_RBDR(i / 4));
val = fsl_qspi_endian_xchg(q, val);
memcpy(buf + i, &val, 4);
}
if (i < op->data.nbytes) {
val = qspi_readl(q, base + QUADSPI_RBDR(i / 4));
val = fsl_qspi_endian_xchg(q, val);
memcpy(buf + i, &val, op->data.nbytes - i);
}
}
static int fsl_qspi_do_op(struct fsl_qspi *q, const struct spi_mem_op *op)
{
void __iomem *base = q->iobase;
int err = 0;
init_completion(&q->c);
/*
* Always start the sequence at the same index since we update
* the LUT at each exec_op() call. Also specify the DATA
* length, since it has not been specified in the LUT.
*/
qspi_writel(q, op->data.nbytes | QUADSPI_IPCR_SEQID(SEQID_LUT),
base + QUADSPI_IPCR);
/* Wait for the interrupt. */
if (!wait_for_completion_timeout(&q->c, msecs_to_jiffies(1000)))
err = -ETIMEDOUT;
if (!err && op->data.nbytes && op->data.dir == SPI_MEM_DATA_IN)
fsl_qspi_read_rxfifo(q, op);
return err;
}
static int fsl_qspi_readl_poll_tout(struct fsl_qspi *q, void __iomem *base,
u32 mask, u32 delay_us, u32 timeout_us)
{
u32 reg;
if (!q->devtype_data->little_endian)
mask = (u32)cpu_to_be32(mask);
return readl_poll_timeout(base, reg, !(reg & mask), delay_us,
timeout_us);
}
static int fsl_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->controller);
void __iomem *base = q->iobase;
u32 addr_offset = 0;
int err = 0;
int invalid_mstrid = q->devtype_data->invalid_mstrid;
mutex_lock(&q->lock);
/* wait for the controller to become ready */
fsl_qspi_readl_poll_tout(q, base + QUADSPI_SR, (QUADSPI_SR_IP_ACC_MASK |
QUADSPI_SR_AHB_ACC_MASK), 10, 1000);
fsl_qspi_select_mem(q, mem->spi);
if (needs_amba_base_offset(q))
addr_offset = q->memmap_phy;
qspi_writel(q,
q->selected * q->devtype_data->ahb_buf_size + addr_offset,
base + QUADSPI_SFAR);
qspi_writel(q, qspi_readl(q, base + QUADSPI_MCR) |
QUADSPI_MCR_CLR_RXF_MASK | QUADSPI_MCR_CLR_TXF_MASK,
base + QUADSPI_MCR);
qspi_writel(q, QUADSPI_SPTRCLR_BFPTRC | QUADSPI_SPTRCLR_IPPTRC,
base + QUADSPI_SPTRCLR);
qspi_writel(q, invalid_mstrid, base + QUADSPI_BUF0CR);
qspi_writel(q, invalid_mstrid, base + QUADSPI_BUF1CR);
qspi_writel(q, invalid_mstrid, base + QUADSPI_BUF2CR);
fsl_qspi_prepare_lut(q, op);
/*
* If we have large chunks of data, we read them through the AHB bus
* by accessing the mapped memory. In all other cases we use
* IP commands to access the flash.
*/
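/*
 * For example, with a 128-byte RX FIFO, reads larger than 124 bytes
 * go through the memory-mapped AHB window, while smaller reads and
 * all writes use IP commands and the FIFOs.
 */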
if (op->data.nbytes > (q->devtype_data->rxfifo - 4) &&
op->data.dir == SPI_MEM_DATA_IN) {
fsl_qspi_read_ahb(q, op);
} else {
qspi_writel(q, QUADSPI_RBCT_WMRK_MASK |
QUADSPI_RBCT_RXBRD_USEIPS, base + QUADSPI_RBCT);
if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
fsl_qspi_fill_txfifo(q, op);
err = fsl_qspi_do_op(q, op);
}
/* Invalidate the data in the AHB buffer. */
fsl_qspi_invalidate(q);
mutex_unlock(&q->lock);
return err;
}
static int fsl_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->controller);
if (op->data.dir == SPI_MEM_DATA_OUT) {
if (op->data.nbytes > q->devtype_data->txfifo)
op->data.nbytes = q->devtype_data->txfifo;
} else {
if (op->data.nbytes > q->devtype_data->ahb_buf_size)
op->data.nbytes = q->devtype_data->ahb_buf_size;
else if (op->data.nbytes > (q->devtype_data->rxfifo - 4))
op->data.nbytes = ALIGN_DOWN(op->data.nbytes, 8);
}
return 0;
}
static int fsl_qspi_default_setup(struct fsl_qspi *q)
{
void __iomem *base = q->iobase;
u32 reg, addr_offset = 0;
int ret;
/* disable and unprepare clock to avoid glitch pass to controller */
fsl_qspi_clk_disable_unprep(q);
/* Set the default frequency; we will change it later if necessary. */
ret = clk_set_rate(q->clk, 66000000);
if (ret)
return ret;
ret = fsl_qspi_clk_prep_enable(q);
if (ret)
return ret;
/* Reset the module */
qspi_writel(q, QUADSPI_MCR_SWRSTSD_MASK | QUADSPI_MCR_SWRSTHD_MASK,
base + QUADSPI_MCR);
udelay(1);
/* Disable the module */
qspi_writel(q, QUADSPI_MCR_MDIS_MASK | QUADSPI_MCR_RESERVED_MASK,
base + QUADSPI_MCR);
/*
* Previous boot stages (BootROM, bootloader) might have used DDR
* mode and did not clear the TDH bits. As we currently use SDR mode
* only, clear the TDH bits if necessary.
*/
if (needs_tdh_setting(q))
qspi_writel(q, qspi_readl(q, base + QUADSPI_FLSHCR) &
~QUADSPI_FLSHCR_TDH_MASK,
base + QUADSPI_FLSHCR);
reg = qspi_readl(q, base + QUADSPI_SMPR);
qspi_writel(q, reg & ~(QUADSPI_SMPR_FSDLY_MASK
| QUADSPI_SMPR_FSPHS_MASK
| QUADSPI_SMPR_HSENA_MASK
| QUADSPI_SMPR_DDRSMP_MASK), base + QUADSPI_SMPR);
/* We only use the buffer3 for AHB read */
qspi_writel(q, 0, base + QUADSPI_BUF0IND);
qspi_writel(q, 0, base + QUADSPI_BUF1IND);
qspi_writel(q, 0, base + QUADSPI_BUF2IND);
qspi_writel(q, QUADSPI_BFGENCR_SEQID(SEQID_LUT),
q->iobase + QUADSPI_BFGENCR);
qspi_writel(q, QUADSPI_RBCT_WMRK_MASK, base + QUADSPI_RBCT);
qspi_writel(q, QUADSPI_BUF3CR_ALLMST_MASK |
QUADSPI_BUF3CR_ADATSZ(q->devtype_data->ahb_buf_size / 8),
base + QUADSPI_BUF3CR);
if (needs_amba_base_offset(q))
addr_offset = q->memmap_phy;
/*
* In HW there can be a maximum of four chips on two buses with
* two chip selects on each bus. We use four chip selects in SW
* to differentiate between the four chips.
* We use ahb_buf_size for each chip and set SFA1AD, SFA2AD, SFB1AD,
* SFB2AD accordingly.
*/
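/*
 * Example with a 1 KiB AHB buffer per chip: the four windows end at
 * addr_offset + 0x400, + 0x800, + 0xc00 and + 0x1000 for SFA1AD,
 * SFA2AD, SFB1AD and SFB2AD respectively.
 */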
qspi_writel(q, q->devtype_data->ahb_buf_size + addr_offset,
base + QUADSPI_SFA1AD);
qspi_writel(q, q->devtype_data->ahb_buf_size * 2 + addr_offset,
base + QUADSPI_SFA2AD);
qspi_writel(q, q->devtype_data->ahb_buf_size * 3 + addr_offset,
base + QUADSPI_SFB1AD);
qspi_writel(q, q->devtype_data->ahb_buf_size * 4 + addr_offset,
base + QUADSPI_SFB2AD);
q->selected = -1;
/* Enable the module */
qspi_writel(q, QUADSPI_MCR_RESERVED_MASK | QUADSPI_MCR_END_CFG_MASK,
base + QUADSPI_MCR);
/* clear all interrupt status */
qspi_writel(q, 0xffffffff, q->iobase + QUADSPI_FR);
/* enable the interrupt */
qspi_writel(q, QUADSPI_RSER_TFIE, q->iobase + QUADSPI_RSER);
return 0;
}
static const char *fsl_qspi_get_name(struct spi_mem *mem)
{
struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->controller);
struct device *dev = &mem->spi->dev;
const char *name;
/*
* In order to keep mtdparts compatible with the old MTD driver at
* mtd/spi-nor/fsl-quadspi.c, we set a custom name derived from the
* platform_device of the controller.
*/
if (of_get_available_child_count(q->dev->of_node) == 1)
return dev_name(q->dev);
name = devm_kasprintf(dev, GFP_KERNEL,
"%s-%d", dev_name(q->dev),
spi_get_chipselect(mem->spi, 0));
if (!name) {
dev_err(dev, "failed to get memory for custom flash name\n");
return ERR_PTR(-ENOMEM);
}
return name;
}
static const struct spi_controller_mem_ops fsl_qspi_mem_ops = {
.adjust_op_size = fsl_qspi_adjust_op_size,
.supports_op = fsl_qspi_supports_op,
.exec_op = fsl_qspi_exec_op,
.get_name = fsl_qspi_get_name,
};
static int fsl_qspi_probe(struct platform_device *pdev)
{
struct spi_controller *ctlr;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct resource *res;
struct fsl_qspi *q;
int ret;
ctlr = spi_alloc_host(&pdev->dev, sizeof(*q));
if (!ctlr)
return -ENOMEM;
ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD |
SPI_TX_DUAL | SPI_TX_QUAD;
q = spi_controller_get_devdata(ctlr);
q->dev = dev;
q->devtype_data = of_device_get_match_data(dev);
if (!q->devtype_data) {
ret = -ENODEV;
goto err_put_ctrl;
}
platform_set_drvdata(pdev, q);
/* find the resources */
q->iobase = devm_platform_ioremap_resource_byname(pdev, "QuadSPI");
if (IS_ERR(q->iobase)) {
ret = PTR_ERR(q->iobase);
goto err_put_ctrl;
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"QuadSPI-memory");
if (!res) {
ret = -EINVAL;
goto err_put_ctrl;
}
q->memmap_phy = res->start;
/* Since there are 4 cs, map size required is 4 times ahb_buf_size */
q->ahb_addr = devm_ioremap(dev, q->memmap_phy,
(q->devtype_data->ahb_buf_size * 4));
if (!q->ahb_addr) {
ret = -ENOMEM;
goto err_put_ctrl;
}
/* find the clocks */
q->clk_en = devm_clk_get(dev, "qspi_en");
if (IS_ERR(q->clk_en)) {
ret = PTR_ERR(q->clk_en);
goto err_put_ctrl;
}
q->clk = devm_clk_get(dev, "qspi");
if (IS_ERR(q->clk)) {
ret = PTR_ERR(q->clk);
goto err_put_ctrl;
}
ret = fsl_qspi_clk_prep_enable(q);
if (ret) {
dev_err(dev, "can not enable the clock\n");
goto err_put_ctrl;
}
/* find the irq */
ret = platform_get_irq(pdev, 0);
if (ret < 0)
goto err_disable_clk;
ret = devm_request_irq(dev, ret,
fsl_qspi_irq_handler, 0, pdev->name, q);
if (ret) {
dev_err(dev, "failed to request irq: %d\n", ret);
goto err_disable_clk;
}
mutex_init(&q->lock);
ctlr->bus_num = -1;
ctlr->num_chipselect = 4;
ctlr->mem_ops = &fsl_qspi_mem_ops;
fsl_qspi_default_setup(q);
ctlr->dev.of_node = np;
ret = devm_spi_register_controller(dev, ctlr);
if (ret)
goto err_destroy_mutex;
return 0;
err_destroy_mutex:
mutex_destroy(&q->lock);
err_disable_clk:
fsl_qspi_clk_disable_unprep(q);
err_put_ctrl:
spi_controller_put(ctlr);
dev_err(dev, "Freescale QuadSPI probe failed\n");
return ret;
}
static void fsl_qspi_remove(struct platform_device *pdev)
{
struct fsl_qspi *q = platform_get_drvdata(pdev);
/* disable the hardware */
qspi_writel(q, QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR);
qspi_writel(q, 0x0, q->iobase + QUADSPI_RSER);
fsl_qspi_clk_disable_unprep(q);
mutex_destroy(&q->lock);
}
static int fsl_qspi_suspend(struct device *dev)
{
return 0;
}
static int fsl_qspi_resume(struct device *dev)
{
struct fsl_qspi *q = dev_get_drvdata(dev);
fsl_qspi_default_setup(q);
return 0;
}
static const struct of_device_id fsl_qspi_dt_ids[] = {
{ .compatible = "fsl,vf610-qspi", .data = &vybrid_data, },
{ .compatible = "fsl,imx6sx-qspi", .data = &imx6sx_data, },
{ .compatible = "fsl,imx7d-qspi", .data = &imx7d_data, },
{ .compatible = "fsl,imx6ul-qspi", .data = &imx6ul_data, },
{ .compatible = "fsl,ls1021a-qspi", .data = &ls1021a_data, },
{ .compatible = "fsl,ls2080a-qspi", .data = &ls2080a_data, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_qspi_dt_ids);
static const struct dev_pm_ops fsl_qspi_pm_ops = {
.suspend = fsl_qspi_suspend,
.resume = fsl_qspi_resume,
};
static struct platform_driver fsl_qspi_driver = {
.driver = {
.name = "fsl-quadspi",
.of_match_table = fsl_qspi_dt_ids,
.pm = &fsl_qspi_pm_ops,
},
.probe = fsl_qspi_probe,
.remove_new = fsl_qspi_remove,
};
module_platform_driver(fsl_qspi_driver);
MODULE_DESCRIPTION("Freescale QuadSPI Controller Driver");
MODULE_AUTHOR("Freescale Semiconductor Inc.");
MODULE_AUTHOR("Boris Brezillon <[email protected]>");
MODULE_AUTHOR("Frieder Schrempf <[email protected]>");
MODULE_AUTHOR("Yogesh Gaur <[email protected]>");
MODULE_AUTHOR("Suresh Gupta <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/spi/spi-fsl-qspi.c |
// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright (C) 2020 NVIDIA CORPORATION.
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/reset.h>
#include <linux/spi/spi.h>
#include <linux/acpi.h>
#include <linux/property.h>
#define QSPI_COMMAND1 0x000
#define QSPI_BIT_LENGTH(x) (((x) & 0x1f) << 0)
#define QSPI_PACKED BIT(5)
#define QSPI_INTERFACE_WIDTH_MASK (0x03 << 7)
#define QSPI_INTERFACE_WIDTH(x) (((x) & 0x03) << 7)
#define QSPI_INTERFACE_WIDTH_SINGLE QSPI_INTERFACE_WIDTH(0)
#define QSPI_INTERFACE_WIDTH_DUAL QSPI_INTERFACE_WIDTH(1)
#define QSPI_INTERFACE_WIDTH_QUAD QSPI_INTERFACE_WIDTH(2)
#define QSPI_SDR_DDR_SEL BIT(9)
#define QSPI_TX_EN BIT(11)
#define QSPI_RX_EN BIT(12)
#define QSPI_CS_SW_VAL BIT(20)
#define QSPI_CS_SW_HW BIT(21)
#define QSPI_CS_POL_INACTIVE(n) (1 << (22 + (n)))
#define QSPI_CS_POL_INACTIVE_MASK (0xF << 22)
#define QSPI_CS_SEL_0 (0 << 26)
#define QSPI_CS_SEL_1 (1 << 26)
#define QSPI_CS_SEL_2 (2 << 26)
#define QSPI_CS_SEL_3 (3 << 26)
#define QSPI_CS_SEL_MASK (3 << 26)
#define QSPI_CS_SEL(x) (((x) & 0x3) << 26)
#define QSPI_CONTROL_MODE_0 (0 << 28)
#define QSPI_CONTROL_MODE_3 (3 << 28)
#define QSPI_CONTROL_MODE_MASK (3 << 28)
#define QSPI_M_S BIT(30)
#define QSPI_PIO BIT(31)
#define QSPI_COMMAND2 0x004
#define QSPI_TX_TAP_DELAY(x) (((x) & 0x3f) << 10)
#define QSPI_RX_TAP_DELAY(x) (((x) & 0xff) << 0)
#define QSPI_CS_TIMING1 0x008
#define QSPI_SETUP_HOLD(setup, hold) (((setup) << 4) | (hold))
#define QSPI_CS_TIMING2 0x00c
#define CYCLES_BETWEEN_PACKETS_0(x) (((x) & 0x1f) << 0)
#define CS_ACTIVE_BETWEEN_PACKETS_0 BIT(5)
#define QSPI_TRANS_STATUS 0x010
#define QSPI_BLK_CNT(val) (((val) >> 0) & 0xffff)
#define QSPI_RDY BIT(30)
#define QSPI_FIFO_STATUS 0x014
#define QSPI_RX_FIFO_EMPTY BIT(0)
#define QSPI_RX_FIFO_FULL BIT(1)
#define QSPI_TX_FIFO_EMPTY BIT(2)
#define QSPI_TX_FIFO_FULL BIT(3)
#define QSPI_RX_FIFO_UNF BIT(4)
#define QSPI_RX_FIFO_OVF BIT(5)
#define QSPI_TX_FIFO_UNF BIT(6)
#define QSPI_TX_FIFO_OVF BIT(7)
#define QSPI_ERR BIT(8)
#define QSPI_TX_FIFO_FLUSH BIT(14)
#define QSPI_RX_FIFO_FLUSH BIT(15)
#define QSPI_TX_FIFO_EMPTY_COUNT(val) (((val) >> 16) & 0x7f)
#define QSPI_RX_FIFO_FULL_COUNT(val) (((val) >> 23) & 0x7f)
#define QSPI_FIFO_ERROR (QSPI_RX_FIFO_UNF | \
QSPI_RX_FIFO_OVF | \
QSPI_TX_FIFO_UNF | \
QSPI_TX_FIFO_OVF)
#define QSPI_FIFO_EMPTY (QSPI_RX_FIFO_EMPTY | \
QSPI_TX_FIFO_EMPTY)
#define QSPI_TX_DATA 0x018
#define QSPI_RX_DATA 0x01c
#define QSPI_DMA_CTL 0x020
#define QSPI_TX_TRIG(n) (((n) & 0x3) << 15)
#define QSPI_TX_TRIG_1 QSPI_TX_TRIG(0)
#define QSPI_TX_TRIG_4 QSPI_TX_TRIG(1)
#define QSPI_TX_TRIG_8 QSPI_TX_TRIG(2)
#define QSPI_TX_TRIG_16 QSPI_TX_TRIG(3)
#define QSPI_RX_TRIG(n) (((n) & 0x3) << 19)
#define QSPI_RX_TRIG_1 QSPI_RX_TRIG(0)
#define QSPI_RX_TRIG_4 QSPI_RX_TRIG(1)
#define QSPI_RX_TRIG_8 QSPI_RX_TRIG(2)
#define QSPI_RX_TRIG_16 QSPI_RX_TRIG(3)
#define QSPI_DMA_EN BIT(31)
#define QSPI_DMA_BLK 0x024
#define QSPI_DMA_BLK_SET(x) (((x) & 0xffff) << 0)
#define QSPI_TX_FIFO 0x108
#define QSPI_RX_FIFO 0x188
#define QSPI_FIFO_DEPTH 64
#define QSPI_INTR_MASK 0x18c
#define QSPI_INTR_RX_FIFO_UNF_MASK BIT(25)
#define QSPI_INTR_RX_FIFO_OVF_MASK BIT(26)
#define QSPI_INTR_TX_FIFO_UNF_MASK BIT(27)
#define QSPI_INTR_TX_FIFO_OVF_MASK BIT(28)
#define QSPI_INTR_RDY_MASK BIT(29)
#define QSPI_INTR_RX_TX_FIFO_ERR (QSPI_INTR_RX_FIFO_UNF_MASK | \
QSPI_INTR_RX_FIFO_OVF_MASK | \
QSPI_INTR_TX_FIFO_UNF_MASK | \
QSPI_INTR_TX_FIFO_OVF_MASK)
#define QSPI_MISC_REG 0x194
#define QSPI_NUM_DUMMY_CYCLE(x) (((x) & 0xff) << 0)
#define QSPI_DUMMY_CYCLES_MAX 0xff
#define QSPI_CMB_SEQ_CMD 0x19c
#define QSPI_COMMAND_VALUE_SET(x) (((x) & 0xFF) << 0)
#define QSPI_CMB_SEQ_CMD_CFG 0x1a0
#define QSPI_COMMAND_X1_X2_X4(x) (((x) & 0x3) << 13)
#define QSPI_COMMAND_X1_X2_X4_MASK (0x03 << 13)
#define QSPI_COMMAND_SDR_DDR BIT(12)
#define QSPI_COMMAND_SIZE_SET(x) (((x) & 0xFF) << 0)
#define QSPI_GLOBAL_CONFIG 0x1a4
#define QSPI_CMB_SEQ_EN BIT(0)
#define QSPI_TPM_WAIT_POLL_EN BIT(1)
#define QSPI_CMB_SEQ_ADDR 0x1a8
#define QSPI_ADDRESS_VALUE_SET(x) (((x) & 0xFFFF) << 0)
#define QSPI_CMB_SEQ_ADDR_CFG 0x1ac
#define QSPI_ADDRESS_X1_X2_X4(x) (((x) & 0x3) << 13)
#define QSPI_ADDRESS_X1_X2_X4_MASK (0x03 << 13)
#define QSPI_ADDRESS_SDR_DDR BIT(12)
#define QSPI_ADDRESS_SIZE_SET(x) (((x) & 0xFF) << 0)
#define DATA_DIR_TX BIT(0)
#define DATA_DIR_RX BIT(1)
#define QSPI_DMA_TIMEOUT (msecs_to_jiffies(1000))
#define DEFAULT_QSPI_DMA_BUF_LEN (64 * 1024)
#define CMD_TRANSFER 0
#define ADDR_TRANSFER 1
#define DATA_TRANSFER 2
struct tegra_qspi_soc_data {
bool has_dma;
bool cmb_xfer_capable;
bool supports_tpm;
unsigned int cs_count;
};
struct tegra_qspi_client_data {
int tx_clk_tap_delay;
int rx_clk_tap_delay;
};
struct tegra_qspi {
struct device *dev;
struct spi_master *master;
/* lock to protect data accessed by irq */
spinlock_t lock;
struct clk *clk;
void __iomem *base;
phys_addr_t phys;
unsigned int irq;
u32 cur_speed;
unsigned int cur_pos;
unsigned int words_per_32bit;
unsigned int bytes_per_word;
unsigned int curr_dma_words;
unsigned int cur_direction;
unsigned int cur_rx_pos;
unsigned int cur_tx_pos;
unsigned int dma_buf_size;
unsigned int max_buf_size;
bool is_curr_dma_xfer;
struct completion rx_dma_complete;
struct completion tx_dma_complete;
u32 tx_status;
u32 rx_status;
u32 status_reg;
bool is_packed;
bool use_dma;
u32 command1_reg;
u32 dma_control_reg;
u32 def_command1_reg;
u32 def_command2_reg;
u32 spi_cs_timing1;
u32 spi_cs_timing2;
u8 dummy_cycles;
struct completion xfer_completion;
struct spi_transfer *curr_xfer;
struct dma_chan *rx_dma_chan;
u32 *rx_dma_buf;
dma_addr_t rx_dma_phys;
struct dma_async_tx_descriptor *rx_dma_desc;
struct dma_chan *tx_dma_chan;
u32 *tx_dma_buf;
dma_addr_t tx_dma_phys;
struct dma_async_tx_descriptor *tx_dma_desc;
const struct tegra_qspi_soc_data *soc_data;
};
static inline u32 tegra_qspi_readl(struct tegra_qspi *tqspi, unsigned long offset)
{
return readl(tqspi->base + offset);
}
static inline void tegra_qspi_writel(struct tegra_qspi *tqspi, u32 value, unsigned long offset)
{
writel(value, tqspi->base + offset);
/* read back register to make sure that register writes completed */
if (offset != QSPI_TX_FIFO)
readl(tqspi->base + QSPI_COMMAND1);
}
static void tegra_qspi_mask_clear_irq(struct tegra_qspi *tqspi)
{
u32 value;
/* write 1 to clear status register */
value = tegra_qspi_readl(tqspi, QSPI_TRANS_STATUS);
tegra_qspi_writel(tqspi, value, QSPI_TRANS_STATUS);
value = tegra_qspi_readl(tqspi, QSPI_INTR_MASK);
if (!(value & QSPI_INTR_RDY_MASK)) {
value |= (QSPI_INTR_RDY_MASK | QSPI_INTR_RX_TX_FIFO_ERR);
tegra_qspi_writel(tqspi, value, QSPI_INTR_MASK);
}
/* clear fifo status error if any */
value = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
if (value & QSPI_ERR)
tegra_qspi_writel(tqspi, QSPI_ERR | QSPI_FIFO_ERROR, QSPI_FIFO_STATUS);
}
static unsigned int
tegra_qspi_calculate_curr_xfer_param(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
unsigned int max_word, max_len, total_fifo_words;
unsigned int remain_len = t->len - tqspi->cur_pos;
unsigned int bits_per_word = t->bits_per_word;
tqspi->bytes_per_word = DIV_ROUND_UP(bits_per_word, 8);
/*
* Tegra QSPI controller supports packed or unpacked mode transfers.
* Packed mode is used for data transfers of 8, 16, or 32 bits per
* word with a minimum transfer of one word; unpacked mode is used
* for all other transfers.
*/
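/*
 * For example, a 64-byte transfer at 8 bits per word runs packed with
 * four words per 32-bit FIFO entry, while an unusual word size such
 * as 24 bits falls back to unpacked mode with one word per entry.
 */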
if ((bits_per_word == 8 || bits_per_word == 16 ||
bits_per_word == 32) && t->len > 3) {
tqspi->is_packed = true;
tqspi->words_per_32bit = 32 / bits_per_word;
} else {
tqspi->is_packed = false;
tqspi->words_per_32bit = 1;
}
if (tqspi->is_packed) {
max_len = min(remain_len, tqspi->max_buf_size);
tqspi->curr_dma_words = max_len / tqspi->bytes_per_word;
total_fifo_words = (max_len + 3) / 4;
} else {
max_word = (remain_len - 1) / tqspi->bytes_per_word + 1;
max_word = min(max_word, tqspi->max_buf_size / 4);
tqspi->curr_dma_words = max_word;
total_fifo_words = max_word;
}
return total_fifo_words;
}
static unsigned int
tegra_qspi_fill_tx_fifo_from_client_txbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
unsigned int written_words, fifo_words_left, count;
unsigned int len, tx_empty_count, max_n_32bit, i;
u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
u32 fifo_status;
fifo_status = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
tx_empty_count = QSPI_TX_FIFO_EMPTY_COUNT(fifo_status);
if (tqspi->is_packed) {
fifo_words_left = tx_empty_count * tqspi->words_per_32bit;
written_words = min(fifo_words_left, tqspi->curr_dma_words);
len = written_words * tqspi->bytes_per_word;
max_n_32bit = DIV_ROUND_UP(len, 4);
for (count = 0; count < max_n_32bit; count++) {
u32 x = 0;
for (i = 0; (i < 4) && len; i++, len--)
x |= (u32)(*tx_buf++) << (i * 8);
tegra_qspi_writel(tqspi, x, QSPI_TX_FIFO);
}
tqspi->cur_tx_pos += written_words * tqspi->bytes_per_word;
} else {
unsigned int write_bytes;
u8 bytes_per_word = tqspi->bytes_per_word;
max_n_32bit = min(tqspi->curr_dma_words, tx_empty_count);
written_words = max_n_32bit;
len = written_words * tqspi->bytes_per_word;
if (len > t->len - tqspi->cur_pos)
len = t->len - tqspi->cur_pos;
write_bytes = len;
for (count = 0; count < max_n_32bit; count++) {
u32 x = 0;
for (i = 0; len && (i < bytes_per_word); i++, len--)
x |= (u32)(*tx_buf++) << (i * 8);
tegra_qspi_writel(tqspi, x, QSPI_TX_FIFO);
}
tqspi->cur_tx_pos += write_bytes;
}
return written_words;
}
static unsigned int
tegra_qspi_read_rx_fifo_to_client_rxbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
u8 *rx_buf = (u8 *)t->rx_buf + tqspi->cur_rx_pos;
unsigned int len, rx_full_count, count, i;
unsigned int read_words = 0;
u32 fifo_status, x;
fifo_status = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
rx_full_count = QSPI_RX_FIFO_FULL_COUNT(fifo_status);
if (tqspi->is_packed) {
len = tqspi->curr_dma_words * tqspi->bytes_per_word;
for (count = 0; count < rx_full_count; count++) {
x = tegra_qspi_readl(tqspi, QSPI_RX_FIFO);
for (i = 0; len && (i < 4); i++, len--)
*rx_buf++ = (x >> i * 8) & 0xff;
}
read_words += tqspi->curr_dma_words;
tqspi->cur_rx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
} else {
u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
u8 bytes_per_word = tqspi->bytes_per_word;
unsigned int read_bytes;
len = rx_full_count * bytes_per_word;
if (len > t->len - tqspi->cur_pos)
len = t->len - tqspi->cur_pos;
read_bytes = len;
for (count = 0; count < rx_full_count; count++) {
x = tegra_qspi_readl(tqspi, QSPI_RX_FIFO) & rx_mask;
for (i = 0; len && (i < bytes_per_word); i++, len--)
*rx_buf++ = (x >> (i * 8)) & 0xff;
}
read_words += rx_full_count;
tqspi->cur_rx_pos += read_bytes;
}
return read_words;
}
static void
tegra_qspi_copy_client_txbuf_to_qspi_txbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
dma_sync_single_for_cpu(tqspi->dev, tqspi->tx_dma_phys,
tqspi->dma_buf_size, DMA_TO_DEVICE);
/*
* In packed mode, each word in FIFO may contain multiple packets
* based on bits per word. So all bytes in each FIFO word are valid.
*
* In unpacked mode, each word in FIFO contains single packet and
* based on bits per word any remaining bits in FIFO word will be
* ignored by the hardware and are invalid bits.
*/
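/*
 * Example: at 16 bits per word, packed mode carries two packets per
 * 32-bit FIFO word, whereas unpacked mode carries a single packet per
 * word and the hardware ignores the upper 16 bits.
 */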
if (tqspi->is_packed) {
tqspi->cur_tx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
} else {
u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
unsigned int i, count, consume, write_bytes;
/*
* Fill tx_dma_buf to contain single packet in each word based
* on bits per word from SPI core tx_buf.
*/
consume = tqspi->curr_dma_words * tqspi->bytes_per_word;
if (consume > t->len - tqspi->cur_pos)
consume = t->len - tqspi->cur_pos;
write_bytes = consume;
for (count = 0; count < tqspi->curr_dma_words; count++) {
u32 x = 0;
for (i = 0; consume && (i < tqspi->bytes_per_word); i++, consume--)
x |= (u32)(*tx_buf++) << (i * 8);
tqspi->tx_dma_buf[count] = x;
}
tqspi->cur_tx_pos += write_bytes;
}
dma_sync_single_for_device(tqspi->dev, tqspi->tx_dma_phys,
tqspi->dma_buf_size, DMA_TO_DEVICE);
}
static void
tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
dma_sync_single_for_cpu(tqspi->dev, tqspi->rx_dma_phys,
tqspi->dma_buf_size, DMA_FROM_DEVICE);
if (tqspi->is_packed) {
tqspi->cur_rx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
} else {
unsigned char *rx_buf = t->rx_buf + tqspi->cur_rx_pos;
u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
unsigned int i, count, consume, read_bytes;
/*
* Each FIFO word contains single data packet.
* Skip invalid bits in each FIFO word based on bits per word
* and align bytes while filling in SPI core rx_buf.
*/
consume = tqspi->curr_dma_words * tqspi->bytes_per_word;
if (consume > t->len - tqspi->cur_pos)
consume = t->len - tqspi->cur_pos;
read_bytes = consume;
for (count = 0; count < tqspi->curr_dma_words; count++) {
u32 x = tqspi->rx_dma_buf[count] & rx_mask;
for (i = 0; consume && (i < tqspi->bytes_per_word); i++, consume--)
*rx_buf++ = (x >> (i * 8)) & 0xff;
}
tqspi->cur_rx_pos += read_bytes;
}
dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys,
tqspi->dma_buf_size, DMA_FROM_DEVICE);
}
static void tegra_qspi_dma_complete(void *args)
{
struct completion *dma_complete = args;
complete(dma_complete);
}
static int tegra_qspi_start_tx_dma(struct tegra_qspi *tqspi, struct spi_transfer *t, int len)
{
dma_addr_t tx_dma_phys;
reinit_completion(&tqspi->tx_dma_complete);
if (tqspi->is_packed)
tx_dma_phys = t->tx_dma;
else
tx_dma_phys = tqspi->tx_dma_phys;
tqspi->tx_dma_desc = dmaengine_prep_slave_single(tqspi->tx_dma_chan, tx_dma_phys,
len, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!tqspi->tx_dma_desc) {
dev_err(tqspi->dev, "Unable to get TX descriptor\n");
return -EIO;
}
tqspi->tx_dma_desc->callback = tegra_qspi_dma_complete;
tqspi->tx_dma_desc->callback_param = &tqspi->tx_dma_complete;
dmaengine_submit(tqspi->tx_dma_desc);
dma_async_issue_pending(tqspi->tx_dma_chan);
return 0;
}
static int tegra_qspi_start_rx_dma(struct tegra_qspi *tqspi, struct spi_transfer *t, int len)
{
dma_addr_t rx_dma_phys;
reinit_completion(&tqspi->rx_dma_complete);
if (tqspi->is_packed)
rx_dma_phys = t->rx_dma;
else
rx_dma_phys = tqspi->rx_dma_phys;
tqspi->rx_dma_desc = dmaengine_prep_slave_single(tqspi->rx_dma_chan, rx_dma_phys,
len, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!tqspi->rx_dma_desc) {
dev_err(tqspi->dev, "Unable to get RX descriptor\n");
return -EIO;
}
tqspi->rx_dma_desc->callback = tegra_qspi_dma_complete;
tqspi->rx_dma_desc->callback_param = &tqspi->rx_dma_complete;
dmaengine_submit(tqspi->rx_dma_desc);
dma_async_issue_pending(tqspi->rx_dma_chan);
return 0;
}
static int tegra_qspi_flush_fifos(struct tegra_qspi *tqspi, bool atomic)
{
void __iomem *addr = tqspi->base + QSPI_FIFO_STATUS;
u32 val;
val = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
if ((val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY)
return 0;
val |= QSPI_RX_FIFO_FLUSH | QSPI_TX_FIFO_FLUSH;
tegra_qspi_writel(tqspi, val, QSPI_FIFO_STATUS);
if (!atomic)
return readl_relaxed_poll_timeout(addr, val,
(val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY,
1000, 1000000);
return readl_relaxed_poll_timeout_atomic(addr, val,
(val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY,
1000, 1000000);
}
static void tegra_qspi_unmask_irq(struct tegra_qspi *tqspi)
{
u32 intr_mask;
intr_mask = tegra_qspi_readl(tqspi, QSPI_INTR_MASK);
intr_mask &= ~(QSPI_INTR_RDY_MASK | QSPI_INTR_RX_TX_FIFO_ERR);
tegra_qspi_writel(tqspi, intr_mask, QSPI_INTR_MASK);
}
static int tegra_qspi_dma_map_xfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
u8 *rx_buf = (u8 *)t->rx_buf + tqspi->cur_rx_pos;
unsigned int len;
len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;
if (t->tx_buf) {
t->tx_dma = dma_map_single(tqspi->dev, (void *)tx_buf, len, DMA_TO_DEVICE);
if (dma_mapping_error(tqspi->dev, t->tx_dma))
return -ENOMEM;
}
if (t->rx_buf) {
t->rx_dma = dma_map_single(tqspi->dev, (void *)rx_buf, len, DMA_FROM_DEVICE);
if (dma_mapping_error(tqspi->dev, t->rx_dma)) {
dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
return -ENOMEM;
}
}
return 0;
}
static void tegra_qspi_dma_unmap_xfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
unsigned int len;
len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;
dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
dma_unmap_single(tqspi->dev, t->rx_dma, len, DMA_FROM_DEVICE);
}
static int tegra_qspi_start_dma_based_transfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
struct dma_slave_config dma_sconfig = { 0 };
unsigned int len;
u8 dma_burst;
int ret = 0;
u32 val;
if (tqspi->is_packed) {
ret = tegra_qspi_dma_map_xfer(tqspi, t);
if (ret < 0)
return ret;
}
val = QSPI_DMA_BLK_SET(tqspi->curr_dma_words - 1);
tegra_qspi_writel(tqspi, val, QSPI_DMA_BLK);
tegra_qspi_unmask_irq(tqspi);
if (tqspi->is_packed)
len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;
else
len = tqspi->curr_dma_words * 4;
/* set attention level based on length of transfer */
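/*
 * For example, a 20-byte transfer (not a multiple of 16) uses the
 * one-word trigger, 16- or 48-byte transfers use the 4-word
 * trigger/burst, and multiples of 32 bytes use the 8-word
 * trigger/burst.
 */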
val = 0;
if (len & 0xf) {
val |= QSPI_TX_TRIG_1 | QSPI_RX_TRIG_1;
dma_burst = 1;
} else if (((len) >> 4) & 0x1) {
val |= QSPI_TX_TRIG_4 | QSPI_RX_TRIG_4;
dma_burst = 4;
} else {
val |= QSPI_TX_TRIG_8 | QSPI_RX_TRIG_8;
dma_burst = 8;
}
tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);
tqspi->dma_control_reg = val;
dma_sconfig.device_fc = true;
if (tqspi->cur_direction & DATA_DIR_TX) {
dma_sconfig.dst_addr = tqspi->phys + QSPI_TX_FIFO;
dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
dma_sconfig.dst_maxburst = dma_burst;
ret = dmaengine_slave_config(tqspi->tx_dma_chan, &dma_sconfig);
if (ret < 0) {
dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
return ret;
}
tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t);
ret = tegra_qspi_start_tx_dma(tqspi, t, len);
if (ret < 0) {
dev_err(tqspi->dev, "failed to starting TX DMA: %d\n", ret);
return ret;
}
}
if (tqspi->cur_direction & DATA_DIR_RX) {
dma_sconfig.src_addr = tqspi->phys + QSPI_RX_FIFO;
dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
dma_sconfig.src_maxburst = dma_burst;
ret = dmaengine_slave_config(tqspi->rx_dma_chan, &dma_sconfig);
if (ret < 0) {
dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
return ret;
}
dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys,
tqspi->dma_buf_size,
DMA_FROM_DEVICE);
ret = tegra_qspi_start_rx_dma(tqspi, t, len);
if (ret < 0) {
dev_err(tqspi->dev, "failed to start RX DMA: %d\n", ret);
if (tqspi->cur_direction & DATA_DIR_TX)
dmaengine_terminate_all(tqspi->tx_dma_chan);
return ret;
}
}
tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);
tqspi->is_curr_dma_xfer = true;
tqspi->dma_control_reg = val;
val |= QSPI_DMA_EN;
tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);
return ret;
}
static int tegra_qspi_start_cpu_based_transfer(struct tegra_qspi *qspi, struct spi_transfer *t)
{
u32 val;
unsigned int cur_words;
if (qspi->cur_direction & DATA_DIR_TX)
cur_words = tegra_qspi_fill_tx_fifo_from_client_txbuf(qspi, t);
else
cur_words = qspi->curr_dma_words;
val = QSPI_DMA_BLK_SET(cur_words - 1);
tegra_qspi_writel(qspi, val, QSPI_DMA_BLK);
tegra_qspi_unmask_irq(qspi);
qspi->is_curr_dma_xfer = false;
val = qspi->command1_reg;
val |= QSPI_PIO;
tegra_qspi_writel(qspi, val, QSPI_COMMAND1);
return 0;
}
static void tegra_qspi_deinit_dma(struct tegra_qspi *tqspi)
{
if (!tqspi->soc_data->has_dma)
return;
if (tqspi->tx_dma_buf) {
dma_free_coherent(tqspi->dev, tqspi->dma_buf_size,
tqspi->tx_dma_buf, tqspi->tx_dma_phys);
tqspi->tx_dma_buf = NULL;
}
if (tqspi->tx_dma_chan) {
dma_release_channel(tqspi->tx_dma_chan);
tqspi->tx_dma_chan = NULL;
}
if (tqspi->rx_dma_buf) {
dma_free_coherent(tqspi->dev, tqspi->dma_buf_size,
tqspi->rx_dma_buf, tqspi->rx_dma_phys);
tqspi->rx_dma_buf = NULL;
}
if (tqspi->rx_dma_chan) {
dma_release_channel(tqspi->rx_dma_chan);
tqspi->rx_dma_chan = NULL;
}
}
static int tegra_qspi_init_dma(struct tegra_qspi *tqspi)
{
struct dma_chan *dma_chan;
dma_addr_t dma_phys;
u32 *dma_buf;
int err;
if (!tqspi->soc_data->has_dma)
return 0;
dma_chan = dma_request_chan(tqspi->dev, "rx");
if (IS_ERR(dma_chan)) {
err = PTR_ERR(dma_chan);
goto err_out;
}
tqspi->rx_dma_chan = dma_chan;
dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
if (!dma_buf) {
err = -ENOMEM;
goto err_out;
}
tqspi->rx_dma_buf = dma_buf;
tqspi->rx_dma_phys = dma_phys;
dma_chan = dma_request_chan(tqspi->dev, "tx");
if (IS_ERR(dma_chan)) {
err = PTR_ERR(dma_chan);
goto err_out;
}
tqspi->tx_dma_chan = dma_chan;
dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
if (!dma_buf) {
err = -ENOMEM;
goto err_out;
}
tqspi->tx_dma_buf = dma_buf;
tqspi->tx_dma_phys = dma_phys;
tqspi->use_dma = true;
return 0;
err_out:
tegra_qspi_deinit_dma(tqspi);
if (err != -EPROBE_DEFER) {
dev_err(tqspi->dev, "cannot use DMA: %d\n", err);
dev_err(tqspi->dev, "falling back to PIO\n");
return 0;
}
return err;
}
static u32 tegra_qspi_setup_transfer_one(struct spi_device *spi, struct spi_transfer *t,
bool is_first_of_msg)
{
struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
struct tegra_qspi_client_data *cdata = spi->controller_data;
u32 command1, command2, speed = t->speed_hz;
u8 bits_per_word = t->bits_per_word;
u32 tx_tap = 0, rx_tap = 0;
int req_mode;
if (!has_acpi_companion(tqspi->dev) && speed != tqspi->cur_speed) {
clk_set_rate(tqspi->clk, speed);
tqspi->cur_speed = speed;
}
tqspi->cur_pos = 0;
tqspi->cur_rx_pos = 0;
tqspi->cur_tx_pos = 0;
tqspi->curr_xfer = t;
if (is_first_of_msg) {
tegra_qspi_mask_clear_irq(tqspi);
command1 = tqspi->def_command1_reg;
command1 |= QSPI_CS_SEL(spi_get_chipselect(spi, 0));
command1 |= QSPI_BIT_LENGTH(bits_per_word - 1);
command1 &= ~QSPI_CONTROL_MODE_MASK;
req_mode = spi->mode & 0x3;
if (req_mode == SPI_MODE_3)
command1 |= QSPI_CONTROL_MODE_3;
else
command1 |= QSPI_CONTROL_MODE_0;
if (spi->mode & SPI_CS_HIGH)
command1 |= QSPI_CS_SW_VAL;
else
command1 &= ~QSPI_CS_SW_VAL;
tegra_qspi_writel(tqspi, command1, QSPI_COMMAND1);
if (cdata && cdata->tx_clk_tap_delay)
tx_tap = cdata->tx_clk_tap_delay;
if (cdata && cdata->rx_clk_tap_delay)
rx_tap = cdata->rx_clk_tap_delay;
command2 = QSPI_TX_TAP_DELAY(tx_tap) | QSPI_RX_TAP_DELAY(rx_tap);
if (command2 != tqspi->def_command2_reg)
tegra_qspi_writel(tqspi, command2, QSPI_COMMAND2);
} else {
command1 = tqspi->command1_reg;
command1 &= ~QSPI_BIT_LENGTH(~0);
command1 |= QSPI_BIT_LENGTH(bits_per_word - 1);
}
command1 &= ~QSPI_SDR_DDR_SEL;
return command1;
}
static int tegra_qspi_start_transfer_one(struct spi_device *spi,
struct spi_transfer *t, u32 command1)
{
struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
unsigned int total_fifo_words;
u8 bus_width = 0;
int ret;
total_fifo_words = tegra_qspi_calculate_curr_xfer_param(tqspi, t);
command1 &= ~QSPI_PACKED;
if (tqspi->is_packed)
command1 |= QSPI_PACKED;
tegra_qspi_writel(tqspi, command1, QSPI_COMMAND1);
tqspi->cur_direction = 0;
command1 &= ~(QSPI_TX_EN | QSPI_RX_EN);
if (t->rx_buf) {
command1 |= QSPI_RX_EN;
tqspi->cur_direction |= DATA_DIR_RX;
bus_width = t->rx_nbits;
}
if (t->tx_buf) {
command1 |= QSPI_TX_EN;
tqspi->cur_direction |= DATA_DIR_TX;
bus_width = t->tx_nbits;
}
command1 &= ~QSPI_INTERFACE_WIDTH_MASK;
if (bus_width == SPI_NBITS_QUAD)
command1 |= QSPI_INTERFACE_WIDTH_QUAD;
else if (bus_width == SPI_NBITS_DUAL)
command1 |= QSPI_INTERFACE_WIDTH_DUAL;
else
command1 |= QSPI_INTERFACE_WIDTH_SINGLE;
tqspi->command1_reg = command1;
tegra_qspi_writel(tqspi, QSPI_NUM_DUMMY_CYCLE(tqspi->dummy_cycles), QSPI_MISC_REG);
ret = tegra_qspi_flush_fifos(tqspi, false);
if (ret < 0)
return ret;
if (tqspi->use_dma && total_fifo_words > QSPI_FIFO_DEPTH)
ret = tegra_qspi_start_dma_based_transfer(tqspi, t);
else
ret = tegra_qspi_start_cpu_based_transfer(tqspi, t);
return ret;
}
static struct tegra_qspi_client_data *tegra_qspi_parse_cdata_dt(struct spi_device *spi)
{
struct tegra_qspi_client_data *cdata;
struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
cdata = devm_kzalloc(tqspi->dev, sizeof(*cdata), GFP_KERNEL);
if (!cdata)
return NULL;
device_property_read_u32(&spi->dev, "nvidia,tx-clk-tap-delay",
&cdata->tx_clk_tap_delay);
device_property_read_u32(&spi->dev, "nvidia,rx-clk-tap-delay",
&cdata->rx_clk_tap_delay);
return cdata;
}
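/*
 * SPI core setup callback: allocate client data on first use and program the
 * inactive chip-select polarity in the default COMMAND1 value.
 */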
static int tegra_qspi_setup(struct spi_device *spi)
{
struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
struct tegra_qspi_client_data *cdata = spi->controller_data;
unsigned long flags;
u32 val;
int ret;
ret = pm_runtime_resume_and_get(tqspi->dev);
if (ret < 0) {
dev_err(tqspi->dev, "failed to get runtime PM: %d\n", ret);
return ret;
}
if (!cdata) {
cdata = tegra_qspi_parse_cdata_dt(spi);
spi->controller_data = cdata;
}
spin_lock_irqsave(&tqspi->lock, flags);
	/* keep default CS state inactive */
val = tqspi->def_command1_reg;
val |= QSPI_CS_SEL(spi_get_chipselect(spi, 0));
if (spi->mode & SPI_CS_HIGH)
val &= ~QSPI_CS_POL_INACTIVE(spi_get_chipselect(spi, 0));
else
val |= QSPI_CS_POL_INACTIVE(spi_get_chipselect(spi, 0));
tqspi->def_command1_reg = val;
tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
spin_unlock_irqrestore(&tqspi->lock, flags);
pm_runtime_put(tqspi->dev);
return 0;
}
static void tegra_qspi_dump_regs(struct tegra_qspi *tqspi)
{
dev_dbg(tqspi->dev, "============ QSPI REGISTER DUMP ============\n");
dev_dbg(tqspi->dev, "Command1: 0x%08x | Command2: 0x%08x\n",
tegra_qspi_readl(tqspi, QSPI_COMMAND1),
tegra_qspi_readl(tqspi, QSPI_COMMAND2));
dev_dbg(tqspi->dev, "DMA_CTL: 0x%08x | DMA_BLK: 0x%08x\n",
tegra_qspi_readl(tqspi, QSPI_DMA_CTL),
tegra_qspi_readl(tqspi, QSPI_DMA_BLK));
dev_dbg(tqspi->dev, "INTR_MASK: 0x%08x | MISC: 0x%08x\n",
tegra_qspi_readl(tqspi, QSPI_INTR_MASK),
tegra_qspi_readl(tqspi, QSPI_MISC_REG));
dev_dbg(tqspi->dev, "TRANS_STAT: 0x%08x | FIFO_STATUS: 0x%08x\n",
tegra_qspi_readl(tqspi, QSPI_TRANS_STATUS),
tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS));
}
static void tegra_qspi_handle_error(struct tegra_qspi *tqspi)
{
dev_err(tqspi->dev, "error in transfer, fifo status 0x%08x\n", tqspi->status_reg);
tegra_qspi_dump_regs(tqspi);
tegra_qspi_flush_fifos(tqspi, true);
if (device_reset(tqspi->dev) < 0)
dev_warn_once(tqspi->dev, "device reset failed\n");
}
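/*
 * De-assert the chip select: set the software CS value to the inactive level
 * in COMMAND1 and then restore the default COMMAND1 register.
 */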
static void tegra_qspi_transfer_end(struct spi_device *spi)
{
struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
int cs_val = (spi->mode & SPI_CS_HIGH) ? 0 : 1;
if (cs_val)
tqspi->command1_reg |= QSPI_CS_SW_VAL;
else
tqspi->command1_reg &= ~QSPI_CS_SW_VAL;
tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);
tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
}
static u32 tegra_qspi_cmd_config(bool is_ddr, u8 bus_width, u8 len)
{
u32 cmd_config = 0;
/* Extract Command configuration and value */
if (is_ddr)
cmd_config |= QSPI_COMMAND_SDR_DDR;
else
cmd_config &= ~QSPI_COMMAND_SDR_DDR;
cmd_config |= QSPI_COMMAND_X1_X2_X4(bus_width);
cmd_config |= QSPI_COMMAND_SIZE_SET((len * 8) - 1);
return cmd_config;
}
static u32 tegra_qspi_addr_config(bool is_ddr, u8 bus_width, u8 len)
{
u32 addr_config = 0;
/* Extract Address configuration and value */
	is_ddr = 0;		/* Only SDR mode supported */
	bus_width = 0;		/* X1 mode */
if (is_ddr)
addr_config |= QSPI_ADDRESS_SDR_DDR;
else
addr_config &= ~QSPI_ADDRESS_SDR_DDR;
addr_config |= QSPI_ADDRESS_X1_X2_X4(bus_width);
addr_config |= QSPI_ADDRESS_SIZE_SET((len * 8) - 1);
return addr_config;
}
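/*
 * Execute a message using the combined sequence: the first transfer carries
 * the command opcode, the second the address and the third the data. Command
 * and address are written to dedicated registers (X1 SDR mode) and the data
 * phase runs as a normal DMA or PIO transfer.
 */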
static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
struct spi_message *msg)
{
bool is_first_msg = true;
struct spi_transfer *xfer;
struct spi_device *spi = msg->spi;
u8 transfer_phase = 0;
u32 cmd1 = 0, dma_ctl = 0;
int ret = 0;
u32 address_value = 0;
u32 cmd_config = 0, addr_config = 0;
u8 cmd_value = 0, val = 0;
/* Enable Combined sequence mode */
val = tegra_qspi_readl(tqspi, QSPI_GLOBAL_CONFIG);
if (spi->mode & SPI_TPM_HW_FLOW) {
if (tqspi->soc_data->supports_tpm)
val |= QSPI_TPM_WAIT_POLL_EN;
else
return -EIO;
}
val |= QSPI_CMB_SEQ_EN;
tegra_qspi_writel(tqspi, val, QSPI_GLOBAL_CONFIG);
/* Process individual transfer list */
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
switch (transfer_phase) {
case CMD_TRANSFER:
/* X1 SDR mode */
cmd_config = tegra_qspi_cmd_config(false, 0,
xfer->len);
cmd_value = *((const u8 *)(xfer->tx_buf));
break;
case ADDR_TRANSFER:
/* X1 SDR mode */
addr_config = tegra_qspi_addr_config(false, 0,
xfer->len);
address_value = *((const u32 *)(xfer->tx_buf));
break;
case DATA_TRANSFER:
/* Program Command, Address value in register */
tegra_qspi_writel(tqspi, cmd_value, QSPI_CMB_SEQ_CMD);
tegra_qspi_writel(tqspi, address_value,
QSPI_CMB_SEQ_ADDR);
/* Program Command and Address config in register */
tegra_qspi_writel(tqspi, cmd_config,
QSPI_CMB_SEQ_CMD_CFG);
tegra_qspi_writel(tqspi, addr_config,
QSPI_CMB_SEQ_ADDR_CFG);
reinit_completion(&tqspi->xfer_completion);
cmd1 = tegra_qspi_setup_transfer_one(spi, xfer,
is_first_msg);
ret = tegra_qspi_start_transfer_one(spi, xfer,
cmd1);
if (ret < 0) {
dev_err(tqspi->dev, "Failed to start transfer-one: %d\n",
ret);
return ret;
}
is_first_msg = false;
			ret = wait_for_completion_timeout(&tqspi->xfer_completion,
							  QSPI_DMA_TIMEOUT);
if (WARN_ON(ret == 0)) {
dev_err(tqspi->dev, "QSPI Transfer failed with timeout: %d\n",
ret);
				if (tqspi->is_curr_dma_xfer &&
				    (tqspi->cur_direction & DATA_DIR_TX))
					dmaengine_terminate_all(tqspi->tx_dma_chan);
				if (tqspi->is_curr_dma_xfer &&
				    (tqspi->cur_direction & DATA_DIR_RX))
					dmaengine_terminate_all(tqspi->rx_dma_chan);
/* Abort transfer by resetting pio/dma bit */
if (!tqspi->is_curr_dma_xfer) {
					cmd1 = tegra_qspi_readl(tqspi, QSPI_COMMAND1);
					cmd1 &= ~QSPI_PIO;
					tegra_qspi_writel(tqspi, cmd1, QSPI_COMMAND1);
				} else {
					dma_ctl = tegra_qspi_readl(tqspi, QSPI_DMA_CTL);
					dma_ctl &= ~QSPI_DMA_EN;
					tegra_qspi_writel(tqspi, dma_ctl, QSPI_DMA_CTL);
}
/* Reset controller if timeout happens */
if (device_reset(tqspi->dev) < 0)
dev_warn_once(tqspi->dev,
"device reset failed\n");
ret = -EIO;
goto exit;
}
if (tqspi->tx_status || tqspi->rx_status) {
dev_err(tqspi->dev, "QSPI Transfer failed\n");
tqspi->tx_status = 0;
tqspi->rx_status = 0;
ret = -EIO;
goto exit;
}
if (!xfer->cs_change) {
tegra_qspi_transfer_end(spi);
spi_transfer_delay_exec(xfer);
}
break;
default:
ret = -EINVAL;
goto exit;
}
msg->actual_length += xfer->len;
transfer_phase++;
}
ret = 0;
exit:
msg->status = ret;
if (ret < 0) {
tegra_qspi_transfer_end(spi);
spi_transfer_delay_exec(xfer);
}
return ret;
}
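/*
 * Execute a message transfer by transfer with combined sequence mode
 * disabled. A following dummy-data transfer, if any, is folded into the
 * current transfer as dummy clock cycles.
 */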
static int tegra_qspi_non_combined_seq_xfer(struct tegra_qspi *tqspi,
struct spi_message *msg)
{
struct spi_device *spi = msg->spi;
struct spi_transfer *transfer;
bool is_first_msg = true;
int ret = 0, val = 0;
msg->status = 0;
msg->actual_length = 0;
tqspi->tx_status = 0;
tqspi->rx_status = 0;
/* Disable Combined sequence mode */
val = tegra_qspi_readl(tqspi, QSPI_GLOBAL_CONFIG);
val &= ~QSPI_CMB_SEQ_EN;
if (tqspi->soc_data->supports_tpm)
val &= ~QSPI_TPM_WAIT_POLL_EN;
tegra_qspi_writel(tqspi, val, QSPI_GLOBAL_CONFIG);
list_for_each_entry(transfer, &msg->transfers, transfer_list) {
struct spi_transfer *xfer = transfer;
u8 dummy_bytes = 0;
u32 cmd1;
tqspi->dummy_cycles = 0;
		/*
		 * Tegra QSPI hardware supports transferring dummy bytes after the
		 * actual transfer bytes, based on the dummy clock cycles programmed
		 * in the QSPI_MISC register. So, check whether the next transfer is a
		 * dummy-data transfer and, if so, program its dummy clock cycles
		 * along with the current transfer and skip the next transfer.
		 */
if (!list_is_last(&xfer->transfer_list, &msg->transfers)) {
struct spi_transfer *next_xfer;
next_xfer = list_next_entry(xfer, transfer_list);
if (next_xfer->dummy_data) {
u32 dummy_cycles = next_xfer->len * 8 / next_xfer->tx_nbits;
if (dummy_cycles <= QSPI_DUMMY_CYCLES_MAX) {
tqspi->dummy_cycles = dummy_cycles;
dummy_bytes = next_xfer->len;
transfer = next_xfer;
}
}
}
reinit_completion(&tqspi->xfer_completion);
cmd1 = tegra_qspi_setup_transfer_one(spi, xfer, is_first_msg);
ret = tegra_qspi_start_transfer_one(spi, xfer, cmd1);
if (ret < 0) {
dev_err(tqspi->dev, "failed to start transfer: %d\n", ret);
goto complete_xfer;
}
ret = wait_for_completion_timeout(&tqspi->xfer_completion,
QSPI_DMA_TIMEOUT);
if (WARN_ON(ret == 0)) {
dev_err(tqspi->dev, "transfer timeout\n");
if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_TX))
dmaengine_terminate_all(tqspi->tx_dma_chan);
if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_RX))
dmaengine_terminate_all(tqspi->rx_dma_chan);
tegra_qspi_handle_error(tqspi);
ret = -EIO;
goto complete_xfer;
}
if (tqspi->tx_status || tqspi->rx_status) {
tegra_qspi_handle_error(tqspi);
ret = -EIO;
goto complete_xfer;
}
msg->actual_length += xfer->len + dummy_bytes;
complete_xfer:
if (ret < 0) {
tegra_qspi_transfer_end(spi);
spi_transfer_delay_exec(xfer);
goto exit;
}
if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
/* de-activate CS after last transfer only when cs_change is not set */
if (!xfer->cs_change) {
tegra_qspi_transfer_end(spi);
spi_transfer_delay_exec(xfer);
}
} else if (xfer->cs_change) {
			/* de-activate CS between transfers only when cs_change is set */
tegra_qspi_transfer_end(spi);
spi_transfer_delay_exec(xfer);
}
}
ret = 0;
exit:
msg->status = ret;
return ret;
}
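/*
 * A message may use the combined sequence only when the controller supports
 * it and the message has exactly three transfers: a command of at most
 * 2 bytes, an address of 3 or 4 bytes, and a data phase that must fit in the
 * FIFO when the controller has no DMA support.
 */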
static bool tegra_qspi_validate_cmb_seq(struct tegra_qspi *tqspi,
struct spi_message *msg)
{
int transfer_count = 0;
struct spi_transfer *xfer;
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
transfer_count++;
}
if (!tqspi->soc_data->cmb_xfer_capable || transfer_count != 3)
return false;
xfer = list_first_entry(&msg->transfers, typeof(*xfer),
transfer_list);
if (xfer->len > 2)
return false;
xfer = list_next_entry(xfer, transfer_list);
if (xfer->len > 4 || xfer->len < 3)
return false;
xfer = list_next_entry(xfer, transfer_list);
if (!tqspi->soc_data->has_dma && xfer->len > (QSPI_FIFO_DEPTH << 2))
return false;
return true;
}
static int tegra_qspi_transfer_one_message(struct spi_master *master,
struct spi_message *msg)
{
struct tegra_qspi *tqspi = spi_master_get_devdata(master);
int ret;
if (tegra_qspi_validate_cmb_seq(tqspi, msg))
ret = tegra_qspi_combined_seq_xfer(tqspi, msg);
else
ret = tegra_qspi_non_combined_seq_xfer(tqspi, msg);
spi_finalize_current_message(master);
return ret;
}
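/*
 * PIO interrupt handling: on error report and complete, otherwise drain the
 * RX FIFO, complete the transfer once all bytes are done or start the next
 * FIFO-sized chunk.
 */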
static irqreturn_t handle_cpu_based_xfer(struct tegra_qspi *tqspi)
{
struct spi_transfer *t = tqspi->curr_xfer;
unsigned long flags;
spin_lock_irqsave(&tqspi->lock, flags);
if (tqspi->tx_status || tqspi->rx_status) {
tegra_qspi_handle_error(tqspi);
complete(&tqspi->xfer_completion);
goto exit;
}
if (tqspi->cur_direction & DATA_DIR_RX)
tegra_qspi_read_rx_fifo_to_client_rxbuf(tqspi, t);
if (tqspi->cur_direction & DATA_DIR_TX)
tqspi->cur_pos = tqspi->cur_tx_pos;
else
tqspi->cur_pos = tqspi->cur_rx_pos;
if (tqspi->cur_pos == t->len) {
complete(&tqspi->xfer_completion);
goto exit;
}
tegra_qspi_calculate_curr_xfer_param(tqspi, t);
tegra_qspi_start_cpu_based_transfer(tqspi, t);
exit:
spin_unlock_irqrestore(&tqspi->lock, flags);
return IRQ_HANDLED;
}
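/*
 * DMA interrupt handling: wait for the TX/RX DMA completions, copy received
 * data to the client buffer and either complete the transfer or start the
 * next chunk using DMA or PIO depending on the remaining length.
 */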
static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi)
{
struct spi_transfer *t = tqspi->curr_xfer;
unsigned int total_fifo_words;
unsigned long flags;
long wait_status;
int err = 0;
if (tqspi->cur_direction & DATA_DIR_TX) {
if (tqspi->tx_status) {
dmaengine_terminate_all(tqspi->tx_dma_chan);
err += 1;
} else {
wait_status = wait_for_completion_interruptible_timeout(
&tqspi->tx_dma_complete, QSPI_DMA_TIMEOUT);
if (wait_status <= 0) {
dmaengine_terminate_all(tqspi->tx_dma_chan);
dev_err(tqspi->dev, "failed TX DMA transfer\n");
err += 1;
}
}
}
if (tqspi->cur_direction & DATA_DIR_RX) {
if (tqspi->rx_status) {
dmaengine_terminate_all(tqspi->rx_dma_chan);
err += 2;
} else {
wait_status = wait_for_completion_interruptible_timeout(
&tqspi->rx_dma_complete, QSPI_DMA_TIMEOUT);
if (wait_status <= 0) {
dmaengine_terminate_all(tqspi->rx_dma_chan);
dev_err(tqspi->dev, "failed RX DMA transfer\n");
err += 2;
}
}
}
spin_lock_irqsave(&tqspi->lock, flags);
if (err) {
tegra_qspi_dma_unmap_xfer(tqspi, t);
tegra_qspi_handle_error(tqspi);
complete(&tqspi->xfer_completion);
goto exit;
}
if (tqspi->cur_direction & DATA_DIR_RX)
tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf(tqspi, t);
if (tqspi->cur_direction & DATA_DIR_TX)
tqspi->cur_pos = tqspi->cur_tx_pos;
else
tqspi->cur_pos = tqspi->cur_rx_pos;
if (tqspi->cur_pos == t->len) {
tegra_qspi_dma_unmap_xfer(tqspi, t);
complete(&tqspi->xfer_completion);
goto exit;
}
tegra_qspi_dma_unmap_xfer(tqspi, t);
/* continue transfer in current message */
total_fifo_words = tegra_qspi_calculate_curr_xfer_param(tqspi, t);
if (total_fifo_words > QSPI_FIFO_DEPTH)
err = tegra_qspi_start_dma_based_transfer(tqspi, t);
else
err = tegra_qspi_start_cpu_based_transfer(tqspi, t);
exit:
spin_unlock_irqrestore(&tqspi->lock, flags);
return IRQ_HANDLED;
}
static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data)
{
struct tegra_qspi *tqspi = context_data;
tqspi->status_reg = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
if (tqspi->cur_direction & DATA_DIR_TX)
tqspi->tx_status = tqspi->status_reg & (QSPI_TX_FIFO_UNF | QSPI_TX_FIFO_OVF);
if (tqspi->cur_direction & DATA_DIR_RX)
tqspi->rx_status = tqspi->status_reg & (QSPI_RX_FIFO_OVF | QSPI_RX_FIFO_UNF);
tegra_qspi_mask_clear_irq(tqspi);
if (!tqspi->is_curr_dma_xfer)
return handle_cpu_based_xfer(tqspi);
return handle_dma_based_xfer(tqspi);
}
static struct tegra_qspi_soc_data tegra210_qspi_soc_data = {
.has_dma = true,
.cmb_xfer_capable = false,
.supports_tpm = false,
.cs_count = 1,
};
static struct tegra_qspi_soc_data tegra186_qspi_soc_data = {
.has_dma = true,
.cmb_xfer_capable = true,
.supports_tpm = false,
.cs_count = 1,
};
static struct tegra_qspi_soc_data tegra234_qspi_soc_data = {
.has_dma = false,
.cmb_xfer_capable = true,
.supports_tpm = true,
.cs_count = 1,
};
static struct tegra_qspi_soc_data tegra241_qspi_soc_data = {
.has_dma = false,
.cmb_xfer_capable = true,
.supports_tpm = true,
.cs_count = 4,
};
static const struct of_device_id tegra_qspi_of_match[] = {
{
.compatible = "nvidia,tegra210-qspi",
.data = &tegra210_qspi_soc_data,
}, {
.compatible = "nvidia,tegra186-qspi",
.data = &tegra186_qspi_soc_data,
}, {
.compatible = "nvidia,tegra194-qspi",
.data = &tegra186_qspi_soc_data,
}, {
.compatible = "nvidia,tegra234-qspi",
.data = &tegra234_qspi_soc_data,
}, {
.compatible = "nvidia,tegra241-qspi",
.data = &tegra241_qspi_soc_data,
},
{}
};
MODULE_DEVICE_TABLE(of, tegra_qspi_of_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id tegra_qspi_acpi_match[] = {
{
.id = "NVDA1213",
.driver_data = (kernel_ulong_t)&tegra210_qspi_soc_data,
}, {
.id = "NVDA1313",
.driver_data = (kernel_ulong_t)&tegra186_qspi_soc_data,
}, {
.id = "NVDA1413",
.driver_data = (kernel_ulong_t)&tegra234_qspi_soc_data,
}, {
.id = "NVDA1513",
.driver_data = (kernel_ulong_t)&tegra241_qspi_soc_data,
},
{}
};
MODULE_DEVICE_TABLE(acpi, tegra_qspi_acpi_match);
#endif
static int tegra_qspi_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct tegra_qspi *tqspi;
struct resource *r;
int ret, qspi_irq;
int bus_num;
master = devm_spi_alloc_master(&pdev->dev, sizeof(*tqspi));
if (!master)
return -ENOMEM;
platform_set_drvdata(pdev, master);
tqspi = spi_master_get_devdata(master);
master->mode_bits = SPI_MODE_0 | SPI_MODE_3 | SPI_CS_HIGH |
SPI_TX_DUAL | SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD;
master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
master->flags = SPI_CONTROLLER_HALF_DUPLEX;
master->setup = tegra_qspi_setup;
master->transfer_one_message = tegra_qspi_transfer_one_message;
master->num_chipselect = 1;
master->auto_runtime_pm = true;
bus_num = of_alias_get_id(pdev->dev.of_node, "spi");
if (bus_num >= 0)
master->bus_num = bus_num;
tqspi->master = master;
tqspi->dev = &pdev->dev;
spin_lock_init(&tqspi->lock);
tqspi->soc_data = device_get_match_data(&pdev->dev);
master->num_chipselect = tqspi->soc_data->cs_count;
tqspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
if (IS_ERR(tqspi->base))
return PTR_ERR(tqspi->base);
tqspi->phys = r->start;
qspi_irq = platform_get_irq(pdev, 0);
if (qspi_irq < 0)
return qspi_irq;
tqspi->irq = qspi_irq;
if (!has_acpi_companion(tqspi->dev)) {
tqspi->clk = devm_clk_get(&pdev->dev, "qspi");
if (IS_ERR(tqspi->clk)) {
ret = PTR_ERR(tqspi->clk);
dev_err(&pdev->dev, "failed to get clock: %d\n", ret);
return ret;
}
}
tqspi->max_buf_size = QSPI_FIFO_DEPTH << 2;
tqspi->dma_buf_size = DEFAULT_QSPI_DMA_BUF_LEN;
ret = tegra_qspi_init_dma(tqspi);
if (ret < 0)
return ret;
if (tqspi->use_dma)
tqspi->max_buf_size = tqspi->dma_buf_size;
init_completion(&tqspi->tx_dma_complete);
init_completion(&tqspi->rx_dma_complete);
init_completion(&tqspi->xfer_completion);
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "failed to get runtime PM: %d\n", ret);
goto exit_pm_disable;
}
if (device_reset(tqspi->dev) < 0)
dev_warn_once(tqspi->dev, "device reset failed\n");
tqspi->def_command1_reg = QSPI_M_S | QSPI_CS_SW_HW | QSPI_CS_SW_VAL;
tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
tqspi->spi_cs_timing1 = tegra_qspi_readl(tqspi, QSPI_CS_TIMING1);
tqspi->spi_cs_timing2 = tegra_qspi_readl(tqspi, QSPI_CS_TIMING2);
tqspi->def_command2_reg = tegra_qspi_readl(tqspi, QSPI_COMMAND2);
pm_runtime_put(&pdev->dev);
ret = request_threaded_irq(tqspi->irq, NULL,
tegra_qspi_isr_thread, IRQF_ONESHOT,
dev_name(&pdev->dev), tqspi);
if (ret < 0) {
dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n", tqspi->irq, ret);
goto exit_pm_disable;
}
master->dev.of_node = pdev->dev.of_node;
ret = spi_register_master(master);
if (ret < 0) {
dev_err(&pdev->dev, "failed to register master: %d\n", ret);
goto exit_free_irq;
}
return 0;
exit_free_irq:
free_irq(qspi_irq, tqspi);
exit_pm_disable:
pm_runtime_force_suspend(&pdev->dev);
tegra_qspi_deinit_dma(tqspi);
return ret;
}
static void tegra_qspi_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct tegra_qspi *tqspi = spi_master_get_devdata(master);
spi_unregister_master(master);
free_irq(tqspi->irq, tqspi);
pm_runtime_force_suspend(&pdev->dev);
tegra_qspi_deinit_dma(tqspi);
}
static int __maybe_unused tegra_qspi_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
return spi_master_suspend(master);
}
static int __maybe_unused tegra_qspi_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct tegra_qspi *tqspi = spi_master_get_devdata(master);
int ret;
ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
dev_err(dev, "failed to get runtime PM: %d\n", ret);
return ret;
}
tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);
tegra_qspi_writel(tqspi, tqspi->def_command2_reg, QSPI_COMMAND2);
pm_runtime_put(dev);
return spi_master_resume(master);
}
static int __maybe_unused tegra_qspi_runtime_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct tegra_qspi *tqspi = spi_master_get_devdata(master);
/* Runtime pm disabled with ACPI */
if (has_acpi_companion(tqspi->dev))
return 0;
	/* flush all writes that are in the PPSB queue by reading back */
tegra_qspi_readl(tqspi, QSPI_COMMAND1);
clk_disable_unprepare(tqspi->clk);
return 0;
}
static int __maybe_unused tegra_qspi_runtime_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct tegra_qspi *tqspi = spi_master_get_devdata(master);
int ret;
/* Runtime pm disabled with ACPI */
if (has_acpi_companion(tqspi->dev))
return 0;
ret = clk_prepare_enable(tqspi->clk);
if (ret < 0)
dev_err(tqspi->dev, "failed to enable clock: %d\n", ret);
return ret;
}
static const struct dev_pm_ops tegra_qspi_pm_ops = {
SET_RUNTIME_PM_OPS(tegra_qspi_runtime_suspend, tegra_qspi_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(tegra_qspi_suspend, tegra_qspi_resume)
};
static struct platform_driver tegra_qspi_driver = {
.driver = {
.name = "tegra-qspi",
.pm = &tegra_qspi_pm_ops,
.of_match_table = tegra_qspi_of_match,
.acpi_match_table = ACPI_PTR(tegra_qspi_acpi_match),
},
.probe = tegra_qspi_probe,
.remove_new = tegra_qspi_remove,
};
module_platform_driver(tegra_qspi_driver);
MODULE_ALIAS("platform:qspi-tegra");
MODULE_DESCRIPTION("NVIDIA Tegra QSPI Controller Driver");
MODULE_AUTHOR("Sowjanya Komatineni <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/spi/spi-tegra210-quad.c |
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2021 Sunplus Inc.
// Author: Li-hao Kuo <[email protected]>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/spi/spi.h>
#define SP7021_DATA_RDY_REG 0x0044
#define SP7021_SLAVE_DMA_CTRL_REG 0x0048
#define SP7021_SLAVE_DMA_LENGTH_REG 0x004c
#define SP7021_SLAVE_DMA_ADDR_REG 0x004c
#define SP7021_SLAVE_DATA_RDY BIT(0)
#define SP7021_SLAVE_SW_RST BIT(1)
#define SP7021_SLA_DMA_W_INT BIT(8)
#define SP7021_SLAVE_CLR_INT BIT(8)
#define SP7021_SLAVE_DMA_EN BIT(0)
#define SP7021_SLAVE_DMA_RW BIT(6)
#define SP7021_SLAVE_DMA_CMD GENMASK(3, 2)
#define SP7021_FIFO_REG 0x0034
#define SP7021_SPI_STATUS_REG 0x0038
#define SP7021_SPI_CONFIG_REG 0x003c
#define SP7021_INT_BUSY_REG 0x004c
#define SP7021_DMA_CTRL_REG 0x0050
#define SP7021_SPI_START_FD BIT(0)
#define SP7021_FD_SW_RST BIT(1)
#define SP7021_TX_EMP_FLAG BIT(2)
#define SP7021_RX_EMP_FLAG BIT(4)
#define SP7021_RX_FULL_FLAG BIT(5)
#define SP7021_FINISH_FLAG BIT(6)
#define SP7021_TX_CNT_MASK GENMASK(11, 8)
#define SP7021_RX_CNT_MASK GENMASK(15, 12)
#define SP7021_TX_LEN_MASK GENMASK(23, 16)
#define SP7021_GET_LEN_MASK GENMASK(31, 24)
#define SP7021_SET_TX_LEN GENMASK(23, 16)
#define SP7021_SET_XFER_LEN GENMASK(31, 24)
#define SP7021_CPOL_FD BIT(0)
#define SP7021_CPHA_R BIT(1)
#define SP7021_CPHA_W BIT(2)
#define SP7021_LSB_SEL BIT(4)
#define SP7021_CS_POR BIT(5)
#define SP7021_FD_SEL BIT(6)
#define SP7021_RX_UNIT GENMASK(8, 7)
#define SP7021_TX_UNIT GENMASK(10, 9)
#define SP7021_TX_EMP_FLAG_MASK BIT(11)
#define SP7021_RX_FULL_FLAG_MASK BIT(14)
#define SP7021_FINISH_FLAG_MASK BIT(15)
#define SP7021_CLEAN_RW_BYTE GENMASK(10, 7)
#define SP7021_CLEAN_FLUG_MASK GENMASK(15, 11)
#define SP7021_CLK_MASK GENMASK(31, 16)
#define SP7021_INT_BYPASS BIT(3)
#define SP7021_CLR_MASTER_INT BIT(6)
#define SP7021_SPI_DATA_SIZE (255)
#define SP7021_FIFO_DATA_LEN (16)
enum {
SP7021_MASTER_MODE = 0,
SP7021_SLAVE_MODE = 1,
};
struct sp7021_spi_ctlr {
struct device *dev;
struct spi_controller *ctlr;
void __iomem *m_base;
void __iomem *s_base;
u32 xfer_conf;
int mode;
int m_irq;
int s_irq;
struct clk *spi_clk;
struct reset_control *rstc;
// data xfer lock
struct mutex buf_lock;
struct completion isr_done;
struct completion slave_isr;
unsigned int rx_cur_len;
unsigned int tx_cur_len;
unsigned int data_unit;
const u8 *tx_buf;
u8 *rx_buf;
};
static irqreturn_t sp7021_spi_slave_irq(int irq, void *dev)
{
struct sp7021_spi_ctlr *pspim = dev;
unsigned int data_status;
data_status = readl(pspim->s_base + SP7021_DATA_RDY_REG);
data_status |= SP7021_SLAVE_CLR_INT;
	writel(data_status, pspim->s_base + SP7021_DATA_RDY_REG);
complete(&pspim->slave_isr);
return IRQ_HANDLED;
}
static int sp7021_spi_slave_abort(struct spi_controller *ctlr)
{
struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
complete(&pspim->slave_isr);
complete(&pspim->isr_done);
return 0;
}
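/*
 * Slave-mode transmit: program the slave DMA engine with the buffer address
 * and length, raise the data-ready flag and wait for the isr_done completion
 * before returning.
 */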
static int sp7021_spi_slave_tx(struct spi_device *spi, struct spi_transfer *xfer)
{
struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(spi->controller);
u32 value;
reinit_completion(&pspim->slave_isr);
value = SP7021_SLAVE_DMA_EN | SP7021_SLAVE_DMA_RW | FIELD_PREP(SP7021_SLAVE_DMA_CMD, 3);
writel(value, pspim->s_base + SP7021_SLAVE_DMA_CTRL_REG);
writel(xfer->len, pspim->s_base + SP7021_SLAVE_DMA_LENGTH_REG);
writel(xfer->tx_dma, pspim->s_base + SP7021_SLAVE_DMA_ADDR_REG);
value = readl(pspim->s_base + SP7021_DATA_RDY_REG);
value |= SP7021_SLAVE_DATA_RDY;
writel(value, pspim->s_base + SP7021_DATA_RDY_REG);
if (wait_for_completion_interruptible(&pspim->isr_done)) {
dev_err(&spi->dev, "%s() wait_for_completion err\n", __func__);
return -EINTR;
}
return 0;
}
static int sp7021_spi_slave_rx(struct spi_device *spi, struct spi_transfer *xfer)
{
struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(spi->controller);
u32 value;
reinit_completion(&pspim->isr_done);
value = SP7021_SLAVE_DMA_EN | FIELD_PREP(SP7021_SLAVE_DMA_CMD, 3);
writel(value, pspim->s_base + SP7021_SLAVE_DMA_CTRL_REG);
writel(xfer->len, pspim->s_base + SP7021_SLAVE_DMA_LENGTH_REG);
writel(xfer->rx_dma, pspim->s_base + SP7021_SLAVE_DMA_ADDR_REG);
if (wait_for_completion_interruptible(&pspim->isr_done)) {
dev_err(&spi->dev, "%s() wait_for_completion err\n", __func__);
return -EINTR;
}
writel(SP7021_SLAVE_SW_RST, pspim->s_base + SP7021_SLAVE_DMA_CTRL_REG);
return 0;
}
static void sp7021_spi_master_rb(struct sp7021_spi_ctlr *pspim, unsigned int len)
{
int i;
for (i = 0; i < len; i++) {
pspim->rx_buf[pspim->rx_cur_len] =
readl(pspim->m_base + SP7021_FIFO_REG);
pspim->rx_cur_len++;
}
}
static void sp7021_spi_master_wb(struct sp7021_spi_ctlr *pspim, unsigned int len)
{
int i;
for (i = 0; i < len; i++) {
writel(pspim->tx_buf[pspim->tx_cur_len],
pspim->m_base + SP7021_FIFO_REG);
pspim->tx_cur_len++;
}
}
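/*
 * Master-mode interrupt: read available data from the RX FIFO, refill the TX
 * FIFO, and once the transfer has finished drain any remaining RX data, clear
 * the master interrupt and complete isr_done.
 */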
static irqreturn_t sp7021_spi_master_irq(int irq, void *dev)
{
struct sp7021_spi_ctlr *pspim = dev;
unsigned int tx_cnt, total_len;
unsigned int tx_len, rx_cnt;
unsigned int fd_status;
bool isrdone = false;
u32 value;
fd_status = readl(pspim->m_base + SP7021_SPI_STATUS_REG);
tx_cnt = FIELD_GET(SP7021_TX_CNT_MASK, fd_status);
tx_len = FIELD_GET(SP7021_TX_LEN_MASK, fd_status);
total_len = FIELD_GET(SP7021_GET_LEN_MASK, fd_status);
if ((fd_status & SP7021_TX_EMP_FLAG) && (fd_status & SP7021_RX_EMP_FLAG) && total_len == 0)
return IRQ_NONE;
if (tx_len == 0 && total_len == 0)
return IRQ_NONE;
rx_cnt = FIELD_GET(SP7021_RX_CNT_MASK, fd_status);
if (fd_status & SP7021_RX_FULL_FLAG)
rx_cnt = pspim->data_unit;
tx_cnt = min(tx_len - pspim->tx_cur_len, pspim->data_unit - tx_cnt);
dev_dbg(pspim->dev, "fd_st=0x%x rx_c:%d tx_c:%d tx_l:%d",
fd_status, rx_cnt, tx_cnt, tx_len);
if (rx_cnt > 0)
sp7021_spi_master_rb(pspim, rx_cnt);
if (tx_cnt > 0)
sp7021_spi_master_wb(pspim, tx_cnt);
fd_status = readl(pspim->m_base + SP7021_SPI_STATUS_REG);
tx_len = FIELD_GET(SP7021_TX_LEN_MASK, fd_status);
total_len = FIELD_GET(SP7021_GET_LEN_MASK, fd_status);
if (fd_status & SP7021_FINISH_FLAG || tx_len == pspim->tx_cur_len) {
while (total_len != pspim->rx_cur_len) {
fd_status = readl(pspim->m_base + SP7021_SPI_STATUS_REG);
total_len = FIELD_GET(SP7021_GET_LEN_MASK, fd_status);
if (fd_status & SP7021_RX_FULL_FLAG)
rx_cnt = pspim->data_unit;
else
rx_cnt = FIELD_GET(SP7021_RX_CNT_MASK, fd_status);
if (rx_cnt > 0)
sp7021_spi_master_rb(pspim, rx_cnt);
}
value = readl(pspim->m_base + SP7021_INT_BUSY_REG);
value |= SP7021_CLR_MASTER_INT;
writel(value, pspim->m_base + SP7021_INT_BUSY_REG);
writel(SP7021_FINISH_FLAG, pspim->m_base + SP7021_SPI_STATUS_REG);
isrdone = true;
}
if (isrdone)
complete(&pspim->isr_done);
return IRQ_HANDLED;
}
static void sp7021_prep_transfer(struct spi_controller *ctlr, struct spi_device *spi)
{
struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
pspim->tx_cur_len = 0;
pspim->rx_cur_len = 0;
pspim->data_unit = SP7021_FIFO_DATA_LEN;
}
// preliminarily set CS, CPOL, CPHA and LSB
static int sp7021_spi_controller_prepare_message(struct spi_controller *ctlr,
struct spi_message *msg)
{
struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
struct spi_device *s = msg->spi;
	u32 value, rs = 0;
	value = readl(pspim->m_base + SP7021_SPI_STATUS_REG);
	value |= SP7021_FD_SW_RST;
	writel(value, pspim->m_base + SP7021_SPI_STATUS_REG);
rs |= SP7021_FD_SEL;
if (s->mode & SPI_CPOL)
rs |= SP7021_CPOL_FD;
if (s->mode & SPI_LSB_FIRST)
rs |= SP7021_LSB_SEL;
if (s->mode & SPI_CS_HIGH)
rs |= SP7021_CS_POR;
if (s->mode & SPI_CPHA)
rs |= SP7021_CPHA_R;
else
rs |= SP7021_CPHA_W;
rs |= FIELD_PREP(SP7021_TX_UNIT, 0) | FIELD_PREP(SP7021_RX_UNIT, 0);
pspim->xfer_conf = rs;
if (pspim->xfer_conf & SP7021_CPOL_FD)
writel(pspim->xfer_conf, pspim->m_base + SP7021_SPI_CONFIG_REG);
return 0;
}
static void sp7021_spi_setup_clk(struct spi_controller *ctlr, struct spi_transfer *xfer)
{
struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
u32 clk_rate, clk_sel, div;
clk_rate = clk_get_rate(pspim->spi_clk);
div = max(2U, clk_rate / xfer->speed_hz);
clk_sel = (div / 2) - 1;
pspim->xfer_conf &= ~SP7021_CLK_MASK;
pspim->xfer_conf |= FIELD_PREP(SP7021_CLK_MASK, clk_sel);
writel(pspim->xfer_conf, pspim->m_base + SP7021_SPI_CONFIG_REG);
}
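/*
 * Full-duplex master transfer, split into chunks of at most
 * SP7021_SPI_DATA_SIZE bytes: each chunk primes the TX FIFO, programs the
 * transfer length, starts the transfer and waits for the interrupt handler
 * to signal completion.
 */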
static int sp7021_spi_master_transfer_one(struct spi_controller *ctlr, struct spi_device *spi,
struct spi_transfer *xfer)
{
struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
unsigned long timeout = msecs_to_jiffies(1000);
unsigned int xfer_cnt, xfer_len, last_len;
unsigned int i, len_temp;
u32 reg_temp;
xfer_cnt = xfer->len / SP7021_SPI_DATA_SIZE;
last_len = xfer->len % SP7021_SPI_DATA_SIZE;
for (i = 0; i <= xfer_cnt; i++) {
mutex_lock(&pspim->buf_lock);
sp7021_prep_transfer(ctlr, spi);
sp7021_spi_setup_clk(ctlr, xfer);
reinit_completion(&pspim->isr_done);
if (i == xfer_cnt)
xfer_len = last_len;
else
xfer_len = SP7021_SPI_DATA_SIZE;
pspim->tx_buf = xfer->tx_buf + i * SP7021_SPI_DATA_SIZE;
pspim->rx_buf = xfer->rx_buf + i * SP7021_SPI_DATA_SIZE;
if (pspim->tx_cur_len < xfer_len) {
len_temp = min(pspim->data_unit, xfer_len);
sp7021_spi_master_wb(pspim, len_temp);
}
reg_temp = readl(pspim->m_base + SP7021_SPI_CONFIG_REG);
reg_temp &= ~SP7021_CLEAN_RW_BYTE;
reg_temp &= ~SP7021_CLEAN_FLUG_MASK;
reg_temp |= SP7021_FD_SEL | SP7021_FINISH_FLAG_MASK |
SP7021_TX_EMP_FLAG_MASK | SP7021_RX_FULL_FLAG_MASK |
FIELD_PREP(SP7021_TX_UNIT, 0) | FIELD_PREP(SP7021_RX_UNIT, 0);
writel(reg_temp, pspim->m_base + SP7021_SPI_CONFIG_REG);
reg_temp = FIELD_PREP(SP7021_SET_TX_LEN, xfer_len) |
FIELD_PREP(SP7021_SET_XFER_LEN, xfer_len) |
SP7021_SPI_START_FD;
writel(reg_temp, pspim->m_base + SP7021_SPI_STATUS_REG);
if (!wait_for_completion_interruptible_timeout(&pspim->isr_done, timeout)) {
dev_err(&spi->dev, "wait_for_completion err\n");
mutex_unlock(&pspim->buf_lock);
return -ETIMEDOUT;
}
reg_temp = readl(pspim->m_base + SP7021_SPI_STATUS_REG);
if (reg_temp & SP7021_FINISH_FLAG) {
writel(SP7021_FINISH_FLAG, pspim->m_base + SP7021_SPI_STATUS_REG);
writel(readl(pspim->m_base + SP7021_SPI_CONFIG_REG) &
SP7021_CLEAN_FLUG_MASK, pspim->m_base + SP7021_SPI_CONFIG_REG);
}
if (pspim->xfer_conf & SP7021_CPOL_FD)
writel(pspim->xfer_conf, pspim->m_base + SP7021_SPI_CONFIG_REG);
mutex_unlock(&pspim->buf_lock);
}
return 0;
}
static int sp7021_spi_slave_transfer_one(struct spi_controller *ctlr, struct spi_device *spi,
struct spi_transfer *xfer)
{
struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
struct device *dev = pspim->dev;
int ret;
if (xfer->tx_buf && !xfer->rx_buf) {
xfer->tx_dma = dma_map_single(dev, (void *)xfer->tx_buf,
xfer->len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, xfer->tx_dma))
return -ENOMEM;
ret = sp7021_spi_slave_tx(spi, xfer);
dma_unmap_single(dev, xfer->tx_dma, xfer->len, DMA_TO_DEVICE);
} else if (xfer->rx_buf && !xfer->tx_buf) {
xfer->rx_dma = dma_map_single(dev, xfer->rx_buf, xfer->len,
DMA_FROM_DEVICE);
if (dma_mapping_error(dev, xfer->rx_dma))
return -ENOMEM;
ret = sp7021_spi_slave_rx(spi, xfer);
dma_unmap_single(dev, xfer->rx_dma, xfer->len, DMA_FROM_DEVICE);
} else {
dev_dbg(&ctlr->dev, "%s() wrong command\n", __func__);
return -EINVAL;
}
spi_finalize_current_transfer(ctlr);
return ret;
}
static void sp7021_spi_disable_unprepare(void *data)
{
clk_disable_unprepare(data);
}
static void sp7021_spi_reset_control_assert(void *data)
{
reset_control_assert(data);
}
static int sp7021_spi_controller_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct sp7021_spi_ctlr *pspim;
struct spi_controller *ctlr;
int mode, ret;
pdev->id = of_alias_get_id(pdev->dev.of_node, "sp_spi");
if (device_property_read_bool(dev, "spi-slave"))
mode = SP7021_SLAVE_MODE;
else
mode = SP7021_MASTER_MODE;
if (mode == SP7021_SLAVE_MODE)
ctlr = devm_spi_alloc_slave(dev, sizeof(*pspim));
else
ctlr = devm_spi_alloc_master(dev, sizeof(*pspim));
if (!ctlr)
return -ENOMEM;
device_set_node(&ctlr->dev, dev_fwnode(dev));
ctlr->bus_num = pdev->id;
ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
ctlr->auto_runtime_pm = true;
ctlr->prepare_message = sp7021_spi_controller_prepare_message;
if (mode == SP7021_SLAVE_MODE) {
ctlr->transfer_one = sp7021_spi_slave_transfer_one;
ctlr->slave_abort = sp7021_spi_slave_abort;
ctlr->flags = SPI_CONTROLLER_HALF_DUPLEX;
} else {
ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
ctlr->min_speed_hz = 40000;
ctlr->max_speed_hz = 25000000;
ctlr->use_gpio_descriptors = true;
ctlr->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
ctlr->transfer_one = sp7021_spi_master_transfer_one;
}
platform_set_drvdata(pdev, ctlr);
pspim = spi_controller_get_devdata(ctlr);
pspim->mode = mode;
pspim->ctlr = ctlr;
pspim->dev = dev;
mutex_init(&pspim->buf_lock);
init_completion(&pspim->isr_done);
init_completion(&pspim->slave_isr);
pspim->m_base = devm_platform_ioremap_resource_byname(pdev, "master");
if (IS_ERR(pspim->m_base))
return dev_err_probe(dev, PTR_ERR(pspim->m_base), "m_base get fail\n");
pspim->s_base = devm_platform_ioremap_resource_byname(pdev, "slave");
if (IS_ERR(pspim->s_base))
return dev_err_probe(dev, PTR_ERR(pspim->s_base), "s_base get fail\n");
pspim->m_irq = platform_get_irq_byname(pdev, "master_risc");
if (pspim->m_irq < 0)
return pspim->m_irq;
pspim->s_irq = platform_get_irq_byname(pdev, "slave_risc");
if (pspim->s_irq < 0)
return pspim->s_irq;
pspim->spi_clk = devm_clk_get(dev, NULL);
if (IS_ERR(pspim->spi_clk))
return dev_err_probe(dev, PTR_ERR(pspim->spi_clk), "clk get fail\n");
pspim->rstc = devm_reset_control_get_exclusive(dev, NULL);
if (IS_ERR(pspim->rstc))
return dev_err_probe(dev, PTR_ERR(pspim->rstc), "rst get fail\n");
ret = clk_prepare_enable(pspim->spi_clk);
if (ret)
return dev_err_probe(dev, ret, "failed to enable clk\n");
ret = devm_add_action_or_reset(dev, sp7021_spi_disable_unprepare, pspim->spi_clk);
if (ret)
return ret;
ret = reset_control_deassert(pspim->rstc);
if (ret)
return dev_err_probe(dev, ret, "failed to deassert reset\n");
ret = devm_add_action_or_reset(dev, sp7021_spi_reset_control_assert, pspim->rstc);
if (ret)
return ret;
ret = devm_request_irq(dev, pspim->m_irq, sp7021_spi_master_irq,
IRQF_TRIGGER_RISING, pdev->name, pspim);
if (ret)
return ret;
ret = devm_request_irq(dev, pspim->s_irq, sp7021_spi_slave_irq,
IRQF_TRIGGER_RISING, pdev->name, pspim);
if (ret)
return ret;
pm_runtime_enable(dev);
ret = spi_register_controller(ctlr);
if (ret) {
pm_runtime_disable(dev);
return dev_err_probe(dev, ret, "spi_register_master fail\n");
}
return 0;
}
static void sp7021_spi_controller_remove(struct platform_device *pdev)
{
struct spi_controller *ctlr = dev_get_drvdata(&pdev->dev);
spi_unregister_controller(ctlr);
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
}
static int __maybe_unused sp7021_spi_controller_suspend(struct device *dev)
{
struct spi_controller *ctlr = dev_get_drvdata(dev);
struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
return reset_control_assert(pspim->rstc);
}
static int __maybe_unused sp7021_spi_controller_resume(struct device *dev)
{
struct spi_controller *ctlr = dev_get_drvdata(dev);
struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
reset_control_deassert(pspim->rstc);
return clk_prepare_enable(pspim->spi_clk);
}
#ifdef CONFIG_PM
static int sp7021_spi_runtime_suspend(struct device *dev)
{
struct spi_controller *ctlr = dev_get_drvdata(dev);
struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
return reset_control_assert(pspim->rstc);
}
static int sp7021_spi_runtime_resume(struct device *dev)
{
struct spi_controller *ctlr = dev_get_drvdata(dev);
struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
return reset_control_deassert(pspim->rstc);
}
#endif
static const struct dev_pm_ops sp7021_spi_pm_ops = {
SET_RUNTIME_PM_OPS(sp7021_spi_runtime_suspend,
sp7021_spi_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(sp7021_spi_controller_suspend,
sp7021_spi_controller_resume)
};
static const struct of_device_id sp7021_spi_controller_ids[] = {
{ .compatible = "sunplus,sp7021-spi" },
{}
};
MODULE_DEVICE_TABLE(of, sp7021_spi_controller_ids);
static struct platform_driver sp7021_spi_controller_driver = {
.probe = sp7021_spi_controller_probe,
.remove_new = sp7021_spi_controller_remove,
.driver = {
.name = "sunplus,sp7021-spi-controller",
.of_match_table = sp7021_spi_controller_ids,
.pm = &sp7021_spi_pm_ops,
},
};
module_platform_driver(sp7021_spi_controller_driver);
MODULE_AUTHOR("Li-hao Kuo <[email protected]>");
MODULE_DESCRIPTION("Sunplus SPI controller driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-sunplus-sp7021.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* CLPS711X SPI bus driver
*
* Copyright (C) 2012-2016 Alexander Shiyan <[email protected]>
*/
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/clps711x.h>
#include <linux/spi/spi.h>
#define DRIVER_NAME "clps711x-spi"
#define SYNCIO_FRMLEN(x) ((x) << 8)
#define SYNCIO_TXFRMEN (1 << 14)
struct spi_clps711x_data {
void __iomem *syncio;
struct regmap *syscon;
struct clk *spi_clk;
u8 *tx_buf;
u8 *rx_buf;
unsigned int bpw;
int len;
};
static int spi_clps711x_prepare_message(struct spi_controller *host,
struct spi_message *msg)
{
struct spi_clps711x_data *hw = spi_controller_get_devdata(host);
struct spi_device *spi = msg->spi;
/* Setup mode for transfer */
return regmap_update_bits(hw->syscon, SYSCON_OFFSET, SYSCON3_ADCCKNSEN,
(spi->mode & SPI_CPHA) ?
SYSCON3_ADCCKNSEN : 0);
}
static int spi_clps711x_transfer_one(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct spi_clps711x_data *hw = spi_controller_get_devdata(host);
u8 data;
clk_set_rate(hw->spi_clk, xfer->speed_hz ? : spi->max_speed_hz);
hw->len = xfer->len;
hw->bpw = xfer->bits_per_word;
hw->tx_buf = (u8 *)xfer->tx_buf;
hw->rx_buf = (u8 *)xfer->rx_buf;
/* Initiate transfer */
data = hw->tx_buf ? *hw->tx_buf++ : 0;
writel(data | SYNCIO_FRMLEN(hw->bpw) | SYNCIO_TXFRMEN, hw->syncio);
return 1;
}
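/*
 * One frame has completed: store the received byte and either queue the next
 * byte or finalize the transfer.
 */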
static irqreturn_t spi_clps711x_isr(int irq, void *dev_id)
{
struct spi_controller *host = dev_id;
struct spi_clps711x_data *hw = spi_controller_get_devdata(host);
u8 data;
/* Handle RX */
data = readb(hw->syncio);
if (hw->rx_buf)
*hw->rx_buf++ = data;
/* Handle TX */
if (--hw->len > 0) {
data = hw->tx_buf ? *hw->tx_buf++ : 0;
writel(data | SYNCIO_FRMLEN(hw->bpw) | SYNCIO_TXFRMEN,
hw->syncio);
} else
spi_finalize_current_transfer(host);
return IRQ_HANDLED;
}
static int spi_clps711x_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct spi_clps711x_data *hw;
struct spi_controller *host;
int irq, ret;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
host = spi_alloc_host(&pdev->dev, sizeof(*hw));
if (!host)
return -ENOMEM;
host->use_gpio_descriptors = true;
host->bus_num = -1;
host->mode_bits = SPI_CPHA | SPI_CS_HIGH;
host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 8);
host->dev.of_node = pdev->dev.of_node;
host->prepare_message = spi_clps711x_prepare_message;
host->transfer_one = spi_clps711x_transfer_one;
hw = spi_controller_get_devdata(host);
hw->spi_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(hw->spi_clk)) {
ret = PTR_ERR(hw->spi_clk);
goto err_out;
}
hw->syscon = syscon_regmap_lookup_by_phandle(np, "syscon");
if (IS_ERR(hw->syscon)) {
ret = PTR_ERR(hw->syscon);
goto err_out;
}
hw->syncio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hw->syncio)) {
ret = PTR_ERR(hw->syncio);
goto err_out;
}
	/* Disable extended mode due to hardware problems */
regmap_update_bits(hw->syscon, SYSCON_OFFSET, SYSCON3_ADCCON, 0);
/* Clear possible pending interrupt */
readl(hw->syncio);
ret = devm_request_irq(&pdev->dev, irq, spi_clps711x_isr, 0,
dev_name(&pdev->dev), host);
if (ret)
goto err_out;
ret = devm_spi_register_controller(&pdev->dev, host);
if (!ret)
return 0;
err_out:
spi_controller_put(host);
return ret;
}
static const struct of_device_id clps711x_spi_dt_ids[] = {
{ .compatible = "cirrus,ep7209-spi", },
{ }
};
MODULE_DEVICE_TABLE(of, clps711x_spi_dt_ids);
static struct platform_driver clps711x_spi_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = clps711x_spi_dt_ids,
},
.probe = spi_clps711x_probe,
};
module_platform_driver(clps711x_spi_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexander Shiyan <[email protected]>");
MODULE_DESCRIPTION("CLPS711X SPI bus driver");
MODULE_ALIAS("platform:" DRIVER_NAME);
| linux-master | drivers/spi/spi-clps711x.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017-2018, The Linux foundation. All rights reserved.
#include <linux/clk.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_opp.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#define QSPI_NUM_CS 2
#define QSPI_BYTES_PER_WORD 4
#define MSTR_CONFIG 0x0000
#define FULL_CYCLE_MODE BIT(3)
#define FB_CLK_EN BIT(4)
#define PIN_HOLDN BIT(6)
#define PIN_WPN BIT(7)
#define DMA_ENABLE BIT(8)
#define BIG_ENDIAN_MODE BIT(9)
#define SPI_MODE_MSK 0xc00
#define SPI_MODE_SHFT 10
#define CHIP_SELECT_NUM BIT(12)
#define SBL_EN BIT(13)
#define LPA_BASE_MSK 0x3c000
#define LPA_BASE_SHFT 14
#define TX_DATA_DELAY_MSK 0xc0000
#define TX_DATA_DELAY_SHFT 18
#define TX_CLK_DELAY_MSK 0x300000
#define TX_CLK_DELAY_SHFT 20
#define TX_CS_N_DELAY_MSK 0xc00000
#define TX_CS_N_DELAY_SHFT 22
#define TX_DATA_OE_DELAY_MSK 0x3000000
#define TX_DATA_OE_DELAY_SHFT 24
#define AHB_MASTER_CFG 0x0004
#define HMEM_TYPE_START_MID_TRANS_MSK 0x7
#define HMEM_TYPE_START_MID_TRANS_SHFT 0
#define HMEM_TYPE_LAST_TRANS_MSK 0x38
#define HMEM_TYPE_LAST_TRANS_SHFT 3
#define USE_HMEMTYPE_LAST_ON_DESC_OR_CHAIN_MSK 0xc0
#define USE_HMEMTYPE_LAST_ON_DESC_OR_CHAIN_SHFT 6
#define HMEMTYPE_READ_TRANS_MSK 0x700
#define HMEMTYPE_READ_TRANS_SHFT 8
#define HSHARED BIT(11)
#define HINNERSHARED BIT(12)
#define MSTR_INT_EN 0x000C
#define MSTR_INT_STATUS 0x0010
#define RESP_FIFO_UNDERRUN BIT(0)
#define RESP_FIFO_NOT_EMPTY BIT(1)
#define RESP_FIFO_RDY BIT(2)
#define HRESP_FROM_NOC_ERR BIT(3)
#define WR_FIFO_EMPTY BIT(9)
#define WR_FIFO_FULL BIT(10)
#define WR_FIFO_OVERRUN BIT(11)
#define TRANSACTION_DONE BIT(16)
#define DMA_CHAIN_DONE BIT(31)
#define QSPI_ERR_IRQS (RESP_FIFO_UNDERRUN | HRESP_FROM_NOC_ERR | \
WR_FIFO_OVERRUN)
#define QSPI_ALL_IRQS (QSPI_ERR_IRQS | RESP_FIFO_RDY | \
WR_FIFO_EMPTY | WR_FIFO_FULL | \
TRANSACTION_DONE | DMA_CHAIN_DONE)
#define PIO_XFER_CTRL 0x0014
#define REQUEST_COUNT_MSK 0xffff
#define PIO_XFER_CFG 0x0018
#define TRANSFER_DIRECTION BIT(0)
#define MULTI_IO_MODE_MSK 0xe
#define MULTI_IO_MODE_SHFT 1
#define TRANSFER_FRAGMENT BIT(8)
#define SDR_1BIT 1
#define SDR_2BIT 2
#define SDR_4BIT 3
#define DDR_1BIT 5
#define DDR_2BIT 6
#define DDR_4BIT 7
#define DMA_DESC_SINGLE_SPI 1
#define DMA_DESC_DUAL_SPI 2
#define DMA_DESC_QUAD_SPI 3
#define PIO_XFER_STATUS 0x001c
#define WR_FIFO_BYTES_MSK 0xffff0000
#define WR_FIFO_BYTES_SHFT 16
#define PIO_DATAOUT_1B 0x0020
#define PIO_DATAOUT_4B 0x0024
#define RD_FIFO_CFG 0x0028
#define CONTINUOUS_MODE BIT(0)
#define RD_FIFO_STATUS 0x002c
#define FIFO_EMPTY BIT(11)
#define WR_CNTS_MSK 0x7f0
#define WR_CNTS_SHFT 4
#define RDY_64BYTE BIT(3)
#define RDY_32BYTE BIT(2)
#define RDY_16BYTE BIT(1)
#define FIFO_RDY BIT(0)
#define RD_FIFO_RESET 0x0030
#define RESET_FIFO BIT(0)
#define NEXT_DMA_DESC_ADDR 0x0040
#define CURRENT_DMA_DESC_ADDR 0x0044
#define CURRENT_MEM_ADDR 0x0048
#define CUR_MEM_ADDR 0x0048
#define HW_VERSION 0x004c
#define RD_FIFO 0x0050
#define SAMPLING_CLK_CFG 0x0090
#define SAMPLING_CLK_STATUS 0x0094
#define QSPI_ALIGN_REQ 32
enum qspi_dir {
QSPI_READ,
QSPI_WRITE,
};
struct qspi_cmd_desc {
u32 data_address;
u32 next_descriptor;
u32 direction:1;
u32 multi_io_mode:3;
u32 reserved1:4;
u32 fragment:1;
u32 reserved2:7;
u32 length:16;
};
struct qspi_xfer {
union {
const void *tx_buf;
void *rx_buf;
};
unsigned int rem_bytes;
unsigned int buswidth;
enum qspi_dir dir;
bool is_last;
};
enum qspi_clocks {
QSPI_CLK_CORE,
QSPI_CLK_IFACE,
QSPI_NUM_CLKS
};
/*
 * Number of entries in the sgt returned from the SPI framework that
 * will be supported. Can be modified as required.
 * In practice, given max_dma_len is 64KB, the number of
 * entries is not expected to exceed 1.
 */
#define QSPI_MAX_SG 5
struct qcom_qspi {
void __iomem *base;
struct device *dev;
struct clk_bulk_data *clks;
struct qspi_xfer xfer;
struct dma_pool *dma_cmd_pool;
dma_addr_t dma_cmd_desc[QSPI_MAX_SG];
void *virt_cmd_desc[QSPI_MAX_SG];
unsigned int n_cmd_desc;
struct icc_path *icc_path_cpu_to_qspi;
unsigned long last_speed;
/* Lock to protect data accessed by IRQs */
spinlock_t lock;
};
static u32 qspi_buswidth_to_iomode(struct qcom_qspi *ctrl,
unsigned int buswidth)
{
switch (buswidth) {
case 1:
return SDR_1BIT;
case 2:
return SDR_2BIT;
case 4:
return SDR_4BIT;
default:
dev_warn_once(ctrl->dev,
"Unexpected bus width: %u\n", buswidth);
return SDR_1BIT;
}
}
static void qcom_qspi_pio_xfer_cfg(struct qcom_qspi *ctrl)
{
u32 pio_xfer_cfg;
u32 iomode;
const struct qspi_xfer *xfer;
xfer = &ctrl->xfer;
pio_xfer_cfg = readl(ctrl->base + PIO_XFER_CFG);
pio_xfer_cfg &= ~TRANSFER_DIRECTION;
pio_xfer_cfg |= xfer->dir;
if (xfer->is_last)
pio_xfer_cfg &= ~TRANSFER_FRAGMENT;
else
pio_xfer_cfg |= TRANSFER_FRAGMENT;
pio_xfer_cfg &= ~MULTI_IO_MODE_MSK;
iomode = qspi_buswidth_to_iomode(ctrl, xfer->buswidth);
pio_xfer_cfg |= iomode << MULTI_IO_MODE_SHFT;
writel(pio_xfer_cfg, ctrl->base + PIO_XFER_CFG);
}
static void qcom_qspi_pio_xfer_ctrl(struct qcom_qspi *ctrl)
{
u32 pio_xfer_ctrl;
pio_xfer_ctrl = readl(ctrl->base + PIO_XFER_CTRL);
pio_xfer_ctrl &= ~REQUEST_COUNT_MSK;
pio_xfer_ctrl |= ctrl->xfer.rem_bytes;
writel(pio_xfer_ctrl, ctrl->base + PIO_XFER_CTRL);
}
static void qcom_qspi_pio_xfer(struct qcom_qspi *ctrl)
{
u32 ints;
qcom_qspi_pio_xfer_cfg(ctrl);
/* Ack any previous interrupts that might be hanging around */
writel(QSPI_ALL_IRQS, ctrl->base + MSTR_INT_STATUS);
/* Setup new interrupts */
if (ctrl->xfer.dir == QSPI_WRITE)
ints = QSPI_ERR_IRQS | WR_FIFO_EMPTY;
else
ints = QSPI_ERR_IRQS | RESP_FIFO_RDY;
writel(ints, ctrl->base + MSTR_INT_EN);
/* Kick off the transfer */
qcom_qspi_pio_xfer_ctrl(ctrl);
}
static void qcom_qspi_handle_err(struct spi_controller *host,
struct spi_message *msg)
{
u32 int_status;
struct qcom_qspi *ctrl = spi_controller_get_devdata(host);
unsigned long flags;
int i;
spin_lock_irqsave(&ctrl->lock, flags);
writel(0, ctrl->base + MSTR_INT_EN);
int_status = readl(ctrl->base + MSTR_INT_STATUS);
writel(int_status, ctrl->base + MSTR_INT_STATUS);
ctrl->xfer.rem_bytes = 0;
/* free cmd descriptors if they are around (DMA mode) */
for (i = 0; i < ctrl->n_cmd_desc; i++)
dma_pool_free(ctrl->dma_cmd_pool, ctrl->virt_cmd_desc[i],
ctrl->dma_cmd_desc[i]);
ctrl->n_cmd_desc = 0;
spin_unlock_irqrestore(&ctrl->lock, flags);
}
static int qcom_qspi_set_speed(struct qcom_qspi *ctrl, unsigned long speed_hz)
{
int ret;
unsigned int avg_bw_cpu;
if (speed_hz == ctrl->last_speed)
return 0;
/* In regular operation (SBL_EN=1) core must be 4x transfer clock */
ret = dev_pm_opp_set_rate(ctrl->dev, speed_hz * 4);
if (ret) {
dev_err(ctrl->dev, "Failed to set core clk %d\n", ret);
return ret;
}
/*
* Set BW quota for CPU.
* We don't have explicit peak requirement so keep it equal to avg_bw.
*/
avg_bw_cpu = Bps_to_icc(speed_hz);
ret = icc_set_bw(ctrl->icc_path_cpu_to_qspi, avg_bw_cpu, avg_bw_cpu);
if (ret) {
dev_err(ctrl->dev, "%s: ICC BW voting failed for cpu: %d\n",
__func__, ret);
return ret;
}
ctrl->last_speed = speed_hz;
return 0;
}
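/*
 * Allocate one DMA command descriptor from the pool, fill it in for the
 * current transfer direction and bus width, and chain it to the previous
 * descriptor if there is one.
 */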
static int qcom_qspi_alloc_desc(struct qcom_qspi *ctrl, dma_addr_t dma_ptr,
uint32_t n_bytes)
{
struct qspi_cmd_desc *virt_cmd_desc, *prev;
dma_addr_t dma_cmd_desc;
/* allocate for dma cmd descriptor */
virt_cmd_desc = dma_pool_alloc(ctrl->dma_cmd_pool, GFP_ATOMIC | __GFP_ZERO, &dma_cmd_desc);
if (!virt_cmd_desc) {
dev_warn_once(ctrl->dev, "Couldn't find memory for descriptor\n");
return -EAGAIN;
}
ctrl->virt_cmd_desc[ctrl->n_cmd_desc] = virt_cmd_desc;
ctrl->dma_cmd_desc[ctrl->n_cmd_desc] = dma_cmd_desc;
ctrl->n_cmd_desc++;
/* setup cmd descriptor */
virt_cmd_desc->data_address = dma_ptr;
virt_cmd_desc->direction = ctrl->xfer.dir;
virt_cmd_desc->multi_io_mode = qspi_buswidth_to_iomode(ctrl, ctrl->xfer.buswidth);
virt_cmd_desc->fragment = !ctrl->xfer.is_last;
virt_cmd_desc->length = n_bytes;
/* update previous descriptor */
if (ctrl->n_cmd_desc >= 2) {
prev = (ctrl->virt_cmd_desc)[ctrl->n_cmd_desc - 2];
prev->next_descriptor = dma_cmd_desc;
prev->fragment = 1;
}
return 0;
}
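/*
 * Build the DMA command descriptor chain for a transfer from its
 * scatter-gather table. Returns -EAGAIN (caller falls back to PIO) when the
 * list has too many entries, a buffer is not 32-byte aligned, or a read
 * length is not a multiple of 4 bytes.
 */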
static int qcom_qspi_setup_dma_desc(struct qcom_qspi *ctrl,
struct spi_transfer *xfer)
{
int ret;
struct sg_table *sgt;
dma_addr_t dma_ptr_sg;
unsigned int dma_len_sg;
int i;
if (ctrl->n_cmd_desc) {
dev_err(ctrl->dev, "Remnant dma buffers n_cmd_desc-%d\n", ctrl->n_cmd_desc);
return -EIO;
}
sgt = (ctrl->xfer.dir == QSPI_READ) ? &xfer->rx_sg : &xfer->tx_sg;
if (!sgt->nents || sgt->nents > QSPI_MAX_SG) {
dev_warn_once(ctrl->dev, "Cannot handle %d entries in scatter list\n", sgt->nents);
return -EAGAIN;
}
for (i = 0; i < sgt->nents; i++) {
dma_ptr_sg = sg_dma_address(sgt->sgl + i);
dma_len_sg = sg_dma_len(sgt->sgl + i);
if (!IS_ALIGNED(dma_ptr_sg, QSPI_ALIGN_REQ)) {
dev_warn_once(ctrl->dev, "dma_address not aligned to %d\n", QSPI_ALIGN_REQ);
return -EAGAIN;
}
		/*
		 * When reading with DMA the controller writes to memory 1 word
		 * at a time. If the length isn't a multiple of 4 bytes then
		 * the controller can clobber things later in memory.
		 * Fall back to PIO to be safe.
		 */
if (ctrl->xfer.dir == QSPI_READ && (dma_len_sg & 0x03)) {
dev_warn_once(ctrl->dev, "fallback to PIO for read of size %#010x\n",
dma_len_sg);
return -EAGAIN;
}
}
for (i = 0; i < sgt->nents; i++) {
dma_ptr_sg = sg_dma_address(sgt->sgl + i);
dma_len_sg = sg_dma_len(sgt->sgl + i);
ret = qcom_qspi_alloc_desc(ctrl, dma_ptr_sg, dma_len_sg);
if (ret)
goto cleanup;
}
return 0;
cleanup:
for (i = 0; i < ctrl->n_cmd_desc; i++)
dma_pool_free(ctrl->dma_cmd_pool, ctrl->virt_cmd_desc[i],
ctrl->dma_cmd_desc[i]);
ctrl->n_cmd_desc = 0;
return ret;
}
static void qcom_qspi_dma_xfer(struct qcom_qspi *ctrl)
{
/* Setup new interrupts */
writel(DMA_CHAIN_DONE, ctrl->base + MSTR_INT_EN);
/* kick off transfer */
writel((u32)((ctrl->dma_cmd_desc)[0]), ctrl->base + NEXT_DMA_DESC_ADDR);
}
/* Switch to DMA if transfer length exceeds this */
#define QSPI_MAX_BYTES_FIFO 64
static bool qcom_qspi_can_dma(struct spi_controller *ctlr,
struct spi_device *slv, struct spi_transfer *xfer)
{
return xfer->len > QSPI_MAX_BYTES_FIFO;
}
static int qcom_qspi_transfer_one(struct spi_controller *host,
struct spi_device *slv,
struct spi_transfer *xfer)
{
struct qcom_qspi *ctrl = spi_controller_get_devdata(host);
int ret;
unsigned long speed_hz;
unsigned long flags;
u32 mstr_cfg;
speed_hz = slv->max_speed_hz;
if (xfer->speed_hz)
speed_hz = xfer->speed_hz;
ret = qcom_qspi_set_speed(ctrl, speed_hz);
if (ret)
return ret;
spin_lock_irqsave(&ctrl->lock, flags);
mstr_cfg = readl(ctrl->base + MSTR_CONFIG);
/* We are half duplex, so either rx or tx will be set */
if (xfer->rx_buf) {
ctrl->xfer.dir = QSPI_READ;
ctrl->xfer.buswidth = xfer->rx_nbits;
ctrl->xfer.rx_buf = xfer->rx_buf;
} else {
ctrl->xfer.dir = QSPI_WRITE;
ctrl->xfer.buswidth = xfer->tx_nbits;
ctrl->xfer.tx_buf = xfer->tx_buf;
}
ctrl->xfer.is_last = list_is_last(&xfer->transfer_list,
&host->cur_msg->transfers);
ctrl->xfer.rem_bytes = xfer->len;
if (xfer->rx_sg.nents || xfer->tx_sg.nents) {
/* do DMA transfer */
if (!(mstr_cfg & DMA_ENABLE)) {
mstr_cfg |= DMA_ENABLE;
writel(mstr_cfg, ctrl->base + MSTR_CONFIG);
}
ret = qcom_qspi_setup_dma_desc(ctrl, xfer);
if (ret != -EAGAIN) {
if (!ret) {
dma_wmb();
qcom_qspi_dma_xfer(ctrl);
}
goto exit;
}
dev_warn_once(ctrl->dev, "DMA failure, falling back to PIO\n");
ret = 0; /* We'll retry w/ PIO */
}
if (mstr_cfg & DMA_ENABLE) {
mstr_cfg &= ~DMA_ENABLE;
writel(mstr_cfg, ctrl->base + MSTR_CONFIG);
}
qcom_qspi_pio_xfer(ctrl);
exit:
spin_unlock_irqrestore(&ctrl->lock, flags);
if (ret)
return ret;
/* We'll call spi_finalize_current_transfer() when done */
return 1;
}
static int qcom_qspi_prepare_message(struct spi_controller *host,
struct spi_message *message)
{
u32 mstr_cfg;
struct qcom_qspi *ctrl;
int tx_data_oe_delay = 1;
int tx_data_delay = 1;
unsigned long flags;
ctrl = spi_controller_get_devdata(host);
spin_lock_irqsave(&ctrl->lock, flags);
mstr_cfg = readl(ctrl->base + MSTR_CONFIG);
mstr_cfg &= ~CHIP_SELECT_NUM;
if (spi_get_chipselect(message->spi, 0))
mstr_cfg |= CHIP_SELECT_NUM;
mstr_cfg |= FB_CLK_EN | PIN_WPN | PIN_HOLDN | SBL_EN | FULL_CYCLE_MODE;
mstr_cfg &= ~(SPI_MODE_MSK | TX_DATA_OE_DELAY_MSK | TX_DATA_DELAY_MSK);
mstr_cfg |= message->spi->mode << SPI_MODE_SHFT;
mstr_cfg |= tx_data_oe_delay << TX_DATA_OE_DELAY_SHFT;
mstr_cfg |= tx_data_delay << TX_DATA_DELAY_SHFT;
mstr_cfg &= ~DMA_ENABLE;
writel(mstr_cfg, ctrl->base + MSTR_CONFIG);
spin_unlock_irqrestore(&ctrl->lock, flags);
return 0;
}
static int qcom_qspi_alloc_dma(struct qcom_qspi *ctrl)
{
ctrl->dma_cmd_pool = dmam_pool_create("qspi cmd desc pool",
ctrl->dev, sizeof(struct qspi_cmd_desc), 0, 0);
if (!ctrl->dma_cmd_pool)
return -ENOMEM;
return 0;
}
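/*
 * Drain the RX FIFO in PIO mode: read as many full words as are ready, then
 * pick up any trailing 1-3 bytes from the last FIFO word.
 */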
static irqreturn_t pio_read(struct qcom_qspi *ctrl)
{
u32 rd_fifo_status;
u32 rd_fifo;
unsigned int wr_cnts;
unsigned int bytes_to_read;
unsigned int words_to_read;
u32 *word_buf;
u8 *byte_buf;
int i;
rd_fifo_status = readl(ctrl->base + RD_FIFO_STATUS);
if (!(rd_fifo_status & FIFO_RDY)) {
dev_dbg(ctrl->dev, "Spurious IRQ %#x\n", rd_fifo_status);
return IRQ_NONE;
}
wr_cnts = (rd_fifo_status & WR_CNTS_MSK) >> WR_CNTS_SHFT;
wr_cnts = min(wr_cnts, ctrl->xfer.rem_bytes);
words_to_read = wr_cnts / QSPI_BYTES_PER_WORD;
bytes_to_read = wr_cnts % QSPI_BYTES_PER_WORD;
if (words_to_read) {
word_buf = ctrl->xfer.rx_buf;
ctrl->xfer.rem_bytes -= words_to_read * QSPI_BYTES_PER_WORD;
ioread32_rep(ctrl->base + RD_FIFO, word_buf, words_to_read);
ctrl->xfer.rx_buf = word_buf + words_to_read;
}
if (bytes_to_read) {
byte_buf = ctrl->xfer.rx_buf;
rd_fifo = readl(ctrl->base + RD_FIFO);
ctrl->xfer.rem_bytes -= bytes_to_read;
for (i = 0; i < bytes_to_read; i++)
*byte_buf++ = rd_fifo >> (i * BITS_PER_BYTE);
ctrl->xfer.rx_buf = byte_buf;
}
return IRQ_HANDLED;
}
static irqreturn_t pio_write(struct qcom_qspi *ctrl)
{
const void *xfer_buf = ctrl->xfer.tx_buf;
const int *word_buf;
const char *byte_buf;
unsigned int wr_fifo_bytes;
unsigned int wr_fifo_words;
unsigned int wr_size;
unsigned int rem_words;
wr_fifo_bytes = readl(ctrl->base + PIO_XFER_STATUS);
wr_fifo_bytes >>= WR_FIFO_BYTES_SHFT;
if (ctrl->xfer.rem_bytes < QSPI_BYTES_PER_WORD) {
/* Process the last 1-3 bytes */
wr_size = min(wr_fifo_bytes, ctrl->xfer.rem_bytes);
ctrl->xfer.rem_bytes -= wr_size;
byte_buf = xfer_buf;
while (wr_size--)
writel(*byte_buf++,
ctrl->base + PIO_DATAOUT_1B);
ctrl->xfer.tx_buf = byte_buf;
} else {
		/*
		 * Process all the whole words; to keep things simple we'll
		 * just wait for the next interrupt to handle the last 1-3
		 * bytes if the length isn't a whole number of words.
		 */
rem_words = ctrl->xfer.rem_bytes / QSPI_BYTES_PER_WORD;
wr_fifo_words = wr_fifo_bytes / QSPI_BYTES_PER_WORD;
wr_size = min(rem_words, wr_fifo_words);
ctrl->xfer.rem_bytes -= wr_size * QSPI_BYTES_PER_WORD;
word_buf = xfer_buf;
iowrite32_rep(ctrl->base + PIO_DATAOUT_4B, word_buf, wr_size);
ctrl->xfer.tx_buf = word_buf + wr_size;
}
return IRQ_HANDLED;
}
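/*
 * Hard IRQ handler: acknowledge the pending status bits, service the PIO
 * FIFO in the current direction, log error conditions, and finalize the
 * transfer when no bytes remain or when the DMA descriptor chain completes.
 */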
static irqreturn_t qcom_qspi_irq(int irq, void *dev_id)
{
u32 int_status;
struct qcom_qspi *ctrl = dev_id;
irqreturn_t ret = IRQ_NONE;
spin_lock(&ctrl->lock);
int_status = readl(ctrl->base + MSTR_INT_STATUS);
writel(int_status, ctrl->base + MSTR_INT_STATUS);
/* Ignore disabled interrupts */
int_status &= readl(ctrl->base + MSTR_INT_EN);
/* PIO mode handling */
if (ctrl->xfer.dir == QSPI_WRITE) {
if (int_status & WR_FIFO_EMPTY)
ret = pio_write(ctrl);
} else {
if (int_status & RESP_FIFO_RDY)
ret = pio_read(ctrl);
}
if (int_status & QSPI_ERR_IRQS) {
if (int_status & RESP_FIFO_UNDERRUN)
dev_err(ctrl->dev, "IRQ error: FIFO underrun\n");
if (int_status & WR_FIFO_OVERRUN)
dev_err(ctrl->dev, "IRQ error: FIFO overrun\n");
if (int_status & HRESP_FROM_NOC_ERR)
dev_err(ctrl->dev, "IRQ error: NOC response error\n");
ret = IRQ_HANDLED;
}
if (!ctrl->xfer.rem_bytes) {
writel(0, ctrl->base + MSTR_INT_EN);
spi_finalize_current_transfer(dev_get_drvdata(ctrl->dev));
}
/* DMA mode handling */
if (int_status & DMA_CHAIN_DONE) {
int i;
writel(0, ctrl->base + MSTR_INT_EN);
ctrl->xfer.rem_bytes = 0;
for (i = 0; i < ctrl->n_cmd_desc; i++)
dma_pool_free(ctrl->dma_cmd_pool, ctrl->virt_cmd_desc[i],
ctrl->dma_cmd_desc[i]);
ctrl->n_cmd_desc = 0;
ret = IRQ_HANDLED;
spi_finalize_current_transfer(dev_get_drvdata(ctrl->dev));
}
spin_unlock(&ctrl->lock);
return ret;
}
static int qcom_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
/*
* If qcom_qspi_can_dma() is going to return false we don't need to
* adjust anything.
*/
if (op->data.nbytes <= QSPI_MAX_BYTES_FIFO)
return 0;
/*
* When reading, the transfer needs to be a multiple of 4 bytes so
* shrink the transfer if that's not true. The caller will then do a
* second transfer to finish things up.
*/
if (op->data.dir == SPI_MEM_DATA_IN && (op->data.nbytes & 0x3))
op->data.nbytes &= ~0x3;
return 0;
}
static const struct spi_controller_mem_ops qcom_qspi_mem_ops = {
.adjust_op_size = qcom_qspi_adjust_op_size,
};
static int qcom_qspi_probe(struct platform_device *pdev)
{
int ret;
struct device *dev;
struct spi_controller *host;
struct qcom_qspi *ctrl;
dev = &pdev->dev;
host = devm_spi_alloc_host(dev, sizeof(*ctrl));
if (!host)
return -ENOMEM;
platform_set_drvdata(pdev, host);
ctrl = spi_controller_get_devdata(host);
spin_lock_init(&ctrl->lock);
ctrl->dev = dev;
ctrl->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ctrl->base))
return PTR_ERR(ctrl->base);
ctrl->clks = devm_kcalloc(dev, QSPI_NUM_CLKS,
sizeof(*ctrl->clks), GFP_KERNEL);
if (!ctrl->clks)
return -ENOMEM;
ctrl->clks[QSPI_CLK_CORE].id = "core";
ctrl->clks[QSPI_CLK_IFACE].id = "iface";
ret = devm_clk_bulk_get(dev, QSPI_NUM_CLKS, ctrl->clks);
if (ret)
return ret;
ctrl->icc_path_cpu_to_qspi = devm_of_icc_get(dev, "qspi-config");
if (IS_ERR(ctrl->icc_path_cpu_to_qspi))
return dev_err_probe(dev, PTR_ERR(ctrl->icc_path_cpu_to_qspi),
"Failed to get cpu path\n");
/* Set BW vote for register access */
ret = icc_set_bw(ctrl->icc_path_cpu_to_qspi, Bps_to_icc(1000),
Bps_to_icc(1000));
if (ret) {
dev_err(ctrl->dev, "%s: ICC BW voting failed for cpu: %d\n",
__func__, ret);
return ret;
}
ret = icc_disable(ctrl->icc_path_cpu_to_qspi);
if (ret) {
dev_err(ctrl->dev, "%s: ICC disable failed for cpu: %d\n",
__func__, ret);
return ret;
}
ret = platform_get_irq(pdev, 0);
if (ret < 0)
return ret;
ret = devm_request_irq(dev, ret, qcom_qspi_irq, 0, dev_name(dev), ctrl);
if (ret) {
dev_err(dev, "Failed to request irq %d\n", ret);
return ret;
}
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret)
return dev_err_probe(dev, ret, "could not set DMA mask\n");
host->max_speed_hz = 300000000;
host->max_dma_len = 65536; /* as per HPG */
host->dma_alignment = QSPI_ALIGN_REQ;
host->num_chipselect = QSPI_NUM_CS;
host->bus_num = -1;
host->dev.of_node = pdev->dev.of_node;
host->mode_bits = SPI_MODE_0 |
SPI_TX_DUAL | SPI_RX_DUAL |
SPI_TX_QUAD | SPI_RX_QUAD;
host->flags = SPI_CONTROLLER_HALF_DUPLEX;
host->prepare_message = qcom_qspi_prepare_message;
host->transfer_one = qcom_qspi_transfer_one;
host->handle_err = qcom_qspi_handle_err;
if (of_property_read_bool(pdev->dev.of_node, "iommus"))
host->can_dma = qcom_qspi_can_dma;
host->auto_runtime_pm = true;
host->mem_ops = &qcom_qspi_mem_ops;
ret = devm_pm_opp_set_clkname(&pdev->dev, "core");
if (ret)
return ret;
/* OPP table is optional */
ret = devm_pm_opp_of_add_table(&pdev->dev);
if (ret && ret != -ENODEV) {
dev_err(&pdev->dev, "invalid OPP table in device tree\n");
return ret;
}
ret = qcom_qspi_alloc_dma(ctrl);
if (ret)
return ret;
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, 250);
pm_runtime_enable(dev);
ret = spi_register_controller(host);
if (!ret)
return 0;
pm_runtime_disable(dev);
return ret;
}
static void qcom_qspi_remove(struct platform_device *pdev)
{
struct spi_controller *host = platform_get_drvdata(pdev);
/* Unregister _before_ disabling pm_runtime() so we stop transfers */
spi_unregister_controller(host);
pm_runtime_disable(&pdev->dev);
}
static int __maybe_unused qcom_qspi_runtime_suspend(struct device *dev)
{
struct spi_controller *host = dev_get_drvdata(dev);
struct qcom_qspi *ctrl = spi_controller_get_devdata(host);
int ret;
/* Drop the performance state vote */
dev_pm_opp_set_rate(dev, 0);
clk_bulk_disable_unprepare(QSPI_NUM_CLKS, ctrl->clks);
ret = icc_disable(ctrl->icc_path_cpu_to_qspi);
if (ret) {
dev_err_ratelimited(ctrl->dev, "%s: ICC disable failed for cpu: %d\n",
__func__, ret);
return ret;
}
pinctrl_pm_select_sleep_state(dev);
return 0;
}
static int __maybe_unused qcom_qspi_runtime_resume(struct device *dev)
{
struct spi_controller *host = dev_get_drvdata(dev);
struct qcom_qspi *ctrl = spi_controller_get_devdata(host);
int ret;
pinctrl_pm_select_default_state(dev);
ret = icc_enable(ctrl->icc_path_cpu_to_qspi);
if (ret) {
dev_err_ratelimited(ctrl->dev, "%s: ICC enable failed for cpu: %d\n",
__func__, ret);
return ret;
}
ret = clk_bulk_prepare_enable(QSPI_NUM_CLKS, ctrl->clks);
if (ret)
return ret;
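/*
 * Re-apply the OPP/clock vote. The factor of 4 mirrors how the
 * transfer path scales the requested bus speed to the core clock
 * (the serial clock appears to be core_clk / 4 on this controller);
 * ctrl->last_speed is assumed to hold the last requested bus speed.
 */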
return dev_pm_opp_set_rate(dev, ctrl->last_speed * 4);
}
static int __maybe_unused qcom_qspi_suspend(struct device *dev)
{
struct spi_controller *host = dev_get_drvdata(dev);
int ret;
ret = spi_controller_suspend(host);
if (ret)
return ret;
ret = pm_runtime_force_suspend(dev);
if (ret)
spi_controller_resume(host);
return ret;
}
static int __maybe_unused qcom_qspi_resume(struct device *dev)
{
struct spi_controller *host = dev_get_drvdata(dev);
int ret;
ret = pm_runtime_force_resume(dev);
if (ret)
return ret;
ret = spi_controller_resume(host);
if (ret)
pm_runtime_force_suspend(dev);
return ret;
}
static const struct dev_pm_ops qcom_qspi_dev_pm_ops = {
SET_RUNTIME_PM_OPS(qcom_qspi_runtime_suspend,
qcom_qspi_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(qcom_qspi_suspend, qcom_qspi_resume)
};
static const struct of_device_id qcom_qspi_dt_match[] = {
{ .compatible = "qcom,qspi-v1", },
{ }
};
MODULE_DEVICE_TABLE(of, qcom_qspi_dt_match);
static struct platform_driver qcom_qspi_driver = {
.driver = {
.name = "qcom_qspi",
.pm = &qcom_qspi_dev_pm_ops,
.of_match_table = qcom_qspi_dt_match,
},
.probe = qcom_qspi_probe,
.remove_new = qcom_qspi_remove,
};
module_platform_driver(qcom_qspi_driver);
MODULE_DESCRIPTION("SPI driver for QSPI cores");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/spi/spi-qcom-qspi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Intel PCH/PCU SPI flash driver.
*
* Copyright (C) 2016 - 2022, Intel Corporation
* Author: Mika Westerberg <[email protected]>
*/
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/spi-nor.h>
#include <linux/spi/flash.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include "spi-intel.h"
/* Offsets are from @ispi->base */
#define BFPREG 0x00
#define HSFSTS_CTL 0x04
#define HSFSTS_CTL_FSMIE BIT(31)
#define HSFSTS_CTL_FDBC_SHIFT 24
#define HSFSTS_CTL_FDBC_MASK (0x3f << HSFSTS_CTL_FDBC_SHIFT)
#define HSFSTS_CTL_FCYCLE_SHIFT 17
#define HSFSTS_CTL_FCYCLE_MASK (0x0f << HSFSTS_CTL_FCYCLE_SHIFT)
/* HW sequencer opcodes */
#define HSFSTS_CTL_FCYCLE_READ (0x00 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_WRITE (0x02 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_ERASE (0x03 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_ERASE_64K (0x04 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_RDSFDP (0x05 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_RDID (0x06 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_WRSR (0x07 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_RDSR (0x08 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FGO BIT(16)
#define HSFSTS_CTL_FLOCKDN BIT(15)
#define HSFSTS_CTL_FDV BIT(14)
#define HSFSTS_CTL_SCIP BIT(5)
#define HSFSTS_CTL_AEL BIT(2)
#define HSFSTS_CTL_FCERR BIT(1)
#define HSFSTS_CTL_FDONE BIT(0)
#define FADDR 0x08
#define DLOCK 0x0c
#define FDATA(n) (0x10 + ((n) * 4))
#define FRACC 0x50
#define FREG(n) (0x54 + ((n) * 4))
#define FREG_BASE_MASK GENMASK(14, 0)
#define FREG_LIMIT_SHIFT 16
#define FREG_LIMIT_MASK GENMASK(30, 16)
/* Offset is from @ispi->pregs */
#define PR(n) ((n) * 4)
#define PR_WPE BIT(31)
#define PR_LIMIT_SHIFT 16
#define PR_LIMIT_MASK GENMASK(30, 16)
#define PR_RPE BIT(15)
#define PR_BASE_MASK GENMASK(14, 0)
/* Offsets are from @ispi->sregs */
#define SSFSTS_CTL 0x00
#define SSFSTS_CTL_FSMIE BIT(23)
#define SSFSTS_CTL_DS BIT(22)
#define SSFSTS_CTL_DBC_SHIFT 16
#define SSFSTS_CTL_SPOP BIT(11)
#define SSFSTS_CTL_ACS BIT(10)
#define SSFSTS_CTL_SCGO BIT(9)
#define SSFSTS_CTL_COP_SHIFT 12
#define SSFSTS_CTL_FRS BIT(7)
#define SSFSTS_CTL_DOFRS BIT(6)
#define SSFSTS_CTL_AEL BIT(4)
#define SSFSTS_CTL_FCERR BIT(3)
#define SSFSTS_CTL_FDONE BIT(2)
#define SSFSTS_CTL_SCIP BIT(0)
#define PREOP_OPTYPE 0x04
#define OPMENU0 0x08
#define OPMENU1 0x0c
#define OPTYPE_READ_NO_ADDR 0
#define OPTYPE_WRITE_NO_ADDR 1
#define OPTYPE_READ_WITH_ADDR 2
#define OPTYPE_WRITE_WITH_ADDR 3
/* CPU specifics */
#define BYT_PR 0x74
#define BYT_SSFSTS_CTL 0x90
#define BYT_FREG_NUM 5
#define BYT_PR_NUM 5
#define LPT_PR 0x74
#define LPT_SSFSTS_CTL 0x90
#define LPT_FREG_NUM 5
#define LPT_PR_NUM 5
#define BXT_PR 0x84
#define BXT_SSFSTS_CTL 0xa0
#define BXT_FREG_NUM 12
#define BXT_PR_NUM 5
#define CNL_PR 0x84
#define CNL_FREG_NUM 6
#define CNL_PR_NUM 5
#define LVSCC 0xc4
#define UVSCC 0xc8
#define ERASE_OPCODE_SHIFT 8
#define ERASE_OPCODE_MASK (0xff << ERASE_OPCODE_SHIFT)
#define ERASE_64K_OPCODE_SHIFT 16
#define ERASE_64K_OPCODE_MASK (0xff << ERASE_64K_OPCODE_SHIFT)
/* Flash descriptor fields */
#define FLVALSIG_MAGIC 0x0ff0a55a
#define FLMAP0_NC_MASK GENMASK(9, 8)
#define FLMAP0_NC_SHIFT 8
#define FLMAP0_FCBA_MASK GENMASK(7, 0)
#define FLCOMP_C0DEN_MASK GENMASK(3, 0)
#define FLCOMP_C0DEN_512K 0x00
#define FLCOMP_C0DEN_1M 0x01
#define FLCOMP_C0DEN_2M 0x02
#define FLCOMP_C0DEN_4M 0x03
#define FLCOMP_C0DEN_8M 0x04
#define FLCOMP_C0DEN_16M 0x05
#define FLCOMP_C0DEN_32M 0x06
#define FLCOMP_C0DEN_64M 0x07
#define INTEL_SPI_TIMEOUT 5000 /* ms */
#define INTEL_SPI_FIFO_SZ 64
/**
* struct intel_spi - Driver private data
* @dev: Device pointer
* @info: Pointer to board specific info
* @base: Beginning of MMIO space
* @pregs: Start of protection registers
* @sregs: Start of software sequencer registers
* @host: Pointer to the SPI controller structure
* @nregions: Maximum number of regions
* @pr_num: Maximum number of protected range registers
* @chip0_size: Size of the first flash chip in bytes
* @locked: Is SPI setting locked
* @swseq_reg: Use SW sequencer in register reads/writes
* @swseq_erase: Use SW sequencer in erase operation
* @atomic_preopcode: Holds preopcode when atomic sequence is requested
* @opcodes: Opcodes which are supported. These are programmed by the BIOS
* before it locks down the controller.
* @mem_ops: Pointer to SPI MEM ops supported by the controller
*/
struct intel_spi {
struct device *dev;
const struct intel_spi_boardinfo *info;
void __iomem *base;
void __iomem *pregs;
void __iomem *sregs;
struct spi_controller *host;
size_t nregions;
size_t pr_num;
size_t chip0_size;
bool locked;
bool swseq_reg;
bool swseq_erase;
u8 atomic_preopcode;
u8 opcodes[8];
const struct intel_spi_mem_op *mem_ops;
};
struct intel_spi_mem_op {
struct spi_mem_op mem_op;
u32 replacement_op;
int (*exec_op)(struct intel_spi *ispi,
const struct spi_mem *mem,
const struct intel_spi_mem_op *iop,
const struct spi_mem_op *op);
};
static bool writeable;
module_param(writeable, bool, 0);
MODULE_PARM_DESC(writeable, "Enable write access to SPI flash chip (default=0)");
static void intel_spi_dump_regs(struct intel_spi *ispi)
{
u32 value;
int i;
dev_dbg(ispi->dev, "BFPREG=0x%08x\n", readl(ispi->base + BFPREG));
value = readl(ispi->base + HSFSTS_CTL);
dev_dbg(ispi->dev, "HSFSTS_CTL=0x%08x\n", value);
if (value & HSFSTS_CTL_FLOCKDN)
dev_dbg(ispi->dev, "-> Locked\n");
dev_dbg(ispi->dev, "FADDR=0x%08x\n", readl(ispi->base + FADDR));
dev_dbg(ispi->dev, "DLOCK=0x%08x\n", readl(ispi->base + DLOCK));
for (i = 0; i < 16; i++)
dev_dbg(ispi->dev, "FDATA(%d)=0x%08x\n",
i, readl(ispi->base + FDATA(i)));
dev_dbg(ispi->dev, "FRACC=0x%08x\n", readl(ispi->base + FRACC));
for (i = 0; i < ispi->nregions; i++)
dev_dbg(ispi->dev, "FREG(%d)=0x%08x\n", i,
readl(ispi->base + FREG(i)));
for (i = 0; i < ispi->pr_num; i++)
dev_dbg(ispi->dev, "PR(%d)=0x%08x\n", i,
readl(ispi->pregs + PR(i)));
if (ispi->sregs) {
value = readl(ispi->sregs + SSFSTS_CTL);
dev_dbg(ispi->dev, "SSFSTS_CTL=0x%08x\n", value);
dev_dbg(ispi->dev, "PREOP_OPTYPE=0x%08x\n",
readl(ispi->sregs + PREOP_OPTYPE));
dev_dbg(ispi->dev, "OPMENU0=0x%08x\n",
readl(ispi->sregs + OPMENU0));
dev_dbg(ispi->dev, "OPMENU1=0x%08x\n",
readl(ispi->sregs + OPMENU1));
}
dev_dbg(ispi->dev, "LVSCC=0x%08x\n", readl(ispi->base + LVSCC));
dev_dbg(ispi->dev, "UVSCC=0x%08x\n", readl(ispi->base + UVSCC));
dev_dbg(ispi->dev, "Protected regions:\n");
for (i = 0; i < ispi->pr_num; i++) {
u32 base, limit;
value = readl(ispi->pregs + PR(i));
if (!(value & (PR_WPE | PR_RPE)))
continue;
limit = (value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT;
base = value & PR_BASE_MASK;
dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x [%c%c]\n",
i, base << 12, (limit << 12) | 0xfff,
value & PR_WPE ? 'W' : '.', value & PR_RPE ? 'R' : '.');
}
dev_dbg(ispi->dev, "Flash regions:\n");
for (i = 0; i < ispi->nregions; i++) {
u32 region, base, limit;
region = readl(ispi->base + FREG(i));
base = region & FREG_BASE_MASK;
limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT;
if (base >= limit || (i > 0 && limit == 0))
dev_dbg(ispi->dev, " %02d disabled\n", i);
else
dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x\n",
i, base << 12, (limit << 12) | 0xfff);
}
dev_dbg(ispi->dev, "Using %cW sequencer for register access\n",
ispi->swseq_reg ? 'S' : 'H');
dev_dbg(ispi->dev, "Using %cW sequencer for erase operation\n",
ispi->swseq_erase ? 'S' : 'H');
}
/* Reads max INTEL_SPI_FIFO_SZ bytes from the device fifo */
static int intel_spi_read_block(struct intel_spi *ispi, void *buf, size_t size)
{
size_t bytes;
int i = 0;
if (size > INTEL_SPI_FIFO_SZ)
return -EINVAL;
while (size > 0) {
bytes = min_t(size_t, size, 4);
memcpy_fromio(buf, ispi->base + FDATA(i), bytes);
size -= bytes;
buf += bytes;
i++;
}
return 0;
}
/* Writes max INTEL_SPI_FIFO_SZ bytes to the device fifo */
static int intel_spi_write_block(struct intel_spi *ispi, const void *buf,
size_t size)
{
size_t bytes;
int i = 0;
if (size > INTEL_SPI_FIFO_SZ)
return -EINVAL;
while (size > 0) {
bytes = min_t(size_t, size, 4);
memcpy_toio(ispi->base + FDATA(i), buf, bytes);
size -= bytes;
buf += bytes;
i++;
}
return 0;
}
static int intel_spi_wait_hw_busy(struct intel_spi *ispi)
{
u32 val;
return readl_poll_timeout(ispi->base + HSFSTS_CTL, val,
!(val & HSFSTS_CTL_SCIP), 0,
INTEL_SPI_TIMEOUT * 1000);
}
static int intel_spi_wait_sw_busy(struct intel_spi *ispi)
{
u32 val;
return readl_poll_timeout(ispi->sregs + SSFSTS_CTL, val,
!(val & SSFSTS_CTL_SCIP), 0,
INTEL_SPI_TIMEOUT * 1000);
}
static bool intel_spi_set_writeable(struct intel_spi *ispi)
{
if (!ispi->info->set_writeable)
return false;
return ispi->info->set_writeable(ispi->base, ispi->info->data);
}
static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode, int optype)
{
int i;
int preop;
if (ispi->locked) {
for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++)
if (ispi->opcodes[i] == opcode)
return i;
return -EINVAL;
}
/* The lock is off, so just use index 0 */
writel(opcode, ispi->sregs + OPMENU0);
preop = readw(ispi->sregs + PREOP_OPTYPE);
writel(optype << 16 | preop, ispi->sregs + PREOP_OPTYPE);
return 0;
}
static int intel_spi_hw_cycle(struct intel_spi *ispi,
const struct intel_spi_mem_op *iop, size_t len)
{
u32 val, status;
int ret;
if (!iop->replacement_op)
return -EINVAL;
val = readl(ispi->base + HSFSTS_CTL);
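/*
 * FDBC holds the number of data bytes minus one (e.g. a 4-byte
 * transfer programs 3). FCERR and FDONE look like write-1-to-clear
 * status bits, so setting them here clears stale status, and FGO
 * starts the hardware sequencer cycle selected by replacement_op.
 */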
val &= ~(HSFSTS_CTL_FCYCLE_MASK | HSFSTS_CTL_FDBC_MASK);
val |= (len - 1) << HSFSTS_CTL_FDBC_SHIFT;
val |= HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
val |= HSFSTS_CTL_FGO;
val |= iop->replacement_op;
writel(val, ispi->base + HSFSTS_CTL);
ret = intel_spi_wait_hw_busy(ispi);
if (ret)
return ret;
status = readl(ispi->base + HSFSTS_CTL);
if (status & HSFSTS_CTL_FCERR)
return -EIO;
else if (status & HSFSTS_CTL_AEL)
return -EACCES;
return 0;
}
static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, size_t len,
int optype)
{
u32 val = 0, status;
u8 atomic_preopcode;
int ret;
ret = intel_spi_opcode_index(ispi, opcode, optype);
if (ret < 0)
return ret;
/*
* Always clear it after each SW sequencer operation regardless
* of whether it is successful or not.
*/
atomic_preopcode = ispi->atomic_preopcode;
ispi->atomic_preopcode = 0;
/* Only mark 'Data Cycle' bit when there is data to be transferred */
if (len > 0)
val = ((len - 1) << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS;
val |= ret << SSFSTS_CTL_COP_SHIFT;
val |= SSFSTS_CTL_FCERR | SSFSTS_CTL_FDONE;
val |= SSFSTS_CTL_SCGO;
if (atomic_preopcode) {
u16 preop;
switch (optype) {
case OPTYPE_WRITE_NO_ADDR:
case OPTYPE_WRITE_WITH_ADDR:
/* Pick matching preopcode for the atomic sequence */
preop = readw(ispi->sregs + PREOP_OPTYPE);
if ((preop & 0xff) == atomic_preopcode)
; /* Do nothing */
else if ((preop >> 8) == atomic_preopcode)
val |= SSFSTS_CTL_SPOP;
else
return -EINVAL;
/* Enable atomic sequence */
val |= SSFSTS_CTL_ACS;
break;
default:
return -EINVAL;
}
}
writel(val, ispi->sregs + SSFSTS_CTL);
ret = intel_spi_wait_sw_busy(ispi);
if (ret)
return ret;
status = readl(ispi->sregs + SSFSTS_CTL);
if (status & SSFSTS_CTL_FCERR)
return -EIO;
else if (status & SSFSTS_CTL_AEL)
return -EACCES;
return 0;
}
static u32 intel_spi_chip_addr(const struct intel_spi *ispi,
const struct spi_mem *mem)
{
/* Pick up the correct start address */
if (!mem)
return 0;
return (spi_get_chipselect(mem->spi, 0) == 1) ? ispi->chip0_size : 0;
}
static int intel_spi_read_reg(struct intel_spi *ispi, const struct spi_mem *mem,
const struct intel_spi_mem_op *iop,
const struct spi_mem_op *op)
{
u32 addr = intel_spi_chip_addr(ispi, mem) + op->addr.val;
size_t nbytes = op->data.nbytes;
u8 opcode = op->cmd.opcode;
int ret;
writel(addr, ispi->base + FADDR);
if (ispi->swseq_reg)
ret = intel_spi_sw_cycle(ispi, opcode, nbytes,
OPTYPE_READ_NO_ADDR);
else
ret = intel_spi_hw_cycle(ispi, iop, nbytes);
if (ret)
return ret;
return intel_spi_read_block(ispi, op->data.buf.in, nbytes);
}
static int intel_spi_write_reg(struct intel_spi *ispi, const struct spi_mem *mem,
const struct intel_spi_mem_op *iop,
const struct spi_mem_op *op)
{
u32 addr = intel_spi_chip_addr(ispi, mem) + op->addr.val;
size_t nbytes = op->data.nbytes;
u8 opcode = op->cmd.opcode;
int ret;
/*
* Write enable is handled with an atomic operation and preopcode in
* the Intel controller, so we only verify that it is available. If the
* controller is not locked, program the opcode to the PREOP
* register for later use.
*
* When hardware sequencer is used there is no need to program
* any opcodes (it handles them automatically as part of a command).
*/
if (opcode == SPINOR_OP_WREN) {
u16 preop;
if (!ispi->swseq_reg)
return 0;
preop = readw(ispi->sregs + PREOP_OPTYPE);
if ((preop & 0xff) != opcode && (preop >> 8) != opcode) {
if (ispi->locked)
return -EINVAL;
writel(opcode, ispi->sregs + PREOP_OPTYPE);
}
/*
* This enables the atomic sequence on the next SW cycle. It will
* be cleared after the next operation.
*/
ispi->atomic_preopcode = opcode;
return 0;
}
/*
* We hope that HW sequencer will do the right thing automatically and
* with the SW sequencer we cannot use preopcode anyway, so just ignore
* the Write Disable operation and pretend it was completed
* successfully.
*/
if (opcode == SPINOR_OP_WRDI)
return 0;
writel(addr, ispi->base + FADDR);
/* Write the value beforehand */
ret = intel_spi_write_block(ispi, op->data.buf.out, nbytes);
if (ret)
return ret;
if (ispi->swseq_reg)
return intel_spi_sw_cycle(ispi, opcode, nbytes,
OPTYPE_WRITE_NO_ADDR);
return intel_spi_hw_cycle(ispi, iop, nbytes);
}
static int intel_spi_read(struct intel_spi *ispi, const struct spi_mem *mem,
const struct intel_spi_mem_op *iop,
const struct spi_mem_op *op)
{
u32 addr = intel_spi_chip_addr(ispi, mem) + op->addr.val;
size_t block_size, nbytes = op->data.nbytes;
void *read_buf = op->data.buf.in;
u32 val, status;
int ret;
/*
* Atomic sequence is not expected with HW sequencer reads. Make
* sure it is cleared regardless.
*/
if (WARN_ON_ONCE(ispi->atomic_preopcode))
ispi->atomic_preopcode = 0;
while (nbytes > 0) {
block_size = min_t(size_t, nbytes, INTEL_SPI_FIFO_SZ);
/* Read cannot cross 4K boundary */
block_size = min_t(loff_t, addr + block_size,
round_up(addr + 1, SZ_4K)) - addr;
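/*
 * Worked example: addr = 0x0ffc with 64 bytes remaining clamps to
 * min(0x103c, round_up(0x0ffd, 4K) = 0x1000) - 0x0ffc = 4 bytes, so
 * the next iteration starts exactly on the 4K boundary.
 */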
writel(addr, ispi->base + FADDR);
val = readl(ispi->base + HSFSTS_CTL);
val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT;
val |= HSFSTS_CTL_FCYCLE_READ;
val |= HSFSTS_CTL_FGO;
writel(val, ispi->base + HSFSTS_CTL);
ret = intel_spi_wait_hw_busy(ispi);
if (ret)
return ret;
status = readl(ispi->base + HSFSTS_CTL);
if (status & HSFSTS_CTL_FCERR)
ret = -EIO;
else if (status & HSFSTS_CTL_AEL)
ret = -EACCES;
if (ret < 0) {
dev_err(ispi->dev, "read error: %x: %#x\n", addr, status);
return ret;
}
ret = intel_spi_read_block(ispi, read_buf, block_size);
if (ret)
return ret;
nbytes -= block_size;
addr += block_size;
read_buf += block_size;
}
return 0;
}
static int intel_spi_write(struct intel_spi *ispi, const struct spi_mem *mem,
const struct intel_spi_mem_op *iop,
const struct spi_mem_op *op)
{
u32 addr = intel_spi_chip_addr(ispi, mem) + op->addr.val;
size_t block_size, nbytes = op->data.nbytes;
const void *write_buf = op->data.buf.out;
u32 val, status;
int ret;
/* Not needed with HW sequencer write, make sure it is cleared */
ispi->atomic_preopcode = 0;
while (nbytes > 0) {
block_size = min_t(size_t, nbytes, INTEL_SPI_FIFO_SZ);
/* Write cannot cross 4K boundary */
block_size = min_t(loff_t, addr + block_size,
round_up(addr + 1, SZ_4K)) - addr;
writel(addr, ispi->base + FADDR);
val = readl(ispi->base + HSFSTS_CTL);
val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT;
val |= HSFSTS_CTL_FCYCLE_WRITE;
ret = intel_spi_write_block(ispi, write_buf, block_size);
if (ret) {
dev_err(ispi->dev, "failed to write block\n");
return ret;
}
/* Start the write now */
val |= HSFSTS_CTL_FGO;
writel(val, ispi->base + HSFSTS_CTL);
ret = intel_spi_wait_hw_busy(ispi);
if (ret) {
dev_err(ispi->dev, "timeout\n");
return ret;
}
status = readl(ispi->base + HSFSTS_CTL);
if (status & HSFSTS_CTL_FCERR)
ret = -EIO;
else if (status & HSFSTS_CTL_AEL)
ret = -EACCES;
if (ret < 0) {
dev_err(ispi->dev, "write error: %x: %#x\n", addr, status);
return ret;
}
nbytes -= block_size;
addr += block_size;
write_buf += block_size;
}
return 0;
}
static int intel_spi_erase(struct intel_spi *ispi, const struct spi_mem *mem,
const struct intel_spi_mem_op *iop,
const struct spi_mem_op *op)
{
u32 addr = intel_spi_chip_addr(ispi, mem) + op->addr.val;
u8 opcode = op->cmd.opcode;
u32 val, status;
int ret;
writel(addr, ispi->base + FADDR);
if (ispi->swseq_erase)
return intel_spi_sw_cycle(ispi, opcode, 0,
OPTYPE_WRITE_WITH_ADDR);
/* Not needed with HW sequencer erase, make sure it is cleared */
ispi->atomic_preopcode = 0;
val = readl(ispi->base + HSFSTS_CTL);
val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
val |= HSFSTS_CTL_FGO;
val |= iop->replacement_op;
writel(val, ispi->base + HSFSTS_CTL);
ret = intel_spi_wait_hw_busy(ispi);
if (ret)
return ret;
status = readl(ispi->base + HSFSTS_CTL);
if (status & HSFSTS_CTL_FCERR)
return -EIO;
if (status & HSFSTS_CTL_AEL)
return -EACCES;
return 0;
}
static int intel_spi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
op->data.nbytes = clamp_val(op->data.nbytes, 0, INTEL_SPI_FIFO_SZ);
return 0;
}
static bool intel_spi_cmp_mem_op(const struct intel_spi_mem_op *iop,
const struct spi_mem_op *op)
{
if (iop->mem_op.cmd.nbytes != op->cmd.nbytes ||
iop->mem_op.cmd.buswidth != op->cmd.buswidth ||
iop->mem_op.cmd.dtr != op->cmd.dtr ||
iop->mem_op.cmd.opcode != op->cmd.opcode)
return false;
if (iop->mem_op.addr.nbytes != op->addr.nbytes ||
iop->mem_op.addr.dtr != op->addr.dtr)
return false;
if (iop->mem_op.data.dir != op->data.dir ||
iop->mem_op.data.dtr != op->data.dtr)
return false;
if (iop->mem_op.data.dir != SPI_MEM_NO_DATA) {
if (iop->mem_op.data.buswidth != op->data.buswidth)
return false;
}
return true;
}
static const struct intel_spi_mem_op *
intel_spi_match_mem_op(struct intel_spi *ispi, const struct spi_mem_op *op)
{
const struct intel_spi_mem_op *iop;
for (iop = ispi->mem_ops; iop->mem_op.cmd.opcode; iop++) {
if (intel_spi_cmp_mem_op(iop, op))
break;
}
return iop->mem_op.cmd.opcode ? iop : NULL;
}
static bool intel_spi_supports_mem_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
struct intel_spi *ispi = spi_controller_get_devdata(mem->spi->controller);
const struct intel_spi_mem_op *iop;
iop = intel_spi_match_mem_op(ispi, op);
if (!iop) {
dev_dbg(ispi->dev, "%#x not supported\n", op->cmd.opcode);
return false;
}
/*
* For software sequencer check that the opcode is actually
* present in the opmenu if it is locked.
*/
if (ispi->swseq_reg && ispi->locked) {
int i;
/* Check if it is in the locked opcodes list */
for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++) {
if (ispi->opcodes[i] == op->cmd.opcode)
return true;
}
dev_dbg(ispi->dev, "%#x not supported\n", op->cmd.opcode);
return false;
}
return true;
}
static int intel_spi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
struct intel_spi *ispi = spi_controller_get_devdata(mem->spi->controller);
const struct intel_spi_mem_op *iop;
iop = intel_spi_match_mem_op(ispi, op);
if (!iop)
return -EOPNOTSUPP;
return iop->exec_op(ispi, mem, iop, op);
}
static const char *intel_spi_get_name(struct spi_mem *mem)
{
const struct intel_spi *ispi = spi_controller_get_devdata(mem->spi->controller);
/*
* Return name of the flash controller device to be compatible
* with the MTD version.
*/
return dev_name(ispi->dev);
}
static int intel_spi_dirmap_create(struct spi_mem_dirmap_desc *desc)
{
struct intel_spi *ispi = spi_controller_get_devdata(desc->mem->spi->controller);
const struct intel_spi_mem_op *iop;
iop = intel_spi_match_mem_op(ispi, &desc->info.op_tmpl);
if (!iop)
return -EOPNOTSUPP;
desc->priv = (void *)iop;
return 0;
}
static ssize_t intel_spi_dirmap_read(struct spi_mem_dirmap_desc *desc, u64 offs,
size_t len, void *buf)
{
struct intel_spi *ispi = spi_controller_get_devdata(desc->mem->spi->controller);
const struct intel_spi_mem_op *iop = desc->priv;
struct spi_mem_op op = desc->info.op_tmpl;
int ret;
/* Fill in the gaps */
op.addr.val = offs;
op.data.nbytes = len;
op.data.buf.in = buf;
ret = iop->exec_op(ispi, desc->mem, iop, &op);
return ret ? ret : len;
}
static ssize_t intel_spi_dirmap_write(struct spi_mem_dirmap_desc *desc, u64 offs,
size_t len, const void *buf)
{
struct intel_spi *ispi = spi_controller_get_devdata(desc->mem->spi->controller);
const struct intel_spi_mem_op *iop = desc->priv;
struct spi_mem_op op = desc->info.op_tmpl;
int ret;
op.addr.val = offs;
op.data.nbytes = len;
op.data.buf.out = buf;
ret = iop->exec_op(ispi, desc->mem, iop, &op);
return ret ? ret : len;
}
static const struct spi_controller_mem_ops intel_spi_mem_ops = {
.adjust_op_size = intel_spi_adjust_op_size,
.supports_op = intel_spi_supports_mem_op,
.exec_op = intel_spi_exec_mem_op,
.get_name = intel_spi_get_name,
.dirmap_create = intel_spi_dirmap_create,
.dirmap_read = intel_spi_dirmap_read,
.dirmap_write = intel_spi_dirmap_write,
};
#define INTEL_SPI_OP_ADDR(__nbytes) \
{ \
.nbytes = __nbytes, \
}
#define INTEL_SPI_OP_NO_DATA \
{ \
.dir = SPI_MEM_NO_DATA, \
}
#define INTEL_SPI_OP_DATA_IN(__buswidth) \
{ \
.dir = SPI_MEM_DATA_IN, \
.buswidth = __buswidth, \
}
#define INTEL_SPI_OP_DATA_OUT(__buswidth) \
{ \
.dir = SPI_MEM_DATA_OUT, \
.buswidth = __buswidth, \
}
#define INTEL_SPI_MEM_OP(__cmd, __addr, __data, __exec_op) \
{ \
.mem_op = { \
.cmd = __cmd, \
.addr = __addr, \
.data = __data, \
}, \
.exec_op = __exec_op, \
}
#define INTEL_SPI_MEM_OP_REPL(__cmd, __addr, __data, __exec_op, __repl) \
{ \
.mem_op = { \
.cmd = __cmd, \
.addr = __addr, \
.data = __data, \
}, \
.exec_op = __exec_op, \
.replacement_op = __repl, \
}
/*
* The controller handles pretty much everything internally based on the
* SFDP data but we want to make sure we only support the operations
* actually possible. Only check buswidth and transfer direction, the
* core validates data.
*/
#define INTEL_SPI_GENERIC_OPS \
/* Status register operations */ \
INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 1), \
SPI_MEM_OP_NO_ADDR, \
INTEL_SPI_OP_DATA_IN(1), \
intel_spi_read_reg, \
HSFSTS_CTL_FCYCLE_RDID), \
INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 1), \
SPI_MEM_OP_NO_ADDR, \
INTEL_SPI_OP_DATA_IN(1), \
intel_spi_read_reg, \
HSFSTS_CTL_FCYCLE_RDSR), \
INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1), \
SPI_MEM_OP_NO_ADDR, \
INTEL_SPI_OP_DATA_OUT(1), \
intel_spi_write_reg, \
HSFSTS_CTL_FCYCLE_WRSR), \
INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_RDSFDP, 1), \
INTEL_SPI_OP_ADDR(3), \
INTEL_SPI_OP_DATA_IN(1), \
intel_spi_read_reg, \
HSFSTS_CTL_FCYCLE_RDSFDP), \
/* Normal read */ \
INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1), \
INTEL_SPI_OP_ADDR(3), \
INTEL_SPI_OP_DATA_IN(1), \
intel_spi_read), \
INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1), \
INTEL_SPI_OP_ADDR(3), \
INTEL_SPI_OP_DATA_IN(2), \
intel_spi_read), \
INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1), \
INTEL_SPI_OP_ADDR(3), \
INTEL_SPI_OP_DATA_IN(4), \
intel_spi_read), \
INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1), \
INTEL_SPI_OP_ADDR(4), \
INTEL_SPI_OP_DATA_IN(1), \
intel_spi_read), \
INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1), \
INTEL_SPI_OP_ADDR(4), \
INTEL_SPI_OP_DATA_IN(2), \
intel_spi_read), \
INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1), \
INTEL_SPI_OP_ADDR(4), \
INTEL_SPI_OP_DATA_IN(4), \
intel_spi_read), \
/* Fast read */ \
INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1), \
INTEL_SPI_OP_ADDR(3), \
INTEL_SPI_OP_DATA_IN(1), \
intel_spi_read), \
INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1), \
INTEL_SPI_OP_ADDR(3), \
INTEL_SPI_OP_DATA_IN(2), \
intel_spi_read), \
INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1), \
INTEL_SPI_OP_ADDR(3), \
INTEL_SPI_OP_DATA_IN(4), \
intel_spi_read), \
INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1), \
INTEL_SPI_OP_ADDR(4), \
INTEL_SPI_OP_DATA_IN(1), \
intel_spi_read), \
INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1), \
INTEL_SPI_OP_ADDR(4), \
INTEL_SPI_OP_DATA_IN(2), \
intel_spi_read), \
INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1), \
INTEL_SPI_OP_ADDR(4), \
INTEL_SPI_OP_DATA_IN(4), \
intel_spi_read), \
/* Read with 4-byte address opcode */ \
INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_4B, 1), \
INTEL_SPI_OP_ADDR(4), \
INTEL_SPI_OP_DATA_IN(1), \
intel_spi_read), \
INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_4B, 1), \
INTEL_SPI_OP_ADDR(4), \
INTEL_SPI_OP_DATA_IN(2), \
intel_spi_read), \
INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_4B, 1), \
INTEL_SPI_OP_ADDR(4), \
INTEL_SPI_OP_DATA_IN(4), \
intel_spi_read), \
/* Fast read with 4-byte address opcode */ \
INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST_4B, 1), \
INTEL_SPI_OP_ADDR(4), \
INTEL_SPI_OP_DATA_IN(1), \
intel_spi_read), \
INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST_4B, 1), \
INTEL_SPI_OP_ADDR(4), \
INTEL_SPI_OP_DATA_IN(2), \
intel_spi_read), \
INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST_4B, 1), \
INTEL_SPI_OP_ADDR(4), \
INTEL_SPI_OP_DATA_IN(4), \
intel_spi_read), \
/* Write operations */ \
INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_PP, 1), \
INTEL_SPI_OP_ADDR(3), \
INTEL_SPI_OP_DATA_OUT(1), \
intel_spi_write), \
INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_PP, 1), \
INTEL_SPI_OP_ADDR(4), \
INTEL_SPI_OP_DATA_OUT(1), \
intel_spi_write), \
INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_PP_4B, 1), \
INTEL_SPI_OP_ADDR(4), \
INTEL_SPI_OP_DATA_OUT(1), \
intel_spi_write), \
INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 1), \
SPI_MEM_OP_NO_ADDR, \
SPI_MEM_OP_NO_DATA, \
intel_spi_write_reg), \
INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 1), \
SPI_MEM_OP_NO_ADDR, \
SPI_MEM_OP_NO_DATA, \
intel_spi_write_reg), \
/* Erase operations */ \
INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_BE_4K, 1), \
INTEL_SPI_OP_ADDR(3), \
SPI_MEM_OP_NO_DATA, \
intel_spi_erase, \
HSFSTS_CTL_FCYCLE_ERASE), \
INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_BE_4K, 1), \
INTEL_SPI_OP_ADDR(4), \
SPI_MEM_OP_NO_DATA, \
intel_spi_erase, \
HSFSTS_CTL_FCYCLE_ERASE), \
INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_BE_4K_4B, 1), \
INTEL_SPI_OP_ADDR(4), \
SPI_MEM_OP_NO_DATA, \
intel_spi_erase, \
HSFSTS_CTL_FCYCLE_ERASE)
static const struct intel_spi_mem_op generic_mem_ops[] = {
INTEL_SPI_GENERIC_OPS,
{ },
};
static const struct intel_spi_mem_op erase_64k_mem_ops[] = {
INTEL_SPI_GENERIC_OPS,
/* 64k sector erase operations */
INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_SE, 1),
INTEL_SPI_OP_ADDR(3),
SPI_MEM_OP_NO_DATA,
intel_spi_erase,
HSFSTS_CTL_FCYCLE_ERASE_64K),
INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_SE, 1),
INTEL_SPI_OP_ADDR(4),
SPI_MEM_OP_NO_DATA,
intel_spi_erase,
HSFSTS_CTL_FCYCLE_ERASE_64K),
INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_SE_4B, 1),
INTEL_SPI_OP_ADDR(4),
SPI_MEM_OP_NO_DATA,
intel_spi_erase,
HSFSTS_CTL_FCYCLE_ERASE_64K),
{ },
};
static int intel_spi_init(struct intel_spi *ispi)
{
u32 opmenu0, opmenu1, lvscc, uvscc, val;
bool erase_64k = false;
int i;
switch (ispi->info->type) {
case INTEL_SPI_BYT:
ispi->sregs = ispi->base + BYT_SSFSTS_CTL;
ispi->pregs = ispi->base + BYT_PR;
ispi->nregions = BYT_FREG_NUM;
ispi->pr_num = BYT_PR_NUM;
ispi->swseq_reg = true;
break;
case INTEL_SPI_LPT:
ispi->sregs = ispi->base + LPT_SSFSTS_CTL;
ispi->pregs = ispi->base + LPT_PR;
ispi->nregions = LPT_FREG_NUM;
ispi->pr_num = LPT_PR_NUM;
ispi->swseq_reg = true;
break;
case INTEL_SPI_BXT:
ispi->sregs = ispi->base + BXT_SSFSTS_CTL;
ispi->pregs = ispi->base + BXT_PR;
ispi->nregions = BXT_FREG_NUM;
ispi->pr_num = BXT_PR_NUM;
erase_64k = true;
break;
case INTEL_SPI_CNL:
ispi->sregs = NULL;
ispi->pregs = ispi->base + CNL_PR;
ispi->nregions = CNL_FREG_NUM;
ispi->pr_num = CNL_PR_NUM;
erase_64k = true;
break;
default:
return -EINVAL;
}
/* Try to disable write protection if user asked to do so */
if (writeable && !intel_spi_set_writeable(ispi)) {
dev_warn(ispi->dev, "can't disable chip write protection\n");
writeable = false;
}
/* Disable #SMI generation from HW sequencer */
val = readl(ispi->base + HSFSTS_CTL);
val &= ~HSFSTS_CTL_FSMIE;
writel(val, ispi->base + HSFSTS_CTL);
/*
* Determine whether erase operation should use HW or SW sequencer.
*
* The HW sequencer has a predefined list of opcodes, with only the
* erase opcode being programmable in LVSCC and UVSCC registers.
* If these registers don't contain a valid erase opcode, erase
* cannot be done using HW sequencer.
*/
lvscc = readl(ispi->base + LVSCC);
uvscc = readl(ispi->base + UVSCC);
if (!(lvscc & ERASE_OPCODE_MASK) || !(uvscc & ERASE_OPCODE_MASK))
ispi->swseq_erase = true;
/* SPI controller on Intel BXT supports 64K erase opcode */
if (ispi->info->type == INTEL_SPI_BXT && !ispi->swseq_erase)
if (!(lvscc & ERASE_64K_OPCODE_MASK) ||
!(uvscc & ERASE_64K_OPCODE_MASK))
erase_64k = false;
if (!ispi->sregs && (ispi->swseq_reg || ispi->swseq_erase)) {
dev_err(ispi->dev, "software sequencer not supported, but required\n");
return -EINVAL;
}
/*
* Some controllers can only do basic operations using hardware
* sequencer. All other operations are supposed to be carried out
* using software sequencer.
*/
if (ispi->swseq_reg) {
/* Disable #SMI generation from SW sequencer */
val = readl(ispi->sregs + SSFSTS_CTL);
val &= ~SSFSTS_CTL_FSMIE;
writel(val, ispi->sregs + SSFSTS_CTL);
}
/* Check controller's lock status */
val = readl(ispi->base + HSFSTS_CTL);
ispi->locked = !!(val & HSFSTS_CTL_FLOCKDN);
if (ispi->locked && ispi->sregs) {
/*
* BIOS programs allowed opcodes and then locks down the
* register. So read back what opcodes it decided to support.
* That's the set we are going to support as well.
*/
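/*
 * Each OPMENU register packs four opcodes, one per byte, lowest
 * byte first. For example (illustrative value, not a real BIOS
 * setting) OPMENU0 = 0x059f0302 yields opcodes[0] = 0x02,
 * opcodes[1] = 0x03, opcodes[2] = 0x9f and opcodes[3] = 0x05.
 */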
opmenu0 = readl(ispi->sregs + OPMENU0);
opmenu1 = readl(ispi->sregs + OPMENU1);
if (opmenu0 && opmenu1) {
for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) {
ispi->opcodes[i] = opmenu0 >> i * 8;
ispi->opcodes[i + 4] = opmenu1 >> i * 8;
}
}
}
if (erase_64k) {
dev_dbg(ispi->dev, "Using erase_64k memory operations");
ispi->mem_ops = erase_64k_mem_ops;
} else {
dev_dbg(ispi->dev, "Using generic memory operations");
ispi->mem_ops = generic_mem_ops;
}
intel_spi_dump_regs(ispi);
return 0;
}
static bool intel_spi_is_protected(const struct intel_spi *ispi,
unsigned int base, unsigned int limit)
{
int i;
for (i = 0; i < ispi->pr_num; i++) {
u32 pr_base, pr_limit, pr_value;
pr_value = readl(ispi->pregs + PR(i));
if (!(pr_value & (PR_WPE | PR_RPE)))
continue;
pr_limit = (pr_value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT;
pr_base = pr_value & PR_BASE_MASK;
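/*
 * Both the PR fields and the FREG-derived base/limit passed in are
 * 4 KiB page numbers (byte address = page << 12), so this checks
 * whether the protected range lies entirely inside the region.
 */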
if (pr_base >= base && pr_limit <= limit)
return true;
}
return false;
}
/*
* There will be a single partition holding all enabled flash regions. We
* call this "BIOS".
*/
static void intel_spi_fill_partition(struct intel_spi *ispi,
struct mtd_partition *part)
{
u64 end;
int i;
memset(part, 0, sizeof(*part));
/* Start from the mandatory descriptor region */
part->size = 4096;
part->name = "BIOS";
/*
* Now try to find where this partition ends based on the flash
* region registers.
*/
for (i = 1; i < ispi->nregions; i++) {
u32 region, base, limit;
region = readl(ispi->base + FREG(i));
base = region & FREG_BASE_MASK;
limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT;
if (base >= limit || limit == 0)
continue;
/*
* If any of the regions have protection bits set, make the
* whole partition read-only to be on the safe side.
*
* Also if the user did not ask the chip to be writeable
* mask the bit too.
*/
if (!writeable || intel_spi_is_protected(ispi, base, limit))
part->mask_flags |= MTD_WRITEABLE;
end = (limit << 12) + 4096;
if (end > part->size)
part->size = end;
}
}
static int intel_spi_read_desc(struct intel_spi *ispi)
{
struct spi_mem_op op =
SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 0),
SPI_MEM_OP_ADDR(3, 0, 0),
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_DATA_IN(0, NULL, 0));
u32 buf[2], nc, fcba, flcomp;
ssize_t ret;
op.addr.val = 0x10;
op.data.buf.in = buf;
op.data.nbytes = sizeof(buf);
ret = intel_spi_read(ispi, NULL, NULL, &op);
if (ret) {
dev_warn(ispi->dev, "failed to read descriptor\n");
return ret;
}
dev_dbg(ispi->dev, "FLVALSIG=0x%08x\n", buf[0]);
dev_dbg(ispi->dev, "FLMAP0=0x%08x\n", buf[1]);
if (buf[0] != FLVALSIG_MAGIC) {
dev_warn(ispi->dev, "descriptor signature not valid\n");
return -ENODEV;
}
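/*
 * FCBA is stored in 16-byte units, hence the shift by 4; e.g. a raw
 * field value of 0x03 places the component section at offset 0x30.
 */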
fcba = (buf[1] & FLMAP0_FCBA_MASK) << 4;
dev_dbg(ispi->dev, "FCBA=%#x\n", fcba);
op.addr.val = fcba;
op.data.buf.in = &flcomp;
op.data.nbytes = sizeof(flcomp);
ret = intel_spi_read(ispi, NULL, NULL, &op);
if (ret) {
dev_warn(ispi->dev, "failed to read FLCOMP\n");
return -ENODEV;
}
dev_dbg(ispi->dev, "FLCOMP=0x%08x\n", flcomp);
switch (flcomp & FLCOMP_C0DEN_MASK) {
case FLCOMP_C0DEN_512K:
ispi->chip0_size = SZ_512K;
break;
case FLCOMP_C0DEN_1M:
ispi->chip0_size = SZ_1M;
break;
case FLCOMP_C0DEN_2M:
ispi->chip0_size = SZ_2M;
break;
case FLCOMP_C0DEN_4M:
ispi->chip0_size = SZ_4M;
break;
case FLCOMP_C0DEN_8M:
ispi->chip0_size = SZ_8M;
break;
case FLCOMP_C0DEN_16M:
ispi->chip0_size = SZ_16M;
break;
case FLCOMP_C0DEN_32M:
ispi->chip0_size = SZ_32M;
break;
case FLCOMP_C0DEN_64M:
ispi->chip0_size = SZ_64M;
break;
default:
return -EINVAL;
}
dev_dbg(ispi->dev, "chip0 size %zd KB\n", ispi->chip0_size / SZ_1K);
nc = (buf[1] & FLMAP0_NC_MASK) >> FLMAP0_NC_SHIFT;
if (!nc)
ispi->host->num_chipselect = 1;
else if (nc == 1)
ispi->host->num_chipselect = 2;
else
return -EINVAL;
dev_dbg(ispi->dev, "%u flash components found\n",
ispi->host->num_chipselect);
return 0;
}
static int intel_spi_populate_chip(struct intel_spi *ispi)
{
struct flash_platform_data *pdata;
struct spi_board_info chip;
int ret;
pdata = devm_kzalloc(ispi->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
pdata->nr_parts = 1;
pdata->parts = devm_kcalloc(ispi->dev, pdata->nr_parts,
sizeof(*pdata->parts), GFP_KERNEL);
if (!pdata->parts)
return -ENOMEM;
intel_spi_fill_partition(ispi, pdata->parts);
memset(&chip, 0, sizeof(chip));
snprintf(chip.modalias, 8, "spi-nor");
chip.platform_data = pdata;
if (!spi_new_device(ispi->host, &chip))
return -ENODEV;
ret = intel_spi_read_desc(ispi);
if (ret)
return ret;
/* Add the second chip if present */
if (ispi->host->num_chipselect < 2)
return 0;
chip.platform_data = NULL;
chip.chip_select = 1;
if (!spi_new_device(ispi->host, &chip))
return -ENODEV;
return 0;
}
/**
* intel_spi_probe() - Probe the Intel SPI flash controller
* @dev: Pointer to the parent device
* @mem: MMIO resource
* @info: Platform specific information
*
* Probes Intel SPI flash controller and creates the flash chip device.
* Returns %0 on success and negative errno in case of failure.
*/
int intel_spi_probe(struct device *dev, struct resource *mem,
const struct intel_spi_boardinfo *info)
{
struct spi_controller *host;
struct intel_spi *ispi;
int ret;
host = devm_spi_alloc_host(dev, sizeof(*ispi));
if (!host)
return -ENOMEM;
host->mem_ops = &intel_spi_mem_ops;
ispi = spi_controller_get_devdata(host);
ispi->base = devm_ioremap_resource(dev, mem);
if (IS_ERR(ispi->base))
return PTR_ERR(ispi->base);
ispi->dev = dev;
ispi->host = host;
ispi->info = info;
ret = intel_spi_init(ispi);
if (ret)
return ret;
ret = devm_spi_register_controller(dev, host);
if (ret)
return ret;
return intel_spi_populate_chip(ispi);
}
EXPORT_SYMBOL_GPL(intel_spi_probe);
MODULE_DESCRIPTION("Intel PCH/PCU SPI flash core driver");
MODULE_AUTHOR("Mika Westerberg <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/spi/spi-intel.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Nuvoton Technology corporation.
#include <linux/bits.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/regmap.h>
#include <linux/of.h>
#include <linux/spi/spi-mem.h>
#include <linux/mfd/syscon.h>
/* NPCM7xx GCR module */
#define NPCM7XX_INTCR3_OFFSET 0x9C
#define NPCM7XX_INTCR3_FIU_FIX BIT(6)
/* Flash Interface Unit (FIU) Registers */
#define NPCM_FIU_DRD_CFG 0x00
#define NPCM_FIU_DWR_CFG 0x04
#define NPCM_FIU_UMA_CFG 0x08
#define NPCM_FIU_UMA_CTS 0x0C
#define NPCM_FIU_UMA_CMD 0x10
#define NPCM_FIU_UMA_ADDR 0x14
#define NPCM_FIU_PRT_CFG 0x18
#define NPCM_FIU_UMA_DW0 0x20
#define NPCM_FIU_UMA_DW1 0x24
#define NPCM_FIU_UMA_DW2 0x28
#define NPCM_FIU_UMA_DW3 0x2C
#define NPCM_FIU_UMA_DR0 0x30
#define NPCM_FIU_UMA_DR1 0x34
#define NPCM_FIU_UMA_DR2 0x38
#define NPCM_FIU_UMA_DR3 0x3C
#define NPCM_FIU_CFG 0x78
#define NPCM_FIU_MAX_REG_LIMIT 0x80
/* FIU Direct Read Configuration Register */
#define NPCM_FIU_DRD_CFG_LCK BIT(31)
#define NPCM_FIU_DRD_CFG_R_BURST GENMASK(25, 24)
#define NPCM_FIU_DRD_CFG_ADDSIZ GENMASK(17, 16)
#define NPCM_FIU_DRD_CFG_DBW GENMASK(13, 12)
#define NPCM_FIU_DRD_CFG_ACCTYPE GENMASK(9, 8)
#define NPCM_FIU_DRD_CFG_RDCMD GENMASK(7, 0)
#define NPCM_FIU_DRD_ADDSIZ_SHIFT 16
#define NPCM_FIU_DRD_DBW_SHIFT 12
#define NPCM_FIU_DRD_ACCTYPE_SHIFT 8
/* FIU Direct Write Configuration Register */
#define NPCM_FIU_DWR_CFG_LCK BIT(31)
#define NPCM_FIU_DWR_CFG_W_BURST GENMASK(25, 24)
#define NPCM_FIU_DWR_CFG_ADDSIZ GENMASK(17, 16)
#define NPCM_FIU_DWR_CFG_ABPCK GENMASK(11, 10)
#define NPCM_FIU_DWR_CFG_DBPCK GENMASK(9, 8)
#define NPCM_FIU_DWR_CFG_WRCMD GENMASK(7, 0)
#define NPCM_FIU_DWR_ADDSIZ_SHIFT 16
#define NPCM_FIU_DWR_ABPCK_SHIFT 10
#define NPCM_FIU_DWR_DBPCK_SHIFT 8
/* FIU UMA Configuration Register */
#define NPCM_FIU_UMA_CFG_LCK BIT(31)
#define NPCM_FIU_UMA_CFG_CMMLCK BIT(30)
#define NPCM_FIU_UMA_CFG_RDATSIZ GENMASK(28, 24)
#define NPCM_FIU_UMA_CFG_DBSIZ GENMASK(23, 21)
#define NPCM_FIU_UMA_CFG_WDATSIZ GENMASK(20, 16)
#define NPCM_FIU_UMA_CFG_ADDSIZ GENMASK(13, 11)
#define NPCM_FIU_UMA_CFG_CMDSIZ BIT(10)
#define NPCM_FIU_UMA_CFG_RDBPCK GENMASK(9, 8)
#define NPCM_FIU_UMA_CFG_DBPCK GENMASK(7, 6)
#define NPCM_FIU_UMA_CFG_WDBPCK GENMASK(5, 4)
#define NPCM_FIU_UMA_CFG_ADBPCK GENMASK(3, 2)
#define NPCM_FIU_UMA_CFG_CMBPCK GENMASK(1, 0)
#define NPCM_FIU_UMA_CFG_ADBPCK_SHIFT 2
#define NPCM_FIU_UMA_CFG_WDBPCK_SHIFT 4
#define NPCM_FIU_UMA_CFG_DBPCK_SHIFT 6
#define NPCM_FIU_UMA_CFG_RDBPCK_SHIFT 8
#define NPCM_FIU_UMA_CFG_ADDSIZ_SHIFT 11
#define NPCM_FIU_UMA_CFG_WDATSIZ_SHIFT 16
#define NPCM_FIU_UMA_CFG_DBSIZ_SHIFT 21
#define NPCM_FIU_UMA_CFG_RDATSIZ_SHIFT 24
/* FIU UMA Control and Status Register */
#define NPCM_FIU_UMA_CTS_RDYIE BIT(25)
#define NPCM_FIU_UMA_CTS_RDYST BIT(24)
#define NPCM_FIU_UMA_CTS_SW_CS BIT(16)
#define NPCM_FIU_UMA_CTS_DEV_NUM GENMASK(9, 8)
#define NPCM_FIU_UMA_CTS_EXEC_DONE BIT(0)
#define NPCM_FIU_UMA_CTS_DEV_NUM_SHIFT 8
/* FIU UMA Command Register */
#define NPCM_FIU_UMA_CMD_DUM3 GENMASK(31, 24)
#define NPCM_FIU_UMA_CMD_DUM2 GENMASK(23, 16)
#define NPCM_FIU_UMA_CMD_DUM1 GENMASK(15, 8)
#define NPCM_FIU_UMA_CMD_CMD GENMASK(7, 0)
/* FIU UMA Address Register */
#define NPCM_FIU_UMA_ADDR_UMA_ADDR GENMASK(31, 0)
#define NPCM_FIU_UMA_ADDR_AB3 GENMASK(31, 24)
#define NPCM_FIU_UMA_ADDR_AB2 GENMASK(23, 16)
#define NPCM_FIU_UMA_ADDR_AB1 GENMASK(15, 8)
#define NPCM_FIU_UMA_ADDR_AB0 GENMASK(7, 0)
/* FIU UMA Write Data Bytes 0-3 Register */
#define NPCM_FIU_UMA_DW0_WB3 GENMASK(31, 24)
#define NPCM_FIU_UMA_DW0_WB2 GENMASK(23, 16)
#define NPCM_FIU_UMA_DW0_WB1 GENMASK(15, 8)
#define NPCM_FIU_UMA_DW0_WB0 GENMASK(7, 0)
/* FIU UMA Write Data Bytes 4-7 Register */
#define NPCM_FIU_UMA_DW1_WB7 GENMASK(31, 24)
#define NPCM_FIU_UMA_DW1_WB6 GENMASK(23, 16)
#define NPCM_FIU_UMA_DW1_WB5 GENMASK(15, 8)
#define NPCM_FIU_UMA_DW1_WB4 GENMASK(7, 0)
/* FIU UMA Write Data Bytes 8-11 Register */
#define NPCM_FIU_UMA_DW2_WB11 GENMASK(31, 24)
#define NPCM_FIU_UMA_DW2_WB10 GENMASK(23, 16)
#define NPCM_FIU_UMA_DW2_WB9 GENMASK(15, 8)
#define NPCM_FIU_UMA_DW2_WB8 GENMASK(7, 0)
/* FIU UMA Write Data Bytes 12-15 Register */
#define NPCM_FIU_UMA_DW3_WB15 GENMASK(31, 24)
#define NPCM_FIU_UMA_DW3_WB14 GENMASK(23, 16)
#define NPCM_FIU_UMA_DW3_WB13 GENMASK(15, 8)
#define NPCM_FIU_UMA_DW3_WB12 GENMASK(7, 0)
/* FIU UMA Read Data Bytes 0-3 Register */
#define NPCM_FIU_UMA_DR0_RB3 GENMASK(31, 24)
#define NPCM_FIU_UMA_DR0_RB2 GENMASK(23, 16)
#define NPCM_FIU_UMA_DR0_RB1 GENMASK(15, 8)
#define NPCM_FIU_UMA_DR0_RB0 GENMASK(7, 0)
/* FIU UMA Read Data Bytes 4-7 Register */
#define NPCM_FIU_UMA_DR1_RB7 GENMASK(31, 24)
#define NPCM_FIU_UMA_DR1_RB6 GENMASK(23, 16)
#define NPCM_FIU_UMA_DR1_RB5 GENMASK(15, 8)
#define NPCM_FIU_UMA_DR1_RB4 GENMASK(7, 0)
/* FIU UMA Read Data Bytes 8-11 Register */
#define NPCM_FIU_UMA_DR2_RB11 GENMASK(31, 24)
#define NPCM_FIU_UMA_DR2_RB10 GENMASK(23, 16)
#define NPCM_FIU_UMA_DR2_RB9 GENMASK(15, 8)
#define NPCM_FIU_UMA_DR2_RB8 GENMASK(7, 0)
/* FIU UMA Read Data Bytes 12-15 Register */
#define NPCM_FIU_UMA_DR3_RB15 GENMASK(31, 24)
#define NPCM_FIU_UMA_DR3_RB14 GENMASK(23, 16)
#define NPCM_FIU_UMA_DR3_RB13 GENMASK(15, 8)
#define NPCM_FIU_UMA_DR3_RB12 GENMASK(7, 0)
/* FIU Configuration Register */
#define NPCM_FIU_CFG_FIU_FIX BIT(31)
/* FIU Read Mode */
enum {
DRD_SINGLE_WIRE_MODE = 0,
DRD_DUAL_IO_MODE = 1,
DRD_QUAD_IO_MODE = 2,
DRD_SPI_X_MODE = 3,
};
enum {
DWR_ABPCK_BIT_PER_CLK = 0,
DWR_ABPCK_2_BIT_PER_CLK = 1,
DWR_ABPCK_4_BIT_PER_CLK = 2,
};
enum {
DWR_DBPCK_BIT_PER_CLK = 0,
DWR_DBPCK_2_BIT_PER_CLK = 1,
DWR_DBPCK_4_BIT_PER_CLK = 2,
};
#define NPCM_FIU_DRD_16_BYTE_BURST 0x3000000
#define NPCM_FIU_DWR_16_BYTE_BURST 0x3000000
#define MAP_SIZE_128MB 0x8000000
#define MAP_SIZE_16MB 0x1000000
#define MAP_SIZE_8MB 0x800000
#define FIU_DRD_MAX_DUMMY_NUMBER 3
#define NPCM_MAX_CHIP_NUM 4
#define CHUNK_SIZE 16
#define UMA_MICRO_SEC_TIMEOUT 150
enum {
FIU0 = 0,
FIU3,
FIUX,
FIU1,
};
struct npcm_fiu_info {
char *name;
u32 fiu_id;
u32 max_map_size;
u32 max_cs;
};
struct fiu_data {
const struct npcm_fiu_info *npcm_fiu_data_info;
int fiu_max;
};
static const struct npcm_fiu_info npcm7xx_fiu_info[] = {
{.name = "FIU0", .fiu_id = FIU0,
.max_map_size = MAP_SIZE_128MB, .max_cs = 2},
{.name = "FIU3", .fiu_id = FIU3,
.max_map_size = MAP_SIZE_128MB, .max_cs = 4},
{.name = "FIUX", .fiu_id = FIUX,
.max_map_size = MAP_SIZE_16MB, .max_cs = 2} };
static const struct fiu_data npcm7xx_fiu_data = {
.npcm_fiu_data_info = npcm7xx_fiu_info,
.fiu_max = 3,
};
static const struct npcm_fiu_info npxm8xx_fiu_info[] = {
{.name = "FIU0", .fiu_id = FIU0,
.max_map_size = MAP_SIZE_128MB, .max_cs = 2},
{.name = "FIU3", .fiu_id = FIU3,
.max_map_size = MAP_SIZE_128MB, .max_cs = 4},
{.name = "FIUX", .fiu_id = FIUX,
.max_map_size = MAP_SIZE_16MB, .max_cs = 2},
{.name = "FIU1", .fiu_id = FIU1,
.max_map_size = MAP_SIZE_16MB, .max_cs = 4} };
static const struct fiu_data npxm8xx_fiu_data = {
.npcm_fiu_data_info = npxm8xx_fiu_info,
.fiu_max = 4,
};
struct npcm_fiu_spi;
struct npcm_fiu_chip {
void __iomem *flash_region_mapped_ptr;
struct npcm_fiu_spi *fiu;
unsigned long clkrate;
u32 chipselect;
};
struct npcm_fiu_spi {
struct npcm_fiu_chip chip[NPCM_MAX_CHIP_NUM];
const struct npcm_fiu_info *info;
struct spi_mem_op drd_op;
struct resource *res_mem;
struct regmap *regmap;
unsigned long clkrate;
struct device *dev;
struct clk *clk;
bool spix_mode;
};
static const struct regmap_config npcm_mtd_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.max_register = NPCM_FIU_MAX_REG_LIMIT,
};
static void npcm_fiu_set_drd(struct npcm_fiu_spi *fiu,
const struct spi_mem_op *op)
{
regmap_update_bits(fiu->regmap, NPCM_FIU_DRD_CFG,
NPCM_FIU_DRD_CFG_ACCTYPE,
ilog2(op->addr.buswidth) <<
NPCM_FIU_DRD_ACCTYPE_SHIFT);
fiu->drd_op.addr.buswidth = op->addr.buswidth;
regmap_update_bits(fiu->regmap, NPCM_FIU_DRD_CFG,
NPCM_FIU_DRD_CFG_DBW,
op->dummy.nbytes << NPCM_FIU_DRD_DBW_SHIFT);
fiu->drd_op.dummy.nbytes = op->dummy.nbytes;
regmap_update_bits(fiu->regmap, NPCM_FIU_DRD_CFG,
NPCM_FIU_DRD_CFG_RDCMD, op->cmd.opcode);
fiu->drd_op.cmd.opcode = op->cmd.opcode;
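/*
 * ADDSIZ is encoded relative to a 3-byte address: 3 address bytes
 * program 0 and 4 address bytes program 1 (inferred from the
 * "nbytes - 3" encoding used below).
 */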
regmap_update_bits(fiu->regmap, NPCM_FIU_DRD_CFG,
NPCM_FIU_DRD_CFG_ADDSIZ,
(op->addr.nbytes - 3) << NPCM_FIU_DRD_ADDSIZ_SHIFT);
fiu->drd_op.addr.nbytes = op->addr.nbytes;
}
static ssize_t npcm_fiu_direct_read(struct spi_mem_dirmap_desc *desc,
u64 offs, size_t len, void *buf)
{
struct npcm_fiu_spi *fiu =
spi_controller_get_devdata(desc->mem->spi->controller);
struct npcm_fiu_chip *chip = &fiu->chip[spi_get_chipselect(desc->mem->spi, 0)];
void __iomem *src = (void __iomem *)(chip->flash_region_mapped_ptr +
offs);
u8 *buf_rx = buf;
u32 i;
if (fiu->spix_mode) {
for (i = 0 ; i < len ; i++)
*(buf_rx + i) = ioread8(src + i);
} else {
if (desc->info.op_tmpl.addr.buswidth != fiu->drd_op.addr.buswidth ||
desc->info.op_tmpl.dummy.nbytes != fiu->drd_op.dummy.nbytes ||
desc->info.op_tmpl.cmd.opcode != fiu->drd_op.cmd.opcode ||
desc->info.op_tmpl.addr.nbytes != fiu->drd_op.addr.nbytes)
npcm_fiu_set_drd(fiu, &desc->info.op_tmpl);
memcpy_fromio(buf_rx, src, len);
}
return len;
}
static ssize_t npcm_fiu_direct_write(struct spi_mem_dirmap_desc *desc,
u64 offs, size_t len, const void *buf)
{
struct npcm_fiu_spi *fiu =
spi_controller_get_devdata(desc->mem->spi->controller);
struct npcm_fiu_chip *chip = &fiu->chip[spi_get_chipselect(desc->mem->spi, 0)];
void __iomem *dst = (void __iomem *)(chip->flash_region_mapped_ptr +
offs);
const u8 *buf_tx = buf;
u32 i;
if (fiu->spix_mode)
for (i = 0 ; i < len ; i++)
iowrite8(*(buf_tx + i), dst + i);
else
memcpy_toio(dst, buf_tx, len);
return len;
}
static int npcm_fiu_uma_read(struct spi_mem *mem,
const struct spi_mem_op *op, u32 addr,
bool is_address_size, u8 *data, u32 data_size)
{
struct npcm_fiu_spi *fiu =
spi_controller_get_devdata(mem->spi->controller);
u32 uma_cfg = BIT(10);
u32 data_reg[4];
int ret;
u32 val;
u32 i;
regmap_update_bits(fiu->regmap, NPCM_FIU_UMA_CTS,
NPCM_FIU_UMA_CTS_DEV_NUM,
(spi_get_chipselect(mem->spi, 0) <<
NPCM_FIU_UMA_CTS_DEV_NUM_SHIFT));
regmap_update_bits(fiu->regmap, NPCM_FIU_UMA_CMD,
NPCM_FIU_UMA_CMD_CMD, op->cmd.opcode);
if (is_address_size) {
uma_cfg |= ilog2(op->cmd.buswidth);
uma_cfg |= ilog2(op->addr.buswidth)
<< NPCM_FIU_UMA_CFG_ADBPCK_SHIFT;
uma_cfg |= ilog2(op->dummy.buswidth)
<< NPCM_FIU_UMA_CFG_DBPCK_SHIFT;
uma_cfg |= ilog2(op->data.buswidth)
<< NPCM_FIU_UMA_CFG_RDBPCK_SHIFT;
uma_cfg |= op->dummy.nbytes << NPCM_FIU_UMA_CFG_DBSIZ_SHIFT;
uma_cfg |= op->addr.nbytes << NPCM_FIU_UMA_CFG_ADDSIZ_SHIFT;
regmap_write(fiu->regmap, NPCM_FIU_UMA_ADDR, addr);
} else {
regmap_write(fiu->regmap, NPCM_FIU_UMA_ADDR, 0x0);
}
uma_cfg |= data_size << NPCM_FIU_UMA_CFG_RDATSIZ_SHIFT;
regmap_write(fiu->regmap, NPCM_FIU_UMA_CFG, uma_cfg);
regmap_write_bits(fiu->regmap, NPCM_FIU_UMA_CTS,
NPCM_FIU_UMA_CTS_EXEC_DONE,
NPCM_FIU_UMA_CTS_EXEC_DONE);
ret = regmap_read_poll_timeout(fiu->regmap, NPCM_FIU_UMA_CTS, val,
(!(val & NPCM_FIU_UMA_CTS_EXEC_DONE)), 0,
UMA_MICRO_SEC_TIMEOUT);
if (ret)
return ret;
if (data_size) {
for (i = 0; i < DIV_ROUND_UP(data_size, 4); i++)
regmap_read(fiu->regmap, NPCM_FIU_UMA_DR0 + (i * 4),
&data_reg[i]);
memcpy(data, data_reg, data_size);
}
return 0;
}
static int npcm_fiu_uma_write(struct spi_mem *mem,
const struct spi_mem_op *op, u8 cmd,
bool is_address_size, u8 *data, u32 data_size)
{
struct npcm_fiu_spi *fiu =
spi_controller_get_devdata(mem->spi->controller);
u32 uma_cfg = BIT(10);
u32 data_reg[4] = {0};
u32 val;
u32 i;
regmap_update_bits(fiu->regmap, NPCM_FIU_UMA_CTS,
NPCM_FIU_UMA_CTS_DEV_NUM,
(spi_get_chipselect(mem->spi, 0) <<
NPCM_FIU_UMA_CTS_DEV_NUM_SHIFT));
regmap_update_bits(fiu->regmap, NPCM_FIU_UMA_CMD,
NPCM_FIU_UMA_CMD_CMD, cmd);
if (data_size) {
memcpy(data_reg, data, data_size);
for (i = 0; i < DIV_ROUND_UP(data_size, 4); i++)
regmap_write(fiu->regmap, NPCM_FIU_UMA_DW0 + (i * 4),
data_reg[i]);
}
if (is_address_size) {
uma_cfg |= ilog2(op->cmd.buswidth);
uma_cfg |= ilog2(op->addr.buswidth) <<
NPCM_FIU_UMA_CFG_ADBPCK_SHIFT;
uma_cfg |= ilog2(op->data.buswidth) <<
NPCM_FIU_UMA_CFG_WDBPCK_SHIFT;
uma_cfg |= op->addr.nbytes << NPCM_FIU_UMA_CFG_ADDSIZ_SHIFT;
regmap_write(fiu->regmap, NPCM_FIU_UMA_ADDR, op->addr.val);
} else {
regmap_write(fiu->regmap, NPCM_FIU_UMA_ADDR, 0x0);
}
uma_cfg |= (data_size << NPCM_FIU_UMA_CFG_WDATSIZ_SHIFT);
regmap_write(fiu->regmap, NPCM_FIU_UMA_CFG, uma_cfg);
regmap_write_bits(fiu->regmap, NPCM_FIU_UMA_CTS,
NPCM_FIU_UMA_CTS_EXEC_DONE,
NPCM_FIU_UMA_CTS_EXEC_DONE);
return regmap_read_poll_timeout(fiu->regmap, NPCM_FIU_UMA_CTS, val,
(!(val & NPCM_FIU_UMA_CTS_EXEC_DONE)), 0,
UMA_MICRO_SEC_TIMEOUT);
}
static int npcm_fiu_manualwrite(struct spi_mem *mem,
const struct spi_mem_op *op)
{
struct npcm_fiu_spi *fiu =
spi_controller_get_devdata(mem->spi->controller);
u8 *data = (u8 *)op->data.buf.out;
u32 num_data_chunks;
u32 remain_data;
u32 idx = 0;
int ret;
num_data_chunks = op->data.nbytes / CHUNK_SIZE;
remain_data = op->data.nbytes % CHUNK_SIZE;
regmap_update_bits(fiu->regmap, NPCM_FIU_UMA_CTS,
NPCM_FIU_UMA_CTS_DEV_NUM,
(spi_get_chipselect(mem->spi, 0) <<
NPCM_FIU_UMA_CTS_DEV_NUM_SHIFT));
regmap_update_bits(fiu->regmap, NPCM_FIU_UMA_CTS,
NPCM_FIU_UMA_CTS_SW_CS, 0);
ret = npcm_fiu_uma_write(mem, op, op->cmd.opcode, true, NULL, 0);
if (ret)
return ret;
/* Write the data in CHUNK_SIZE-byte chunks */
for (idx = 0; idx < num_data_chunks; ++idx) {
ret = npcm_fiu_uma_write(mem, op, data[0], false,
&data[1], CHUNK_SIZE - 1);
if (ret)
return ret;
data += CHUNK_SIZE;
}
/* Handle the remaining partial chunk */
if (remain_data > 0) {
ret = npcm_fiu_uma_write(mem, op, data[0], false,
&data[1], remain_data - 1);
if (ret)
return ret;
}
regmap_update_bits(fiu->regmap, NPCM_FIU_UMA_CTS,
NPCM_FIU_UMA_CTS_SW_CS, NPCM_FIU_UMA_CTS_SW_CS);
return 0;
}
static int npcm_fiu_read(struct spi_mem *mem, const struct spi_mem_op *op)
{
u8 *data = op->data.buf.in;
int i, readlen, currlen;
u8 *buf_ptr;
u32 addr;
int ret;
i = 0;
currlen = op->data.nbytes;
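/*
 * The UMA engine returns at most 16 bytes per transaction (the four
 * NPCM_FIU_UMA_DR0..DR3 data registers), so split the read into
 * 16-byte chunks and re-issue the command with an advancing address.
 */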
do {
addr = ((u32)op->addr.val + i);
if (currlen < 16)
readlen = currlen;
else
readlen = 16;
buf_ptr = data + i;
ret = npcm_fiu_uma_read(mem, op, addr, true, buf_ptr,
readlen);
if (ret)
return ret;
i += readlen;
currlen -= 16;
} while (currlen > 0);
return 0;
}
static void npcm_fiux_set_direct_wr(struct npcm_fiu_spi *fiu)
{
regmap_write(fiu->regmap, NPCM_FIU_DWR_CFG,
NPCM_FIU_DWR_16_BYTE_BURST);
regmap_update_bits(fiu->regmap, NPCM_FIU_DWR_CFG,
NPCM_FIU_DWR_CFG_ABPCK,
DWR_ABPCK_4_BIT_PER_CLK << NPCM_FIU_DWR_ABPCK_SHIFT);
regmap_update_bits(fiu->regmap, NPCM_FIU_DWR_CFG,
NPCM_FIU_DWR_CFG_DBPCK,
DWR_DBPCK_4_BIT_PER_CLK << NPCM_FIU_DWR_DBPCK_SHIFT);
}
static void npcm_fiux_set_direct_rd(struct npcm_fiu_spi *fiu)
{
u32 rx_dummy = 0;
regmap_write(fiu->regmap, NPCM_FIU_DRD_CFG,
NPCM_FIU_DRD_16_BYTE_BURST);
regmap_update_bits(fiu->regmap, NPCM_FIU_DRD_CFG,
NPCM_FIU_DRD_CFG_ACCTYPE,
DRD_SPI_X_MODE << NPCM_FIU_DRD_ACCTYPE_SHIFT);
regmap_update_bits(fiu->regmap, NPCM_FIU_DRD_CFG,
NPCM_FIU_DRD_CFG_DBW,
rx_dummy << NPCM_FIU_DRD_DBW_SHIFT);
}
static int npcm_fiu_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
struct npcm_fiu_spi *fiu =
spi_controller_get_devdata(mem->spi->controller);
struct npcm_fiu_chip *chip = &fiu->chip[spi_get_chipselect(mem->spi, 0)];
int ret = 0;
u8 *buf;
dev_dbg(fiu->dev, "cmd:%#x mode:%d.%d.%d.%d addr:%#llx len:%#x\n",
op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
op->dummy.buswidth, op->data.buswidth, op->addr.val,
op->data.nbytes);
if (fiu->spix_mode || op->addr.nbytes > 4)
return -ENOTSUPP;
if (fiu->clkrate != chip->clkrate) {
ret = clk_set_rate(fiu->clk, chip->clkrate);
if (ret < 0)
dev_warn(fiu->dev, "Failed setting %lu frequency, stay at %lu frequency\n",
chip->clkrate, fiu->clkrate);
else
fiu->clkrate = chip->clkrate;
}
if (op->data.dir == SPI_MEM_DATA_IN) {
if (!op->addr.nbytes) {
buf = op->data.buf.in;
ret = npcm_fiu_uma_read(mem, op, op->addr.val, false,
buf, op->data.nbytes);
} else {
ret = npcm_fiu_read(mem, op);
}
} else {
if (!op->addr.nbytes && !op->data.nbytes)
ret = npcm_fiu_uma_write(mem, op, op->cmd.opcode, false,
NULL, 0);
if (op->addr.nbytes && !op->data.nbytes) {
int i;
u8 buf_addr[4];
u32 addr = op->addr.val;
for (i = op->addr.nbytes - 1; i >= 0; i--) {
buf_addr[i] = addr & 0xff;
addr >>= 8;
}
ret = npcm_fiu_uma_write(mem, op, op->cmd.opcode, false,
buf_addr, op->addr.nbytes);
}
if (!op->addr.nbytes && op->data.nbytes)
ret = npcm_fiu_uma_write(mem, op, op->cmd.opcode, false,
(u8 *)op->data.buf.out,
op->data.nbytes);
if (op->addr.nbytes && op->data.nbytes)
ret = npcm_fiu_manualwrite(mem, op);
}
return ret;
}
static int npcm_fiu_dirmap_create(struct spi_mem_dirmap_desc *desc)
{
struct npcm_fiu_spi *fiu =
spi_controller_get_devdata(desc->mem->spi->controller);
struct npcm_fiu_chip *chip = &fiu->chip[spi_get_chipselect(desc->mem->spi, 0)];
struct regmap *gcr_regmap;
if (!fiu->res_mem) {
dev_warn(fiu->dev, "Reserved memory not defined, direct read disabled\n");
desc->nodirmap = true;
return 0;
}
if (!fiu->spix_mode &&
desc->info.op_tmpl.data.dir == SPI_MEM_DATA_OUT) {
desc->nodirmap = true;
return 0;
}
if (!chip->flash_region_mapped_ptr) {
chip->flash_region_mapped_ptr =
devm_ioremap(fiu->dev, (fiu->res_mem->start +
(fiu->info->max_map_size *
spi_get_chipselect(desc->mem->spi, 0))),
(u32)desc->info.length);
if (!chip->flash_region_mapped_ptr) {
dev_warn(fiu->dev, "Error mapping memory region, direct read disabled\n");
desc->nodirmap = true;
return 0;
}
}
if (of_device_is_compatible(fiu->dev->of_node, "nuvoton,npcm750-fiu")) {
gcr_regmap =
syscon_regmap_lookup_by_compatible("nuvoton,npcm750-gcr");
if (IS_ERR(gcr_regmap)) {
dev_warn(fiu->dev, "Didn't find nuvoton,npcm750-gcr, direct read disabled\n");
desc->nodirmap = true;
return 0;
}
regmap_update_bits(gcr_regmap, NPCM7XX_INTCR3_OFFSET,
NPCM7XX_INTCR3_FIU_FIX,
NPCM7XX_INTCR3_FIU_FIX);
} else {
regmap_update_bits(fiu->regmap, NPCM_FIU_CFG,
NPCM_FIU_CFG_FIU_FIX,
NPCM_FIU_CFG_FIU_FIX);
}
if (desc->info.op_tmpl.data.dir == SPI_MEM_DATA_IN) {
if (!fiu->spix_mode)
npcm_fiu_set_drd(fiu, &desc->info.op_tmpl);
else
npcm_fiux_set_direct_rd(fiu);
} else {
npcm_fiux_set_direct_wr(fiu);
}
return 0;
}
static int npcm_fiu_setup(struct spi_device *spi)
{
struct spi_controller *ctrl = spi->controller;
struct npcm_fiu_spi *fiu = spi_controller_get_devdata(ctrl);
struct npcm_fiu_chip *chip;
chip = &fiu->chip[spi_get_chipselect(spi, 0)];
chip->fiu = fiu;
chip->chipselect = spi_get_chipselect(spi, 0);
chip->clkrate = spi->max_speed_hz;
fiu->clkrate = clk_get_rate(fiu->clk);
return 0;
}
static const struct spi_controller_mem_ops npcm_fiu_mem_ops = {
.exec_op = npcm_fiu_exec_op,
.dirmap_create = npcm_fiu_dirmap_create,
.dirmap_read = npcm_fiu_direct_read,
.dirmap_write = npcm_fiu_direct_write,
};
static const struct of_device_id npcm_fiu_dt_ids[] = {
{ .compatible = "nuvoton,npcm750-fiu", .data = &npcm7xx_fiu_data },
{ .compatible = "nuvoton,npcm845-fiu", .data = &npxm8xx_fiu_data },
{ /* sentinel */ }
};
static int npcm_fiu_probe(struct platform_device *pdev)
{
const struct fiu_data *fiu_data_match;
struct device *dev = &pdev->dev;
struct spi_controller *ctrl;
struct npcm_fiu_spi *fiu;
void __iomem *regbase;
int id, ret;
ctrl = devm_spi_alloc_host(dev, sizeof(*fiu));
if (!ctrl)
return -ENOMEM;
fiu = spi_controller_get_devdata(ctrl);
fiu_data_match = of_device_get_match_data(dev);
if (!fiu_data_match) {
dev_err(dev, "No compatible OF match\n");
return -ENODEV;
}
id = of_alias_get_id(dev->of_node, "fiu");
if (id < 0 || id >= fiu_data_match->fiu_max) {
dev_err(dev, "Invalid platform device id: %d\n", id);
return -EINVAL;
}
fiu->info = &fiu_data_match->npcm_fiu_data_info[id];
platform_set_drvdata(pdev, fiu);
fiu->dev = dev;
regbase = devm_platform_ioremap_resource_byname(pdev, "control");
if (IS_ERR(regbase))
return PTR_ERR(regbase);
fiu->regmap = devm_regmap_init_mmio(dev, regbase,
&npcm_mtd_regmap_config);
if (IS_ERR(fiu->regmap)) {
dev_err(dev, "Failed to create regmap\n");
return PTR_ERR(fiu->regmap);
}
fiu->res_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"memory");
fiu->clk = devm_clk_get(dev, NULL);
if (IS_ERR(fiu->clk))
return PTR_ERR(fiu->clk);
fiu->spix_mode = of_property_read_bool(dev->of_node,
"nuvoton,spix-mode");
platform_set_drvdata(pdev, fiu);
clk_prepare_enable(fiu->clk);
ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD
| SPI_TX_DUAL | SPI_TX_QUAD;
ctrl->setup = npcm_fiu_setup;
ctrl->bus_num = -1;
ctrl->mem_ops = &npcm_fiu_mem_ops;
ctrl->num_chipselect = fiu->info->max_cs;
ctrl->dev.of_node = dev->of_node;
ret = devm_spi_register_controller(dev, ctrl);
if (ret)
clk_disable_unprepare(fiu->clk);
return ret;
}
static void npcm_fiu_remove(struct platform_device *pdev)
{
struct npcm_fiu_spi *fiu = platform_get_drvdata(pdev);
clk_disable_unprepare(fiu->clk);
}
MODULE_DEVICE_TABLE(of, npcm_fiu_dt_ids);
static struct platform_driver npcm_fiu_driver = {
.driver = {
.name = "NPCM-FIU",
.bus = &platform_bus_type,
.of_match_table = npcm_fiu_dt_ids,
},
.probe = npcm_fiu_probe,
.remove_new = npcm_fiu_remove,
};
module_platform_driver(npcm_fiu_driver);
MODULE_DESCRIPTION("Nuvoton FLASH Interface Unit SPI Controller Driver");
MODULE_AUTHOR("Tomer Maimon <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/spi/spi-npcm-fiu.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* polling/bitbanging SPI master controller driver utilities
*/
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#define SPI_BITBANG_CS_DELAY 100
/*----------------------------------------------------------------------*/
/*
* FIRST PART (OPTIONAL): word-at-a-time spi_transfer support.
* Use this for GPIO or shift-register level hardware APIs.
*
* spi_bitbang_cs is in spi_device->controller_state, which is unavailable
* to glue code. These bitbang setup() and cleanup() routines are always
* used, though maybe they're called from controller-aware code.
*
* chipselect() and friends may use spi_device->controller_data and
* controller registers as appropriate.
*
*
* NOTE: SPI controller pins can often be used as GPIO pins instead,
* which means you could use a bitbang driver either to get hardware
* working quickly, or to test for differences that aren't speed related.
*/
struct spi_bitbang_cs {
unsigned nsecs; /* (clock cycle time)/2 */
u32 (*txrx_word)(struct spi_device *spi, unsigned nsecs,
u32 word, u8 bits, unsigned flags);
unsigned (*txrx_bufs)(struct spi_device *,
u32 (*txrx_word)(
struct spi_device *spi,
unsigned nsecs,
u32 word, u8 bits,
unsigned flags),
unsigned, struct spi_transfer *,
unsigned);
};
static unsigned bitbang_txrx_8(
struct spi_device *spi,
u32 (*txrx_word)(struct spi_device *spi,
unsigned nsecs,
u32 word, u8 bits,
unsigned flags),
unsigned ns,
struct spi_transfer *t,
unsigned flags
)
{
unsigned bits = t->bits_per_word;
unsigned count = t->len;
const u8 *tx = t->tx_buf;
u8 *rx = t->rx_buf;
while (likely(count > 0)) {
u8 word = 0;
if (tx)
word = *tx++;
word = txrx_word(spi, ns, word, bits, flags);
if (rx)
*rx++ = word;
count -= 1;
}
return t->len - count;
}
static unsigned bitbang_txrx_16(
struct spi_device *spi,
u32 (*txrx_word)(struct spi_device *spi,
unsigned nsecs,
u32 word, u8 bits,
unsigned flags),
unsigned ns,
struct spi_transfer *t,
unsigned flags
)
{
unsigned bits = t->bits_per_word;
unsigned count = t->len;
const u16 *tx = t->tx_buf;
u16 *rx = t->rx_buf;
while (likely(count > 1)) {
u16 word = 0;
if (tx)
word = *tx++;
word = txrx_word(spi, ns, word, bits, flags);
if (rx)
*rx++ = word;
count -= 2;
}
return t->len - count;
}
static unsigned bitbang_txrx_32(
struct spi_device *spi,
u32 (*txrx_word)(struct spi_device *spi,
unsigned nsecs,
u32 word, u8 bits,
unsigned flags),
unsigned ns,
struct spi_transfer *t,
unsigned flags
)
{
unsigned bits = t->bits_per_word;
unsigned count = t->len;
const u32 *tx = t->tx_buf;
u32 *rx = t->rx_buf;
while (likely(count > 3)) {
u32 word = 0;
if (tx)
word = *tx++;
word = txrx_word(spi, ns, word, bits, flags);
if (rx)
*rx++ = word;
count -= 4;
}
return t->len - count;
}
int spi_bitbang_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
{
struct spi_bitbang_cs *cs = spi->controller_state;
u8 bits_per_word;
u32 hz;
if (t) {
bits_per_word = t->bits_per_word;
hz = t->speed_hz;
} else {
bits_per_word = 0;
hz = 0;
}
/* spi_transfer level calls that work per-word */
if (!bits_per_word)
bits_per_word = spi->bits_per_word;
if (bits_per_word <= 8)
cs->txrx_bufs = bitbang_txrx_8;
else if (bits_per_word <= 16)
cs->txrx_bufs = bitbang_txrx_16;
else if (bits_per_word <= 32)
cs->txrx_bufs = bitbang_txrx_32;
else
return -EINVAL;
/* nsecs = (clock period)/2 */
if (!hz)
hz = spi->max_speed_hz;
if (hz) {
cs->nsecs = (1000000000/2) / hz;
if (cs->nsecs > (MAX_UDELAY_MS * 1000 * 1000))
return -EINVAL;
}
return 0;
}
EXPORT_SYMBOL_GPL(spi_bitbang_setup_transfer);
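/*
 * Worked example (illustrative only, not part of the original file): for a
 * requested transfer speed of hz = 1 MHz, the code above yields
 * cs->nsecs = (1000000000 / 2) / 1000000 = 500, i.e. each half period of
 * the bit-banged clock is 500 ns, so one full bit time is 1 us (matching
 * the "%u nsec/bit" debug print in spi_bitbang_setup() below).
 */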
/*
* spi_bitbang_setup - default setup for per-word I/O loops
*/
int spi_bitbang_setup(struct spi_device *spi)
{
struct spi_bitbang_cs *cs = spi->controller_state;
struct spi_bitbang *bitbang;
bool initial_setup = false;
int retval;
bitbang = spi_master_get_devdata(spi->master);
if (!cs) {
cs = kzalloc(sizeof(*cs), GFP_KERNEL);
if (!cs)
return -ENOMEM;
spi->controller_state = cs;
initial_setup = true;
}
/* per-word shift register access, in hardware or bitbanging */
cs->txrx_word = bitbang->txrx_word[spi->mode & (SPI_CPOL|SPI_CPHA)];
if (!cs->txrx_word) {
retval = -EINVAL;
goto err_free;
}
if (bitbang->setup_transfer) {
retval = bitbang->setup_transfer(spi, NULL);
if (retval < 0)
goto err_free;
}
dev_dbg(&spi->dev, "%s, %u nsec/bit\n", __func__, 2 * cs->nsecs);
return 0;
err_free:
if (initial_setup)
kfree(cs);
return retval;
}
EXPORT_SYMBOL_GPL(spi_bitbang_setup);
/*
* spi_bitbang_cleanup - default cleanup for per-word I/O loops
*/
void spi_bitbang_cleanup(struct spi_device *spi)
{
kfree(spi->controller_state);
}
EXPORT_SYMBOL_GPL(spi_bitbang_cleanup);
static int spi_bitbang_bufs(struct spi_device *spi, struct spi_transfer *t)
{
struct spi_bitbang_cs *cs = spi->controller_state;
unsigned nsecs = cs->nsecs;
struct spi_bitbang *bitbang;
bitbang = spi_master_get_devdata(spi->master);
if (bitbang->set_line_direction) {
int err;
err = bitbang->set_line_direction(spi, !!(t->tx_buf));
if (err < 0)
return err;
}
if (spi->mode & SPI_3WIRE) {
unsigned flags;
flags = t->tx_buf ? SPI_CONTROLLER_NO_RX : SPI_CONTROLLER_NO_TX;
return cs->txrx_bufs(spi, cs->txrx_word, nsecs, t, flags);
}
return cs->txrx_bufs(spi, cs->txrx_word, nsecs, t, 0);
}
/*----------------------------------------------------------------------*/
/*
* SECOND PART ... simple transfer queue runner.
*
* This costs a task context per controller, running the queue by
* performing each transfer in sequence. Smarter hardware can queue
* several DMA transfers at once, and process several controller queues
* in parallel; this driver doesn't match such hardware very well.
*
* Drivers can provide word-at-a-time i/o primitives, or provide
* transfer-at-a-time ones to leverage dma or fifo hardware.
*/
static int spi_bitbang_prepare_hardware(struct spi_master *spi)
{
struct spi_bitbang *bitbang;
bitbang = spi_master_get_devdata(spi);
mutex_lock(&bitbang->lock);
bitbang->busy = 1;
mutex_unlock(&bitbang->lock);
return 0;
}
static int spi_bitbang_transfer_one(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *transfer)
{
struct spi_bitbang *bitbang = spi_master_get_devdata(master);
int status = 0;
if (bitbang->setup_transfer) {
status = bitbang->setup_transfer(spi, transfer);
if (status < 0)
goto out;
}
if (transfer->len)
status = bitbang->txrx_bufs(spi, transfer);
if (status == transfer->len)
status = 0;
else if (status >= 0)
status = -EREMOTEIO;
out:
spi_finalize_current_transfer(master);
return status;
}
static int spi_bitbang_unprepare_hardware(struct spi_master *spi)
{
struct spi_bitbang *bitbang;
bitbang = spi_master_get_devdata(spi);
mutex_lock(&bitbang->lock);
bitbang->busy = 0;
mutex_unlock(&bitbang->lock);
return 0;
}
static void spi_bitbang_set_cs(struct spi_device *spi, bool enable)
{
struct spi_bitbang *bitbang = spi_master_get_devdata(spi->master);
/* SPI core provides CS high / low, but bitbang driver
* expects CS active
* spi device driver takes care of handling SPI_CS_HIGH
*/
enable = (!!(spi->mode & SPI_CS_HIGH) == enable);
ndelay(SPI_BITBANG_CS_DELAY);
bitbang->chipselect(spi, enable ? BITBANG_CS_ACTIVE :
BITBANG_CS_INACTIVE);
ndelay(SPI_BITBANG_CS_DELAY);
}
/*----------------------------------------------------------------------*/
int spi_bitbang_init(struct spi_bitbang *bitbang)
{
struct spi_master *master = bitbang->master;
bool custom_cs;
if (!master)
return -EINVAL;
/*
* We only need the chipselect callback if we are actually using it.
* If we just use GPIO descriptors, it is surplus. If the
* SPI_CONTROLLER_GPIO_SS flag is set, we always need to call the
* driver-specific chipselect routine.
*/
custom_cs = (!master->use_gpio_descriptors ||
(master->flags & SPI_CONTROLLER_GPIO_SS));
if (custom_cs && !bitbang->chipselect)
return -EINVAL;
mutex_init(&bitbang->lock);
if (!master->mode_bits)
master->mode_bits = SPI_CPOL | SPI_CPHA | bitbang->flags;
if (master->transfer || master->transfer_one_message)
return -EINVAL;
master->prepare_transfer_hardware = spi_bitbang_prepare_hardware;
master->unprepare_transfer_hardware = spi_bitbang_unprepare_hardware;
master->transfer_one = spi_bitbang_transfer_one;
/*
* When using GPIO descriptors, the ->set_cs() callback doesn't even
* get called unless SPI_CONTROLLER_GPIO_SS is set.
*/
if (custom_cs)
master->set_cs = spi_bitbang_set_cs;
if (!bitbang->txrx_bufs) {
bitbang->use_dma = 0;
bitbang->txrx_bufs = spi_bitbang_bufs;
if (!master->setup) {
if (!bitbang->setup_transfer)
bitbang->setup_transfer =
spi_bitbang_setup_transfer;
master->setup = spi_bitbang_setup;
master->cleanup = spi_bitbang_cleanup;
}
}
return 0;
}
EXPORT_SYMBOL_GPL(spi_bitbang_init);
/**
* spi_bitbang_start - start up a polled/bitbanging SPI master driver
* @bitbang: driver handle
*
* Caller should have zero-initialized all parts of the structure, and then
* provided callbacks for chip selection and I/O loops. If the master has
* a transfer method, its final step should call spi_bitbang_transfer; or,
* that's the default if the transfer routine is not initialized. It should
* also set up the bus number and number of chipselects.
*
* For i/o loops, provide callbacks either per-word (for bitbanging, or for
* hardware that basically exposes a shift register) or per-spi_transfer
* (which takes better advantage of hardware like fifos or DMA engines).
*
* Drivers using per-word I/O loops should use (or call) spi_bitbang_setup,
* spi_bitbang_cleanup and spi_bitbang_setup_transfer to handle those spi
* master methods. Those methods are the defaults if the bitbang->txrx_bufs
* routine isn't initialized.
*
* This routine registers the spi_master, which will process requests in a
* dedicated task, keeping IRQs unblocked most of the time. To stop
* processing those requests, call spi_bitbang_stop().
*
* On success, this routine will take a reference to master. The caller is
* responsible for calling spi_bitbang_stop() to decrement the reference and
* spi_master_put() as counterpart of spi_alloc_master() to prevent a memory
* leak.
*/
int spi_bitbang_start(struct spi_bitbang *bitbang)
{
struct spi_master *master = bitbang->master;
int ret;
ret = spi_bitbang_init(bitbang);
if (ret)
return ret;
/* driver may get busy before register() returns, especially
* if someone registered boardinfo for devices
*/
ret = spi_register_master(spi_master_get(master));
if (ret)
spi_master_put(master);
return ret;
}
EXPORT_SYMBOL_GPL(spi_bitbang_start);
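/*
 * Illustrative sketch (not part of the original file): a minimal
 * word-at-a-time user of the helpers above, relying only on headers this
 * file already includes. The probe shape and the my_chipselect() /
 * my_txrx_word() callbacks are hypothetical placeholders for real hardware
 * access, so the block is deliberately compiled out.
 */
#if 0	/* example only */
static int my_bitbang_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct spi_bitbang *bb;
	int ret;

	/* devdata is zero-initialized by spi_alloc_master() */
	master = spi_alloc_master(&pdev->dev, sizeof(*bb));
	if (!master)
		return -ENOMEM;

	master->bus_num = pdev->id;
	master->num_chipselect = 1;

	bb = spi_master_get_devdata(master);
	bb->master = master;
	bb->chipselect = my_chipselect;			/* hypothetical */
	bb->txrx_word[SPI_MODE_0] = my_txrx_word;	/* hypothetical */

	/* registers the master; see the kernel-doc above for lifetime rules */
	ret = spi_bitbang_start(bb);
	if (ret)
		spi_master_put(master);
	return ret;
}
#endif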
/*
* spi_bitbang_stop - stops the task providing spi communication
*/
void spi_bitbang_stop(struct spi_bitbang *bitbang)
{
spi_unregister_master(bitbang->master);
}
EXPORT_SYMBOL_GPL(spi_bitbang_stop);
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-bitbang.c |
// SPDX-License-Identifier: (GPL-2.0)
/*
* Microchip coreQSPI QSPI controller driver
*
* Copyright (C) 2018-2022 Microchip Technology Inc. and its subsidiaries
*
* Author: Naga Sureshkumar Relli <[email protected]>
*
*/
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
/*
* QSPI Control register mask defines
*/
#define CONTROL_ENABLE BIT(0)
#define CONTROL_MASTER BIT(1)
#define CONTROL_XIP BIT(2)
#define CONTROL_XIPADDR BIT(3)
#define CONTROL_CLKIDLE BIT(10)
#define CONTROL_SAMPLE_MASK GENMASK(12, 11)
#define CONTROL_MODE0 BIT(13)
#define CONTROL_MODE12_MASK GENMASK(15, 14)
#define CONTROL_MODE12_EX_RO BIT(14)
#define CONTROL_MODE12_EX_RW BIT(15)
#define CONTROL_MODE12_FULL GENMASK(15, 14)
#define CONTROL_FLAGSX4 BIT(16)
#define CONTROL_CLKRATE_MASK GENMASK(27, 24)
#define CONTROL_CLKRATE_SHIFT 24
/*
* QSPI Frames register mask defines
*/
#define FRAMES_TOTALBYTES_MASK GENMASK(15, 0)
#define FRAMES_CMDBYTES_MASK GENMASK(24, 16)
#define FRAMES_CMDBYTES_SHIFT 16
#define FRAMES_SHIFT 25
#define FRAMES_IDLE_MASK GENMASK(29, 26)
#define FRAMES_IDLE_SHIFT 26
#define FRAMES_FLAGBYTE BIT(30)
#define FRAMES_FLAGWORD BIT(31)
/*
* QSPI Interrupt Enable register mask defines
*/
#define IEN_TXDONE BIT(0)
#define IEN_RXDONE BIT(1)
#define IEN_RXAVAILABLE BIT(2)
#define IEN_TXAVAILABLE BIT(3)
#define IEN_RXFIFOEMPTY BIT(4)
#define IEN_TXFIFOFULL BIT(5)
/*
* QSPI Status register mask defines
*/
#define STATUS_TXDONE BIT(0)
#define STATUS_RXDONE BIT(1)
#define STATUS_RXAVAILABLE BIT(2)
#define STATUS_TXAVAILABLE BIT(3)
#define STATUS_RXFIFOEMPTY BIT(4)
#define STATUS_TXFIFOFULL BIT(5)
#define STATUS_READY BIT(7)
#define STATUS_FLAGSX4 BIT(8)
#define STATUS_MASK GENMASK(8, 0)
#define BYTESUPPER_MASK GENMASK(31, 16)
#define BYTESLOWER_MASK GENMASK(15, 0)
#define MAX_DIVIDER 16
#define MIN_DIVIDER 0
#define MAX_DATA_CMD_LEN 256
/* QSPI ready time out value */
#define TIMEOUT_MS 500
/*
* QSPI Register offsets.
*/
#define REG_CONTROL (0x00)
#define REG_FRAMES (0x04)
#define REG_IEN (0x0c)
#define REG_STATUS (0x10)
#define REG_DIRECT_ACCESS (0x14)
#define REG_UPPER_ACCESS (0x18)
#define REG_RX_DATA (0x40)
#define REG_TX_DATA (0x44)
#define REG_X4_RX_DATA (0x48)
#define REG_X4_TX_DATA (0x4c)
#define REG_FRAMESUP (0x50)
/**
* struct mchp_coreqspi - Defines qspi driver instance
* @regs: Virtual address of the QSPI controller registers
* @clk: QSPI Operating clock
* @data_completion: completion structure
* @op_lock: lock access to the device
* @txbuf: TX buffer
* @rxbuf: RX buffer
* @irq: IRQ number
* @tx_len: Number of bytes left to transfer
* @rx_len: Number of bytes left to receive
*/
struct mchp_coreqspi {
void __iomem *regs;
struct clk *clk;
struct completion data_completion;
struct mutex op_lock; /* lock access to the device */
u8 *txbuf;
u8 *rxbuf;
int irq;
int tx_len;
int rx_len;
};
static int mchp_coreqspi_set_mode(struct mchp_coreqspi *qspi, const struct spi_mem_op *op)
{
u32 control = readl_relaxed(qspi->regs + REG_CONTROL);
/*
* The operating mode can be configured based on the command that needs to be sent.
* bits[15:14]: Sets whether multiple bit SPI operates in normal, extended or full modes.
* 00: Normal (single DQ0 TX and single DQ1 RX lines)
* 01: Extended RO (command and address bytes on DQ0 only)
* 10: Extended RW (command byte on DQ0 only)
* 11: Full. (command and address are on all DQ lines)
* bit[13]: Sets whether multiple bit SPI uses 2 or 4 bits of data
* 0: 2-bits (BSPI)
* 1: 4-bits (QSPI)
*/
if (op->data.buswidth == 4 || op->data.buswidth == 2) {
control &= ~CONTROL_MODE12_MASK;
if (op->cmd.buswidth == 1 && (op->addr.buswidth == 1 || op->addr.buswidth == 0))
control |= CONTROL_MODE12_EX_RO;
else if (op->cmd.buswidth == 1)
control |= CONTROL_MODE12_EX_RW;
else
control |= CONTROL_MODE12_FULL;
control |= CONTROL_MODE0;
} else {
control &= ~(CONTROL_MODE12_MASK |
CONTROL_MODE0);
}
writel_relaxed(control, qspi->regs + REG_CONTROL);
return 0;
}
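/*
 * Illustrative mapping (not part of the original file), assuming typical
 * SPI-NOR operations as handled by the function above:
 *   - a 1-1-4 read (cmd and addr on DQ0, data on 4 lines): EX_RO | MODE0
 *   - a 1-4-4 operation (only the cmd byte on DQ0):        EX_RW | MODE0
 *   - a 4-4-4 operation (everything on 4 lines):           FULL  | MODE0
 *   - a plain 1-1-1 operation clears both MODE12 and MODE0
 */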
static inline void mchp_coreqspi_read_op(struct mchp_coreqspi *qspi)
{
u32 control, data;
if (!qspi->rx_len)
return;
control = readl_relaxed(qspi->regs + REG_CONTROL);
/*
* Read 4-bytes from the SPI FIFO in single transaction and then read
* the reamaining data byte wise.
*/
control |= CONTROL_FLAGSX4;
writel_relaxed(control, qspi->regs + REG_CONTROL);
while (qspi->rx_len >= 4) {
while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_RXFIFOEMPTY)
;
data = readl_relaxed(qspi->regs + REG_X4_RX_DATA);
*(u32 *)qspi->rxbuf = data;
qspi->rxbuf += 4;
qspi->rx_len -= 4;
}
control &= ~CONTROL_FLAGSX4;
writel_relaxed(control, qspi->regs + REG_CONTROL);
while (qspi->rx_len--) {
while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_RXFIFOEMPTY)
;
data = readl_relaxed(qspi->regs + REG_RX_DATA);
*qspi->rxbuf++ = (data & 0xFF);
}
}
static inline void mchp_coreqspi_write_op(struct mchp_coreqspi *qspi, bool word)
{
u32 control, data;
control = readl_relaxed(qspi->regs + REG_CONTROL);
control |= CONTROL_FLAGSX4;
writel_relaxed(control, qspi->regs + REG_CONTROL);
while (qspi->tx_len >= 4) {
while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_TXFIFOFULL)
;
data = *(u32 *)qspi->txbuf;
qspi->txbuf += 4;
qspi->tx_len -= 4;
writel_relaxed(data, qspi->regs + REG_X4_TX_DATA);
}
control &= ~CONTROL_FLAGSX4;
writel_relaxed(control, qspi->regs + REG_CONTROL);
while (qspi->tx_len--) {
while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_TXFIFOFULL)
;
data = *qspi->txbuf++;
writel_relaxed(data, qspi->regs + REG_TX_DATA);
}
}
static void mchp_coreqspi_enable_ints(struct mchp_coreqspi *qspi)
{
u32 mask = IEN_TXDONE |
IEN_RXDONE |
IEN_RXAVAILABLE;
writel_relaxed(mask, qspi->regs + REG_IEN);
}
static void mchp_coreqspi_disable_ints(struct mchp_coreqspi *qspi)
{
writel_relaxed(0, qspi->regs + REG_IEN);
}
static irqreturn_t mchp_coreqspi_isr(int irq, void *dev_id)
{
struct mchp_coreqspi *qspi = (struct mchp_coreqspi *)dev_id;
irqreturn_t ret = IRQ_NONE;
int intfield = readl_relaxed(qspi->regs + REG_STATUS) & STATUS_MASK;
if (intfield == 0)
return ret;
if (intfield & IEN_TXDONE) {
writel_relaxed(IEN_TXDONE, qspi->regs + REG_STATUS);
ret = IRQ_HANDLED;
}
if (intfield & IEN_RXAVAILABLE) {
writel_relaxed(IEN_RXAVAILABLE, qspi->regs + REG_STATUS);
mchp_coreqspi_read_op(qspi);
ret = IRQ_HANDLED;
}
if (intfield & IEN_RXDONE) {
writel_relaxed(IEN_RXDONE, qspi->regs + REG_STATUS);
complete(&qspi->data_completion);
ret = IRQ_HANDLED;
}
return ret;
}
static int mchp_coreqspi_setup_clock(struct mchp_coreqspi *qspi, struct spi_device *spi)
{
unsigned long clk_hz;
u32 control, baud_rate_val = 0;
clk_hz = clk_get_rate(qspi->clk);
if (!clk_hz)
return -EINVAL;
baud_rate_val = DIV_ROUND_UP(clk_hz, 2 * spi->max_speed_hz);
if (baud_rate_val > MAX_DIVIDER || baud_rate_val < MIN_DIVIDER) {
dev_err(&spi->dev,
"could not configure the clock for spi clock %d Hz & system clock %ld Hz\n",
spi->max_speed_hz, clk_hz);
return -EINVAL;
}
control = readl_relaxed(qspi->regs + REG_CONTROL);
control |= baud_rate_val << CONTROL_CLKRATE_SHIFT;
writel_relaxed(control, qspi->regs + REG_CONTROL);
control = readl_relaxed(qspi->regs + REG_CONTROL);
if ((spi->mode & SPI_CPOL) && (spi->mode & SPI_CPHA))
control |= CONTROL_CLKIDLE;
else
control &= ~CONTROL_CLKIDLE;
writel_relaxed(control, qspi->regs + REG_CONTROL);
return 0;
}
static int mchp_coreqspi_setup_op(struct spi_device *spi_dev)
{
struct spi_controller *ctlr = spi_dev->master;
struct mchp_coreqspi *qspi = spi_controller_get_devdata(ctlr);
u32 control = readl_relaxed(qspi->regs + REG_CONTROL);
control |= (CONTROL_MASTER | CONTROL_ENABLE);
control &= ~CONTROL_CLKIDLE;
writel_relaxed(control, qspi->regs + REG_CONTROL);
return 0;
}
static inline void mchp_coreqspi_config_op(struct mchp_coreqspi *qspi, const struct spi_mem_op *op)
{
u32 idle_cycles = 0;
int total_bytes, cmd_bytes, frames, ctrl;
cmd_bytes = op->cmd.nbytes + op->addr.nbytes;
total_bytes = cmd_bytes + op->data.nbytes;
/*
* As per the coreQSPI IP spec, the number of command and data bytes is
* controlled by the frames register for each SPI sequence. This supports
* the SPI flash memory read and write sequences as below, so configure
* the cmd and total bytes accordingly.
* ---------------------------------------------------------------------
* TOTAL BYTES | CMD BYTES | What happens |
* ______________________________________________________________________
* | | |
* 1 | 1 | The SPI core will transmit a single byte |
* | | and receive data is discarded |
* | | |
* 1 | 0 | The SPI core will transmit a single byte |
* | | and return a single byte |
* | | |
* 10 | 4 | The SPI core will transmit 4 command |
* | | bytes discarding the receive data and |
* | | transmits 6 dummy bytes returning the 6 |
* | | received bytes and return a single byte |
* | | |
* 10 | 10 | The SPI core will transmit 10 command |
* | | bytes and the received data is discarded |
* | | |
* 10 | 0 | The SPI core will transmit 10 dummy |
* | | bytes and return the 10 received bytes |
* ______________________________________________________________________
*/
if (!(op->data.dir == SPI_MEM_DATA_IN))
cmd_bytes = total_bytes;
frames = total_bytes & BYTESUPPER_MASK;
writel_relaxed(frames, qspi->regs + REG_FRAMESUP);
frames = total_bytes & BYTESLOWER_MASK;
frames |= cmd_bytes << FRAMES_CMDBYTES_SHIFT;
if (op->dummy.buswidth)
idle_cycles = op->dummy.nbytes * 8 / op->dummy.buswidth;
frames |= idle_cycles << FRAMES_IDLE_SHIFT;
ctrl = readl_relaxed(qspi->regs + REG_CONTROL);
if (ctrl & CONTROL_MODE12_MASK)
frames |= (1 << FRAMES_SHIFT);
frames |= FRAMES_FLAGWORD;
writel_relaxed(frames, qspi->regs + REG_FRAMES);
}
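/*
 * Worked example (illustrative only, not part of the original file): for a
 * hypothetical 1-1-4 fast read with a 1-byte opcode, 3 address bytes, one
 * dummy byte clocked on a single line and 256 data bytes in, the function
 * above computes:
 *
 *   cmd_bytes   = 1 + 3   =   4
 *   total_bytes = 4 + 256 = 260
 *   idle_cycles = 1 * 8/1 =   8
 *
 * REG_FRAMESUP receives total_bytes[31:16] (0 here) and REG_FRAMES receives
 * total_bytes[15:0] | cmd_bytes << 16 | idle_cycles << 26 | FRAMES_FLAGWORD
 * (plus BIT(25) when a multi-bit mode is selected in the control register).
 */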
static int mchp_qspi_wait_for_ready(struct spi_mem *mem)
{
struct mchp_coreqspi *qspi = spi_controller_get_devdata
(mem->spi->master);
u32 status;
int ret;
ret = readl_poll_timeout(qspi->regs + REG_STATUS, status,
(status & STATUS_READY), 0,
TIMEOUT_MS);
if (ret) {
dev_err(&mem->spi->dev,
"Timeout waiting on QSPI ready.\n");
return -ETIMEDOUT;
}
return ret;
}
static int mchp_coreqspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
struct mchp_coreqspi *qspi = spi_controller_get_devdata
(mem->spi->master);
u32 address = op->addr.val;
u8 opcode = op->cmd.opcode;
u8 opaddr[5];
int err, i;
mutex_lock(&qspi->op_lock);
err = mchp_qspi_wait_for_ready(mem);
if (err)
goto error;
err = mchp_coreqspi_setup_clock(qspi, mem->spi);
if (err)
goto error;
err = mchp_coreqspi_set_mode(qspi, op);
if (err)
goto error;
reinit_completion(&qspi->data_completion);
mchp_coreqspi_config_op(qspi, op);
if (op->cmd.opcode) {
qspi->txbuf = &opcode;
qspi->rxbuf = NULL;
qspi->tx_len = op->cmd.nbytes;
qspi->rx_len = 0;
mchp_coreqspi_write_op(qspi, false);
}
qspi->txbuf = &opaddr[0];
if (op->addr.nbytes) {
for (i = 0; i < op->addr.nbytes; i++)
qspi->txbuf[i] = address >> (8 * (op->addr.nbytes - i - 1));
qspi->rxbuf = NULL;
qspi->tx_len = op->addr.nbytes;
qspi->rx_len = 0;
mchp_coreqspi_write_op(qspi, false);
}
if (op->data.nbytes) {
if (op->data.dir == SPI_MEM_DATA_OUT) {
qspi->txbuf = (u8 *)op->data.buf.out;
qspi->rxbuf = NULL;
qspi->rx_len = 0;
qspi->tx_len = op->data.nbytes;
mchp_coreqspi_write_op(qspi, true);
} else {
qspi->txbuf = NULL;
qspi->rxbuf = (u8 *)op->data.buf.in;
qspi->rx_len = op->data.nbytes;
qspi->tx_len = 0;
}
}
mchp_coreqspi_enable_ints(qspi);
if (!wait_for_completion_timeout(&qspi->data_completion, msecs_to_jiffies(1000)))
err = -ETIMEDOUT;
error:
mutex_unlock(&qspi->op_lock);
mchp_coreqspi_disable_ints(qspi);
return err;
}
static bool mchp_coreqspi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
if (!spi_mem_default_supports_op(mem, op))
return false;
if ((op->data.buswidth == 4 || op->data.buswidth == 2) &&
(op->cmd.buswidth == 1 && (op->addr.buswidth == 1 || op->addr.buswidth == 0))) {
/*
* If the command and address are on DQ0 only, then this
* controller doesn't support sending data on dual and
* quad lines, but it does support reading data on dual and
* quad lines with the same configuration as command and
* address on DQ0.
* i.e. the control register[15:13] EX_RO (read only) setting
* only applies when the command and address are on DQ0 and
* data is being read; it cannot be used to write data.
* Ex: 0x34h (Quad Load Program Data) is therefore not
* supported. The spi-mem layer will then iterate over each
* command and choose a supported one.
*/
if (op->data.dir == SPI_MEM_DATA_OUT)
return false;
}
return true;
}
static int mchp_coreqspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
if (op->data.dir == SPI_MEM_DATA_OUT || op->data.dir == SPI_MEM_DATA_IN) {
if (op->data.nbytes > MAX_DATA_CMD_LEN)
op->data.nbytes = MAX_DATA_CMD_LEN;
}
return 0;
}
static const struct spi_controller_mem_ops mchp_coreqspi_mem_ops = {
.adjust_op_size = mchp_coreqspi_adjust_op_size,
.supports_op = mchp_coreqspi_supports_op,
.exec_op = mchp_coreqspi_exec_op,
};
static int mchp_coreqspi_probe(struct platform_device *pdev)
{
struct spi_controller *ctlr;
struct mchp_coreqspi *qspi;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
int ret;
ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*qspi));
if (!ctlr)
return dev_err_probe(&pdev->dev, -ENOMEM,
"unable to allocate master for QSPI controller\n");
qspi = spi_controller_get_devdata(ctlr);
platform_set_drvdata(pdev, qspi);
qspi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(qspi->regs))
return dev_err_probe(&pdev->dev, PTR_ERR(qspi->regs),
"failed to map registers\n");
qspi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(qspi->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(qspi->clk),
"could not get clock\n");
ret = clk_prepare_enable(qspi->clk);
if (ret)
return dev_err_probe(&pdev->dev, ret,
"failed to enable clock\n");
init_completion(&qspi->data_completion);
mutex_init(&qspi->op_lock);
qspi->irq = platform_get_irq(pdev, 0);
if (qspi->irq < 0) {
ret = qspi->irq;
goto out;
}
ret = devm_request_irq(&pdev->dev, qspi->irq, mchp_coreqspi_isr,
IRQF_SHARED, pdev->name, qspi);
if (ret) {
dev_err(&pdev->dev, "request_irq failed %d\n", ret);
goto out;
}
ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
ctlr->mem_ops = &mchp_coreqspi_mem_ops;
ctlr->setup = mchp_coreqspi_setup_op;
ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD |
SPI_TX_DUAL | SPI_TX_QUAD;
ctlr->dev.of_node = np;
ret = devm_spi_register_controller(&pdev->dev, ctlr);
if (ret) {
dev_err_probe(&pdev->dev, ret,
"spi_register_controller failed\n");
goto out;
}
return 0;
out:
clk_disable_unprepare(qspi->clk);
return ret;
}
static void mchp_coreqspi_remove(struct platform_device *pdev)
{
struct mchp_coreqspi *qspi = platform_get_drvdata(pdev);
u32 control = readl_relaxed(qspi->regs + REG_CONTROL);
mchp_coreqspi_disable_ints(qspi);
control &= ~CONTROL_ENABLE;
writel_relaxed(control, qspi->regs + REG_CONTROL);
clk_disable_unprepare(qspi->clk);
}
static const struct of_device_id mchp_coreqspi_of_match[] = {
{ .compatible = "microchip,coreqspi-rtl-v2" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mchp_coreqspi_of_match);
static struct platform_driver mchp_coreqspi_driver = {
.probe = mchp_coreqspi_probe,
.driver = {
.name = "microchip,coreqspi",
.of_match_table = mchp_coreqspi_of_match,
},
.remove_new = mchp_coreqspi_remove,
};
module_platform_driver(mchp_coreqspi_driver);
MODULE_AUTHOR("Naga Sureshkumar Relli <[email protected]");
MODULE_DESCRIPTION("Microchip coreQSPI QSPI controller driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-microchip-core-qspi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Altera SPI driver
*
* Copyright (C) 2008 Thomas Chou <[email protected]>
*
* Based on spi_s3c24xx.c, which is:
* Copyright (c) 2006 Ben Dooks
* Copyright (c) 2006 Simtec Electronics
* Ben Dooks <[email protected]>
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/altera.h>
#include <linux/spi/spi.h>
#include <linux/io.h>
#include <linux/of.h>
#define DRV_NAME "spi_altera"
#define ALTERA_SPI_RXDATA 0
#define ALTERA_SPI_TXDATA 4
#define ALTERA_SPI_STATUS 8
#define ALTERA_SPI_CONTROL 12
#define ALTERA_SPI_TARGET_SEL 20
#define ALTERA_SPI_STATUS_ROE_MSK 0x8
#define ALTERA_SPI_STATUS_TOE_MSK 0x10
#define ALTERA_SPI_STATUS_TMT_MSK 0x20
#define ALTERA_SPI_STATUS_TRDY_MSK 0x40
#define ALTERA_SPI_STATUS_RRDY_MSK 0x80
#define ALTERA_SPI_STATUS_E_MSK 0x100
#define ALTERA_SPI_CONTROL_IROE_MSK 0x8
#define ALTERA_SPI_CONTROL_ITOE_MSK 0x10
#define ALTERA_SPI_CONTROL_ITRDY_MSK 0x40
#define ALTERA_SPI_CONTROL_IRRDY_MSK 0x80
#define ALTERA_SPI_CONTROL_IE_MSK 0x100
#define ALTERA_SPI_CONTROL_SSO_MSK 0x400
static int altr_spi_writel(struct altera_spi *hw, unsigned int reg,
unsigned int val)
{
int ret;
ret = regmap_write(hw->regmap, hw->regoff + reg, val);
if (ret)
dev_err(hw->dev, "fail to write reg 0x%x val 0x%x: %d\n",
reg, val, ret);
return ret;
}
static int altr_spi_readl(struct altera_spi *hw, unsigned int reg,
unsigned int *val)
{
int ret;
ret = regmap_read(hw->regmap, hw->regoff + reg, val);
if (ret)
dev_err(hw->dev, "fail to read reg 0x%x: %d\n", reg, ret);
return ret;
}
static inline struct altera_spi *altera_spi_to_hw(struct spi_device *sdev)
{
return spi_controller_get_devdata(sdev->controller);
}
static void altera_spi_set_cs(struct spi_device *spi, bool is_high)
{
struct altera_spi *hw = altera_spi_to_hw(spi);
if (is_high) {
hw->imr &= ~ALTERA_SPI_CONTROL_SSO_MSK;
altr_spi_writel(hw, ALTERA_SPI_CONTROL, hw->imr);
altr_spi_writel(hw, ALTERA_SPI_TARGET_SEL, 0);
} else {
altr_spi_writel(hw, ALTERA_SPI_TARGET_SEL,
BIT(spi_get_chipselect(spi, 0)));
hw->imr |= ALTERA_SPI_CONTROL_SSO_MSK;
altr_spi_writel(hw, ALTERA_SPI_CONTROL, hw->imr);
}
}
static void altera_spi_tx_word(struct altera_spi *hw)
{
unsigned int txd = 0;
if (hw->tx) {
switch (hw->bytes_per_word) {
case 1:
txd = hw->tx[hw->count];
break;
case 2:
txd = (hw->tx[hw->count * 2]
| (hw->tx[hw->count * 2 + 1] << 8));
break;
case 4:
txd = (hw->tx[hw->count * 4]
| (hw->tx[hw->count * 4 + 1] << 8)
| (hw->tx[hw->count * 4 + 2] << 16)
| (hw->tx[hw->count * 4 + 3] << 24));
break;
}
}
altr_spi_writel(hw, ALTERA_SPI_TXDATA, txd);
}
static void altera_spi_rx_word(struct altera_spi *hw)
{
unsigned int rxd;
altr_spi_readl(hw, ALTERA_SPI_RXDATA, &rxd);
if (hw->rx) {
switch (hw->bytes_per_word) {
case 1:
hw->rx[hw->count] = rxd;
break;
case 2:
hw->rx[hw->count * 2] = rxd;
hw->rx[hw->count * 2 + 1] = rxd >> 8;
break;
case 4:
hw->rx[hw->count * 4] = rxd;
hw->rx[hw->count * 4 + 1] = rxd >> 8;
hw->rx[hw->count * 4 + 2] = rxd >> 16;
hw->rx[hw->count * 4 + 3] = rxd >> 24;
break;
}
}
hw->count++;
}
static int altera_spi_txrx(struct spi_controller *host,
struct spi_device *spi, struct spi_transfer *t)
{
struct altera_spi *hw = spi_controller_get_devdata(host);
u32 val;
hw->tx = t->tx_buf;
hw->rx = t->rx_buf;
hw->count = 0;
hw->bytes_per_word = DIV_ROUND_UP(t->bits_per_word, 8);
hw->len = t->len / hw->bytes_per_word;
if (hw->irq >= 0) {
/* enable receive interrupt */
hw->imr |= ALTERA_SPI_CONTROL_IRRDY_MSK;
altr_spi_writel(hw, ALTERA_SPI_CONTROL, hw->imr);
/* send the first byte */
altera_spi_tx_word(hw);
return 1;
}
while (hw->count < hw->len) {
altera_spi_tx_word(hw);
for (;;) {
altr_spi_readl(hw, ALTERA_SPI_STATUS, &val);
if (val & ALTERA_SPI_STATUS_RRDY_MSK)
break;
cpu_relax();
}
altera_spi_rx_word(hw);
}
spi_finalize_current_transfer(host);
return 0;
}
irqreturn_t altera_spi_irq(int irq, void *dev)
{
struct spi_controller *host = dev;
struct altera_spi *hw = spi_controller_get_devdata(host);
altera_spi_rx_word(hw);
if (hw->count < hw->len) {
altera_spi_tx_word(hw);
} else {
/* disable receive interrupt */
hw->imr &= ~ALTERA_SPI_CONTROL_IRRDY_MSK;
altr_spi_writel(hw, ALTERA_SPI_CONTROL, hw->imr);
spi_finalize_current_transfer(host);
}
return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(altera_spi_irq);
void altera_spi_init_host(struct spi_controller *host)
{
struct altera_spi *hw = spi_controller_get_devdata(host);
u32 val;
host->transfer_one = altera_spi_txrx;
host->set_cs = altera_spi_set_cs;
/* program defaults into the registers */
hw->imr = 0; /* disable spi interrupts */
altr_spi_writel(hw, ALTERA_SPI_CONTROL, hw->imr);
altr_spi_writel(hw, ALTERA_SPI_STATUS, 0); /* clear status reg */
altr_spi_readl(hw, ALTERA_SPI_STATUS, &val);
if (val & ALTERA_SPI_STATUS_RRDY_MSK)
altr_spi_readl(hw, ALTERA_SPI_RXDATA, &val); /* flush rxdata */
}
EXPORT_SYMBOL_GPL(altera_spi_init_host);
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-altera-core.c |
// SPDX-License-Identifier: GPL-2.0+
// PCI interface driver for Loongson SPI Support
// Copyright (C) 2023 Loongson Technology Corporation Limited
#include <linux/mod_devicetable.h>
#include <linux/pci.h>
#include "spi-loongson.h"
static int loongson_spi_pci_register(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
int ret;
void __iomem *reg_base;
struct device *dev = &pdev->dev;
int pci_bar = 0;
ret = pcim_enable_device(pdev);
if (ret < 0)
return dev_err_probe(dev, ret, "cannot enable pci device\n");
ret = pcim_iomap_regions(pdev, BIT(pci_bar), pci_name(pdev));
if (ret)
return dev_err_probe(dev, ret, "failed to request and remap memory\n");
reg_base = pcim_iomap_table(pdev)[pci_bar];
ret = loongson_spi_init_controller(dev, reg_base);
if (ret)
return dev_err_probe(dev, ret, "failed to initialize controller\n");
return 0;
}
static struct pci_device_id loongson_spi_devices[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_LOONGSON, 0x7a0b) },
{ PCI_DEVICE(PCI_VENDOR_ID_LOONGSON, 0x7a1b) },
{ }
};
MODULE_DEVICE_TABLE(pci, loongson_spi_devices);
static struct pci_driver loongson_spi_pci_driver = {
.name = "loongson-spi-pci",
.id_table = loongson_spi_devices,
.probe = loongson_spi_pci_register,
.driver = {
.bus = &pci_bus_type,
.pm = &loongson_spi_dev_pm_ops,
},
};
module_pci_driver(loongson_spi_pci_driver);
MODULE_DESCRIPTION("Loongson spi pci driver");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(SPI_LOONGSON_CORE);
| linux-master | drivers/spi/spi-loongson-pci.c |
// SPDX-License-Identifier: GPL-2.0
//
// Synquacer HSSPI controller driver
//
// Copyright (c) 2015-2018 Socionext Inc.
// Copyright (c) 2018-2019 Linaro Ltd.
//
#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spinlock.h>
#include <linux/clk.h>
/* HSSPI register address definitions */
#define SYNQUACER_HSSPI_REG_MCTRL 0x00
#define SYNQUACER_HSSPI_REG_PCC0 0x04
#define SYNQUACER_HSSPI_REG_PCC(n) (SYNQUACER_HSSPI_REG_PCC0 + (n) * 4)
#define SYNQUACER_HSSPI_REG_TXF 0x14
#define SYNQUACER_HSSPI_REG_TXE 0x18
#define SYNQUACER_HSSPI_REG_TXC 0x1C
#define SYNQUACER_HSSPI_REG_RXF 0x20
#define SYNQUACER_HSSPI_REG_RXE 0x24
#define SYNQUACER_HSSPI_REG_RXC 0x28
#define SYNQUACER_HSSPI_REG_FAULTF 0x2C
#define SYNQUACER_HSSPI_REG_FAULTC 0x30
#define SYNQUACER_HSSPI_REG_DMCFG 0x34
#define SYNQUACER_HSSPI_REG_DMSTART 0x38
#define SYNQUACER_HSSPI_REG_DMBCC 0x3C
#define SYNQUACER_HSSPI_REG_DMSTATUS 0x40
#define SYNQUACER_HSSPI_REG_FIFOCFG 0x4C
#define SYNQUACER_HSSPI_REG_TX_FIFO 0x50
#define SYNQUACER_HSSPI_REG_RX_FIFO 0x90
#define SYNQUACER_HSSPI_REG_MID 0xFC
/* HSSPI register bit definitions */
#define SYNQUACER_HSSPI_MCTRL_MEN BIT(0)
#define SYNQUACER_HSSPI_MCTRL_COMMAND_SEQUENCE_EN BIT(1)
#define SYNQUACER_HSSPI_MCTRL_CDSS BIT(3)
#define SYNQUACER_HSSPI_MCTRL_MES BIT(4)
#define SYNQUACER_HSSPI_MCTRL_SYNCON BIT(5)
#define SYNQUACER_HSSPI_PCC_CPHA BIT(0)
#define SYNQUACER_HSSPI_PCC_CPOL BIT(1)
#define SYNQUACER_HSSPI_PCC_ACES BIT(2)
#define SYNQUACER_HSSPI_PCC_RTM BIT(3)
#define SYNQUACER_HSSPI_PCC_SSPOL BIT(4)
#define SYNQUACER_HSSPI_PCC_SDIR BIT(7)
#define SYNQUACER_HSSPI_PCC_SENDIAN BIT(8)
#define SYNQUACER_HSSPI_PCC_SAFESYNC BIT(16)
#define SYNQUACER_HSSPI_PCC_SS2CD_SHIFT 5U
#define SYNQUACER_HSSPI_PCC_CDRS_MASK 0x7f
#define SYNQUACER_HSSPI_PCC_CDRS_SHIFT 9U
#define SYNQUACER_HSSPI_TXF_FIFO_FULL BIT(0)
#define SYNQUACER_HSSPI_TXF_FIFO_EMPTY BIT(1)
#define SYNQUACER_HSSPI_TXF_SLAVE_RELEASED BIT(6)
#define SYNQUACER_HSSPI_TXE_FIFO_FULL BIT(0)
#define SYNQUACER_HSSPI_TXE_FIFO_EMPTY BIT(1)
#define SYNQUACER_HSSPI_TXE_SLAVE_RELEASED BIT(6)
#define SYNQUACER_HSSPI_RXF_FIFO_MORE_THAN_THRESHOLD BIT(5)
#define SYNQUACER_HSSPI_RXF_SLAVE_RELEASED BIT(6)
#define SYNQUACER_HSSPI_RXE_FIFO_MORE_THAN_THRESHOLD BIT(5)
#define SYNQUACER_HSSPI_RXE_SLAVE_RELEASED BIT(6)
#define SYNQUACER_HSSPI_DMCFG_SSDC BIT(1)
#define SYNQUACER_HSSPI_DMCFG_MSTARTEN BIT(2)
#define SYNQUACER_HSSPI_DMSTART_START BIT(0)
#define SYNQUACER_HSSPI_DMSTOP_STOP BIT(8)
#define SYNQUACER_HSSPI_DMPSEL_CS_MASK 0x3
#define SYNQUACER_HSSPI_DMPSEL_CS_SHIFT 16U
#define SYNQUACER_HSSPI_DMTRP_BUS_WIDTH_SHIFT 24U
#define SYNQUACER_HSSPI_DMTRP_DATA_MASK 0x3
#define SYNQUACER_HSSPI_DMTRP_DATA_SHIFT 26U
#define SYNQUACER_HSSPI_DMTRP_DATA_TXRX 0
#define SYNQUACER_HSSPI_DMTRP_DATA_RX 1
#define SYNQUACER_HSSPI_DMTRP_DATA_TX 2
#define SYNQUACER_HSSPI_DMSTATUS_RX_DATA_MASK 0x1f
#define SYNQUACER_HSSPI_DMSTATUS_RX_DATA_SHIFT 8U
#define SYNQUACER_HSSPI_DMSTATUS_TX_DATA_MASK 0x1f
#define SYNQUACER_HSSPI_DMSTATUS_TX_DATA_SHIFT 16U
#define SYNQUACER_HSSPI_FIFOCFG_RX_THRESHOLD_MASK 0xf
#define SYNQUACER_HSSPI_FIFOCFG_RX_THRESHOLD_SHIFT 0U
#define SYNQUACER_HSSPI_FIFOCFG_TX_THRESHOLD_MASK 0xf
#define SYNQUACER_HSSPI_FIFOCFG_TX_THRESHOLD_SHIFT 4U
#define SYNQUACER_HSSPI_FIFOCFG_FIFO_WIDTH_MASK 0x3
#define SYNQUACER_HSSPI_FIFOCFG_FIFO_WIDTH_SHIFT 8U
#define SYNQUACER_HSSPI_FIFOCFG_RX_FLUSH BIT(11)
#define SYNQUACER_HSSPI_FIFOCFG_TX_FLUSH BIT(12)
#define SYNQUACER_HSSPI_FIFO_DEPTH 16U
#define SYNQUACER_HSSPI_FIFO_TX_THRESHOLD 4U
#define SYNQUACER_HSSPI_FIFO_RX_THRESHOLD \
(SYNQUACER_HSSPI_FIFO_DEPTH - SYNQUACER_HSSPI_FIFO_TX_THRESHOLD)
#define SYNQUACER_HSSPI_TRANSFER_MODE_TX BIT(1)
#define SYNQUACER_HSSPI_TRANSFER_MODE_RX BIT(2)
#define SYNQUACER_HSSPI_TRANSFER_TMOUT_MSEC 2000U
#define SYNQUACER_HSSPI_ENABLE_TMOUT_MSEC 1000U
#define SYNQUACER_HSSPI_CLOCK_SRC_IHCLK 0
#define SYNQUACER_HSSPI_CLOCK_SRC_IPCLK 1
#define SYNQUACER_HSSPI_NUM_CHIP_SELECT 4U
#define SYNQUACER_HSSPI_IRQ_NAME_MAX 32U
struct synquacer_spi {
struct device *dev;
struct completion transfer_done;
unsigned int cs;
unsigned int bpw;
unsigned int mode;
unsigned int speed;
bool aces, rtm;
void *rx_buf;
const void *tx_buf;
struct clk *clk;
int clk_src_type;
void __iomem *regs;
u32 tx_words, rx_words;
unsigned int bus_width;
unsigned int transfer_mode;
char rx_irq_name[SYNQUACER_HSSPI_IRQ_NAME_MAX];
char tx_irq_name[SYNQUACER_HSSPI_IRQ_NAME_MAX];
};
static int read_fifo(struct synquacer_spi *sspi)
{
u32 len = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTATUS);
len = (len >> SYNQUACER_HSSPI_DMSTATUS_RX_DATA_SHIFT) &
SYNQUACER_HSSPI_DMSTATUS_RX_DATA_MASK;
len = min(len, sspi->rx_words);
switch (sspi->bpw) {
case 8: {
u8 *buf = sspi->rx_buf;
ioread8_rep(sspi->regs + SYNQUACER_HSSPI_REG_RX_FIFO,
buf, len);
sspi->rx_buf = buf + len;
break;
}
case 16: {
u16 *buf = sspi->rx_buf;
ioread16_rep(sspi->regs + SYNQUACER_HSSPI_REG_RX_FIFO,
buf, len);
sspi->rx_buf = buf + len;
break;
}
case 24:
/* fallthrough, should use 32-bit access */
case 32: {
u32 *buf = sspi->rx_buf;
ioread32_rep(sspi->regs + SYNQUACER_HSSPI_REG_RX_FIFO,
buf, len);
sspi->rx_buf = buf + len;
break;
}
default:
return -EINVAL;
}
sspi->rx_words -= len;
return 0;
}
static int write_fifo(struct synquacer_spi *sspi)
{
u32 len = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTATUS);
len = (len >> SYNQUACER_HSSPI_DMSTATUS_TX_DATA_SHIFT) &
SYNQUACER_HSSPI_DMSTATUS_TX_DATA_MASK;
len = min(SYNQUACER_HSSPI_FIFO_DEPTH - len,
sspi->tx_words);
switch (sspi->bpw) {
case 8: {
const u8 *buf = sspi->tx_buf;
iowrite8_rep(sspi->regs + SYNQUACER_HSSPI_REG_TX_FIFO,
buf, len);
sspi->tx_buf = buf + len;
break;
}
case 16: {
const u16 *buf = sspi->tx_buf;
iowrite16_rep(sspi->regs + SYNQUACER_HSSPI_REG_TX_FIFO,
buf, len);
sspi->tx_buf = buf + len;
break;
}
case 24:
/* fallthrough, should use 32-bit access */
case 32: {
const u32 *buf = sspi->tx_buf;
iowrite32_rep(sspi->regs + SYNQUACER_HSSPI_REG_TX_FIFO,
buf, len);
sspi->tx_buf = buf + len;
break;
}
default:
return -EINVAL;
}
sspi->tx_words -= len;
return 0;
}
static int synquacer_spi_config(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct synquacer_spi *sspi = spi_master_get_devdata(master);
unsigned int speed, mode, bpw, cs, bus_width, transfer_mode;
u32 rate, val, div;
/* Full Duplex only on 1-bit wide bus */
if (xfer->rx_buf && xfer->tx_buf &&
(xfer->rx_nbits != 1 || xfer->tx_nbits != 1)) {
dev_err(sspi->dev,
"RX and TX bus widths must be 1-bit for Full-Duplex!\n");
return -EINVAL;
}
if (xfer->tx_buf) {
bus_width = xfer->tx_nbits;
transfer_mode = SYNQUACER_HSSPI_TRANSFER_MODE_TX;
} else {
bus_width = xfer->rx_nbits;
transfer_mode = SYNQUACER_HSSPI_TRANSFER_MODE_RX;
}
mode = spi->mode;
cs = spi_get_chipselect(spi, 0);
speed = xfer->speed_hz;
bpw = xfer->bits_per_word;
/* return if nothing to change */
if (speed == sspi->speed &&
bus_width == sspi->bus_width && bpw == sspi->bpw &&
mode == sspi->mode && cs == sspi->cs &&
transfer_mode == sspi->transfer_mode) {
return 0;
}
sspi->transfer_mode = transfer_mode;
rate = master->max_speed_hz;
div = DIV_ROUND_UP(rate, speed);
if (div > 254) {
dev_err(sspi->dev, "Requested rate too low (%u)\n",
speed);
return -EINVAL;
}
val = readl(sspi->regs + SYNQUACER_HSSPI_REG_PCC(cs));
val &= ~SYNQUACER_HSSPI_PCC_SAFESYNC;
if (bpw == 8 && (mode & (SPI_TX_DUAL | SPI_RX_DUAL)) && div < 3)
val |= SYNQUACER_HSSPI_PCC_SAFESYNC;
if (bpw == 8 && (mode & (SPI_TX_QUAD | SPI_RX_QUAD)) && div < 6)
val |= SYNQUACER_HSSPI_PCC_SAFESYNC;
if (bpw == 16 && (mode & (SPI_TX_QUAD | SPI_RX_QUAD)) && div < 3)
val |= SYNQUACER_HSSPI_PCC_SAFESYNC;
if (mode & SPI_CPHA)
val |= SYNQUACER_HSSPI_PCC_CPHA;
else
val &= ~SYNQUACER_HSSPI_PCC_CPHA;
if (mode & SPI_CPOL)
val |= SYNQUACER_HSSPI_PCC_CPOL;
else
val &= ~SYNQUACER_HSSPI_PCC_CPOL;
if (mode & SPI_CS_HIGH)
val |= SYNQUACER_HSSPI_PCC_SSPOL;
else
val &= ~SYNQUACER_HSSPI_PCC_SSPOL;
if (mode & SPI_LSB_FIRST)
val |= SYNQUACER_HSSPI_PCC_SDIR;
else
val &= ~SYNQUACER_HSSPI_PCC_SDIR;
if (sspi->aces)
val |= SYNQUACER_HSSPI_PCC_ACES;
else
val &= ~SYNQUACER_HSSPI_PCC_ACES;
if (sspi->rtm)
val |= SYNQUACER_HSSPI_PCC_RTM;
else
val &= ~SYNQUACER_HSSPI_PCC_RTM;
val |= (3 << SYNQUACER_HSSPI_PCC_SS2CD_SHIFT);
val |= SYNQUACER_HSSPI_PCC_SENDIAN;
val &= ~(SYNQUACER_HSSPI_PCC_CDRS_MASK <<
SYNQUACER_HSSPI_PCC_CDRS_SHIFT);
val |= ((div >> 1) << SYNQUACER_HSSPI_PCC_CDRS_SHIFT);
writel(val, sspi->regs + SYNQUACER_HSSPI_REG_PCC(cs));
val = readl(sspi->regs + SYNQUACER_HSSPI_REG_FIFOCFG);
val &= ~(SYNQUACER_HSSPI_FIFOCFG_FIFO_WIDTH_MASK <<
SYNQUACER_HSSPI_FIFOCFG_FIFO_WIDTH_SHIFT);
val |= ((bpw / 8 - 1) << SYNQUACER_HSSPI_FIFOCFG_FIFO_WIDTH_SHIFT);
writel(val, sspi->regs + SYNQUACER_HSSPI_REG_FIFOCFG);
val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
val &= ~(SYNQUACER_HSSPI_DMTRP_DATA_MASK <<
SYNQUACER_HSSPI_DMTRP_DATA_SHIFT);
if (xfer->rx_buf)
val |= (SYNQUACER_HSSPI_DMTRP_DATA_RX <<
SYNQUACER_HSSPI_DMTRP_DATA_SHIFT);
else
val |= (SYNQUACER_HSSPI_DMTRP_DATA_TX <<
SYNQUACER_HSSPI_DMTRP_DATA_SHIFT);
val &= ~(3 << SYNQUACER_HSSPI_DMTRP_BUS_WIDTH_SHIFT);
val |= ((bus_width >> 1) << SYNQUACER_HSSPI_DMTRP_BUS_WIDTH_SHIFT);
writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
sspi->bpw = bpw;
sspi->mode = mode;
sspi->speed = speed;
sspi->cs = spi_get_chipselect(spi, 0);
sspi->bus_width = bus_width;
return 0;
}
static int synquacer_spi_transfer_one(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct synquacer_spi *sspi = spi_master_get_devdata(master);
int ret;
int status = 0;
u32 words;
u8 bpw;
u32 val;
val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
val &= ~SYNQUACER_HSSPI_DMSTOP_STOP;
writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
val = readl(sspi->regs + SYNQUACER_HSSPI_REG_FIFOCFG);
val |= SYNQUACER_HSSPI_FIFOCFG_RX_FLUSH;
val |= SYNQUACER_HSSPI_FIFOCFG_TX_FLUSH;
writel(val, sspi->regs + SYNQUACER_HSSPI_REG_FIFOCFG);
/*
* See if we can transfer 4 bytes as one word
* to maximize the FIFO buffer efficiency.
*/
bpw = xfer->bits_per_word;
if (bpw == 8 && !(xfer->len % 4) && !(spi->mode & SPI_LSB_FIRST))
xfer->bits_per_word = 32;
ret = synquacer_spi_config(master, spi, xfer);
/* restore */
xfer->bits_per_word = bpw;
if (ret)
return ret;
reinit_completion(&sspi->transfer_done);
sspi->tx_buf = xfer->tx_buf;
sspi->rx_buf = xfer->rx_buf;
switch (sspi->bpw) {
case 8:
words = xfer->len;
break;
case 16:
words = xfer->len / 2;
break;
case 24:
/* fallthrough, should use 32-bit access */
case 32:
words = xfer->len / 4;
break;
default:
dev_err(sspi->dev, "unsupported bpw: %d\n", sspi->bpw);
return -EINVAL;
}
if (xfer->tx_buf)
sspi->tx_words = words;
else
sspi->tx_words = 0;
if (xfer->rx_buf)
sspi->rx_words = words;
else
sspi->rx_words = 0;
if (xfer->tx_buf) {
status = write_fifo(sspi);
if (status < 0) {
dev_err(sspi->dev, "failed write_fifo. status: 0x%x\n",
status);
return status;
}
}
if (xfer->rx_buf) {
val = readl(sspi->regs + SYNQUACER_HSSPI_REG_FIFOCFG);
val &= ~(SYNQUACER_HSSPI_FIFOCFG_RX_THRESHOLD_MASK <<
SYNQUACER_HSSPI_FIFOCFG_RX_THRESHOLD_SHIFT);
val |= ((sspi->rx_words > SYNQUACER_HSSPI_FIFO_DEPTH ?
SYNQUACER_HSSPI_FIFO_RX_THRESHOLD : sspi->rx_words) <<
SYNQUACER_HSSPI_FIFOCFG_RX_THRESHOLD_SHIFT);
writel(val, sspi->regs + SYNQUACER_HSSPI_REG_FIFOCFG);
}
writel(~0, sspi->regs + SYNQUACER_HSSPI_REG_TXC);
writel(~0, sspi->regs + SYNQUACER_HSSPI_REG_RXC);
/* Trigger */
val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
val |= SYNQUACER_HSSPI_DMSTART_START;
writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
if (xfer->tx_buf) {
val = SYNQUACER_HSSPI_TXE_FIFO_EMPTY;
writel(val, sspi->regs + SYNQUACER_HSSPI_REG_TXE);
status = wait_for_completion_timeout(&sspi->transfer_done,
msecs_to_jiffies(SYNQUACER_HSSPI_TRANSFER_TMOUT_MSEC));
writel(0, sspi->regs + SYNQUACER_HSSPI_REG_TXE);
}
if (xfer->rx_buf) {
u32 buf[SYNQUACER_HSSPI_FIFO_DEPTH];
val = SYNQUACER_HSSPI_RXE_FIFO_MORE_THAN_THRESHOLD |
SYNQUACER_HSSPI_RXE_SLAVE_RELEASED;
writel(val, sspi->regs + SYNQUACER_HSSPI_REG_RXE);
status = wait_for_completion_timeout(&sspi->transfer_done,
msecs_to_jiffies(SYNQUACER_HSSPI_TRANSFER_TMOUT_MSEC));
writel(0, sspi->regs + SYNQUACER_HSSPI_REG_RXE);
/* stop RX and clean RXFIFO */
val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
val |= SYNQUACER_HSSPI_DMSTOP_STOP;
writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
sspi->rx_buf = buf;
sspi->rx_words = SYNQUACER_HSSPI_FIFO_DEPTH;
read_fifo(sspi);
}
if (status == 0) {
dev_err(sspi->dev, "failed to transfer. Timeout.\n");
return -ETIMEDOUT;
}
return 0;
}
static void synquacer_spi_set_cs(struct spi_device *spi, bool enable)
{
struct synquacer_spi *sspi = spi_master_get_devdata(spi->master);
u32 val;
val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
val &= ~(SYNQUACER_HSSPI_DMPSEL_CS_MASK <<
SYNQUACER_HSSPI_DMPSEL_CS_SHIFT);
val |= spi_get_chipselect(spi, 0) << SYNQUACER_HSSPI_DMPSEL_CS_SHIFT;
if (!enable)
val |= SYNQUACER_HSSPI_DMSTOP_STOP;
writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
}
static int synquacer_spi_wait_status_update(struct synquacer_spi *sspi,
bool enable)
{
u32 val;
unsigned long timeout = jiffies +
msecs_to_jiffies(SYNQUACER_HSSPI_ENABLE_TMOUT_MSEC);
/* wait until MES (Module Enable Status) is updated */
do {
val = readl(sspi->regs + SYNQUACER_HSSPI_REG_MCTRL) &
SYNQUACER_HSSPI_MCTRL_MES;
if (enable && val)
return 0;
if (!enable && !val)
return 0;
} while (time_before(jiffies, timeout));
dev_err(sspi->dev, "timeout occurs in updating Module Enable Status\n");
return -EBUSY;
}
static int synquacer_spi_enable(struct spi_master *master)
{
u32 val;
int status;
struct synquacer_spi *sspi = spi_master_get_devdata(master);
/* Disable module */
writel(0, sspi->regs + SYNQUACER_HSSPI_REG_MCTRL);
status = synquacer_spi_wait_status_update(sspi, false);
if (status < 0)
return status;
writel(0, sspi->regs + SYNQUACER_HSSPI_REG_TXE);
writel(0, sspi->regs + SYNQUACER_HSSPI_REG_RXE);
writel(~0, sspi->regs + SYNQUACER_HSSPI_REG_TXC);
writel(~0, sspi->regs + SYNQUACER_HSSPI_REG_RXC);
writel(~0, sspi->regs + SYNQUACER_HSSPI_REG_FAULTC);
val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMCFG);
val &= ~SYNQUACER_HSSPI_DMCFG_SSDC;
val &= ~SYNQUACER_HSSPI_DMCFG_MSTARTEN;
writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMCFG);
val = readl(sspi->regs + SYNQUACER_HSSPI_REG_MCTRL);
if (sspi->clk_src_type == SYNQUACER_HSSPI_CLOCK_SRC_IPCLK)
val |= SYNQUACER_HSSPI_MCTRL_CDSS;
else
val &= ~SYNQUACER_HSSPI_MCTRL_CDSS;
val &= ~SYNQUACER_HSSPI_MCTRL_COMMAND_SEQUENCE_EN;
val |= SYNQUACER_HSSPI_MCTRL_MEN;
val |= SYNQUACER_HSSPI_MCTRL_SYNCON;
/* Enable module */
writel(val, sspi->regs + SYNQUACER_HSSPI_REG_MCTRL);
status = synquacer_spi_wait_status_update(sspi, true);
if (status < 0)
return status;
return 0;
}
static irqreturn_t sq_spi_rx_handler(int irq, void *priv)
{
uint32_t val;
struct synquacer_spi *sspi = priv;
val = readl(sspi->regs + SYNQUACER_HSSPI_REG_RXF);
if ((val & SYNQUACER_HSSPI_RXF_SLAVE_RELEASED) ||
(val & SYNQUACER_HSSPI_RXF_FIFO_MORE_THAN_THRESHOLD)) {
read_fifo(sspi);
if (sspi->rx_words == 0) {
writel(0, sspi->regs + SYNQUACER_HSSPI_REG_RXE);
complete(&sspi->transfer_done);
}
return IRQ_HANDLED;
}
return IRQ_NONE;
}
static irqreturn_t sq_spi_tx_handler(int irq, void *priv)
{
uint32_t val;
struct synquacer_spi *sspi = priv;
val = readl(sspi->regs + SYNQUACER_HSSPI_REG_TXF);
if (val & SYNQUACER_HSSPI_TXF_FIFO_EMPTY) {
if (sspi->tx_words == 0) {
writel(0, sspi->regs + SYNQUACER_HSSPI_REG_TXE);
complete(&sspi->transfer_done);
} else {
write_fifo(sspi);
}
return IRQ_HANDLED;
}
return IRQ_NONE;
}
static int synquacer_spi_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct spi_master *master;
struct synquacer_spi *sspi;
int ret;
int rx_irq, tx_irq;
master = spi_alloc_master(&pdev->dev, sizeof(*sspi));
if (!master)
return -ENOMEM;
platform_set_drvdata(pdev, master);
sspi = spi_master_get_devdata(master);
sspi->dev = &pdev->dev;
init_completion(&sspi->transfer_done);
sspi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sspi->regs)) {
ret = PTR_ERR(sspi->regs);
goto put_spi;
}
sspi->clk_src_type = SYNQUACER_HSSPI_CLOCK_SRC_IHCLK; /* Default */
device_property_read_u32(&pdev->dev, "socionext,ihclk-rate",
&master->max_speed_hz); /* for ACPI */
if (dev_of_node(&pdev->dev)) {
if (device_property_match_string(&pdev->dev,
"clock-names", "iHCLK") >= 0) {
sspi->clk_src_type = SYNQUACER_HSSPI_CLOCK_SRC_IHCLK;
sspi->clk = devm_clk_get(sspi->dev, "iHCLK");
} else if (device_property_match_string(&pdev->dev,
"clock-names", "iPCLK") >= 0) {
sspi->clk_src_type = SYNQUACER_HSSPI_CLOCK_SRC_IPCLK;
sspi->clk = devm_clk_get(sspi->dev, "iPCLK");
} else {
dev_err(&pdev->dev, "specified wrong clock source\n");
ret = -EINVAL;
goto put_spi;
}
if (IS_ERR(sspi->clk)) {
ret = dev_err_probe(&pdev->dev, PTR_ERR(sspi->clk),
"clock not found\n");
goto put_spi;
}
ret = clk_prepare_enable(sspi->clk);
if (ret) {
dev_err(&pdev->dev, "failed to enable clock (%d)\n",
ret);
goto put_spi;
}
master->max_speed_hz = clk_get_rate(sspi->clk);
}
if (!master->max_speed_hz) {
dev_err(&pdev->dev, "missing clock source\n");
ret = -EINVAL;
goto disable_clk;
}
master->min_speed_hz = master->max_speed_hz / 254;
sspi->aces = device_property_read_bool(&pdev->dev,
"socionext,set-aces");
sspi->rtm = device_property_read_bool(&pdev->dev, "socionext,use-rtm");
master->num_chipselect = SYNQUACER_HSSPI_NUM_CHIP_SELECT;
rx_irq = platform_get_irq(pdev, 0);
if (rx_irq <= 0) {
ret = rx_irq;
goto disable_clk;
}
snprintf(sspi->rx_irq_name, SYNQUACER_HSSPI_IRQ_NAME_MAX, "%s-rx",
dev_name(&pdev->dev));
ret = devm_request_irq(&pdev->dev, rx_irq, sq_spi_rx_handler,
0, sspi->rx_irq_name, sspi);
if (ret) {
dev_err(&pdev->dev, "request rx_irq failed (%d)\n", ret);
goto disable_clk;
}
tx_irq = platform_get_irq(pdev, 1);
if (tx_irq <= 0) {
ret = tx_irq;
goto disable_clk;
}
snprintf(sspi->tx_irq_name, SYNQUACER_HSSPI_IRQ_NAME_MAX, "%s-tx",
dev_name(&pdev->dev));
ret = devm_request_irq(&pdev->dev, tx_irq, sq_spi_tx_handler,
0, sspi->tx_irq_name, sspi);
if (ret) {
dev_err(&pdev->dev, "request tx_irq failed (%d)\n", ret);
goto disable_clk;
}
master->dev.of_node = np;
master->dev.fwnode = pdev->dev.fwnode;
master->auto_runtime_pm = true;
master->bus_num = pdev->id;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_DUAL | SPI_RX_DUAL |
SPI_TX_QUAD | SPI_RX_QUAD;
master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(24) |
SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
master->set_cs = synquacer_spi_set_cs;
master->transfer_one = synquacer_spi_transfer_one;
ret = synquacer_spi_enable(master);
if (ret)
goto disable_clk;
pm_runtime_set_active(sspi->dev);
pm_runtime_enable(sspi->dev);
ret = devm_spi_register_master(sspi->dev, master);
if (ret)
goto disable_pm;
return 0;
disable_pm:
pm_runtime_disable(sspi->dev);
disable_clk:
clk_disable_unprepare(sspi->clk);
put_spi:
spi_master_put(master);
return ret;
}
static void synquacer_spi_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct synquacer_spi *sspi = spi_master_get_devdata(master);
pm_runtime_disable(sspi->dev);
clk_disable_unprepare(sspi->clk);
}
static int __maybe_unused synquacer_spi_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct synquacer_spi *sspi = spi_master_get_devdata(master);
int ret;
ret = spi_master_suspend(master);
if (ret)
return ret;
if (!pm_runtime_suspended(dev))
clk_disable_unprepare(sspi->clk);
return ret;
}
static int __maybe_unused synquacer_spi_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct synquacer_spi *sspi = spi_master_get_devdata(master);
int ret;
if (!pm_runtime_suspended(dev)) {
/* Ensure reconfigure during next xfer */
sspi->speed = 0;
ret = clk_prepare_enable(sspi->clk);
if (ret < 0) {
dev_err(dev, "failed to enable clk (%d)\n",
ret);
return ret;
}
ret = synquacer_spi_enable(master);
if (ret) {
clk_disable_unprepare(sspi->clk);
dev_err(dev, "failed to enable spi (%d)\n", ret);
return ret;
}
}
ret = spi_master_resume(master);
if (ret < 0)
clk_disable_unprepare(sspi->clk);
return ret;
}
static SIMPLE_DEV_PM_OPS(synquacer_spi_pm_ops, synquacer_spi_suspend,
synquacer_spi_resume);
static const struct of_device_id synquacer_spi_of_match[] = {
{.compatible = "socionext,synquacer-spi"},
{}
};
MODULE_DEVICE_TABLE(of, synquacer_spi_of_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id synquacer_hsspi_acpi_ids[] = {
{ "SCX0004" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(acpi, synquacer_hsspi_acpi_ids);
#endif
static struct platform_driver synquacer_spi_driver = {
.driver = {
.name = "synquacer-spi",
.pm = &synquacer_spi_pm_ops,
.of_match_table = synquacer_spi_of_match,
.acpi_match_table = ACPI_PTR(synquacer_hsspi_acpi_ids),
},
.probe = synquacer_spi_probe,
.remove_new = synquacer_spi_remove,
};
module_platform_driver(synquacer_spi_driver);
MODULE_DESCRIPTION("Socionext Synquacer HS-SPI controller driver");
MODULE_AUTHOR("Masahisa Kojima <[email protected]>");
MODULE_AUTHOR("Jassi Brar <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/spi/spi-synquacer.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2011, 2012 Cavium, Inc.
*/
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/of.h>
#include <asm/octeon/octeon.h>
#include "spi-cavium.h"
static int octeon_spi_probe(struct platform_device *pdev)
{
void __iomem *reg_base;
struct spi_controller *host;
struct octeon_spi *p;
int err = -ENOENT;
host = spi_alloc_host(&pdev->dev, sizeof(struct octeon_spi));
if (!host)
return -ENOMEM;
p = spi_controller_get_devdata(host);
platform_set_drvdata(pdev, host);
reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg_base)) {
err = PTR_ERR(reg_base);
goto fail;
}
p->register_base = reg_base;
p->sys_freq = octeon_get_io_clock_rate();
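/* Register offsets of the OCTEON MPI/SPI block, relative to register_base */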
p->regs.config = 0;
p->regs.status = 0x08;
p->regs.tx = 0x10;
p->regs.data = 0x80;
host->num_chipselect = 4;
host->mode_bits = SPI_CPHA |
SPI_CPOL |
SPI_CS_HIGH |
SPI_LSB_FIRST |
SPI_3WIRE;
host->transfer_one_message = octeon_spi_transfer_one_message;
host->bits_per_word_mask = SPI_BPW_MASK(8);
host->max_speed_hz = OCTEON_SPI_MAX_CLOCK_HZ;
host->dev.of_node = pdev->dev.of_node;
err = devm_spi_register_controller(&pdev->dev, host);
if (err) {
dev_err(&pdev->dev, "register host failed: %d\n", err);
goto fail;
}
dev_info(&pdev->dev, "OCTEON SPI bus driver\n");
return 0;
fail:
spi_controller_put(host);
return err;
}
static void octeon_spi_remove(struct platform_device *pdev)
{
struct spi_controller *host = platform_get_drvdata(pdev);
struct octeon_spi *p = spi_controller_get_devdata(host);
/* Clear the CSENA* and put everything in a known state. */
writeq(0, p->register_base + OCTEON_SPI_CFG(p));
}
static const struct of_device_id octeon_spi_match[] = {
{ .compatible = "cavium,octeon-3010-spi", },
{},
};
MODULE_DEVICE_TABLE(of, octeon_spi_match);
static struct platform_driver octeon_spi_driver = {
.driver = {
.name = "spi-octeon",
.of_match_table = octeon_spi_match,
},
.probe = octeon_spi_probe,
.remove_new = octeon_spi_remove,
};
module_platform_driver(octeon_spi_driver);
MODULE_DESCRIPTION("Cavium, Inc. OCTEON SPI bus driver");
MODULE_AUTHOR("David Daney");
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-cavium-octeon.c |
// SPDX-License-Identifier: GPL-2.0-only
//
// Driver for Cadence QSPI Controller
//
// Copyright Altera Corporation (C) 2012-2014. All rights reserved.
// Copyright Intel Corporation (C) 2019-2020. All rights reserved.
// Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/firmware/xlnx-zynqmp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/sched.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/timer.h>
#define CQSPI_NAME "cadence-qspi"
#define CQSPI_MAX_CHIPSELECT 16
/* Quirks */
#define CQSPI_NEEDS_WR_DELAY BIT(0)
#define CQSPI_DISABLE_DAC_MODE BIT(1)
#define CQSPI_SUPPORT_EXTERNAL_DMA BIT(2)
#define CQSPI_NO_SUPPORT_WR_COMPLETION BIT(3)
#define CQSPI_SLOW_SRAM BIT(4)
#define CQSPI_NEEDS_APB_AHB_HAZARD_WAR BIT(5)
/* Capabilities */
#define CQSPI_SUPPORTS_OCTAL BIT(0)
#define CQSPI_OP_WIDTH(part) ((part).nbytes ? ilog2((part).buswidth) : 0)
enum {
CLK_QSPI_APB = 0,
CLK_QSPI_AHB,
CLK_QSPI_NUM,
};
struct cqspi_st;
struct cqspi_flash_pdata {
struct cqspi_st *cqspi;
u32 clk_rate;
u32 read_delay;
u32 tshsl_ns;
u32 tsd2d_ns;
u32 tchsh_ns;
u32 tslch_ns;
u8 cs;
};
struct cqspi_st {
struct platform_device *pdev;
struct spi_controller *host;
struct clk *clk;
struct clk *clks[CLK_QSPI_NUM];
unsigned int sclk;
void __iomem *iobase;
void __iomem *ahb_base;
resource_size_t ahb_size;
struct completion transfer_complete;
struct dma_chan *rx_chan;
struct completion rx_dma_complete;
dma_addr_t mmap_phys_base;
int current_cs;
unsigned long master_ref_clk_hz;
bool is_decoded_cs;
u32 fifo_depth;
u32 fifo_width;
u32 num_chipselect;
bool rclk_en;
u32 trigger_address;
u32 wr_delay;
bool use_direct_mode;
bool use_direct_mode_wr;
struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT];
bool use_dma_read;
u32 pd_dev_id;
bool wr_completion;
bool slow_sram;
bool apb_ahb_hazard;
bool is_jh7110; /* Flag for StarFive JH7110 SoC */
};
struct cqspi_driver_platdata {
u32 hwcaps_mask;
u8 quirks;
int (*indirect_read_dma)(struct cqspi_flash_pdata *f_pdata,
u_char *rxbuf, loff_t from_addr, size_t n_rx);
u32 (*get_dma_status)(struct cqspi_st *cqspi);
int (*jh7110_clk_init)(struct platform_device *pdev,
struct cqspi_st *cqspi);
};
/* Operation timeout value */
#define CQSPI_TIMEOUT_MS 500
#define CQSPI_READ_TIMEOUT_MS 10
#define CQSPI_DUMMY_CLKS_PER_BYTE 8
#define CQSPI_DUMMY_BYTES_MAX 4
#define CQSPI_DUMMY_CLKS_MAX 31
#define CQSPI_STIG_DATA_LEN_MAX 8
/* Register map */
#define CQSPI_REG_CONFIG 0x00
#define CQSPI_REG_CONFIG_ENABLE_MASK BIT(0)
#define CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL BIT(7)
#define CQSPI_REG_CONFIG_DECODE_MASK BIT(9)
#define CQSPI_REG_CONFIG_CHIPSELECT_LSB 10
#define CQSPI_REG_CONFIG_DMA_MASK BIT(15)
#define CQSPI_REG_CONFIG_BAUD_LSB 19
#define CQSPI_REG_CONFIG_DTR_PROTO BIT(24)
#define CQSPI_REG_CONFIG_DUAL_OPCODE BIT(30)
#define CQSPI_REG_CONFIG_IDLE_LSB 31
#define CQSPI_REG_CONFIG_CHIPSELECT_MASK 0xF
#define CQSPI_REG_CONFIG_BAUD_MASK 0xF
#define CQSPI_REG_RD_INSTR 0x04
#define CQSPI_REG_RD_INSTR_OPCODE_LSB 0
#define CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB 8
#define CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB 12
#define CQSPI_REG_RD_INSTR_TYPE_DATA_LSB 16
#define CQSPI_REG_RD_INSTR_MODE_EN_LSB 20
#define CQSPI_REG_RD_INSTR_DUMMY_LSB 24
#define CQSPI_REG_RD_INSTR_TYPE_INSTR_MASK 0x3
#define CQSPI_REG_RD_INSTR_TYPE_ADDR_MASK 0x3
#define CQSPI_REG_RD_INSTR_TYPE_DATA_MASK 0x3
#define CQSPI_REG_RD_INSTR_DUMMY_MASK 0x1F
#define CQSPI_REG_WR_INSTR 0x08
#define CQSPI_REG_WR_INSTR_OPCODE_LSB 0
#define CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB 12
#define CQSPI_REG_WR_INSTR_TYPE_DATA_LSB 16
#define CQSPI_REG_DELAY 0x0C
#define CQSPI_REG_DELAY_TSLCH_LSB 0
#define CQSPI_REG_DELAY_TCHSH_LSB 8
#define CQSPI_REG_DELAY_TSD2D_LSB 16
#define CQSPI_REG_DELAY_TSHSL_LSB 24
#define CQSPI_REG_DELAY_TSLCH_MASK 0xFF
#define CQSPI_REG_DELAY_TCHSH_MASK 0xFF
#define CQSPI_REG_DELAY_TSD2D_MASK 0xFF
#define CQSPI_REG_DELAY_TSHSL_MASK 0xFF
#define CQSPI_REG_READCAPTURE 0x10
#define CQSPI_REG_READCAPTURE_BYPASS_LSB 0
#define CQSPI_REG_READCAPTURE_DELAY_LSB 1
#define CQSPI_REG_READCAPTURE_DELAY_MASK 0xF
#define CQSPI_REG_SIZE 0x14
#define CQSPI_REG_SIZE_ADDRESS_LSB 0
#define CQSPI_REG_SIZE_PAGE_LSB 4
#define CQSPI_REG_SIZE_BLOCK_LSB 16
#define CQSPI_REG_SIZE_ADDRESS_MASK 0xF
#define CQSPI_REG_SIZE_PAGE_MASK 0xFFF
#define CQSPI_REG_SIZE_BLOCK_MASK 0x3F
#define CQSPI_REG_SRAMPARTITION 0x18
#define CQSPI_REG_INDIRECTTRIGGER 0x1C
#define CQSPI_REG_DMA 0x20
#define CQSPI_REG_DMA_SINGLE_LSB 0
#define CQSPI_REG_DMA_BURST_LSB 8
#define CQSPI_REG_DMA_SINGLE_MASK 0xFF
#define CQSPI_REG_DMA_BURST_MASK 0xFF
#define CQSPI_REG_REMAP 0x24
#define CQSPI_REG_MODE_BIT 0x28
#define CQSPI_REG_SDRAMLEVEL 0x2C
#define CQSPI_REG_SDRAMLEVEL_RD_LSB 0
#define CQSPI_REG_SDRAMLEVEL_WR_LSB 16
#define CQSPI_REG_SDRAMLEVEL_RD_MASK 0xFFFF
#define CQSPI_REG_SDRAMLEVEL_WR_MASK 0xFFFF
#define CQSPI_REG_WR_COMPLETION_CTRL 0x38
#define CQSPI_REG_WR_DISABLE_AUTO_POLL BIT(14)
#define CQSPI_REG_IRQSTATUS 0x40
#define CQSPI_REG_IRQMASK 0x44
#define CQSPI_REG_INDIRECTRD 0x60
#define CQSPI_REG_INDIRECTRD_START_MASK BIT(0)
#define CQSPI_REG_INDIRECTRD_CANCEL_MASK BIT(1)
#define CQSPI_REG_INDIRECTRD_DONE_MASK BIT(5)
#define CQSPI_REG_INDIRECTRDWATERMARK 0x64
#define CQSPI_REG_INDIRECTRDSTARTADDR 0x68
#define CQSPI_REG_INDIRECTRDBYTES 0x6C
#define CQSPI_REG_CMDCTRL 0x90
#define CQSPI_REG_CMDCTRL_EXECUTE_MASK BIT(0)
#define CQSPI_REG_CMDCTRL_INPROGRESS_MASK BIT(1)
#define CQSPI_REG_CMDCTRL_DUMMY_LSB 7
#define CQSPI_REG_CMDCTRL_WR_BYTES_LSB 12
#define CQSPI_REG_CMDCTRL_WR_EN_LSB 15
#define CQSPI_REG_CMDCTRL_ADD_BYTES_LSB 16
#define CQSPI_REG_CMDCTRL_ADDR_EN_LSB 19
#define CQSPI_REG_CMDCTRL_RD_BYTES_LSB 20
#define CQSPI_REG_CMDCTRL_RD_EN_LSB 23
#define CQSPI_REG_CMDCTRL_OPCODE_LSB 24
#define CQSPI_REG_CMDCTRL_WR_BYTES_MASK 0x7
#define CQSPI_REG_CMDCTRL_ADD_BYTES_MASK 0x3
#define CQSPI_REG_CMDCTRL_RD_BYTES_MASK 0x7
#define CQSPI_REG_CMDCTRL_DUMMY_MASK 0x1F
#define CQSPI_REG_INDIRECTWR 0x70
#define CQSPI_REG_INDIRECTWR_START_MASK BIT(0)
#define CQSPI_REG_INDIRECTWR_CANCEL_MASK BIT(1)
#define CQSPI_REG_INDIRECTWR_DONE_MASK BIT(5)
#define CQSPI_REG_INDIRECTWRWATERMARK 0x74
#define CQSPI_REG_INDIRECTWRSTARTADDR 0x78
#define CQSPI_REG_INDIRECTWRBYTES 0x7C
#define CQSPI_REG_INDTRIG_ADDRRANGE 0x80
#define CQSPI_REG_CMDADDRESS 0x94
#define CQSPI_REG_CMDREADDATALOWER 0xA0
#define CQSPI_REG_CMDREADDATAUPPER 0xA4
#define CQSPI_REG_CMDWRITEDATALOWER 0xA8
#define CQSPI_REG_CMDWRITEDATAUPPER 0xAC
#define CQSPI_REG_POLLING_STATUS 0xB0
#define CQSPI_REG_POLLING_STATUS_DUMMY_LSB 16
#define CQSPI_REG_OP_EXT_LOWER 0xE0
#define CQSPI_REG_OP_EXT_READ_LSB 24
#define CQSPI_REG_OP_EXT_WRITE_LSB 16
#define CQSPI_REG_OP_EXT_STIG_LSB 0
#define CQSPI_REG_VERSAL_DMA_SRC_ADDR 0x1000
#define CQSPI_REG_VERSAL_DMA_DST_ADDR 0x1800
#define CQSPI_REG_VERSAL_DMA_DST_SIZE 0x1804
#define CQSPI_REG_VERSAL_DMA_DST_CTRL 0x180C
#define CQSPI_REG_VERSAL_DMA_DST_I_STS 0x1814
#define CQSPI_REG_VERSAL_DMA_DST_I_EN 0x1818
#define CQSPI_REG_VERSAL_DMA_DST_I_DIS 0x181C
#define CQSPI_REG_VERSAL_DMA_DST_DONE_MASK BIT(1)
#define CQSPI_REG_VERSAL_DMA_DST_ADDR_MSB 0x1828
#define CQSPI_REG_VERSAL_DMA_DST_CTRL_VAL 0xF43FFA00
#define CQSPI_REG_VERSAL_ADDRRANGE_WIDTH_VAL 0x6
/* Interrupt status bits */
#define CQSPI_REG_IRQ_MODE_ERR BIT(0)
#define CQSPI_REG_IRQ_UNDERFLOW BIT(1)
#define CQSPI_REG_IRQ_IND_COMP BIT(2)
#define CQSPI_REG_IRQ_IND_RD_REJECT BIT(3)
#define CQSPI_REG_IRQ_WR_PROTECTED_ERR BIT(4)
#define CQSPI_REG_IRQ_ILLEGAL_AHB_ERR BIT(5)
#define CQSPI_REG_IRQ_WATERMARK BIT(6)
#define CQSPI_REG_IRQ_IND_SRAM_FULL BIT(12)
#define CQSPI_IRQ_MASK_RD (CQSPI_REG_IRQ_WATERMARK | \
CQSPI_REG_IRQ_IND_SRAM_FULL | \
CQSPI_REG_IRQ_IND_COMP)
#define CQSPI_IRQ_MASK_WR (CQSPI_REG_IRQ_IND_COMP | \
CQSPI_REG_IRQ_WATERMARK | \
CQSPI_REG_IRQ_UNDERFLOW)
#define CQSPI_IRQ_STATUS_MASK 0x1FFFF
#define CQSPI_DMA_UNALIGN 0x3
#define CQSPI_REG_VERSAL_DMA_VAL 0x602
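/*
 * Poll @reg until all bits in @mask are set (@clr == false) or all cleared
 * (@clr == true), using a 10 us poll interval and a CQSPI_TIMEOUT_MS timeout.
 */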
static int cqspi_wait_for_bit(void __iomem *reg, const u32 mask, bool clr)
{
u32 val;
return readl_relaxed_poll_timeout(reg, val,
(((clr ? ~val : val) & mask) == mask),
10, CQSPI_TIMEOUT_MS * 1000);
}
static bool cqspi_is_idle(struct cqspi_st *cqspi)
{
u32 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
return reg & (1UL << CQSPI_REG_CONFIG_IDLE_LSB);
}
static u32 cqspi_get_rd_sram_level(struct cqspi_st *cqspi)
{
u32 reg = readl(cqspi->iobase + CQSPI_REG_SDRAMLEVEL);
reg >>= CQSPI_REG_SDRAMLEVEL_RD_LSB;
return reg & CQSPI_REG_SDRAMLEVEL_RD_MASK;
}
static u32 cqspi_get_versal_dma_status(struct cqspi_st *cqspi)
{
u32 dma_status;
dma_status = readl(cqspi->iobase +
CQSPI_REG_VERSAL_DMA_DST_I_STS);
writel(dma_status, cqspi->iobase +
CQSPI_REG_VERSAL_DMA_DST_I_STS);
return dma_status & CQSPI_REG_VERSAL_DMA_DST_DONE_MASK;
}
static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
{
struct cqspi_st *cqspi = dev;
unsigned int irq_status;
struct device *device = &cqspi->pdev->dev;
const struct cqspi_driver_platdata *ddata;
ddata = of_device_get_match_data(device);
/* Read interrupt status */
irq_status = readl(cqspi->iobase + CQSPI_REG_IRQSTATUS);
/* Clear interrupt */
writel(irq_status, cqspi->iobase + CQSPI_REG_IRQSTATUS);
if (cqspi->use_dma_read && ddata && ddata->get_dma_status) {
if (ddata->get_dma_status(cqspi)) {
complete(&cqspi->transfer_complete);
return IRQ_HANDLED;
}
} else if (!cqspi->slow_sram)
irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;
else
irq_status &= CQSPI_REG_IRQ_WATERMARK | CQSPI_IRQ_MASK_WR;
if (irq_status)
complete(&cqspi->transfer_complete);
return IRQ_HANDLED;
}
static unsigned int cqspi_calc_rdreg(const struct spi_mem_op *op)
{
u32 rdreg = 0;
rdreg |= CQSPI_OP_WIDTH(op->cmd) << CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB;
rdreg |= CQSPI_OP_WIDTH(op->addr) << CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB;
rdreg |= CQSPI_OP_WIDTH(op->data) << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB;
return rdreg;
}
static unsigned int cqspi_calc_dummy(const struct spi_mem_op *op)
{
unsigned int dummy_clk;
if (!op->dummy.nbytes)
return 0;
dummy_clk = op->dummy.nbytes * (8 / op->dummy.buswidth);
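/* In DTR mode data is clocked on both edges, so half the cycles are needed */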
if (op->cmd.dtr)
dummy_clk /= 2;
return dummy_clk;
}
static int cqspi_wait_idle(struct cqspi_st *cqspi)
{
const unsigned int poll_idle_retry = 3;
unsigned int count = 0;
unsigned long timeout;
timeout = jiffies + msecs_to_jiffies(CQSPI_TIMEOUT_MS);
while (1) {
/*
* Read a few times in succession to ensure the controller
* is indeed idle, that is, the bit does not transition
* low again.
*/
if (cqspi_is_idle(cqspi))
count++;
else
count = 0;
if (count >= poll_idle_retry)
return 0;
if (time_after(jiffies, timeout)) {
/* Timeout, in busy mode. */
dev_err(&cqspi->pdev->dev,
"QSPI is still busy after %dms timeout.\n",
CQSPI_TIMEOUT_MS);
return -ETIMEDOUT;
}
cpu_relax();
}
}
static int cqspi_exec_flash_cmd(struct cqspi_st *cqspi, unsigned int reg)
{
void __iomem *reg_base = cqspi->iobase;
int ret;
/* Write the CMDCTRL without start execution. */
writel(reg, reg_base + CQSPI_REG_CMDCTRL);
/* Start execute */
reg |= CQSPI_REG_CMDCTRL_EXECUTE_MASK;
writel(reg, reg_base + CQSPI_REG_CMDCTRL);
/* Polling for completion. */
ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_CMDCTRL,
CQSPI_REG_CMDCTRL_INPROGRESS_MASK, 1);
if (ret) {
dev_err(&cqspi->pdev->dev,
"Flash command execution timed out.\n");
return ret;
}
/* Polling QSPI idle status. */
return cqspi_wait_idle(cqspi);
}
static int cqspi_setup_opcode_ext(struct cqspi_flash_pdata *f_pdata,
const struct spi_mem_op *op,
unsigned int shift)
{
struct cqspi_st *cqspi = f_pdata->cqspi;
void __iomem *reg_base = cqspi->iobase;
unsigned int reg;
u8 ext;
if (op->cmd.nbytes != 2)
return -EINVAL;
/* Opcode extension is the LSB. */
ext = op->cmd.opcode & 0xff;
reg = readl(reg_base + CQSPI_REG_OP_EXT_LOWER);
reg &= ~(0xff << shift);
reg |= ext << shift;
writel(reg, reg_base + CQSPI_REG_OP_EXT_LOWER);
return 0;
}
static int cqspi_enable_dtr(struct cqspi_flash_pdata *f_pdata,
const struct spi_mem_op *op, unsigned int shift)
{
struct cqspi_st *cqspi = f_pdata->cqspi;
void __iomem *reg_base = cqspi->iobase;
unsigned int reg;
int ret;
reg = readl(reg_base + CQSPI_REG_CONFIG);
/*
* We enable dual byte opcode here. The callers have to set up the
* extension opcode based on which type of operation it is.
*/
if (op->cmd.dtr) {
reg |= CQSPI_REG_CONFIG_DTR_PROTO;
reg |= CQSPI_REG_CONFIG_DUAL_OPCODE;
/* Set up command opcode extension. */
ret = cqspi_setup_opcode_ext(f_pdata, op, shift);
if (ret)
return ret;
} else {
reg &= ~CQSPI_REG_CONFIG_DTR_PROTO;
reg &= ~CQSPI_REG_CONFIG_DUAL_OPCODE;
}
writel(reg, reg_base + CQSPI_REG_CONFIG);
return cqspi_wait_idle(cqspi);
}
static int cqspi_command_read(struct cqspi_flash_pdata *f_pdata,
const struct spi_mem_op *op)
{
struct cqspi_st *cqspi = f_pdata->cqspi;
void __iomem *reg_base = cqspi->iobase;
u8 *rxbuf = op->data.buf.in;
u8 opcode;
size_t n_rx = op->data.nbytes;
unsigned int rdreg;
unsigned int reg;
unsigned int dummy_clk;
size_t read_len;
int status;
status = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_STIG_LSB);
if (status)
return status;
if (!n_rx || n_rx > CQSPI_STIG_DATA_LEN_MAX || !rxbuf) {
dev_err(&cqspi->pdev->dev,
"Invalid input argument, len %zu rxbuf 0x%p\n",
n_rx, rxbuf);
return -EINVAL;
}
if (op->cmd.dtr)
opcode = op->cmd.opcode >> 8;
else
opcode = op->cmd.opcode;
reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
rdreg = cqspi_calc_rdreg(op);
writel(rdreg, reg_base + CQSPI_REG_RD_INSTR);
dummy_clk = cqspi_calc_dummy(op);
if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
return -EOPNOTSUPP;
if (dummy_clk)
reg |= (dummy_clk & CQSPI_REG_CMDCTRL_DUMMY_MASK)
<< CQSPI_REG_CMDCTRL_DUMMY_LSB;
reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);
/* 0 means 1 byte. */
reg |= (((n_rx - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK)
<< CQSPI_REG_CMDCTRL_RD_BYTES_LSB);
/* setup ADDR BIT field */
if (op->addr.nbytes) {
reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
reg |= ((op->addr.nbytes - 1) &
CQSPI_REG_CMDCTRL_ADD_BYTES_MASK)
<< CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;
writel(op->addr.val, reg_base + CQSPI_REG_CMDADDRESS);
}
status = cqspi_exec_flash_cmd(cqspi, reg);
if (status)
return status;
reg = readl(reg_base + CQSPI_REG_CMDREADDATALOWER);
/* Put the read value into rx_buf */
read_len = (n_rx > 4) ? 4 : n_rx;
memcpy(rxbuf, &reg, read_len);
rxbuf += read_len;
if (n_rx > 4) {
reg = readl(reg_base + CQSPI_REG_CMDREADDATAUPPER);
read_len = n_rx - read_len;
memcpy(rxbuf, &reg, read_len);
}
/* Reset CMD_CTRL Reg once command read completes */
writel(0, reg_base + CQSPI_REG_CMDCTRL);
return 0;
}
static int cqspi_command_write(struct cqspi_flash_pdata *f_pdata,
const struct spi_mem_op *op)
{
struct cqspi_st *cqspi = f_pdata->cqspi;
void __iomem *reg_base = cqspi->iobase;
u8 opcode;
const u8 *txbuf = op->data.buf.out;
size_t n_tx = op->data.nbytes;
unsigned int reg;
unsigned int data;
size_t write_len;
int ret;
ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_STIG_LSB);
if (ret)
return ret;
if (n_tx > CQSPI_STIG_DATA_LEN_MAX || (n_tx && !txbuf)) {
dev_err(&cqspi->pdev->dev,
"Invalid input argument, cmdlen %zu txbuf 0x%p\n",
n_tx, txbuf);
return -EINVAL;
}
reg = cqspi_calc_rdreg(op);
writel(reg, reg_base + CQSPI_REG_RD_INSTR);
if (op->cmd.dtr)
opcode = op->cmd.opcode >> 8;
else
opcode = op->cmd.opcode;
reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
if (op->addr.nbytes) {
reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
reg |= ((op->addr.nbytes - 1) &
CQSPI_REG_CMDCTRL_ADD_BYTES_MASK)
<< CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;
writel(op->addr.val, reg_base + CQSPI_REG_CMDADDRESS);
}
if (n_tx) {
reg |= (0x1 << CQSPI_REG_CMDCTRL_WR_EN_LSB);
reg |= ((n_tx - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK)
<< CQSPI_REG_CMDCTRL_WR_BYTES_LSB;
data = 0;
write_len = (n_tx > 4) ? 4 : n_tx;
memcpy(&data, txbuf, write_len);
txbuf += write_len;
writel(data, reg_base + CQSPI_REG_CMDWRITEDATALOWER);
if (n_tx > 4) {
data = 0;
write_len = n_tx - 4;
memcpy(&data, txbuf, write_len);
writel(data, reg_base + CQSPI_REG_CMDWRITEDATAUPPER);
}
}
ret = cqspi_exec_flash_cmd(cqspi, reg);
/* Reset CMD_CTRL Reg once command write completes */
writel(0, reg_base + CQSPI_REG_CMDCTRL);
return ret;
}
static int cqspi_read_setup(struct cqspi_flash_pdata *f_pdata,
const struct spi_mem_op *op)
{
struct cqspi_st *cqspi = f_pdata->cqspi;
void __iomem *reg_base = cqspi->iobase;
unsigned int dummy_clk = 0;
unsigned int reg;
int ret;
u8 opcode;
ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_READ_LSB);
if (ret)
return ret;
if (op->cmd.dtr)
opcode = op->cmd.opcode >> 8;
else
opcode = op->cmd.opcode;
reg = opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
reg |= cqspi_calc_rdreg(op);
/* Setup dummy clock cycles */
dummy_clk = cqspi_calc_dummy(op);
if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
return -EOPNOTSUPP;
if (dummy_clk)
reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK)
<< CQSPI_REG_RD_INSTR_DUMMY_LSB;
writel(reg, reg_base + CQSPI_REG_RD_INSTR);
/* Set address width */
reg = readl(reg_base + CQSPI_REG_SIZE);
reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
reg |= (op->addr.nbytes - 1);
writel(reg, reg_base + CQSPI_REG_SIZE);
return 0;
}
static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata,
u8 *rxbuf, loff_t from_addr,
const size_t n_rx)
{
struct cqspi_st *cqspi = f_pdata->cqspi;
struct device *dev = &cqspi->pdev->dev;
void __iomem *reg_base = cqspi->iobase;
void __iomem *ahb_base = cqspi->ahb_base;
unsigned int remaining = n_rx;
unsigned int mod_bytes = n_rx % 4;
unsigned int bytes_to_read = 0;
u8 *rxbuf_end = rxbuf + n_rx;
int ret = 0;
writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
writel(remaining, reg_base + CQSPI_REG_INDIRECTRDBYTES);
/* Clear all interrupts. */
writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);
/*
* On the SoCFPGA platform, reading the SRAM is slow due to a
* hardware limitation and causes a read interrupt storm on the
* CPU. So enable only the watermark interrupt here and disable
* all read interrupts later, as we want to run the "bytes to
* read" loop with all read interrupts disabled for maximum
* performance.
*/
if (!cqspi->slow_sram)
writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK);
else
writel(CQSPI_REG_IRQ_WATERMARK, reg_base + CQSPI_REG_IRQMASK);
reinit_completion(&cqspi->transfer_complete);
writel(CQSPI_REG_INDIRECTRD_START_MASK,
reg_base + CQSPI_REG_INDIRECTRD);
while (remaining > 0) {
if (!wait_for_completion_timeout(&cqspi->transfer_complete,
msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS)))
ret = -ETIMEDOUT;
/*
* Disable all read interrupts until
* we are out of "bytes to read"
*/
if (cqspi->slow_sram)
writel(0x0, reg_base + CQSPI_REG_IRQMASK);
bytes_to_read = cqspi_get_rd_sram_level(cqspi);
if (ret && bytes_to_read == 0) {
dev_err(dev, "Indirect read timeout, no bytes\n");
goto failrd;
}
while (bytes_to_read != 0) {
unsigned int word_remain = round_down(remaining, 4);
bytes_to_read *= cqspi->fifo_width;
bytes_to_read = bytes_to_read > remaining ?
remaining : bytes_to_read;
bytes_to_read = round_down(bytes_to_read, 4);
/* Read 4 byte word chunks then single bytes */
if (bytes_to_read) {
ioread32_rep(ahb_base, rxbuf,
(bytes_to_read / 4));
} else if (!word_remain && mod_bytes) {
unsigned int temp = ioread32(ahb_base);
bytes_to_read = mod_bytes;
memcpy(rxbuf, &temp, min((unsigned int)
(rxbuf_end - rxbuf),
bytes_to_read));
}
rxbuf += bytes_to_read;
remaining -= bytes_to_read;
bytes_to_read = cqspi_get_rd_sram_level(cqspi);
}
if (remaining > 0) {
reinit_completion(&cqspi->transfer_complete);
if (cqspi->slow_sram)
writel(CQSPI_REG_IRQ_WATERMARK, reg_base + CQSPI_REG_IRQMASK);
}
}
/* Check indirect done status */
ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTRD,
CQSPI_REG_INDIRECTRD_DONE_MASK, 0);
if (ret) {
dev_err(dev, "Indirect read completion error (%i)\n", ret);
goto failrd;
}
/* Disable interrupt */
writel(0, reg_base + CQSPI_REG_IRQMASK);
/* Clear indirect completion status */
writel(CQSPI_REG_INDIRECTRD_DONE_MASK, reg_base + CQSPI_REG_INDIRECTRD);
return 0;
failrd:
/* Disable interrupt */
writel(0, reg_base + CQSPI_REG_IRQMASK);
/* Cancel the indirect read */
writel(CQSPI_REG_INDIRECTRD_CANCEL_MASK,
reg_base + CQSPI_REG_INDIRECTRD);
return ret;
}
static void cqspi_controller_enable(struct cqspi_st *cqspi, bool enable)
{
void __iomem *reg_base = cqspi->iobase;
unsigned int reg;
reg = readl(reg_base + CQSPI_REG_CONFIG);
if (enable)
reg |= CQSPI_REG_CONFIG_ENABLE_MASK;
else
reg &= ~CQSPI_REG_CONFIG_ENABLE_MASK;
writel(reg, reg_base + CQSPI_REG_CONFIG);
}
static int cqspi_versal_indirect_read_dma(struct cqspi_flash_pdata *f_pdata,
u_char *rxbuf, loff_t from_addr,
size_t n_rx)
{
struct cqspi_st *cqspi = f_pdata->cqspi;
struct device *dev = &cqspi->pdev->dev;
void __iomem *reg_base = cqspi->iobase;
u32 reg, bytes_to_dma;
loff_t addr = from_addr;
void *buf = rxbuf;
dma_addr_t dma_addr;
u8 bytes_rem;
int ret = 0;
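/* DMA the 4-byte-aligned bulk; any trailing bytes are read via indirect PIO below */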
bytes_rem = n_rx % 4;
bytes_to_dma = (n_rx - bytes_rem);
if (!bytes_to_dma)
goto nondmard;
ret = zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id, PM_OSPI_MUX_SEL_DMA);
if (ret)
return ret;
cqspi_controller_enable(cqspi, 0);
reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
reg |= CQSPI_REG_CONFIG_DMA_MASK;
writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
cqspi_controller_enable(cqspi, 1);
dma_addr = dma_map_single(dev, rxbuf, bytes_to_dma, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, dma_addr)) {
dev_err(dev, "dma mapping failed\n");
return -ENOMEM;
}
writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
writel(bytes_to_dma, reg_base + CQSPI_REG_INDIRECTRDBYTES);
writel(CQSPI_REG_VERSAL_ADDRRANGE_WIDTH_VAL,
reg_base + CQSPI_REG_INDTRIG_ADDRRANGE);
/* Clear all interrupts. */
writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);
/* Enable DMA done interrupt */
writel(CQSPI_REG_VERSAL_DMA_DST_DONE_MASK,
reg_base + CQSPI_REG_VERSAL_DMA_DST_I_EN);
/* Default DMA periph configuration */
writel(CQSPI_REG_VERSAL_DMA_VAL, reg_base + CQSPI_REG_DMA);
/* Configure DMA Dst address */
writel(lower_32_bits(dma_addr),
reg_base + CQSPI_REG_VERSAL_DMA_DST_ADDR);
writel(upper_32_bits(dma_addr),
reg_base + CQSPI_REG_VERSAL_DMA_DST_ADDR_MSB);
/* Configure DMA Src address */
writel(cqspi->trigger_address, reg_base +
CQSPI_REG_VERSAL_DMA_SRC_ADDR);
/* Set DMA destination size */
writel(bytes_to_dma, reg_base + CQSPI_REG_VERSAL_DMA_DST_SIZE);
/* Set DMA destination control */
writel(CQSPI_REG_VERSAL_DMA_DST_CTRL_VAL,
reg_base + CQSPI_REG_VERSAL_DMA_DST_CTRL);
writel(CQSPI_REG_INDIRECTRD_START_MASK,
reg_base + CQSPI_REG_INDIRECTRD);
reinit_completion(&cqspi->transfer_complete);
if (!wait_for_completion_timeout(&cqspi->transfer_complete,
msecs_to_jiffies(max_t(size_t, bytes_to_dma, 500)))) {
ret = -ETIMEDOUT;
goto failrd;
}
/* Disable DMA interrupt */
writel(0x0, cqspi->iobase + CQSPI_REG_VERSAL_DMA_DST_I_DIS);
/* Clear indirect completion status */
writel(CQSPI_REG_INDIRECTRD_DONE_MASK,
cqspi->iobase + CQSPI_REG_INDIRECTRD);
dma_unmap_single(dev, dma_addr, bytes_to_dma, DMA_FROM_DEVICE);
cqspi_controller_enable(cqspi, 0);
reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
reg &= ~CQSPI_REG_CONFIG_DMA_MASK;
writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
cqspi_controller_enable(cqspi, 1);
ret = zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id,
PM_OSPI_MUX_SEL_LINEAR);
if (ret)
return ret;
nondmard:
if (bytes_rem) {
addr += bytes_to_dma;
buf += bytes_to_dma;
ret = cqspi_indirect_read_execute(f_pdata, buf, addr,
bytes_rem);
if (ret)
return ret;
}
return 0;
failrd:
/* Disable DMA interrupt */
writel(0x0, reg_base + CQSPI_REG_VERSAL_DMA_DST_I_DIS);
/* Cancel the indirect read */
writel(CQSPI_REG_INDIRECTRD_CANCEL_MASK,
reg_base + CQSPI_REG_INDIRECTRD);
dma_unmap_single(dev, dma_addr, bytes_to_dma, DMA_FROM_DEVICE);
reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
reg &= ~CQSPI_REG_CONFIG_DMA_MASK;
writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id, PM_OSPI_MUX_SEL_LINEAR);
return ret;
}
static int cqspi_write_setup(struct cqspi_flash_pdata *f_pdata,
const struct spi_mem_op *op)
{
unsigned int reg;
int ret;
struct cqspi_st *cqspi = f_pdata->cqspi;
void __iomem *reg_base = cqspi->iobase;
u8 opcode;
ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_WRITE_LSB);
if (ret)
return ret;
if (op->cmd.dtr)
opcode = op->cmd.opcode >> 8;
else
opcode = op->cmd.opcode;
/* Set opcode. */
reg = opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
reg |= CQSPI_OP_WIDTH(op->data) << CQSPI_REG_WR_INSTR_TYPE_DATA_LSB;
reg |= CQSPI_OP_WIDTH(op->addr) << CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB;
writel(reg, reg_base + CQSPI_REG_WR_INSTR);
reg = cqspi_calc_rdreg(op);
writel(reg, reg_base + CQSPI_REG_RD_INSTR);
/*
* SPI NAND flashes require the address of the status register to be
* passed in the Read SR command. Also, some SPI NOR flashes like the
* cypress Semper flash expect a 4-byte dummy address in the Read SR
* command in DTR mode.
*
* But this controller does not support address phase in the Read SR
* command when doing auto-HW polling. So, disable write completion
* polling on the controller's side. spinand and spi-nor will take
* care of polling the status register.
*/
if (cqspi->wr_completion) {
reg = readl(reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL;
writel(reg, reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
/*
* DAC mode requires auto polling, as the flash needs to be polled
* for write completion in case of a bubble in the SPI transaction
* due to a slow CPU/DMA master.
*/
cqspi->use_direct_mode_wr = false;
}
reg = readl(reg_base + CQSPI_REG_SIZE);
reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
reg |= (op->addr.nbytes - 1);
writel(reg, reg_base + CQSPI_REG_SIZE);
return 0;
}
static int cqspi_indirect_write_execute(struct cqspi_flash_pdata *f_pdata,
loff_t to_addr, const u8 *txbuf,
const size_t n_tx)
{
struct cqspi_st *cqspi = f_pdata->cqspi;
struct device *dev = &cqspi->pdev->dev;
void __iomem *reg_base = cqspi->iobase;
unsigned int remaining = n_tx;
unsigned int write_bytes;
int ret;
writel(to_addr, reg_base + CQSPI_REG_INDIRECTWRSTARTADDR);
writel(remaining, reg_base + CQSPI_REG_INDIRECTWRBYTES);
/* Clear all interrupts. */
writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);
writel(CQSPI_IRQ_MASK_WR, reg_base + CQSPI_REG_IRQMASK);
reinit_completion(&cqspi->transfer_complete);
writel(CQSPI_REG_INDIRECTWR_START_MASK,
reg_base + CQSPI_REG_INDIRECTWR);
/*
* As per 66AK2G02 TRM SPRUHY8F section 11.15.5.3 Indirect Access
* Controller programming sequence, a couple of cycles of
* QSPI_REF_CLK delay are required for the above bit to
* be internally synchronized by the QSPI module. Provide 5
* cycles of delay.
*/
if (cqspi->wr_delay)
ndelay(cqspi->wr_delay);
/*
* If a hazard exists between the APB and AHB interfaces, perform a
* dummy readback from the controller to ensure synchronization.
*/
if (cqspi->apb_ahb_hazard)
readl(reg_base + CQSPI_REG_INDIRECTWR);
while (remaining > 0) {
size_t write_words, mod_bytes;
write_bytes = remaining;
write_words = write_bytes / 4;
mod_bytes = write_bytes % 4;
/* Write 4 bytes at a time then single bytes. */
if (write_words) {
iowrite32_rep(cqspi->ahb_base, txbuf, write_words);
txbuf += (write_words * 4);
}
if (mod_bytes) {
unsigned int temp = 0xFFFFFFFF;
memcpy(&temp, txbuf, mod_bytes);
iowrite32(temp, cqspi->ahb_base);
txbuf += mod_bytes;
}
if (!wait_for_completion_timeout(&cqspi->transfer_complete,
msecs_to_jiffies(CQSPI_TIMEOUT_MS))) {
dev_err(dev, "Indirect write timeout\n");
ret = -ETIMEDOUT;
goto failwr;
}
remaining -= write_bytes;
if (remaining > 0)
reinit_completion(&cqspi->transfer_complete);
}
/* Check indirect done status */
ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTWR,
CQSPI_REG_INDIRECTWR_DONE_MASK, 0);
if (ret) {
dev_err(dev, "Indirect write completion error (%i)\n", ret);
goto failwr;
}
/* Disable interrupt. */
writel(0, reg_base + CQSPI_REG_IRQMASK);
/* Clear indirect completion status */
writel(CQSPI_REG_INDIRECTWR_DONE_MASK, reg_base + CQSPI_REG_INDIRECTWR);
cqspi_wait_idle(cqspi);
return 0;
failwr:
/* Disable interrupt. */
writel(0, reg_base + CQSPI_REG_IRQMASK);
/* Cancel the indirect write */
writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK,
reg_base + CQSPI_REG_INDIRECTWR);
return ret;
}
static void cqspi_chipselect(struct cqspi_flash_pdata *f_pdata)
{
struct cqspi_st *cqspi = f_pdata->cqspi;
void __iomem *reg_base = cqspi->iobase;
unsigned int chip_select = f_pdata->cs;
unsigned int reg;
reg = readl(reg_base + CQSPI_REG_CONFIG);
if (cqspi->is_decoded_cs) {
reg |= CQSPI_REG_CONFIG_DECODE_MASK;
} else {
reg &= ~CQSPI_REG_CONFIG_DECODE_MASK;
/* Convert CS if without decoder.
* CS0 to 4b'1110
* CS1 to 4b'1101
* CS2 to 4b'1011
* CS3 to 4b'0111
*/
chip_select = 0xF & ~(1 << chip_select);
}
reg &= ~(CQSPI_REG_CONFIG_CHIPSELECT_MASK
<< CQSPI_REG_CONFIG_CHIPSELECT_LSB);
reg |= (chip_select & CQSPI_REG_CONFIG_CHIPSELECT_MASK)
<< CQSPI_REG_CONFIG_CHIPSELECT_LSB;
writel(reg, reg_base + CQSPI_REG_CONFIG);
}
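/*
 * Convert a delay in nanoseconds to reference clock ticks, rounding up.
 * For example, ref_clk_hz = 125000000 and ns_val = 60 gives
 * DIV_ROUND_UP(125000 * 60, 1000000) = 8 ticks.
 */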
static unsigned int calculate_ticks_for_ns(const unsigned int ref_clk_hz,
const unsigned int ns_val)
{
unsigned int ticks;
ticks = ref_clk_hz / 1000; /* kHz */
ticks = DIV_ROUND_UP(ticks * ns_val, 1000000);
return ticks;
}
static void cqspi_delay(struct cqspi_flash_pdata *f_pdata)
{
struct cqspi_st *cqspi = f_pdata->cqspi;
void __iomem *iobase = cqspi->iobase;
const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
unsigned int tshsl, tchsh, tslch, tsd2d;
unsigned int reg;
unsigned int tsclk;
/* calculate the number of ref ticks for one sclk tick */
tsclk = DIV_ROUND_UP(ref_clk_hz, cqspi->sclk);
tshsl = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tshsl_ns);
/* this particular value must be at least one sclk */
if (tshsl < tsclk)
tshsl = tsclk;
tchsh = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tchsh_ns);
tslch = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tslch_ns);
tsd2d = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tsd2d_ns);
reg = (tshsl & CQSPI_REG_DELAY_TSHSL_MASK)
<< CQSPI_REG_DELAY_TSHSL_LSB;
reg |= (tchsh & CQSPI_REG_DELAY_TCHSH_MASK)
<< CQSPI_REG_DELAY_TCHSH_LSB;
reg |= (tslch & CQSPI_REG_DELAY_TSLCH_MASK)
<< CQSPI_REG_DELAY_TSLCH_LSB;
reg |= (tsd2d & CQSPI_REG_DELAY_TSD2D_MASK)
<< CQSPI_REG_DELAY_TSD2D_LSB;
writel(reg, iobase + CQSPI_REG_DELAY);
}
static void cqspi_config_baudrate_div(struct cqspi_st *cqspi)
{
const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
void __iomem *reg_base = cqspi->iobase;
u32 reg, div;
/* Recalculate the baudrate divisor based on QSPI specification. */
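/* The output clock is ref_clk / (2 * (div + 1)); round the divisor up so the result stays at or below the requested sclk. */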
div = DIV_ROUND_UP(ref_clk_hz, 2 * cqspi->sclk) - 1;
/* Maximum baud divisor */
if (div > CQSPI_REG_CONFIG_BAUD_MASK) {
div = CQSPI_REG_CONFIG_BAUD_MASK;
dev_warn(&cqspi->pdev->dev,
"Unable to adjust clock <= %d hz. Reduced to %d hz\n",
cqspi->sclk, ref_clk_hz/((div+1)*2));
}
reg = readl(reg_base + CQSPI_REG_CONFIG);
reg &= ~(CQSPI_REG_CONFIG_BAUD_MASK << CQSPI_REG_CONFIG_BAUD_LSB);
reg |= (div & CQSPI_REG_CONFIG_BAUD_MASK) << CQSPI_REG_CONFIG_BAUD_LSB;
writel(reg, reg_base + CQSPI_REG_CONFIG);
}
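/*
 * Program the read data capture logic: optionally bypass the loopback
 * clock and set the capture delay in ref clock cycles.
 */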
static void cqspi_readdata_capture(struct cqspi_st *cqspi,
const bool bypass,
const unsigned int delay)
{
void __iomem *reg_base = cqspi->iobase;
unsigned int reg;
reg = readl(reg_base + CQSPI_REG_READCAPTURE);
if (bypass)
reg |= (1 << CQSPI_REG_READCAPTURE_BYPASS_LSB);
else
reg &= ~(1 << CQSPI_REG_READCAPTURE_BYPASS_LSB);
reg &= ~(CQSPI_REG_READCAPTURE_DELAY_MASK
<< CQSPI_REG_READCAPTURE_DELAY_LSB);
reg |= (delay & CQSPI_REG_READCAPTURE_DELAY_MASK)
<< CQSPI_REG_READCAPTURE_DELAY_LSB;
writel(reg, reg_base + CQSPI_REG_READCAPTURE);
}
static void cqspi_configure(struct cqspi_flash_pdata *f_pdata,
unsigned long sclk)
{
struct cqspi_st *cqspi = f_pdata->cqspi;
int switch_cs = (cqspi->current_cs != f_pdata->cs);
int switch_ck = (cqspi->sclk != sclk);
if (switch_cs || switch_ck)
cqspi_controller_enable(cqspi, 0);
/* Switch chip select. */
if (switch_cs) {
cqspi->current_cs = f_pdata->cs;
cqspi_chipselect(f_pdata);
}
/* Setup baudrate divisor and delays */
if (switch_ck) {
cqspi->sclk = sclk;
cqspi_config_baudrate_div(cqspi);
cqspi_delay(f_pdata);
cqspi_readdata_capture(cqspi, !cqspi->rclk_en,
f_pdata->read_delay);
}
if (switch_cs || switch_ck)
cqspi_controller_enable(cqspi, 1);
}
static ssize_t cqspi_write(struct cqspi_flash_pdata *f_pdata,
const struct spi_mem_op *op)
{
struct cqspi_st *cqspi = f_pdata->cqspi;
loff_t to = op->addr.val;
size_t len = op->data.nbytes;
const u_char *buf = op->data.buf.out;
int ret;
ret = cqspi_write_setup(f_pdata, op);
if (ret)
return ret;
/*
* Some flashes like the Cypress Semper flash expect a dummy 4-byte
* address (all 0s) with the read status register command in DTR mode.
* But this controller does not support sending dummy address bytes to
* the flash when it is polling the write completion register in DTR
* mode. So, we can not use direct mode when in DTR mode for writing
* data.
*/
if (!op->cmd.dtr && cqspi->use_direct_mode &&
cqspi->use_direct_mode_wr && ((to + len) <= cqspi->ahb_size)) {
memcpy_toio(cqspi->ahb_base + to, buf, len);
return cqspi_wait_idle(cqspi);
}
return cqspi_indirect_write_execute(f_pdata, to, buf, len);
}
static void cqspi_rx_dma_callback(void *param)
{
struct cqspi_st *cqspi = param;
complete(&cqspi->rx_dma_complete);
}
static int cqspi_direct_read_execute(struct cqspi_flash_pdata *f_pdata,
u_char *buf, loff_t from, size_t len)
{
struct cqspi_st *cqspi = f_pdata->cqspi;
struct device *dev = &cqspi->pdev->dev;
enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
dma_addr_t dma_src = (dma_addr_t)cqspi->mmap_phys_base + from;
int ret = 0;
struct dma_async_tx_descriptor *tx;
dma_cookie_t cookie;
dma_addr_t dma_dst;
struct device *ddev;
if (!cqspi->rx_chan || !virt_addr_valid(buf)) {
memcpy_fromio(buf, cqspi->ahb_base + from, len);
return 0;
}
ddev = cqspi->rx_chan->device->dev;
dma_dst = dma_map_single(ddev, buf, len, DMA_FROM_DEVICE);
if (dma_mapping_error(ddev, dma_dst)) {
dev_err(dev, "dma mapping failed\n");
return -ENOMEM;
}
tx = dmaengine_prep_dma_memcpy(cqspi->rx_chan, dma_dst, dma_src,
len, flags);
if (!tx) {
dev_err(dev, "device_prep_dma_memcpy error\n");
ret = -EIO;
goto err_unmap;
}
tx->callback = cqspi_rx_dma_callback;
tx->callback_param = cqspi;
cookie = tx->tx_submit(tx);
reinit_completion(&cqspi->rx_dma_complete);
ret = dma_submit_error(cookie);
if (ret) {
dev_err(dev, "dma_submit_error %d\n", cookie);
ret = -EIO;
goto err_unmap;
}
dma_async_issue_pending(cqspi->rx_chan);
if (!wait_for_completion_timeout(&cqspi->rx_dma_complete,
msecs_to_jiffies(max_t(size_t, len, 500)))) {
dmaengine_terminate_sync(cqspi->rx_chan);
dev_err(dev, "DMA wait_for_completion_timeout\n");
ret = -ETIMEDOUT;
goto err_unmap;
}
err_unmap:
dma_unmap_single(ddev, dma_dst, len, DMA_FROM_DEVICE);
return ret;
}
static ssize_t cqspi_read(struct cqspi_flash_pdata *f_pdata,
const struct spi_mem_op *op)
{
struct cqspi_st *cqspi = f_pdata->cqspi;
struct device *dev = &cqspi->pdev->dev;
const struct cqspi_driver_platdata *ddata;
loff_t from = op->addr.val;
size_t len = op->data.nbytes;
u_char *buf = op->data.buf.in;
u64 dma_align = (u64)(uintptr_t)buf;
int ret;
ddata = of_device_get_match_data(dev);
ret = cqspi_read_setup(f_pdata, op);
if (ret)
return ret;
if (cqspi->use_direct_mode && ((from + len) <= cqspi->ahb_size))
return cqspi_direct_read_execute(f_pdata, buf, from, len);
if (cqspi->use_dma_read && ddata && ddata->indirect_read_dma &&
virt_addr_valid(buf) && ((dma_align & CQSPI_DMA_UNALIGN) == 0))
return ddata->indirect_read_dma(f_pdata, buf, from, len);
return cqspi_indirect_read_execute(f_pdata, buf, from, len);
}
static int cqspi_mem_process(struct spi_mem *mem, const struct spi_mem_op *op)
{
struct cqspi_st *cqspi = spi_controller_get_devdata(mem->spi->controller);
struct cqspi_flash_pdata *f_pdata;
f_pdata = &cqspi->f_pdata[spi_get_chipselect(mem->spi, 0)];
cqspi_configure(f_pdata, mem->spi->max_speed_hz);
if (op->data.dir == SPI_MEM_DATA_IN && op->data.buf.in) {
/*
* Performing reads in DAC mode forces the controller to read at
* least 4 bytes, which some flash devices do not support during
* register reads. Prefer STIG mode for such small reads.
*/
if (!op->addr.nbytes ||
op->data.nbytes <= CQSPI_STIG_DATA_LEN_MAX)
return cqspi_command_read(f_pdata, op);
return cqspi_read(f_pdata, op);
}
if (!op->addr.nbytes || !op->data.buf.out)
return cqspi_command_write(f_pdata, op);
return cqspi_write(f_pdata, op);
}
static int cqspi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
int ret;
ret = cqspi_mem_process(mem, op);
if (ret)
dev_err(&mem->spi->dev, "operation failed with %d\n", ret);
return ret;
}
static bool cqspi_supports_mem_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
bool all_true, all_false;
/*
* op->dummy.dtr is required for converting nbytes into ncycles.
* Also, don't check the dtr field of the op phase having zero nbytes.
*/
all_true = op->cmd.dtr &&
(!op->addr.nbytes || op->addr.dtr) &&
(!op->dummy.nbytes || op->dummy.dtr) &&
(!op->data.nbytes || op->data.dtr);
all_false = !op->cmd.dtr && !op->addr.dtr && !op->dummy.dtr &&
!op->data.dtr;
if (all_true) {
/* Right now we only support 8-8-8 DTR mode. */
if (op->cmd.nbytes && op->cmd.buswidth != 8)
return false;
if (op->addr.nbytes && op->addr.buswidth != 8)
return false;
if (op->data.nbytes && op->data.buswidth != 8)
return false;
} else if (!all_false) {
/* Mixed DTR modes are not supported. */
return false;
}
return spi_mem_default_supports_op(mem, op);
}
static int cqspi_of_get_flash_pdata(struct platform_device *pdev,
struct cqspi_flash_pdata *f_pdata,
struct device_node *np)
{
if (of_property_read_u32(np, "cdns,read-delay", &f_pdata->read_delay)) {
dev_err(&pdev->dev, "couldn't determine read-delay\n");
return -ENXIO;
}
if (of_property_read_u32(np, "cdns,tshsl-ns", &f_pdata->tshsl_ns)) {
dev_err(&pdev->dev, "couldn't determine tshsl-ns\n");
return -ENXIO;
}
if (of_property_read_u32(np, "cdns,tsd2d-ns", &f_pdata->tsd2d_ns)) {
dev_err(&pdev->dev, "couldn't determine tsd2d-ns\n");
return -ENXIO;
}
if (of_property_read_u32(np, "cdns,tchsh-ns", &f_pdata->tchsh_ns)) {
dev_err(&pdev->dev, "couldn't determine tchsh-ns\n");
return -ENXIO;
}
if (of_property_read_u32(np, "cdns,tslch-ns", &f_pdata->tslch_ns)) {
dev_err(&pdev->dev, "couldn't determine tslch-ns\n");
return -ENXIO;
}
if (of_property_read_u32(np, "spi-max-frequency", &f_pdata->clk_rate)) {
dev_err(&pdev->dev, "couldn't determine spi-max-frequency\n");
return -ENXIO;
}
return 0;
}
static int cqspi_of_get_pdata(struct cqspi_st *cqspi)
{
struct device *dev = &cqspi->pdev->dev;
struct device_node *np = dev->of_node;
u32 id[2];
cqspi->is_decoded_cs = of_property_read_bool(np, "cdns,is-decoded-cs");
if (of_property_read_u32(np, "cdns,fifo-depth", &cqspi->fifo_depth)) {
dev_err(dev, "couldn't determine fifo-depth\n");
return -ENXIO;
}
if (of_property_read_u32(np, "cdns,fifo-width", &cqspi->fifo_width)) {
dev_err(dev, "couldn't determine fifo-width\n");
return -ENXIO;
}
if (of_property_read_u32(np, "cdns,trigger-address",
&cqspi->trigger_address)) {
dev_err(dev, "couldn't determine trigger-address\n");
return -ENXIO;
}
if (of_property_read_u32(np, "num-cs", &cqspi->num_chipselect))
cqspi->num_chipselect = CQSPI_MAX_CHIPSELECT;
cqspi->rclk_en = of_property_read_bool(np, "cdns,rclk-en");
if (!of_property_read_u32_array(np, "power-domains", id,
ARRAY_SIZE(id)))
cqspi->pd_dev_id = id[1];
return 0;
}
static void cqspi_controller_init(struct cqspi_st *cqspi)
{
u32 reg;
cqspi_controller_enable(cqspi, 0);
/* Configure the remap address register, no remap */
writel(0, cqspi->iobase + CQSPI_REG_REMAP);
/* Disable all interrupts. */
writel(0, cqspi->iobase + CQSPI_REG_IRQMASK);
/* Configure the SRAM split to 1:1 . */
writel(cqspi->fifo_depth / 2, cqspi->iobase + CQSPI_REG_SRAMPARTITION);
/* Load indirect trigger address. */
writel(cqspi->trigger_address,
cqspi->iobase + CQSPI_REG_INDIRECTTRIGGER);
/* Program read watermark -- 1/2 of the FIFO. */
writel(cqspi->fifo_depth * cqspi->fifo_width / 2,
cqspi->iobase + CQSPI_REG_INDIRECTRDWATERMARK);
/* Program write watermark -- 1/8 of the FIFO. */
writel(cqspi->fifo_depth * cqspi->fifo_width / 8,
cqspi->iobase + CQSPI_REG_INDIRECTWRWATERMARK);
/* Disable direct access controller */
if (!cqspi->use_direct_mode) {
reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
reg &= ~CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL;
writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
}
/* Enable DMA interface */
if (cqspi->use_dma_read) {
reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
reg |= CQSPI_REG_CONFIG_DMA_MASK;
writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
}
cqspi_controller_enable(cqspi, 1);
}
static int cqspi_request_mmap_dma(struct cqspi_st *cqspi)
{
dma_cap_mask_t mask;
dma_cap_zero(mask);
dma_cap_set(DMA_MEMCPY, mask);
cqspi->rx_chan = dma_request_chan_by_mask(&mask);
if (IS_ERR(cqspi->rx_chan)) {
int ret = PTR_ERR(cqspi->rx_chan);
cqspi->rx_chan = NULL;
return dev_err_probe(&cqspi->pdev->dev, ret, "No Rx DMA available\n");
}
init_completion(&cqspi->rx_dma_complete);
return 0;
}
static const char *cqspi_get_name(struct spi_mem *mem)
{
struct cqspi_st *cqspi = spi_controller_get_devdata(mem->spi->controller);
struct device *dev = &cqspi->pdev->dev;
return devm_kasprintf(dev, GFP_KERNEL, "%s.%d", dev_name(dev),
spi_get_chipselect(mem->spi, 0));
}
static const struct spi_controller_mem_ops cqspi_mem_ops = {
.exec_op = cqspi_exec_mem_op,
.get_name = cqspi_get_name,
.supports_op = cqspi_supports_mem_op,
};
static const struct spi_controller_mem_caps cqspi_mem_caps = {
.dtr = true,
};
static int cqspi_setup_flash(struct cqspi_st *cqspi)
{
struct platform_device *pdev = cqspi->pdev;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct cqspi_flash_pdata *f_pdata;
unsigned int cs;
int ret;
/* Get flash device data */
for_each_available_child_of_node(dev->of_node, np) {
ret = of_property_read_u32(np, "reg", &cs);
if (ret) {
dev_err(dev, "Couldn't determine chip select.\n");
of_node_put(np);
return ret;
}
if (cs >= CQSPI_MAX_CHIPSELECT) {
dev_err(dev, "Chip select %d out of range.\n", cs);
of_node_put(np);
return -EINVAL;
}
f_pdata = &cqspi->f_pdata[cs];
f_pdata->cqspi = cqspi;
f_pdata->cs = cs;
ret = cqspi_of_get_flash_pdata(pdev, f_pdata, np);
if (ret) {
of_node_put(np);
return ret;
}
}
return 0;
}
static int cqspi_jh7110_clk_init(struct platform_device *pdev, struct cqspi_st *cqspi)
{
static struct clk_bulk_data qspiclk[] = {
{ .id = "apb" },
{ .id = "ahb" },
};
int ret = 0;
ret = devm_clk_bulk_get(&pdev->dev, ARRAY_SIZE(qspiclk), qspiclk);
if (ret) {
dev_err(&pdev->dev, "%s: failed to get qspi clocks\n", __func__);
return ret;
}
cqspi->clks[CLK_QSPI_APB] = qspiclk[0].clk;
cqspi->clks[CLK_QSPI_AHB] = qspiclk[1].clk;
ret = clk_prepare_enable(cqspi->clks[CLK_QSPI_APB]);
if (ret) {
dev_err(&pdev->dev, "%s: failed to enable CLK_QSPI_APB\n", __func__);
return ret;
}
ret = clk_prepare_enable(cqspi->clks[CLK_QSPI_AHB]);
if (ret) {
dev_err(&pdev->dev, "%s: failed to enable CLK_QSPI_AHB\n", __func__);
goto disable_apb_clk;
}
cqspi->is_jh7110 = true;
return 0;
disable_apb_clk:
clk_disable_unprepare(cqspi->clks[CLK_QSPI_APB]);
return ret;
}
static void cqspi_jh7110_disable_clk(struct platform_device *pdev, struct cqspi_st *cqspi)
{
clk_disable_unprepare(cqspi->clks[CLK_QSPI_AHB]);
clk_disable_unprepare(cqspi->clks[CLK_QSPI_APB]);
}
static int cqspi_probe(struct platform_device *pdev)
{
const struct cqspi_driver_platdata *ddata;
struct reset_control *rstc, *rstc_ocp, *rstc_ref;
struct device *dev = &pdev->dev;
struct spi_controller *host;
struct resource *res_ahb;
struct cqspi_st *cqspi;
int ret;
int irq;
host = devm_spi_alloc_host(&pdev->dev, sizeof(*cqspi));
if (!host) {
dev_err(&pdev->dev, "devm_spi_alloc_host failed\n");
return -ENOMEM;
}
host->mode_bits = SPI_RX_QUAD | SPI_RX_DUAL;
host->mem_ops = &cqspi_mem_ops;
host->mem_caps = &cqspi_mem_caps;
host->dev.of_node = pdev->dev.of_node;
cqspi = spi_controller_get_devdata(host);
cqspi->pdev = pdev;
cqspi->host = host;
cqspi->is_jh7110 = false;
platform_set_drvdata(pdev, cqspi);
/* Obtain configuration from OF. */
ret = cqspi_of_get_pdata(cqspi);
if (ret) {
dev_err(dev, "Cannot get mandatory OF data.\n");
return -ENODEV;
}
/* Obtain QSPI clock. */
cqspi->clk = devm_clk_get(dev, NULL);
if (IS_ERR(cqspi->clk)) {
dev_err(dev, "Cannot claim QSPI clock.\n");
ret = PTR_ERR(cqspi->clk);
return ret;
}
/* Obtain and remap controller address. */
cqspi->iobase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cqspi->iobase)) {
dev_err(dev, "Cannot remap controller address.\n");
ret = PTR_ERR(cqspi->iobase);
return ret;
}
/* Obtain and remap AHB address. */
cqspi->ahb_base = devm_platform_get_and_ioremap_resource(pdev, 1, &res_ahb);
if (IS_ERR(cqspi->ahb_base)) {
dev_err(dev, "Cannot remap AHB address.\n");
ret = PTR_ERR(cqspi->ahb_base);
return ret;
}
cqspi->mmap_phys_base = (dma_addr_t)res_ahb->start;
cqspi->ahb_size = resource_size(res_ahb);
init_completion(&cqspi->transfer_complete);
/* Obtain IRQ line. */
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return -ENXIO;
pm_runtime_enable(dev);
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
goto probe_pm_failed;
ret = clk_prepare_enable(cqspi->clk);
if (ret) {
dev_err(dev, "Cannot enable QSPI clock.\n");
goto probe_clk_failed;
}
/* Obtain QSPI reset control */
rstc = devm_reset_control_get_optional_exclusive(dev, "qspi");
if (IS_ERR(rstc)) {
ret = PTR_ERR(rstc);
dev_err(dev, "Cannot get QSPI reset.\n");
goto probe_reset_failed;
}
rstc_ocp = devm_reset_control_get_optional_exclusive(dev, "qspi-ocp");
if (IS_ERR(rstc_ocp)) {
ret = PTR_ERR(rstc_ocp);
dev_err(dev, "Cannot get QSPI OCP reset.\n");
goto probe_reset_failed;
}
if (of_device_is_compatible(pdev->dev.of_node, "starfive,jh7110-qspi")) {
rstc_ref = devm_reset_control_get_optional_exclusive(dev, "rstc_ref");
if (IS_ERR(rstc_ref)) {
ret = PTR_ERR(rstc_ref);
dev_err(dev, "Cannot get QSPI REF reset.\n");
goto probe_reset_failed;
}
reset_control_assert(rstc_ref);
reset_control_deassert(rstc_ref);
}
reset_control_assert(rstc);
reset_control_deassert(rstc);
reset_control_assert(rstc_ocp);
reset_control_deassert(rstc_ocp);
cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk);
host->max_speed_hz = cqspi->master_ref_clk_hz;
/* write completion is supported by default */
cqspi->wr_completion = true;
ddata = of_device_get_match_data(dev);
if (ddata) {
if (ddata->quirks & CQSPI_NEEDS_WR_DELAY)
cqspi->wr_delay = 50 * DIV_ROUND_UP(NSEC_PER_SEC,
cqspi->master_ref_clk_hz);
if (ddata->hwcaps_mask & CQSPI_SUPPORTS_OCTAL)
host->mode_bits |= SPI_RX_OCTAL | SPI_TX_OCTAL;
if (!(ddata->quirks & CQSPI_DISABLE_DAC_MODE)) {
cqspi->use_direct_mode = true;
cqspi->use_direct_mode_wr = true;
}
if (ddata->quirks & CQSPI_SUPPORT_EXTERNAL_DMA)
cqspi->use_dma_read = true;
if (ddata->quirks & CQSPI_NO_SUPPORT_WR_COMPLETION)
cqspi->wr_completion = false;
if (ddata->quirks & CQSPI_SLOW_SRAM)
cqspi->slow_sram = true;
if (ddata->quirks & CQSPI_NEEDS_APB_AHB_HAZARD_WAR)
cqspi->apb_ahb_hazard = true;
if (ddata->jh7110_clk_init) {
ret = cqspi_jh7110_clk_init(pdev, cqspi);
if (ret)
goto probe_clk_failed;
}
if (of_device_is_compatible(pdev->dev.of_node,
"xlnx,versal-ospi-1.0")) {
ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (ret)
goto probe_reset_failed;
}
}
ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0,
pdev->name, cqspi);
if (ret) {
dev_err(dev, "Cannot request IRQ.\n");
goto probe_reset_failed;
}
cqspi_wait_idle(cqspi);
cqspi_controller_init(cqspi);
cqspi->current_cs = -1;
cqspi->sclk = 0;
host->num_chipselect = cqspi->num_chipselect;
ret = cqspi_setup_flash(cqspi);
if (ret) {
dev_err(dev, "failed to setup flash parameters %d\n", ret);
goto probe_setup_failed;
}
if (cqspi->use_direct_mode) {
ret = cqspi_request_mmap_dma(cqspi);
if (ret == -EPROBE_DEFER)
goto probe_setup_failed;
}
ret = spi_register_controller(host);
if (ret) {
dev_err(&pdev->dev, "failed to register SPI ctlr %d\n", ret);
goto probe_setup_failed;
}
return 0;
probe_setup_failed:
cqspi_controller_enable(cqspi, 0);
probe_reset_failed:
clk_disable_unprepare(cqspi->clk);
probe_clk_failed:
pm_runtime_put_sync(dev);
probe_pm_failed:
pm_runtime_disable(dev);
return ret;
}
static void cqspi_remove(struct platform_device *pdev)
{
struct cqspi_st *cqspi = platform_get_drvdata(pdev);
spi_unregister_controller(cqspi->host);
cqspi_controller_enable(cqspi, 0);
if (cqspi->rx_chan)
dma_release_channel(cqspi->rx_chan);
clk_disable_unprepare(cqspi->clk);
if (cqspi->is_jh7110)
cqspi_jh7110_disable_clk(pdev, cqspi);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
}
static int cqspi_suspend(struct device *dev)
{
struct cqspi_st *cqspi = dev_get_drvdata(dev);
struct spi_controller *host = cqspi->host;
int ret;
ret = spi_controller_suspend(host);
cqspi_controller_enable(cqspi, 0);
clk_disable_unprepare(cqspi->clk);
return ret;
}
static int cqspi_resume(struct device *dev)
{
struct cqspi_st *cqspi = dev_get_drvdata(dev);
struct spi_controller *host = cqspi->host;
clk_prepare_enable(cqspi->clk);
cqspi_wait_idle(cqspi);
cqspi_controller_init(cqspi);
cqspi->current_cs = -1;
cqspi->sclk = 0;
return spi_controller_resume(host);
}
static DEFINE_SIMPLE_DEV_PM_OPS(cqspi_dev_pm_ops, cqspi_suspend, cqspi_resume);
static const struct cqspi_driver_platdata cdns_qspi = {
.quirks = CQSPI_DISABLE_DAC_MODE,
};
static const struct cqspi_driver_platdata k2g_qspi = {
.quirks = CQSPI_NEEDS_WR_DELAY,
};
static const struct cqspi_driver_platdata am654_ospi = {
.hwcaps_mask = CQSPI_SUPPORTS_OCTAL,
.quirks = CQSPI_NEEDS_WR_DELAY,
};
static const struct cqspi_driver_platdata intel_lgm_qspi = {
.quirks = CQSPI_DISABLE_DAC_MODE,
};
static const struct cqspi_driver_platdata socfpga_qspi = {
.quirks = CQSPI_DISABLE_DAC_MODE
| CQSPI_NO_SUPPORT_WR_COMPLETION
| CQSPI_SLOW_SRAM,
};
static const struct cqspi_driver_platdata versal_ospi = {
.hwcaps_mask = CQSPI_SUPPORTS_OCTAL,
.quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA,
.indirect_read_dma = cqspi_versal_indirect_read_dma,
.get_dma_status = cqspi_get_versal_dma_status,
};
static const struct cqspi_driver_platdata jh7110_qspi = {
.quirks = CQSPI_DISABLE_DAC_MODE,
.jh7110_clk_init = cqspi_jh7110_clk_init,
};
static const struct cqspi_driver_platdata pensando_cdns_qspi = {
.quirks = CQSPI_NEEDS_APB_AHB_HAZARD_WAR | CQSPI_DISABLE_DAC_MODE,
};
static const struct of_device_id cqspi_dt_ids[] = {
{
.compatible = "cdns,qspi-nor",
.data = &cdns_qspi,
},
{
.compatible = "ti,k2g-qspi",
.data = &k2g_qspi,
},
{
.compatible = "ti,am654-ospi",
.data = &am654_ospi,
},
{
.compatible = "intel,lgm-qspi",
.data = &intel_lgm_qspi,
},
{
.compatible = "xlnx,versal-ospi-1.0",
.data = &versal_ospi,
},
{
.compatible = "intel,socfpga-qspi",
.data = &socfpga_qspi,
},
{
.compatible = "starfive,jh7110-qspi",
.data = &jh7110_qspi,
},
{
.compatible = "amd,pensando-elba-qspi",
.data = &pensando_cdns_qspi,
},
{ /* end of table */ }
};
MODULE_DEVICE_TABLE(of, cqspi_dt_ids);
static struct platform_driver cqspi_platform_driver = {
.probe = cqspi_probe,
.remove_new = cqspi_remove,
.driver = {
.name = CQSPI_NAME,
.pm = &cqspi_dev_pm_ops,
.of_match_table = cqspi_dt_ids,
},
};
module_platform_driver(cqspi_platform_driver);
MODULE_DESCRIPTION("Cadence QSPI Controller Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" CQSPI_NAME);
MODULE_AUTHOR("Ley Foon Tan <[email protected]>");
MODULE_AUTHOR("Graham Moore <[email protected]>");
MODULE_AUTHOR("Vadivel Murugan R <[email protected]>");
MODULE_AUTHOR("Vignesh Raghavendra <[email protected]>");
MODULE_AUTHOR("Pratyush Yadav <[email protected]>");
| linux-master | drivers/spi/spi-cadence-quadspi.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SH RSPI driver
*
* Copyright (C) 2012, 2013 Renesas Solutions Corp.
* Copyright (C) 2014 Glider bvba
*
* Based on spi-sh.c:
* Copyright (C) 2011 Renesas Solutions Corp.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/sh_dma.h>
#include <linux/spi/spi.h>
#include <linux/spi/rspi.h>
#include <linux/spinlock.h>
#define RSPI_SPCR 0x00 /* Control Register */
#define RSPI_SSLP 0x01 /* Slave Select Polarity Register */
#define RSPI_SPPCR 0x02 /* Pin Control Register */
#define RSPI_SPSR 0x03 /* Status Register */
#define RSPI_SPDR 0x04 /* Data Register */
#define RSPI_SPSCR 0x08 /* Sequence Control Register */
#define RSPI_SPSSR 0x09 /* Sequence Status Register */
#define RSPI_SPBR 0x0a /* Bit Rate Register */
#define RSPI_SPDCR 0x0b /* Data Control Register */
#define RSPI_SPCKD 0x0c /* Clock Delay Register */
#define RSPI_SSLND 0x0d /* Slave Select Negation Delay Register */
#define RSPI_SPND 0x0e /* Next-Access Delay Register */
#define RSPI_SPCR2 0x0f /* Control Register 2 (SH only) */
#define RSPI_SPCMD0 0x10 /* Command Register 0 */
#define RSPI_SPCMD1 0x12 /* Command Register 1 */
#define RSPI_SPCMD2 0x14 /* Command Register 2 */
#define RSPI_SPCMD3 0x16 /* Command Register 3 */
#define RSPI_SPCMD4 0x18 /* Command Register 4 */
#define RSPI_SPCMD5 0x1a /* Command Register 5 */
#define RSPI_SPCMD6 0x1c /* Command Register 6 */
#define RSPI_SPCMD7 0x1e /* Command Register 7 */
#define RSPI_SPCMD(i) (RSPI_SPCMD0 + (i) * 2)
#define RSPI_NUM_SPCMD 8
#define RSPI_RZ_NUM_SPCMD 4
#define QSPI_NUM_SPCMD 4
/* RSPI on RZ only */
#define RSPI_SPBFCR 0x20 /* Buffer Control Register */
#define RSPI_SPBFDR 0x22 /* Buffer Data Count Setting Register */
/* QSPI only */
#define QSPI_SPBFCR 0x18 /* Buffer Control Register */
#define QSPI_SPBDCR 0x1a /* Buffer Data Count Register */
#define QSPI_SPBMUL0 0x1c /* Transfer Data Length Multiplier Setting Register 0 */
#define QSPI_SPBMUL1 0x20 /* Transfer Data Length Multiplier Setting Register 1 */
#define QSPI_SPBMUL2 0x24 /* Transfer Data Length Multiplier Setting Register 2 */
#define QSPI_SPBMUL3 0x28 /* Transfer Data Length Multiplier Setting Register 3 */
#define QSPI_SPBMUL(i) (QSPI_SPBMUL0 + (i) * 4)
/* SPCR - Control Register */
#define SPCR_SPRIE 0x80 /* Receive Interrupt Enable */
#define SPCR_SPE 0x40 /* Function Enable */
#define SPCR_SPTIE 0x20 /* Transmit Interrupt Enable */
#define SPCR_SPEIE 0x10 /* Error Interrupt Enable */
#define SPCR_MSTR 0x08 /* Master/Slave Mode Select */
#define SPCR_MODFEN 0x04 /* Mode Fault Error Detection Enable */
/* RSPI on SH only */
#define SPCR_TXMD 0x02 /* TX Only Mode (vs. Full Duplex) */
#define SPCR_SPMS 0x01 /* 3-wire Mode (vs. 4-wire) */
/* QSPI on R-Car Gen2 only */
#define SPCR_WSWAP 0x02 /* Word Swap of read-data for DMAC */
#define SPCR_BSWAP 0x01 /* Byte Swap of read-data for DMAC */
/* SSLP - Slave Select Polarity Register */
#define SSLP_SSLP(i) BIT(i) /* SSLi Signal Polarity Setting */
/* SPPCR - Pin Control Register */
#define SPPCR_MOIFE 0x20 /* MOSI Idle Value Fixing Enable */
#define SPPCR_MOIFV 0x10 /* MOSI Idle Fixed Value */
#define SPPCR_SPOM 0x04 /* SPI Output Pin Mode (open drain) */
#define SPPCR_SPLP2 0x02 /* Loopback Mode 2 (non-inverting) */
#define SPPCR_SPLP 0x01 /* Loopback Mode (inverting) */
#define SPPCR_IO3FV 0x04 /* Single-/Dual-SPI Mode IO3 Output Fixed Value */
#define SPPCR_IO2FV 0x04 /* Single-/Dual-SPI Mode IO2 Output Fixed Value */
/* SPSR - Status Register */
#define SPSR_SPRF 0x80 /* Receive Buffer Full Flag */
#define SPSR_TEND 0x40 /* Transmit End */
#define SPSR_SPTEF 0x20 /* Transmit Buffer Empty Flag */
#define SPSR_PERF 0x08 /* Parity Error Flag */
#define SPSR_MODF 0x04 /* Mode Fault Error Flag */
#define SPSR_IDLNF 0x02 /* RSPI Idle Flag */
#define SPSR_OVRF 0x01 /* Overrun Error Flag (RSPI only) */
/* SPSCR - Sequence Control Register */
#define SPSCR_SPSLN_MASK 0x07 /* Sequence Length Specification */
/* SPSSR - Sequence Status Register */
#define SPSSR_SPECM_MASK 0x70 /* Command Error Mask */
#define SPSSR_SPCP_MASK 0x07 /* Command Pointer Mask */
/* SPDCR - Data Control Register */
#define SPDCR_TXDMY 0x80 /* Dummy Data Transmission Enable */
#define SPDCR_SPLW1 0x40 /* Access Width Specification (RZ) */
#define SPDCR_SPLW0 0x20 /* Access Width Specification (RZ) */
#define SPDCR_SPLLWORD (SPDCR_SPLW1 | SPDCR_SPLW0)
#define SPDCR_SPLWORD SPDCR_SPLW1
#define SPDCR_SPLBYTE SPDCR_SPLW0
#define SPDCR_SPLW 0x20 /* Access Width Specification (SH) */
#define SPDCR_SPRDTD 0x10 /* Receive Transmit Data Select (SH) */
#define SPDCR_SLSEL1 0x08
#define SPDCR_SLSEL0 0x04
#define SPDCR_SLSEL_MASK 0x0c /* SSL1 Output Select (SH) */
#define SPDCR_SPFC1 0x02
#define SPDCR_SPFC0 0x01
#define SPDCR_SPFC_MASK 0x03 /* Frame Count Setting (1-4) (SH) */
/* SPCKD - Clock Delay Register */
#define SPCKD_SCKDL_MASK 0x07 /* Clock Delay Setting (1-8) */
/* SSLND - Slave Select Negation Delay Register */
#define SSLND_SLNDL_MASK 0x07 /* SSL Negation Delay Setting (1-8) */
/* SPND - Next-Access Delay Register */
#define SPND_SPNDL_MASK 0x07 /* Next-Access Delay Setting (1-8) */
/* SPCR2 - Control Register 2 */
#define SPCR2_PTE 0x08 /* Parity Self-Test Enable */
#define SPCR2_SPIE 0x04 /* Idle Interrupt Enable */
#define SPCR2_SPOE 0x02 /* Odd Parity Enable (vs. Even) */
#define SPCR2_SPPE 0x01 /* Parity Enable */
/* SPCMDn - Command Registers */
#define SPCMD_SCKDEN 0x8000 /* Clock Delay Setting Enable */
#define SPCMD_SLNDEN 0x4000 /* SSL Negation Delay Setting Enable */
#define SPCMD_SPNDEN 0x2000 /* Next-Access Delay Enable */
#define SPCMD_LSBF 0x1000 /* LSB First */
#define SPCMD_SPB_MASK 0x0f00 /* Data Length Setting */
#define SPCMD_SPB_8_TO_16(bit) (((bit - 1) << 8) & SPCMD_SPB_MASK)
#define SPCMD_SPB_8BIT 0x0000 /* QSPI only */
#define SPCMD_SPB_16BIT 0x0100
#define SPCMD_SPB_20BIT 0x0000
#define SPCMD_SPB_24BIT 0x0100
#define SPCMD_SPB_32BIT 0x0200
#define SPCMD_SSLKP 0x0080 /* SSL Signal Level Keeping */
#define SPCMD_SPIMOD_MASK 0x0060 /* SPI Operating Mode (QSPI only) */
#define SPCMD_SPIMOD1 0x0040
#define SPCMD_SPIMOD0 0x0020
#define SPCMD_SPIMOD_SINGLE 0
#define SPCMD_SPIMOD_DUAL SPCMD_SPIMOD0
#define SPCMD_SPIMOD_QUAD SPCMD_SPIMOD1
#define SPCMD_SPRW 0x0010 /* SPI Read/Write Access (Dual/Quad) */
#define SPCMD_SSLA(i) ((i) << 4) /* SSL Assert Signal Setting */
#define SPCMD_BRDV_MASK 0x000c /* Bit Rate Division Setting */
#define SPCMD_BRDV(brdv) ((brdv) << 2)
#define SPCMD_CPOL 0x0002 /* Clock Polarity Setting */
#define SPCMD_CPHA 0x0001 /* Clock Phase Setting */
/* SPBFCR - Buffer Control Register */
#define SPBFCR_TXRST 0x80 /* Transmit Buffer Data Reset */
#define SPBFCR_RXRST 0x40 /* Receive Buffer Data Reset */
#define SPBFCR_TXTRG_MASK 0x30 /* Transmit Buffer Data Triggering Number */
#define SPBFCR_RXTRG_MASK 0x07 /* Receive Buffer Data Triggering Number */
/* QSPI on R-Car Gen2 */
#define SPBFCR_TXTRG_1B 0x00 /* 31 bytes (1 byte available) */
#define SPBFCR_TXTRG_32B 0x30 /* 0 byte (32 bytes available) */
#define SPBFCR_RXTRG_1B 0x00 /* 1 byte (31 bytes available) */
#define SPBFCR_RXTRG_32B 0x07 /* 32 bytes (0 byte available) */
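/*
 * The TXTRG/RXTRG values above select how full the 32-byte buffer must be
 * before SPTEF/SPRF assert: TXTRG_1B triggers as soon as one byte of
 * transmit space is free, TXTRG_32B only once the buffer is fully drained;
 * RXTRG_1B triggers after a single received byte, RXTRG_32B after 32.
 */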
#define QSPI_BUFFER_SIZE 32u
struct rspi_data {
void __iomem *addr;
u32 speed_hz;
struct spi_controller *ctlr;
struct platform_device *pdev;
wait_queue_head_t wait;
spinlock_t lock; /* Protects RMW-access to RSPI_SSLP */
struct clk *clk;
u16 spcmd;
u8 spsr;
u8 sppcr;
int rx_irq, tx_irq;
const struct spi_ops *ops;
unsigned dma_callbacked:1;
unsigned byte_access:1;
};
static void rspi_write8(const struct rspi_data *rspi, u8 data, u16 offset)
{
iowrite8(data, rspi->addr + offset);
}
static void rspi_write16(const struct rspi_data *rspi, u16 data, u16 offset)
{
iowrite16(data, rspi->addr + offset);
}
static void rspi_write32(const struct rspi_data *rspi, u32 data, u16 offset)
{
iowrite32(data, rspi->addr + offset);
}
static u8 rspi_read8(const struct rspi_data *rspi, u16 offset)
{
return ioread8(rspi->addr + offset);
}
static u16 rspi_read16(const struct rspi_data *rspi, u16 offset)
{
return ioread16(rspi->addr + offset);
}
static void rspi_write_data(const struct rspi_data *rspi, u16 data)
{
if (rspi->byte_access)
rspi_write8(rspi, data, RSPI_SPDR);
else /* 16 bit */
rspi_write16(rspi, data, RSPI_SPDR);
}
static u16 rspi_read_data(const struct rspi_data *rspi)
{
if (rspi->byte_access)
return rspi_read8(rspi, RSPI_SPDR);
else /* 16 bit */
return rspi_read16(rspi, RSPI_SPDR);
}
/* optional functions */
struct spi_ops {
int (*set_config_register)(struct rspi_data *rspi, int access_size);
int (*transfer_one)(struct spi_controller *ctlr,
struct spi_device *spi, struct spi_transfer *xfer);
u16 extra_mode_bits;
u16 min_div;
u16 max_div;
u16 flags;
u16 fifo_size;
u8 num_hw_ss;
};
static void rspi_set_rate(struct rspi_data *rspi)
{
unsigned long clksrc;
int brdv = 0, spbr;
clksrc = clk_get_rate(rspi->clk);
spbr = DIV_ROUND_UP(clksrc, 2 * rspi->speed_hz) - 1;
while (spbr > 255 && brdv < 3) {
brdv++;
spbr = DIV_ROUND_UP(spbr + 1, 2) - 1;
}
rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
rspi->spcmd |= SPCMD_BRDV(brdv);
rspi->speed_hz = DIV_ROUND_UP(clksrc, (2U << brdv) * (spbr + 1));
}
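/*
 * Worked example (illustrative values): with a 48 MHz clock source and a
 * requested 100 kHz bit rate, spbr = DIV_ROUND_UP(48000000, 200000) - 1 =
 * 239, which fits in SPBR, so brdv stays 0 and the effective rate is
 * 48000000 / (2 * 240) = 100 kHz exactly.  Only when spbr would exceed 255
 * does the loop fall back to the coarser BRDV prescaler (an extra /2, /4
 * or /8).
 */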
/*
* functions for RSPI on legacy SH
*/
static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
{
/* Sets output mode, MOSI signal, and (optionally) loopback */
rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
/* Sets transfer bit rate */
rspi_set_rate(rspi);
/* Disable dummy transmission, set 16-bit word access, 1 frame */
rspi_write8(rspi, 0, RSPI_SPDCR);
rspi->byte_access = 0;
/* Sets RSPCK, SSL, next-access delay value */
rspi_write8(rspi, 0x00, RSPI_SPCKD);
rspi_write8(rspi, 0x00, RSPI_SSLND);
rspi_write8(rspi, 0x00, RSPI_SPND);
/* Sets parity, interrupt mask */
rspi_write8(rspi, 0x00, RSPI_SPCR2);
/* Resets sequencer */
rspi_write8(rspi, 0, RSPI_SPSCR);
rspi->spcmd |= SPCMD_SPB_8_TO_16(access_size);
rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
/* Sets RSPI mode */
rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);
return 0;
}
/*
* functions for RSPI on RZ
*/
static int rspi_rz_set_config_register(struct rspi_data *rspi, int access_size)
{
/* Sets output mode, MOSI signal, and (optionally) loopback */
rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
/* Sets transfer bit rate */
rspi_set_rate(rspi);
/* Disable dummy transmission, set byte access */
rspi_write8(rspi, SPDCR_SPLBYTE, RSPI_SPDCR);
rspi->byte_access = 1;
/* Sets RSPCK, SSL, next-access delay value */
rspi_write8(rspi, 0x00, RSPI_SPCKD);
rspi_write8(rspi, 0x00, RSPI_SSLND);
rspi_write8(rspi, 0x00, RSPI_SPND);
/* Resets sequencer */
rspi_write8(rspi, 0, RSPI_SPSCR);
rspi->spcmd |= SPCMD_SPB_8_TO_16(access_size);
rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
/* Sets RSPI mode */
rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);
return 0;
}
/*
* functions for QSPI
*/
static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
{
unsigned long clksrc;
int brdv = 0, spbr;
/* Sets output mode, MOSI signal, and (optionally) loopback */
rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
/* Sets transfer bit rate */
clksrc = clk_get_rate(rspi->clk);
if (rspi->speed_hz >= clksrc) {
spbr = 0;
rspi->speed_hz = clksrc;
} else {
spbr = DIV_ROUND_UP(clksrc, 2 * rspi->speed_hz);
while (spbr > 255 && brdv < 3) {
brdv++;
spbr = DIV_ROUND_UP(spbr, 2);
}
spbr = clamp(spbr, 0, 255);
rspi->speed_hz = DIV_ROUND_UP(clksrc, (2U << brdv) * spbr);
}
rspi_write8(rspi, spbr, RSPI_SPBR);
rspi->spcmd |= SPCMD_BRDV(brdv);
/* Disable dummy transmission, set byte access */
rspi_write8(rspi, 0, RSPI_SPDCR);
rspi->byte_access = 1;
/* Sets RSPCK, SSL, next-access delay value */
rspi_write8(rspi, 0x00, RSPI_SPCKD);
rspi_write8(rspi, 0x00, RSPI_SSLND);
rspi_write8(rspi, 0x00, RSPI_SPND);
/* Data Length Setting */
if (access_size == 8)
rspi->spcmd |= SPCMD_SPB_8BIT;
else if (access_size == 16)
rspi->spcmd |= SPCMD_SPB_16BIT;
else
rspi->spcmd |= SPCMD_SPB_32BIT;
rspi->spcmd |= SPCMD_SCKDEN | SPCMD_SLNDEN | SPCMD_SPNDEN;
/* Resets transfer data length */
rspi_write32(rspi, 0, QSPI_SPBMUL0);
/* Resets transmit and receive buffer */
rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, QSPI_SPBFCR);
/* Sets buffer to allow normal operation */
rspi_write8(rspi, 0x00, QSPI_SPBFCR);
/* Resets sequencer */
rspi_write8(rspi, 0, RSPI_SPSCR);
rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
/* Sets RSPI mode */
rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);
return 0;
}
static void qspi_update(const struct rspi_data *rspi, u8 mask, u8 val, u8 reg)
{
u8 data;
data = rspi_read8(rspi, reg);
data &= ~mask;
data |= (val & mask);
rspi_write8(rspi, data, reg);
}
static unsigned int qspi_set_send_trigger(struct rspi_data *rspi,
unsigned int len)
{
unsigned int n;
n = min(len, QSPI_BUFFER_SIZE);
if (len >= QSPI_BUFFER_SIZE) {
/* sets triggering number to 32 bytes */
qspi_update(rspi, SPBFCR_TXTRG_MASK,
SPBFCR_TXTRG_32B, QSPI_SPBFCR);
} else {
/* sets triggering number to 1 byte */
qspi_update(rspi, SPBFCR_TXTRG_MASK,
SPBFCR_TXTRG_1B, QSPI_SPBFCR);
}
return n;
}
static int qspi_set_receive_trigger(struct rspi_data *rspi, unsigned int len)
{
unsigned int n;
n = min(len, QSPI_BUFFER_SIZE);
if (len >= QSPI_BUFFER_SIZE) {
/* sets triggering number to 32 bytes */
qspi_update(rspi, SPBFCR_RXTRG_MASK,
SPBFCR_RXTRG_32B, QSPI_SPBFCR);
} else {
/* sets triggering number to 1 byte */
qspi_update(rspi, SPBFCR_RXTRG_MASK,
SPBFCR_RXTRG_1B, QSPI_SPBFCR);
}
return n;
}
static void rspi_enable_irq(const struct rspi_data *rspi, u8 enable)
{
rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | enable, RSPI_SPCR);
}
static void rspi_disable_irq(const struct rspi_data *rspi, u8 disable)
{
rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~disable, RSPI_SPCR);
}
static int rspi_wait_for_interrupt(struct rspi_data *rspi, u8 wait_mask,
u8 enable_bit)
{
int ret;
rspi->spsr = rspi_read8(rspi, RSPI_SPSR);
if (rspi->spsr & wait_mask)
return 0;
rspi_enable_irq(rspi, enable_bit);
ret = wait_event_timeout(rspi->wait, rspi->spsr & wait_mask, HZ);
if (ret == 0 && !(rspi->spsr & wait_mask))
return -ETIMEDOUT;
return 0;
}
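/*
 * The interrupt handlers latch SPSR into rspi->spsr, mask the interrupt
 * source and wake the waiter, which is why the flag is re-checked in
 * rspi->spsr after a timed-out wait: a wakeup that raced with the timeout
 * is still treated as success.
 */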
static inline int rspi_wait_for_tx_empty(struct rspi_data *rspi)
{
return rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
}
static inline int rspi_wait_for_rx_full(struct rspi_data *rspi)
{
return rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE);
}
static int rspi_data_out(struct rspi_data *rspi, u8 data)
{
int error = rspi_wait_for_tx_empty(rspi);
if (error < 0) {
dev_err(&rspi->ctlr->dev, "transmit timeout\n");
return error;
}
rspi_write_data(rspi, data);
return 0;
}
static int rspi_data_in(struct rspi_data *rspi)
{
int error;
u8 data;
error = rspi_wait_for_rx_full(rspi);
if (error < 0) {
dev_err(&rspi->ctlr->dev, "receive timeout\n");
return error;
}
data = rspi_read_data(rspi);
return data;
}
static int rspi_pio_transfer(struct rspi_data *rspi, const u8 *tx, u8 *rx,
unsigned int n)
{
while (n-- > 0) {
if (tx) {
int ret = rspi_data_out(rspi, *tx++);
if (ret < 0)
return ret;
}
if (rx) {
int ret = rspi_data_in(rspi);
if (ret < 0)
return ret;
*rx++ = ret;
}
}
return 0;
}
static void rspi_dma_complete(void *arg)
{
struct rspi_data *rspi = arg;
rspi->dma_callbacked = 1;
wake_up_interruptible(&rspi->wait);
}
static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
struct sg_table *rx)
{
struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
u8 irq_mask = 0;
unsigned int other_irq = 0;
dma_cookie_t cookie;
int ret;
/* First prepare and submit the DMA request(s), as this may fail */
if (rx) {
desc_rx = dmaengine_prep_slave_sg(rspi->ctlr->dma_rx, rx->sgl,
rx->nents, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_rx) {
ret = -EAGAIN;
goto no_dma_rx;
}
desc_rx->callback = rspi_dma_complete;
desc_rx->callback_param = rspi;
cookie = dmaengine_submit(desc_rx);
if (dma_submit_error(cookie)) {
ret = cookie;
goto no_dma_rx;
}
irq_mask |= SPCR_SPRIE;
}
if (tx) {
desc_tx = dmaengine_prep_slave_sg(rspi->ctlr->dma_tx, tx->sgl,
tx->nents, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_tx) {
ret = -EAGAIN;
goto no_dma_tx;
}
if (rx) {
/* No callback */
desc_tx->callback = NULL;
} else {
desc_tx->callback = rspi_dma_complete;
desc_tx->callback_param = rspi;
}
cookie = dmaengine_submit(desc_tx);
if (dma_submit_error(cookie)) {
ret = cookie;
goto no_dma_tx;
}
irq_mask |= SPCR_SPTIE;
}
/*
* DMAC needs SPxIE, but if SPxIE is set, the IRQ routine will be
* called. So, this driver disables the CPU IRQs for the duration of the
* DMA transfer.
*/
if (tx)
disable_irq(other_irq = rspi->tx_irq);
if (rx && rspi->rx_irq != other_irq)
disable_irq(rspi->rx_irq);
rspi_enable_irq(rspi, irq_mask);
rspi->dma_callbacked = 0;
/* Now start DMA */
if (rx)
dma_async_issue_pending(rspi->ctlr->dma_rx);
if (tx)
dma_async_issue_pending(rspi->ctlr->dma_tx);
ret = wait_event_interruptible_timeout(rspi->wait,
rspi->dma_callbacked, HZ);
if (ret > 0 && rspi->dma_callbacked) {
ret = 0;
if (tx)
dmaengine_synchronize(rspi->ctlr->dma_tx);
if (rx)
dmaengine_synchronize(rspi->ctlr->dma_rx);
} else {
if (!ret) {
dev_err(&rspi->ctlr->dev, "DMA timeout\n");
ret = -ETIMEDOUT;
}
if (tx)
dmaengine_terminate_sync(rspi->ctlr->dma_tx);
if (rx)
dmaengine_terminate_sync(rspi->ctlr->dma_rx);
}
rspi_disable_irq(rspi, irq_mask);
if (tx)
enable_irq(rspi->tx_irq);
if (rx && rspi->rx_irq != other_irq)
enable_irq(rspi->rx_irq);
return ret;
no_dma_tx:
if (rx)
dmaengine_terminate_sync(rspi->ctlr->dma_rx);
no_dma_rx:
if (ret == -EAGAIN) {
dev_warn_once(&rspi->ctlr->dev,
"DMA not available, falling back to PIO\n");
}
return ret;
}
static void rspi_receive_init(const struct rspi_data *rspi)
{
u8 spsr;
spsr = rspi_read8(rspi, RSPI_SPSR);
if (spsr & SPSR_SPRF)
rspi_read_data(rspi); /* dummy read */
if (spsr & SPSR_OVRF)
rspi_write8(rspi, rspi_read8(rspi, RSPI_SPSR) & ~SPSR_OVRF,
RSPI_SPSR);
}
static void rspi_rz_receive_init(const struct rspi_data *rspi)
{
rspi_receive_init(rspi);
rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, RSPI_SPBFCR);
rspi_write8(rspi, 0, RSPI_SPBFCR);
}
static void qspi_receive_init(const struct rspi_data *rspi)
{
u8 spsr;
spsr = rspi_read8(rspi, RSPI_SPSR);
if (spsr & SPSR_SPRF)
rspi_read_data(rspi); /* dummy read */
rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, QSPI_SPBFCR);
rspi_write8(rspi, 0, QSPI_SPBFCR);
}
static bool __rspi_can_dma(const struct rspi_data *rspi,
const struct spi_transfer *xfer)
{
return xfer->len > rspi->ops->fifo_size;
}
static bool rspi_can_dma(struct spi_controller *ctlr, struct spi_device *spi,
struct spi_transfer *xfer)
{
struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
return __rspi_can_dma(rspi, xfer);
}
static int rspi_dma_check_then_transfer(struct rspi_data *rspi,
struct spi_transfer *xfer)
{
if (!rspi->ctlr->can_dma || !__rspi_can_dma(rspi, xfer))
return -EAGAIN;
/* rx_buf can be NULL on RSPI on SH in TX-only Mode */
return rspi_dma_transfer(rspi, &xfer->tx_sg,
xfer->rx_buf ? &xfer->rx_sg : NULL);
}
static int rspi_common_transfer(struct rspi_data *rspi,
struct spi_transfer *xfer)
{
int ret;
xfer->effective_speed_hz = rspi->speed_hz;
ret = rspi_dma_check_then_transfer(rspi, xfer);
if (ret != -EAGAIN)
return ret;
ret = rspi_pio_transfer(rspi, xfer->tx_buf, xfer->rx_buf, xfer->len);
if (ret < 0)
return ret;
/* Wait for the last transmission */
rspi_wait_for_tx_empty(rspi);
return 0;
}
static int rspi_transfer_one(struct spi_controller *ctlr,
struct spi_device *spi, struct spi_transfer *xfer)
{
struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
u8 spcr;
spcr = rspi_read8(rspi, RSPI_SPCR);
if (xfer->rx_buf) {
rspi_receive_init(rspi);
spcr &= ~SPCR_TXMD;
} else {
spcr |= SPCR_TXMD;
}
rspi_write8(rspi, spcr, RSPI_SPCR);
return rspi_common_transfer(rspi, xfer);
}
static int rspi_rz_transfer_one(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
rspi_rz_receive_init(rspi);
return rspi_common_transfer(rspi, xfer);
}
static int qspi_trigger_transfer_out_in(struct rspi_data *rspi, const u8 *tx,
u8 *rx, unsigned int len)
{
unsigned int i, n;
int ret;
while (len > 0) {
n = qspi_set_send_trigger(rspi, len);
qspi_set_receive_trigger(rspi, len);
ret = rspi_wait_for_tx_empty(rspi);
if (ret < 0) {
dev_err(&rspi->ctlr->dev, "transmit timeout\n");
return ret;
}
for (i = 0; i < n; i++)
rspi_write_data(rspi, *tx++);
ret = rspi_wait_for_rx_full(rspi);
if (ret < 0) {
dev_err(&rspi->ctlr->dev, "receive timeout\n");
return ret;
}
for (i = 0; i < n; i++)
*rx++ = rspi_read_data(rspi);
len -= n;
}
return 0;
}
static int qspi_transfer_out_in(struct rspi_data *rspi,
struct spi_transfer *xfer)
{
int ret;
qspi_receive_init(rspi);
ret = rspi_dma_check_then_transfer(rspi, xfer);
if (ret != -EAGAIN)
return ret;
return qspi_trigger_transfer_out_in(rspi, xfer->tx_buf,
xfer->rx_buf, xfer->len);
}
static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
{
const u8 *tx = xfer->tx_buf;
unsigned int n = xfer->len;
unsigned int i, len;
int ret;
if (rspi->ctlr->can_dma && __rspi_can_dma(rspi, xfer)) {
ret = rspi_dma_transfer(rspi, &xfer->tx_sg, NULL);
if (ret != -EAGAIN)
return ret;
}
while (n > 0) {
len = qspi_set_send_trigger(rspi, n);
ret = rspi_wait_for_tx_empty(rspi);
if (ret < 0) {
dev_err(&rspi->ctlr->dev, "transmit timeout\n");
return ret;
}
for (i = 0; i < len; i++)
rspi_write_data(rspi, *tx++);
n -= len;
}
/* Wait for the last transmission */
rspi_wait_for_tx_empty(rspi);
return 0;
}
static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer)
{
u8 *rx = xfer->rx_buf;
unsigned int n = xfer->len;
unsigned int i, len;
int ret;
if (rspi->ctlr->can_dma && __rspi_can_dma(rspi, xfer)) {
ret = rspi_dma_transfer(rspi, NULL, &xfer->rx_sg);
if (ret != -EAGAIN)
return ret;
}
while (n > 0) {
len = qspi_set_receive_trigger(rspi, n);
ret = rspi_wait_for_rx_full(rspi);
if (ret < 0) {
dev_err(&rspi->ctlr->dev, "receive timeout\n");
return ret;
}
for (i = 0; i < len; i++)
*rx++ = rspi_read_data(rspi);
n -= len;
}
return 0;
}
static int qspi_transfer_one(struct spi_controller *ctlr,
struct spi_device *spi, struct spi_transfer *xfer)
{
struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
xfer->effective_speed_hz = rspi->speed_hz;
if (spi->mode & SPI_LOOP) {
return qspi_transfer_out_in(rspi, xfer);
} else if (xfer->tx_nbits > SPI_NBITS_SINGLE) {
/* Quad or Dual SPI Write */
return qspi_transfer_out(rspi, xfer);
} else if (xfer->rx_nbits > SPI_NBITS_SINGLE) {
/* Quad or Dual SPI Read */
return qspi_transfer_in(rspi, xfer);
} else {
/* Single SPI Transfer */
return qspi_transfer_out_in(rspi, xfer);
}
}
static u16 qspi_transfer_mode(const struct spi_transfer *xfer)
{
if (xfer->tx_buf)
switch (xfer->tx_nbits) {
case SPI_NBITS_QUAD:
return SPCMD_SPIMOD_QUAD;
case SPI_NBITS_DUAL:
return SPCMD_SPIMOD_DUAL;
default:
return 0;
}
if (xfer->rx_buf)
switch (xfer->rx_nbits) {
case SPI_NBITS_QUAD:
return SPCMD_SPIMOD_QUAD | SPCMD_SPRW;
case SPI_NBITS_DUAL:
return SPCMD_SPIMOD_DUAL | SPCMD_SPRW;
default:
return 0;
}
return 0;
}
static int qspi_setup_sequencer(struct rspi_data *rspi,
const struct spi_message *msg)
{
const struct spi_transfer *xfer;
unsigned int i = 0, len = 0;
u16 current_mode = 0xffff, mode;
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
mode = qspi_transfer_mode(xfer);
if (mode == current_mode) {
len += xfer->len;
continue;
}
/* Transfer mode change */
if (i) {
/* Set transfer data length of previous transfer */
rspi_write32(rspi, len, QSPI_SPBMUL(i - 1));
}
if (i >= QSPI_NUM_SPCMD) {
dev_err(&msg->spi->dev,
"Too many different transfer modes");
return -EINVAL;
}
/* Program transfer mode for this transfer */
rspi_write16(rspi, rspi->spcmd | mode, RSPI_SPCMD(i));
current_mode = mode;
len = xfer->len;
i++;
}
if (i) {
/* Set final transfer data length and sequence length */
rspi_write32(rspi, len, QSPI_SPBMUL(i - 1));
rspi_write8(rspi, i - 1, RSPI_SPSCR);
}
return 0;
}
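/*
 * Illustrative example of the resulting programming: a quad read message
 * made of a 1-byte command and a 3-byte address (both single-bit TX)
 * followed by an N-byte quad RX payload collapses into two sequencer
 * steps: SPCMD0 = single mode with SPBMUL0 = 4, SPCMD1 = quad | SPRW with
 * SPBMUL1 = N, and RSPI_SPSCR is written with 1 to select a two-step
 * sequence.
 */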
static int rspi_setup(struct spi_device *spi)
{
struct rspi_data *rspi = spi_controller_get_devdata(spi->controller);
u8 sslp;
if (spi_get_csgpiod(spi, 0))
return 0;
pm_runtime_get_sync(&rspi->pdev->dev);
spin_lock_irq(&rspi->lock);
sslp = rspi_read8(rspi, RSPI_SSLP);
if (spi->mode & SPI_CS_HIGH)
sslp |= SSLP_SSLP(spi_get_chipselect(spi, 0));
else
sslp &= ~SSLP_SSLP(spi_get_chipselect(spi, 0));
rspi_write8(rspi, sslp, RSPI_SSLP);
spin_unlock_irq(&rspi->lock);
pm_runtime_put(&rspi->pdev->dev);
return 0;
}
static int rspi_prepare_message(struct spi_controller *ctlr,
struct spi_message *msg)
{
struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
struct spi_device *spi = msg->spi;
const struct spi_transfer *xfer;
int ret;
/*
* As the Bit Rate Register must not be changed while the device is
* active, all transfers in a message must use the same bit rate.
* In theory, the sequencer could be enabled, and each Command Register
* could divide the base bit rate by a different value.
* However, most RSPI variants do not have Transfer Data Length
* Multiplier Setting Registers, so each sequence step would be limited
* to a single word, making this feature unsuitable for large
* transfers, which would gain most from it.
*/
rspi->speed_hz = spi->max_speed_hz;
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
if (xfer->speed_hz < rspi->speed_hz)
rspi->speed_hz = xfer->speed_hz;
}
rspi->spcmd = SPCMD_SSLKP;
if (spi->mode & SPI_CPOL)
rspi->spcmd |= SPCMD_CPOL;
if (spi->mode & SPI_CPHA)
rspi->spcmd |= SPCMD_CPHA;
if (spi->mode & SPI_LSB_FIRST)
rspi->spcmd |= SPCMD_LSBF;
/* Configure slave signal to assert */
rspi->spcmd |= SPCMD_SSLA(spi_get_csgpiod(spi, 0) ? rspi->ctlr->unused_native_cs
: spi_get_chipselect(spi, 0));
/* CMOS output mode and MOSI signal from previous transfer */
rspi->sppcr = 0;
if (spi->mode & SPI_LOOP)
rspi->sppcr |= SPPCR_SPLP;
rspi->ops->set_config_register(rspi, 8);
if (msg->spi->mode &
(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)) {
/* Setup sequencer for messages with multiple transfer modes */
ret = qspi_setup_sequencer(rspi, msg);
if (ret < 0)
return ret;
}
/* Enable SPI function in master mode */
rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_SPE, RSPI_SPCR);
return 0;
}
static int rspi_unprepare_message(struct spi_controller *ctlr,
struct spi_message *msg)
{
struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
/* Disable SPI function */
rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_SPE, RSPI_SPCR);
/* Reset sequencer for Single SPI Transfers */
rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
rspi_write8(rspi, 0, RSPI_SPSCR);
return 0;
}
static irqreturn_t rspi_irq_mux(int irq, void *_sr)
{
struct rspi_data *rspi = _sr;
u8 spsr;
irqreturn_t ret = IRQ_NONE;
u8 disable_irq = 0;
rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
if (spsr & SPSR_SPRF)
disable_irq |= SPCR_SPRIE;
if (spsr & SPSR_SPTEF)
disable_irq |= SPCR_SPTIE;
if (disable_irq) {
ret = IRQ_HANDLED;
rspi_disable_irq(rspi, disable_irq);
wake_up(&rspi->wait);
}
return ret;
}
static irqreturn_t rspi_irq_rx(int irq, void *_sr)
{
struct rspi_data *rspi = _sr;
u8 spsr;
rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
if (spsr & SPSR_SPRF) {
rspi_disable_irq(rspi, SPCR_SPRIE);
wake_up(&rspi->wait);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
static irqreturn_t rspi_irq_tx(int irq, void *_sr)
{
struct rspi_data *rspi = _sr;
u8 spsr;
rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
if (spsr & SPSR_SPTEF) {
rspi_disable_irq(rspi, SPCR_SPTIE);
wake_up(&rspi->wait);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
static struct dma_chan *rspi_request_dma_chan(struct device *dev,
enum dma_transfer_direction dir,
unsigned int id,
dma_addr_t port_addr)
{
dma_cap_mask_t mask;
struct dma_chan *chan;
struct dma_slave_config cfg;
int ret;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
(void *)(unsigned long)id, dev,
dir == DMA_MEM_TO_DEV ? "tx" : "rx");
if (!chan) {
dev_warn(dev, "dma_request_slave_channel_compat failed\n");
return NULL;
}
memset(&cfg, 0, sizeof(cfg));
cfg.dst_addr = port_addr + RSPI_SPDR;
cfg.src_addr = port_addr + RSPI_SPDR;
cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
cfg.direction = dir;
ret = dmaengine_slave_config(chan, &cfg);
if (ret) {
dev_warn(dev, "dmaengine_slave_config failed %d\n", ret);
dma_release_channel(chan);
return NULL;
}
return chan;
}
static int rspi_request_dma(struct device *dev, struct spi_controller *ctlr,
const struct resource *res)
{
const struct rspi_plat_data *rspi_pd = dev_get_platdata(dev);
unsigned int dma_tx_id, dma_rx_id;
if (dev->of_node) {
/* In the OF case we will get the slave IDs from the DT */
dma_tx_id = 0;
dma_rx_id = 0;
} else if (rspi_pd && rspi_pd->dma_tx_id && rspi_pd->dma_rx_id) {
dma_tx_id = rspi_pd->dma_tx_id;
dma_rx_id = rspi_pd->dma_rx_id;
} else {
/* No DMA configuration available; run without DMA (this is not an error). */
return 0;
}
ctlr->dma_tx = rspi_request_dma_chan(dev, DMA_MEM_TO_DEV, dma_tx_id,
res->start);
if (!ctlr->dma_tx)
return -ENODEV;
ctlr->dma_rx = rspi_request_dma_chan(dev, DMA_DEV_TO_MEM, dma_rx_id,
res->start);
if (!ctlr->dma_rx) {
dma_release_channel(ctlr->dma_tx);
ctlr->dma_tx = NULL;
return -ENODEV;
}
ctlr->can_dma = rspi_can_dma;
dev_info(dev, "DMA available");
return 0;
}
static void rspi_release_dma(struct spi_controller *ctlr)
{
if (ctlr->dma_tx)
dma_release_channel(ctlr->dma_tx);
if (ctlr->dma_rx)
dma_release_channel(ctlr->dma_rx);
}
static void rspi_remove(struct platform_device *pdev)
{
struct rspi_data *rspi = platform_get_drvdata(pdev);
rspi_release_dma(rspi->ctlr);
pm_runtime_disable(&pdev->dev);
}
static const struct spi_ops rspi_ops = {
.set_config_register = rspi_set_config_register,
.transfer_one = rspi_transfer_one,
.min_div = 2,
.max_div = 4096,
.flags = SPI_CONTROLLER_MUST_TX,
.fifo_size = 8,
.num_hw_ss = 2,
};
static const struct spi_ops rspi_rz_ops __maybe_unused = {
.set_config_register = rspi_rz_set_config_register,
.transfer_one = rspi_rz_transfer_one,
.min_div = 2,
.max_div = 4096,
.flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX,
.fifo_size = 8, /* 8 for TX, 32 for RX */
.num_hw_ss = 1,
};
static const struct spi_ops qspi_ops __maybe_unused = {
.set_config_register = qspi_set_config_register,
.transfer_one = qspi_transfer_one,
.extra_mode_bits = SPI_TX_DUAL | SPI_TX_QUAD |
SPI_RX_DUAL | SPI_RX_QUAD,
.min_div = 1,
.max_div = 4080,
.flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX,
.fifo_size = 32,
.num_hw_ss = 1,
};
static const struct of_device_id rspi_of_match[] __maybe_unused = {
/* RSPI on legacy SH */
{ .compatible = "renesas,rspi", .data = &rspi_ops },
/* RSPI on RZ/A1H */
{ .compatible = "renesas,rspi-rz", .data = &rspi_rz_ops },
/* QSPI on R-Car Gen2 */
{ .compatible = "renesas,qspi", .data = &qspi_ops },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rspi_of_match);
#ifdef CONFIG_OF
static void rspi_reset_control_assert(void *data)
{
reset_control_assert(data);
}
static int rspi_parse_dt(struct device *dev, struct spi_controller *ctlr)
{
struct reset_control *rstc;
u32 num_cs;
int error;
/* Parse DT properties */
error = of_property_read_u32(dev->of_node, "num-cs", &num_cs);
if (error) {
dev_err(dev, "of_property_read_u32 num-cs failed %d\n", error);
return error;
}
ctlr->num_chipselect = num_cs;
rstc = devm_reset_control_get_optional_exclusive(dev, NULL);
if (IS_ERR(rstc))
return dev_err_probe(dev, PTR_ERR(rstc),
"failed to get reset ctrl\n");
error = reset_control_deassert(rstc);
if (error) {
dev_err(dev, "failed to deassert reset %d\n", error);
return error;
}
error = devm_add_action_or_reset(dev, rspi_reset_control_assert, rstc);
if (error) {
dev_err(dev, "failed to register assert devm action, %d\n", error);
return error;
}
return 0;
}
#else
#define rspi_of_match NULL
static inline int rspi_parse_dt(struct device *dev, struct spi_controller *ctlr)
{
return -EINVAL;
}
#endif /* CONFIG_OF */
static int rspi_request_irq(struct device *dev, unsigned int irq,
irq_handler_t handler, const char *suffix,
void *dev_id)
{
const char *name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s",
dev_name(dev), suffix);
if (!name)
return -ENOMEM;
return devm_request_irq(dev, irq, handler, 0, name, dev_id);
}
static int rspi_probe(struct platform_device *pdev)
{
struct resource *res;
struct spi_controller *ctlr;
struct rspi_data *rspi;
int ret;
const struct rspi_plat_data *rspi_pd;
const struct spi_ops *ops;
unsigned long clksrc;
ctlr = spi_alloc_host(&pdev->dev, sizeof(struct rspi_data));
if (ctlr == NULL)
return -ENOMEM;
ops = of_device_get_match_data(&pdev->dev);
if (ops) {
ret = rspi_parse_dt(&pdev->dev, ctlr);
if (ret)
goto error1;
} else {
ops = (struct spi_ops *)pdev->id_entry->driver_data;
rspi_pd = dev_get_platdata(&pdev->dev);
if (rspi_pd && rspi_pd->num_chipselect)
ctlr->num_chipselect = rspi_pd->num_chipselect;
else
ctlr->num_chipselect = 2; /* default */
}
rspi = spi_controller_get_devdata(ctlr);
platform_set_drvdata(pdev, rspi);
rspi->ops = ops;
rspi->ctlr = ctlr;
rspi->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(rspi->addr)) {
ret = PTR_ERR(rspi->addr);
goto error1;
}
rspi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(rspi->clk)) {
dev_err(&pdev->dev, "cannot get clock\n");
ret = PTR_ERR(rspi->clk);
goto error1;
}
rspi->pdev = pdev;
pm_runtime_enable(&pdev->dev);
init_waitqueue_head(&rspi->wait);
spin_lock_init(&rspi->lock);
ctlr->bus_num = pdev->id;
ctlr->setup = rspi_setup;
ctlr->auto_runtime_pm = true;
ctlr->transfer_one = ops->transfer_one;
ctlr->prepare_message = rspi_prepare_message;
ctlr->unprepare_message = rspi_unprepare_message;
ctlr->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST |
SPI_LOOP | ops->extra_mode_bits;
clksrc = clk_get_rate(rspi->clk);
ctlr->min_speed_hz = DIV_ROUND_UP(clksrc, ops->max_div);
ctlr->max_speed_hz = DIV_ROUND_UP(clksrc, ops->min_div);
ctlr->flags = ops->flags;
ctlr->dev.of_node = pdev->dev.of_node;
ctlr->use_gpio_descriptors = true;
ctlr->max_native_cs = rspi->ops->num_hw_ss;
ret = platform_get_irq_byname_optional(pdev, "rx");
if (ret < 0) {
ret = platform_get_irq_byname_optional(pdev, "mux");
if (ret < 0)
ret = platform_get_irq(pdev, 0);
if (ret >= 0)
rspi->rx_irq = rspi->tx_irq = ret;
} else {
rspi->rx_irq = ret;
ret = platform_get_irq_byname(pdev, "tx");
if (ret >= 0)
rspi->tx_irq = ret;
}
if (rspi->rx_irq == rspi->tx_irq) {
/* Single multiplexed interrupt */
ret = rspi_request_irq(&pdev->dev, rspi->rx_irq, rspi_irq_mux,
"mux", rspi);
} else {
/* Multi-interrupt mode, only SPRI and SPTI are used */
ret = rspi_request_irq(&pdev->dev, rspi->rx_irq, rspi_irq_rx,
"rx", rspi);
if (!ret)
ret = rspi_request_irq(&pdev->dev, rspi->tx_irq,
rspi_irq_tx, "tx", rspi);
}
if (ret < 0) {
dev_err(&pdev->dev, "request_irq error\n");
goto error2;
}
ret = rspi_request_dma(&pdev->dev, ctlr, res);
if (ret < 0)
dev_warn(&pdev->dev, "DMA not available, using PIO\n");
ret = devm_spi_register_controller(&pdev->dev, ctlr);
if (ret < 0) {
dev_err(&pdev->dev, "devm_spi_register_controller error.\n");
goto error3;
}
dev_info(&pdev->dev, "probed\n");
return 0;
error3:
rspi_release_dma(ctlr);
error2:
pm_runtime_disable(&pdev->dev);
error1:
spi_controller_put(ctlr);
return ret;
}
static const struct platform_device_id spi_driver_ids[] = {
{ "rspi", (kernel_ulong_t)&rspi_ops },
{},
};
MODULE_DEVICE_TABLE(platform, spi_driver_ids);
#ifdef CONFIG_PM_SLEEP
static int rspi_suspend(struct device *dev)
{
struct rspi_data *rspi = dev_get_drvdata(dev);
return spi_controller_suspend(rspi->ctlr);
}
static int rspi_resume(struct device *dev)
{
struct rspi_data *rspi = dev_get_drvdata(dev);
return spi_controller_resume(rspi->ctlr);
}
static SIMPLE_DEV_PM_OPS(rspi_pm_ops, rspi_suspend, rspi_resume);
#define DEV_PM_OPS &rspi_pm_ops
#else
#define DEV_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP */
static struct platform_driver rspi_driver = {
.probe = rspi_probe,
.remove_new = rspi_remove,
.id_table = spi_driver_ids,
.driver = {
.name = "renesas_spi",
.pm = DEV_PM_OPS,
.of_match_table = of_match_ptr(rspi_of_match),
},
};
module_platform_driver(rspi_driver);
MODULE_DESCRIPTION("Renesas RSPI bus driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Yoshihiro Shimoda");
| linux-master | drivers/spi/spi-rspi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Driver for the Diolan DLN-2 USB-SPI adapter
*
* Copyright (c) 2014 Intel Corporation
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/mfd/dln2.h>
#include <linux/spi/spi.h>
#include <linux/pm_runtime.h>
#include <asm/unaligned.h>
#define DLN2_SPI_MODULE_ID 0x02
#define DLN2_SPI_CMD(cmd) DLN2_CMD(cmd, DLN2_SPI_MODULE_ID)
/* SPI commands */
#define DLN2_SPI_GET_PORT_COUNT DLN2_SPI_CMD(0x00)
#define DLN2_SPI_ENABLE DLN2_SPI_CMD(0x11)
#define DLN2_SPI_DISABLE DLN2_SPI_CMD(0x12)
#define DLN2_SPI_IS_ENABLED DLN2_SPI_CMD(0x13)
#define DLN2_SPI_SET_MODE DLN2_SPI_CMD(0x14)
#define DLN2_SPI_GET_MODE DLN2_SPI_CMD(0x15)
#define DLN2_SPI_SET_FRAME_SIZE DLN2_SPI_CMD(0x16)
#define DLN2_SPI_GET_FRAME_SIZE DLN2_SPI_CMD(0x17)
#define DLN2_SPI_SET_FREQUENCY DLN2_SPI_CMD(0x18)
#define DLN2_SPI_GET_FREQUENCY DLN2_SPI_CMD(0x19)
#define DLN2_SPI_READ_WRITE DLN2_SPI_CMD(0x1A)
#define DLN2_SPI_READ DLN2_SPI_CMD(0x1B)
#define DLN2_SPI_WRITE DLN2_SPI_CMD(0x1C)
#define DLN2_SPI_SET_DELAY_BETWEEN_SS DLN2_SPI_CMD(0x20)
#define DLN2_SPI_GET_DELAY_BETWEEN_SS DLN2_SPI_CMD(0x21)
#define DLN2_SPI_SET_DELAY_AFTER_SS DLN2_SPI_CMD(0x22)
#define DLN2_SPI_GET_DELAY_AFTER_SS DLN2_SPI_CMD(0x23)
#define DLN2_SPI_SET_DELAY_BETWEEN_FRAMES DLN2_SPI_CMD(0x24)
#define DLN2_SPI_GET_DELAY_BETWEEN_FRAMES DLN2_SPI_CMD(0x25)
#define DLN2_SPI_SET_SS DLN2_SPI_CMD(0x26)
#define DLN2_SPI_GET_SS DLN2_SPI_CMD(0x27)
#define DLN2_SPI_RELEASE_SS DLN2_SPI_CMD(0x28)
#define DLN2_SPI_SS_VARIABLE_ENABLE DLN2_SPI_CMD(0x2B)
#define DLN2_SPI_SS_VARIABLE_DISABLE DLN2_SPI_CMD(0x2C)
#define DLN2_SPI_SS_VARIABLE_IS_ENABLED DLN2_SPI_CMD(0x2D)
#define DLN2_SPI_SS_AAT_ENABLE DLN2_SPI_CMD(0x2E)
#define DLN2_SPI_SS_AAT_DISABLE DLN2_SPI_CMD(0x2F)
#define DLN2_SPI_SS_AAT_IS_ENABLED DLN2_SPI_CMD(0x30)
#define DLN2_SPI_SS_BETWEEN_FRAMES_ENABLE DLN2_SPI_CMD(0x31)
#define DLN2_SPI_SS_BETWEEN_FRAMES_DISABLE DLN2_SPI_CMD(0x32)
#define DLN2_SPI_SS_BETWEEN_FRAMES_IS_ENABLED DLN2_SPI_CMD(0x33)
#define DLN2_SPI_SET_CPHA DLN2_SPI_CMD(0x34)
#define DLN2_SPI_GET_CPHA DLN2_SPI_CMD(0x35)
#define DLN2_SPI_SET_CPOL DLN2_SPI_CMD(0x36)
#define DLN2_SPI_GET_CPOL DLN2_SPI_CMD(0x37)
#define DLN2_SPI_SS_MULTI_ENABLE DLN2_SPI_CMD(0x38)
#define DLN2_SPI_SS_MULTI_DISABLE DLN2_SPI_CMD(0x39)
#define DLN2_SPI_SS_MULTI_IS_ENABLED DLN2_SPI_CMD(0x3A)
#define DLN2_SPI_GET_SUPPORTED_MODES DLN2_SPI_CMD(0x40)
#define DLN2_SPI_GET_SUPPORTED_CPHA_VALUES DLN2_SPI_CMD(0x41)
#define DLN2_SPI_GET_SUPPORTED_CPOL_VALUES DLN2_SPI_CMD(0x42)
#define DLN2_SPI_GET_SUPPORTED_FRAME_SIZES DLN2_SPI_CMD(0x43)
#define DLN2_SPI_GET_SS_COUNT DLN2_SPI_CMD(0x44)
#define DLN2_SPI_GET_MIN_FREQUENCY DLN2_SPI_CMD(0x45)
#define DLN2_SPI_GET_MAX_FREQUENCY DLN2_SPI_CMD(0x46)
#define DLN2_SPI_GET_MIN_DELAY_BETWEEN_SS DLN2_SPI_CMD(0x47)
#define DLN2_SPI_GET_MAX_DELAY_BETWEEN_SS DLN2_SPI_CMD(0x48)
#define DLN2_SPI_GET_MIN_DELAY_AFTER_SS DLN2_SPI_CMD(0x49)
#define DLN2_SPI_GET_MAX_DELAY_AFTER_SS DLN2_SPI_CMD(0x4A)
#define DLN2_SPI_GET_MIN_DELAY_BETWEEN_FRAMES DLN2_SPI_CMD(0x4B)
#define DLN2_SPI_GET_MAX_DELAY_BETWEEN_FRAMES DLN2_SPI_CMD(0x4C)
#define DLN2_SPI_MAX_XFER_SIZE 256
#define DLN2_SPI_BUF_SIZE (DLN2_SPI_MAX_XFER_SIZE + 16)
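/*
 * The 16 bytes of slack on top of the maximum payload presumably leave
 * room for the small command/response header (port, size and attribute
 * fields) that shares dln2->buf with the data in the structures below.
 */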
#define DLN2_SPI_ATTR_LEAVE_SS_LOW BIT(0)
#define DLN2_TRANSFERS_WAIT_COMPLETE 1
#define DLN2_TRANSFERS_CANCEL 0
#define DLN2_RPM_AUTOSUSPEND_TIMEOUT 2000
struct dln2_spi {
struct platform_device *pdev;
struct spi_controller *host;
u8 port;
/*
* This buffer will be used mainly for read/write operations. Since
* they're quite large, we cannot use the stack. Protection is not
* needed because all SPI communication is serialized by the SPI core.
*/
void *buf;
u8 bpw;
u32 speed;
u16 mode;
u8 cs;
};
/*
* Enable/Disable SPI module. The disable command will wait for transfers to
* complete first.
*/
static int dln2_spi_enable(struct dln2_spi *dln2, bool enable)
{
u16 cmd;
struct {
u8 port;
u8 wait_for_completion;
} tx;
unsigned len = sizeof(tx);
tx.port = dln2->port;
if (enable) {
cmd = DLN2_SPI_ENABLE;
len -= sizeof(tx.wait_for_completion);
} else {
tx.wait_for_completion = DLN2_TRANSFERS_WAIT_COMPLETE;
cmd = DLN2_SPI_DISABLE;
}
return dln2_transfer_tx(dln2->pdev, cmd, &tx, len);
}
/*
* Select/unselect multiple CS lines. The selected lines will be automatically
* toggled LOW/HIGH by the board firmware during transfers, provided they're
* enabled first.
*
* Ex: cs_mask = 0x03 -> CS0 & CS1 will be selected and the next WR/RD operation
* will toggle the lines LOW/HIGH automatically.
*/
static int dln2_spi_cs_set(struct dln2_spi *dln2, u8 cs_mask)
{
struct {
u8 port;
u8 cs;
} tx;
tx.port = dln2->port;
/*
* According to Diolan docs, "a slave device can be selected by changing
* the corresponding bit value to 0". The rest must be set to 1. Hence
* the bitwise NOT in front.
*/
tx.cs = ~cs_mask;
return dln2_transfer_tx(dln2->pdev, DLN2_SPI_SET_SS, &tx, sizeof(tx));
}
/*
* Select one CS line. The other lines will be un-selected.
*/
static int dln2_spi_cs_set_one(struct dln2_spi *dln2, u8 cs)
{
return dln2_spi_cs_set(dln2, BIT(cs));
}
/*
* Enable/disable CS lines for usage. The module has to be disabled first.
*/
static int dln2_spi_cs_enable(struct dln2_spi *dln2, u8 cs_mask, bool enable)
{
struct {
u8 port;
u8 cs;
} tx;
u16 cmd;
tx.port = dln2->port;
tx.cs = cs_mask;
cmd = enable ? DLN2_SPI_SS_MULTI_ENABLE : DLN2_SPI_SS_MULTI_DISABLE;
return dln2_transfer_tx(dln2->pdev, cmd, &tx, sizeof(tx));
}
static int dln2_spi_cs_enable_all(struct dln2_spi *dln2, bool enable)
{
u8 cs_mask = GENMASK(dln2->host->num_chipselect - 1, 0);
return dln2_spi_cs_enable(dln2, cs_mask, enable);
}
static int dln2_spi_get_cs_num(struct dln2_spi *dln2, u16 *cs_num)
{
int ret;
struct {
u8 port;
} tx;
struct {
__le16 cs_count;
} rx;
unsigned rx_len = sizeof(rx);
tx.port = dln2->port;
ret = dln2_transfer(dln2->pdev, DLN2_SPI_GET_SS_COUNT, &tx, sizeof(tx),
&rx, &rx_len);
if (ret < 0)
return ret;
if (rx_len < sizeof(rx))
return -EPROTO;
*cs_num = le16_to_cpu(rx.cs_count);
dev_dbg(&dln2->pdev->dev, "cs_num = %d\n", *cs_num);
return 0;
}
static int dln2_spi_get_speed(struct dln2_spi *dln2, u16 cmd, u32 *freq)
{
int ret;
struct {
u8 port;
} tx;
struct {
__le32 speed;
} rx;
unsigned rx_len = sizeof(rx);
tx.port = dln2->port;
ret = dln2_transfer(dln2->pdev, cmd, &tx, sizeof(tx), &rx, &rx_len);
if (ret < 0)
return ret;
if (rx_len < sizeof(rx))
return -EPROTO;
*freq = le32_to_cpu(rx.speed);
return 0;
}
/*
* Get bus min/max frequencies.
*/
static int dln2_spi_get_speed_range(struct dln2_spi *dln2, u32 *fmin, u32 *fmax)
{
int ret;
ret = dln2_spi_get_speed(dln2, DLN2_SPI_GET_MIN_FREQUENCY, fmin);
if (ret < 0)
return ret;
ret = dln2_spi_get_speed(dln2, DLN2_SPI_GET_MAX_FREQUENCY, fmax);
if (ret < 0)
return ret;
dev_dbg(&dln2->pdev->dev, "freq_min = %d, freq_max = %d\n",
*fmin, *fmax);
return 0;
}
/*
* Set the bus speed. The module will automatically round down to the closest
* available frequency and returns it. The module has to be disabled first.
*/
static int dln2_spi_set_speed(struct dln2_spi *dln2, u32 speed)
{
int ret;
struct {
u8 port;
__le32 speed;
} __packed tx;
struct {
__le32 speed;
} rx;
int rx_len = sizeof(rx);
tx.port = dln2->port;
tx.speed = cpu_to_le32(speed);
ret = dln2_transfer(dln2->pdev, DLN2_SPI_SET_FREQUENCY, &tx, sizeof(tx),
&rx, &rx_len);
if (ret < 0)
return ret;
if (rx_len < sizeof(rx))
return -EPROTO;
return 0;
}
/*
* Change CPOL & CPHA. The module has to be disabled first.
*/
static int dln2_spi_set_mode(struct dln2_spi *dln2, u8 mode)
{
struct {
u8 port;
u8 mode;
} tx;
tx.port = dln2->port;
tx.mode = mode;
return dln2_transfer_tx(dln2->pdev, DLN2_SPI_SET_MODE, &tx, sizeof(tx));
}
/*
* Change frame size. The module has to be disabled first.
*/
static int dln2_spi_set_bpw(struct dln2_spi *dln2, u8 bpw)
{
struct {
u8 port;
u8 bpw;
} tx;
tx.port = dln2->port;
tx.bpw = bpw;
return dln2_transfer_tx(dln2->pdev, DLN2_SPI_SET_FRAME_SIZE,
&tx, sizeof(tx));
}
static int dln2_spi_get_supported_frame_sizes(struct dln2_spi *dln2,
u32 *bpw_mask)
{
int ret;
struct {
u8 port;
} tx;
struct {
u8 count;
u8 frame_sizes[36];
} *rx = dln2->buf;
unsigned rx_len = sizeof(*rx);
int i;
tx.port = dln2->port;
ret = dln2_transfer(dln2->pdev, DLN2_SPI_GET_SUPPORTED_FRAME_SIZES,
&tx, sizeof(tx), rx, &rx_len);
if (ret < 0)
return ret;
if (rx_len < sizeof(*rx))
return -EPROTO;
if (rx->count > ARRAY_SIZE(rx->frame_sizes))
return -EPROTO;
*bpw_mask = 0;
for (i = 0; i < rx->count; i++)
*bpw_mask |= BIT(rx->frame_sizes[i] - 1);
dev_dbg(&dln2->pdev->dev, "bpw_mask = 0x%X\n", *bpw_mask);
return 0;
}
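/*
 * Illustrative example: if the adapter reports supported frame sizes of 8
 * and 16 bits, the resulting bits_per_word mask is BIT(7) | BIT(15) =
 * 0x8080.
 */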
/*
* Copy the data to the DLN2 buffer and change the byte order to LE, as
* required by the DLN2 module. The SPI core makes sure that the data
* length is a multiple of the word size.
*/
static int dln2_spi_copy_to_buf(u8 *dln2_buf, const u8 *src, u16 len, u8 bpw)
{
#ifdef __LITTLE_ENDIAN
memcpy(dln2_buf, src, len);
#else
if (bpw <= 8) {
memcpy(dln2_buf, src, len);
} else if (bpw <= 16) {
__le16 *d = (__le16 *)dln2_buf;
u16 *s = (u16 *)src;
len = len / 2;
while (len--)
*d++ = cpu_to_le16p(s++);
} else {
__le32 *d = (__le32 *)dln2_buf;
u32 *s = (u32 *)src;
len = len / 4;
while (len--)
*d++ = cpu_to_le32p(s++);
}
#endif
return 0;
}
/*
* Copy the data from the DLN2 buffer and convert to CPU byte order, since the
* DLN2 buffer is LE ordered. The SPI core makes sure that the data length is a
* multiple of the word size. The RX dln2_buf is only 2-byte aligned, so for BE
* we have to avoid unaligned accesses in the 32-bit case.
*/
static int dln2_spi_copy_from_buf(u8 *dest, const u8 *dln2_buf, u16 len, u8 bpw)
{
#ifdef __LITTLE_ENDIAN
memcpy(dest, dln2_buf, len);
#else
if (bpw <= 8) {
memcpy(dest, dln2_buf, len);
} else if (bpw <= 16) {
u16 *d = (u16 *)dest;
__le16 *s = (__le16 *)dln2_buf;
len = len / 2;
while (len--)
*d++ = le16_to_cpup(s++);
} else {
u32 *d = (u32 *)dest;
__le32 *s = (__le32 *)dln2_buf;
len = len / 4;
while (len--)
*d++ = get_unaligned_le32(s++);
}
#endif
return 0;
}
/*
* Perform one write operation.
*/
static int dln2_spi_write_one(struct dln2_spi *dln2, const u8 *data,
u16 data_len, u8 attr)
{
struct {
u8 port;
__le16 size;
u8 attr;
u8 buf[DLN2_SPI_MAX_XFER_SIZE];
} __packed *tx = dln2->buf;
unsigned tx_len;
BUILD_BUG_ON(sizeof(*tx) > DLN2_SPI_BUF_SIZE);
if (data_len > DLN2_SPI_MAX_XFER_SIZE)
return -EINVAL;
tx->port = dln2->port;
tx->size = cpu_to_le16(data_len);
tx->attr = attr;
dln2_spi_copy_to_buf(tx->buf, data, data_len, dln2->bpw);
tx_len = sizeof(*tx) + data_len - DLN2_SPI_MAX_XFER_SIZE;
return dln2_transfer_tx(dln2->pdev, DLN2_SPI_WRITE, tx, tx_len);
}
/*
* Perform one read operation.
*/
static int dln2_spi_read_one(struct dln2_spi *dln2, u8 *data,
u16 data_len, u8 attr)
{
int ret;
struct {
u8 port;
__le16 size;
u8 attr;
} __packed tx;
struct {
__le16 size;
u8 buf[DLN2_SPI_MAX_XFER_SIZE];
} __packed *rx = dln2->buf;
unsigned rx_len = sizeof(*rx);
BUILD_BUG_ON(sizeof(*rx) > DLN2_SPI_BUF_SIZE);
if (data_len > DLN2_SPI_MAX_XFER_SIZE)
return -EINVAL;
tx.port = dln2->port;
tx.size = cpu_to_le16(data_len);
tx.attr = attr;
ret = dln2_transfer(dln2->pdev, DLN2_SPI_READ, &tx, sizeof(tx),
rx, &rx_len);
if (ret < 0)
return ret;
if (rx_len < sizeof(rx->size) + data_len)
return -EPROTO;
if (le16_to_cpu(rx->size) != data_len)
return -EPROTO;
dln2_spi_copy_from_buf(data, rx->buf, data_len, dln2->bpw);
return 0;
}
/*
* Perform one write & read operation.
*/
static int dln2_spi_read_write_one(struct dln2_spi *dln2, const u8 *tx_data,
u8 *rx_data, u16 data_len, u8 attr)
{
int ret;
struct {
u8 port;
__le16 size;
u8 attr;
u8 buf[DLN2_SPI_MAX_XFER_SIZE];
} __packed *tx;
struct {
__le16 size;
u8 buf[DLN2_SPI_MAX_XFER_SIZE];
} __packed *rx;
unsigned tx_len, rx_len;
BUILD_BUG_ON(sizeof(*tx) > DLN2_SPI_BUF_SIZE ||
sizeof(*rx) > DLN2_SPI_BUF_SIZE);
if (data_len > DLN2_SPI_MAX_XFER_SIZE)
return -EINVAL;
/*
* Since this is a pseudo full-duplex communication, we're perfectly
* safe to use the same buffer for both tx and rx. When DLN2 sends the
* response back, with the rx data, we don't need the tx buffer anymore.
*/
tx = dln2->buf;
rx = dln2->buf;
tx->port = dln2->port;
tx->size = cpu_to_le16(data_len);
tx->attr = attr;
dln2_spi_copy_to_buf(tx->buf, tx_data, data_len, dln2->bpw);
tx_len = sizeof(*tx) + data_len - DLN2_SPI_MAX_XFER_SIZE;
rx_len = sizeof(*rx);
ret = dln2_transfer(dln2->pdev, DLN2_SPI_READ_WRITE, tx, tx_len,
rx, &rx_len);
if (ret < 0)
return ret;
if (rx_len < sizeof(rx->size) + data_len)
return -EPROTO;
if (le16_to_cpu(rx->size) != data_len)
return -EPROTO;
dln2_spi_copy_from_buf(rx_data, rx->buf, data_len, dln2->bpw);
return 0;
}
/*
* Read/Write wrapper. It will automatically split an operation into multiple
* single ones due to device buffer constraints.
*/
static int dln2_spi_rdwr(struct dln2_spi *dln2, const u8 *tx_data,
u8 *rx_data, u16 data_len, u8 attr)
{
int ret;
u16 len;
u8 temp_attr;
u16 remaining = data_len;
u16 offset;
do {
if (remaining > DLN2_SPI_MAX_XFER_SIZE) {
len = DLN2_SPI_MAX_XFER_SIZE;
temp_attr = DLN2_SPI_ATTR_LEAVE_SS_LOW;
} else {
len = remaining;
temp_attr = attr;
}
offset = data_len - remaining;
if (tx_data && rx_data) {
ret = dln2_spi_read_write_one(dln2,
tx_data + offset,
rx_data + offset,
len, temp_attr);
} else if (tx_data) {
ret = dln2_spi_write_one(dln2,
tx_data + offset,
len, temp_attr);
} else if (rx_data) {
ret = dln2_spi_read_one(dln2,
rx_data + offset,
len, temp_attr);
} else {
return -EINVAL;
}
if (ret < 0)
return ret;
remaining -= len;
} while (remaining);
return 0;
}
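/*
 * Illustrative example of the splitting: a 600-byte transfer is issued as
 * 256 + 256 + 88 byte chunks; the first two are forced to leave SS low so
 * the device sees one continuous transaction, and only the final chunk
 * uses the attribute requested by the caller.
 */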
static int dln2_spi_prepare_message(struct spi_controller *host,
struct spi_message *message)
{
int ret;
struct dln2_spi *dln2 = spi_controller_get_devdata(host);
struct spi_device *spi = message->spi;
if (dln2->cs != spi_get_chipselect(spi, 0)) {
ret = dln2_spi_cs_set_one(dln2, spi_get_chipselect(spi, 0));
if (ret < 0)
return ret;
dln2->cs = spi_get_chipselect(spi, 0);
}
return 0;
}
static int dln2_spi_transfer_setup(struct dln2_spi *dln2, u32 speed,
u8 bpw, u8 mode)
{
int ret;
bool bus_setup_change;
bus_setup_change = dln2->speed != speed || dln2->mode != mode ||
dln2->bpw != bpw;
if (!bus_setup_change)
return 0;
ret = dln2_spi_enable(dln2, false);
if (ret < 0)
return ret;
if (dln2->speed != speed) {
ret = dln2_spi_set_speed(dln2, speed);
if (ret < 0)
return ret;
dln2->speed = speed;
}
if (dln2->mode != mode) {
ret = dln2_spi_set_mode(dln2, mode & 0x3);
if (ret < 0)
return ret;
dln2->mode = mode;
}
if (dln2->bpw != bpw) {
ret = dln2_spi_set_bpw(dln2, bpw);
if (ret < 0)
return ret;
dln2->bpw = bpw;
}
return dln2_spi_enable(dln2, true);
}
static int dln2_spi_transfer_one(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct dln2_spi *dln2 = spi_controller_get_devdata(host);
int status;
u8 attr = 0;
status = dln2_spi_transfer_setup(dln2, xfer->speed_hz,
xfer->bits_per_word,
spi->mode);
if (status < 0) {
dev_err(&dln2->pdev->dev, "Cannot setup transfer\n");
return status;
}
if (!xfer->cs_change && !spi_transfer_is_last(host, xfer))
attr = DLN2_SPI_ATTR_LEAVE_SS_LOW;
status = dln2_spi_rdwr(dln2, xfer->tx_buf, xfer->rx_buf,
xfer->len, attr);
if (status < 0)
dev_err(&dln2->pdev->dev, "write/read failed!\n");
return status;
}
static int dln2_spi_probe(struct platform_device *pdev)
{
struct spi_controller *host;
struct dln2_spi *dln2;
struct dln2_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct device *dev = &pdev->dev;
int ret;
host = spi_alloc_host(&pdev->dev, sizeof(*dln2));
if (!host)
return -ENOMEM;
device_set_node(&host->dev, dev_fwnode(dev));
platform_set_drvdata(pdev, host);
dln2 = spi_controller_get_devdata(host);
dln2->buf = devm_kmalloc(&pdev->dev, DLN2_SPI_BUF_SIZE, GFP_KERNEL);
if (!dln2->buf) {
ret = -ENOMEM;
goto exit_free_host;
}
dln2->host = host;
dln2->pdev = pdev;
dln2->port = pdata->port;
/* cs/mode can never be 0xff, so the first transfer will set them */
dln2->cs = 0xff;
dln2->mode = 0xff;
/* disable SPI module before continuing with the setup */
ret = dln2_spi_enable(dln2, false);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to disable SPI module\n");
goto exit_free_host;
}
ret = dln2_spi_get_cs_num(dln2, &host->num_chipselect);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to get number of CS pins\n");
goto exit_free_host;
}
ret = dln2_spi_get_speed_range(dln2,
&host->min_speed_hz,
&host->max_speed_hz);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to read bus min/max freqs\n");
goto exit_free_host;
}
ret = dln2_spi_get_supported_frame_sizes(dln2,
&host->bits_per_word_mask);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to read supported frame sizes\n");
goto exit_free_host;
}
ret = dln2_spi_cs_enable_all(dln2, true);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to enable CS pins\n");
goto exit_free_host;
}
host->bus_num = -1;
host->mode_bits = SPI_CPOL | SPI_CPHA;
host->prepare_message = dln2_spi_prepare_message;
host->transfer_one = dln2_spi_transfer_one;
host->auto_runtime_pm = true;
/* enable SPI module, we're good to go */
ret = dln2_spi_enable(dln2, true);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to enable SPI module\n");
goto exit_free_host;
}
pm_runtime_set_autosuspend_delay(&pdev->dev,
DLN2_RPM_AUTOSUSPEND_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
ret = devm_spi_register_controller(&pdev->dev, host);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register host\n");
goto exit_register;
}
return ret;
exit_register:
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
if (dln2_spi_enable(dln2, false) < 0)
dev_err(&pdev->dev, "Failed to disable SPI module\n");
exit_free_host:
spi_controller_put(host);
return ret;
}
static void dln2_spi_remove(struct platform_device *pdev)
{
struct spi_controller *host = platform_get_drvdata(pdev);
struct dln2_spi *dln2 = spi_controller_get_devdata(host);
pm_runtime_disable(&pdev->dev);
if (dln2_spi_enable(dln2, false) < 0)
dev_err(&pdev->dev, "Failed to disable SPI module\n");
}
#ifdef CONFIG_PM_SLEEP
static int dln2_spi_suspend(struct device *dev)
{
int ret;
struct spi_controller *host = dev_get_drvdata(dev);
struct dln2_spi *dln2 = spi_controller_get_devdata(host);
ret = spi_controller_suspend(host);
if (ret < 0)
return ret;
if (!pm_runtime_suspended(dev)) {
ret = dln2_spi_enable(dln2, false);
if (ret < 0)
return ret;
}
/*
* USB power may be cut off during sleep. Resetting the following
* parameters will force the board to be set up before first transfer.
*/
dln2->cs = 0xff;
dln2->speed = 0;
dln2->bpw = 0;
dln2->mode = 0xff;
return 0;
}
static int dln2_spi_resume(struct device *dev)
{
int ret;
struct spi_controller *host = dev_get_drvdata(dev);
struct dln2_spi *dln2 = spi_controller_get_devdata(host);
if (!pm_runtime_suspended(dev)) {
ret = dln2_spi_cs_enable_all(dln2, true);
if (ret < 0)
return ret;
ret = dln2_spi_enable(dln2, true);
if (ret < 0)
return ret;
}
return spi_controller_resume(host);
}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
static int dln2_spi_runtime_suspend(struct device *dev)
{
struct spi_controller *host = dev_get_drvdata(dev);
struct dln2_spi *dln2 = spi_controller_get_devdata(host);
return dln2_spi_enable(dln2, false);
}
static int dln2_spi_runtime_resume(struct device *dev)
{
struct spi_controller *host = dev_get_drvdata(dev);
struct dln2_spi *dln2 = spi_controller_get_devdata(host);
return dln2_spi_enable(dln2, true);
}
#endif /* CONFIG_PM */
static const struct dev_pm_ops dln2_spi_pm = {
SET_SYSTEM_SLEEP_PM_OPS(dln2_spi_suspend, dln2_spi_resume)
SET_RUNTIME_PM_OPS(dln2_spi_runtime_suspend,
dln2_spi_runtime_resume, NULL)
};
static struct platform_driver spi_dln2_driver = {
.driver = {
.name = "dln2-spi",
.pm = &dln2_spi_pm,
},
.probe = dln2_spi_probe,
.remove_new = dln2_spi_remove,
};
module_platform_driver(spi_dln2_driver);
MODULE_DESCRIPTION("Driver for the Diolan DLN2 SPI host interface");
MODULE_AUTHOR("Laurentiu Palcu <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:dln2-spi");
| linux-master | drivers/spi/spi-dln2.c |
/*
* MicroWire interface driver for OMAP
*
* Copyright 2003 MontaVista Software Inc. <[email protected]>
*
* Ported to 2.6 OMAP uwire interface.
* Copyright (C) 2004 Texas Instruments.
*
* Generalization patches by Juha Yrjola <[email protected]>
*
* Copyright (C) 2005 David Brownell (ported to 2.6 SPI interface)
* Copyright (C) 2006 Nokia
*
* Many updates by Imre Deak <[email protected]>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/module.h>
#include <linux/io.h>
#include <asm/mach-types.h>
#include <linux/soc/ti/omap1-io.h>
#include <linux/soc/ti/omap1-soc.h>
#include <linux/soc/ti/omap1-mux.h>
/* FIXME address is now a platform device resource,
* and irqs should show there too...
*/
#define UWIRE_BASE_PHYS 0xFFFB3000
/* uWire Registers: */
#define UWIRE_IO_SIZE 0x20
#define UWIRE_TDR 0x00
#define UWIRE_RDR 0x00
#define UWIRE_CSR 0x01
#define UWIRE_SR1 0x02
#define UWIRE_SR2 0x03
#define UWIRE_SR3 0x04
#define UWIRE_SR4 0x05
#define UWIRE_SR5 0x06
/* CSR bits */
#define RDRB (1 << 15)
#define CSRB (1 << 14)
#define START (1 << 13)
#define CS_CMD (1 << 12)
/* SR1 or SR2 bits */
#define UWIRE_READ_FALLING_EDGE 0x0001
#define UWIRE_READ_RISING_EDGE 0x0000
#define UWIRE_WRITE_FALLING_EDGE 0x0000
#define UWIRE_WRITE_RISING_EDGE 0x0002
#define UWIRE_CS_ACTIVE_LOW 0x0000
#define UWIRE_CS_ACTIVE_HIGH 0x0004
#define UWIRE_FREQ_DIV_2 0x0000
#define UWIRE_FREQ_DIV_4 0x0008
#define UWIRE_FREQ_DIV_8 0x0010
#define UWIRE_CHK_READY 0x0020
#define UWIRE_CLK_INVERTED 0x0040
struct uwire_spi {
struct spi_bitbang bitbang;
struct clk *ck;
};
struct uwire_state {
unsigned div1_idx;
};
/* REVISIT compile time constant for idx_shift? */
/*
* Or, put it in a structure which is used throughout the driver;
* that avoids having to issue two loads for each bit of static data.
*/
static unsigned int uwire_idx_shift = 2;
static void __iomem *uwire_base;
static inline void uwire_write_reg(int idx, u16 val)
{
__raw_writew(val, uwire_base + (idx << uwire_idx_shift));
}
static inline u16 uwire_read_reg(int idx)
{
return __raw_readw(uwire_base + (idx << uwire_idx_shift));
}
static inline void omap_uwire_configure_mode(u8 cs, unsigned long flags)
{
u16 w, val = 0;
int shift, reg;
if (flags & UWIRE_CLK_INVERTED)
val ^= 0x03;
val = flags & 0x3f;
if (cs & 1)
shift = 6;
else
shift = 0;
if (cs <= 1)
reg = UWIRE_SR1;
else
reg = UWIRE_SR2;
w = uwire_read_reg(reg);
w &= ~(0x3f << shift);
w |= val << shift;
uwire_write_reg(reg, w);
}
static int wait_uwire_csr_flag(u16 mask, u16 val, int might_not_catch)
{
u16 w;
int c = 0;
unsigned long max_jiffies = jiffies + HZ;
for (;;) {
w = uwire_read_reg(UWIRE_CSR);
if ((w & mask) == val)
break;
if (time_after(jiffies, max_jiffies)) {
printk(KERN_ERR "%s: timeout. reg=%#06x "
"mask=%#06x val=%#06x\n",
__func__, w, mask, val);
return -1;
}
c++;
if (might_not_catch && c > 64)
break;
}
return 0;
}
static void uwire_set_clk1_div(int div1_idx)
{
u16 w;
w = uwire_read_reg(UWIRE_SR3);
w &= ~(0x03 << 1);
w |= div1_idx << 1;
uwire_write_reg(UWIRE_SR3, w);
}
static void uwire_chipselect(struct spi_device *spi, int value)
{
struct uwire_state *ust = spi->controller_state;
u16 w;
int old_cs;
BUG_ON(wait_uwire_csr_flag(CSRB, 0, 0));
w = uwire_read_reg(UWIRE_CSR);
old_cs = (w >> 10) & 0x03;
if (value == BITBANG_CS_INACTIVE || old_cs != spi_get_chipselect(spi, 0)) {
/* Deselect this CS, or the previous CS */
w &= ~CS_CMD;
uwire_write_reg(UWIRE_CSR, w);
}
/* activate specified chipselect */
if (value == BITBANG_CS_ACTIVE) {
uwire_set_clk1_div(ust->div1_idx);
/* invert clock? */
if (spi->mode & SPI_CPOL)
uwire_write_reg(UWIRE_SR4, 1);
else
uwire_write_reg(UWIRE_SR4, 0);
w = spi_get_chipselect(spi, 0) << 10;
w |= CS_CMD;
uwire_write_reg(UWIRE_CSR, w);
}
}
static int uwire_txrx(struct spi_device *spi, struct spi_transfer *t)
{
unsigned len = t->len;
unsigned bits = t->bits_per_word;
unsigned bytes;
u16 val, w;
int status = 0;
if (!t->tx_buf && !t->rx_buf)
return 0;
w = spi_get_chipselect(spi, 0) << 10;
w |= CS_CMD;
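/* CSR word as used below: bits 0-4 = read bit count, bits 5-9 = write bit
 * count, bits 10-11 = chip select index, bit 12 = CS_CMD, bit 13 = START
 * (inferred from the register writes in this function).
 */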
if (t->tx_buf) {
const u8 *buf = t->tx_buf;
/* NOTE: DMA could be used for TX transfers */
/* write one or two bytes at a time */
while (len >= 1) {
/* tx bit 15 is first sent; we byteswap multibyte words
* (msb-first) on the way out from memory.
*/
val = *buf++;
if (bits > 8) {
bytes = 2;
val |= *buf++ << 8;
} else
bytes = 1;
val <<= 16 - bits;
#ifdef VERBOSE
pr_debug("%s: write-%d =%04x\n",
dev_name(&spi->dev), bits, val);
#endif
if (wait_uwire_csr_flag(CSRB, 0, 0))
goto eio;
uwire_write_reg(UWIRE_TDR, val);
/* start write */
val = START | w | (bits << 5);
uwire_write_reg(UWIRE_CSR, val);
len -= bytes;
/* Wait till write actually starts.
* This is needed with MPU clock 60+ MHz.
* REVISIT: we may not have time to catch it...
*/
if (wait_uwire_csr_flag(CSRB, CSRB, 1))
goto eio;
status += bytes;
}
/* REVISIT: save this for later to get more i/o overlap */
if (wait_uwire_csr_flag(CSRB, 0, 0))
goto eio;
} else if (t->rx_buf) {
u8 *buf = t->rx_buf;
/* read one or two bytes at a time */
while (len) {
if (bits > 8) {
bytes = 2;
} else
bytes = 1;
/* start read */
val = START | w | (bits << 0);
uwire_write_reg(UWIRE_CSR, val);
len -= bytes;
/* Wait till read actually starts */
(void) wait_uwire_csr_flag(CSRB, CSRB, 1);
if (wait_uwire_csr_flag(RDRB | CSRB,
RDRB, 0))
goto eio;
/* rx bit 0 is last received; multibyte words will
* be properly byteswapped on the way to memory.
*/
val = uwire_read_reg(UWIRE_RDR);
val &= (1 << bits) - 1;
*buf++ = (u8) val;
if (bytes == 2)
*buf++ = val >> 8;
status += bytes;
#ifdef VERBOSE
pr_debug("%s: read-%d =%04x\n",
dev_name(&spi->dev), bits, val);
#endif
}
}
return status;
eio:
return -EIO;
}
static int uwire_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
{
struct uwire_state *ust = spi->controller_state;
struct uwire_spi *uwire;
unsigned flags = 0;
unsigned hz;
unsigned long rate;
int div1_idx;
int div1;
int div2;
int status;
uwire = spi_master_get_devdata(spi->master);
/* mode 0..3, clock inverted separately;
* standard nCS signaling;
* don't treat DI=high as "not ready"
*/
if (spi->mode & SPI_CS_HIGH)
flags |= UWIRE_CS_ACTIVE_HIGH;
if (spi->mode & SPI_CPOL)
flags |= UWIRE_CLK_INVERTED;
switch (spi->mode & SPI_MODE_X_MASK) {
case SPI_MODE_0:
case SPI_MODE_3:
flags |= UWIRE_WRITE_FALLING_EDGE | UWIRE_READ_RISING_EDGE;
break;
case SPI_MODE_1:
case SPI_MODE_2:
flags |= UWIRE_WRITE_RISING_EDGE | UWIRE_READ_FALLING_EDGE;
break;
}
/* assume it's already enabled */
rate = clk_get_rate(uwire->ck);
if (t != NULL)
hz = t->speed_hz;
else
hz = spi->max_speed_hz;
if (!hz) {
pr_debug("%s: zero speed?\n", dev_name(&spi->dev));
status = -EINVAL;
goto done;
}
/* F_INT = mpu_xor_clk / DIV1 */
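/* Illustrative example: with rate = 12 MHz and hz = 1 MHz, div1 = 2 already
 * gives div2 = 6 (<= 8), so F_INT = 6 MHz and the DIV_8 case below yields
 * an SCK of 750 kHz.
 */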
for (div1_idx = 0; div1_idx < 4; div1_idx++) {
switch (div1_idx) {
case 0:
div1 = 2;
break;
case 1:
div1 = 4;
break;
case 2:
div1 = 7;
break;
default:
case 3:
div1 = 10;
break;
}
div2 = (rate / div1 + hz - 1) / hz;
if (div2 <= 8)
break;
}
if (div1_idx == 4) {
pr_debug("%s: lowest clock %ld, need %d\n",
dev_name(&spi->dev), rate / 10 / 8, hz);
status = -EDOM;
goto done;
}
/* we have to cache this and reset in uwire_chipselect as this is a
* global parameter and another uwire device can change it under
* us */
ust->div1_idx = div1_idx;
uwire_set_clk1_div(div1_idx);
rate /= div1;
switch (div2) {
case 0:
case 1:
case 2:
flags |= UWIRE_FREQ_DIV_2;
rate /= 2;
break;
case 3:
case 4:
flags |= UWIRE_FREQ_DIV_4;
rate /= 4;
break;
case 5:
case 6:
case 7:
case 8:
flags |= UWIRE_FREQ_DIV_8;
rate /= 8;
break;
}
omap_uwire_configure_mode(spi_get_chipselect(spi, 0), flags);
pr_debug("%s: uwire flags %02x, armxor %lu KHz, SCK %lu KHz\n",
__func__, flags,
clk_get_rate(uwire->ck) / 1000,
rate / 1000);
status = 0;
done:
return status;
}
static int uwire_setup(struct spi_device *spi)
{
struct uwire_state *ust = spi->controller_state;
bool initial_setup = false;
int status;
if (ust == NULL) {
ust = kzalloc(sizeof(*ust), GFP_KERNEL);
if (ust == NULL)
return -ENOMEM;
spi->controller_state = ust;
initial_setup = true;
}
status = uwire_setup_transfer(spi, NULL);
if (status && initial_setup)
kfree(ust);
return status;
}
static void uwire_cleanup(struct spi_device *spi)
{
kfree(spi->controller_state);
}
static void uwire_off(struct uwire_spi *uwire)
{
uwire_write_reg(UWIRE_SR3, 0);
clk_disable_unprepare(uwire->ck);
spi_master_put(uwire->bitbang.master);
}
static int uwire_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct uwire_spi *uwire;
int status;
master = spi_alloc_master(&pdev->dev, sizeof(*uwire));
if (!master)
return -ENODEV;
uwire = spi_master_get_devdata(master);
uwire_base = devm_ioremap(&pdev->dev, UWIRE_BASE_PHYS, UWIRE_IO_SIZE);
if (!uwire_base) {
dev_dbg(&pdev->dev, "can't ioremap UWIRE\n");
spi_master_put(master);
return -ENOMEM;
}
platform_set_drvdata(pdev, uwire);
uwire->ck = devm_clk_get(&pdev->dev, "fck");
if (IS_ERR(uwire->ck)) {
status = PTR_ERR(uwire->ck);
dev_dbg(&pdev->dev, "no functional clock?\n");
spi_master_put(master);
return status;
}
clk_prepare_enable(uwire->ck);
uwire_write_reg(UWIRE_SR3, 1);
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16);
master->flags = SPI_CONTROLLER_HALF_DUPLEX;
master->bus_num = 2; /* "official" */
master->num_chipselect = 4;
master->setup = uwire_setup;
master->cleanup = uwire_cleanup;
uwire->bitbang.master = master;
uwire->bitbang.chipselect = uwire_chipselect;
uwire->bitbang.setup_transfer = uwire_setup_transfer;
uwire->bitbang.txrx_bufs = uwire_txrx;
status = spi_bitbang_start(&uwire->bitbang);
if (status < 0) {
uwire_off(uwire);
}
return status;
}
static void uwire_remove(struct platform_device *pdev)
{
struct uwire_spi *uwire = platform_get_drvdata(pdev);
// FIXME remove all child devices, somewhere ...
spi_bitbang_stop(&uwire->bitbang);
uwire_off(uwire);
}
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:omap_uwire");
static struct platform_driver uwire_driver = {
.driver = {
.name = "omap_uwire",
},
.probe = uwire_probe,
.remove_new = uwire_remove,
// suspend ... unuse ck
// resume ... use ck
};
static int __init omap_uwire_init(void)
{
return platform_driver_register(&uwire_driver);
}
static void __exit omap_uwire_exit(void)
{
platform_driver_unregister(&uwire_driver);
}
subsys_initcall(omap_uwire_init);
module_exit(omap_uwire_exit);
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-omap-uwire.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2008-2014 STMicroelectronics Limited
*
* Author: Angus Clark <[email protected]>
* Patrice Chotard <[email protected]>
* Lee Jones <[email protected]>
*
* SPI master mode controller driver, used in STMicroelectronics devices.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
/* SSC registers */
#define SSC_BRG 0x000
#define SSC_TBUF 0x004
#define SSC_RBUF 0x008
#define SSC_CTL 0x00C
#define SSC_IEN 0x010
#define SSC_I2C 0x018
/* SSC Control */
#define SSC_CTL_DATA_WIDTH_9 0x8
#define SSC_CTL_DATA_WIDTH_MSK 0xf
#define SSC_CTL_BM 0xf
#define SSC_CTL_HB BIT(4)
#define SSC_CTL_PH BIT(5)
#define SSC_CTL_PO BIT(6)
#define SSC_CTL_SR BIT(7)
#define SSC_CTL_MS BIT(8)
#define SSC_CTL_EN BIT(9)
#define SSC_CTL_LPB BIT(10)
#define SSC_CTL_EN_TX_FIFO BIT(11)
#define SSC_CTL_EN_RX_FIFO BIT(12)
#define SSC_CTL_EN_CLST_RX BIT(13)
/* SSC Interrupt Enable */
#define SSC_IEN_TEEN BIT(2)
#define FIFO_SIZE 8
struct spi_st {
/* SSC SPI Controller */
void __iomem *base;
struct clk *clk;
struct device *dev;
/* SSC SPI current transaction */
const u8 *tx_ptr;
u8 *rx_ptr;
u16 bytes_per_word;
unsigned int words_remaining;
unsigned int baud;
struct completion done;
};
/* Load the TX FIFO */
static void ssc_write_tx_fifo(struct spi_st *spi_st)
{
unsigned int count, i;
uint32_t word = 0;
if (spi_st->words_remaining > FIFO_SIZE)
count = FIFO_SIZE;
else
count = spi_st->words_remaining;
for (i = 0; i < count; i++) {
if (spi_st->tx_ptr) {
if (spi_st->bytes_per_word == 1) {
word = *spi_st->tx_ptr++;
} else {
word = *spi_st->tx_ptr++;
word = *spi_st->tx_ptr++ | (word << 8);
}
}
writel_relaxed(word, spi_st->base + SSC_TBUF);
}
}
/* Read the RX FIFO */
static void ssc_read_rx_fifo(struct spi_st *spi_st)
{
unsigned int count, i;
uint32_t word = 0;
if (spi_st->words_remaining > FIFO_SIZE)
count = FIFO_SIZE;
else
count = spi_st->words_remaining;
for (i = 0; i < count; i++) {
word = readl_relaxed(spi_st->base + SSC_RBUF);
if (spi_st->rx_ptr) {
if (spi_st->bytes_per_word == 1) {
*spi_st->rx_ptr++ = (uint8_t)word;
} else {
*spi_st->rx_ptr++ = (word >> 8);
*spi_st->rx_ptr++ = word & 0xff;
}
}
}
spi_st->words_remaining -= count;
}
static int spi_st_transfer_one(struct spi_master *master,
struct spi_device *spi, struct spi_transfer *t)
{
struct spi_st *spi_st = spi_master_get_devdata(master);
uint32_t ctl = 0;
/* Setup transfer */
spi_st->tx_ptr = t->tx_buf;
spi_st->rx_ptr = t->rx_buf;
if (spi->bits_per_word > 8) {
/*
* Anything greater than 8 bits-per-word requires 2
* bytes-per-word in the RX/TX buffers
*/
spi_st->bytes_per_word = 2;
spi_st->words_remaining = t->len / 2;
} else if (spi->bits_per_word == 8 && !(t->len & 0x1)) {
/*
* If transfer is even-length, and 8 bits-per-word, then
* implement as half-length 16 bits-per-word transfer
*/
spi_st->bytes_per_word = 2;
spi_st->words_remaining = t->len / 2;
/* Set SSC_CTL to 16 bits-per-word */
ctl = readl_relaxed(spi_st->base + SSC_CTL);
writel_relaxed((ctl | 0xf), spi_st->base + SSC_CTL);
readl_relaxed(spi_st->base + SSC_RBUF);
} else {
spi_st->bytes_per_word = 1;
spi_st->words_remaining = t->len;
}
reinit_completion(&spi_st->done);
/* Start transfer by writing to the TX FIFO */
ssc_write_tx_fifo(spi_st);
writel_relaxed(SSC_IEN_TEEN, spi_st->base + SSC_IEN);
/* Wait for transfer to complete */
wait_for_completion(&spi_st->done);
/* Restore SSC_CTL if necessary */
if (ctl)
writel_relaxed(ctl, spi_st->base + SSC_CTL);
spi_finalize_current_transfer(spi->master);
return t->len;
}
/* the spi->mode bits understood by this driver: */
#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_LOOP | SPI_CS_HIGH)
static int spi_st_setup(struct spi_device *spi)
{
struct spi_st *spi_st = spi_master_get_devdata(spi->master);
u32 spi_st_clk, sscbrg, var;
u32 hz = spi->max_speed_hz;
if (!hz) {
dev_err(&spi->dev, "max_speed_hz unspecified\n");
return -EINVAL;
}
if (!spi_get_csgpiod(spi, 0)) {
dev_err(&spi->dev, "no valid gpio assigned\n");
return -EINVAL;
}
spi_st_clk = clk_get_rate(spi_st->clk);
/* Set SSC_BRG */
sscbrg = spi_st_clk / (2 * hz);
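/* e.g. (illustrative): spi_st_clk = 100 MHz, hz = 5 MHz -> sscbrg = 10,
 * so the actual baud rate is spi_st_clk / (2 * sscbrg) = 5 MHz.
 */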
if (sscbrg < 0x07 || sscbrg > BIT(16)) {
dev_err(&spi->dev,
"baudrate %d outside valid range %d\n", sscbrg, hz);
return -EINVAL;
}
spi_st->baud = spi_st_clk / (2 * sscbrg);
if (sscbrg == BIT(16)) /* 16-bit counter wraps */
sscbrg = 0x0;
writel_relaxed(sscbrg, spi_st->base + SSC_BRG);
dev_dbg(&spi->dev,
"setting baudrate:target= %u hz, actual= %u hz, sscbrg= %u\n",
hz, spi_st->baud, sscbrg);
/* Set SSC_CTL and enable SSC */
var = readl_relaxed(spi_st->base + SSC_CTL);
var |= SSC_CTL_MS;
if (spi->mode & SPI_CPOL)
var |= SSC_CTL_PO;
else
var &= ~SSC_CTL_PO;
if (spi->mode & SPI_CPHA)
var |= SSC_CTL_PH;
else
var &= ~SSC_CTL_PH;
if ((spi->mode & SPI_LSB_FIRST) == 0)
var |= SSC_CTL_HB;
else
var &= ~SSC_CTL_HB;
if (spi->mode & SPI_LOOP)
var |= SSC_CTL_LPB;
else
var &= ~SSC_CTL_LPB;
var &= ~SSC_CTL_DATA_WIDTH_MSK;
var |= (spi->bits_per_word - 1);
var |= SSC_CTL_EN_TX_FIFO | SSC_CTL_EN_RX_FIFO;
var |= SSC_CTL_EN;
writel_relaxed(var, spi_st->base + SSC_CTL);
/* Clear the status register */
readl_relaxed(spi_st->base + SSC_RBUF);
return 0;
}
/* Interrupt fired when TX shift register becomes empty */
static irqreturn_t spi_st_irq(int irq, void *dev_id)
{
struct spi_st *spi_st = (struct spi_st *)dev_id;
/* Read RX FIFO */
ssc_read_rx_fifo(spi_st);
/* Fill TX FIFO */
if (spi_st->words_remaining) {
ssc_write_tx_fifo(spi_st);
} else {
/* TX/RX complete */
writel_relaxed(0x0, spi_st->base + SSC_IEN);
/*
* read SSC_IEN to ensure that this bit is set
* before re-enabling interrupt
*/
readl(spi_st->base + SSC_IEN);
complete(&spi_st->done);
}
return IRQ_HANDLED;
}
static int spi_st_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct spi_master *master;
struct spi_st *spi_st;
int irq, ret = 0;
u32 var;
master = spi_alloc_master(&pdev->dev, sizeof(*spi_st));
if (!master)
return -ENOMEM;
master->dev.of_node = np;
master->mode_bits = MODEBITS;
master->setup = spi_st_setup;
master->transfer_one = spi_st_transfer_one;
master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
master->auto_runtime_pm = true;
master->bus_num = pdev->id;
master->use_gpio_descriptors = true;
spi_st = spi_master_get_devdata(master);
spi_st->clk = devm_clk_get(&pdev->dev, "ssc");
if (IS_ERR(spi_st->clk)) {
dev_err(&pdev->dev, "Unable to request clock\n");
ret = PTR_ERR(spi_st->clk);
goto put_master;
}
ret = clk_prepare_enable(spi_st->clk);
if (ret)
goto put_master;
init_completion(&spi_st->done);
/* Get resources */
spi_st->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(spi_st->base)) {
ret = PTR_ERR(spi_st->base);
goto clk_disable;
}
/* Disable I2C and Reset SSC */
writel_relaxed(0x0, spi_st->base + SSC_I2C);
var = readw_relaxed(spi_st->base + SSC_CTL);
var |= SSC_CTL_SR;
writel_relaxed(var, spi_st->base + SSC_CTL);
udelay(1);
var = readl_relaxed(spi_st->base + SSC_CTL);
var &= ~SSC_CTL_SR;
writel_relaxed(var, spi_st->base + SSC_CTL);
/* Set SSC into slave mode before reconfiguring PIO pins */
var = readl_relaxed(spi_st->base + SSC_CTL);
var &= ~SSC_CTL_MS;
writel_relaxed(var, spi_st->base + SSC_CTL);
irq = irq_of_parse_and_map(np, 0);
if (!irq) {
dev_err(&pdev->dev, "IRQ missing or invalid\n");
ret = -EINVAL;
goto clk_disable;
}
ret = devm_request_irq(&pdev->dev, irq, spi_st_irq, 0,
pdev->name, spi_st);
if (ret) {
dev_err(&pdev->dev, "Failed to request irq %d\n", irq);
goto clk_disable;
}
/* by default the device is on */
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
platform_set_drvdata(pdev, master);
ret = devm_spi_register_master(&pdev->dev, master);
if (ret) {
dev_err(&pdev->dev, "Failed to register master\n");
goto rpm_disable;
}
return 0;
rpm_disable:
pm_runtime_disable(&pdev->dev);
clk_disable:
clk_disable_unprepare(spi_st->clk);
put_master:
spi_master_put(master);
return ret;
}
static void spi_st_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct spi_st *spi_st = spi_master_get_devdata(master);
pm_runtime_disable(&pdev->dev);
clk_disable_unprepare(spi_st->clk);
pinctrl_pm_select_sleep_state(&pdev->dev);
}
#ifdef CONFIG_PM
static int spi_st_runtime_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct spi_st *spi_st = spi_master_get_devdata(master);
writel_relaxed(0, spi_st->base + SSC_IEN);
pinctrl_pm_select_sleep_state(dev);
clk_disable_unprepare(spi_st->clk);
return 0;
}
static int spi_st_runtime_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct spi_st *spi_st = spi_master_get_devdata(master);
int ret;
ret = clk_prepare_enable(spi_st->clk);
pinctrl_pm_select_default_state(dev);
return ret;
}
#endif
#ifdef CONFIG_PM_SLEEP
static int spi_st_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
int ret;
ret = spi_master_suspend(master);
if (ret)
return ret;
return pm_runtime_force_suspend(dev);
}
static int spi_st_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
int ret;
ret = spi_master_resume(master);
if (ret)
return ret;
return pm_runtime_force_resume(dev);
}
#endif
static const struct dev_pm_ops spi_st_pm = {
SET_SYSTEM_SLEEP_PM_OPS(spi_st_suspend, spi_st_resume)
SET_RUNTIME_PM_OPS(spi_st_runtime_suspend, spi_st_runtime_resume, NULL)
};
static const struct of_device_id stm_spi_match[] = {
{ .compatible = "st,comms-ssc4-spi", },
{},
};
MODULE_DEVICE_TABLE(of, stm_spi_match);
static struct platform_driver spi_st_driver = {
.driver = {
.name = "spi-st",
.pm = &spi_st_pm,
.of_match_table = of_match_ptr(stm_spi_match),
},
.probe = spi_st_probe,
.remove_new = spi_st_remove,
};
module_platform_driver(spi_st_driver);
MODULE_AUTHOR("Patrice Chotard <[email protected]>");
MODULE_DESCRIPTION("STM SSC SPI driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/spi/spi-st-ssc4.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2011, 2012 Cavium, Inc.
*/
#include <linux/spi/spi.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/io.h>
#include "spi-cavium.h"
static void octeon_spi_wait_ready(struct octeon_spi *p)
{
union cvmx_mpi_sts mpi_sts;
unsigned int loops = 0;
do {
if (loops++)
__delay(500);
mpi_sts.u64 = readq(p->register_base + OCTEON_SPI_STS(p));
} while (mpi_sts.s.busy);
}
static int octeon_spi_do_transfer(struct octeon_spi *p,
struct spi_message *msg,
struct spi_transfer *xfer,
bool last_xfer)
{
struct spi_device *spi = msg->spi;
union cvmx_mpi_cfg mpi_cfg;
union cvmx_mpi_tx mpi_tx;
unsigned int clkdiv;
int mode;
bool cpha, cpol;
const u8 *tx_buf;
u8 *rx_buf;
int len;
int i;
mode = spi->mode;
cpha = mode & SPI_CPHA;
cpol = mode & SPI_CPOL;
clkdiv = p->sys_freq / (2 * xfer->speed_hz);
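/* e.g. (illustrative): sys_freq = 100 MHz, speed_hz = 10 MHz -> clkdiv = 5,
 * so the SPI clock runs at sys_freq / (2 * clkdiv) = 10 MHz.
 */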
mpi_cfg.u64 = 0;
mpi_cfg.s.clkdiv = clkdiv;
mpi_cfg.s.cshi = (mode & SPI_CS_HIGH) ? 1 : 0;
mpi_cfg.s.lsbfirst = (mode & SPI_LSB_FIRST) ? 1 : 0;
mpi_cfg.s.wireor = (mode & SPI_3WIRE) ? 1 : 0;
mpi_cfg.s.idlelo = cpha != cpol;
mpi_cfg.s.cslate = cpha ? 1 : 0;
mpi_cfg.s.enable = 1;
if (spi_get_chipselect(spi, 0) < 4)
p->cs_enax |= 1ull << (12 + spi_get_chipselect(spi, 0));
mpi_cfg.u64 |= p->cs_enax;
if (mpi_cfg.u64 != p->last_cfg) {
p->last_cfg = mpi_cfg.u64;
writeq(mpi_cfg.u64, p->register_base + OCTEON_SPI_CFG(p));
}
tx_buf = xfer->tx_buf;
rx_buf = xfer->rx_buf;
len = xfer->len;
while (len > OCTEON_SPI_MAX_BYTES) {
for (i = 0; i < OCTEON_SPI_MAX_BYTES; i++) {
u8 d;
if (tx_buf)
d = *tx_buf++;
else
d = 0;
writeq(d, p->register_base + OCTEON_SPI_DAT0(p) + (8 * i));
}
mpi_tx.u64 = 0;
mpi_tx.s.csid = spi_get_chipselect(spi, 0);
mpi_tx.s.leavecs = 1;
mpi_tx.s.txnum = tx_buf ? OCTEON_SPI_MAX_BYTES : 0;
mpi_tx.s.totnum = OCTEON_SPI_MAX_BYTES;
writeq(mpi_tx.u64, p->register_base + OCTEON_SPI_TX(p));
octeon_spi_wait_ready(p);
if (rx_buf)
for (i = 0; i < OCTEON_SPI_MAX_BYTES; i++) {
u64 v = readq(p->register_base + OCTEON_SPI_DAT0(p) + (8 * i));
*rx_buf++ = (u8)v;
}
len -= OCTEON_SPI_MAX_BYTES;
}
for (i = 0; i < len; i++) {
u8 d;
if (tx_buf)
d = *tx_buf++;
else
d = 0;
writeq(d, p->register_base + OCTEON_SPI_DAT0(p) + (8 * i));
}
mpi_tx.u64 = 0;
mpi_tx.s.csid = spi_get_chipselect(spi, 0);
if (last_xfer)
mpi_tx.s.leavecs = xfer->cs_change;
else
mpi_tx.s.leavecs = !xfer->cs_change;
mpi_tx.s.txnum = tx_buf ? len : 0;
mpi_tx.s.totnum = len;
writeq(mpi_tx.u64, p->register_base + OCTEON_SPI_TX(p));
octeon_spi_wait_ready(p);
if (rx_buf)
for (i = 0; i < len; i++) {
u64 v = readq(p->register_base + OCTEON_SPI_DAT0(p) + (8 * i));
*rx_buf++ = (u8)v;
}
spi_transfer_delay_exec(xfer);
return xfer->len;
}
int octeon_spi_transfer_one_message(struct spi_master *master,
struct spi_message *msg)
{
struct octeon_spi *p = spi_master_get_devdata(master);
unsigned int total_len = 0;
int status = 0;
struct spi_transfer *xfer;
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
bool last_xfer = list_is_last(&xfer->transfer_list,
&msg->transfers);
int r = octeon_spi_do_transfer(p, msg, xfer, last_xfer);
if (r < 0) {
status = r;
goto err;
}
total_len += r;
}
err:
msg->status = status;
msg->actual_length = total_len;
spi_finalize_current_message(master);
return status;
}
| linux-master | drivers/spi/spi-cavium.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Microchip PIC32 SPI controller driver.
*
* Purna Chandra Mandal <[email protected]>
* Copyright (c) 2016, Microchip Technology Inc.
*/
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_gpio.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
/* SPI controller registers */
struct pic32_spi_regs {
u32 ctrl;
u32 ctrl_clr;
u32 ctrl_set;
u32 ctrl_inv;
u32 status;
u32 status_clr;
u32 status_set;
u32 status_inv;
u32 buf;
u32 dontuse[3];
u32 baud;
u32 dontuse2[3];
u32 ctrl2;
u32 ctrl2_clr;
u32 ctrl2_set;
u32 ctrl2_inv;
};
/* Bit fields of SPI Control Register */
#define CTRL_RX_INT_SHIFT 0 /* Rx interrupt generation */
#define RX_FIFO_EMPTY 0
#define RX_FIFO_NOT_EMPTY 1 /* not empty */
#define RX_FIFO_HALF_FULL 2 /* full by half or more */
#define RX_FIFO_FULL 3 /* completely full */
#define CTRL_TX_INT_SHIFT 2 /* TX interrupt generation */
#define TX_FIFO_ALL_EMPTY 0 /* completely empty */
#define TX_FIFO_EMPTY 1 /* empty */
#define TX_FIFO_HALF_EMPTY 2 /* empty by half or more */
#define TX_FIFO_NOT_FULL 3 /* at least one empty */
#define CTRL_MSTEN BIT(5) /* enable master mode */
#define CTRL_CKP BIT(6) /* active low */
#define CTRL_CKE BIT(8) /* Tx on falling edge */
#define CTRL_SMP BIT(9) /* Rx at middle or end of tx */
#define CTRL_BPW_MASK 0x03 /* bits per word/sample */
#define CTRL_BPW_SHIFT 10
#define PIC32_BPW_8 0
#define PIC32_BPW_16 1
#define PIC32_BPW_32 2
#define CTRL_SIDL BIT(13) /* sleep when idle */
#define CTRL_ON BIT(15) /* enable macro */
#define CTRL_ENHBUF BIT(16) /* enable enhanced buffering */
#define CTRL_MCLKSEL BIT(23) /* select clock source */
#define CTRL_MSSEN BIT(28) /* macro driven /SS */
#define CTRL_FRMEN BIT(31) /* enable framing mode */
/* Bit fields of SPI Status Register */
#define STAT_RF_EMPTY BIT(5) /* RX Fifo empty */
#define STAT_RX_OV BIT(6) /* err, s/w needs to clear */
#define STAT_TX_UR BIT(8) /* UR in Framed SPI modes */
#define STAT_FRM_ERR BIT(12) /* Multiple Frame Sync pulse */
#define STAT_TF_LVL_MASK 0x1F
#define STAT_TF_LVL_SHIFT 16
#define STAT_RF_LVL_MASK 0x1F
#define STAT_RF_LVL_SHIFT 24
/* Bit fields of SPI Baud Register */
#define BAUD_MASK 0x1ff
/* Bit fields of SPI Control2 Register */
#define CTRL2_TX_UR_EN BIT(10) /* Enable int on Tx under-run */
#define CTRL2_RX_OV_EN BIT(11) /* Enable int on Rx over-run */
#define CTRL2_FRM_ERR_EN BIT(12) /* Enable frame err int */
/* Minimum DMA transfer size */
#define PIC32_DMA_LEN_MIN 64
struct pic32_spi {
dma_addr_t dma_base;
struct pic32_spi_regs __iomem *regs;
int fault_irq;
int rx_irq;
int tx_irq;
u32 fifo_n_byte; /* FIFO depth in bytes */
struct clk *clk;
struct spi_controller *host;
/* Current controller setting */
u32 speed_hz; /* spi-clk rate */
u32 mode;
u32 bits_per_word;
u32 fifo_n_elm; /* FIFO depth in words */
#define PIC32F_DMA_PREP 0 /* DMA chnls configured */
unsigned long flags;
/* Current transfer state */
struct completion xfer_done;
/* PIO transfer specific */
const void *tx;
const void *tx_end;
const void *rx;
const void *rx_end;
int len;
void (*rx_fifo)(struct pic32_spi *);
void (*tx_fifo)(struct pic32_spi *);
};
static inline void pic32_spi_enable(struct pic32_spi *pic32s)
{
writel(CTRL_ON | CTRL_SIDL, &pic32s->regs->ctrl_set);
}
static inline void pic32_spi_disable(struct pic32_spi *pic32s)
{
writel(CTRL_ON | CTRL_SIDL, &pic32s->regs->ctrl_clr);
/* avoid SPI register reads/writes on the immediately following CPU clock */
ndelay(20);
}
static void pic32_spi_set_clk_rate(struct pic32_spi *pic32s, u32 spi_ck)
{
u32 div;
/* div = clk_in / (2 * spi_ck) - 1 */
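/* e.g. (illustrative): clk_in = 50 MHz, spi_ck = 5 MHz -> div = 4, and the
 * resulting SCK is clk_in / (2 * (div + 1)) = 5 MHz.
 */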
div = DIV_ROUND_CLOSEST(clk_get_rate(pic32s->clk), 2 * spi_ck) - 1;
writel(div & BAUD_MASK, &pic32s->regs->baud);
}
static inline u32 pic32_rx_fifo_level(struct pic32_spi *pic32s)
{
u32 sr = readl(&pic32s->regs->status);
return (sr >> STAT_RF_LVL_SHIFT) & STAT_RF_LVL_MASK;
}
static inline u32 pic32_tx_fifo_level(struct pic32_spi *pic32s)
{
u32 sr = readl(&pic32s->regs->status);
return (sr >> STAT_TF_LVL_SHIFT) & STAT_TF_LVL_MASK;
}
/* Return the max entries we can fill into tx fifo */
static u32 pic32_tx_max(struct pic32_spi *pic32s, int n_bytes)
{
u32 tx_left, tx_room, rxtx_gap;
tx_left = (pic32s->tx_end - pic32s->tx) / n_bytes;
tx_room = pic32s->fifo_n_elm - pic32_tx_fifo_level(pic32s);
/*
* Another concern is the tx/rx mismatch: we thought about using
* (pic32s->fifo_n_byte - rxfl - txfl) as the tx maximum, but that
* does not cover data which has already left the tx/rx FIFOs and is
* sitting in the shift registers, so the gap is tracked from the
* software point of view instead.
*/
rxtx_gap = ((pic32s->rx_end - pic32s->rx) -
(pic32s->tx_end - pic32s->tx)) / n_bytes;
return min3(tx_left, tx_room, (u32)(pic32s->fifo_n_elm - rxtx_gap));
}
/* Return the max entries we should read out of rx fifo */
static u32 pic32_rx_max(struct pic32_spi *pic32s, int n_bytes)
{
u32 rx_left = (pic32s->rx_end - pic32s->rx) / n_bytes;
return min_t(u32, rx_left, pic32_rx_fifo_level(pic32s));
}
#define BUILD_SPI_FIFO_RW(__name, __type, __bwl) \
static void pic32_spi_rx_##__name(struct pic32_spi *pic32s) \
{ \
__type v; \
u32 mx = pic32_rx_max(pic32s, sizeof(__type)); \
for (; mx; mx--) { \
v = read##__bwl(&pic32s->regs->buf); \
if (pic32s->rx_end - pic32s->len) \
*(__type *)(pic32s->rx) = v; \
pic32s->rx += sizeof(__type); \
} \
} \
\
static void pic32_spi_tx_##__name(struct pic32_spi *pic32s) \
{ \
__type v; \
u32 mx = pic32_tx_max(pic32s, sizeof(__type)); \
for (; mx ; mx--) { \
v = (__type)~0U; \
if (pic32s->tx_end - pic32s->len) \
v = *(__type *)(pic32s->tx); \
write##__bwl(v, &pic32s->regs->buf); \
pic32s->tx += sizeof(__type); \
} \
}
BUILD_SPI_FIFO_RW(byte, u8, b);
BUILD_SPI_FIFO_RW(word, u16, w);
BUILD_SPI_FIFO_RW(dword, u32, l);
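/* The byte/word/dword FIFO helpers generated above are selected at run time
 * by pic32_spi_set_word_size() according to the transfer's bits_per_word.
 */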
static void pic32_err_stop(struct pic32_spi *pic32s, const char *msg)
{
/* disable all interrupts */
disable_irq_nosync(pic32s->fault_irq);
disable_irq_nosync(pic32s->rx_irq);
disable_irq_nosync(pic32s->tx_irq);
/* Show err message and abort xfer with err */
dev_err(&pic32s->host->dev, "%s\n", msg);
if (pic32s->host->cur_msg)
pic32s->host->cur_msg->status = -EIO;
complete(&pic32s->xfer_done);
}
static irqreturn_t pic32_spi_fault_irq(int irq, void *dev_id)
{
struct pic32_spi *pic32s = dev_id;
u32 status;
status = readl(&pic32s->regs->status);
/* Error handling */
if (status & (STAT_RX_OV | STAT_TX_UR)) {
writel(STAT_RX_OV, &pic32s->regs->status_clr);
writel(STAT_TX_UR, &pic32s->regs->status_clr);
pic32_err_stop(pic32s, "err_irq: fifo ov/ur-run\n");
return IRQ_HANDLED;
}
if (status & STAT_FRM_ERR) {
pic32_err_stop(pic32s, "err_irq: frame error");
return IRQ_HANDLED;
}
if (!pic32s->host->cur_msg) {
pic32_err_stop(pic32s, "err_irq: no mesg");
return IRQ_NONE;
}
return IRQ_NONE;
}
static irqreturn_t pic32_spi_rx_irq(int irq, void *dev_id)
{
struct pic32_spi *pic32s = dev_id;
pic32s->rx_fifo(pic32s);
/* rx complete ? */
if (pic32s->rx_end == pic32s->rx) {
/* disable all interrupts */
disable_irq_nosync(pic32s->fault_irq);
disable_irq_nosync(pic32s->rx_irq);
/* complete current xfer */
complete(&pic32s->xfer_done);
}
return IRQ_HANDLED;
}
static irqreturn_t pic32_spi_tx_irq(int irq, void *dev_id)
{
struct pic32_spi *pic32s = dev_id;
pic32s->tx_fifo(pic32s);
/* tx complete? disable tx interrupt */
if (pic32s->tx_end == pic32s->tx)
disable_irq_nosync(pic32s->tx_irq);
return IRQ_HANDLED;
}
static void pic32_spi_dma_rx_notify(void *data)
{
struct pic32_spi *pic32s = data;
complete(&pic32s->xfer_done);
}
static int pic32_spi_dma_transfer(struct pic32_spi *pic32s,
struct spi_transfer *xfer)
{
struct spi_controller *host = pic32s->host;
struct dma_async_tx_descriptor *desc_rx;
struct dma_async_tx_descriptor *desc_tx;
dma_cookie_t cookie;
int ret;
if (!host->dma_rx || !host->dma_tx)
return -ENODEV;
desc_rx = dmaengine_prep_slave_sg(host->dma_rx,
xfer->rx_sg.sgl,
xfer->rx_sg.nents,
DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_rx) {
ret = -EINVAL;
goto err_dma;
}
desc_tx = dmaengine_prep_slave_sg(host->dma_tx,
xfer->tx_sg.sgl,
xfer->tx_sg.nents,
DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_tx) {
ret = -EINVAL;
goto err_dma;
}
/* Put callback on the RX transfer, that should finish last */
desc_rx->callback = pic32_spi_dma_rx_notify;
desc_rx->callback_param = pic32s;
cookie = dmaengine_submit(desc_rx);
ret = dma_submit_error(cookie);
if (ret)
goto err_dma;
cookie = dmaengine_submit(desc_tx);
ret = dma_submit_error(cookie);
if (ret)
goto err_dma_tx;
dma_async_issue_pending(host->dma_rx);
dma_async_issue_pending(host->dma_tx);
return 0;
err_dma_tx:
dmaengine_terminate_all(host->dma_rx);
err_dma:
return ret;
}
static int pic32_spi_dma_config(struct pic32_spi *pic32s, u32 dma_width)
{
int buf_offset = offsetof(struct pic32_spi_regs, buf);
struct spi_controller *host = pic32s->host;
struct dma_slave_config cfg;
int ret;
memset(&cfg, 0, sizeof(cfg));
cfg.device_fc = true;
cfg.src_addr = pic32s->dma_base + buf_offset;
cfg.dst_addr = pic32s->dma_base + buf_offset;
cfg.src_maxburst = pic32s->fifo_n_elm / 2; /* fill one-half */
cfg.dst_maxburst = pic32s->fifo_n_elm / 2; /* drain one-half */
cfg.src_addr_width = dma_width;
cfg.dst_addr_width = dma_width;
/* tx channel */
cfg.direction = DMA_MEM_TO_DEV;
ret = dmaengine_slave_config(host->dma_tx, &cfg);
if (ret) {
dev_err(&host->dev, "tx channel setup failed\n");
return ret;
}
/* rx channel */
cfg.direction = DMA_DEV_TO_MEM;
ret = dmaengine_slave_config(host->dma_rx, &cfg);
if (ret)
dev_err(&host->dev, "rx channel setup failed\n");
return ret;
}
static int pic32_spi_set_word_size(struct pic32_spi *pic32s, u8 bits_per_word)
{
enum dma_slave_buswidth dmawidth;
u32 buswidth, v;
switch (bits_per_word) {
case 8:
pic32s->rx_fifo = pic32_spi_rx_byte;
pic32s->tx_fifo = pic32_spi_tx_byte;
buswidth = PIC32_BPW_8;
dmawidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
break;
case 16:
pic32s->rx_fifo = pic32_spi_rx_word;
pic32s->tx_fifo = pic32_spi_tx_word;
buswidth = PIC32_BPW_16;
dmawidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
break;
case 32:
pic32s->rx_fifo = pic32_spi_rx_dword;
pic32s->tx_fifo = pic32_spi_tx_dword;
buswidth = PIC32_BPW_32;
dmawidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
break;
default:
/* not supported */
return -EINVAL;
}
/* calculate maximum number of words fifos can hold */
pic32s->fifo_n_elm = DIV_ROUND_UP(pic32s->fifo_n_byte,
bits_per_word / 8);
/* set word size */
v = readl(&pic32s->regs->ctrl);
v &= ~(CTRL_BPW_MASK << CTRL_BPW_SHIFT);
v |= buswidth << CTRL_BPW_SHIFT;
writel(v, &pic32s->regs->ctrl);
/* re-configure dma width, if required */
if (test_bit(PIC32F_DMA_PREP, &pic32s->flags))
pic32_spi_dma_config(pic32s, dmawidth);
return 0;
}
static int pic32_spi_prepare_hardware(struct spi_controller *host)
{
struct pic32_spi *pic32s = spi_controller_get_devdata(host);
pic32_spi_enable(pic32s);
return 0;
}
static int pic32_spi_prepare_message(struct spi_controller *host,
struct spi_message *msg)
{
struct pic32_spi *pic32s = spi_controller_get_devdata(host);
struct spi_device *spi = msg->spi;
u32 val;
/* set device specific bits_per_word */
if (pic32s->bits_per_word != spi->bits_per_word) {
pic32_spi_set_word_size(pic32s, spi->bits_per_word);
pic32s->bits_per_word = spi->bits_per_word;
}
/* device specific speed change */
if (pic32s->speed_hz != spi->max_speed_hz) {
pic32_spi_set_clk_rate(pic32s, spi->max_speed_hz);
pic32s->speed_hz = spi->max_speed_hz;
}
/* device specific mode change */
if (pic32s->mode != spi->mode) {
val = readl(&pic32s->regs->ctrl);
/* active low */
if (spi->mode & SPI_CPOL)
val |= CTRL_CKP;
else
val &= ~CTRL_CKP;
/* tx on rising edge */
if (spi->mode & SPI_CPHA)
val &= ~CTRL_CKE;
else
val |= CTRL_CKE;
/* rx at end of tx */
val |= CTRL_SMP;
writel(val, &pic32s->regs->ctrl);
pic32s->mode = spi->mode;
}
return 0;
}
static bool pic32_spi_can_dma(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct pic32_spi *pic32s = spi_controller_get_devdata(host);
/* Skip DMA for small transfers to avoid the setup overhead. */
return (xfer->len >= PIC32_DMA_LEN_MIN) &&
test_bit(PIC32F_DMA_PREP, &pic32s->flags);
}
static int pic32_spi_one_transfer(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *transfer)
{
struct pic32_spi *pic32s;
bool dma_issued = false;
unsigned long timeout;
int ret;
pic32s = spi_controller_get_devdata(host);
/* handle transfer specific word size change */
if (transfer->bits_per_word &&
(transfer->bits_per_word != pic32s->bits_per_word)) {
ret = pic32_spi_set_word_size(pic32s, transfer->bits_per_word);
if (ret)
return ret;
pic32s->bits_per_word = transfer->bits_per_word;
}
/* handle transfer specific speed change */
if (transfer->speed_hz && (transfer->speed_hz != pic32s->speed_hz)) {
pic32_spi_set_clk_rate(pic32s, transfer->speed_hz);
pic32s->speed_hz = transfer->speed_hz;
}
reinit_completion(&pic32s->xfer_done);
/* transact by DMA mode */
if (transfer->rx_sg.nents && transfer->tx_sg.nents) {
ret = pic32_spi_dma_transfer(pic32s, transfer);
if (ret) {
dev_err(&spi->dev, "dma submit error\n");
return ret;
}
/* DMA issued */
dma_issued = true;
} else {
/* set current transfer information */
pic32s->tx = (const void *)transfer->tx_buf;
pic32s->rx = (const void *)transfer->rx_buf;
pic32s->tx_end = pic32s->tx + transfer->len;
pic32s->rx_end = pic32s->rx + transfer->len;
pic32s->len = transfer->len;
/* transact by interrupt driven PIO */
enable_irq(pic32s->fault_irq);
enable_irq(pic32s->rx_irq);
enable_irq(pic32s->tx_irq);
}
/* wait for completion */
timeout = wait_for_completion_timeout(&pic32s->xfer_done, 2 * HZ);
if (timeout == 0) {
dev_err(&spi->dev, "wait error/timedout\n");
if (dma_issued) {
dmaengine_terminate_all(host->dma_rx);
dmaengine_terminate_all(host->dma_tx);
}
ret = -ETIMEDOUT;
} else {
ret = 0;
}
return ret;
}
static int pic32_spi_unprepare_message(struct spi_controller *host,
struct spi_message *msg)
{
/* nothing to do */
return 0;
}
static int pic32_spi_unprepare_hardware(struct spi_controller *host)
{
struct pic32_spi *pic32s = spi_controller_get_devdata(host);
pic32_spi_disable(pic32s);
return 0;
}
/* This may be called multiple times by same spi dev */
static int pic32_spi_setup(struct spi_device *spi)
{
if (!spi->max_speed_hz) {
dev_err(&spi->dev, "No max speed HZ parameter\n");
return -EINVAL;
}
/* The PIC32 SPI controller can drive /CS during a transfer depending
* on the TX FIFO fill level: /CS stays asserted as long as the TX
* FIFO is non-empty and is deasserted once it drains, indicating
* completion of the ongoing transfer. This can result in
* unreliable/erroneous SPI transactions.
* To avoid that, we always handle /CS by toggling a GPIO.
*/
if (!spi_get_csgpiod(spi, 0))
return -EINVAL;
return 0;
}
static void pic32_spi_cleanup(struct spi_device *spi)
{
/* de-activate cs-gpio, gpiolib will handle inversion */
gpiod_direction_output(spi_get_csgpiod(spi, 0), 0);
}
static int pic32_spi_dma_prep(struct pic32_spi *pic32s, struct device *dev)
{
struct spi_controller *host = pic32s->host;
int ret = 0;
host->dma_rx = dma_request_chan(dev, "spi-rx");
if (IS_ERR(host->dma_rx)) {
if (PTR_ERR(host->dma_rx) == -EPROBE_DEFER)
ret = -EPROBE_DEFER;
else
dev_warn(dev, "RX channel not found.\n");
host->dma_rx = NULL;
goto out_err;
}
host->dma_tx = dma_request_chan(dev, "spi-tx");
if (IS_ERR(host->dma_tx)) {
if (PTR_ERR(host->dma_tx) == -EPROBE_DEFER)
ret = -EPROBE_DEFER;
else
dev_warn(dev, "TX channel not found.\n");
host->dma_tx = NULL;
goto out_err;
}
if (pic32_spi_dma_config(pic32s, DMA_SLAVE_BUSWIDTH_1_BYTE))
goto out_err;
/* DMA chnls allocated and prepared */
set_bit(PIC32F_DMA_PREP, &pic32s->flags);
return 0;
out_err:
if (host->dma_rx) {
dma_release_channel(host->dma_rx);
host->dma_rx = NULL;
}
if (host->dma_tx) {
dma_release_channel(host->dma_tx);
host->dma_tx = NULL;
}
return ret;
}
static void pic32_spi_dma_unprep(struct pic32_spi *pic32s)
{
if (!test_bit(PIC32F_DMA_PREP, &pic32s->flags))
return;
clear_bit(PIC32F_DMA_PREP, &pic32s->flags);
if (pic32s->host->dma_rx)
dma_release_channel(pic32s->host->dma_rx);
if (pic32s->host->dma_tx)
dma_release_channel(pic32s->host->dma_tx);
}
static void pic32_spi_hw_init(struct pic32_spi *pic32s)
{
u32 ctrl;
/* disable hardware */
pic32_spi_disable(pic32s);
ctrl = readl(&pic32s->regs->ctrl);
/* enable enhanced buffering (128-bit-deep FIFO, i.e. 16 bytes) */
ctrl |= CTRL_ENHBUF;
pic32s->fifo_n_byte = 16;
/* disable framing mode */
ctrl &= ~CTRL_FRMEN;
/* enable host mode while disabled */
ctrl |= CTRL_MSTEN;
/* set tx fifo threshold interrupt */
ctrl &= ~(0x3 << CTRL_TX_INT_SHIFT);
ctrl |= (TX_FIFO_HALF_EMPTY << CTRL_TX_INT_SHIFT);
/* set rx fifo threshold interrupt */
ctrl &= ~(0x3 << CTRL_RX_INT_SHIFT);
ctrl |= (RX_FIFO_NOT_EMPTY << CTRL_RX_INT_SHIFT);
/* select clk source */
ctrl &= ~CTRL_MCLKSEL;
/* set manual /CS mode */
ctrl &= ~CTRL_MSSEN;
writel(ctrl, &pic32s->regs->ctrl);
/* enable error reporting */
ctrl = CTRL2_TX_UR_EN | CTRL2_RX_OV_EN | CTRL2_FRM_ERR_EN;
writel(ctrl, &pic32s->regs->ctrl2_set);
}
static int pic32_spi_hw_probe(struct platform_device *pdev,
struct pic32_spi *pic32s)
{
struct resource *mem;
int ret;
pic32s->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
if (IS_ERR(pic32s->regs))
return PTR_ERR(pic32s->regs);
pic32s->dma_base = mem->start;
/* get irq resources: err-irq, rx-irq, tx-irq */
pic32s->fault_irq = platform_get_irq_byname(pdev, "fault");
if (pic32s->fault_irq < 0)
return pic32s->fault_irq;
pic32s->rx_irq = platform_get_irq_byname(pdev, "rx");
if (pic32s->rx_irq < 0)
return pic32s->rx_irq;
pic32s->tx_irq = platform_get_irq_byname(pdev, "tx");
if (pic32s->tx_irq < 0)
return pic32s->tx_irq;
/* get clock */
pic32s->clk = devm_clk_get(&pdev->dev, "mck0");
if (IS_ERR(pic32s->clk)) {
dev_err(&pdev->dev, "clk not found\n");
ret = PTR_ERR(pic32s->clk);
goto err_unmap_mem;
}
ret = clk_prepare_enable(pic32s->clk);
if (ret)
goto err_unmap_mem;
pic32_spi_hw_init(pic32s);
return 0;
err_unmap_mem:
dev_err(&pdev->dev, "%s failed, err %d\n", __func__, ret);
return ret;
}
static int pic32_spi_probe(struct platform_device *pdev)
{
struct spi_controller *host;
struct pic32_spi *pic32s;
int ret;
host = spi_alloc_host(&pdev->dev, sizeof(*pic32s));
if (!host)
return -ENOMEM;
pic32s = spi_controller_get_devdata(host);
pic32s->host = host;
ret = pic32_spi_hw_probe(pdev, pic32s);
if (ret)
goto err_host;
host->dev.of_node = pdev->dev.of_node;
host->mode_bits = SPI_MODE_3 | SPI_MODE_0 | SPI_CS_HIGH;
host->num_chipselect = 1; /* single chip-select */
host->max_speed_hz = clk_get_rate(pic32s->clk);
host->setup = pic32_spi_setup;
host->cleanup = pic32_spi_cleanup;
host->flags = SPI_CONTROLLER_MUST_TX | SPI_CONTROLLER_MUST_RX;
host->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
SPI_BPW_MASK(32);
host->transfer_one = pic32_spi_one_transfer;
host->prepare_message = pic32_spi_prepare_message;
host->unprepare_message = pic32_spi_unprepare_message;
host->prepare_transfer_hardware = pic32_spi_prepare_hardware;
host->unprepare_transfer_hardware = pic32_spi_unprepare_hardware;
host->use_gpio_descriptors = true;
/* optional DMA support */
ret = pic32_spi_dma_prep(pic32s, &pdev->dev);
if (ret)
goto err_bailout;
if (test_bit(PIC32F_DMA_PREP, &pic32s->flags))
host->can_dma = pic32_spi_can_dma;
init_completion(&pic32s->xfer_done);
pic32s->mode = -1;
/* install irq handlers (with irq-disabled) */
irq_set_status_flags(pic32s->fault_irq, IRQ_NOAUTOEN);
ret = devm_request_irq(&pdev->dev, pic32s->fault_irq,
pic32_spi_fault_irq, IRQF_NO_THREAD,
dev_name(&pdev->dev), pic32s);
if (ret < 0) {
dev_err(&pdev->dev, "request fault-irq %d\n", pic32s->rx_irq);
goto err_bailout;
}
/* receive interrupt handler */
irq_set_status_flags(pic32s->rx_irq, IRQ_NOAUTOEN);
ret = devm_request_irq(&pdev->dev, pic32s->rx_irq,
pic32_spi_rx_irq, IRQF_NO_THREAD,
dev_name(&pdev->dev), pic32s);
if (ret < 0) {
dev_err(&pdev->dev, "request rx-irq %d\n", pic32s->rx_irq);
goto err_bailout;
}
/* transmit interrupt handler */
irq_set_status_flags(pic32s->tx_irq, IRQ_NOAUTOEN);
ret = devm_request_irq(&pdev->dev, pic32s->tx_irq,
pic32_spi_tx_irq, IRQF_NO_THREAD,
dev_name(&pdev->dev), pic32s);
if (ret < 0) {
dev_err(&pdev->dev, "request tx-irq %d\n", pic32s->tx_irq);
goto err_bailout;
}
/* register host */
ret = devm_spi_register_controller(&pdev->dev, host);
if (ret) {
dev_err(&host->dev, "failed registering spi host\n");
goto err_bailout;
}
platform_set_drvdata(pdev, pic32s);
return 0;
err_bailout:
pic32_spi_dma_unprep(pic32s);
clk_disable_unprepare(pic32s->clk);
err_host:
spi_controller_put(host);
return ret;
}
static void pic32_spi_remove(struct platform_device *pdev)
{
struct pic32_spi *pic32s;
pic32s = platform_get_drvdata(pdev);
pic32_spi_disable(pic32s);
clk_disable_unprepare(pic32s->clk);
pic32_spi_dma_unprep(pic32s);
}
static const struct of_device_id pic32_spi_of_match[] = {
{.compatible = "microchip,pic32mzda-spi",},
{},
};
MODULE_DEVICE_TABLE(of, pic32_spi_of_match);
static struct platform_driver pic32_spi_driver = {
.driver = {
.name = "spi-pic32",
.of_match_table = of_match_ptr(pic32_spi_of_match),
},
.probe = pic32_spi_probe,
.remove_new = pic32_spi_remove,
};
module_platform_driver(pic32_spi_driver);
MODULE_AUTHOR("Purna Chandra Mandal <[email protected]>");
MODULE_DESCRIPTION("Microchip SPI driver for PIC32 SPI controller.");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/spi/spi-pic32.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2012 - 2014 Allwinner Tech
* Pan Nan <[email protected]>
*
* Copyright (C) 2014 Maxime Ripard
* Maxime Ripard <[email protected]>
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#define SUN4I_FIFO_DEPTH 64
#define SUN4I_RXDATA_REG 0x00
#define SUN4I_TXDATA_REG 0x04
#define SUN4I_CTL_REG 0x08
#define SUN4I_CTL_ENABLE BIT(0)
#define SUN4I_CTL_MASTER BIT(1)
#define SUN4I_CTL_CPHA BIT(2)
#define SUN4I_CTL_CPOL BIT(3)
#define SUN4I_CTL_CS_ACTIVE_LOW BIT(4)
#define SUN4I_CTL_LMTF BIT(6)
#define SUN4I_CTL_TF_RST BIT(8)
#define SUN4I_CTL_RF_RST BIT(9)
#define SUN4I_CTL_XCH BIT(10)
#define SUN4I_CTL_CS_MASK 0x3000
#define SUN4I_CTL_CS(cs) (((cs) << 12) & SUN4I_CTL_CS_MASK)
#define SUN4I_CTL_DHB BIT(15)
#define SUN4I_CTL_CS_MANUAL BIT(16)
#define SUN4I_CTL_CS_LEVEL BIT(17)
#define SUN4I_CTL_TP BIT(18)
#define SUN4I_INT_CTL_REG 0x0c
#define SUN4I_INT_CTL_RF_F34 BIT(4)
#define SUN4I_INT_CTL_TF_E34 BIT(12)
#define SUN4I_INT_CTL_TC BIT(16)
#define SUN4I_INT_STA_REG 0x10
#define SUN4I_DMA_CTL_REG 0x14
#define SUN4I_WAIT_REG 0x18
#define SUN4I_CLK_CTL_REG 0x1c
#define SUN4I_CLK_CTL_CDR2_MASK 0xff
#define SUN4I_CLK_CTL_CDR2(div) ((div) & SUN4I_CLK_CTL_CDR2_MASK)
#define SUN4I_CLK_CTL_CDR1_MASK 0xf
#define SUN4I_CLK_CTL_CDR1(div) (((div) & SUN4I_CLK_CTL_CDR1_MASK) << 8)
#define SUN4I_CLK_CTL_DRS BIT(12)
#define SUN4I_MAX_XFER_SIZE 0xffffff
#define SUN4I_BURST_CNT_REG 0x20
#define SUN4I_BURST_CNT(cnt) ((cnt) & SUN4I_MAX_XFER_SIZE)
#define SUN4I_XMIT_CNT_REG 0x24
#define SUN4I_XMIT_CNT(cnt) ((cnt) & SUN4I_MAX_XFER_SIZE)
#define SUN4I_FIFO_STA_REG 0x28
#define SUN4I_FIFO_STA_RF_CNT_MASK 0x7f
#define SUN4I_FIFO_STA_RF_CNT_BITS 0
#define SUN4I_FIFO_STA_TF_CNT_MASK 0x7f
#define SUN4I_FIFO_STA_TF_CNT_BITS 16
struct sun4i_spi {
struct spi_master *master;
void __iomem *base_addr;
struct clk *hclk;
struct clk *mclk;
struct completion done;
const u8 *tx_buf;
u8 *rx_buf;
int len;
};
static inline u32 sun4i_spi_read(struct sun4i_spi *sspi, u32 reg)
{
return readl(sspi->base_addr + reg);
}
static inline void sun4i_spi_write(struct sun4i_spi *sspi, u32 reg, u32 value)
{
writel(value, sspi->base_addr + reg);
}
static inline u32 sun4i_spi_get_tx_fifo_count(struct sun4i_spi *sspi)
{
u32 reg = sun4i_spi_read(sspi, SUN4I_FIFO_STA_REG);
reg >>= SUN4I_FIFO_STA_TF_CNT_BITS;
return reg & SUN4I_FIFO_STA_TF_CNT_MASK;
}
static inline void sun4i_spi_enable_interrupt(struct sun4i_spi *sspi, u32 mask)
{
u32 reg = sun4i_spi_read(sspi, SUN4I_INT_CTL_REG);
reg |= mask;
sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, reg);
}
static inline void sun4i_spi_disable_interrupt(struct sun4i_spi *sspi, u32 mask)
{
u32 reg = sun4i_spi_read(sspi, SUN4I_INT_CTL_REG);
reg &= ~mask;
sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, reg);
}
static inline void sun4i_spi_drain_fifo(struct sun4i_spi *sspi, int len)
{
u32 reg, cnt;
u8 byte;
/* See how much data is available */
reg = sun4i_spi_read(sspi, SUN4I_FIFO_STA_REG);
reg &= SUN4I_FIFO_STA_RF_CNT_MASK;
cnt = reg >> SUN4I_FIFO_STA_RF_CNT_BITS;
if (len > cnt)
len = cnt;
while (len--) {
byte = readb(sspi->base_addr + SUN4I_RXDATA_REG);
if (sspi->rx_buf)
*sspi->rx_buf++ = byte;
}
}
static inline void sun4i_spi_fill_fifo(struct sun4i_spi *sspi, int len)
{
u32 cnt;
u8 byte;
/* See how much data we can fit */
cnt = SUN4I_FIFO_DEPTH - sun4i_spi_get_tx_fifo_count(sspi);
len = min3(len, (int)cnt, sspi->len);
while (len--) {
byte = sspi->tx_buf ? *sspi->tx_buf++ : 0;
writeb(byte, sspi->base_addr + SUN4I_TXDATA_REG);
sspi->len--;
}
}
static void sun4i_spi_set_cs(struct spi_device *spi, bool enable)
{
struct sun4i_spi *sspi = spi_master_get_devdata(spi->master);
u32 reg;
reg = sun4i_spi_read(sspi, SUN4I_CTL_REG);
reg &= ~SUN4I_CTL_CS_MASK;
reg |= SUN4I_CTL_CS(spi_get_chipselect(spi, 0));
/* We want to control the chip select manually */
reg |= SUN4I_CTL_CS_MANUAL;
if (enable)
reg |= SUN4I_CTL_CS_LEVEL;
else
reg &= ~SUN4I_CTL_CS_LEVEL;
/*
* Even though this looks irrelevant since we are supposed to
* be controlling the chip select manually, this bit also
* controls the levels of the chip select for inactive
* devices.
*
* If we don't set it, the chip select level will go low by
* default when the device is idle, which is not really
* expected in the common case where the chip select is active
* low.
*/
if (spi->mode & SPI_CS_HIGH)
reg &= ~SUN4I_CTL_CS_ACTIVE_LOW;
else
reg |= SUN4I_CTL_CS_ACTIVE_LOW;
sun4i_spi_write(sspi, SUN4I_CTL_REG, reg);
}
static size_t sun4i_spi_max_transfer_size(struct spi_device *spi)
{
return SUN4I_MAX_XFER_SIZE - 1;
}
static int sun4i_spi_transfer_one(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *tfr)
{
struct sun4i_spi *sspi = spi_master_get_devdata(master);
unsigned int mclk_rate, div, timeout;
unsigned int start, end, tx_time;
unsigned int tx_len = 0;
int ret = 0;
u32 reg;
/* We don't support transfers larger than the 24-bit burst counter */
if (tfr->len > SUN4I_MAX_XFER_SIZE)
return -EMSGSIZE;
if (tfr->tx_buf && tfr->len >= SUN4I_MAX_XFER_SIZE)
return -EMSGSIZE;
reinit_completion(&sspi->done);
sspi->tx_buf = tfr->tx_buf;
sspi->rx_buf = tfr->rx_buf;
sspi->len = tfr->len;
/* Clear pending interrupts */
sun4i_spi_write(sspi, SUN4I_INT_STA_REG, ~0);
reg = sun4i_spi_read(sspi, SUN4I_CTL_REG);
/* Reset FIFOs */
sun4i_spi_write(sspi, SUN4I_CTL_REG,
reg | SUN4I_CTL_RF_RST | SUN4I_CTL_TF_RST);
/*
* Setup the transfer control register: Chip Select,
* polarities, etc.
*/
if (spi->mode & SPI_CPOL)
reg |= SUN4I_CTL_CPOL;
else
reg &= ~SUN4I_CTL_CPOL;
if (spi->mode & SPI_CPHA)
reg |= SUN4I_CTL_CPHA;
else
reg &= ~SUN4I_CTL_CPHA;
if (spi->mode & SPI_LSB_FIRST)
reg |= SUN4I_CTL_LMTF;
else
reg &= ~SUN4I_CTL_LMTF;
/*
* If it's a TX only transfer, we don't want to fill the RX
* FIFO with bogus data
*/
if (sspi->rx_buf)
reg &= ~SUN4I_CTL_DHB;
else
reg |= SUN4I_CTL_DHB;
sun4i_spi_write(sspi, SUN4I_CTL_REG, reg);
/* Ensure that we have a parent clock fast enough */
mclk_rate = clk_get_rate(sspi->mclk);
if (mclk_rate < (2 * tfr->speed_hz)) {
clk_set_rate(sspi->mclk, 2 * tfr->speed_hz);
mclk_rate = clk_get_rate(sspi->mclk);
}
/*
* Setup clock divider.
*
* We have two choices there. Either we can use the clock
* divide rate 1, which is calculated thanks to this formula:
* SPI_CLK = MOD_CLK / (2 ^ (cdr + 1))
* Or we can use CDR2, which is calculated with the formula:
* SPI_CLK = MOD_CLK / (2 * (cdr + 1))
* Whether we use the former or the latter is set through the
* DRS bit.
*
* First try CDR2, and if we can't reach the expected
* frequency, fall back to CDR1.
*/
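/* Illustrative example: mclk_rate = 24 MHz, speed_hz = 1 MHz -> div = 12,
 * which fits CDR2, so the register value is 11 and
 * SPI_CLK = 24 MHz / (2 * (11 + 1)) = 1 MHz.
 */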
div = mclk_rate / (2 * tfr->speed_hz);
if (div <= (SUN4I_CLK_CTL_CDR2_MASK + 1)) {
if (div > 0)
div--;
reg = SUN4I_CLK_CTL_CDR2(div) | SUN4I_CLK_CTL_DRS;
} else {
div = ilog2(mclk_rate) - ilog2(tfr->speed_hz);
reg = SUN4I_CLK_CTL_CDR1(div);
}
sun4i_spi_write(sspi, SUN4I_CLK_CTL_REG, reg);
/* Setup the transfer now... */
if (sspi->tx_buf)
tx_len = tfr->len;
/* Setup the counters */
sun4i_spi_write(sspi, SUN4I_BURST_CNT_REG, SUN4I_BURST_CNT(tfr->len));
sun4i_spi_write(sspi, SUN4I_XMIT_CNT_REG, SUN4I_XMIT_CNT(tx_len));
/*
* Fill the TX FIFO.
* Filling the FIFO completely causes a timeout for some reason,
* at least on spi2 on the A10s.
*/
sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH - 1);
/* Enable the interrupts */
sun4i_spi_enable_interrupt(sspi, SUN4I_INT_CTL_TC |
SUN4I_INT_CTL_RF_F34);
/* Only enable Tx FIFO interrupt if we really need it */
if (tx_len > SUN4I_FIFO_DEPTH)
sun4i_spi_enable_interrupt(sspi, SUN4I_INT_CTL_TF_E34);
/* Start the transfer */
reg = sun4i_spi_read(sspi, SUN4I_CTL_REG);
sun4i_spi_write(sspi, SUN4I_CTL_REG, reg | SUN4I_CTL_XCH);
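/* Rough time budget for the transfer: twice the ideal transfer time in
 * milliseconds, with a 100 ms floor, used as the completion timeout below.
 */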
tx_time = max(tfr->len * 8 * 2 / (tfr->speed_hz / 1000), 100U);
start = jiffies;
timeout = wait_for_completion_timeout(&sspi->done,
msecs_to_jiffies(tx_time));
end = jiffies;
if (!timeout) {
dev_warn(&master->dev,
"%s: timeout transferring %u bytes@%iHz for %i(%i)ms",
dev_name(&spi->dev), tfr->len, tfr->speed_hz,
jiffies_to_msecs(end - start), tx_time);
ret = -ETIMEDOUT;
goto out;
}
out:
sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, 0);
return ret;
}
static irqreturn_t sun4i_spi_handler(int irq, void *dev_id)
{
struct sun4i_spi *sspi = dev_id;
u32 status = sun4i_spi_read(sspi, SUN4I_INT_STA_REG);
/* Transfer complete */
if (status & SUN4I_INT_CTL_TC) {
sun4i_spi_write(sspi, SUN4I_INT_STA_REG, SUN4I_INT_CTL_TC);
sun4i_spi_drain_fifo(sspi, SUN4I_FIFO_DEPTH);
complete(&sspi->done);
return IRQ_HANDLED;
}
/* Receive FIFO 3/4 full */
if (status & SUN4I_INT_CTL_RF_F34) {
sun4i_spi_drain_fifo(sspi, SUN4I_FIFO_DEPTH);
/* Only clear the interrupt _after_ draining the FIFO */
sun4i_spi_write(sspi, SUN4I_INT_STA_REG, SUN4I_INT_CTL_RF_F34);
return IRQ_HANDLED;
}
/* Transmit FIFO 3/4 empty */
if (status & SUN4I_INT_CTL_TF_E34) {
sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH);
if (!sspi->len)
/* nothing left to transmit */
sun4i_spi_disable_interrupt(sspi, SUN4I_INT_CTL_TF_E34);
/* Only clear the interrupt _after_ re-seeding the FIFO */
sun4i_spi_write(sspi, SUN4I_INT_STA_REG, SUN4I_INT_CTL_TF_E34);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
static int sun4i_spi_runtime_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct sun4i_spi *sspi = spi_master_get_devdata(master);
int ret;
ret = clk_prepare_enable(sspi->hclk);
if (ret) {
dev_err(dev, "Couldn't enable AHB clock\n");
goto out;
}
ret = clk_prepare_enable(sspi->mclk);
if (ret) {
dev_err(dev, "Couldn't enable module clock\n");
goto err;
}
sun4i_spi_write(sspi, SUN4I_CTL_REG,
SUN4I_CTL_ENABLE | SUN4I_CTL_MASTER | SUN4I_CTL_TP);
return 0;
err:
clk_disable_unprepare(sspi->hclk);
out:
return ret;
}
static int sun4i_spi_runtime_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct sun4i_spi *sspi = spi_master_get_devdata(master);
clk_disable_unprepare(sspi->mclk);
clk_disable_unprepare(sspi->hclk);
return 0;
}
static int sun4i_spi_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct sun4i_spi *sspi;
int ret = 0, irq;
master = spi_alloc_master(&pdev->dev, sizeof(struct sun4i_spi));
if (!master) {
dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
return -ENOMEM;
}
platform_set_drvdata(pdev, master);
sspi = spi_master_get_devdata(master);
sspi->base_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sspi->base_addr)) {
ret = PTR_ERR(sspi->base_addr);
goto err_free_master;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = -ENXIO;
goto err_free_master;
}
ret = devm_request_irq(&pdev->dev, irq, sun4i_spi_handler,
0, "sun4i-spi", sspi);
if (ret) {
dev_err(&pdev->dev, "Cannot request IRQ\n");
goto err_free_master;
}
sspi->master = master;
master->max_speed_hz = 100 * 1000 * 1000;
master->min_speed_hz = 3 * 1000;
master->set_cs = sun4i_spi_set_cs;
master->transfer_one = sun4i_spi_transfer_one;
master->num_chipselect = 4;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
master->bits_per_word_mask = SPI_BPW_MASK(8);
master->dev.of_node = pdev->dev.of_node;
master->auto_runtime_pm = true;
master->max_transfer_size = sun4i_spi_max_transfer_size;
sspi->hclk = devm_clk_get(&pdev->dev, "ahb");
if (IS_ERR(sspi->hclk)) {
dev_err(&pdev->dev, "Unable to acquire AHB clock\n");
ret = PTR_ERR(sspi->hclk);
goto err_free_master;
}
sspi->mclk = devm_clk_get(&pdev->dev, "mod");
if (IS_ERR(sspi->mclk)) {
dev_err(&pdev->dev, "Unable to acquire module clock\n");
ret = PTR_ERR(sspi->mclk);
goto err_free_master;
}
init_completion(&sspi->done);
/*
 * This wake-up/shutdown pattern lets us bring the device up even
 * if runtime PM is disabled.
 */
ret = sun4i_spi_runtime_resume(&pdev->dev);
if (ret) {
dev_err(&pdev->dev, "Couldn't resume the device\n");
goto err_free_master;
}
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
pm_runtime_idle(&pdev->dev);
ret = devm_spi_register_master(&pdev->dev, master);
if (ret) {
dev_err(&pdev->dev, "cannot register SPI master\n");
goto err_pm_disable;
}
return 0;
err_pm_disable:
pm_runtime_disable(&pdev->dev);
sun4i_spi_runtime_suspend(&pdev->dev);
err_free_master:
spi_master_put(master);
return ret;
}
static void sun4i_spi_remove(struct platform_device *pdev)
{
pm_runtime_force_suspend(&pdev->dev);
}
static const struct of_device_id sun4i_spi_match[] = {
{ .compatible = "allwinner,sun4i-a10-spi", },
{}
};
MODULE_DEVICE_TABLE(of, sun4i_spi_match);
static const struct dev_pm_ops sun4i_spi_pm_ops = {
.runtime_resume = sun4i_spi_runtime_resume,
.runtime_suspend = sun4i_spi_runtime_suspend,
};
static struct platform_driver sun4i_spi_driver = {
.probe = sun4i_spi_probe,
.remove_new = sun4i_spi_remove,
.driver = {
.name = "sun4i-spi",
.of_match_table = sun4i_spi_match,
.pm = &sun4i_spi_pm_ops,
},
};
module_platform_driver(sun4i_spi_driver);
MODULE_AUTHOR("Pan Nan <[email protected]>");
MODULE_AUTHOR("Maxime Ripard <[email protected]>");
MODULE_DESCRIPTION("Allwinner A1X/A20 SPI controller driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-sun4i.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* OMAP2 McSPI controller driver
*
* Copyright (C) 2005, 2006 Nokia Corporation
* Author: Samuel Ortiz <[email protected]> and
* Juha Yrjola <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/gcd.h>
#include <linux/spi/spi.h>
#include <linux/platform_data/spi-omap2-mcspi.h>
#define OMAP2_MCSPI_MAX_FREQ 48000000
#define OMAP2_MCSPI_MAX_DIVIDER 4096
#define OMAP2_MCSPI_MAX_FIFODEPTH 64
#define OMAP2_MCSPI_MAX_FIFOWCNT 0xFFFF
#define SPI_AUTOSUSPEND_TIMEOUT 2000
#define OMAP2_MCSPI_REVISION 0x00
#define OMAP2_MCSPI_SYSSTATUS 0x14
#define OMAP2_MCSPI_IRQSTATUS 0x18
#define OMAP2_MCSPI_IRQENABLE 0x1c
#define OMAP2_MCSPI_WAKEUPENABLE 0x20
#define OMAP2_MCSPI_SYST 0x24
#define OMAP2_MCSPI_MODULCTRL 0x28
#define OMAP2_MCSPI_XFERLEVEL 0x7c
/* per-channel banks, 0x14 bytes each, first is: */
#define OMAP2_MCSPI_CHCONF0 0x2c
#define OMAP2_MCSPI_CHSTAT0 0x30
#define OMAP2_MCSPI_CHCTRL0 0x34
#define OMAP2_MCSPI_TX0 0x38
#define OMAP2_MCSPI_RX0 0x3c
/* per-register bitmasks: */
#define OMAP2_MCSPI_IRQSTATUS_EOW BIT(17)
#define OMAP2_MCSPI_MODULCTRL_SINGLE BIT(0)
#define OMAP2_MCSPI_MODULCTRL_MS BIT(2)
#define OMAP2_MCSPI_MODULCTRL_STEST BIT(3)
#define OMAP2_MCSPI_CHCONF_PHA BIT(0)
#define OMAP2_MCSPI_CHCONF_POL BIT(1)
#define OMAP2_MCSPI_CHCONF_CLKD_MASK (0x0f << 2)
#define OMAP2_MCSPI_CHCONF_EPOL BIT(6)
#define OMAP2_MCSPI_CHCONF_WL_MASK (0x1f << 7)
#define OMAP2_MCSPI_CHCONF_TRM_RX_ONLY BIT(12)
#define OMAP2_MCSPI_CHCONF_TRM_TX_ONLY BIT(13)
#define OMAP2_MCSPI_CHCONF_TRM_MASK (0x03 << 12)
#define OMAP2_MCSPI_CHCONF_DMAW BIT(14)
#define OMAP2_MCSPI_CHCONF_DMAR BIT(15)
#define OMAP2_MCSPI_CHCONF_DPE0 BIT(16)
#define OMAP2_MCSPI_CHCONF_DPE1 BIT(17)
#define OMAP2_MCSPI_CHCONF_IS BIT(18)
#define OMAP2_MCSPI_CHCONF_TURBO BIT(19)
#define OMAP2_MCSPI_CHCONF_FORCE BIT(20)
#define OMAP2_MCSPI_CHCONF_FFET BIT(27)
#define OMAP2_MCSPI_CHCONF_FFER BIT(28)
#define OMAP2_MCSPI_CHCONF_CLKG BIT(29)
#define OMAP2_MCSPI_CHSTAT_RXS BIT(0)
#define OMAP2_MCSPI_CHSTAT_TXS BIT(1)
#define OMAP2_MCSPI_CHSTAT_EOT BIT(2)
#define OMAP2_MCSPI_CHSTAT_TXFFE BIT(3)
#define OMAP2_MCSPI_CHCTRL_EN BIT(0)
#define OMAP2_MCSPI_CHCTRL_EXTCLK_MASK (0xff << 8)
#define OMAP2_MCSPI_WAKEUPENABLE_WKEN BIT(0)
/* We have 2 DMA channels per CS, one for RX and one for TX */
struct omap2_mcspi_dma {
struct dma_chan *dma_tx;
struct dma_chan *dma_rx;
struct completion dma_tx_completion;
struct completion dma_rx_completion;
char dma_rx_ch_name[14];
char dma_tx_ch_name[14];
};
/* use PIO for small transfers, avoiding DMA setup/teardown overhead and
* cache operations; better heuristics consider wordsize and bitrate.
*/
#define DMA_MIN_BYTES 160
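/*
 * With the default of 160 bytes, omap2_mcspi_can_dma() below only picks
 * DMA in host (master) mode when a transfer is at least DMA_MIN_BYTES
 * long; anything shorter is handled by omap2_mcspi_txrx_pio().
 */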
/*
* Used for context save and restore, structure members to be updated whenever
* corresponding registers are modified.
*/
struct omap2_mcspi_regs {
u32 modulctrl;
u32 wakeupenable;
struct list_head cs;
};
struct omap2_mcspi {
struct completion txdone;
struct spi_master *master;
/* Virtual base address of the controller */
void __iomem *base;
unsigned long phys;
/* SPI1 has 4 channels, while SPI2 has 2 */
struct omap2_mcspi_dma *dma_channels;
struct device *dev;
struct omap2_mcspi_regs ctx;
int fifo_depth;
bool slave_aborted;
unsigned int pin_dir:1;
size_t max_xfer_len;
};
struct omap2_mcspi_cs {
void __iomem *base;
unsigned long phys;
int word_len;
u16 mode;
struct list_head node;
/* Context save and restore shadow register */
u32 chconf0, chctrl0;
};
static inline void mcspi_write_reg(struct spi_master *master,
int idx, u32 val)
{
struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
writel_relaxed(val, mcspi->base + idx);
}
static inline u32 mcspi_read_reg(struct spi_master *master, int idx)
{
struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
return readl_relaxed(mcspi->base + idx);
}
static inline void mcspi_write_cs_reg(const struct spi_device *spi,
int idx, u32 val)
{
struct omap2_mcspi_cs *cs = spi->controller_state;
writel_relaxed(val, cs->base + idx);
}
static inline u32 mcspi_read_cs_reg(const struct spi_device *spi, int idx)
{
struct omap2_mcspi_cs *cs = spi->controller_state;
return readl_relaxed(cs->base + idx);
}
static inline u32 mcspi_cached_chconf0(const struct spi_device *spi)
{
struct omap2_mcspi_cs *cs = spi->controller_state;
return cs->chconf0;
}
static inline void mcspi_write_chconf0(const struct spi_device *spi, u32 val)
{
struct omap2_mcspi_cs *cs = spi->controller_state;
cs->chconf0 = val;
mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, val);
mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
}
static inline int mcspi_bytes_per_word(int word_len)
{
if (word_len <= 8)
return 1;
else if (word_len <= 16)
return 2;
else /* word_len <= 32 */
return 4;
}
static void omap2_mcspi_set_dma_req(const struct spi_device *spi,
int is_read, int enable)
{
u32 l, rw;
l = mcspi_cached_chconf0(spi);
if (is_read) /* 1 is read, 0 write */
rw = OMAP2_MCSPI_CHCONF_DMAR;
else
rw = OMAP2_MCSPI_CHCONF_DMAW;
if (enable)
l |= rw;
else
l &= ~rw;
mcspi_write_chconf0(spi, l);
}
static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable)
{
struct omap2_mcspi_cs *cs = spi->controller_state;
u32 l;
l = cs->chctrl0;
if (enable)
l |= OMAP2_MCSPI_CHCTRL_EN;
else
l &= ~OMAP2_MCSPI_CHCTRL_EN;
cs->chctrl0 = l;
mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, cs->chctrl0);
/* Flush posted writes */
mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCTRL0);
}
static void omap2_mcspi_set_cs(struct spi_device *spi, bool enable)
{
struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
u32 l;
/* The controller handles the inverted chip selects
* using the OMAP2_MCSPI_CHCONF_EPOL bit so revert
* the inversion from the core spi_set_cs function.
*/
if (spi->mode & SPI_CS_HIGH)
enable = !enable;
if (spi->controller_state) {
int err = pm_runtime_resume_and_get(mcspi->dev);
if (err < 0) {
dev_err(mcspi->dev, "failed to get sync: %d\n", err);
return;
}
l = mcspi_cached_chconf0(spi);
if (enable)
l &= ~OMAP2_MCSPI_CHCONF_FORCE;
else
l |= OMAP2_MCSPI_CHCONF_FORCE;
mcspi_write_chconf0(spi, l);
pm_runtime_mark_last_busy(mcspi->dev);
pm_runtime_put_autosuspend(mcspi->dev);
}
}
static void omap2_mcspi_set_mode(struct spi_master *master)
{
struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
struct omap2_mcspi_regs *ctx = &mcspi->ctx;
u32 l;
/*
* Choose master or slave mode
*/
l = mcspi_read_reg(master, OMAP2_MCSPI_MODULCTRL);
l &= ~(OMAP2_MCSPI_MODULCTRL_STEST);
if (spi_controller_is_slave(master)) {
l |= (OMAP2_MCSPI_MODULCTRL_MS);
} else {
l &= ~(OMAP2_MCSPI_MODULCTRL_MS);
l |= OMAP2_MCSPI_MODULCTRL_SINGLE;
}
mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, l);
ctx->modulctrl = l;
}
static void omap2_mcspi_set_fifo(const struct spi_device *spi,
struct spi_transfer *t, int enable)
{
struct spi_master *master = spi->master;
struct omap2_mcspi_cs *cs = spi->controller_state;
struct omap2_mcspi *mcspi;
unsigned int wcnt;
int max_fifo_depth, bytes_per_word;
u32 chconf, xferlevel;
mcspi = spi_master_get_devdata(master);
chconf = mcspi_cached_chconf0(spi);
if (enable) {
bytes_per_word = mcspi_bytes_per_word(cs->word_len);
if (t->len % bytes_per_word != 0)
goto disable_fifo;
if (t->rx_buf != NULL && t->tx_buf != NULL)
max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH / 2;
else
max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH;
wcnt = t->len / bytes_per_word;
if (wcnt > OMAP2_MCSPI_MAX_FIFOWCNT)
goto disable_fifo;
xferlevel = wcnt << 16;
if (t->rx_buf != NULL) {
chconf |= OMAP2_MCSPI_CHCONF_FFER;
xferlevel |= (bytes_per_word - 1) << 8;
}
if (t->tx_buf != NULL) {
chconf |= OMAP2_MCSPI_CHCONF_FFET;
xferlevel |= bytes_per_word - 1;
}
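/*
 * Worked example (illustrative values): a 64-byte full-duplex transfer
 * with word_len = 16 gives bytes_per_word = 2 and wcnt = 32, so
 * xferlevel = (32 << 16) | (1 << 8) | 1 = 0x200101: a word count of 32
 * in the upper half and FIFO trigger levels of one word
 * (bytes_per_word - 1 = 1) for both RX and TX.
 */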
mcspi_write_reg(master, OMAP2_MCSPI_XFERLEVEL, xferlevel);
mcspi_write_chconf0(spi, chconf);
mcspi->fifo_depth = max_fifo_depth;
return;
}
disable_fifo:
if (t->rx_buf != NULL)
chconf &= ~OMAP2_MCSPI_CHCONF_FFER;
if (t->tx_buf != NULL)
chconf &= ~OMAP2_MCSPI_CHCONF_FFET;
mcspi_write_chconf0(spi, chconf);
mcspi->fifo_depth = 0;
}
static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
{
unsigned long timeout;
timeout = jiffies + msecs_to_jiffies(1000);
while (!(readl_relaxed(reg) & bit)) {
if (time_after(jiffies, timeout)) {
if (!(readl_relaxed(reg) & bit))
return -ETIMEDOUT;
else
return 0;
}
cpu_relax();
}
return 0;
}
static int mcspi_wait_for_completion(struct omap2_mcspi *mcspi,
struct completion *x)
{
if (spi_controller_is_slave(mcspi->master)) {
if (wait_for_completion_interruptible(x) ||
mcspi->slave_aborted)
return -EINTR;
} else {
wait_for_completion(x);
}
return 0;
}
static void omap2_mcspi_rx_callback(void *data)
{
struct spi_device *spi = data;
struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
/* We must disable the DMA RX request */
omap2_mcspi_set_dma_req(spi, 1, 0);
complete(&mcspi_dma->dma_rx_completion);
}
static void omap2_mcspi_tx_callback(void *data)
{
struct spi_device *spi = data;
struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
/* We must disable the DMA TX request */
omap2_mcspi_set_dma_req(spi, 0, 0);
complete(&mcspi_dma->dma_tx_completion);
}
static void omap2_mcspi_tx_dma(struct spi_device *spi,
struct spi_transfer *xfer,
struct dma_slave_config cfg)
{
struct omap2_mcspi *mcspi;
struct omap2_mcspi_dma *mcspi_dma;
struct dma_async_tx_descriptor *tx;
mcspi = spi_master_get_devdata(spi->master);
mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);
tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, xfer->tx_sg.sgl,
xfer->tx_sg.nents,
DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (tx) {
tx->callback = omap2_mcspi_tx_callback;
tx->callback_param = spi;
dmaengine_submit(tx);
} else {
/* FIXME: fall back to PIO? */
}
dma_async_issue_pending(mcspi_dma->dma_tx);
omap2_mcspi_set_dma_req(spi, 0, 1);
}
static unsigned
omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
struct dma_slave_config cfg,
unsigned es)
{
struct omap2_mcspi *mcspi;
struct omap2_mcspi_dma *mcspi_dma;
unsigned int count, transfer_reduction = 0;
struct scatterlist *sg_out[2];
int nb_sizes = 0, out_mapped_nents[2], ret, x;
size_t sizes[2];
u32 l;
int elements = 0;
int word_len, element_count;
struct omap2_mcspi_cs *cs = spi->controller_state;
void __iomem *chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
struct dma_async_tx_descriptor *tx;
mcspi = spi_master_get_devdata(spi->master);
mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
count = xfer->len;
/*
 * The "End-of-Transfer Procedure" section for DMA RX in the OMAP35x
 * TRM mentions reducing the DMA transfer length by one element in
 * master normal mode.
 */
if (mcspi->fifo_depth == 0)
transfer_reduction = es;
word_len = cs->word_len;
l = mcspi_cached_chconf0(spi);
if (word_len <= 8)
element_count = count;
else if (word_len <= 16)
element_count = count >> 1;
else /* word_len <= 32 */
element_count = count >> 2;
dmaengine_slave_config(mcspi_dma->dma_rx, &cfg);
/*
* Reduce DMA transfer length by one more if McSPI is
* configured in turbo mode.
*/
if ((l & OMAP2_MCSPI_CHCONF_TURBO) && mcspi->fifo_depth == 0)
transfer_reduction += es;
if (transfer_reduction) {
/* Split sgl into two. The second sgl won't be used. */
sizes[0] = count - transfer_reduction;
sizes[1] = transfer_reduction;
nb_sizes = 2;
} else {
/*
* Don't bother splitting the sgl. This essentially
* clones the original sgl.
*/
sizes[0] = count;
nb_sizes = 1;
}
ret = sg_split(xfer->rx_sg.sgl, xfer->rx_sg.nents, 0, nb_sizes,
sizes, sg_out, out_mapped_nents, GFP_KERNEL);
if (ret < 0) {
dev_err(&spi->dev, "sg_split failed\n");
return 0;
}
tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, sg_out[0],
out_mapped_nents[0], DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (tx) {
tx->callback = omap2_mcspi_rx_callback;
tx->callback_param = spi;
dmaengine_submit(tx);
} else {
/* FIXME: fall back to PIO? */
}
dma_async_issue_pending(mcspi_dma->dma_rx);
omap2_mcspi_set_dma_req(spi, 1, 1);
ret = mcspi_wait_for_completion(mcspi, &mcspi_dma->dma_rx_completion);
if (ret || mcspi->slave_aborted) {
dmaengine_terminate_sync(mcspi_dma->dma_rx);
omap2_mcspi_set_dma_req(spi, 1, 0);
return 0;
}
for (x = 0; x < nb_sizes; x++)
kfree(sg_out[x]);
if (mcspi->fifo_depth > 0)
return count;
/*
* Due to the DMA transfer length reduction the missing bytes must
* be read manually to receive all of the expected data.
*/
omap2_mcspi_set_enable(spi, 0);
elements = element_count - 1;
if (l & OMAP2_MCSPI_CHCONF_TURBO) {
elements--;
if (!mcspi_wait_for_reg_bit(chstat_reg,
OMAP2_MCSPI_CHSTAT_RXS)) {
u32 w;
w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
if (word_len <= 8)
((u8 *)xfer->rx_buf)[elements++] = w;
else if (word_len <= 16)
((u16 *)xfer->rx_buf)[elements++] = w;
else /* word_len <= 32 */
((u32 *)xfer->rx_buf)[elements++] = w;
} else {
int bytes_per_word = mcspi_bytes_per_word(word_len);
dev_err(&spi->dev, "DMA RX penultimate word empty\n");
count -= (bytes_per_word << 1);
omap2_mcspi_set_enable(spi, 1);
return count;
}
}
if (!mcspi_wait_for_reg_bit(chstat_reg, OMAP2_MCSPI_CHSTAT_RXS)) {
u32 w;
w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
if (word_len <= 8)
((u8 *)xfer->rx_buf)[elements] = w;
else if (word_len <= 16)
((u16 *)xfer->rx_buf)[elements] = w;
else /* word_len <= 32 */
((u32 *)xfer->rx_buf)[elements] = w;
} else {
dev_err(&spi->dev, "DMA RX last word empty\n");
count -= mcspi_bytes_per_word(word_len);
}
omap2_mcspi_set_enable(spi, 1);
return count;
}
static unsigned
omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
{
struct omap2_mcspi *mcspi;
struct omap2_mcspi_cs *cs = spi->controller_state;
struct omap2_mcspi_dma *mcspi_dma;
unsigned int count;
u8 *rx;
const u8 *tx;
struct dma_slave_config cfg;
enum dma_slave_buswidth width;
unsigned es;
void __iomem *chstat_reg;
void __iomem *irqstat_reg;
int wait_res;
mcspi = spi_master_get_devdata(spi->master);
mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
if (cs->word_len <= 8) {
width = DMA_SLAVE_BUSWIDTH_1_BYTE;
es = 1;
} else if (cs->word_len <= 16) {
width = DMA_SLAVE_BUSWIDTH_2_BYTES;
es = 2;
} else {
width = DMA_SLAVE_BUSWIDTH_4_BYTES;
es = 4;
}
count = xfer->len;
memset(&cfg, 0, sizeof(cfg));
cfg.src_addr = cs->phys + OMAP2_MCSPI_RX0;
cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0;
cfg.src_addr_width = width;
cfg.dst_addr_width = width;
cfg.src_maxburst = 1;
cfg.dst_maxburst = 1;
rx = xfer->rx_buf;
tx = xfer->tx_buf;
mcspi->slave_aborted = false;
reinit_completion(&mcspi_dma->dma_tx_completion);
reinit_completion(&mcspi_dma->dma_rx_completion);
reinit_completion(&mcspi->txdone);
if (tx) {
/* Enable EOW IRQ to know end of tx in slave mode */
if (spi_controller_is_slave(spi->master))
mcspi_write_reg(spi->master,
OMAP2_MCSPI_IRQENABLE,
OMAP2_MCSPI_IRQSTATUS_EOW);
omap2_mcspi_tx_dma(spi, xfer, cfg);
}
if (rx != NULL)
count = omap2_mcspi_rx_dma(spi, xfer, cfg, es);
if (tx != NULL) {
int ret;
ret = mcspi_wait_for_completion(mcspi, &mcspi_dma->dma_tx_completion);
if (ret || mcspi->slave_aborted) {
dmaengine_terminate_sync(mcspi_dma->dma_tx);
omap2_mcspi_set_dma_req(spi, 0, 0);
return 0;
}
if (spi_controller_is_slave(mcspi->master)) {
ret = mcspi_wait_for_completion(mcspi, &mcspi->txdone);
if (ret || mcspi->slave_aborted)
return 0;
}
if (mcspi->fifo_depth > 0) {
irqstat_reg = mcspi->base + OMAP2_MCSPI_IRQSTATUS;
if (mcspi_wait_for_reg_bit(irqstat_reg,
OMAP2_MCSPI_IRQSTATUS_EOW) < 0)
dev_err(&spi->dev, "EOW timed out\n");
mcspi_write_reg(mcspi->master, OMAP2_MCSPI_IRQSTATUS,
OMAP2_MCSPI_IRQSTATUS_EOW);
}
/* for TX_ONLY mode, be sure all words have shifted out */
if (rx == NULL) {
chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
if (mcspi->fifo_depth > 0) {
wait_res = mcspi_wait_for_reg_bit(chstat_reg,
OMAP2_MCSPI_CHSTAT_TXFFE);
if (wait_res < 0)
dev_err(&spi->dev, "TXFFE timed out\n");
} else {
wait_res = mcspi_wait_for_reg_bit(chstat_reg,
OMAP2_MCSPI_CHSTAT_TXS);
if (wait_res < 0)
dev_err(&spi->dev, "TXS timed out\n");
}
if (wait_res >= 0 &&
(mcspi_wait_for_reg_bit(chstat_reg,
OMAP2_MCSPI_CHSTAT_EOT) < 0))
dev_err(&spi->dev, "EOT timed out\n");
}
}
return count;
}
static unsigned
omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
{
struct omap2_mcspi_cs *cs = spi->controller_state;
unsigned int count, c;
u32 l;
void __iomem *base = cs->base;
void __iomem *tx_reg;
void __iomem *rx_reg;
void __iomem *chstat_reg;
int word_len;
count = xfer->len;
c = count;
word_len = cs->word_len;
l = mcspi_cached_chconf0(spi);
/* We store the pre-calculated register addresses on the stack to speed
 * up the transfer loop. */
tx_reg = base + OMAP2_MCSPI_TX0;
rx_reg = base + OMAP2_MCSPI_RX0;
chstat_reg = base + OMAP2_MCSPI_CHSTAT0;
if (c < (word_len>>3))
return 0;
if (word_len <= 8) {
u8 *rx;
const u8 *tx;
rx = xfer->rx_buf;
tx = xfer->tx_buf;
do {
c -= 1;
if (tx != NULL) {
if (mcspi_wait_for_reg_bit(chstat_reg,
OMAP2_MCSPI_CHSTAT_TXS) < 0) {
dev_err(&spi->dev, "TXS timed out\n");
goto out;
}
dev_vdbg(&spi->dev, "write-%d %02x\n",
word_len, *tx);
writel_relaxed(*tx++, tx_reg);
}
if (rx != NULL) {
if (mcspi_wait_for_reg_bit(chstat_reg,
OMAP2_MCSPI_CHSTAT_RXS) < 0) {
dev_err(&spi->dev, "RXS timed out\n");
goto out;
}
if (c == 1 && tx == NULL &&
(l & OMAP2_MCSPI_CHCONF_TURBO)) {
omap2_mcspi_set_enable(spi, 0);
*rx++ = readl_relaxed(rx_reg);
dev_vdbg(&spi->dev, "read-%d %02x\n",
word_len, *(rx - 1));
if (mcspi_wait_for_reg_bit(chstat_reg,
OMAP2_MCSPI_CHSTAT_RXS) < 0) {
dev_err(&spi->dev,
"RXS timed out\n");
goto out;
}
c = 0;
} else if (c == 0 && tx == NULL) {
omap2_mcspi_set_enable(spi, 0);
}
*rx++ = readl_relaxed(rx_reg);
dev_vdbg(&spi->dev, "read-%d %02x\n",
word_len, *(rx - 1));
}
/* Add word delay between each word */
spi_delay_exec(&xfer->word_delay, xfer);
} while (c);
} else if (word_len <= 16) {
u16 *rx;
const u16 *tx;
rx = xfer->rx_buf;
tx = xfer->tx_buf;
do {
c -= 2;
if (tx != NULL) {
if (mcspi_wait_for_reg_bit(chstat_reg,
OMAP2_MCSPI_CHSTAT_TXS) < 0) {
dev_err(&spi->dev, "TXS timed out\n");
goto out;
}
dev_vdbg(&spi->dev, "write-%d %04x\n",
word_len, *tx);
writel_relaxed(*tx++, tx_reg);
}
if (rx != NULL) {
if (mcspi_wait_for_reg_bit(chstat_reg,
OMAP2_MCSPI_CHSTAT_RXS) < 0) {
dev_err(&spi->dev, "RXS timed out\n");
goto out;
}
if (c == 2 && tx == NULL &&
(l & OMAP2_MCSPI_CHCONF_TURBO)) {
omap2_mcspi_set_enable(spi, 0);
*rx++ = readl_relaxed(rx_reg);
dev_vdbg(&spi->dev, "read-%d %04x\n",
word_len, *(rx - 1));
if (mcspi_wait_for_reg_bit(chstat_reg,
OMAP2_MCSPI_CHSTAT_RXS) < 0) {
dev_err(&spi->dev,
"RXS timed out\n");
goto out;
}
c = 0;
} else if (c == 0 && tx == NULL) {
omap2_mcspi_set_enable(spi, 0);
}
*rx++ = readl_relaxed(rx_reg);
dev_vdbg(&spi->dev, "read-%d %04x\n",
word_len, *(rx - 1));
}
/* Add word delay between each word */
spi_delay_exec(&xfer->word_delay, xfer);
} while (c >= 2);
} else if (word_len <= 32) {
u32 *rx;
const u32 *tx;
rx = xfer->rx_buf;
tx = xfer->tx_buf;
do {
c -= 4;
if (tx != NULL) {
if (mcspi_wait_for_reg_bit(chstat_reg,
OMAP2_MCSPI_CHSTAT_TXS) < 0) {
dev_err(&spi->dev, "TXS timed out\n");
goto out;
}
dev_vdbg(&spi->dev, "write-%d %08x\n",
word_len, *tx);
writel_relaxed(*tx++, tx_reg);
}
if (rx != NULL) {
if (mcspi_wait_for_reg_bit(chstat_reg,
OMAP2_MCSPI_CHSTAT_RXS) < 0) {
dev_err(&spi->dev, "RXS timed out\n");
goto out;
}
if (c == 4 && tx == NULL &&
(l & OMAP2_MCSPI_CHCONF_TURBO)) {
omap2_mcspi_set_enable(spi, 0);
*rx++ = readl_relaxed(rx_reg);
dev_vdbg(&spi->dev, "read-%d %08x\n",
word_len, *(rx - 1));
if (mcspi_wait_for_reg_bit(chstat_reg,
OMAP2_MCSPI_CHSTAT_RXS) < 0) {
dev_err(&spi->dev,
"RXS timed out\n");
goto out;
}
c = 0;
} else if (c == 0 && tx == NULL) {
omap2_mcspi_set_enable(spi, 0);
}
*rx++ = readl_relaxed(rx_reg);
dev_vdbg(&spi->dev, "read-%d %08x\n",
word_len, *(rx - 1));
}
/* Add word delay between each word */
spi_delay_exec(&xfer->word_delay, xfer);
} while (c >= 4);
}
/* for TX_ONLY mode, be sure all words have shifted out */
if (xfer->rx_buf == NULL) {
if (mcspi_wait_for_reg_bit(chstat_reg,
OMAP2_MCSPI_CHSTAT_TXS) < 0) {
dev_err(&spi->dev, "TXS timed out\n");
} else if (mcspi_wait_for_reg_bit(chstat_reg,
OMAP2_MCSPI_CHSTAT_EOT) < 0)
dev_err(&spi->dev, "EOT timed out\n");
/* disable the channel to purge any rx data received during the
 * TX_ONLY transfer, otherwise that stale rx data would corrupt the
 * immediately following RX_ONLY transfer.
 */
omap2_mcspi_set_enable(spi, 0);
}
out:
omap2_mcspi_set_enable(spi, 1);
return count - c;
}
static u32 omap2_mcspi_calc_divisor(u32 speed_hz)
{
u32 div;
for (div = 0; div < 15; div++)
if (speed_hz >= (OMAP2_MCSPI_MAX_FREQ >> div))
return div;
return 15;
}
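/*
 * Example: for speed_hz = 10 MHz the loop above returns 3, the first
 * divisor for which 48 MHz >> div (= 6 MHz) no longer exceeds the
 * requested rate; the caller then runs the channel at 6 MHz.
 */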
/* called only when no transfer is active on this device */
static int omap2_mcspi_setup_transfer(struct spi_device *spi,
struct spi_transfer *t)
{
struct omap2_mcspi_cs *cs = spi->controller_state;
struct omap2_mcspi *mcspi;
u32 l = 0, clkd = 0, div, extclk = 0, clkg = 0;
u8 word_len = spi->bits_per_word;
u32 speed_hz = spi->max_speed_hz;
mcspi = spi_master_get_devdata(spi->master);
if (t != NULL && t->bits_per_word)
word_len = t->bits_per_word;
cs->word_len = word_len;
if (t && t->speed_hz)
speed_hz = t->speed_hz;
speed_hz = min_t(u32, speed_hz, OMAP2_MCSPI_MAX_FREQ);
if (speed_hz < (OMAP2_MCSPI_MAX_FREQ / OMAP2_MCSPI_MAX_DIVIDER)) {
clkd = omap2_mcspi_calc_divisor(speed_hz);
speed_hz = OMAP2_MCSPI_MAX_FREQ >> clkd;
clkg = 0;
} else {
div = (OMAP2_MCSPI_MAX_FREQ + speed_hz - 1) / speed_hz;
speed_hz = OMAP2_MCSPI_MAX_FREQ / div;
clkd = (div - 1) & 0xf;
extclk = (div - 1) >> 4;
clkg = OMAP2_MCSPI_CHCONF_CLKG;
}
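/*
 * Worked example for the one-clock-granularity branch above: a request
 * of speed_hz = 2 MHz gives div = 24, so the channel runs at exactly
 * 48 MHz / 24 = 2 MHz, with div - 1 = 23 split into clkd = 23 & 0xf = 7
 * and extclk = 23 >> 4 = 1.
 */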
l = mcspi_cached_chconf0(spi);
/* standard 4-wire master mode: SCK, MOSI/out, MISO/in, nCS
* REVISIT: this controller could support SPI_3WIRE mode.
*/
if (mcspi->pin_dir == MCSPI_PINDIR_D0_IN_D1_OUT) {
l &= ~OMAP2_MCSPI_CHCONF_IS;
l &= ~OMAP2_MCSPI_CHCONF_DPE1;
l |= OMAP2_MCSPI_CHCONF_DPE0;
} else {
l |= OMAP2_MCSPI_CHCONF_IS;
l |= OMAP2_MCSPI_CHCONF_DPE1;
l &= ~OMAP2_MCSPI_CHCONF_DPE0;
}
/* wordlength */
l &= ~OMAP2_MCSPI_CHCONF_WL_MASK;
l |= (word_len - 1) << 7;
/* set chipselect polarity; manage with FORCE */
if (!(spi->mode & SPI_CS_HIGH))
l |= OMAP2_MCSPI_CHCONF_EPOL; /* active-low; normal */
else
l &= ~OMAP2_MCSPI_CHCONF_EPOL;
/* set clock divisor */
l &= ~OMAP2_MCSPI_CHCONF_CLKD_MASK;
l |= clkd << 2;
/* set clock granularity */
l &= ~OMAP2_MCSPI_CHCONF_CLKG;
l |= clkg;
if (clkg) {
cs->chctrl0 &= ~OMAP2_MCSPI_CHCTRL_EXTCLK_MASK;
cs->chctrl0 |= extclk << 8;
mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, cs->chctrl0);
}
/* set SPI mode 0..3 */
if (spi->mode & SPI_CPOL)
l |= OMAP2_MCSPI_CHCONF_POL;
else
l &= ~OMAP2_MCSPI_CHCONF_POL;
if (spi->mode & SPI_CPHA)
l |= OMAP2_MCSPI_CHCONF_PHA;
else
l &= ~OMAP2_MCSPI_CHCONF_PHA;
mcspi_write_chconf0(spi, l);
cs->mode = spi->mode;
dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n",
speed_hz,
(spi->mode & SPI_CPHA) ? "trailing" : "leading",
(spi->mode & SPI_CPOL) ? "inverted" : "normal");
return 0;
}
/*
* Note that we currently allow DMA only if we get a channel
* for both rx and tx. Otherwise we'll do PIO for both rx and tx.
*/
static int omap2_mcspi_request_dma(struct omap2_mcspi *mcspi,
struct omap2_mcspi_dma *mcspi_dma)
{
int ret = 0;
mcspi_dma->dma_rx = dma_request_chan(mcspi->dev,
mcspi_dma->dma_rx_ch_name);
if (IS_ERR(mcspi_dma->dma_rx)) {
ret = PTR_ERR(mcspi_dma->dma_rx);
mcspi_dma->dma_rx = NULL;
goto no_dma;
}
mcspi_dma->dma_tx = dma_request_chan(mcspi->dev,
mcspi_dma->dma_tx_ch_name);
if (IS_ERR(mcspi_dma->dma_tx)) {
ret = PTR_ERR(mcspi_dma->dma_tx);
mcspi_dma->dma_tx = NULL;
dma_release_channel(mcspi_dma->dma_rx);
mcspi_dma->dma_rx = NULL;
}
init_completion(&mcspi_dma->dma_rx_completion);
init_completion(&mcspi_dma->dma_tx_completion);
no_dma:
return ret;
}
static void omap2_mcspi_release_dma(struct spi_master *master)
{
struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
struct omap2_mcspi_dma *mcspi_dma;
int i;
for (i = 0; i < master->num_chipselect; i++) {
mcspi_dma = &mcspi->dma_channels[i];
if (mcspi_dma->dma_rx) {
dma_release_channel(mcspi_dma->dma_rx);
mcspi_dma->dma_rx = NULL;
}
if (mcspi_dma->dma_tx) {
dma_release_channel(mcspi_dma->dma_tx);
mcspi_dma->dma_tx = NULL;
}
}
}
static void omap2_mcspi_cleanup(struct spi_device *spi)
{
struct omap2_mcspi_cs *cs;
if (spi->controller_state) {
/* Unlink controller state from context save list */
cs = spi->controller_state;
list_del(&cs->node);
kfree(cs);
}
}
static int omap2_mcspi_setup(struct spi_device *spi)
{
bool initial_setup = false;
int ret;
struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
struct omap2_mcspi_regs *ctx = &mcspi->ctx;
struct omap2_mcspi_cs *cs = spi->controller_state;
if (!cs) {
cs = kzalloc(sizeof(*cs), GFP_KERNEL);
if (!cs)
return -ENOMEM;
cs->base = mcspi->base + spi_get_chipselect(spi, 0) * 0x14;
cs->phys = mcspi->phys + spi_get_chipselect(spi, 0) * 0x14;
cs->mode = 0;
cs->chconf0 = 0;
cs->chctrl0 = 0;
spi->controller_state = cs;
/* Link this to context save list */
list_add_tail(&cs->node, &ctx->cs);
initial_setup = true;
}
ret = pm_runtime_resume_and_get(mcspi->dev);
if (ret < 0) {
if (initial_setup)
omap2_mcspi_cleanup(spi);
return ret;
}
ret = omap2_mcspi_setup_transfer(spi, NULL);
if (ret && initial_setup)
omap2_mcspi_cleanup(spi);
pm_runtime_mark_last_busy(mcspi->dev);
pm_runtime_put_autosuspend(mcspi->dev);
return ret;
}
static irqreturn_t omap2_mcspi_irq_handler(int irq, void *data)
{
struct omap2_mcspi *mcspi = data;
u32 irqstat;
irqstat = mcspi_read_reg(mcspi->master, OMAP2_MCSPI_IRQSTATUS);
if (!irqstat)
return IRQ_NONE;
/* Disable the IRQ and wake up the slave transfer task */
mcspi_write_reg(mcspi->master, OMAP2_MCSPI_IRQENABLE, 0);
if (irqstat & OMAP2_MCSPI_IRQSTATUS_EOW)
complete(&mcspi->txdone);
return IRQ_HANDLED;
}
static int omap2_mcspi_slave_abort(struct spi_master *master)
{
struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
struct omap2_mcspi_dma *mcspi_dma = mcspi->dma_channels;
mcspi->slave_aborted = true;
complete(&mcspi_dma->dma_rx_completion);
complete(&mcspi_dma->dma_tx_completion);
complete(&mcspi->txdone);
return 0;
}
static int omap2_mcspi_transfer_one(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *t)
{
/* We only enable one channel at a time -- the one whose message is
 * currently being handled -- although this controller would gladly
 * arbitrate among multiple channels. This corresponds to "single
 * channel" master mode. As a side effect, we need to manage the
 * chipselect with the FORCE bit ... CS != channel enable.
 */
struct omap2_mcspi *mcspi;
struct omap2_mcspi_dma *mcspi_dma;
struct omap2_mcspi_cs *cs;
struct omap2_mcspi_device_config *cd;
int par_override = 0;
int status = 0;
u32 chconf;
mcspi = spi_master_get_devdata(master);
mcspi_dma = mcspi->dma_channels + spi_get_chipselect(spi, 0);
cs = spi->controller_state;
cd = spi->controller_data;
/*
 * The slave driver could have changed spi->mode, in which case
 * it will differ from cs->mode (the current hardware setup).
 * If so, set par_override (even though it's not a parity issue) so
 * that omap2_mcspi_setup_transfer() is called to configure the
 * hardware with the correct mode before the transfer.
 */
if (spi->mode != cs->mode)
par_override = 1;
omap2_mcspi_set_enable(spi, 0);
if (spi_get_csgpiod(spi, 0))
omap2_mcspi_set_cs(spi, spi->mode & SPI_CS_HIGH);
if (par_override ||
(t->speed_hz != spi->max_speed_hz) ||
(t->bits_per_word != spi->bits_per_word)) {
par_override = 1;
status = omap2_mcspi_setup_transfer(spi, t);
if (status < 0)
goto out;
if (t->speed_hz == spi->max_speed_hz &&
t->bits_per_word == spi->bits_per_word)
par_override = 0;
}
if (cd && cd->cs_per_word) {
chconf = mcspi->ctx.modulctrl;
chconf &= ~OMAP2_MCSPI_MODULCTRL_SINGLE;
mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, chconf);
mcspi->ctx.modulctrl =
mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL);
}
chconf = mcspi_cached_chconf0(spi);
chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
chconf &= ~OMAP2_MCSPI_CHCONF_TURBO;
if (t->tx_buf == NULL)
chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY;
else if (t->rx_buf == NULL)
chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY;
if (cd && cd->turbo_mode && t->tx_buf == NULL) {
/* Turbo mode is for more than one word */
if (t->len > ((cs->word_len + 7) >> 3))
chconf |= OMAP2_MCSPI_CHCONF_TURBO;
}
mcspi_write_chconf0(spi, chconf);
if (t->len) {
unsigned count;
if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
master->cur_msg_mapped &&
master->can_dma(master, spi, t))
omap2_mcspi_set_fifo(spi, t, 1);
omap2_mcspi_set_enable(spi, 1);
/* RX_ONLY mode needs dummy data in TX reg */
if (t->tx_buf == NULL)
writel_relaxed(0, cs->base
+ OMAP2_MCSPI_TX0);
if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
master->cur_msg_mapped &&
master->can_dma(master, spi, t))
count = omap2_mcspi_txrx_dma(spi, t);
else
count = omap2_mcspi_txrx_pio(spi, t);
if (count != t->len) {
status = -EIO;
goto out;
}
}
omap2_mcspi_set_enable(spi, 0);
if (mcspi->fifo_depth > 0)
omap2_mcspi_set_fifo(spi, t, 0);
out:
/* Restore defaults if they were overridden */
if (par_override) {
par_override = 0;
status = omap2_mcspi_setup_transfer(spi, NULL);
}
if (cd && cd->cs_per_word) {
chconf = mcspi->ctx.modulctrl;
chconf |= OMAP2_MCSPI_MODULCTRL_SINGLE;
mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, chconf);
mcspi->ctx.modulctrl =
mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL);
}
omap2_mcspi_set_enable(spi, 0);
if (spi_get_csgpiod(spi, 0))
omap2_mcspi_set_cs(spi, !(spi->mode & SPI_CS_HIGH));
if (mcspi->fifo_depth > 0 && t)
omap2_mcspi_set_fifo(spi, t, 0);
return status;
}
static int omap2_mcspi_prepare_message(struct spi_master *master,
struct spi_message *msg)
{
struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
struct omap2_mcspi_regs *ctx = &mcspi->ctx;
struct omap2_mcspi_cs *cs;
/* Only a single channel can have the FORCE bit enabled
 * in its chconf0 register.
 * Scan all channels and clear the bit on all but the current one.
 * A FORCE bit can remain set from a previous transfer that had
 * cs_change enabled.
 */
list_for_each_entry(cs, &ctx->cs, node) {
if (msg->spi->controller_state == cs)
continue;
if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE)) {
cs->chconf0 &= ~OMAP2_MCSPI_CHCONF_FORCE;
writel_relaxed(cs->chconf0,
cs->base + OMAP2_MCSPI_CHCONF0);
readl_relaxed(cs->base + OMAP2_MCSPI_CHCONF0);
}
}
return 0;
}
static bool omap2_mcspi_can_dma(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
struct omap2_mcspi_dma *mcspi_dma =
&mcspi->dma_channels[spi_get_chipselect(spi, 0)];
if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx)
return false;
if (spi_controller_is_slave(master))
return true;
master->dma_rx = mcspi_dma->dma_rx;
master->dma_tx = mcspi_dma->dma_tx;
return (xfer->len >= DMA_MIN_BYTES);
}
static size_t omap2_mcspi_max_xfer_size(struct spi_device *spi)
{
struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
struct omap2_mcspi_dma *mcspi_dma =
&mcspi->dma_channels[spi_get_chipselect(spi, 0)];
if (mcspi->max_xfer_len && mcspi_dma->dma_rx)
return mcspi->max_xfer_len;
return SIZE_MAX;
}
static int omap2_mcspi_controller_setup(struct omap2_mcspi *mcspi)
{
struct spi_master *master = mcspi->master;
struct omap2_mcspi_regs *ctx = &mcspi->ctx;
int ret = 0;
ret = pm_runtime_resume_and_get(mcspi->dev);
if (ret < 0)
return ret;
mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE,
OMAP2_MCSPI_WAKEUPENABLE_WKEN);
ctx->wakeupenable = OMAP2_MCSPI_WAKEUPENABLE_WKEN;
omap2_mcspi_set_mode(master);
pm_runtime_mark_last_busy(mcspi->dev);
pm_runtime_put_autosuspend(mcspi->dev);
return 0;
}
static int omap_mcspi_runtime_suspend(struct device *dev)
{
int error;
error = pinctrl_pm_select_idle_state(dev);
if (error)
dev_warn(dev, "%s: failed to set pins: %i\n", __func__, error);
return 0;
}
/*
 * When the SPI controller wakes up from off-mode, CS is in the active
 * state. If it was inactive when the driver was suspended, force it
 * back to the inactive state at wake-up.
 */
static int omap_mcspi_runtime_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
struct omap2_mcspi_regs *ctx = &mcspi->ctx;
struct omap2_mcspi_cs *cs;
int error;
error = pinctrl_pm_select_default_state(dev);
if (error)
dev_warn(dev, "%s: failed to set pins: %i\n", __func__, error);
/* McSPI: context restore */
mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, ctx->modulctrl);
mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE, ctx->wakeupenable);
list_for_each_entry(cs, &ctx->cs, node) {
/*
 * We need to toggle the CS state for the OMAP to take
 * this change into account.
 */
if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE) == 0) {
cs->chconf0 |= OMAP2_MCSPI_CHCONF_FORCE;
writel_relaxed(cs->chconf0,
cs->base + OMAP2_MCSPI_CHCONF0);
cs->chconf0 &= ~OMAP2_MCSPI_CHCONF_FORCE;
writel_relaxed(cs->chconf0,
cs->base + OMAP2_MCSPI_CHCONF0);
} else {
writel_relaxed(cs->chconf0,
cs->base + OMAP2_MCSPI_CHCONF0);
}
}
return 0;
}
static struct omap2_mcspi_platform_config omap2_pdata = {
.regs_offset = 0,
};
static struct omap2_mcspi_platform_config omap4_pdata = {
.regs_offset = OMAP4_MCSPI_REG_OFFSET,
};
static struct omap2_mcspi_platform_config am654_pdata = {
.regs_offset = OMAP4_MCSPI_REG_OFFSET,
.max_xfer_len = SZ_4K - 1,
};
static const struct of_device_id omap_mcspi_of_match[] = {
{
.compatible = "ti,omap2-mcspi",
.data = &omap2_pdata,
},
{
.compatible = "ti,omap4-mcspi",
.data = &omap4_pdata,
},
{
.compatible = "ti,am654-mcspi",
.data = &am654_pdata,
},
{ },
};
MODULE_DEVICE_TABLE(of, omap_mcspi_of_match);
static int omap2_mcspi_probe(struct platform_device *pdev)
{
struct spi_master *master;
const struct omap2_mcspi_platform_config *pdata;
struct omap2_mcspi *mcspi;
struct resource *r;
int status = 0, i;
u32 regs_offset = 0;
struct device_node *node = pdev->dev.of_node;
const struct of_device_id *match;
if (of_property_read_bool(node, "spi-slave"))
master = spi_alloc_slave(&pdev->dev, sizeof(*mcspi));
else
master = spi_alloc_master(&pdev->dev, sizeof(*mcspi));
if (!master)
return -ENOMEM;
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
master->setup = omap2_mcspi_setup;
master->auto_runtime_pm = true;
master->prepare_message = omap2_mcspi_prepare_message;
master->can_dma = omap2_mcspi_can_dma;
master->transfer_one = omap2_mcspi_transfer_one;
master->set_cs = omap2_mcspi_set_cs;
master->cleanup = omap2_mcspi_cleanup;
master->slave_abort = omap2_mcspi_slave_abort;
master->dev.of_node = node;
master->max_speed_hz = OMAP2_MCSPI_MAX_FREQ;
master->min_speed_hz = OMAP2_MCSPI_MAX_FREQ >> 15;
master->use_gpio_descriptors = true;
platform_set_drvdata(pdev, master);
mcspi = spi_master_get_devdata(master);
mcspi->master = master;
match = of_match_device(omap_mcspi_of_match, &pdev->dev);
if (match) {
u32 num_cs = 1; /* default number of chipselect */
pdata = match->data;
of_property_read_u32(node, "ti,spi-num-cs", &num_cs);
master->num_chipselect = num_cs;
if (of_property_read_bool(node, "ti,pindir-d0-out-d1-in"))
mcspi->pin_dir = MCSPI_PINDIR_D0_OUT_D1_IN;
} else {
pdata = dev_get_platdata(&pdev->dev);
master->num_chipselect = pdata->num_cs;
mcspi->pin_dir = pdata->pin_dir;
}
regs_offset = pdata->regs_offset;
if (pdata->max_xfer_len) {
mcspi->max_xfer_len = pdata->max_xfer_len;
master->max_transfer_size = omap2_mcspi_max_xfer_size;
}
mcspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
if (IS_ERR(mcspi->base)) {
status = PTR_ERR(mcspi->base);
goto free_master;
}
mcspi->phys = r->start + regs_offset;
mcspi->base += regs_offset;
mcspi->dev = &pdev->dev;
INIT_LIST_HEAD(&mcspi->ctx.cs);
mcspi->dma_channels = devm_kcalloc(&pdev->dev, master->num_chipselect,
sizeof(struct omap2_mcspi_dma),
GFP_KERNEL);
if (mcspi->dma_channels == NULL) {
status = -ENOMEM;
goto free_master;
}
for (i = 0; i < master->num_chipselect; i++) {
sprintf(mcspi->dma_channels[i].dma_rx_ch_name, "rx%d", i);
sprintf(mcspi->dma_channels[i].dma_tx_ch_name, "tx%d", i);
status = omap2_mcspi_request_dma(mcspi,
&mcspi->dma_channels[i]);
if (status == -EPROBE_DEFER)
goto free_master;
}
status = platform_get_irq(pdev, 0);
if (status < 0)
goto free_master;
init_completion(&mcspi->txdone);
status = devm_request_irq(&pdev->dev, status,
omap2_mcspi_irq_handler, 0, pdev->name,
mcspi);
if (status) {
dev_err(&pdev->dev, "Cannot request IRQ");
goto free_master;
}
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
pm_runtime_enable(&pdev->dev);
status = omap2_mcspi_controller_setup(mcspi);
if (status < 0)
goto disable_pm;
status = devm_spi_register_controller(&pdev->dev, master);
if (status < 0)
goto disable_pm;
return status;
disable_pm:
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
free_master:
omap2_mcspi_release_dma(master);
spi_master_put(master);
return status;
}
static void omap2_mcspi_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
omap2_mcspi_release_dma(master);
pm_runtime_dont_use_autosuspend(mcspi->dev);
pm_runtime_put_sync(mcspi->dev);
pm_runtime_disable(&pdev->dev);
}
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:omap2_mcspi");
static int __maybe_unused omap2_mcspi_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
int error;
error = pinctrl_pm_select_sleep_state(dev);
if (error)
dev_warn(mcspi->dev, "%s: failed to set pins: %i\n",
__func__, error);
error = spi_master_suspend(master);
if (error)
dev_warn(mcspi->dev, "%s: master suspend failed: %i\n",
__func__, error);
return pm_runtime_force_suspend(dev);
}
static int __maybe_unused omap2_mcspi_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
int error;
error = spi_master_resume(master);
if (error)
dev_warn(mcspi->dev, "%s: master resume failed: %i\n",
__func__, error);
return pm_runtime_force_resume(dev);
}
static const struct dev_pm_ops omap2_mcspi_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(omap2_mcspi_suspend,
omap2_mcspi_resume)
.runtime_suspend = omap_mcspi_runtime_suspend,
.runtime_resume = omap_mcspi_runtime_resume,
};
static struct platform_driver omap2_mcspi_driver = {
.driver = {
.name = "omap2_mcspi",
.pm = &omap2_mcspi_pm_ops,
.of_match_table = omap_mcspi_of_match,
},
.probe = omap2_mcspi_probe,
.remove_new = omap2_mcspi_remove,
};
module_platform_driver(omap2_mcspi_driver);
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-omap2-mcspi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* SPI_PPC4XX SPI controller driver.
*
* Copyright (C) 2007 Gary Jennejohn <[email protected]>
* Copyright 2008 Stefan Roese <[email protected]>, DENX Software Engineering
* Copyright 2009 Harris Corporation, Steven A. Falco <[email protected]>
*
* Based in part on drivers/spi/spi_s3c24xx.c
*
* Copyright (c) 2006 Ben Dooks
* Copyright (c) 2006 Simtec Electronics
* Ben Dooks <[email protected]>
*/
/*
* The PPC4xx SPI controller has no FIFO so each sent/received byte will
* generate an interrupt to the CPU. This can cause high CPU utilization.
* This driver allows platforms to reduce the interrupt load on the CPU
* during SPI transfers by setting max_speed_hz via the device tree.
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/io.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
/* bits in mode register - bit 0 is MSb */
/*
* SPI_PPC4XX_MODE_SCP = 0 means "data latched on trailing edge of clock"
* SPI_PPC4XX_MODE_SCP = 1 means "data latched on leading edge of clock"
* Note: This is the inverse of CPHA.
*/
#define SPI_PPC4XX_MODE_SCP (0x80 >> 3)
/* SPI_PPC4XX_MODE_SPE = 1 means "port enabled" */
#define SPI_PPC4XX_MODE_SPE (0x80 >> 4)
/*
* SPI_PPC4XX_MODE_RD = 0 means "MSB first" - this is the normal mode
* SPI_PPC4XX_MODE_RD = 1 means "LSB first" - this is bit-reversed mode
* Note: This is identical to SPI_LSB_FIRST.
*/
#define SPI_PPC4XX_MODE_RD (0x80 >> 5)
/*
* SPI_PPC4XX_MODE_CI = 0 means "clock idles low"
* SPI_PPC4XX_MODE_CI = 1 means "clock idles high"
* Note: This is identical to CPOL.
*/
#define SPI_PPC4XX_MODE_CI (0x80 >> 6)
/*
* SPI_PPC4XX_MODE_IL = 0 means "loopback disable"
* SPI_PPC4XX_MODE_IL = 1 means "loopback enable"
*/
#define SPI_PPC4XX_MODE_IL (0x80 >> 7)
/* bits in control register */
/* starts a transfer when set */
#define SPI_PPC4XX_CR_STR (0x80 >> 7)
/* bits in status register */
/* port is busy with a transfer */
#define SPI_PPC4XX_SR_BSY (0x80 >> 6)
/* RxD ready */
#define SPI_PPC4XX_SR_RBR (0x80 >> 7)
/* clock settings (SCP and CI) for various SPI modes */
#define SPI_CLK_MODE0 (SPI_PPC4XX_MODE_SCP | 0)
#define SPI_CLK_MODE1 (0 | 0)
#define SPI_CLK_MODE2 (SPI_PPC4XX_MODE_SCP | SPI_PPC4XX_MODE_CI)
#define SPI_CLK_MODE3 (0 | SPI_PPC4XX_MODE_CI)
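/*
 * Putting the notes above together: SPI_MODE_0 (CPOL = 0, CPHA = 0) maps
 * to SCP = 1 (the inverse of CPHA) and CI = 0, i.e. SPI_CLK_MODE0, while
 * SPI_MODE_3 (CPOL = 1, CPHA = 1) maps to SCP = 0 and CI = 1, i.e.
 * SPI_CLK_MODE3.
 */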
#define DRIVER_NAME "spi_ppc4xx_of"
struct spi_ppc4xx_regs {
u8 mode;
u8 rxd;
u8 txd;
u8 cr;
u8 sr;
u8 dummy;
/*
* Clock divisor modulus register
* This uses the following formula:
* SCPClkOut = OPBCLK/(4(CDM + 1))
* or
* CDM = OPBCLK / (4 * SCPClkOut) - 1
* bit 0 is the MSb!
*/
u8 cdm;
};
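/*
 * Example using the cdm formula above (illustrative values): with a
 * 100 MHz OPB clock and a desired SCPClkOut of 1 MHz,
 * CDM = 100 MHz / (4 * 1 MHz) - 1 = 24. This matches the driver, which
 * pre-divides opb_freq by four and then programs (opb_freq / speed) - 1
 * in spi_ppc4xx_setupxfer().
 */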
/* SPI Controller driver's private data. */
struct ppc4xx_spi {
/* bitbang has to be first */
struct spi_bitbang bitbang;
struct completion done;
u64 mapbase;
u64 mapsize;
int irqnum;
/* need this to set the SPI clock */
unsigned int opb_freq;
/* for transfers */
int len;
int count;
/* data buffers */
const unsigned char *tx;
unsigned char *rx;
struct spi_ppc4xx_regs __iomem *regs; /* pointer to the registers */
struct spi_controller *host;
struct device *dev;
};
/* need this so we can set the clock in the chipselect routine */
struct spi_ppc4xx_cs {
u8 mode;
};
static int spi_ppc4xx_txrx(struct spi_device *spi, struct spi_transfer *t)
{
struct ppc4xx_spi *hw;
u8 data;
dev_dbg(&spi->dev, "txrx: tx %p, rx %p, len %d\n",
t->tx_buf, t->rx_buf, t->len);
hw = spi_controller_get_devdata(spi->controller);
hw->tx = t->tx_buf;
hw->rx = t->rx_buf;
hw->len = t->len;
hw->count = 0;
/* send the first byte */
data = hw->tx ? hw->tx[0] : 0;
out_8(&hw->regs->txd, data);
out_8(&hw->regs->cr, SPI_PPC4XX_CR_STR);
wait_for_completion(&hw->done);
return hw->count;
}
static int spi_ppc4xx_setupxfer(struct spi_device *spi, struct spi_transfer *t)
{
struct ppc4xx_spi *hw = spi_controller_get_devdata(spi->controller);
struct spi_ppc4xx_cs *cs = spi->controller_state;
int scr;
u8 cdm = 0;
u32 speed;
u8 bits_per_word;
/* Start with the generic configuration for this device. */
bits_per_word = spi->bits_per_word;
speed = spi->max_speed_hz;
/*
* Modify the configuration if the transfer overrides it. Do not allow
* the transfer to overwrite the generic configuration with zeros.
*/
if (t) {
if (t->bits_per_word)
bits_per_word = t->bits_per_word;
if (t->speed_hz)
speed = min(t->speed_hz, spi->max_speed_hz);
}
if (!speed || (speed > spi->max_speed_hz)) {
dev_err(&spi->dev, "invalid speed_hz (%d)\n", speed);
return -EINVAL;
}
/* Write new configuration */
out_8(&hw->regs->mode, cs->mode);
/* Set the clock */
/* opb_freq was already divided by 4 */
scr = (hw->opb_freq / speed) - 1;
if (scr > 0)
cdm = min(scr, 0xff);
dev_dbg(&spi->dev, "setting pre-scaler to %d (hz %d)\n", cdm, speed);
if (in_8(&hw->regs->cdm) != cdm)
out_8(&hw->regs->cdm, cdm);
mutex_lock(&hw->bitbang.lock);
if (!hw->bitbang.busy) {
hw->bitbang.chipselect(spi, BITBANG_CS_INACTIVE);
/* Need to ndelay here? */
}
mutex_unlock(&hw->bitbang.lock);
return 0;
}
static int spi_ppc4xx_setup(struct spi_device *spi)
{
struct spi_ppc4xx_cs *cs = spi->controller_state;
if (!spi->max_speed_hz) {
dev_err(&spi->dev, "invalid max_speed_hz (must be non-zero)\n");
return -EINVAL;
}
if (cs == NULL) {
cs = kzalloc(sizeof(*cs), GFP_KERNEL);
if (!cs)
return -ENOMEM;
spi->controller_state = cs;
}
/*
 * We set all bits of the SPI0_MODE register, so there is
 * no need to read-modify-write.
 */
cs->mode = SPI_PPC4XX_MODE_SPE;
switch (spi->mode & SPI_MODE_X_MASK) {
case SPI_MODE_0:
cs->mode |= SPI_CLK_MODE0;
break;
case SPI_MODE_1:
cs->mode |= SPI_CLK_MODE1;
break;
case SPI_MODE_2:
cs->mode |= SPI_CLK_MODE2;
break;
case SPI_MODE_3:
cs->mode |= SPI_CLK_MODE3;
break;
}
if (spi->mode & SPI_LSB_FIRST)
cs->mode |= SPI_PPC4XX_MODE_RD;
return 0;
}
static irqreturn_t spi_ppc4xx_int(int irq, void *dev_id)
{
struct ppc4xx_spi *hw;
u8 status;
u8 data;
unsigned int count;
hw = (struct ppc4xx_spi *)dev_id;
status = in_8(&hw->regs->sr);
if (!status)
return IRQ_NONE;
/*
* BSY de-asserts one cycle after the transfer is complete. The
* interrupt is asserted after the transfer is complete. The exact
* relationship is not documented, hence this code.
*/
if (unlikely(status & SPI_PPC4XX_SR_BSY)) {
u8 lstatus;
int cnt = 0;
dev_dbg(hw->dev, "got interrupt but spi still busy?\n");
do {
ndelay(10);
lstatus = in_8(&hw->regs->sr);
} while (++cnt < 100 && lstatus & SPI_PPC4XX_SR_BSY);
if (cnt >= 100) {
dev_err(hw->dev, "busywait: too many loops!\n");
complete(&hw->done);
return IRQ_HANDLED;
} else {
/* status is always 1 (RBR) here */
status = in_8(&hw->regs->sr);
dev_dbg(hw->dev, "loops %d status %x\n", cnt, status);
}
}
count = hw->count;
hw->count++;
/* RBR triggered this interrupt. Therefore, data must be ready. */
data = in_8(&hw->regs->rxd);
if (hw->rx)
hw->rx[count] = data;
count++;
if (count < hw->len) {
data = hw->tx ? hw->tx[count] : 0;
out_8(&hw->regs->txd, data);
out_8(&hw->regs->cr, SPI_PPC4XX_CR_STR);
} else {
complete(&hw->done);
}
return IRQ_HANDLED;
}
static void spi_ppc4xx_cleanup(struct spi_device *spi)
{
kfree(spi->controller_state);
}
static void spi_ppc4xx_enable(struct ppc4xx_spi *hw)
{
/*
 * On all 4xx PPCs the SPI bus is shared/multiplexed with
 * the 2nd I2C bus. We need to enable the SPI bus before
 * using it.
 */
/* need to clear bit 14 to enable SPC */
dcri_clrset(SDR0, SDR0_PFC1, 0x80000000 >> 14, 0);
}
/*
* platform_device layer stuff...
*/
static int spi_ppc4xx_of_probe(struct platform_device *op)
{
struct ppc4xx_spi *hw;
struct spi_controller *host;
struct spi_bitbang *bbp;
struct resource resource;
struct device_node *np = op->dev.of_node;
struct device *dev = &op->dev;
struct device_node *opbnp;
int ret;
const unsigned int *clk;
host = spi_alloc_host(dev, sizeof(*hw));
if (host == NULL)
return -ENOMEM;
host->dev.of_node = np;
platform_set_drvdata(op, host);
hw = spi_controller_get_devdata(host);
hw->host = host;
hw->dev = dev;
init_completion(&hw->done);
/* Setup the state for the bitbang driver */
bbp = &hw->bitbang;
bbp->master = hw->host;
bbp->setup_transfer = spi_ppc4xx_setupxfer;
bbp->txrx_bufs = spi_ppc4xx_txrx;
bbp->use_dma = 0;
bbp->master->setup = spi_ppc4xx_setup;
bbp->master->cleanup = spi_ppc4xx_cleanup;
bbp->master->bits_per_word_mask = SPI_BPW_MASK(8);
bbp->master->use_gpio_descriptors = true;
/*
* The SPI core will count the number of GPIO descriptors to figure
* out the number of chip selects available on the platform.
*/
bbp->master->num_chipselect = 0;
/* the spi->mode bits understood by this driver: */
bbp->master->mode_bits =
SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST;
/* Get the clock for the OPB */
opbnp = of_find_compatible_node(NULL, NULL, "ibm,opb");
if (opbnp == NULL) {
dev_err(dev, "OPB: cannot find node\n");
ret = -ENODEV;
goto free_host;
}
/* Get the clock (Hz) for the OPB */
clk = of_get_property(opbnp, "clock-frequency", NULL);
if (clk == NULL) {
dev_err(dev, "OPB: no clock-frequency property set\n");
of_node_put(opbnp);
ret = -ENODEV;
goto free_host;
}
hw->opb_freq = *clk;
hw->opb_freq >>= 2;
of_node_put(opbnp);
ret = of_address_to_resource(np, 0, &resource);
if (ret) {
dev_err(dev, "error while parsing device node resource\n");
goto free_host;
}
hw->mapbase = resource.start;
hw->mapsize = resource_size(&resource);
/* Sanity check */
if (hw->mapsize < sizeof(struct spi_ppc4xx_regs)) {
dev_err(dev, "too small to map registers\n");
ret = -EINVAL;
goto free_host;
}
/* Request IRQ */
hw->irqnum = irq_of_parse_and_map(np, 0);
ret = request_irq(hw->irqnum, spi_ppc4xx_int,
0, "spi_ppc4xx_of", (void *)hw);
if (ret) {
dev_err(dev, "unable to allocate interrupt\n");
goto free_host;
}
if (!request_mem_region(hw->mapbase, hw->mapsize, DRIVER_NAME)) {
dev_err(dev, "resource unavailable\n");
ret = -EBUSY;
goto request_mem_error;
}
hw->regs = ioremap(hw->mapbase, sizeof(struct spi_ppc4xx_regs));
if (!hw->regs) {
dev_err(dev, "unable to memory map registers\n");
ret = -ENXIO;
goto map_io_error;
}
spi_ppc4xx_enable(hw);
/* Finally register our spi controller */
dev->dma_mask = 0;
ret = spi_bitbang_start(bbp);
if (ret) {
dev_err(dev, "failed to register SPI host\n");
goto unmap_regs;
}
dev_info(dev, "driver initialized\n");
return 0;
unmap_regs:
iounmap(hw->regs);
map_io_error:
release_mem_region(hw->mapbase, hw->mapsize);
request_mem_error:
free_irq(hw->irqnum, hw);
free_host:
spi_controller_put(host);
dev_err(dev, "initialization failed\n");
return ret;
}
static void spi_ppc4xx_of_remove(struct platform_device *op)
{
struct spi_controller *host = platform_get_drvdata(op);
struct ppc4xx_spi *hw = spi_controller_get_devdata(host);
spi_bitbang_stop(&hw->bitbang);
release_mem_region(hw->mapbase, hw->mapsize);
free_irq(hw->irqnum, hw);
iounmap(hw->regs);
spi_controller_put(host);
}
static const struct of_device_id spi_ppc4xx_of_match[] = {
{ .compatible = "ibm,ppc4xx-spi", },
{},
};
MODULE_DEVICE_TABLE(of, spi_ppc4xx_of_match);
static struct platform_driver spi_ppc4xx_of_driver = {
.probe = spi_ppc4xx_of_probe,
.remove_new = spi_ppc4xx_of_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = spi_ppc4xx_of_match,
},
};
module_platform_driver(spi_ppc4xx_of_driver);
MODULE_AUTHOR("Gary Jennejohn & Stefan Roese");
MODULE_DESCRIPTION("Simple PPC4xx SPI Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-ppc4xx.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Driver for Broadcom BCM2835 auxiliary SPI Controllers
*
* the driver does not rely on the native chipselects at all
* but only uses the gpio type chipselects
*
* Based on: spi-bcm2835.c
*
* Copyright (C) 2015 Martin Sperl
*/
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
#include <linux/spinlock.h>
/* define polling limits */
static unsigned int polling_limit_us = 30;
module_param(polling_limit_us, uint, 0664);
MODULE_PARM_DESC(polling_limit_us,
"time in us to run a transfer in polling mode - if zero no polling is used\n");
/*
* spi register defines
*
* note there is garbage in the "official" documentation,
* so some data is taken from the file:
* brcm_usrlib/dag/vmcsx/vcinclude/bcm2708_chip/aux_io.h
* inside of:
* http://www.broadcom.com/docs/support/videocore/Brcm_Android_ICS_Graphics_Stack.tar.gz
*/
/* SPI register offsets */
#define BCM2835_AUX_SPI_CNTL0 0x00
#define BCM2835_AUX_SPI_CNTL1 0x04
#define BCM2835_AUX_SPI_STAT 0x08
#define BCM2835_AUX_SPI_PEEK 0x0C
#define BCM2835_AUX_SPI_IO 0x20
#define BCM2835_AUX_SPI_TXHOLD 0x30
/* Bitfields in CNTL0 */
#define BCM2835_AUX_SPI_CNTL0_SPEED 0xFFF00000
#define BCM2835_AUX_SPI_CNTL0_SPEED_MAX 0xFFF
#define BCM2835_AUX_SPI_CNTL0_SPEED_SHIFT 20
#define BCM2835_AUX_SPI_CNTL0_CS 0x000E0000
#define BCM2835_AUX_SPI_CNTL0_POSTINPUT 0x00010000
#define BCM2835_AUX_SPI_CNTL0_VAR_CS 0x00008000
#define BCM2835_AUX_SPI_CNTL0_VAR_WIDTH 0x00004000
#define BCM2835_AUX_SPI_CNTL0_DOUTHOLD 0x00003000
#define BCM2835_AUX_SPI_CNTL0_ENABLE 0x00000800
#define BCM2835_AUX_SPI_CNTL0_IN_RISING 0x00000400
#define BCM2835_AUX_SPI_CNTL0_CLEARFIFO 0x00000200
#define BCM2835_AUX_SPI_CNTL0_OUT_RISING 0x00000100
#define BCM2835_AUX_SPI_CNTL0_CPOL 0x00000080
#define BCM2835_AUX_SPI_CNTL0_MSBF_OUT 0x00000040
#define BCM2835_AUX_SPI_CNTL0_SHIFTLEN 0x0000003F
/* Bitfields in CNTL1 */
#define BCM2835_AUX_SPI_CNTL1_CSHIGH 0x00000700
#define BCM2835_AUX_SPI_CNTL1_TXEMPTY 0x00000080
#define BCM2835_AUX_SPI_CNTL1_IDLE 0x00000040
#define BCM2835_AUX_SPI_CNTL1_MSBF_IN 0x00000002
#define BCM2835_AUX_SPI_CNTL1_KEEP_IN 0x00000001
/* Bitfields in STAT */
#define BCM2835_AUX_SPI_STAT_TX_LVL 0xFF000000
#define BCM2835_AUX_SPI_STAT_RX_LVL 0x00FF0000
#define BCM2835_AUX_SPI_STAT_TX_FULL 0x00000400
#define BCM2835_AUX_SPI_STAT_TX_EMPTY 0x00000200
#define BCM2835_AUX_SPI_STAT_RX_FULL 0x00000100
#define BCM2835_AUX_SPI_STAT_RX_EMPTY 0x00000080
#define BCM2835_AUX_SPI_STAT_BUSY 0x00000040
#define BCM2835_AUX_SPI_STAT_BITCOUNT 0x0000003F
struct bcm2835aux_spi {
void __iomem *regs;
struct clk *clk;
int irq;
u32 cntl[2];
const u8 *tx_buf;
u8 *rx_buf;
int tx_len;
int rx_len;
int pending;
u64 count_transfer_polling;
u64 count_transfer_irq;
u64 count_transfer_irq_after_poll;
struct dentry *debugfs_dir;
};
#if defined(CONFIG_DEBUG_FS)
static void bcm2835aux_debugfs_create(struct bcm2835aux_spi *bs,
const char *dname)
{
char name[64];
struct dentry *dir;
/* get full name */
snprintf(name, sizeof(name), "spi-bcm2835aux-%s", dname);
/* the base directory */
dir = debugfs_create_dir(name, NULL);
bs->debugfs_dir = dir;
/* the counters */
debugfs_create_u64("count_transfer_polling", 0444, dir,
&bs->count_transfer_polling);
debugfs_create_u64("count_transfer_irq", 0444, dir,
&bs->count_transfer_irq);
debugfs_create_u64("count_transfer_irq_after_poll", 0444, dir,
&bs->count_transfer_irq_after_poll);
}
static void bcm2835aux_debugfs_remove(struct bcm2835aux_spi *bs)
{
debugfs_remove_recursive(bs->debugfs_dir);
bs->debugfs_dir = NULL;
}
#else
static void bcm2835aux_debugfs_create(struct bcm2835aux_spi *bs,
const char *dname)
{
}
static void bcm2835aux_debugfs_remove(struct bcm2835aux_spi *bs)
{
}
#endif /* CONFIG_DEBUG_FS */
static inline u32 bcm2835aux_rd(struct bcm2835aux_spi *bs, unsigned int reg)
{
return readl(bs->regs + reg);
}
static inline void bcm2835aux_wr(struct bcm2835aux_spi *bs, unsigned int reg,
u32 val)
{
writel(val, bs->regs + reg);
}
static inline void bcm2835aux_rd_fifo(struct bcm2835aux_spi *bs)
{
u32 data;
int count = min(bs->rx_len, 3);
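	/* one IO register read returns up to 3 received bytes, earliest byte in bits 23:16 */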
data = bcm2835aux_rd(bs, BCM2835_AUX_SPI_IO);
if (bs->rx_buf) {
switch (count) {
case 3:
*bs->rx_buf++ = (data >> 16) & 0xff;
fallthrough;
case 2:
*bs->rx_buf++ = (data >> 8) & 0xff;
fallthrough;
case 1:
*bs->rx_buf++ = (data >> 0) & 0xff;
/* fallthrough - no default */
}
}
bs->rx_len -= count;
bs->pending -= count;
}
static inline void bcm2835aux_wr_fifo(struct bcm2835aux_spi *bs)
{
u32 data;
u8 byte;
int count;
int i;
/* gather up to 3 bytes to write to the FIFO */
count = min(bs->tx_len, 3);
data = 0;
for (i = 0; i < count; i++) {
byte = bs->tx_buf ? *bs->tx_buf++ : 0;
data |= byte << (8 * (2 - i));
}
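	/* the first queued byte sits in bits 23:16, so it is shifted out first (MSB-first on the wire) */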
/* and set the variable bit-length */
data |= (count * 8) << 24;
/* and decrement length */
bs->tx_len -= count;
bs->pending += count;
/* write to the correct TX-register */
if (bs->tx_len)
bcm2835aux_wr(bs, BCM2835_AUX_SPI_TXHOLD, data);
else
bcm2835aux_wr(bs, BCM2835_AUX_SPI_IO, data);
}
static void bcm2835aux_spi_reset_hw(struct bcm2835aux_spi *bs)
{
/* disable spi clearing fifo and interrupts */
bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, 0);
bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL0,
BCM2835_AUX_SPI_CNTL0_CLEARFIFO);
}
static void bcm2835aux_spi_transfer_helper(struct bcm2835aux_spi *bs)
{
u32 stat = bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT);
/* check if we have data to read */
for (; bs->rx_len && (stat & BCM2835_AUX_SPI_STAT_RX_LVL);
stat = bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT))
bcm2835aux_rd_fifo(bs);
/* check if we have data to write */
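	/* cap outstanding bytes at 12 (presumably 4 FIFO entries of up to 3 bytes each) so the Tx FIFO is never overfilled */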
while (bs->tx_len &&
(bs->pending < 12) &&
(!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
BCM2835_AUX_SPI_STAT_TX_FULL))) {
bcm2835aux_wr_fifo(bs);
}
}
static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
{
struct spi_controller *host = dev_id;
struct bcm2835aux_spi *bs = spi_controller_get_devdata(host);
/* IRQ may be shared, so return if our interrupts are disabled */
if (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_CNTL1) &
(BCM2835_AUX_SPI_CNTL1_TXEMPTY | BCM2835_AUX_SPI_CNTL1_IDLE)))
return IRQ_NONE;
/* do common fifo handling */
bcm2835aux_spi_transfer_helper(bs);
if (!bs->tx_len) {
/* disable tx fifo empty interrupt */
bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1] |
BCM2835_AUX_SPI_CNTL1_IDLE);
}
/* and if rx_len is 0 then disable interrupts and wake up completion */
if (!bs->rx_len) {
bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1]);
spi_finalize_current_transfer(host);
}
return IRQ_HANDLED;
}
static int __bcm2835aux_spi_transfer_one_irq(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *tfr)
{
struct bcm2835aux_spi *bs = spi_controller_get_devdata(host);
/* enable interrupts */
bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1] |
BCM2835_AUX_SPI_CNTL1_TXEMPTY |
BCM2835_AUX_SPI_CNTL1_IDLE);
/* and wait for finish... */
return 1;
}
static int bcm2835aux_spi_transfer_one_irq(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *tfr)
{
struct bcm2835aux_spi *bs = spi_controller_get_devdata(host);
/* update statistics */
bs->count_transfer_irq++;
/* fill in registers and fifos before enabling interrupts */
bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1]);
bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL0, bs->cntl[0]);
/* fill in tx fifo with data before enabling interrupts */
while ((bs->tx_len) &&
(bs->pending < 12) &&
(!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
BCM2835_AUX_SPI_STAT_TX_FULL))) {
bcm2835aux_wr_fifo(bs);
}
/* now run the interrupt mode */
return __bcm2835aux_spi_transfer_one_irq(host, spi, tfr);
}
static int bcm2835aux_spi_transfer_one_poll(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *tfr)
{
struct bcm2835aux_spi *bs = spi_controller_get_devdata(host);
unsigned long timeout;
/* update statistics */
bs->count_transfer_polling++;
/* configure spi */
bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1]);
bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL0, bs->cntl[0]);
/* set the timeout to at least 2 jiffies */
timeout = jiffies + 2 + HZ * polling_limit_us / 1000000;
/* loop until finished the transfer */
while (bs->rx_len) {
/* do common fifo handling */
bcm2835aux_spi_transfer_helper(bs);
/* there is still data pending to read check the timeout */
if (bs->rx_len && time_after(jiffies, timeout)) {
dev_dbg_ratelimited(&spi->dev,
"timeout period reached: jiffies: %lu remaining tx/rx: %d/%d - falling back to interrupt mode\n",
jiffies - timeout,
bs->tx_len, bs->rx_len);
/* forward to interrupt handler */
bs->count_transfer_irq_after_poll++;
return __bcm2835aux_spi_transfer_one_irq(host,
spi, tfr);
}
}
/* and return without waiting for completion */
return 0;
}
static int bcm2835aux_spi_transfer_one(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *tfr)
{
struct bcm2835aux_spi *bs = spi_controller_get_devdata(host);
unsigned long spi_hz, clk_hz, speed;
unsigned long hz_per_byte, byte_limit;
/* calculate the registers to handle
*
* note that we use the variable data mode, which
* is not optimal for longer transfers as we waste registers
* resulting (potentially) in more interrupts when transferring
* more than 12 bytes
*/
/* set clock */
spi_hz = tfr->speed_hz;
clk_hz = clk_get_rate(bs->clk);
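	/* CNTL0 speed field: SCLK = clk_hz / (2 * (speed + 1)); round the divider up so the effective rate never exceeds the requested one */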
if (spi_hz >= clk_hz / 2) {
speed = 0;
} else if (spi_hz) {
speed = DIV_ROUND_UP(clk_hz, 2 * spi_hz) - 1;
if (speed > BCM2835_AUX_SPI_CNTL0_SPEED_MAX)
speed = BCM2835_AUX_SPI_CNTL0_SPEED_MAX;
} else { /* the slowest we can go */
speed = BCM2835_AUX_SPI_CNTL0_SPEED_MAX;
}
/* mask out old speed from previous spi_transfer */
bs->cntl[0] &= ~(BCM2835_AUX_SPI_CNTL0_SPEED);
/* set the new speed */
bs->cntl[0] |= speed << BCM2835_AUX_SPI_CNTL0_SPEED_SHIFT;
tfr->effective_speed_hz = clk_hz / (2 * (speed + 1));
/* set transmit buffers and length */
bs->tx_buf = tfr->tx_buf;
bs->rx_buf = tfr->rx_buf;
bs->tx_len = tfr->len;
bs->rx_len = tfr->len;
bs->pending = 0;
/* Calculate the estimated time in us the transfer runs. Note that
* there are 2 idle clocks cycles after each chunk getting
* transferred - in our case the chunk size is 3 bytes, so we
* approximate this by 9 cycles/byte. This is used to find the number
* of Hz per byte per polling limit. E.g., we can transfer 1 byte in
* 30 µs per 300,000 Hz of bus clock.
*/
hz_per_byte = polling_limit_us ? (9 * 1000000) / polling_limit_us : 0;
byte_limit = hz_per_byte ? tfr->effective_speed_hz / hz_per_byte : 1;
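	/* e.g. with the default polling_limit_us of 30, hz_per_byte is 300000, so at an effective speed of 3 MHz transfers shorter than 10 bytes are polled */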
/* run in polling mode for short transfers */
if (tfr->len < byte_limit)
return bcm2835aux_spi_transfer_one_poll(host, spi, tfr);
/* run in interrupt mode for all others */
return bcm2835aux_spi_transfer_one_irq(host, spi, tfr);
}
static int bcm2835aux_spi_prepare_message(struct spi_controller *host,
struct spi_message *msg)
{
struct spi_device *spi = msg->spi;
struct bcm2835aux_spi *bs = spi_controller_get_devdata(host);
bs->cntl[0] = BCM2835_AUX_SPI_CNTL0_ENABLE |
BCM2835_AUX_SPI_CNTL0_VAR_WIDTH |
BCM2835_AUX_SPI_CNTL0_MSBF_OUT;
bs->cntl[1] = BCM2835_AUX_SPI_CNTL1_MSBF_IN;
/* handle all the modes */
if (spi->mode & SPI_CPOL) {
bs->cntl[0] |= BCM2835_AUX_SPI_CNTL0_CPOL;
bs->cntl[0] |= BCM2835_AUX_SPI_CNTL0_OUT_RISING;
} else {
bs->cntl[0] |= BCM2835_AUX_SPI_CNTL0_IN_RISING;
}
bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1]);
bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL0, bs->cntl[0]);
return 0;
}
static int bcm2835aux_spi_unprepare_message(struct spi_controller *host,
struct spi_message *msg)
{
struct bcm2835aux_spi *bs = spi_controller_get_devdata(host);
bcm2835aux_spi_reset_hw(bs);
return 0;
}
static void bcm2835aux_spi_handle_err(struct spi_controller *host,
struct spi_message *msg)
{
struct bcm2835aux_spi *bs = spi_controller_get_devdata(host);
bcm2835aux_spi_reset_hw(bs);
}
static int bcm2835aux_spi_setup(struct spi_device *spi)
{
/* sanity check for native cs */
if (spi->mode & SPI_NO_CS)
return 0;
if (spi_get_csgpiod(spi, 0))
return 0;
/* for dt-backwards compatibility: only support native on CS0
* known things not supported with broken native CS:
* * multiple chip-selects: cs0-cs2 are all
* simultaneously asserted whenever there is a transfer
* this even includes SPI_NO_CS
* * SPI_CS_HIGH: cs are always asserted low
* * cs_change: cs is deasserted after each spi_transfer
* * cs_delay_usec: cs is always deasserted one SCK cycle
* after the last transfer
* probably more...
*/
dev_warn(&spi->dev,
"Native CS is not supported - please configure cs-gpio in device-tree\n");
if (spi_get_chipselect(spi, 0) == 0)
return 0;
dev_warn(&spi->dev, "Native CS is not working for cs > 0\n");
return -EINVAL;
}
static int bcm2835aux_spi_probe(struct platform_device *pdev)
{
struct spi_controller *host;
struct bcm2835aux_spi *bs;
unsigned long clk_hz;
int err;
host = devm_spi_alloc_host(&pdev->dev, sizeof(*bs));
if (!host)
return -ENOMEM;
platform_set_drvdata(pdev, host);
host->mode_bits = (SPI_CPOL | SPI_CS_HIGH | SPI_NO_CS);
host->bits_per_word_mask = SPI_BPW_MASK(8);
/* even though the driver never officially supported native CS
* allow a single native CS for legacy DT support purposes when
* no cs-gpio is configured.
* Known limitations for native cs are:
* * multiple chip-selects: cs0-cs2 are all simultaneously asserted
* whenever there is a transfer - this even includes SPI_NO_CS
* * SPI_CS_HIGH: is ignored - cs are always asserted low
* * cs_change: cs is deasserted after each spi_transfer
* * cs_delay_usec: cs is always deasserted one SCK cycle after
* a spi_transfer
*/
host->num_chipselect = 1;
host->setup = bcm2835aux_spi_setup;
host->transfer_one = bcm2835aux_spi_transfer_one;
host->handle_err = bcm2835aux_spi_handle_err;
host->prepare_message = bcm2835aux_spi_prepare_message;
host->unprepare_message = bcm2835aux_spi_unprepare_message;
host->dev.of_node = pdev->dev.of_node;
host->use_gpio_descriptors = true;
bs = spi_controller_get_devdata(host);
/* the main area */
bs->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(bs->regs))
return PTR_ERR(bs->regs);
bs->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(bs->clk)) {
err = PTR_ERR(bs->clk);
dev_err(&pdev->dev, "could not get clk: %d\n", err);
return err;
}
bs->irq = platform_get_irq(pdev, 0);
if (bs->irq < 0)
return bs->irq;
/* this also enables the HW block */
err = clk_prepare_enable(bs->clk);
if (err) {
dev_err(&pdev->dev, "could not prepare clock: %d\n", err);
return err;
}
/* just checking if the clock returns a sane value */
clk_hz = clk_get_rate(bs->clk);
if (!clk_hz) {
dev_err(&pdev->dev, "clock returns 0 Hz\n");
err = -ENODEV;
goto out_clk_disable;
}
/* reset SPI-HW block */
bcm2835aux_spi_reset_hw(bs);
err = devm_request_irq(&pdev->dev, bs->irq,
bcm2835aux_spi_interrupt,
IRQF_SHARED,
dev_name(&pdev->dev), host);
if (err) {
dev_err(&pdev->dev, "could not request IRQ: %d\n", err);
goto out_clk_disable;
}
err = spi_register_controller(host);
if (err) {
dev_err(&pdev->dev, "could not register SPI host: %d\n", err);
goto out_clk_disable;
}
bcm2835aux_debugfs_create(bs, dev_name(&pdev->dev));
return 0;
out_clk_disable:
clk_disable_unprepare(bs->clk);
return err;
}
static void bcm2835aux_spi_remove(struct platform_device *pdev)
{
struct spi_controller *host = platform_get_drvdata(pdev);
struct bcm2835aux_spi *bs = spi_controller_get_devdata(host);
bcm2835aux_debugfs_remove(bs);
spi_unregister_controller(host);
bcm2835aux_spi_reset_hw(bs);
/* disable the HW block by releasing the clock */
clk_disable_unprepare(bs->clk);
}
static const struct of_device_id bcm2835aux_spi_match[] = {
{ .compatible = "brcm,bcm2835-aux-spi", },
{}
};
MODULE_DEVICE_TABLE(of, bcm2835aux_spi_match);
static struct platform_driver bcm2835aux_spi_driver = {
.driver = {
.name = "spi-bcm2835aux",
.of_match_table = bcm2835aux_spi_match,
},
.probe = bcm2835aux_spi_probe,
.remove_new = bcm2835aux_spi_remove,
};
module_platform_driver(bcm2835aux_spi_driver);
MODULE_DESCRIPTION("SPI controller driver for Broadcom BCM2835 aux");
MODULE_AUTHOR("Martin Sperl <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-bcm2835aux.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Renesas RZ/V2M Clocked Serial Interface (CSI) driver
*
* Copyright (C) 2023 Renesas Electronics Corporation
*/
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/count_zeros.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/log2.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/reset.h>
#include <linux/spi/spi.h>
#include <linux/units.h>
/* Registers */
#define CSI_MODE 0x00 /* CSI mode control */
#define CSI_CLKSEL 0x04 /* CSI clock select */
#define CSI_CNT 0x08 /* CSI control */
#define CSI_INT 0x0C /* CSI interrupt status */
#define CSI_IFIFOL 0x10 /* CSI receive FIFO level display */
#define CSI_OFIFOL 0x14 /* CSI transmit FIFO level display */
#define CSI_IFIFO 0x18 /* CSI receive window */
#define CSI_OFIFO 0x1C /* CSI transmit window */
#define CSI_FIFOTRG 0x20 /* CSI FIFO trigger level */
/* CSI_MODE */
#define CSI_MODE_CSIE BIT(7)
#define CSI_MODE_TRMD BIT(6)
#define CSI_MODE_CCL BIT(5)
#define CSI_MODE_DIR BIT(4)
#define CSI_MODE_CSOT BIT(0)
#define CSI_MODE_SETUP 0x00000040
/* CSI_CLKSEL */
#define CSI_CLKSEL_CKP BIT(17)
#define CSI_CLKSEL_DAP BIT(16)
#define CSI_CLKSEL_MODE (CSI_CLKSEL_CKP|CSI_CLKSEL_DAP)
#define CSI_CLKSEL_SLAVE BIT(15)
#define CSI_CLKSEL_CKS GENMASK(14, 1)
/* CSI_CNT */
#define CSI_CNT_CSIRST BIT(28)
#define CSI_CNT_R_TRGEN BIT(19)
#define CSI_CNT_UNDER_E BIT(13)
#define CSI_CNT_OVERF_E BIT(12)
#define CSI_CNT_TREND_E BIT(9)
#define CSI_CNT_CSIEND_E BIT(8)
#define CSI_CNT_T_TRGR_E BIT(4)
#define CSI_CNT_R_TRGR_E BIT(0)
/* CSI_INT */
#define CSI_INT_UNDER BIT(13)
#define CSI_INT_OVERF BIT(12)
#define CSI_INT_TREND BIT(9)
#define CSI_INT_CSIEND BIT(8)
#define CSI_INT_T_TRGR BIT(4)
#define CSI_INT_R_TRGR BIT(0)
/* CSI_FIFOTRG */
#define CSI_FIFOTRG_R_TRG GENMASK(2, 0)
#define CSI_FIFO_SIZE_BYTES 32U
#define CSI_FIFO_HALF_SIZE 16U
#define CSI_EN_DIS_TIMEOUT_US 100
/*
* Clock "csiclk" gets divided by 2 * CSI_CLKSEL_CKS in order to generate the
* serial clock (output from master), with CSI_CLKSEL_CKS ranging from 0x1 (that
* means "csiclk" is divided by 2) to 0x3FFF ("csiclk" is divided by 32766).
*/
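/* e.g. (illustrative values): with csiclk at 48 MHz and CSI_CLKSEL_CKS = 3, SCKO = 48 MHz / (2 * 3) = 8 MHz */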
#define CSI_CKS_MAX GENMASK(13, 0)
#define UNDERRUN_ERROR BIT(0)
#define OVERFLOW_ERROR BIT(1)
#define TX_TIMEOUT_ERROR BIT(2)
#define RX_TIMEOUT_ERROR BIT(3)
#define CSI_MAX_SPI_SCKO (8 * HZ_PER_MHZ)
struct rzv2m_csi_priv {
void __iomem *base;
struct clk *csiclk;
struct clk *pclk;
struct device *dev;
struct spi_controller *controller;
const void *txbuf;
void *rxbuf;
unsigned int buffer_len;
unsigned int bytes_sent;
unsigned int bytes_received;
unsigned int bytes_to_transfer;
unsigned int words_to_transfer;
unsigned int bytes_per_word;
wait_queue_head_t wait;
u32 errors;
u32 status;
};
static void rzv2m_csi_reg_write_bit(const struct rzv2m_csi_priv *csi,
int reg_offs, int bit_mask, u32 value)
{
int nr_zeros;
u32 tmp;
nr_zeros = count_trailing_zeros(bit_mask);
value <<= nr_zeros;
tmp = (readl(csi->base + reg_offs) & ~bit_mask) | value;
writel(tmp, csi->base + reg_offs);
}
static int rzv2m_csi_sw_reset(struct rzv2m_csi_priv *csi, int assert)
{
u32 reg;
rzv2m_csi_reg_write_bit(csi, CSI_CNT, CSI_CNT_CSIRST, assert);
if (!assert)
return 0;
return readl_poll_timeout(csi->base + CSI_MODE, reg,
!(reg & CSI_MODE_CSOT), 0,
CSI_EN_DIS_TIMEOUT_US);
}
static int rzv2m_csi_start_stop_operation(const struct rzv2m_csi_priv *csi,
int enable, bool wait)
{
u32 reg;
rzv2m_csi_reg_write_bit(csi, CSI_MODE, CSI_MODE_CSIE, enable);
if (enable || !wait)
return 0;
return readl_poll_timeout(csi->base + CSI_MODE, reg,
!(reg & CSI_MODE_CSOT), 0,
CSI_EN_DIS_TIMEOUT_US);
}
static int rzv2m_csi_fill_txfifo(struct rzv2m_csi_priv *csi)
{
unsigned int i;
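	/* the TX FIFO is expected to be empty here; bail out if stale data is still queued */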
if (readl(csi->base + CSI_OFIFOL))
return -EIO;
if (csi->bytes_per_word == 2) {
const u16 *buf = csi->txbuf;
for (i = 0; i < csi->words_to_transfer; i++)
writel(buf[i], csi->base + CSI_OFIFO);
} else {
const u8 *buf = csi->txbuf;
for (i = 0; i < csi->words_to_transfer; i++)
writel(buf[i], csi->base + CSI_OFIFO);
}
csi->txbuf += csi->bytes_to_transfer;
csi->bytes_sent += csi->bytes_to_transfer;
return 0;
}
static int rzv2m_csi_read_rxfifo(struct rzv2m_csi_priv *csi)
{
unsigned int i;
if (readl(csi->base + CSI_IFIFOL) != csi->bytes_to_transfer)
return -EIO;
if (csi->bytes_per_word == 2) {
u16 *buf = csi->rxbuf;
for (i = 0; i < csi->words_to_transfer; i++)
buf[i] = (u16)readl(csi->base + CSI_IFIFO);
} else {
u8 *buf = csi->rxbuf;
for (i = 0; i < csi->words_to_transfer; i++)
buf[i] = (u8)readl(csi->base + CSI_IFIFO);
}
csi->rxbuf += csi->bytes_to_transfer;
csi->bytes_received += csi->bytes_to_transfer;
return 0;
}
static inline void rzv2m_csi_calc_current_transfer(struct rzv2m_csi_priv *csi)
{
unsigned int bytes_transferred = max(csi->bytes_received, csi->bytes_sent);
unsigned int bytes_remaining = csi->buffer_len - bytes_transferred;
unsigned int to_transfer;
if (csi->txbuf)
/*
* Leaving a little bit of headroom in the FIFOs makes it very
* hard to raise an overflow error (which is only possible
* when IP transmits and receives at the same time).
*/
to_transfer = min(CSI_FIFO_HALF_SIZE, bytes_remaining);
else
to_transfer = min(CSI_FIFO_SIZE_BYTES, bytes_remaining);
if (csi->bytes_per_word == 2)
to_transfer >>= 1;
/*
* We can only choose a trigger level from a predefined set of values.
* This will pick a value that is the greatest possible integer that's
* less than or equal to the number of bytes we need to transfer.
* This may result in multiple smaller transfers.
*/
csi->words_to_transfer = rounddown_pow_of_two(to_transfer);
if (csi->bytes_per_word == 2)
csi->bytes_to_transfer = csi->words_to_transfer << 1;
else
csi->bytes_to_transfer = csi->words_to_transfer;
}
static inline void rzv2m_csi_set_rx_fifo_trigger_level(struct rzv2m_csi_priv *csi)
{
rzv2m_csi_reg_write_bit(csi, CSI_FIFOTRG, CSI_FIFOTRG_R_TRG,
ilog2(csi->words_to_transfer));
}
static inline void rzv2m_csi_enable_rx_trigger(struct rzv2m_csi_priv *csi,
bool enable)
{
rzv2m_csi_reg_write_bit(csi, CSI_CNT, CSI_CNT_R_TRGEN, enable);
}
static void rzv2m_csi_disable_irqs(const struct rzv2m_csi_priv *csi,
u32 enable_bits)
{
u32 cnt = readl(csi->base + CSI_CNT);
writel(cnt & ~enable_bits, csi->base + CSI_CNT);
}
static void rzv2m_csi_disable_all_irqs(struct rzv2m_csi_priv *csi)
{
rzv2m_csi_disable_irqs(csi, CSI_CNT_R_TRGR_E | CSI_CNT_T_TRGR_E |
CSI_CNT_CSIEND_E | CSI_CNT_TREND_E |
CSI_CNT_OVERF_E | CSI_CNT_UNDER_E);
}
static inline void rzv2m_csi_clear_irqs(struct rzv2m_csi_priv *csi, u32 irqs)
{
writel(irqs, csi->base + CSI_INT);
}
static void rzv2m_csi_clear_all_irqs(struct rzv2m_csi_priv *csi)
{
rzv2m_csi_clear_irqs(csi, CSI_INT_UNDER | CSI_INT_OVERF |
CSI_INT_TREND | CSI_INT_CSIEND | CSI_INT_T_TRGR |
CSI_INT_R_TRGR);
}
static void rzv2m_csi_enable_irqs(struct rzv2m_csi_priv *csi, u32 enable_bits)
{
u32 cnt = readl(csi->base + CSI_CNT);
writel(cnt | enable_bits, csi->base + CSI_CNT);
}
static int rzv2m_csi_wait_for_interrupt(struct rzv2m_csi_priv *csi,
u32 wait_mask, u32 enable_bits)
{
int ret;
rzv2m_csi_enable_irqs(csi, enable_bits);
ret = wait_event_timeout(csi->wait,
((csi->status & wait_mask) == wait_mask) ||
csi->errors, HZ);
rzv2m_csi_disable_irqs(csi, enable_bits);
if (csi->errors)
return -EIO;
if (!ret)
return -ETIMEDOUT;
return 0;
}
static int rzv2m_csi_wait_for_tx_empty(struct rzv2m_csi_priv *csi)
{
int ret;
if (readl(csi->base + CSI_OFIFOL) == 0)
return 0;
ret = rzv2m_csi_wait_for_interrupt(csi, CSI_INT_TREND, CSI_CNT_TREND_E);
if (ret == -ETIMEDOUT)
csi->errors |= TX_TIMEOUT_ERROR;
return ret;
}
static inline int rzv2m_csi_wait_for_rx_ready(struct rzv2m_csi_priv *csi)
{
int ret;
if (readl(csi->base + CSI_IFIFOL) == csi->bytes_to_transfer)
return 0;
ret = rzv2m_csi_wait_for_interrupt(csi, CSI_INT_R_TRGR,
CSI_CNT_R_TRGR_E);
if (ret == -ETIMEDOUT)
csi->errors |= RX_TIMEOUT_ERROR;
return ret;
}
static irqreturn_t rzv2m_csi_irq_handler(int irq, void *data)
{
struct rzv2m_csi_priv *csi = data;
csi->status = readl(csi->base + CSI_INT);
rzv2m_csi_disable_irqs(csi, csi->status);
if (csi->status & CSI_INT_OVERF)
csi->errors |= OVERFLOW_ERROR;
if (csi->status & CSI_INT_UNDER)
csi->errors |= UNDERRUN_ERROR;
wake_up(&csi->wait);
return IRQ_HANDLED;
}
static void rzv2m_csi_setup_clock(struct rzv2m_csi_priv *csi, u32 spi_hz)
{
unsigned long csiclk_rate = clk_get_rate(csi->csiclk);
unsigned long pclk_rate = clk_get_rate(csi->pclk);
unsigned long csiclk_rate_limit = pclk_rate >> 1;
u32 cks;
/*
* There is a restriction on the frequency of CSICLK, it has to be <=
* PCLK / 2.
*/
if (csiclk_rate > csiclk_rate_limit) {
clk_set_rate(csi->csiclk, csiclk_rate >> 1);
csiclk_rate = clk_get_rate(csi->csiclk);
} else if ((csiclk_rate << 1) <= csiclk_rate_limit) {
clk_set_rate(csi->csiclk, csiclk_rate << 1);
csiclk_rate = clk_get_rate(csi->csiclk);
}
spi_hz = spi_hz > CSI_MAX_SPI_SCKO ? CSI_MAX_SPI_SCKO : spi_hz;
cks = DIV_ROUND_UP(csiclk_rate, spi_hz << 1);
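	/* rounding the divider up guarantees csiclk / (2 * cks) does not exceed the requested rate */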
if (cks > CSI_CKS_MAX)
cks = CSI_CKS_MAX;
dev_dbg(csi->dev, "SPI clk rate is %ldHz\n", csiclk_rate / (cks << 1));
rzv2m_csi_reg_write_bit(csi, CSI_CLKSEL, CSI_CLKSEL_CKS, cks);
}
static void rzv2m_csi_setup_operating_mode(struct rzv2m_csi_priv *csi,
struct spi_transfer *t)
{
if (t->rx_buf && !t->tx_buf)
/* Reception-only mode */
rzv2m_csi_reg_write_bit(csi, CSI_MODE, CSI_MODE_TRMD, 0);
else
/* Send and receive mode */
rzv2m_csi_reg_write_bit(csi, CSI_MODE, CSI_MODE_TRMD, 1);
csi->bytes_per_word = t->bits_per_word / 8;
rzv2m_csi_reg_write_bit(csi, CSI_MODE, CSI_MODE_CCL,
csi->bytes_per_word == 2);
}
static int rzv2m_csi_setup(struct spi_device *spi)
{
struct rzv2m_csi_priv *csi = spi_controller_get_devdata(spi->controller);
int ret;
rzv2m_csi_sw_reset(csi, 0);
writel(CSI_MODE_SETUP, csi->base + CSI_MODE);
/* Setup clock polarity and phase timing */
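	/* the CKP/DAP register encoding is the inverse of the SPI_CPOL/SPI_CPHA mode bits, hence the negation below */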
rzv2m_csi_reg_write_bit(csi, CSI_CLKSEL, CSI_CLKSEL_MODE,
~spi->mode & SPI_MODE_X_MASK);
/* Setup serial data order */
rzv2m_csi_reg_write_bit(csi, CSI_MODE, CSI_MODE_DIR,
!!(spi->mode & SPI_LSB_FIRST));
/* Set the operation mode as master */
rzv2m_csi_reg_write_bit(csi, CSI_CLKSEL, CSI_CLKSEL_SLAVE, 0);
/* Give the IP a SW reset */
ret = rzv2m_csi_sw_reset(csi, 1);
if (ret)
return ret;
rzv2m_csi_sw_reset(csi, 0);
/*
* We need to enable the communication so that the clock will settle
* for the right polarity before enabling the CS.
*/
rzv2m_csi_start_stop_operation(csi, 1, false);
udelay(10);
rzv2m_csi_start_stop_operation(csi, 0, false);
return 0;
}
static int rzv2m_csi_pio_transfer(struct rzv2m_csi_priv *csi)
{
bool tx_completed = !csi->txbuf;
bool rx_completed = !csi->rxbuf;
int ret = 0;
/* Make sure the TX FIFO is empty */
writel(0, csi->base + CSI_OFIFOL);
csi->bytes_sent = 0;
csi->bytes_received = 0;
csi->errors = 0;
rzv2m_csi_disable_all_irqs(csi);
rzv2m_csi_clear_all_irqs(csi);
rzv2m_csi_enable_rx_trigger(csi, true);
while (!tx_completed || !rx_completed) {
/*
* Decide how many words we are going to transfer during
* this cycle (for both TX and RX), then set the RX FIFO trigger
* level accordingly. No need to set a trigger level for the
* TX FIFO, as this IP comes with an interrupt that fires when
* the TX FIFO is empty.
*/
rzv2m_csi_calc_current_transfer(csi);
rzv2m_csi_set_rx_fifo_trigger_level(csi);
rzv2m_csi_enable_irqs(csi, CSI_INT_OVERF | CSI_INT_UNDER);
/* Make sure the RX FIFO is empty */
writel(0, csi->base + CSI_IFIFOL);
writel(readl(csi->base + CSI_INT), csi->base + CSI_INT);
csi->status = 0;
rzv2m_csi_start_stop_operation(csi, 1, false);
/* TX */
if (csi->txbuf) {
ret = rzv2m_csi_fill_txfifo(csi);
if (ret)
break;
ret = rzv2m_csi_wait_for_tx_empty(csi);
if (ret)
break;
if (csi->bytes_sent == csi->buffer_len)
tx_completed = true;
}
/*
* Make sure the RX FIFO contains the desired number of words.
* We then either flush its content, or we copy it onto
* csi->rxbuf.
*/
ret = rzv2m_csi_wait_for_rx_ready(csi);
if (ret)
break;
/* RX */
if (csi->rxbuf) {
rzv2m_csi_start_stop_operation(csi, 0, false);
ret = rzv2m_csi_read_rxfifo(csi);
if (ret)
break;
if (csi->bytes_received == csi->buffer_len)
rx_completed = true;
}
ret = rzv2m_csi_start_stop_operation(csi, 0, true);
if (ret)
goto pio_quit;
if (csi->errors) {
ret = -EIO;
goto pio_quit;
}
}
rzv2m_csi_start_stop_operation(csi, 0, true);
pio_quit:
rzv2m_csi_disable_all_irqs(csi);
rzv2m_csi_enable_rx_trigger(csi, false);
rzv2m_csi_clear_all_irqs(csi);
return ret;
}
static int rzv2m_csi_transfer_one(struct spi_controller *controller,
struct spi_device *spi,
struct spi_transfer *transfer)
{
struct rzv2m_csi_priv *csi = spi_controller_get_devdata(controller);
struct device *dev = csi->dev;
int ret;
csi->txbuf = transfer->tx_buf;
csi->rxbuf = transfer->rx_buf;
csi->buffer_len = transfer->len;
rzv2m_csi_setup_operating_mode(csi, transfer);
rzv2m_csi_setup_clock(csi, transfer->speed_hz);
ret = rzv2m_csi_pio_transfer(csi);
if (ret) {
if (csi->errors & UNDERRUN_ERROR)
dev_err(dev, "Underrun error\n");
if (csi->errors & OVERFLOW_ERROR)
dev_err(dev, "Overflow error\n");
if (csi->errors & TX_TIMEOUT_ERROR)
dev_err(dev, "TX timeout error\n");
if (csi->errors & RX_TIMEOUT_ERROR)
dev_err(dev, "RX timeout error\n");
}
return ret;
}
static int rzv2m_csi_probe(struct platform_device *pdev)
{
struct spi_controller *controller;
struct device *dev = &pdev->dev;
struct rzv2m_csi_priv *csi;
struct reset_control *rstc;
int irq;
int ret;
controller = devm_spi_alloc_host(dev, sizeof(*csi));
if (!controller)
return -ENOMEM;
csi = spi_controller_get_devdata(controller);
platform_set_drvdata(pdev, csi);
csi->dev = dev;
csi->controller = controller;
csi->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(csi->base))
return PTR_ERR(csi->base);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
csi->csiclk = devm_clk_get(dev, "csiclk");
if (IS_ERR(csi->csiclk))
return dev_err_probe(dev, PTR_ERR(csi->csiclk),
"could not get csiclk\n");
csi->pclk = devm_clk_get(dev, "pclk");
if (IS_ERR(csi->pclk))
return dev_err_probe(dev, PTR_ERR(csi->pclk),
"could not get pclk\n");
rstc = devm_reset_control_get_shared(dev, NULL);
if (IS_ERR(rstc))
return dev_err_probe(dev, PTR_ERR(rstc), "Missing reset ctrl\n");
init_waitqueue_head(&csi->wait);
controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
controller->bits_per_word_mask = SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
controller->setup = rzv2m_csi_setup;
controller->transfer_one = rzv2m_csi_transfer_one;
controller->use_gpio_descriptors = true;
device_set_node(&controller->dev, dev_fwnode(dev));
ret = devm_request_irq(dev, irq, rzv2m_csi_irq_handler, 0,
dev_name(dev), csi);
if (ret)
return dev_err_probe(dev, ret, "cannot request IRQ\n");
/*
* The reset also affects other HW that is not under the control
* of Linux. Therefore, all we can do is make sure the reset is
* deasserted.
*/
reset_control_deassert(rstc);
/* Make sure the IP is in SW reset state */
ret = rzv2m_csi_sw_reset(csi, 1);
if (ret)
return ret;
ret = clk_prepare_enable(csi->csiclk);
if (ret)
return dev_err_probe(dev, ret, "could not enable csiclk\n");
ret = spi_register_controller(controller);
if (ret) {
clk_disable_unprepare(csi->csiclk);
return dev_err_probe(dev, ret, "register controller failed\n");
}
return 0;
}
static void rzv2m_csi_remove(struct platform_device *pdev)
{
struct rzv2m_csi_priv *csi = platform_get_drvdata(pdev);
spi_unregister_controller(csi->controller);
rzv2m_csi_sw_reset(csi, 1);
clk_disable_unprepare(csi->csiclk);
}
static const struct of_device_id rzv2m_csi_match[] = {
{ .compatible = "renesas,rzv2m-csi" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rzv2m_csi_match);
static struct platform_driver rzv2m_csi_drv = {
.probe = rzv2m_csi_probe,
.remove_new = rzv2m_csi_remove,
.driver = {
.name = "rzv2m_csi",
.of_match_table = rzv2m_csi_match,
},
};
module_platform_driver(rzv2m_csi_drv);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Fabrizio Castro <[email protected]>");
MODULE_DESCRIPTION("Clocked Serial Interface Driver");
| linux-master | drivers/spi/spi-rzv2m-csi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* SPI driver for Nvidia's Tegra20 Serial Flash Controller.
*
* Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
*
* Author: Laxman Dewangan <[email protected]>
*/
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/reset.h>
#include <linux/spi/spi.h>
#define SPI_COMMAND 0x000
#define SPI_GO BIT(30)
#define SPI_M_S BIT(28)
#define SPI_ACTIVE_SCLK_MASK (0x3 << 26)
#define SPI_ACTIVE_SCLK_DRIVE_LOW (0 << 26)
#define SPI_ACTIVE_SCLK_DRIVE_HIGH (1 << 26)
#define SPI_ACTIVE_SCLK_PULL_LOW (2 << 26)
#define SPI_ACTIVE_SCLK_PULL_HIGH (3 << 26)
#define SPI_CK_SDA_FALLING (1 << 21)
#define SPI_CK_SDA_RISING (0 << 21)
#define SPI_CK_SDA_MASK (1 << 21)
#define SPI_ACTIVE_SDA (0x3 << 18)
#define SPI_ACTIVE_SDA_DRIVE_LOW (0 << 18)
#define SPI_ACTIVE_SDA_DRIVE_HIGH (1 << 18)
#define SPI_ACTIVE_SDA_PULL_LOW (2 << 18)
#define SPI_ACTIVE_SDA_PULL_HIGH (3 << 18)
#define SPI_CS_POL_INVERT BIT(16)
#define SPI_TX_EN BIT(15)
#define SPI_RX_EN BIT(14)
#define SPI_CS_VAL_HIGH BIT(13)
#define SPI_CS_VAL_LOW 0x0
#define SPI_CS_SW BIT(12)
#define SPI_CS_HW 0x0
#define SPI_CS_DELAY_MASK (7 << 9)
#define SPI_CS3_EN BIT(8)
#define SPI_CS2_EN BIT(7)
#define SPI_CS1_EN BIT(6)
#define SPI_CS0_EN BIT(5)
#define SPI_CS_MASK (SPI_CS3_EN | SPI_CS2_EN | \
SPI_CS1_EN | SPI_CS0_EN)
#define SPI_BIT_LENGTH(x) (((x) & 0x1f) << 0)
#define SPI_MODES (SPI_ACTIVE_SCLK_MASK | SPI_CK_SDA_MASK)
#define SPI_STATUS 0x004
#define SPI_BSY BIT(31)
#define SPI_RDY BIT(30)
#define SPI_TXF_FLUSH BIT(29)
#define SPI_RXF_FLUSH BIT(28)
#define SPI_RX_UNF BIT(27)
#define SPI_TX_OVF BIT(26)
#define SPI_RXF_EMPTY BIT(25)
#define SPI_RXF_FULL BIT(24)
#define SPI_TXF_EMPTY BIT(23)
#define SPI_TXF_FULL BIT(22)
#define SPI_BLK_CNT(count) (((count) & 0xffff) + 1)
#define SPI_FIFO_ERROR (SPI_RX_UNF | SPI_TX_OVF)
#define SPI_FIFO_EMPTY (SPI_TXF_EMPTY | SPI_RXF_EMPTY)
#define SPI_RX_CMP 0x8
#define SPI_DMA_CTL 0x0C
#define SPI_DMA_EN BIT(31)
#define SPI_IE_RXC BIT(27)
#define SPI_IE_TXC BIT(26)
#define SPI_PACKED BIT(20)
#define SPI_RX_TRIG_MASK (0x3 << 18)
#define SPI_RX_TRIG_1W (0x0 << 18)
#define SPI_RX_TRIG_4W (0x1 << 18)
#define SPI_TX_TRIG_MASK (0x3 << 16)
#define SPI_TX_TRIG_1W (0x0 << 16)
#define SPI_TX_TRIG_4W (0x1 << 16)
#define SPI_DMA_BLK_COUNT(count) (((count) - 1) & 0xFFFF)
#define SPI_TX_FIFO 0x10
#define SPI_RX_FIFO 0x20
#define DATA_DIR_TX (1 << 0)
#define DATA_DIR_RX (1 << 1)
#define MAX_CHIP_SELECT 4
#define SPI_FIFO_DEPTH 4
#define SPI_DMA_TIMEOUT (msecs_to_jiffies(1000))
struct tegra_sflash_data {
struct device *dev;
struct spi_master *master;
spinlock_t lock;
struct clk *clk;
struct reset_control *rst;
void __iomem *base;
unsigned irq;
u32 cur_speed;
struct spi_device *cur_spi;
unsigned cur_pos;
unsigned cur_len;
unsigned bytes_per_word;
unsigned cur_direction;
unsigned curr_xfer_words;
unsigned cur_rx_pos;
unsigned cur_tx_pos;
u32 tx_status;
u32 rx_status;
u32 status_reg;
u32 def_command_reg;
u32 command_reg;
u32 dma_control_reg;
struct completion xfer_completion;
struct spi_transfer *curr_xfer;
};
static int tegra_sflash_runtime_suspend(struct device *dev);
static int tegra_sflash_runtime_resume(struct device *dev);
static inline u32 tegra_sflash_readl(struct tegra_sflash_data *tsd,
unsigned long reg)
{
return readl(tsd->base + reg);
}
static inline void tegra_sflash_writel(struct tegra_sflash_data *tsd,
u32 val, unsigned long reg)
{
writel(val, tsd->base + reg);
}
static void tegra_sflash_clear_status(struct tegra_sflash_data *tsd)
{
/* Write 1 to clear status register */
tegra_sflash_writel(tsd, SPI_RDY | SPI_FIFO_ERROR, SPI_STATUS);
}
static unsigned tegra_sflash_calculate_curr_xfer_param(
struct spi_device *spi, struct tegra_sflash_data *tsd,
struct spi_transfer *t)
{
unsigned remain_len = t->len - tsd->cur_pos;
unsigned max_word;
tsd->bytes_per_word = DIV_ROUND_UP(t->bits_per_word, 8);
max_word = remain_len / tsd->bytes_per_word;
if (max_word > SPI_FIFO_DEPTH)
max_word = SPI_FIFO_DEPTH;
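	/* the FIFO holds at most SPI_FIFO_DEPTH (4) words, so longer transfers are handled in 4-word chunks */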
tsd->curr_xfer_words = max_word;
return max_word;
}
static unsigned tegra_sflash_fill_tx_fifo_from_client_txbuf(
struct tegra_sflash_data *tsd, struct spi_transfer *t)
{
unsigned nbytes;
u32 status;
unsigned max_n_32bit = tsd->curr_xfer_words;
u8 *tx_buf = (u8 *)t->tx_buf + tsd->cur_tx_pos;
if (max_n_32bit > SPI_FIFO_DEPTH)
max_n_32bit = SPI_FIFO_DEPTH;
nbytes = max_n_32bit * tsd->bytes_per_word;
status = tegra_sflash_readl(tsd, SPI_STATUS);
while (!(status & SPI_TXF_FULL)) {
int i;
u32 x = 0;
for (i = 0; nbytes && (i < tsd->bytes_per_word);
i++, nbytes--)
x |= (u32)(*tx_buf++) << (i * 8);
tegra_sflash_writel(tsd, x, SPI_TX_FIFO);
if (!nbytes)
break;
status = tegra_sflash_readl(tsd, SPI_STATUS);
}
tsd->cur_tx_pos += max_n_32bit * tsd->bytes_per_word;
return max_n_32bit;
}
static int tegra_sflash_read_rx_fifo_to_client_rxbuf(
struct tegra_sflash_data *tsd, struct spi_transfer *t)
{
u32 status;
unsigned int read_words = 0;
u8 *rx_buf = (u8 *)t->rx_buf + tsd->cur_rx_pos;
status = tegra_sflash_readl(tsd, SPI_STATUS);
while (!(status & SPI_RXF_EMPTY)) {
int i;
u32 x = tegra_sflash_readl(tsd, SPI_RX_FIFO);
for (i = 0; (i < tsd->bytes_per_word); i++)
*rx_buf++ = (x >> (i*8)) & 0xFF;
read_words++;
status = tegra_sflash_readl(tsd, SPI_STATUS);
}
tsd->cur_rx_pos += read_words * tsd->bytes_per_word;
return 0;
}
static int tegra_sflash_start_cpu_based_transfer(
struct tegra_sflash_data *tsd, struct spi_transfer *t)
{
u32 val = 0;
unsigned cur_words;
if (tsd->cur_direction & DATA_DIR_TX)
val |= SPI_IE_TXC;
if (tsd->cur_direction & DATA_DIR_RX)
val |= SPI_IE_RXC;
tegra_sflash_writel(tsd, val, SPI_DMA_CTL);
tsd->dma_control_reg = val;
if (tsd->cur_direction & DATA_DIR_TX)
cur_words = tegra_sflash_fill_tx_fifo_from_client_txbuf(tsd, t);
else
cur_words = tsd->curr_xfer_words;
val |= SPI_DMA_BLK_COUNT(cur_words);
tegra_sflash_writel(tsd, val, SPI_DMA_CTL);
tsd->dma_control_reg = val;
val |= SPI_DMA_EN;
tegra_sflash_writel(tsd, val, SPI_DMA_CTL);
return 0;
}
static int tegra_sflash_start_transfer_one(struct spi_device *spi,
struct spi_transfer *t, bool is_first_of_msg,
bool is_single_xfer)
{
struct tegra_sflash_data *tsd = spi_master_get_devdata(spi->master);
u32 speed;
u32 command;
speed = t->speed_hz;
if (speed != tsd->cur_speed) {
clk_set_rate(tsd->clk, speed);
tsd->cur_speed = speed;
}
tsd->cur_spi = spi;
tsd->cur_pos = 0;
tsd->cur_rx_pos = 0;
tsd->cur_tx_pos = 0;
tsd->curr_xfer = t;
tegra_sflash_calculate_curr_xfer_param(spi, tsd, t);
if (is_first_of_msg) {
command = tsd->def_command_reg;
command |= SPI_BIT_LENGTH(t->bits_per_word - 1);
command |= SPI_CS_VAL_HIGH;
command &= ~SPI_MODES;
if (spi->mode & SPI_CPHA)
command |= SPI_CK_SDA_FALLING;
if (spi->mode & SPI_CPOL)
command |= SPI_ACTIVE_SCLK_DRIVE_HIGH;
else
command |= SPI_ACTIVE_SCLK_DRIVE_LOW;
command |= SPI_CS0_EN << spi_get_chipselect(spi, 0);
} else {
command = tsd->command_reg;
command &= ~SPI_BIT_LENGTH(~0);
command |= SPI_BIT_LENGTH(t->bits_per_word - 1);
command &= ~(SPI_RX_EN | SPI_TX_EN);
}
tsd->cur_direction = 0;
if (t->rx_buf) {
command |= SPI_RX_EN;
tsd->cur_direction |= DATA_DIR_RX;
}
if (t->tx_buf) {
command |= SPI_TX_EN;
tsd->cur_direction |= DATA_DIR_TX;
}
tegra_sflash_writel(tsd, command, SPI_COMMAND);
tsd->command_reg = command;
return tegra_sflash_start_cpu_based_transfer(tsd, t);
}
static int tegra_sflash_transfer_one_message(struct spi_master *master,
struct spi_message *msg)
{
bool is_first_msg = true;
int single_xfer;
struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
struct spi_transfer *xfer;
struct spi_device *spi = msg->spi;
int ret;
msg->status = 0;
msg->actual_length = 0;
single_xfer = list_is_singular(&msg->transfers);
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
reinit_completion(&tsd->xfer_completion);
ret = tegra_sflash_start_transfer_one(spi, xfer,
is_first_msg, single_xfer);
if (ret < 0) {
dev_err(tsd->dev,
"spi can not start transfer, err %d\n", ret);
goto exit;
}
is_first_msg = false;
ret = wait_for_completion_timeout(&tsd->xfer_completion,
SPI_DMA_TIMEOUT);
if (WARN_ON(ret == 0)) {
dev_err(tsd->dev,
"spi transfer timeout, err %d\n", ret);
ret = -EIO;
goto exit;
}
if (tsd->tx_status || tsd->rx_status) {
dev_err(tsd->dev, "Error in Transfer\n");
ret = -EIO;
goto exit;
}
msg->actual_length += xfer->len;
if (xfer->cs_change && xfer->delay.value) {
tegra_sflash_writel(tsd, tsd->def_command_reg,
SPI_COMMAND);
spi_transfer_delay_exec(xfer);
}
}
ret = 0;
exit:
tegra_sflash_writel(tsd, tsd->def_command_reg, SPI_COMMAND);
msg->status = ret;
spi_finalize_current_message(master);
return ret;
}
static irqreturn_t handle_cpu_based_xfer(struct tegra_sflash_data *tsd)
{
struct spi_transfer *t = tsd->curr_xfer;
spin_lock(&tsd->lock);
if (tsd->tx_status || tsd->rx_status || (tsd->status_reg & SPI_BSY)) {
dev_err(tsd->dev,
"CpuXfer ERROR bit set 0x%x\n", tsd->status_reg);
dev_err(tsd->dev,
"CpuXfer 0x%08x:0x%08x\n", tsd->command_reg,
tsd->dma_control_reg);
reset_control_assert(tsd->rst);
udelay(2);
reset_control_deassert(tsd->rst);
complete(&tsd->xfer_completion);
goto exit;
}
if (tsd->cur_direction & DATA_DIR_RX)
tegra_sflash_read_rx_fifo_to_client_rxbuf(tsd, t);
if (tsd->cur_direction & DATA_DIR_TX)
tsd->cur_pos = tsd->cur_tx_pos;
else
tsd->cur_pos = tsd->cur_rx_pos;
if (tsd->cur_pos == t->len) {
complete(&tsd->xfer_completion);
goto exit;
}
tegra_sflash_calculate_curr_xfer_param(tsd->cur_spi, tsd, t);
tegra_sflash_start_cpu_based_transfer(tsd, t);
exit:
spin_unlock(&tsd->lock);
return IRQ_HANDLED;
}
static irqreturn_t tegra_sflash_isr(int irq, void *context_data)
{
struct tegra_sflash_data *tsd = context_data;
tsd->status_reg = tegra_sflash_readl(tsd, SPI_STATUS);
if (tsd->cur_direction & DATA_DIR_TX)
tsd->tx_status = tsd->status_reg & SPI_TX_OVF;
if (tsd->cur_direction & DATA_DIR_RX)
tsd->rx_status = tsd->status_reg & SPI_RX_UNF;
tegra_sflash_clear_status(tsd);
return handle_cpu_based_xfer(tsd);
}
static const struct of_device_id tegra_sflash_of_match[] = {
{ .compatible = "nvidia,tegra20-sflash", },
{}
};
MODULE_DEVICE_TABLE(of, tegra_sflash_of_match);
static int tegra_sflash_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct tegra_sflash_data *tsd;
int ret;
const struct of_device_id *match;
match = of_match_device(tegra_sflash_of_match, &pdev->dev);
if (!match) {
dev_err(&pdev->dev, "Error: No device match found\n");
return -ENODEV;
}
master = spi_alloc_master(&pdev->dev, sizeof(*tsd));
if (!master) {
dev_err(&pdev->dev, "master allocation failed\n");
return -ENOMEM;
}
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA;
master->transfer_one_message = tegra_sflash_transfer_one_message;
master->auto_runtime_pm = true;
master->num_chipselect = MAX_CHIP_SELECT;
platform_set_drvdata(pdev, master);
tsd = spi_master_get_devdata(master);
tsd->master = master;
tsd->dev = &pdev->dev;
spin_lock_init(&tsd->lock);
if (of_property_read_u32(tsd->dev->of_node, "spi-max-frequency",
&master->max_speed_hz))
master->max_speed_hz = 25000000; /* 25MHz */
tsd->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(tsd->base)) {
ret = PTR_ERR(tsd->base);
goto exit_free_master;
}
ret = platform_get_irq(pdev, 0);
if (ret < 0)
goto exit_free_master;
tsd->irq = ret;
ret = request_irq(tsd->irq, tegra_sflash_isr, 0,
dev_name(&pdev->dev), tsd);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
tsd->irq);
goto exit_free_master;
}
tsd->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(tsd->clk)) {
dev_err(&pdev->dev, "can not get clock\n");
ret = PTR_ERR(tsd->clk);
goto exit_free_irq;
}
tsd->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi");
if (IS_ERR(tsd->rst)) {
dev_err(&pdev->dev, "can not get reset\n");
ret = PTR_ERR(tsd->rst);
goto exit_free_irq;
}
init_completion(&tsd->xfer_completion);
pm_runtime_enable(&pdev->dev);
if (!pm_runtime_enabled(&pdev->dev)) {
ret = tegra_sflash_runtime_resume(&pdev->dev);
if (ret)
goto exit_pm_disable;
}
ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret);
goto exit_pm_disable;
}
/* Reset controller */
reset_control_assert(tsd->rst);
udelay(2);
reset_control_deassert(tsd->rst);
tsd->def_command_reg = SPI_M_S | SPI_CS_SW;
tegra_sflash_writel(tsd, tsd->def_command_reg, SPI_COMMAND);
pm_runtime_put(&pdev->dev);
master->dev.of_node = pdev->dev.of_node;
ret = devm_spi_register_master(&pdev->dev, master);
if (ret < 0) {
dev_err(&pdev->dev, "can not register to master err %d\n", ret);
goto exit_pm_disable;
}
return ret;
exit_pm_disable:
pm_runtime_disable(&pdev->dev);
if (!pm_runtime_status_suspended(&pdev->dev))
tegra_sflash_runtime_suspend(&pdev->dev);
exit_free_irq:
free_irq(tsd->irq, tsd);
exit_free_master:
spi_master_put(master);
return ret;
}
static void tegra_sflash_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
free_irq(tsd->irq, tsd);
pm_runtime_disable(&pdev->dev);
if (!pm_runtime_status_suspended(&pdev->dev))
tegra_sflash_runtime_suspend(&pdev->dev);
}
#ifdef CONFIG_PM_SLEEP
static int tegra_sflash_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
return spi_master_suspend(master);
}
static int tegra_sflash_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
int ret;
ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
dev_err(dev, "pm runtime failed, e = %d\n", ret);
return ret;
}
tegra_sflash_writel(tsd, tsd->command_reg, SPI_COMMAND);
pm_runtime_put(dev);
return spi_master_resume(master);
}
#endif
static int tegra_sflash_runtime_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
/* Flush all write which are in PPSB queue by reading back */
tegra_sflash_readl(tsd, SPI_COMMAND);
clk_disable_unprepare(tsd->clk);
return 0;
}
static int tegra_sflash_runtime_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
int ret;
ret = clk_prepare_enable(tsd->clk);
if (ret < 0) {
dev_err(tsd->dev, "clk_prepare failed: %d\n", ret);
return ret;
}
return 0;
}
static const struct dev_pm_ops slink_pm_ops = {
SET_RUNTIME_PM_OPS(tegra_sflash_runtime_suspend,
tegra_sflash_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(tegra_sflash_suspend, tegra_sflash_resume)
};
static struct platform_driver tegra_sflash_driver = {
.driver = {
.name = "spi-tegra-sflash",
.pm = &slink_pm_ops,
.of_match_table = tegra_sflash_of_match,
},
.probe = tegra_sflash_probe,
.remove_new = tegra_sflash_remove,
};
module_platform_driver(tegra_sflash_driver);
MODULE_ALIAS("platform:spi-tegra-sflash");
MODULE_DESCRIPTION("NVIDIA Tegra20 Serial Flash Controller Driver");
MODULE_AUTHOR("Laxman Dewangan <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/spi/spi-tegra20-sflash.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Special handling for DW DMA core
*
* Copyright (c) 2009, 2014 Intel Corporation.
*/
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/irqreturn.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_data/dma-dw.h>
#include <linux/spi/spi.h>
#include <linux/types.h>
#include "spi-dw.h"
#define DW_SPI_RX_BUSY 0
#define DW_SPI_RX_BURST_LEVEL 16
#define DW_SPI_TX_BUSY 1
#define DW_SPI_TX_BURST_LEVEL 16
static bool dw_spi_dma_chan_filter(struct dma_chan *chan, void *param)
{
struct dw_dma_slave *s = param;
if (s->dma_dev != chan->device->dev)
return false;
chan->private = s;
return true;
}
static void dw_spi_dma_maxburst_init(struct dw_spi *dws)
{
struct dma_slave_caps caps;
u32 max_burst, def_burst;
int ret;
def_burst = dws->fifo_len / 2;
ret = dma_get_slave_caps(dws->rxchan, &caps);
if (!ret && caps.max_burst)
max_burst = caps.max_burst;
else
max_burst = DW_SPI_RX_BURST_LEVEL;
dws->rxburst = min(max_burst, def_burst);
dw_writel(dws, DW_SPI_DMARDLR, dws->rxburst - 1);
ret = dma_get_slave_caps(dws->txchan, &caps);
if (!ret && caps.max_burst)
max_burst = caps.max_burst;
else
max_burst = DW_SPI_TX_BURST_LEVEL;
/*
* Having a Rx DMA channel serviced with higher priority than a Tx DMA
* channel might not be enough to provide a well balanced DMA-based
* SPI transfer interface. There might still be moments when the Tx DMA
* channel is occasionally handled faster than the Rx DMA channel.
* That in its turn will eventually cause the SPI Rx FIFO overflow if
* SPI bus speed is high enough to fill the SPI Rx FIFO in before it's
* cleared by the Rx DMA channel. In order to fix the problem the Tx
* DMA activity is intentionally slowed down by limiting the SPI Tx
* FIFO depth to twice the Tx burst length.
*/
dws->txburst = min(max_burst, def_burst);
dw_writel(dws, DW_SPI_DMATDLR, dws->txburst);
}
static int dw_spi_dma_caps_init(struct dw_spi *dws)
{
struct dma_slave_caps tx, rx;
int ret;
ret = dma_get_slave_caps(dws->txchan, &tx);
if (ret)
return ret;
ret = dma_get_slave_caps(dws->rxchan, &rx);
if (ret)
return ret;
if (!(tx.directions & BIT(DMA_MEM_TO_DEV) &&
rx.directions & BIT(DMA_DEV_TO_MEM)))
return -ENXIO;
if (tx.max_sg_burst > 0 && rx.max_sg_burst > 0)
dws->dma_sg_burst = min(tx.max_sg_burst, rx.max_sg_burst);
else if (tx.max_sg_burst > 0)
dws->dma_sg_burst = tx.max_sg_burst;
else if (rx.max_sg_burst > 0)
dws->dma_sg_burst = rx.max_sg_burst;
else
dws->dma_sg_burst = 0;
/*
* Assuming both channels belong to the same DMA controller hence the
* peripheral side address width capabilities most likely would be
* the same.
*/
dws->dma_addr_widths = tx.dst_addr_widths & rx.src_addr_widths;
return 0;
}
static int dw_spi_dma_init_mfld(struct device *dev, struct dw_spi *dws)
{
struct dw_dma_slave dma_tx = { .dst_id = 1 }, *tx = &dma_tx;
struct dw_dma_slave dma_rx = { .src_id = 0 }, *rx = &dma_rx;
struct pci_dev *dma_dev;
dma_cap_mask_t mask;
int ret = -EBUSY;
/*
* Get pci device for DMA controller, currently it could only
* be the DMA controller of Medfield
*/
dma_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
if (!dma_dev)
return -ENODEV;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
/* 1. Init rx channel */
rx->dma_dev = &dma_dev->dev;
dws->rxchan = dma_request_channel(mask, dw_spi_dma_chan_filter, rx);
if (!dws->rxchan)
goto err_exit;
/* 2. Init tx channel */
tx->dma_dev = &dma_dev->dev;
dws->txchan = dma_request_channel(mask, dw_spi_dma_chan_filter, tx);
if (!dws->txchan)
goto free_rxchan;
dws->host->dma_rx = dws->rxchan;
dws->host->dma_tx = dws->txchan;
init_completion(&dws->dma_completion);
ret = dw_spi_dma_caps_init(dws);
if (ret)
goto free_txchan;
dw_spi_dma_maxburst_init(dws);
pci_dev_put(dma_dev);
return 0;
free_txchan:
dma_release_channel(dws->txchan);
dws->txchan = NULL;
free_rxchan:
dma_release_channel(dws->rxchan);
dws->rxchan = NULL;
err_exit:
pci_dev_put(dma_dev);
return ret;
}
static int dw_spi_dma_init_generic(struct device *dev, struct dw_spi *dws)
{
int ret;
dws->rxchan = dma_request_chan(dev, "rx");
if (IS_ERR(dws->rxchan)) {
ret = PTR_ERR(dws->rxchan);
dws->rxchan = NULL;
goto err_exit;
}
dws->txchan = dma_request_chan(dev, "tx");
if (IS_ERR(dws->txchan)) {
ret = PTR_ERR(dws->txchan);
dws->txchan = NULL;
goto free_rxchan;
}
dws->host->dma_rx = dws->rxchan;
dws->host->dma_tx = dws->txchan;
init_completion(&dws->dma_completion);
ret = dw_spi_dma_caps_init(dws);
if (ret)
goto free_txchan;
dw_spi_dma_maxburst_init(dws);
return 0;
free_txchan:
dma_release_channel(dws->txchan);
dws->txchan = NULL;
free_rxchan:
dma_release_channel(dws->rxchan);
dws->rxchan = NULL;
err_exit:
return ret;
}
static void dw_spi_dma_exit(struct dw_spi *dws)
{
if (dws->txchan) {
dmaengine_terminate_sync(dws->txchan);
dma_release_channel(dws->txchan);
}
if (dws->rxchan) {
dmaengine_terminate_sync(dws->rxchan);
dma_release_channel(dws->rxchan);
}
}
static irqreturn_t dw_spi_dma_transfer_handler(struct dw_spi *dws)
{
dw_spi_check_status(dws, false);
complete(&dws->dma_completion);
return IRQ_HANDLED;
}
static enum dma_slave_buswidth dw_spi_dma_convert_width(u8 n_bytes)
{
switch (n_bytes) {
case 1:
return DMA_SLAVE_BUSWIDTH_1_BYTE;
case 2:
return DMA_SLAVE_BUSWIDTH_2_BYTES;
case 4:
return DMA_SLAVE_BUSWIDTH_4_BYTES;
default:
return DMA_SLAVE_BUSWIDTH_UNDEFINED;
}
}
static bool dw_spi_can_dma(struct spi_controller *host,
struct spi_device *spi, struct spi_transfer *xfer)
{
struct dw_spi *dws = spi_controller_get_devdata(host);
enum dma_slave_buswidth dma_bus_width;
if (xfer->len <= dws->fifo_len)
return false;
dma_bus_width = dw_spi_dma_convert_width(dws->n_bytes);
return dws->dma_addr_widths & BIT(dma_bus_width);
}
static int dw_spi_dma_wait(struct dw_spi *dws, unsigned int len, u32 speed)
{
unsigned long long ms;
ms = len * MSEC_PER_SEC * BITS_PER_BYTE;
do_div(ms, speed);
ms += ms + 200;
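	/* allow twice the nominal transfer time plus 200 ms of slack before declaring a timeout */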
if (ms > UINT_MAX)
ms = UINT_MAX;
ms = wait_for_completion_timeout(&dws->dma_completion,
msecs_to_jiffies(ms));
if (ms == 0) {
dev_err(&dws->host->cur_msg->spi->dev,
"DMA transaction timed out\n");
return -ETIMEDOUT;
}
return 0;
}
static inline bool dw_spi_dma_tx_busy(struct dw_spi *dws)
{
return !(dw_readl(dws, DW_SPI_SR) & DW_SPI_SR_TF_EMPT);
}
static int dw_spi_dma_wait_tx_done(struct dw_spi *dws,
struct spi_transfer *xfer)
{
int retry = DW_SPI_WAIT_RETRIES;
struct spi_delay delay;
u32 nents;
nents = dw_readl(dws, DW_SPI_TXFLR);
delay.unit = SPI_DELAY_UNIT_SCK;
delay.value = nents * dws->n_bytes * BITS_PER_BYTE;
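	/* each retry waits long enough (in SCK cycles) to shift out the words still left in the Tx FIFO */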
while (dw_spi_dma_tx_busy(dws) && retry--)
spi_delay_exec(&delay, xfer);
if (retry < 0) {
dev_err(&dws->host->dev, "Tx hanged up\n");
return -EIO;
}
return 0;
}
/*
* dws->dma_chan_busy is set before the dma transfer starts, callback for tx
* channel will clear a corresponding bit.
*/
static void dw_spi_dma_tx_done(void *arg)
{
struct dw_spi *dws = arg;
clear_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy);
if (test_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy))
return;
complete(&dws->dma_completion);
}
static int dw_spi_dma_config_tx(struct dw_spi *dws)
{
struct dma_slave_config txconf;
memset(&txconf, 0, sizeof(txconf));
txconf.direction = DMA_MEM_TO_DEV;
txconf.dst_addr = dws->dma_addr;
txconf.dst_maxburst = dws->txburst;
txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
txconf.dst_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
txconf.device_fc = false;
return dmaengine_slave_config(dws->txchan, &txconf);
}
static int dw_spi_dma_submit_tx(struct dw_spi *dws, struct scatterlist *sgl,
unsigned int nents)
{
struct dma_async_tx_descriptor *txdesc;
dma_cookie_t cookie;
int ret;
txdesc = dmaengine_prep_slave_sg(dws->txchan, sgl, nents,
DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!txdesc)
return -ENOMEM;
txdesc->callback = dw_spi_dma_tx_done;
txdesc->callback_param = dws;
cookie = dmaengine_submit(txdesc);
ret = dma_submit_error(cookie);
if (ret) {
dmaengine_terminate_sync(dws->txchan);
return ret;
}
set_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy);
return 0;
}
static inline bool dw_spi_dma_rx_busy(struct dw_spi *dws)
{
return !!(dw_readl(dws, DW_SPI_SR) & DW_SPI_SR_RF_NOT_EMPT);
}
static int dw_spi_dma_wait_rx_done(struct dw_spi *dws)
{
int retry = DW_SPI_WAIT_RETRIES;
struct spi_delay delay;
unsigned long ns, us;
u32 nents;
/*
* It's unlikely that the DMA engine is still fetching data, but if it
* is, let's give it some reasonable time. The timeout calculation
* is based on the synchronous APB/SSI reference clock rate, on a
* number of data entries left in the Rx FIFO, times a number of clock
* periods normally needed for a single APB read/write transaction
* without PREADY signal utilized (which is true for the DW APB SSI
* controller).
*/
nents = dw_readl(dws, DW_SPI_RXFLR);
ns = 4U * NSEC_PER_SEC / dws->max_freq * nents;
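	/* 4 reference-clock periods per remaining FIFO entry; e.g. at a 100 MHz reference clock that is roughly 40 ns per entry */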
if (ns <= NSEC_PER_USEC) {
delay.unit = SPI_DELAY_UNIT_NSECS;
delay.value = ns;
} else {
us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
delay.unit = SPI_DELAY_UNIT_USECS;
delay.value = clamp_val(us, 0, USHRT_MAX);
}
while (dw_spi_dma_rx_busy(dws) && retry--)
spi_delay_exec(&delay, NULL);
if (retry < 0) {
dev_err(&dws->host->dev, "Rx hanged up\n");
return -EIO;
}
return 0;
}
/*
* dws->dma_chan_busy is set before the dma transfer starts, callback for rx
* channel will clear a corresponding bit.
*/
static void dw_spi_dma_rx_done(void *arg)
{
struct dw_spi *dws = arg;
clear_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy);
if (test_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy))
return;
complete(&dws->dma_completion);
}
static int dw_spi_dma_config_rx(struct dw_spi *dws)
{
struct dma_slave_config rxconf;
memset(&rxconf, 0, sizeof(rxconf));
rxconf.direction = DMA_DEV_TO_MEM;
rxconf.src_addr = dws->dma_addr;
rxconf.src_maxburst = dws->rxburst;
rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
rxconf.src_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
rxconf.device_fc = false;
return dmaengine_slave_config(dws->rxchan, &rxconf);
}
static int dw_spi_dma_submit_rx(struct dw_spi *dws, struct scatterlist *sgl,
unsigned int nents)
{
struct dma_async_tx_descriptor *rxdesc;
dma_cookie_t cookie;
int ret;
rxdesc = dmaengine_prep_slave_sg(dws->rxchan, sgl, nents,
DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!rxdesc)
return -ENOMEM;
rxdesc->callback = dw_spi_dma_rx_done;
rxdesc->callback_param = dws;
cookie = dmaengine_submit(rxdesc);
ret = dma_submit_error(cookie);
if (ret) {
dmaengine_terminate_sync(dws->rxchan);
return ret;
}
set_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy);
return 0;
}
static int dw_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
{
u16 imr, dma_ctrl;
int ret;
if (!xfer->tx_buf)
return -EINVAL;
/* Setup DMA channels */
ret = dw_spi_dma_config_tx(dws);
if (ret)
return ret;
if (xfer->rx_buf) {
ret = dw_spi_dma_config_rx(dws);
if (ret)
return ret;
}
/* Set the DMA handshaking interface */
dma_ctrl = DW_SPI_DMACR_TDMAE;
if (xfer->rx_buf)
dma_ctrl |= DW_SPI_DMACR_RDMAE;
dw_writel(dws, DW_SPI_DMACR, dma_ctrl);
/* Set the interrupt mask */
imr = DW_SPI_INT_TXOI;
if (xfer->rx_buf)
imr |= DW_SPI_INT_RXUI | DW_SPI_INT_RXOI;
dw_spi_umask_intr(dws, imr);
reinit_completion(&dws->dma_completion);
dws->transfer_handler = dw_spi_dma_transfer_handler;
return 0;
}
static int dw_spi_dma_transfer_all(struct dw_spi *dws,
struct spi_transfer *xfer)
{
int ret;
/* Submit the DMA Tx transfer */
ret = dw_spi_dma_submit_tx(dws, xfer->tx_sg.sgl, xfer->tx_sg.nents);
if (ret)
goto err_clear_dmac;
/* Submit the DMA Rx transfer if required */
if (xfer->rx_buf) {
ret = dw_spi_dma_submit_rx(dws, xfer->rx_sg.sgl,
xfer->rx_sg.nents);
if (ret)
goto err_clear_dmac;
/* Rx must be started before Tx so incoming data is drained as soon as Tx starts clocking */
dma_async_issue_pending(dws->rxchan);
}
dma_async_issue_pending(dws->txchan);
ret = dw_spi_dma_wait(dws, xfer->len, xfer->effective_speed_hz);
err_clear_dmac:
dw_writel(dws, DW_SPI_DMACR, 0);
return ret;
}
/*
 * If at least one of the requested DMA channels doesn't support hardware
 * accelerated traversal of the SG list entries, the DMA driver will most
 * likely work around that by resubmitting the SG list entries from its IRQ
 * handler. That causes a problem whenever the DMA Tx channel is recharged
 * and re-executed before the Rx DMA channel. Due to the non-deterministic
 * IRQ-handler execution latency, the DMA Tx channel may start pushing data
 * to the SPI bus before the Rx DMA channel has even been reinitialized with
 * the next inbound SG list entry. The Tx channel then implicitly starts
 * filling the DW APB SSI Rx FIFO, which will eventually overflow while the
 * Rx DMA channel is still being recharged and re-executed.
 *
 * To solve the problem we have to feed the DMA engine with the SG list
 * entries one by one. That keeps the DW APB SSI Tx and Rx FIFOs synchronized
 * and prevents the Rx FIFO from overflowing. Since in general the tx_sg and
 * rx_sg lists may have a different number of entries of different lengths
 * (though the total lengths must match), let's virtually split the SG-lists
 * into a set of DMA transfers whose length is the minimum of the ordered
 * SG-entry lengths. An ASCII sketch of the implemented algorithm follows:
* xfer->len
* |___________|
* tx_sg list: |___|____|__|
* rx_sg list: |_|____|____|
* DMA transfers: |_|_|__|_|__|
*
 * Note that for this workaround to actually solve the denoted problem the
 * DMA engine driver must properly initialize the max_sg_burst capability and
 * set the DMA device max segment size parameter to the maximum data block
 * size the DMA engine supports.
*/
static int dw_spi_dma_transfer_one(struct dw_spi *dws,
struct spi_transfer *xfer)
{
struct scatterlist *tx_sg = NULL, *rx_sg = NULL, tx_tmp, rx_tmp;
unsigned int tx_len = 0, rx_len = 0;
unsigned int base, len;
int ret;
sg_init_table(&tx_tmp, 1);
sg_init_table(&rx_tmp, 1);
for (base = 0, len = 0; base < xfer->len; base += len) {
/* Fetch next Tx DMA data chunk */
if (!tx_len) {
tx_sg = !tx_sg ? &xfer->tx_sg.sgl[0] : sg_next(tx_sg);
sg_dma_address(&tx_tmp) = sg_dma_address(tx_sg);
tx_len = sg_dma_len(tx_sg);
}
/* Fetch next Rx DMA data chunk */
if (!rx_len) {
rx_sg = !rx_sg ? &xfer->rx_sg.sgl[0] : sg_next(rx_sg);
sg_dma_address(&rx_tmp) = sg_dma_address(rx_sg);
rx_len = sg_dma_len(rx_sg);
}
len = min(tx_len, rx_len);
sg_dma_len(&tx_tmp) = len;
sg_dma_len(&rx_tmp) = len;
/* Submit DMA Tx transfer */
ret = dw_spi_dma_submit_tx(dws, &tx_tmp, 1);
if (ret)
break;
/* Submit DMA Rx transfer */
ret = dw_spi_dma_submit_rx(dws, &rx_tmp, 1);
if (ret)
break;
/* Rx must be started before Tx: SPI clocks data in as soon as Tx starts */
dma_async_issue_pending(dws->rxchan);
dma_async_issue_pending(dws->txchan);
/*
 * Here we only need to wait for the DMA transfer to finish,
 * since the SPI controller is kept enabled for the whole
 * duration of this loop and there is no risk of losing the
 * data left in the Tx/Rx FIFOs.
*/
ret = dw_spi_dma_wait(dws, len, xfer->effective_speed_hz);
if (ret)
break;
reinit_completion(&dws->dma_completion);
sg_dma_address(&tx_tmp) += len;
sg_dma_address(&rx_tmp) += len;
tx_len -= len;
rx_len -= len;
}
dw_writel(dws, DW_SPI_DMACR, 0);
return ret;
}
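/*
 * Top-level DMA transfer method: pick between submitting the whole SG-lists
 * at once and the entry-by-entry workaround described above depending on the
 * DMA engine SG burst capability, then wait for the Tx FIFO to drain and the
 * Rx DMA to go idle unless the message has already been failed.
 */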
static int dw_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
{
unsigned int nents;
int ret;
nents = max(xfer->tx_sg.nents, xfer->rx_sg.nents);
/*
 * Execute the normal DMA-based transfer (which submits the Rx and Tx
 * SG-lists to the DMA engine at once) if full hardware accelerated
 * SG-list traversal is supported by both channels, if a Tx-only SPI
 * transfer is requested, or if the number of SG entries fits within
 * the DMA engine SG burst capability. Otherwise fall back to feeding
 * the SG entries one by one.
*/
if (!dws->dma_sg_burst || !xfer->rx_buf || nents <= dws->dma_sg_burst)
ret = dw_spi_dma_transfer_all(dws, xfer);
else
ret = dw_spi_dma_transfer_one(dws, xfer);
if (ret)
return ret;
if (dws->host->cur_msg->status == -EINPROGRESS) {
ret = dw_spi_dma_wait_tx_done(dws, xfer);
if (ret)
return ret;
}
if (xfer->rx_buf && dws->host->cur_msg->status == -EINPROGRESS)
ret = dw_spi_dma_wait_rx_done(dws);
return ret;
}
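/*
 * Terminate any in-flight Tx/Rx DMA transactions and clear the channel busy
 * flags so that a subsequent transfer starts from a clean state.
 */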
static void dw_spi_dma_stop(struct dw_spi *dws)
{
if (test_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy)) {
dmaengine_terminate_sync(dws->txchan);
clear_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy);
}
if (test_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy)) {
dmaengine_terminate_sync(dws->rxchan);
clear_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy);
}
}
static const struct dw_spi_dma_ops dw_spi_dma_mfld_ops = {
.dma_init = dw_spi_dma_init_mfld,
.dma_exit = dw_spi_dma_exit,
.dma_setup = dw_spi_dma_setup,
.can_dma = dw_spi_can_dma,
.dma_transfer = dw_spi_dma_transfer,
.dma_stop = dw_spi_dma_stop,
};
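/*
 * Install the Intel Medfield specific DMA ops; the DMA channels themselves
 * are requested later by dw_spi_dma_init_mfld().
 */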
void dw_spi_dma_setup_mfld(struct dw_spi *dws)
{
dws->dma_ops = &dw_spi_dma_mfld_ops;
}
EXPORT_SYMBOL_NS_GPL(dw_spi_dma_setup_mfld, SPI_DW_CORE);
static const struct dw_spi_dma_ops dw_spi_dma_generic_ops = {
.dma_init = dw_spi_dma_init_generic,
.dma_exit = dw_spi_dma_exit,
.dma_setup = dw_spi_dma_setup,
.can_dma = dw_spi_can_dma,
.dma_transfer = dw_spi_dma_transfer,
.dma_stop = dw_spi_dma_stop,
};
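/*
 * Install the generic DMA ops; the DMA channels themselves are requested
 * later by dw_spi_dma_init_generic().
 */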
void dw_spi_dma_setup_generic(struct dw_spi *dws)
{
dws->dma_ops = &dw_spi_dma_generic_ops;
}
EXPORT_SYMBOL_NS_GPL(dw_spi_dma_setup_generic, SPI_DW_CORE);
| linux-master | drivers/spi/spi-dw-dma.c |