// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2005, Intec Automation Inc.
* Copyright (C) 2014, Freescale Semiconductor, Inc.
*/
#include <linux/mtd/spi-nor.h>
#include "core.h"
/* flash_info mfr_flag. Used to read proprietary FSR register. */
#define USE_FSR BIT(0)
#define SPINOR_OP_RDFSR 0x70 /* Read flag status register */
#define SPINOR_OP_CLFSR 0x50 /* Clear flag status register */
#define SPINOR_OP_MT_DTR_RD 0xfd /* Fast Read opcode in DTR mode */
#define SPINOR_OP_MT_RD_ANY_REG 0x85 /* Read volatile register */
#define SPINOR_OP_MT_WR_ANY_REG 0x81 /* Write volatile register */
#define SPINOR_REG_MT_CFR0V 0x00 /* For setting octal DTR mode */
#define SPINOR_REG_MT_CFR1V 0x01 /* For setting dummy cycles */
#define SPINOR_REG_MT_CFR1V_DEF 0x1f /* Default dummy cycles */
#define SPINOR_MT_OCT_DTR 0xe7 /* Enable Octal DTR. */
#define SPINOR_MT_EXSPI 0xff /* Enable Extended SPI (default) */
/* Flag Status Register bits */
#define FSR_READY BIT(7) /* Device status, 0 = Busy, 1 = Ready */
#define FSR_E_ERR BIT(5) /* Erase operation status */
#define FSR_P_ERR BIT(4) /* Program operation status */
#define FSR_PT_ERR BIT(1) /* Protection error bit */
/* Micron ST SPI NOR flash operations. */
#define MICRON_ST_NOR_WR_ANY_REG_OP(naddr, addr, ndata, buf) \
SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_MT_WR_ANY_REG, 0), \
SPI_MEM_OP_ADDR(naddr, addr, 0), \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_DATA_OUT(ndata, buf, 0))
#define MICRON_ST_RDFSR_OP(buf) \
SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 0), \
SPI_MEM_OP_NO_ADDR, \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_DATA_IN(1, buf, 0))
#define MICRON_ST_CLFSR_OP \
SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLFSR, 0), \
SPI_MEM_OP_NO_ADDR, \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
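/*
 * Illustrative sketch, not part of the upstream driver: the op templates
 * above expand to a struct spi_mem_op that can be tuned for the current
 * protocol and handed to spi_mem_exec_op(). A hypothetical helper that
 * polls the Flag Status Register could look roughly like this (assumes
 * nor->bouncebuf is the usual DMA-safe scratch buffer).
 */
#if 0
static int example_micron_poll_fsr(struct spi_nor *nor)
{
	struct spi_mem_op op = MICRON_ST_RDFSR_OP(nor->bouncebuf);
	int ret;

	/* Adapt opcode/address/dummy widths to the active register protocol. */
	spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

	ret = spi_mem_exec_op(nor->spimem, &op);
	if (ret)
		return ret;

	/* FSR_READY is set once the previous program/erase has completed. */
	return !!(nor->bouncebuf[0] & FSR_READY);
}
#endif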
static int micron_st_nor_octal_dtr_en(struct spi_nor *nor)
{
struct spi_mem_op op;
u8 *buf = nor->bouncebuf;
int ret;
u8 addr_mode_nbytes = nor->params->addr_mode_nbytes;
/* Use 20 dummy cycles for memory array reads. */
*buf = 20;
op = (struct spi_mem_op)
MICRON_ST_NOR_WR_ANY_REG_OP(addr_mode_nbytes,
SPINOR_REG_MT_CFR1V, 1, buf);
ret = spi_nor_write_any_volatile_reg(nor, &op, nor->reg_proto);
if (ret)
return ret;
buf[0] = SPINOR_MT_OCT_DTR;
op = (struct spi_mem_op)
MICRON_ST_NOR_WR_ANY_REG_OP(addr_mode_nbytes,
SPINOR_REG_MT_CFR0V, 1, buf);
ret = spi_nor_write_any_volatile_reg(nor, &op, nor->reg_proto);
if (ret)
return ret;
/* Read flash ID to make sure the switch was successful. */
ret = spi_nor_read_id(nor, 0, 8, buf, SNOR_PROTO_8_8_8_DTR);
if (ret) {
dev_dbg(nor->dev, "error %d reading JEDEC ID after enabling 8D-8D-8D mode\n", ret);
return ret;
}
if (memcmp(buf, nor->info->id, nor->info->id_len))
return -EINVAL;
return 0;
}
static int micron_st_nor_octal_dtr_dis(struct spi_nor *nor)
{
struct spi_mem_op op;
u8 *buf = nor->bouncebuf;
int ret;
/*
* The register is 1-byte wide, but 1-byte transactions are not allowed
* in 8D-8D-8D mode. The next register is the dummy cycle configuration
* register. Since the transaction needs to be at least 2 bytes wide,
* set the next register to its default value. This also makes sense
* because the value was changed when enabling 8D-8D-8D mode, so it should
* be reset when disabling.
*/
buf[0] = SPINOR_MT_EXSPI;
buf[1] = SPINOR_REG_MT_CFR1V_DEF;
op = (struct spi_mem_op)
MICRON_ST_NOR_WR_ANY_REG_OP(nor->addr_nbytes,
SPINOR_REG_MT_CFR0V, 2, buf);
ret = spi_nor_write_any_volatile_reg(nor, &op, SNOR_PROTO_8_8_8_DTR);
if (ret)
return ret;
/* Read flash ID to make sure the switch was successful. */
ret = spi_nor_read_id(nor, 0, 0, buf, SNOR_PROTO_1_1_1);
if (ret) {
dev_dbg(nor->dev, "error %d reading JEDEC ID after disabling 8D-8D-8D mode\n", ret);
return ret;
}
if (memcmp(buf, nor->info->id, nor->info->id_len))
return -EINVAL;
return 0;
}
static int micron_st_nor_set_octal_dtr(struct spi_nor *nor, bool enable)
{
return enable ? micron_st_nor_octal_dtr_en(nor) :
micron_st_nor_octal_dtr_dis(nor);
}
static void mt35xu512aba_default_init(struct spi_nor *nor)
{
nor->params->set_octal_dtr = micron_st_nor_set_octal_dtr;
}
static int mt35xu512aba_post_sfdp_fixup(struct spi_nor *nor)
{
/* Set the Fast Read settings. */
nor->params->hwcaps.mask |= SNOR_HWCAPS_READ_8_8_8_DTR;
spi_nor_set_read_settings(&nor->params->reads[SNOR_CMD_READ_8_8_8_DTR],
0, 20, SPINOR_OP_MT_DTR_RD,
SNOR_PROTO_8_8_8_DTR);
nor->cmd_ext_type = SPI_NOR_EXT_REPEAT;
nor->params->rdsr_dummy = 8;
nor->params->rdsr_addr_nbytes = 0;
/*
* The BFPT quad enable field is set to a reserved value so the quad
* enable function is ignored by spi_nor_parse_bfpt(). Make sure we
* disable it.
*/
nor->params->quad_enable = NULL;
return 0;
}
static const struct spi_nor_fixups mt35xu512aba_fixups = {
.default_init = mt35xu512aba_default_init,
.post_sfdp = mt35xu512aba_post_sfdp_fixup,
};
static const struct flash_info micron_nor_parts[] = {
{ "mt35xu512aba", INFO(0x2c5b1a, 0, 128 * 1024, 512)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_OCTAL_READ |
SPI_NOR_OCTAL_DTR_READ | SPI_NOR_OCTAL_DTR_PP)
FIXUP_FLAGS(SPI_NOR_4B_OPCODES | SPI_NOR_IO_MODE_EN_VOLATILE)
MFR_FLAGS(USE_FSR)
.fixups = &mt35xu512aba_fixups
},
{ "mt35xu02g", INFO(0x2c5b1c, 0, 128 * 1024, 2048)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_OCTAL_READ)
FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
MFR_FLAGS(USE_FSR)
},
};
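/*
 * For illustration only: a new entry in a table like the one above combines
 * INFO() geometry (JEDEC ID, extended ID, sector size, number of sectors)
 * with the optional flag macros. The name and ID below are made up and do
 * not describe a real part:
 *
 *	{ "mt35xu-example", INFO(0x2c5b1b, 0, 128 * 1024, 1024)
 *		NO_SFDP_FLAGS(SECT_4K | SPI_NOR_OCTAL_READ)
 *		FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
 *		MFR_FLAGS(USE_FSR)
 *	},
 *
 * Real entries must follow the vendor datasheet for the ID and geometry.
 */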
static const struct flash_info st_nor_parts[] = {
{ "n25q016a", INFO(0x20bb15, 0, 64 * 1024, 32)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) },
{ "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64)
NO_SFDP_FLAGS(SPI_NOR_QUAD_READ) },
{ "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64)
NO_SFDP_FLAGS(SPI_NOR_QUAD_READ) },
{ "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) },
{ "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) },
{ "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
SPI_NOR_BP3_SR_BIT6)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
MFR_FLAGS(USE_FSR)
},
{ "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
SPI_NOR_BP3_SR_BIT6)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
MFR_FLAGS(USE_FSR)
},
{ "mt25ql256a", INFO6(0x20ba19, 0x104400, 64 * 1024, 512)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
MFR_FLAGS(USE_FSR)
},
{ "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ)
MFR_FLAGS(USE_FSR)
},
{ "mt25qu256a", INFO6(0x20bb19, 0x104400, 64 * 1024, 512)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
SPI_NOR_BP3_SR_BIT6)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
MFR_FLAGS(USE_FSR)
},
{ "n25q256ax1", INFO(0x20bb19, 0, 64 * 1024, 512)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
MFR_FLAGS(USE_FSR)
},
{ "mt25ql512a", INFO6(0x20ba20, 0x104400, 64 * 1024, 1024)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
MFR_FLAGS(USE_FSR)
},
{ "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
SPI_NOR_BP3_SR_BIT6)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
MFR_FLAGS(USE_FSR)
},
{ "mt25qu512a", INFO6(0x20bb20, 0x104400, 64 * 1024, 1024)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
MFR_FLAGS(USE_FSR)
},
{ "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
SPI_NOR_BP3_SR_BIT6)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
MFR_FLAGS(USE_FSR)
},
{ "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
SPI_NOR_BP3_SR_BIT6 | NO_CHIP_ERASE)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
MFR_FLAGS(USE_FSR)
},
{ "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048)
FLAGS(NO_CHIP_ERASE)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
MFR_FLAGS(USE_FSR)
},
{ "mt25ql02g", INFO(0x20ba22, 0, 64 * 1024, 4096)
FLAGS(NO_CHIP_ERASE)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
MFR_FLAGS(USE_FSR)
},
{ "mt25qu02g", INFO(0x20bb22, 0, 64 * 1024, 4096)
FLAGS(NO_CHIP_ERASE)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ)
MFR_FLAGS(USE_FSR)
},
{ "m25p05", INFO(0x202010, 0, 32 * 1024, 2) },
{ "m25p10", INFO(0x202011, 0, 32 * 1024, 4) },
{ "m25p20", INFO(0x202012, 0, 64 * 1024, 4) },
{ "m25p40", INFO(0x202013, 0, 64 * 1024, 8) },
{ "m25p80", INFO(0x202014, 0, 64 * 1024, 16) },
{ "m25p16", INFO(0x202015, 0, 64 * 1024, 32) },
{ "m25p32", INFO(0x202016, 0, 64 * 1024, 64) },
{ "m25p64", INFO(0x202017, 0, 64 * 1024, 128) },
{ "m25p128", INFO(0x202018, 0, 256 * 1024, 64) },
{ "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2) },
{ "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4) },
{ "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4) },
{ "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8) },
{ "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16) },
{ "m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32) },
{ "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64) },
{ "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128) },
{ "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64) },
{ "m45pe10", INFO(0x204011, 0, 64 * 1024, 2) },
{ "m45pe80", INFO(0x204014, 0, 64 * 1024, 16) },
{ "m45pe16", INFO(0x204015, 0, 64 * 1024, 32) },
{ "m25pe20", INFO(0x208012, 0, 64 * 1024, 4) },
{ "m25pe80", INFO(0x208014, 0, 64 * 1024, 16) },
{ "m25pe16", INFO(0x208015, 0, 64 * 1024, 32)
NO_SFDP_FLAGS(SECT_4K) },
{ "m25px16", INFO(0x207115, 0, 64 * 1024, 32)
NO_SFDP_FLAGS(SECT_4K) },
{ "m25px32", INFO(0x207116, 0, 64 * 1024, 64)
NO_SFDP_FLAGS(SECT_4K) },
{ "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64)
NO_SFDP_FLAGS(SECT_4K) },
{ "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64)
NO_SFDP_FLAGS(SECT_4K) },
{ "m25px64", INFO(0x207117, 0, 64 * 1024, 128) },
{ "m25px80", INFO(0x207114, 0, 64 * 1024, 16) },
};
/**
* micron_st_nor_read_fsr() - Read the Flag Status Register.
* @nor: pointer to 'struct spi_nor'
* @fsr: pointer to a DMA-able buffer where the value of the
* Flag Status Register will be written. Should be at least 2
* bytes.
*
* Return: 0 on success, -errno otherwise.
*/
static int micron_st_nor_read_fsr(struct spi_nor *nor, u8 *fsr)
{
int ret;
if (nor->spimem) {
struct spi_mem_op op = MICRON_ST_RDFSR_OP(fsr);
if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) {
op.addr.nbytes = nor->params->rdsr_addr_nbytes;
op.dummy.nbytes = nor->params->rdsr_dummy;
/*
* We don't want to read only one byte in DTR mode. So,
* read 2 and then discard the second byte.
*/
op.data.nbytes = 2;
}
spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDFSR, fsr,
1);
}
if (ret)
dev_dbg(nor->dev, "error %d reading FSR\n", ret);
return ret;
}
/**
* micron_st_nor_clear_fsr() - Clear the Flag Status Register.
* @nor: pointer to 'struct spi_nor'.
*/
static void micron_st_nor_clear_fsr(struct spi_nor *nor)
{
int ret;
if (nor->spimem) {
struct spi_mem_op op = MICRON_ST_CLFSR_OP;
spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_CLFSR,
NULL, 0);
}
if (ret)
dev_dbg(nor->dev, "error %d clearing FSR\n", ret);
}
/**
* micron_st_nor_ready() - Query the Status Register as well as the Flag Status
* Register to see if the flash is ready for new commands. If there are any
* errors in the FSR, clear them.
* @nor: pointer to 'struct spi_nor'.
*
* Return: 1 if ready, 0 if not ready, -errno on errors.
*/
static int micron_st_nor_ready(struct spi_nor *nor)
{
int sr_ready, ret;
sr_ready = spi_nor_sr_ready(nor);
if (sr_ready < 0)
return sr_ready;
ret = micron_st_nor_read_fsr(nor, nor->bouncebuf);
if (ret) {
/*
* Some controllers, such as Intel SPI, do not support low
* level operations such as reading the flag status
* register. They only expose a small set of high-level
* operations to the software. If this is the case we use
* only the status register value.
*/
return ret == -EOPNOTSUPP ? sr_ready : ret;
}
if (nor->bouncebuf[0] & (FSR_E_ERR | FSR_P_ERR)) {
if (nor->bouncebuf[0] & FSR_E_ERR)
dev_err(nor->dev, "Erase operation failed.\n");
else
dev_err(nor->dev, "Program operation failed.\n");
if (nor->bouncebuf[0] & FSR_PT_ERR)
dev_err(nor->dev,
"Attempted to modify a protected sector.\n");
micron_st_nor_clear_fsr(nor);
/*
* WEL bit remains set to one when an erase or page program
* error occurs. Issue a Write Disable command to protect
* against inadvertent writes that can possibly corrupt the
* contents of the memory.
*/
ret = spi_nor_write_disable(nor);
if (ret)
return ret;
return -EIO;
}
return sr_ready && !!(nor->bouncebuf[0] & FSR_READY);
}
static void micron_st_nor_default_init(struct spi_nor *nor)
{
nor->flags |= SNOR_F_HAS_LOCK;
nor->flags &= ~SNOR_F_HAS_16BIT_SR;
nor->params->quad_enable = NULL;
}
static int micron_st_nor_late_init(struct spi_nor *nor)
{
struct spi_nor_flash_parameter *params = nor->params;
if (nor->info->mfr_flags & USE_FSR)
params->ready = micron_st_nor_ready;
if (!params->set_4byte_addr_mode)
params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode_wren_en4b_ex4b;
return 0;
}
static const struct spi_nor_fixups micron_st_nor_fixups = {
.default_init = micron_st_nor_default_init,
.late_init = micron_st_nor_late_init,
};
const struct spi_nor_manufacturer spi_nor_micron = {
.name = "micron",
.parts = micron_nor_parts,
.nparts = ARRAY_SIZE(micron_nor_parts),
.fixups = &micron_st_nor_fixups,
};
const struct spi_nor_manufacturer spi_nor_st = {
.name = "st",
.parts = st_nor_parts,
.nparts = ARRAY_SIZE(st_nor_parts),
.fixups = &micron_st_nor_fixups,
};
/* end of linux-master: drivers/mtd/spi-nor/micron-st.c */
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2005, Intec Automation Inc.
* Copyright (C) 2014, Freescale Semiconductor, Inc.
*/
#include <linux/mtd/spi-nor.h>
#include "core.h"
static int
is25lp256_post_bfpt_fixups(struct spi_nor *nor,
const struct sfdp_parameter_header *bfpt_header,
const struct sfdp_bfpt *bfpt)
{
/*
* IS25LP256 supports 4B opcodes, but the BFPT advertises
* BFPT_DWORD1_ADDRESS_BYTES_3_ONLY.
* Overwrite the number of address bytes advertised by the BFPT.
*/
if ((bfpt->dwords[SFDP_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) ==
BFPT_DWORD1_ADDRESS_BYTES_3_ONLY)
nor->params->addr_nbytes = 4;
return 0;
}
static const struct spi_nor_fixups is25lp256_fixups = {
.post_bfpt = is25lp256_post_bfpt_fixups,
};
static int pm25lv_nor_late_init(struct spi_nor *nor)
{
struct spi_nor_erase_map *map = &nor->params->erase_map;
int i;
/* The PM25LV series has a different 4k sector erase opcode */
for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
if (map->erase_type[i].size == 4096)
map->erase_type[i].opcode = SPINOR_OP_BE_4K_PMC;
return 0;
}
static const struct spi_nor_fixups pm25lv_nor_fixups = {
.late_init = pm25lv_nor_late_init,
};
static const struct flash_info issi_nor_parts[] = {
/* ISSI */
{ "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2)
NO_SFDP_FLAGS(SECT_4K) },
{ "is25lq040b", INFO(0x9d4013, 0, 64 * 1024, 8)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "is25lp016d", INFO(0x9d6015, 0, 64 * 1024, 32)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "is25lp080d", INFO(0x9d6014, 0, 64 * 1024, 16)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "is25lp032", INFO(0x9d6016, 0, 64 * 1024, 64)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
{ "is25lp064", INFO(0x9d6017, 0, 64 * 1024, 128)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
{ "is25lp128", INFO(0x9d6018, 0, 64 * 1024, 256)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
{ "is25lp256", INFO(0x9d6019, 0, 64 * 1024, 512)
PARSE_SFDP
FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
.fixups = &is25lp256_fixups },
{ "is25wp032", INFO(0x9d7016, 0, 64 * 1024, 64)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "is25wp064", INFO(0x9d7017, 0, 64 * 1024, 128)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "is25wp128", INFO(0x9d7018, 0, 64 * 1024, 256)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "is25wp256", INFO(0x9d7019, 0, 0, 0)
PARSE_SFDP
FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
FLAGS(SPI_NOR_QUAD_PP)
.fixups = &is25lp256_fixups },
/* PMC */
{ "pm25lv512", INFO(0, 0, 32 * 1024, 2)
NO_SFDP_FLAGS(SECT_4K)
.fixups = &pm25lv_nor_fixups
},
{ "pm25lv010", INFO(0, 0, 32 * 1024, 4)
NO_SFDP_FLAGS(SECT_4K)
.fixups = &pm25lv_nor_fixups
},
{ "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64)
NO_SFDP_FLAGS(SECT_4K) },
};
static void issi_nor_default_init(struct spi_nor *nor)
{
nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable;
}
static const struct spi_nor_fixups issi_fixups = {
.default_init = issi_nor_default_init,
};
const struct spi_nor_manufacturer spi_nor_issi = {
.name = "issi",
.parts = issi_nor_parts,
.nparts = ARRAY_SIZE(issi_nor_parts),
.fixups = &issi_fixups,
};
/* end of linux-master: drivers/mtd/spi-nor/issi.c */
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2005, Intec Automation Inc.
* Copyright (C) 2014, Freescale Semiconductor, Inc.
*/
#include <linux/mtd/spi-nor.h>
#include "core.h"
#define ATMEL_SR_GLOBAL_PROTECT_MASK GENMASK(5, 2)
/*
* The Atmel AT25FS010/AT25FS040 parts have some weird configuration for the
* block protection bits. We don't support them. But legacy behavior in Linux
* is to unlock the whole flash array on startup. Therefore, we have to support
* exactly this operation.
*/
static int at25fs_nor_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
return -EOPNOTSUPP;
}
static int at25fs_nor_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
int ret;
/* We only support unlocking the whole flash array */
if (ofs || len != nor->params->size)
return -EINVAL;
/* Write 0x00 to the status register to disable write protection */
ret = spi_nor_write_sr_and_check(nor, 0);
if (ret)
dev_dbg(nor->dev, "unable to clear BP bits, WP# asserted?\n");
return ret;
}
static int at25fs_nor_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
return -EOPNOTSUPP;
}
static const struct spi_nor_locking_ops at25fs_nor_locking_ops = {
.lock = at25fs_nor_lock,
.unlock = at25fs_nor_unlock,
.is_locked = at25fs_nor_is_locked,
};
static int at25fs_nor_late_init(struct spi_nor *nor)
{
nor->params->locking_ops = &at25fs_nor_locking_ops;
return 0;
}
static const struct spi_nor_fixups at25fs_nor_fixups = {
.late_init = at25fs_nor_late_init,
};
/**
* atmel_nor_set_global_protection - Do a Global Protect or Unprotect command
* @nor: pointer to 'struct spi_nor'
* @ofs: offset in bytes
* @len: len in bytes
* @is_protect: if true do a Global Protect, otherwise do a Global Unprotect
*
* Return: 0 on success, -errno otherwise.
*/
static int atmel_nor_set_global_protection(struct spi_nor *nor, loff_t ofs,
uint64_t len, bool is_protect)
{
int ret;
u8 sr;
/* We only support locking the whole flash array */
if (ofs || len != nor->params->size)
return -EINVAL;
ret = spi_nor_read_sr(nor, nor->bouncebuf);
if (ret)
return ret;
sr = nor->bouncebuf[0];
/* SRWD bit needs to be cleared, otherwise the protection doesn't change */
if (sr & SR_SRWD) {
sr &= ~SR_SRWD;
ret = spi_nor_write_sr_and_check(nor, sr);
if (ret) {
dev_dbg(nor->dev, "unable to clear SRWD bit, WP# asserted?\n");
return ret;
}
}
if (is_protect) {
sr |= ATMEL_SR_GLOBAL_PROTECT_MASK;
/*
* Set the SRWD bit again as soon as we are protecting
* anything. This will ensure that the WP# pin is working
* correctly. By doing this we also behave the same as
* spi_nor_sr_lock(), which sets SRWD if any block protection
* is active.
*/
sr |= SR_SRWD;
} else {
sr &= ~ATMEL_SR_GLOBAL_PROTECT_MASK;
}
nor->bouncebuf[0] = sr;
/*
* We cannot use the spi_nor_write_sr_and_check() because this command
* isn't really setting any bits, instead it is a pseudo command for
* "Global Unprotect" or "Global Protect"
*/
return spi_nor_write_sr(nor, nor->bouncebuf, 1);
}
static int atmel_nor_global_protect(struct spi_nor *nor, loff_t ofs,
uint64_t len)
{
return atmel_nor_set_global_protection(nor, ofs, len, true);
}
static int atmel_nor_global_unprotect(struct spi_nor *nor, loff_t ofs,
uint64_t len)
{
return atmel_nor_set_global_protection(nor, ofs, len, false);
}
static int atmel_nor_is_global_protected(struct spi_nor *nor, loff_t ofs,
uint64_t len)
{
int ret;
if (ofs >= nor->params->size || (ofs + len) > nor->params->size)
return -EINVAL;
ret = spi_nor_read_sr(nor, nor->bouncebuf);
if (ret)
return ret;
return ((nor->bouncebuf[0] & ATMEL_SR_GLOBAL_PROTECT_MASK) == ATMEL_SR_GLOBAL_PROTECT_MASK);
}
static const struct spi_nor_locking_ops atmel_nor_global_protection_ops = {
.lock = atmel_nor_global_protect,
.unlock = atmel_nor_global_unprotect,
.is_locked = atmel_nor_is_global_protected,
};
static int atmel_nor_global_protection_late_init(struct spi_nor *nor)
{
nor->params->locking_ops = &atmel_nor_global_protection_ops;
return 0;
}
static const struct spi_nor_fixups atmel_nor_global_protection_fixups = {
.late_init = atmel_nor_global_protection_late_init,
};
static const struct flash_info atmel_nor_parts[] = {
/* Atmel -- some are (confusingly) marketed as "DataFlash" */
{ "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4)
FLAGS(SPI_NOR_HAS_LOCK)
NO_SFDP_FLAGS(SECT_4K)
.fixups = &at25fs_nor_fixups },
{ "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8)
FLAGS(SPI_NOR_HAS_LOCK)
NO_SFDP_FLAGS(SECT_4K)
.fixups = &at25fs_nor_fixups },
{ "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
NO_SFDP_FLAGS(SECT_4K)
.fixups = &atmel_nor_global_protection_fixups },
{ "at25df321", INFO(0x1f4700, 0, 64 * 1024, 64)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
NO_SFDP_FLAGS(SECT_4K)
.fixups = &atmel_nor_global_protection_fixups },
{ "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
NO_SFDP_FLAGS(SECT_4K)
.fixups = &atmel_nor_global_protection_fixups },
{ "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
NO_SFDP_FLAGS(SECT_4K)
.fixups = &atmel_nor_global_protection_fixups },
{ "at25sl321", INFO(0x1f4216, 0, 64 * 1024, 64)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8)
NO_SFDP_FLAGS(SECT_4K) },
{ "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
NO_SFDP_FLAGS(SECT_4K)
.fixups = &atmel_nor_global_protection_fixups },
{ "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
NO_SFDP_FLAGS(SECT_4K)
.fixups = &atmel_nor_global_protection_fixups },
{ "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
NO_SFDP_FLAGS(SECT_4K)
.fixups = &atmel_nor_global_protection_fixups },
{ "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16)
NO_SFDP_FLAGS(SECT_4K) },
};
const struct spi_nor_manufacturer spi_nor_atmel = {
.name = "atmel",
.parts = atmel_nor_parts,
.nparts = ARRAY_SIZE(atmel_nor_parts),
};
/* end of linux-master: drivers/mtd/spi-nor/atmel.c */
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2005, Intec Automation Inc.
* Copyright (C) 2014, Freescale Semiconductor, Inc.
*/
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/mtd/spi-nor.h>
#include "core.h"
/* flash_info mfr_flag. Used to clear sticky proprietary SR bits. */
#define USE_CLSR BIT(0)
#define USE_CLPEF BIT(1)
#define SPINOR_OP_CLSR 0x30 /* Clear status register 1 */
#define SPINOR_OP_CLPEF 0x82 /* Clear program/erase failure flags */
#define SPINOR_OP_RD_ANY_REG 0x65 /* Read any register */
#define SPINOR_OP_WR_ANY_REG 0x71 /* Write any register */
#define SPINOR_REG_CYPRESS_VREG 0x00800000
#define SPINOR_REG_CYPRESS_STR1 0x0
#define SPINOR_REG_CYPRESS_STR1V \
(SPINOR_REG_CYPRESS_VREG + SPINOR_REG_CYPRESS_STR1)
#define SPINOR_REG_CYPRESS_CFR1 0x2
#define SPINOR_REG_CYPRESS_CFR1_QUAD_EN BIT(1) /* Quad Enable */
#define SPINOR_REG_CYPRESS_CFR2 0x3
#define SPINOR_REG_CYPRESS_CFR2V \
(SPINOR_REG_CYPRESS_VREG + SPINOR_REG_CYPRESS_CFR2)
#define SPINOR_REG_CYPRESS_CFR2_MEMLAT_MASK GENMASK(3, 0)
#define SPINOR_REG_CYPRESS_CFR2_MEMLAT_11_24 0xb
#define SPINOR_REG_CYPRESS_CFR2_ADRBYT BIT(7)
#define SPINOR_REG_CYPRESS_CFR3 0x4
#define SPINOR_REG_CYPRESS_CFR3_PGSZ BIT(4) /* Page size. */
#define SPINOR_REG_CYPRESS_CFR5 0x6
#define SPINOR_REG_CYPRESS_CFR5_BIT6 BIT(6)
#define SPINOR_REG_CYPRESS_CFR5_DDR BIT(1)
#define SPINOR_REG_CYPRESS_CFR5_OPI BIT(0)
#define SPINOR_REG_CYPRESS_CFR5_OCT_DTR_EN \
(SPINOR_REG_CYPRESS_CFR5_BIT6 | SPINOR_REG_CYPRESS_CFR5_DDR | \
SPINOR_REG_CYPRESS_CFR5_OPI)
#define SPINOR_REG_CYPRESS_CFR5_OCT_DTR_DS SPINOR_REG_CYPRESS_CFR5_BIT6
#define SPINOR_OP_CYPRESS_RD_FAST 0xee
#define SPINOR_REG_CYPRESS_ARCFN 0x00000006
/* Cypress SPI NOR flash operations. */
#define CYPRESS_NOR_WR_ANY_REG_OP(naddr, addr, ndata, buf) \
SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WR_ANY_REG, 0), \
SPI_MEM_OP_ADDR(naddr, addr, 0), \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_DATA_OUT(ndata, buf, 0))
#define CYPRESS_NOR_RD_ANY_REG_OP(naddr, addr, ndummy, buf) \
SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RD_ANY_REG, 0), \
SPI_MEM_OP_ADDR(naddr, addr, 0), \
SPI_MEM_OP_DUMMY(ndummy, 0), \
SPI_MEM_OP_DATA_IN(1, buf, 0))
#define SPANSION_OP(opcode) \
SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 0), \
SPI_MEM_OP_NO_ADDR, \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
/**
* struct spansion_nor_params - Spansion private parameters.
* @clsr: Clear Status Register or Clear Program and Erase Failure Flag
* opcode.
*/
struct spansion_nor_params {
u8 clsr;
};
/**
* spansion_nor_clear_sr() - Clear the Status Register.
* @nor: pointer to 'struct spi_nor'.
*/
static void spansion_nor_clear_sr(struct spi_nor *nor)
{
const struct spansion_nor_params *priv_params = nor->params->priv;
int ret;
if (nor->spimem) {
struct spi_mem_op op = SPANSION_OP(priv_params->clsr);
spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_CLSR,
NULL, 0);
}
if (ret)
dev_dbg(nor->dev, "error %d clearing SR\n", ret);
}
static int cypress_nor_sr_ready_and_clear_reg(struct spi_nor *nor, u64 addr)
{
struct spi_nor_flash_parameter *params = nor->params;
struct spi_mem_op op =
CYPRESS_NOR_RD_ANY_REG_OP(params->addr_mode_nbytes, addr,
0, nor->bouncebuf);
int ret;
if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) {
op.dummy.nbytes = params->rdsr_dummy;
op.data.nbytes = 2;
}
ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto);
if (ret)
return ret;
if (nor->bouncebuf[0] & (SR_E_ERR | SR_P_ERR)) {
if (nor->bouncebuf[0] & SR_E_ERR)
dev_err(nor->dev, "Erase Error occurred\n");
else
dev_err(nor->dev, "Programming Error occurred\n");
spansion_nor_clear_sr(nor);
ret = spi_nor_write_disable(nor);
if (ret)
return ret;
return -EIO;
}
return !(nor->bouncebuf[0] & SR_WIP);
}
/**
* cypress_nor_sr_ready_and_clear() - Query the Status Register of each die by
* using Read Any Register command to see if the whole flash is ready for new
* commands and clear it if there are any errors.
* @nor: pointer to 'struct spi_nor'.
*
* Return: 1 if ready, 0 if not ready, -errno on errors.
*/
static int cypress_nor_sr_ready_and_clear(struct spi_nor *nor)
{
struct spi_nor_flash_parameter *params = nor->params;
u64 addr;
int ret;
u8 i;
for (i = 0; i < params->n_dice; i++) {
addr = params->vreg_offset[i] + SPINOR_REG_CYPRESS_STR1;
ret = cypress_nor_sr_ready_and_clear_reg(nor, addr);
if (ret < 0)
return ret;
else if (ret == 0)
return 0;
}
return 1;
}
static int cypress_nor_set_memlat(struct spi_nor *nor, u64 addr)
{
struct spi_mem_op op;
u8 *buf = nor->bouncebuf;
int ret;
u8 addr_mode_nbytes = nor->params->addr_mode_nbytes;
op = (struct spi_mem_op)
CYPRESS_NOR_RD_ANY_REG_OP(addr_mode_nbytes, addr, 0, buf);
ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto);
if (ret)
return ret;
/* Use 24 dummy cycles for memory array reads. */
*buf &= ~SPINOR_REG_CYPRESS_CFR2_MEMLAT_MASK;
*buf |= FIELD_PREP(SPINOR_REG_CYPRESS_CFR2_MEMLAT_MASK,
SPINOR_REG_CYPRESS_CFR2_MEMLAT_11_24);
op = (struct spi_mem_op)
CYPRESS_NOR_WR_ANY_REG_OP(addr_mode_nbytes, addr, 1, buf);
ret = spi_nor_write_any_volatile_reg(nor, &op, nor->reg_proto);
if (ret)
return ret;
nor->read_dummy = 24;
return 0;
}
static int cypress_nor_set_octal_dtr_bits(struct spi_nor *nor, u64 addr)
{
struct spi_mem_op op;
u8 *buf = nor->bouncebuf;
/* Set the octal and DTR enable bits. */
buf[0] = SPINOR_REG_CYPRESS_CFR5_OCT_DTR_EN;
op = (struct spi_mem_op)
CYPRESS_NOR_WR_ANY_REG_OP(nor->params->addr_mode_nbytes,
addr, 1, buf);
return spi_nor_write_any_volatile_reg(nor, &op, nor->reg_proto);
}
static int cypress_nor_octal_dtr_en(struct spi_nor *nor)
{
const struct spi_nor_flash_parameter *params = nor->params;
u8 *buf = nor->bouncebuf;
u64 addr;
int i, ret;
for (i = 0; i < params->n_dice; i++) {
addr = params->vreg_offset[i] + SPINOR_REG_CYPRESS_CFR2;
ret = cypress_nor_set_memlat(nor, addr);
if (ret)
return ret;
addr = params->vreg_offset[i] + SPINOR_REG_CYPRESS_CFR5;
ret = cypress_nor_set_octal_dtr_bits(nor, addr);
if (ret)
return ret;
}
/* Read flash ID to make sure the switch was successful. */
ret = spi_nor_read_id(nor, nor->addr_nbytes, 3, buf,
SNOR_PROTO_8_8_8_DTR);
if (ret) {
dev_dbg(nor->dev, "error %d reading JEDEC ID after enabling 8D-8D-8D mode\n", ret);
return ret;
}
if (memcmp(buf, nor->info->id, nor->info->id_len))
return -EINVAL;
return 0;
}
static int cypress_nor_set_single_spi_bits(struct spi_nor *nor, u64 addr)
{
struct spi_mem_op op;
u8 *buf = nor->bouncebuf;
/*
* The register is 1-byte wide, but 1-byte transactions are not allowed
* in 8D-8D-8D mode. Since there is no register at the next location,
* just initialize the value to 0 and let the transaction go on.
*/
buf[0] = SPINOR_REG_CYPRESS_CFR5_OCT_DTR_DS;
buf[1] = 0;
op = (struct spi_mem_op)
CYPRESS_NOR_WR_ANY_REG_OP(nor->addr_nbytes, addr, 2, buf);
return spi_nor_write_any_volatile_reg(nor, &op, SNOR_PROTO_8_8_8_DTR);
}
static int cypress_nor_octal_dtr_dis(struct spi_nor *nor)
{
const struct spi_nor_flash_parameter *params = nor->params;
u8 *buf = nor->bouncebuf;
u64 addr;
int i, ret;
for (i = 0; i < params->n_dice; i++) {
addr = params->vreg_offset[i] + SPINOR_REG_CYPRESS_CFR5;
ret = cypress_nor_set_single_spi_bits(nor, addr);
if (ret)
return ret;
}
/* Read flash ID to make sure the switch was successful. */
ret = spi_nor_read_id(nor, 0, 0, buf, SNOR_PROTO_1_1_1);
if (ret) {
dev_dbg(nor->dev, "error %d reading JEDEC ID after disabling 8D-8D-8D mode\n", ret);
return ret;
}
if (memcmp(buf, nor->info->id, nor->info->id_len))
return -EINVAL;
return 0;
}
static int cypress_nor_quad_enable_volatile_reg(struct spi_nor *nor, u64 addr)
{
struct spi_mem_op op;
u8 addr_mode_nbytes = nor->params->addr_mode_nbytes;
u8 cfr1v_written;
int ret;
op = (struct spi_mem_op)
CYPRESS_NOR_RD_ANY_REG_OP(addr_mode_nbytes, addr, 0,
nor->bouncebuf);
ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto);
if (ret)
return ret;
if (nor->bouncebuf[0] & SPINOR_REG_CYPRESS_CFR1_QUAD_EN)
return 0;
/* Update the Quad Enable bit. */
nor->bouncebuf[0] |= SPINOR_REG_CYPRESS_CFR1_QUAD_EN;
op = (struct spi_mem_op)
CYPRESS_NOR_WR_ANY_REG_OP(addr_mode_nbytes, addr, 1,
nor->bouncebuf);
ret = spi_nor_write_any_volatile_reg(nor, &op, nor->reg_proto);
if (ret)
return ret;
cfr1v_written = nor->bouncebuf[0];
/* Read back and check it. */
op = (struct spi_mem_op)
CYPRESS_NOR_RD_ANY_REG_OP(addr_mode_nbytes, addr, 0,
nor->bouncebuf);
ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto);
if (ret)
return ret;
if (nor->bouncebuf[0] != cfr1v_written) {
dev_err(nor->dev, "CFR1: Read back test failed\n");
return -EIO;
}
return 0;
}
/**
* cypress_nor_quad_enable_volatile() - enable Quad I/O mode in volatile
* register.
* @nor: pointer to a 'struct spi_nor'
*
* It is recommended to update volatile registers in the field application due
* to the risk of corrupting the non-volatile registers on a power interruption. This
* function sets Quad Enable bit in CFR1 volatile. If users set the Quad Enable
* bit in the CFR1 non-volatile in advance (typically by a Flash programmer
* before mounting Flash on PCB), the Quad Enable bit in the CFR1 volatile is
* also set during Flash power-up.
*
* Return: 0 on success, -errno otherwise.
*/
static int cypress_nor_quad_enable_volatile(struct spi_nor *nor)
{
struct spi_nor_flash_parameter *params = nor->params;
u64 addr;
u8 i;
int ret;
for (i = 0; i < params->n_dice; i++) {
addr = params->vreg_offset[i] + SPINOR_REG_CYPRESS_CFR1;
ret = cypress_nor_quad_enable_volatile_reg(nor, addr);
if (ret)
return ret;
}
return 0;
}
/**
* cypress_nor_determine_addr_mode_by_sr1() - Determine current address mode
* (3 or 4-byte) by querying status
* register 1 (SR1).
* @nor: pointer to a 'struct spi_nor'
* @addr_mode: pointer to a buffer where we return the determined
* address mode.
*
* This function tries to determine the current address mode by comparing SR1 values
* from RDSR1(no address), RDAR(3-byte address), and RDAR(4-byte address).
*
* Return: 0 on success, -errno otherwise.
*/
static int cypress_nor_determine_addr_mode_by_sr1(struct spi_nor *nor,
u8 *addr_mode)
{
struct spi_mem_op op =
CYPRESS_NOR_RD_ANY_REG_OP(3, SPINOR_REG_CYPRESS_STR1V, 0,
nor->bouncebuf);
bool is3byte, is4byte;
int ret;
ret = spi_nor_read_sr(nor, &nor->bouncebuf[1]);
if (ret)
return ret;
ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto);
if (ret)
return ret;
is3byte = (nor->bouncebuf[0] == nor->bouncebuf[1]);
op = (struct spi_mem_op)
CYPRESS_NOR_RD_ANY_REG_OP(4, SPINOR_REG_CYPRESS_STR1V, 0,
nor->bouncebuf);
ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto);
if (ret)
return ret;
is4byte = (nor->bouncebuf[0] == nor->bouncebuf[1]);
if (is3byte == is4byte)
return -EIO;
if (is3byte)
*addr_mode = 3;
else
*addr_mode = 4;
return 0;
}
/**
* cypress_nor_set_addr_mode_nbytes() - Set the number of address bytes to match
* the current address mode.
* @nor: pointer to a 'struct spi_nor'
*
* Determine current address mode by reading SR1 with different methods, then
* query CFR2V[7] to confirm. If the determination fails, force the flash into
* 4-byte address mode.
*
* Return: 0 on success, -errno otherwise.
*/
static int cypress_nor_set_addr_mode_nbytes(struct spi_nor *nor)
{
struct spi_mem_op op;
u8 addr_mode;
int ret;
/*
* Read SR1 by RDSR1 and RDAR(3- AND 4-byte addr). Use write enable
* that sets bit-1 in SR1.
*/
ret = spi_nor_write_enable(nor);
if (ret)
return ret;
ret = cypress_nor_determine_addr_mode_by_sr1(nor, &addr_mode);
if (ret) {
ret = spi_nor_set_4byte_addr_mode(nor, true);
if (ret)
return ret;
return spi_nor_write_disable(nor);
}
ret = spi_nor_write_disable(nor);
if (ret)
return ret;
/*
* Query CFR2V and make sure there is no contradiction between the determined
* address mode and CFR2V[7].
*/
op = (struct spi_mem_op)
CYPRESS_NOR_RD_ANY_REG_OP(addr_mode, SPINOR_REG_CYPRESS_CFR2V,
0, nor->bouncebuf);
ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto);
if (ret)
return ret;
if (nor->bouncebuf[0] & SPINOR_REG_CYPRESS_CFR2_ADRBYT) {
if (addr_mode != 4)
return spi_nor_set_4byte_addr_mode(nor, true);
} else {
if (addr_mode != 3)
return spi_nor_set_4byte_addr_mode(nor, true);
}
nor->params->addr_nbytes = addr_mode;
nor->params->addr_mode_nbytes = addr_mode;
return 0;
}
/**
* cypress_nor_get_page_size() - Get flash page size configuration.
* @nor: pointer to a 'struct spi_nor'
*
* The BFPT table advertises a 512B or 256B page size depending on part but the
* page size is actually configurable (with the default being 256B). Read from
* CFR3V[4] and set the correct size.
*
* Return: 0 on success, -errno otherwise.
*/
static int cypress_nor_get_page_size(struct spi_nor *nor)
{
struct spi_mem_op op =
CYPRESS_NOR_RD_ANY_REG_OP(nor->params->addr_mode_nbytes,
0, 0, nor->bouncebuf);
struct spi_nor_flash_parameter *params = nor->params;
int ret;
u8 i;
/*
* Use the minimum common page size configuration. Programming 256-byte
* under 512-byte page size configuration is safe.
*/
params->page_size = 256;
for (i = 0; i < params->n_dice; i++) {
op.addr.val = params->vreg_offset[i] + SPINOR_REG_CYPRESS_CFR3;
ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto);
if (ret)
return ret;
if (!(nor->bouncebuf[0] & SPINOR_REG_CYPRESS_CFR3_PGSZ))
return 0;
}
params->page_size = 512;
return 0;
}
static void cypress_nor_ecc_init(struct spi_nor *nor)
{
/*
* Programming is supported only in 16-byte ECC data unit granularity.
* Byte-programming, bit-walking, or multiple program operations to the
* same ECC data unit without an erase are not allowed.
*/
nor->params->writesize = 16;
nor->flags |= SNOR_F_ECC;
}
static int
s25fs256t_post_bfpt_fixup(struct spi_nor *nor,
const struct sfdp_parameter_header *bfpt_header,
const struct sfdp_bfpt *bfpt)
{
struct spi_mem_op op;
int ret;
ret = cypress_nor_set_addr_mode_nbytes(nor);
if (ret)
return ret;
/* Read Architecture Configuration Register (ARCFN) */
op = (struct spi_mem_op)
CYPRESS_NOR_RD_ANY_REG_OP(nor->params->addr_mode_nbytes,
SPINOR_REG_CYPRESS_ARCFN, 1,
nor->bouncebuf);
ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto);
if (ret)
return ret;
/* ARCFN value must be 0 if uniform sector is selected */
if (nor->bouncebuf[0])
return -ENODEV;
return 0;
}
static int s25fs256t_post_sfdp_fixup(struct spi_nor *nor)
{
struct spi_nor_flash_parameter *params = nor->params;
/*
* S25FS256T does not define the SCCR map, but we would like to use the
* same code base for both single and multi chip package devices, thus
* set the vreg_offset and n_dice to be able to do so.
*/
params->vreg_offset = devm_kmalloc(nor->dev, sizeof(u32), GFP_KERNEL);
if (!params->vreg_offset)
return -ENOMEM;
params->vreg_offset[0] = SPINOR_REG_CYPRESS_VREG;
params->n_dice = 1;
/* PP_1_1_4_4B is supported but missing in 4BAIT. */
params->hwcaps.mask |= SNOR_HWCAPS_PP_1_1_4;
spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_1_1_4],
SPINOR_OP_PP_1_1_4_4B,
SNOR_PROTO_1_1_4);
return cypress_nor_get_page_size(nor);
}
static int s25fs256t_late_init(struct spi_nor *nor)
{
cypress_nor_ecc_init(nor);
return 0;
}
static struct spi_nor_fixups s25fs256t_fixups = {
.post_bfpt = s25fs256t_post_bfpt_fixup,
.post_sfdp = s25fs256t_post_sfdp_fixup,
.late_init = s25fs256t_late_init,
};
static int
s25hx_t_post_bfpt_fixup(struct spi_nor *nor,
const struct sfdp_parameter_header *bfpt_header,
const struct sfdp_bfpt *bfpt)
{
int ret;
ret = cypress_nor_set_addr_mode_nbytes(nor);
if (ret)
return ret;
/* Replace Quad Enable with volatile version */
nor->params->quad_enable = cypress_nor_quad_enable_volatile;
return 0;
}
static int s25hx_t_post_sfdp_fixup(struct spi_nor *nor)
{
struct spi_nor_flash_parameter *params = nor->params;
struct spi_nor_erase_type *erase_type = params->erase_map.erase_type;
unsigned int i;
if (!params->n_dice || !params->vreg_offset) {
dev_err(nor->dev, "%s failed. The volatile register offset could not be retrieved from SFDP.\n",
__func__);
return -EOPNOTSUPP;
}
/* The 2 Gb parts duplicate info and advertise 4 dice instead of 2. */
if (params->size == SZ_256M)
params->n_dice = 2;
/*
* In some parts, 3-byte erase opcodes are advertised by 4BAIT.
* Convert them to 4-byte erase opcodes.
*/
for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
switch (erase_type[i].opcode) {
case SPINOR_OP_SE:
erase_type[i].opcode = SPINOR_OP_SE_4B;
break;
case SPINOR_OP_BE_4K:
erase_type[i].opcode = SPINOR_OP_BE_4K_4B;
break;
default:
break;
}
}
return cypress_nor_get_page_size(nor);
}
static int s25hx_t_late_init(struct spi_nor *nor)
{
struct spi_nor_flash_parameter *params = nor->params;
/* Fast Read 4B requires mode cycles */
params->reads[SNOR_CMD_READ_FAST].num_mode_clocks = 8;
params->ready = cypress_nor_sr_ready_and_clear;
cypress_nor_ecc_init(nor);
return 0;
}
static struct spi_nor_fixups s25hx_t_fixups = {
.post_bfpt = s25hx_t_post_bfpt_fixup,
.post_sfdp = s25hx_t_post_sfdp_fixup,
.late_init = s25hx_t_late_init,
};
/**
* cypress_nor_set_octal_dtr() - Enable or disable octal DTR on Cypress flashes.
* @nor: pointer to a 'struct spi_nor'
* @enable: whether to enable or disable Octal DTR
*
* This also sets the memory access latency cycles to 24 to allow the flash to
* run at up to 200MHz.
*
* Return: 0 on success, -errno otherwise.
*/
static int cypress_nor_set_octal_dtr(struct spi_nor *nor, bool enable)
{
return enable ? cypress_nor_octal_dtr_en(nor) :
cypress_nor_octal_dtr_dis(nor);
}
static int s28hx_t_post_sfdp_fixup(struct spi_nor *nor)
{
struct spi_nor_flash_parameter *params = nor->params;
if (!params->n_dice || !params->vreg_offset) {
dev_err(nor->dev, "%s failed. The volatile register offset could not be retrieved from SFDP.\n",
__func__);
return -EOPNOTSUPP;
}
/* The 2 Gb parts duplicate info and advertise 4 dice instead of 2. */
if (params->size == SZ_256M)
params->n_dice = 2;
/*
* On older versions of the flash the xSPI Profile 1.0 table has the
* 8D-8D-8D Fast Read opcode as 0x00. But it actually should be 0xEE.
*/
if (params->reads[SNOR_CMD_READ_8_8_8_DTR].opcode == 0)
params->reads[SNOR_CMD_READ_8_8_8_DTR].opcode =
SPINOR_OP_CYPRESS_RD_FAST;
/* This flash is also missing the 4-byte Page Program opcode bit. */
spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
SPINOR_OP_PP_4B, SNOR_PROTO_1_1_1);
/*
* Since xSPI Page Program opcode is backward compatible with
* Legacy SPI, use Legacy SPI opcode there as well.
*/
spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_8_8_8_DTR],
SPINOR_OP_PP_4B, SNOR_PROTO_8_8_8_DTR);
/*
* The xSPI Profile 1.0 table advertises the number of additional
* address bytes needed for Read Status Register command as 0 but the
* actual value for that is 4.
*/
params->rdsr_addr_nbytes = 4;
return cypress_nor_get_page_size(nor);
}
static int s28hx_t_post_bfpt_fixup(struct spi_nor *nor,
const struct sfdp_parameter_header *bfpt_header,
const struct sfdp_bfpt *bfpt)
{
return cypress_nor_set_addr_mode_nbytes(nor);
}
static int s28hx_t_late_init(struct spi_nor *nor)
{
struct spi_nor_flash_parameter *params = nor->params;
params->set_octal_dtr = cypress_nor_set_octal_dtr;
params->ready = cypress_nor_sr_ready_and_clear;
cypress_nor_ecc_init(nor);
return 0;
}
static const struct spi_nor_fixups s28hx_t_fixups = {
.post_sfdp = s28hx_t_post_sfdp_fixup,
.post_bfpt = s28hx_t_post_bfpt_fixup,
.late_init = s28hx_t_late_init,
};
static int
s25fs_s_nor_post_bfpt_fixups(struct spi_nor *nor,
const struct sfdp_parameter_header *bfpt_header,
const struct sfdp_bfpt *bfpt)
{
/*
* The S25FS-S chip family reports 512-byte pages in BFPT but
* in reality the write buffer still wraps at the safe default
* of 256 bytes. Overwrite the page size advertised by BFPT
* to get the writes working.
*/
nor->params->page_size = 256;
return 0;
}
static const struct spi_nor_fixups s25fs_s_nor_fixups = {
.post_bfpt = s25fs_s_nor_post_bfpt_fixups,
};
static const struct flash_info spansion_nor_parts[] = {
/* Spansion/Cypress -- single (large) sector size only, at least
* for the chips listed here (without boot sectors).
*/
{ "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64)
NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128)
NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25fl128s0", INFO6(0x012018, 0x4d0080, 256 * 1024, 64)
NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
MFR_FLAGS(USE_CLSR)
},
{ "s25fl128s1", INFO6(0x012018, 0x4d0180, 64 * 1024, 256)
NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
MFR_FLAGS(USE_CLSR)
},
{ "s25fl256s0", INFO6(0x010219, 0x4d0080, 256 * 1024, 128)
NO_SFDP_FLAGS(SPI_NOR_SKIP_SFDP | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ)
MFR_FLAGS(USE_CLSR)
},
{ "s25fl256s1", INFO6(0x010219, 0x4d0180, 64 * 1024, 512)
NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
MFR_FLAGS(USE_CLSR)
},
{ "s25fl512s", INFO6(0x010220, 0x4d0080, 256 * 1024, 256)
FLAGS(SPI_NOR_HAS_LOCK)
NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
MFR_FLAGS(USE_CLSR)
},
{ "s25fs128s1", INFO6(0x012018, 0x4d0181, 64 * 1024, 256)
NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
MFR_FLAGS(USE_CLSR)
.fixups = &s25fs_s_nor_fixups, },
{ "s25fs256s0", INFO6(0x010219, 0x4d0081, 256 * 1024, 128)
NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
MFR_FLAGS(USE_CLSR)
},
{ "s25fs256s1", INFO6(0x010219, 0x4d0181, 64 * 1024, 512)
NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
MFR_FLAGS(USE_CLSR)
},
{ "s25fs512s", INFO6(0x010220, 0x4d0081, 256 * 1024, 256)
NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
MFR_FLAGS(USE_CLSR)
.fixups = &s25fs_s_nor_fixups, },
{ "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64) },
{ "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256) },
{ "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64)
NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
MFR_FLAGS(USE_CLSR)
},
{ "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256)
NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
MFR_FLAGS(USE_CLSR)
},
{ "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8) },
{ "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16) },
{ "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32) },
{ "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64) },
{ "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128) },
{ "s25fl004k", INFO(0xef4013, 0, 64 * 1024, 8)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "s25fl116k", INFO(0x014015, 0, 64 * 1024, 32)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64)
NO_SFDP_FLAGS(SECT_4K) },
{ "s25fl164k", INFO(0x014017, 0, 64 * 1024, 128)
NO_SFDP_FLAGS(SECT_4K) },
{ "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
{ "s25fl208k", INFO(0x014014, 0, 64 * 1024, 16)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
{ "s25fl064l", INFO(0x016017, 0, 64 * 1024, 128)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
{ "s25fl128l", INFO(0x016018, 0, 64 * 1024, 256)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
{ "s25fl256l", INFO(0x016019, 0, 64 * 1024, 512)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
{ "s25fs256t", INFO6(0x342b19, 0x0f0890, 0, 0)
PARSE_SFDP
MFR_FLAGS(USE_CLPEF)
.fixups = &s25fs256t_fixups },
{ "s25hl512t", INFO6(0x342a1a, 0x0f0390, 0, 0)
PARSE_SFDP
MFR_FLAGS(USE_CLPEF)
.fixups = &s25hx_t_fixups },
{ "s25hl01gt", INFO6(0x342a1b, 0x0f0390, 0, 0)
PARSE_SFDP
MFR_FLAGS(USE_CLPEF)
.fixups = &s25hx_t_fixups },
{ "s25hl02gt", INFO6(0x342a1c, 0x0f0090, 0, 0)
PARSE_SFDP
MFR_FLAGS(USE_CLPEF)
FLAGS(NO_CHIP_ERASE)
.fixups = &s25hx_t_fixups },
{ "s25hs512t", INFO6(0x342b1a, 0x0f0390, 0, 0)
PARSE_SFDP
MFR_FLAGS(USE_CLPEF)
.fixups = &s25hx_t_fixups },
{ "s25hs01gt", INFO6(0x342b1b, 0x0f0390, 0, 0)
PARSE_SFDP
MFR_FLAGS(USE_CLPEF)
.fixups = &s25hx_t_fixups },
{ "s25hs02gt", INFO6(0x342b1c, 0x0f0090, 0, 0)
PARSE_SFDP
MFR_FLAGS(USE_CLPEF)
FLAGS(NO_CHIP_ERASE)
.fixups = &s25hx_t_fixups },
{ "cy15x104q", INFO6(0x042cc2, 0x7f7f7f, 512 * 1024, 1)
FLAGS(SPI_NOR_NO_ERASE) },
{ "s28hl512t", INFO(0x345a1a, 0, 0, 0)
PARSE_SFDP
MFR_FLAGS(USE_CLPEF)
.fixups = &s28hx_t_fixups,
},
{ "s28hl01gt", INFO(0x345a1b, 0, 0, 0)
PARSE_SFDP
MFR_FLAGS(USE_CLPEF)
.fixups = &s28hx_t_fixups,
},
{ "s28hs512t", INFO(0x345b1a, 0, 0, 0)
PARSE_SFDP
MFR_FLAGS(USE_CLPEF)
.fixups = &s28hx_t_fixups,
},
{ "s28hs01gt", INFO(0x345b1b, 0, 0, 0)
PARSE_SFDP
MFR_FLAGS(USE_CLPEF)
.fixups = &s28hx_t_fixups,
},
{ "s28hs02gt", INFO(0x345b1c, 0, 0, 0)
PARSE_SFDP
MFR_FLAGS(USE_CLPEF)
.fixups = &s28hx_t_fixups,
},
};
/**
* spansion_nor_sr_ready_and_clear() - Query the Status Register to see if the
* flash is ready for new commands and clear it if there are any errors.
* @nor: pointer to 'struct spi_nor'.
*
* Return: 1 if ready, 0 if not ready, -errno on errors.
*/
static int spansion_nor_sr_ready_and_clear(struct spi_nor *nor)
{
int ret;
ret = spi_nor_read_sr(nor, nor->bouncebuf);
if (ret)
return ret;
if (nor->bouncebuf[0] & (SR_E_ERR | SR_P_ERR)) {
if (nor->bouncebuf[0] & SR_E_ERR)
dev_err(nor->dev, "Erase Error occurred\n");
else
dev_err(nor->dev, "Programming Error occurred\n");
spansion_nor_clear_sr(nor);
/*
* WEL bit remains set to one when an erase or page program
* error occurs. Issue a Write Disable command to protect
* against inadvertent writes that can possibly corrupt the
* contents of the memory.
*/
ret = spi_nor_write_disable(nor);
if (ret)
return ret;
return -EIO;
}
return !(nor->bouncebuf[0] & SR_WIP);
}
static int spansion_nor_late_init(struct spi_nor *nor)
{
struct spi_nor_flash_parameter *params = nor->params;
struct spansion_nor_params *priv_params;
u8 mfr_flags = nor->info->mfr_flags;
if (params->size > SZ_16M) {
nor->flags |= SNOR_F_4B_OPCODES;
/* No small sector erase for 4-byte command set */
nor->erase_opcode = SPINOR_OP_SE;
nor->mtd.erasesize = nor->info->sector_size;
}
if (mfr_flags & (USE_CLSR | USE_CLPEF)) {
priv_params = devm_kmalloc(nor->dev, sizeof(*priv_params),
GFP_KERNEL);
if (!priv_params)
return -ENOMEM;
if (mfr_flags & USE_CLSR)
priv_params->clsr = SPINOR_OP_CLSR;
else if (mfr_flags & USE_CLPEF)
priv_params->clsr = SPINOR_OP_CLPEF;
params->priv = priv_params;
params->ready = spansion_nor_sr_ready_and_clear;
}
return 0;
}
static const struct spi_nor_fixups spansion_nor_fixups = {
.late_init = spansion_nor_late_init,
};
const struct spi_nor_manufacturer spi_nor_spansion = {
.name = "spansion",
.parts = spansion_nor_parts,
.nparts = ARRAY_SIZE(spansion_nor_parts),
.fixups = &spansion_nor_fixups,
};
/* end of linux-master: drivers/mtd/spi-nor/spansion.c */
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2005, Intec Automation Inc.
* Copyright (C) 2014, Freescale Semiconductor, Inc.
*/
#include <linux/bitfield.h>
#include <linux/mtd/spi-nor.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "core.h"
#define SFDP_PARAM_HEADER_ID(p) (((p)->id_msb << 8) | (p)->id_lsb)
#define SFDP_PARAM_HEADER_PTP(p) \
(((p)->parameter_table_pointer[2] << 16) | \
((p)->parameter_table_pointer[1] << 8) | \
((p)->parameter_table_pointer[0] << 0))
#define SFDP_PARAM_HEADER_PARAM_LEN(p) ((p)->length * 4)
#define SFDP_BFPT_ID 0xff00 /* Basic Flash Parameter Table */
#define SFDP_SECTOR_MAP_ID 0xff81 /* Sector Map Table */
#define SFDP_4BAIT_ID 0xff84 /* 4-byte Address Instruction Table */
#define SFDP_PROFILE1_ID 0xff05 /* xSPI Profile 1.0 table. */
#define SFDP_SCCR_MAP_ID 0xff87 /*
* Status, Control and Configuration
* Register Map.
*/
#define SFDP_SCCR_MAP_MC_ID 0xff88 /*
* Status, Control and Configuration
* Register Map Offsets for Multi-Chip
* SPI Memory Devices.
*/
#define SFDP_SIGNATURE 0x50444653U
struct sfdp_header {
u32 signature; /* 0x50444653U <=> "SFDP" */
u8 minor;
u8 major;
u8 nph; /* 0-based number of parameter headers */
u8 unused;
/* Basic Flash Parameter Table. */
struct sfdp_parameter_header bfpt_header;
};
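/*
 * Illustrative sketch, not part of this file: after the header is read with
 * spi_nor_read_sfdp(), the signature is checked and the BFPT location and
 * length are derived with the macros above. The real parser converts the raw
 * little-endian words with le32_to_cpu() first; that step is elided here.
 */
#if 0
static int example_locate_bfpt(const struct sfdp_header *header,
			       u32 *bfpt_addr, size_t *bfpt_len)
{
	if (header->signature != SFDP_SIGNATURE)
		return -EINVAL;

	*bfpt_addr = SFDP_PARAM_HEADER_PTP(&header->bfpt_header);
	*bfpt_len = SFDP_PARAM_HEADER_PARAM_LEN(&header->bfpt_header);
	return 0;
}
#endif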
/* Fast Read settings. */
struct sfdp_bfpt_read {
/* The Fast Read x-y-z hardware capability in params->hwcaps.mask. */
u32 hwcaps;
/*
* The <supported_bit> bit in <supported_dword> BFPT DWORD tells us
* whether the Fast Read x-y-z command is supported.
*/
u32 supported_dword;
u32 supported_bit;
/*
* The half-word at offset <settings_shift> in <settings_dword> BFPT DWORD
* encodes the op code, the number of mode clocks and the number of wait
* states to be used by Fast Read x-y-z command.
*/
u32 settings_dword;
u32 settings_shift;
/* The SPI protocol for this Fast Read x-y-z command. */
enum spi_nor_protocol proto;
};
struct sfdp_bfpt_erase {
/*
* The half-word at offset <shift> in DWORD <dword> encodes the
* op code and erase sector size to be used by Sector Erase commands.
*/
u32 dword;
u32 shift;
};
#define SMPT_CMD_ADDRESS_LEN_MASK GENMASK(23, 22)
#define SMPT_CMD_ADDRESS_LEN_0 (0x0UL << 22)
#define SMPT_CMD_ADDRESS_LEN_3 (0x1UL << 22)
#define SMPT_CMD_ADDRESS_LEN_4 (0x2UL << 22)
#define SMPT_CMD_ADDRESS_LEN_USE_CURRENT (0x3UL << 22)
#define SMPT_CMD_READ_DUMMY_MASK GENMASK(19, 16)
#define SMPT_CMD_READ_DUMMY_SHIFT 16
#define SMPT_CMD_READ_DUMMY(_cmd) \
(((_cmd) & SMPT_CMD_READ_DUMMY_MASK) >> SMPT_CMD_READ_DUMMY_SHIFT)
#define SMPT_CMD_READ_DUMMY_IS_VARIABLE 0xfUL
#define SMPT_CMD_READ_DATA_MASK GENMASK(31, 24)
#define SMPT_CMD_READ_DATA_SHIFT 24
#define SMPT_CMD_READ_DATA(_cmd) \
(((_cmd) & SMPT_CMD_READ_DATA_MASK) >> SMPT_CMD_READ_DATA_SHIFT)
#define SMPT_CMD_OPCODE_MASK GENMASK(15, 8)
#define SMPT_CMD_OPCODE_SHIFT 8
#define SMPT_CMD_OPCODE(_cmd) \
(((_cmd) & SMPT_CMD_OPCODE_MASK) >> SMPT_CMD_OPCODE_SHIFT)
#define SMPT_MAP_REGION_COUNT_MASK GENMASK(23, 16)
#define SMPT_MAP_REGION_COUNT_SHIFT 16
#define SMPT_MAP_REGION_COUNT(_header) \
((((_header) & SMPT_MAP_REGION_COUNT_MASK) >> \
SMPT_MAP_REGION_COUNT_SHIFT) + 1)
#define SMPT_MAP_ID_MASK GENMASK(15, 8)
#define SMPT_MAP_ID_SHIFT 8
#define SMPT_MAP_ID(_header) \
(((_header) & SMPT_MAP_ID_MASK) >> SMPT_MAP_ID_SHIFT)
#define SMPT_MAP_REGION_SIZE_MASK GENMASK(31, 8)
#define SMPT_MAP_REGION_SIZE_SHIFT 8
#define SMPT_MAP_REGION_SIZE(_region) \
(((((_region) & SMPT_MAP_REGION_SIZE_MASK) >> \
SMPT_MAP_REGION_SIZE_SHIFT) + 1) * 256)
#define SMPT_MAP_REGION_ERASE_TYPE_MASK GENMASK(3, 0)
#define SMPT_MAP_REGION_ERASE_TYPE(_region) \
((_region) & SMPT_MAP_REGION_ERASE_TYPE_MASK)
#define SMPT_DESC_TYPE_MAP BIT(1)
#define SMPT_DESC_END BIT(0)
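/*
 * Illustrative sketch, not part of this file: decoding one sector map region
 * descriptor DWORD with the helpers above. The region size field encodes
 * (size / 256) - 1, and the low nibble is a bitmask of the BFPT erase types
 * that are valid inside the region.
 */
#if 0
static void example_decode_smpt_region(u32 region)
{
	u64 size = SMPT_MAP_REGION_SIZE(region);
	u8 erase_mask = SMPT_MAP_REGION_ERASE_TYPE(region);

	pr_debug("region: %llu bytes, erase type mask 0x%x\n",
		 (unsigned long long)size, erase_mask);
}
#endif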
#define SFDP_4BAIT_DWORD_MAX 2
struct sfdp_4bait {
/* The hardware capability. */
u32 hwcaps;
/*
* The <supported_bit> bit in DWORD1 of the 4BAIT tells us whether
* the associated 4-byte address op code is supported.
*/
u32 supported_bit;
};
/**
* spi_nor_read_raw() - raw read of serial flash memory. read_opcode,
* addr_nbytes and read_dummy members of the struct spi_nor
* should be previously set.
* @nor: pointer to a 'struct spi_nor'
* @addr: offset in the serial flash memory
* @len: number of bytes to read
* @buf: buffer where the data is copied into (dma-safe memory)
*
* Return: 0 on success, -errno otherwise.
*/
static int spi_nor_read_raw(struct spi_nor *nor, u32 addr, size_t len, u8 *buf)
{
ssize_t ret;
while (len) {
ret = spi_nor_read_data(nor, addr, len, buf);
if (ret < 0)
return ret;
if (!ret || ret > len)
return -EIO;
buf += ret;
addr += ret;
len -= ret;
}
return 0;
}
/**
* spi_nor_read_sfdp() - read Serial Flash Discoverable Parameters.
* @nor: pointer to a 'struct spi_nor'
* @addr: offset in the SFDP area to start reading data from
* @len: number of bytes to read
* @buf: buffer where the SFDP data are copied into (dma-safe memory)
*
* Whatever the actual numbers of bytes for address and dummy cycles are
* for (Fast) Read commands, the Read SFDP (5Ah) instruction is always
* followed by a 3-byte address and 8 dummy clock cycles.
*
* Return: 0 on success, -errno otherwise.
*/
static int spi_nor_read_sfdp(struct spi_nor *nor, u32 addr,
size_t len, void *buf)
{
u8 addr_nbytes, read_opcode, read_dummy;
int ret;
read_opcode = nor->read_opcode;
addr_nbytes = nor->addr_nbytes;
read_dummy = nor->read_dummy;
nor->read_opcode = SPINOR_OP_RDSFDP;
nor->addr_nbytes = 3;
nor->read_dummy = 8;
ret = spi_nor_read_raw(nor, addr, len, buf);
nor->read_opcode = read_opcode;
nor->addr_nbytes = addr_nbytes;
nor->read_dummy = read_dummy;
return ret;
}
/**
* spi_nor_read_sfdp_dma_unsafe() - read Serial Flash Discoverable Parameters.
* @nor: pointer to a 'struct spi_nor'
* @addr: offset in the SFDP area to start reading data from
* @len: number of bytes to read
* @buf: buffer where the SFDP data are copied into
*
* Wrap spi_nor_read_sfdp() using a kmalloc'ed bounce buffer as @buf is now not
* guaranteed to be dma-safe.
*
* Return: -ENOMEM if kmalloc() fails, the return code of spi_nor_read_sfdp()
* otherwise.
*/
static int spi_nor_read_sfdp_dma_unsafe(struct spi_nor *nor, u32 addr,
size_t len, void *buf)
{
void *dma_safe_buf;
int ret;
dma_safe_buf = kmalloc(len, GFP_KERNEL);
if (!dma_safe_buf)
return -ENOMEM;
ret = spi_nor_read_sfdp(nor, addr, len, dma_safe_buf);
memcpy(buf, dma_safe_buf, len);
kfree(dma_safe_buf);
return ret;
}
static void
spi_nor_set_read_settings_from_bfpt(struct spi_nor_read_command *read,
u16 half,
enum spi_nor_protocol proto)
{
read->num_mode_clocks = (half >> 5) & 0x07;
read->num_wait_states = (half >> 0) & 0x1f;
read->opcode = (half >> 8) & 0xff;
read->proto = proto;
}
static const struct sfdp_bfpt_read sfdp_bfpt_reads[] = {
/* Fast Read 1-1-2 */
{
SNOR_HWCAPS_READ_1_1_2,
SFDP_DWORD(1), BIT(16), /* Supported bit */
SFDP_DWORD(4), 0, /* Settings */
SNOR_PROTO_1_1_2,
},
/* Fast Read 1-2-2 */
{
SNOR_HWCAPS_READ_1_2_2,
SFDP_DWORD(1), BIT(20), /* Supported bit */
SFDP_DWORD(4), 16, /* Settings */
SNOR_PROTO_1_2_2,
},
/* Fast Read 2-2-2 */
{
SNOR_HWCAPS_READ_2_2_2,
SFDP_DWORD(5), BIT(0), /* Supported bit */
SFDP_DWORD(6), 16, /* Settings */
SNOR_PROTO_2_2_2,
},
/* Fast Read 1-1-4 */
{
SNOR_HWCAPS_READ_1_1_4,
SFDP_DWORD(1), BIT(22), /* Supported bit */
SFDP_DWORD(3), 16, /* Settings */
SNOR_PROTO_1_1_4,
},
/* Fast Read 1-4-4 */
{
SNOR_HWCAPS_READ_1_4_4,
SFDP_DWORD(1), BIT(21), /* Supported bit */
SFDP_DWORD(3), 0, /* Settings */
SNOR_PROTO_1_4_4,
},
/* Fast Read 4-4-4 */
{
SNOR_HWCAPS_READ_4_4_4,
SFDP_DWORD(5), BIT(4), /* Supported bit */
SFDP_DWORD(7), 16, /* Settings */
SNOR_PROTO_4_4_4,
},
};
static const struct sfdp_bfpt_erase sfdp_bfpt_erases[] = {
/* Erase Type 1 in DWORD8 bits[15:0] */
{SFDP_DWORD(8), 0},
/* Erase Type 2 in DWORD8 bits[31:16] */
{SFDP_DWORD(8), 16},
/* Erase Type 3 in DWORD9 bits[15:0] */
{SFDP_DWORD(9), 0},
/* Erase Type 4 in DWORD9 bits[31:16] */
{SFDP_DWORD(9), 16},
};
/**
* spi_nor_set_erase_settings_from_bfpt() - set erase type settings from BFPT
* @erase: pointer to a structure that describes a SPI NOR erase type
* @size: the size of the sector/block erased by the erase type
* @opcode: the SPI command op code to erase the sector/block
* @i: erase type index as sorted in the Basic Flash Parameter Table
*
* The supported Erase Types will be sorted at init in ascending order, with
* the smallest Erase Type size being the first member in the erase_type array
* of the spi_nor_erase_map structure. Save the Erase Type index as sorted in
* the Basic Flash Parameter Table since it will be used later on to
* synchronize with the supported Erase Types defined in SFDP optional tables.
*/
static void
spi_nor_set_erase_settings_from_bfpt(struct spi_nor_erase_type *erase,
u32 size, u8 opcode, u8 i)
{
erase->idx = i;
spi_nor_set_erase_type(erase, size, opcode);
}
/**
* spi_nor_map_cmp_erase_type() - compare the map's erase types by size
* @l: member in the left half of the map's erase_type array
* @r: member in the right half of the map's erase_type array
*
* Comparison function used in the sort() call to sort in ascending order the
* map's erase types, the smallest erase type size being the first member in the
* sorted erase_type array.
*
* Return: the result of @l->size - @r->size
*/
static int spi_nor_map_cmp_erase_type(const void *l, const void *r)
{
const struct spi_nor_erase_type *left = l, *right = r;
return left->size - right->size;
}
/**
* spi_nor_sort_erase_mask() - sort erase mask
* @map: the erase map of the SPI NOR
* @erase_mask: the erase type mask to be sorted
*
* Replicate the sort done for the map's erase types in BFPT: sort the erase
* mask in ascending order with the smallest erase type size starting from
* BIT(0) in the sorted erase mask.
*
* Return: sorted erase mask.
*/
static u8 spi_nor_sort_erase_mask(struct spi_nor_erase_map *map, u8 erase_mask)
{
struct spi_nor_erase_type *erase_type = map->erase_type;
int i;
u8 sorted_erase_mask = 0;
if (!erase_mask)
return 0;
/* Replicate the sort done for the map's erase types. */
for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
if (erase_type[i].size && erase_mask & BIT(erase_type[i].idx))
sorted_erase_mask |= BIT(i);
return sorted_erase_mask;
}
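/*
 * Worked example (illustrative): assume the BFPT declared Erase Type 1 as
 * 64 KiB (idx = 0) and Erase Type 2 as 4 KiB (idx = 1). After sorting by
 * size, erase_type[0] holds the 4 KiB type (idx = 1) and erase_type[1] the
 * 64 KiB type (idx = 0). An input erase_mask of BIT(0), i.e. "Erase Type 1
 * only", is therefore returned as BIT(1) in the sorted ordering.
 */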
/**
* spi_nor_regions_sort_erase_types() - sort erase types in each region
* @map: the erase map of the SPI NOR
*
* Function assumes that the erase types defined in the erase map are already
* sorted in ascending order, with the smallest erase type size being the first
* member in the erase_type array. It replicates the sort done for the map's
* erase types. Each region's erase bitmask will indicate which erase types are
* supported from the sorted erase types defined in the erase map.
* Sort all the regions' erase types at init in order to speed up the process
* of finding the best erase command at runtime.
*/
static void spi_nor_regions_sort_erase_types(struct spi_nor_erase_map *map)
{
struct spi_nor_erase_region *region = map->regions;
u8 region_erase_mask, sorted_erase_mask;
while (region) {
region_erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
sorted_erase_mask = spi_nor_sort_erase_mask(map,
region_erase_mask);
/* Overwrite erase mask. */
region->offset = (region->offset & ~SNOR_ERASE_TYPE_MASK) |
sorted_erase_mask;
region = spi_nor_region_next(region);
}
}
/**
* spi_nor_parse_bfpt() - read and parse the Basic Flash Parameter Table.
* @nor: pointer to a 'struct spi_nor'
* @bfpt_header: pointer to the 'struct sfdp_parameter_header' describing
* the Basic Flash Parameter Table length and version
*
* The Basic Flash Parameter Table is the main and only mandatory table as
* defined by the SFDP (JESD216) specification.
* It provides us with the total size (memory density) of the data array and
* the number of address bytes for Fast Read, Page Program and Sector Erase
* commands.
* For Fast READ commands, it also gives the number of mode clock cycles and
* wait states (regrouped in the number of dummy clock cycles) for each
* supported instruction op code.
* For Page Program, the page size is available since JESD216 rev A; however,
* the supported instruction op codes are still not provided.
* For Sector Erase commands, this table stores the supported instruction op
* codes and the associated sector sizes.
* Finally, the Quad Enable Requirements (QER) are also available since JESD216
* rev A. The QER bits encode the manufacturer dependent procedure to be
* executed to set the Quad Enable (QE) bit in some internal register of the
* Quad SPI memory. Indeed the QE bit, when it exists, must be set before
* sending any Quad SPI command to the memory. Actually, setting the QE bit
* tells the memory to reassign its WP# and HOLD#/RESET# pins to functions IO2
* and IO3 hence enabling 4 (Quad) I/O lines.
*
* Return: 0 on success, -errno otherwise.
*/
static int spi_nor_parse_bfpt(struct spi_nor *nor,
const struct sfdp_parameter_header *bfpt_header)
{
struct spi_nor_flash_parameter *params = nor->params;
struct spi_nor_erase_map *map = &params->erase_map;
struct spi_nor_erase_type *erase_type = map->erase_type;
struct sfdp_bfpt bfpt;
size_t len;
int i, cmd, err;
u32 addr, val;
u32 dword;
u16 half;
u8 erase_mask;
/* JESD216 Basic Flash Parameter Table length is at least 9 DWORDs. */
if (bfpt_header->length < BFPT_DWORD_MAX_JESD216)
return -EINVAL;
/* Read the Basic Flash Parameter Table. */
len = min_t(size_t, sizeof(bfpt),
bfpt_header->length * sizeof(u32));
addr = SFDP_PARAM_HEADER_PTP(bfpt_header);
memset(&bfpt, 0, sizeof(bfpt));
err = spi_nor_read_sfdp_dma_unsafe(nor, addr, len, &bfpt);
if (err < 0)
return err;
/* Fix endianness of the BFPT DWORDs. */
le32_to_cpu_array(bfpt.dwords, BFPT_DWORD_MAX);
/* Number of address bytes. */
switch (bfpt.dwords[SFDP_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) {
case BFPT_DWORD1_ADDRESS_BYTES_3_ONLY:
case BFPT_DWORD1_ADDRESS_BYTES_3_OR_4:
params->addr_nbytes = 3;
params->addr_mode_nbytes = 3;
break;
case BFPT_DWORD1_ADDRESS_BYTES_4_ONLY:
params->addr_nbytes = 4;
params->addr_mode_nbytes = 4;
break;
default:
break;
}
/* Flash Memory Density (in bits). */
val = bfpt.dwords[SFDP_DWORD(2)];
if (val & BIT(31)) {
val &= ~BIT(31);
/*
* Prevent overflows on params->size. Anyway, a NOR of 2^64
* bits is unlikely to exist so this error probably means
* the BFPT we are reading is corrupted/wrong.
*/
if (val > 63)
return -EINVAL;
params->size = 1ULL << val;
} else {
params->size = val + 1;
}
params->size >>= 3; /* Convert to bytes. */
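/*
 * For example (illustrative values): a density DWORD of 0x00ffffff (bit 31
 * clear) gives (0x00ffffff + 1) bits = 2 MiB, while bit 31 set with val = 33
 * gives 2^33 bits = 1 GiB.
 */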
/* Fast Read settings. */
for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_reads); i++) {
const struct sfdp_bfpt_read *rd = &sfdp_bfpt_reads[i];
struct spi_nor_read_command *read;
if (!(bfpt.dwords[rd->supported_dword] & rd->supported_bit)) {
params->hwcaps.mask &= ~rd->hwcaps;
continue;
}
params->hwcaps.mask |= rd->hwcaps;
cmd = spi_nor_hwcaps_read2cmd(rd->hwcaps);
read = &params->reads[cmd];
half = bfpt.dwords[rd->settings_dword] >> rd->settings_shift;
spi_nor_set_read_settings_from_bfpt(read, half, rd->proto);
}
/*
* Sector Erase settings. Reinitialize the uniform erase map using the
* Erase Types defined in the bfpt table.
*/
erase_mask = 0;
memset(&params->erase_map, 0, sizeof(params->erase_map));
for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_erases); i++) {
const struct sfdp_bfpt_erase *er = &sfdp_bfpt_erases[i];
u32 erasesize;
u8 opcode;
half = bfpt.dwords[er->dword] >> er->shift;
erasesize = half & 0xff;
/* erasesize == 0 means this Erase Type is not supported. */
if (!erasesize)
continue;
erasesize = 1U << erasesize;
opcode = (half >> 8) & 0xff;
erase_mask |= BIT(i);
spi_nor_set_erase_settings_from_bfpt(&erase_type[i], erasesize,
opcode, i);
}
spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
/*
* Sort all the map's Erase Types in ascending order with the smallest
* erase size being the first member in the erase_type array.
*/
sort(erase_type, SNOR_ERASE_TYPE_MAX, sizeof(erase_type[0]),
spi_nor_map_cmp_erase_type, NULL);
/*
* Sort the erase types in the uniform region in order to update the
* uniform_erase_type bitmask. The bitmask will be used later on when
* selecting the uniform erase.
*/
spi_nor_regions_sort_erase_types(map);
map->uniform_erase_type = map->uniform_region.offset &
SNOR_ERASE_TYPE_MASK;
/* Stop here if not JESD216 rev A or later. */
if (bfpt_header->length == BFPT_DWORD_MAX_JESD216)
return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt);
/* Page size: this field specifies 'N' so the page size = 2^N bytes. */
val = bfpt.dwords[SFDP_DWORD(11)];
val &= BFPT_DWORD11_PAGE_SIZE_MASK;
val >>= BFPT_DWORD11_PAGE_SIZE_SHIFT;
params->page_size = 1U << val;
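/* For example, an encoded value of 8 yields the common 256-byte page size. */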
/* Quad Enable Requirements. */
switch (bfpt.dwords[SFDP_DWORD(15)] & BFPT_DWORD15_QER_MASK) {
case BFPT_DWORD15_QER_NONE:
params->quad_enable = NULL;
break;
case BFPT_DWORD15_QER_SR2_BIT1_BUGGY:
/*
* Writing only one byte to the Status Register has the
* side-effect of clearing Status Register 2.
*/
case BFPT_DWORD15_QER_SR2_BIT1_NO_RD:
/*
* Read Configuration Register (35h) instruction is not
* supported.
*/
nor->flags |= SNOR_F_HAS_16BIT_SR | SNOR_F_NO_READ_CR;
params->quad_enable = spi_nor_sr2_bit1_quad_enable;
break;
case BFPT_DWORD15_QER_SR1_BIT6:
nor->flags &= ~SNOR_F_HAS_16BIT_SR;
params->quad_enable = spi_nor_sr1_bit6_quad_enable;
break;
case BFPT_DWORD15_QER_SR2_BIT7:
nor->flags &= ~SNOR_F_HAS_16BIT_SR;
params->quad_enable = spi_nor_sr2_bit7_quad_enable;
break;
case BFPT_DWORD15_QER_SR2_BIT1:
/*
* JESD216 rev B or later does not specify if writing only one
* byte to the Status Register clears or not the Status
* Register 2, so let's be cautious and keep the default
* assumption of a 16-bit Write Status (01h) command.
*/
nor->flags |= SNOR_F_HAS_16BIT_SR;
params->quad_enable = spi_nor_sr2_bit1_quad_enable;
break;
default:
dev_dbg(nor->dev, "BFPT QER reserved value used\n");
break;
}
dword = bfpt.dwords[SFDP_DWORD(16)] & BFPT_DWORD16_4B_ADDR_MODE_MASK;
if (SFDP_MASK_CHECK(dword, BFPT_DWORD16_4B_ADDR_MODE_BRWR))
params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode_brwr;
else if (SFDP_MASK_CHECK(dword, BFPT_DWORD16_4B_ADDR_MODE_WREN_EN4B_EX4B))
params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode_wren_en4b_ex4b;
else if (SFDP_MASK_CHECK(dword, BFPT_DWORD16_4B_ADDR_MODE_EN4B_EX4B))
params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode_en4b_ex4b;
else
dev_dbg(nor->dev, "BFPT: 4-Byte Address Mode method is not recognized or not implemented\n");
/* Soft Reset support. */
if (bfpt.dwords[SFDP_DWORD(16)] & BFPT_DWORD16_SWRST_EN_RST)
nor->flags |= SNOR_F_SOFT_RESET;
/* Stop here if not JESD216 rev C or later. */
if (bfpt_header->length == BFPT_DWORD_MAX_JESD216B)
return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt);
/* 8D-8D-8D command extension. */
switch (bfpt.dwords[SFDP_DWORD(18)] & BFPT_DWORD18_CMD_EXT_MASK) {
case BFPT_DWORD18_CMD_EXT_REP:
nor->cmd_ext_type = SPI_NOR_EXT_REPEAT;
break;
case BFPT_DWORD18_CMD_EXT_INV:
nor->cmd_ext_type = SPI_NOR_EXT_INVERT;
break;
case BFPT_DWORD18_CMD_EXT_RES:
dev_dbg(nor->dev, "Reserved command extension used\n");
break;
case BFPT_DWORD18_CMD_EXT_16B:
dev_dbg(nor->dev, "16-bit opcodes not supported\n");
return -EOPNOTSUPP;
}
return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt);
}
/**
* spi_nor_smpt_addr_nbytes() - return the number of address bytes used in the
* configuration detection command.
* @nor: pointer to a 'struct spi_nor'
* @settings: configuration detection command descriptor, dword1
*/
static u8 spi_nor_smpt_addr_nbytes(const struct spi_nor *nor, const u32 settings)
{
switch (settings & SMPT_CMD_ADDRESS_LEN_MASK) {
case SMPT_CMD_ADDRESS_LEN_0:
return 0;
case SMPT_CMD_ADDRESS_LEN_3:
return 3;
case SMPT_CMD_ADDRESS_LEN_4:
return 4;
case SMPT_CMD_ADDRESS_LEN_USE_CURRENT:
default:
return nor->params->addr_mode_nbytes;
}
}
/**
* spi_nor_smpt_read_dummy() - return the configuration detection command read
* latency, in clock cycles.
* @nor: pointer to a 'struct spi_nor'
* @settings: configuration detection command descriptor, dword1
*
* Return: the number of dummy cycles for an SMPT read
*/
static u8 spi_nor_smpt_read_dummy(const struct spi_nor *nor, const u32 settings)
{
u8 read_dummy = SMPT_CMD_READ_DUMMY(settings);
if (read_dummy == SMPT_CMD_READ_DUMMY_IS_VARIABLE)
return nor->read_dummy;
return read_dummy;
}
/**
* spi_nor_get_map_in_use() - get the configuration map in use
* @nor: pointer to a 'struct spi_nor'
* @smpt: pointer to the sector map parameter table
* @smpt_len: sector map parameter table length
*
* Return: pointer to the map in use, ERR_PTR(-errno) otherwise.
*/
static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
u8 smpt_len)
{
const u32 *ret;
u8 *buf;
u32 addr;
int err;
u8 i;
u8 addr_nbytes, read_opcode, read_dummy;
u8 read_data_mask, map_id;
/* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
buf = kmalloc(sizeof(*buf), GFP_KERNEL);
if (!buf)
return ERR_PTR(-ENOMEM);
addr_nbytes = nor->addr_nbytes;
read_dummy = nor->read_dummy;
read_opcode = nor->read_opcode;
map_id = 0;
/* Determine if there are any optional Detection Command Descriptors */
for (i = 0; i < smpt_len; i += 2) {
if (smpt[i] & SMPT_DESC_TYPE_MAP)
break;
read_data_mask = SMPT_CMD_READ_DATA(smpt[i]);
nor->addr_nbytes = spi_nor_smpt_addr_nbytes(nor, smpt[i]);
nor->read_dummy = spi_nor_smpt_read_dummy(nor, smpt[i]);
nor->read_opcode = SMPT_CMD_OPCODE(smpt[i]);
addr = smpt[i + 1];
err = spi_nor_read_raw(nor, addr, 1, buf);
if (err) {
ret = ERR_PTR(err);
goto out;
}
/*
* Build an index value that is used to select the Sector Map
* Configuration that is currently in use.
*/
map_id = map_id << 1 | !!(*buf & read_data_mask);
}
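/*
 * For example (illustrative): with two detection commands whose reads return
 * 1 and 0 for (*buf & read_data_mask) respectively, map_id ends up as 0b10,
 * so the map descriptor with SMPT_MAP_ID() == 2 is selected below.
 */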
/*
* If command descriptors are provided, they always precede map
* descriptors in the table. There is no need to start the iteration
* over smpt array all over again.
*
* Find the matching configuration map.
*/
ret = ERR_PTR(-EINVAL);
while (i < smpt_len) {
if (SMPT_MAP_ID(smpt[i]) == map_id) {
ret = smpt + i;
break;
}
/*
* If there are no more configuration map descriptors and no
* configuration ID matched the configuration identifier, the
* sector address map is unknown.
*/
if (smpt[i] & SMPT_DESC_END)
break;
/* increment the table index to the next map */
i += SMPT_MAP_REGION_COUNT(smpt[i]) + 1;
}
/* fall through */
out:
kfree(buf);
nor->addr_nbytes = addr_nbytes;
nor->read_dummy = read_dummy;
nor->read_opcode = read_opcode;
return ret;
}
static void spi_nor_region_mark_end(struct spi_nor_erase_region *region)
{
region->offset |= SNOR_LAST_REGION;
}
static void spi_nor_region_mark_overlay(struct spi_nor_erase_region *region)
{
region->offset |= SNOR_OVERLAID_REGION;
}
/**
* spi_nor_region_check_overlay() - set overlay bit when the region is overlaid
* @region: pointer to a structure that describes a SPI NOR erase region
* @erase: pointer to a structure that describes a SPI NOR erase type
* @erase_type: erase type bitmask
*/
static void
spi_nor_region_check_overlay(struct spi_nor_erase_region *region,
const struct spi_nor_erase_type *erase,
const u8 erase_type)
{
int i;
for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
if (!(erase[i].size && erase_type & BIT(erase[i].idx)))
continue;
if (region->size & erase[i].size_mask) {
spi_nor_region_mark_overlay(region);
return;
}
}
}
/**
* spi_nor_init_non_uniform_erase_map() - initialize the non-uniform erase map
* @nor: pointer to a 'struct spi_nor'
* @smpt: pointer to the sector map parameter table
*
* Return: 0 on success, -errno otherwise.
*/
static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
const u32 *smpt)
{
struct spi_nor_erase_map *map = &nor->params->erase_map;
struct spi_nor_erase_type *erase = map->erase_type;
struct spi_nor_erase_region *region;
u64 offset;
u32 region_count;
int i, j;
u8 uniform_erase_type, save_uniform_erase_type;
u8 erase_type, regions_erase_type;
region_count = SMPT_MAP_REGION_COUNT(*smpt);
/*
* The regions will be freed when the driver detaches from the
* device.
*/
region = devm_kcalloc(nor->dev, region_count, sizeof(*region),
GFP_KERNEL);
if (!region)
return -ENOMEM;
map->regions = region;
uniform_erase_type = 0xff;
regions_erase_type = 0;
offset = 0;
/* Populate regions. */
for (i = 0; i < region_count; i++) {
j = i + 1; /* index for the region dword */
region[i].size = SMPT_MAP_REGION_SIZE(smpt[j]);
erase_type = SMPT_MAP_REGION_ERASE_TYPE(smpt[j]);
region[i].offset = offset | erase_type;
spi_nor_region_check_overlay(&region[i], erase, erase_type);
/*
* Save the erase types that are supported in all regions and
* can erase the entire flash memory.
*/
uniform_erase_type &= erase_type;
/*
* regions_erase_type mask will indicate all the erase types
* supported in this configuration map.
*/
regions_erase_type |= erase_type;
offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) +
region[i].size;
}
spi_nor_region_mark_end(&region[i - 1]);
save_uniform_erase_type = map->uniform_erase_type;
map->uniform_erase_type = spi_nor_sort_erase_mask(map,
uniform_erase_type);
if (!regions_erase_type) {
/*
* Roll back to the previous uniform_erase_type mask, SMPT is
* broken.
*/
map->uniform_erase_type = save_uniform_erase_type;
return -EINVAL;
}
/*
* BFPT advertises all the erase types supported by all the possible
* map configurations. Mask out the erase types that are not supported
* by the current map configuration.
*/
for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
if (!(regions_erase_type & BIT(erase[i].idx)))
spi_nor_mask_erase_type(&erase[i]);
return 0;
}
/**
* spi_nor_parse_smpt() - parse Sector Map Parameter Table
* @nor: pointer to a 'struct spi_nor'
* @smpt_header: sector map parameter table header
*
* This table is optional, but when available, we parse it to identify the
* location and size of sectors within the main data array of the flash memory
* device and to identify which Erase Types are supported by each sector.
*
* Return: 0 on success, -errno otherwise.
*/
static int spi_nor_parse_smpt(struct spi_nor *nor,
const struct sfdp_parameter_header *smpt_header)
{
const u32 *sector_map;
u32 *smpt;
size_t len;
u32 addr;
int ret;
/* Read the Sector Map Parameter Table. */
len = smpt_header->length * sizeof(*smpt);
smpt = kmalloc(len, GFP_KERNEL);
if (!smpt)
return -ENOMEM;
addr = SFDP_PARAM_HEADER_PTP(smpt_header);
ret = spi_nor_read_sfdp(nor, addr, len, smpt);
if (ret)
goto out;
/* Fix endianness of the SMPT DWORDs. */
le32_to_cpu_array(smpt, smpt_header->length);
sector_map = spi_nor_get_map_in_use(nor, smpt, smpt_header->length);
if (IS_ERR(sector_map)) {
ret = PTR_ERR(sector_map);
goto out;
}
ret = spi_nor_init_non_uniform_erase_map(nor, sector_map);
if (ret)
goto out;
spi_nor_regions_sort_erase_types(&nor->params->erase_map);
/* fall through */
out:
kfree(smpt);
return ret;
}
/**
* spi_nor_parse_4bait() - parse the 4-Byte Address Instruction Table
* @nor: pointer to a 'struct spi_nor'.
* @param_header: pointer to the 'struct sfdp_parameter_header' describing
* the 4-Byte Address Instruction Table length and version.
*
* Return: 0 on success, -errno otherwise.
*/
static int spi_nor_parse_4bait(struct spi_nor *nor,
const struct sfdp_parameter_header *param_header)
{
static const struct sfdp_4bait reads[] = {
{ SNOR_HWCAPS_READ, BIT(0) },
{ SNOR_HWCAPS_READ_FAST, BIT(1) },
{ SNOR_HWCAPS_READ_1_1_2, BIT(2) },
{ SNOR_HWCAPS_READ_1_2_2, BIT(3) },
{ SNOR_HWCAPS_READ_1_1_4, BIT(4) },
{ SNOR_HWCAPS_READ_1_4_4, BIT(5) },
{ SNOR_HWCAPS_READ_1_1_1_DTR, BIT(13) },
{ SNOR_HWCAPS_READ_1_2_2_DTR, BIT(14) },
{ SNOR_HWCAPS_READ_1_4_4_DTR, BIT(15) },
};
static const struct sfdp_4bait programs[] = {
{ SNOR_HWCAPS_PP, BIT(6) },
{ SNOR_HWCAPS_PP_1_1_4, BIT(7) },
{ SNOR_HWCAPS_PP_1_4_4, BIT(8) },
};
static const struct sfdp_4bait erases[SNOR_ERASE_TYPE_MAX] = {
{ 0u /* not used */, BIT(9) },
{ 0u /* not used */, BIT(10) },
{ 0u /* not used */, BIT(11) },
{ 0u /* not used */, BIT(12) },
};
struct spi_nor_flash_parameter *params = nor->params;
struct spi_nor_pp_command *params_pp = params->page_programs;
struct spi_nor_erase_map *map = &params->erase_map;
struct spi_nor_erase_type *erase_type = map->erase_type;
u32 *dwords;
size_t len;
u32 addr, discard_hwcaps, read_hwcaps, pp_hwcaps, erase_mask;
int i, ret;
if (param_header->major != SFDP_JESD216_MAJOR ||
param_header->length < SFDP_4BAIT_DWORD_MAX)
return -EINVAL;
/* Read the 4-byte Address Instruction Table. */
len = sizeof(*dwords) * SFDP_4BAIT_DWORD_MAX;
/* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
dwords = kmalloc(len, GFP_KERNEL);
if (!dwords)
return -ENOMEM;
addr = SFDP_PARAM_HEADER_PTP(param_header);
ret = spi_nor_read_sfdp(nor, addr, len, dwords);
if (ret)
goto out;
/* Fix endianness of the 4BAIT DWORDs. */
le32_to_cpu_array(dwords, SFDP_4BAIT_DWORD_MAX);
/*
* Compute the subset of (Fast) Read commands for which the 4-byte
* version is supported.
*/
discard_hwcaps = 0;
read_hwcaps = 0;
for (i = 0; i < ARRAY_SIZE(reads); i++) {
const struct sfdp_4bait *read = &reads[i];
discard_hwcaps |= read->hwcaps;
if ((params->hwcaps.mask & read->hwcaps) &&
(dwords[SFDP_DWORD(1)] & read->supported_bit))
read_hwcaps |= read->hwcaps;
}
/*
* Compute the subset of Page Program commands for which the 4-byte
* version is supported.
*/
pp_hwcaps = 0;
for (i = 0; i < ARRAY_SIZE(programs); i++) {
const struct sfdp_4bait *program = &programs[i];
/*
* The 4 Byte Address Instruction (Optional) Table is the only
* SFDP table that indicates support for Page Program Commands.
* Bypass the params->hwcaps.mask and consider 4BAIT the biggest
* authority for specifying Page Program support.
*/
discard_hwcaps |= program->hwcaps;
if (dwords[SFDP_DWORD(1)] & program->supported_bit)
pp_hwcaps |= program->hwcaps;
}
/*
* Compute the subset of Sector Erase commands for which the 4-byte
* version is supported.
*/
erase_mask = 0;
for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
const struct sfdp_4bait *erase = &erases[i];
if (dwords[SFDP_DWORD(1)] & erase->supported_bit)
erase_mask |= BIT(i);
}
/* Replicate the sort done for the map's erase types in BFPT. */
erase_mask = spi_nor_sort_erase_mask(map, erase_mask);
/*
* We need at least one 4-byte op code per read, program and erase
* operation; the .read(), .write() and .erase() hooks share the
* nor->addr_nbytes value.
*/
if (!read_hwcaps || !pp_hwcaps || !erase_mask)
goto out;
/*
* Discard all operations from the 4-byte instruction set which are
* not supported by this memory.
*/
params->hwcaps.mask &= ~discard_hwcaps;
params->hwcaps.mask |= (read_hwcaps | pp_hwcaps);
/* Use the 4-byte address instruction set. */
for (i = 0; i < SNOR_CMD_READ_MAX; i++) {
struct spi_nor_read_command *read_cmd = &params->reads[i];
read_cmd->opcode = spi_nor_convert_3to4_read(read_cmd->opcode);
}
/* 4BAIT is the only SFDP table that indicates page program support. */
if (pp_hwcaps & SNOR_HWCAPS_PP) {
spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP],
SPINOR_OP_PP_4B, SNOR_PROTO_1_1_1);
/*
* Since xSPI Page Program opcode is backward compatible with
* Legacy SPI, use Legacy SPI opcode there as well.
*/
spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_8_8_8_DTR],
SPINOR_OP_PP_4B, SNOR_PROTO_8_8_8_DTR);
}
if (pp_hwcaps & SNOR_HWCAPS_PP_1_1_4)
spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_1_1_4],
SPINOR_OP_PP_1_1_4_4B,
SNOR_PROTO_1_1_4);
if (pp_hwcaps & SNOR_HWCAPS_PP_1_4_4)
spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_1_4_4],
SPINOR_OP_PP_1_4_4_4B,
SNOR_PROTO_1_4_4);
for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
if (erase_mask & BIT(i))
erase_type[i].opcode = (dwords[SFDP_DWORD(2)] >>
erase_type[i].idx * 8) & 0xFF;
else
spi_nor_mask_erase_type(&erase_type[i]);
}
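/*
 * For example (illustrative value): dwords[SFDP_DWORD(2)] = 0x0000dc21 maps
 * the erase type with idx 0 to the 4-byte opcode 0x21 and the erase type
 * with idx 1 to 0xdc.
 */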
/*
* We set SNOR_F_HAS_4BAIT in order to skip spi_nor_set_4byte_opcodes()
* later because we already did the conversion to 4byte opcodes. Also,
* this latest function implements a legacy quirk for the erase size of
* Spansion memory. However this quirk is no longer needed with new
* SFDP compliant memories.
*/
params->addr_nbytes = 4;
nor->flags |= SNOR_F_4B_OPCODES | SNOR_F_HAS_4BAIT;
/* fall through */
out:
kfree(dwords);
return ret;
}
#define PROFILE1_DWORD1_RDSR_ADDR_BYTES BIT(29)
#define PROFILE1_DWORD1_RDSR_DUMMY BIT(28)
#define PROFILE1_DWORD1_RD_FAST_CMD GENMASK(15, 8)
#define PROFILE1_DWORD4_DUMMY_200MHZ GENMASK(11, 7)
#define PROFILE1_DWORD5_DUMMY_166MHZ GENMASK(31, 27)
#define PROFILE1_DWORD5_DUMMY_133MHZ GENMASK(21, 17)
#define PROFILE1_DWORD5_DUMMY_100MHZ GENMASK(11, 7)
/**
* spi_nor_parse_profile1() - parse the xSPI Profile 1.0 table
* @nor: pointer to a 'struct spi_nor'
* @profile1_header: pointer to the 'struct sfdp_parameter_header' describing
* the Profile 1.0 Table length and version.
*
* Return: 0 on success, -errno otherwise.
*/
static int spi_nor_parse_profile1(struct spi_nor *nor,
const struct sfdp_parameter_header *profile1_header)
{
u32 *dwords, addr;
size_t len;
int ret;
u8 dummy, opcode;
len = profile1_header->length * sizeof(*dwords);
dwords = kmalloc(len, GFP_KERNEL);
if (!dwords)
return -ENOMEM;
addr = SFDP_PARAM_HEADER_PTP(profile1_header);
ret = spi_nor_read_sfdp(nor, addr, len, dwords);
if (ret)
goto out;
le32_to_cpu_array(dwords, profile1_header->length);
/* Get 8D-8D-8D fast read opcode and dummy cycles. */
opcode = FIELD_GET(PROFILE1_DWORD1_RD_FAST_CMD, dwords[SFDP_DWORD(1)]);
/* Set the Read Status Register dummy cycles and dummy address bytes. */
if (dwords[SFDP_DWORD(1)] & PROFILE1_DWORD1_RDSR_DUMMY)
nor->params->rdsr_dummy = 8;
else
nor->params->rdsr_dummy = 4;
if (dwords[SFDP_DWORD(1)] & PROFILE1_DWORD1_RDSR_ADDR_BYTES)
nor->params->rdsr_addr_nbytes = 4;
else
nor->params->rdsr_addr_nbytes = 0;
/*
* We don't know what speed the controller is running at. Find the
* dummy cycles for the fastest frequency the flash can run at to be
* sure we are never short of dummy cycles. A value of 0 means the
* frequency is not supported.
*
* Default to PROFILE1_DUMMY_DEFAULT if we don't find anything, and let
* flashes set the correct value if needed in their fixup hooks.
*/
dummy = FIELD_GET(PROFILE1_DWORD4_DUMMY_200MHZ, dwords[SFDP_DWORD(4)]);
if (!dummy)
dummy = FIELD_GET(PROFILE1_DWORD5_DUMMY_166MHZ,
dwords[SFDP_DWORD(5)]);
if (!dummy)
dummy = FIELD_GET(PROFILE1_DWORD5_DUMMY_133MHZ,
dwords[SFDP_DWORD(5)]);
if (!dummy)
dummy = FIELD_GET(PROFILE1_DWORD5_DUMMY_100MHZ,
dwords[SFDP_DWORD(5)]);
if (!dummy)
dev_dbg(nor->dev,
"Can't find dummy cycles from Profile 1.0 table\n");
/* Round up to an even value to avoid tripping controllers up. */
dummy = round_up(dummy, 2);
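/* For example, a table advertising 9 dummy cycles is rounded up to 10. */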
/* Update the fast read settings. */
nor->params->hwcaps.mask |= SNOR_HWCAPS_READ_8_8_8_DTR;
spi_nor_set_read_settings(&nor->params->reads[SNOR_CMD_READ_8_8_8_DTR],
0, dummy, opcode,
SNOR_PROTO_8_8_8_DTR);
/*
* Page Program is "Required Command" in the xSPI Profile 1.0. Update
* the params->hwcaps.mask here.
*/
nor->params->hwcaps.mask |= SNOR_HWCAPS_PP_8_8_8_DTR;
out:
kfree(dwords);
return ret;
}
#define SCCR_DWORD22_OCTAL_DTR_EN_VOLATILE BIT(31)
/**
* spi_nor_parse_sccr() - Parse the Status, Control and Configuration Register
* Map.
* @nor: pointer to a 'struct spi_nor'
* @sccr_header: pointer to the 'struct sfdp_parameter_header' describing
* the SCCR Map table length and version.
*
* Return: 0 on success, -errno otherwise.
*/
static int spi_nor_parse_sccr(struct spi_nor *nor,
const struct sfdp_parameter_header *sccr_header)
{
struct spi_nor_flash_parameter *params = nor->params;
u32 *dwords, addr;
size_t len;
int ret;
len = sccr_header->length * sizeof(*dwords);
dwords = kmalloc(len, GFP_KERNEL);
if (!dwords)
return -ENOMEM;
addr = SFDP_PARAM_HEADER_PTP(sccr_header);
ret = spi_nor_read_sfdp(nor, addr, len, dwords);
if (ret)
goto out;
le32_to_cpu_array(dwords, sccr_header->length);
/* Address offset for volatile registers (die 0) */
if (!params->vreg_offset) {
params->vreg_offset = devm_kmalloc(nor->dev, sizeof(*dwords),
GFP_KERNEL);
if (!params->vreg_offset) {
ret = -ENOMEM;
goto out;
}
}
params->vreg_offset[0] = dwords[SFDP_DWORD(1)];
params->n_dice = 1;
if (FIELD_GET(SCCR_DWORD22_OCTAL_DTR_EN_VOLATILE,
dwords[SFDP_DWORD(22)]))
nor->flags |= SNOR_F_IO_MODE_EN_VOLATILE;
out:
kfree(dwords);
return ret;
}
/**
* spi_nor_parse_sccr_mc() - Parse the Status, Control and Configuration
* Register Map Offsets for Multi-Chip SPI Memory
* Devices.
* @nor: pointer to a 'struct spi_nor'
* @sccr_mc_header: pointer to the 'struct sfdp_parameter_header' describing
* the SCCR Map offsets table length and version.
*
* Return: 0 on success, -errno otherwise.
*/
static int spi_nor_parse_sccr_mc(struct spi_nor *nor,
const struct sfdp_parameter_header *sccr_mc_header)
{
struct spi_nor_flash_parameter *params = nor->params;
u32 *dwords, addr;
u8 i, n_dice;
size_t len;
int ret;
len = sccr_mc_header->length * sizeof(*dwords);
dwords = kmalloc(len, GFP_KERNEL);
if (!dwords)
return -ENOMEM;
addr = SFDP_PARAM_HEADER_PTP(sccr_mc_header);
ret = spi_nor_read_sfdp(nor, addr, len, dwords);
if (ret)
goto out;
le32_to_cpu_array(dwords, sccr_mc_header->length);
/*
* Pair of DWORDs (volatile and non-volatile register offsets) per
* additional die. Hence, length = 2 * (number of additional dice).
*/
n_dice = 1 + sccr_mc_header->length / 2;
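/*
 * For example, a table length of 4 DWORDs describes two additional dice,
 * giving n_dice = 1 + 4 / 2 = 3.
 */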
/* Address offset for volatile registers of additional dice */
params->vreg_offset =
devm_krealloc(nor->dev, params->vreg_offset,
n_dice * sizeof(*dwords),
GFP_KERNEL);
if (!params->vreg_offset) {
ret = -ENOMEM;
goto out;
}
for (i = 1; i < n_dice; i++)
params->vreg_offset[i] = dwords[SFDP_DWORD(i) * 2];
params->n_dice = n_dice;
out:
kfree(dwords);
return ret;
}
/**
* spi_nor_post_sfdp_fixups() - Updates the flash's parameters and settings
* after SFDP has been parsed. Called only for flashes that define JESD216 SFDP
* tables.
* @nor: pointer to a 'struct spi_nor'
*
* Used to tweak various flash parameters when the information provided by the
* SFDP tables is wrong.
*/
static int spi_nor_post_sfdp_fixups(struct spi_nor *nor)
{
int ret;
if (nor->manufacturer && nor->manufacturer->fixups &&
nor->manufacturer->fixups->post_sfdp) {
ret = nor->manufacturer->fixups->post_sfdp(nor);
if (ret)
return ret;
}
if (nor->info->fixups && nor->info->fixups->post_sfdp)
return nor->info->fixups->post_sfdp(nor);
return 0;
}
/**
* spi_nor_check_sfdp_signature() - check for a valid SFDP signature
* @nor: pointer to a 'struct spi_nor'
*
* Used to detect if the flash supports the RDSFDP command as well as the
* presence of a valid SFDP table.
*
* Return: 0 on success, -errno otherwise.
*/
int spi_nor_check_sfdp_signature(struct spi_nor *nor)
{
u32 signature;
int err;
/* Get the SFDP header. */
err = spi_nor_read_sfdp_dma_unsafe(nor, 0, sizeof(signature),
&signature);
if (err < 0)
return err;
/* Check the SFDP signature. */
if (le32_to_cpu(signature) != SFDP_SIGNATURE)
return -EINVAL;
return 0;
}
/**
* spi_nor_parse_sfdp() - parse the Serial Flash Discoverable Parameters.
* @nor: pointer to a 'struct spi_nor'
*
* The Serial Flash Discoverable Parameters are described by the JEDEC JESD216
* specification. This is a standard which tends to be supported by almost all
* (Q)SPI memory manufacturers. Those tables, stored in the flash itself, allow
* us to learn at runtime the main parameters needed to perform basic SPI flash
* operations such as Fast Read, Page Program or Sector Erase commands.
*
* Return: 0 on success, -errno otherwise.
*/
int spi_nor_parse_sfdp(struct spi_nor *nor)
{
const struct sfdp_parameter_header *param_header, *bfpt_header;
struct sfdp_parameter_header *param_headers = NULL;
struct sfdp_header header;
struct device *dev = nor->dev;
struct sfdp *sfdp;
size_t sfdp_size;
size_t psize;
int i, err;
/* Get the SFDP header. */
err = spi_nor_read_sfdp_dma_unsafe(nor, 0, sizeof(header), &header);
if (err < 0)
return err;
/* Check the SFDP header version. */
if (le32_to_cpu(header.signature) != SFDP_SIGNATURE ||
header.major != SFDP_JESD216_MAJOR)
return -EINVAL;
/*
* Verify that the first and only mandatory parameter header is a
* Basic Flash Parameter Table header as specified in JESD216.
*/
bfpt_header = &header.bfpt_header;
if (SFDP_PARAM_HEADER_ID(bfpt_header) != SFDP_BFPT_ID ||
bfpt_header->major != SFDP_JESD216_MAJOR)
return -EINVAL;
sfdp_size = SFDP_PARAM_HEADER_PTP(bfpt_header) +
SFDP_PARAM_HEADER_PARAM_LEN(bfpt_header);
/*
* Allocate memory then read all parameter headers with a single
* Read SFDP command. These parameter headers will actually be parsed
* twice: a first time to get the latest revision of the basic flash
* parameter table, then a second time to handle the supported optional
* tables.
* Hence we read the parameter headers once and for all to reduce the
* processing time. Also we use kmalloc() instead of devm_kmalloc()
* because we don't need to keep these parameter headers: the allocated
* memory is always released with kfree() before exiting this function.
*/
if (header.nph) {
psize = header.nph * sizeof(*param_headers);
param_headers = kmalloc(psize, GFP_KERNEL);
if (!param_headers)
return -ENOMEM;
err = spi_nor_read_sfdp(nor, sizeof(header),
psize, param_headers);
if (err < 0) {
dev_dbg(dev, "failed to read SFDP parameter headers\n");
goto exit;
}
}
/*
* Cache the complete SFDP data. It is not (easily) possible to fetch
* SFDP after probe time and we need it for the sysfs access.
*/
for (i = 0; i < header.nph; i++) {
param_header = &param_headers[i];
sfdp_size = max_t(size_t, sfdp_size,
SFDP_PARAM_HEADER_PTP(param_header) +
SFDP_PARAM_HEADER_PARAM_LEN(param_header));
}
/*
* Limit the total size to a reasonable value to avoid allocating too
* much memory just because the flash returned some insane values.
*/
if (sfdp_size > PAGE_SIZE) {
dev_dbg(dev, "SFDP data (%zu) too big, truncating\n",
sfdp_size);
sfdp_size = PAGE_SIZE;
}
sfdp = devm_kzalloc(dev, sizeof(*sfdp), GFP_KERNEL);
if (!sfdp) {
err = -ENOMEM;
goto exit;
}
/*
* The SFDP is organized in chunks of DWORDs. Thus, in theory, the
* sfdp_size should be a multiple of DWORDs. But in case a flash
* is not spec compliant, make sure that we have enough space to store
* the complete SFDP data.
*/
sfdp->num_dwords = DIV_ROUND_UP(sfdp_size, sizeof(*sfdp->dwords));
sfdp->dwords = devm_kcalloc(dev, sfdp->num_dwords,
sizeof(*sfdp->dwords), GFP_KERNEL);
if (!sfdp->dwords) {
err = -ENOMEM;
devm_kfree(dev, sfdp);
goto exit;
}
err = spi_nor_read_sfdp(nor, 0, sfdp_size, sfdp->dwords);
if (err < 0) {
dev_dbg(dev, "failed to read SFDP data\n");
devm_kfree(dev, sfdp->dwords);
devm_kfree(dev, sfdp);
goto exit;
}
nor->sfdp = sfdp;
/*
* Check other parameter headers to get the latest revision of
* the basic flash parameter table.
*/
for (i = 0; i < header.nph; i++) {
param_header = &param_headers[i];
if (SFDP_PARAM_HEADER_ID(param_header) == SFDP_BFPT_ID &&
param_header->major == SFDP_JESD216_MAJOR &&
(param_header->minor > bfpt_header->minor ||
(param_header->minor == bfpt_header->minor &&
param_header->length > bfpt_header->length)))
bfpt_header = param_header;
}
err = spi_nor_parse_bfpt(nor, bfpt_header);
if (err)
goto exit;
/* Parse optional parameter tables. */
for (i = 0; i < header.nph; i++) {
param_header = &param_headers[i];
switch (SFDP_PARAM_HEADER_ID(param_header)) {
case SFDP_SECTOR_MAP_ID:
err = spi_nor_parse_smpt(nor, param_header);
break;
case SFDP_4BAIT_ID:
err = spi_nor_parse_4bait(nor, param_header);
break;
case SFDP_PROFILE1_ID:
err = spi_nor_parse_profile1(nor, param_header);
break;
case SFDP_SCCR_MAP_ID:
err = spi_nor_parse_sccr(nor, param_header);
break;
case SFDP_SCCR_MAP_MC_ID:
err = spi_nor_parse_sccr_mc(nor, param_header);
break;
default:
break;
}
if (err) {
dev_warn(dev, "Failed to parse optional parameter table: %04x\n",
SFDP_PARAM_HEADER_ID(param_header));
/*
* Let's not drop all the information we extracted so far
* if an optional table parser fails. In case of failure,
* each optional parser is responsible for rolling back to
* the previously known spi_nor data.
*/
err = 0;
}
}
err = spi_nor_post_sfdp_fixups(nor);
exit:
kfree(param_headers);
return err;
}
| linux-master | drivers/mtd/spi-nor/sfdp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2005, Intec Automation Inc.
* Copyright (C) 2014, Freescale Semiconductor, Inc.
*/
#include <linux/mtd/spi-nor.h>
#include "core.h"
static const struct flash_info everspin_nor_parts[] = {
/* Everspin */
{ "mr25h128", CAT25_INFO(16 * 1024, 1, 256, 2) },
{ "mr25h256", CAT25_INFO(32 * 1024, 1, 256, 2) },
{ "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3) },
{ "mr25h40", CAT25_INFO(512 * 1024, 1, 256, 3) },
};
const struct spi_nor_manufacturer spi_nor_everspin = {
.name = "everspin",
.parts = everspin_nor_parts,
.nparts = ARRAY_SIZE(everspin_nor_parts),
};
| linux-master | drivers/mtd/spi-nor/everspin.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2005, Intec Automation Inc.
* Copyright (C) 2014, Freescale Semiconductor, Inc.
*/
#include <linux/mtd/spi-nor.h>
#include "core.h"
static const struct flash_info xmc_nor_parts[] = {
/* XMC (Wuhan Xinxin Semiconductor Manufacturing Corp.) */
{ "XM25QH64A", INFO(0x207017, 0, 64 * 1024, 128)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "XM25QH128A", INFO(0x207018, 0, 64 * 1024, 256)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
};
const struct spi_nor_manufacturer spi_nor_xmc = {
.name = "xmc",
.parts = xmc_nor_parts,
.nparts = ARRAY_SIZE(xmc_nor_parts),
};
| linux-master | drivers/mtd/spi-nor/xmc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* HiSilicon FMC SPI NOR flash controller driver
*
* Copyright (c) 2015-2016 HiSilicon Technologies Co., Ltd.
*/
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/spi-nor.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
/* Hardware register offsets and field definitions */
#define FMC_CFG 0x00
#define FMC_CFG_OP_MODE_MASK BIT_MASK(0)
#define FMC_CFG_OP_MODE_BOOT 0
#define FMC_CFG_OP_MODE_NORMAL 1
#define FMC_CFG_FLASH_SEL(type) (((type) & 0x3) << 1)
#define FMC_CFG_FLASH_SEL_MASK 0x6
#define FMC_ECC_TYPE(type) (((type) & 0x7) << 5)
#define FMC_ECC_TYPE_MASK GENMASK(7, 5)
#define SPI_NOR_ADDR_MODE_MASK BIT_MASK(10)
#define SPI_NOR_ADDR_MODE_3BYTES (0x0 << 10)
#define SPI_NOR_ADDR_MODE_4BYTES (0x1 << 10)
#define FMC_GLOBAL_CFG 0x04
#define FMC_GLOBAL_CFG_WP_ENABLE BIT(6)
#define FMC_SPI_TIMING_CFG 0x08
#define TIMING_CFG_TCSH(nr) (((nr) & 0xf) << 8)
#define TIMING_CFG_TCSS(nr) (((nr) & 0xf) << 4)
#define TIMING_CFG_TSHSL(nr) ((nr) & 0xf)
#define CS_HOLD_TIME 0x6
#define CS_SETUP_TIME 0x6
#define CS_DESELECT_TIME 0xf
#define FMC_INT 0x18
#define FMC_INT_OP_DONE BIT(0)
#define FMC_INT_CLR 0x20
#define FMC_CMD 0x24
#define FMC_CMD_CMD1(cmd) ((cmd) & 0xff)
#define FMC_ADDRL 0x2c
#define FMC_OP_CFG 0x30
#define OP_CFG_FM_CS(cs) ((cs) << 11)
#define OP_CFG_MEM_IF_TYPE(type) (((type) & 0x7) << 7)
#define OP_CFG_ADDR_NUM(addr) (((addr) & 0x7) << 4)
#define OP_CFG_DUMMY_NUM(dummy) ((dummy) & 0xf)
#define FMC_DATA_NUM 0x38
#define FMC_DATA_NUM_CNT(cnt) ((cnt) & GENMASK(13, 0))
#define FMC_OP 0x3c
#define FMC_OP_DUMMY_EN BIT(8)
#define FMC_OP_CMD1_EN BIT(7)
#define FMC_OP_ADDR_EN BIT(6)
#define FMC_OP_WRITE_DATA_EN BIT(5)
#define FMC_OP_READ_DATA_EN BIT(2)
#define FMC_OP_READ_STATUS_EN BIT(1)
#define FMC_OP_REG_OP_START BIT(0)
#define FMC_DMA_LEN 0x40
#define FMC_DMA_LEN_SET(len) ((len) & GENMASK(27, 0))
#define FMC_DMA_SADDR_D0 0x4c
#define HIFMC_DMA_MAX_LEN (4096)
#define HIFMC_DMA_MASK (HIFMC_DMA_MAX_LEN - 1)
#define FMC_OP_DMA 0x68
#define OP_CTRL_RD_OPCODE(code) (((code) & 0xff) << 16)
#define OP_CTRL_WR_OPCODE(code) (((code) & 0xff) << 8)
#define OP_CTRL_RW_OP(op) ((op) << 1)
#define OP_CTRL_DMA_OP_READY BIT(0)
#define FMC_OP_READ 0x0
#define FMC_OP_WRITE 0x1
#define FMC_WAIT_TIMEOUT 1000000
enum hifmc_iftype {
IF_TYPE_STD,
IF_TYPE_DUAL,
IF_TYPE_DIO,
IF_TYPE_QUAD,
IF_TYPE_QIO,
};
struct hifmc_priv {
u32 chipselect;
u32 clkrate;
struct hifmc_host *host;
};
#define HIFMC_MAX_CHIP_NUM 2
struct hifmc_host {
struct device *dev;
struct mutex lock;
void __iomem *regbase;
void __iomem *iobase;
struct clk *clk;
void *buffer;
dma_addr_t dma_buffer;
struct spi_nor *nor[HIFMC_MAX_CHIP_NUM];
u32 num_chip;
};
static inline int hisi_spi_nor_wait_op_finish(struct hifmc_host *host)
{
u32 reg;
return readl_poll_timeout(host->regbase + FMC_INT, reg,
(reg & FMC_INT_OP_DONE), 0, FMC_WAIT_TIMEOUT);
}
static int hisi_spi_nor_get_if_type(enum spi_nor_protocol proto)
{
enum hifmc_iftype if_type;
switch (proto) {
case SNOR_PROTO_1_1_2:
if_type = IF_TYPE_DUAL;
break;
case SNOR_PROTO_1_2_2:
if_type = IF_TYPE_DIO;
break;
case SNOR_PROTO_1_1_4:
if_type = IF_TYPE_QUAD;
break;
case SNOR_PROTO_1_4_4:
if_type = IF_TYPE_QIO;
break;
case SNOR_PROTO_1_1_1:
default:
if_type = IF_TYPE_STD;
break;
}
return if_type;
}
static void hisi_spi_nor_init(struct hifmc_host *host)
{
u32 reg;
reg = TIMING_CFG_TCSH(CS_HOLD_TIME)
| TIMING_CFG_TCSS(CS_SETUP_TIME)
| TIMING_CFG_TSHSL(CS_DESELECT_TIME);
writel(reg, host->regbase + FMC_SPI_TIMING_CFG);
}
static int hisi_spi_nor_prep(struct spi_nor *nor)
{
struct hifmc_priv *priv = nor->priv;
struct hifmc_host *host = priv->host;
int ret;
mutex_lock(&host->lock);
ret = clk_set_rate(host->clk, priv->clkrate);
if (ret)
goto out;
ret = clk_prepare_enable(host->clk);
if (ret)
goto out;
return 0;
out:
mutex_unlock(&host->lock);
return ret;
}
static void hisi_spi_nor_unprep(struct spi_nor *nor)
{
struct hifmc_priv *priv = nor->priv;
struct hifmc_host *host = priv->host;
clk_disable_unprepare(host->clk);
mutex_unlock(&host->lock);
}
static int hisi_spi_nor_op_reg(struct spi_nor *nor,
u8 opcode, size_t len, u8 optype)
{
struct hifmc_priv *priv = nor->priv;
struct hifmc_host *host = priv->host;
u32 reg;
reg = FMC_CMD_CMD1(opcode);
writel(reg, host->regbase + FMC_CMD);
reg = FMC_DATA_NUM_CNT(len);
writel(reg, host->regbase + FMC_DATA_NUM);
reg = OP_CFG_FM_CS(priv->chipselect);
writel(reg, host->regbase + FMC_OP_CFG);
writel(0xff, host->regbase + FMC_INT_CLR);
reg = FMC_OP_CMD1_EN | FMC_OP_REG_OP_START | optype;
writel(reg, host->regbase + FMC_OP);
return hisi_spi_nor_wait_op_finish(host);
}
static int hisi_spi_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
size_t len)
{
struct hifmc_priv *priv = nor->priv;
struct hifmc_host *host = priv->host;
int ret;
ret = hisi_spi_nor_op_reg(nor, opcode, len, FMC_OP_READ_DATA_EN);
if (ret)
return ret;
memcpy_fromio(buf, host->iobase, len);
return 0;
}
static int hisi_spi_nor_write_reg(struct spi_nor *nor, u8 opcode,
const u8 *buf, size_t len)
{
struct hifmc_priv *priv = nor->priv;
struct hifmc_host *host = priv->host;
if (len)
memcpy_toio(host->iobase, buf, len);
return hisi_spi_nor_op_reg(nor, opcode, len, FMC_OP_WRITE_DATA_EN);
}
static int hisi_spi_nor_dma_transfer(struct spi_nor *nor, loff_t start_off,
dma_addr_t dma_buf, size_t len, u8 op_type)
{
struct hifmc_priv *priv = nor->priv;
struct hifmc_host *host = priv->host;
u8 if_type = 0;
u32 reg;
reg = readl(host->regbase + FMC_CFG);
reg &= ~(FMC_CFG_OP_MODE_MASK | SPI_NOR_ADDR_MODE_MASK);
reg |= FMC_CFG_OP_MODE_NORMAL;
reg |= (nor->addr_nbytes == 4) ? SPI_NOR_ADDR_MODE_4BYTES
: SPI_NOR_ADDR_MODE_3BYTES;
writel(reg, host->regbase + FMC_CFG);
writel(start_off, host->regbase + FMC_ADDRL);
writel(dma_buf, host->regbase + FMC_DMA_SADDR_D0);
writel(FMC_DMA_LEN_SET(len), host->regbase + FMC_DMA_LEN);
reg = OP_CFG_FM_CS(priv->chipselect);
if (op_type == FMC_OP_READ)
if_type = hisi_spi_nor_get_if_type(nor->read_proto);
else
if_type = hisi_spi_nor_get_if_type(nor->write_proto);
reg |= OP_CFG_MEM_IF_TYPE(if_type);
if (op_type == FMC_OP_READ)
reg |= OP_CFG_DUMMY_NUM(nor->read_dummy >> 3);
writel(reg, host->regbase + FMC_OP_CFG);
writel(0xff, host->regbase + FMC_INT_CLR);
reg = OP_CTRL_RW_OP(op_type) | OP_CTRL_DMA_OP_READY;
reg |= (op_type == FMC_OP_READ)
? OP_CTRL_RD_OPCODE(nor->read_opcode)
: OP_CTRL_WR_OPCODE(nor->program_opcode);
writel(reg, host->regbase + FMC_OP_DMA);
return hisi_spi_nor_wait_op_finish(host);
}
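/*
 * For example (illustrative values): a 256-byte read at flash offset 0x1000
 * programs FMC_ADDRL = 0x1000 and FMC_DMA_LEN = 256, selects the interface
 * type matching nor->read_proto, writes the read opcode into FMC_OP_DMA with
 * OP_CTRL_DMA_OP_READY set, and then polls FMC_INT for FMC_INT_OP_DONE.
 */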
static ssize_t hisi_spi_nor_read(struct spi_nor *nor, loff_t from, size_t len,
u_char *read_buf)
{
struct hifmc_priv *priv = nor->priv;
struct hifmc_host *host = priv->host;
size_t offset;
int ret;
for (offset = 0; offset < len; offset += HIFMC_DMA_MAX_LEN) {
size_t trans = min_t(size_t, HIFMC_DMA_MAX_LEN, len - offset);
ret = hisi_spi_nor_dma_transfer(nor,
from + offset, host->dma_buffer, trans, FMC_OP_READ);
if (ret) {
dev_warn(nor->dev, "DMA read timeout\n");
return ret;
}
memcpy(read_buf + offset, host->buffer, trans);
}
return len;
}
static ssize_t hisi_spi_nor_write(struct spi_nor *nor, loff_t to,
size_t len, const u_char *write_buf)
{
struct hifmc_priv *priv = nor->priv;
struct hifmc_host *host = priv->host;
size_t offset;
int ret;
for (offset = 0; offset < len; offset += HIFMC_DMA_MAX_LEN) {
size_t trans = min_t(size_t, HIFMC_DMA_MAX_LEN, len - offset);
memcpy(host->buffer, write_buf + offset, trans);
ret = hisi_spi_nor_dma_transfer(nor,
to + offset, host->dma_buffer, trans, FMC_OP_WRITE);
if (ret) {
dev_warn(nor->dev, "DMA write timeout\n");
return ret;
}
}
return len;
}
static const struct spi_nor_controller_ops hisi_controller_ops = {
.prepare = hisi_spi_nor_prep,
.unprepare = hisi_spi_nor_unprep,
.read_reg = hisi_spi_nor_read_reg,
.write_reg = hisi_spi_nor_write_reg,
.read = hisi_spi_nor_read,
.write = hisi_spi_nor_write,
};
/*
* Get SPI flash device information and register it as an mtd device.
*/
static int hisi_spi_nor_register(struct device_node *np,
struct hifmc_host *host)
{
const struct spi_nor_hwcaps hwcaps = {
.mask = SNOR_HWCAPS_READ |
SNOR_HWCAPS_READ_FAST |
SNOR_HWCAPS_READ_1_1_2 |
SNOR_HWCAPS_READ_1_1_4 |
SNOR_HWCAPS_PP,
};
struct device *dev = host->dev;
struct spi_nor *nor;
struct hifmc_priv *priv;
struct mtd_info *mtd;
int ret;
nor = devm_kzalloc(dev, sizeof(*nor), GFP_KERNEL);
if (!nor)
return -ENOMEM;
nor->dev = dev;
spi_nor_set_flash_node(nor, np);
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
ret = of_property_read_u32(np, "reg", &priv->chipselect);
if (ret) {
dev_err(dev, "There's no reg property for %pOF\n",
np);
return ret;
}
ret = of_property_read_u32(np, "spi-max-frequency",
&priv->clkrate);
if (ret) {
dev_err(dev, "There's no spi-max-frequency property for %pOF\n",
np);
return ret;
}
priv->host = host;
nor->priv = priv;
nor->controller_ops = &hisi_controller_ops;
ret = spi_nor_scan(nor, NULL, &hwcaps);
if (ret)
return ret;
mtd = &nor->mtd;
mtd->name = np->name;
ret = mtd_device_register(mtd, NULL, 0);
if (ret)
return ret;
host->nor[host->num_chip] = nor;
host->num_chip++;
return 0;
}
static void hisi_spi_nor_unregister_all(struct hifmc_host *host)
{
int i;
for (i = 0; i < host->num_chip; i++)
mtd_device_unregister(&host->nor[i]->mtd);
}
static int hisi_spi_nor_register_all(struct hifmc_host *host)
{
struct device *dev = host->dev;
struct device_node *np;
int ret;
for_each_available_child_of_node(dev->of_node, np) {
ret = hisi_spi_nor_register(np, host);
if (ret) {
of_node_put(np);
goto fail;
}
if (host->num_chip == HIFMC_MAX_CHIP_NUM) {
dev_warn(dev, "Flash device number exceeds the maximum chipselect number\n");
of_node_put(np);
break;
}
}
return 0;
fail:
hisi_spi_nor_unregister_all(host);
return ret;
}
static int hisi_spi_nor_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct hifmc_host *host;
int ret;
host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
if (!host)
return -ENOMEM;
platform_set_drvdata(pdev, host);
host->dev = dev;
host->regbase = devm_platform_ioremap_resource_byname(pdev, "control");
if (IS_ERR(host->regbase))
return PTR_ERR(host->regbase);
host->iobase = devm_platform_ioremap_resource_byname(pdev, "memory");
if (IS_ERR(host->iobase))
return PTR_ERR(host->iobase);
host->clk = devm_clk_get(dev, NULL);
if (IS_ERR(host->clk))
return PTR_ERR(host->clk);
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret) {
dev_warn(dev, "Unable to set dma mask\n");
return ret;
}
host->buffer = dmam_alloc_coherent(dev, HIFMC_DMA_MAX_LEN,
&host->dma_buffer, GFP_KERNEL);
if (!host->buffer)
return -ENOMEM;
ret = clk_prepare_enable(host->clk);
if (ret)
return ret;
mutex_init(&host->lock);
hisi_spi_nor_init(host);
ret = hisi_spi_nor_register_all(host);
if (ret)
mutex_destroy(&host->lock);
clk_disable_unprepare(host->clk);
return ret;
}
static int hisi_spi_nor_remove(struct platform_device *pdev)
{
struct hifmc_host *host = platform_get_drvdata(pdev);
hisi_spi_nor_unregister_all(host);
mutex_destroy(&host->lock);
return 0;
}
static const struct of_device_id hisi_spi_nor_dt_ids[] = {
{ .compatible = "hisilicon,fmc-spi-nor"},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, hisi_spi_nor_dt_ids);
static struct platform_driver hisi_spi_nor_driver = {
.driver = {
.name = "hisi-sfc",
.of_match_table = hisi_spi_nor_dt_ids,
},
.probe = hisi_spi_nor_probe,
.remove = hisi_spi_nor_remove,
};
module_platform_driver(hisi_spi_nor_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("HiSilicon SPI Nor Flash Controller Driver");
| linux-master | drivers/mtd/spi-nor/controllers/hisi-sfc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* SPI NOR driver for NXP SPI Flash Interface (SPIFI)
*
* Copyright (C) 2015 Joachim Eastwood <[email protected]>
*
* Based on Freescale QuadSPI driver:
* Copyright (C) 2013 Freescale Semiconductor, Inc.
*/
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/spi-nor.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
/* NXP SPIFI registers, bits and macros */
#define SPIFI_CTRL 0x000
#define SPIFI_CTRL_TIMEOUT(timeout) (timeout)
#define SPIFI_CTRL_CSHIGH(cshigh) ((cshigh) << 16)
#define SPIFI_CTRL_MODE3 BIT(23)
#define SPIFI_CTRL_DUAL BIT(28)
#define SPIFI_CTRL_FBCLK BIT(30)
#define SPIFI_CMD 0x004
#define SPIFI_CMD_DATALEN(dlen) ((dlen) & 0x3fff)
#define SPIFI_CMD_DOUT BIT(15)
#define SPIFI_CMD_INTLEN(ilen) ((ilen) << 16)
#define SPIFI_CMD_FIELDFORM(field) ((field) << 19)
#define SPIFI_CMD_FIELDFORM_ALL_SERIAL SPIFI_CMD_FIELDFORM(0x0)
#define SPIFI_CMD_FIELDFORM_QUAD_DUAL_DATA SPIFI_CMD_FIELDFORM(0x1)
#define SPIFI_CMD_FRAMEFORM(frame) ((frame) << 21)
#define SPIFI_CMD_FRAMEFORM_OPCODE_ONLY SPIFI_CMD_FRAMEFORM(0x1)
#define SPIFI_CMD_OPCODE(op) ((op) << 24)
#define SPIFI_ADDR 0x008
#define SPIFI_IDATA 0x00c
#define SPIFI_CLIMIT 0x010
#define SPIFI_DATA 0x014
#define SPIFI_MCMD 0x018
#define SPIFI_STAT 0x01c
#define SPIFI_STAT_MCINIT BIT(0)
#define SPIFI_STAT_CMD BIT(1)
#define SPIFI_STAT_RESET BIT(4)
#define SPI_NOR_MAX_ID_LEN 6
struct nxp_spifi {
struct device *dev;
struct clk *clk_spifi;
struct clk *clk_reg;
void __iomem *io_base;
void __iomem *flash_base;
struct spi_nor nor;
bool memory_mode;
u32 mcmd;
};
static int nxp_spifi_wait_for_cmd(struct nxp_spifi *spifi)
{
u8 stat;
int ret;
ret = readb_poll_timeout(spifi->io_base + SPIFI_STAT, stat,
!(stat & SPIFI_STAT_CMD), 10, 30);
if (ret)
dev_warn(spifi->dev, "command timed out\n");
return ret;
}
static int nxp_spifi_reset(struct nxp_spifi *spifi)
{
u8 stat;
int ret;
writel(SPIFI_STAT_RESET, spifi->io_base + SPIFI_STAT);
ret = readb_poll_timeout(spifi->io_base + SPIFI_STAT, stat,
!(stat & SPIFI_STAT_RESET), 10, 30);
if (ret)
dev_warn(spifi->dev, "state reset timed out\n");
return ret;
}
static int nxp_spifi_set_memory_mode_off(struct nxp_spifi *spifi)
{
int ret;
if (!spifi->memory_mode)
return 0;
ret = nxp_spifi_reset(spifi);
if (ret)
dev_err(spifi->dev, "unable to enter command mode\n");
else
spifi->memory_mode = false;
return ret;
}
static int nxp_spifi_set_memory_mode_on(struct nxp_spifi *spifi)
{
u8 stat;
int ret;
if (spifi->memory_mode)
return 0;
writel(spifi->mcmd, spifi->io_base + SPIFI_MCMD);
ret = readb_poll_timeout(spifi->io_base + SPIFI_STAT, stat,
stat & SPIFI_STAT_MCINIT, 10, 30);
if (ret)
dev_err(spifi->dev, "unable to enter memory mode\n");
else
spifi->memory_mode = true;
return ret;
}
static int nxp_spifi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
size_t len)
{
struct nxp_spifi *spifi = nor->priv;
u32 cmd;
int ret;
ret = nxp_spifi_set_memory_mode_off(spifi);
if (ret)
return ret;
cmd = SPIFI_CMD_DATALEN(len) |
SPIFI_CMD_OPCODE(opcode) |
SPIFI_CMD_FIELDFORM_ALL_SERIAL |
SPIFI_CMD_FRAMEFORM_OPCODE_ONLY;
writel(cmd, spifi->io_base + SPIFI_CMD);
while (len--)
*buf++ = readb(spifi->io_base + SPIFI_DATA);
return nxp_spifi_wait_for_cmd(spifi);
}
static int nxp_spifi_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf,
size_t len)
{
struct nxp_spifi *spifi = nor->priv;
u32 cmd;
int ret;
ret = nxp_spifi_set_memory_mode_off(spifi);
if (ret)
return ret;
cmd = SPIFI_CMD_DOUT |
SPIFI_CMD_DATALEN(len) |
SPIFI_CMD_OPCODE(opcode) |
SPIFI_CMD_FIELDFORM_ALL_SERIAL |
SPIFI_CMD_FRAMEFORM_OPCODE_ONLY;
writel(cmd, spifi->io_base + SPIFI_CMD);
while (len--)
writeb(*buf++, spifi->io_base + SPIFI_DATA);
return nxp_spifi_wait_for_cmd(spifi);
}
static ssize_t nxp_spifi_read(struct spi_nor *nor, loff_t from, size_t len,
u_char *buf)
{
struct nxp_spifi *spifi = nor->priv;
int ret;
ret = nxp_spifi_set_memory_mode_on(spifi);
if (ret)
return ret;
memcpy_fromio(buf, spifi->flash_base + from, len);
return len;
}
static ssize_t nxp_spifi_write(struct spi_nor *nor, loff_t to, size_t len,
const u_char *buf)
{
struct nxp_spifi *spifi = nor->priv;
u32 cmd;
int ret;
size_t i;
ret = nxp_spifi_set_memory_mode_off(spifi);
if (ret)
return ret;
writel(to, spifi->io_base + SPIFI_ADDR);
cmd = SPIFI_CMD_DOUT |
SPIFI_CMD_DATALEN(len) |
SPIFI_CMD_FIELDFORM_ALL_SERIAL |
SPIFI_CMD_OPCODE(nor->program_opcode) |
SPIFI_CMD_FRAMEFORM(spifi->nor.addr_nbytes + 1);
writel(cmd, spifi->io_base + SPIFI_CMD);
for (i = 0; i < len; i++)
writeb(buf[i], spifi->io_base + SPIFI_DATA);
ret = nxp_spifi_wait_for_cmd(spifi);
if (ret)
return ret;
return len;
}
static int nxp_spifi_erase(struct spi_nor *nor, loff_t offs)
{
struct nxp_spifi *spifi = nor->priv;
u32 cmd;
int ret;
ret = nxp_spifi_set_memory_mode_off(spifi);
if (ret)
return ret;
writel(offs, spifi->io_base + SPIFI_ADDR);
cmd = SPIFI_CMD_FIELDFORM_ALL_SERIAL |
SPIFI_CMD_OPCODE(nor->erase_opcode) |
SPIFI_CMD_FRAMEFORM(spifi->nor.addr_nbytes + 1);
writel(cmd, spifi->io_base + SPIFI_CMD);
return nxp_spifi_wait_for_cmd(spifi);
}
static int nxp_spifi_setup_memory_cmd(struct nxp_spifi *spifi)
{
switch (spifi->nor.read_proto) {
case SNOR_PROTO_1_1_1:
spifi->mcmd = SPIFI_CMD_FIELDFORM_ALL_SERIAL;
break;
case SNOR_PROTO_1_1_2:
case SNOR_PROTO_1_1_4:
spifi->mcmd = SPIFI_CMD_FIELDFORM_QUAD_DUAL_DATA;
break;
default:
dev_err(spifi->dev, "unsupported SPI read mode\n");
return -EINVAL;
}
/* Memory mode supports address length between 1 and 4 */
if (spifi->nor.addr_nbytes < 1 || spifi->nor.addr_nbytes > 4)
return -EINVAL;
spifi->mcmd |= SPIFI_CMD_OPCODE(spifi->nor.read_opcode) |
SPIFI_CMD_INTLEN(spifi->nor.read_dummy / 8) |
SPIFI_CMD_FRAMEFORM(spifi->nor.addr_nbytes + 1);
return 0;
}
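/*
 * For example (illustrative case): a quad-output flash using read opcode
 * 0x6b, a 3-byte address and 8 dummy clocks yields an mcmd combining
 * SPIFI_CMD_FIELDFORM_QUAD_DUAL_DATA, SPIFI_CMD_OPCODE(0x6b),
 * SPIFI_CMD_INTLEN(1) (one intermediate/dummy byte) and
 * SPIFI_CMD_FRAMEFORM(4) (opcode plus three address bytes).
 */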
static void nxp_spifi_dummy_id_read(struct spi_nor *nor)
{
u8 id[SPI_NOR_MAX_ID_LEN];
nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id,
SPI_NOR_MAX_ID_LEN);
}
static const struct spi_nor_controller_ops nxp_spifi_controller_ops = {
.read_reg = nxp_spifi_read_reg,
.write_reg = nxp_spifi_write_reg,
.read = nxp_spifi_read,
.write = nxp_spifi_write,
.erase = nxp_spifi_erase,
};
static int nxp_spifi_setup_flash(struct nxp_spifi *spifi,
struct device_node *np)
{
struct spi_nor_hwcaps hwcaps = {
.mask = SNOR_HWCAPS_READ |
SNOR_HWCAPS_READ_FAST |
SNOR_HWCAPS_PP,
};
u32 ctrl, property;
u16 mode = 0;
int ret;
if (!of_property_read_u32(np, "spi-rx-bus-width", &property)) {
switch (property) {
case 1:
break;
case 2:
mode |= SPI_RX_DUAL;
break;
case 4:
mode |= SPI_RX_QUAD;
break;
default:
dev_err(spifi->dev, "unsupported rx-bus-width\n");
return -EINVAL;
}
}
if (of_property_read_bool(np, "spi-cpha"))
mode |= SPI_CPHA;
if (of_property_read_bool(np, "spi-cpol"))
mode |= SPI_CPOL;
/* Setup control register defaults */
ctrl = SPIFI_CTRL_TIMEOUT(1000) |
SPIFI_CTRL_CSHIGH(15) |
SPIFI_CTRL_FBCLK;
if (mode & SPI_RX_DUAL) {
ctrl |= SPIFI_CTRL_DUAL;
hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
} else if (mode & SPI_RX_QUAD) {
ctrl &= ~SPIFI_CTRL_DUAL;
hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
} else {
ctrl |= SPIFI_CTRL_DUAL;
}
switch (mode & SPI_MODE_X_MASK) {
case SPI_MODE_0:
ctrl &= ~SPIFI_CTRL_MODE3;
break;
case SPI_MODE_3:
ctrl |= SPIFI_CTRL_MODE3;
break;
default:
dev_err(spifi->dev, "only mode 0 and 3 supported\n");
return -EINVAL;
}
writel(ctrl, spifi->io_base + SPIFI_CTRL);
spifi->nor.dev = spifi->dev;
spi_nor_set_flash_node(&spifi->nor, np);
spifi->nor.priv = spifi;
spifi->nor.controller_ops = &nxp_spifi_controller_ops;
/*
* The first read on a hard reset isn't reliable, so do a
* dummy read of the id before calling spi_nor_scan().
* The reason for this problem is unknown.
*
* The official NXP spifilib uses more or less the same
* workaround that is applied here by reading the device
* id multiple times.
*/
nxp_spifi_dummy_id_read(&spifi->nor);
ret = spi_nor_scan(&spifi->nor, NULL, &hwcaps);
if (ret) {
dev_err(spifi->dev, "device scan failed\n");
return ret;
}
ret = nxp_spifi_setup_memory_cmd(spifi);
if (ret) {
dev_err(spifi->dev, "memory command setup failed\n");
return ret;
}
ret = mtd_device_register(&spifi->nor.mtd, NULL, 0);
if (ret) {
dev_err(spifi->dev, "mtd device parse failed\n");
return ret;
}
return 0;
}
static int nxp_spifi_probe(struct platform_device *pdev)
{
struct device_node *flash_np;
struct nxp_spifi *spifi;
int ret;
spifi = devm_kzalloc(&pdev->dev, sizeof(*spifi), GFP_KERNEL);
if (!spifi)
return -ENOMEM;
spifi->io_base = devm_platform_ioremap_resource_byname(pdev, "spifi");
if (IS_ERR(spifi->io_base))
return PTR_ERR(spifi->io_base);
spifi->flash_base = devm_platform_ioremap_resource_byname(pdev, "flash");
if (IS_ERR(spifi->flash_base))
return PTR_ERR(spifi->flash_base);
spifi->clk_spifi = devm_clk_get_enabled(&pdev->dev, "spifi");
if (IS_ERR(spifi->clk_spifi)) {
dev_err(&pdev->dev, "spifi clock not found or unable to enable\n");
return PTR_ERR(spifi->clk_spifi);
}
spifi->clk_reg = devm_clk_get_enabled(&pdev->dev, "reg");
if (IS_ERR(spifi->clk_reg)) {
dev_err(&pdev->dev, "reg clock not found or unable to enable\n");
return PTR_ERR(spifi->clk_reg);
}
spifi->dev = &pdev->dev;
platform_set_drvdata(pdev, spifi);
/* Initialize and reset device */
nxp_spifi_reset(spifi);
writel(0, spifi->io_base + SPIFI_IDATA);
writel(0, spifi->io_base + SPIFI_MCMD);
nxp_spifi_reset(spifi);
flash_np = of_get_next_available_child(pdev->dev.of_node, NULL);
if (!flash_np) {
dev_err(&pdev->dev, "no SPI flash device to configure\n");
return -ENODEV;
}
ret = nxp_spifi_setup_flash(spifi, flash_np);
of_node_put(flash_np);
if (ret) {
dev_err(&pdev->dev, "unable to setup flash chip\n");
return ret;
}
return 0;
}
static int nxp_spifi_remove(struct platform_device *pdev)
{
struct nxp_spifi *spifi = platform_get_drvdata(pdev);
mtd_device_unregister(&spifi->nor.mtd);
return 0;
}
static const struct of_device_id nxp_spifi_match[] = {
{.compatible = "nxp,lpc1773-spifi"},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, nxp_spifi_match);
static struct platform_driver nxp_spifi_driver = {
.probe = nxp_spifi_probe,
.remove = nxp_spifi_remove,
.driver = {
.name = "nxp-spifi",
.of_match_table = nxp_spifi_match,
},
};
module_platform_driver(nxp_spifi_driver);
MODULE_DESCRIPTION("NXP SPI Flash Interface driver");
MODULE_AUTHOR("Joachim Eastwood <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/mtd/spi-nor/controllers/nxp-spifi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP LPC18xx/LPC43xx EEPROM memory NVMEM driver
*
* Copyright (c) 2015 Ariel D'Alessandro <[email protected]>
*/
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
/* Registers */
#define LPC18XX_EEPROM_AUTOPROG 0x00c
#define LPC18XX_EEPROM_AUTOPROG_WORD 0x1
#define LPC18XX_EEPROM_CLKDIV 0x014
#define LPC18XX_EEPROM_PWRDWN 0x018
#define LPC18XX_EEPROM_PWRDWN_NO 0x0
#define LPC18XX_EEPROM_PWRDWN_YES 0x1
#define LPC18XX_EEPROM_INTSTAT 0xfe0
#define LPC18XX_EEPROM_INTSTAT_END_OF_PROG BIT(2)
#define LPC18XX_EEPROM_INTSTATCLR 0xfe8
#define LPC18XX_EEPROM_INTSTATCLR_PROG_CLR_ST BIT(2)
/* Fixed page size (bytes) */
#define LPC18XX_EEPROM_PAGE_SIZE 0x80
/* EEPROM device requires a ~1500 kHz clock (min 800 kHz, max 1600 kHz) */
#define LPC18XX_EEPROM_CLOCK_HZ 1500000
/* EEPROM requires 3 ms of erase/program time between writes */
#define LPC18XX_EEPROM_PROGRAM_TIME 3
struct lpc18xx_eeprom_dev {
struct clk *clk;
void __iomem *reg_base;
void __iomem *mem_base;
struct nvmem_device *nvmem;
unsigned reg_bytes;
unsigned val_bytes;
int size;
};
static inline void lpc18xx_eeprom_writel(struct lpc18xx_eeprom_dev *eeprom,
u32 reg, u32 val)
{
writel(val, eeprom->reg_base + reg);
}
static inline u32 lpc18xx_eeprom_readl(struct lpc18xx_eeprom_dev *eeprom,
u32 reg)
{
return readl(eeprom->reg_base + reg);
}
static int lpc18xx_eeprom_busywait_until_prog(struct lpc18xx_eeprom_dev *eeprom)
{
unsigned long end;
u32 val;
/* Wait until EEPROM program operation has finished */
end = jiffies + msecs_to_jiffies(LPC18XX_EEPROM_PROGRAM_TIME * 10);
while (time_is_after_jiffies(end)) {
val = lpc18xx_eeprom_readl(eeprom, LPC18XX_EEPROM_INTSTAT);
if (val & LPC18XX_EEPROM_INTSTAT_END_OF_PROG) {
lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_INTSTATCLR,
LPC18XX_EEPROM_INTSTATCLR_PROG_CLR_ST);
return 0;
}
usleep_range(LPC18XX_EEPROM_PROGRAM_TIME * USEC_PER_MSEC,
(LPC18XX_EEPROM_PROGRAM_TIME + 1) * USEC_PER_MSEC);
}
return -ETIMEDOUT;
}
static int lpc18xx_eeprom_gather_write(void *context, unsigned int reg,
void *val, size_t bytes)
{
struct lpc18xx_eeprom_dev *eeprom = context;
unsigned int offset = reg;
int ret;
/*
* The last page contains the EEPROM initialization data and is not
* writable.
*/
if ((reg > eeprom->size - LPC18XX_EEPROM_PAGE_SIZE) ||
(reg + bytes > eeprom->size - LPC18XX_EEPROM_PAGE_SIZE))
return -EINVAL;
lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN,
LPC18XX_EEPROM_PWRDWN_NO);
/* Wait 100 us while the EEPROM wakes up */
usleep_range(100, 200);
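/*
* Program one 32-bit word at a time; with AUTOPROG enabled each write
* starts an erase/program cycle that must finish before the next word.
*/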
while (bytes) {
writel(*(u32 *)val, eeprom->mem_base + offset);
ret = lpc18xx_eeprom_busywait_until_prog(eeprom);
if (ret < 0)
return ret;
bytes -= eeprom->val_bytes;
val += eeprom->val_bytes;
offset += eeprom->val_bytes;
}
lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN,
LPC18XX_EEPROM_PWRDWN_YES);
return 0;
}
static int lpc18xx_eeprom_read(void *context, unsigned int offset,
void *val, size_t bytes)
{
struct lpc18xx_eeprom_dev *eeprom = context;
lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN,
LPC18XX_EEPROM_PWRDWN_NO);
/* Wait 100 us while the EEPROM wakes up */
usleep_range(100, 200);
while (bytes) {
*(u32 *)val = readl(eeprom->mem_base + offset);
bytes -= eeprom->val_bytes;
val += eeprom->val_bytes;
offset += eeprom->val_bytes;
}
lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN,
LPC18XX_EEPROM_PWRDWN_YES);
return 0;
}
static struct nvmem_config lpc18xx_nvmem_config = {
.name = "lpc18xx-eeprom",
.stride = 4,
.word_size = 4,
.reg_read = lpc18xx_eeprom_read,
.reg_write = lpc18xx_eeprom_gather_write,
};
static int lpc18xx_eeprom_probe(struct platform_device *pdev)
{
struct lpc18xx_eeprom_dev *eeprom;
struct device *dev = &pdev->dev;
struct reset_control *rst;
unsigned long clk_rate;
struct resource *res;
int ret;
eeprom = devm_kzalloc(dev, sizeof(*eeprom), GFP_KERNEL);
if (!eeprom)
return -ENOMEM;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg");
eeprom->reg_base = devm_ioremap_resource(dev, res);
if (IS_ERR(eeprom->reg_base))
return PTR_ERR(eeprom->reg_base);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem");
eeprom->mem_base = devm_ioremap_resource(dev, res);
if (IS_ERR(eeprom->mem_base))
return PTR_ERR(eeprom->mem_base);
eeprom->clk = devm_clk_get(&pdev->dev, "eeprom");
if (IS_ERR(eeprom->clk)) {
dev_err(&pdev->dev, "failed to get eeprom clock\n");
return PTR_ERR(eeprom->clk);
}
ret = clk_prepare_enable(eeprom->clk);
if (ret < 0) {
dev_err(dev, "failed to prepare/enable eeprom clk: %d\n", ret);
return ret;
}
rst = devm_reset_control_get_exclusive(dev, NULL);
if (IS_ERR(rst)) {
dev_err(dev, "failed to get reset: %ld\n", PTR_ERR(rst));
ret = PTR_ERR(rst);
goto err_clk;
}
ret = reset_control_assert(rst);
if (ret < 0) {
dev_err(dev, "failed to assert reset: %d\n", ret);
goto err_clk;
}
eeprom->val_bytes = 4;
eeprom->reg_bytes = 4;
/*
* Clock rate is generated by dividing the system bus clock by the
* division factor stored in the divider register (encoded as the value minus one).
*/
clk_rate = clk_get_rate(eeprom->clk);
clk_rate = DIV_ROUND_UP(clk_rate, LPC18XX_EEPROM_CLOCK_HZ) - 1;
lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_CLKDIV, clk_rate);
/*
* Writing a single word to the page will start the erase/program cycle
* automatically
*/
lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_AUTOPROG,
LPC18XX_EEPROM_AUTOPROG_WORD);
lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN,
LPC18XX_EEPROM_PWRDWN_YES);
eeprom->size = resource_size(res);
lpc18xx_nvmem_config.size = resource_size(res);
lpc18xx_nvmem_config.dev = dev;
lpc18xx_nvmem_config.priv = eeprom;
eeprom->nvmem = devm_nvmem_register(dev, &lpc18xx_nvmem_config);
if (IS_ERR(eeprom->nvmem)) {
ret = PTR_ERR(eeprom->nvmem);
goto err_clk;
}
platform_set_drvdata(pdev, eeprom);
return 0;
err_clk:
clk_disable_unprepare(eeprom->clk);
return ret;
}
static int lpc18xx_eeprom_remove(struct platform_device *pdev)
{
struct lpc18xx_eeprom_dev *eeprom = platform_get_drvdata(pdev);
clk_disable_unprepare(eeprom->clk);
return 0;
}
static const struct of_device_id lpc18xx_eeprom_of_match[] = {
{ .compatible = "nxp,lpc1857-eeprom" },
{ },
};
MODULE_DEVICE_TABLE(of, lpc18xx_eeprom_of_match);
static struct platform_driver lpc18xx_eeprom_driver = {
.probe = lpc18xx_eeprom_probe,
.remove = lpc18xx_eeprom_remove,
.driver = {
.name = "lpc18xx-eeprom",
.of_match_table = lpc18xx_eeprom_of_match,
},
};
module_platform_driver(lpc18xx_eeprom_driver);
MODULE_AUTHOR("Ariel D'Alessandro <[email protected]>");
MODULE_DESCRIPTION("NXP LPC18xx EEPROM memory Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/nvmem/lpc18xx_eeprom.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* OP-TEE STM32MP BSEC PTA interface, used by STM32 ROMEM driver
*
* Copyright (C) 2022, STMicroelectronics - All Rights Reserved
*/
#include <linux/tee_drv.h>
#include "stm32-bsec-optee-ta.h"
/*
* Read OTP memory
*
* [in] value[0].a OTP start offset in byte
* [in] value[0].b Access type (0:shadow, 1:fuse, 2:lock)
* [out] memref[1].buffer Output buffer to store read values
* [out] memref[1].size Size of OTP to be read
*
* Return codes:
* TEE_SUCCESS - Invoke command success
* TEE_ERROR_BAD_PARAMETERS - Incorrect input param
* TEE_ERROR_ACCESS_DENIED - OTP not accessible by caller
*/
#define PTA_BSEC_READ_MEM 0x0
/*
* Write OTP memory
*
* [in] value[0].a OTP start offset in byte
* [in] value[0].b Access type (0:shadow, 1:fuse, 2:lock)
* [in] memref[1].buffer Input buffer to read values
* [in] memref[1].size Size of OTP to be written
*
* Return codes:
* TEE_SUCCESS - Invoke command success
* TEE_ERROR_BAD_PARAMETERS - Incorrect input param
* TEE_ERROR_ACCESS_DENIED - OTP not accessible by caller
*/
#define PTA_BSEC_WRITE_MEM 0x1
/* Values for the PTA_BSEC access type (param value[0].b) */
#define SHADOW_ACCESS 0
#define FUSE_ACCESS 1
#define LOCK_ACCESS 2
/* Bitfield definition for LOCK status */
#define LOCK_PERM BIT(30)
/* OP-TEE STM32MP BSEC TA UUID */
static const uuid_t stm32mp_bsec_ta_uuid =
UUID_INIT(0x94cf71ad, 0x80e6, 0x40b5,
0xa7, 0xc6, 0x3d, 0xc5, 0x01, 0xeb, 0x28, 0x03);
/*
* Check whether this driver supports the BSEC TA in the TEE instance
* represented by the params (ver/data) to this function.
*/
static int stm32_bsec_optee_ta_match(struct tee_ioctl_version_data *ver,
const void *data)
{
/* Currently this driver only supports GP compliant, OP-TEE based TA */
if ((ver->impl_id == TEE_IMPL_ID_OPTEE) &&
(ver->gen_caps & TEE_GEN_CAP_GP))
return 1;
else
return 0;
}
/* Open a session to OP-TEE for STM32MP BSEC TA */
static int stm32_bsec_ta_open_session(struct tee_context *ctx, u32 *id)
{
struct tee_ioctl_open_session_arg sess_arg;
int rc;
memset(&sess_arg, 0, sizeof(sess_arg));
export_uuid(sess_arg.uuid, &stm32mp_bsec_ta_uuid);
sess_arg.clnt_login = TEE_IOCTL_LOGIN_REE_KERNEL;
sess_arg.num_params = 0;
rc = tee_client_open_session(ctx, &sess_arg, NULL);
if ((rc < 0) || (sess_arg.ret != 0)) {
pr_err("%s: tee_client_open_session failed err:%#x, ret:%#x\n",
__func__, sess_arg.ret, rc);
if (!rc)
rc = -EINVAL;
} else {
*id = sess_arg.session;
}
return rc;
}
/* close a session to OP-TEE for STM32MP BSEC TA */
static void stm32_bsec_ta_close_session(void *ctx, u32 id)
{
tee_client_close_session(ctx, id);
}
/* stm32_bsec_optee_ta_open() - initialize the STM32MP BSEC TA */
int stm32_bsec_optee_ta_open(struct tee_context **ctx)
{
struct tee_context *tee_ctx;
u32 session_id;
int rc;
/* Open context with TEE driver */
tee_ctx = tee_client_open_context(NULL, stm32_bsec_optee_ta_match, NULL, NULL);
if (IS_ERR(tee_ctx)) {
rc = PTR_ERR(tee_ctx);
if (rc == -ENOENT)
return -EPROBE_DEFER;
pr_err("%s: tee_client_open_context failed (%d)\n", __func__, rc);
return rc;
}
/* Check STM32MP BSEC TA presence */
rc = stm32_bsec_ta_open_session(tee_ctx, &session_id);
if (rc) {
tee_client_close_context(tee_ctx);
return rc;
}
stm32_bsec_ta_close_session(tee_ctx, session_id);
*ctx = tee_ctx;
return 0;
}
/* stm32_bsec_optee_ta_close() - release the PTA STM32MP BSEC TA */
void stm32_bsec_optee_ta_close(void *ctx)
{
tee_client_close_context(ctx);
}
/* stm32_bsec_optee_ta_read() - nvmem read access using PTA client driver */
int stm32_bsec_optee_ta_read(struct tee_context *ctx, unsigned int offset,
void *buf, size_t bytes)
{
struct tee_shm *shm;
struct tee_ioctl_invoke_arg arg;
struct tee_param param[2];
u8 *shm_buf;
u32 start, num_bytes;
int ret;
u32 session_id;
ret = stm32_bsec_ta_open_session(ctx, &session_id);
if (ret)
return ret;
memset(&arg, 0, sizeof(arg));
memset(&param, 0, sizeof(param));
arg.func = PTA_BSEC_READ_MEM;
arg.session = session_id;
arg.num_params = 2;
/* align access on 32bits */
start = ALIGN_DOWN(offset, 4);
num_bytes = round_up(offset + bytes - start, 4);
param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
param[0].u.value.a = start;
param[0].u.value.b = SHADOW_ACCESS;
shm = tee_shm_alloc_kernel_buf(ctx, num_bytes);
if (IS_ERR(shm)) {
ret = PTR_ERR(shm);
goto out_tee_session;
}
param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
param[1].u.memref.shm = shm;
param[1].u.memref.size = num_bytes;
ret = tee_client_invoke_func(ctx, &arg, param);
if (ret < 0 || arg.ret != 0) {
pr_err("TA_BSEC invoke failed TEE err:%#x, ret:%#x\n",
arg.ret, ret);
if (!ret)
ret = -EIO;
}
if (!ret) {
shm_buf = tee_shm_get_va(shm, 0);
if (IS_ERR(shm_buf)) {
ret = PTR_ERR(shm_buf);
pr_err("tee_shm_get_va failed for transmit (%d)\n", ret);
} else {
/* read data from 32 bits aligned buffer */
memcpy(buf, &shm_buf[offset % 4], bytes);
}
}
tee_shm_free(shm);
out_tee_session:
stm32_bsec_ta_close_session(ctx, session_id);
return ret;
}
/* stm32_bsec_optee_ta_write() - nvmem write access using PTA client driver */
int stm32_bsec_optee_ta_write(struct tee_context *ctx, unsigned int lower,
unsigned int offset, void *buf, size_t bytes)
{
struct tee_shm *shm;
struct tee_ioctl_invoke_arg arg;
struct tee_param param[2];
u8 *shm_buf;
int ret;
u32 session_id;
/* Allow only writing complete 32-bit aligned words */
if ((bytes % 4) || (offset % 4))
return -EINVAL;
ret = stm32_bsec_ta_open_session(ctx, &session_id);
if (ret)
return ret;
memset(&arg, 0, sizeof(arg));
memset(&param, 0, sizeof(param));
arg.func = PTA_BSEC_WRITE_MEM;
arg.session = session_id;
arg.num_params = 2;
param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
param[0].u.value.a = offset;
param[0].u.value.b = FUSE_ACCESS;
shm = tee_shm_alloc_kernel_buf(ctx, bytes);
if (IS_ERR(shm)) {
ret = PTR_ERR(shm);
goto out_tee_session;
}
param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
param[1].u.memref.shm = shm;
param[1].u.memref.size = bytes;
shm_buf = tee_shm_get_va(shm, 0);
if (IS_ERR(shm_buf)) {
ret = PTR_ERR(shm_buf);
pr_err("tee_shm_get_va failed for transmit (%d)\n", ret);
tee_shm_free(shm);
goto out_tee_session;
}
memcpy(shm_buf, buf, bytes);
ret = tee_client_invoke_func(ctx, &arg, param);
if (ret < 0 || arg.ret != 0) {
pr_err("TA_BSEC invoke failed TEE err:%#x, ret:%#x\n", arg.ret, ret);
if (!ret)
ret = -EIO;
}
pr_debug("Write OTPs %d to %zu, ret=%d\n", offset / 4, (offset + bytes) / 4, ret);
/* Lock the upper OTPs with ECC protection, word programming only */
if (!ret && ((offset + bytes) >= (lower * 4))) {
u32 start, nb_lock;
u32 *lock = (u32 *)shm_buf;
int i;
/*
* Don't lock the lower OTPs: they have no ECC protection and use
* incremental bit programming, so a second write is allowed.
*/
start = max_t(u32, offset, lower * 4);
nb_lock = (offset + bytes - start) / 4;
param[0].u.value.a = start;
param[0].u.value.b = LOCK_ACCESS;
param[1].u.memref.size = nb_lock * 4;
for (i = 0; i < nb_lock; i++)
lock[i] = LOCK_PERM;
ret = tee_client_invoke_func(ctx, &arg, param);
if (ret < 0 || arg.ret != 0) {
pr_err("TA_BSEC invoke failed TEE err:%#x, ret:%#x\n", arg.ret, ret);
if (!ret)
ret = -EIO;
}
pr_debug("Lock upper OTPs %d to %d, ret=%d\n",
start / 4, start / 4 + nb_lock, ret);
}
tee_shm_free(shm);
out_tee_session:
stm32_bsec_ta_close_session(ctx, session_id);
return ret;
}
| linux-master | drivers/nvmem/stm32-bsec-optee-ta.c |
// SPDX-License-Identifier: GPL-2.0
/*
* The OCOTP driver for Sunplus SP7021
*
* Copyright (C) 2019 Sunplus Technology Inc., All rights reserved.
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>
/*
* OTP memory
* Each bank contains 4 words (32 bits).
* Bank 0 starts at offset 0 from the base.
*/
#define OTP_WORDS_PER_BANK 4
#define OTP_WORD_SIZE sizeof(u32)
#define OTP_BIT_ADDR_OF_BANK (8 * OTP_WORD_SIZE * OTP_WORDS_PER_BANK)
#define QAC628_OTP_NUM_BANKS 8
#define QAC628_OTP_SIZE (QAC628_OTP_NUM_BANKS * OTP_WORDS_PER_BANK * OTP_WORD_SIZE)
#define OTP_READ_TIMEOUT_US 200000
/* HB_GPIO */
#define ADDRESS_8_DATA 0x20
/* OTP_RX */
#define OTP_CONTROL_2 0x48
#define OTP_RD_PERIOD GENMASK(15, 8)
#define OTP_RD_PERIOD_MASK ~GENMASK(15, 8)
#define CPU_CLOCK FIELD_PREP(OTP_RD_PERIOD, 30)
#define SEL_BAK_KEY2 BIT(5)
#define SEL_BAK_KEY2_MASK ~BIT(5)
#define SW_TRIM_EN BIT(4)
#define SW_TRIM_EN_MASK ~BIT(4)
#define SEL_BAK_KEY BIT(3)
#define SEL_BAK_KEY_MASK ~BIT(3)
#define OTP_READ BIT(2)
#define OTP_LOAD_SECURE_DATA BIT(1)
#define OTP_LOAD_SECURE_DATA_MASK ~BIT(1)
#define OTP_DO_CRC BIT(0)
#define OTP_DO_CRC_MASK ~BIT(0)
#define OTP_STATUS 0x4c
#define OTP_READ_DONE BIT(4)
#define OTP_READ_DONE_MASK ~BIT(4)
#define OTP_LOAD_SECURE_DONE_MASK ~BIT(2)
#define OTP_READ_ADDRESS 0x50
enum base_type {
HB_GPIO,
OTPRX,
BASEMAX,
};
struct sp_ocotp_priv {
struct device *dev;
void __iomem *base[BASEMAX];
struct clk *clk;
};
struct sp_ocotp_data {
int size;
};
static const struct sp_ocotp_data sp_otp_v0 = {
.size = QAC628_OTP_SIZE,
};
static int sp_otp_read_real(struct sp_ocotp_priv *otp, int addr, char *value)
{
unsigned int addr_data;
unsigned int byte_shift;
unsigned int status;
int ret;
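/*
* Split the byte offset into the word index within a bank (addr_data),
* the byte within that word (byte_shift) and the bit address of the bank
* itself (addr).
*/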
addr_data = addr % (OTP_WORD_SIZE * OTP_WORDS_PER_BANK);
addr_data = addr_data / OTP_WORD_SIZE;
byte_shift = addr % (OTP_WORD_SIZE * OTP_WORDS_PER_BANK);
byte_shift = byte_shift % OTP_WORD_SIZE;
addr = addr / (OTP_WORD_SIZE * OTP_WORDS_PER_BANK);
addr = addr * OTP_BIT_ADDR_OF_BANK;
writel(readl(otp->base[OTPRX] + OTP_STATUS) & OTP_READ_DONE_MASK &
OTP_LOAD_SECURE_DONE_MASK, otp->base[OTPRX] + OTP_STATUS);
writel(addr, otp->base[OTPRX] + OTP_READ_ADDRESS);
writel(readl(otp->base[OTPRX] + OTP_CONTROL_2) | OTP_READ,
otp->base[OTPRX] + OTP_CONTROL_2);
writel(readl(otp->base[OTPRX] + OTP_CONTROL_2) & SEL_BAK_KEY2_MASK & SW_TRIM_EN_MASK
& SEL_BAK_KEY_MASK & OTP_LOAD_SECURE_DATA_MASK & OTP_DO_CRC_MASK,
otp->base[OTPRX] + OTP_CONTROL_2);
writel((readl(otp->base[OTPRX] + OTP_CONTROL_2) & OTP_RD_PERIOD_MASK) | CPU_CLOCK,
otp->base[OTPRX] + OTP_CONTROL_2);
ret = readl_poll_timeout(otp->base[OTPRX] + OTP_STATUS, status,
status & OTP_READ_DONE, 10, OTP_READ_TIMEOUT_US);
if (ret < 0)
return ret;
*value = (readl(otp->base[HB_GPIO] + ADDRESS_8_DATA + addr_data * OTP_WORD_SIZE)
>> (8 * byte_shift)) & 0xff;
return ret;
}
static int sp_ocotp_read(void *priv, unsigned int offset, void *value, size_t bytes)
{
struct sp_ocotp_priv *otp = priv;
unsigned int addr;
char *buf = value;
char val[4];
int ret;
ret = clk_enable(otp->clk);
if (ret)
return ret;
*buf = 0;
for (addr = offset; addr < (offset + bytes); addr++) {
ret = sp_otp_read_real(otp, addr, val);
if (ret < 0) {
dev_err(otp->dev, "OTP read fail:%d at %d", ret, addr);
goto disable_clk;
}
*buf++ = *val;
}
disable_clk:
clk_disable(otp->clk);
return ret;
}
static struct nvmem_config sp_ocotp_nvmem_config = {
.name = "sp-ocotp",
.read_only = true,
.word_size = 1,
.size = QAC628_OTP_SIZE,
.stride = 1,
.reg_read = sp_ocotp_read,
.owner = THIS_MODULE,
};
static int sp_ocotp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct nvmem_device *nvmem;
struct sp_ocotp_priv *otp;
struct resource *res;
int ret;
otp = devm_kzalloc(dev, sizeof(*otp), GFP_KERNEL);
if (!otp)
return -ENOMEM;
otp->dev = dev;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hb_gpio");
otp->base[HB_GPIO] = devm_ioremap_resource(dev, res);
if (IS_ERR(otp->base[HB_GPIO]))
return PTR_ERR(otp->base[HB_GPIO]);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "otprx");
otp->base[OTPRX] = devm_ioremap_resource(dev, res);
if (IS_ERR(otp->base[OTPRX]))
return PTR_ERR(otp->base[OTPRX]);
otp->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(otp->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(otp->clk),
"devm_clk_get fail\n");
ret = clk_prepare(otp->clk);
if (ret < 0) {
dev_err(dev, "failed to prepare clk: %d\n", ret);
return ret;
}
sp_ocotp_nvmem_config.priv = otp;
sp_ocotp_nvmem_config.dev = dev;
nvmem = devm_nvmem_register(dev, &sp_ocotp_nvmem_config);
if (IS_ERR(nvmem)) {
ret = dev_err_probe(&pdev->dev, PTR_ERR(nvmem),
"register nvmem device fail\n");
goto err;
}
platform_set_drvdata(pdev, nvmem);
dev_dbg(dev, "banks:%d x wpb:%d x wsize:%d = %d",
(int)QAC628_OTP_NUM_BANKS, (int)OTP_WORDS_PER_BANK,
(int)OTP_WORD_SIZE, (int)QAC628_OTP_SIZE);
return 0;
err:
clk_unprepare(otp->clk);
return ret;
}
static const struct of_device_id sp_ocotp_dt_ids[] = {
{ .compatible = "sunplus,sp7021-ocotp", .data = &sp_otp_v0 },
{ }
};
MODULE_DEVICE_TABLE(of, sp_ocotp_dt_ids);
static struct platform_driver sp_otp_driver = {
.probe = sp_ocotp_probe,
.driver = {
.name = "sunplus,sp7021-ocotp",
.of_match_table = sp_ocotp_dt_ids,
}
};
module_platform_driver(sp_otp_driver);
MODULE_AUTHOR("Vincent Shih <[email protected]>");
MODULE_DESCRIPTION("Sunplus On-Chip OTP driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/nvmem/sunplus-ocotp.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* EEPROM driver for RAVE SP
*
* Copyright (C) 2018 Zodiac Inflight Innovations
*
*/
#include <linux/kernel.h>
#include <linux/mfd/rave-sp.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
/**
* enum rave_sp_eeprom_access_type - Supported types of EEPROM access
*
* @RAVE_SP_EEPROM_WRITE: EEPROM write
* @RAVE_SP_EEPROM_READ: EEPROM read
*/
enum rave_sp_eeprom_access_type {
RAVE_SP_EEPROM_WRITE = 0,
RAVE_SP_EEPROM_READ = 1,
};
/**
* enum rave_sp_eeprom_header_size - EEPROM command header sizes
*
* @RAVE_SP_EEPROM_HEADER_SMALL: EEPROM header size for "small" devices (< 8K)
* @RAVE_SP_EEPROM_HEADER_BIG: EEPROM header size for "big" devices (> 8K)
*/
enum rave_sp_eeprom_header_size {
RAVE_SP_EEPROM_HEADER_SMALL = 4U,
RAVE_SP_EEPROM_HEADER_BIG = 5U,
};
#define RAVE_SP_EEPROM_HEADER_MAX RAVE_SP_EEPROM_HEADER_BIG
#define RAVE_SP_EEPROM_PAGE_SIZE 32U
/**
* struct rave_sp_eeprom_page - RAVE SP EEPROM page
*
* @type: Access type (see enum rave_sp_eeprom_access_type)
* @success: Success flag (Success = 1, Failure = 0)
* @data: Read data
*
* Note this structure corresponds to RSP_*_EEPROM payload from RAVE
* SP ICD
*/
struct rave_sp_eeprom_page {
u8 type;
u8 success;
u8 data[RAVE_SP_EEPROM_PAGE_SIZE];
} __packed;
/**
* struct rave_sp_eeprom - RAVE SP EEPROM device
*
* @sp: Pointer to parent RAVE SP device
* @mutex: Lock protecting access to EEPROM
* @address: EEPROM device address
* @header_size: Size of EEPROM command header for this device
* @dev: Pointer to corresponding struct device used for logging
*/
struct rave_sp_eeprom {
struct rave_sp *sp;
struct mutex mutex;
u8 address;
unsigned int header_size;
struct device *dev;
};
/**
* rave_sp_eeprom_io - Low-level part of EEPROM page access
*
* @eeprom: EEPROM device to write to
* @type: EEPROM access type (read or write)
* @idx: number of the EEPROM page
* @page: Data to write or buffer to store result (via page->data)
*
* This function does all of the low-level work required to perform a
* EEPROM access. This includes formatting correct command payload,
* sending it and checking received results.
*
* Returns zero in case of success or negative error code in
* case of failure.
*/
static int rave_sp_eeprom_io(struct rave_sp_eeprom *eeprom,
enum rave_sp_eeprom_access_type type,
u16 idx,
struct rave_sp_eeprom_page *page)
{
const bool is_write = type == RAVE_SP_EEPROM_WRITE;
const unsigned int data_size = is_write ? sizeof(page->data) : 0;
const unsigned int cmd_size = eeprom->header_size + data_size;
const unsigned int rsp_size =
is_write ? sizeof(*page) - sizeof(page->data) : sizeof(*page);
unsigned int offset = 0;
u8 cmd[RAVE_SP_EEPROM_HEADER_MAX + sizeof(page->data)];
int ret;
if (WARN_ON(cmd_size > sizeof(cmd)))
return -EINVAL;
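/*
* Command layout: EEPROM address, a zero byte, the access type, the page
* index (LSB, plus an MSB only for "big" EEPROMs) and, for writes, the
* page data.
*/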
cmd[offset++] = eeprom->address;
cmd[offset++] = 0;
cmd[offset++] = type;
cmd[offset++] = idx;
/*
* If there's still room in this command's header it means we
* are talking to an EEPROM that uses 16-bit page numbers and we
* have to specify the index's MSB in the payload as well.
*/
if (offset < eeprom->header_size)
cmd[offset++] = idx >> 8;
/*
* Copy our data to write to command buffer first. In case of
* a read data_size should be zero and memcpy would become a
* no-op
*/
memcpy(&cmd[offset], page->data, data_size);
ret = rave_sp_exec(eeprom->sp, cmd, cmd_size, page, rsp_size);
if (ret)
return ret;
if (page->type != type)
return -EPROTO;
if (!page->success)
return -EIO;
return 0;
}
/**
* rave_sp_eeprom_page_access - Access single EEPROM page
*
* @eeprom: EEPROM device to access
* @type: Access type to perform (read or write)
* @offset: Offset within EEPROM to access
* @data: Data buffer
* @data_len: Size of the data buffer
*
* This function performs a generic access to a single page or a
* portion thereof. Requested access MUST NOT cross the EEPROM page
* boundary.
*
* Returns zero in case of success or negative error code in
* case of failure.
*/
static int
rave_sp_eeprom_page_access(struct rave_sp_eeprom *eeprom,
enum rave_sp_eeprom_access_type type,
unsigned int offset, u8 *data,
size_t data_len)
{
const unsigned int page_offset = offset % RAVE_SP_EEPROM_PAGE_SIZE;
const unsigned int page_nr = offset / RAVE_SP_EEPROM_PAGE_SIZE;
struct rave_sp_eeprom_page page;
int ret;
/*
* This function will not work if data access we've been asked
* to do is crossing EEPROM page boundary. Normally this
* should never happen and getting here would indicate a bug
* in the code.
*/
if (WARN_ON(data_len > sizeof(page.data) - page_offset))
return -EINVAL;
if (type == RAVE_SP_EEPROM_WRITE) {
/*
* If doing a partial write we need to do a read first
* to fill the rest of the page with correct data.
*/
if (data_len < RAVE_SP_EEPROM_PAGE_SIZE) {
ret = rave_sp_eeprom_io(eeprom, RAVE_SP_EEPROM_READ,
page_nr, &page);
if (ret)
return ret;
}
memcpy(&page.data[page_offset], data, data_len);
}
ret = rave_sp_eeprom_io(eeprom, type, page_nr, &page);
if (ret)
return ret;
/*
* Since we receive the result of the read via 'page.data'
* buffer we need to copy that to 'data'
*/
if (type == RAVE_SP_EEPROM_READ)
memcpy(data, &page.data[page_offset], data_len);
return 0;
}
/**
* rave_sp_eeprom_access - Access EEPROM data
*
* @eeprom: EEPROM device to access
* @type: Access type to perform (read or write)
* @offset: Offset within EEPROM to access
* @data: Data buffer
* @data_len: Size of the data buffer
*
* This function performs a generic access (either read or write) at
* arbitrary offset (not necessary page aligned) of arbitrary length
* (is not constrained by EEPROM page size).
*
* Returns zero in case of success or negative error code in case of
* failure.
*/
static int rave_sp_eeprom_access(struct rave_sp_eeprom *eeprom,
enum rave_sp_eeprom_access_type type,
unsigned int offset, u8 *data,
unsigned int data_len)
{
unsigned int residue;
unsigned int chunk;
unsigned int head;
int ret;
mutex_lock(&eeprom->mutex);
head = offset % RAVE_SP_EEPROM_PAGE_SIZE;
residue = data_len;
do {
/*
* On the first iteration, if we are doing an access that is
* not 32-byte aligned, we need to access only data up
* to a page boundary to avoid crossing it in
* rave_sp_eeprom_page_access()
*/
if (unlikely(head)) {
chunk = RAVE_SP_EEPROM_PAGE_SIZE - head;
/*
* This can only happen once per
* rave_sp_eeprom_access() call, so we set
* head to zero to process all the other
* iterations normally.
*/
head = 0;
} else {
chunk = RAVE_SP_EEPROM_PAGE_SIZE;
}
/*
* We should never read more than 'residue' bytes
*/
chunk = min(chunk, residue);
ret = rave_sp_eeprom_page_access(eeprom, type, offset,
data, chunk);
if (ret)
goto out;
residue -= chunk;
offset += chunk;
data += chunk;
} while (residue);
out:
mutex_unlock(&eeprom->mutex);
return ret;
}
static int rave_sp_eeprom_reg_read(void *eeprom, unsigned int offset,
void *val, size_t bytes)
{
return rave_sp_eeprom_access(eeprom, RAVE_SP_EEPROM_READ,
offset, val, bytes);
}
static int rave_sp_eeprom_reg_write(void *eeprom, unsigned int offset,
void *val, size_t bytes)
{
return rave_sp_eeprom_access(eeprom, RAVE_SP_EEPROM_WRITE,
offset, val, bytes);
}
static int rave_sp_eeprom_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rave_sp *sp = dev_get_drvdata(dev->parent);
struct device_node *np = dev->of_node;
struct nvmem_config config = { 0 };
struct rave_sp_eeprom *eeprom;
struct nvmem_device *nvmem;
u32 reg[2], size;
if (of_property_read_u32_array(np, "reg", reg, ARRAY_SIZE(reg))) {
dev_err(dev, "Failed to parse \"reg\" property\n");
return -EINVAL;
}
size = reg[1];
/*
* Per ICD, we have no more than 2 bytes to specify EEPROM
* page.
*/
if (size > U16_MAX * RAVE_SP_EEPROM_PAGE_SIZE) {
dev_err(dev, "Specified size is too big\n");
return -EINVAL;
}
eeprom = devm_kzalloc(dev, sizeof(*eeprom), GFP_KERNEL);
if (!eeprom)
return -ENOMEM;
eeprom->address = reg[0];
eeprom->sp = sp;
eeprom->dev = dev;
if (size > SZ_8K)
eeprom->header_size = RAVE_SP_EEPROM_HEADER_BIG;
else
eeprom->header_size = RAVE_SP_EEPROM_HEADER_SMALL;
mutex_init(&eeprom->mutex);
config.id = -1;
of_property_read_string(np, "zii,eeprom-name", &config.name);
config.priv = eeprom;
config.dev = dev;
config.size = size;
config.reg_read = rave_sp_eeprom_reg_read;
config.reg_write = rave_sp_eeprom_reg_write;
config.word_size = 1;
config.stride = 1;
nvmem = devm_nvmem_register(dev, &config);
return PTR_ERR_OR_ZERO(nvmem);
}
static const struct of_device_id rave_sp_eeprom_of_match[] = {
{ .compatible = "zii,rave-sp-eeprom" },
{}
};
MODULE_DEVICE_TABLE(of, rave_sp_eeprom_of_match);
static struct platform_driver rave_sp_eeprom_driver = {
.probe = rave_sp_eeprom_probe,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = rave_sp_eeprom_of_match,
},
};
module_platform_driver(rave_sp_eeprom_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andrey Vostrikov <[email protected]>");
MODULE_AUTHOR("Nikita Yushchenko <[email protected]>");
MODULE_AUTHOR("Andrey Smirnov <[email protected]>");
MODULE_DESCRIPTION("RAVE SP EEPROM driver");
| linux-master | drivers/nvmem/rave-sp-eeprom.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* i.MX6 OCOTP fusebox driver
*
* Copyright (c) 2015 Pengutronix, Philipp Zabel <[email protected]>
*
* Copyright 2019 NXP
*
* Based on the barebox ocotp driver,
* Copyright (c) 2010 Baruch Siach <[email protected]>,
* Orex Computed Radiography
*
* Write support based on the fsl_otp driver,
* Copyright (C) 2010-2013 Freescale Semiconductor, Inc
*/
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/delay.h>
#define IMX_OCOTP_OFFSET_B0W0 0x400 /* Offset from base address of the
* OTP Bank0 Word0
*/
#define IMX_OCOTP_OFFSET_PER_WORD 0x10 /* Offset between the start addr
* of two consecutive OTP words.
*/
#define IMX_OCOTP_ADDR_CTRL 0x0000
#define IMX_OCOTP_ADDR_CTRL_SET 0x0004
#define IMX_OCOTP_ADDR_CTRL_CLR 0x0008
#define IMX_OCOTP_ADDR_TIMING 0x0010
#define IMX_OCOTP_ADDR_DATA0 0x0020
#define IMX_OCOTP_ADDR_DATA1 0x0030
#define IMX_OCOTP_ADDR_DATA2 0x0040
#define IMX_OCOTP_ADDR_DATA3 0x0050
#define IMX_OCOTP_BM_CTRL_ADDR 0x000000FF
#define IMX_OCOTP_BM_CTRL_BUSY 0x00000100
#define IMX_OCOTP_BM_CTRL_ERROR 0x00000200
#define IMX_OCOTP_BM_CTRL_REL_SHADOWS 0x00000400
#define IMX_OCOTP_BM_CTRL_ADDR_8MP 0x000001FF
#define IMX_OCOTP_BM_CTRL_BUSY_8MP 0x00000200
#define IMX_OCOTP_BM_CTRL_ERROR_8MP 0x00000400
#define IMX_OCOTP_BM_CTRL_REL_SHADOWS_8MP 0x00000800
#define IMX_OCOTP_BM_CTRL_DEFAULT \
{ \
.bm_addr = IMX_OCOTP_BM_CTRL_ADDR, \
.bm_busy = IMX_OCOTP_BM_CTRL_BUSY, \
.bm_error = IMX_OCOTP_BM_CTRL_ERROR, \
.bm_rel_shadows = IMX_OCOTP_BM_CTRL_REL_SHADOWS,\
}
#define IMX_OCOTP_BM_CTRL_8MP \
{ \
.bm_addr = IMX_OCOTP_BM_CTRL_ADDR_8MP, \
.bm_busy = IMX_OCOTP_BM_CTRL_BUSY_8MP, \
.bm_error = IMX_OCOTP_BM_CTRL_ERROR_8MP, \
.bm_rel_shadows = IMX_OCOTP_BM_CTRL_REL_SHADOWS_8MP,\
}
#define TIMING_STROBE_PROG_US 10 /* Min time to blow a fuse */
#define TIMING_STROBE_READ_NS 37 /* Min time before read */
#define TIMING_RELAX_NS 17
#define DEF_FSOURCE 1001 /* > 1000 ns */
#define DEF_STROBE_PROG 10000 /* IPG clocks */
#define IMX_OCOTP_WR_UNLOCK 0x3E770000
#define IMX_OCOTP_READ_LOCKED_VAL 0xBADABADA
static DEFINE_MUTEX(ocotp_mutex);
struct ocotp_priv {
struct device *dev;
struct clk *clk;
void __iomem *base;
const struct ocotp_params *params;
struct nvmem_config *config;
};
struct ocotp_ctrl_reg {
u32 bm_addr;
u32 bm_busy;
u32 bm_error;
u32 bm_rel_shadows;
};
struct ocotp_params {
unsigned int nregs;
unsigned int bank_address_words;
void (*set_timing)(struct ocotp_priv *priv);
struct ocotp_ctrl_reg ctrl;
};
static int imx_ocotp_wait_for_busy(struct ocotp_priv *priv, u32 flags)
{
int count;
u32 c, mask;
u32 bm_ctrl_busy, bm_ctrl_error;
void __iomem *base = priv->base;
bm_ctrl_busy = priv->params->ctrl.bm_busy;
bm_ctrl_error = priv->params->ctrl.bm_error;
mask = bm_ctrl_busy | bm_ctrl_error | flags;
for (count = 10000; count >= 0; count--) {
c = readl(base + IMX_OCOTP_ADDR_CTRL);
if (!(c & mask))
break;
cpu_relax();
}
if (count < 0) {
/* HW_OCOTP_CTRL[ERROR] will be set under the following
* conditions:
* - A write is performed to a shadow register during a shadow
* reload (essentially, while HW_OCOTP_CTRL[RELOAD_SHADOWS] is
* set). In addition, the contents of the shadow register shall
* not be updated.
* - A write is performed to a shadow register which has been
* locked.
* - A read is performed from a shadow register which has
* been read locked.
* - A program is performed to a fuse word which has been locked.
* - A read is performed from a fuse word which has been read
* locked.
*/
if (c & bm_ctrl_error)
return -EPERM;
return -ETIMEDOUT;
}
return 0;
}
static void imx_ocotp_clr_err_if_set(struct ocotp_priv *priv)
{
u32 c, bm_ctrl_error;
void __iomem *base = priv->base;
bm_ctrl_error = priv->params->ctrl.bm_error;
c = readl(base + IMX_OCOTP_ADDR_CTRL);
if (!(c & bm_ctrl_error))
return;
writel(bm_ctrl_error, base + IMX_OCOTP_ADDR_CTRL_CLR);
}
static int imx_ocotp_read(void *context, unsigned int offset,
void *val, size_t bytes)
{
struct ocotp_priv *priv = context;
unsigned int count;
u8 *buf, *p;
int i, ret;
u32 index, num_bytes;
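/*
* Convert the byte-based request into whole 32-bit OTP words: 'index' is
* the first word to read and 'num_bytes' covers the requested range
* rounded out to word boundaries.
*/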
index = offset >> 2;
num_bytes = round_up((offset % 4) + bytes, 4);
count = num_bytes >> 2;
if (count > (priv->params->nregs - index))
count = priv->params->nregs - index;
p = kzalloc(num_bytes, GFP_KERNEL);
if (!p)
return -ENOMEM;
mutex_lock(&ocotp_mutex);
buf = p;
ret = clk_prepare_enable(priv->clk);
if (ret < 0) {
mutex_unlock(&ocotp_mutex);
dev_err(priv->dev, "failed to prepare/enable ocotp clk\n");
kfree(p);
return ret;
}
ret = imx_ocotp_wait_for_busy(priv, 0);
if (ret < 0) {
dev_err(priv->dev, "timeout during read setup\n");
goto read_end;
}
for (i = index; i < (index + count); i++) {
*(u32 *)buf = readl(priv->base + IMX_OCOTP_OFFSET_B0W0 +
i * IMX_OCOTP_OFFSET_PER_WORD);
/* 47.3.1.2
* For "read locked" registers 0xBADABADA will be returned and
* HW_OCOTP_CTRL[ERROR] will be set. It must be cleared by
* software before any new write, read or reload access can be
* issued
*/
if (*((u32 *)buf) == IMX_OCOTP_READ_LOCKED_VAL)
imx_ocotp_clr_err_if_set(priv);
buf += 4;
}
index = offset % 4;
memcpy(val, &p[index], bytes);
read_end:
clk_disable_unprepare(priv->clk);
mutex_unlock(&ocotp_mutex);
kfree(p);
return ret;
}
static int imx_ocotp_cell_pp(void *context, const char *id, int index,
unsigned int offset, void *data, size_t bytes)
{
u8 *buf = data;
int i;
/* Deal with some post processing of nvmem cell data */
if (id && !strcmp(id, "mac-address"))
for (i = 0; i < bytes / 2; i++)
swap(buf[i], buf[bytes - i - 1]);
return 0;
}
static void imx_ocotp_set_imx6_timing(struct ocotp_priv *priv)
{
unsigned long clk_rate;
unsigned long strobe_read, relax, strobe_prog;
u32 timing;
/* 47.3.1.3.1
* Program HW_OCOTP_TIMING[STROBE_PROG] and HW_OCOTP_TIMING[RELAX]
* fields with timing values to match the current frequency of the
* ipg_clk. OTP writes will work at maximum bus frequencies as long
* as the HW_OCOTP_TIMING parameters are set correctly.
*
* Note: there are minimum timings required to ensure an OTP fuse burns
* correctly that are independent of the ipg_clk. Those values are not
* formally documented anywhere however, working from the minimum
* timings given in u-boot we can say:
*
* - Minimum STROBE_PROG time is 10 microseconds. Intuitively 10
* microseconds feels about right as representative of a minimum time
* to physically burn out a fuse.
*
* - Minimum STROBE_READ i.e. the time to wait post OTP fuse burn before
* performing another read is 37 nanoseconds
*
* - Minimum RELAX timing is 17 nanoseconds. This final RELAX minimum
* timing is not entirely clear; the documentation says "This
* count value specifies the time to add to all default timing
* parameters other than the Tpgm and Trd. It is given in number
* of ipg_clk periods." where Tpgm and Trd refer to STROBE_PROG
* and STROBE_READ respectively. What the other timing parameters
* are though, is not specified. Experience shows a zero RELAX
* value will mess up a re-load of the shadow registers post OTP
* burn.
*/
clk_rate = clk_get_rate(priv->clk);
relax = DIV_ROUND_UP(clk_rate * TIMING_RELAX_NS, 1000000000) - 1;
strobe_read = DIV_ROUND_UP(clk_rate * TIMING_STROBE_READ_NS,
1000000000);
strobe_read += 2 * (relax + 1) - 1;
strobe_prog = DIV_ROUND_CLOSEST(clk_rate * TIMING_STROBE_PROG_US,
1000000);
strobe_prog += 2 * (relax + 1) - 1;
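/*
* Pack the computed values into the timing register: STROBE_PROG in bits
* [11:0], RELAX in bits [15:12] and STROBE_READ in bits [21:16], keeping
* bits [27:22] from the current register value.
*/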
timing = readl(priv->base + IMX_OCOTP_ADDR_TIMING) & 0x0FC00000;
timing |= strobe_prog & 0x00000FFF;
timing |= (relax << 12) & 0x0000F000;
timing |= (strobe_read << 16) & 0x003F0000;
writel(timing, priv->base + IMX_OCOTP_ADDR_TIMING);
}
static void imx_ocotp_set_imx7_timing(struct ocotp_priv *priv)
{
unsigned long clk_rate;
u64 fsource, strobe_prog;
u32 timing;
/* i.MX 7Solo Applications Processor Reference Manual, Rev. 0.1
* 6.4.3.3
*/
clk_rate = clk_get_rate(priv->clk);
fsource = DIV_ROUND_UP_ULL((u64)clk_rate * DEF_FSOURCE,
NSEC_PER_SEC) + 1;
strobe_prog = DIV_ROUND_CLOSEST_ULL((u64)clk_rate * DEF_STROBE_PROG,
NSEC_PER_SEC) + 1;
timing = strobe_prog & 0x00000FFF;
timing |= (fsource << 12) & 0x000FF000;
writel(timing, priv->base + IMX_OCOTP_ADDR_TIMING);
}
static int imx_ocotp_write(void *context, unsigned int offset, void *val,
size_t bytes)
{
struct ocotp_priv *priv = context;
u32 *buf = val;
int ret;
u32 ctrl;
u8 waddr;
u8 word = 0;
/* allow only writing one complete OTP word at a time */
if ((bytes != priv->config->word_size) ||
(offset % priv->config->word_size))
return -EINVAL;
mutex_lock(&ocotp_mutex);
ret = clk_prepare_enable(priv->clk);
if (ret < 0) {
mutex_unlock(&ocotp_mutex);
dev_err(priv->dev, "failed to prepare/enable ocotp clk\n");
return ret;
}
/* Setup the write timing values */
priv->params->set_timing(priv);
/* 47.3.1.3.2
* Check that HW_OCOTP_CTRL[BUSY] and HW_OCOTP_CTRL[ERROR] are clear.
* Overlapped accesses are not supported by the controller. Any pending
* write or reload must be completed before a write access can be
* requested.
*/
ret = imx_ocotp_wait_for_busy(priv, 0);
if (ret < 0) {
dev_err(priv->dev, "timeout during timing setup\n");
goto write_end;
}
/* 47.3.1.3.3
* Write the requested address to HW_OCOTP_CTRL[ADDR] and program the
* unlock code into HW_OCOTP_CTRL[WR_UNLOCK]. This must be programmed
* for each write access. The lock code is documented in the register
* description. Both the unlock code and address can be written in the
* same operation.
*/
if (priv->params->bank_address_words != 0) {
/*
* In banked/i.MX7 mode the OTP register bank goes into waddr;
* see i.MX 7Solo Applications Processor Reference Manual, Rev.
* 0.1, section 6.4.3.1.
*/
offset = offset / priv->config->word_size;
waddr = offset / priv->params->bank_address_words;
word = offset & (priv->params->bank_address_words - 1);
} else {
/*
* Non-banked i.MX6 mode.
* OTP write/read address specifies one of 128 word address
* locations
*/
waddr = offset / 4;
}
ctrl = readl(priv->base + IMX_OCOTP_ADDR_CTRL);
ctrl &= ~priv->params->ctrl.bm_addr;
ctrl |= waddr & priv->params->ctrl.bm_addr;
ctrl |= IMX_OCOTP_WR_UNLOCK;
writel(ctrl, priv->base + IMX_OCOTP_ADDR_CTRL);
/* 47.3.1.3.4
* Write the data to the HW_OCOTP_DATA register. This will automatically
* set HW_OCOTP_CTRL[BUSY] and clear HW_OCOTP_CTRL[WR_UNLOCK]. To
* protect against programming the same OTP bit twice, OCOTP will first
* automatically read the fuse value in OTP and use it to mask the
* program data. The controller will use the masked program data to program
* a 32-bit word in the OTP per the address in HW_OCOTP_CTRL[ADDR]. Bit
* fields with 1's will result in that OTP bit being programmed. Bit
* fields with 0's will be ignored. At the same time that the write is
* accepted, the controller makes an internal copy of
* HW_OCOTP_CTRL[ADDR] which cannot be updated until the next write
* sequence is initiated. This copy guarantees that erroneous writes to
* HW_OCOTP_CTRL[ADDR] will not affect an active write operation. It
* should also be noted that during the programming HW_OCOTP_DATA will
* shift right (with zero fill). This shifting is required to program
* the OTP serially. During the write operation, HW_OCOTP_DATA cannot be
* modified.
* Note: on i.MX7 there are four data fields to write for banked write
* with the fuse blowing operation only taking place after data0
* has been written. This is why data0 must always be the last
* register written.
*/
if (priv->params->bank_address_words != 0) {
/* Banked/i.MX7 mode */
switch (word) {
case 0:
writel(0, priv->base + IMX_OCOTP_ADDR_DATA1);
writel(0, priv->base + IMX_OCOTP_ADDR_DATA2);
writel(0, priv->base + IMX_OCOTP_ADDR_DATA3);
writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA0);
break;
case 1:
writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA1);
writel(0, priv->base + IMX_OCOTP_ADDR_DATA2);
writel(0, priv->base + IMX_OCOTP_ADDR_DATA3);
writel(0, priv->base + IMX_OCOTP_ADDR_DATA0);
break;
case 2:
writel(0, priv->base + IMX_OCOTP_ADDR_DATA1);
writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA2);
writel(0, priv->base + IMX_OCOTP_ADDR_DATA3);
writel(0, priv->base + IMX_OCOTP_ADDR_DATA0);
break;
case 3:
writel(0, priv->base + IMX_OCOTP_ADDR_DATA1);
writel(0, priv->base + IMX_OCOTP_ADDR_DATA2);
writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA3);
writel(0, priv->base + IMX_OCOTP_ADDR_DATA0);
break;
}
} else {
/* Non-banked i.MX6 mode */
writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA0);
}
/* 47.4.1.4.5
* Once complete, the controller will clear BUSY. A write request to a
* protected or locked region will result in no OTP access and no
* setting of HW_OCOTP_CTRL[BUSY]. In addition HW_OCOTP_CTRL[ERROR] will
* be set. It must be cleared by software before any new write access
* can be issued.
*/
ret = imx_ocotp_wait_for_busy(priv, 0);
if (ret < 0) {
if (ret == -EPERM) {
dev_err(priv->dev, "failed write to locked region");
imx_ocotp_clr_err_if_set(priv);
} else {
dev_err(priv->dev, "timeout during data write\n");
}
goto write_end;
}
/* 47.3.1.4
* Write Postamble: Due to internal electrical characteristics of the
* OTP during writes, all OTP operations following a write must be
* separated by 2 us after the clearing of HW_OCOTP_CTRL_BUSY following
* the write.
*/
udelay(2);
/* reload all shadow registers */
writel(priv->params->ctrl.bm_rel_shadows,
priv->base + IMX_OCOTP_ADDR_CTRL_SET);
ret = imx_ocotp_wait_for_busy(priv,
priv->params->ctrl.bm_rel_shadows);
if (ret < 0)
dev_err(priv->dev, "timeout during shadow register reload\n");
write_end:
clk_disable_unprepare(priv->clk);
mutex_unlock(&ocotp_mutex);
return ret < 0 ? ret : bytes;
}
static struct nvmem_config imx_ocotp_nvmem_config = {
.name = "imx-ocotp",
.read_only = false,
.word_size = 4,
.stride = 1,
.reg_read = imx_ocotp_read,
.reg_write = imx_ocotp_write,
};
static const struct ocotp_params imx6q_params = {
.nregs = 128,
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
.ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
};
static const struct ocotp_params imx6sl_params = {
.nregs = 64,
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
.ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
};
static const struct ocotp_params imx6sll_params = {
.nregs = 128,
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
.ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
};
static const struct ocotp_params imx6sx_params = {
.nregs = 128,
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
.ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
};
static const struct ocotp_params imx6ul_params = {
.nregs = 128,
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
.ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
};
static const struct ocotp_params imx6ull_params = {
.nregs = 64,
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
.ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
};
static const struct ocotp_params imx7d_params = {
.nregs = 64,
.bank_address_words = 4,
.set_timing = imx_ocotp_set_imx7_timing,
.ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
};
static const struct ocotp_params imx7ulp_params = {
.nregs = 256,
.bank_address_words = 0,
.ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
};
static const struct ocotp_params imx8mq_params = {
.nregs = 256,
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
.ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
};
static const struct ocotp_params imx8mm_params = {
.nregs = 256,
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
.ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
};
static const struct ocotp_params imx8mn_params = {
.nregs = 256,
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
.ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
};
static const struct ocotp_params imx8mp_params = {
.nregs = 384,
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
.ctrl = IMX_OCOTP_BM_CTRL_8MP,
};
static const struct of_device_id imx_ocotp_dt_ids[] = {
{ .compatible = "fsl,imx6q-ocotp", .data = &imx6q_params },
{ .compatible = "fsl,imx6sl-ocotp", .data = &imx6sl_params },
{ .compatible = "fsl,imx6sx-ocotp", .data = &imx6sx_params },
{ .compatible = "fsl,imx6ul-ocotp", .data = &imx6ul_params },
{ .compatible = "fsl,imx6ull-ocotp", .data = &imx6ull_params },
{ .compatible = "fsl,imx7d-ocotp", .data = &imx7d_params },
{ .compatible = "fsl,imx6sll-ocotp", .data = &imx6sll_params },
{ .compatible = "fsl,imx7ulp-ocotp", .data = &imx7ulp_params },
{ .compatible = "fsl,imx8mq-ocotp", .data = &imx8mq_params },
{ .compatible = "fsl,imx8mm-ocotp", .data = &imx8mm_params },
{ .compatible = "fsl,imx8mn-ocotp", .data = &imx8mn_params },
{ .compatible = "fsl,imx8mp-ocotp", .data = &imx8mp_params },
{ },
};
MODULE_DEVICE_TABLE(of, imx_ocotp_dt_ids);
static void imx_ocotp_fixup_cell_info(struct nvmem_device *nvmem,
struct nvmem_layout *layout,
struct nvmem_cell_info *cell)
{
cell->read_post_process = imx_ocotp_cell_pp;
}
static struct nvmem_layout imx_ocotp_layout = {
.fixup_cell_info = imx_ocotp_fixup_cell_info,
};
static int imx_ocotp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ocotp_priv *priv;
struct nvmem_device *nvmem;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->dev = dev;
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
priv->clk = devm_clk_get(dev, NULL);
if (IS_ERR(priv->clk))
return PTR_ERR(priv->clk);
priv->params = of_device_get_match_data(&pdev->dev);
imx_ocotp_nvmem_config.size = 4 * priv->params->nregs;
imx_ocotp_nvmem_config.dev = dev;
imx_ocotp_nvmem_config.priv = priv;
imx_ocotp_nvmem_config.layout = &imx_ocotp_layout;
priv->config = &imx_ocotp_nvmem_config;
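/* Clear any stale error flag before registering the nvmem device */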
clk_prepare_enable(priv->clk);
imx_ocotp_clr_err_if_set(priv);
clk_disable_unprepare(priv->clk);
nvmem = devm_nvmem_register(dev, &imx_ocotp_nvmem_config);
return PTR_ERR_OR_ZERO(nvmem);
}
static struct platform_driver imx_ocotp_driver = {
.probe = imx_ocotp_probe,
.driver = {
.name = "imx_ocotp",
.of_match_table = imx_ocotp_dt_ids,
},
};
module_platform_driver(imx_ocotp_driver);
MODULE_AUTHOR("Philipp Zabel <[email protected]>");
MODULE_DESCRIPTION("i.MX6/i.MX7 OCOTP fuse box driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/nvmem/imx-ocotp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* STM32 Factory-programmed memory read access driver
*
* Copyright (C) 2017, STMicroelectronics - All Rights Reserved
* Author: Fabrice Gasnier <[email protected]> for STMicroelectronics.
*/
#include <linux/arm-smccc.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of_device.h>
#include <linux/tee_drv.h>
#include "stm32-bsec-optee-ta.h"
/* BSEC secure service access from non-secure */
#define STM32_SMC_BSEC 0x82001003
#define STM32_SMC_READ_SHADOW 0x01
#define STM32_SMC_PROG_OTP 0x02
#define STM32_SMC_WRITE_SHADOW 0x03
#define STM32_SMC_READ_OTP 0x04
/* shadow registers offset */
#define STM32MP15_BSEC_DATA0 0x200
struct stm32_romem_cfg {
int size;
u8 lower;
bool ta;
};
struct stm32_romem_priv {
void __iomem *base;
struct nvmem_config cfg;
u8 lower;
struct tee_context *ctx;
};
static int stm32_romem_read(void *context, unsigned int offset, void *buf,
size_t bytes)
{
struct stm32_romem_priv *priv = context;
u8 *buf8 = buf;
int i;
for (i = offset; i < offset + bytes; i++)
*buf8++ = readb_relaxed(priv->base + i);
return 0;
}
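/*
* Invoke the BSEC secure service via SMC; when 'result' is non-NULL it
* receives the value returned in a1.
*/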
static int stm32_bsec_smc(u8 op, u32 otp, u32 data, u32 *result)
{
#if IS_ENABLED(CONFIG_HAVE_ARM_SMCCC)
struct arm_smccc_res res;
arm_smccc_smc(STM32_SMC_BSEC, op, otp, data, 0, 0, 0, 0, &res);
if (res.a0)
return -EIO;
if (result)
*result = (u32)res.a1;
return 0;
#else
return -ENXIO;
#endif
}
static int stm32_bsec_read(void *context, unsigned int offset, void *buf,
size_t bytes)
{
struct stm32_romem_priv *priv = context;
struct device *dev = priv->cfg.dev;
u32 roffset, rbytes, val;
u8 *buf8 = buf, *val8 = (u8 *)&val;
int i, j = 0, ret, skip_bytes, size;
/* Round unaligned access to 32-bits */
roffset = rounddown(offset, 4);
skip_bytes = offset & 0x3;
rbytes = roundup(bytes + skip_bytes, 4);
if (roffset + rbytes > priv->cfg.size)
return -EINVAL;
for (i = roffset; (i < roffset + rbytes); i += 4) {
u32 otp = i >> 2;
if (otp < priv->lower) {
/* read lower data from shadow registers */
val = readl_relaxed(
priv->base + STM32MP15_BSEC_DATA0 + i);
} else {
ret = stm32_bsec_smc(STM32_SMC_READ_SHADOW, otp, 0,
&val);
if (ret) {
dev_err(dev, "Can't read data%d (%d)\n", otp,
ret);
return ret;
}
}
/* skip first bytes in case of unaligned read */
if (skip_bytes)
size = min(bytes, (size_t)(4 - skip_bytes));
else
size = min(bytes, (size_t)4);
memcpy(&buf8[j], &val8[skip_bytes], size);
bytes -= size;
j += size;
skip_bytes = 0;
}
return 0;
}
static int stm32_bsec_write(void *context, unsigned int offset, void *buf,
size_t bytes)
{
struct stm32_romem_priv *priv = context;
struct device *dev = priv->cfg.dev;
u32 *buf32 = buf;
int ret, i;
/* Allow only writing complete 32-bits aligned words */
if ((bytes % 4) || (offset % 4))
return -EINVAL;
for (i = offset; i < offset + bytes; i += 4) {
ret = stm32_bsec_smc(STM32_SMC_PROG_OTP, i >> 2, *buf32++,
NULL);
if (ret) {
dev_err(dev, "Can't write data%d (%d)\n", i >> 2, ret);
return ret;
}
}
if (offset + bytes >= priv->lower * 4)
dev_warn(dev, "Update of upper OTPs with ECC protection (word programming, only once)\n");
return 0;
}
static int stm32_bsec_pta_read(void *context, unsigned int offset, void *buf,
size_t bytes)
{
struct stm32_romem_priv *priv = context;
return stm32_bsec_optee_ta_read(priv->ctx, offset, buf, bytes);
}
static int stm32_bsec_pta_write(void *context, unsigned int offset, void *buf,
size_t bytes)
{
struct stm32_romem_priv *priv = context;
return stm32_bsec_optee_ta_write(priv->ctx, priv->lower, offset, buf, bytes);
}
static bool stm32_bsec_smc_check(void)
{
u32 val;
int ret;
/* Check that OP-TEE supports the BSEC SMC (legacy mode) */
ret = stm32_bsec_smc(STM32_SMC_READ_SHADOW, 0, 0, &val);
return !ret;
}
static bool optee_presence_check(void)
{
struct device_node *np;
bool tee_detected = false;
/* check that the OP-TEE node is present and available. */
np = of_find_compatible_node(NULL, NULL, "linaro,optee-tz");
if (np && of_device_is_available(np))
tee_detected = true;
of_node_put(np);
return tee_detected;
}
static int stm32_romem_probe(struct platform_device *pdev)
{
const struct stm32_romem_cfg *cfg;
struct device *dev = &pdev->dev;
struct stm32_romem_priv *priv;
struct resource *res;
int rc;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
priv->cfg.name = "stm32-romem";
priv->cfg.word_size = 1;
priv->cfg.stride = 1;
priv->cfg.dev = dev;
priv->cfg.priv = priv;
priv->cfg.owner = THIS_MODULE;
priv->cfg.type = NVMEM_TYPE_OTP;
priv->lower = 0;
cfg = (const struct stm32_romem_cfg *)
of_match_device(dev->driver->of_match_table, dev)->data;
if (!cfg) {
priv->cfg.read_only = true;
priv->cfg.size = resource_size(res);
priv->cfg.reg_read = stm32_romem_read;
} else {
priv->cfg.size = cfg->size;
priv->lower = cfg->lower;
if (cfg->ta || optee_presence_check()) {
rc = stm32_bsec_optee_ta_open(&priv->ctx);
if (rc) {
/* wait for OP-TEE client driver to be up and ready */
if (rc == -EPROBE_DEFER)
return -EPROBE_DEFER;
/* BSEC PTA is required or SMC not supported */
if (cfg->ta || !stm32_bsec_smc_check())
return rc;
}
}
if (priv->ctx) {
rc = devm_add_action_or_reset(dev, stm32_bsec_optee_ta_close, priv->ctx);
if (rc) {
dev_err(dev, "devm_add_action_or_reset() failed (%d)\n", rc);
return rc;
}
priv->cfg.reg_read = stm32_bsec_pta_read;
priv->cfg.reg_write = stm32_bsec_pta_write;
} else {
priv->cfg.reg_read = stm32_bsec_read;
priv->cfg.reg_write = stm32_bsec_write;
}
}
return PTR_ERR_OR_ZERO(devm_nvmem_register(dev, &priv->cfg));
}
/*
* STM32MP15/13 BSEC OTP regions: 4096 OTP bits (with 3072 effective bits)
* => 96 x 32-bits data words
* - Lower: 1K bits, 2:1 redundancy, incremental bit programming
* => 32 (x 32-bits) lower shadow registers = words 0 to 31
* - Upper: 2K bits, ECC protection, word programming only
* => 64 (x 32-bits) = words 32 to 95
*/
static const struct stm32_romem_cfg stm32mp15_bsec_cfg = {
.size = 384,
.lower = 32,
.ta = false,
};
static const struct stm32_romem_cfg stm32mp13_bsec_cfg = {
.size = 384,
.lower = 32,
.ta = true,
};
static const struct of_device_id stm32_romem_of_match[] __maybe_unused = {
{ .compatible = "st,stm32f4-otp", }, {
.compatible = "st,stm32mp15-bsec",
.data = (void *)&stm32mp15_bsec_cfg,
}, {
.compatible = "st,stm32mp13-bsec",
.data = (void *)&stm32mp13_bsec_cfg,
},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, stm32_romem_of_match);
static struct platform_driver stm32_romem_driver = {
.probe = stm32_romem_probe,
.driver = {
.name = "stm32-romem",
.of_match_table = of_match_ptr(stm32_romem_of_match),
},
};
module_platform_driver(stm32_romem_driver);
MODULE_AUTHOR("Fabrice Gasnier <[email protected]>");
MODULE_DESCRIPTION("STMicroelectronics STM32 RO-MEM");
MODULE_ALIAS("platform:nvmem-stm32-romem");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/nvmem/stm32-romem.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Nintendo Wii and Wii U OTP driver
*
* This is a driver exposing the OTP of a Nintendo Wii or Wii U console.
*
* This memory contains common and per-console keys, signatures and
* related data required to access peripherals.
*
 * Based on reverse-engineered documentation from https://wiiubrew.org/wiki/Hardware/OTP
*
* Copyright (C) 2021 Emmanuel Gil Peyrot <[email protected]>
*/
#include <linux/device.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/nvmem-provider.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#define HW_OTPCMD 0
#define HW_OTPDATA 4
#define OTP_READ 0x80000000
#define BANK_SIZE 128
#define WORD_SIZE 4
struct nintendo_otp_priv {
void __iomem *regs;
};
struct nintendo_otp_devtype_data {
const char *name;
unsigned int num_banks;
};
static const struct nintendo_otp_devtype_data hollywood_otp_data = {
.name = "wii-otp",
.num_banks = 1,
};
static const struct nintendo_otp_devtype_data latte_otp_data = {
.name = "wiiu-otp",
.num_banks = 8,
};
static int nintendo_otp_reg_read(void *context,
unsigned int reg, void *_val, size_t bytes)
{
struct nintendo_otp_priv *priv = context;
u32 *val = _val;
int words = bytes / WORD_SIZE;
u32 bank, addr;
while (words--) {
bank = (reg / BANK_SIZE) << 8;
addr = (reg / WORD_SIZE) % (BANK_SIZE / WORD_SIZE);
iowrite32be(OTP_READ | bank | addr, priv->regs + HW_OTPCMD);
*val++ = ioread32be(priv->regs + HW_OTPDATA);
reg += WORD_SIZE;
}
return 0;
}
static const struct of_device_id nintendo_otp_of_table[] = {
{ .compatible = "nintendo,hollywood-otp", .data = &hollywood_otp_data },
{ .compatible = "nintendo,latte-otp", .data = &latte_otp_data },
{/* sentinel */},
};
MODULE_DEVICE_TABLE(of, nintendo_otp_of_table);
static int nintendo_otp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct of_device_id *of_id =
of_match_device(nintendo_otp_of_table, dev);
struct nvmem_device *nvmem;
struct nintendo_otp_priv *priv;
struct nvmem_config config = {
.stride = WORD_SIZE,
.word_size = WORD_SIZE,
.reg_read = nintendo_otp_reg_read,
.read_only = true,
.root_only = true,
};
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->regs))
return PTR_ERR(priv->regs);
if (of_id->data) {
const struct nintendo_otp_devtype_data *data = of_id->data;
config.name = data->name;
config.size = data->num_banks * BANK_SIZE;
}
config.dev = dev;
config.priv = priv;
nvmem = devm_nvmem_register(dev, &config);
return PTR_ERR_OR_ZERO(nvmem);
}
static struct platform_driver nintendo_otp_driver = {
.probe = nintendo_otp_probe,
.driver = {
.name = "nintendo-otp",
.of_match_table = nintendo_otp_of_table,
},
};
module_platform_driver(nintendo_otp_driver);
MODULE_AUTHOR("Emmanuel Gil Peyrot <[email protected]>");
MODULE_DESCRIPTION("Nintendo Wii and Wii U OTP driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/nvmem/nintendo-otp.c |
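nintendo_otp_reg_read() above turns each byte offset into a command word: the bank index goes into bits 8 and up, the word index within the bank into the low bits, and OTP_READ into bit 31. A standalone sketch of that packing follows; it is illustrative only and the sample offsets are arbitrary.

#include <stdio.h>
#include <stdint.h>

#define OTP_READ  0x80000000u
#define BANK_SIZE 128
#define WORD_SIZE 4

/* Command word written to HW_OTPCMD for a given byte offset. */
static uint32_t otp_cmd(unsigned int reg)
{
	uint32_t bank = (reg / BANK_SIZE) << 8;
	uint32_t addr = (reg / WORD_SIZE) % (BANK_SIZE / WORD_SIZE);

	return OTP_READ | bank | addr;
}

int main(void)
{
	/* word 0 of bank 0, last word of bank 0, first word of bank 1 */
	unsigned int regs[] = { 0, 124, 128 };

	for (unsigned int i = 0; i < 3; i++)
		printf("offset %3u -> cmd 0x%08x\n", regs[i], otp_cmd(regs[i]));
	return 0;
}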
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015 Pengutronix, Steffen Trumtrar <[email protected]>
* Copyright (c) 2017 Pengutronix, Oleksij Rempel <[email protected]>
*/
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#define IMX6Q_SNVS_HPLR 0x00
#define IMX6Q_SNVS_LPLR 0x34
#define IMX6Q_SNVS_LPGPR 0x68
#define IMX7D_SNVS_HPLR 0x00
#define IMX7D_SNVS_LPLR 0x34
#define IMX7D_SNVS_LPGPR 0x90
#define IMX_GPR_SL BIT(5)
#define IMX_GPR_HL BIT(5)
struct snvs_lpgpr_cfg {
int offset;
int offset_hplr;
int offset_lplr;
int size;
};
struct snvs_lpgpr_priv {
	struct device *dev;
struct regmap *regmap;
struct nvmem_config cfg;
const struct snvs_lpgpr_cfg *dcfg;
};
static const struct snvs_lpgpr_cfg snvs_lpgpr_cfg_imx6q = {
.offset = IMX6Q_SNVS_LPGPR,
.offset_hplr = IMX6Q_SNVS_HPLR,
.offset_lplr = IMX6Q_SNVS_LPLR,
.size = 4,
};
static const struct snvs_lpgpr_cfg snvs_lpgpr_cfg_imx7d = {
.offset = IMX7D_SNVS_LPGPR,
.offset_hplr = IMX7D_SNVS_HPLR,
.offset_lplr = IMX7D_SNVS_LPLR,
.size = 16,
};
static int snvs_lpgpr_write(void *context, unsigned int offset, void *val,
size_t bytes)
{
struct snvs_lpgpr_priv *priv = context;
const struct snvs_lpgpr_cfg *dcfg = priv->dcfg;
unsigned int lock_reg;
int ret;
ret = regmap_read(priv->regmap, dcfg->offset_hplr, &lock_reg);
if (ret < 0)
return ret;
if (lock_reg & IMX_GPR_SL)
return -EPERM;
ret = regmap_read(priv->regmap, dcfg->offset_lplr, &lock_reg);
if (ret < 0)
return ret;
if (lock_reg & IMX_GPR_HL)
return -EPERM;
return regmap_bulk_write(priv->regmap, dcfg->offset + offset, val,
bytes / 4);
}
static int snvs_lpgpr_read(void *context, unsigned int offset, void *val,
size_t bytes)
{
struct snvs_lpgpr_priv *priv = context;
const struct snvs_lpgpr_cfg *dcfg = priv->dcfg;
return regmap_bulk_read(priv->regmap, dcfg->offset + offset,
val, bytes / 4);
}
static int snvs_lpgpr_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
struct device_node *syscon_node;
struct snvs_lpgpr_priv *priv;
struct nvmem_config *cfg;
struct nvmem_device *nvmem;
const struct snvs_lpgpr_cfg *dcfg;
if (!node)
return -ENOENT;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
dcfg = of_device_get_match_data(dev);
if (!dcfg)
return -EINVAL;
syscon_node = of_get_parent(node);
if (!syscon_node)
return -ENODEV;
priv->regmap = syscon_node_to_regmap(syscon_node);
of_node_put(syscon_node);
if (IS_ERR(priv->regmap))
return PTR_ERR(priv->regmap);
priv->dcfg = dcfg;
cfg = &priv->cfg;
cfg->priv = priv;
cfg->name = dev_name(dev);
cfg->dev = dev;
cfg->stride = 4;
cfg->word_size = 4;
cfg->size = dcfg->size;
cfg->owner = THIS_MODULE;
cfg->reg_read = snvs_lpgpr_read;
cfg->reg_write = snvs_lpgpr_write;
nvmem = devm_nvmem_register(dev, cfg);
return PTR_ERR_OR_ZERO(nvmem);
}
static const struct of_device_id snvs_lpgpr_dt_ids[] = {
{ .compatible = "fsl,imx6q-snvs-lpgpr", .data = &snvs_lpgpr_cfg_imx6q },
{ .compatible = "fsl,imx6ul-snvs-lpgpr",
.data = &snvs_lpgpr_cfg_imx6q },
{ .compatible = "fsl,imx7d-snvs-lpgpr", .data = &snvs_lpgpr_cfg_imx7d },
{ },
};
MODULE_DEVICE_TABLE(of, snvs_lpgpr_dt_ids);
static struct platform_driver snvs_lpgpr_driver = {
.probe = snvs_lpgpr_probe,
.driver = {
.name = "snvs_lpgpr",
.of_match_table = snvs_lpgpr_dt_ids,
},
};
module_platform_driver(snvs_lpgpr_driver);
MODULE_AUTHOR("Oleksij Rempel <[email protected]>");
MODULE_DESCRIPTION("Low Power General Purpose Register in i.MX6 and i.MX7 Secure Non-Volatile Storage");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/nvmem/snvs_lpgpr.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Rockchip eFuse Driver
*
* Copyright (c) 2015 Rockchip Electronics Co. Ltd.
* Author: Caesar Wang <[email protected]>
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#define RK3288_A_SHIFT 6
#define RK3288_A_MASK 0x3ff
#define RK3288_PGENB BIT(3)
#define RK3288_LOAD BIT(2)
#define RK3288_STROBE BIT(1)
#define RK3288_CSB BIT(0)
#define RK3328_SECURE_SIZES 96
#define RK3328_INT_STATUS 0x0018
#define RK3328_DOUT 0x0020
#define RK3328_AUTO_CTRL 0x0024
#define RK3328_INT_FINISH BIT(0)
#define RK3328_AUTO_ENB BIT(0)
#define RK3328_AUTO_RD BIT(1)
#define RK3399_A_SHIFT 16
#define RK3399_A_MASK 0x3ff
#define RK3399_NBYTES 4
#define RK3399_STROBSFTSEL BIT(9)
#define RK3399_RSB BIT(7)
#define RK3399_PD BIT(5)
#define RK3399_PGENB BIT(3)
#define RK3399_LOAD BIT(2)
#define RK3399_STROBE BIT(1)
#define RK3399_CSB BIT(0)
#define REG_EFUSE_CTRL 0x0000
#define REG_EFUSE_DOUT 0x0004
struct rockchip_efuse_chip {
struct device *dev;
void __iomem *base;
struct clk *clk;
};
static int rockchip_rk3288_efuse_read(void *context, unsigned int offset,
void *val, size_t bytes)
{
struct rockchip_efuse_chip *efuse = context;
u8 *buf = val;
int ret;
ret = clk_prepare_enable(efuse->clk);
if (ret < 0) {
dev_err(efuse->dev, "failed to prepare/enable efuse clk\n");
return ret;
}
writel(RK3288_LOAD | RK3288_PGENB, efuse->base + REG_EFUSE_CTRL);
udelay(1);
while (bytes--) {
writel(readl(efuse->base + REG_EFUSE_CTRL) &
(~(RK3288_A_MASK << RK3288_A_SHIFT)),
efuse->base + REG_EFUSE_CTRL);
writel(readl(efuse->base + REG_EFUSE_CTRL) |
((offset++ & RK3288_A_MASK) << RK3288_A_SHIFT),
efuse->base + REG_EFUSE_CTRL);
udelay(1);
writel(readl(efuse->base + REG_EFUSE_CTRL) |
RK3288_STROBE, efuse->base + REG_EFUSE_CTRL);
udelay(1);
*buf++ = readb(efuse->base + REG_EFUSE_DOUT);
writel(readl(efuse->base + REG_EFUSE_CTRL) &
(~RK3288_STROBE), efuse->base + REG_EFUSE_CTRL);
udelay(1);
}
/* Switch to standby mode */
writel(RK3288_PGENB | RK3288_CSB, efuse->base + REG_EFUSE_CTRL);
clk_disable_unprepare(efuse->clk);
return 0;
}
static int rockchip_rk3328_efuse_read(void *context, unsigned int offset,
void *val, size_t bytes)
{
struct rockchip_efuse_chip *efuse = context;
unsigned int addr_start, addr_end, addr_offset, addr_len;
u32 out_value, status;
u8 *buf;
int ret, i = 0;
ret = clk_prepare_enable(efuse->clk);
if (ret < 0) {
dev_err(efuse->dev, "failed to prepare/enable efuse clk\n");
return ret;
}
	/* 128-byte efuse: 96 bytes secure, 32 bytes non-secure */
offset += RK3328_SECURE_SIZES;
addr_start = rounddown(offset, RK3399_NBYTES) / RK3399_NBYTES;
addr_end = roundup(offset + bytes, RK3399_NBYTES) / RK3399_NBYTES;
addr_offset = offset % RK3399_NBYTES;
addr_len = addr_end - addr_start;
buf = kzalloc(array3_size(addr_len, RK3399_NBYTES, sizeof(*buf)),
GFP_KERNEL);
if (!buf) {
ret = -ENOMEM;
goto nomem;
}
while (addr_len--) {
writel(RK3328_AUTO_RD | RK3328_AUTO_ENB |
((addr_start++ & RK3399_A_MASK) << RK3399_A_SHIFT),
efuse->base + RK3328_AUTO_CTRL);
udelay(4);
status = readl(efuse->base + RK3328_INT_STATUS);
if (!(status & RK3328_INT_FINISH)) {
ret = -EIO;
goto err;
}
out_value = readl(efuse->base + RK3328_DOUT);
writel(RK3328_INT_FINISH, efuse->base + RK3328_INT_STATUS);
memcpy(&buf[i], &out_value, RK3399_NBYTES);
i += RK3399_NBYTES;
}
memcpy(val, buf + addr_offset, bytes);
err:
kfree(buf);
nomem:
clk_disable_unprepare(efuse->clk);
return ret;
}
static int rockchip_rk3399_efuse_read(void *context, unsigned int offset,
void *val, size_t bytes)
{
struct rockchip_efuse_chip *efuse = context;
unsigned int addr_start, addr_end, addr_offset, addr_len;
u32 out_value;
u8 *buf;
int ret, i = 0;
ret = clk_prepare_enable(efuse->clk);
if (ret < 0) {
dev_err(efuse->dev, "failed to prepare/enable efuse clk\n");
return ret;
}
addr_start = rounddown(offset, RK3399_NBYTES) / RK3399_NBYTES;
addr_end = roundup(offset + bytes, RK3399_NBYTES) / RK3399_NBYTES;
addr_offset = offset % RK3399_NBYTES;
addr_len = addr_end - addr_start;
buf = kzalloc(array3_size(addr_len, RK3399_NBYTES, sizeof(*buf)),
GFP_KERNEL);
if (!buf) {
clk_disable_unprepare(efuse->clk);
return -ENOMEM;
}
writel(RK3399_LOAD | RK3399_PGENB | RK3399_STROBSFTSEL | RK3399_RSB,
efuse->base + REG_EFUSE_CTRL);
udelay(1);
while (addr_len--) {
writel(readl(efuse->base + REG_EFUSE_CTRL) | RK3399_STROBE |
((addr_start++ & RK3399_A_MASK) << RK3399_A_SHIFT),
efuse->base + REG_EFUSE_CTRL);
udelay(1);
out_value = readl(efuse->base + REG_EFUSE_DOUT);
writel(readl(efuse->base + REG_EFUSE_CTRL) & (~RK3399_STROBE),
efuse->base + REG_EFUSE_CTRL);
udelay(1);
memcpy(&buf[i], &out_value, RK3399_NBYTES);
i += RK3399_NBYTES;
}
/* Switch to standby mode */
writel(RK3399_PD | RK3399_CSB, efuse->base + REG_EFUSE_CTRL);
memcpy(val, buf + addr_offset, bytes);
kfree(buf);
clk_disable_unprepare(efuse->clk);
return 0;
}
static struct nvmem_config econfig = {
.name = "rockchip-efuse",
.stride = 1,
.word_size = 1,
.read_only = true,
};
static const struct of_device_id rockchip_efuse_match[] = {
/* deprecated but kept around for dts binding compatibility */
{
.compatible = "rockchip,rockchip-efuse",
.data = (void *)&rockchip_rk3288_efuse_read,
},
{
.compatible = "rockchip,rk3066a-efuse",
.data = (void *)&rockchip_rk3288_efuse_read,
},
{
.compatible = "rockchip,rk3188-efuse",
.data = (void *)&rockchip_rk3288_efuse_read,
},
{
.compatible = "rockchip,rk3228-efuse",
.data = (void *)&rockchip_rk3288_efuse_read,
},
{
.compatible = "rockchip,rk3288-efuse",
.data = (void *)&rockchip_rk3288_efuse_read,
},
{
.compatible = "rockchip,rk3368-efuse",
.data = (void *)&rockchip_rk3288_efuse_read,
},
{
.compatible = "rockchip,rk3328-efuse",
.data = (void *)&rockchip_rk3328_efuse_read,
},
{
.compatible = "rockchip,rk3399-efuse",
.data = (void *)&rockchip_rk3399_efuse_read,
},
{ /* sentinel */},
};
MODULE_DEVICE_TABLE(of, rockchip_efuse_match);
static int rockchip_efuse_probe(struct platform_device *pdev)
{
struct resource *res;
struct nvmem_device *nvmem;
struct rockchip_efuse_chip *efuse;
const void *data;
struct device *dev = &pdev->dev;
data = of_device_get_match_data(dev);
if (!data) {
dev_err(dev, "failed to get match data\n");
return -EINVAL;
}
efuse = devm_kzalloc(dev, sizeof(struct rockchip_efuse_chip),
GFP_KERNEL);
if (!efuse)
return -ENOMEM;
efuse->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(efuse->base))
return PTR_ERR(efuse->base);
efuse->clk = devm_clk_get(dev, "pclk_efuse");
if (IS_ERR(efuse->clk))
return PTR_ERR(efuse->clk);
efuse->dev = dev;
if (of_property_read_u32(dev->of_node, "rockchip,efuse-size",
&econfig.size))
econfig.size = resource_size(res);
econfig.reg_read = data;
econfig.priv = efuse;
econfig.dev = efuse->dev;
nvmem = devm_nvmem_register(dev, &econfig);
return PTR_ERR_OR_ZERO(nvmem);
}
static struct platform_driver rockchip_efuse_driver = {
.probe = rockchip_efuse_probe,
.driver = {
.name = "rockchip-efuse",
.of_match_table = rockchip_efuse_match,
},
};
module_platform_driver(rockchip_efuse_driver);
MODULE_DESCRIPTION("rockchip_efuse driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/nvmem/rockchip-efuse.c |
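The RK3328/RK3399 read paths above widen every request to whole 32-bit words: round the start offset down and the end offset up, read that window into a bounce buffer, then copy out from the intra-word offset. A standalone sketch of the same arithmetic is below; it is illustrative only and the offset and length are arbitrary.

#include <stdio.h>

#define NBYTES 4	/* the controller only reads whole 32-bit words */

int main(void)
{
	unsigned int offset = 6, bytes = 5;	/* an arbitrary unaligned request */
	unsigned int addr_start = offset / NBYTES;			/* rounddown */
	unsigned int addr_end = (offset + bytes + NBYTES - 1) / NBYTES;	/* roundup */
	unsigned int addr_offset = offset % NBYTES;
	unsigned int addr_len = addr_end - addr_start;

	printf("read words %u..%u (%u words), then copy %u bytes starting at byte %u of the bounce buffer\n",
	       addr_start, addr_end - 1, addr_len, bytes, addr_offset);
	return 0;
}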
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2021 Rafał Miłecki <[email protected]>
*/
#include <linux/bcm47xx_nvram.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#define NVRAM_MAGIC "FLSH"
struct brcm_nvram {
struct device *dev;
void __iomem *base;
struct nvmem_cell_info *cells;
int ncells;
};
struct brcm_nvram_header {
char magic[4];
__le32 len;
__le32 crc_ver_init; /* 0:7 crc, 8:15 ver, 16:31 sdram_init */
__le32 config_refresh; /* 0:15 sdram_config, 16:31 sdram_refresh */
__le32 config_ncdl; /* ncdl values for memc */
};
static int brcm_nvram_read(void *context, unsigned int offset, void *val,
size_t bytes)
{
struct brcm_nvram *priv = context;
u8 *dst = val;
while (bytes--)
*dst++ = readb(priv->base + offset++);
return 0;
}
static int brcm_nvram_read_post_process_macaddr(void *context, const char *id, int index,
unsigned int offset, void *buf, size_t bytes)
{
u8 mac[ETH_ALEN];
if (bytes != 3 * ETH_ALEN - 1)
return -EINVAL;
if (!mac_pton(buf, mac))
return -EINVAL;
if (index)
eth_addr_add(mac, index);
ether_addr_copy(buf, mac);
return 0;
}
static int brcm_nvram_add_cells(struct brcm_nvram *priv, uint8_t *data,
size_t len)
{
struct device *dev = priv->dev;
char *var, *value, *eq;
int idx;
priv->ncells = 0;
for (var = data + sizeof(struct brcm_nvram_header);
var < (char *)data + len && *var;
var += strlen(var) + 1) {
priv->ncells++;
}
priv->cells = devm_kcalloc(dev, priv->ncells, sizeof(*priv->cells), GFP_KERNEL);
if (!priv->cells)
return -ENOMEM;
for (var = data + sizeof(struct brcm_nvram_header), idx = 0;
var < (char *)data + len && *var;
var = value + strlen(value) + 1, idx++) {
eq = strchr(var, '=');
if (!eq)
break;
*eq = '\0';
value = eq + 1;
priv->cells[idx].name = devm_kstrdup(dev, var, GFP_KERNEL);
if (!priv->cells[idx].name)
return -ENOMEM;
priv->cells[idx].offset = value - (char *)data;
priv->cells[idx].bytes = strlen(value);
priv->cells[idx].np = of_get_child_by_name(dev->of_node, priv->cells[idx].name);
if (!strcmp(var, "et0macaddr") ||
!strcmp(var, "et1macaddr") ||
!strcmp(var, "et2macaddr")) {
priv->cells[idx].raw_len = strlen(value);
priv->cells[idx].bytes = ETH_ALEN;
priv->cells[idx].read_post_process = brcm_nvram_read_post_process_macaddr;
}
}
return 0;
}
static int brcm_nvram_parse(struct brcm_nvram *priv)
{
struct device *dev = priv->dev;
struct brcm_nvram_header header;
uint8_t *data;
size_t len;
int err;
memcpy_fromio(&header, priv->base, sizeof(header));
if (memcmp(header.magic, NVRAM_MAGIC, 4)) {
dev_err(dev, "Invalid NVRAM magic\n");
return -EINVAL;
}
len = le32_to_cpu(header.len);
data = kzalloc(len, GFP_KERNEL);
if (!data)
return -ENOMEM;
memcpy_fromio(data, priv->base, len);
data[len - 1] = '\0';
	err = brcm_nvram_add_cells(priv, data, len);
	if (err)
		dev_err(dev, "Failed to add cells: %d\n", err);
	kfree(data);
	return err;
}
static int brcm_nvram_probe(struct platform_device *pdev)
{
struct nvmem_config config = {
.name = "brcm-nvram",
.reg_read = brcm_nvram_read,
};
struct device *dev = &pdev->dev;
struct resource *res;
struct brcm_nvram *priv;
int err;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->dev = dev;
priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
err = brcm_nvram_parse(priv);
if (err)
return err;
bcm47xx_nvram_init_from_iomem(priv->base, resource_size(res));
config.dev = dev;
config.cells = priv->cells;
config.ncells = priv->ncells;
config.priv = priv;
config.size = resource_size(res);
return PTR_ERR_OR_ZERO(devm_nvmem_register(dev, &config));
}
static const struct of_device_id brcm_nvram_of_match_table[] = {
{ .compatible = "brcm,nvram", },
{},
};
static struct platform_driver brcm_nvram_driver = {
.probe = brcm_nvram_probe,
.driver = {
.name = "brcm_nvram",
.of_match_table = brcm_nvram_of_match_table,
},
};
static int __init brcm_nvram_init(void)
{
return platform_driver_register(&brcm_nvram_driver);
}
subsys_initcall_sync(brcm_nvram_init);
MODULE_AUTHOR("Rafał Miłecki");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(of, brcm_nvram_of_match_table);
| linux-master | drivers/nvmem/brcm_nvram.c |
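brcm_nvram_add_cells() above walks a blob of NUL-terminated "name=value" strings that follows the NVRAM header, turning each pair into an nvmem cell. Here is a standalone sketch of that walk on a made-up blob; the variable names in the blob are assumptions, not taken from real firmware.

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* "name=value\0name=value\0...\0" layout, as found after the header */
	char blob[] = "boardtype=foo\0et0macaddr=00:11:22:33:44:55\0\0";
	size_t len = sizeof(blob);
	char *var, *eq, *value;
	int ncells = 0;

	for (var = blob; var < blob + len && *var; var += strlen(var) + 1) {
		eq = strchr(var, '=');
		if (!eq)
			break;
		value = eq + 1;
		printf("cell %d: name '%.*s', value '%s'\n",
		       ncells, (int)(eq - var), var, value);
		ncells++;
	}
	printf("%d cells\n", ncells);
	return 0;
}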
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015 MediaTek Inc.
* Author: Andrew-CT Chen <[email protected]>
*/
#include <linux/device.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/io.h>
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>
#include <linux/property.h>
struct mtk_efuse_pdata {
bool uses_post_processing;
};
struct mtk_efuse_priv {
void __iomem *base;
};
static int mtk_reg_read(void *context,
unsigned int reg, void *_val, size_t bytes)
{
struct mtk_efuse_priv *priv = context;
void __iomem *addr = priv->base + reg;
u8 *val = _val;
int i;
for (i = 0; i < bytes; i++, val++)
*val = readb(addr + i);
return 0;
}
static int mtk_efuse_gpu_speedbin_pp(void *context, const char *id, int index,
unsigned int offset, void *data, size_t bytes)
{
u8 *val = data;
if (val[0] < 8)
val[0] = BIT(val[0]);
return 0;
}
static void mtk_efuse_fixup_cell_info(struct nvmem_device *nvmem,
struct nvmem_layout *layout,
struct nvmem_cell_info *cell)
{
size_t sz = strlen(cell->name);
/*
* On some SoCs, the GPU speedbin is not read as bitmask but as
* a number with range [0-7] (max 3 bits): post process to use
* it in OPP tables to describe supported-hw.
*/
if (cell->nbits <= 3 &&
strncmp(cell->name, "gpu-speedbin", min(sz, strlen("gpu-speedbin"))) == 0)
cell->read_post_process = mtk_efuse_gpu_speedbin_pp;
}
static struct nvmem_layout mtk_efuse_layout = {
.fixup_cell_info = mtk_efuse_fixup_cell_info,
};
static int mtk_efuse_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
struct nvmem_device *nvmem;
struct nvmem_config econfig = {};
struct mtk_efuse_priv *priv;
const struct mtk_efuse_pdata *pdata;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
pdata = device_get_match_data(dev);
econfig.stride = 1;
econfig.word_size = 1;
econfig.reg_read = mtk_reg_read;
econfig.size = resource_size(res);
econfig.priv = priv;
econfig.dev = dev;
if (pdata->uses_post_processing)
econfig.layout = &mtk_efuse_layout;
nvmem = devm_nvmem_register(dev, &econfig);
return PTR_ERR_OR_ZERO(nvmem);
}
static const struct mtk_efuse_pdata mtk_mt8186_efuse_pdata = {
.uses_post_processing = true,
};
static const struct mtk_efuse_pdata mtk_efuse_pdata = {
.uses_post_processing = false,
};
static const struct of_device_id mtk_efuse_of_match[] = {
{ .compatible = "mediatek,mt8173-efuse", .data = &mtk_efuse_pdata },
{ .compatible = "mediatek,mt8186-efuse", .data = &mtk_mt8186_efuse_pdata },
{ .compatible = "mediatek,efuse", .data = &mtk_efuse_pdata },
{/* sentinel */},
};
MODULE_DEVICE_TABLE(of, mtk_efuse_of_match);
static struct platform_driver mtk_efuse_driver = {
.probe = mtk_efuse_probe,
.driver = {
.name = "mediatek,efuse",
.of_match_table = mtk_efuse_of_match,
},
};
static int __init mtk_efuse_init(void)
{
int ret;
ret = platform_driver_register(&mtk_efuse_driver);
if (ret) {
pr_err("Failed to register efuse driver\n");
return ret;
}
return 0;
}
static void __exit mtk_efuse_exit(void)
{
return platform_driver_unregister(&mtk_efuse_driver);
}
subsys_initcall(mtk_efuse_init);
module_exit(mtk_efuse_exit);
MODULE_AUTHOR("Andrew-CT Chen <[email protected]>");
MODULE_DESCRIPTION("Mediatek EFUSE driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/nvmem/mtk-efuse.c |
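mtk_efuse_gpu_speedbin_pp() above converts a raw speed bin in the range 0-7 into the bitmask form expected by OPP supported-hw tables, and leaves any other value untouched. A minimal standalone sketch of that mapping, illustrative only:

#include <stdio.h>

int main(void)
{
	/* bins 0..7 become single-bit masks, exactly as the post-process does */
	for (unsigned int bin = 0; bin < 8; bin++)
		printf("bin %u -> supported-hw mask 0x%02x\n", bin, 1u << bin);
	return 0;
}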
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2022 Rafał Miłecki <[email protected]>
*/
#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
enum u_boot_env_format {
U_BOOT_FORMAT_SINGLE,
U_BOOT_FORMAT_REDUNDANT,
U_BOOT_FORMAT_BROADCOM,
};
struct u_boot_env {
struct device *dev;
enum u_boot_env_format format;
struct mtd_info *mtd;
/* Cells */
struct nvmem_cell_info *cells;
int ncells;
};
struct u_boot_env_image_single {
__le32 crc32;
uint8_t data[];
} __packed;
struct u_boot_env_image_redundant {
__le32 crc32;
u8 mark;
uint8_t data[];
} __packed;
struct u_boot_env_image_broadcom {
__le32 magic;
__le32 len;
__le32 crc32;
DECLARE_FLEX_ARRAY(uint8_t, data);
} __packed;
static int u_boot_env_read(void *context, unsigned int offset, void *val,
size_t bytes)
{
struct u_boot_env *priv = context;
struct device *dev = priv->dev;
size_t bytes_read;
int err;
err = mtd_read(priv->mtd, offset, bytes, &bytes_read, val);
if (err && !mtd_is_bitflip(err)) {
dev_err(dev, "Failed to read from mtd: %d\n", err);
return err;
}
if (bytes_read != bytes) {
dev_err(dev, "Failed to read %zu bytes\n", bytes);
return -EIO;
}
return 0;
}
static int u_boot_env_read_post_process_ethaddr(void *context, const char *id, int index,
unsigned int offset, void *buf, size_t bytes)
{
u8 mac[ETH_ALEN];
if (bytes != 3 * ETH_ALEN - 1)
return -EINVAL;
if (!mac_pton(buf, mac))
return -EINVAL;
if (index)
eth_addr_add(mac, index);
ether_addr_copy(buf, mac);
return 0;
}
static int u_boot_env_add_cells(struct u_boot_env *priv, uint8_t *buf,
size_t data_offset, size_t data_len)
{
struct device *dev = priv->dev;
char *data = buf + data_offset;
char *var, *value, *eq;
int idx;
priv->ncells = 0;
for (var = data; var < data + data_len && *var; var += strlen(var) + 1)
priv->ncells++;
priv->cells = devm_kcalloc(dev, priv->ncells, sizeof(*priv->cells), GFP_KERNEL);
if (!priv->cells)
return -ENOMEM;
for (var = data, idx = 0;
var < data + data_len && *var;
var = value + strlen(value) + 1, idx++) {
eq = strchr(var, '=');
if (!eq)
break;
*eq = '\0';
value = eq + 1;
priv->cells[idx].name = devm_kstrdup(dev, var, GFP_KERNEL);
if (!priv->cells[idx].name)
return -ENOMEM;
priv->cells[idx].offset = data_offset + value - data;
priv->cells[idx].bytes = strlen(value);
priv->cells[idx].np = of_get_child_by_name(dev->of_node, priv->cells[idx].name);
if (!strcmp(var, "ethaddr")) {
priv->cells[idx].raw_len = strlen(value);
priv->cells[idx].bytes = ETH_ALEN;
priv->cells[idx].read_post_process = u_boot_env_read_post_process_ethaddr;
}
}
if (WARN_ON(idx != priv->ncells))
priv->ncells = idx;
return 0;
}
static int u_boot_env_parse(struct u_boot_env *priv)
{
struct device *dev = priv->dev;
size_t crc32_data_offset;
size_t crc32_data_len;
size_t crc32_offset;
size_t data_offset;
size_t data_len;
uint32_t crc32;
uint32_t calc;
size_t bytes;
uint8_t *buf;
int err;
buf = kcalloc(1, priv->mtd->size, GFP_KERNEL);
if (!buf) {
err = -ENOMEM;
goto err_out;
}
err = mtd_read(priv->mtd, 0, priv->mtd->size, &bytes, buf);
if ((err && !mtd_is_bitflip(err)) || bytes != priv->mtd->size) {
dev_err(dev, "Failed to read from mtd: %d\n", err);
goto err_kfree;
}
switch (priv->format) {
case U_BOOT_FORMAT_SINGLE:
crc32_offset = offsetof(struct u_boot_env_image_single, crc32);
crc32_data_offset = offsetof(struct u_boot_env_image_single, data);
data_offset = offsetof(struct u_boot_env_image_single, data);
break;
case U_BOOT_FORMAT_REDUNDANT:
crc32_offset = offsetof(struct u_boot_env_image_redundant, crc32);
crc32_data_offset = offsetof(struct u_boot_env_image_redundant, data);
data_offset = offsetof(struct u_boot_env_image_redundant, data);
break;
case U_BOOT_FORMAT_BROADCOM:
crc32_offset = offsetof(struct u_boot_env_image_broadcom, crc32);
crc32_data_offset = offsetof(struct u_boot_env_image_broadcom, data);
data_offset = offsetof(struct u_boot_env_image_broadcom, data);
break;
}
crc32 = le32_to_cpu(*(__le32 *)(buf + crc32_offset));
crc32_data_len = priv->mtd->size - crc32_data_offset;
data_len = priv->mtd->size - data_offset;
calc = crc32(~0, buf + crc32_data_offset, crc32_data_len) ^ ~0L;
if (calc != crc32) {
dev_err(dev, "Invalid calculated CRC32: 0x%08x (expected: 0x%08x)\n", calc, crc32);
err = -EINVAL;
goto err_kfree;
}
buf[priv->mtd->size - 1] = '\0';
err = u_boot_env_add_cells(priv, buf, data_offset, data_len);
if (err)
dev_err(dev, "Failed to add cells: %d\n", err);
err_kfree:
kfree(buf);
err_out:
return err;
}
static int u_boot_env_probe(struct platform_device *pdev)
{
struct nvmem_config config = {
.name = "u-boot-env",
.reg_read = u_boot_env_read,
};
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct u_boot_env *priv;
int err;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->dev = dev;
priv->format = (uintptr_t)of_device_get_match_data(dev);
priv->mtd = of_get_mtd_device_by_node(np);
if (IS_ERR(priv->mtd)) {
dev_err_probe(dev, PTR_ERR(priv->mtd), "Failed to get %pOF MTD\n", np);
return PTR_ERR(priv->mtd);
}
err = u_boot_env_parse(priv);
if (err)
return err;
config.dev = dev;
config.cells = priv->cells;
config.ncells = priv->ncells;
config.priv = priv;
config.size = priv->mtd->size;
return PTR_ERR_OR_ZERO(devm_nvmem_register(dev, &config));
}
static const struct of_device_id u_boot_env_of_match_table[] = {
{ .compatible = "u-boot,env", .data = (void *)U_BOOT_FORMAT_SINGLE, },
{ .compatible = "u-boot,env-redundant-bool", .data = (void *)U_BOOT_FORMAT_REDUNDANT, },
{ .compatible = "u-boot,env-redundant-count", .data = (void *)U_BOOT_FORMAT_REDUNDANT, },
{ .compatible = "brcm,env", .data = (void *)U_BOOT_FORMAT_BROADCOM, },
{},
};
static struct platform_driver u_boot_env_driver = {
.probe = u_boot_env_probe,
.driver = {
.name = "u_boot_env",
.of_match_table = u_boot_env_of_match_table,
},
};
module_platform_driver(u_boot_env_driver);
MODULE_AUTHOR("Rafał Miłecki");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(of, u_boot_env_of_match_table);
| linux-master | drivers/nvmem/u-boot-env.c |
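The ethaddr post-processing above parses the 17-character MAC string stored in the environment and, for cell index N, adds N to the 48-bit address. Below is a rough userspace equivalent using sscanf() in place of the kernel's mac_pton()/eth_addr_add() helpers; the address and index are made up for illustration.

#include <stdio.h>

int main(void)
{
	const char *ethaddr = "00:11:22:33:44:fe";
	unsigned int index = 3;		/* e.g. third MAC derived from the base */
	unsigned int b[6];
	unsigned long long mac = 0;

	if (sscanf(ethaddr, "%2x:%2x:%2x:%2x:%2x:%2x",
		   &b[0], &b[1], &b[2], &b[3], &b[4], &b[5]) != 6)
		return 1;

	for (int i = 0; i < 6; i++)
		mac = (mac << 8) | b[i];
	mac += index;	/* what eth_addr_add() does on the 6-byte array */

	printf("%02llx:%02llx:%02llx:%02llx:%02llx:%02llx\n",
	       (mac >> 40) & 0xff, (mac >> 32) & 0xff, (mac >> 24) & 0xff,
	       (mac >> 16) & 0xff, (mac >> 8) & 0xff, mac & 0xff);
	return 0;
}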
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015 Toradex AG.
*
* Author: Sanchayan Maity <[email protected]>
*
* Based on the barebox ocotp driver,
* Copyright (c) 2010 Baruch Siach <[email protected]>
* Orex Computed Radiography
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
/* OCOTP Register Offsets */
#define OCOTP_CTRL_REG 0x00
#define OCOTP_CTRL_SET 0x04
#define OCOTP_CTRL_CLR 0x08
#define OCOTP_TIMING 0x10
#define OCOTP_DATA 0x20
#define OCOTP_READ_CTRL_REG 0x30
#define OCOTP_READ_FUSE_DATA 0x40
/* OCOTP Register bits and masks */
#define OCOTP_CTRL_WR_UNLOCK 16
#define OCOTP_CTRL_WR_UNLOCK_KEY 0x3E77
#define OCOTP_CTRL_WR_UNLOCK_MASK GENMASK(31, 16)
#define OCOTP_CTRL_ADDR 0
#define OCOTP_CTRL_ADDR_MASK GENMASK(6, 0)
#define OCOTP_CTRL_RELOAD_SHADOWS BIT(10)
#define OCOTP_CTRL_ERR BIT(9)
#define OCOTP_CTRL_BUSY BIT(8)
#define OCOTP_TIMING_STROBE_READ 16
#define OCOTP_TIMING_STROBE_READ_MASK GENMASK(21, 16)
#define OCOTP_TIMING_RELAX 12
#define OCOTP_TIMING_RELAX_MASK GENMASK(15, 12)
#define OCOTP_TIMING_STROBE_PROG 0
#define OCOTP_TIMING_STROBE_PROG_MASK GENMASK(11, 0)
#define OCOTP_READ_CTRL_READ_FUSE 0x1
#define VF610_OCOTP_TIMEOUT 100000
#define BF(value, field) (((value) << field) & field##_MASK)
#define DEF_RELAX 20
static const int base_to_fuse_addr_mappings[][2] = {
{0x400, 0x00},
{0x410, 0x01},
{0x420, 0x02},
{0x450, 0x05},
{0x4F0, 0x0F},
{0x600, 0x20},
{0x610, 0x21},
{0x620, 0x22},
{0x630, 0x23},
{0x640, 0x24},
{0x650, 0x25},
{0x660, 0x26},
{0x670, 0x27},
{0x6F0, 0x2F},
{0x880, 0x38},
{0x890, 0x39},
{0x8A0, 0x3A},
{0x8B0, 0x3B},
{0x8C0, 0x3C},
{0x8D0, 0x3D},
{0x8E0, 0x3E},
{0x8F0, 0x3F},
{0xC80, 0x78},
{0xC90, 0x79},
{0xCA0, 0x7A},
{0xCB0, 0x7B},
{0xCC0, 0x7C},
{0xCD0, 0x7D},
{0xCE0, 0x7E},
{0xCF0, 0x7F},
};
struct vf610_ocotp {
void __iomem *base;
struct clk *clk;
struct device *dev;
struct nvmem_device *nvmem;
int timing;
};
static int vf610_ocotp_wait_busy(void __iomem *base)
{
int timeout = VF610_OCOTP_TIMEOUT;
while ((readl(base) & OCOTP_CTRL_BUSY) && --timeout)
udelay(10);
if (!timeout) {
writel(OCOTP_CTRL_ERR, base + OCOTP_CTRL_CLR);
return -ETIMEDOUT;
}
udelay(10);
return 0;
}
static int vf610_ocotp_calculate_timing(struct vf610_ocotp *ocotp_dev)
{
u32 clk_rate;
u32 relax, strobe_read, strobe_prog;
u32 timing;
clk_rate = clk_get_rate(ocotp_dev->clk);
/* Refer section OTP read/write timing parameters in TRM */
relax = clk_rate / (1000000000 / DEF_RELAX) - 1;
strobe_prog = clk_rate / (1000000000 / 10000) + 2 * (DEF_RELAX + 1) - 1;
strobe_read = clk_rate / (1000000000 / 40) + 2 * (DEF_RELAX + 1) - 1;
timing = BF(relax, OCOTP_TIMING_RELAX);
timing |= BF(strobe_read, OCOTP_TIMING_STROBE_READ);
timing |= BF(strobe_prog, OCOTP_TIMING_STROBE_PROG);
return timing;
}
static int vf610_get_fuse_address(int base_addr_offset)
{
int i;
for (i = 0; i < ARRAY_SIZE(base_to_fuse_addr_mappings); i++) {
if (base_to_fuse_addr_mappings[i][0] == base_addr_offset)
return base_to_fuse_addr_mappings[i][1];
}
return -EINVAL;
}
static int vf610_ocotp_read(void *context, unsigned int offset,
void *val, size_t bytes)
{
struct vf610_ocotp *ocotp = context;
void __iomem *base = ocotp->base;
u32 reg, *buf = val;
int fuse_addr;
int ret;
while (bytes > 0) {
fuse_addr = vf610_get_fuse_address(offset);
		if (fuse_addr >= 0) {
writel(ocotp->timing, base + OCOTP_TIMING);
ret = vf610_ocotp_wait_busy(base + OCOTP_CTRL_REG);
if (ret)
return ret;
reg = readl(base + OCOTP_CTRL_REG);
reg &= ~OCOTP_CTRL_ADDR_MASK;
reg &= ~OCOTP_CTRL_WR_UNLOCK_MASK;
reg |= BF(fuse_addr, OCOTP_CTRL_ADDR);
writel(reg, base + OCOTP_CTRL_REG);
writel(OCOTP_READ_CTRL_READ_FUSE,
base + OCOTP_READ_CTRL_REG);
ret = vf610_ocotp_wait_busy(base + OCOTP_CTRL_REG);
if (ret)
return ret;
if (readl(base) & OCOTP_CTRL_ERR) {
dev_dbg(ocotp->dev, "Error reading from fuse address %x\n",
fuse_addr);
writel(OCOTP_CTRL_ERR, base + OCOTP_CTRL_CLR);
}
/*
* In case of error, we do not abort and expect to read
* 0xBADABADA as mentioned by the TRM. We just read this
* value and return.
*/
*buf = readl(base + OCOTP_READ_FUSE_DATA);
} else {
*buf = 0;
}
buf++;
bytes -= 4;
offset += 4;
}
return 0;
}
static struct nvmem_config ocotp_config = {
.name = "ocotp",
.stride = 4,
.word_size = 4,
.reg_read = vf610_ocotp_read,
};
static const struct of_device_id ocotp_of_match[] = {
{ .compatible = "fsl,vf610-ocotp", },
{/* sentinel */},
};
MODULE_DEVICE_TABLE(of, ocotp_of_match);
static int vf610_ocotp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
struct vf610_ocotp *ocotp_dev;
ocotp_dev = devm_kzalloc(dev, sizeof(struct vf610_ocotp), GFP_KERNEL);
if (!ocotp_dev)
return -ENOMEM;
ocotp_dev->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(ocotp_dev->base))
return PTR_ERR(ocotp_dev->base);
ocotp_dev->clk = devm_clk_get(dev, NULL);
if (IS_ERR(ocotp_dev->clk)) {
dev_err(dev, "failed getting clock, err = %ld\n",
PTR_ERR(ocotp_dev->clk));
return PTR_ERR(ocotp_dev->clk);
}
ocotp_dev->dev = dev;
ocotp_dev->timing = vf610_ocotp_calculate_timing(ocotp_dev);
ocotp_config.size = resource_size(res);
ocotp_config.priv = ocotp_dev;
ocotp_config.dev = dev;
ocotp_dev->nvmem = devm_nvmem_register(dev, &ocotp_config);
return PTR_ERR_OR_ZERO(ocotp_dev->nvmem);
}
static struct platform_driver vf610_ocotp_driver = {
.probe = vf610_ocotp_probe,
.driver = {
.name = "vf610-ocotp",
.of_match_table = ocotp_of_match,
},
};
module_platform_driver(vf610_ocotp_driver);
MODULE_AUTHOR("Sanchayan Maity <[email protected]>");
MODULE_DESCRIPTION("Vybrid OCOTP driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/nvmem/vf610-ocotp.c |
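vf610_ocotp_calculate_timing() above derives the relax, strobe_read and strobe_prog cycle counts from the bus clock rate and packs them with the BF() field macro. A standalone sketch of the same computation follows; the 66 MHz clock rate is an assumption for illustration, not a value taken from the driver.

#include <stdio.h>
#include <stdint.h>

#define OCOTP_TIMING_STROBE_READ	16
#define OCOTP_TIMING_STROBE_READ_MASK	(0x3fu << 16)
#define OCOTP_TIMING_RELAX		12
#define OCOTP_TIMING_RELAX_MASK		(0xfu << 12)
#define OCOTP_TIMING_STROBE_PROG	0
#define OCOTP_TIMING_STROBE_PROG_MASK	0xfffu
#define DEF_RELAX			20

/* Shift the value into its field, then clip it to the field's mask. */
#define BF(value, field) (((value) << field) & field##_MASK)

int main(void)
{
	uint32_t clk_rate = 66000000;	/* assumed 66 MHz bus clock */
	uint32_t relax = clk_rate / (1000000000 / DEF_RELAX) - 1;
	uint32_t strobe_prog = clk_rate / (1000000000 / 10000) + 2 * (DEF_RELAX + 1) - 1;
	uint32_t strobe_read = clk_rate / (1000000000 / 40) + 2 * (DEF_RELAX + 1) - 1;
	uint32_t timing = BF(relax, OCOTP_TIMING_RELAX) |
			  BF(strobe_read, OCOTP_TIMING_STROBE_READ) |
			  BF(strobe_prog, OCOTP_TIMING_STROBE_PROG);

	printf("relax=%u strobe_read=%u strobe_prog=%u -> OCOTP_TIMING=0x%08x\n",
	       relax, strobe_read, strobe_prog, timing);
	return 0;
}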
// SPDX-License-Identifier: GPL-2.0-only
/*
* Amlogic Meson6, Meson8 and Meson8b eFuse Driver
*
* Copyright (c) 2017 Martin Blumenstingl <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#define MESON_MX_EFUSE_CNTL1 0x04
#define MESON_MX_EFUSE_CNTL1_PD_ENABLE BIT(27)
#define MESON_MX_EFUSE_CNTL1_AUTO_RD_BUSY BIT(26)
#define MESON_MX_EFUSE_CNTL1_AUTO_RD_START BIT(25)
#define MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE BIT(24)
#define MESON_MX_EFUSE_CNTL1_BYTE_WR_DATA GENMASK(23, 16)
#define MESON_MX_EFUSE_CNTL1_AUTO_WR_BUSY BIT(14)
#define MESON_MX_EFUSE_CNTL1_AUTO_WR_START BIT(13)
#define MESON_MX_EFUSE_CNTL1_AUTO_WR_ENABLE BIT(12)
#define MESON_MX_EFUSE_CNTL1_BYTE_ADDR_SET BIT(11)
#define MESON_MX_EFUSE_CNTL1_BYTE_ADDR_MASK GENMASK(10, 0)
#define MESON_MX_EFUSE_CNTL2 0x08
#define MESON_MX_EFUSE_CNTL4 0x10
#define MESON_MX_EFUSE_CNTL4_ENCRYPT_ENABLE BIT(10)
struct meson_mx_efuse_platform_data {
const char *name;
unsigned int word_size;
};
struct meson_mx_efuse {
void __iomem *base;
struct clk *core_clk;
struct nvmem_device *nvmem;
struct nvmem_config config;
};
static void meson_mx_efuse_mask_bits(struct meson_mx_efuse *efuse, u32 reg,
u32 mask, u32 set)
{
u32 data;
data = readl(efuse->base + reg);
data &= ~mask;
data |= (set & mask);
writel(data, efuse->base + reg);
}
static int meson_mx_efuse_hw_enable(struct meson_mx_efuse *efuse)
{
int err;
err = clk_prepare_enable(efuse->core_clk);
if (err)
return err;
/* power up the efuse */
meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
MESON_MX_EFUSE_CNTL1_PD_ENABLE, 0);
meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL4,
MESON_MX_EFUSE_CNTL4_ENCRYPT_ENABLE, 0);
return 0;
}
static void meson_mx_efuse_hw_disable(struct meson_mx_efuse *efuse)
{
meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
MESON_MX_EFUSE_CNTL1_PD_ENABLE,
MESON_MX_EFUSE_CNTL1_PD_ENABLE);
clk_disable_unprepare(efuse->core_clk);
}
static int meson_mx_efuse_read_addr(struct meson_mx_efuse *efuse,
unsigned int addr, u32 *value)
{
int err;
u32 regval;
/* write the address to read */
regval = FIELD_PREP(MESON_MX_EFUSE_CNTL1_BYTE_ADDR_MASK, addr);
meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
MESON_MX_EFUSE_CNTL1_BYTE_ADDR_MASK, regval);
/* inform the hardware that we changed the address */
meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
MESON_MX_EFUSE_CNTL1_BYTE_ADDR_SET,
MESON_MX_EFUSE_CNTL1_BYTE_ADDR_SET);
meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
MESON_MX_EFUSE_CNTL1_BYTE_ADDR_SET, 0);
/* start the read process */
meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
MESON_MX_EFUSE_CNTL1_AUTO_RD_START,
MESON_MX_EFUSE_CNTL1_AUTO_RD_START);
meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
MESON_MX_EFUSE_CNTL1_AUTO_RD_START, 0);
/*
* perform a dummy read to ensure that the HW has the RD_BUSY bit set
* when polling for the status below.
*/
readl(efuse->base + MESON_MX_EFUSE_CNTL1);
err = readl_poll_timeout_atomic(efuse->base + MESON_MX_EFUSE_CNTL1,
regval,
(!(regval & MESON_MX_EFUSE_CNTL1_AUTO_RD_BUSY)),
1, 1000);
if (err) {
dev_err(efuse->config.dev,
"Timeout while reading efuse address %u\n", addr);
return err;
}
*value = readl(efuse->base + MESON_MX_EFUSE_CNTL2);
return 0;
}
static int meson_mx_efuse_read(void *context, unsigned int offset,
void *buf, size_t bytes)
{
struct meson_mx_efuse *efuse = context;
u32 tmp;
int err, i, addr;
err = meson_mx_efuse_hw_enable(efuse);
if (err)
return err;
meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE,
MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE);
for (i = 0; i < bytes; i += efuse->config.word_size) {
addr = (offset + i) / efuse->config.word_size;
err = meson_mx_efuse_read_addr(efuse, addr, &tmp);
if (err)
break;
memcpy(buf + i, &tmp,
min_t(size_t, bytes - i, efuse->config.word_size));
}
meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE, 0);
meson_mx_efuse_hw_disable(efuse);
return err;
}
static const struct meson_mx_efuse_platform_data meson6_efuse_data = {
.name = "meson6-efuse",
.word_size = 1,
};
static const struct meson_mx_efuse_platform_data meson8_efuse_data = {
.name = "meson8-efuse",
.word_size = 4,
};
static const struct meson_mx_efuse_platform_data meson8b_efuse_data = {
.name = "meson8b-efuse",
.word_size = 4,
};
static const struct of_device_id meson_mx_efuse_match[] = {
{ .compatible = "amlogic,meson6-efuse", .data = &meson6_efuse_data },
{ .compatible = "amlogic,meson8-efuse", .data = &meson8_efuse_data },
{ .compatible = "amlogic,meson8b-efuse", .data = &meson8b_efuse_data },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, meson_mx_efuse_match);
static int meson_mx_efuse_probe(struct platform_device *pdev)
{
const struct meson_mx_efuse_platform_data *drvdata;
struct meson_mx_efuse *efuse;
drvdata = of_device_get_match_data(&pdev->dev);
if (!drvdata)
return -EINVAL;
efuse = devm_kzalloc(&pdev->dev, sizeof(*efuse), GFP_KERNEL);
if (!efuse)
return -ENOMEM;
efuse->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(efuse->base))
return PTR_ERR(efuse->base);
efuse->config.name = drvdata->name;
efuse->config.owner = THIS_MODULE;
efuse->config.dev = &pdev->dev;
efuse->config.priv = efuse;
efuse->config.stride = drvdata->word_size;
efuse->config.word_size = drvdata->word_size;
efuse->config.size = SZ_512;
efuse->config.read_only = true;
efuse->config.reg_read = meson_mx_efuse_read;
efuse->core_clk = devm_clk_get(&pdev->dev, "core");
if (IS_ERR(efuse->core_clk)) {
dev_err(&pdev->dev, "Failed to get core clock\n");
return PTR_ERR(efuse->core_clk);
}
efuse->nvmem = devm_nvmem_register(&pdev->dev, &efuse->config);
return PTR_ERR_OR_ZERO(efuse->nvmem);
}
static struct platform_driver meson_mx_efuse_driver = {
.probe = meson_mx_efuse_probe,
.driver = {
.name = "meson-mx-efuse",
.of_match_table = meson_mx_efuse_match,
},
};
module_platform_driver(meson_mx_efuse_driver);
MODULE_AUTHOR("Martin Blumenstingl <[email protected]>");
MODULE_DESCRIPTION("Amlogic Meson MX eFuse NVMEM driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/nvmem/meson-mx-efuse.c |
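meson_mx_efuse_mask_bits() above is a plain read-modify-write helper: clear the masked bits, then OR in the new value restricted to the same mask. A standalone sketch of the pattern on an ordinary variable standing in for the register, illustrative only:

#include <stdio.h>
#include <stdint.h>

static uint32_t reg;	/* stands in for the memory-mapped CNTL1 register */

/* Clear the masked bits, then OR in the new value restricted to the mask. */
static void mask_bits(uint32_t mask, uint32_t set)
{
	uint32_t data = reg;

	data &= ~mask;
	data |= (set & mask);
	reg = data;
}

int main(void)
{
	reg = 0xffffffffu;
	mask_bits(1u << 27, 0);			/* clear PD_ENABLE (power up) */
	printf("after clearing bit 27: 0x%08x\n", reg);
	mask_bits(1u << 24, 1u << 24);		/* set AUTO_RD_ENABLE */
	printf("after setting bit 24:  0x%08x\n", reg);
	return 0;
}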
// SPDX-License-Identifier: GPL-2.0+
/*
* Allwinner sunXi SoCs Security ID support.
*
* Copyright (c) 2013 Oliver Schinagl <[email protected]>
* Copyright (C) 2014 Maxime Ripard <[email protected]>
*/
#include <linux/device.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/random.h>
/* Registers and special values for doing register-based SID readout on H3 */
#define SUN8I_SID_PRCTL 0x40
#define SUN8I_SID_RDKEY 0x60
#define SUN8I_SID_OFFSET_MASK 0x1FF
#define SUN8I_SID_OFFSET_SHIFT 16
#define SUN8I_SID_OP_LOCK (0xAC << 8)
#define SUN8I_SID_READ BIT(1)
struct sunxi_sid_cfg {
u32 value_offset;
u32 size;
bool need_register_readout;
};
struct sunxi_sid {
void __iomem *base;
u32 value_offset;
};
static int sunxi_sid_read(void *context, unsigned int offset,
void *val, size_t bytes)
{
struct sunxi_sid *sid = context;
u32 word;
/* .stride = 4 so offset is guaranteed to be aligned */
__ioread32_copy(val, sid->base + sid->value_offset + offset, bytes / 4);
val += round_down(bytes, 4);
offset += round_down(bytes, 4);
bytes = bytes % 4;
if (!bytes)
return 0;
/* Handle any trailing bytes */
word = readl_relaxed(sid->base + sid->value_offset + offset);
memcpy(val, &word, bytes);
return 0;
}
static int sun8i_sid_register_readout(const struct sunxi_sid *sid,
const unsigned int offset,
u32 *out)
{
u32 reg_val;
int ret;
/* Set word, lock access, and set read command */
reg_val = (offset & SUN8I_SID_OFFSET_MASK)
<< SUN8I_SID_OFFSET_SHIFT;
reg_val |= SUN8I_SID_OP_LOCK | SUN8I_SID_READ;
writel(reg_val, sid->base + SUN8I_SID_PRCTL);
ret = readl_poll_timeout(sid->base + SUN8I_SID_PRCTL, reg_val,
!(reg_val & SUN8I_SID_READ), 100, 250000);
if (ret)
return ret;
if (out)
*out = readl(sid->base + SUN8I_SID_RDKEY);
writel(0, sid->base + SUN8I_SID_PRCTL);
return 0;
}
/*
 * On the Allwinner H3, the value at offset 0x200 of the SID controller is not
 * reliable, so read it through the control registers instead.
 */
static int sun8i_sid_read_by_reg(void *context, unsigned int offset,
void *val, size_t bytes)
{
struct sunxi_sid *sid = context;
u32 word;
int ret;
/* .stride = 4 so offset is guaranteed to be aligned */
while (bytes >= 4) {
ret = sun8i_sid_register_readout(sid, offset, val);
if (ret)
return ret;
val += 4;
offset += 4;
bytes -= 4;
}
if (!bytes)
return 0;
/* Handle any trailing bytes */
ret = sun8i_sid_register_readout(sid, offset, &word);
if (ret)
return ret;
memcpy(val, &word, bytes);
return 0;
}
static int sunxi_sid_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct nvmem_config *nvmem_cfg;
struct nvmem_device *nvmem;
struct sunxi_sid *sid;
int size;
char *randomness;
const struct sunxi_sid_cfg *cfg;
sid = devm_kzalloc(dev, sizeof(*sid), GFP_KERNEL);
if (!sid)
return -ENOMEM;
cfg = of_device_get_match_data(dev);
if (!cfg)
return -EINVAL;
sid->value_offset = cfg->value_offset;
sid->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sid->base))
return PTR_ERR(sid->base);
size = cfg->size;
nvmem_cfg = devm_kzalloc(dev, sizeof(*nvmem_cfg), GFP_KERNEL);
if (!nvmem_cfg)
return -ENOMEM;
nvmem_cfg->dev = dev;
nvmem_cfg->name = "sunxi-sid";
nvmem_cfg->type = NVMEM_TYPE_OTP;
nvmem_cfg->read_only = true;
nvmem_cfg->size = cfg->size;
nvmem_cfg->word_size = 1;
nvmem_cfg->stride = 4;
nvmem_cfg->priv = sid;
if (cfg->need_register_readout)
nvmem_cfg->reg_read = sun8i_sid_read_by_reg;
else
nvmem_cfg->reg_read = sunxi_sid_read;
nvmem = devm_nvmem_register(dev, nvmem_cfg);
if (IS_ERR(nvmem))
return PTR_ERR(nvmem);
randomness = kzalloc(size, GFP_KERNEL);
if (!randomness)
return -ENOMEM;
nvmem_cfg->reg_read(sid, 0, randomness, size);
add_device_randomness(randomness, size);
kfree(randomness);
platform_set_drvdata(pdev, nvmem);
return 0;
}
static const struct sunxi_sid_cfg sun4i_a10_cfg = {
.size = 0x10,
};
static const struct sunxi_sid_cfg sun7i_a20_cfg = {
.size = 0x200,
};
static const struct sunxi_sid_cfg sun8i_h3_cfg = {
.value_offset = 0x200,
.size = 0x100,
.need_register_readout = true,
};
static const struct sunxi_sid_cfg sun50i_a64_cfg = {
.value_offset = 0x200,
.size = 0x100,
};
static const struct sunxi_sid_cfg sun50i_h6_cfg = {
.value_offset = 0x200,
.size = 0x200,
};
static const struct of_device_id sunxi_sid_of_match[] = {
{ .compatible = "allwinner,sun4i-a10-sid", .data = &sun4i_a10_cfg },
{ .compatible = "allwinner,sun7i-a20-sid", .data = &sun7i_a20_cfg },
{ .compatible = "allwinner,sun8i-a83t-sid", .data = &sun50i_a64_cfg },
{ .compatible = "allwinner,sun8i-h3-sid", .data = &sun8i_h3_cfg },
{ .compatible = "allwinner,sun20i-d1-sid", .data = &sun50i_a64_cfg },
{ .compatible = "allwinner,sun50i-a64-sid", .data = &sun50i_a64_cfg },
{ .compatible = "allwinner,sun50i-h5-sid", .data = &sun50i_a64_cfg },
{ .compatible = "allwinner,sun50i-h6-sid", .data = &sun50i_h6_cfg },
{/* sentinel */},
};
MODULE_DEVICE_TABLE(of, sunxi_sid_of_match);
static struct platform_driver sunxi_sid_driver = {
.probe = sunxi_sid_probe,
.driver = {
.name = "eeprom-sunxi-sid",
.of_match_table = sunxi_sid_of_match,
},
};
module_platform_driver(sunxi_sid_driver);
MODULE_AUTHOR("Oliver Schinagl <[email protected]>");
MODULE_DESCRIPTION("Allwinner sunxi security id driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/nvmem/sunxi_sid.c |
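Both sunxi read paths above copy whole 32-bit words first and then fix up the one to three trailing bytes from a single extra word read. A standalone sketch of that split; the SID contents and buffer size are made up for illustration.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	uint32_t sid[4] = { 0x11223344, 0x55667788, 0x99aabbcc, 0xddeeff00 };
	uint8_t out[11];			/* deliberately not a multiple of 4 */
	size_t bytes = sizeof(out);
	size_t whole = bytes & ~(size_t)3;	/* round_down(bytes, 4) */
	uint32_t word;

	memcpy(out, sid, whole);		/* the word-wise copy part */
	if (bytes % 4) {
		word = sid[whole / 4];		/* one extra word read */
		memcpy(out + whole, &word, bytes % 4);	/* trailing bytes */
	}

	for (size_t i = 0; i < bytes; i++)
		printf("%02x%c", out[i], i + 1 == bytes ? '\n' : ' ');
	return 0;
}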
// SPDX-License-Identifier: GPL-2.0-only
/*
* i.MX9 OCOTP fusebox driver
*
* Copyright 2023 NXP
*/
#include <linux/device.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
enum fuse_type {
FUSE_FSB = 1,
FUSE_ELE = 2,
FUSE_INVALID = -1
};
struct ocotp_map_entry {
u32 start; /* start word */
u32 num; /* num words */
enum fuse_type type;
};
struct ocotp_devtype_data {
u32 reg_off;
char *name;
u32 size;
u32 num_entry;
u32 flag;
nvmem_reg_read_t reg_read;
struct ocotp_map_entry entry[];
};
struct imx_ocotp_priv {
struct device *dev;
void __iomem *base;
struct nvmem_config config;
struct mutex lock;
const struct ocotp_devtype_data *data;
};
static enum fuse_type imx_ocotp_fuse_type(void *context, u32 index)
{
struct imx_ocotp_priv *priv = context;
const struct ocotp_devtype_data *data = priv->data;
u32 start, end;
int i;
for (i = 0; i < data->num_entry; i++) {
start = data->entry[i].start;
end = data->entry[i].start + data->entry[i].num;
if (index >= start && index < end)
return data->entry[i].type;
}
return FUSE_INVALID;
}
static int imx_ocotp_reg_read(void *context, unsigned int offset, void *val, size_t bytes)
{
struct imx_ocotp_priv *priv = context;
void __iomem *reg = priv->base + priv->data->reg_off;
u32 count, index, num_bytes;
enum fuse_type type;
u32 *buf;
void *p;
int i;
index = offset;
num_bytes = round_up(bytes, 4);
count = num_bytes >> 2;
if (count > ((priv->data->size >> 2) - index))
count = (priv->data->size >> 2) - index;
p = kzalloc(num_bytes, GFP_KERNEL);
if (!p)
return -ENOMEM;
mutex_lock(&priv->lock);
buf = p;
for (i = index; i < (index + count); i++) {
type = imx_ocotp_fuse_type(context, i);
if (type == FUSE_INVALID || type == FUSE_ELE) {
*buf++ = 0;
continue;
}
*buf++ = readl_relaxed(reg + (i << 2));
}
memcpy(val, (u8 *)p, bytes);
mutex_unlock(&priv->lock);
kfree(p);
return 0;
};
static int imx_ele_ocotp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct imx_ocotp_priv *priv;
struct nvmem_device *nvmem;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->data = of_device_get_match_data(dev);
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
priv->config.dev = dev;
priv->config.name = "ELE-OCOTP";
priv->config.id = NVMEM_DEVID_AUTO;
priv->config.owner = THIS_MODULE;
priv->config.size = priv->data->size;
priv->config.reg_read = priv->data->reg_read;
priv->config.word_size = 4;
priv->config.stride = 1;
priv->config.priv = priv;
priv->config.read_only = true;
mutex_init(&priv->lock);
nvmem = devm_nvmem_register(dev, &priv->config);
if (IS_ERR(nvmem))
return PTR_ERR(nvmem);
return 0;
}
static const struct ocotp_devtype_data imx93_ocotp_data = {
.reg_off = 0x8000,
.reg_read = imx_ocotp_reg_read,
.size = 2048,
.num_entry = 6,
.entry = {
{ 0, 52, FUSE_FSB },
{ 63, 1, FUSE_ELE},
{ 128, 16, FUSE_ELE },
{ 182, 1, FUSE_ELE },
{ 188, 1, FUSE_ELE },
{ 312, 200, FUSE_FSB }
},
};
static const struct of_device_id imx_ele_ocotp_dt_ids[] = {
{ .compatible = "fsl,imx93-ocotp", .data = &imx93_ocotp_data, },
{},
};
MODULE_DEVICE_TABLE(of, imx_ele_ocotp_dt_ids);
static struct platform_driver imx_ele_ocotp_driver = {
.driver = {
.name = "imx_ele_ocotp",
.of_match_table = imx_ele_ocotp_dt_ids,
},
.probe = imx_ele_ocotp_probe,
};
module_platform_driver(imx_ele_ocotp_driver);
MODULE_DESCRIPTION("i.MX OCOTP/ELE driver");
MODULE_AUTHOR("Peng Fan <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/nvmem/imx-ocotp-ele.c |
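imx_ocotp_fuse_type() above is a linear search over [start, start + num) word ranges; words outside every range, and ELE-owned words, read back as zero. A standalone sketch using the same i.MX93 ranges, with arbitrary probe words:

#include <stdio.h>

enum fuse_type { FUSE_FSB = 1, FUSE_ELE = 2, FUSE_INVALID = -1 };

struct map_entry { unsigned int start, num; enum fuse_type type; };

/* Same ranges as imx93_ocotp_data above. */
static const struct map_entry map[] = {
	{ 0, 52, FUSE_FSB }, { 63, 1, FUSE_ELE }, { 128, 16, FUSE_ELE },
	{ 182, 1, FUSE_ELE }, { 188, 1, FUSE_ELE }, { 312, 200, FUSE_FSB },
};

static enum fuse_type fuse_type(unsigned int index)
{
	for (size_t i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (index >= map[i].start && index < map[i].start + map[i].num)
			return map[i].type;
	return FUSE_INVALID;
}

int main(void)
{
	unsigned int words[] = { 10, 63, 100, 400 };

	for (size_t i = 0; i < 4; i++)
		printf("word %3u -> type %d\n", words[i], fuse_type(words[i]));
	return 0;
}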
// SPDX-License-Identifier: GPL-2.0-only
/*
* i.MX IIM driver
*
* Copyright (c) 2017 Pengutronix, Michael Grzeschik <[email protected]>
*
* Based on the barebox iim driver,
* Copyright (c) 2010 Baruch Siach <[email protected]>,
* Orex Computed Radiography
*/
#include <linux/device.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/clk.h>
#define IIM_BANK_BASE(n) (0x800 + 0x400 * (n))
struct imx_iim_drvdata {
unsigned int nregs;
};
struct iim_priv {
void __iomem *base;
struct clk *clk;
};
static int imx_iim_read(void *context, unsigned int offset,
void *buf, size_t bytes)
{
struct iim_priv *iim = context;
int i, ret;
u8 *buf8 = buf;
ret = clk_prepare_enable(iim->clk);
if (ret)
return ret;
for (i = offset; i < offset + bytes; i++) {
int bank = i >> 5;
int reg = i & 0x1f;
*buf8++ = readl(iim->base + IIM_BANK_BASE(bank) + reg * 4);
}
clk_disable_unprepare(iim->clk);
return 0;
}
static struct imx_iim_drvdata imx27_drvdata = {
.nregs = 2 * 32,
};
static struct imx_iim_drvdata imx25_imx31_imx35_drvdata = {
.nregs = 3 * 32,
};
static struct imx_iim_drvdata imx51_drvdata = {
.nregs = 4 * 32,
};
static struct imx_iim_drvdata imx53_drvdata = {
.nregs = 4 * 32 + 16,
};
static const struct of_device_id imx_iim_dt_ids[] = {
{
.compatible = "fsl,imx25-iim",
.data = &imx25_imx31_imx35_drvdata,
}, {
.compatible = "fsl,imx27-iim",
.data = &imx27_drvdata,
}, {
.compatible = "fsl,imx31-iim",
.data = &imx25_imx31_imx35_drvdata,
}, {
.compatible = "fsl,imx35-iim",
.data = &imx25_imx31_imx35_drvdata,
}, {
.compatible = "fsl,imx51-iim",
.data = &imx51_drvdata,
}, {
.compatible = "fsl,imx53-iim",
.data = &imx53_drvdata,
}, {
/* sentinel */
},
};
MODULE_DEVICE_TABLE(of, imx_iim_dt_ids);
static int imx_iim_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct iim_priv *iim;
struct nvmem_device *nvmem;
struct nvmem_config cfg = {};
const struct imx_iim_drvdata *drvdata = NULL;
iim = devm_kzalloc(dev, sizeof(*iim), GFP_KERNEL);
if (!iim)
return -ENOMEM;
iim->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(iim->base))
return PTR_ERR(iim->base);
drvdata = of_device_get_match_data(&pdev->dev);
iim->clk = devm_clk_get(dev, NULL);
if (IS_ERR(iim->clk))
return PTR_ERR(iim->clk);
cfg.name = "imx-iim",
cfg.read_only = true,
cfg.word_size = 1,
cfg.stride = 1,
cfg.reg_read = imx_iim_read,
cfg.dev = dev;
cfg.size = drvdata->nregs;
cfg.priv = iim;
nvmem = devm_nvmem_register(dev, &cfg);
return PTR_ERR_OR_ZERO(nvmem);
}
static struct platform_driver imx_iim_driver = {
.probe = imx_iim_probe,
.driver = {
.name = "imx-iim",
.of_match_table = imx_iim_dt_ids,
},
};
module_platform_driver(imx_iim_driver);
MODULE_AUTHOR("Michael Grzeschik <[email protected]>");
MODULE_DESCRIPTION("i.MX IIM driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/nvmem/imx-iim.c |
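imx_iim_read() above splits each fuse byte index into a bank (32 bytes per bank) and a register within the bank, where every fuse byte occupies its own 32-bit register starting at IIM_BANK_BASE(bank). A standalone sketch of that address math, with arbitrary probe offsets:

#include <stdio.h>

#define IIM_BANK_BASE(n) (0x800 + 0x400 * (n))

int main(void)
{
	/* Each bank holds 32 fuse bytes; each byte sits in its own 32-bit register. */
	unsigned int offsets[] = { 0, 31, 32, 95 };

	for (unsigned int i = 0; i < 4; i++) {
		unsigned int off = offsets[i];
		unsigned int bank = off >> 5;
		unsigned int reg = off & 0x1f;

		printf("fuse byte %2u -> bank %u, MMIO offset 0x%03x\n",
		       off, bank, IIM_BANK_BASE(bank) + reg * 4);
	}
	return 0;
}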
// SPDX-License-Identifier: GPL-2.0-only
/*
* Layerscape SFP driver
*
* Copyright (c) 2022 Michael Walle <[email protected]>
*
*/
#include <linux/device.h>
#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
#define LAYERSCAPE_SFP_OTP_OFFSET 0x0200
struct layerscape_sfp_priv {
struct regmap *regmap;
};
struct layerscape_sfp_data {
int size;
enum regmap_endian endian;
};
static int layerscape_sfp_read(void *context, unsigned int offset, void *val,
size_t bytes)
{
struct layerscape_sfp_priv *priv = context;
return regmap_bulk_read(priv->regmap,
LAYERSCAPE_SFP_OTP_OFFSET + offset, val,
bytes / 4);
}
static struct nvmem_config layerscape_sfp_nvmem_config = {
.name = "fsl-sfp",
.reg_read = layerscape_sfp_read,
.word_size = 4,
.stride = 4,
};
static int layerscape_sfp_probe(struct platform_device *pdev)
{
const struct layerscape_sfp_data *data;
struct layerscape_sfp_priv *priv;
struct nvmem_device *nvmem;
struct regmap_config config = { 0 };
void __iomem *base;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
data = device_get_match_data(&pdev->dev);
config.reg_bits = 32;
config.reg_stride = 4;
config.val_bits = 32;
config.val_format_endian = data->endian;
config.max_register = LAYERSCAPE_SFP_OTP_OFFSET + data->size - 4;
priv->regmap = devm_regmap_init_mmio(&pdev->dev, base, &config);
if (IS_ERR(priv->regmap))
return PTR_ERR(priv->regmap);
layerscape_sfp_nvmem_config.size = data->size;
layerscape_sfp_nvmem_config.dev = &pdev->dev;
layerscape_sfp_nvmem_config.priv = priv;
nvmem = devm_nvmem_register(&pdev->dev, &layerscape_sfp_nvmem_config);
return PTR_ERR_OR_ZERO(nvmem);
}
static const struct layerscape_sfp_data ls1021a_data = {
.size = 0x88,
.endian = REGMAP_ENDIAN_BIG,
};
static const struct layerscape_sfp_data ls1028a_data = {
.size = 0x88,
.endian = REGMAP_ENDIAN_LITTLE,
};
static const struct of_device_id layerscape_sfp_dt_ids[] = {
{ .compatible = "fsl,ls1021a-sfp", .data = &ls1021a_data },
{ .compatible = "fsl,ls1028a-sfp", .data = &ls1028a_data },
{},
};
MODULE_DEVICE_TABLE(of, layerscape_sfp_dt_ids);
static struct platform_driver layerscape_sfp_driver = {
.probe = layerscape_sfp_probe,
.driver = {
.name = "layerscape_sfp",
.of_match_table = layerscape_sfp_dt_ids,
},
};
module_platform_driver(layerscape_sfp_driver);
MODULE_AUTHOR("Michael Walle <[email protected]>");
MODULE_DESCRIPTION("Layerscape Security Fuse Processor driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/nvmem/layerscape-sfp.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/mod_devicetable.h>
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
/**
* struct sec_qfprom - structure holding secure qfprom attributes
*
* @base: starting physical address for secure qfprom corrected address space.
* @dev: qfprom device structure.
*/
struct sec_qfprom {
phys_addr_t base;
struct device *dev;
};
static int sec_qfprom_reg_read(void *context, unsigned int reg, void *_val, size_t bytes)
{
struct sec_qfprom *priv = context;
unsigned int i;
u8 *val = _val;
u32 read_val;
u8 *tmp;
for (i = 0; i < bytes; i++, reg++) {
if (i == 0 || reg % 4 == 0) {
if (qcom_scm_io_readl(priv->base + (reg & ~3), &read_val)) {
dev_err(priv->dev, "Couldn't access fuse register\n");
return -EINVAL;
}
tmp = (u8 *)&read_val;
}
val[i] = tmp[reg & 3];
}
return 0;
}
static int sec_qfprom_probe(struct platform_device *pdev)
{
struct nvmem_config econfig = {
.name = "sec-qfprom",
.stride = 1,
.word_size = 1,
.id = NVMEM_DEVID_AUTO,
.reg_read = sec_qfprom_reg_read,
};
struct device *dev = &pdev->dev;
struct nvmem_device *nvmem;
struct sec_qfprom *priv;
struct resource *res;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -EINVAL;
priv->base = res->start;
econfig.size = resource_size(res);
econfig.dev = dev;
econfig.priv = priv;
priv->dev = dev;
nvmem = devm_nvmem_register(dev, &econfig);
return PTR_ERR_OR_ZERO(nvmem);
}
static const struct of_device_id sec_qfprom_of_match[] = {
{ .compatible = "qcom,sec-qfprom" },
{/* sentinel */},
};
MODULE_DEVICE_TABLE(of, sec_qfprom_of_match);
static struct platform_driver qfprom_driver = {
.probe = sec_qfprom_probe,
.driver = {
.name = "qcom_sec_qfprom",
.of_match_table = sec_qfprom_of_match,
},
};
module_platform_driver(qfprom_driver);
MODULE_DESCRIPTION("Qualcomm Secure QFPROM driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/nvmem/sec-qfprom.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* JZ4780 EFUSE Memory Support driver
*
* Copyright (c) 2017 PrasannaKumar Muralidharan <[email protected]>
* Copyright (c) 2020 H. Nikolaus Schaller <[email protected]>
*/
/*
 * Currently supports the JZ4780 efuse, which has 8K programmable bits.
 * The efuse is separated into seven segments as below:
 *
 * -----------------------------------------------------------------------
 * | 64 bit | 128 bit | 128 bit | 3520 bit | 8 bit | 2296 bit | 2048 bit |
 * -----------------------------------------------------------------------
 *
 * The ROM itself is accessed using a 9-bit address line and an 8-word-wide bus
 * which reads/writes based on strobes. The strobe is configured in the config
 * register and is based on the number of cycles of the bus clock.
 *
 * The driver supports reads only, as writes are done in the factory.
 */
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/timer.h>
#define JZ_EFUCTRL (0x0) /* Control Register */
#define JZ_EFUCFG (0x4) /* Configure Register*/
#define JZ_EFUSTATE (0x8) /* Status Register */
#define JZ_EFUDATA(n) (0xC + (n) * 4)
/* We read 32 byte chunks to avoid complexity in the driver. */
#define JZ_EFU_READ_SIZE 32
#define EFUCTRL_ADDR_MASK 0x3FF
#define EFUCTRL_ADDR_SHIFT 21
#define EFUCTRL_LEN_MASK 0x1F
#define EFUCTRL_LEN_SHIFT 16
#define EFUCTRL_PG_EN BIT(15)
#define EFUCTRL_WR_EN BIT(1)
#define EFUCTRL_RD_EN BIT(0)
#define EFUCFG_INT_EN BIT(31)
#define EFUCFG_RD_ADJ_MASK 0xF
#define EFUCFG_RD_ADJ_SHIFT 20
#define EFUCFG_RD_STR_MASK 0xF
#define EFUCFG_RD_STR_SHIFT 16
#define EFUCFG_WR_ADJ_MASK 0xF
#define EFUCFG_WR_ADJ_SHIFT 12
#define EFUCFG_WR_STR_MASK 0xFFF
#define EFUCFG_WR_STR_SHIFT 0
#define EFUSTATE_WR_DONE BIT(1)
#define EFUSTATE_RD_DONE BIT(0)
struct jz4780_efuse {
struct device *dev;
struct regmap *map;
struct clk *clk;
};
/* main entry point */
static int jz4780_efuse_read(void *context, unsigned int offset,
void *val, size_t bytes)
{
struct jz4780_efuse *efuse = context;
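	/*
	 * The controller always transfers a full 32-byte chunk into the data
	 * registers, so align each access down to a chunk boundary and copy
	 * out only the part the caller asked for.
	 */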
while (bytes > 0) {
size_t start = offset & ~(JZ_EFU_READ_SIZE - 1);
size_t chunk = min(bytes, (start + JZ_EFU_READ_SIZE)
- offset);
char buf[JZ_EFU_READ_SIZE];
unsigned int tmp;
u32 ctrl;
int ret;
ctrl = (start << EFUCTRL_ADDR_SHIFT)
| ((JZ_EFU_READ_SIZE - 1) << EFUCTRL_LEN_SHIFT)
| EFUCTRL_RD_EN;
regmap_update_bits(efuse->map, JZ_EFUCTRL,
(EFUCTRL_ADDR_MASK << EFUCTRL_ADDR_SHIFT) |
(EFUCTRL_LEN_MASK << EFUCTRL_LEN_SHIFT) |
EFUCTRL_PG_EN | EFUCTRL_WR_EN |
EFUCTRL_RD_EN,
ctrl);
ret = regmap_read_poll_timeout(efuse->map, JZ_EFUSTATE,
tmp, tmp & EFUSTATE_RD_DONE,
1 * MSEC_PER_SEC,
50 * MSEC_PER_SEC);
if (ret < 0) {
dev_err(efuse->dev, "Time out while reading efuse data");
return ret;
}
ret = regmap_bulk_read(efuse->map, JZ_EFUDATA(0),
buf, JZ_EFU_READ_SIZE / sizeof(u32));
if (ret < 0)
return ret;
memcpy(val, &buf[offset - start], chunk);
val += chunk;
offset += chunk;
bytes -= chunk;
}
return 0;
}
static struct nvmem_config jz4780_efuse_nvmem_config = {
.name = "jz4780-efuse",
.size = 1024,
.word_size = 1,
.stride = 1,
.owner = THIS_MODULE,
.reg_read = jz4780_efuse_read,
};
static const struct regmap_config jz4780_efuse_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.max_register = JZ_EFUDATA(7),
};
static void clk_disable_unprepare_helper(void *clock)
{
clk_disable_unprepare(clock);
}
static int jz4780_efuse_probe(struct platform_device *pdev)
{
struct nvmem_device *nvmem;
struct jz4780_efuse *efuse;
struct nvmem_config cfg;
unsigned long clk_rate;
unsigned long rd_adj;
unsigned long rd_strobe;
struct device *dev = &pdev->dev;
void __iomem *regs;
int ret;
efuse = devm_kzalloc(dev, sizeof(*efuse), GFP_KERNEL);
if (!efuse)
return -ENOMEM;
regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
efuse->map = devm_regmap_init_mmio(dev, regs,
&jz4780_efuse_regmap_config);
if (IS_ERR(efuse->map))
return PTR_ERR(efuse->map);
efuse->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(efuse->clk))
return PTR_ERR(efuse->clk);
ret = clk_prepare_enable(efuse->clk);
if (ret < 0)
return ret;
ret = devm_add_action_or_reset(&pdev->dev,
clk_disable_unprepare_helper,
efuse->clk);
if (ret < 0)
return ret;
clk_rate = clk_get_rate(efuse->clk);
efuse->dev = dev;
/*
* rd_adj and rd_strobe are 4 bit values
* conditions:
* bus clk_period * (rd_adj + 1) > 6.5ns
* bus clk_period * (rd_adj + 5 + rd_strobe) > 35ns
* i.e. rd_adj >= 6.5ns / clk_period
* i.e. rd_strobe >= 35 ns / clk_period - 5 - rd_adj + 1
* constants:
* 1 / 6.5ns == 153846154 Hz
* 1 / 35ns == 28571429 Hz
*/
rd_adj = clk_rate / 153846154;
rd_strobe = clk_rate / 28571429 - 5 - rd_adj + 1;
if (rd_adj > EFUCFG_RD_ADJ_MASK ||
rd_strobe > EFUCFG_RD_STR_MASK) {
dev_err(&pdev->dev, "Cannot set clock configuration\n");
return -EINVAL;
}
regmap_update_bits(efuse->map, JZ_EFUCFG,
(EFUCFG_RD_ADJ_MASK << EFUCFG_RD_ADJ_SHIFT) |
(EFUCFG_RD_STR_MASK << EFUCFG_RD_STR_SHIFT),
(rd_adj << EFUCFG_RD_ADJ_SHIFT) |
(rd_strobe << EFUCFG_RD_STR_SHIFT));
cfg = jz4780_efuse_nvmem_config;
cfg.dev = &pdev->dev;
cfg.priv = efuse;
nvmem = devm_nvmem_register(dev, &cfg);
return PTR_ERR_OR_ZERO(nvmem);
}
static const struct of_device_id jz4780_efuse_match[] = {
{ .compatible = "ingenic,jz4780-efuse" },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, jz4780_efuse_match);
static struct platform_driver jz4780_efuse_driver = {
.probe = jz4780_efuse_probe,
.driver = {
.name = "jz4780-efuse",
.of_match_table = jz4780_efuse_match,
},
};
module_platform_driver(jz4780_efuse_driver);
MODULE_AUTHOR("PrasannaKumar Muralidharan <[email protected]>");
MODULE_AUTHOR("H. Nikolaus Schaller <[email protected]>");
MODULE_AUTHOR("Paul Cercueil <[email protected]>");
MODULE_DESCRIPTION("Ingenic JZ4780 efuse driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/nvmem/jz4780-efuse.c |
// SPDX-License-Identifier: GPL-2.0
/*
* nvmem framework core.
*
* Copyright (C) 2015 Srinivas Kandagatla <[email protected]>
* Copyright (C) 2013 Maxime Ripard <[email protected]>
*/
#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/slab.h>
struct nvmem_device {
struct module *owner;
struct device dev;
int stride;
int word_size;
int id;
struct kref refcnt;
size_t size;
bool read_only;
bool root_only;
int flags;
enum nvmem_type type;
struct bin_attribute eeprom;
struct device *base_dev;
struct list_head cells;
const struct nvmem_keepout *keepout;
unsigned int nkeepout;
nvmem_reg_read_t reg_read;
nvmem_reg_write_t reg_write;
struct gpio_desc *wp_gpio;
struct nvmem_layout *layout;
void *priv;
};
#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)
#define FLAG_COMPAT BIT(0)
struct nvmem_cell_entry {
const char *name;
int offset;
size_t raw_len;
int bytes;
int bit_offset;
int nbits;
nvmem_cell_post_process_t read_post_process;
void *priv;
struct device_node *np;
struct nvmem_device *nvmem;
struct list_head node;
};
struct nvmem_cell {
struct nvmem_cell_entry *entry;
const char *id;
int index;
};
static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);
static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);
static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);
static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);
static DEFINE_SPINLOCK(nvmem_layout_lock);
static LIST_HEAD(nvmem_layouts);
static int __nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
void *val, size_t bytes)
{
if (nvmem->reg_read)
return nvmem->reg_read(nvmem->priv, offset, val, bytes);
return -EINVAL;
}
static int __nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
void *val, size_t bytes)
{
int ret;
if (nvmem->reg_write) {
gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
return ret;
}
return -EINVAL;
}
static int nvmem_access_with_keepouts(struct nvmem_device *nvmem,
unsigned int offset, void *val,
size_t bytes, int write)
{
unsigned int end = offset + bytes;
unsigned int kend, ksize;
const struct nvmem_keepout *keepout = nvmem->keepout;
const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
int rc;
/*
* Skip all keepouts before the range being accessed.
* Keepouts are sorted.
*/
while ((keepout < keepoutend) && (keepout->end <= offset))
keepout++;
while ((offset < end) && (keepout < keepoutend)) {
/* Access the valid portion before the keepout. */
if (offset < keepout->start) {
kend = min(end, keepout->start);
ksize = kend - offset;
if (write)
rc = __nvmem_reg_write(nvmem, offset, val, ksize);
else
rc = __nvmem_reg_read(nvmem, offset, val, ksize);
if (rc)
return rc;
offset += ksize;
val += ksize;
}
/*
* Now we're aligned to the start of this keepout zone. Go
* through it.
*/
kend = min(end, keepout->end);
ksize = kend - offset;
if (!write)
memset(val, keepout->value, ksize);
val += ksize;
offset += ksize;
keepout++;
}
/*
* If we ran out of keepouts but there's still stuff to do, send it
* down directly
*/
if (offset < end) {
ksize = end - offset;
if (write)
return __nvmem_reg_write(nvmem, offset, val, ksize);
else
return __nvmem_reg_read(nvmem, offset, val, ksize);
}
return 0;
}
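/* Provider read/write entry points that honour keepout regions when present. */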
static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
void *val, size_t bytes)
{
if (!nvmem->nkeepout)
return __nvmem_reg_read(nvmem, offset, val, bytes);
return nvmem_access_with_keepouts(nvmem, offset, val, bytes, false);
}
static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
void *val, size_t bytes)
{
if (!nvmem->nkeepout)
return __nvmem_reg_write(nvmem, offset, val, bytes);
return nvmem_access_with_keepouts(nvmem, offset, val, bytes, true);
}
#ifdef CONFIG_NVMEM_SYSFS
static const char * const nvmem_type_str[] = {
[NVMEM_TYPE_UNKNOWN] = "Unknown",
[NVMEM_TYPE_EEPROM] = "EEPROM",
[NVMEM_TYPE_OTP] = "OTP",
[NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
[NVMEM_TYPE_FRAM] = "FRAM",
};
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
#endif
static ssize_t type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nvmem_device *nvmem = to_nvmem_device(dev);
return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
}
static DEVICE_ATTR_RO(type);
static struct attribute *nvmem_attrs[] = {
&dev_attr_type.attr,
NULL,
};
static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t pos, size_t count)
{
struct device *dev;
struct nvmem_device *nvmem;
int rc;
if (attr->private)
dev = attr->private;
else
dev = kobj_to_dev(kobj);
nvmem = to_nvmem_device(dev);
/* Stop the user from reading */
if (pos >= nvmem->size)
return 0;
if (!IS_ALIGNED(pos, nvmem->stride))
return -EINVAL;
if (count < nvmem->word_size)
return -EINVAL;
if (pos + count > nvmem->size)
count = nvmem->size - pos;
count = round_down(count, nvmem->word_size);
if (!nvmem->reg_read)
return -EPERM;
rc = nvmem_reg_read(nvmem, pos, buf, count);
if (rc)
return rc;
return count;
}
static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t pos, size_t count)
{
struct device *dev;
struct nvmem_device *nvmem;
int rc;
if (attr->private)
dev = attr->private;
else
dev = kobj_to_dev(kobj);
nvmem = to_nvmem_device(dev);
/* Stop the user from writing */
if (pos >= nvmem->size)
return -EFBIG;
if (!IS_ALIGNED(pos, nvmem->stride))
return -EINVAL;
if (count < nvmem->word_size)
return -EINVAL;
if (pos + count > nvmem->size)
count = nvmem->size - pos;
count = round_down(count, nvmem->word_size);
if (!nvmem->reg_write)
return -EPERM;
rc = nvmem_reg_write(nvmem, pos, buf, count);
if (rc)
return rc;
return count;
}
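/*
 * Derive the sysfs mode from the device's root_only/read_only flags, then
 * drop read/write permission when the corresponding callback is missing.
 */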
static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
{
umode_t mode = 0400;
if (!nvmem->root_only)
mode |= 0044;
if (!nvmem->read_only)
mode |= 0200;
if (!nvmem->reg_write)
mode &= ~0200;
if (!nvmem->reg_read)
mode &= ~0444;
return mode;
}
static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
struct bin_attribute *attr, int i)
{
struct device *dev = kobj_to_dev(kobj);
struct nvmem_device *nvmem = to_nvmem_device(dev);
attr->size = nvmem->size;
return nvmem_bin_attr_get_umode(nvmem);
}
/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
.attr = {
.name = "nvmem",
.mode = 0644,
},
.read = bin_attr_nvmem_read,
.write = bin_attr_nvmem_write,
};
static struct bin_attribute *nvmem_bin_attributes[] = {
&bin_attr_rw_nvmem,
NULL,
};
static const struct attribute_group nvmem_bin_group = {
.bin_attrs = nvmem_bin_attributes,
.attrs = nvmem_attrs,
.is_bin_visible = nvmem_bin_attr_is_visible,
};
static const struct attribute_group *nvmem_dev_groups[] = {
&nvmem_bin_group,
NULL,
};
static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
.attr = {
.name = "eeprom",
},
.read = bin_attr_nvmem_read,
.write = bin_attr_nvmem_write,
};
/*
 * nvmem_sysfs_setup_compat() - Create an additional binary entry in the
 * driver's sysfs directory, to be backwards compatible with the older
 * drivers/misc/eeprom drivers.
 */
static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
const struct nvmem_config *config)
{
int rval;
if (!config->compat)
return 0;
if (!config->base_dev)
return -EINVAL;
if (config->type == NVMEM_TYPE_FRAM)
bin_attr_nvmem_eeprom_compat.attr.name = "fram";
nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
nvmem->eeprom.private = &nvmem->dev;
nvmem->base_dev = config->base_dev;
rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
if (rval) {
dev_err(&nvmem->dev,
"Failed to create eeprom binary file %d\n", rval);
return rval;
}
nvmem->flags |= FLAG_COMPAT;
return 0;
}
static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
const struct nvmem_config *config)
{
if (config->compat)
device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
}
#else /* CONFIG_NVMEM_SYSFS */
static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
const struct nvmem_config *config)
{
return -ENOSYS;
}
static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
const struct nvmem_config *config)
{
}
#endif /* CONFIG_NVMEM_SYSFS */
static void nvmem_release(struct device *dev)
{
struct nvmem_device *nvmem = to_nvmem_device(dev);
ida_free(&nvmem_ida, nvmem->id);
gpiod_put(nvmem->wp_gpio);
kfree(nvmem);
}
static const struct device_type nvmem_provider_type = {
.release = nvmem_release,
};
static struct bus_type nvmem_bus_type = {
.name = "nvmem",
};
static void nvmem_cell_entry_drop(struct nvmem_cell_entry *cell)
{
blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
mutex_lock(&nvmem_mutex);
list_del(&cell->node);
mutex_unlock(&nvmem_mutex);
of_node_put(cell->np);
kfree_const(cell->name);
kfree(cell);
}
static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
struct nvmem_cell_entry *cell, *p;
list_for_each_entry_safe(cell, p, &nvmem->cells, node)
nvmem_cell_entry_drop(cell);
}
static void nvmem_cell_entry_add(struct nvmem_cell_entry *cell)
{
mutex_lock(&nvmem_mutex);
list_add_tail(&cell->node, &cell->nvmem->cells);
mutex_unlock(&nvmem_mutex);
blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}
static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem,
const struct nvmem_cell_info *info,
struct nvmem_cell_entry *cell)
{
cell->nvmem = nvmem;
cell->offset = info->offset;
cell->raw_len = info->raw_len ?: info->bytes;
cell->bytes = info->bytes;
cell->name = info->name;
cell->read_post_process = info->read_post_process;
cell->priv = info->priv;
cell->bit_offset = info->bit_offset;
cell->nbits = info->nbits;
cell->np = info->np;
if (cell->nbits)
cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
BITS_PER_BYTE);
if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
dev_err(&nvmem->dev,
"cell %s unaligned to nvmem stride %d\n",
cell->name ?: "<unknown>", nvmem->stride);
return -EINVAL;
}
return 0;
}
static int nvmem_cell_info_to_nvmem_cell_entry(struct nvmem_device *nvmem,
const struct nvmem_cell_info *info,
struct nvmem_cell_entry *cell)
{
int err;
err = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, cell);
if (err)
return err;
cell->name = kstrdup_const(info->name, GFP_KERNEL);
if (!cell->name)
return -ENOMEM;
return 0;
}
/**
* nvmem_add_one_cell() - Add one cell information to an nvmem device
*
* @nvmem: nvmem device to add cells to.
* @info: nvmem cell info to add to the device
*
* Return: 0 or negative error code on failure.
*/
int nvmem_add_one_cell(struct nvmem_device *nvmem,
const struct nvmem_cell_info *info)
{
struct nvmem_cell_entry *cell;
int rval;
cell = kzalloc(sizeof(*cell), GFP_KERNEL);
if (!cell)
return -ENOMEM;
rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
if (rval) {
kfree(cell);
return rval;
}
nvmem_cell_entry_add(cell);
return 0;
}
EXPORT_SYMBOL_GPL(nvmem_add_one_cell);
/**
* nvmem_add_cells() - Add cell information to an nvmem device
*
* @nvmem: nvmem device to add cells to.
* @info: nvmem cell info to add to the device
* @ncells: number of cells in info
*
* Return: 0 or negative error code on failure.
*/
static int nvmem_add_cells(struct nvmem_device *nvmem,
const struct nvmem_cell_info *info,
int ncells)
{
int i, rval;
for (i = 0; i < ncells; i++) {
rval = nvmem_add_one_cell(nvmem, &info[i]);
if (rval)
return rval;
}
return 0;
}
/**
* nvmem_register_notifier() - Register a notifier block for nvmem events.
*
* @nb: notifier block to be called on nvmem events.
*
* Return: 0 on success, negative error number on failure.
*/
int nvmem_register_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);
/**
* nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
*
* @nb: notifier block to be unregistered.
*
* Return: 0 on success, negative error number on failure.
*/
int nvmem_unregister_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);
static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
{
const struct nvmem_cell_info *info;
struct nvmem_cell_table *table;
struct nvmem_cell_entry *cell;
int rval = 0, i;
mutex_lock(&nvmem_cell_mutex);
list_for_each_entry(table, &nvmem_cell_tables, node) {
if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
for (i = 0; i < table->ncells; i++) {
info = &table->cells[i];
cell = kzalloc(sizeof(*cell), GFP_KERNEL);
if (!cell) {
rval = -ENOMEM;
goto out;
}
rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
if (rval) {
kfree(cell);
goto out;
}
nvmem_cell_entry_add(cell);
}
}
}
out:
mutex_unlock(&nvmem_cell_mutex);
return rval;
}
static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
struct nvmem_cell_entry *iter, *cell = NULL;
mutex_lock(&nvmem_mutex);
list_for_each_entry(iter, &nvmem->cells, node) {
if (strcmp(cell_id, iter->name) == 0) {
cell = iter;
break;
}
}
mutex_unlock(&nvmem_mutex);
return cell;
}
static int nvmem_validate_keepouts(struct nvmem_device *nvmem)
{
unsigned int cur = 0;
const struct nvmem_keepout *keepout = nvmem->keepout;
const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
while (keepout < keepoutend) {
/* Ensure keepouts are sorted and don't overlap. */
if (keepout->start < cur) {
dev_err(&nvmem->dev,
"Keepout regions aren't sorted or overlap.\n");
return -ERANGE;
}
if (keepout->end < keepout->start) {
dev_err(&nvmem->dev,
"Invalid keepout region.\n");
return -EINVAL;
}
/*
* Validate keepouts (and holes between) don't violate
* word_size constraints.
*/
if ((keepout->end - keepout->start < nvmem->word_size) ||
((keepout->start != cur) &&
(keepout->start - cur < nvmem->word_size))) {
dev_err(&nvmem->dev,
"Keepout regions violate word_size constraints.\n");
return -ERANGE;
}
/* Validate keepouts don't violate stride (alignment). */
if (!IS_ALIGNED(keepout->start, nvmem->stride) ||
!IS_ALIGNED(keepout->end, nvmem->stride)) {
dev_err(&nvmem->dev,
"Keepout regions violate stride.\n");
return -EINVAL;
}
cur = keepout->end;
keepout++;
}
return 0;
}
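/*
 * Parse fixed cells described as child nodes of @np: "reg" gives the byte
 * offset and length, and an optional "bits" property describes a sub-byte
 * bit-field within that range.
 */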
static int nvmem_add_cells_from_dt(struct nvmem_device *nvmem, struct device_node *np)
{
struct nvmem_layout *layout = nvmem->layout;
struct device *dev = &nvmem->dev;
struct device_node *child;
const __be32 *addr;
int len, ret;
for_each_child_of_node(np, child) {
struct nvmem_cell_info info = {0};
addr = of_get_property(child, "reg", &len);
if (!addr)
continue;
if (len < 2 * sizeof(u32)) {
dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
of_node_put(child);
return -EINVAL;
}
info.offset = be32_to_cpup(addr++);
info.bytes = be32_to_cpup(addr);
info.name = kasprintf(GFP_KERNEL, "%pOFn", child);
addr = of_get_property(child, "bits", &len);
if (addr && len == (2 * sizeof(u32))) {
info.bit_offset = be32_to_cpup(addr++);
info.nbits = be32_to_cpup(addr);
}
info.np = of_node_get(child);
if (layout && layout->fixup_cell_info)
layout->fixup_cell_info(nvmem, layout, &info);
ret = nvmem_add_one_cell(nvmem, &info);
kfree(info.name);
if (ret) {
of_node_put(child);
return ret;
}
}
return 0;
}
static int nvmem_add_cells_from_legacy_of(struct nvmem_device *nvmem)
{
return nvmem_add_cells_from_dt(nvmem, nvmem->dev.of_node);
}
static int nvmem_add_cells_from_fixed_layout(struct nvmem_device *nvmem)
{
struct device_node *layout_np;
int err = 0;
layout_np = of_nvmem_layout_get_container(nvmem);
if (!layout_np)
return 0;
if (of_device_is_compatible(layout_np, "fixed-layout"))
err = nvmem_add_cells_from_dt(nvmem, layout_np);
of_node_put(layout_np);
return err;
}
int __nvmem_layout_register(struct nvmem_layout *layout, struct module *owner)
{
layout->owner = owner;
spin_lock(&nvmem_layout_lock);
list_add(&layout->node, &nvmem_layouts);
spin_unlock(&nvmem_layout_lock);
blocking_notifier_call_chain(&nvmem_notifier, NVMEM_LAYOUT_ADD, layout);
return 0;
}
EXPORT_SYMBOL_GPL(__nvmem_layout_register);
void nvmem_layout_unregister(struct nvmem_layout *layout)
{
blocking_notifier_call_chain(&nvmem_notifier, NVMEM_LAYOUT_REMOVE, layout);
spin_lock(&nvmem_layout_lock);
list_del(&layout->node);
spin_unlock(&nvmem_layout_lock);
}
EXPORT_SYMBOL_GPL(nvmem_layout_unregister);
static struct nvmem_layout *nvmem_layout_get(struct nvmem_device *nvmem)
{
struct device_node *layout_np;
struct nvmem_layout *l, *layout = ERR_PTR(-EPROBE_DEFER);
layout_np = of_nvmem_layout_get_container(nvmem);
if (!layout_np)
return NULL;
/*
* In case the nvmem device was built-in while the layout was built as a
* module, we shall manually request the layout driver loading otherwise
* we'll never have any match.
*/
of_request_module(layout_np);
spin_lock(&nvmem_layout_lock);
list_for_each_entry(l, &nvmem_layouts, node) {
if (of_match_node(l->of_match_table, layout_np)) {
if (try_module_get(l->owner))
layout = l;
break;
}
}
spin_unlock(&nvmem_layout_lock);
of_node_put(layout_np);
return layout;
}
static void nvmem_layout_put(struct nvmem_layout *layout)
{
if (layout)
module_put(layout->owner);
}
static int nvmem_add_cells_from_layout(struct nvmem_device *nvmem)
{
struct nvmem_layout *layout = nvmem->layout;
int ret;
if (layout && layout->add_cells) {
ret = layout->add_cells(&nvmem->dev, nvmem, layout);
if (ret)
return ret;
}
return 0;
}
#if IS_ENABLED(CONFIG_OF)
/**
* of_nvmem_layout_get_container() - Get OF node to layout container.
*
* @nvmem: nvmem device.
*
* Return: a node pointer with refcount incremented or NULL if no
* container exists. Use of_node_put() on it when done.
*/
struct device_node *of_nvmem_layout_get_container(struct nvmem_device *nvmem)
{
return of_get_child_by_name(nvmem->dev.of_node, "nvmem-layout");
}
EXPORT_SYMBOL_GPL(of_nvmem_layout_get_container);
#endif
const void *nvmem_layout_get_match_data(struct nvmem_device *nvmem,
struct nvmem_layout *layout)
{
struct device_node __maybe_unused *layout_np;
const struct of_device_id *match;
layout_np = of_nvmem_layout_get_container(nvmem);
match = of_match_node(layout->of_match_table, layout_np);
return match ? match->data : NULL;
}
EXPORT_SYMBOL_GPL(nvmem_layout_get_match_data);
/**
* nvmem_register() - Register a nvmem device for given nvmem_config.
* Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
*
* @config: nvmem device configuration with which nvmem device is created.
*
* Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
* on success.
*/
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
struct nvmem_device *nvmem;
int rval;
if (!config->dev)
return ERR_PTR(-EINVAL);
if (!config->reg_read && !config->reg_write)
return ERR_PTR(-EINVAL);
nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
if (!nvmem)
return ERR_PTR(-ENOMEM);
rval = ida_alloc(&nvmem_ida, GFP_KERNEL);
if (rval < 0) {
kfree(nvmem);
return ERR_PTR(rval);
}
nvmem->id = rval;
nvmem->dev.type = &nvmem_provider_type;
nvmem->dev.bus = &nvmem_bus_type;
nvmem->dev.parent = config->dev;
device_initialize(&nvmem->dev);
if (!config->ignore_wp)
nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
GPIOD_OUT_HIGH);
if (IS_ERR(nvmem->wp_gpio)) {
rval = PTR_ERR(nvmem->wp_gpio);
nvmem->wp_gpio = NULL;
goto err_put_device;
}
kref_init(&nvmem->refcnt);
INIT_LIST_HEAD(&nvmem->cells);
nvmem->owner = config->owner;
if (!nvmem->owner && config->dev->driver)
nvmem->owner = config->dev->driver->owner;
nvmem->stride = config->stride ?: 1;
nvmem->word_size = config->word_size ?: 1;
nvmem->size = config->size;
nvmem->root_only = config->root_only;
nvmem->priv = config->priv;
nvmem->type = config->type;
nvmem->reg_read = config->reg_read;
nvmem->reg_write = config->reg_write;
nvmem->keepout = config->keepout;
nvmem->nkeepout = config->nkeepout;
if (config->of_node)
nvmem->dev.of_node = config->of_node;
else if (!config->no_of_node)
nvmem->dev.of_node = config->dev->of_node;
switch (config->id) {
case NVMEM_DEVID_NONE:
rval = dev_set_name(&nvmem->dev, "%s", config->name);
break;
case NVMEM_DEVID_AUTO:
rval = dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
break;
default:
rval = dev_set_name(&nvmem->dev, "%s%d",
config->name ? : "nvmem",
config->name ? config->id : nvmem->id);
break;
}
if (rval)
goto err_put_device;
nvmem->read_only = device_property_present(config->dev, "read-only") ||
config->read_only || !nvmem->reg_write;
#ifdef CONFIG_NVMEM_SYSFS
nvmem->dev.groups = nvmem_dev_groups;
#endif
if (nvmem->nkeepout) {
rval = nvmem_validate_keepouts(nvmem);
if (rval)
goto err_put_device;
}
if (config->compat) {
rval = nvmem_sysfs_setup_compat(nvmem, config);
if (rval)
goto err_put_device;
}
/*
* If the driver supplied a layout by config->layout, the module
* pointer will be NULL and nvmem_layout_put() will be a noop.
*/
nvmem->layout = config->layout ?: nvmem_layout_get(nvmem);
if (IS_ERR(nvmem->layout)) {
rval = PTR_ERR(nvmem->layout);
nvmem->layout = NULL;
if (rval == -EPROBE_DEFER)
goto err_teardown_compat;
}
if (config->cells) {
rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
if (rval)
goto err_remove_cells;
}
rval = nvmem_add_cells_from_table(nvmem);
if (rval)
goto err_remove_cells;
rval = nvmem_add_cells_from_legacy_of(nvmem);
if (rval)
goto err_remove_cells;
rval = nvmem_add_cells_from_fixed_layout(nvmem);
if (rval)
goto err_remove_cells;
rval = nvmem_add_cells_from_layout(nvmem);
if (rval)
goto err_remove_cells;
dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
rval = device_add(&nvmem->dev);
if (rval)
goto err_remove_cells;
blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);
return nvmem;
err_remove_cells:
nvmem_device_remove_all_cells(nvmem);
nvmem_layout_put(nvmem->layout);
err_teardown_compat:
if (config->compat)
nvmem_sysfs_remove_compat(nvmem, config);
err_put_device:
put_device(&nvmem->dev);
return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
static void nvmem_device_release(struct kref *kref)
{
struct nvmem_device *nvmem;
nvmem = container_of(kref, struct nvmem_device, refcnt);
blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);
if (nvmem->flags & FLAG_COMPAT)
device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
nvmem_device_remove_all_cells(nvmem);
nvmem_layout_put(nvmem->layout);
device_unregister(&nvmem->dev);
}
/**
* nvmem_unregister() - Unregister previously registered nvmem device
*
* @nvmem: Pointer to previously registered nvmem device.
*/
void nvmem_unregister(struct nvmem_device *nvmem)
{
if (nvmem)
kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);
static void devm_nvmem_unregister(void *nvmem)
{
nvmem_unregister(nvmem);
}
/**
* devm_nvmem_register() - Register a managed nvmem device for given
* nvmem_config.
* Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
*
* @dev: Device that uses the nvmem device.
* @config: nvmem device configuration with which nvmem device is created.
*
* Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
* on success.
*/
struct nvmem_device *devm_nvmem_register(struct device *dev,
const struct nvmem_config *config)
{
struct nvmem_device *nvmem;
int ret;
nvmem = nvmem_register(config);
if (IS_ERR(nvmem))
return nvmem;
ret = devm_add_action_or_reset(dev, devm_nvmem_unregister, nvmem);
if (ret)
return ERR_PTR(ret);
return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);
static struct nvmem_device *__nvmem_device_get(void *data,
int (*match)(struct device *dev, const void *data))
{
struct nvmem_device *nvmem = NULL;
struct device *dev;
mutex_lock(&nvmem_mutex);
dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
if (dev)
nvmem = to_nvmem_device(dev);
mutex_unlock(&nvmem_mutex);
if (!nvmem)
return ERR_PTR(-EPROBE_DEFER);
if (!try_module_get(nvmem->owner)) {
dev_err(&nvmem->dev,
"could not increase module refcount for cell %s\n",
nvmem_dev_name(nvmem));
put_device(&nvmem->dev);
return ERR_PTR(-EINVAL);
}
kref_get(&nvmem->refcnt);
return nvmem;
}
static void __nvmem_device_put(struct nvmem_device *nvmem)
{
put_device(&nvmem->dev);
module_put(nvmem->owner);
kref_put(&nvmem->refcnt, nvmem_device_release);
}
#if IS_ENABLED(CONFIG_OF)
/**
* of_nvmem_device_get() - Get nvmem device from a given id
*
* @np: Device tree node that uses the nvmem device.
* @id: nvmem name from nvmem-names property.
*
* Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
* on success.
*/
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
struct device_node *nvmem_np;
struct nvmem_device *nvmem;
int index = 0;
if (id)
index = of_property_match_string(np, "nvmem-names", id);
nvmem_np = of_parse_phandle(np, "nvmem", index);
if (!nvmem_np)
return ERR_PTR(-ENOENT);
nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
of_node_put(nvmem_np);
return nvmem;
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif
/**
* nvmem_device_get() - Get nvmem device from a given id
*
* @dev: Device that uses the nvmem device.
* @dev_name: name of the requested nvmem device.
*
* Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
* on success.
*/
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
if (dev->of_node) { /* try dt first */
struct nvmem_device *nvmem;
nvmem = of_nvmem_device_get(dev->of_node, dev_name);
if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
return nvmem;
}
return __nvmem_device_get((void *)dev_name, device_match_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);
/**
* nvmem_device_find() - Find nvmem device with matching function
*
* @data: Data to pass to match function
* @match: Callback function to check device
*
* Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
* on success.
*/
struct nvmem_device *nvmem_device_find(void *data,
int (*match)(struct device *dev, const void *data))
{
return __nvmem_device_get(data, match);
}
EXPORT_SYMBOL_GPL(nvmem_device_find);
static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
struct nvmem_device **nvmem = res;
if (WARN_ON(!nvmem || !*nvmem))
return 0;
return *nvmem == data;
}
static void devm_nvmem_device_release(struct device *dev, void *res)
{
nvmem_device_put(*(struct nvmem_device **)res);
}
/**
 * devm_nvmem_device_put() - put an already-obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device obtained with devm_nvmem_device_get(),
 * that needs to be released.
*/
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
int ret;
ret = devres_release(dev, devm_nvmem_device_release,
devm_nvmem_device_match, nvmem);
WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);
/**
 * nvmem_device_put() - put an already-obtained nvmem device
*
* @nvmem: pointer to nvmem device that needs to be released.
*/
void nvmem_device_put(struct nvmem_device *nvmem)
{
__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);
/**
 * devm_nvmem_device_get() - Get nvmem device from a given id
*
* @dev: Device that requests the nvmem device.
* @id: name id for the requested nvmem device.
*
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success. The nvmem_device will be released automatically once the
 * device is freed.
*/
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
struct nvmem_device **ptr, *nvmem;
ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return ERR_PTR(-ENOMEM);
nvmem = nvmem_device_get(dev, id);
if (!IS_ERR(nvmem)) {
*ptr = nvmem;
devres_add(dev, ptr);
} else {
devres_free(ptr);
}
return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);
static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry,
const char *id, int index)
{
struct nvmem_cell *cell;
const char *name = NULL;
cell = kzalloc(sizeof(*cell), GFP_KERNEL);
if (!cell)
return ERR_PTR(-ENOMEM);
if (id) {
name = kstrdup_const(id, GFP_KERNEL);
if (!name) {
kfree(cell);
return ERR_PTR(-ENOMEM);
}
}
cell->id = name;
cell->entry = entry;
cell->index = index;
return cell;
}
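/*
 * Non-DT path: match the requesting device and connection id against the
 * registered lookup table, get the providing nvmem device by name and wrap
 * the named cell entry in a new nvmem_cell.
 */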
static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
struct nvmem_cell_entry *cell_entry;
struct nvmem_cell *cell = ERR_PTR(-ENOENT);
struct nvmem_cell_lookup *lookup;
struct nvmem_device *nvmem;
const char *dev_id;
if (!dev)
return ERR_PTR(-EINVAL);
dev_id = dev_name(dev);
mutex_lock(&nvmem_lookup_mutex);
list_for_each_entry(lookup, &nvmem_lookup_list, node) {
if ((strcmp(lookup->dev_id, dev_id) == 0) &&
(strcmp(lookup->con_id, con_id) == 0)) {
/* This is the right entry. */
nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
device_match_name);
if (IS_ERR(nvmem)) {
/* Provider may not be registered yet. */
cell = ERR_CAST(nvmem);
break;
}
cell_entry = nvmem_find_cell_entry_by_name(nvmem,
lookup->cell_name);
if (!cell_entry) {
__nvmem_device_put(nvmem);
cell = ERR_PTR(-ENOENT);
} else {
cell = nvmem_create_cell(cell_entry, con_id, 0);
if (IS_ERR(cell))
__nvmem_device_put(nvmem);
}
break;
}
}
mutex_unlock(&nvmem_lookup_mutex);
return cell;
}
#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
struct nvmem_cell_entry *iter, *cell = NULL;
mutex_lock(&nvmem_mutex);
list_for_each_entry(iter, &nvmem->cells, node) {
if (np == iter->np) {
cell = iter;
break;
}
}
mutex_unlock(&nvmem_mutex);
return cell;
}
/**
* of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
*
* @np: Device tree node that uses the nvmem cell.
* @id: nvmem cell name from nvmem-cell-names property, or NULL
* for the cell at index 0 (the lone cell with no accompanying
* nvmem-cell-names property).
*
* Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed by
 * nvmem_cell_put().
*/
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
struct device_node *cell_np, *nvmem_np;
struct nvmem_device *nvmem;
struct nvmem_cell_entry *cell_entry;
struct nvmem_cell *cell;
struct of_phandle_args cell_spec;
int index = 0;
int cell_index = 0;
int ret;
/* if cell name exists, find index to the name */
if (id)
index = of_property_match_string(np, "nvmem-cell-names", id);
ret = of_parse_phandle_with_optional_args(np, "nvmem-cells",
"#nvmem-cell-cells",
index, &cell_spec);
if (ret)
return ERR_PTR(-ENOENT);
if (cell_spec.args_count > 1)
return ERR_PTR(-EINVAL);
cell_np = cell_spec.np;
if (cell_spec.args_count)
cell_index = cell_spec.args[0];
nvmem_np = of_get_parent(cell_np);
if (!nvmem_np) {
of_node_put(cell_np);
return ERR_PTR(-EINVAL);
}
/* nvmem layouts produce cells within the nvmem-layout container */
if (of_node_name_eq(nvmem_np, "nvmem-layout")) {
nvmem_np = of_get_next_parent(nvmem_np);
if (!nvmem_np) {
of_node_put(cell_np);
return ERR_PTR(-EINVAL);
}
}
nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
of_node_put(nvmem_np);
if (IS_ERR(nvmem)) {
of_node_put(cell_np);
return ERR_CAST(nvmem);
}
cell_entry = nvmem_find_cell_entry_by_node(nvmem, cell_np);
of_node_put(cell_np);
if (!cell_entry) {
__nvmem_device_put(nvmem);
return ERR_PTR(-ENOENT);
}
cell = nvmem_create_cell(cell_entry, id, cell_index);
if (IS_ERR(cell))
__nvmem_device_put(nvmem);
return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif
/**
 * nvmem_cell_get() - Get an nvmem cell of a device from a given cell name
*
* @dev: Device that requests the nvmem cell.
* @id: nvmem cell name to get (this corresponds with the name from the
* nvmem-cell-names property for DT systems and with the con_id from
* the lookup entry for non-DT systems).
*
* Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed by
 * nvmem_cell_put().
*/
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
struct nvmem_cell *cell;
if (dev->of_node) { /* try dt first */
cell = of_nvmem_cell_get(dev->of_node, id);
if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
return cell;
}
/* NULL cell id only allowed for device tree; invalid otherwise */
if (!id)
return ERR_PTR(-EINVAL);
return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);
static void devm_nvmem_cell_release(struct device *dev, void *res)
{
nvmem_cell_put(*(struct nvmem_cell **)res);
}
/**
 * devm_nvmem_cell_get() - Get an nvmem cell of a device from a given id
*
* @dev: Device that requests the nvmem cell.
* @id: nvmem cell name id to get.
*
* Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed automatically once
 * the device is freed.
*/
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
struct nvmem_cell **ptr, *cell;
ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return ERR_PTR(-ENOMEM);
cell = nvmem_cell_get(dev, id);
if (!IS_ERR(cell)) {
*ptr = cell;
devres_add(dev, ptr);
} else {
devres_free(ptr);
}
return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);
static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
struct nvmem_cell **c = res;
if (WARN_ON(!c || !*c))
return 0;
return *c == data;
}
/**
* devm_nvmem_cell_put() - Release previously allocated nvmem cell
* from devm_nvmem_cell_get.
*
* @dev: Device that requests the nvmem cell.
* @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
*/
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
int ret;
ret = devres_release(dev, devm_nvmem_cell_release,
devm_nvmem_cell_match, cell);
WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);
/**
* nvmem_cell_put() - Release previously allocated nvmem cell.
*
* @cell: Previously allocated nvmem cell by nvmem_cell_get().
*/
void nvmem_cell_put(struct nvmem_cell *cell)
{
struct nvmem_device *nvmem = cell->entry->nvmem;
if (cell->id)
kfree_const(cell->id);
kfree(cell);
__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);
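/*
 * Shift a raw read in place so the cell value starts at bit 0, zero the
 * now-unused trailing bytes and mask off any bits beyond the cell width.
 */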
static void nvmem_shift_read_buffer_in_place(struct nvmem_cell_entry *cell, void *buf)
{
u8 *p, *b;
int i, extra, bit_offset = cell->bit_offset;
p = b = buf;
if (bit_offset) {
/* First shift */
*b++ >>= bit_offset;
/* setup rest of the bytes if any */
for (i = 1; i < cell->bytes; i++) {
/* Get bits from next byte and shift them towards msb */
*p |= *b << (BITS_PER_BYTE - bit_offset);
p = b;
*b++ >>= bit_offset;
}
} else {
/* point to the msb */
p += cell->bytes - 1;
}
/* result fits in less bytes */
extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
while (--extra >= 0)
*p-- = 0;
/* clear msb bits if any leftover in the last byte */
if (cell->nbits % BITS_PER_BYTE)
*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}
static int __nvmem_cell_read(struct nvmem_device *nvmem,
struct nvmem_cell_entry *cell,
void *buf, size_t *len, const char *id, int index)
{
int rc;
rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->raw_len);
if (rc)
return rc;
/* shift bits in-place */
if (cell->bit_offset || cell->nbits)
nvmem_shift_read_buffer_in_place(cell, buf);
if (cell->read_post_process) {
rc = cell->read_post_process(cell->priv, id, index,
cell->offset, buf, cell->raw_len);
if (rc)
return rc;
}
if (len)
*len = cell->bytes;
return 0;
}
/**
* nvmem_cell_read() - Read a given nvmem cell
*
* @cell: nvmem cell to be read.
* @len: pointer to length of cell which will be populated on successful read;
* can be NULL.
*
* Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
* buffer should be freed by the consumer with a kfree().
*/
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
struct nvmem_cell_entry *entry = cell->entry;
struct nvmem_device *nvmem = entry->nvmem;
u8 *buf;
int rc;
if (!nvmem)
return ERR_PTR(-EINVAL);
buf = kzalloc(max_t(size_t, entry->raw_len, entry->bytes), GFP_KERNEL);
if (!buf)
return ERR_PTR(-ENOMEM);
rc = __nvmem_cell_read(nvmem, cell->entry, buf, len, cell->id, cell->index);
if (rc) {
kfree(buf);
return ERR_PTR(rc);
}
return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);
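/*
 * Build a device-aligned write buffer for a bit-field cell: shift the new
 * value into position and merge in the neighbouring bits read back from the
 * device so data outside the cell is preserved.
 */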
static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell_entry *cell,
u8 *_buf, int len)
{
struct nvmem_device *nvmem = cell->nvmem;
int i, rc, nbits, bit_offset = cell->bit_offset;
u8 v, *p, *buf, *b, pbyte, pbits;
nbits = cell->nbits;
buf = kzalloc(cell->bytes, GFP_KERNEL);
if (!buf)
return ERR_PTR(-ENOMEM);
memcpy(buf, _buf, len);
p = b = buf;
if (bit_offset) {
pbyte = *b;
*b <<= bit_offset;
/* setup the first byte with lsb bits from nvmem */
rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
if (rc)
goto err;
*b++ |= GENMASK(bit_offset - 1, 0) & v;
		/* setup rest of the bytes if any */
for (i = 1; i < cell->bytes; i++) {
/* Get last byte bits and shift them towards lsb */
pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
pbyte = *b;
p = b;
*b <<= bit_offset;
*b++ |= pbits;
}
}
/* if it's not end on byte boundary */
if ((nbits + bit_offset) % BITS_PER_BYTE) {
/* setup the last byte with msb bits from nvmem */
rc = nvmem_reg_read(nvmem,
cell->offset + cell->bytes - 1, &v, 1);
if (rc)
goto err;
*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
}
return buf;
err:
kfree(buf);
return ERR_PTR(rc);
}
static int __nvmem_cell_entry_write(struct nvmem_cell_entry *cell, void *buf, size_t len)
{
struct nvmem_device *nvmem = cell->nvmem;
int rc;
if (!nvmem || nvmem->read_only ||
(cell->bit_offset == 0 && len != cell->bytes))
return -EINVAL;
/*
* Any cells which have a read_post_process hook are read-only because
* we cannot reverse the operation and it might affect other cells,
* too.
*/
if (cell->read_post_process)
return -EINVAL;
if (cell->bit_offset || cell->nbits) {
buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
if (IS_ERR(buf))
return PTR_ERR(buf);
}
rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);
/* free the tmp buffer */
if (cell->bit_offset || cell->nbits)
kfree(buf);
if (rc)
return rc;
return len;
}
/**
* nvmem_cell_write() - Write to a given nvmem cell
*
* @cell: nvmem cell to be written.
* @buf: Buffer to be written.
* @len: length of buffer to be written to nvmem cell.
*
* Return: length of bytes written or negative on failure.
*/
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
return __nvmem_cell_entry_write(cell->entry, buf, len);
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);
static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
void *val, size_t count)
{
struct nvmem_cell *cell;
void *buf;
size_t len;
cell = nvmem_cell_get(dev, cell_id);
if (IS_ERR(cell))
return PTR_ERR(cell);
buf = nvmem_cell_read(cell, &len);
if (IS_ERR(buf)) {
nvmem_cell_put(cell);
return PTR_ERR(buf);
}
if (len != count) {
kfree(buf);
nvmem_cell_put(cell);
return -EINVAL;
}
memcpy(val, buf, count);
kfree(buf);
nvmem_cell_put(cell);
return 0;
}
/**
* nvmem_cell_read_u8() - Read a cell value as a u8
*
* @dev: Device that requests the nvmem cell.
* @cell_id: Name of nvmem cell to read.
* @val: pointer to output value.
*
* Return: 0 on success or negative errno.
*/
int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val)
{
return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u8);
/**
* nvmem_cell_read_u16() - Read a cell value as a u16
*
* @dev: Device that requests the nvmem cell.
* @cell_id: Name of nvmem cell to read.
* @val: pointer to output value.
*
* Return: 0 on success or negative errno.
*/
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
{
return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);
/**
* nvmem_cell_read_u32() - Read a cell value as a u32
*
* @dev: Device that requests the nvmem cell.
* @cell_id: Name of nvmem cell to read.
* @val: pointer to output value.
*
* Return: 0 on success or negative errno.
*/
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);
/**
* nvmem_cell_read_u64() - Read a cell value as a u64
*
* @dev: Device that requests the nvmem cell.
* @cell_id: Name of nvmem cell to read.
* @val: pointer to output value.
*
* Return: 0 on success or negative errno.
*/
int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
{
return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);
static const void *nvmem_cell_read_variable_common(struct device *dev,
const char *cell_id,
size_t max_len, size_t *len)
{
struct nvmem_cell *cell;
int nbits;
void *buf;
cell = nvmem_cell_get(dev, cell_id);
if (IS_ERR(cell))
return cell;
nbits = cell->entry->nbits;
buf = nvmem_cell_read(cell, len);
nvmem_cell_put(cell);
if (IS_ERR(buf))
return buf;
/*
* If nbits is set then nvmem_cell_read() can significantly exaggerate
* the length of the real data. Throw away the extra junk.
*/
if (nbits)
*len = DIV_ROUND_UP(nbits, 8);
if (*len > max_len) {
kfree(buf);
return ERR_PTR(-ERANGE);
}
return buf;
}
/**
* nvmem_cell_read_variable_le_u32() - Read up to 32-bits of data as a little endian number.
*
* @dev: Device that requests the nvmem cell.
* @cell_id: Name of nvmem cell to read.
* @val: pointer to output value.
*
* Return: 0 on success or negative errno.
*/
int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id,
u32 *val)
{
size_t len;
const u8 *buf;
int i;
buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
if (IS_ERR(buf))
return PTR_ERR(buf);
/* Copy w/ implicit endian conversion */
*val = 0;
for (i = 0; i < len; i++)
*val |= buf[i] << (8 * i);
kfree(buf);
return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u32);
/**
* nvmem_cell_read_variable_le_u64() - Read up to 64-bits of data as a little endian number.
*
* @dev: Device that requests the nvmem cell.
* @cell_id: Name of nvmem cell to read.
* @val: pointer to output value.
*
* Return: 0 on success or negative errno.
*/
int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id,
u64 *val)
{
size_t len;
const u8 *buf;
int i;
buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
if (IS_ERR(buf))
return PTR_ERR(buf);
/* Copy w/ implicit endian conversion */
*val = 0;
for (i = 0; i < len; i++)
*val |= (uint64_t)buf[i] << (8 * i);
kfree(buf);
return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u64);
/**
* nvmem_device_cell_read() - Read a given nvmem device and cell
*
* @nvmem: nvmem device to read from.
* @info: nvmem cell info to be read.
* @buf: buffer pointer which will be populated on successful read.
*
* Return: length of successful bytes read on success and negative
* error code on error.
*/
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
struct nvmem_cell_info *info, void *buf)
{
struct nvmem_cell_entry cell;
int rc;
ssize_t len;
if (!nvmem)
return -EINVAL;
rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
if (rc)
return rc;
rc = __nvmem_cell_read(nvmem, &cell, buf, &len, NULL, 0);
if (rc)
return rc;
return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);
/**
* nvmem_device_cell_write() - Write cell to a given nvmem device
*
* @nvmem: nvmem device to be written to.
* @info: nvmem cell info to be written.
* @buf: buffer to be written to cell.
*
* Return: length of bytes written or negative error code on failure.
*/
int nvmem_device_cell_write(struct nvmem_device *nvmem,
struct nvmem_cell_info *info, void *buf)
{
struct nvmem_cell_entry cell;
int rc;
if (!nvmem)
return -EINVAL;
rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
if (rc)
return rc;
return __nvmem_cell_entry_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);
/**
* nvmem_device_read() - Read from a given nvmem device
*
* @nvmem: nvmem device to read from.
* @offset: offset in nvmem device.
* @bytes: number of bytes to read.
* @buf: buffer pointer which will be populated on successful read.
*
* Return: length of successful bytes read on success and negative
* error code on error.
*/
int nvmem_device_read(struct nvmem_device *nvmem,
unsigned int offset,
size_t bytes, void *buf)
{
int rc;
if (!nvmem)
return -EINVAL;
rc = nvmem_reg_read(nvmem, offset, buf, bytes);
if (rc)
return rc;
return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);
/**
* nvmem_device_write() - Write cell to a given nvmem device
*
* @nvmem: nvmem device to be written to.
* @offset: offset in nvmem device.
* @bytes: number of bytes to write.
* @buf: buffer to be written.
*
* Return: length of bytes written or negative error code on failure.
*/
int nvmem_device_write(struct nvmem_device *nvmem,
unsigned int offset,
size_t bytes, void *buf)
{
int rc;
if (!nvmem)
return -EINVAL;
rc = nvmem_reg_write(nvmem, offset, buf, bytes);
if (rc)
return rc;
return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);
/**
* nvmem_add_cell_table() - register a table of cell info entries
*
* @table: table of cell info entries
*/
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
mutex_lock(&nvmem_cell_mutex);
list_add_tail(&table->node, &nvmem_cell_tables);
mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);
/**
* nvmem_del_cell_table() - remove a previously registered cell info table
*
* @table: table of cell info entries
*/
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
mutex_lock(&nvmem_cell_mutex);
list_del(&table->node);
mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);
/**
* nvmem_add_cell_lookups() - register a list of cell lookup entries
*
* @entries: array of cell lookup entries
* @nentries: number of cell lookup entries in the array
*/
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
int i;
mutex_lock(&nvmem_lookup_mutex);
for (i = 0; i < nentries; i++)
list_add_tail(&entries[i].node, &nvmem_lookup_list);
mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);
/**
* nvmem_del_cell_lookups() - remove a list of previously added cell lookup
* entries
*
* @entries: array of cell lookup entries
* @nentries: number of cell lookup entries in the array
*/
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
int i;
mutex_lock(&nvmem_lookup_mutex);
for (i = 0; i < nentries; i++)
list_del(&entries[i].node);
mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);
/**
* nvmem_dev_name() - Get the name of a given nvmem device.
*
* @nvmem: nvmem device.
*
* Return: name of the nvmem device.
*/
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);
static int __init nvmem_init(void)
{
return bus_register(&nvmem_bus_type);
}
static void __exit nvmem_exit(void)
{
bus_unregister(&nvmem_bus_type);
}
subsys_initcall(nvmem_init);
module_exit(nvmem_exit);
MODULE_AUTHOR("Srinivas Kandagatla <[email protected]");
MODULE_AUTHOR("Maxime Ripard <[email protected]");
MODULE_DESCRIPTION("nvmem Driver Core");
| linux-master | drivers/nvmem/core.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2023 Westermo Network Technologies AB
*/
#include <linux/device.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>
struct qoriq_efuse_priv {
void __iomem *base;
};
static int qoriq_efuse_read(void *context, unsigned int offset, void *val,
size_t bytes)
{
struct qoriq_efuse_priv *priv = context;
/* .stride = 4 so offset is guaranteed to be aligned */
__ioread32_copy(val, priv->base + offset, bytes / 4);
/* Ignore trailing bytes (there shouldn't be any) */
return 0;
}
static int qoriq_efuse_probe(struct platform_device *pdev)
{
struct nvmem_config config = {
.dev = &pdev->dev,
.read_only = true,
.reg_read = qoriq_efuse_read,
.stride = sizeof(u32),
.word_size = sizeof(u32),
.name = "qoriq_efuse_read",
.id = NVMEM_DEVID_AUTO,
.root_only = true,
};
struct qoriq_efuse_priv *priv;
struct nvmem_device *nvmem;
struct resource *res;
priv = devm_kzalloc(config.dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
config.size = resource_size(res);
config.priv = priv;
nvmem = devm_nvmem_register(config.dev, &config);
return PTR_ERR_OR_ZERO(nvmem);
}
static const struct of_device_id qoriq_efuse_of_match[] = {
{ .compatible = "fsl,t1023-sfp", },
{/* sentinel */},
};
MODULE_DEVICE_TABLE(of, qoriq_efuse_of_match);
static struct platform_driver qoriq_efuse_driver = {
.probe = qoriq_efuse_probe,
.driver = {
.name = "qoriq-efuse",
.of_match_table = qoriq_efuse_of_match,
},
};
module_platform_driver(qoriq_efuse_driver);
MODULE_AUTHOR("Richard Alpe <[email protected]>");
MODULE_DESCRIPTION("NXP QorIQ Security Fuse Processor (SFP) Reader");
MODULE_LICENSE("GPL");
| linux-master | drivers/nvmem/qoriq-efuse.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Rockchip OTP Driver
*
* Copyright (c) 2018 Rockchip Electronics Co. Ltd.
* Author: Finley Xiao <[email protected]>
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
/* OTP Register Offsets */
#define OTPC_SBPI_CTRL 0x0020
#define OTPC_SBPI_CMD_VALID_PRE 0x0024
#define OTPC_SBPI_CS_VALID_PRE 0x0028
#define OTPC_SBPI_STATUS 0x002C
#define OTPC_USER_CTRL 0x0100
#define OTPC_USER_ADDR 0x0104
#define OTPC_USER_ENABLE 0x0108
#define OTPC_USER_Q 0x0124
#define OTPC_INT_STATUS 0x0304
#define OTPC_SBPI_CMD0_OFFSET 0x1000
#define OTPC_SBPI_CMD1_OFFSET 0x1004
/* OTP Register bits and masks */
#define OTPC_USER_ADDR_MASK GENMASK(31, 16)
#define OTPC_USE_USER BIT(0)
#define OTPC_USE_USER_MASK GENMASK(16, 16)
#define OTPC_USER_FSM_ENABLE BIT(0)
#define OTPC_USER_FSM_ENABLE_MASK GENMASK(16, 16)
#define OTPC_SBPI_DONE BIT(1)
#define OTPC_USER_DONE BIT(2)
#define SBPI_DAP_ADDR 0x02
#define SBPI_DAP_ADDR_SHIFT 8
#define SBPI_DAP_ADDR_MASK GENMASK(31, 24)
#define SBPI_CMD_VALID_MASK GENMASK(31, 16)
#define SBPI_DAP_CMD_WRF 0xC0
#define SBPI_DAP_REG_ECC 0x3A
#define SBPI_ECC_ENABLE 0x00
#define SBPI_ECC_DISABLE 0x09
#define SBPI_ENABLE BIT(0)
#define SBPI_ENABLE_MASK GENMASK(16, 16)
#define OTPC_TIMEOUT 10000
/* RK3588 Register */
#define RK3588_OTPC_AUTO_CTRL 0x04
#define RK3588_OTPC_AUTO_EN 0x08
#define RK3588_OTPC_INT_ST 0x84
#define RK3588_OTPC_DOUT0 0x20
#define RK3588_NO_SECURE_OFFSET 0x300
#define RK3588_NBYTES 4
#define RK3588_BURST_NUM 1
#define RK3588_BURST_SHIFT 8
#define RK3588_ADDR_SHIFT 16
#define RK3588_AUTO_EN BIT(0)
#define RK3588_RD_DONE BIT(1)
struct rockchip_data {
int size;
const char * const *clks;
int num_clks;
nvmem_reg_read_t reg_read;
};
struct rockchip_otp {
struct device *dev;
void __iomem *base;
struct clk_bulk_data *clks;
struct reset_control *rst;
const struct rockchip_data *data;
};
static int rockchip_otp_reset(struct rockchip_otp *otp)
{
int ret;
ret = reset_control_assert(otp->rst);
if (ret) {
dev_err(otp->dev, "failed to assert otp phy %d\n", ret);
return ret;
}
udelay(2);
ret = reset_control_deassert(otp->rst);
if (ret) {
dev_err(otp->dev, "failed to deassert otp phy %d\n", ret);
return ret;
}
return 0;
}
static int rockchip_otp_wait_status(struct rockchip_otp *otp,
unsigned int reg, u32 flag)
{
u32 status = 0;
int ret;
ret = readl_poll_timeout_atomic(otp->base + reg, status,
(status & flag), 1, OTPC_TIMEOUT);
if (ret)
return ret;
/* clean int status */
writel(flag, otp->base + reg);
return 0;
}
static int rockchip_otp_ecc_enable(struct rockchip_otp *otp, bool enable)
{
int ret = 0;
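	/*
	 * ECC is toggled through the SBPI interface: queue a WRF command to
	 * the ECC DAP register with the enable/disable value, then kick the
	 * SBPI engine and wait for its done status.
	 */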
writel(SBPI_DAP_ADDR_MASK | (SBPI_DAP_ADDR << SBPI_DAP_ADDR_SHIFT),
otp->base + OTPC_SBPI_CTRL);
writel(SBPI_CMD_VALID_MASK | 0x1, otp->base + OTPC_SBPI_CMD_VALID_PRE);
writel(SBPI_DAP_CMD_WRF | SBPI_DAP_REG_ECC,
otp->base + OTPC_SBPI_CMD0_OFFSET);
if (enable)
writel(SBPI_ECC_ENABLE, otp->base + OTPC_SBPI_CMD1_OFFSET);
else
writel(SBPI_ECC_DISABLE, otp->base + OTPC_SBPI_CMD1_OFFSET);
writel(SBPI_ENABLE_MASK | SBPI_ENABLE, otp->base + OTPC_SBPI_CTRL);
ret = rockchip_otp_wait_status(otp, OTPC_INT_STATUS, OTPC_SBPI_DONE);
if (ret < 0)
dev_err(otp->dev, "timeout during ecc_enable\n");
return ret;
}
static int px30_otp_read(void *context, unsigned int offset,
void *val, size_t bytes)
{
struct rockchip_otp *otp = context;
u8 *buf = val;
int ret;
ret = rockchip_otp_reset(otp);
if (ret) {
dev_err(otp->dev, "failed to reset otp phy\n");
return ret;
}
ret = rockchip_otp_ecc_enable(otp, false);
if (ret < 0) {
dev_err(otp->dev, "rockchip_otp_ecc_enable err\n");
return ret;
}
writel(OTPC_USE_USER | OTPC_USE_USER_MASK, otp->base + OTPC_USER_CTRL);
udelay(5);
while (bytes--) {
writel(offset++ | OTPC_USER_ADDR_MASK,
otp->base + OTPC_USER_ADDR);
writel(OTPC_USER_FSM_ENABLE | OTPC_USER_FSM_ENABLE_MASK,
otp->base + OTPC_USER_ENABLE);
ret = rockchip_otp_wait_status(otp, OTPC_INT_STATUS, OTPC_USER_DONE);
if (ret < 0) {
dev_err(otp->dev, "timeout during read setup\n");
goto read_end;
}
*buf++ = readb(otp->base + OTPC_USER_Q);
}
read_end:
writel(0x0 | OTPC_USE_USER_MASK, otp->base + OTPC_USER_CTRL);
return ret;
}
static int rk3588_otp_read(void *context, unsigned int offset,
void *val, size_t bytes)
{
struct rockchip_otp *otp = context;
unsigned int addr_start, addr_end, addr_len;
int ret, i = 0;
u32 data;
u8 *buf;
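	/*
	 * The controller reads whole 32-bit words, so widen the requested
	 * range to word boundaries, read into a bounce buffer and copy out
	 * only the bytes the caller asked for.
	 */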
addr_start = round_down(offset, RK3588_NBYTES) / RK3588_NBYTES;
addr_end = round_up(offset + bytes, RK3588_NBYTES) / RK3588_NBYTES;
addr_len = addr_end - addr_start;
addr_start += RK3588_NO_SECURE_OFFSET;
buf = kzalloc(array_size(addr_len, RK3588_NBYTES), GFP_KERNEL);
if (!buf)
return -ENOMEM;
while (addr_len--) {
writel((addr_start << RK3588_ADDR_SHIFT) |
(RK3588_BURST_NUM << RK3588_BURST_SHIFT),
otp->base + RK3588_OTPC_AUTO_CTRL);
writel(RK3588_AUTO_EN, otp->base + RK3588_OTPC_AUTO_EN);
ret = rockchip_otp_wait_status(otp, RK3588_OTPC_INT_ST,
RK3588_RD_DONE);
if (ret < 0) {
dev_err(otp->dev, "timeout during read setup\n");
goto read_end;
}
data = readl(otp->base + RK3588_OTPC_DOUT0);
memcpy(&buf[i], &data, RK3588_NBYTES);
i += RK3588_NBYTES;
addr_start++;
}
memcpy(val, buf + offset % RK3588_NBYTES, bytes);
read_end:
kfree(buf);
return ret;
}
static int rockchip_otp_read(void *context, unsigned int offset,
void *val, size_t bytes)
{
struct rockchip_otp *otp = context;
int ret;
if (!otp->data || !otp->data->reg_read)
return -EINVAL;
ret = clk_bulk_prepare_enable(otp->data->num_clks, otp->clks);
if (ret < 0) {
dev_err(otp->dev, "failed to prepare/enable clks\n");
return ret;
}
ret = otp->data->reg_read(context, offset, val, bytes);
clk_bulk_disable_unprepare(otp->data->num_clks, otp->clks);
return ret;
}
static struct nvmem_config otp_config = {
.name = "rockchip-otp",
.owner = THIS_MODULE,
.read_only = true,
.stride = 1,
.word_size = 1,
.reg_read = rockchip_otp_read,
};
static const char * const px30_otp_clocks[] = {
"otp", "apb_pclk", "phy",
};
static const struct rockchip_data px30_data = {
.size = 0x40,
.clks = px30_otp_clocks,
.num_clks = ARRAY_SIZE(px30_otp_clocks),
.reg_read = px30_otp_read,
};
static const char * const rk3588_otp_clocks[] = {
"otp", "apb_pclk", "phy", "arb",
};
static const struct rockchip_data rk3588_data = {
.size = 0x400,
.clks = rk3588_otp_clocks,
.num_clks = ARRAY_SIZE(rk3588_otp_clocks),
.reg_read = rk3588_otp_read,
};
static const struct of_device_id rockchip_otp_match[] = {
{
.compatible = "rockchip,px30-otp",
.data = &px30_data,
},
{
.compatible = "rockchip,rk3308-otp",
.data = &px30_data,
},
{
.compatible = "rockchip,rk3588-otp",
.data = &rk3588_data,
},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, rockchip_otp_match);
static int rockchip_otp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rockchip_otp *otp;
const struct rockchip_data *data;
struct nvmem_device *nvmem;
int ret, i;
data = of_device_get_match_data(dev);
if (!data)
return dev_err_probe(dev, -EINVAL, "failed to get match data\n");
otp = devm_kzalloc(&pdev->dev, sizeof(struct rockchip_otp),
GFP_KERNEL);
if (!otp)
return -ENOMEM;
otp->data = data;
otp->dev = dev;
otp->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(otp->base))
return dev_err_probe(dev, PTR_ERR(otp->base),
"failed to ioremap resource\n");
otp->clks = devm_kcalloc(dev, data->num_clks, sizeof(*otp->clks),
GFP_KERNEL);
if (!otp->clks)
return -ENOMEM;
for (i = 0; i < data->num_clks; ++i)
otp->clks[i].id = data->clks[i];
ret = devm_clk_bulk_get(dev, data->num_clks, otp->clks);
if (ret)
return dev_err_probe(dev, ret, "failed to get clocks\n");
otp->rst = devm_reset_control_array_get_exclusive(dev);
if (IS_ERR(otp->rst))
return dev_err_probe(dev, PTR_ERR(otp->rst),
"failed to get resets\n");
otp_config.size = data->size;
otp_config.priv = otp;
otp_config.dev = dev;
nvmem = devm_nvmem_register(dev, &otp_config);
if (IS_ERR(nvmem))
return dev_err_probe(dev, PTR_ERR(nvmem),
"failed to register nvmem device\n");
return 0;
}
static struct platform_driver rockchip_otp_driver = {
.probe = rockchip_otp_probe,
.driver = {
.name = "rockchip-otp",
.of_match_table = rockchip_otp_match,
},
};
module_platform_driver(rockchip_otp_driver);
MODULE_DESCRIPTION("Rockchip OTP driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/nvmem/rockchip-otp.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP LPC18xx/43xx OTP memory NVMEM driver
*
* Copyright (c) 2016 Joachim Eastwood <[email protected]>
*
* Based on the imx ocotp driver,
* Copyright (c) 2015 Pengutronix, Philipp Zabel <[email protected]>
*
* TODO: add support for writing OTP register via API in boot ROM.
*/
#include <linux/io.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
/*
* LPC18xx OTP memory contains 4 banks with 4 32-bit words. Bank 0 starts
* at offset 0 from the base.
*
 * Bank 0 contains the part ID for flashless devices and is reserved for
 * devices with flash.
 * Banks 1 and 2 are general purpose or AES key storage for secure devices.
 * Bank 3 contains control data, the USB ID and general purpose words.
*/
#define LPC18XX_OTP_NUM_BANKS 4
#define LPC18XX_OTP_WORDS_PER_BANK 4
#define LPC18XX_OTP_WORD_SIZE sizeof(u32)
#define LPC18XX_OTP_SIZE (LPC18XX_OTP_NUM_BANKS * \
LPC18XX_OTP_WORDS_PER_BANK * \
LPC18XX_OTP_WORD_SIZE)
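/* 4 banks * 4 words * 4 bytes = 64 bytes in total; bank n starts at byte offset n * 16 */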
struct lpc18xx_otp {
void __iomem *base;
};
static int lpc18xx_otp_read(void *context, unsigned int offset,
void *val, size_t bytes)
{
struct lpc18xx_otp *otp = context;
unsigned int count = bytes >> 2;
u32 index = offset >> 2;
u32 *buf = val;
int i;
if (count > (LPC18XX_OTP_SIZE - index))
count = LPC18XX_OTP_SIZE - index;
for (i = index; i < (index + count); i++)
*buf++ = readl(otp->base + i * LPC18XX_OTP_WORD_SIZE);
return 0;
}
static struct nvmem_config lpc18xx_otp_nvmem_config = {
.name = "lpc18xx-otp",
.read_only = true,
.word_size = LPC18XX_OTP_WORD_SIZE,
.stride = LPC18XX_OTP_WORD_SIZE,
.reg_read = lpc18xx_otp_read,
};
static int lpc18xx_otp_probe(struct platform_device *pdev)
{
struct nvmem_device *nvmem;
struct lpc18xx_otp *otp;
otp = devm_kzalloc(&pdev->dev, sizeof(*otp), GFP_KERNEL);
if (!otp)
return -ENOMEM;
otp->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(otp->base))
return PTR_ERR(otp->base);
lpc18xx_otp_nvmem_config.size = LPC18XX_OTP_SIZE;
lpc18xx_otp_nvmem_config.dev = &pdev->dev;
lpc18xx_otp_nvmem_config.priv = otp;
nvmem = devm_nvmem_register(&pdev->dev, &lpc18xx_otp_nvmem_config);
return PTR_ERR_OR_ZERO(nvmem);
}
static const struct of_device_id lpc18xx_otp_dt_ids[] = {
{ .compatible = "nxp,lpc1850-otp" },
{ },
};
MODULE_DEVICE_TABLE(of, lpc18xx_otp_dt_ids);
static struct platform_driver lpc18xx_otp_driver = {
.probe = lpc18xx_otp_probe,
.driver = {
.name = "lpc18xx_otp",
.of_match_table = lpc18xx_otp_dt_ids,
},
};
module_platform_driver(lpc18xx_otp_driver);
MODULE_AUTHOR("Joachim Eastwoood <[email protected]>");
MODULE_DESCRIPTION("NXP LPC18xx OTP NVMEM driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/nvmem/lpc18xx_otp.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* i.MX8 OCOTP fusebox driver
*
* Copyright 2019 NXP
*
* Peng Fan <[email protected]>
*/
#include <linux/arm-smccc.h>
#include <linux/firmware/imx/sci.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#define IMX_SIP_OTP_WRITE 0xc200000B
enum ocotp_devtype {
IMX8QXP,
IMX8QM,
};
#define ECC_REGION BIT(0)
#define HOLE_REGION BIT(1)
struct ocotp_region {
u32 start;
u32 end;
u32 flag;
};
struct ocotp_devtype_data {
int devtype;
int nregs;
u32 num_region;
struct ocotp_region region[];
};
struct ocotp_priv {
struct device *dev;
const struct ocotp_devtype_data *data;
struct imx_sc_ipc *nvmem_ipc;
};
struct imx_sc_msg_misc_fuse_read {
struct imx_sc_rpc_msg hdr;
u32 word;
} __packed;
static DEFINE_MUTEX(scu_ocotp_mutex);
static struct ocotp_devtype_data imx8qxp_data = {
.devtype = IMX8QXP,
.nregs = 800,
.num_region = 3,
.region = {
{0x10, 0x10f, ECC_REGION},
{0x110, 0x21F, HOLE_REGION},
{0x220, 0x31F, ECC_REGION},
},
};
static struct ocotp_devtype_data imx8qm_data = {
.devtype = IMX8QM,
.nregs = 800,
.num_region = 2,
.region = {
{0x10, 0x10f, ECC_REGION},
{0x1a0, 0x1ff, ECC_REGION},
},
};
static bool in_hole(void *context, u32 index)
{
struct ocotp_priv *priv = context;
const struct ocotp_devtype_data *data = priv->data;
int i;
for (i = 0; i < data->num_region; i++) {
if (data->region[i].flag & HOLE_REGION) {
if ((index >= data->region[i].start) &&
(index <= data->region[i].end))
return true;
}
}
return false;
}
static bool in_ecc(void *context, u32 index)
{
struct ocotp_priv *priv = context;
const struct ocotp_devtype_data *data = priv->data;
int i;
for (i = 0; i < data->num_region; i++) {
if (data->region[i].flag & ECC_REGION) {
if ((index >= data->region[i].start) &&
(index <= data->region[i].end))
return true;
}
}
return false;
}
static int imx_sc_misc_otp_fuse_read(struct imx_sc_ipc *ipc, u32 word,
u32 *val)
{
struct imx_sc_msg_misc_fuse_read msg;
struct imx_sc_rpc_msg *hdr = &msg.hdr;
int ret;
hdr->ver = IMX_SC_RPC_VERSION;
hdr->svc = IMX_SC_RPC_SVC_MISC;
hdr->func = IMX_SC_MISC_FUNC_OTP_FUSE_READ;
hdr->size = 2;
msg.word = word;
ret = imx_scu_call_rpc(ipc, &msg, true);
if (ret)
return ret;
*val = msg.word;
return 0;
}
static int imx_scu_ocotp_read(void *context, unsigned int offset,
void *val, size_t bytes)
{
struct ocotp_priv *priv = context;
u32 count, index, num_bytes;
u32 *buf;
void *p;
int i, ret;
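	/*
	 * Fuse words are read one at a time over the SCU RPC into a
	 * word-aligned bounce buffer; words inside a hole region read back
	 * as zero.
	 */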
index = offset;
num_bytes = round_up(bytes, 4);
count = num_bytes >> 2;
if (count > (priv->data->nregs - index))
count = priv->data->nregs - index;
p = kzalloc(num_bytes, GFP_KERNEL);
if (!p)
return -ENOMEM;
mutex_lock(&scu_ocotp_mutex);
buf = p;
for (i = index; i < (index + count); i++) {
if (in_hole(context, i)) {
*buf++ = 0;
continue;
}
ret = imx_sc_misc_otp_fuse_read(priv->nvmem_ipc, i, buf);
if (ret) {
mutex_unlock(&scu_ocotp_mutex);
kfree(p);
return ret;
}
buf++;
}
memcpy(val, (u8 *)p, bytes);
mutex_unlock(&scu_ocotp_mutex);
kfree(p);
return 0;
}
static int imx_scu_ocotp_write(void *context, unsigned int offset,
void *val, size_t bytes)
{
struct ocotp_priv *priv = context;
struct arm_smccc_res res;
u32 *buf = val;
u32 tmp;
u32 index;
int ret;
/* allow only writing one complete OTP word at a time */
if (bytes != 4)
return -EINVAL;
index = offset;
if (in_hole(context, index))
return -EINVAL;
if (in_ecc(context, index)) {
pr_warn("ECC region, only program once\n");
mutex_lock(&scu_ocotp_mutex);
ret = imx_sc_misc_otp_fuse_read(priv->nvmem_ipc, index, &tmp);
mutex_unlock(&scu_ocotp_mutex);
if (ret)
return ret;
if (tmp) {
pr_warn("ECC region, already has value: %x\n", tmp);
return -EIO;
}
}
mutex_lock(&scu_ocotp_mutex);
arm_smccc_smc(IMX_SIP_OTP_WRITE, index, *buf, 0, 0, 0, 0, 0, &res);
mutex_unlock(&scu_ocotp_mutex);
return res.a0;
}
static struct nvmem_config imx_scu_ocotp_nvmem_config = {
.name = "imx-scu-ocotp",
.read_only = false,
.word_size = 4,
.stride = 1,
.owner = THIS_MODULE,
.reg_read = imx_scu_ocotp_read,
.reg_write = imx_scu_ocotp_write,
};
static const struct of_device_id imx_scu_ocotp_dt_ids[] = {
{ .compatible = "fsl,imx8qxp-scu-ocotp", (void *)&imx8qxp_data },
{ .compatible = "fsl,imx8qm-scu-ocotp", (void *)&imx8qm_data },
{ },
};
MODULE_DEVICE_TABLE(of, imx_scu_ocotp_dt_ids);
static int imx_scu_ocotp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ocotp_priv *priv;
struct nvmem_device *nvmem;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
ret = imx_scu_get_handle(&priv->nvmem_ipc);
if (ret)
return ret;
priv->data = of_device_get_match_data(dev);
priv->dev = dev;
imx_scu_ocotp_nvmem_config.size = 4 * priv->data->nregs;
imx_scu_ocotp_nvmem_config.dev = dev;
imx_scu_ocotp_nvmem_config.priv = priv;
nvmem = devm_nvmem_register(dev, &imx_scu_ocotp_nvmem_config);
return PTR_ERR_OR_ZERO(nvmem);
}
static struct platform_driver imx_scu_ocotp_driver = {
.probe = imx_scu_ocotp_probe,
.driver = {
.name = "imx_scu_ocotp",
.of_match_table = imx_scu_ocotp_dt_ids,
},
};
module_platform_driver(imx_scu_ocotp_driver);
MODULE_AUTHOR("Peng Fan <[email protected]>");
MODULE_DESCRIPTION("i.MX8 SCU OCOTP fuse box driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/nvmem/imx-ocotp-scu.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017, 2020-2021, The Linux Foundation. All rights reserved.
*/
#include <linux/device.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#define SDAM_MEM_START 0x40
#define REGISTER_MAP_ID 0x40
#define REGISTER_MAP_VERSION 0x41
#define SDAM_SIZE 0x44
#define SDAM_PBS_TRIG_SET 0xE5
#define SDAM_PBS_TRIG_CLR 0xE6
struct sdam_chip {
struct regmap *regmap;
struct nvmem_config sdam_config;
unsigned int base;
unsigned int size;
};
/* read only register offsets */
static const u8 sdam_ro_map[] = {
REGISTER_MAP_ID,
REGISTER_MAP_VERSION,
SDAM_SIZE
};
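/*
 * Accesses are only valid inside the SDAM memory window starting at
 * SDAM_MEM_START, or as single-byte accesses to the PBS trigger registers.
 */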
static bool sdam_is_valid(struct sdam_chip *sdam, unsigned int offset,
size_t len)
{
unsigned int sdam_mem_end = SDAM_MEM_START + sdam->size - 1;
if (!len)
return false;
if (offset >= SDAM_MEM_START && offset <= sdam_mem_end
&& (offset + len - 1) <= sdam_mem_end)
return true;
else if ((offset == SDAM_PBS_TRIG_SET || offset == SDAM_PBS_TRIG_CLR)
&& (len == 1))
return true;
return false;
}
static bool sdam_is_ro(unsigned int offset, size_t len)
{
int i;
for (i = 0; i < ARRAY_SIZE(sdam_ro_map); i++)
if (offset <= sdam_ro_map[i] && (offset + len) > sdam_ro_map[i])
return true;
return false;
}
static int sdam_read(void *priv, unsigned int offset, void *val,
size_t bytes)
{
struct sdam_chip *sdam = priv;
struct device *dev = sdam->sdam_config.dev;
int rc;
if (!sdam_is_valid(sdam, offset, bytes)) {
dev_err(dev, "Invalid SDAM offset %#x len=%zd\n",
offset, bytes);
return -EINVAL;
}
rc = regmap_bulk_read(sdam->regmap, sdam->base + offset, val, bytes);
if (rc < 0)
dev_err(dev, "Failed to read SDAM offset %#x len=%zd, rc=%d\n",
offset, bytes, rc);
return rc;
}
static int sdam_write(void *priv, unsigned int offset, void *val,
size_t bytes)
{
struct sdam_chip *sdam = priv;
struct device *dev = sdam->sdam_config.dev;
int rc;
if (!sdam_is_valid(sdam, offset, bytes)) {
dev_err(dev, "Invalid SDAM offset %#x len=%zd\n",
offset, bytes);
return -EINVAL;
}
if (sdam_is_ro(offset, bytes)) {
dev_err(dev, "Invalid write offset %#x len=%zd\n",
offset, bytes);
return -EINVAL;
}
rc = regmap_bulk_write(sdam->regmap, sdam->base + offset, val, bytes);
if (rc < 0)
dev_err(dev, "Failed to write SDAM offset %#x len=%zd, rc=%d\n",
offset, bytes, rc);
return rc;
}
static int sdam_probe(struct platform_device *pdev)
{
struct sdam_chip *sdam;
struct nvmem_device *nvmem;
unsigned int val;
int rc;
sdam = devm_kzalloc(&pdev->dev, sizeof(*sdam), GFP_KERNEL);
if (!sdam)
return -ENOMEM;
sdam->regmap = dev_get_regmap(pdev->dev.parent, NULL);
if (!sdam->regmap) {
dev_err(&pdev->dev, "Failed to get regmap handle\n");
return -ENXIO;
}
rc = of_property_read_u32(pdev->dev.of_node, "reg", &sdam->base);
if (rc < 0) {
dev_err(&pdev->dev, "Failed to get SDAM base, rc=%d\n", rc);
return -EINVAL;
}
rc = regmap_read(sdam->regmap, sdam->base + SDAM_SIZE, &val);
if (rc < 0) {
dev_err(&pdev->dev, "Failed to read SDAM_SIZE rc=%d\n", rc);
return -EINVAL;
}
sdam->size = val * 32;
sdam->sdam_config.dev = &pdev->dev;
sdam->sdam_config.name = "spmi_sdam";
sdam->sdam_config.id = NVMEM_DEVID_AUTO;
sdam->sdam_config.owner = THIS_MODULE;
sdam->sdam_config.stride = 1;
sdam->sdam_config.word_size = 1;
sdam->sdam_config.reg_read = sdam_read;
sdam->sdam_config.reg_write = sdam_write;
sdam->sdam_config.priv = sdam;
nvmem = devm_nvmem_register(&pdev->dev, &sdam->sdam_config);
if (IS_ERR(nvmem)) {
dev_err(&pdev->dev,
"Failed to register SDAM nvmem device rc=%ld\n",
PTR_ERR(nvmem));
return -ENXIO;
}
dev_dbg(&pdev->dev,
"SDAM base=%#x size=%u registered successfully\n",
sdam->base, sdam->size);
return 0;
}
static const struct of_device_id sdam_match_table[] = {
{ .compatible = "qcom,spmi-sdam" },
{},
};
MODULE_DEVICE_TABLE(of, sdam_match_table);
static struct platform_driver sdam_driver = {
.driver = {
.name = "qcom,spmi-sdam",
.of_match_table = sdam_match_table,
},
.probe = sdam_probe,
};
module_platform_driver(sdam_driver);
MODULE_DESCRIPTION("QCOM SPMI SDAM driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/nvmem/qcom-spmi-sdam.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Spreadtrum Communications Inc.
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/hwspinlock.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#define SPRD_EFUSE_ENABLE 0x20
#define SPRD_EFUSE_ERR_FLAG 0x24
#define SPRD_EFUSE_ERR_CLR 0x28
#define SPRD_EFUSE_MAGIC_NUM 0x2c
#define SPRD_EFUSE_FW_CFG 0x50
#define SPRD_EFUSE_PW_SWT 0x54
#define SPRD_EFUSE_MEM(val) (0x1000 + ((val) << 2))
#define SPRD_EFUSE_VDD_EN BIT(0)
#define SPRD_EFUSE_AUTO_CHECK_EN BIT(1)
#define SPRD_EFUSE_DOUBLE_EN BIT(2)
#define SPRD_EFUSE_MARGIN_RD_EN BIT(3)
#define SPRD_EFUSE_LOCK_WR_EN BIT(4)
#define SPRD_EFUSE_ERR_CLR_MASK GENMASK(13, 0)
#define SPRD_EFUSE_ENK1_ON BIT(0)
#define SPRD_EFUSE_ENK2_ON BIT(1)
#define SPRD_EFUSE_PROG_EN BIT(2)
#define SPRD_EFUSE_MAGIC_NUMBER 0x8810
/* Block width (bytes) definitions */
#define SPRD_EFUSE_BLOCK_WIDTH 4
/*
* The Spreadtrum AP efuse contains 2 parts: normal efuse and secure efuse,
 * and only the normal efuse is accessible from the kernel. So define the
 * normal block offset index and the number of normal blocks.
*/
#define SPRD_EFUSE_NORMAL_BLOCK_NUMS 24
#define SPRD_EFUSE_NORMAL_BLOCK_OFFSET 72
/* Timeout (ms) for the trylock of hardware spinlocks */
#define SPRD_EFUSE_HWLOCK_TIMEOUT 5000
/*
 * Different Spreadtrum SoCs can have different normal block numbers and
 * offsets. Some SoCs also support the block double feature, which means that
 * when reading or writing efuse memory the controller stores the data twice,
 * in case one copy becomes corrupted over time.
 *
 * Thus we save these parameters in the device data structure.
*/
struct sprd_efuse_variant_data {
u32 blk_nums;
u32 blk_offset;
bool blk_double;
};
struct sprd_efuse {
struct device *dev;
struct clk *clk;
struct hwspinlock *hwlock;
struct mutex mutex;
void __iomem *base;
const struct sprd_efuse_variant_data *data;
};
static const struct sprd_efuse_variant_data ums312_data = {
.blk_nums = SPRD_EFUSE_NORMAL_BLOCK_NUMS,
.blk_offset = SPRD_EFUSE_NORMAL_BLOCK_OFFSET,
.blk_double = false,
};
/*
 * On the Spreadtrum platform, multiple subsystems access the single efuse
 * controller, so we need a hardware spinlock to synchronize between them.
*/
static int sprd_efuse_lock(struct sprd_efuse *efuse)
{
int ret;
mutex_lock(&efuse->mutex);
ret = hwspin_lock_timeout_raw(efuse->hwlock,
SPRD_EFUSE_HWLOCK_TIMEOUT);
if (ret) {
dev_err(efuse->dev, "timeout get the hwspinlock\n");
mutex_unlock(&efuse->mutex);
return ret;
}
return 0;
}
static void sprd_efuse_unlock(struct sprd_efuse *efuse)
{
hwspin_unlock_raw(efuse->hwlock);
mutex_unlock(&efuse->mutex);
}
static void sprd_efuse_set_prog_power(struct sprd_efuse *efuse, bool en)
{
u32 val = readl(efuse->base + SPRD_EFUSE_PW_SWT);
if (en)
val &= ~SPRD_EFUSE_ENK2_ON;
else
val &= ~SPRD_EFUSE_ENK1_ON;
writel(val, efuse->base + SPRD_EFUSE_PW_SWT);
	/* Opening or closing the efuse power needs a 1000us wait to stabilize. */
usleep_range(1000, 1200);
if (en)
val |= SPRD_EFUSE_ENK1_ON;
else
val |= SPRD_EFUSE_ENK2_ON;
writel(val, efuse->base + SPRD_EFUSE_PW_SWT);
	/* Opening or closing the efuse power needs a 1000us wait to stabilize. */
usleep_range(1000, 1200);
}
static void sprd_efuse_set_read_power(struct sprd_efuse *efuse, bool en)
{
u32 val = readl(efuse->base + SPRD_EFUSE_ENABLE);
if (en)
val |= SPRD_EFUSE_VDD_EN;
else
val &= ~SPRD_EFUSE_VDD_EN;
writel(val, efuse->base + SPRD_EFUSE_ENABLE);
	/* Opening or closing the efuse power needs a 1000us wait to stabilize. */
usleep_range(1000, 1200);
}
static void sprd_efuse_set_prog_lock(struct sprd_efuse *efuse, bool en)
{
u32 val = readl(efuse->base + SPRD_EFUSE_ENABLE);
if (en)
val |= SPRD_EFUSE_LOCK_WR_EN;
else
val &= ~SPRD_EFUSE_LOCK_WR_EN;
writel(val, efuse->base + SPRD_EFUSE_ENABLE);
}
static void sprd_efuse_set_auto_check(struct sprd_efuse *efuse, bool en)
{
u32 val = readl(efuse->base + SPRD_EFUSE_ENABLE);
if (en)
val |= SPRD_EFUSE_AUTO_CHECK_EN;
else
val &= ~SPRD_EFUSE_AUTO_CHECK_EN;
writel(val, efuse->base + SPRD_EFUSE_ENABLE);
}
static void sprd_efuse_set_data_double(struct sprd_efuse *efuse, bool en)
{
u32 val = readl(efuse->base + SPRD_EFUSE_ENABLE);
if (en)
val |= SPRD_EFUSE_DOUBLE_EN;
else
val &= ~SPRD_EFUSE_DOUBLE_EN;
writel(val, efuse->base + SPRD_EFUSE_ENABLE);
}
static void sprd_efuse_set_prog_en(struct sprd_efuse *efuse, bool en)
{
u32 val = readl(efuse->base + SPRD_EFUSE_PW_SWT);
if (en)
val |= SPRD_EFUSE_PROG_EN;
else
val &= ~SPRD_EFUSE_PROG_EN;
writel(val, efuse->base + SPRD_EFUSE_PW_SWT);
}
static int sprd_efuse_raw_prog(struct sprd_efuse *efuse, u32 blk, bool doub,
bool lock, u32 *data)
{
u32 status;
int ret = 0;
/*
	 * We need to set the correct magic number before writing the efuse to
	 * allow programming, and to block other programming until we clear the
	 * magic number.
*/
writel(SPRD_EFUSE_MAGIC_NUMBER,
efuse->base + SPRD_EFUSE_MAGIC_NUM);
/*
	 * Power on the efuse, enable programming and enable double data
* if asked.
*/
sprd_efuse_set_prog_power(efuse, true);
sprd_efuse_set_prog_en(efuse, true);
sprd_efuse_set_data_double(efuse, doub);
/*
* Enable the auto-check function to validate if the programming is
* successful.
*/
if (lock)
sprd_efuse_set_auto_check(efuse, true);
writel(*data, efuse->base + SPRD_EFUSE_MEM(blk));
/* Disable auto-check and data double after programming */
if (lock)
sprd_efuse_set_auto_check(efuse, false);
sprd_efuse_set_data_double(efuse, false);
/*
* Check the efuse error status, if the programming is successful,
* we should lock this efuse block to avoid programming again.
*/
status = readl(efuse->base + SPRD_EFUSE_ERR_FLAG);
if (status) {
dev_err(efuse->dev,
"write error status %u of block %d\n", status, blk);
writel(SPRD_EFUSE_ERR_CLR_MASK,
efuse->base + SPRD_EFUSE_ERR_CLR);
ret = -EBUSY;
} else if (lock) {
sprd_efuse_set_prog_lock(efuse, lock);
writel(0, efuse->base + SPRD_EFUSE_MEM(blk));
sprd_efuse_set_prog_lock(efuse, false);
}
sprd_efuse_set_prog_power(efuse, false);
writel(0, efuse->base + SPRD_EFUSE_MAGIC_NUM);
return ret;
}
static int sprd_efuse_raw_read(struct sprd_efuse *efuse, int blk, u32 *val,
bool doub)
{
u32 status;
/*
	 * We need to power on the efuse before reading data from it, and we
	 * power it off again once the read has finished.
*/
sprd_efuse_set_read_power(efuse, true);
/* Enable double data if asked */
sprd_efuse_set_data_double(efuse, doub);
/* Start to read data from efuse block */
*val = readl(efuse->base + SPRD_EFUSE_MEM(blk));
/* Disable double data */
sprd_efuse_set_data_double(efuse, false);
/* Power off the efuse */
sprd_efuse_set_read_power(efuse, false);
/*
	 * Check the efuse error status and clear any errors that have
	 * occurred.
*/
status = readl(efuse->base + SPRD_EFUSE_ERR_FLAG);
if (status) {
dev_err(efuse->dev,
"read error status %d of block %d\n", status, blk);
writel(SPRD_EFUSE_ERR_CLR_MASK,
efuse->base + SPRD_EFUSE_ERR_CLR);
return -EBUSY;
}
return 0;
}
static int sprd_efuse_read(void *context, u32 offset, void *val, size_t bytes)
{
struct sprd_efuse *efuse = context;
bool blk_double = efuse->data->blk_double;
u32 index = offset / SPRD_EFUSE_BLOCK_WIDTH + efuse->data->blk_offset;
u32 blk_offset = (offset % SPRD_EFUSE_BLOCK_WIDTH) * BITS_PER_BYTE;
u32 data;
int ret;
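	/*
	 * The nvmem offset is converted into a hardware block index (past
	 * the normal-block offset) plus a bit shift for the byte position
	 * inside that 32-bit block.
	 */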
ret = sprd_efuse_lock(efuse);
if (ret)
return ret;
ret = clk_prepare_enable(efuse->clk);
if (ret)
goto unlock;
ret = sprd_efuse_raw_read(efuse, index, &data, blk_double);
if (!ret) {
data >>= blk_offset;
memcpy(val, &data, bytes);
}
clk_disable_unprepare(efuse->clk);
unlock:
sprd_efuse_unlock(efuse);
return ret;
}
static int sprd_efuse_write(void *context, u32 offset, void *val, size_t bytes)
{
struct sprd_efuse *efuse = context;
bool blk_double = efuse->data->blk_double;
bool lock;
int ret;
ret = sprd_efuse_lock(efuse);
if (ret)
return ret;
ret = clk_prepare_enable(efuse->clk);
if (ret)
goto unlock;
/*
	 * If the number of bytes to write equals the block width, the whole
	 * block will be programmed. In that case, lock the block so that it
	 * cannot be programmed again.
	 *
	 * If the block is only programmed partially, leave it unlocked so it
	 * can be programmed again.
*/
if (bytes < SPRD_EFUSE_BLOCK_WIDTH)
lock = false;
else
lock = true;
ret = sprd_efuse_raw_prog(efuse, offset, blk_double, lock, val);
clk_disable_unprepare(efuse->clk);
unlock:
sprd_efuse_unlock(efuse);
return ret;
}
static int sprd_efuse_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct nvmem_device *nvmem;
struct nvmem_config econfig = { };
struct sprd_efuse *efuse;
const struct sprd_efuse_variant_data *pdata;
int ret;
pdata = of_device_get_match_data(&pdev->dev);
if (!pdata) {
dev_err(&pdev->dev, "No matching driver data found\n");
return -EINVAL;
}
efuse = devm_kzalloc(&pdev->dev, sizeof(*efuse), GFP_KERNEL);
if (!efuse)
return -ENOMEM;
efuse->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(efuse->base))
return PTR_ERR(efuse->base);
ret = of_hwspin_lock_get_id(np, 0);
if (ret < 0) {
dev_err(&pdev->dev, "failed to get hwlock id\n");
return ret;
}
efuse->hwlock = devm_hwspin_lock_request_specific(&pdev->dev, ret);
if (!efuse->hwlock) {
dev_err(&pdev->dev, "failed to request hwlock\n");
return -ENXIO;
}
efuse->clk = devm_clk_get(&pdev->dev, "enable");
if (IS_ERR(efuse->clk)) {
dev_err(&pdev->dev, "failed to get enable clock\n");
return PTR_ERR(efuse->clk);
}
mutex_init(&efuse->mutex);
efuse->dev = &pdev->dev;
efuse->data = pdata;
econfig.stride = 1;
econfig.word_size = 1;
econfig.read_only = false;
econfig.name = "sprd-efuse";
econfig.size = efuse->data->blk_nums * SPRD_EFUSE_BLOCK_WIDTH;
econfig.reg_read = sprd_efuse_read;
econfig.reg_write = sprd_efuse_write;
econfig.priv = efuse;
econfig.dev = &pdev->dev;
nvmem = devm_nvmem_register(&pdev->dev, &econfig);
if (IS_ERR(nvmem)) {
dev_err(&pdev->dev, "failed to register nvmem\n");
return PTR_ERR(nvmem);
}
return 0;
}
static const struct of_device_id sprd_efuse_of_match[] = {
{ .compatible = "sprd,ums312-efuse", .data = &ums312_data },
{ }
};
static struct platform_driver sprd_efuse_driver = {
.probe = sprd_efuse_probe,
.driver = {
.name = "sprd-efuse",
.of_match_table = sprd_efuse_of_match,
},
};
module_platform_driver(sprd_efuse_driver);
MODULE_AUTHOR("Freeman Liu <[email protected]>");
MODULE_DESCRIPTION("Spreadtrum AP efuse driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/nvmem/sprd-efuse.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Apple SoC eFuse driver
*
* Copyright (C) The Asahi Linux Contributors
*/
#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>
struct apple_efuses_priv {
void __iomem *fuses;
};
static int apple_efuses_read(void *context, unsigned int offset, void *val,
size_t bytes)
{
struct apple_efuses_priv *priv = context;
u32 *dst = val;
while (bytes >= sizeof(u32)) {
*dst++ = readl_relaxed(priv->fuses + offset);
bytes -= sizeof(u32);
offset += sizeof(u32);
}
return 0;
}
static int apple_efuses_probe(struct platform_device *pdev)
{
struct apple_efuses_priv *priv;
struct resource *res;
struct nvmem_config config = {
.dev = &pdev->dev,
.read_only = true,
.reg_read = apple_efuses_read,
.stride = sizeof(u32),
.word_size = sizeof(u32),
.name = "apple_efuses_nvmem",
.id = NVMEM_DEVID_AUTO,
.root_only = true,
};
priv = devm_kzalloc(config.dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->fuses = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(priv->fuses))
return PTR_ERR(priv->fuses);
config.priv = priv;
config.size = resource_size(res);
return PTR_ERR_OR_ZERO(devm_nvmem_register(config.dev, &config));
}
static const struct of_device_id apple_efuses_of_match[] = {
{ .compatible = "apple,efuses", },
{}
};
MODULE_DEVICE_TABLE(of, apple_efuses_of_match);
static struct platform_driver apple_efuses_driver = {
.driver = {
.name = "apple_efuses",
.of_match_table = apple_efuses_of_match,
},
.probe = apple_efuses_probe,
};
module_platform_driver(apple_efuses_driver);
MODULE_AUTHOR("Sven Peter <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/nvmem/apple-efuses.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* UniPhier eFuse driver
*
* Copyright (C) 2017 Socionext Inc.
*/
#include <linux/device.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>
struct uniphier_efuse_priv {
void __iomem *base;
};
static int uniphier_reg_read(void *context,
unsigned int reg, void *_val, size_t bytes)
{
struct uniphier_efuse_priv *priv = context;
u8 *val = _val;
int offs;
for (offs = 0; offs < bytes; offs += sizeof(u8))
*val++ = readb(priv->base + reg + offs);
return 0;
}
static int uniphier_efuse_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
struct nvmem_device *nvmem;
struct nvmem_config econfig = {};
struct uniphier_efuse_priv *priv;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
econfig.stride = 1;
econfig.word_size = 1;
econfig.read_only = true;
econfig.reg_read = uniphier_reg_read;
econfig.size = resource_size(res);
econfig.priv = priv;
econfig.dev = dev;
nvmem = devm_nvmem_register(dev, &econfig);
return PTR_ERR_OR_ZERO(nvmem);
}
static const struct of_device_id uniphier_efuse_of_match[] = {
{ .compatible = "socionext,uniphier-efuse",},
{/* sentinel */},
};
MODULE_DEVICE_TABLE(of, uniphier_efuse_of_match);
static struct platform_driver uniphier_efuse_driver = {
.probe = uniphier_efuse_probe,
.driver = {
.name = "uniphier-efuse",
.of_match_table = uniphier_efuse_of_match,
},
};
module_platform_driver(uniphier_efuse_driver);
MODULE_AUTHOR("Keiji Hayashibara <[email protected]>");
MODULE_DESCRIPTION("UniPhier eFuse driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/nvmem/uniphier-efuse.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015 Srinivas Kandagatla <[email protected]>
*/
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/regulator/consumer.h>
/* Blow timer clock frequency in MHz */
#define QFPROM_BLOW_TIMER_OFFSET 0x03c
/* Amount of time required to hold charge to blow fuse in micro-seconds */
#define QFPROM_FUSE_BLOW_POLL_US 100
#define QFPROM_FUSE_BLOW_TIMEOUT_US 10000
#define QFPROM_BLOW_STATUS_OFFSET 0x048
#define QFPROM_BLOW_STATUS_BUSY 0x1
#define QFPROM_BLOW_STATUS_READY 0x0
#define QFPROM_ACCEL_OFFSET 0x044
#define QFPROM_VERSION_OFFSET 0x0
#define QFPROM_MAJOR_VERSION_SHIFT 28
#define QFPROM_MAJOR_VERSION_MASK GENMASK(31, QFPROM_MAJOR_VERSION_SHIFT)
#define QFPROM_MINOR_VERSION_SHIFT 16
#define QFPROM_MINOR_VERSION_MASK GENMASK(27, QFPROM_MINOR_VERSION_SHIFT)
static bool read_raw_data;
module_param(read_raw_data, bool, 0644);
MODULE_PARM_DESC(read_raw_data, "Read raw instead of corrected data");
/**
* struct qfprom_soc_data - config that varies from SoC to SoC.
*
* @accel_value: Should contain qfprom accel value.
* @qfprom_blow_timer_value: The timer value of qfprom when doing efuse blow.
 * @qfprom_blow_set_freq: The clock frequency to set when we start
 *	blowing fuses.
* @qfprom_blow_uV: LDO voltage to be set when doing efuse blow
*/
struct qfprom_soc_data {
u32 accel_value;
u32 qfprom_blow_timer_value;
u32 qfprom_blow_set_freq;
int qfprom_blow_uV;
};
/**
* struct qfprom_priv - structure holding qfprom attributes
*
* @qfpraw: iomapped memory space for qfprom-efuse raw address space.
* @qfpconf: iomapped memory space for qfprom-efuse configuration address
* space.
* @qfpcorrected: iomapped memory space for qfprom corrected address space.
* @qfpsecurity: iomapped memory space for qfprom security control space.
* @dev: qfprom device structure.
* @secclk: Clock supply.
* @vcc: Regulator supply.
 * @soc_data: Data for things that vary from SoC to SoC.
*/
struct qfprom_priv {
void __iomem *qfpraw;
void __iomem *qfpconf;
void __iomem *qfpcorrected;
void __iomem *qfpsecurity;
struct device *dev;
struct clk *secclk;
struct regulator *vcc;
const struct qfprom_soc_data *soc_data;
};
/**
* struct qfprom_touched_values - saved values to restore after blowing
*
* @clk_rate: The rate the clock was at before blowing.
* @accel_val: The value of the accel reg before blowing.
* @timer_val: The value of the timer before blowing.
*/
struct qfprom_touched_values {
unsigned long clk_rate;
u32 accel_val;
u32 timer_val;
};
/**
* struct qfprom_soc_compatible_data - Data matched against the SoC
* compatible string.
*
* @keepout: Array of keepout regions for this SoC.
* @nkeepout: Number of elements in the keepout array.
*/
struct qfprom_soc_compatible_data {
const struct nvmem_keepout *keepout;
unsigned int nkeepout;
};
static const struct nvmem_keepout sc7180_qfprom_keepout[] = {
{.start = 0x128, .end = 0x148},
{.start = 0x220, .end = 0x228}
};
static const struct qfprom_soc_compatible_data sc7180_qfprom = {
.keepout = sc7180_qfprom_keepout,
.nkeepout = ARRAY_SIZE(sc7180_qfprom_keepout)
};
static const struct nvmem_keepout sc7280_qfprom_keepout[] = {
{.start = 0x128, .end = 0x148},
{.start = 0x238, .end = 0x248}
};
static const struct qfprom_soc_compatible_data sc7280_qfprom = {
.keepout = sc7280_qfprom_keepout,
.nkeepout = ARRAY_SIZE(sc7280_qfprom_keepout)
};
/**
* qfprom_disable_fuse_blowing() - Undo enabling of fuse blowing.
* @priv: Our driver data.
* @old: The data that was stashed from before fuse blowing.
*
* Resets the value of the blow timer, accel register and the clock
* and voltage settings.
*
* Prints messages if there are errors but doesn't return an error code
* since there's not much we can do upon failure.
*/
static void qfprom_disable_fuse_blowing(const struct qfprom_priv *priv,
const struct qfprom_touched_values *old)
{
int ret;
writel(old->timer_val, priv->qfpconf + QFPROM_BLOW_TIMER_OFFSET);
writel(old->accel_val, priv->qfpconf + QFPROM_ACCEL_OFFSET);
dev_pm_genpd_set_performance_state(priv->dev, 0);
pm_runtime_put(priv->dev);
/*
* This may be a shared rail and may be able to run at a lower rate
* when we're not blowing fuses. At the moment, the regulator framework
* applies voltage constraints even on disabled rails, so remove our
* constraints and allow the rail to be adjusted by other users.
*/
ret = regulator_set_voltage(priv->vcc, 0, INT_MAX);
if (ret)
dev_warn(priv->dev, "Failed to set 0 voltage (ignoring)\n");
ret = regulator_disable(priv->vcc);
if (ret)
dev_warn(priv->dev, "Failed to disable regulator (ignoring)\n");
ret = clk_set_rate(priv->secclk, old->clk_rate);
if (ret)
dev_warn(priv->dev,
"Failed to set clock rate for disable (ignoring)\n");
clk_disable_unprepare(priv->secclk);
}
/**
* qfprom_enable_fuse_blowing() - Enable fuse blowing.
* @priv: Our driver data.
* @old: We'll stash stuff here to use when disabling.
*
* Sets the value of the blow timer, accel register and the clock
* and voltage settings.
*
* Prints messages if there are errors so caller doesn't need to.
*
* Return: 0 or -err.
*/
static int qfprom_enable_fuse_blowing(const struct qfprom_priv *priv,
struct qfprom_touched_values *old)
{
int ret;
int qfprom_blow_uV = priv->soc_data->qfprom_blow_uV;
ret = clk_prepare_enable(priv->secclk);
if (ret) {
dev_err(priv->dev, "Failed to enable clock\n");
return ret;
}
old->clk_rate = clk_get_rate(priv->secclk);
ret = clk_set_rate(priv->secclk, priv->soc_data->qfprom_blow_set_freq);
if (ret) {
dev_err(priv->dev, "Failed to set clock rate for enable\n");
goto err_clk_prepared;
}
/*
* Hardware requires a minimum voltage for fuse blowing.
* This may be a shared rail so don't specify a maximum.
* Regulator constraints will cap to the actual maximum.
*/
ret = regulator_set_voltage(priv->vcc, qfprom_blow_uV, INT_MAX);
if (ret) {
dev_err(priv->dev, "Failed to set %duV\n", qfprom_blow_uV);
goto err_clk_rate_set;
}
ret = regulator_enable(priv->vcc);
if (ret) {
dev_err(priv->dev, "Failed to enable regulator\n");
goto err_clk_rate_set;
}
ret = pm_runtime_resume_and_get(priv->dev);
if (ret < 0) {
dev_err(priv->dev, "Failed to enable power-domain\n");
goto err_reg_enable;
}
dev_pm_genpd_set_performance_state(priv->dev, INT_MAX);
old->timer_val = readl(priv->qfpconf + QFPROM_BLOW_TIMER_OFFSET);
old->accel_val = readl(priv->qfpconf + QFPROM_ACCEL_OFFSET);
writel(priv->soc_data->qfprom_blow_timer_value,
priv->qfpconf + QFPROM_BLOW_TIMER_OFFSET);
writel(priv->soc_data->accel_value,
priv->qfpconf + QFPROM_ACCEL_OFFSET);
return 0;
err_reg_enable:
regulator_disable(priv->vcc);
err_clk_rate_set:
clk_set_rate(priv->secclk, old->clk_rate);
err_clk_prepared:
clk_disable_unprepare(priv->secclk);
return ret;
}
/**
* qfprom_reg_write() - Write to fuses.
* @context: Our driver data.
* @reg: The offset to write at.
* @_val: Pointer to data to write.
* @bytes: The number of bytes to write.
*
* Writes to fuses. WARNING: THIS IS PERMANENT.
*
* Return: 0 or -err.
*/
static int qfprom_reg_write(void *context, unsigned int reg, void *_val,
size_t bytes)
{
struct qfprom_priv *priv = context;
struct qfprom_touched_values old;
int words = bytes / 4;
u32 *value = _val;
u32 blow_status;
int ret;
int i;
dev_dbg(priv->dev,
"Writing to raw qfprom region : %#010x of size: %zu\n",
reg, bytes);
/*
* The hardware only allows us to write word at a time, but we can
* read byte at a time. Until the nvmem framework allows a separate
* word_size and stride for reading vs. writing, we'll enforce here.
*/
if (bytes % 4) {
dev_err(priv->dev,
"%zu is not an integral number of words\n", bytes);
return -EINVAL;
}
if (reg % 4) {
dev_err(priv->dev,
"Invalid offset: %#x. Must be word aligned\n", reg);
return -EINVAL;
}
ret = qfprom_enable_fuse_blowing(priv, &old);
if (ret)
return ret;
ret = readl_relaxed_poll_timeout(
priv->qfpconf + QFPROM_BLOW_STATUS_OFFSET,
blow_status, blow_status == QFPROM_BLOW_STATUS_READY,
QFPROM_FUSE_BLOW_POLL_US, QFPROM_FUSE_BLOW_TIMEOUT_US);
if (ret) {
dev_err(priv->dev,
"Timeout waiting for initial ready; aborting.\n");
goto exit_enabled_fuse_blowing;
}
for (i = 0; i < words; i++)
writel(value[i], priv->qfpraw + reg + (i * 4));
ret = readl_relaxed_poll_timeout(
priv->qfpconf + QFPROM_BLOW_STATUS_OFFSET,
blow_status, blow_status == QFPROM_BLOW_STATUS_READY,
QFPROM_FUSE_BLOW_POLL_US, QFPROM_FUSE_BLOW_TIMEOUT_US);
/* Give an error, but not much we can do in this case */
if (ret)
dev_err(priv->dev, "Timeout waiting for finish.\n");
exit_enabled_fuse_blowing:
qfprom_disable_fuse_blowing(priv, &old);
return ret;
}
static int qfprom_reg_read(void *context,
unsigned int reg, void *_val, size_t bytes)
{
struct qfprom_priv *priv = context;
u8 *val = _val;
int i = 0, words = bytes;
void __iomem *base = priv->qfpcorrected;
if (read_raw_data && priv->qfpraw)
base = priv->qfpraw;
while (words--)
*val++ = readb(base + reg + i++);
return 0;
}
static void qfprom_runtime_disable(void *data)
{
pm_runtime_disable(data);
}
static const struct qfprom_soc_data qfprom_7_8_data = {
.accel_value = 0xD10,
.qfprom_blow_timer_value = 25,
.qfprom_blow_set_freq = 4800000,
.qfprom_blow_uV = 1800000,
};
static const struct qfprom_soc_data qfprom_7_15_data = {
.accel_value = 0xD08,
.qfprom_blow_timer_value = 24,
.qfprom_blow_set_freq = 4800000,
.qfprom_blow_uV = 1900000,
};
static int qfprom_probe(struct platform_device *pdev)
{
struct nvmem_config econfig = {
.name = "qfprom",
.stride = 1,
.word_size = 1,
.id = NVMEM_DEVID_AUTO,
.reg_read = qfprom_reg_read,
};
struct device *dev = &pdev->dev;
struct resource *res;
struct nvmem_device *nvmem;
const struct qfprom_soc_compatible_data *soc_data;
struct qfprom_priv *priv;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
/* The corrected section is always provided */
priv->qfpcorrected = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(priv->qfpcorrected))
return PTR_ERR(priv->qfpcorrected);
econfig.size = resource_size(res);
econfig.dev = dev;
econfig.priv = priv;
priv->dev = dev;
soc_data = device_get_match_data(dev);
if (soc_data) {
econfig.keepout = soc_data->keepout;
econfig.nkeepout = soc_data->nkeepout;
}
/*
* If more than one region is provided then the OS has the ability
* to write.
*/
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (res) {
u32 version;
int major_version, minor_version;
priv->qfpraw = devm_ioremap_resource(dev, res);
if (IS_ERR(priv->qfpraw))
return PTR_ERR(priv->qfpraw);
priv->qfpconf = devm_platform_ioremap_resource(pdev, 2);
if (IS_ERR(priv->qfpconf))
return PTR_ERR(priv->qfpconf);
priv->qfpsecurity = devm_platform_ioremap_resource(pdev, 3);
if (IS_ERR(priv->qfpsecurity))
return PTR_ERR(priv->qfpsecurity);
version = readl(priv->qfpsecurity + QFPROM_VERSION_OFFSET);
major_version = (version & QFPROM_MAJOR_VERSION_MASK) >>
QFPROM_MAJOR_VERSION_SHIFT;
minor_version = (version & QFPROM_MINOR_VERSION_MASK) >>
QFPROM_MINOR_VERSION_SHIFT;
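		/*
		 * Fuse-blow parameters depend on the controller version; if
		 * the version is not matched below, soc_data stays NULL and
		 * writing is left disabled.
		 */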
if (major_version == 7 && minor_version == 8)
priv->soc_data = &qfprom_7_8_data;
else if (major_version == 7 && minor_version == 15)
priv->soc_data = &qfprom_7_15_data;
priv->vcc = devm_regulator_get(&pdev->dev, "vcc");
if (IS_ERR(priv->vcc))
return PTR_ERR(priv->vcc);
priv->secclk = devm_clk_get(dev, "core");
if (IS_ERR(priv->secclk))
return dev_err_probe(dev, PTR_ERR(priv->secclk), "Error getting clock\n");
/* Only enable writing if we have SoC data. */
if (priv->soc_data)
econfig.reg_write = qfprom_reg_write;
}
pm_runtime_enable(dev);
ret = devm_add_action_or_reset(dev, qfprom_runtime_disable, dev);
if (ret)
return ret;
nvmem = devm_nvmem_register(dev, &econfig);
return PTR_ERR_OR_ZERO(nvmem);
}
static const struct of_device_id qfprom_of_match[] = {
{ .compatible = "qcom,qfprom",},
{ .compatible = "qcom,sc7180-qfprom", .data = &sc7180_qfprom},
{ .compatible = "qcom,sc7280-qfprom", .data = &sc7280_qfprom},
{/* sentinel */},
};
MODULE_DEVICE_TABLE(of, qfprom_of_match);
static struct platform_driver qfprom_driver = {
.probe = qfprom_probe,
.driver = {
.name = "qcom,qfprom",
.of_match_table = qfprom_of_match,
},
};
module_platform_driver(qfprom_driver);
MODULE_AUTHOR("Srinivas Kandagatla <[email protected]>");
MODULE_DESCRIPTION("Qualcomm QFPROM driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/nvmem/qfprom.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Spreadtrum Communications Inc.
#include <linux/hwspinlock.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/nvmem-provider.h>
/* PMIC global registers definition */
#define SC27XX_MODULE_EN 0xc08
#define SC2730_MODULE_EN 0x1808
#define SC27XX_EFUSE_EN BIT(6)
/* Efuse controller registers definition */
#define SC27XX_EFUSE_GLB_CTRL 0x0
#define SC27XX_EFUSE_DATA_RD 0x4
#define SC27XX_EFUSE_DATA_WR 0x8
#define SC27XX_EFUSE_BLOCK_INDEX 0xc
#define SC27XX_EFUSE_MODE_CTRL 0x10
#define SC27XX_EFUSE_STATUS 0x14
#define SC27XX_EFUSE_WR_TIMING_CTRL 0x20
#define SC27XX_EFUSE_RD_TIMING_CTRL 0x24
#define SC27XX_EFUSE_EFUSE_DEB_CTRL 0x28
/* Mask definition for SC27XX_EFUSE_BLOCK_INDEX register */
#define SC27XX_EFUSE_BLOCK_MASK GENMASK(4, 0)
/* Bits definitions for SC27XX_EFUSE_MODE_CTRL register */
#define SC27XX_EFUSE_PG_START BIT(0)
#define SC27XX_EFUSE_RD_START BIT(1)
#define SC27XX_EFUSE_CLR_RDDONE BIT(2)
/* Bits definitions for SC27XX_EFUSE_STATUS register */
#define SC27XX_EFUSE_PGM_BUSY BIT(0)
#define SC27XX_EFUSE_READ_BUSY BIT(1)
#define SC27XX_EFUSE_STANDBY BIT(2)
#define SC27XX_EFUSE_GLOBAL_PROT BIT(3)
#define SC27XX_EFUSE_RD_DONE BIT(4)
/* Block number and block width (bytes) definitions */
#define SC27XX_EFUSE_BLOCK_MAX 32
#define SC27XX_EFUSE_BLOCK_WIDTH 2
/* Timeout (ms) for the trylock of hardware spinlocks */
#define SC27XX_EFUSE_HWLOCK_TIMEOUT 5000
/* Timeout (us) of polling the status */
#define SC27XX_EFUSE_POLL_TIMEOUT 3000000
#define SC27XX_EFUSE_POLL_DELAY_US 10000
/*
 * Different PMICs in the SC27xx series can have different register
 * addresses, so we save the address in the device data structure.
*/
struct sc27xx_efuse_variant_data {
u32 module_en;
};
struct sc27xx_efuse {
struct device *dev;
struct regmap *regmap;
struct hwspinlock *hwlock;
struct mutex mutex;
u32 base;
const struct sc27xx_efuse_variant_data *var_data;
};
static const struct sc27xx_efuse_variant_data sc2731_edata = {
.module_en = SC27XX_MODULE_EN,
};
static const struct sc27xx_efuse_variant_data sc2730_edata = {
.module_en = SC2730_MODULE_EN,
};
/*
 * On the Spreadtrum platform, multiple subsystems access the single efuse
 * controller, so we need a hardware spinlock to synchronize between them.
*/
static int sc27xx_efuse_lock(struct sc27xx_efuse *efuse)
{
int ret;
mutex_lock(&efuse->mutex);
ret = hwspin_lock_timeout_raw(efuse->hwlock,
SC27XX_EFUSE_HWLOCK_TIMEOUT);
if (ret) {
dev_err(efuse->dev, "timeout to get the hwspinlock\n");
mutex_unlock(&efuse->mutex);
return ret;
}
return 0;
}
static void sc27xx_efuse_unlock(struct sc27xx_efuse *efuse)
{
hwspin_unlock_raw(efuse->hwlock);
mutex_unlock(&efuse->mutex);
}
static int sc27xx_efuse_poll_status(struct sc27xx_efuse *efuse, u32 bits)
{
int ret;
u32 val;
ret = regmap_read_poll_timeout(efuse->regmap,
efuse->base + SC27XX_EFUSE_STATUS,
val, (val & bits),
SC27XX_EFUSE_POLL_DELAY_US,
SC27XX_EFUSE_POLL_TIMEOUT);
if (ret) {
dev_err(efuse->dev, "timeout to update the efuse status\n");
return ret;
}
return 0;
}
static int sc27xx_efuse_read(void *context, u32 offset, void *val, size_t bytes)
{
struct sc27xx_efuse *efuse = context;
u32 buf, blk_index = offset / SC27XX_EFUSE_BLOCK_WIDTH;
u32 blk_offset = (offset % SC27XX_EFUSE_BLOCK_WIDTH) * BITS_PER_BYTE;
int ret;
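	/*
	 * Each efuse block is 2 bytes wide: the nvmem offset is split into a
	 * block index and a bit shift for the byte offset inside that block.
	 */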
if (blk_index > SC27XX_EFUSE_BLOCK_MAX ||
bytes > SC27XX_EFUSE_BLOCK_WIDTH)
return -EINVAL;
ret = sc27xx_efuse_lock(efuse);
if (ret)
return ret;
/* Enable the efuse controller. */
ret = regmap_update_bits(efuse->regmap, efuse->var_data->module_en,
SC27XX_EFUSE_EN, SC27XX_EFUSE_EN);
if (ret)
goto unlock_efuse;
/*
* Before reading, we should ensure the efuse controller is in
* standby state.
*/
ret = sc27xx_efuse_poll_status(efuse, SC27XX_EFUSE_STANDBY);
if (ret)
goto disable_efuse;
/* Set the block address to be read. */
ret = regmap_write(efuse->regmap,
efuse->base + SC27XX_EFUSE_BLOCK_INDEX,
blk_index & SC27XX_EFUSE_BLOCK_MASK);
if (ret)
goto disable_efuse;
/* Start reading process from efuse memory. */
ret = regmap_update_bits(efuse->regmap,
efuse->base + SC27XX_EFUSE_MODE_CTRL,
SC27XX_EFUSE_RD_START,
SC27XX_EFUSE_RD_START);
if (ret)
goto disable_efuse;
/*
	 * Poll the read-done status to make sure the read has completed,
	 * which means the data can now be read out.
*/
ret = sc27xx_efuse_poll_status(efuse, SC27XX_EFUSE_RD_DONE);
if (ret)
goto disable_efuse;
/* Read data from efuse memory. */
ret = regmap_read(efuse->regmap, efuse->base + SC27XX_EFUSE_DATA_RD,
&buf);
if (ret)
goto disable_efuse;
/* Clear the read done flag. */
ret = regmap_update_bits(efuse->regmap,
efuse->base + SC27XX_EFUSE_MODE_CTRL,
SC27XX_EFUSE_CLR_RDDONE,
SC27XX_EFUSE_CLR_RDDONE);
disable_efuse:
/* Disable the efuse controller after reading. */
regmap_update_bits(efuse->regmap, efuse->var_data->module_en, SC27XX_EFUSE_EN, 0);
unlock_efuse:
sc27xx_efuse_unlock(efuse);
if (!ret) {
buf >>= blk_offset;
memcpy(val, &buf, bytes);
}
return ret;
}
static int sc27xx_efuse_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct nvmem_config econfig = { };
struct nvmem_device *nvmem;
struct sc27xx_efuse *efuse;
int ret;
efuse = devm_kzalloc(&pdev->dev, sizeof(*efuse), GFP_KERNEL);
if (!efuse)
return -ENOMEM;
efuse->regmap = dev_get_regmap(pdev->dev.parent, NULL);
if (!efuse->regmap) {
dev_err(&pdev->dev, "failed to get efuse regmap\n");
return -ENODEV;
}
ret = of_property_read_u32(np, "reg", &efuse->base);
if (ret) {
dev_err(&pdev->dev, "failed to get efuse base address\n");
return ret;
}
ret = of_hwspin_lock_get_id(np, 0);
if (ret < 0) {
dev_err(&pdev->dev, "failed to get hwspinlock id\n");
return ret;
}
efuse->hwlock = devm_hwspin_lock_request_specific(&pdev->dev, ret);
if (!efuse->hwlock) {
dev_err(&pdev->dev, "failed to request hwspinlock\n");
return -ENXIO;
}
mutex_init(&efuse->mutex);
efuse->dev = &pdev->dev;
efuse->var_data = of_device_get_match_data(&pdev->dev);
econfig.stride = 1;
econfig.word_size = 1;
econfig.read_only = true;
econfig.name = "sc27xx-efuse";
econfig.size = SC27XX_EFUSE_BLOCK_MAX * SC27XX_EFUSE_BLOCK_WIDTH;
econfig.reg_read = sc27xx_efuse_read;
econfig.priv = efuse;
econfig.dev = &pdev->dev;
nvmem = devm_nvmem_register(&pdev->dev, &econfig);
if (IS_ERR(nvmem)) {
dev_err(&pdev->dev, "failed to register nvmem config\n");
return PTR_ERR(nvmem);
}
return 0;
}
static const struct of_device_id sc27xx_efuse_of_match[] = {
{ .compatible = "sprd,sc2731-efuse", .data = &sc2731_edata},
{ .compatible = "sprd,sc2730-efuse", .data = &sc2730_edata},
{ }
};
static struct platform_driver sc27xx_efuse_driver = {
.probe = sc27xx_efuse_probe,
.driver = {
.name = "sc27xx-efuse",
.of_match_table = sc27xx_efuse_of_match,
},
};
module_platform_driver(sc27xx_efuse_driver);
MODULE_AUTHOR("Freeman Liu <[email protected]>");
MODULE_DESCRIPTION("Spreadtrum SC27xx efuse driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/nvmem/sc27xx-efuse.c |
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2016 Broadcom
#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/platform_device.h>
/*
* # of tries for OTP Status. The time to execute a command varies. The slowest
* commands are writes which also vary based on the # of bits turned on. Writing
* 0xffffffff takes ~3800 us.
*/
#define OTPC_RETRIES 5000
/* Sequence to enable OTP program */
#define OTPC_PROG_EN_SEQ { 0xf, 0x4, 0x8, 0xd }
/* OTPC Commands */
#define OTPC_CMD_READ 0x0
#define OTPC_CMD_OTP_PROG_ENABLE 0x2
#define OTPC_CMD_OTP_PROG_DISABLE 0x3
#define OTPC_CMD_PROGRAM 0x8
/* OTPC Status Bits */
#define OTPC_STAT_CMD_DONE BIT(1)
#define OTPC_STAT_PROG_OK BIT(2)
/* OTPC register definition */
#define OTPC_MODE_REG_OFFSET 0x0
#define OTPC_MODE_REG_OTPC_MODE 0
#define OTPC_COMMAND_OFFSET 0x4
#define OTPC_COMMAND_COMMAND_WIDTH 6
#define OTPC_CMD_START_OFFSET 0x8
#define OTPC_CMD_START_START 0
#define OTPC_CPU_STATUS_OFFSET 0xc
#define OTPC_CPUADDR_REG_OFFSET 0x28
#define OTPC_CPUADDR_REG_OTPC_CPU_ADDRESS_WIDTH 16
#define OTPC_CPU_WRITE_REG_OFFSET 0x2c
#define OTPC_CMD_MASK (BIT(OTPC_COMMAND_COMMAND_WIDTH) - 1)
#define OTPC_ADDR_MASK (BIT(OTPC_CPUADDR_REG_OTPC_CPU_ADDRESS_WIDTH) - 1)
struct otpc_map {
/* in words. */
u32 otpc_row_size;
/* 128 bit row / 4 words support. */
u16 data_r_offset[4];
/* 128 bit row / 4 words support. */
u16 data_w_offset[4];
};
static struct otpc_map otp_map = {
.otpc_row_size = 1,
.data_r_offset = {0x10},
.data_w_offset = {0x2c},
};
static struct otpc_map otp_map_v2 = {
.otpc_row_size = 2,
.data_r_offset = {0x10, 0x5c},
.data_w_offset = {0x2c, 0x64},
};
struct otpc_priv {
struct device *dev;
void __iomem *base;
const struct otpc_map *map;
struct nvmem_config *config;
};
static inline void set_command(void __iomem *base, u32 command)
{
writel(command & OTPC_CMD_MASK, base + OTPC_COMMAND_OFFSET);
}
static inline void set_cpu_address(void __iomem *base, u32 addr)
{
writel(addr & OTPC_ADDR_MASK, base + OTPC_CPUADDR_REG_OFFSET);
}
static inline void set_start_bit(void __iomem *base)
{
writel(1 << OTPC_CMD_START_START, base + OTPC_CMD_START_OFFSET);
}
static inline void reset_start_bit(void __iomem *base)
{
writel(0, base + OTPC_CMD_START_OFFSET);
}
static inline void write_cpu_data(void __iomem *base, u32 value)
{
writel(value, base + OTPC_CPU_WRITE_REG_OFFSET);
}
static int poll_cpu_status(void __iomem *base, u32 value)
{
u32 status;
u32 retries;
for (retries = 0; retries < OTPC_RETRIES; retries++) {
status = readl(base + OTPC_CPU_STATUS_OFFSET);
if (status & value)
break;
udelay(1);
}
if (retries == OTPC_RETRIES)
return -EAGAIN;
return 0;
}
static int enable_ocotp_program(void __iomem *base)
{
static const u32 vals[] = OTPC_PROG_EN_SEQ;
int i;
int ret;
/* Write the magic sequence to enable programming */
set_command(base, OTPC_CMD_OTP_PROG_ENABLE);
for (i = 0; i < ARRAY_SIZE(vals); i++) {
write_cpu_data(base, vals[i]);
set_start_bit(base);
ret = poll_cpu_status(base, OTPC_STAT_CMD_DONE);
reset_start_bit(base);
if (ret)
return ret;
}
return poll_cpu_status(base, OTPC_STAT_PROG_OK);
}
static int disable_ocotp_program(void __iomem *base)
{
int ret;
set_command(base, OTPC_CMD_OTP_PROG_DISABLE);
set_start_bit(base);
ret = poll_cpu_status(base, OTPC_STAT_PROG_OK);
reset_start_bit(base);
return ret;
}
static int bcm_otpc_read(void *context, unsigned int offset, void *val,
size_t bytes)
{
struct otpc_priv *priv = context;
u32 *buf = val;
u32 bytes_read;
u32 address = offset / priv->config->word_size;
int i, ret;
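	/*
	 * Each READ command returns one OTP row, which is one or two 32-bit
	 * words depending on the controller version, so step through rows
	 * until the requested byte count has been copied out.
	 */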
for (bytes_read = 0; bytes_read < bytes;) {
set_command(priv->base, OTPC_CMD_READ);
set_cpu_address(priv->base, address++);
set_start_bit(priv->base);
ret = poll_cpu_status(priv->base, OTPC_STAT_CMD_DONE);
if (ret) {
dev_err(priv->dev, "otp read error: 0x%x", ret);
return -EIO;
}
for (i = 0; i < priv->map->otpc_row_size; i++) {
*buf++ = readl(priv->base +
priv->map->data_r_offset[i]);
bytes_read += sizeof(*buf);
}
reset_start_bit(priv->base);
}
return 0;
}
static int bcm_otpc_write(void *context, unsigned int offset, void *val,
size_t bytes)
{
struct otpc_priv *priv = context;
u32 *buf = val;
u32 bytes_written;
u32 address = offset / priv->config->word_size;
int i, ret;
if (offset % priv->config->word_size)
return -EINVAL;
ret = enable_ocotp_program(priv->base);
if (ret)
return -EIO;
for (bytes_written = 0; bytes_written < bytes;) {
set_command(priv->base, OTPC_CMD_PROGRAM);
set_cpu_address(priv->base, address++);
for (i = 0; i < priv->map->otpc_row_size; i++) {
writel(*buf, priv->base + priv->map->data_w_offset[i]);
buf++;
bytes_written += sizeof(*buf);
}
set_start_bit(priv->base);
ret = poll_cpu_status(priv->base, OTPC_STAT_CMD_DONE);
reset_start_bit(priv->base);
if (ret) {
dev_err(priv->dev, "otp write error: 0x%x", ret);
return -EIO;
}
}
disable_ocotp_program(priv->base);
return 0;
}
static struct nvmem_config bcm_otpc_nvmem_config = {
.name = "bcm-ocotp",
.read_only = false,
.word_size = 4,
.stride = 4,
.reg_read = bcm_otpc_read,
.reg_write = bcm_otpc_write,
};
static const struct of_device_id bcm_otpc_dt_ids[] = {
{ .compatible = "brcm,ocotp", .data = &otp_map },
{ .compatible = "brcm,ocotp-v2", .data = &otp_map_v2 },
{ },
};
MODULE_DEVICE_TABLE(of, bcm_otpc_dt_ids);
static const struct acpi_device_id bcm_otpc_acpi_ids[] __maybe_unused = {
{ .id = "BRCM0700", .driver_data = (kernel_ulong_t)&otp_map },
{ .id = "BRCM0701", .driver_data = (kernel_ulong_t)&otp_map_v2 },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(acpi, bcm_otpc_acpi_ids);
static int bcm_otpc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct otpc_priv *priv;
struct nvmem_device *nvmem;
int err;
u32 num_words;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->map = device_get_match_data(dev);
if (!priv->map)
return -ENODEV;
/* Get OTP base address register. */
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
dev_err(dev, "unable to map I/O memory\n");
return PTR_ERR(priv->base);
}
/* Enable CPU access to OTPC. */
writel(readl(priv->base + OTPC_MODE_REG_OFFSET) |
BIT(OTPC_MODE_REG_OTPC_MODE),
priv->base + OTPC_MODE_REG_OFFSET);
reset_start_bit(priv->base);
/* Read size of memory in words. */
err = device_property_read_u32(dev, "brcm,ocotp-size", &num_words);
if (err) {
dev_err(dev, "size parameter not specified\n");
return -EINVAL;
} else if (num_words == 0) {
dev_err(dev, "size must be > 0\n");
return -EINVAL;
}
bcm_otpc_nvmem_config.size = 4 * num_words;
bcm_otpc_nvmem_config.dev = dev;
bcm_otpc_nvmem_config.priv = priv;
if (priv->map == &otp_map_v2) {
bcm_otpc_nvmem_config.word_size = 8;
bcm_otpc_nvmem_config.stride = 8;
}
priv->config = &bcm_otpc_nvmem_config;
nvmem = devm_nvmem_register(dev, &bcm_otpc_nvmem_config);
if (IS_ERR(nvmem)) {
dev_err(dev, "error registering nvmem config\n");
return PTR_ERR(nvmem);
}
return 0;
}
static struct platform_driver bcm_otpc_driver = {
.probe = bcm_otpc_probe,
.driver = {
.name = "brcm-otpc",
.of_match_table = bcm_otpc_dt_ids,
.acpi_match_table = ACPI_PTR(bcm_otpc_acpi_ids),
},
};
module_platform_driver(bcm_otpc_driver);
MODULE_DESCRIPTION("Broadcom OTPC driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/nvmem/bcm-ocotp.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Freescale MXS On-Chip OTP driver
*
* Copyright (C) 2015 Stefan Wahren <[email protected]>
*
* Based on the driver from Huang Shijie and Christoph G. Baumann
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/stmp_device.h>
/* OCOTP registers and bits */
#define BM_OCOTP_CTRL_RD_BANK_OPEN BIT(12)
#define BM_OCOTP_CTRL_ERROR BIT(9)
#define BM_OCOTP_CTRL_BUSY BIT(8)
#define OCOTP_TIMEOUT 10000
#define OCOTP_DATA_OFFSET 0x20
struct mxs_ocotp {
struct clk *clk;
void __iomem *base;
struct nvmem_device *nvmem;
};
static int mxs_ocotp_wait(struct mxs_ocotp *otp)
{
int timeout = OCOTP_TIMEOUT;
unsigned int status = 0;
while (timeout--) {
status = readl(otp->base);
if (!(status & (BM_OCOTP_CTRL_BUSY | BM_OCOTP_CTRL_ERROR)))
break;
cpu_relax();
}
if (status & BM_OCOTP_CTRL_BUSY)
return -EBUSY;
else if (status & BM_OCOTP_CTRL_ERROR)
return -EIO;
return 0;
}
static int mxs_ocotp_read(void *context, unsigned int offset,
void *val, size_t bytes)
{
struct mxs_ocotp *otp = context;
u32 *buf = val;
int ret;
ret = clk_enable(otp->clk);
if (ret)
return ret;
writel(BM_OCOTP_CTRL_ERROR, otp->base + STMP_OFFSET_REG_CLR);
ret = mxs_ocotp_wait(otp);
if (ret)
goto disable_clk;
/* open OCOTP banks for read */
writel(BM_OCOTP_CTRL_RD_BANK_OPEN, otp->base + STMP_OFFSET_REG_SET);
/* approximately wait 33 hclk cycles */
udelay(1);
ret = mxs_ocotp_wait(otp);
if (ret)
goto close_banks;
while (bytes) {
if ((offset < OCOTP_DATA_OFFSET) || (offset % 16)) {
/* fill up non-data register */
*buf++ = 0;
} else {
*buf++ = readl(otp->base + offset);
}
bytes -= 4;
offset += 4;
}
close_banks:
/* close banks for power saving */
writel(BM_OCOTP_CTRL_RD_BANK_OPEN, otp->base + STMP_OFFSET_REG_CLR);
disable_clk:
clk_disable(otp->clk);
return ret;
}
static struct nvmem_config ocotp_config = {
.name = "mxs-ocotp",
.stride = 16,
.word_size = 4,
.reg_read = mxs_ocotp_read,
};
struct mxs_data {
int size;
};
static const struct mxs_data imx23_data = {
.size = 0x220,
};
static const struct mxs_data imx28_data = {
.size = 0x2a0,
};
static const struct of_device_id mxs_ocotp_match[] = {
{ .compatible = "fsl,imx23-ocotp", .data = &imx23_data },
{ .compatible = "fsl,imx28-ocotp", .data = &imx28_data },
{ /* sentinel */},
};
MODULE_DEVICE_TABLE(of, mxs_ocotp_match);
static void mxs_ocotp_action(void *data)
{
clk_unprepare(data);
}
static int mxs_ocotp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct mxs_data *data;
struct mxs_ocotp *otp;
const struct of_device_id *match;
int ret;
match = of_match_device(dev->driver->of_match_table, dev);
if (!match || !match->data)
return -EINVAL;
otp = devm_kzalloc(dev, sizeof(*otp), GFP_KERNEL);
if (!otp)
return -ENOMEM;
otp->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(otp->base))
return PTR_ERR(otp->base);
otp->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(otp->clk))
return PTR_ERR(otp->clk);
ret = clk_prepare(otp->clk);
if (ret < 0) {
dev_err(dev, "failed to prepare clk: %d\n", ret);
return ret;
}
ret = devm_add_action_or_reset(&pdev->dev, mxs_ocotp_action, otp->clk);
if (ret)
return ret;
data = match->data;
ocotp_config.size = data->size;
ocotp_config.priv = otp;
ocotp_config.dev = dev;
otp->nvmem = devm_nvmem_register(dev, &ocotp_config);
if (IS_ERR(otp->nvmem))
return PTR_ERR(otp->nvmem);
platform_set_drvdata(pdev, otp);
return 0;
}
static struct platform_driver mxs_ocotp_driver = {
.probe = mxs_ocotp_probe,
.driver = {
.name = "mxs-ocotp",
.of_match_table = mxs_ocotp_match,
},
};
module_platform_driver(mxs_ocotp_driver);
MODULE_AUTHOR("Stefan Wahren <[email protected]");
MODULE_DESCRIPTION("driver for OCOTP in i.MX23/i.MX28");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/nvmem/mxs-ocotp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* OTP Memory controller
*
* Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries
*
* Author: Claudiu Beznea <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#define MCHP_OTPC_CR (0x0)
#define MCHP_OTPC_CR_READ BIT(6)
#define MCHP_OTPC_MR (0x4)
#define MCHP_OTPC_MR_ADDR GENMASK(31, 16)
#define MCHP_OTPC_AR (0x8)
#define MCHP_OTPC_SR (0xc)
#define MCHP_OTPC_SR_READ BIT(6)
#define MCHP_OTPC_HR (0x20)
#define MCHP_OTPC_HR_SIZE GENMASK(15, 8)
#define MCHP_OTPC_DR (0x24)
#define MCHP_OTPC_NAME "mchp-otpc"
#define MCHP_OTPC_SIZE (11 * 1024)
/**
* struct mchp_otpc - OTPC private data structure
* @base: base address
* @dev: struct device pointer
* @packets: list of packets in OTP memory
* @npackets: number of packets in OTP memory
*/
struct mchp_otpc {
void __iomem *base;
struct device *dev;
struct list_head packets;
u32 npackets;
};
/**
* struct mchp_otpc_packet - OTPC packet data structure
* @list: list head
* @id: packet ID
* @offset: packet offset (in words) in OTP memory
*/
struct mchp_otpc_packet {
struct list_head list;
u32 id;
u32 offset;
};
static struct mchp_otpc_packet *mchp_otpc_id_to_packet(struct mchp_otpc *otpc,
u32 id)
{
struct mchp_otpc_packet *packet;
if (id >= otpc->npackets)
return NULL;
list_for_each_entry(packet, &otpc->packets, list) {
if (packet->id == id)
return packet;
}
return NULL;
}
static int mchp_otpc_prepare_read(struct mchp_otpc *otpc,
unsigned int offset)
{
u32 tmp;
/* Set address. */
tmp = readl_relaxed(otpc->base + MCHP_OTPC_MR);
tmp &= ~MCHP_OTPC_MR_ADDR;
tmp |= FIELD_PREP(MCHP_OTPC_MR_ADDR, offset);
writel_relaxed(tmp, otpc->base + MCHP_OTPC_MR);
/* Set read. */
tmp = readl_relaxed(otpc->base + MCHP_OTPC_CR);
tmp |= MCHP_OTPC_CR_READ;
writel_relaxed(tmp, otpc->base + MCHP_OTPC_CR);
/* Wait for packet to be transferred into temporary buffers. */
return read_poll_timeout(readl_relaxed, tmp, !(tmp & MCHP_OTPC_SR_READ),
10000, 2000, false, otpc->base + MCHP_OTPC_SR);
}
/*
* OTPC memory is organized into packets. Each packet contains a header and
* a payload. The header is 4 bytes long and contains the size of the payload.
* Payload size varies. The memory footprint is as follows:
*
* Memory offset Memory footprint Packet ID
* ------------- ---------------- ---------
*
* 0x0 +------------+ <-- packet 0
* | header 0 |
* 0x4 +------------+
* | payload 0 |
* . .
* . ... .
* . .
* offset1 +------------+ <-- packet 1
* | header 1 |
* offset1 + 0x4 +------------+
* | payload 1 |
* . .
* . ... .
* . .
* offset2 +------------+ <-- packet 2
* . .
* . ... .
* . .
* offsetN +------------+ <-- packet N
* | header N |
* offsetN + 0x4 +------------+
* | payload N |
* . .
* . ... .
* . .
* +------------+
*
* where offset1, offset2, offsetN depend on the size of payload 0, payload 1,
* payload N-1.
*
* Memory is accessed on a per packet basis: the control registers are
* updated with an offset address (within a packet range) and the data
* registers are then updated by the controller with the information
* contained in that packet. E.g. if the control registers are updated with
* any address within the range [offset1, offset2), the data registers are
* updated by the controller with packet 1. Header data is accessible
* through the MCHP_OTPC_HR register. Payload data is accessible through
* the MCHP_OTPC_DR and MCHP_OTPC_AR registers. There is no direct mapping
* between the offset requested by software and the offset returned by
* hardware.
*
* Because of this, the read function returns the requested bytes starting
* from the beginning of the addressed packet. The user has to be aware of
* the memory footprint before issuing the read request.
*/
static int mchp_otpc_read(void *priv, unsigned int off, void *val,
size_t bytes)
{
struct mchp_otpc *otpc = priv;
struct mchp_otpc_packet *packet;
u32 *buf = val;
u32 offset;
size_t len = 0;
int ret, payload_size;
/*
* We reach this point with off being a multiple of stride = 4, which is
* what lets it cross the nvmem subsystem checks. Inside the driver packet
* ids are consecutive unsigned integers, so divide off by 4 before
* passing it to mchp_otpc_id_to_packet().
*/
packet = mchp_otpc_id_to_packet(otpc, off / 4);
if (!packet)
return -EINVAL;
offset = packet->offset;
while (len < bytes) {
ret = mchp_otpc_prepare_read(otpc, offset);
if (ret)
return ret;
/* Read and save header content. */
*buf++ = readl_relaxed(otpc->base + MCHP_OTPC_HR);
len += sizeof(*buf);
offset++;
if (len >= bytes)
break;
/* Read and save payload content. */
payload_size = FIELD_GET(MCHP_OTPC_HR_SIZE, *(buf - 1));
writel_relaxed(0UL, otpc->base + MCHP_OTPC_AR);
do {
*buf++ = readl_relaxed(otpc->base + MCHP_OTPC_DR);
len += sizeof(*buf);
offset++;
payload_size--;
} while (payload_size >= 0 && len < bytes);
}
return 0;
}
static int mchp_otpc_init_packets_list(struct mchp_otpc *otpc, u32 *size)
{
struct mchp_otpc_packet *packet;
u32 word, word_pos = 0, id = 0, npackets = 0, payload_size;
int ret;
INIT_LIST_HEAD(&otpc->packets);
*size = 0;
while (*size < MCHP_OTPC_SIZE) {
ret = mchp_otpc_prepare_read(otpc, word_pos);
if (ret)
return ret;
word = readl_relaxed(otpc->base + MCHP_OTPC_HR);
payload_size = FIELD_GET(MCHP_OTPC_HR_SIZE, word);
if (!payload_size)
break;
packet = devm_kzalloc(otpc->dev, sizeof(*packet), GFP_KERNEL);
if (!packet)
return -ENOMEM;
packet->id = id++;
packet->offset = word_pos;
INIT_LIST_HEAD(&packet->list);
list_add_tail(&packet->list, &otpc->packets);
/* Count size by adding header and payload sizes. */
*size += 4 * (payload_size + 1);
/* Next word: this packet (header, payload) position + 1. */
word_pos += payload_size + 2;
npackets++;
}
otpc->npackets = npackets;
return 0;
}
static struct nvmem_config mchp_nvmem_config = {
.name = MCHP_OTPC_NAME,
.type = NVMEM_TYPE_OTP,
.read_only = true,
.word_size = 4,
.stride = 4,
.reg_read = mchp_otpc_read,
};
static int mchp_otpc_probe(struct platform_device *pdev)
{
struct nvmem_device *nvmem;
struct mchp_otpc *otpc;
u32 size;
int ret;
otpc = devm_kzalloc(&pdev->dev, sizeof(*otpc), GFP_KERNEL);
if (!otpc)
return -ENOMEM;
otpc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(otpc->base))
return PTR_ERR(otpc->base);
otpc->dev = &pdev->dev;
ret = mchp_otpc_init_packets_list(otpc, &size);
if (ret)
return ret;
mchp_nvmem_config.dev = otpc->dev;
mchp_nvmem_config.size = size;
mchp_nvmem_config.priv = otpc;
nvmem = devm_nvmem_register(&pdev->dev, &mchp_nvmem_config);
return PTR_ERR_OR_ZERO(nvmem);
}
static const struct of_device_id __maybe_unused mchp_otpc_ids[] = {
{ .compatible = "microchip,sama7g5-otpc", },
{ },
};
MODULE_DEVICE_TABLE(of, mchp_otpc_ids);
static struct platform_driver mchp_otpc_driver = {
.probe = mchp_otpc_probe,
.driver = {
.name = MCHP_OTPC_NAME,
.of_match_table = of_match_ptr(mchp_otpc_ids),
},
};
module_platform_driver(mchp_otpc_driver);
MODULE_AUTHOR("Claudiu Beznea <[email protected]>");
MODULE_DESCRIPTION("Microchip SAMA7G5 OTPC driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/nvmem/microchip-otpc.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2020 Nicolas Saenz Julienne <[email protected]>
*/
#include <linux/io.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
struct rmem {
struct device *dev;
struct nvmem_device *nvmem;
struct reserved_mem *mem;
phys_addr_t size;
};
static int rmem_read(void *context, unsigned int offset,
void *val, size_t bytes)
{
struct rmem *priv = context;
size_t available = priv->mem->size;
loff_t off = offset;
void *addr;
int count;
/*
* Only map the reserved memory at this point to avoid potential rogue
* kernel threads inadvertently modifying it. Based on the current
* use-cases for this driver, the performance hit isn't a concern.
* Nor is it likely to be, given the nature of the subsystem. Most nvmem
* devices operate over slow buses to begin with.
*
* An alternative would be setting the memory as RO, set_memory_ro(),
* but as of Dec 2020 this isn't possible on arm64.
*/
addr = memremap(priv->mem->base, available, MEMREMAP_WB);
if (!addr) {
dev_err(priv->dev, "Failed to remap memory region\n");
return -ENOMEM;
}
count = memory_read_from_buffer(val, bytes, &off, addr, available);
memunmap(addr);
return count;
}
static int rmem_probe(struct platform_device *pdev)
{
struct nvmem_config config = { };
struct device *dev = &pdev->dev;
struct reserved_mem *mem;
struct rmem *priv;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->dev = dev;
mem = of_reserved_mem_lookup(dev->of_node);
if (!mem) {
dev_err(dev, "Failed to lookup reserved memory\n");
return -EINVAL;
}
priv->mem = mem;
config.dev = dev;
config.priv = priv;
config.name = "rmem";
config.id = NVMEM_DEVID_AUTO;
config.size = mem->size;
config.reg_read = rmem_read;
return PTR_ERR_OR_ZERO(devm_nvmem_register(dev, &config));
}
static const struct of_device_id rmem_match[] = {
{ .compatible = "nvmem-rmem", },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, rmem_match);
static struct platform_driver rmem_driver = {
.probe = rmem_probe,
.driver = {
.name = "rmem",
.of_match_table = rmem_match,
},
};
module_platform_driver(rmem_driver);
MODULE_AUTHOR("Nicolas Saenz Julienne <[email protected]>");
MODULE_DESCRIPTION("Reserved Memory Based nvmem Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/nvmem/rmem.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#define OTP_OTP_PWR_DN(t) (t + 0x00)
#define OTP_OTP_PWR_DN_OTP_PWRDN_N BIT(0)
#define OTP_OTP_ADDR_HI(t) (t + 0x04)
#define OTP_OTP_ADDR_LO(t) (t + 0x08)
#define OTP_OTP_PRGM_DATA(t) (t + 0x10)
#define OTP_OTP_PRGM_MODE(t) (t + 0x14)
#define OTP_OTP_PRGM_MODE_OTP_PGM_MODE_BYTE BIT(0)
#define OTP_OTP_RD_DATA(t) (t + 0x18)
#define OTP_OTP_FUNC_CMD(t) (t + 0x20)
#define OTP_OTP_FUNC_CMD_OTP_PROGRAM BIT(1)
#define OTP_OTP_FUNC_CMD_OTP_READ BIT(0)
#define OTP_OTP_CMD_GO(t) (t + 0x28)
#define OTP_OTP_CMD_GO_OTP_GO BIT(0)
#define OTP_OTP_PASS_FAIL(t) (t + 0x2c)
#define OTP_OTP_PASS_FAIL_OTP_READ_PROHIBITED BIT(3)
#define OTP_OTP_PASS_FAIL_OTP_WRITE_PROHIBITED BIT(2)
#define OTP_OTP_PASS_FAIL_OTP_FAIL BIT(0)
#define OTP_OTP_STATUS(t) (t + 0x30)
#define OTP_OTP_STATUS_OTP_CPUMPEN BIT(1)
#define OTP_OTP_STATUS_OTP_BUSY BIT(0)
#define OTP_MEM_SIZE 8192
#define OTP_SLEEP_US 10
#define OTP_TIMEOUT_US 500000
struct lan9662_otp {
struct device *dev;
void __iomem *base;
};
static int lan9662_otp_wait_flag_clear(void __iomem *reg, u32 flag)
{
u32 val;
return readl_poll_timeout(reg, val, !(val & flag),
OTP_SLEEP_US, OTP_TIMEOUT_US);
}
static int lan9662_otp_power(struct lan9662_otp *otp, bool up)
{
void __iomem *pwrdn = OTP_OTP_PWR_DN(otp->base);
if (up) {
writel(readl(pwrdn) & ~OTP_OTP_PWR_DN_OTP_PWRDN_N, pwrdn);
if (lan9662_otp_wait_flag_clear(OTP_OTP_STATUS(otp->base),
OTP_OTP_STATUS_OTP_CPUMPEN))
return -ETIMEDOUT;
} else {
writel(readl(pwrdn) | OTP_OTP_PWR_DN_OTP_PWRDN_N, pwrdn);
}
return 0;
}
static int lan9662_otp_execute(struct lan9662_otp *otp)
{
if (lan9662_otp_wait_flag_clear(OTP_OTP_CMD_GO(otp->base),
OTP_OTP_CMD_GO_OTP_GO))
return -ETIMEDOUT;
if (lan9662_otp_wait_flag_clear(OTP_OTP_STATUS(otp->base),
OTP_OTP_STATUS_OTP_BUSY))
return -ETIMEDOUT;
return 0;
}
static void lan9662_otp_set_address(struct lan9662_otp *otp, u32 offset)
{
writel(0xff & (offset >> 8), OTP_OTP_ADDR_HI(otp->base));
writel(0xff & offset, OTP_OTP_ADDR_LO(otp->base));
}
static int lan9662_otp_read_byte(struct lan9662_otp *otp, u32 offset, u8 *dst)
{
u32 pass;
int rc;
lan9662_otp_set_address(otp, offset);
writel(OTP_OTP_FUNC_CMD_OTP_READ, OTP_OTP_FUNC_CMD(otp->base));
writel(OTP_OTP_CMD_GO_OTP_GO, OTP_OTP_CMD_GO(otp->base));
rc = lan9662_otp_execute(otp);
if (!rc) {
pass = readl(OTP_OTP_PASS_FAIL(otp->base));
if (pass & OTP_OTP_PASS_FAIL_OTP_READ_PROHIBITED)
return -EACCES;
*dst = (u8) readl(OTP_OTP_RD_DATA(otp->base));
}
return rc;
}
static int lan9662_otp_write_byte(struct lan9662_otp *otp, u32 offset, u8 data)
{
u32 pass;
int rc;
lan9662_otp_set_address(otp, offset);
writel(OTP_OTP_PRGM_MODE_OTP_PGM_MODE_BYTE, OTP_OTP_PRGM_MODE(otp->base));
writel(data, OTP_OTP_PRGM_DATA(otp->base));
writel(OTP_OTP_FUNC_CMD_OTP_PROGRAM, OTP_OTP_FUNC_CMD(otp->base));
writel(OTP_OTP_CMD_GO_OTP_GO, OTP_OTP_CMD_GO(otp->base));
rc = lan9662_otp_execute(otp);
if (!rc) {
pass = readl(OTP_OTP_PASS_FAIL(otp->base));
if (pass & OTP_OTP_PASS_FAIL_OTP_WRITE_PROHIBITED)
return -EACCES;
if (pass & OTP_OTP_PASS_FAIL_OTP_FAIL)
return -EIO;
}
return rc;
}
static int lan9662_otp_read(void *context, unsigned int offset,
void *_val, size_t bytes)
{
struct lan9662_otp *otp = context;
u8 *val = _val;
uint8_t data;
int i, rc = 0;
lan9662_otp_power(otp, true);
for (i = 0; i < bytes; i++) {
rc = lan9662_otp_read_byte(otp, offset + i, &data);
if (rc < 0)
break;
*val++ = data;
}
lan9662_otp_power(otp, false);
return rc;
}
static int lan9662_otp_write(void *context, unsigned int offset,
void *_val, size_t bytes)
{
struct lan9662_otp *otp = context;
u8 *val = _val;
u8 data, newdata;
int i, rc = 0;
lan9662_otp_power(otp, true);
for (i = 0; i < bytes; i++) {
/*
 * Skip zero bytes: OTP bits can only be programmed from 0 to 1, so
 * writing 0x00 is a no-op. Non-zero bytes are OR-ed with the current
 * content and only written when that actually sets new bits.
 */
if (val[i]) {
rc = lan9662_otp_read_byte(otp, offset + i, &data);
if (rc < 0)
break;
newdata = data | val[i];
if (newdata == data)
continue;
rc = lan9662_otp_write_byte(otp, offset + i,
newdata);
if (rc < 0)
break;
}
}
lan9662_otp_power(otp, false);
return rc;
}
static struct nvmem_config otp_config = {
.name = "lan9662-otp",
.stride = 1,
.word_size = 1,
.reg_read = lan9662_otp_read,
.reg_write = lan9662_otp_write,
.size = OTP_MEM_SIZE,
};
static int lan9662_otp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct nvmem_device *nvmem;
struct lan9662_otp *otp;
otp = devm_kzalloc(&pdev->dev, sizeof(*otp), GFP_KERNEL);
if (!otp)
return -ENOMEM;
otp->dev = dev;
otp->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(otp->base))
return PTR_ERR(otp->base);
otp_config.priv = otp;
otp_config.dev = dev;
nvmem = devm_nvmem_register(dev, &otp_config);
return PTR_ERR_OR_ZERO(nvmem);
}
static const struct of_device_id lan9662_otp_match[] = {
{ .compatible = "microchip,lan9662-otpc", },
{ },
};
MODULE_DEVICE_TABLE(of, lan9662_otp_match);
static struct platform_driver lan9662_otp_driver = {
.probe = lan9662_otp_probe,
.driver = {
.name = "lan9662-otp",
.of_match_table = lan9662_otp_match,
},
};
module_platform_driver(lan9662_otp_driver);
MODULE_AUTHOR("Horatiu Vultur <[email protected]>");
MODULE_DESCRIPTION("lan9662 OTP driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/nvmem/lan9662-otpc.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2019 Xilinx, Inc.
*/
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/firmware/xlnx-zynqmp.h>
#define SILICON_REVISION_MASK 0xF
struct zynqmp_nvmem_data {
struct device *dev;
struct nvmem_device *nvmem;
};
static int zynqmp_nvmem_read(void *context, unsigned int offset,
void *val, size_t bytes)
{
int ret;
int idcode, version;
struct zynqmp_nvmem_data *priv = context;
ret = zynqmp_pm_get_chipid(&idcode, &version);
if (ret < 0)
return ret;
dev_dbg(priv->dev, "Read chipid val %x %x\n", idcode, version);
*(int *)val = version & SILICON_REVISION_MASK;
return 0;
}
static struct nvmem_config econfig = {
.name = "zynqmp-nvmem",
.owner = THIS_MODULE,
.word_size = 1,
.size = 1,
.read_only = true,
};
static const struct of_device_id zynqmp_nvmem_match[] = {
{ .compatible = "xlnx,zynqmp-nvmem-fw", },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, zynqmp_nvmem_match);
static int zynqmp_nvmem_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct zynqmp_nvmem_data *priv;
priv = devm_kzalloc(dev, sizeof(struct zynqmp_nvmem_data), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->dev = dev;
econfig.dev = dev;
econfig.reg_read = zynqmp_nvmem_read;
econfig.priv = priv;
priv->nvmem = devm_nvmem_register(dev, &econfig);
return PTR_ERR_OR_ZERO(priv->nvmem);
}
static struct platform_driver zynqmp_nvmem_driver = {
.probe = zynqmp_nvmem_probe,
.driver = {
.name = "zynqmp-nvmem",
.of_match_table = zynqmp_nvmem_match,
},
};
module_platform_driver(zynqmp_nvmem_driver);
MODULE_AUTHOR("Michal Simek <[email protected]>, Nava kishore Manne <[email protected]>");
MODULE_DESCRIPTION("ZynqMP NVMEM driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/nvmem/zynqmp_nvmem.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Amlogic Meson GX eFuse Driver
*
* Copyright (c) 2016 Endless Computers, Inc.
* Author: Carlo Caione <[email protected]>
*/
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/firmware/meson/meson_sm.h>
static int meson_efuse_read(void *context, unsigned int offset,
void *val, size_t bytes)
{
struct meson_sm_firmware *fw = context;
return meson_sm_call_read(fw, (u8 *)val, bytes, SM_EFUSE_READ, offset,
bytes, 0, 0, 0);
}
static int meson_efuse_write(void *context, unsigned int offset,
void *val, size_t bytes)
{
struct meson_sm_firmware *fw = context;
return meson_sm_call_write(fw, (u8 *)val, bytes, SM_EFUSE_WRITE, offset,
bytes, 0, 0, 0);
}
static const struct of_device_id meson_efuse_match[] = {
{ .compatible = "amlogic,meson-gxbb-efuse", },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, meson_efuse_match);
static int meson_efuse_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct meson_sm_firmware *fw;
struct device_node *sm_np;
struct nvmem_device *nvmem;
struct nvmem_config *econfig;
struct clk *clk;
unsigned int size;
int ret;
sm_np = of_parse_phandle(pdev->dev.of_node, "secure-monitor", 0);
if (!sm_np) {
dev_err(&pdev->dev, "no secure-monitor node\n");
return -ENODEV;
}
fw = meson_sm_get(sm_np);
of_node_put(sm_np);
if (!fw)
return -EPROBE_DEFER;
clk = devm_clk_get(dev, NULL);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
if (ret != -EPROBE_DEFER)
dev_err(dev, "failed to get efuse gate");
return ret;
}
ret = clk_prepare_enable(clk);
if (ret) {
dev_err(dev, "failed to enable gate");
return ret;
}
ret = devm_add_action_or_reset(dev,
(void(*)(void *))clk_disable_unprepare,
clk);
if (ret) {
dev_err(dev, "failed to add disable callback");
return ret;
}
if (meson_sm_call(fw, SM_EFUSE_USER_MAX, &size, 0, 0, 0, 0, 0) < 0) {
dev_err(dev, "failed to get max user");
return -EINVAL;
}
econfig = devm_kzalloc(dev, sizeof(*econfig), GFP_KERNEL);
if (!econfig)
return -ENOMEM;
econfig->dev = dev;
econfig->name = dev_name(dev);
econfig->stride = 1;
econfig->word_size = 1;
econfig->reg_read = meson_efuse_read;
econfig->reg_write = meson_efuse_write;
econfig->size = size;
econfig->priv = fw;
nvmem = devm_nvmem_register(&pdev->dev, econfig);
return PTR_ERR_OR_ZERO(nvmem);
}
static struct platform_driver meson_efuse_driver = {
.probe = meson_efuse_probe,
.driver = {
.name = "meson-efuse",
.of_match_table = meson_efuse_match,
},
};
module_platform_driver(meson_efuse_driver);
MODULE_AUTHOR("Carlo Caione <[email protected]>");
MODULE_DESCRIPTION("Amlogic Meson GX NVMEM driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/nvmem/meson-efuse.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/crc8.h>
#include <linux/etherdevice.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <uapi/linux/if_ether.h>
#define SL28VPD_MAGIC 'V'
struct sl28vpd_header {
u8 magic;
u8 version;
} __packed;
struct sl28vpd_v1 {
struct sl28vpd_header header;
char serial_number[15];
u8 base_mac_address[ETH_ALEN];
u8 crc8;
} __packed;
static int sl28vpd_mac_address_pp(void *priv, const char *id, int index,
unsigned int offset, void *buf,
size_t bytes)
{
if (bytes != ETH_ALEN)
return -EINVAL;
if (index < 0)
return -EINVAL;
if (!is_valid_ether_addr(buf))
return -EINVAL;
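/*
 * Derive this consumer's address by adding the cell index to the stored
 * base MAC, e.g. (illustrative values only) a base of 00:11:22:33:44:fe
 * with index 3 yields 00:11:22:33:45:01.
 */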
eth_addr_add(buf, index);
return 0;
}
static const struct nvmem_cell_info sl28vpd_v1_entries[] = {
{
.name = "serial-number",
.offset = offsetof(struct sl28vpd_v1, serial_number),
.bytes = sizeof_field(struct sl28vpd_v1, serial_number),
},
{
.name = "base-mac-address",
.offset = offsetof(struct sl28vpd_v1, base_mac_address),
.bytes = sizeof_field(struct sl28vpd_v1, base_mac_address),
.read_post_process = sl28vpd_mac_address_pp,
},
};
static int sl28vpd_v1_check_crc(struct device *dev, struct nvmem_device *nvmem)
{
struct sl28vpd_v1 data_v1;
u8 table[CRC8_TABLE_SIZE];
int ret;
u8 crc;
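/* CRC-8 over the whole structure except the crc8 byte itself, using the 0x07 polynomial (MSB first). */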
crc8_populate_msb(table, 0x07);
ret = nvmem_device_read(nvmem, 0, sizeof(data_v1), &data_v1);
if (ret < 0)
return ret;
else if (ret != sizeof(data_v1))
return -EIO;
crc = crc8(table, (void *)&data_v1, sizeof(data_v1) - 1, 0);
if (crc != data_v1.crc8) {
dev_err(dev,
"Checksum is invalid (got %02x, expected %02x).\n",
crc, data_v1.crc8);
return -EINVAL;
}
return 0;
}
static int sl28vpd_add_cells(struct device *dev, struct nvmem_device *nvmem,
struct nvmem_layout *layout)
{
const struct nvmem_cell_info *pinfo;
struct nvmem_cell_info info = {0};
struct device_node *layout_np;
struct sl28vpd_header hdr;
int ret, i;
/* check header */
ret = nvmem_device_read(nvmem, 0, sizeof(hdr), &hdr);
if (ret < 0)
return ret;
else if (ret != sizeof(hdr))
return -EIO;
if (hdr.magic != SL28VPD_MAGIC) {
dev_err(dev, "Invalid magic value (%02x)\n", hdr.magic);
return -EINVAL;
}
if (hdr.version != 1) {
dev_err(dev, "Version %d is unsupported.\n", hdr.version);
return -EINVAL;
}
ret = sl28vpd_v1_check_crc(dev, nvmem);
if (ret)
return ret;
layout_np = of_nvmem_layout_get_container(nvmem);
if (!layout_np)
return -ENOENT;
for (i = 0; i < ARRAY_SIZE(sl28vpd_v1_entries); i++) {
pinfo = &sl28vpd_v1_entries[i];
info.name = pinfo->name;
info.offset = pinfo->offset;
info.bytes = pinfo->bytes;
info.read_post_process = pinfo->read_post_process;
info.np = of_get_child_by_name(layout_np, pinfo->name);
ret = nvmem_add_one_cell(nvmem, &info);
if (ret) {
of_node_put(layout_np);
return ret;
}
}
of_node_put(layout_np);
return 0;
}
static const struct of_device_id sl28vpd_of_match_table[] = {
{ .compatible = "kontron,sl28-vpd" },
{},
};
MODULE_DEVICE_TABLE(of, sl28vpd_of_match_table);
static struct nvmem_layout sl28vpd_layout = {
.name = "sl28-vpd",
.of_match_table = sl28vpd_of_match_table,
.add_cells = sl28vpd_add_cells,
};
module_nvmem_layout_driver(sl28vpd_layout);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michael Walle <[email protected]>");
MODULE_DESCRIPTION("NVMEM layout driver for the VPD of Kontron sl28 boards");
| linux-master | drivers/nvmem/layouts/sl28vpd.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ONIE tlv NVMEM cells provider
*
* Copyright (C) 2022 Open Compute Group ONIE
* Author: Miquel Raynal <[email protected]>
* Based on the nvmem driver written by: Vadym Kochan <[email protected]>
* Inspired by the first layout written by: Rafał Miłecki <[email protected]>
*/
#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#define ONIE_TLV_MAX_LEN 2048
#define ONIE_TLV_CRC_FIELD_SZ 6
#define ONIE_TLV_CRC_SZ 4
#define ONIE_TLV_HDR_ID "TlvInfo"
struct onie_tlv_hdr {
u8 id[8];
u8 version;
__be16 data_len;
} __packed;
struct onie_tlv {
u8 type;
u8 len;
} __packed;
static const char *onie_tlv_cell_name(u8 type)
{
switch (type) {
case 0x21:
return "product-name";
case 0x22:
return "part-number";
case 0x23:
return "serial-number";
case 0x24:
return "mac-address";
case 0x25:
return "manufacture-date";
case 0x26:
return "device-version";
case 0x27:
return "label-revision";
case 0x28:
return "platform-name";
case 0x29:
return "onie-version";
case 0x2A:
return "num-macs";
case 0x2B:
return "manufacturer";
case 0x2C:
return "country-code";
case 0x2D:
return "vendor";
case 0x2E:
return "diag-version";
case 0x2F:
return "service-tag";
case 0xFD:
return "vendor-extension";
case 0xFE:
return "crc32";
default:
break;
}
return NULL;
}
static int onie_tlv_mac_read_cb(void *priv, const char *id, int index,
unsigned int offset, void *buf,
size_t bytes)
{
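/* Cell index N maps to the base MAC address + N (ONIE allocates consecutive addresses, see the "num-macs" field). */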
eth_addr_add(buf, index);
return 0;
}
static nvmem_cell_post_process_t onie_tlv_read_cb(u8 type, u8 *buf)
{
switch (type) {
case 0x24:
return &onie_tlv_mac_read_cb;
default:
break;
}
return NULL;
}
static int onie_tlv_add_cells(struct device *dev, struct nvmem_device *nvmem,
size_t data_len, u8 *data)
{
struct nvmem_cell_info cell = {};
struct device_node *layout;
struct onie_tlv tlv;
unsigned int hdr_len = sizeof(struct onie_tlv_hdr);
unsigned int offset = 0;
int ret;
layout = of_nvmem_layout_get_container(nvmem);
if (!layout)
return -ENOENT;
while (offset < data_len) {
memcpy(&tlv, data + offset, sizeof(tlv));
if (offset + tlv.len >= data_len) {
dev_err(dev, "Out of bounds field (0x%x bytes at 0x%x)\n",
tlv.len, hdr_len + offset);
break;
}
cell.name = onie_tlv_cell_name(tlv.type);
if (!cell.name)
continue;
cell.offset = hdr_len + offset + sizeof(tlv.type) + sizeof(tlv.len);
cell.bytes = tlv.len;
cell.np = of_get_child_by_name(layout, cell.name);
cell.read_post_process = onie_tlv_read_cb(tlv.type, data + offset + sizeof(tlv));
ret = nvmem_add_one_cell(nvmem, &cell);
if (ret) {
of_node_put(layout);
return ret;
}
offset += sizeof(tlv) + tlv.len;
}
of_node_put(layout);
return 0;
}
static bool onie_tlv_hdr_is_valid(struct device *dev, struct onie_tlv_hdr *hdr)
{
if (memcmp(hdr->id, ONIE_TLV_HDR_ID, sizeof(hdr->id))) {
dev_err(dev, "Invalid header\n");
return false;
}
if (hdr->version != 0x1) {
dev_err(dev, "Invalid version number\n");
return false;
}
return true;
}
static bool onie_tlv_crc_is_valid(struct device *dev, size_t table_len, u8 *table)
{
struct onie_tlv crc_hdr;
u32 read_crc, calc_crc;
__be32 crc_be;
memcpy(&crc_hdr, table + table_len - ONIE_TLV_CRC_FIELD_SZ, sizeof(crc_hdr));
if (crc_hdr.type != 0xfe || crc_hdr.len != ONIE_TLV_CRC_SZ) {
dev_err(dev, "Invalid CRC field\n");
return false;
}
/* The table contains a JAMCRC, which is XOR'ed compared to the original
* CRC32 implementation as known in the Ethernet world.
*/
memcpy(&crc_be, table + table_len - ONIE_TLV_CRC_SZ, ONIE_TLV_CRC_SZ);
read_crc = be32_to_cpu(crc_be);
calc_crc = crc32(~0, table, table_len - ONIE_TLV_CRC_SZ) ^ 0xFFFFFFFF;
if (read_crc != calc_crc) {
dev_err(dev, "Invalid CRC read: 0x%08x, expected: 0x%08x\n",
read_crc, calc_crc);
return false;
}
return true;
}
static int onie_tlv_parse_table(struct device *dev, struct nvmem_device *nvmem,
struct nvmem_layout *layout)
{
struct onie_tlv_hdr hdr;
size_t table_len, data_len, hdr_len;
u8 *table, *data;
int ret;
ret = nvmem_device_read(nvmem, 0, sizeof(hdr), &hdr);
if (ret < 0)
return ret;
if (!onie_tlv_hdr_is_valid(dev, &hdr)) {
dev_err(dev, "Invalid ONIE TLV header\n");
return -EINVAL;
}
hdr_len = sizeof(hdr.id) + sizeof(hdr.version) + sizeof(hdr.data_len);
data_len = be16_to_cpu(hdr.data_len);
table_len = hdr_len + data_len;
if (table_len > ONIE_TLV_MAX_LEN) {
dev_err(dev, "Invalid ONIE TLV data length\n");
return -EINVAL;
}
table = devm_kmalloc(dev, table_len, GFP_KERNEL);
if (!table)
return -ENOMEM;
ret = nvmem_device_read(nvmem, 0, table_len, table);
if (ret != table_len)
return ret;
if (!onie_tlv_crc_is_valid(dev, table_len, table))
return -EINVAL;
data = table + hdr_len;
ret = onie_tlv_add_cells(dev, nvmem, data_len, data);
if (ret)
return ret;
return 0;
}
static const struct of_device_id onie_tlv_of_match_table[] = {
{ .compatible = "onie,tlv-layout", },
{},
};
MODULE_DEVICE_TABLE(of, onie_tlv_of_match_table);
static struct nvmem_layout onie_tlv_layout = {
.name = "ONIE tlv layout",
.of_match_table = onie_tlv_of_match_table,
.add_cells = onie_tlv_parse_table,
};
module_nvmem_layout_driver(onie_tlv_layout);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Miquel Raynal <[email protected]>");
MODULE_DESCRIPTION("NVMEM layout driver for Onie TLV table parsing");
| linux-master | drivers/nvmem/layouts/onie-tlv.c |
/*
* pata_it821x.c - IT821x PATA for new ATA layer
* (C) 2005 Red Hat Inc
* Alan Cox <[email protected]>
* (C) 2007 Bartlomiej Zolnierkiewicz
*
* based upon
*
* it821x.c
*
* linux/drivers/ide/pci/it821x.c Version 0.09 December 2004
*
* Copyright (C) 2004 Red Hat
*
* May be copied or modified under the terms of the GNU General Public License
* Based in part on the ITE vendor provided SCSI driver.
*
* Documentation available from IT8212F_V04.pdf
* http://www.ite.com.tw/EN/products_more.aspx?CategoryID=3&ID=5,91
* Some other documents are NDA.
*
* The ITE8212 isn't exactly a standard IDE controller. It has two
* modes. In pass through mode it is an IDE controller. In its smart
* mode it's actually quite a capable hardware raid controller disguised
* as an IDE controller. Smart mode only understands DMA read/write and
* identify, none of the fancier commands apply. The IT8211 is identical
* in other respects but lacks the raid mode.
*
* Errata:
* o Rev 0x10 also requires master/slave hold the same DMA timings and
* cannot do ATAPI MWDMA.
* o The identify data for raid volumes lacks CHS info (technically ok)
* but also fails to set the LBA28 and other bits. We fix these in
* the IDE probe quirk code.
* o If you write LBA48 sized I/O's (i.e. > 256 sectors) in smart mode
* raid then the controller firmware dies
* o Smart mode without RAID doesn't clear all the necessary identify
* bits to reduce the command set to the one used
*
* This has a few impacts on the driver
* - In pass through mode we do all the work you would expect
* - In smart mode the clocking set up is done by the controller generally
* but we must watch the other limits and filter.
* - There are a few extra vendor commands that actually talk to the
* controller but only work in PIO mode with no IRQ.
*
* Vendor areas of the identify block in smart mode are used for the
* timing and policy set up. Each HDD in raid mode also has a serial
* block on the disk. The hardware extra commands are get/set chip status,
* rebuild, get rebuild status.
*
* In Linux the driver supports pass through mode as if the device was
* just another IDE controller. If the smart mode is running then
* volumes are managed by the controller firmware and each IDE "disk"
* is a raid volume. Even more cute - the controller can do automated
* hotplug and rebuild.
*
* The pass through controller itself is a little demented. Its flaw is
* that it has a single set of PIO/MWDMA timings per channel, so
* non UDMA devices restrict each other's performance. It also has a
* single clock source per channel so mixed UDMA100/133 performance
* isn't perfect and we have to pick a clock. Thankfully none of this
* matters in smart mode. ATAPI DMA is not currently supported.
*
* It seems the smart mode is a win for RAID1/RAID10 but otherwise not.
*
* TODO
* - ATAPI and other speed filtering
* - RAID configuration ioctls
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_it821x"
#define DRV_VERSION "0.4.2"
struct it821x_dev
{
unsigned int smart:1, /* Are we in smart raid mode */
timing10:1; /* Rev 0x10 */
u8 clock_mode; /* 0, ATA_50 or ATA_66 */
u8 want[2][2]; /* Per device: [0] = request priority, [1] = wanted clock */
/* We need these for switching the clock when DMA goes on/off
The high byte is the 66Mhz timing */
u16 pio[2]; /* Cached PIO values */
u16 mwdma[2]; /* Cached MWDMA values */
u16 udma[2]; /* Cached UDMA values (per drive) */
u16 last_device; /* Master or slave loaded ? */
};
#define ATA_66 0
#define ATA_50 1
#define ATA_ANY 2
#define UDMA_OFF 0
#define MWDMA_OFF 0
/*
* We allow users to force the card into non raid mode without
* flashing the alternative BIOS. This is also necessary right now
* for embedded platforms that cannot run a PC BIOS but are using this
* device.
*/
static int it8212_noraid;
/**
* it821x_program - program the PIO/MWDMA registers
* @ap: ATA port
* @adev: Device to program
* @timing: Timing value (66Mhz in top 8bits, 50 in the low 8)
*
* Program the PIO/MWDMA timing for this channel according to the
* current clock. These share the same register so are managed by
* the DMA start/stop sequence as with the old driver.
*/
static void it821x_program(struct ata_port *ap, struct ata_device *adev, u16 timing)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
struct it821x_dev *itdev = ap->private_data;
int channel = ap->port_no;
u8 conf;
/* Program PIO/MWDMA timing bits */
if (itdev->clock_mode == ATA_66)
conf = timing >> 8;
else
conf = timing & 0xFF;
pci_write_config_byte(pdev, 0x54 + 4 * channel, conf);
}
/**
* it821x_program_udma - program the UDMA registers
* @ap: ATA port
* @adev: ATA device to update
* @timing: Timing bits. Top 8 are for 66Mhz, bottom for 50Mhz
*
* Program the UDMA timing for this drive according to the
* current clock. Handles the dual clocks and also knows about
* the errata on the 0x10 revision. The UDMA errata is partly handled
* here and partly in start_dma.
*/
static void it821x_program_udma(struct ata_port *ap, struct ata_device *adev, u16 timing)
{
struct it821x_dev *itdev = ap->private_data;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int channel = ap->port_no;
int unit = adev->devno;
u8 conf;
/* Program UDMA timing bits */
if (itdev->clock_mode == ATA_66)
conf = timing >> 8;
else
conf = timing & 0xFF;
if (itdev->timing10 == 0)
pci_write_config_byte(pdev, 0x56 + 4 * channel + unit, conf);
else {
/* Early revision must be programmed for both together */
pci_write_config_byte(pdev, 0x56 + 4 * channel, conf);
pci_write_config_byte(pdev, 0x56 + 4 * channel + 1, conf);
}
}
/**
* it821x_clock_strategy
* @ap: ATA interface
* @adev: ATA device being updated
*
* Select between the 50 and 66Mhz base clocks to get the best
* results for this interface.
*/
static void it821x_clock_strategy(struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
struct it821x_dev *itdev = ap->private_data;
u8 unit = adev->devno;
struct ata_device *pair = ata_dev_pair(adev);
int clock, altclock;
u8 v;
int sel = 0;
/* Look for the most wanted clocking */
if (itdev->want[0][0] > itdev->want[1][0]) {
clock = itdev->want[0][1];
altclock = itdev->want[1][1];
} else {
clock = itdev->want[1][1];
altclock = itdev->want[0][1];
}
/* Master doesn't care - does the slave? */
if (clock == ATA_ANY)
clock = altclock;
/* Nobody cares - keep the same clock */
if (clock == ATA_ANY)
return;
/* No change */
if (clock == itdev->clock_mode)
return;
/* Load this into the controller */
if (clock == ATA_66)
itdev->clock_mode = ATA_66;
else {
itdev->clock_mode = ATA_50;
sel = 1;
}
pci_read_config_byte(pdev, 0x50, &v);
v &= ~(1 << (1 + ap->port_no));
v |= sel << (1 + ap->port_no);
pci_write_config_byte(pdev, 0x50, v);
/*
* Reprogram the UDMA/PIO of the pair drive for the switch
* MWDMA will be dealt with by the dma switcher
*/
if (pair && itdev->udma[1-unit] != UDMA_OFF) {
it821x_program_udma(ap, pair, itdev->udma[1-unit]);
it821x_program(ap, pair, itdev->pio[1-unit]);
}
/*
* Reprogram the UDMA/PIO of our drive for the switch.
* MWDMA will be dealt with by the dma switcher
*/
if (itdev->udma[unit] != UDMA_OFF) {
it821x_program_udma(ap, adev, itdev->udma[unit]);
it821x_program(ap, adev, itdev->pio[unit]);
}
}
/**
* it821x_passthru_set_piomode - set PIO mode data
* @ap: ATA interface
* @adev: ATA device
*
* Configure for PIO mode. This is complicated as the register is
* shared by PIO and MWDMA and for both channels.
*/
static void it821x_passthru_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
/* Spec says 89, ref driver uses 88 */
static const u16 pio[] = { 0xAA88, 0xA382, 0xA181, 0x3332, 0x3121 };
static const u8 pio_want[] = { ATA_66, ATA_66, ATA_66, ATA_66, ATA_ANY };
struct it821x_dev *itdev = ap->private_data;
int unit = adev->devno;
int mode_wanted = adev->pio_mode - XFER_PIO_0;
/* We prefer 66Mhz clock for PIO 0-3, don't care for PIO4 */
itdev->want[unit][1] = pio_want[mode_wanted];
itdev->want[unit][0] = 1; /* PIO is lowest priority */
itdev->pio[unit] = pio[mode_wanted];
it821x_clock_strategy(ap, adev);
it821x_program(ap, adev, itdev->pio[unit]);
}
/**
* it821x_passthru_set_dmamode - set initial DMA mode data
* @ap: ATA interface
* @adev: ATA device
*
* Set up the DMA modes. The actions taken depend heavily on the mode
* to use. If UDMA is used, as is hopefully the usual case, then the
* timing register is private and we need only consider the clock. If
* we are using MWDMA then we have to manage the setting ourself as
* we switch devices and mode.
*/
static void it821x_passthru_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
static const u16 dma[] = { 0x8866, 0x3222, 0x3121 };
static const u8 mwdma_want[] = { ATA_ANY, ATA_66, ATA_ANY };
static const u16 udma[] = { 0x4433, 0x4231, 0x3121, 0x2121, 0x1111, 0x2211, 0x1111 };
static const u8 udma_want[] = { ATA_ANY, ATA_50, ATA_ANY, ATA_66, ATA_66, ATA_50, ATA_66 };
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
struct it821x_dev *itdev = ap->private_data;
int channel = ap->port_no;
int unit = adev->devno;
u8 conf;
if (adev->dma_mode >= XFER_UDMA_0) {
int mode_wanted = adev->dma_mode - XFER_UDMA_0;
itdev->want[unit][1] = udma_want[mode_wanted];
itdev->want[unit][0] = 3; /* UDMA is high priority */
itdev->mwdma[unit] = MWDMA_OFF;
itdev->udma[unit] = udma[mode_wanted];
if (mode_wanted >= 5)
itdev->udma[unit] |= 0x8080; /* UDMA 5/6 select on */
/* UDMA on. Again revision 0x10 must do the pair */
pci_read_config_byte(pdev, 0x50, &conf);
if (itdev->timing10)
conf &= channel ? 0x9F: 0xE7;
else
conf &= ~ (1 << (3 + 2 * channel + unit));
pci_write_config_byte(pdev, 0x50, conf);
it821x_clock_strategy(ap, adev);
it821x_program_udma(ap, adev, itdev->udma[unit]);
} else {
int mode_wanted = adev->dma_mode - XFER_MW_DMA_0;
itdev->want[unit][1] = mwdma_want[mode_wanted];
itdev->want[unit][0] = 2; /* MWDMA is low priority */
itdev->mwdma[unit] = dma[mode_wanted];
itdev->udma[unit] = UDMA_OFF;
/* UDMA bits off - Revision 0x10 do them in pairs */
pci_read_config_byte(pdev, 0x50, &conf);
if (itdev->timing10)
conf |= channel ? 0x60: 0x18;
else
conf |= 1 << (3 + 2 * channel + unit);
pci_write_config_byte(pdev, 0x50, conf);
it821x_clock_strategy(ap, adev);
}
}
/**
* it821x_passthru_bmdma_start - DMA start callback
* @qc: Command in progress
*
* Usually drivers set the DMA timing at the point the set_dmamode call
* is made. IT821x however requires we load new timings on the
* transitions in some cases.
*/
static void it821x_passthru_bmdma_start(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_device *adev = qc->dev;
struct it821x_dev *itdev = ap->private_data;
int unit = adev->devno;
if (itdev->mwdma[unit] != MWDMA_OFF)
it821x_program(ap, adev, itdev->mwdma[unit]);
else if (itdev->udma[unit] != UDMA_OFF && itdev->timing10)
it821x_program_udma(ap, adev, itdev->udma[unit]);
ata_bmdma_start(qc);
}
/**
* it821x_passthru_bmdma_stop - DMA stop callback
* @qc: ATA command
*
* We loaded new timings in dma_start, as a result we need to restore
* the PIO timings in dma_stop so that the next command issue gets the
* right clock values.
*/
static void it821x_passthru_bmdma_stop(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_device *adev = qc->dev;
struct it821x_dev *itdev = ap->private_data;
int unit = adev->devno;
ata_bmdma_stop(qc);
if (itdev->mwdma[unit] != MWDMA_OFF)
it821x_program(ap, adev, itdev->pio[unit]);
}
/**
* it821x_passthru_dev_select - Select master/slave
* @ap: ATA port
* @device: Device number (not pointer)
*
* Device selection hook. If necessary perform clock switching
*/
static void it821x_passthru_dev_select(struct ata_port *ap,
unsigned int device)
{
struct it821x_dev *itdev = ap->private_data;
if (itdev && device != itdev->last_device) {
struct ata_device *adev = &ap->link.device[device];
it821x_program(ap, adev, itdev->pio[adev->devno]);
itdev->last_device = device;
}
ata_sff_dev_select(ap, device);
}
/**
* it821x_smart_qc_issue - wrap qc issue prot
* @qc: command
*
* Wrap the command issue sequence for the IT821x. In smart mode the
* firmware only understands a limited command set, so reject anything
* that is not on the supported list before it is issued.
*/
static unsigned int it821x_smart_qc_issue(struct ata_queued_cmd *qc)
{
switch(qc->tf.command)
{
/* Commands the firmware supports */
case ATA_CMD_READ:
case ATA_CMD_READ_EXT:
case ATA_CMD_WRITE:
case ATA_CMD_WRITE_EXT:
case ATA_CMD_PIO_READ:
case ATA_CMD_PIO_READ_EXT:
case ATA_CMD_PIO_WRITE:
case ATA_CMD_PIO_WRITE_EXT:
case ATA_CMD_READ_MULTI:
case ATA_CMD_READ_MULTI_EXT:
case ATA_CMD_WRITE_MULTI:
case ATA_CMD_WRITE_MULTI_EXT:
case ATA_CMD_ID_ATA:
case ATA_CMD_INIT_DEV_PARAMS:
case 0xFC: /* Internal 'report rebuild state' */
/* Arguably should just no-op this one */
case ATA_CMD_SET_FEATURES:
return ata_bmdma_qc_issue(qc);
}
ata_dev_dbg(qc->dev, "it821x: can't process command 0x%02X\n",
qc->tf.command);
return AC_ERR_DEV;
}
/**
* it821x_passthru_qc_issue - wrap qc issue prot
* @qc: command
*
* Wrap the command issue sequence for the IT821x. We need to
* perform our own device selection timing loads before the
* usual processing kicks off.
*/
static unsigned int it821x_passthru_qc_issue(struct ata_queued_cmd *qc)
{
it821x_passthru_dev_select(qc->ap, qc->dev->devno);
return ata_bmdma_qc_issue(qc);
}
/**
* it821x_smart_set_mode - mode setting
* @link: interface to set up
* @unused: device that failed (error only)
*
* Use a non-standard set_mode function. We don't want to be tuned.
* The BIOS configured everything. Our job is not to fiddle. We
* read the dma enabled bits from the PCI configuration of the device
* and respect them.
*/
static int it821x_smart_set_mode(struct ata_link *link, struct ata_device **unused)
{
struct ata_device *dev;
ata_for_each_dev(dev, link, ENABLED) {
/* We don't really care */
dev->pio_mode = XFER_PIO_0;
dev->dma_mode = XFER_MW_DMA_0;
/* We do need the right mode information for DMA or PIO
and this comes from the current configuration flags */
if (ata_id_has_dma(dev->id)) {
ata_dev_info(dev, "configured for DMA\n");
dev->xfer_mode = XFER_MW_DMA_0;
dev->xfer_shift = ATA_SHIFT_MWDMA;
dev->flags &= ~ATA_DFLAG_PIO;
} else {
ata_dev_info(dev, "configured for PIO\n");
dev->xfer_mode = XFER_PIO_0;
dev->xfer_shift = ATA_SHIFT_PIO;
dev->flags |= ATA_DFLAG_PIO;
}
}
return 0;
}
/**
* it821x_dev_config - Called each device identify
* @adev: Device that has just been identified
*
* Perform the initial setup needed for each device that is chip
* special. In our case we need to lock the sector count to avoid
* blowing the brains out of the firmware with large LBA48 requests
*
*/
static void it821x_dev_config(struct ata_device *adev)
{
unsigned char model_num[ATA_ID_PROD_LEN + 1];
ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num));
if (adev->max_sectors > 255)
adev->max_sectors = 255;
if (strstr(model_num, "Integrated Technology Express")) {
/* RAID mode */
if (adev->id[129] == 1)
ata_dev_info(adev, "%sRAID%d volume\n",
adev->id[147] ? "Bootable " : "",
adev->id[129]);
else
ata_dev_info(adev, "%sRAID%d volume (%dK stripe)\n",
adev->id[147] ? "Bootable " : "",
adev->id[129], adev->id[146]);
}
/* This is a controller firmware triggered funny, don't
report the drive faulty! */
adev->horkage &= ~ATA_HORKAGE_DIAGNOSTIC;
/* No HPA in 'smart' mode */
adev->horkage |= ATA_HORKAGE_BROKEN_HPA;
}
/**
* it821x_read_id - Hack identify data up
* @adev: device to read
* @tf: proposed taskfile
* @id: buffer for returned ident data
*
* Query the devices on this firmware driven port and slightly
* mash the identify data to stop us and common tools from trying to
* use features the firmware does not support. The firmware itself does
* some masking (e.g. SMART) but not enough.
*/
static unsigned int it821x_read_id(struct ata_device *adev,
struct ata_taskfile *tf, __le16 *id)
{
unsigned int err_mask;
unsigned char model_num[ATA_ID_PROD_LEN + 1];
err_mask = ata_do_dev_read_id(adev, tf, id);
if (err_mask)
return err_mask;
ata_id_c_string((u16 *)id, model_num, ATA_ID_PROD, sizeof(model_num));
id[83] &= cpu_to_le16(~(1 << 12)); /* Cache flush is firmware handled */
id[84] &= cpu_to_le16(~(1 << 6)); /* No FUA */
id[85] &= cpu_to_le16(~(1 << 10)); /* No HPA */
id[76] = 0; /* No NCQ/AN etc */
if (strstr(model_num, "Integrated Technology Express")) {
/* Set feature bits the firmware neglects */
id[49] |= cpu_to_le16(0x0300); /* LBA, DMA */
id[83] &= cpu_to_le16(0x7FFF);
id[83] |= cpu_to_le16(0x4400); /* Word 83 is valid and LBA48 */
id[86] |= cpu_to_le16(0x0400); /* LBA48 on */
id[ATA_ID_MAJOR_VER] |= cpu_to_le16(0x1F);
/* Clear the serial number because it's different each boot
which breaks validation on resume */
memset(&id[ATA_ID_SERNO], 0x20, ATA_ID_SERNO_LEN);
}
return err_mask;
}
/**
* it821x_check_atapi_dma - ATAPI DMA handler
* @qc: Command we are about to issue
*
* Decide if this ATAPI command can be issued by DMA on this
* controller. Return 0 if it can be.
*/
static int it821x_check_atapi_dma(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct it821x_dev *itdev = ap->private_data;
/* Only use dma for transfers to/from the media. */
if (ata_qc_raw_nbytes(qc) < 2048)
return -EOPNOTSUPP;
/* No ATAPI DMA in smart mode */
if (itdev->smart)
return -EOPNOTSUPP;
/* No ATAPI DMA on rev 10 */
if (itdev->timing10)
return -EOPNOTSUPP;
/* Cool */
return 0;
}
/**
* it821x_display_disk - display disk setup
* @ap: ATA port
* @n: Device number
* @buf: Buffer block from firmware
*
* Produce a nice informative display of the device setup as provided
* by the firmware.
*/
static void it821x_display_disk(struct ata_port *ap, int n, u8 *buf)
{
unsigned char id[41];
int mode = 0;
const char *mtype = "";
char mbuf[8];
const char *cbl = "(40 wire cable)";
static const char *types[5] = {
"RAID0", "RAID1", "RAID 0+1", "JBOD", "DISK"
};
if (buf[52] > 4) /* No Disk */
return;
ata_id_c_string((u16 *)buf, id, 0, 41);
if (buf[51]) {
mode = ffs(buf[51]);
mtype = "UDMA";
} else if (buf[49]) {
mode = ffs(buf[49]);
mtype = "MWDMA";
}
if (buf[76])
cbl = "";
if (mode)
snprintf(mbuf, 8, "%5s%d", mtype, mode - 1);
else
strcpy(mbuf, "PIO");
if (buf[52] == 4)
ata_port_info(ap, "%d: %-6s %-8s %s %s\n",
n, mbuf, types[buf[52]], id, cbl);
else
ata_port_info(ap, "%d: %-6s %-8s Volume: %1d %s %s\n",
n, mbuf, types[buf[52]], buf[53], id, cbl);
if (buf[125] < 100)
ata_port_info(ap, "%d: Rebuilding: %d%%\n", n, buf[125]);
}
/**
* it821x_firmware_command - issue firmware command
* @ap: IT821x port to interrogate
* @cmd: command
* @len: length
*
* Issue firmware commands expecting data back from the controller. We
* use this to issue commands that do not go via the normal paths. Other
* commands such as 0xFC can be issued normally.
*/
static u8 *it821x_firmware_command(struct ata_port *ap, u8 cmd, int len)
{
u8 status;
int n = 0;
u16 *buf = kmalloc(len, GFP_KERNEL);
if (!buf)
return NULL;
/* This isn't quite a normal ATA command as we are talking to the
firmware not the drives */
ap->ctl |= ATA_NIEN;
iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
ata_wait_idle(ap);
iowrite8(ATA_DEVICE_OBS, ap->ioaddr.device_addr);
iowrite8(cmd, ap->ioaddr.command_addr);
udelay(1);
/* This should be almost immediate but a little paranoia goes a long
way. */
while(n++ < 10) {
status = ioread8(ap->ioaddr.status_addr);
if (status & ATA_ERR) {
kfree(buf);
ata_port_err(ap, "%s: rejected\n", __func__);
return NULL;
}
if (status & ATA_DRQ) {
ioread16_rep(ap->ioaddr.data_addr, buf, len/2);
return (u8 *)buf;
}
usleep_range(500, 1000);
}
kfree(buf);
ata_port_err(ap, "%s: timeout\n", __func__);
return NULL;
}
/**
* it821x_probe_firmware - firmware reporting/setup
* @ap: IT821x port being probed
*
* Probe the firmware of the controller by issuing firmware command
* 0xFA and analysing the returned data.
*/
static void it821x_probe_firmware(struct ata_port *ap)
{
u8 *buf;
int i;
/* This is a bit ugly as we can't just issue a task file to a device
as this is controller magic */
buf = it821x_firmware_command(ap, 0xFA, 512);
if (buf != NULL) {
ata_port_info(ap, "pata_it821x: Firmware %02X/%02X/%02X%02X\n",
buf[505],
buf[506],
buf[507],
buf[508]);
for (i = 0; i < 4; i++)
it821x_display_disk(ap, i, buf + 128 * i);
kfree(buf);
}
}
/**
* it821x_port_start - port setup
* @ap: ATA port being set up
*
* The it821x needs to maintain private data structures and also to
* use the standard PCI interface which lacks support for this
* functionality. We instead set up the private data on the port
* start hook, and tear it down on port stop
*/
static int it821x_port_start(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
struct it821x_dev *itdev;
u8 conf;
int ret = ata_bmdma_port_start(ap);
if (ret < 0)
return ret;
itdev = devm_kzalloc(&pdev->dev, sizeof(struct it821x_dev), GFP_KERNEL);
if (itdev == NULL)
return -ENOMEM;
ap->private_data = itdev;
pci_read_config_byte(pdev, 0x50, &conf);
if (conf & 1) {
itdev->smart = 1;
/* Long I/O's, although allowed in LBA48 space, cause the
onboard firmware to enter the twilight zone */
/* No ATAPI DMA in this mode either */
if (ap->port_no == 0)
it821x_probe_firmware(ap);
}
/* Pull the current clocks from 0x50 */
if (conf & (1 << (1 + ap->port_no)))
itdev->clock_mode = ATA_50;
else
itdev->clock_mode = ATA_66;
itdev->want[0][1] = ATA_ANY;
itdev->want[1][1] = ATA_ANY;
itdev->last_device = -1;
if (pdev->revision == 0x10) {
itdev->timing10 = 1;
/* Need to disable ATAPI DMA for this case */
if (!itdev->smart)
dev_warn(&pdev->dev,
"Revision 0x10, workarounds activated.\n");
}
return 0;
}
/**
* it821x_rdc_cable - Cable detect for RDC1010
* @ap: port we are checking
*
* Return the RDC1010 cable type. Unlike the IT821x we know how to do
* this and can do host side cable detect
*/
static int it821x_rdc_cable(struct ata_port *ap)
{
u16 r40;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
pci_read_config_word(pdev, 0x40, &r40);
if (r40 & (1 << (2 + ap->port_no)))
return ATA_CBL_PATA40;
return ATA_CBL_PATA80;
}
static const struct scsi_host_template it821x_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static struct ata_port_operations it821x_smart_port_ops = {
.inherits = &ata_bmdma_port_ops,
.check_atapi_dma= it821x_check_atapi_dma,
.qc_issue = it821x_smart_qc_issue,
.cable_detect = ata_cable_80wire,
.set_mode = it821x_smart_set_mode,
.dev_config = it821x_dev_config,
.read_id = it821x_read_id,
.port_start = it821x_port_start,
};
static struct ata_port_operations it821x_passthru_port_ops = {
.inherits = &ata_bmdma_port_ops,
.check_atapi_dma= it821x_check_atapi_dma,
.sff_dev_select = it821x_passthru_dev_select,
.bmdma_start = it821x_passthru_bmdma_start,
.bmdma_stop = it821x_passthru_bmdma_stop,
.qc_issue = it821x_passthru_qc_issue,
.cable_detect = ata_cable_unknown,
.set_piomode = it821x_passthru_set_piomode,
.set_dmamode = it821x_passthru_set_dmamode,
.port_start = it821x_port_start,
};
static struct ata_port_operations it821x_rdc_port_ops = {
.inherits = &ata_bmdma_port_ops,
.check_atapi_dma= it821x_check_atapi_dma,
.sff_dev_select = it821x_passthru_dev_select,
.bmdma_start = it821x_passthru_bmdma_start,
.bmdma_stop = it821x_passthru_bmdma_stop,
.qc_issue = it821x_passthru_qc_issue,
.cable_detect = it821x_rdc_cable,
.set_piomode = it821x_passthru_set_piomode,
.set_dmamode = it821x_passthru_set_dmamode,
.port_start = it821x_port_start,
};
static void it821x_disable_raid(struct pci_dev *pdev)
{
/* Neither the RDC nor the IT8211 */
if (pdev->vendor != PCI_VENDOR_ID_ITE ||
pdev->device != PCI_DEVICE_ID_ITE_8212)
return;
/* Reset local CPU, and set BIOS not ready */
pci_write_config_byte(pdev, 0x5E, 0x01);
/* Set to bypass mode, and reset PCI bus */
pci_write_config_byte(pdev, 0x50, 0x00);
pci_write_config_word(pdev, PCI_COMMAND,
PCI_COMMAND_PARITY | PCI_COMMAND_IO |
PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
pci_write_config_word(pdev, 0x40, 0xA0F3);
pci_write_config_dword(pdev, 0x4C, 0x02040204);
pci_write_config_byte(pdev, 0x42, 0x36);
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x20);
}
static int it821x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
u8 conf;
static const struct ata_port_info info_smart = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &it821x_smart_port_ops
};
static const struct ata_port_info info_passthru = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &it821x_passthru_port_ops
};
static const struct ata_port_info info_rdc = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &it821x_rdc_port_ops
};
static const struct ata_port_info info_rdc_11 = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
/* No UDMA */
.port_ops = &it821x_rdc_port_ops
};
const struct ata_port_info *ppi[] = { NULL, NULL };
static const char *mode[2] = { "pass through", "smart" };
int rc;
rc = pcim_enable_device(pdev);
if (rc)
return rc;
if (pdev->vendor == PCI_VENDOR_ID_RDC) {
/* Deal with Vortex86SX */
if (pdev->revision == 0x11)
ppi[0] = &info_rdc_11;
else
ppi[0] = &info_rdc;
} else {
/* Force the card into bypass mode if so requested */
if (it8212_noraid) {
dev_info(&pdev->dev, "forcing bypass mode.\n");
it821x_disable_raid(pdev);
}
pci_read_config_byte(pdev, 0x50, &conf);
conf &= 1;
dev_info(&pdev->dev, "controller in %s mode.\n", mode[conf]);
if (conf == 0)
ppi[0] = &info_passthru;
else
ppi[0] = &info_smart;
}
return ata_pci_bmdma_init_one(pdev, ppi, &it821x_sht, NULL, 0);
}
#ifdef CONFIG_PM_SLEEP
static int it821x_reinit_one(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
int rc;
rc = ata_pci_device_do_resume(pdev);
if (rc)
return rc;
/* Resume - turn raid back off if need be */
if (it8212_noraid)
it821x_disable_raid(pdev);
ata_host_resume(host);
return rc;
}
#endif
static const struct pci_device_id it821x[] = {
{ PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8211), },
{ PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8212), },
{ PCI_VDEVICE(RDC, PCI_DEVICE_ID_RDC_D1010), },
{ },
};
static struct pci_driver it821x_pci_driver = {
.name = DRV_NAME,
.id_table = it821x,
.probe = it821x_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = it821x_reinit_one,
#endif
};
module_pci_driver(it821x_pci_driver);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for the IT8211/IT8212 IDE RAID controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, it821x);
MODULE_VERSION(DRV_VERSION);
module_param_named(noraid, it8212_noraid, int, S_IRUGO);
MODULE_PARM_DESC(noraid, "Force card into bypass mode");
| linux-master | drivers/ata/pata_it821x.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2013 Freescale Semiconductor, Inc.
* Freescale IMX AHCI SATA platform driver
*
* based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/ahci_platform.h>
#include <linux/gpio/consumer.h>
#include <linux/of_device.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/libata.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/thermal.h>
#include "ahci.h"
#define DRV_NAME "ahci-imx"
enum {
/* Timer 1-ms Register */
IMX_TIMER1MS = 0x00e0,
/* Port0 PHY Control Register */
IMX_P0PHYCR = 0x0178,
IMX_P0PHYCR_TEST_PDDQ = 1 << 20,
IMX_P0PHYCR_CR_READ = 1 << 19,
IMX_P0PHYCR_CR_WRITE = 1 << 18,
IMX_P0PHYCR_CR_CAP_DATA = 1 << 17,
IMX_P0PHYCR_CR_CAP_ADDR = 1 << 16,
/* Port0 PHY Status Register */
IMX_P0PHYSR = 0x017c,
IMX_P0PHYSR_CR_ACK = 1 << 18,
IMX_P0PHYSR_CR_DATA_OUT = 0xffff << 0,
/* Lane0 Output Status Register */
IMX_LANE0_OUT_STAT = 0x2003,
IMX_LANE0_OUT_STAT_RX_PLL_STATE = 1 << 1,
/* Clock Reset Register */
IMX_CLOCK_RESET = 0x7f3f,
IMX_CLOCK_RESET_RESET = 1 << 0,
/* IMX8QM HSIO AHCI definitions */
IMX8QM_SATA_PHY_RX_IMPED_RATIO_OFFSET = 0x03,
IMX8QM_SATA_PHY_TX_IMPED_RATIO_OFFSET = 0x09,
IMX8QM_SATA_PHY_IMPED_RATIO_85OHM = 0x6c,
IMX8QM_LPCG_PHYX2_OFFSET = 0x00000,
IMX8QM_CSR_PHYX2_OFFSET = 0x90000,
IMX8QM_CSR_PHYX1_OFFSET = 0xa0000,
IMX8QM_CSR_PHYX_STTS0_OFFSET = 0x4,
IMX8QM_CSR_PCIEA_OFFSET = 0xb0000,
IMX8QM_CSR_PCIEB_OFFSET = 0xc0000,
IMX8QM_CSR_SATA_OFFSET = 0xd0000,
IMX8QM_CSR_PCIE_CTRL2_OFFSET = 0x8,
IMX8QM_CSR_MISC_OFFSET = 0xe0000,
IMX8QM_LPCG_PHYX2_PCLK0_MASK = (0x3 << 16),
IMX8QM_LPCG_PHYX2_PCLK1_MASK = (0x3 << 20),
IMX8QM_PHY_APB_RSTN_0 = BIT(0),
IMX8QM_PHY_MODE_SATA = BIT(19),
IMX8QM_PHY_MODE_MASK = (0xf << 17),
IMX8QM_PHY_PIPE_RSTN_0 = BIT(24),
IMX8QM_PHY_PIPE_RSTN_OVERRIDE_0 = BIT(25),
IMX8QM_PHY_PIPE_RSTN_1 = BIT(26),
IMX8QM_PHY_PIPE_RSTN_OVERRIDE_1 = BIT(27),
IMX8QM_STTS0_LANE0_TX_PLL_LOCK = BIT(4),
IMX8QM_MISC_IOB_RXENA = BIT(0),
IMX8QM_MISC_IOB_TXENA = BIT(1),
IMX8QM_MISC_PHYX1_EPCS_SEL = BIT(12),
IMX8QM_MISC_CLKREQN_OUT_OVERRIDE_1 = BIT(24),
IMX8QM_MISC_CLKREQN_OUT_OVERRIDE_0 = BIT(25),
IMX8QM_MISC_CLKREQN_IN_OVERRIDE_1 = BIT(28),
IMX8QM_MISC_CLKREQN_IN_OVERRIDE_0 = BIT(29),
IMX8QM_SATA_CTRL_RESET_N = BIT(12),
IMX8QM_SATA_CTRL_EPCS_PHYRESET_N = BIT(7),
IMX8QM_CTRL_BUTTON_RST_N = BIT(21),
IMX8QM_CTRL_POWER_UP_RST_N = BIT(23),
IMX8QM_CTRL_LTSSM_ENABLE = BIT(4),
};
enum ahci_imx_type {
AHCI_IMX53,
AHCI_IMX6Q,
AHCI_IMX6QP,
AHCI_IMX8QM,
};
struct imx_ahci_priv {
struct platform_device *ahci_pdev;
enum ahci_imx_type type;
struct clk *sata_clk;
struct clk *sata_ref_clk;
struct clk *ahb_clk;
struct clk *epcs_tx_clk;
struct clk *epcs_rx_clk;
struct clk *phy_apbclk;
struct clk *phy_pclk0;
struct clk *phy_pclk1;
void __iomem *phy_base;
struct gpio_desc *clkreq_gpiod;
struct regmap *gpr;
bool no_device;
bool first_time;
u32 phy_params;
u32 imped_ratio;
};
static int ahci_imx_hotplug;
module_param_named(hotplug, ahci_imx_hotplug, int, 0644);
MODULE_PARM_DESC(hotplug, "AHCI IMX hot-plug support (0=Don't support, 1=support)");
static void ahci_imx_host_stop(struct ata_host *host);
static int imx_phy_crbit_assert(void __iomem *mmio, u32 bit, bool assert)
{
int timeout = 10;
u32 crval;
u32 srval;
/* Assert or deassert the bit */
crval = readl(mmio + IMX_P0PHYCR);
if (assert)
crval |= bit;
else
crval &= ~bit;
writel(crval, mmio + IMX_P0PHYCR);
/* Wait for the cr_ack signal */
do {
srval = readl(mmio + IMX_P0PHYSR);
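/* cr_ack should be set after an assert and cleared after a deassert */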
if ((assert ? srval : ~srval) & IMX_P0PHYSR_CR_ACK)
break;
usleep_range(100, 200);
} while (--timeout);
return timeout ? 0 : -ETIMEDOUT;
}
static int imx_phy_reg_addressing(u16 addr, void __iomem *mmio)
{
u32 crval = addr;
int ret;
/* Supply the address on cr_data_in */
writel(crval, mmio + IMX_P0PHYCR);
/* Assert the cr_cap_addr signal */
ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_ADDR, true);
if (ret)
return ret;
/* Deassert cr_cap_addr */
ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_ADDR, false);
if (ret)
return ret;
return 0;
}
static int imx_phy_reg_write(u16 val, void __iomem *mmio)
{
u32 crval = val;
int ret;
/* Supply the data on cr_data_in */
writel(crval, mmio + IMX_P0PHYCR);
/* Assert the cr_cap_data signal */
ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_DATA, true);
if (ret)
return ret;
/* Deassert cr_cap_data */
ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_DATA, false);
if (ret)
return ret;
if (val & IMX_CLOCK_RESET_RESET) {
/*
* In case we're resetting the phy, it's unable to acknowledge,
* so we return immediately here.
*/
crval |= IMX_P0PHYCR_CR_WRITE;
writel(crval, mmio + IMX_P0PHYCR);
goto out;
}
/* Assert the cr_write signal */
ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_WRITE, true);
if (ret)
return ret;
/* Deassert cr_write */
ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_WRITE, false);
if (ret)
return ret;
out:
return 0;
}
static int imx_phy_reg_read(u16 *val, void __iomem *mmio)
{
int ret;
/* Assert the cr_read signal */
ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_READ, true);
if (ret)
return ret;
/* Capture the data from cr_data_out[] */
*val = readl(mmio + IMX_P0PHYSR) & IMX_P0PHYSR_CR_DATA_OUT;
/* Deassert cr_read */
ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_READ, false);
if (ret)
return ret;
return 0;
}
static int imx_sata_phy_reset(struct ahci_host_priv *hpriv)
{
struct imx_ahci_priv *imxpriv = hpriv->plat_data;
void __iomem *mmio = hpriv->mmio;
int timeout = 10;
u16 val;
int ret;
if (imxpriv->type == AHCI_IMX6QP) {
/* 6qp adds the sata reset mechanism, use it for 6qp sata */
regmap_update_bits(imxpriv->gpr, IOMUXC_GPR5,
IMX6Q_GPR5_SATA_SW_PD, 0);
regmap_update_bits(imxpriv->gpr, IOMUXC_GPR5,
IMX6Q_GPR5_SATA_SW_RST, 0);
udelay(50);
regmap_update_bits(imxpriv->gpr, IOMUXC_GPR5,
IMX6Q_GPR5_SATA_SW_RST,
IMX6Q_GPR5_SATA_SW_RST);
return 0;
}
/* Reset SATA PHY by setting RESET bit of PHY register CLOCK_RESET */
ret = imx_phy_reg_addressing(IMX_CLOCK_RESET, mmio);
if (ret)
return ret;
ret = imx_phy_reg_write(IMX_CLOCK_RESET_RESET, mmio);
if (ret)
return ret;
/* Wait for PHY RX_PLL to be stable */
do {
usleep_range(100, 200);
ret = imx_phy_reg_addressing(IMX_LANE0_OUT_STAT, mmio);
if (ret)
return ret;
ret = imx_phy_reg_read(&val, mmio);
if (ret)
return ret;
if (val & IMX_LANE0_OUT_STAT_RX_PLL_STATE)
break;
} while (--timeout);
return timeout ? 0 : -ETIMEDOUT;
}
enum {
/* SATA PHY Register */
SATA_PHY_CR_CLOCK_CRCMP_LT_LIMIT = 0x0001,
SATA_PHY_CR_CLOCK_DAC_CTL = 0x0008,
SATA_PHY_CR_CLOCK_RTUNE_CTL = 0x0009,
SATA_PHY_CR_CLOCK_ADC_OUT = 0x000A,
SATA_PHY_CR_CLOCK_MPLL_TST = 0x0017,
};
static int read_adc_sum(void *dev, u16 rtune_ctl_reg, void __iomem * mmio)
{
u16 adc_out_reg, read_sum;
u32 index, read_attempt;
const u32 attempt_limit = 200;
imx_phy_reg_addressing(SATA_PHY_CR_CLOCK_RTUNE_CTL, mmio);
imx_phy_reg_write(rtune_ctl_reg, mmio);
/* two dummy reads */
index = 0;
read_attempt = 0;
adc_out_reg = 0;
imx_phy_reg_addressing(SATA_PHY_CR_CLOCK_ADC_OUT, mmio);
while (index < 2) {
imx_phy_reg_read(&adc_out_reg, mmio);
/* check if valid */
if (adc_out_reg & 0x400)
index++;
read_attempt++;
if (read_attempt > attempt_limit) {
dev_err(dev, "Read REG more than %d times!\n",
attempt_limit);
break;
}
}
index = 0;
read_attempt = 0;
read_sum = 0;
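/* Accumulate 80 valid samples; bit 10 of adc_out flags a valid reading */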
while (index < 80) {
imx_phy_reg_read(&adc_out_reg, mmio);
if (adc_out_reg & 0x400) {
read_sum = read_sum + (adc_out_reg & 0x3FF);
index++;
}
read_attempt++;
if (read_attempt > attempt_limit) {
dev_err(dev, "Read REG more than %d times!\n",
attempt_limit);
break;
}
}
/* Use u32 arithmetic to keep 1/1000 precision */
return (read_sum * 1000) / 80;
}
/* SATA AHCI temperature monitor */
static int __sata_ahci_read_temperature(void *dev, int *temp)
{
u16 mpll_test_reg, rtune_ctl_reg, dac_ctl_reg, read_sum;
u32 str1, str2, str3, str4;
int m1, m2, a;
struct ahci_host_priv *hpriv = dev_get_drvdata(dev);
void __iomem *mmio = hpriv->mmio;
/* check rd-wr to reg */
read_sum = 0;
imx_phy_reg_addressing(SATA_PHY_CR_CLOCK_CRCMP_LT_LIMIT, mmio);
imx_phy_reg_write(read_sum, mmio);
imx_phy_reg_read(&read_sum, mmio);
if ((read_sum & 0xffff) != 0)
dev_err(dev, "Read/Write REG error, 0x%x!\n", read_sum);
imx_phy_reg_write(0x5A5A, mmio);
imx_phy_reg_read(&read_sum, mmio);
if ((read_sum & 0xffff) != 0x5A5A)
dev_err(dev, "Read/Write REG error, 0x%x!\n", read_sum);
imx_phy_reg_write(0x1234, mmio);
imx_phy_reg_read(&read_sum, mmio);
if ((read_sum & 0xffff) != 0x1234)
dev_err(dev, "Read/Write REG error, 0x%x!\n", read_sum);
/* start temperature test */
imx_phy_reg_addressing(SATA_PHY_CR_CLOCK_MPLL_TST, mmio);
imx_phy_reg_read(&mpll_test_reg, mmio);
imx_phy_reg_addressing(SATA_PHY_CR_CLOCK_RTUNE_CTL, mmio);
imx_phy_reg_read(&rtune_ctl_reg, mmio);
imx_phy_reg_addressing(SATA_PHY_CR_CLOCK_DAC_CTL, mmio);
imx_phy_reg_read(&dac_ctl_reg, mmio);
/* mpll_tst.meas_iv ([12:2]) */
str1 = (mpll_test_reg >> 2) & 0x7FF;
/* rtune_ctl.mode ([1:0]) */
str2 = (rtune_ctl_reg) & 0x3;
/* dac_ctl.dac_mode ([14:12]) */
str3 = (dac_ctl_reg >> 12) & 0x7;
/* rtune_ctl.sel_atbp ([4]) */
str4 = (rtune_ctl_reg >> 4);
/* Calculate the m1 */
/* mpll_tst.meas_iv */
mpll_test_reg = (mpll_test_reg & 0xE03) | (512) << 2;
/* rtune_ctl.mode */
rtune_ctl_reg = (rtune_ctl_reg & 0xFFC) | (1);
/* dac_ctl.dac_mode */
dac_ctl_reg = (dac_ctl_reg & 0x8FF) | (4) << 12;
/* rtune_ctl.sel_atbp */
rtune_ctl_reg = (rtune_ctl_reg & 0xFEF) | (0) << 4;
imx_phy_reg_addressing(SATA_PHY_CR_CLOCK_MPLL_TST, mmio);
imx_phy_reg_write(mpll_test_reg, mmio);
imx_phy_reg_addressing(SATA_PHY_CR_CLOCK_DAC_CTL, mmio);
imx_phy_reg_write(dac_ctl_reg, mmio);
m1 = read_adc_sum(dev, rtune_ctl_reg, mmio);
/* Calculate the m2 */
/* rtune_ctl.sel_atbp */
rtune_ctl_reg = (rtune_ctl_reg & 0xFEF) | (1) << 4;
m2 = read_adc_sum(dev, rtune_ctl_reg, mmio);
/* restore the status */
/* mpll_tst.meas_iv */
mpll_test_reg = (mpll_test_reg & 0xE03) | (str1) << 2;
/* rtune_ctl.mode */
rtune_ctl_reg = (rtune_ctl_reg & 0xFFC) | (str2);
/* dac_ctl.dac_mode */
dac_ctl_reg = (dac_ctl_reg & 0x8FF) | (str3) << 12;
/* rtune_ctl.sel_atbp */
rtune_ctl_reg = (rtune_ctl_reg & 0xFEF) | (str4) << 4;
imx_phy_reg_addressing(SATA_PHY_CR_CLOCK_MPLL_TST, mmio);
imx_phy_reg_write(mpll_test_reg, mmio);
imx_phy_reg_addressing(SATA_PHY_CR_CLOCK_DAC_CTL, mmio);
imx_phy_reg_write(dac_ctl_reg, mmio);
imx_phy_reg_addressing(SATA_PHY_CR_CLOCK_RTUNE_CTL, mmio);
imx_phy_reg_write(rtune_ctl_reg, mmio);
/* Compute temperature */
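/*
 * a is the ratio (m2 - m1) / m2 expressed in 1/1000 units; the quadratic
 * fit below converts it into a temperature in millidegrees. Clamp m2 to
 * at least 1000 first so the divisor m2/1000 can never be zero.
 */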
if (!(m2 / 1000))
m2 = 1000;
a = (m2 - m1) / (m2/1000);
*temp = ((-559) * a * a) / 1000 + (1379) * a + (-458000);
return 0;
}
static int sata_ahci_read_temperature(struct thermal_zone_device *tz, int *temp)
{
return __sata_ahci_read_temperature(thermal_zone_device_priv(tz), temp);
}
static ssize_t sata_ahci_show_temp(struct device *dev,
struct device_attribute *da,
char *buf)
{
unsigned int temp = 0;
int err;
err = __sata_ahci_read_temperature(dev, &temp);
if (err < 0)
return err;
return sprintf(buf, "%u\n", temp);
}
static const struct thermal_zone_device_ops fsl_sata_ahci_of_thermal_ops = {
.get_temp = sata_ahci_read_temperature,
};
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, sata_ahci_show_temp, NULL, 0);
static struct attribute *fsl_sata_ahci_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
NULL
};
ATTRIBUTE_GROUPS(fsl_sata_ahci);
static int imx8_sata_enable(struct ahci_host_priv *hpriv)
{
u32 val, reg;
int i, ret;
struct imx_ahci_priv *imxpriv = hpriv->plat_data;
struct device *dev = &imxpriv->ahci_pdev->dev;
/* configure the hsio for sata */
ret = clk_prepare_enable(imxpriv->phy_pclk0);
if (ret < 0) {
dev_err(dev, "can't enable phy_pclk0.\n");
return ret;
}
ret = clk_prepare_enable(imxpriv->phy_pclk1);
if (ret < 0) {
dev_err(dev, "can't enable phy_pclk1.\n");
goto disable_phy_pclk0;
}
ret = clk_prepare_enable(imxpriv->epcs_tx_clk);
if (ret < 0) {
dev_err(dev, "can't enable epcs_tx_clk.\n");
goto disable_phy_pclk1;
}
ret = clk_prepare_enable(imxpriv->epcs_rx_clk);
if (ret < 0) {
dev_err(dev, "can't enable epcs_rx_clk.\n");
goto disable_epcs_tx_clk;
}
ret = clk_prepare_enable(imxpriv->phy_apbclk);
if (ret < 0) {
dev_err(dev, "can't enable phy_apbclk.\n");
goto disable_epcs_rx_clk;
}
/* Configure PHYx2 PIPE_RSTN */
regmap_read(imxpriv->gpr, IMX8QM_CSR_PCIEA_OFFSET +
IMX8QM_CSR_PCIE_CTRL2_OFFSET, &val);
if ((val & IMX8QM_CTRL_LTSSM_ENABLE) == 0) {
/* The link of the PCIEA of HSIO is down */
regmap_update_bits(imxpriv->gpr,
IMX8QM_CSR_PHYX2_OFFSET,
IMX8QM_PHY_PIPE_RSTN_0 |
IMX8QM_PHY_PIPE_RSTN_OVERRIDE_0,
IMX8QM_PHY_PIPE_RSTN_0 |
IMX8QM_PHY_PIPE_RSTN_OVERRIDE_0);
}
regmap_read(imxpriv->gpr, IMX8QM_CSR_PCIEB_OFFSET +
IMX8QM_CSR_PCIE_CTRL2_OFFSET, &reg);
if ((reg & IMX8QM_CTRL_LTSSM_ENABLE) == 0) {
/* The link of the PCIEB of HSIO is down */
regmap_update_bits(imxpriv->gpr,
IMX8QM_CSR_PHYX2_OFFSET,
IMX8QM_PHY_PIPE_RSTN_1 |
IMX8QM_PHY_PIPE_RSTN_OVERRIDE_1,
IMX8QM_PHY_PIPE_RSTN_1 |
IMX8QM_PHY_PIPE_RSTN_OVERRIDE_1);
}
if (((reg | val) & IMX8QM_CTRL_LTSSM_ENABLE) == 0) {
/* The links of both PCIEA and PCIEB of HSIO are down */
regmap_update_bits(imxpriv->gpr,
IMX8QM_LPCG_PHYX2_OFFSET,
IMX8QM_LPCG_PHYX2_PCLK0_MASK |
IMX8QM_LPCG_PHYX2_PCLK1_MASK,
0);
}
/* set PWR_RST and BT_RST of csr_pciea */
val = IMX8QM_CSR_PCIEA_OFFSET + IMX8QM_CSR_PCIE_CTRL2_OFFSET;
regmap_update_bits(imxpriv->gpr,
val,
IMX8QM_CTRL_BUTTON_RST_N,
IMX8QM_CTRL_BUTTON_RST_N);
regmap_update_bits(imxpriv->gpr,
val,
IMX8QM_CTRL_POWER_UP_RST_N,
IMX8QM_CTRL_POWER_UP_RST_N);
/* PHYX1_MODE to SATA */
regmap_update_bits(imxpriv->gpr,
IMX8QM_CSR_PHYX1_OFFSET,
IMX8QM_PHY_MODE_MASK,
IMX8QM_PHY_MODE_SATA);
/*
* BIT0 RXENA 1, BIT1 TXENA 0
* BIT12 PHY_X1_EPCS_SEL 1.
*/
regmap_update_bits(imxpriv->gpr,
IMX8QM_CSR_MISC_OFFSET,
IMX8QM_MISC_IOB_RXENA,
IMX8QM_MISC_IOB_RXENA);
regmap_update_bits(imxpriv->gpr,
IMX8QM_CSR_MISC_OFFSET,
IMX8QM_MISC_IOB_TXENA,
0);
regmap_update_bits(imxpriv->gpr,
IMX8QM_CSR_MISC_OFFSET,
IMX8QM_MISC_PHYX1_EPCS_SEL,
IMX8QM_MISC_PHYX1_EPCS_SEL);
/*
* PCIe and SATA may share the same clock source (HPLL or an
* external oscillator). When PCIe is in a low power mode
* (L1.x, L2, etc.), that clock source can be turned off. If
* SATA still requires the clock to keep toggling, SATA would
* then misbehave. Set the CLKREQ overrides here to avoid that.
*/
regmap_update_bits(imxpriv->gpr,
IMX8QM_CSR_MISC_OFFSET,
IMX8QM_MISC_CLKREQN_OUT_OVERRIDE_1 |
IMX8QM_MISC_CLKREQN_OUT_OVERRIDE_0 |
IMX8QM_MISC_CLKREQN_IN_OVERRIDE_1 |
IMX8QM_MISC_CLKREQN_IN_OVERRIDE_0,
IMX8QM_MISC_CLKREQN_OUT_OVERRIDE_1 |
IMX8QM_MISC_CLKREQN_OUT_OVERRIDE_0 |
IMX8QM_MISC_CLKREQN_IN_OVERRIDE_1 |
IMX8QM_MISC_CLKREQN_IN_OVERRIDE_0);
/* clear PHY RST, then set it */
regmap_update_bits(imxpriv->gpr,
IMX8QM_CSR_SATA_OFFSET,
IMX8QM_SATA_CTRL_EPCS_PHYRESET_N,
0);
regmap_update_bits(imxpriv->gpr,
IMX8QM_CSR_SATA_OFFSET,
IMX8QM_SATA_CTRL_EPCS_PHYRESET_N,
IMX8QM_SATA_CTRL_EPCS_PHYRESET_N);
/* CTRL RST: SET -> delay 1 us -> CLEAR -> SET */
regmap_update_bits(imxpriv->gpr,
IMX8QM_CSR_SATA_OFFSET,
IMX8QM_SATA_CTRL_RESET_N,
IMX8QM_SATA_CTRL_RESET_N);
udelay(1);
regmap_update_bits(imxpriv->gpr,
IMX8QM_CSR_SATA_OFFSET,
IMX8QM_SATA_CTRL_RESET_N,
0);
regmap_update_bits(imxpriv->gpr,
IMX8QM_CSR_SATA_OFFSET,
IMX8QM_SATA_CTRL_RESET_N,
IMX8QM_SATA_CTRL_RESET_N);
/* APB reset */
regmap_update_bits(imxpriv->gpr,
IMX8QM_CSR_PHYX1_OFFSET,
IMX8QM_PHY_APB_RSTN_0,
IMX8QM_PHY_APB_RSTN_0);
for (i = 0; i < 100; i++) {
reg = IMX8QM_CSR_PHYX1_OFFSET +
IMX8QM_CSR_PHYX_STTS0_OFFSET;
regmap_read(imxpriv->gpr, reg, &val);
val &= IMX8QM_STTS0_LANE0_TX_PLL_LOCK;
if (val == IMX8QM_STTS0_LANE0_TX_PLL_LOCK)
break;
udelay(1);
}
if (val != IMX8QM_STTS0_LANE0_TX_PLL_LOCK) {
dev_err(dev, "TX PLL of the PHY is not locked\n");
ret = -ENODEV;
} else {
writeb(imxpriv->imped_ratio, imxpriv->phy_base +
IMX8QM_SATA_PHY_RX_IMPED_RATIO_OFFSET);
writeb(imxpriv->imped_ratio, imxpriv->phy_base +
IMX8QM_SATA_PHY_TX_IMPED_RATIO_OFFSET);
reg = readb(imxpriv->phy_base +
IMX8QM_SATA_PHY_RX_IMPED_RATIO_OFFSET);
if (unlikely(reg != imxpriv->imped_ratio))
dev_info(dev, "Can't set PHY RX impedance ratio.\n");
reg = readb(imxpriv->phy_base +
IMX8QM_SATA_PHY_TX_IMPED_RATIO_OFFSET);
if (unlikely(reg != imxpriv->imped_ratio))
dev_info(dev, "Can't set PHY TX impedance ratio.\n");
usleep_range(50, 100);
/*
* To reduce the power consumption, gate off
* the PHY clks
*/
clk_disable_unprepare(imxpriv->phy_apbclk);
clk_disable_unprepare(imxpriv->phy_pclk1);
clk_disable_unprepare(imxpriv->phy_pclk0);
return ret;
}
clk_disable_unprepare(imxpriv->phy_apbclk);
disable_epcs_rx_clk:
clk_disable_unprepare(imxpriv->epcs_rx_clk);
disable_epcs_tx_clk:
clk_disable_unprepare(imxpriv->epcs_tx_clk);
disable_phy_pclk1:
clk_disable_unprepare(imxpriv->phy_pclk1);
disable_phy_pclk0:
clk_disable_unprepare(imxpriv->phy_pclk0);
return ret;
}
static int imx_sata_enable(struct ahci_host_priv *hpriv)
{
struct imx_ahci_priv *imxpriv = hpriv->plat_data;
struct device *dev = &imxpriv->ahci_pdev->dev;
int ret;
if (imxpriv->no_device)
return 0;
ret = ahci_platform_enable_regulators(hpriv);
if (ret)
return ret;
ret = clk_prepare_enable(imxpriv->sata_ref_clk);
if (ret < 0)
goto disable_regulator;
if (imxpriv->type == AHCI_IMX6Q || imxpriv->type == AHCI_IMX6QP) {
/*
* Set the PHY parameters. GPR13 is configured in two steps:
* the first write (mask 0x07ffffff) sets all parameters except
* mpll_clk_en, and the second write then sets mpll_clk_en.
*/
regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK |
IMX6Q_GPR13_SATA_RX_LOS_LVL_MASK |
IMX6Q_GPR13_SATA_RX_DPLL_MODE_MASK |
IMX6Q_GPR13_SATA_SPD_MODE_MASK |
IMX6Q_GPR13_SATA_MPLL_SS_EN |
IMX6Q_GPR13_SATA_TX_ATTEN_MASK |
IMX6Q_GPR13_SATA_TX_BOOST_MASK |
IMX6Q_GPR13_SATA_TX_LVL_MASK |
IMX6Q_GPR13_SATA_MPLL_CLK_EN |
IMX6Q_GPR13_SATA_TX_EDGE_RATE,
imxpriv->phy_params);
regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
IMX6Q_GPR13_SATA_MPLL_CLK_EN,
IMX6Q_GPR13_SATA_MPLL_CLK_EN);
usleep_range(100, 200);
ret = imx_sata_phy_reset(hpriv);
if (ret) {
dev_err(dev, "failed to reset phy: %d\n", ret);
goto disable_clk;
}
} else if (imxpriv->type == AHCI_IMX8QM) {
ret = imx8_sata_enable(hpriv);
}
usleep_range(1000, 2000);
return 0;
disable_clk:
clk_disable_unprepare(imxpriv->sata_ref_clk);
disable_regulator:
ahci_platform_disable_regulators(hpriv);
return ret;
}
static void imx_sata_disable(struct ahci_host_priv *hpriv)
{
struct imx_ahci_priv *imxpriv = hpriv->plat_data;
if (imxpriv->no_device)
return;
switch (imxpriv->type) {
case AHCI_IMX6QP:
regmap_update_bits(imxpriv->gpr, IOMUXC_GPR5,
IMX6Q_GPR5_SATA_SW_PD,
IMX6Q_GPR5_SATA_SW_PD);
regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
IMX6Q_GPR13_SATA_MPLL_CLK_EN,
!IMX6Q_GPR13_SATA_MPLL_CLK_EN);
break;
case AHCI_IMX6Q:
regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
IMX6Q_GPR13_SATA_MPLL_CLK_EN,
!IMX6Q_GPR13_SATA_MPLL_CLK_EN);
break;
case AHCI_IMX8QM:
clk_disable_unprepare(imxpriv->epcs_rx_clk);
clk_disable_unprepare(imxpriv->epcs_tx_clk);
break;
default:
break;
}
clk_disable_unprepare(imxpriv->sata_ref_clk);
ahci_platform_disable_regulators(hpriv);
}
static void ahci_imx_error_handler(struct ata_port *ap)
{
u32 reg_val;
struct ata_device *dev;
struct ata_host *host = dev_get_drvdata(ap->dev);
struct ahci_host_priv *hpriv = host->private_data;
void __iomem *mmio = hpriv->mmio;
struct imx_ahci_priv *imxpriv = hpriv->plat_data;
ahci_error_handler(ap);
if (!(imxpriv->first_time) || ahci_imx_hotplug)
return;
imxpriv->first_time = false;
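/* If any device on the link is still enabled, keep the port powered */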
ata_for_each_dev(dev, &ap->link, ENABLED)
return;
/*
* Disable link to save power. An imx ahci port can't be recovered
* without a full reset once PDDQ mode is enabled, making it
* impossible to use as part of libata LPM.
*/
reg_val = readl(mmio + IMX_P0PHYCR);
writel(reg_val | IMX_P0PHYCR_TEST_PDDQ, mmio + IMX_P0PHYCR);
imx_sata_disable(hpriv);
imxpriv->no_device = true;
dev_info(ap->dev, "no device found, disabling link.\n");
dev_info(ap->dev, "pass " MODULE_PARAM_PREFIX ".hotplug=1 to enable hotplug\n");
}
static int ahci_imx_softreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct ata_host *host = dev_get_drvdata(ap->dev);
struct ahci_host_priv *hpriv = host->private_data;
struct imx_ahci_priv *imxpriv = hpriv->plat_data;
int ret;
if (imxpriv->type == AHCI_IMX53)
ret = ahci_pmp_retry_srst_ops.softreset(link, class, deadline);
else
ret = ahci_ops.softreset(link, class, deadline);
return ret;
}
static struct ata_port_operations ahci_imx_ops = {
.inherits = &ahci_ops,
.host_stop = ahci_imx_host_stop,
.error_handler = ahci_imx_error_handler,
.softreset = ahci_imx_softreset,
};
static const struct ata_port_info ahci_imx_port_info = {
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_imx_ops,
};
static const struct of_device_id imx_ahci_of_match[] = {
{ .compatible = "fsl,imx53-ahci", .data = (void *)AHCI_IMX53 },
{ .compatible = "fsl,imx6q-ahci", .data = (void *)AHCI_IMX6Q },
{ .compatible = "fsl,imx6qp-ahci", .data = (void *)AHCI_IMX6QP },
{ .compatible = "fsl,imx8qm-ahci", .data = (void *)AHCI_IMX8QM },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_ahci_of_match);
struct reg_value {
u32 of_value;
u32 reg_value;
};
struct reg_property {
const char *name;
const struct reg_value *values;
size_t num_values;
u32 def_value;
u32 set_value;
};
static const struct reg_value gpr13_tx_level[] = {
{ 937, IMX6Q_GPR13_SATA_TX_LVL_0_937_V },
{ 947, IMX6Q_GPR13_SATA_TX_LVL_0_947_V },
{ 957, IMX6Q_GPR13_SATA_TX_LVL_0_957_V },
{ 966, IMX6Q_GPR13_SATA_TX_LVL_0_966_V },
{ 976, IMX6Q_GPR13_SATA_TX_LVL_0_976_V },
{ 986, IMX6Q_GPR13_SATA_TX_LVL_0_986_V },
{ 996, IMX6Q_GPR13_SATA_TX_LVL_0_996_V },
{ 1005, IMX6Q_GPR13_SATA_TX_LVL_1_005_V },
{ 1015, IMX6Q_GPR13_SATA_TX_LVL_1_015_V },
{ 1025, IMX6Q_GPR13_SATA_TX_LVL_1_025_V },
{ 1035, IMX6Q_GPR13_SATA_TX_LVL_1_035_V },
{ 1045, IMX6Q_GPR13_SATA_TX_LVL_1_045_V },
{ 1054, IMX6Q_GPR13_SATA_TX_LVL_1_054_V },
{ 1064, IMX6Q_GPR13_SATA_TX_LVL_1_064_V },
{ 1074, IMX6Q_GPR13_SATA_TX_LVL_1_074_V },
{ 1084, IMX6Q_GPR13_SATA_TX_LVL_1_084_V },
{ 1094, IMX6Q_GPR13_SATA_TX_LVL_1_094_V },
{ 1104, IMX6Q_GPR13_SATA_TX_LVL_1_104_V },
{ 1113, IMX6Q_GPR13_SATA_TX_LVL_1_113_V },
{ 1123, IMX6Q_GPR13_SATA_TX_LVL_1_123_V },
{ 1133, IMX6Q_GPR13_SATA_TX_LVL_1_133_V },
{ 1143, IMX6Q_GPR13_SATA_TX_LVL_1_143_V },
{ 1152, IMX6Q_GPR13_SATA_TX_LVL_1_152_V },
{ 1162, IMX6Q_GPR13_SATA_TX_LVL_1_162_V },
{ 1172, IMX6Q_GPR13_SATA_TX_LVL_1_172_V },
{ 1182, IMX6Q_GPR13_SATA_TX_LVL_1_182_V },
{ 1191, IMX6Q_GPR13_SATA_TX_LVL_1_191_V },
{ 1201, IMX6Q_GPR13_SATA_TX_LVL_1_201_V },
{ 1211, IMX6Q_GPR13_SATA_TX_LVL_1_211_V },
{ 1221, IMX6Q_GPR13_SATA_TX_LVL_1_221_V },
{ 1230, IMX6Q_GPR13_SATA_TX_LVL_1_230_V },
{ 1240, IMX6Q_GPR13_SATA_TX_LVL_1_240_V }
};
static const struct reg_value gpr13_tx_boost[] = {
{ 0, IMX6Q_GPR13_SATA_TX_BOOST_0_00_DB },
{ 370, IMX6Q_GPR13_SATA_TX_BOOST_0_37_DB },
{ 740, IMX6Q_GPR13_SATA_TX_BOOST_0_74_DB },
{ 1110, IMX6Q_GPR13_SATA_TX_BOOST_1_11_DB },
{ 1480, IMX6Q_GPR13_SATA_TX_BOOST_1_48_DB },
{ 1850, IMX6Q_GPR13_SATA_TX_BOOST_1_85_DB },
{ 2220, IMX6Q_GPR13_SATA_TX_BOOST_2_22_DB },
{ 2590, IMX6Q_GPR13_SATA_TX_BOOST_2_59_DB },
{ 2960, IMX6Q_GPR13_SATA_TX_BOOST_2_96_DB },
{ 3330, IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB },
{ 3700, IMX6Q_GPR13_SATA_TX_BOOST_3_70_DB },
{ 4070, IMX6Q_GPR13_SATA_TX_BOOST_4_07_DB },
{ 4440, IMX6Q_GPR13_SATA_TX_BOOST_4_44_DB },
{ 4810, IMX6Q_GPR13_SATA_TX_BOOST_4_81_DB },
{ 5280, IMX6Q_GPR13_SATA_TX_BOOST_5_28_DB },
{ 5750, IMX6Q_GPR13_SATA_TX_BOOST_5_75_DB }
};
static const struct reg_value gpr13_tx_atten[] = {
{ 8, IMX6Q_GPR13_SATA_TX_ATTEN_8_16 },
{ 9, IMX6Q_GPR13_SATA_TX_ATTEN_9_16 },
{ 10, IMX6Q_GPR13_SATA_TX_ATTEN_10_16 },
{ 12, IMX6Q_GPR13_SATA_TX_ATTEN_12_16 },
{ 14, IMX6Q_GPR13_SATA_TX_ATTEN_14_16 },
{ 16, IMX6Q_GPR13_SATA_TX_ATTEN_16_16 },
};
static const struct reg_value gpr13_rx_eq[] = {
{ 500, IMX6Q_GPR13_SATA_RX_EQ_VAL_0_5_DB },
{ 1000, IMX6Q_GPR13_SATA_RX_EQ_VAL_1_0_DB },
{ 1500, IMX6Q_GPR13_SATA_RX_EQ_VAL_1_5_DB },
{ 2000, IMX6Q_GPR13_SATA_RX_EQ_VAL_2_0_DB },
{ 2500, IMX6Q_GPR13_SATA_RX_EQ_VAL_2_5_DB },
{ 3000, IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB },
{ 3500, IMX6Q_GPR13_SATA_RX_EQ_VAL_3_5_DB },
{ 4000, IMX6Q_GPR13_SATA_RX_EQ_VAL_4_0_DB },
};
static const struct reg_property gpr13_props[] = {
{
.name = "fsl,transmit-level-mV",
.values = gpr13_tx_level,
.num_values = ARRAY_SIZE(gpr13_tx_level),
.def_value = IMX6Q_GPR13_SATA_TX_LVL_1_025_V,
}, {
.name = "fsl,transmit-boost-mdB",
.values = gpr13_tx_boost,
.num_values = ARRAY_SIZE(gpr13_tx_boost),
.def_value = IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB,
}, {
.name = "fsl,transmit-atten-16ths",
.values = gpr13_tx_atten,
.num_values = ARRAY_SIZE(gpr13_tx_atten),
.def_value = IMX6Q_GPR13_SATA_TX_ATTEN_9_16,
}, {
.name = "fsl,receive-eq-mdB",
.values = gpr13_rx_eq,
.num_values = ARRAY_SIZE(gpr13_rx_eq),
.def_value = IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB,
}, {
.name = "fsl,no-spread-spectrum",
.def_value = IMX6Q_GPR13_SATA_MPLL_SS_EN,
.set_value = 0,
},
};
static u32 imx_ahci_parse_props(struct device *dev,
const struct reg_property *prop, size_t num)
{
struct device_node *np = dev->of_node;
u32 reg_value = 0;
int i, j;
for (i = 0; i < num; i++, prop++) {
u32 of_val;
if (prop->num_values == 0) {
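/* Boolean property: presence selects set_value, absence def_value */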
if (of_property_read_bool(np, prop->name))
reg_value |= prop->set_value;
else
reg_value |= prop->def_value;
continue;
}
if (of_property_read_u32(np, prop->name, &of_val)) {
dev_info(dev, "%s not specified, using %08x\n",
prop->name, prop->def_value);
reg_value |= prop->def_value;
continue;
}
for (j = 0; j < prop->num_values; j++) {
if (prop->values[j].of_value == of_val) {
dev_info(dev, "%s value %u, using %08x\n",
prop->name, of_val, prop->values[j].reg_value);
reg_value |= prop->values[j].reg_value;
break;
}
}
if (j == prop->num_values) {
dev_err(dev, "DT property %s is not a valid value\n",
prop->name);
reg_value |= prop->def_value;
}
}
return reg_value;
}
static const struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),
};
static int imx8_sata_probe(struct device *dev, struct imx_ahci_priv *imxpriv)
{
struct resource *phy_res;
struct platform_device *pdev = imxpriv->ahci_pdev;
struct device_node *np = dev->of_node;
if (of_property_read_u32(np, "fsl,phy-imp", &imxpriv->imped_ratio))
imxpriv->imped_ratio = IMX8QM_SATA_PHY_IMPED_RATIO_85OHM;
phy_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
if (phy_res) {
imxpriv->phy_base = devm_ioremap(dev, phy_res->start,
resource_size(phy_res));
if (!imxpriv->phy_base) {
dev_err(dev, "error with ioremap\n");
return -ENOMEM;
}
} else {
dev_err(dev, "missing *phy* reg region.\n");
return -ENOMEM;
}
imxpriv->gpr =
syscon_regmap_lookup_by_phandle(np, "hsio");
if (IS_ERR(imxpriv->gpr)) {
dev_err(dev, "unable to find gpr registers\n");
return PTR_ERR(imxpriv->gpr);
}
imxpriv->epcs_tx_clk = devm_clk_get(dev, "epcs_tx");
if (IS_ERR(imxpriv->epcs_tx_clk)) {
dev_err(dev, "can't get epcs_tx_clk clock.\n");
return PTR_ERR(imxpriv->epcs_tx_clk);
}
imxpriv->epcs_rx_clk = devm_clk_get(dev, "epcs_rx");
if (IS_ERR(imxpriv->epcs_rx_clk)) {
dev_err(dev, "can't get epcs_rx_clk clock.\n");
return PTR_ERR(imxpriv->epcs_rx_clk);
}
imxpriv->phy_pclk0 = devm_clk_get(dev, "phy_pclk0");
if (IS_ERR(imxpriv->phy_pclk0)) {
dev_err(dev, "can't get phy_pclk0 clock.\n");
return PTR_ERR(imxpriv->phy_pclk0);
}
imxpriv->phy_pclk1 = devm_clk_get(dev, "phy_pclk1");
if (IS_ERR(imxpriv->phy_pclk1)) {
dev_err(dev, "can't get phy_pclk1 clock.\n");
return PTR_ERR(imxpriv->phy_pclk1);
}
imxpriv->phy_apbclk = devm_clk_get(dev, "phy_apbclk");
if (IS_ERR(imxpriv->phy_apbclk)) {
dev_err(dev, "can't get phy_apbclk clock.\n");
return PTR_ERR(imxpriv->phy_apbclk);
}
/* Fetch GPIO, then enable the external OSC */
imxpriv->clkreq_gpiod = devm_gpiod_get_optional(dev, "clkreq",
GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
if (IS_ERR(imxpriv->clkreq_gpiod))
return PTR_ERR(imxpriv->clkreq_gpiod);
if (imxpriv->clkreq_gpiod)
gpiod_set_consumer_name(imxpriv->clkreq_gpiod, "SATA CLKREQ");
return 0;
}
static int imx_ahci_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct of_device_id *of_id;
struct ahci_host_priv *hpriv;
struct imx_ahci_priv *imxpriv;
unsigned int reg_val;
int ret;
of_id = of_match_device(imx_ahci_of_match, dev);
if (!of_id)
return -EINVAL;
imxpriv = devm_kzalloc(dev, sizeof(*imxpriv), GFP_KERNEL);
if (!imxpriv)
return -ENOMEM;
imxpriv->ahci_pdev = pdev;
imxpriv->no_device = false;
imxpriv->first_time = true;
imxpriv->type = (unsigned long)of_id->data;
imxpriv->sata_clk = devm_clk_get(dev, "sata");
if (IS_ERR(imxpriv->sata_clk)) {
dev_err(dev, "can't get sata clock.\n");
return PTR_ERR(imxpriv->sata_clk);
}
imxpriv->sata_ref_clk = devm_clk_get(dev, "sata_ref");
if (IS_ERR(imxpriv->sata_ref_clk)) {
dev_err(dev, "can't get sata_ref clock.\n");
return PTR_ERR(imxpriv->sata_ref_clk);
}
imxpriv->ahb_clk = devm_clk_get(dev, "ahb");
if (IS_ERR(imxpriv->ahb_clk)) {
dev_err(dev, "can't get ahb clock.\n");
return PTR_ERR(imxpriv->ahb_clk);
}
if (imxpriv->type == AHCI_IMX6Q || imxpriv->type == AHCI_IMX6QP) {
u32 reg_value;
imxpriv->gpr = syscon_regmap_lookup_by_compatible(
"fsl,imx6q-iomuxc-gpr");
if (IS_ERR(imxpriv->gpr)) {
dev_err(dev,
"failed to find fsl,imx6q-iomux-gpr regmap\n");
return PTR_ERR(imxpriv->gpr);
}
reg_value = imx_ahci_parse_props(dev, gpr13_props,
ARRAY_SIZE(gpr13_props));
imxpriv->phy_params =
IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M |
IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_4F |
IMX6Q_GPR13_SATA_SPD_MODE_3P0G |
reg_value;
} else if (imxpriv->type == AHCI_IMX8QM) {
ret = imx8_sata_probe(dev, imxpriv);
if (ret)
return ret;
}
hpriv = ahci_platform_get_resources(pdev, 0);
if (IS_ERR(hpriv))
return PTR_ERR(hpriv);
hpriv->plat_data = imxpriv;
ret = clk_prepare_enable(imxpriv->sata_clk);
if (ret)
return ret;
if (imxpriv->type == AHCI_IMX53 &&
IS_ENABLED(CONFIG_HWMON)) {
/* Add the temperature monitor */
struct device *hwmon_dev;
hwmon_dev =
devm_hwmon_device_register_with_groups(dev,
"sata_ahci",
hpriv,
fsl_sata_ahci_groups);
if (IS_ERR(hwmon_dev)) {
ret = PTR_ERR(hwmon_dev);
goto disable_clk;
}
devm_thermal_of_zone_register(hwmon_dev, 0, hwmon_dev,
&fsl_sata_ahci_of_thermal_ops);
dev_info(dev, "%s: sensor 'sata_ahci'\n", dev_name(hwmon_dev));
}
ret = imx_sata_enable(hpriv);
if (ret)
goto disable_clk;
/*
* Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL,
* and IP vendor specific register IMX_TIMER1MS.
* Configure CAP_SSS (support staggered spin-up).
* Implement the port0.
* Get the ahb clock rate, and configure the TIMER1MS register.
*/
reg_val = readl(hpriv->mmio + HOST_CAP);
if (!(reg_val & HOST_CAP_SSS)) {
reg_val |= HOST_CAP_SSS;
writel(reg_val, hpriv->mmio + HOST_CAP);
}
reg_val = readl(hpriv->mmio + HOST_PORTS_IMPL);
if (!(reg_val & 0x1)) {
reg_val |= 0x1;
writel(reg_val, hpriv->mmio + HOST_PORTS_IMPL);
}
reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000;
writel(reg_val, hpriv->mmio + IMX_TIMER1MS);
ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info,
&ahci_platform_sht);
if (ret)
goto disable_sata;
return 0;
disable_sata:
imx_sata_disable(hpriv);
disable_clk:
clk_disable_unprepare(imxpriv->sata_clk);
return ret;
}
static void ahci_imx_host_stop(struct ata_host *host)
{
struct ahci_host_priv *hpriv = host->private_data;
struct imx_ahci_priv *imxpriv = hpriv->plat_data;
imx_sata_disable(hpriv);
clk_disable_unprepare(imxpriv->sata_clk);
}
#ifdef CONFIG_PM_SLEEP
static int imx_ahci_suspend(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
int ret;
ret = ahci_platform_suspend_host(dev);
if (ret)
return ret;
imx_sata_disable(hpriv);
return 0;
}
static int imx_ahci_resume(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
int ret;
ret = imx_sata_enable(hpriv);
if (ret)
return ret;
return ahci_platform_resume_host(dev);
}
#endif
static SIMPLE_DEV_PM_OPS(ahci_imx_pm_ops, imx_ahci_suspend, imx_ahci_resume);
static struct platform_driver imx_ahci_driver = {
.probe = imx_ahci_probe,
.remove_new = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.of_match_table = imx_ahci_of_match,
.pm = &ahci_imx_pm_ops,
},
};
module_platform_driver(imx_ahci_driver);
MODULE_DESCRIPTION("Freescale i.MX AHCI SATA platform driver");
MODULE_AUTHOR("Richard Zhu <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
| linux-master | drivers/ata/ahci_imx.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* libata-scsi.c - helper library for ATA
*
* Copyright 2003-2004 Red Hat, Inc. All rights reserved.
* Copyright 2003-2004 Jeff Garzik
*
* libata documentation is available via 'make {ps|pdf}docs',
* as Documentation/driver-api/libata.rst
*
* Hardware documentation available from
* - http://www.t10.org/
* - http://www.t13.org/
*/
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/uaccess.h>
#include <linux/suspend.h>
#include <asm/unaligned.h>
#include <linux/ioprio.h>
#include <linux/of.h>
#include "libata.h"
#include "libata-transport.h"
#define ATA_SCSI_RBUF_SIZE 2048
static DEFINE_SPINLOCK(ata_scsi_rbuf_lock);
static u8 ata_scsi_rbuf[ATA_SCSI_RBUF_SIZE];
typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc);
static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap,
const struct scsi_device *scsidev);
#define RW_RECOVERY_MPAGE 0x1
#define RW_RECOVERY_MPAGE_LEN 12
#define CACHE_MPAGE 0x8
#define CACHE_MPAGE_LEN 20
#define CONTROL_MPAGE 0xa
#define CONTROL_MPAGE_LEN 12
#define ALL_MPAGES 0x3f
#define ALL_SUB_MPAGES 0xff
#define CDL_T2A_SUB_MPAGE 0x07
#define CDL_T2B_SUB_MPAGE 0x08
#define CDL_T2_SUB_MPAGE_LEN 232
#define ATA_FEATURE_SUB_MPAGE 0xf2
#define ATA_FEATURE_SUB_MPAGE_LEN 16
static const u8 def_rw_recovery_mpage[RW_RECOVERY_MPAGE_LEN] = {
RW_RECOVERY_MPAGE,
RW_RECOVERY_MPAGE_LEN - 2,
(1 << 7), /* AWRE */
0, /* read retry count */
0, 0, 0, 0,
0, /* write retry count */
0, 0, 0
};
static const u8 def_cache_mpage[CACHE_MPAGE_LEN] = {
CACHE_MPAGE,
CACHE_MPAGE_LEN - 2,
0, /* contains WCE, needs to be 0 for logic */
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, /* contains DRA, needs to be 0 for logic */
0, 0, 0, 0, 0, 0, 0
};
static const u8 def_control_mpage[CONTROL_MPAGE_LEN] = {
CONTROL_MPAGE,
CONTROL_MPAGE_LEN - 2,
2, /* DSENSE=0, GLTSD=1 */
0, /* [QAM+QERR may be 1, see 05-359r1] */
0, 0, 0, 0, 0xff, 0xff,
0, 30 /* extended self test time, see 05-359r1 */
};
static ssize_t ata_scsi_park_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct scsi_device *sdev = to_scsi_device(device);
struct ata_port *ap;
struct ata_link *link;
struct ata_device *dev;
unsigned long now;
unsigned int msecs;
int rc = 0;
ap = ata_shost_to_port(sdev->host);
spin_lock_irq(ap->lock);
dev = ata_scsi_find_dev(ap, sdev);
if (!dev) {
rc = -ENODEV;
goto unlock;
}
if (dev->flags & ATA_DFLAG_NO_UNLOAD) {
rc = -EOPNOTSUPP;
goto unlock;
}
link = dev->link;
now = jiffies;
if (ap->pflags & ATA_PFLAG_EH_IN_PROGRESS &&
link->eh_context.unloaded_mask & (1 << dev->devno) &&
time_after(dev->unpark_deadline, now))
msecs = jiffies_to_msecs(dev->unpark_deadline - now);
else
msecs = 0;
unlock:
spin_unlock_irq(ap->lock);
return rc ? rc : sysfs_emit(buf, "%u\n", msecs);
}
static ssize_t ata_scsi_park_store(struct device *device,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct scsi_device *sdev = to_scsi_device(device);
struct ata_port *ap;
struct ata_device *dev;
int input;
unsigned long flags;
int rc;
rc = kstrtoint(buf, 10, &input);
if (rc)
return rc;
if (input < -2)
return -EINVAL;
if (input > ATA_TMOUT_MAX_PARK) {
rc = -EOVERFLOW;
input = ATA_TMOUT_MAX_PARK;
}
ap = ata_shost_to_port(sdev->host);
spin_lock_irqsave(ap->lock, flags);
dev = ata_scsi_find_dev(ap, sdev);
if (unlikely(!dev)) {
rc = -ENODEV;
goto unlock;
}
if (dev->class != ATA_DEV_ATA &&
dev->class != ATA_DEV_ZAC) {
rc = -EOPNOTSUPP;
goto unlock;
}
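/*
 * input >= 0 parks the heads for that many milliseconds; -1 allows
 * head unloading again, -2 forbids it (ATA_DFLAG_NO_UNLOAD).
 */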
if (input >= 0) {
if (dev->flags & ATA_DFLAG_NO_UNLOAD) {
rc = -EOPNOTSUPP;
goto unlock;
}
dev->unpark_deadline = ata_deadline(jiffies, input);
dev->link->eh_info.dev_action[dev->devno] |= ATA_EH_PARK;
ata_port_schedule_eh(ap);
complete(&ap->park_req_pending);
} else {
switch (input) {
case -1:
dev->flags &= ~ATA_DFLAG_NO_UNLOAD;
break;
case -2:
dev->flags |= ATA_DFLAG_NO_UNLOAD;
break;
}
}
unlock:
spin_unlock_irqrestore(ap->lock, flags);
return rc ? rc : len;
}
DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR,
ata_scsi_park_show, ata_scsi_park_store);
EXPORT_SYMBOL_GPL(dev_attr_unload_heads);
bool ata_scsi_sense_is_valid(u8 sk, u8 asc, u8 ascq)
{
/*
* If sk == NO_SENSE, and asc + ascq == NO ADDITIONAL SENSE INFORMATION,
* then there is no sense data to add.
*/
if (sk == 0 && asc == 0 && ascq == 0)
return false;
/* If sk > COMPLETED, sense data is bogus. */
if (sk > COMPLETED)
return false;
return true;
}
void ata_scsi_set_sense(struct ata_device *dev, struct scsi_cmnd *cmd,
u8 sk, u8 asc, u8 ascq)
{
bool d_sense = (dev->flags & ATA_DFLAG_D_SENSE);
scsi_build_sense(cmd, d_sense, sk, asc, ascq);
}
void ata_scsi_set_sense_information(struct ata_device *dev,
struct scsi_cmnd *cmd,
const struct ata_taskfile *tf)
{
u64 information;
information = ata_tf_read_block(tf, dev);
if (information == U64_MAX)
return;
scsi_set_sense_information(cmd->sense_buffer,
SCSI_SENSE_BUFFERSIZE, information);
}
static void ata_scsi_set_invalid_field(struct ata_device *dev,
struct scsi_cmnd *cmd, u16 field, u8 bit)
{
ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x24, 0x0);
/* "Invalid field in CDB" */
scsi_set_sense_field_pointer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
field, bit, 1);
}
static void ata_scsi_set_invalid_parameter(struct ata_device *dev,
struct scsi_cmnd *cmd, u16 field)
{
/* "Invalid field in parameter list" */
ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x26, 0x0);
scsi_set_sense_field_pointer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
field, 0xff, 0);
}
static struct attribute *ata_common_sdev_attrs[] = {
&dev_attr_unload_heads.attr,
NULL
};
static const struct attribute_group ata_common_sdev_attr_group = {
.attrs = ata_common_sdev_attrs
};
const struct attribute_group *ata_common_sdev_groups[] = {
&ata_common_sdev_attr_group,
NULL
};
EXPORT_SYMBOL_GPL(ata_common_sdev_groups);
/**
* ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd.
* @sdev: SCSI device for which BIOS geometry is to be determined
* @bdev: block device associated with @sdev
* @capacity: capacity of SCSI device
* @geom: location to which geometry will be output
*
* Generic bios head/sector/cylinder calculator
* used by sd. Most BIOSes nowadays expect an XXX/255/63 (CHS)
* mapping. Some situations may arise where the disk is not
* bootable if this is not used.
*
* LOCKING:
* Defined by the SCSI layer. We don't really care.
*
* RETURNS:
* Zero.
*/
int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev,
sector_t capacity, int geom[])
{
geom[0] = 255;
geom[1] = 63;
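/* Cylinders = capacity / (255 heads * 63 sectors) */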
sector_div(capacity, 255*63);
geom[2] = capacity;
return 0;
}
EXPORT_SYMBOL_GPL(ata_std_bios_param);
/**
* ata_scsi_unlock_native_capacity - unlock native capacity
* @sdev: SCSI device to adjust device capacity for
*
* This function is called if a partition on @sdev extends beyond
* the end of the device. It requests EH to unlock HPA.
*
* LOCKING:
* Defined by the SCSI layer. Might sleep.
*/
void ata_scsi_unlock_native_capacity(struct scsi_device *sdev)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct ata_device *dev;
unsigned long flags;
spin_lock_irqsave(ap->lock, flags);
dev = ata_scsi_find_dev(ap, sdev);
if (dev && dev->n_sectors < dev->n_native_sectors) {
dev->flags |= ATA_DFLAG_UNLOCK_HPA;
dev->link->eh_info.action |= ATA_EH_RESET;
ata_port_schedule_eh(ap);
}
spin_unlock_irqrestore(ap->lock, flags);
ata_port_wait_eh(ap);
}
EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
/**
* ata_get_identity - Handler for HDIO_GET_IDENTITY ioctl
* @ap: target port
* @sdev: SCSI device to get identify data for
* @arg: User buffer area for identify data
*
* LOCKING:
* Defined by the SCSI layer. We don't really care.
*
* RETURNS:
* Zero on success, negative errno on error.
*/
static int ata_get_identity(struct ata_port *ap, struct scsi_device *sdev,
void __user *arg)
{
struct ata_device *dev = ata_scsi_find_dev(ap, sdev);
u16 __user *dst = arg;
char buf[40];
if (!dev)
return -ENOMSG;
if (copy_to_user(dst, dev->id, ATA_ID_WORDS * sizeof(u16)))
return -EFAULT;
ata_id_string(dev->id, buf, ATA_ID_PROD, ATA_ID_PROD_LEN);
if (copy_to_user(dst + ATA_ID_PROD, buf, ATA_ID_PROD_LEN))
return -EFAULT;
ata_id_string(dev->id, buf, ATA_ID_FW_REV, ATA_ID_FW_REV_LEN);
if (copy_to_user(dst + ATA_ID_FW_REV, buf, ATA_ID_FW_REV_LEN))
return -EFAULT;
ata_id_string(dev->id, buf, ATA_ID_SERNO, ATA_ID_SERNO_LEN);
if (copy_to_user(dst + ATA_ID_SERNO, buf, ATA_ID_SERNO_LEN))
return -EFAULT;
return 0;
}
/**
* ata_cmd_ioctl - Handler for HDIO_DRIVE_CMD ioctl
* @scsidev: Device to which we are issuing command
* @arg: User provided data for issuing command
*
* LOCKING:
* Defined by the SCSI layer. We don't really care.
*
* RETURNS:
* Zero on success, negative errno on error.
*/
int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
{
int rc = 0;
u8 sensebuf[SCSI_SENSE_BUFFERSIZE];
u8 scsi_cmd[MAX_COMMAND_SIZE];
u8 args[4], *argbuf = NULL;
int argsize = 0;
struct scsi_sense_hdr sshdr;
const struct scsi_exec_args exec_args = {
.sshdr = &sshdr,
.sense = sensebuf,
.sense_len = sizeof(sensebuf),
};
int cmd_result;
if (arg == NULL)
return -EINVAL;
if (copy_from_user(args, arg, sizeof(args)))
return -EFAULT;
memset(sensebuf, 0, sizeof(sensebuf));
memset(scsi_cmd, 0, sizeof(scsi_cmd));
if (args[3]) {
argsize = ATA_SECT_SIZE * args[3];
argbuf = kmalloc(argsize, GFP_KERNEL);
if (argbuf == NULL) {
rc = -ENOMEM;
goto error;
}
scsi_cmd[1] = (4 << 1); /* PIO Data-in */
scsi_cmd[2] = 0x0e; /* no off.line or cc, read from dev,
block count in sector count field */
} else {
scsi_cmd[1] = (3 << 1); /* Non-data */
scsi_cmd[2] = 0x20; /* cc but no off.line or data xfer */
}
scsi_cmd[0] = ATA_16;
scsi_cmd[4] = args[2];
if (args[0] == ATA_CMD_SMART) { /* hack -- ide driver does this too */
scsi_cmd[6] = args[3];
scsi_cmd[8] = args[1];
scsi_cmd[10] = ATA_SMART_LBAM_PASS;
scsi_cmd[12] = ATA_SMART_LBAH_PASS;
} else {
scsi_cmd[6] = args[1];
}
scsi_cmd[14] = args[0];
/* Good values for timeout and retries? Values below
from scsi_ioctl_send_command() for default case... */
cmd_result = scsi_execute_cmd(scsidev, scsi_cmd, REQ_OP_DRV_IN, argbuf,
argsize, 10 * HZ, 5, &exec_args);
if (cmd_result < 0) {
rc = cmd_result;
goto error;
}
if (scsi_sense_valid(&sshdr)) {/* sense data available */
u8 *desc = sensebuf + 8;
/* If we set cc then ATA pass-through will cause a
* check condition even if no error. Filter that. */
if (scsi_status_is_check_condition(cmd_result)) {
if (sshdr.sense_key == RECOVERED_ERROR &&
sshdr.asc == 0 && sshdr.ascq == 0x1d)
cmd_result &= ~SAM_STAT_CHECK_CONDITION;
}
/* Send userspace a few ATA registers (same as drivers/ide) */
if (sensebuf[0] == 0x72 && /* format is "descriptor" */
desc[0] == 0x09) { /* code is "ATA Descriptor" */
args[0] = desc[13]; /* status */
args[1] = desc[3]; /* error */
args[2] = desc[5]; /* sector count (0:7) */
if (copy_to_user(arg, args, sizeof(args)))
rc = -EFAULT;
}
}
if (cmd_result) {
rc = -EIO;
goto error;
}
if ((argbuf)
&& copy_to_user(arg + sizeof(args), argbuf, argsize))
rc = -EFAULT;
error:
kfree(argbuf);
return rc;
}
/**
* ata_task_ioctl - Handler for HDIO_DRIVE_TASK ioctl
* @scsidev: Device to which we are issuing command
* @arg: User provided data for issuing command
*
* LOCKING:
* Defined by the SCSI layer. We don't really care.
*
* RETURNS:
* Zero on success, negative errno on error.
*/
int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
{
int rc = 0;
u8 sensebuf[SCSI_SENSE_BUFFERSIZE];
u8 scsi_cmd[MAX_COMMAND_SIZE];
u8 args[7];
struct scsi_sense_hdr sshdr;
int cmd_result;
const struct scsi_exec_args exec_args = {
.sshdr = &sshdr,
.sense = sensebuf,
.sense_len = sizeof(sensebuf),
};
if (arg == NULL)
return -EINVAL;
if (copy_from_user(args, arg, sizeof(args)))
return -EFAULT;
memset(sensebuf, 0, sizeof(sensebuf));
memset(scsi_cmd, 0, sizeof(scsi_cmd));
scsi_cmd[0] = ATA_16;
scsi_cmd[1] = (3 << 1); /* Non-data */
scsi_cmd[2] = 0x20; /* cc but no off.line or data xfer */
scsi_cmd[4] = args[1];
scsi_cmd[6] = args[2];
scsi_cmd[8] = args[3];
scsi_cmd[10] = args[4];
scsi_cmd[12] = args[5];
scsi_cmd[13] = args[6] & 0x4f;
scsi_cmd[14] = args[0];
/* Good values for timeout and retries? Values below
from scsi_ioctl_send_command() for default case... */
cmd_result = scsi_execute_cmd(scsidev, scsi_cmd, REQ_OP_DRV_IN, NULL,
0, 10 * HZ, 5, &exec_args);
if (cmd_result < 0) {
rc = cmd_result;
goto error;
}
if (scsi_sense_valid(&sshdr)) {/* sense data available */
u8 *desc = sensebuf + 8;
/* If we set cc then ATA pass-through will cause a
* check condition even if no error. Filter that. */
if (cmd_result & SAM_STAT_CHECK_CONDITION) {
if (sshdr.sense_key == RECOVERED_ERROR &&
sshdr.asc == 0 && sshdr.ascq == 0x1d)
cmd_result &= ~SAM_STAT_CHECK_CONDITION;
}
/* Send userspace ATA registers */
if (sensebuf[0] == 0x72 && /* format is "descriptor" */
desc[0] == 0x09) {/* code is "ATA Descriptor" */
args[0] = desc[13]; /* status */
args[1] = desc[3]; /* error */
args[2] = desc[5]; /* sector count (0:7) */
args[3] = desc[7]; /* lbal */
args[4] = desc[9]; /* lbam */
args[5] = desc[11]; /* lbah */
args[6] = desc[12]; /* select */
if (copy_to_user(arg, args, sizeof(args)))
rc = -EFAULT;
}
}
if (cmd_result) {
rc = -EIO;
goto error;
}
error:
return rc;
}
static bool ata_ioc32(struct ata_port *ap)
{
if (ap->flags & ATA_FLAG_PIO_DMA)
return true;
if (ap->pflags & ATA_PFLAG_PIO32)
return true;
return false;
}
/*
* This handles both native and compat commands, so anything added
* here must have a compatible argument, or check in_compat_syscall()
*/
int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev,
unsigned int cmd, void __user *arg)
{
unsigned long val;
int rc = -EINVAL;
unsigned long flags;
switch (cmd) {
case HDIO_GET_32BIT:
spin_lock_irqsave(ap->lock, flags);
val = ata_ioc32(ap);
spin_unlock_irqrestore(ap->lock, flags);
#ifdef CONFIG_COMPAT
if (in_compat_syscall())
return put_user(val, (compat_ulong_t __user *)arg);
#endif
return put_user(val, (unsigned long __user *)arg);
case HDIO_SET_32BIT:
val = (unsigned long) arg;
rc = 0;
spin_lock_irqsave(ap->lock, flags);
if (ap->pflags & ATA_PFLAG_PIO32CHANGE) {
if (val)
ap->pflags |= ATA_PFLAG_PIO32;
else
ap->pflags &= ~ATA_PFLAG_PIO32;
} else {
if (val != ata_ioc32(ap))
rc = -EINVAL;
}
spin_unlock_irqrestore(ap->lock, flags);
return rc;
case HDIO_GET_IDENTITY:
return ata_get_identity(ap, scsidev, arg);
case HDIO_DRIVE_CMD:
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
return -EACCES;
return ata_cmd_ioctl(scsidev, arg);
case HDIO_DRIVE_TASK:
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
return -EACCES;
return ata_task_ioctl(scsidev, arg);
default:
rc = -ENOTTY;
break;
}
return rc;
}
EXPORT_SYMBOL_GPL(ata_sas_scsi_ioctl);
int ata_scsi_ioctl(struct scsi_device *scsidev, unsigned int cmd,
void __user *arg)
{
return ata_sas_scsi_ioctl(ata_shost_to_port(scsidev->host),
scsidev, cmd, arg);
}
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
/**
* ata_scsi_qc_new - acquire new ata_queued_cmd reference
* @dev: ATA device to which the new command is attached
* @cmd: SCSI command that originated this ATA command
*
* Obtain a reference to an unused ata_queued_cmd structure,
* which is the basic libata structure representing a single
* ATA command sent to the hardware.
*
* If a command was available, fill in the SCSI-specific
* portions of the structure with information on the
* current command.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Command allocated, or %NULL if none available.
*/
static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
struct scsi_cmnd *cmd)
{
struct ata_port *ap = dev->link->ap;
struct ata_queued_cmd *qc;
int tag;
if (unlikely(ata_port_is_frozen(ap)))
goto fail;
if (ap->flags & ATA_FLAG_SAS_HOST) {
/*
* SAS hosts may queue > ATA_MAX_QUEUE commands, so use the
* unique per-device budget token as the tag.
*/
if (WARN_ON_ONCE(cmd->budget_token >= ATA_MAX_QUEUE))
goto fail;
tag = cmd->budget_token;
} else {
tag = scsi_cmd_to_rq(cmd)->tag;
}
qc = __ata_qc_from_tag(ap, tag);
qc->tag = qc->hw_tag = tag;
qc->ap = ap;
qc->dev = dev;
ata_qc_reinit(qc);
qc->scsicmd = cmd;
qc->scsidone = scsi_done;
qc->sg = scsi_sglist(cmd);
qc->n_elem = scsi_sg_count(cmd);
if (scsi_cmd_to_rq(cmd)->rq_flags & RQF_QUIET)
qc->flags |= ATA_QCFLAG_QUIET;
return qc;
fail:
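/* Complete the command as TASK SET FULL so the SCSI layer retries it */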
set_host_byte(cmd, DID_OK);
set_status_byte(cmd, SAM_STAT_TASK_SET_FULL);
scsi_done(cmd);
return NULL;
}
static void ata_qc_set_pc_nbytes(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *scmd = qc->scsicmd;
qc->extrabytes = scmd->extra_len;
qc->nbytes = scsi_bufflen(scmd) + qc->extrabytes;
}
/**
* ata_to_sense_error - convert ATA error to SCSI error
* @id: ATA device number
* @drv_stat: value contained in ATA status register
* @drv_err: value contained in ATA error register
* @sk: the sense key we'll fill out
* @asc: the additional sense code we'll fill out
* @ascq: the additional sense code qualifier we'll fill out
*
* Converts an ATA error into a SCSI error. Fill out pointers to
* SK, ASC, and ASCQ bytes for later use in fixed or descriptor
* format sense blocks.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk,
u8 *asc, u8 *ascq)
{
int i;
/* Based on the 3ware driver translation table */
static const unsigned char sense_table[][4] = {
/* BBD|ECC|ID|MAR */
{0xd1, ABORTED_COMMAND, 0x00, 0x00},
// Device busy Aborted command
/* BBD|ECC|ID */
{0xd0, ABORTED_COMMAND, 0x00, 0x00},
// Device busy Aborted command
/* ECC|MC|MARK */
{0x61, HARDWARE_ERROR, 0x00, 0x00},
// Device fault Hardware error
/* ICRC|ABRT */ /* NB: ICRC & !ABRT is BBD */
{0x84, ABORTED_COMMAND, 0x47, 0x00},
// Data CRC error SCSI parity error
/* MC|ID|ABRT|TRK0|MARK */
{0x37, NOT_READY, 0x04, 0x00},
// Unit offline Not ready
/* MCR|MARK */
{0x09, NOT_READY, 0x04, 0x00},
// Unrecovered disk error Not ready
/* Bad address mark */
{0x01, MEDIUM_ERROR, 0x13, 0x00},
// Address mark not found for data field
/* TRK0 - Track 0 not found */
{0x02, HARDWARE_ERROR, 0x00, 0x00},
// Hardware error
/* Abort: 0x04 is not translated here, see below */
/* Media change request */
{0x08, NOT_READY, 0x04, 0x00},
// FIXME: faking offline
/* SRV/IDNF - ID not found */
{0x10, ILLEGAL_REQUEST, 0x21, 0x00},
// Logical address out of range
/* MC - Media Changed */
{0x20, UNIT_ATTENTION, 0x28, 0x00},
// Not ready to ready change, medium may have changed
/* ECC - Uncorrectable ECC error */
{0x40, MEDIUM_ERROR, 0x11, 0x04},
// Unrecovered read error
/* BBD - block marked bad */
{0x80, MEDIUM_ERROR, 0x11, 0x04},
// Block marked bad Medium error, unrecovered read error
{0xFF, 0xFF, 0xFF, 0xFF}, // END mark
};
static const unsigned char stat_table[][4] = {
/* Must be first because BUSY means no other bits valid */
{0x80, ABORTED_COMMAND, 0x47, 0x00},
// Busy, fake parity for now
{0x40, ILLEGAL_REQUEST, 0x21, 0x04},
// Device ready, unaligned write command
{0x20, HARDWARE_ERROR, 0x44, 0x00},
// Device fault, internal target failure
{0x08, ABORTED_COMMAND, 0x47, 0x00},
// Timed out in xfer, fake parity for now
{0x04, RECOVERED_ERROR, 0x11, 0x00},
// Recovered ECC error Medium error, recovered
{0xFF, 0xFF, 0xFF, 0xFF}, // END mark
};
/*
* Is this an error we can process/parse
*/
if (drv_stat & ATA_BUSY) {
drv_err = 0; /* Ignore the err bits, they're invalid */
}
if (drv_err) {
/* Look for drv_err */
for (i = 0; sense_table[i][0] != 0xFF; i++) {
/* Look for best matches first */
if ((sense_table[i][0] & drv_err) ==
sense_table[i][0]) {
*sk = sense_table[i][1];
*asc = sense_table[i][2];
*ascq = sense_table[i][3];
return;
}
}
}
/*
* Fall back to interpreting status bits. Note that if the drv_err
* has only the ABRT bit set, we decode drv_stat. ABRT by itself
* is not descriptive enough.
*/
for (i = 0; stat_table[i][0] != 0xFF; i++) {
if (stat_table[i][0] & drv_stat) {
*sk = stat_table[i][1];
*asc = stat_table[i][2];
*ascq = stat_table[i][3];
return;
}
}
/*
* We need a sensible error return here, which is tricky, and one
* that won't cause people to do things like return a disk wrongly.
*/
*sk = ABORTED_COMMAND;
*asc = 0x00;
*ascq = 0x00;
}
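/*
* Worked example of the lookup above: drv_err = 0x40 (uncorrectable ECC)
* matches the 0x40 sense_table entry and is reported as MEDIUM_ERROR /
* ASC 0x11 / ASCQ 0x04 (unrecovered read error), while a device stuck with
* ATA_BUSY set has its error register ignored and falls through to the
* stat_table, yielding ABORTED_COMMAND / ASC 0x47 (fake parity error).
*/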
/*
* ata_gen_passthru_sense - Generate check condition sense block.
* @qc: Command that completed.
*
* This function is specific to the ATA descriptor format sense
* block specified for the ATA pass through commands. Regardless
* of whether the command errored or not, return a sense
* block. Copy all controller registers into the sense
* block. If there was no error, we get the request from an ATA
* passthrough command, so we use the following sense data:
* sk = RECOVERED ERROR
* asc,ascq = ATA PASS-THROUGH INFORMATION AVAILABLE
*
* LOCKING:
* None.
*/
static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *cmd = qc->scsicmd;
struct ata_taskfile *tf = &qc->result_tf;
unsigned char *sb = cmd->sense_buffer;
unsigned char *desc = sb + 8;
u8 sense_key, asc, ascq;
memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
/*
* Use ata_to_sense_error() to map status register bits
* onto sense key, asc & ascq.
*/
if (qc->err_mask ||
tf->status & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
ata_to_sense_error(qc->ap->print_id, tf->status, tf->error,
&sense_key, &asc, &ascq);
ata_scsi_set_sense(qc->dev, cmd, sense_key, asc, ascq);
} else {
/*
* ATA PASS-THROUGH INFORMATION AVAILABLE
* Always in descriptor format sense.
*/
scsi_build_sense(cmd, 1, RECOVERED_ERROR, 0, 0x1D);
}
if ((cmd->sense_buffer[0] & 0x7f) >= 0x72) {
u8 len;
/* descriptor format */
len = sb[7];
desc = (char *)scsi_sense_desc_find(sb, len + 8, 9);
if (!desc) {
if (SCSI_SENSE_BUFFERSIZE < len + 14)
return;
sb[7] = len + 14;
desc = sb + 8 + len;
}
desc[0] = 9;
desc[1] = 12;
/*
* Copy registers into sense buffer.
*/
desc[2] = 0x00;
desc[3] = tf->error;
desc[5] = tf->nsect;
desc[7] = tf->lbal;
desc[9] = tf->lbam;
desc[11] = tf->lbah;
desc[12] = tf->device;
desc[13] = tf->status;
/*
* Fill in Extend bit, and the high order bytes
* if applicable.
*/
if (tf->flags & ATA_TFLAG_LBA48) {
desc[2] |= 0x01;
desc[4] = tf->hob_nsect;
desc[6] = tf->hob_lbal;
desc[8] = tf->hob_lbam;
desc[10] = tf->hob_lbah;
}
} else {
/* Fixed sense format */
desc[0] = tf->error;
desc[1] = tf->status;
desc[2] = tf->device;
desc[3] = tf->nsect;
desc[7] = 0;
if (tf->flags & ATA_TFLAG_LBA48) {
desc[8] |= 0x80;
if (tf->hob_nsect)
desc[8] |= 0x40;
if (tf->hob_lbal || tf->hob_lbam || tf->hob_lbah)
desc[8] |= 0x20;
}
desc[9] = tf->lbal;
desc[10] = tf->lbam;
desc[11] = tf->lbah;
}
}
/**
* ata_gen_ata_sense - generate a SCSI fixed sense block
* @qc: Command that we are erroring out
*
* Generate sense block for a failed ATA command @qc. Descriptor
* format is used to accommodate LBA48 block address.
*
* LOCKING:
* None.
*/
static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
{
struct ata_device *dev = qc->dev;
struct scsi_cmnd *cmd = qc->scsicmd;
struct ata_taskfile *tf = &qc->result_tf;
unsigned char *sb = cmd->sense_buffer;
u64 block;
u8 sense_key, asc, ascq;
memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
if (ata_dev_disabled(dev)) {
/* Device disabled after error recovery */
/* LOGICAL UNIT NOT READY, HARD RESET REQUIRED */
ata_scsi_set_sense(dev, cmd, NOT_READY, 0x04, 0x21);
return;
}
/* Use ata_to_sense_error() to map status register bits
* onto sense key, asc & ascq.
*/
if (qc->err_mask ||
tf->status & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
ata_to_sense_error(qc->ap->print_id, tf->status, tf->error,
&sense_key, &asc, &ascq);
ata_scsi_set_sense(dev, cmd, sense_key, asc, ascq);
} else {
/* Could not decode error */
ata_dev_warn(dev, "could not decode error status 0x%x err_mask 0x%x\n",
tf->status, qc->err_mask);
ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0);
return;
}
block = ata_tf_read_block(&qc->result_tf, dev);
if (block == U64_MAX)
return;
scsi_set_sense_information(sb, SCSI_SENSE_BUFFERSIZE, block);
}
void ata_scsi_sdev_config(struct scsi_device *sdev)
{
sdev->use_10_for_rw = 1;
sdev->use_10_for_ms = 1;
sdev->no_write_same = 1;
/* Schedule policy is determined by ->qc_defer() callback and
* it needs to see every deferred qc. Set dev_blocked to 1 to
* prevent SCSI midlayer from automatically deferring
* requests.
*/
sdev->max_device_blocked = 1;
}
/**
* ata_scsi_dma_need_drain - Check whether data transfer may overflow
* @rq: request to be checked
*
* ATAPI commands which transfer variable length data to host
* might overflow due to application error or hardware bug. This
* function checks whether overflow should be drained and ignored
* for @rq.
*
* LOCKING:
* None.
*
* RETURNS:
* 1 if overflow should be drained and ignored; otherwise, 0.
*/
bool ata_scsi_dma_need_drain(struct request *rq)
{
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
return atapi_cmd_type(scmd->cmnd[0]) == ATAPI_MISC;
}
EXPORT_SYMBOL_GPL(ata_scsi_dma_need_drain);
int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
{
struct request_queue *q = sdev->request_queue;
int depth = 1;
if (!ata_id_has_unload(dev->id))
dev->flags |= ATA_DFLAG_NO_UNLOAD;
/* configure max sectors */
dev->max_sectors = min(dev->max_sectors, sdev->host->max_sectors);
blk_queue_max_hw_sectors(q, dev->max_sectors);
if (dev->class == ATA_DEV_ATAPI) {
sdev->sector_size = ATA_SECT_SIZE;
/* set DMA padding */
blk_queue_update_dma_pad(q, ATA_DMA_PAD_SZ - 1);
/* make room for appending the drain */
blk_queue_max_segments(q, queue_max_segments(q) - 1);
sdev->dma_drain_len = ATAPI_MAX_DRAIN;
sdev->dma_drain_buf = kmalloc(sdev->dma_drain_len, GFP_NOIO);
if (!sdev->dma_drain_buf) {
ata_dev_err(dev, "drain buffer allocation failed\n");
return -ENOMEM;
}
} else {
sdev->sector_size = ata_id_logical_sector_size(dev->id);
/*
* Stop the drive on suspend but do not issue START STOP UNIT
* on resume as this is not necessary and may fail: the device
* will be woken up by ata_port_pm_resume() with a port reset
* and device revalidation.
*/
sdev->manage_start_stop = 1;
sdev->no_start_on_resume = 1;
}
/*
* ata_pio_sectors() expects buffer for each sector to not cross
* page boundary. Enforce it by requiring buffers to be sector
* aligned, which works iff sector_size is not larger than
* PAGE_SIZE. ATAPI devices also need the alignment as
* IDENTIFY_PACKET is executed as ATA_PROT_PIO.
*/
if (sdev->sector_size > PAGE_SIZE)
ata_dev_warn(dev,
"sector_size=%u > PAGE_SIZE, PIO may malfunction\n",
sdev->sector_size);
blk_queue_update_dma_alignment(q, sdev->sector_size - 1);
if (dev->flags & ATA_DFLAG_AN)
set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events);
if (ata_ncq_supported(dev))
depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
depth = min(ATA_MAX_QUEUE, depth);
scsi_change_queue_depth(sdev, depth);
if (dev->flags & ATA_DFLAG_TRUSTED)
sdev->security_supported = 1;
dev->sdev = sdev;
return 0;
}
/**
* ata_scsi_slave_config - Set SCSI device attributes
* @sdev: SCSI device to examine
*
* This is called before we actually start reading
* and writing to the device, to configure certain
* SCSI mid-layer behaviors.
*
* LOCKING:
* Defined by SCSI layer. We don't really care.
*/
int ata_scsi_slave_config(struct scsi_device *sdev)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
int rc = 0;
ata_scsi_sdev_config(sdev);
if (dev)
rc = ata_scsi_dev_config(sdev, dev);
return rc;
}
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
/**
* ata_scsi_slave_destroy - SCSI device is about to be destroyed
* @sdev: SCSI device to be destroyed
*
* @sdev is about to be destroyed for hot/warm unplugging. If
* this unplugging was initiated by libata as indicated by NULL
* dev->sdev, this function doesn't have to do anything.
* Otherwise, SCSI layer initiated warm-unplug is in progress.
* Clear dev->sdev, schedule the device for ATA detach and invoke
* EH.
*
* LOCKING:
* Defined by SCSI layer. We don't really care.
*/
void ata_scsi_slave_destroy(struct scsi_device *sdev)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
unsigned long flags;
struct ata_device *dev;
spin_lock_irqsave(ap->lock, flags);
dev = __ata_scsi_find_dev(ap, sdev);
if (dev && dev->sdev) {
/* SCSI device already in CANCEL state, no need to offline it */
dev->sdev = NULL;
dev->flags |= ATA_DFLAG_DETACH;
ata_port_schedule_eh(ap);
}
spin_unlock_irqrestore(ap->lock, flags);
kfree(sdev->dma_drain_buf);
}
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
/**
* ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
* @qc: Storage for translated ATA taskfile
*
* Sets up an ATA taskfile to issue STANDBY (to stop) or READ VERIFY
* (to start). Perhaps these commands should be preceded by
* CHECK POWER MODE to see what power mode the device is already in.
* [See SAT revision 5 at www.t10.org]
*
* LOCKING:
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Zero on success, non-zero on error.
*/
static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *scmd = qc->scsicmd;
struct ata_taskfile *tf = &qc->tf;
const u8 *cdb = scmd->cmnd;
u16 fp;
u8 bp = 0xff;
if (scmd->cmd_len < 5) {
fp = 4;
goto invalid_fld;
}
tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
tf->protocol = ATA_PROT_NODATA;
if (cdb[1] & 0x1) {
; /* ignore IMMED bit, violates sat-r05 */
}
if (cdb[4] & 0x2) {
fp = 4;
bp = 1;
goto invalid_fld; /* LOEJ bit set not supported */
}
if (((cdb[4] >> 4) & 0xf) != 0) {
fp = 4;
bp = 3;
goto invalid_fld; /* power conditions not supported */
}
if (cdb[4] & 0x1) {
tf->nsect = 1; /* 1 sector, lba=0 */
if (qc->dev->flags & ATA_DFLAG_LBA) {
tf->flags |= ATA_TFLAG_LBA;
tf->lbah = 0x0;
tf->lbam = 0x0;
tf->lbal = 0x0;
tf->device |= ATA_LBA;
} else {
/* CHS */
tf->lbal = 0x1; /* sect */
tf->lbam = 0x0; /* cyl low */
tf->lbah = 0x0; /* cyl high */
}
tf->command = ATA_CMD_VERIFY; /* READ VERIFY */
} else {
/* Some odd clown BIOSen issue spindown on power off (ACPI S4
* or S5) causing some drives to spin up and down again.
*/
if ((qc->ap->flags & ATA_FLAG_NO_POWEROFF_SPINDOWN) &&
system_state == SYSTEM_POWER_OFF)
goto skip;
if ((qc->ap->flags & ATA_FLAG_NO_HIBERNATE_SPINDOWN) &&
system_entering_hibernation())
goto skip;
/* Issue ATA STANDBY IMMEDIATE command */
tf->command = ATA_CMD_STANDBYNOW1;
}
/*
* Standby and Idle condition timers could be implemented but that
* would require libata to implement the Power condition mode page
* and allow the user to change it. Changing mode pages requires
* MODE SELECT to be implemented.
*/
return 0;
invalid_fld:
ata_scsi_set_invalid_field(qc->dev, scmd, fp, bp);
return 1;
skip:
scmd->result = SAM_STAT_GOOD;
return 1;
}
/**
* ata_scsi_flush_xlat - Translate SCSI SYNCHRONIZE CACHE command
* @qc: Storage for translated ATA taskfile
*
* Sets up an ATA taskfile to issue FLUSH CACHE or
* FLUSH CACHE EXT.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Zero on success, non-zero on error.
*/
static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc)
{
struct ata_taskfile *tf = &qc->tf;
tf->flags |= ATA_TFLAG_DEVICE;
tf->protocol = ATA_PROT_NODATA;
if (qc->dev->flags & ATA_DFLAG_FLUSH_EXT)
tf->command = ATA_CMD_FLUSH_EXT;
else
tf->command = ATA_CMD_FLUSH;
/* flush is critical for IO integrity, consider it an IO command */
qc->flags |= ATA_QCFLAG_IO;
return 0;
}
/**
* scsi_6_lba_len - Get LBA and transfer length
* @cdb: SCSI command to translate
*
* Calculate LBA and transfer length for 6-byte commands.
*
* RETURNS:
* @plba: the LBA
* @plen: the transfer length
*/
static void scsi_6_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
{
u64 lba = 0;
u32 len;
lba |= ((u64)(cdb[1] & 0x1f)) << 16;
lba |= ((u64)cdb[2]) << 8;
lba |= ((u64)cdb[3]);
len = cdb[4];
*plba = lba;
*plen = len;
}
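/*
* Example of the 6-byte decode above: a READ(6) CDB of
* 08h 01h 02h 03h 10h 00h yields lba = 0x010203 (only the low five bits
* of byte 1 contribute) and len = 0x10 (16 blocks).
*/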
/**
* scsi_10_lba_len - Get LBA and transfer length
* @cdb: SCSI command to translate
*
* Calculate LBA and transfer length for 10-byte commands.
*
* RETURNS:
* @plba: the LBA
* @plen: the transfer length
*/
static inline void scsi_10_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
{
*plba = get_unaligned_be32(&cdb[2]);
*plen = get_unaligned_be16(&cdb[7]);
}
/**
* scsi_16_lba_len - Get LBA and transfer length
* @cdb: SCSI command to translate
*
* Calculate LBA and transfer length for 16-byte commands.
*
* RETURNS:
* @plba: the LBA
* @plen: the transfer length
*/
static inline void scsi_16_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
{
*plba = get_unaligned_be64(&cdb[2]);
*plen = get_unaligned_be32(&cdb[10]);
}
/**
* scsi_dld - Get duration limit descriptor index
* @cdb: SCSI command to translate
*
* Returns the dld bits indicating the index of a command duration limit
* descriptor.
*/
static inline int scsi_dld(const u8 *cdb)
{
return ((cdb[1] & 0x01) << 2) | ((cdb[14] >> 6) & 0x03);
}
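/*
* Example: with bit 0 of cdb[1] set and bits 7:6 of cdb[14] equal to 10b,
* the returned duration limit descriptor index is 100b | 010b = 6.
*/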
/**
* ata_scsi_verify_xlat - Translate SCSI VERIFY command into an ATA one
* @qc: Storage for translated ATA taskfile
*
* Converts SCSI VERIFY command to an ATA READ VERIFY command.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Zero on success, non-zero on error.
*/
static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *scmd = qc->scsicmd;
struct ata_taskfile *tf = &qc->tf;
struct ata_device *dev = qc->dev;
u64 dev_sectors = qc->dev->n_sectors;
const u8 *cdb = scmd->cmnd;
u64 block;
u32 n_block;
u16 fp;
tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
tf->protocol = ATA_PROT_NODATA;
switch (cdb[0]) {
case VERIFY:
if (scmd->cmd_len < 10) {
fp = 9;
goto invalid_fld;
}
scsi_10_lba_len(cdb, &block, &n_block);
break;
case VERIFY_16:
if (scmd->cmd_len < 16) {
fp = 15;
goto invalid_fld;
}
scsi_16_lba_len(cdb, &block, &n_block);
break;
default:
fp = 0;
goto invalid_fld;
}
if (!n_block)
goto nothing_to_do;
if (block >= dev_sectors)
goto out_of_range;
if ((block + n_block) > dev_sectors)
goto out_of_range;
if (dev->flags & ATA_DFLAG_LBA) {
tf->flags |= ATA_TFLAG_LBA;
if (lba_28_ok(block, n_block)) {
/* use LBA28 */
tf->command = ATA_CMD_VERIFY;
tf->device |= (block >> 24) & 0xf;
} else if (lba_48_ok(block, n_block)) {
if (!(dev->flags & ATA_DFLAG_LBA48))
goto out_of_range;
/* use LBA48 */
tf->flags |= ATA_TFLAG_LBA48;
tf->command = ATA_CMD_VERIFY_EXT;
tf->hob_nsect = (n_block >> 8) & 0xff;
tf->hob_lbah = (block >> 40) & 0xff;
tf->hob_lbam = (block >> 32) & 0xff;
tf->hob_lbal = (block >> 24) & 0xff;
} else
/* request too large even for LBA48 */
goto out_of_range;
tf->nsect = n_block & 0xff;
tf->lbah = (block >> 16) & 0xff;
tf->lbam = (block >> 8) & 0xff;
tf->lbal = block & 0xff;
tf->device |= ATA_LBA;
} else {
/* CHS */
u32 sect, head, cyl, track;
if (!lba_28_ok(block, n_block))
goto out_of_range;
/* Convert LBA to CHS */
track = (u32)block / dev->sectors;
cyl = track / dev->heads;
head = track % dev->heads;
sect = (u32)block % dev->sectors + 1;
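/* Example: with dev->sectors = 63 and dev->heads = 16, block 5000
* converts to track 79, i.e. cyl 4, head 15, sect 24 (5000 % 63 + 1).
*/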
/* Check whether the converted CHS can fit.
Cylinder: 0-65535
Head: 0-15
Sector: 1-255*/
if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
goto out_of_range;
tf->command = ATA_CMD_VERIFY;
tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
tf->lbal = sect;
tf->lbam = cyl;
tf->lbah = cyl >> 8;
tf->device |= head;
}
return 0;
invalid_fld:
ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff);
return 1;
out_of_range:
ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x0);
/* "Logical Block Address out of range" */
return 1;
nothing_to_do:
scmd->result = SAM_STAT_GOOD;
return 1;
}
static bool ata_check_nblocks(struct scsi_cmnd *scmd, u32 n_blocks)
{
struct request *rq = scsi_cmd_to_rq(scmd);
u32 req_blocks;
if (!blk_rq_is_passthrough(rq))
return true;
req_blocks = blk_rq_bytes(rq) / scmd->device->sector_size;
if (n_blocks > req_blocks)
return false;
return true;
}
/**
* ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one
* @qc: Storage for translated ATA taskfile
*
* Converts any of six SCSI read/write commands into the
* ATA counterpart, including starting sector (LBA),
* sector count, and taking into account the device's LBA48
* support.
*
* Commands %READ_6, %READ_10, %READ_16, %WRITE_6, %WRITE_10, and
* %WRITE_16 are currently supported.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Zero on success, non-zero on error.
*/
static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *scmd = qc->scsicmd;
const u8 *cdb = scmd->cmnd;
struct request *rq = scsi_cmd_to_rq(scmd);
int class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
unsigned int tf_flags = 0;
int dld = 0;
u64 block;
u32 n_block;
int rc;
u16 fp = 0;
switch (cdb[0]) {
case WRITE_6:
case WRITE_10:
case WRITE_16:
tf_flags |= ATA_TFLAG_WRITE;
break;
}
/* Calculate the SCSI LBA, transfer length and FUA. */
switch (cdb[0]) {
case READ_10:
case WRITE_10:
if (unlikely(scmd->cmd_len < 10)) {
fp = 9;
goto invalid_fld;
}
scsi_10_lba_len(cdb, &block, &n_block);
if (cdb[1] & (1 << 3))
tf_flags |= ATA_TFLAG_FUA;
if (!ata_check_nblocks(scmd, n_block))
goto invalid_fld;
break;
case READ_6:
case WRITE_6:
if (unlikely(scmd->cmd_len < 6)) {
fp = 5;
goto invalid_fld;
}
scsi_6_lba_len(cdb, &block, &n_block);
/* for 6-byte r/w commands, transfer length 0
* means 256 blocks of data, not 0 block.
*/
if (!n_block)
n_block = 256;
if (!ata_check_nblocks(scmd, n_block))
goto invalid_fld;
break;
case READ_16:
case WRITE_16:
if (unlikely(scmd->cmd_len < 16)) {
fp = 15;
goto invalid_fld;
}
scsi_16_lba_len(cdb, &block, &n_block);
dld = scsi_dld(cdb);
if (cdb[1] & (1 << 3))
tf_flags |= ATA_TFLAG_FUA;
if (!ata_check_nblocks(scmd, n_block))
goto invalid_fld;
break;
default:
fp = 0;
goto invalid_fld;
}
/* Check and compose ATA command */
if (!n_block)
/* For 10-byte and 16-byte SCSI R/W commands, transfer
* length 0 means transfer 0 block of data.
* However, for ATA R/W commands, sector count 0 means
* 256 or 65536 sectors, not 0 sectors as in SCSI.
*
* WARNING: one or two older ATA drives treat 0 as 0...
*/
goto nothing_to_do;
qc->flags |= ATA_QCFLAG_IO;
qc->nbytes = n_block * scmd->device->sector_size;
rc = ata_build_rw_tf(qc, block, n_block, tf_flags, dld, class);
if (likely(rc == 0))
return 0;
if (rc == -ERANGE)
goto out_of_range;
/* treat all other errors as -EINVAL, fall through */
invalid_fld:
ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff);
return 1;
out_of_range:
ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x0);
/* "Logical Block Address out of range" */
return 1;
nothing_to_do:
scmd->result = SAM_STAT_GOOD;
return 1;
}
static void ata_qc_done(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *cmd = qc->scsicmd;
void (*done)(struct scsi_cmnd *) = qc->scsidone;
ata_qc_free(qc);
done(cmd);
}
static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *cmd = qc->scsicmd;
u8 *cdb = cmd->cmnd;
int need_sense = (qc->err_mask != 0) &&
!(qc->flags & ATA_QCFLAG_SENSE_VALID);
/* For ATA pass thru (SAT) commands, generate a sense block if
* user mandated it or if there's an error. Note that if we
* generate because the user forced us to [CK_COND =1], a check
* condition is generated and the ATA register values are returned
* whether the command completed successfully or not. If there
* was no error, we use the following sense data:
* sk = RECOVERED ERROR
* asc,ascq = ATA PASS-THROUGH INFORMATION AVAILABLE
*/
if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
((cdb[2] & 0x20) || need_sense))
ata_gen_passthru_sense(qc);
else if (need_sense)
ata_gen_ata_sense(qc);
else
/* Keep the SCSI ML and status byte, clear host byte. */
cmd->result &= 0x0000ffff;
ata_qc_done(qc);
}
/**
* ata_scsi_translate - Translate then issue SCSI command to ATA device
* @dev: ATA device to which the command is addressed
* @cmd: SCSI command to execute
* @xlat_func: Actor which translates @cmd to an ATA taskfile
*
* Our ->queuecommand() function has decided that the SCSI
* command issued can be directly translated into an ATA
* command, rather than handled internally.
*
* This function sets up an ata_queued_cmd structure for the
* SCSI command, and sends that ata_queued_cmd to the hardware.
*
* The xlat_func argument (actor) returns 0 if ready to execute
* ATA command, else 1 to finish translation. If 1 is returned
* then cmd->result (and possibly cmd->sense_buffer) are assumed
* to be set reflecting an error condition or clean (early)
* termination.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*
* RETURNS:
* 0 on success, SCSI_MLQUEUE_DEVICE_BUSY or SCSI_MLQUEUE_HOST_BUSY if the
* command needs to be deferred.
*/
static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
ata_xlat_func_t xlat_func)
{
struct ata_port *ap = dev->link->ap;
struct ata_queued_cmd *qc;
int rc;
qc = ata_scsi_qc_new(dev, cmd);
if (!qc)
goto err_mem;
/* data is present; dma-map it */
if (cmd->sc_data_direction == DMA_FROM_DEVICE ||
cmd->sc_data_direction == DMA_TO_DEVICE) {
if (unlikely(scsi_bufflen(cmd) < 1)) {
ata_dev_warn(dev, "WARNING: zero len r/w req\n");
goto err_did;
}
ata_sg_init(qc, scsi_sglist(cmd), scsi_sg_count(cmd));
qc->dma_dir = cmd->sc_data_direction;
}
qc->complete_fn = ata_scsi_qc_complete;
if (xlat_func(qc))
goto early_finish;
if (ap->ops->qc_defer) {
if ((rc = ap->ops->qc_defer(qc)))
goto defer;
}
/* select device, send command to hardware */
ata_qc_issue(qc);
return 0;
early_finish:
ata_qc_free(qc);
scsi_done(cmd);
return 0;
err_did:
ata_qc_free(qc);
cmd->result = (DID_ERROR << 16);
scsi_done(cmd);
err_mem:
return 0;
defer:
ata_qc_free(qc);
if (rc == ATA_DEFER_LINK)
return SCSI_MLQUEUE_DEVICE_BUSY;
else
return SCSI_MLQUEUE_HOST_BUSY;
}
struct ata_scsi_args {
struct ata_device *dev;
u16 *id;
struct scsi_cmnd *cmd;
};
/**
* ata_scsi_rbuf_fill - wrapper for SCSI command simulators
* @args: device IDENTIFY data / SCSI command of interest.
* @actor: Callback hook for desired SCSI command simulator
*
* Takes care of the hard work of simulating a SCSI command...
* Mapping the response buffer, calling the command's handler,
* and handling the handler's return value. This return value
* indicates whether the handler wishes the SCSI command to be
* completed successfully (0), or not (in which case cmd->result
* and sense buffer are assumed to be set).
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
static void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
unsigned int (*actor)(struct ata_scsi_args *args, u8 *rbuf))
{
unsigned int rc;
struct scsi_cmnd *cmd = args->cmd;
unsigned long flags;
spin_lock_irqsave(&ata_scsi_rbuf_lock, flags);
memset(ata_scsi_rbuf, 0, ATA_SCSI_RBUF_SIZE);
rc = actor(args, ata_scsi_rbuf);
if (rc == 0)
sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
ata_scsi_rbuf, ATA_SCSI_RBUF_SIZE);
spin_unlock_irqrestore(&ata_scsi_rbuf_lock, flags);
if (rc == 0)
cmd->result = SAM_STAT_GOOD;
}
/**
* ata_scsiop_inq_std - Simulate INQUIRY command
* @args: device IDENTIFY data / SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Returns standard device identification data associated
* with non-VPD INQUIRY command output.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
{
static const u8 versions[] = {
0x00,
0x60, /* SAM-3 (no version claimed) */
0x03,
0x20, /* SBC-2 (no version claimed) */
0x03,
0x00 /* SPC-3 (no version claimed) */
};
static const u8 versions_zbc[] = {
0x00,
0xA0, /* SAM-5 (no version claimed) */
0x06,
0x00, /* SBC-4 (no version claimed) */
0x05,
0xC0, /* SPC-5 (no version claimed) */
0x60,
0x24, /* ZBC r05 */
};
u8 hdr[] = {
TYPE_DISK,
0,
0x5, /* claim SPC-3 version compatibility */
2,
95 - 4,
0,
0,
2
};
/* set scsi removable (RMB) bit per ata bit, or if the
* AHCI port says it's external (Hotplug-capable, eSATA).
*/
if (ata_id_removable(args->id) ||
(args->dev->link->ap->pflags & ATA_PFLAG_EXTERNAL))
hdr[1] |= (1 << 7);
if (args->dev->class == ATA_DEV_ZAC) {
hdr[0] = TYPE_ZBC;
hdr[2] = 0x7; /* claim SPC-5 version compatibility */
}
memcpy(rbuf, hdr, sizeof(hdr));
memcpy(&rbuf[8], "ATA ", 8);
ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16);
/* From SAT, use last 2 words from fw rev unless they are spaces */
ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV + 2, 4);
if (strncmp(&rbuf[32], "    ", 4) == 0)
ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4);
if (rbuf[32] == 0 || rbuf[32] == ' ')
memcpy(&rbuf[32], "n/a ", 4);
if (ata_id_zoned_cap(args->id) || args->dev->class == ATA_DEV_ZAC)
memcpy(rbuf + 58, versions_zbc, sizeof(versions_zbc));
else
memcpy(rbuf + 58, versions, sizeof(versions));
return 0;
}
/**
* ata_scsiop_inq_00 - Simulate INQUIRY VPD page 0, list of pages
* @args: device IDENTIFY data / SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Returns list of inquiry VPD pages available.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
{
int i, num_pages = 0;
static const u8 pages[] = {
0x00, /* page 0x00, this page */
0x80, /* page 0x80, unit serial no page */
0x83, /* page 0x83, device ident page */
0x89, /* page 0x89, ata info page */
0xb0, /* page 0xb0, block limits page */
0xb1, /* page 0xb1, block device characteristics page */
0xb2, /* page 0xb2, thin provisioning page */
0xb6, /* page 0xb6, zoned block device characteristics */
0xb9, /* page 0xb9, concurrent positioning ranges */
};
for (i = 0; i < sizeof(pages); i++) {
if (pages[i] == 0xb6 &&
!(args->dev->flags & ATA_DFLAG_ZAC))
continue;
rbuf[num_pages + 4] = pages[i];
num_pages++;
}
rbuf[3] = num_pages; /* number of supported VPD pages */
return 0;
}
/**
* ata_scsiop_inq_80 - Simulate INQUIRY VPD page 80, device serial number
* @args: device IDENTIFY data / SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Returns ATA device serial number.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
static unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf)
{
static const u8 hdr[] = {
0,
0x80, /* this page code */
0,
ATA_ID_SERNO_LEN, /* page len */
};
memcpy(rbuf, hdr, sizeof(hdr));
ata_id_string(args->id, (unsigned char *) &rbuf[4],
ATA_ID_SERNO, ATA_ID_SERNO_LEN);
return 0;
}
/**
* ata_scsiop_inq_83 - Simulate INQUIRY VPD page 83, device identity
* @args: device IDENTIFY data / SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Yields two logical unit device identification designators:
* - vendor specific ASCII containing the ATA serial number
* - SAT defined "t10 vendor id based" containing ASCII vendor
* name ("ATA "), model and serial numbers.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
static unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf)
{
const int sat_model_serial_desc_len = 68;
int num;
rbuf[1] = 0x83; /* this page code */
num = 4;
/* piv=0, assoc=lu, code_set=ASCII, designator=vendor */
rbuf[num + 0] = 2;
rbuf[num + 3] = ATA_ID_SERNO_LEN;
num += 4;
ata_id_string(args->id, (unsigned char *) rbuf + num,
ATA_ID_SERNO, ATA_ID_SERNO_LEN);
num += ATA_ID_SERNO_LEN;
/* SAT defined lu model and serial numbers descriptor */
/* piv=0, assoc=lu, code_set=ASCII, designator=t10 vendor id */
rbuf[num + 0] = 2;
rbuf[num + 1] = 1;
rbuf[num + 3] = sat_model_serial_desc_len;
num += 4;
memcpy(rbuf + num, "ATA ", 8);
num += 8;
ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_PROD,
ATA_ID_PROD_LEN);
num += ATA_ID_PROD_LEN;
ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_SERNO,
ATA_ID_SERNO_LEN);
num += ATA_ID_SERNO_LEN;
if (ata_id_has_wwn(args->id)) {
/* SAT defined lu world wide name */
/* piv=0, assoc=lu, code_set=binary, designator=NAA */
rbuf[num + 0] = 1;
rbuf[num + 1] = 3;
rbuf[num + 3] = ATA_ID_WWN_LEN;
num += 4;
ata_id_string(args->id, (unsigned char *) rbuf + num,
ATA_ID_WWN, ATA_ID_WWN_LEN);
num += ATA_ID_WWN_LEN;
}
rbuf[3] = num - 4; /* page len (assume less than 256 bytes) */
return 0;
}
/**
* ata_scsiop_inq_89 - Simulate INQUIRY VPD page 89, ATA info
* @args: device IDENTIFY data / SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Yields SAT-specified ATA VPD page.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf)
{
rbuf[1] = 0x89; /* our page code */
rbuf[2] = (0x238 >> 8); /* page size fixed at 238h */
rbuf[3] = (0x238 & 0xff);
memcpy(&rbuf[8], "linux ", 8);
memcpy(&rbuf[16], "libata ", 16);
memcpy(&rbuf[32], DRV_VERSION, 4);
rbuf[36] = 0x34; /* force D2H Reg FIS (34h) */
rbuf[37] = (1 << 7); /* bit 7 indicates Command FIS */
/* TODO: PMP? */
/* we don't store the ATA device signature, so we fake it */
rbuf[38] = ATA_DRDY; /* really, this is Status reg */
rbuf[40] = 0x1;
rbuf[48] = 0x1;
rbuf[56] = ATA_CMD_ID_ATA;
memcpy(&rbuf[60], &args->id[0], 512);
return 0;
}
static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
{
struct ata_device *dev = args->dev;
u16 min_io_sectors;
rbuf[1] = 0xb0;
rbuf[3] = 0x3c; /* required VPD size with unmap support */
/*
* Optimal transfer length granularity.
*
* This is always one physical block, but for disks with a smaller
* logical than physical sector size we need to figure out what the
* latter is.
*/
min_io_sectors = 1 << ata_id_log2_per_physical_sector(args->id);
put_unaligned_be16(min_io_sectors, &rbuf[6]);
/*
* Optimal unmap granularity.
*
* The ATA spec doesn't even know about a granularity or alignment
* for the TRIM command. We can leave away most of the unmap related
* VPD page entries, but we have to specify a granularity to signal
* that we support some form of unmap - in this case via WRITE SAME
* with the unmap bit set.
*/
if (ata_id_has_trim(args->id)) {
u64 max_blocks = 65535 * ATA_MAX_TRIM_RNUM;
if (dev->horkage & ATA_HORKAGE_MAX_TRIM_128M)
max_blocks = 128 << (20 - SECTOR_SHIFT);
put_unaligned_be64(max_blocks, &rbuf[36]);
put_unaligned_be32(1, &rbuf[28]);
}
return 0;
}
static unsigned int ata_scsiop_inq_b1(struct ata_scsi_args *args, u8 *rbuf)
{
int form_factor = ata_id_form_factor(args->id);
int media_rotation_rate = ata_id_rotation_rate(args->id);
u8 zoned = ata_id_zoned_cap(args->id);
rbuf[1] = 0xb1;
rbuf[3] = 0x3c;
rbuf[4] = media_rotation_rate >> 8;
rbuf[5] = media_rotation_rate;
rbuf[7] = form_factor;
if (zoned)
rbuf[8] = (zoned << 4);
return 0;
}
static unsigned int ata_scsiop_inq_b2(struct ata_scsi_args *args, u8 *rbuf)
{
/* SCSI Thin Provisioning VPD page: SBC-3 rev 22 or later */
rbuf[1] = 0xb2;
rbuf[3] = 0x4;
rbuf[5] = 1 << 6; /* TPWS */
return 0;
}
static unsigned int ata_scsiop_inq_b6(struct ata_scsi_args *args, u8 *rbuf)
{
/*
* zbc-r05 SCSI Zoned Block device characteristics VPD page
*/
rbuf[1] = 0xb6;
rbuf[3] = 0x3C;
/*
* URSWRZ bit is only meaningful for host-managed ZAC drives
*/
if (args->dev->zac_zoned_cap & 1)
rbuf[4] |= 1;
put_unaligned_be32(args->dev->zac_zones_optimal_open, &rbuf[8]);
put_unaligned_be32(args->dev->zac_zones_optimal_nonseq, &rbuf[12]);
put_unaligned_be32(args->dev->zac_zones_max_open, &rbuf[16]);
return 0;
}
static unsigned int ata_scsiop_inq_b9(struct ata_scsi_args *args, u8 *rbuf)
{
struct ata_cpr_log *cpr_log = args->dev->cpr_log;
u8 *desc = &rbuf[64];
int i;
/* SCSI Concurrent Positioning Ranges VPD page: SBC-5 rev 1 or later */
rbuf[1] = 0xb9;
put_unaligned_be16(64 + (int)cpr_log->nr_cpr * 32 - 4, &rbuf[2]);
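/* e.g. a log with two ranges gives a PAGE LENGTH of 64 + 2 * 32 - 4 = 124 */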
for (i = 0; i < cpr_log->nr_cpr; i++, desc += 32) {
desc[0] = cpr_log->cpr[i].num;
desc[1] = cpr_log->cpr[i].num_storage_elements;
put_unaligned_be64(cpr_log->cpr[i].start_lba, &desc[8]);
put_unaligned_be64(cpr_log->cpr[i].num_lbas, &desc[16]);
}
return 0;
}
/**
* modecpy - Prepare response for MODE SENSE
* @dest: output buffer
* @src: data being copied
* @n: length of mode page
* @changeable: whether changeable parameters are requested
*
* Generate a generic MODE SENSE page for either current or changeable
* parameters.
*
* LOCKING:
* None.
*/
static void modecpy(u8 *dest, const u8 *src, int n, bool changeable)
{
if (changeable) {
memcpy(dest, src, 2);
memset(dest + 2, 0, n - 2);
} else {
memcpy(dest, src, n);
}
}
/**
* ata_msense_caching - Simulate MODE SENSE caching info page
* @id: device IDENTIFY data
* @buf: output buffer
* @changeable: whether changeable parameters are requested
*
* Generate a caching info page, which conditionally indicates
* write caching to the SCSI layer, depending on device
* capabilities.
*
* LOCKING:
* None.
*/
static unsigned int ata_msense_caching(u16 *id, u8 *buf, bool changeable)
{
modecpy(buf, def_cache_mpage, sizeof(def_cache_mpage), changeable);
if (changeable) {
buf[2] |= (1 << 2); /* ata_mselect_caching() */
} else {
buf[2] |= (ata_id_wcache_enabled(id) << 2); /* write cache enable */
buf[12] |= (!ata_id_rahead_enabled(id) << 5); /* disable read ahead */
}
return sizeof(def_cache_mpage);
}
/*
* Simulate MODE SENSE control mode page, sub-page 0.
*/
static unsigned int ata_msense_control_spg0(struct ata_device *dev, u8 *buf,
bool changeable)
{
modecpy(buf, def_control_mpage,
sizeof(def_control_mpage), changeable);
if (changeable) {
/* ata_mselect_control() */
buf[2] |= (1 << 2);
} else {
bool d_sense = (dev->flags & ATA_DFLAG_D_SENSE);
/* descriptor format sense data */
buf[2] |= (d_sense << 2);
}
return sizeof(def_control_mpage);
}
/*
* Translate an ATA duration limit in microseconds to a SCSI duration limit
* using the t2cdlunits 0xa (10ms). Since the SCSI duration limits are 2-bytes
* only, take care of overflows.
*/
static inline u16 ata_xlat_cdl_limit(u8 *buf)
{
u32 limit = get_unaligned_le32(buf);
return min_t(u32, limit / 10000, 65535);
}
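/*
* Example: an ATA limit of 30000 us becomes 3 (3 x 10 ms); any limit of
* 655350000 us or more saturates at the 2-byte maximum of 65535.
*/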
/*
* Simulate MODE SENSE control mode page, sub-pages 07h and 08h
* (command duration limits T2A and T2B mode pages).
*/
static unsigned int ata_msense_control_spgt2(struct ata_device *dev, u8 *buf,
u8 spg)
{
u8 *b, *cdl = dev->cdl, *desc;
u32 policy;
int i;
/*
* Fill the subpage. The first four bytes of the T2A/T2B mode pages
* are a header. The PAGE LENGTH field is the size of the page
* excluding the header.
*/
buf[0] = CONTROL_MPAGE;
buf[1] = spg;
put_unaligned_be16(CDL_T2_SUB_MPAGE_LEN - 4, &buf[2]);
if (spg == CDL_T2A_SUB_MPAGE) {
/*
* Read descriptors map to the T2A page:
* set perf_vs_duration_guideline.
*/
buf[7] = (cdl[0] & 0x03) << 4;
desc = cdl + 64;
} else {
/* Write descriptors map to the T2B page */
desc = cdl + 288;
}
/* Fill the T2 page descriptors */
b = &buf[8];
policy = get_unaligned_le32(&cdl[0]);
for (i = 0; i < 7; i++, b += 32, desc += 32) {
/* t2cdlunits: fixed to 10ms */
b[0] = 0x0a;
/* Max inactive time and its policy */
put_unaligned_be16(ata_xlat_cdl_limit(&desc[8]), &b[2]);
b[6] = ((policy >> 8) & 0x0f) << 4;
/* Max active time and its policy */
put_unaligned_be16(ata_xlat_cdl_limit(&desc[4]), &b[4]);
b[6] |= (policy >> 4) & 0x0f;
/* Command duration guideline and its policy */
put_unaligned_be16(ata_xlat_cdl_limit(&desc[16]), &b[10]);
b[14] = policy & 0x0f;
}
return CDL_T2_SUB_MPAGE_LEN;
}
/*
* Simulate MODE SENSE control mode page, sub-page f2h
* (ATA feature control mode page).
*/
static unsigned int ata_msense_control_ata_feature(struct ata_device *dev,
u8 *buf)
{
/* PS=0, SPF=1 */
buf[0] = CONTROL_MPAGE | (1 << 6);
buf[1] = ATA_FEATURE_SUB_MPAGE;
/*
* The first four bytes of ATA Feature Control mode page are a header.
* The PAGE LENGTH field is the size of the page excluding the header.
*/
put_unaligned_be16(ATA_FEATURE_SUB_MPAGE_LEN - 4, &buf[2]);
if (dev->flags & ATA_DFLAG_CDL)
buf[4] = 0x02; /* Support T2A and T2B pages */
else
buf[4] = 0;
return ATA_FEATURE_SUB_MPAGE_LEN;
}
/**
* ata_msense_control - Simulate MODE SENSE control mode page
* @dev: ATA device of interest
* @buf: output buffer
* @spg: sub-page code
* @changeable: whether changeable parameters are requested
*
* Generate a generic MODE SENSE control mode page.
*
* LOCKING:
* None.
*/
static unsigned int ata_msense_control(struct ata_device *dev, u8 *buf,
u8 spg, bool changeable)
{
unsigned int n;
switch (spg) {
case 0:
return ata_msense_control_spg0(dev, buf, changeable);
case CDL_T2A_SUB_MPAGE:
case CDL_T2B_SUB_MPAGE:
return ata_msense_control_spgt2(dev, buf, spg);
case ATA_FEATURE_SUB_MPAGE:
return ata_msense_control_ata_feature(dev, buf);
case ALL_SUB_MPAGES:
n = ata_msense_control_spg0(dev, buf, changeable);
n += ata_msense_control_spgt2(dev, buf + n, CDL_T2A_SUB_MPAGE);
n += ata_msense_control_spgt2(dev, buf + n, CDL_T2B_SUB_MPAGE);
n += ata_msense_control_ata_feature(dev, buf + n);
return n;
default:
return 0;
}
}
/**
* ata_msense_rw_recovery - Simulate MODE SENSE r/w error recovery page
* @buf: output buffer
* @changeable: whether changeable parameters are requested
*
* Generate a generic MODE SENSE r/w error recovery page.
*
* LOCKING:
* None.
*/
static unsigned int ata_msense_rw_recovery(u8 *buf, bool changeable)
{
modecpy(buf, def_rw_recovery_mpage, sizeof(def_rw_recovery_mpage),
changeable);
return sizeof(def_rw_recovery_mpage);
}
/**
* ata_scsiop_mode_sense - Simulate MODE SENSE 6, 10 commands
* @args: device IDENTIFY data / SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Simulate MODE SENSE commands. Assume this is invoked for direct
* access devices (e.g. disks) only. There should be no block
* descriptor for other device types.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
{
struct ata_device *dev = args->dev;
u8 *scsicmd = args->cmd->cmnd, *p = rbuf;
static const u8 sat_blk_desc[] = {
0, 0, 0, 0, /* number of blocks: sat unspecified */
0,
0, 0x2, 0x0 /* block length: 512 bytes */
};
u8 pg, spg;
unsigned int ebd, page_control, six_byte;
u8 dpofua = 0, bp = 0xff;
u16 fp;
six_byte = (scsicmd[0] == MODE_SENSE);
ebd = !(scsicmd[1] & 0x8); /* dbd bit inverted == edb */
/*
* LLBA bit in msense(10) ignored (compliant)
*/
page_control = scsicmd[2] >> 6;
switch (page_control) {
case 0: /* current */
case 1: /* changeable */
case 2: /* defaults */
break; /* supported */
case 3: /* saved */
goto saving_not_supp;
default:
fp = 2;
bp = 6;
goto invalid_fld;
}
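/*
* MODE SENSE(6) data starts with a 4-byte header, MODE SENSE(10) with an
* 8-byte header; when DBD is clear (ebd != 0) an 8-byte block descriptor
* follows, so skip past both before filling in the mode pages.
*/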
if (six_byte)
p += 4 + (ebd ? 8 : 0);
else
p += 8 + (ebd ? 8 : 0);
pg = scsicmd[2] & 0x3f;
spg = scsicmd[3];
/*
* Supported subpages: all subpages and sub-pages 07h, 08h and f2h of
* the control page.
*/
if (spg) {
switch (spg) {
case ALL_SUB_MPAGES:
break;
case CDL_T2A_SUB_MPAGE:
case CDL_T2B_SUB_MPAGE:
case ATA_FEATURE_SUB_MPAGE:
if (dev->flags & ATA_DFLAG_CDL && pg == CONTROL_MPAGE)
break;
fallthrough;
default:
fp = 3;
goto invalid_fld;
}
}
switch(pg) {
case RW_RECOVERY_MPAGE:
p += ata_msense_rw_recovery(p, page_control == 1);
break;
case CACHE_MPAGE:
p += ata_msense_caching(args->id, p, page_control == 1);
break;
case CONTROL_MPAGE:
p += ata_msense_control(args->dev, p, spg, page_control == 1);
break;
case ALL_MPAGES:
p += ata_msense_rw_recovery(p, page_control == 1);
p += ata_msense_caching(args->id, p, page_control == 1);
p += ata_msense_control(args->dev, p, spg, page_control == 1);
break;
default: /* invalid page code */
fp = 2;
goto invalid_fld;
}
if (dev->flags & ATA_DFLAG_FUA)
dpofua = 1 << 4;
if (six_byte) {
rbuf[0] = p - rbuf - 1;
rbuf[2] |= dpofua;
if (ebd) {
rbuf[3] = sizeof(sat_blk_desc);
memcpy(rbuf + 4, sat_blk_desc, sizeof(sat_blk_desc));
}
} else {
put_unaligned_be16(p - rbuf - 2, &rbuf[0]);
rbuf[3] |= dpofua;
if (ebd) {
rbuf[7] = sizeof(sat_blk_desc);
memcpy(rbuf + 8, sat_blk_desc, sizeof(sat_blk_desc));
}
}
return 0;
invalid_fld:
ata_scsi_set_invalid_field(dev, args->cmd, fp, bp);
return 1;
saving_not_supp:
ata_scsi_set_sense(dev, args->cmd, ILLEGAL_REQUEST, 0x39, 0x0);
/* "Saving parameters not supported" */
return 1;
}
/**
* ata_scsiop_read_cap - Simulate READ CAPACITY[ 16] commands
* @args: device IDENTIFY data / SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Simulate READ CAPACITY commands.
*
* LOCKING:
* None.
*/
static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
{
struct ata_device *dev = args->dev;
u64 last_lba = dev->n_sectors - 1; /* LBA of the last block */
u32 sector_size; /* physical sector size in bytes */
u8 log2_per_phys;
u16 lowest_aligned;
sector_size = ata_id_logical_sector_size(dev->id);
log2_per_phys = ata_id_log2_per_physical_sector(dev->id);
lowest_aligned = ata_id_logical_sector_offset(dev->id, log2_per_phys);
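/*
* Example: a 512e drive (512-byte logical, 4096-byte physical sectors)
* reports log2_per_phys = 3, so rbuf[13] below carries the exponent 3
* (2^3 = 8 logical blocks per physical block) in the READ CAPACITY(16) data.
*/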
if (args->cmd->cmnd[0] == READ_CAPACITY) {
if (last_lba >= 0xffffffffULL)
last_lba = 0xffffffff;
/* sector count, 32-bit */
rbuf[0] = last_lba >> (8 * 3);
rbuf[1] = last_lba >> (8 * 2);
rbuf[2] = last_lba >> (8 * 1);
rbuf[3] = last_lba;
/* sector size */
rbuf[4] = sector_size >> (8 * 3);
rbuf[5] = sector_size >> (8 * 2);
rbuf[6] = sector_size >> (8 * 1);
rbuf[7] = sector_size;
} else {
/* sector count, 64-bit */
rbuf[0] = last_lba >> (8 * 7);
rbuf[1] = last_lba >> (8 * 6);
rbuf[2] = last_lba >> (8 * 5);
rbuf[3] = last_lba >> (8 * 4);
rbuf[4] = last_lba >> (8 * 3);
rbuf[5] = last_lba >> (8 * 2);
rbuf[6] = last_lba >> (8 * 1);
rbuf[7] = last_lba;
/* sector size */
rbuf[ 8] = sector_size >> (8 * 3);
rbuf[ 9] = sector_size >> (8 * 2);
rbuf[10] = sector_size >> (8 * 1);
rbuf[11] = sector_size;
rbuf[12] = 0;
rbuf[13] = log2_per_phys;
rbuf[14] = (lowest_aligned >> 8) & 0x3f;
rbuf[15] = lowest_aligned;
if (ata_id_has_trim(args->id) &&
!(dev->horkage & ATA_HORKAGE_NOTRIM)) {
rbuf[14] |= 0x80; /* LBPME */
if (ata_id_has_zero_after_trim(args->id) &&
dev->horkage & ATA_HORKAGE_ZERO_AFTER_TRIM) {
ata_dev_info(dev, "Enabling discard_zeroes_data\n");
rbuf[14] |= 0x40; /* LBPRZ */
}
}
if (ata_id_zoned_cap(args->id) ||
args->dev->class == ATA_DEV_ZAC)
rbuf[12] = (1 << 4); /* RC_BASIS */
}
return 0;
}
/**
* ata_scsiop_report_luns - Simulate REPORT LUNS command
* @args: device IDENTIFY data / SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Simulate REPORT LUNS command.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
static unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf)
{
rbuf[3] = 8; /* just one lun, LUN 0, size 8 bytes */
return 0;
}
/*
* ATAPI devices typically report zero for their SCSI version, and sometimes
* deviate from the spec WRT response data format. If SCSI version is
* reported as zero like normal, then we make the following fixups:
* 1) Fake MMC-5 version, to indicate to the Linux scsi midlayer this is a
* modern device.
* 2) Ensure response data format / ATAPI information are always correct.
*/
static void atapi_fixup_inquiry(struct scsi_cmnd *cmd)
{
u8 buf[4];
sg_copy_to_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), buf, 4);
if (buf[2] == 0) {
buf[2] = 0x5;
buf[3] = 0x32;
}
sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), buf, 4);
}
static void atapi_qc_complete(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *cmd = qc->scsicmd;
unsigned int err_mask = qc->err_mask;
/* handle completion from EH */
if (unlikely(err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID)) {
if (!(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
/* FIXME: not quite right; we don't want the
* translation of taskfile registers into
* sense descriptors, since that's only
* correct for ATA, not ATAPI
*/
ata_gen_passthru_sense(qc);
}
/* SCSI EH automatically locks door if sdev->locked is
* set. Sometimes door lock request continues to
* fail, for example, when no media is present. This
* creates a loop - SCSI EH issues door lock which
* fails and gets invoked again to acquire sense data
* for the failed command.
*
* If door lock fails, always clear sdev->locked to
* avoid this infinite loop.
*
* This may happen before SCSI scan is complete. Make
* sure qc->dev->sdev isn't NULL before dereferencing.
*/
if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL && qc->dev->sdev)
qc->dev->sdev->locked = 0;
qc->scsicmd->result = SAM_STAT_CHECK_CONDITION;
ata_qc_done(qc);
return;
}
/* successful completion path */
if (cmd->cmnd[0] == INQUIRY && (cmd->cmnd[1] & 0x03) == 0)
atapi_fixup_inquiry(cmd);
cmd->result = SAM_STAT_GOOD;
ata_qc_done(qc);
}
/**
* atapi_xlat - Initialize PACKET taskfile
* @qc: command structure to be initialized
*
* LOCKING:
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Zero on success, non-zero on failure.
*/
static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *scmd = qc->scsicmd;
struct ata_device *dev = qc->dev;
int nodata = (scmd->sc_data_direction == DMA_NONE);
int using_pio = !nodata && (dev->flags & ATA_DFLAG_PIO);
unsigned int nbytes;
memset(qc->cdb, 0, dev->cdb_len);
memcpy(qc->cdb, scmd->cmnd, scmd->cmd_len);
qc->complete_fn = atapi_qc_complete;
qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
if (scmd->sc_data_direction == DMA_TO_DEVICE) {
qc->tf.flags |= ATA_TFLAG_WRITE;
}
qc->tf.command = ATA_CMD_PACKET;
ata_qc_set_pc_nbytes(qc);
/* check whether ATAPI DMA is safe */
if (!nodata && !using_pio && atapi_check_dma(qc))
using_pio = 1;
/* Some controller variants snoop this value for Packet
* transfers to do state machine and FIFO management. Thus we
* want to set it properly, even for DMA where it is
* effectively meaningless.
*/
nbytes = min(ata_qc_raw_nbytes(qc), (unsigned int)63 * 1024);
/* Most ATAPI devices which honor transfer chunk size don't
* behave according to the spec when an odd chunk size which
* matches the transfer length is specified, i.e. when the number of
* bytes to transfer is 2n+1. According to the spec, what
* should happen is to indicate that 2n+1 is going to be
* transferred and transfer 2n+2 bytes where the last byte is
* padding.
*
* In practice, this doesn't happen. ATAPI devices first
* indicate and transfer 2n bytes and then indicate and
* transfer 2 bytes where the last byte is padding.
*
* This inconsistency confuses several controllers which
* perform PIO using DMA such as Intel AHCIs and sil3124/32.
* These controllers use actual number of transferred bytes to
* update DMA pointer and transfer of 4n+2 bytes make those
* controller push DMA pointer by 4n+4 bytes because SATA data
* FISes are aligned to 4 bytes. This causes data corruption
* and buffer overrun.
*
* Always setting nbytes to even number solves this problem
* because then ATAPI devices don't have to split data at 2n
* boundaries.
*/
if (nbytes & 0x1)
nbytes++;
qc->tf.lbam = (nbytes & 0xFF);
qc->tf.lbah = (nbytes >> 8);
if (nodata)
qc->tf.protocol = ATAPI_PROT_NODATA;
else if (using_pio)
qc->tf.protocol = ATAPI_PROT_PIO;
else {
/* DMA data xfer */
qc->tf.protocol = ATAPI_PROT_DMA;
qc->tf.feature |= ATAPI_PKT_DMA;
if ((dev->flags & ATA_DFLAG_DMADIR) &&
(scmd->sc_data_direction != DMA_TO_DEVICE))
/* some SATA bridges need us to indicate data xfer direction */
qc->tf.feature |= ATAPI_DMADIR;
}
/* FIXME: We need to translate 0x05 READ_BLOCK_LIMITS to a MODE_SENSE
as ATAPI tape drives don't get this right otherwise */
return 0;
}
static struct ata_device *ata_find_dev(struct ata_port *ap, unsigned int devno)
{
/*
* For the non-PMP case, ata_link_max_devices() returns 1 (SATA case),
* or 2 (IDE master + slave case). However, the former case includes
* libsas hosted devices which are numbered per scsi host, leading
* to devno potentially being larger than 0 but with each struct
* ata_device having its own struct ata_port and struct ata_link.
* To accommodate these, ignore devno and always use device number 0.
*/
if (likely(!sata_pmp_attached(ap))) {
int link_max_devices = ata_link_max_devices(&ap->link);
if (link_max_devices == 1)
return &ap->link.device[0];
if (devno < link_max_devices)
return &ap->link.device[devno];
return NULL;
}
/*
* For PMP-attached devices, the device number corresponds to C
* (channel) of SCSI [H:C:I:L], indicating the port pmp link
* for the device.
*/
if (devno < ap->nr_pmp_links)
return &ap->pmp_link[devno].device[0];
return NULL;
}
static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap,
const struct scsi_device *scsidev)
{
int devno;
/* skip commands not addressed to targets we simulate */
if (!sata_pmp_attached(ap)) {
if (unlikely(scsidev->channel || scsidev->lun))
return NULL;
devno = scsidev->id;
} else {
if (unlikely(scsidev->id || scsidev->lun))
return NULL;
devno = scsidev->channel;
}
return ata_find_dev(ap, devno);
}
/**
* ata_scsi_find_dev - lookup ata_device from scsi_cmnd
* @ap: ATA port to which the device is attached
* @scsidev: SCSI device from which we derive the ATA device
*
* Given various information provided in struct scsi_cmnd,
* map that onto an ATA bus, and using that mapping
* determine which ata_device is associated with the
* SCSI command to be sent.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Associated ATA device, or %NULL if not found.
*/
struct ata_device *
ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev)
{
struct ata_device *dev = __ata_scsi_find_dev(ap, scsidev);
if (unlikely(!dev || !ata_dev_enabled(dev)))
return NULL;
return dev;
}
/*
* ata_scsi_map_proto - Map pass-thru protocol value to taskfile value.
* @byte1: Byte 1 from pass-thru CDB.
*
* RETURNS:
* ATA_PROT_UNKNOWN if mapping failed/unimplemented, protocol otherwise.
*/
static u8
ata_scsi_map_proto(u8 byte1)
{
switch((byte1 & 0x1e) >> 1) {
case 3: /* Non-data */
return ATA_PROT_NODATA;
case 6: /* DMA */
case 10: /* UDMA Data-in */
case 11: /* UDMA Data-Out */
return ATA_PROT_DMA;
case 4: /* PIO Data-in */
case 5: /* PIO Data-out */
return ATA_PROT_PIO;
case 12: /* FPDMA */
return ATA_PROT_NCQ;
case 0: /* Hard Reset */
case 1: /* SRST */
case 8: /* Device Diagnostic */
case 9: /* Device Reset */
case 7: /* DMA Queued */
case 15: /* Return Response Info */
default: /* Reserved */
break;
}
return ATA_PROT_UNKNOWN;
}
/**
* ata_scsi_pass_thru - convert ATA pass-thru CDB to taskfile
* @qc: command structure to be initialized
*
* Handles either 12, 16, or 32-byte versions of the CDB.
*
* RETURNS:
* Zero on success, non-zero on failure.
*/
static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
{
struct ata_taskfile *tf = &(qc->tf);
struct scsi_cmnd *scmd = qc->scsicmd;
struct ata_device *dev = qc->dev;
const u8 *cdb = scmd->cmnd;
u16 fp;
u16 cdb_offset = 0;
/* 7Fh variable length cmd means an ATA pass-thru(32) */
if (cdb[0] == VARIABLE_LENGTH_CMD)
cdb_offset = 9;
tf->protocol = ata_scsi_map_proto(cdb[1 + cdb_offset]);
if (tf->protocol == ATA_PROT_UNKNOWN) {
fp = 1;
goto invalid_fld;
}
if ((cdb[2 + cdb_offset] & 0x3) == 0) {
/*
* When T_LENGTH is zero (No data is transferred), dir should
* be DMA_NONE.
*/
if (scmd->sc_data_direction != DMA_NONE) {
fp = 2 + cdb_offset;
goto invalid_fld;
}
if (ata_is_ncq(tf->protocol))
tf->protocol = ATA_PROT_NCQ_NODATA;
}
/* enable LBA */
tf->flags |= ATA_TFLAG_LBA;
/*
* 12 and 16 byte CDBs use different offsets to
* provide the various register values.
*/
switch (cdb[0]) {
case ATA_16:
/*
* 16-byte CDB - may contain extended commands.
*
* If that is the case, copy the upper byte register values.
*/
if (cdb[1] & 0x01) {
tf->hob_feature = cdb[3];
tf->hob_nsect = cdb[5];
tf->hob_lbal = cdb[7];
tf->hob_lbam = cdb[9];
tf->hob_lbah = cdb[11];
tf->flags |= ATA_TFLAG_LBA48;
} else
tf->flags &= ~ATA_TFLAG_LBA48;
/*
* Always copy low byte, device and command registers.
*/
tf->feature = cdb[4];
tf->nsect = cdb[6];
tf->lbal = cdb[8];
tf->lbam = cdb[10];
tf->lbah = cdb[12];
tf->device = cdb[13];
tf->command = cdb[14];
break;
case ATA_12:
/*
* 12-byte CDB - incapable of extended commands.
*/
tf->flags &= ~ATA_TFLAG_LBA48;
tf->feature = cdb[3];
tf->nsect = cdb[4];
tf->lbal = cdb[5];
tf->lbam = cdb[6];
tf->lbah = cdb[7];
tf->device = cdb[8];
tf->command = cdb[9];
break;
default:
/*
* 32-byte CDB - may contain extended command fields.
*
* If that is the case, copy the upper byte register values.
*/
if (cdb[10] & 0x01) {
tf->hob_feature = cdb[20];
tf->hob_nsect = cdb[22];
tf->hob_lbal = cdb[16];
tf->hob_lbam = cdb[15];
tf->hob_lbah = cdb[14];
tf->flags |= ATA_TFLAG_LBA48;
} else
tf->flags &= ~ATA_TFLAG_LBA48;
tf->feature = cdb[21];
tf->nsect = cdb[23];
tf->lbal = cdb[19];
tf->lbam = cdb[18];
tf->lbah = cdb[17];
tf->device = cdb[24];
tf->command = cdb[25];
tf->auxiliary = get_unaligned_be32(&cdb[28]);
break;
}
/* For NCQ commands copy the tag value */
if (ata_is_ncq(tf->protocol))
tf->nsect = qc->hw_tag << 3;
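/* e.g. hw_tag 5 becomes nsect 0x28; FPDMA commands carry the tag in bits 7:3 of the count field */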
/* enforce correct master/slave bit */
tf->device = dev->devno ?
tf->device | ATA_DEV1 : tf->device & ~ATA_DEV1;
switch (tf->command) {
/* READ/WRITE LONG use a non-standard sect_size */
case ATA_CMD_READ_LONG:
case ATA_CMD_READ_LONG_ONCE:
case ATA_CMD_WRITE_LONG:
case ATA_CMD_WRITE_LONG_ONCE:
if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1) {
fp = 1;
goto invalid_fld;
}
qc->sect_size = scsi_bufflen(scmd);
break;
/* commands using reported Logical Block size (e.g. 512 or 4K) */
case ATA_CMD_CFA_WRITE_NE:
case ATA_CMD_CFA_TRANS_SECT:
case ATA_CMD_CFA_WRITE_MULT_NE:
/* XXX: case ATA_CMD_CFA_WRITE_SECTORS_WITHOUT_ERASE: */
case ATA_CMD_READ:
case ATA_CMD_READ_EXT:
case ATA_CMD_READ_QUEUED:
/* XXX: case ATA_CMD_READ_QUEUED_EXT: */
case ATA_CMD_FPDMA_READ:
case ATA_CMD_READ_MULTI:
case ATA_CMD_READ_MULTI_EXT:
case ATA_CMD_PIO_READ:
case ATA_CMD_PIO_READ_EXT:
case ATA_CMD_READ_STREAM_DMA_EXT:
case ATA_CMD_READ_STREAM_EXT:
case ATA_CMD_VERIFY:
case ATA_CMD_VERIFY_EXT:
case ATA_CMD_WRITE:
case ATA_CMD_WRITE_EXT:
case ATA_CMD_WRITE_FUA_EXT:
case ATA_CMD_WRITE_QUEUED:
case ATA_CMD_WRITE_QUEUED_FUA_EXT:
case ATA_CMD_FPDMA_WRITE:
case ATA_CMD_WRITE_MULTI:
case ATA_CMD_WRITE_MULTI_EXT:
case ATA_CMD_WRITE_MULTI_FUA_EXT:
case ATA_CMD_PIO_WRITE:
case ATA_CMD_PIO_WRITE_EXT:
case ATA_CMD_WRITE_STREAM_DMA_EXT:
case ATA_CMD_WRITE_STREAM_EXT:
qc->sect_size = scmd->device->sector_size;
break;
/* Everything else uses 512 byte "sectors" */
default:
qc->sect_size = ATA_SECT_SIZE;
}
/*
* Set flags so that all registers will be written, pass on
* write indication (used for PIO/DMA setup), result TF is
* copied back and we don't whine too much about its failure.
*/
tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
if (scmd->sc_data_direction == DMA_TO_DEVICE)
tf->flags |= ATA_TFLAG_WRITE;
qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET;
/*
* Set transfer length.
*
* TODO: find out if we need to do more here to
* cover scatter/gather case.
*/
ata_qc_set_pc_nbytes(qc);
/* We may not issue DMA commands if no DMA mode is set */
if (tf->protocol == ATA_PROT_DMA && !ata_dma_enabled(dev)) {
fp = 1;
goto invalid_fld;
}
/* We may not issue NCQ commands to devices not supporting NCQ */
if (ata_is_ncq(tf->protocol) && !ata_ncq_enabled(dev)) {
fp = 1;
goto invalid_fld;
}
/* sanity check for pio multi commands */
if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf)) {
fp = 1;
goto invalid_fld;
}
if (is_multi_taskfile(tf)) {
unsigned int multi_count = 1 << (cdb[1] >> 5);
/* compare the passed through multi_count
* with the cached multi_count of libata
*/
if (multi_count != dev->multi_count)
ata_dev_warn(dev, "invalid multi_count %u ignored\n",
multi_count);
}
/*
* Filter SET_FEATURES - XFER MODE command -- otherwise,
* SET_FEATURES - XFER MODE must be preceded/succeeded
* by an update to hardware-specific registers for each
* controller (i.e. the reason for ->set_piomode(),
* ->set_dmamode(), and ->post_set_mode() hooks).
*/
if (tf->command == ATA_CMD_SET_FEATURES &&
tf->feature == SETFEATURES_XFER) {
fp = (cdb[0] == ATA_16) ? 4 : 3;
goto invalid_fld;
}
/*
* Filter TPM commands by default. These provide an
* essentially uncontrolled encrypted "back door" between
* applications and the disk. Set libata.allow_tpm=1 if you
* have a real reason for wanting to use them. This ensures
* that installed software cannot easily mess stuff up without
* user intent. DVR type users will probably ship with this enabled
* for movie content management.
*
* Note that for ATA8 we can issue a DCS change and DCS freeze lock
* for this and should do so in the future, but that is not sufficient as
* DCS is an optional feature set. Thus we also do the software filter
* so that we comply with the TC consortium stated goal that the user
* can turn off TC features of their system.
*/
if (tf->command >= 0x5C && tf->command <= 0x5F && !libata_allow_tpm) {
fp = (cdb[0] == ATA_16) ? 14 : 9;
goto invalid_fld;
}
return 0;
invalid_fld:
ata_scsi_set_invalid_field(dev, scmd, fp, 0xff);
return 1;
}
/**
* ata_format_dsm_trim_descr() - SATL Write Same to DSM Trim
* @cmd: SCSI command being translated
* @trmax: Maximum number of entries that will fit in sector_size bytes.
* @sector: Starting sector
* @count: Total Range of request in logical sectors
*
* Rewrite the WRITE SAME descriptor to be a DSM TRIM little-endian formatted
* descriptor.
*
* Up to 64 entries of the format:
* 63:48 Range Length
* 47:0 LBA
*
* Range Length of 0 is ignored.
* LBAs should be in sorted order and not overlap.
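*
* For example, a request for 0x18000 sectors starting at LBA 0x1000 is
* emitted as two entries: 0xffff sectors at LBA 0x1000 followed by
* 0x8001 sectors at LBA 0x10fff.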
*
* NOTE: this is the same format as ADD LBA(S) TO NV CACHE PINNED SET
*
* Return: Number of bytes copied into sglist.
*/
static size_t ata_format_dsm_trim_descr(struct scsi_cmnd *cmd, u32 trmax,
u64 sector, u32 count)
{
struct scsi_device *sdp = cmd->device;
size_t len = sdp->sector_size;
size_t r;
__le64 *buf;
u32 i = 0;
unsigned long flags;
WARN_ON(len > ATA_SCSI_RBUF_SIZE);
if (len > ATA_SCSI_RBUF_SIZE)
len = ATA_SCSI_RBUF_SIZE;
spin_lock_irqsave(&ata_scsi_rbuf_lock, flags);
buf = ((void *)ata_scsi_rbuf);
memset(buf, 0, len);
while (i < trmax) {
u64 entry = sector |
((u64)(count > 0xffff ? 0xffff : count) << 48);
buf[i++] = __cpu_to_le64(entry);
if (count <= 0xffff)
break;
count -= 0xffff;
sector += 0xffff;
}
r = sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), buf, len);
spin_unlock_irqrestore(&ata_scsi_rbuf_lock, flags);
return r;
}
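/*
* Worked example of the encoding above (illustrative, assuming a 512-byte
* logical sector, i.e. trmax == 64): a request starting at LBA 0x1000 for
* 8 sectors is packed into the single little-endian quadword
* 0x1000 | ((u64)8 << 48) == 0x0008000000001000, while requests longer
* than 0xffff sectors are split across several entries of at most 0xffff
* sectors each, up to trmax entries.
*/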
/**
* ata_scsi_write_same_xlat() - SATL Write Same to ATA SCT Write Same
* @qc: Command to be translated
*
* Translate a SCSI WRITE SAME command to be either a DSM TRIM command or
* an SCT Write Same command.
* Based on whether the WRITE SAME command has the UNMAP flag set:
*
* - When set translate to DSM TRIM
* - When clear translate to SCT Write Same
*/
static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
{
struct ata_taskfile *tf = &qc->tf;
struct scsi_cmnd *scmd = qc->scsicmd;
struct scsi_device *sdp = scmd->device;
size_t len = sdp->sector_size;
struct ata_device *dev = qc->dev;
const u8 *cdb = scmd->cmnd;
u64 block;
u32 n_block;
const u32 trmax = len >> 3;
u32 size;
u16 fp;
u8 bp = 0xff;
u8 unmap = cdb[1] & 0x8;
/* we may not issue DMA commands if no DMA mode is set */
if (unlikely(!ata_dma_enabled(dev)))
goto invalid_opcode;
/*
* We only allow sending this command through the block layer,
* as it modifies the DATA OUT buffer, which would corrupt user
* memory for SG_IO commands.
*/
if (unlikely(blk_rq_is_passthrough(scsi_cmd_to_rq(scmd))))
goto invalid_opcode;
if (unlikely(scmd->cmd_len < 16)) {
fp = 15;
goto invalid_fld;
}
scsi_16_lba_len(cdb, &block, &n_block);
if (!unmap ||
(dev->horkage & ATA_HORKAGE_NOTRIM) ||
!ata_id_has_trim(dev->id)) {
fp = 1;
bp = 3;
goto invalid_fld;
}
/* If the request is too large the cmd is invalid */
if (n_block > 0xffff * trmax) {
fp = 2;
goto invalid_fld;
}
/*
* WRITE SAME always has a sector-sized buffer as payload; this
* should never be a multiple-entry S/G list.
*/
if (!scsi_sg_count(scmd))
goto invalid_param_len;
/*
* size must match the sector size in bytes.
* For DATA SET MANAGEMENT TRIM in ACS-2, nsect (aka count)
* is defined as the number of 512-byte blocks to be transferred.
*/
size = ata_format_dsm_trim_descr(scmd, trmax, block, n_block);
if (size != len)
goto invalid_param_len;
if (ata_ncq_enabled(dev) && ata_fpdma_dsm_supported(dev)) {
/* Newer devices support queued TRIM commands */
tf->protocol = ATA_PROT_NCQ;
tf->command = ATA_CMD_FPDMA_SEND;
tf->hob_nsect = ATA_SUBCMD_FPDMA_SEND_DSM & 0x1f;
tf->nsect = qc->hw_tag << 3;
tf->hob_feature = (size / 512) >> 8;
tf->feature = size / 512;
tf->auxiliary = 1;
} else {
tf->protocol = ATA_PROT_DMA;
tf->hob_feature = 0;
tf->feature = ATA_DSM_TRIM;
tf->hob_nsect = (size / 512) >> 8;
tf->nsect = size / 512;
tf->command = ATA_CMD_DSM;
}
tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 |
ATA_TFLAG_WRITE;
ata_qc_set_pc_nbytes(qc);
return 0;
invalid_fld:
ata_scsi_set_invalid_field(dev, scmd, fp, bp);
return 1;
invalid_param_len:
/* "Parameter list length error" */
ata_scsi_set_sense(dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0);
return 1;
invalid_opcode:
/* "Invalid command operation code" */
ata_scsi_set_sense(dev, scmd, ILLEGAL_REQUEST, 0x20, 0x0);
return 1;
}
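/*
* Rough sketch of the taskfiles produced above (illustrative): for a
* single 512-byte payload of TRIM ranges the non-queued path issues
* DATA SET MANAGEMENT with FEATURE = ATA_DSM_TRIM and COUNT = size / 512
* = 1, while the queued path issues SEND FPDMA QUEUED with the DSM
* subcommand in the upper COUNT byte, the NCQ tag in bits 7:3 of the
* lower COUNT byte and the 512-byte block count carried in the FEATURE
* pair.
*/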
/**
* ata_scsiop_maint_in - Simulate a subset of MAINTENANCE_IN
* @args: device MAINTENANCE_IN data / SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Yields a subset to satisfy scsi_report_opcode()
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
static unsigned int ata_scsiop_maint_in(struct ata_scsi_args *args, u8 *rbuf)
{
struct ata_device *dev = args->dev;
u8 *cdb = args->cmd->cmnd;
u8 supported = 0, cdlp = 0, rwcdlp = 0;
unsigned int err = 0;
if (cdb[2] != 1 && cdb[2] != 3) {
ata_dev_warn(dev, "invalid command format %d\n", cdb[2]);
err = 2;
goto out;
}
switch (cdb[3]) {
case INQUIRY:
case MODE_SENSE:
case MODE_SENSE_10:
case READ_CAPACITY:
case SERVICE_ACTION_IN_16:
case REPORT_LUNS:
case REQUEST_SENSE:
case SYNCHRONIZE_CACHE:
case SYNCHRONIZE_CACHE_16:
case REZERO_UNIT:
case SEEK_6:
case SEEK_10:
case TEST_UNIT_READY:
case SEND_DIAGNOSTIC:
case MAINTENANCE_IN:
case READ_6:
case READ_10:
case WRITE_6:
case WRITE_10:
case ATA_12:
case ATA_16:
case VERIFY:
case VERIFY_16:
case MODE_SELECT:
case MODE_SELECT_10:
case START_STOP:
supported = 3;
break;
case READ_16:
supported = 3;
if (dev->flags & ATA_DFLAG_CDL) {
/*
* CDL read descriptors map to the T2A page, that is,
* rwcdlp = 0x01 and cdlp = 0x01
*/
rwcdlp = 0x01;
cdlp = 0x01 << 3;
}
break;
case WRITE_16:
supported = 3;
if (dev->flags & ATA_DFLAG_CDL) {
/*
* CDL write descriptors map to the T2B page, that is,
* rwcdlp = 0x01 and cdlp = 0x02
*/
rwcdlp = 0x01;
cdlp = 0x02 << 3;
}
break;
case ZBC_IN:
case ZBC_OUT:
if (ata_id_zoned_cap(dev->id) ||
dev->class == ATA_DEV_ZAC)
supported = 3;
break;
case SECURITY_PROTOCOL_IN:
case SECURITY_PROTOCOL_OUT:
if (dev->flags & ATA_DFLAG_TRUSTED)
supported = 3;
break;
default:
break;
}
out:
/* One command format */
rbuf[0] = rwcdlp;
rbuf[1] = cdlp | supported;
return err;
}
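/*
* Note on the reply built above (illustrative, per the REPORT SUPPORTED
* OPERATION CODES one_command format): byte 0 carries the RWCDLP bit and
* byte 1 combines the CDLP field (bits 4:3) with the SUPPORT value in
* bits 2:0, where SUPPORT == 3 means the command is supported in
* conformance with the SCSI standard.
*/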
/**
* ata_scsi_report_zones_complete - convert ATA output
* @qc: command structure returning the data
*
* Convert T-13 little-endian field representation into
* T-10 big-endian field representation.
* What a mess.
*/
static void ata_scsi_report_zones_complete(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *scmd = qc->scsicmd;
struct sg_mapping_iter miter;
unsigned long flags;
unsigned int bytes = 0;
sg_miter_start(&miter, scsi_sglist(scmd), scsi_sg_count(scmd),
SG_MITER_TO_SG | SG_MITER_ATOMIC);
local_irq_save(flags);
while (sg_miter_next(&miter)) {
unsigned int offset = 0;
if (bytes == 0) {
char *hdr;
u32 list_length;
u64 max_lba, opt_lba;
u16 same;
/* Swizzle header */
hdr = miter.addr;
list_length = get_unaligned_le32(&hdr[0]);
same = get_unaligned_le16(&hdr[4]);
max_lba = get_unaligned_le64(&hdr[8]);
opt_lba = get_unaligned_le64(&hdr[16]);
put_unaligned_be32(list_length, &hdr[0]);
hdr[4] = same & 0xf;
put_unaligned_be64(max_lba, &hdr[8]);
put_unaligned_be64(opt_lba, &hdr[16]);
offset += 64;
bytes += 64;
}
while (offset < miter.length) {
char *rec;
u8 cond, type, non_seq, reset;
u64 size, start, wp;
/* Swizzle zone descriptor */
rec = miter.addr + offset;
type = rec[0] & 0xf;
cond = (rec[1] >> 4) & 0xf;
non_seq = (rec[1] & 2);
reset = (rec[1] & 1);
size = get_unaligned_le64(&rec[8]);
start = get_unaligned_le64(&rec[16]);
wp = get_unaligned_le64(&rec[24]);
rec[0] = type;
rec[1] = (cond << 4) | non_seq | reset;
put_unaligned_be64(size, &rec[8]);
put_unaligned_be64(start, &rec[16]);
put_unaligned_be64(wp, &rec[24]);
WARN_ON(offset + 64 > miter.length);
offset += 64;
bytes += 64;
}
}
sg_miter_stop(&miter);
local_irq_restore(flags);
ata_scsi_qc_complete(qc);
}
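/*
* Layout handled by the conversion above (illustrative): the reply starts
* with a 64-byte header (zone list length at offset 0, SAME field at
* offset 4, maximum and optimal LBAs at offsets 8 and 16), followed by
* 64-byte zone descriptors (type, condition/flags, zone length, start LBA
* and write pointer). The ATA data arrives little-endian and is rewritten
* in place into the big-endian representation ZBC expects.
*/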
static unsigned int ata_scsi_zbc_in_xlat(struct ata_queued_cmd *qc)
{
struct ata_taskfile *tf = &qc->tf;
struct scsi_cmnd *scmd = qc->scsicmd;
const u8 *cdb = scmd->cmnd;
u16 sect, fp = (u16)-1;
u8 sa, options, bp = 0xff;
u64 block;
u32 n_block;
if (unlikely(scmd->cmd_len < 16)) {
ata_dev_warn(qc->dev, "invalid cdb length %d\n",
scmd->cmd_len);
fp = 15;
goto invalid_fld;
}
scsi_16_lba_len(cdb, &block, &n_block);
if (n_block != scsi_bufflen(scmd)) {
ata_dev_warn(qc->dev, "non-matching transfer count (%d/%d)\n",
n_block, scsi_bufflen(scmd));
goto invalid_param_len;
}
sa = cdb[1] & 0x1f;
if (sa != ZI_REPORT_ZONES) {
ata_dev_warn(qc->dev, "invalid service action %d\n", sa);
fp = 1;
goto invalid_fld;
}
/*
* ZAC allows transfers only in 512-byte blocks
* and uses a 16-bit value for the transfer count.
*/
if ((n_block / 512) > 0xffff || n_block < 512 || (n_block % 512)) {
ata_dev_warn(qc->dev, "invalid transfer count %d\n", n_block);
goto invalid_param_len;
}
sect = n_block / 512;
options = cdb[14] & 0xbf;
if (ata_ncq_enabled(qc->dev) &&
ata_fpdma_zac_mgmt_in_supported(qc->dev)) {
tf->protocol = ATA_PROT_NCQ;
tf->command = ATA_CMD_FPDMA_RECV;
tf->hob_nsect = ATA_SUBCMD_FPDMA_RECV_ZAC_MGMT_IN & 0x1f;
tf->nsect = qc->hw_tag << 3;
tf->feature = sect & 0xff;
tf->hob_feature = (sect >> 8) & 0xff;
tf->auxiliary = ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES | (options << 8);
} else {
tf->command = ATA_CMD_ZAC_MGMT_IN;
tf->feature = ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES;
tf->protocol = ATA_PROT_DMA;
tf->hob_feature = options;
tf->hob_nsect = (sect >> 8) & 0xff;
tf->nsect = sect & 0xff;
}
tf->device = ATA_LBA;
tf->lbah = (block >> 16) & 0xff;
tf->lbam = (block >> 8) & 0xff;
tf->lbal = block & 0xff;
tf->hob_lbah = (block >> 40) & 0xff;
tf->hob_lbam = (block >> 32) & 0xff;
tf->hob_lbal = (block >> 24) & 0xff;
tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48;
qc->flags |= ATA_QCFLAG_RESULT_TF;
ata_qc_set_pc_nbytes(qc);
qc->complete_fn = ata_scsi_report_zones_complete;
return 0;
invalid_fld:
ata_scsi_set_invalid_field(qc->dev, scmd, fp, bp);
return 1;
invalid_param_len:
/* "Parameter list length error" */
ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0);
return 1;
}
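/*
* Illustrative sizing for the translation above: a 4096-byte REPORT ZONES
* allocation length gives sect = 8, which lands in the FEATURE pair for
* the queued (RECEIVE FPDMA QUEUED) variant or in the COUNT pair for the
* plain ZAC MANAGEMENT IN variant; the reporting options byte from
* cdb[14] travels in AUXILIARY or FEATURE_EXT respectively.
*/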
static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc)
{
struct ata_taskfile *tf = &qc->tf;
struct scsi_cmnd *scmd = qc->scsicmd;
struct ata_device *dev = qc->dev;
const u8 *cdb = scmd->cmnd;
u8 all, sa;
u64 block;
u32 n_block;
u16 fp = (u16)-1;
if (unlikely(scmd->cmd_len < 16)) {
fp = 15;
goto invalid_fld;
}
sa = cdb[1] & 0x1f;
if ((sa != ZO_CLOSE_ZONE) && (sa != ZO_FINISH_ZONE) &&
(sa != ZO_OPEN_ZONE) && (sa != ZO_RESET_WRITE_POINTER)) {
fp = 1;
goto invalid_fld;
}
scsi_16_lba_len(cdb, &block, &n_block);
if (n_block) {
/*
* ZAC MANAGEMENT OUT doesn't define any length
*/
goto invalid_param_len;
}
all = cdb[14] & 0x1;
if (all) {
/*
* Ignore the block address (zone ID) as defined by ZBC.
*/
block = 0;
} else if (block >= dev->n_sectors) {
/*
* Block must be a valid zone ID (a zone start LBA).
*/
fp = 2;
goto invalid_fld;
}
if (ata_ncq_enabled(qc->dev) &&
ata_fpdma_zac_mgmt_out_supported(qc->dev)) {
tf->protocol = ATA_PROT_NCQ_NODATA;
tf->command = ATA_CMD_NCQ_NON_DATA;
tf->feature = ATA_SUBCMD_NCQ_NON_DATA_ZAC_MGMT_OUT;
tf->nsect = qc->hw_tag << 3;
tf->auxiliary = sa | ((u16)all << 8);
} else {
tf->protocol = ATA_PROT_NODATA;
tf->command = ATA_CMD_ZAC_MGMT_OUT;
tf->feature = sa;
tf->hob_feature = all;
}
tf->lbah = (block >> 16) & 0xff;
tf->lbam = (block >> 8) & 0xff;
tf->lbal = block & 0xff;
tf->hob_lbah = (block >> 40) & 0xff;
tf->hob_lbam = (block >> 32) & 0xff;
tf->hob_lbal = (block >> 24) & 0xff;
tf->device = ATA_LBA;
tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48;
return 0;
invalid_fld:
ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff);
return 1;
invalid_param_len:
/* "Parameter list length error" */
ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0);
return 1;
}
/**
* ata_mselect_caching - Simulate MODE SELECT for caching info page
* @qc: Storage for translated ATA taskfile
* @buf: input buffer
* @len: number of valid bytes in the input buffer
* @fp: out parameter for the failed field on error
*
* Prepare a taskfile to modify caching information for the device.
*
* LOCKING:
* None.
*/
static int ata_mselect_caching(struct ata_queued_cmd *qc,
const u8 *buf, int len, u16 *fp)
{
struct ata_taskfile *tf = &qc->tf;
struct ata_device *dev = qc->dev;
u8 mpage[CACHE_MPAGE_LEN];
u8 wce;
int i;
/*
* The first two bytes of def_cache_mpage are a header, so offsets
* in mpage are off by 2 compared to buf. Same for len.
*/
if (len != CACHE_MPAGE_LEN - 2) {
*fp = min(len, CACHE_MPAGE_LEN - 2);
return -EINVAL;
}
wce = buf[0] & (1 << 2);
/*
* Check that read-only bits are not modified.
*/
ata_msense_caching(dev->id, mpage, false);
for (i = 0; i < CACHE_MPAGE_LEN - 2; i++) {
if (i == 0)
continue;
if (mpage[i + 2] != buf[i]) {
*fp = i;
return -EINVAL;
}
}
tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
tf->protocol = ATA_PROT_NODATA;
tf->nsect = 0;
tf->command = ATA_CMD_SET_FEATURES;
tf->feature = wce ? SETFEATURES_WC_ON : SETFEATURES_WC_OFF;
return 0;
}
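/*
* Illustrative use of the helper above: a caching mode page body whose
* first byte has bit 2 (WCE) set translates into SET FEATURES with
* SETFEATURES_WC_ON, a clear bit into SETFEATURES_WC_OFF; every other
* (read-only) byte must match what MODE SENSE reports, otherwise the
* request is rejected with the failing offset returned in *fp.
*/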
/*
* Simulate MODE SELECT control mode page, sub-page 0.
*/
static int ata_mselect_control_spg0(struct ata_queued_cmd *qc,
const u8 *buf, int len, u16 *fp)
{
struct ata_device *dev = qc->dev;
u8 mpage[CONTROL_MPAGE_LEN];
u8 d_sense;
int i;
/*
* The first two bytes of def_control_mpage are a header, so offsets
* in mpage are off by 2 compared to buf. Same for len.
*/
if (len != CONTROL_MPAGE_LEN - 2) {
*fp = min(len, CONTROL_MPAGE_LEN - 2);
return -EINVAL;
}
d_sense = buf[0] & (1 << 2);
/*
* Check that read-only bits are not modified.
*/
ata_msense_control_spg0(dev, mpage, false);
for (i = 0; i < CONTROL_MPAGE_LEN - 2; i++) {
if (i == 0)
continue;
if (mpage[2 + i] != buf[i]) {
*fp = i;
return -EINVAL;
}
}
if (d_sense & (1 << 2))
dev->flags |= ATA_DFLAG_D_SENSE;
else
dev->flags &= ~ATA_DFLAG_D_SENSE;
return 0;
}
/*
* Translate MODE SELECT control mode page, sub-pages f2h (ATA feature mode
* page) into a SET FEATURES command.
*/
static unsigned int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc,
const u8 *buf, int len,
u16 *fp)
{
struct ata_device *dev = qc->dev;
struct ata_taskfile *tf = &qc->tf;
u8 cdl_action;
/*
* The first four bytes of ATA Feature Control mode page are a header,
* so offsets in mpage are off by 4 compared to buf. Same for len.
*/
if (len != ATA_FEATURE_SUB_MPAGE_LEN - 4) {
*fp = min(len, ATA_FEATURE_SUB_MPAGE_LEN - 4);
return -EINVAL;
}
/* Check cdl_ctrl */
switch (buf[0] & 0x03) {
case 0:
/* Disable CDL */
cdl_action = 0;
dev->flags &= ~ATA_DFLAG_CDL_ENABLED;
break;
case 0x02:
/* Enable CDL T2A/T2B: NCQ priority must be disabled */
if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED) {
ata_dev_err(dev,
"NCQ priority must be disabled to enable CDL\n");
return -EINVAL;
}
cdl_action = 1;
dev->flags |= ATA_DFLAG_CDL_ENABLED;
break;
default:
*fp = 0;
return -EINVAL;
}
tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
tf->protocol = ATA_PROT_NODATA;
tf->command = ATA_CMD_SET_FEATURES;
tf->feature = SETFEATURES_CDL;
tf->nsect = cdl_action;
return 1;
}
/**
* ata_mselect_control - Simulate MODE SELECT for control page
* @qc: Storage for translated ATA taskfile
* @spg: target sub-page of the control page
* @buf: input buffer
* @len: number of valid bytes in the input buffer
* @fp: out parameter for the failed field on error
*
* Prepare a taskfile to modify the control mode page settings for the device.
*
* LOCKING:
* None.
*/
static int ata_mselect_control(struct ata_queued_cmd *qc, u8 spg,
const u8 *buf, int len, u16 *fp)
{
switch (spg) {
case 0:
return ata_mselect_control_spg0(qc, buf, len, fp);
case ATA_FEATURE_SUB_MPAGE:
return ata_mselect_control_ata_feature(qc, buf, len, fp);
default:
return -EINVAL;
}
}
/**
* ata_scsi_mode_select_xlat - Simulate MODE SELECT 6, 10 commands
* @qc: Storage for translated ATA taskfile
*
* Converts a MODE SELECT command to an ATA SET FEATURES taskfile.
* Assume this is invoked for direct access devices (e.g. disks) only.
* There should be no block descriptor for other device types.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *scmd = qc->scsicmd;
const u8 *cdb = scmd->cmnd;
u8 pg, spg;
unsigned six_byte, pg_len, hdr_len, bd_len;
int len, ret;
u16 fp = (u16)-1;
u8 bp = 0xff;
u8 buffer[64];
const u8 *p = buffer;
six_byte = (cdb[0] == MODE_SELECT);
if (six_byte) {
if (scmd->cmd_len < 5) {
fp = 4;
goto invalid_fld;
}
len = cdb[4];
hdr_len = 4;
} else {
if (scmd->cmd_len < 9) {
fp = 8;
goto invalid_fld;
}
len = get_unaligned_be16(&cdb[7]);
hdr_len = 8;
}
/* We only support PF=1, SP=0. */
if ((cdb[1] & 0x11) != 0x10) {
fp = 1;
bp = (cdb[1] & 0x01) ? 1 : 5;
goto invalid_fld;
}
/* Test early for possible overrun. */
if (!scsi_sg_count(scmd) || scsi_sglist(scmd)->length < len)
goto invalid_param_len;
/* Move past header and block descriptors. */
if (len < hdr_len)
goto invalid_param_len;
if (!sg_copy_to_buffer(scsi_sglist(scmd), scsi_sg_count(scmd),
buffer, sizeof(buffer)))
goto invalid_param_len;
if (six_byte)
bd_len = p[3];
else
bd_len = get_unaligned_be16(&p[6]);
len -= hdr_len;
p += hdr_len;
if (len < bd_len)
goto invalid_param_len;
if (bd_len != 0 && bd_len != 8) {
fp = (six_byte) ? 3 : 6;
fp += bd_len + hdr_len;
goto invalid_param;
}
len -= bd_len;
p += bd_len;
if (len == 0)
goto skip;
/* Parse both possible formats for the mode page headers. */
pg = p[0] & 0x3f;
if (p[0] & 0x40) {
if (len < 4)
goto invalid_param_len;
spg = p[1];
pg_len = get_unaligned_be16(&p[2]);
p += 4;
len -= 4;
} else {
if (len < 2)
goto invalid_param_len;
spg = 0;
pg_len = p[1];
p += 2;
len -= 2;
}
/*
* Supported subpages: all subpages and ATA feature sub-page f2h of
* the control page.
*/
if (spg) {
switch (spg) {
case ALL_SUB_MPAGES:
/* All subpages is not supported for the control page */
if (pg == CONTROL_MPAGE) {
fp = (p[0] & 0x40) ? 1 : 0;
fp += hdr_len + bd_len;
goto invalid_param;
}
break;
case ATA_FEATURE_SUB_MPAGE:
if (qc->dev->flags & ATA_DFLAG_CDL &&
pg == CONTROL_MPAGE)
break;
fallthrough;
default:
fp = (p[0] & 0x40) ? 1 : 0;
fp += hdr_len + bd_len;
goto invalid_param;
}
}
if (pg_len > len)
goto invalid_param_len;
switch (pg) {
case CACHE_MPAGE:
if (ata_mselect_caching(qc, p, pg_len, &fp) < 0) {
fp += hdr_len + bd_len;
goto invalid_param;
}
break;
case CONTROL_MPAGE:
ret = ata_mselect_control(qc, spg, p, pg_len, &fp);
if (ret < 0) {
fp += hdr_len + bd_len;
goto invalid_param;
}
if (!ret)
goto skip; /* No ATA command to send */
break;
default:
/* Invalid page code */
fp = bd_len + hdr_len;
goto invalid_param;
}
/*
* Only one page has changeable data, so we only support setting one
* page at a time.
*/
if (len > pg_len)
goto invalid_param;
return 0;
invalid_fld:
ata_scsi_set_invalid_field(qc->dev, scmd, fp, bp);
return 1;
invalid_param:
ata_scsi_set_invalid_parameter(qc->dev, scmd, fp);
return 1;
invalid_param_len:
/* "Parameter list length error" */
ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0);
return 1;
skip:
scmd->result = SAM_STAT_GOOD;
return 1;
}
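/*
* Note on the failure reporting above (illustrative): the page helpers
* return a field pointer relative to the start of the mode page data, so
* the caller folds the mode parameter header and block descriptor lengths
* back in before building the sense data.
*/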
static u8 ata_scsi_trusted_op(u32 len, bool send, bool dma)
{
if (len == 0)
return ATA_CMD_TRUSTED_NONDATA;
else if (send)
return dma ? ATA_CMD_TRUSTED_SND_DMA : ATA_CMD_TRUSTED_SND;
else
return dma ? ATA_CMD_TRUSTED_RCV_DMA : ATA_CMD_TRUSTED_RCV;
}
static unsigned int ata_scsi_security_inout_xlat(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *scmd = qc->scsicmd;
const u8 *cdb = scmd->cmnd;
struct ata_taskfile *tf = &qc->tf;
u8 secp = cdb[1];
bool send = (cdb[0] == SECURITY_PROTOCOL_OUT);
u16 spsp = get_unaligned_be16(&cdb[2]);
u32 len = get_unaligned_be32(&cdb[6]);
bool dma = !(qc->dev->flags & ATA_DFLAG_PIO);
/*
* We don't support the ATA "security" protocol.
*/
if (secp == 0xef) {
ata_scsi_set_invalid_field(qc->dev, scmd, 1, 0);
return 1;
}
if (cdb[4] & 7) { /* INC_512 */
if (len > 0xffff) {
ata_scsi_set_invalid_field(qc->dev, scmd, 6, 0);
return 1;
}
} else {
if (len > 0x01fffe00) {
ata_scsi_set_invalid_field(qc->dev, scmd, 6, 0);
return 1;
}
/* convert to the sector-based ATA addressing */
len = (len + 511) / 512;
}
tf->protocol = dma ? ATA_PROT_DMA : ATA_PROT_PIO;
tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR | ATA_TFLAG_LBA;
if (send)
tf->flags |= ATA_TFLAG_WRITE;
tf->command = ata_scsi_trusted_op(len, send, dma);
tf->feature = secp;
tf->lbam = spsp & 0xff;
tf->lbah = spsp >> 8;
if (len) {
tf->nsect = len & 0xff;
tf->lbal = len >> 8;
} else {
if (!send)
tf->lbah = (1 << 7);
}
ata_qc_set_pc_nbytes(qc);
return 0;
}
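/*
* Illustrative example of the length handling above: without INC_512 a
* 1024-byte SECURITY PROTOCOL transfer length is rounded up to
* (1024 + 511) / 512 = 2 sectors, and that sector count is then split
* across COUNT (low byte) and LBA low (high byte) of the TRUSTED
* SEND/RECEIVE taskfile.
*/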
/**
* ata_scsi_var_len_cdb_xlat - SATL variable length CDB to Handler
* @qc: Command to be translated
*
* Translate a SCSI variable length CDB to specified commands.
* It checks a service action value in CDB to call corresponding handler.
*
* RETURNS:
* Zero on success, non-zero on failure
*
*/
static unsigned int ata_scsi_var_len_cdb_xlat(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *scmd = qc->scsicmd;
const u8 *cdb = scmd->cmnd;
const u16 sa = get_unaligned_be16(&cdb[8]);
/*
* If the service action represents an ATA pass-thru(32) command,
* pass it to the ata_scsi_pass_thru() handler.
*/
if (sa == ATA_32)
return ata_scsi_pass_thru(qc);
/* unsupported service action */
return 1;
}
/**
* ata_get_xlat_func - check if SCSI to ATA translation is possible
* @dev: ATA device
* @cmd: SCSI command opcode to consider
*
* Look up the SCSI command given, and determine whether the
* SCSI command is to be translated or simulated.
*
* RETURNS:
* Pointer to translation function if possible, %NULL if not.
*/
static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd)
{
switch (cmd) {
case READ_6:
case READ_10:
case READ_16:
case WRITE_6:
case WRITE_10:
case WRITE_16:
return ata_scsi_rw_xlat;
case WRITE_SAME_16:
return ata_scsi_write_same_xlat;
case SYNCHRONIZE_CACHE:
case SYNCHRONIZE_CACHE_16:
if (ata_try_flush_cache(dev))
return ata_scsi_flush_xlat;
break;
case VERIFY:
case VERIFY_16:
return ata_scsi_verify_xlat;
case ATA_12:
case ATA_16:
return ata_scsi_pass_thru;
case VARIABLE_LENGTH_CMD:
return ata_scsi_var_len_cdb_xlat;
case MODE_SELECT:
case MODE_SELECT_10:
return ata_scsi_mode_select_xlat;
case ZBC_IN:
return ata_scsi_zbc_in_xlat;
case ZBC_OUT:
return ata_scsi_zbc_out_xlat;
case SECURITY_PROTOCOL_IN:
case SECURITY_PROTOCOL_OUT:
if (!(dev->flags & ATA_DFLAG_TRUSTED))
break;
return ata_scsi_security_inout_xlat;
case START_STOP:
return ata_scsi_start_stop_xlat;
}
return NULL;
}
int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev)
{
struct ata_port *ap = dev->link->ap;
u8 scsi_op = scmd->cmnd[0];
ata_xlat_func_t xlat_func;
/*
* scsi_queue_rq() will defer commands if scsi_host_in_recovery().
* However, this check is done without holding the ap->lock (a libata
* specific lock), so we can have received an error irq since then,
* therefore we must check if EH is pending, while holding ap->lock.
*/
if (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS))
return SCSI_MLQUEUE_DEVICE_BUSY;
if (unlikely(!scmd->cmd_len))
goto bad_cdb_len;
if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
if (unlikely(scmd->cmd_len > dev->cdb_len))
goto bad_cdb_len;
xlat_func = ata_get_xlat_func(dev, scsi_op);
} else if (likely((scsi_op != ATA_16) || !atapi_passthru16)) {
/* relay SCSI command to ATAPI device */
int len = COMMAND_SIZE(scsi_op);
if (unlikely(len > scmd->cmd_len ||
len > dev->cdb_len ||
scmd->cmd_len > ATAPI_CDB_LEN))
goto bad_cdb_len;
xlat_func = atapi_xlat;
} else {
/* ATA_16 passthru, treat as an ATA command */
if (unlikely(scmd->cmd_len > 16))
goto bad_cdb_len;
xlat_func = ata_get_xlat_func(dev, scsi_op);
}
if (xlat_func)
return ata_scsi_translate(dev, scmd, xlat_func);
ata_scsi_simulate(dev, scmd);
return 0;
bad_cdb_len:
scmd->result = DID_ERROR << 16;
scsi_done(scmd);
return 0;
}
/**
* ata_scsi_queuecmd - Issue SCSI cdb to libata-managed device
* @shost: SCSI host of command to be sent
* @cmd: SCSI command to be sent
*
* In some cases, this function translates SCSI commands into
* ATA taskfiles, and queues the taskfiles to be sent to
* hardware. In other cases, this function simulates a
* SCSI device by evaluating and responding to certain
* SCSI commands. This creates the overall effect of
* ATA and ATAPI devices appearing as SCSI devices.
*
* LOCKING:
* ATA host lock
*
* RETURNS:
* Return value from __ata_scsi_queuecmd() if @cmd can be queued,
* 0 otherwise.
*/
int ata_scsi_queuecmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
struct ata_port *ap;
struct ata_device *dev;
struct scsi_device *scsidev = cmd->device;
int rc = 0;
unsigned long irq_flags;
ap = ata_shost_to_port(shost);
spin_lock_irqsave(ap->lock, irq_flags);
dev = ata_scsi_find_dev(ap, scsidev);
if (likely(dev))
rc = __ata_scsi_queuecmd(cmd, dev);
else {
cmd->result = (DID_BAD_TARGET << 16);
scsi_done(cmd);
}
spin_unlock_irqrestore(ap->lock, irq_flags);
return rc;
}
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
/**
* ata_scsi_simulate - simulate SCSI command on ATA device
* @dev: the target device
* @cmd: SCSI command being sent to device.
*
* Interprets and directly executes a select list of SCSI commands
* that can be handled internally.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
{
struct ata_scsi_args args;
const u8 *scsicmd = cmd->cmnd;
u8 tmp8;
args.dev = dev;
args.id = dev->id;
args.cmd = cmd;
switch(scsicmd[0]) {
case INQUIRY:
if (scsicmd[1] & 2) /* is CmdDt set? */
ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? */
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std);
else switch (scsicmd[2]) {
case 0x00:
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_00);
break;
case 0x80:
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_80);
break;
case 0x83:
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83);
break;
case 0x89:
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_89);
break;
case 0xb0:
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b0);
break;
case 0xb1:
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b1);
break;
case 0xb2:
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b2);
break;
case 0xb6:
if (dev->flags & ATA_DFLAG_ZAC)
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b6);
else
ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
break;
case 0xb9:
if (dev->cpr_log)
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b9);
else
ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
break;
default:
ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
break;
}
break;
case MODE_SENSE:
case MODE_SENSE_10:
ata_scsi_rbuf_fill(&args, ata_scsiop_mode_sense);
break;
case READ_CAPACITY:
ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
break;
case SERVICE_ACTION_IN_16:
if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16)
ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
else
ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
break;
case REPORT_LUNS:
ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns);
break;
case REQUEST_SENSE:
ata_scsi_set_sense(dev, cmd, 0, 0, 0);
break;
/* if we reach this, then writeback caching is disabled,
* turning this into a no-op.
*/
case SYNCHRONIZE_CACHE:
case SYNCHRONIZE_CACHE_16:
fallthrough;
/* no-op's, complete with success */
case REZERO_UNIT:
case SEEK_6:
case SEEK_10:
case TEST_UNIT_READY:
break;
case SEND_DIAGNOSTIC:
tmp8 = scsicmd[1] & ~(1 << 3);
if (tmp8 != 0x4 || scsicmd[3] || scsicmd[4])
ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
break;
case MAINTENANCE_IN:
if (scsicmd[1] == MI_REPORT_SUPPORTED_OPERATION_CODES)
ata_scsi_rbuf_fill(&args, ata_scsiop_maint_in);
else
ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
break;
/* all other commands */
default:
ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x20, 0x0);
/* "Invalid command operation code" */
break;
}
scsi_done(cmd);
}
int ata_scsi_add_hosts(struct ata_host *host, const struct scsi_host_template *sht)
{
int i, rc;
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
struct Scsi_Host *shost;
rc = -ENOMEM;
shost = scsi_host_alloc(sht, sizeof(struct ata_port *));
if (!shost)
goto err_alloc;
shost->eh_noresume = 1;
*(struct ata_port **)&shost->hostdata[0] = ap;
ap->scsi_host = shost;
shost->transportt = ata_scsi_transport_template;
shost->unique_id = ap->print_id;
shost->max_id = 16;
shost->max_lun = 1;
shost->max_channel = 1;
shost->max_cmd_len = 32;
/* Schedule policy is determined by ->qc_defer()
* callback and it needs to see every deferred qc.
* Set host_blocked to 1 to prevent SCSI midlayer from
* automatically deferring requests.
*/
shost->max_host_blocked = 1;
rc = scsi_add_host_with_dma(shost, &ap->tdev, ap->host->dev);
if (rc)
goto err_alloc;
}
return 0;
err_alloc:
while (--i >= 0) {
struct Scsi_Host *shost = host->ports[i]->scsi_host;
/* scsi_host_put() is in ata_devres_release() */
scsi_remove_host(shost);
}
return rc;
}
#ifdef CONFIG_OF
static void ata_scsi_assign_ofnode(struct ata_device *dev, struct ata_port *ap)
{
struct scsi_device *sdev = dev->sdev;
struct device *d = ap->host->dev;
struct device_node *np = d->of_node;
struct device_node *child;
for_each_available_child_of_node(np, child) {
int ret;
u32 val;
ret = of_property_read_u32(child, "reg", &val);
if (ret)
continue;
if (val == dev->devno) {
dev_dbg(d, "found matching device node\n");
sdev->sdev_gendev.of_node = child;
return;
}
}
}
#else
static void ata_scsi_assign_ofnode(struct ata_device *dev, struct ata_port *ap)
{
}
#endif
void ata_scsi_scan_host(struct ata_port *ap, int sync)
{
int tries = 5;
struct ata_device *last_failed_dev = NULL;
struct ata_link *link;
struct ata_device *dev;
repeat:
ata_for_each_link(link, ap, EDGE) {
ata_for_each_dev(dev, link, ENABLED) {
struct scsi_device *sdev;
int channel = 0, id = 0;
if (dev->sdev)
continue;
if (ata_is_host_link(link))
id = dev->devno;
else
channel = link->pmp;
sdev = __scsi_add_device(ap->scsi_host, channel, id, 0,
NULL);
if (!IS_ERR(sdev)) {
dev->sdev = sdev;
ata_scsi_assign_ofnode(dev, ap);
scsi_device_put(sdev);
} else {
dev->sdev = NULL;
}
}
}
/* If we scanned while EH was in progress or allocation
* failure occurred, scan would have failed silently. Check
* whether all devices are attached.
*/
ata_for_each_link(link, ap, EDGE) {
ata_for_each_dev(dev, link, ENABLED) {
if (!dev->sdev)
goto exit_loop;
}
}
exit_loop:
if (!link)
return;
/* we're missing some SCSI devices */
if (sync) {
/* If caller requested synchronous scan and we've made
* any progress, sleep briefly and repeat.
*/
if (dev != last_failed_dev) {
msleep(100);
last_failed_dev = dev;
goto repeat;
}
/* We might be failing to detect boot device, give it
* a few more chances.
*/
if (--tries) {
msleep(100);
goto repeat;
}
ata_port_err(ap,
"WARNING: synchronous SCSI scan failed without making any progress, switching to async\n");
}
queue_delayed_work(system_long_wq, &ap->hotplug_task,
round_jiffies_relative(HZ));
}
/**
* ata_scsi_offline_dev - offline attached SCSI device
* @dev: ATA device to offline attached SCSI device for
*
* This function is called from ata_eh_hotplug() and responsible
* for taking the SCSI device attached to @dev offline. This
* function is called with host lock which protects dev->sdev
* against clearing.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*
* RETURNS:
* 1 if attached SCSI device exists, 0 otherwise.
*/
int ata_scsi_offline_dev(struct ata_device *dev)
{
if (dev->sdev) {
scsi_device_set_state(dev->sdev, SDEV_OFFLINE);
return 1;
}
return 0;
}
/**
* ata_scsi_remove_dev - remove attached SCSI device
* @dev: ATA device to remove attached SCSI device for
*
* This function is called from ata_eh_scsi_hotplug() and
* responsible for removing the SCSI device attached to @dev.
*
* LOCKING:
* Kernel thread context (may sleep).
*/
static void ata_scsi_remove_dev(struct ata_device *dev)
{
struct ata_port *ap = dev->link->ap;
struct scsi_device *sdev;
unsigned long flags;
/* Alas, we need to grab scan_mutex to ensure SCSI device
* state doesn't change underneath us and thus
* scsi_device_get() always succeeds. The mutex locking can
* be removed if there is __scsi_device_get() interface which
* increments reference counts regardless of device state.
*/
mutex_lock(&ap->scsi_host->scan_mutex);
spin_lock_irqsave(ap->lock, flags);
/* clearing dev->sdev is protected by host lock */
sdev = dev->sdev;
dev->sdev = NULL;
if (sdev) {
/* If user initiated unplug races with us, sdev can go
* away underneath us after the host lock and
* scan_mutex are released. Hold onto it.
*/
if (scsi_device_get(sdev) == 0) {
/* The following ensures the attached sdev is
* offline on return from ata_scsi_offline_dev()
* regardless it wins or loses the race
* against this function.
*/
scsi_device_set_state(sdev, SDEV_OFFLINE);
} else {
WARN_ON(1);
sdev = NULL;
}
}
spin_unlock_irqrestore(ap->lock, flags);
mutex_unlock(&ap->scsi_host->scan_mutex);
if (sdev) {
ata_dev_info(dev, "detaching (SCSI %s)\n",
dev_name(&sdev->sdev_gendev));
scsi_remove_device(sdev);
scsi_device_put(sdev);
}
}
static void ata_scsi_handle_link_detach(struct ata_link *link)
{
struct ata_port *ap = link->ap;
struct ata_device *dev;
ata_for_each_dev(dev, link, ALL) {
unsigned long flags;
if (!(dev->flags & ATA_DFLAG_DETACHED))
continue;
spin_lock_irqsave(ap->lock, flags);
dev->flags &= ~ATA_DFLAG_DETACHED;
spin_unlock_irqrestore(ap->lock, flags);
if (zpodd_dev_enabled(dev))
zpodd_exit(dev);
ata_scsi_remove_dev(dev);
}
}
/**
* ata_scsi_media_change_notify - send media change event
* @dev: Pointer to the disk device with media change event
*
* Tell the block layer to send a media change notification
* event.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
void ata_scsi_media_change_notify(struct ata_device *dev)
{
if (dev->sdev)
sdev_evt_send_simple(dev->sdev, SDEV_EVT_MEDIA_CHANGE,
GFP_ATOMIC);
}
/**
* ata_scsi_hotplug - SCSI part of hotplug
* @work: Pointer to ATA port to perform SCSI hotplug on
*
* Perform SCSI part of hotplug. It's executed from a separate
* workqueue after EH completes. This is necessary because SCSI
* hot plugging requires working EH, and hot unplugging is
* synchronized with hot plugging using a mutex.
*
* LOCKING:
* Kernel thread context (may sleep).
*/
void ata_scsi_hotplug(struct work_struct *work)
{
struct ata_port *ap =
container_of(work, struct ata_port, hotplug_task.work);
int i;
if (ap->pflags & ATA_PFLAG_UNLOADING)
return;
mutex_lock(&ap->scsi_scan_mutex);
/* Unplug detached devices. We cannot use link iterator here
* because PMP links have to be scanned even if PMP is
* currently not attached. Iterate manually.
*/
ata_scsi_handle_link_detach(&ap->link);
if (ap->pmp_link)
for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
ata_scsi_handle_link_detach(&ap->pmp_link[i]);
/* scan for new ones */
ata_scsi_scan_host(ap, 0);
mutex_unlock(&ap->scsi_scan_mutex);
}
/**
* ata_scsi_user_scan - indication for user-initiated bus scan
* @shost: SCSI host to scan
* @channel: Channel to scan
* @id: ID to scan
* @lun: LUN to scan
*
* This function is called when user explicitly requests bus
* scan. Set probe pending flag and invoke EH.
*
* LOCKING:
* SCSI layer (we don't care)
*
* RETURNS:
* Zero.
*/
int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
unsigned int id, u64 lun)
{
struct ata_port *ap = ata_shost_to_port(shost);
unsigned long flags;
int devno, rc = 0;
if (lun != SCAN_WILD_CARD && lun)
return -EINVAL;
if (!sata_pmp_attached(ap)) {
if (channel != SCAN_WILD_CARD && channel)
return -EINVAL;
devno = id;
} else {
if (id != SCAN_WILD_CARD && id)
return -EINVAL;
devno = channel;
}
spin_lock_irqsave(ap->lock, flags);
if (devno == SCAN_WILD_CARD) {
struct ata_link *link;
ata_for_each_link(link, ap, EDGE) {
struct ata_eh_info *ehi = &link->eh_info;
ehi->probe_mask |= ATA_ALL_DEVICES;
ehi->action |= ATA_EH_RESET;
}
} else {
struct ata_device *dev = ata_find_dev(ap, devno);
if (dev) {
struct ata_eh_info *ehi = &dev->link->eh_info;
ehi->probe_mask |= 1 << dev->devno;
ehi->action |= ATA_EH_RESET;
} else
rc = -EINVAL;
}
if (rc == 0) {
ata_port_schedule_eh(ap);
spin_unlock_irqrestore(ap->lock, flags);
ata_port_wait_eh(ap);
} else
spin_unlock_irqrestore(ap->lock, flags);
return rc;
}
/**
* ata_scsi_dev_rescan - initiate scsi_rescan_device()
* @work: Pointer to ATA port to perform scsi_rescan_device()
*
* After ATA pass thru (SAT) commands are executed successfully,
* libata needs to propagate the changes to the SCSI layer.
*
* LOCKING:
* Kernel thread context (may sleep).
*/
void ata_scsi_dev_rescan(struct work_struct *work)
{
struct ata_port *ap =
container_of(work, struct ata_port, scsi_rescan_task.work);
struct ata_link *link;
struct ata_device *dev;
unsigned long flags;
bool delay_rescan = false;
mutex_lock(&ap->scsi_scan_mutex);
spin_lock_irqsave(ap->lock, flags);
ata_for_each_link(link, ap, EDGE) {
ata_for_each_dev(dev, link, ENABLED) {
struct scsi_device *sdev = dev->sdev;
if (!sdev)
continue;
if (scsi_device_get(sdev))
continue;
/*
* If the rescan work was scheduled because of a resume
* event, the port is already fully resumed, but the
* SCSI device may not yet be fully resumed. In such
* case, executing scsi_rescan_device() may cause a
* deadlock with the PM code on device_lock(). Prevent
* this by giving up and retrying rescan after a short
* delay.
*/
delay_rescan = sdev->sdev_gendev.power.is_suspended;
if (delay_rescan) {
scsi_device_put(sdev);
break;
}
spin_unlock_irqrestore(ap->lock, flags);
scsi_rescan_device(sdev);
scsi_device_put(sdev);
spin_lock_irqsave(ap->lock, flags);
}
}
spin_unlock_irqrestore(ap->lock, flags);
mutex_unlock(&ap->scsi_scan_mutex);
if (delay_rescan)
schedule_delayed_work(&ap->scsi_rescan_task,
msecs_to_jiffies(5));
}
| linux-master | drivers/ata/libata-scsi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* sata_via.c - VIA Serial ATA controllers
*
* Maintained by: Tejun Heo <[email protected]>
* Please ALWAYS copy [email protected]
* on emails.
*
* Copyright 2003-2004 Red Hat, Inc. All rights reserved.
* Copyright 2003-2004 Jeff Garzik
*
* libata documentation is available via 'make {ps|pdf}docs',
* as Documentation/driver-api/libata.rst
*
* Hardware documentation available under NDA.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "sata_via"
#define DRV_VERSION "2.6"
/*
* vt8251 is different from other sata controllers of VIA. It has two
* channels, each channel has both Master and Slave slot.
*/
enum board_ids_enum {
vt6420,
vt6421,
vt8251,
};
enum {
SATA_CHAN_ENAB = 0x40, /* SATA channel enable */
SATA_INT_GATE = 0x41, /* SATA interrupt gating */
SATA_NATIVE_MODE = 0x42, /* Native mode enable */
SVIA_MISC_3 = 0x46, /* Miscellaneous Control III */
PATA_UDMA_TIMING = 0xB3, /* PATA timing for DMA / cable detect */
PATA_PIO_TIMING = 0xAB, /* PATA timing register */
PORT0 = (1 << 1),
PORT1 = (1 << 0),
ALL_PORTS = PORT0 | PORT1,
NATIVE_MODE_ALL = (1 << 7) | (1 << 6) | (1 << 5) | (1 << 4),
SATA_EXT_PHY = (1 << 6), /* 0==use PATA, 1==ext phy */
SATA_HOTPLUG = (1 << 5), /* enable IRQ on hotplug */
};
struct svia_priv {
bool wd_workaround;
};
static int vt6420_hotplug;
module_param_named(vt6420_hotplug, vt6420_hotplug, int, 0644);
MODULE_PARM_DESC(vt6420_hotplug, "Enable hot-plug support for VT6420 (0=Don't support, 1=support)");
static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM_SLEEP
static int svia_pci_device_resume(struct pci_dev *pdev);
#endif
static int svia_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int vt8251_scr_read(struct ata_link *link, unsigned int scr, u32 *val);
static int vt8251_scr_write(struct ata_link *link, unsigned int scr, u32 val);
static void svia_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
static void svia_noop_freeze(struct ata_port *ap);
static int vt6420_prereset(struct ata_link *link, unsigned long deadline);
static void vt6420_bmdma_start(struct ata_queued_cmd *qc);
static int vt6421_pata_cable_detect(struct ata_port *ap);
static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev);
static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev);
static void vt6421_error_handler(struct ata_port *ap);
static const struct pci_device_id svia_pci_tbl[] = {
{ PCI_VDEVICE(VIA, 0x5337), vt6420 },
{ PCI_VDEVICE(VIA, 0x0591), vt6420 }, /* 2 sata chnls (Master) */
{ PCI_VDEVICE(VIA, 0x3149), vt6420 }, /* 2 sata chnls (Master) */
{ PCI_VDEVICE(VIA, 0x3249), vt6421 }, /* 2 sata chnls, 1 pata chnl */
{ PCI_VDEVICE(VIA, 0x5372), vt6420 },
{ PCI_VDEVICE(VIA, 0x7372), vt6420 },
{ PCI_VDEVICE(VIA, 0x5287), vt8251 }, /* 2 sata chnls (Master/Slave) */
{ PCI_VDEVICE(VIA, 0x9000), vt8251 },
{ } /* terminate list */
};
static struct pci_driver svia_pci_driver = {
.name = DRV_NAME,
.id_table = svia_pci_tbl,
.probe = svia_init_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = svia_pci_device_resume,
#endif
.remove = ata_pci_remove_one,
};
static const struct scsi_host_template svia_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static struct ata_port_operations svia_base_ops = {
.inherits = &ata_bmdma_port_ops,
.sff_tf_load = svia_tf_load,
};
static struct ata_port_operations vt6420_sata_ops = {
.inherits = &svia_base_ops,
.freeze = svia_noop_freeze,
.prereset = vt6420_prereset,
.bmdma_start = vt6420_bmdma_start,
};
static struct ata_port_operations vt6421_pata_ops = {
.inherits = &svia_base_ops,
.cable_detect = vt6421_pata_cable_detect,
.set_piomode = vt6421_set_pio_mode,
.set_dmamode = vt6421_set_dma_mode,
};
static struct ata_port_operations vt6421_sata_ops = {
.inherits = &svia_base_ops,
.scr_read = svia_scr_read,
.scr_write = svia_scr_write,
.error_handler = vt6421_error_handler,
};
static struct ata_port_operations vt8251_ops = {
.inherits = &svia_base_ops,
.hardreset = sata_std_hardreset,
.scr_read = vt8251_scr_read,
.scr_write = vt8251_scr_write,
};
static const struct ata_port_info vt6420_port_info = {
.flags = ATA_FLAG_SATA,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &vt6420_sata_ops,
};
static const struct ata_port_info vt6421_sport_info = {
.flags = ATA_FLAG_SATA,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &vt6421_sata_ops,
};
static const struct ata_port_info vt6421_pport_info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
/* No MWDMA */
.udma_mask = ATA_UDMA6,
.port_ops = &vt6421_pata_ops,
};
static const struct ata_port_info vt8251_port_info = {
.flags = ATA_FLAG_SATA | ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &vt8251_ops,
};
MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("SCSI low-level driver for VIA SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, svia_pci_tbl);
MODULE_VERSION(DRV_VERSION);
static int svia_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
if (sc_reg > SCR_CONTROL)
return -EINVAL;
*val = ioread32(link->ap->ioaddr.scr_addr + (4 * sc_reg));
return 0;
}
static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
if (sc_reg > SCR_CONTROL)
return -EINVAL;
iowrite32(val, link->ap->ioaddr.scr_addr + (4 * sc_reg));
return 0;
}
static int vt8251_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
{
static const u8 ipm_tbl[] = { 1, 2, 6, 0 };
struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
int slot = 2 * link->ap->port_no + link->pmp;
u32 v = 0;
u8 raw;
switch (scr) {
case SCR_STATUS:
pci_read_config_byte(pdev, 0xA0 + slot, &raw);
/* read the DET field, bit0 and 1 of the config byte */
v |= raw & 0x03;
/* read the SPD field, bit4 of the config byte */
if (raw & (1 << 4))
v |= 0x02 << 4;
else
v |= 0x01 << 4;
/* read the IPM field, bit2 and 3 of the config byte */
v |= ipm_tbl[(raw >> 2) & 0x3];
break;
case SCR_ERROR:
/* devices other than the 5287 use 0xA8 as the base */
WARN_ON(pdev->device != 0x5287);
pci_read_config_dword(pdev, 0xB0 + slot * 4, &v);
break;
case SCR_CONTROL:
pci_read_config_byte(pdev, 0xA4 + slot, &raw);
/* read the DET field, bit0 and bit1 */
v |= ((raw & 0x02) << 1) | (raw & 0x01);
/* read the IPM field, bit2 and bit3 */
v |= ((raw >> 2) & 0x03) << 8;
break;
default:
return -EINVAL;
}
*val = v;
return 0;
}
static int vt8251_scr_write(struct ata_link *link, unsigned int scr, u32 val)
{
struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
int slot = 2 * link->ap->port_no + link->pmp;
u32 v = 0;
switch (scr) {
case SCR_ERROR:
/* devices other than the 5287 use 0xA8 as the base */
WARN_ON(pdev->device != 0x5287);
pci_write_config_dword(pdev, 0xB0 + slot * 4, val);
return 0;
case SCR_CONTROL:
/* set the DET field */
v |= ((val & 0x4) >> 1) | (val & 0x1);
/* set the IPM field */
v |= ((val >> 8) & 0x3) << 2;
pci_write_config_byte(pdev, 0xA4 + slot, v);
return 0;
default:
return -EINVAL;
}
}
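/*
* Worked example of the SControl mapping above (illustrative): writing
* SControl 0x301 (DET = 1, IPM = 3) packs the DET bits into config-space
* bits 1:0 and the low two IPM bits into bits 3:2, giving a config byte
* of 0x0d written at offset 0xA4 + slot.
*/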
/**
* svia_tf_load - send taskfile registers to host controller
* @ap: Port to which output is sent
* @tf: ATA taskfile register set
*
* Outputs ATA taskfile to standard ATA host controller.
*
* This works around an internal bug of VIA chipsets, which reset
* the device register after the IEN bit in the ctl register is
* changed.
*/
static void svia_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
struct ata_taskfile ttf;
if (tf->ctl != ap->last_ctl) {
ttf = *tf;
ttf.flags |= ATA_TFLAG_DEVICE;
tf = &ttf;
}
ata_sff_tf_load(ap, tf);
}
static void svia_noop_freeze(struct ata_port *ap)
{
/* Some VIA controllers choke if ATA_NIEN is manipulated in
* certain ways. Leave it alone and just clear the pending IRQ.
*/
ap->ops->sff_check_status(ap);
ata_bmdma_irq_clear(ap);
}
/**
* vt6420_prereset - prereset for vt6420
* @link: target ATA link
* @deadline: deadline jiffies for the operation
*
* SCR registers on vt6420 are pieces of shit and may hang the
* whole machine completely if accessed with the wrong timing.
* To avoid such catastrophe, vt6420 doesn't provide generic SCR
* access operations, but uses SStatus and SControl only during
* boot probing in controlled way.
*
* As the old (pre EH update) probing code is proven to work, we
* strictly follow the access pattern.
*
* LOCKING:
* Kernel thread context (may sleep)
*
* RETURNS:
* 0 on success, -errno otherwise.
*/
static int vt6420_prereset(struct ata_link *link, unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct ata_eh_context *ehc = &ap->link.eh_context;
unsigned long timeout = jiffies + (HZ * 5);
u32 sstatus, scontrol;
int online;
/* don't do any SCR stuff if we're not loading */
if (!(ap->pflags & ATA_PFLAG_LOADING))
goto skip_scr;
/* Resume phy. This is the old SATA resume sequence */
svia_scr_write(link, SCR_CONTROL, 0x300);
svia_scr_read(link, SCR_CONTROL, &scontrol); /* flush */
/* wait for phy to become ready, if necessary */
do {
ata_msleep(link->ap, 200);
svia_scr_read(link, SCR_STATUS, &sstatus);
if ((sstatus & 0xf) != 1)
break;
} while (time_before(jiffies, timeout));
/* open code sata_print_link_status() */
svia_scr_read(link, SCR_STATUS, &sstatus);
svia_scr_read(link, SCR_CONTROL, &scontrol);
online = (sstatus & 0xf) == 0x3;
ata_port_info(ap,
"SATA link %s 1.5 Gbps (SStatus %X SControl %X)\n",
online ? "up" : "down", sstatus, scontrol);
/* SStatus is read one more time */
svia_scr_read(link, SCR_STATUS, &sstatus);
if (!online) {
/* tell EH to bail */
ehc->i.action &= ~ATA_EH_RESET;
return 0;
}
skip_scr:
/* wait for !BSY */
ata_sff_wait_ready(link, deadline);
return 0;
}
static void vt6420_bmdma_start(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
if ((qc->tf.command == ATA_CMD_PACKET) &&
(qc->scsicmd->sc_data_direction == DMA_TO_DEVICE)) {
/* Prevents corruption on some ATAPI burners */
ata_sff_pause(ap);
}
ata_bmdma_start(qc);
}
static int vt6421_pata_cable_detect(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u8 tmp;
pci_read_config_byte(pdev, PATA_UDMA_TIMING, &tmp);
if (tmp & 0x10)
return ATA_CBL_PATA40;
return ATA_CBL_PATA80;
}
static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
static const u8 pio_bits[] = { 0xA8, 0x65, 0x65, 0x31, 0x20 };
pci_write_config_byte(pdev, PATA_PIO_TIMING - adev->devno,
pio_bits[adev->pio_mode - XFER_PIO_0]);
}
static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
static const u8 udma_bits[] = { 0xEE, 0xE8, 0xE6, 0xE4, 0xE2, 0xE1, 0xE0, 0xE0 };
pci_write_config_byte(pdev, PATA_UDMA_TIMING - adev->devno,
udma_bits[adev->dma_mode - XFER_UDMA_0]);
}
static const unsigned int svia_bar_sizes[] = {
8, 4, 8, 4, 16, 256
};
static const unsigned int vt6421_bar_sizes[] = {
16, 16, 16, 16, 32, 128
};
static void __iomem *svia_scr_addr(void __iomem *addr, unsigned int port)
{
return addr + (port * 128);
}
static void __iomem *vt6421_scr_addr(void __iomem *addr, unsigned int port)
{
return addr + (port * 64);
}
static void vt6421_init_addrs(struct ata_port *ap)
{
void __iomem * const * iomap = ap->host->iomap;
void __iomem *reg_addr = iomap[ap->port_no];
void __iomem *bmdma_addr = iomap[4] + (ap->port_no * 8);
struct ata_ioports *ioaddr = &ap->ioaddr;
ioaddr->cmd_addr = reg_addr;
ioaddr->altstatus_addr =
ioaddr->ctl_addr = (void __iomem *)
((unsigned long)(reg_addr + 8) | ATA_PCI_CTL_OFS);
ioaddr->bmdma_addr = bmdma_addr;
ioaddr->scr_addr = vt6421_scr_addr(iomap[5], ap->port_no);
ata_sff_std_ports(ioaddr);
ata_port_pbar_desc(ap, ap->port_no, -1, "port");
ata_port_pbar_desc(ap, 4, ap->port_no * 8, "bmdma");
}
static int vt6420_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
{
const struct ata_port_info *ppi[] = { &vt6420_port_info, NULL };
struct ata_host *host;
int rc;
if (vt6420_hotplug) {
ppi[0]->port_ops->scr_read = svia_scr_read;
ppi[0]->port_ops->scr_write = svia_scr_write;
}
rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
if (rc)
return rc;
*r_host = host;
rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME);
if (rc) {
dev_err(&pdev->dev, "failed to iomap PCI BAR 5\n");
return rc;
}
host->ports[0]->ioaddr.scr_addr = svia_scr_addr(host->iomap[5], 0);
host->ports[1]->ioaddr.scr_addr = svia_scr_addr(host->iomap[5], 1);
return 0;
}
static int vt6421_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
{
const struct ata_port_info *ppi[] =
{ &vt6421_sport_info, &vt6421_sport_info, &vt6421_pport_info };
struct ata_host *host;
int i, rc;
*r_host = host = ata_host_alloc_pinfo(&pdev->dev, ppi, ARRAY_SIZE(ppi));
if (!host) {
dev_err(&pdev->dev, "failed to allocate host\n");
return -ENOMEM;
}
rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME);
if (rc) {
dev_err(&pdev->dev, "failed to request/iomap PCI BARs (errno=%d)\n",
rc);
return rc;
}
host->iomap = pcim_iomap_table(pdev);
for (i = 0; i < host->n_ports; i++)
vt6421_init_addrs(host->ports[i]);
return dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
}
static int vt8251_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
{
const struct ata_port_info *ppi[] = { &vt8251_port_info, NULL };
struct ata_host *host;
int i, rc;
rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
if (rc)
return rc;
*r_host = host;
rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME);
if (rc) {
dev_err(&pdev->dev, "failed to iomap PCI BAR 5\n");
return rc;
}
/* 8251 hosts four sata ports as M/S of the two channels */
for (i = 0; i < host->n_ports; i++)
ata_slave_link_init(host->ports[i]);
return 0;
}
static void svia_wd_fix(struct pci_dev *pdev)
{
u8 tmp8;
pci_read_config_byte(pdev, 0x52, &tmp8);
pci_write_config_byte(pdev, 0x52, tmp8 | BIT(2));
}
static irqreturn_t vt642x_interrupt(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
irqreturn_t rc = ata_bmdma_interrupt(irq, dev_instance);
/* if the IRQ was not handled, it might be a hotplug IRQ */
if (rc != IRQ_HANDLED) {
u32 serror;
unsigned long flags;
spin_lock_irqsave(&host->lock, flags);
/* check for hotplug on port 0 */
svia_scr_read(&host->ports[0]->link, SCR_ERROR, &serror);
if (serror & SERR_PHYRDY_CHG) {
ata_ehi_hotplugged(&host->ports[0]->link.eh_info);
ata_port_freeze(host->ports[0]);
rc = IRQ_HANDLED;
}
/* check for hotplug on port 1 */
svia_scr_read(&host->ports[1]->link, SCR_ERROR, &serror);
if (serror & SERR_PHYRDY_CHG) {
ata_ehi_hotplugged(&host->ports[1]->link.eh_info);
ata_port_freeze(host->ports[1]);
rc = IRQ_HANDLED;
}
spin_unlock_irqrestore(&host->lock, flags);
}
return rc;
}
static void vt6421_error_handler(struct ata_port *ap)
{
struct svia_priv *hpriv = ap->host->private_data;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u32 serror;
/* see svia_configure() for description */
if (!hpriv->wd_workaround) {
svia_scr_read(&ap->link, SCR_ERROR, &serror);
if (serror == 0x1000500) {
ata_port_warn(ap, "Incompatible drive: enabling workaround. This slows down transfer rate to ~60 MB/s");
svia_wd_fix(pdev);
hpriv->wd_workaround = true;
ap->link.eh_context.i.flags |= ATA_EHI_QUIET;
}
}
ata_sff_error_handler(ap);
}
static void svia_configure(struct pci_dev *pdev, int board_id,
struct svia_priv *hpriv)
{
u8 tmp8;
pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &tmp8);
dev_info(&pdev->dev, "routed to hard irq line %d\n",
(int) (tmp8 & 0xf0) == 0xf0 ? 0 : tmp8 & 0x0f);
/* make sure SATA channels are enabled */
pci_read_config_byte(pdev, SATA_CHAN_ENAB, &tmp8);
if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
dev_dbg(&pdev->dev, "enabling SATA channels (0x%x)\n",
(int)tmp8);
tmp8 |= ALL_PORTS;
pci_write_config_byte(pdev, SATA_CHAN_ENAB, tmp8);
}
/* make sure interrupts for each channel sent to us */
pci_read_config_byte(pdev, SATA_INT_GATE, &tmp8);
if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
dev_dbg(&pdev->dev, "enabling SATA channel interrupts (0x%x)\n",
(int) tmp8);
tmp8 |= ALL_PORTS;
pci_write_config_byte(pdev, SATA_INT_GATE, tmp8);
}
/* make sure native mode is enabled */
pci_read_config_byte(pdev, SATA_NATIVE_MODE, &tmp8);
if ((tmp8 & NATIVE_MODE_ALL) != NATIVE_MODE_ALL) {
dev_dbg(&pdev->dev,
"enabling SATA channel native mode (0x%x)\n",
(int) tmp8);
tmp8 |= NATIVE_MODE_ALL;
pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
}
if ((board_id == vt6420 && vt6420_hotplug) || board_id == vt6421) {
/* enable IRQ on hotplug */
pci_read_config_byte(pdev, SVIA_MISC_3, &tmp8);
if ((tmp8 & SATA_HOTPLUG) != SATA_HOTPLUG) {
dev_dbg(&pdev->dev,
"enabling SATA hotplug (0x%x)\n",
(int) tmp8);
tmp8 |= SATA_HOTPLUG;
pci_write_config_byte(pdev, SVIA_MISC_3, tmp8);
}
}
/*
* vt6420/1 has problems talking to some drives. The following
* is the fix from Joseph Chan <[email protected]>.
*
* When host issues HOLD, device may send up to 20DW of data
* before acknowledging it with HOLDA and the host should be
* able to buffer them in FIFO. Unfortunately, some WD drives
* send up to 40DW before acknowledging HOLD and, in the
* default configuration, this ends up overflowing vt6421's
* FIFO, making the controller abort the transaction with
* R_ERR.
*
* Rx52[2] is the internal 128DW FIFO Flow control watermark
* adjusting mechanism enable bit and the default value 0
* means host will issue HOLD to device when the left FIFO
* size goes below 32DW. Setting it to 1 makes the watermark
* 64DW.
*
* https://bugzilla.kernel.org/show_bug.cgi?id=15173
* http://article.gmane.org/gmane.linux.ide/46352
* http://thread.gmane.org/gmane.linux.kernel/1062139
*
* As the fix slows down data transfer, apply it only if the error
* actually appears - see vt6421_error_handler()
* Apply the fix always on vt6420 as we don't know if SCR_ERROR can be
* read safely.
*/
if (board_id == vt6420) {
svia_wd_fix(pdev);
hpriv->wd_workaround = true;
}
}
static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
unsigned int i;
int rc;
struct ata_host *host = NULL;
int board_id = (int) ent->driver_data;
const unsigned *bar_sizes;
struct svia_priv *hpriv;
ata_print_version_once(&pdev->dev, DRV_VERSION);
rc = pcim_enable_device(pdev);
if (rc)
return rc;
if (board_id == vt6421)
bar_sizes = &vt6421_bar_sizes[0];
else
bar_sizes = &svia_bar_sizes[0];
for (i = 0; i < ARRAY_SIZE(svia_bar_sizes); i++)
if ((pci_resource_start(pdev, i) == 0) ||
(pci_resource_len(pdev, i) < bar_sizes[i])) {
dev_err(&pdev->dev,
"invalid PCI BAR %u (sz 0x%llx, val 0x%llx)\n",
i,
(unsigned long long)pci_resource_start(pdev, i),
(unsigned long long)pci_resource_len(pdev, i));
return -ENODEV;
}
switch (board_id) {
case vt6420:
rc = vt6420_prepare_host(pdev, &host);
break;
case vt6421:
rc = vt6421_prepare_host(pdev, &host);
break;
case vt8251:
rc = vt8251_prepare_host(pdev, &host);
break;
default:
rc = -EINVAL;
}
if (rc)
return rc;
hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
if (!hpriv)
return -ENOMEM;
host->private_data = hpriv;
svia_configure(pdev, board_id, hpriv);
pci_set_master(pdev);
if ((board_id == vt6420 && vt6420_hotplug) || board_id == vt6421)
return ata_host_activate(host, pdev->irq, vt642x_interrupt,
IRQF_SHARED, &svia_sht);
else
return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &svia_sht);
}
#ifdef CONFIG_PM_SLEEP
static int svia_pci_device_resume(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
struct svia_priv *hpriv = host->private_data;
int rc;
rc = ata_pci_device_do_resume(pdev);
if (rc)
return rc;
if (hpriv->wd_workaround)
svia_wd_fix(pdev);
ata_host_resume(host);
return 0;
}
#endif
module_pci_driver(svia_pci_driver);
| linux-master | drivers/ata/sata_via.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* pata_ns87410.c - National Semiconductor 87410 PATA for new ATA layer
* (C) 2006 Red Hat Inc
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_ns87410"
#define DRV_VERSION "0.4.6"
/**
* ns87410_pre_reset - probe begin
* @link: ATA link
* @deadline: deadline jiffies for the operation
*
* Check enabled ports
*/
static int ns87410_pre_reset(struct ata_link *link, unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
static const struct pci_bits ns87410_enable_bits[] = {
{ 0x43, 1, 0x08, 0x08 },
{ 0x47, 1, 0x08, 0x08 }
};
if (!pci_test_config_bits(pdev, &ns87410_enable_bits[ap->port_no]))
return -ENOENT;
return ata_sff_prereset(link, deadline);
}
/**
* ns87410_set_piomode - set initial PIO mode data
* @ap: ATA interface
* @adev: ATA device
*
* Program timing data. This is kept per channel not per device,
* and only affects the data port.
*/
static void ns87410_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int port = 0x40 + 4 * ap->port_no;
u8 idetcr, idefr;
struct ata_timing at;
static const u8 activebits[15] = {
0, 1, 2, 3, 4,
5, 5, 6, 6, 6,
6, 7, 7, 7, 7
};
static const u8 recoverbits[12] = {
0, 1, 2, 3, 4, 5, 6, 6, 7, 7, 7, 7
};
pci_read_config_byte(pdev, port + 3, &idefr);
if (ata_pio_need_iordy(adev))
idefr |= 0x04; /* IORDY enable */
else
idefr &= ~0x04;
if (ata_timing_compute(adev, adev->pio_mode, &at, 30303, 1) < 0) {
dev_err(&pdev->dev, "unknown mode %d\n", adev->pio_mode);
return;
}
at.active = clamp_val(at.active, 2, 16) - 2;
at.setup = clamp_val(at.setup, 1, 4) - 1;
at.recover = clamp_val(at.recover, 1, 12) - 1;
idetcr = (at.setup << 6) | (recoverbits[at.recover] << 3) | activebits[at.active];
pci_write_config_byte(pdev, port, idetcr);
pci_write_config_byte(pdev, port + 3, idefr);
/* We use ap->private_data as a pointer to the device currently
loaded for timing */
ap->private_data = adev;
}
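/*
 * Worked example, illustrative only: packing the IDETCR byte from the
 * clamped timing fields computed in ns87410_set_piomode() above.  With
 * setup = 3, active = 6 and recover = 8 clocks the clamped fields become
 * 2, 4 and 7, giving (2 << 6) | (recoverbits[7] << 3) | activebits[4] =
 * 0x80 | 0x30 | 0x04 = 0xB4.  The helper name is hypothetical.
 */
static inline u8 ns87410_pack_idetcr(u8 setup, u8 rec_bits, u8 act_bits)
{
	return (setup << 6) | (rec_bits << 3) | act_bits;
}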
/**
* ns87410_qc_issue - command issue
* @qc: command pending
*
* Called when the libata layer is about to issue a command. We wrap
* this interface so that we can load the correct ATA timings if
* necessary.
*/
static unsigned int ns87410_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_device *adev = qc->dev;
/* If modes have been configured and the channel data is not loaded
then load it. We have to check if pio_mode is set as the core code
does not set adev->pio_mode to XFER_PIO_0 while probing as would be
logical */
if (adev->pio_mode && adev != ap->private_data)
ns87410_set_piomode(ap, adev);
return ata_sff_qc_issue(qc);
}
static const struct scsi_host_template ns87410_sht = {
ATA_PIO_SHT(DRV_NAME),
};
static struct ata_port_operations ns87410_port_ops = {
.inherits = &ata_sff_port_ops,
.qc_issue = ns87410_qc_issue,
.cable_detect = ata_cable_40wire,
.set_piomode = ns87410_set_piomode,
.prereset = ns87410_pre_reset,
};
static int ns87410_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO3,
.port_ops = &ns87410_port_ops
};
const struct ata_port_info *ppi[] = { &info, NULL };
return ata_pci_sff_init_one(dev, ppi, &ns87410_sht, NULL, 0);
}
static const struct pci_device_id ns87410[] = {
{ PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_87410), },
{ },
};
static struct pci_driver ns87410_pci_driver = {
.name = DRV_NAME,
.id_table = ns87410,
.probe = ns87410_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
};
module_pci_driver(ns87410_pci_driver);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for Nat Semi 87410");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, ns87410);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_ns87410.c |
/*
* pata_ali.c - ALI 15x3 PATA for new ATA layer
* (C) 2005 Red Hat Inc
*
* based in part upon
* linux/drivers/ide/pci/alim15x3.c Version 0.17 2003/01/02
*
* Copyright (C) 1998-2000 Michel Aubry, Maintainer
* Copyright (C) 1998-2000 Andrzej Krzysztofowicz, Maintainer
* Copyright (C) 1999-2000 CJ, [email protected], Maintainer
*
* Copyright (C) 1998-2000 Andre Hedrick ([email protected])
* May be copied or modified under the terms of the GNU General Public License
* Copyright (C) 2002 Alan Cox <[email protected]>
* ALi (now ULi M5228) support by Clear Zhang <[email protected]>
*
* Documentation
* Chipset documentation available under NDA only
*
* TODO/CHECK
* Cannot have ATAPI on both master & slave for rev < C2 (???), but
* otherwise ATAPI DMA should work (for now, on older revisions we do
* PIO only for ATAPI)
* Review Sunblade workaround.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/dmi.h>
#define DRV_NAME "pata_ali"
#define DRV_VERSION "0.7.8"
static int ali_atapi_dma;
module_param_named(atapi_dma, ali_atapi_dma, int, 0644);
MODULE_PARM_DESC(atapi_dma, "Enable ATAPI DMA (0=disable, 1=enable)");
static struct pci_dev *ali_isa_bridge;
/*
* Cable special cases
*/
static const struct dmi_system_id cable_dmi_table[] = {
{
.ident = "HP Pavilion N5430",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_BOARD_VERSION, "OmniBook N32N-736"),
},
},
{
.ident = "Toshiba Satellite S1800-814",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "S1800-814"),
},
},
{ }
};
static int ali_cable_override(struct pci_dev *pdev)
{
/* Fujitsu P2000 */
if (pdev->subsystem_vendor == 0x10CF && pdev->subsystem_device == 0x10AF)
return 1;
/* Mitac 8317 (Winbook-A) and relatives */
if (pdev->subsystem_vendor == 0x1071 && pdev->subsystem_device == 0x8317)
return 1;
/* Systems by DMI */
if (dmi_check_system(cable_dmi_table))
return 1;
return 0;
}
/**
* ali_c2_cable_detect - cable detection
* @ap: ATA port
*
* Perform cable detection for C2 and later revisions
*/
static int ali_c2_cable_detect(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u8 ata66;
/* Certain laptops use short but suitable cables and don't
implement the detect logic */
if (ali_cable_override(pdev))
return ATA_CBL_PATA40_SHORT;
/* Host view cable detect 0x4A bit 0 primary bit 1 secondary
Bit set for 40 pin */
pci_read_config_byte(pdev, 0x4A, &ata66);
if (ata66 & (1 << ap->port_no))
return ATA_CBL_PATA40;
else
return ATA_CBL_PATA80;
}
/**
* ali_20_filter - filter for earlier ALI DMA
* @adev: ATA device
* @mask: received mask to manipulate and pass back
*
* Ensure that we do not do DMA on CD devices. We may be able to
* fix that later on. Also ensure we do not do UDMA on WDC drives
*/
static unsigned int ali_20_filter(struct ata_device *adev, unsigned int mask)
{
char model_num[ATA_ID_PROD_LEN + 1];
/* No DMA on anything but a disk for now */
if (adev->class != ATA_DEV_ATA)
mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num));
if (strstr(model_num, "WDC"))
mask &= ~ATA_MASK_UDMA;
return mask;
}
/**
* ali_fifo_control - FIFO manager
* @ap: ALi channel to control
* @adev: device for FIFO control
* @on: 0 for off 1 for on
*
* Enable or disable the FIFO on a given device. Because of the way the
* ALi FIFO works it provides a boost on ATA disk but can be confused by
* ATAPI and we must therefore manage it.
*/
static void ali_fifo_control(struct ata_port *ap, struct ata_device *adev, int on)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int pio_fifo = 0x54 + ap->port_no;
u8 fifo;
int shift = 4 * adev->devno;
/* ATA - FIFO on, set nibble to 0x05; ATAPI - FIFO off, set nibble to
0x00. Not all the docs agree but the behaviour we now use is the
one stated in the BIOS Programming Guide */
pci_read_config_byte(pdev, pio_fifo, &fifo);
fifo &= ~(0x0F << shift);
fifo |= (on << shift);
pci_write_config_byte(pdev, pio_fifo, fifo);
}
/**
* ali_program_modes - load mode registers
* @ap: ALi channel to load
* @adev: Device the timing is for
* @t: timing data
* @ultra: UDMA timing or zero for off
*
* Loads the timing registers for cmd/data and disable UDMA if
* ultra is zero. If ultra is set then load and enable the UDMA
* timing but do not touch the command/data timing.
*/
static void ali_program_modes(struct ata_port *ap, struct ata_device *adev, struct ata_timing *t, u8 ultra)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int cas = 0x58 + 4 * ap->port_no; /* Command timing */
int cbt = 0x59 + 4 * ap->port_no; /* Command timing */
int drwt = 0x5A + 4 * ap->port_no + adev->devno; /* R/W timing */
int udmat = 0x56 + ap->port_no; /* UDMA timing */
int shift = 4 * adev->devno;
u8 udma;
if (t != NULL) {
t->setup = clamp_val(t->setup, 1, 8) & 7;
t->act8b = clamp_val(t->act8b, 1, 8) & 7;
t->rec8b = clamp_val(t->rec8b, 1, 16) & 15;
t->active = clamp_val(t->active, 1, 8) & 7;
t->recover = clamp_val(t->recover, 1, 16) & 15;
pci_write_config_byte(pdev, cas, t->setup);
pci_write_config_byte(pdev, cbt, (t->act8b << 4) | t->rec8b);
pci_write_config_byte(pdev, drwt, (t->active << 4) | t->recover);
}
/* Set up the UDMA enable */
pci_read_config_byte(pdev, udmat, &udma);
udma &= ~(0x0F << shift);
udma |= ultra << shift;
pci_write_config_byte(pdev, udmat, udma);
}
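/*
 * Illustrative sketch only: how ali_program_modes() above packs the
 * per-device R/W timing byte.  Note the "& 7" / "& 15" after the clamp:
 * a value of 8 (or 16) wraps to 0, which the hardware presumably treats
 * as the full count.  The helper name and parameters are assumptions
 * made purely for demonstration.
 */
static inline u8 ali_pack_drwt(u8 active, u8 recover)
{
	active = clamp_val(active, 1, 8) & 7;		/* 8 encodes as 0 */
	recover = clamp_val(recover, 1, 16) & 15;	/* 16 encodes as 0 */
	return (active << 4) | recover;
}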
/**
* ali_set_piomode - set initial PIO mode data
* @ap: ATA interface
* @adev: ATA device
*
* Program the ALi registers for PIO mode.
*/
static void ali_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
struct ata_device *pair = ata_dev_pair(adev);
struct ata_timing t;
unsigned long T = 1000000000 / 33333; /* PCI clock based */
ata_timing_compute(adev, adev->pio_mode, &t, T, 1);
if (pair) {
struct ata_timing p;
ata_timing_compute(pair, pair->pio_mode, &p, T, 1);
ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
if (ata_dma_enabled(pair)) {
ata_timing_compute(pair, pair->dma_mode, &p, T, 1);
ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
}
}
/* PIO FIFO is only permitted on ATA disk */
if (adev->class != ATA_DEV_ATA)
ali_fifo_control(ap, adev, 0x00);
ali_program_modes(ap, adev, &t, 0);
if (adev->class == ATA_DEV_ATA)
ali_fifo_control(ap, adev, 0x05);
}
/**
* ali_set_dmamode - set initial DMA mode data
* @ap: ATA interface
* @adev: ATA device
*
* Program the ALi registers for DMA mode.
*/
static void ali_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
static u8 udma_timing[7] = { 0xC, 0xB, 0xA, 0x9, 0x8, 0xF, 0xD };
struct ata_device *pair = ata_dev_pair(adev);
struct ata_timing t;
unsigned long T = 1000000000 / 33333; /* PCI clock based */
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
if (adev->class == ATA_DEV_ATA)
ali_fifo_control(ap, adev, 0x08);
if (adev->dma_mode >= XFER_UDMA_0) {
ali_program_modes(ap, adev, NULL, udma_timing[adev->dma_mode - XFER_UDMA_0]);
if (adev->dma_mode >= XFER_UDMA_3) {
u8 reg4b;
pci_read_config_byte(pdev, 0x4B, &reg4b);
reg4b |= 1;
pci_write_config_byte(pdev, 0x4B, reg4b);
}
} else {
ata_timing_compute(adev, adev->dma_mode, &t, T, 1);
if (pair) {
struct ata_timing p;
ata_timing_compute(pair, pair->pio_mode, &p, T, 1);
ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
if (ata_dma_enabled(pair)) {
ata_timing_compute(pair, pair->dma_mode, &p, T, 1);
ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
}
}
ali_program_modes(ap, adev, &t, 0);
}
}
/**
* ali_warn_atapi_dma - Warn about ATAPI DMA disablement
* @adev: Device
*
* Whine about ATAPI DMA disablement if @adev is an ATAPI device.
* Can be used as ->dev_config.
*/
static void ali_warn_atapi_dma(struct ata_device *adev)
{
struct ata_eh_context *ehc = &adev->link->eh_context;
int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
if (print_info && adev->class == ATA_DEV_ATAPI && !ali_atapi_dma) {
ata_dev_warn(adev,
"WARNING: ATAPI DMA disabled for reliability issues. It can be enabled\n");
ata_dev_warn(adev,
"WARNING: via pata_ali.atapi_dma modparam or corresponding sysfs node.\n");
}
}
/**
* ali_lock_sectors - Keep older devices to 255 sector mode
* @adev: Device
*
* Called during the bus probe for each device that is found. We use
* this call to lock the sector count of the device to 255 or less on
* older ALi controllers. If we didn't do this then large I/O's would
* require LBA48 commands which the older ALi requires are issued by
* slower PIO methods
*/
static void ali_lock_sectors(struct ata_device *adev)
{
adev->max_sectors = 255;
ali_warn_atapi_dma(adev);
}
/**
* ali_check_atapi_dma - DMA check for most ALi controllers
* @qc: Command to complete
*
* Called to decide whether commands should be sent by DMA or PIO
*/
static int ali_check_atapi_dma(struct ata_queued_cmd *qc)
{
if (!ali_atapi_dma) {
/* FIXME: pata_ali can't do ATAPI DMA reliably but the
* IDE alim15x3 driver can. I tried lots of things
* but couldn't find what the actual difference was.
* If you got an idea, please write it to
* [email protected] and cc [email protected].
*
* Disable ATAPI DMA for now.
*/
return -EOPNOTSUPP;
}
/* If it's not a media command, it's not worth it */
if (atapi_cmd_type(qc->cdb[0]) == ATAPI_MISC)
return -EOPNOTSUPP;
return 0;
}
static void ali_c2_c3_postreset(struct ata_link *link, unsigned int *classes)
{
u8 r;
int port_bit = 4 << link->ap->port_no;
/* If our bridge is an ALI 1533 then do the extra work */
if (ali_isa_bridge) {
/* Tristate and re-enable the bus signals */
pci_read_config_byte(ali_isa_bridge, 0x58, &r);
r &= ~port_bit;
pci_write_config_byte(ali_isa_bridge, 0x58, r);
r |= port_bit;
pci_write_config_byte(ali_isa_bridge, 0x58, r);
}
ata_sff_postreset(link, classes);
}
static const struct scsi_host_template ali_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
/*
* Port operations for PIO only ALi
*/
static struct ata_port_operations ali_early_port_ops = {
.inherits = &ata_sff_port_ops,
.cable_detect = ata_cable_40wire,
.set_piomode = ali_set_piomode,
.sff_data_xfer = ata_sff_data_xfer32,
};
static const struct ata_port_operations ali_dma_base_ops = {
.inherits = &ata_bmdma32_port_ops,
.set_piomode = ali_set_piomode,
.set_dmamode = ali_set_dmamode,
};
/*
* Port operations for DMA capable ALi without cable
* detect
*/
static struct ata_port_operations ali_20_port_ops = {
.inherits = &ali_dma_base_ops,
.cable_detect = ata_cable_40wire,
.mode_filter = ali_20_filter,
.check_atapi_dma = ali_check_atapi_dma,
.dev_config = ali_lock_sectors,
};
/*
* Port operations for DMA capable ALi with cable detect
*/
static struct ata_port_operations ali_c2_port_ops = {
.inherits = &ali_dma_base_ops,
.check_atapi_dma = ali_check_atapi_dma,
.cable_detect = ali_c2_cable_detect,
.dev_config = ali_lock_sectors,
.postreset = ali_c2_c3_postreset,
};
/*
* Port operations for DMA capable ALi with cable detect
*/
static struct ata_port_operations ali_c4_port_ops = {
.inherits = &ali_dma_base_ops,
.check_atapi_dma = ali_check_atapi_dma,
.cable_detect = ali_c2_cable_detect,
.dev_config = ali_lock_sectors,
};
/*
* Port operations for DMA capable ALi with cable detect and LBA48
*/
static struct ata_port_operations ali_c5_port_ops = {
.inherits = &ali_dma_base_ops,
.check_atapi_dma = ali_check_atapi_dma,
.dev_config = ali_warn_atapi_dma,
.cable_detect = ali_c2_cable_detect,
};
/**
* ali_init_chipset - chip setup function
* @pdev: PCI device of ATA controller
*
* Perform the setup on the device that must be done both at boot
* and at resume time.
*/
static void ali_init_chipset(struct pci_dev *pdev)
{
u8 tmp;
struct pci_dev *north;
/*
* The chipset revision selects the driver operations and
* mode data.
*/
if (pdev->revision <= 0x20) {
pci_read_config_byte(pdev, 0x53, &tmp);
tmp |= 0x03;
pci_write_config_byte(pdev, 0x53, tmp);
} else {
pci_read_config_byte(pdev, 0x4a, &tmp);
pci_write_config_byte(pdev, 0x4a, tmp | 0x20);
pci_read_config_byte(pdev, 0x4B, &tmp);
if (pdev->revision < 0xC2)
/* 1543-E/F, 1543C-C, 1543C-D, 1543C-E */
/* Clear CD-ROM DMA write bit */
tmp &= 0x7F;
/* Cable and UDMA */
if (pdev->revision >= 0xc2)
tmp |= 0x01;
pci_write_config_byte(pdev, 0x4B, tmp | 0x08);
/*
* CD_ROM DMA on (0x53 bit 0). Enable this even if we want
* to use PIO. 0x53 bit 1 (rev 20 only) - enable FIFO control
* via 0x54/55.
*/
pci_read_config_byte(pdev, 0x53, &tmp);
if (pdev->revision >= 0xc7)
tmp |= 0x03;
else
tmp |= 0x01; /* CD_ROM enable for DMA */
pci_write_config_byte(pdev, 0x53, tmp);
}
north = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus), 0,
PCI_DEVFN(0, 0));
if (north && north->vendor == PCI_VENDOR_ID_AL && ali_isa_bridge) {
/* Configure the ALi bridge logic. For non ALi rely on BIOS.
Set the south bridge enable bit */
pci_read_config_byte(ali_isa_bridge, 0x79, &tmp);
if (pdev->revision == 0xC2)
pci_write_config_byte(ali_isa_bridge, 0x79, tmp | 0x04);
else if (pdev->revision > 0xC2 && pdev->revision < 0xC5)
pci_write_config_byte(ali_isa_bridge, 0x79, tmp | 0x02);
}
pci_dev_put(north);
ata_pci_bmdma_clear_simplex(pdev);
}
/**
* ali_init_one - discovery callback
* @pdev: PCI device ID
* @id: PCI table info
*
* An ALi IDE interface has been discovered. Figure out what revision
* and perform configuration work before handing it to the ATA layer
*/
static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
static const struct ata_port_info info_early = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.port_ops = &ali_early_port_ops
};
/* Revision 0x20 added DMA */
static const struct ata_port_info info_20 = {
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 |
ATA_FLAG_IGN_SIMPLEX,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.port_ops = &ali_20_port_ops
};
/* Revision 0x20 with support logic added UDMA */
static const struct ata_port_info info_20_udma = {
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 |
ATA_FLAG_IGN_SIMPLEX,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA2,
.port_ops = &ali_20_port_ops
};
/* Revision 0xC2 adds UDMA66 */
static const struct ata_port_info info_c2 = {
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 |
ATA_FLAG_IGN_SIMPLEX,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA4,
.port_ops = &ali_c2_port_ops
};
/* Revision 0xC3 is UDMA66 for now */
static const struct ata_port_info info_c3 = {
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 |
ATA_FLAG_IGN_SIMPLEX,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA4,
.port_ops = &ali_c2_port_ops
};
/* Revision 0xC4 is UDMA100 */
static const struct ata_port_info info_c4 = {
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 |
ATA_FLAG_IGN_SIMPLEX,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
.port_ops = &ali_c4_port_ops
};
/* Revision 0xC5 is UDMA133 with LBA48 DMA */
static const struct ata_port_info info_c5 = {
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_IGN_SIMPLEX,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &ali_c5_port_ops
};
const struct ata_port_info *ppi[] = { NULL, NULL };
u8 tmp;
int rc;
rc = pcim_enable_device(pdev);
if (rc)
return rc;
/*
* The chipset revision selects the driver operations and
* mode data.
*/
if (pdev->revision < 0x20) {
ppi[0] = &info_early;
} else if (pdev->revision < 0xC2) {
ppi[0] = &info_20;
} else if (pdev->revision == 0xC2) {
ppi[0] = &info_c2;
} else if (pdev->revision == 0xC3) {
ppi[0] = &info_c3;
} else if (pdev->revision == 0xC4) {
ppi[0] = &info_c4;
} else
ppi[0] = &info_c5;
ali_init_chipset(pdev);
if (ali_isa_bridge && pdev->revision >= 0x20 && pdev->revision < 0xC2) {
/* Are we paired with a UDMA capable chip */
pci_read_config_byte(ali_isa_bridge, 0x5E, &tmp);
if ((tmp & 0x1E) == 0x12)
ppi[0] = &info_20_udma;
}
if (!ppi[0]->mwdma_mask && !ppi[0]->udma_mask)
return ata_pci_sff_init_one(pdev, ppi, &ali_sht, NULL, 0);
else
return ata_pci_bmdma_init_one(pdev, ppi, &ali_sht, NULL, 0);
}
#ifdef CONFIG_PM_SLEEP
static int ali_reinit_one(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
int rc;
rc = ata_pci_device_do_resume(pdev);
if (rc)
return rc;
ali_init_chipset(pdev);
ata_host_resume(host);
return 0;
}
#endif
static const struct pci_device_id ali[] = {
{ PCI_VDEVICE(AL, PCI_DEVICE_ID_AL_M5228), },
{ PCI_VDEVICE(AL, PCI_DEVICE_ID_AL_M5229), },
{ },
};
static struct pci_driver ali_pci_driver = {
.name = DRV_NAME,
.id_table = ali,
.probe = ali_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ali_reinit_one,
#endif
};
static int __init ali_init(void)
{
int ret;
ali_isa_bridge = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
ret = pci_register_driver(&ali_pci_driver);
if (ret < 0)
pci_dev_put(ali_isa_bridge);
return ret;
}
static void __exit ali_exit(void)
{
pci_unregister_driver(&ali_pci_driver);
pci_dev_put(ali_isa_bridge);
}
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for ALi PATA");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, ali);
MODULE_VERSION(DRV_VERSION);
module_init(ali_init);
module_exit(ali_exit);
| linux-master | drivers/ata/pata_ali.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* pata_pdc202xx_old.c - Promise PDC202xx PATA for new ATA layer
* (C) 2005 Red Hat Inc
* Alan Cox <[email protected]>
* (C) 2007,2009,2010 Bartlomiej Zolnierkiewicz
*
* Based in part on linux/drivers/ide/pci/pdc202xx_old.c
*
* First cut with LBA48/ATAPI
*
* TODO:
* Channel interlock/reset on both required ?
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_pdc202xx_old"
#define DRV_VERSION "0.4.3"
static int pdc2026x_cable_detect(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u16 cis;
pci_read_config_word(pdev, 0x50, &cis);
if (cis & (1 << (10 + ap->port_no)))
return ATA_CBL_PATA40;
return ATA_CBL_PATA80;
}
static void pdc202xx_exec_command(struct ata_port *ap,
const struct ata_taskfile *tf)
{
iowrite8(tf->command, ap->ioaddr.command_addr);
ndelay(400);
}
static bool pdc202xx_irq_check(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
unsigned long master = pci_resource_start(pdev, 4);
u8 sc1d = inb(master + 0x1d);
if (ap->port_no) {
/*
* bit 7: error, bit 6: interrupting,
* bit 5: FIFO full, bit 4: FIFO empty
*/
return sc1d & 0x40;
} else {
/*
* bit 3: error, bit 2: interrupting,
* bit 1: FIFO full, bit 0: FIFO empty
*/
return sc1d & 0x04;
}
}
/**
* pdc202xx_configure_piomode - set chip PIO timing
* @ap: ATA interface
* @adev: ATA device
* @pio: PIO mode
*
* Called to do the PIO mode setup. Our timing registers are shared
* so a configure_dmamode call will undo any work we do here and vice
* versa
*/
static void pdc202xx_configure_piomode(struct ata_port *ap, struct ata_device *adev, int pio)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int port = 0x60 + 8 * ap->port_no + 4 * adev->devno;
static const u16 pio_timing[5] = {
0x0913, 0x050C, 0x0308, 0x0206, 0x0104
};
u8 r_ap, r_bp;
pci_read_config_byte(pdev, port, &r_ap);
pci_read_config_byte(pdev, port + 1, &r_bp);
r_ap &= ~0x3F; /* Preserve ERRDY_EN, SYNC_IN */
r_bp &= ~0x1F;
r_ap |= (pio_timing[pio] >> 8);
r_bp |= (pio_timing[pio] & 0xFF);
if (ata_pio_need_iordy(adev))
r_ap |= 0x20; /* IORDY enable */
if (adev->class == ATA_DEV_ATA)
r_ap |= 0x10; /* FIFO enable */
pci_write_config_byte(pdev, port, r_ap);
pci_write_config_byte(pdev, port + 1, r_bp);
}
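/*
 * Illustrative sketch only: the timing bits that
 * pdc202xx_configure_piomode() above ORs into r_ap/r_bp (the real function
 * also preserves the ERRDY_EN/SYNC_IN bits it read back from config space).
 * For PIO4 the table entry is 0x0104, so r_ap gets 0x01 plus 0x20 (IORDY)
 * and 0x10 (FIFO) for an ATA disk needing IORDY, and r_bp gets 0x04.
 * The helper name and parameters are assumptions for demonstration.
 */
static inline void pdc202xx_pio_bytes(int pio, bool iordy, bool is_ata,
				      u8 *r_ap, u8 *r_bp)
{
	static const u16 pio_timing[5] = {
		0x0913, 0x050C, 0x0308, 0x0206, 0x0104
	};

	*r_ap = (pio_timing[pio] >> 8) | (iordy ? 0x20 : 0) | (is_ata ? 0x10 : 0);
	*r_bp = pio_timing[pio] & 0xFF;
}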
/**
* pdc202xx_set_piomode - set initial PIO mode data
* @ap: ATA interface
* @adev: ATA device
*
* Called to do the PIO mode setup. Our timing registers are shared
* but we want to set the PIO timing by default.
*/
static void pdc202xx_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
pdc202xx_configure_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);
}
/**
* pdc202xx_set_dmamode - set DMA mode in chip
* @ap: ATA interface
* @adev: ATA device
*
* Load DMA cycle times into the chip ready for a DMA transfer
* to occur.
*/
static void pdc202xx_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int port = 0x60 + 8 * ap->port_no + 4 * adev->devno;
static u8 udma_timing[6][2] = {
{ 0x60, 0x03 }, /* 33 MHz Clock */
{ 0x40, 0x02 },
{ 0x20, 0x01 },
{ 0x40, 0x02 }, /* 66 MHz Clock */
{ 0x20, 0x01 },
{ 0x20, 0x01 }
};
static u8 mdma_timing[3][2] = {
{ 0xe0, 0x0f },
{ 0x60, 0x04 },
{ 0x60, 0x03 },
};
u8 r_bp, r_cp;
pci_read_config_byte(pdev, port + 1, &r_bp);
pci_read_config_byte(pdev, port + 2, &r_cp);
r_bp &= ~0xE0;
r_cp &= ~0x0F;
if (adev->dma_mode >= XFER_UDMA_0) {
int speed = adev->dma_mode - XFER_UDMA_0;
r_bp |= udma_timing[speed][0];
r_cp |= udma_timing[speed][1];
} else {
int speed = adev->dma_mode - XFER_MW_DMA_0;
r_bp |= mdma_timing[speed][0];
r_cp |= mdma_timing[speed][1];
}
pci_write_config_byte(pdev, port + 1, r_bp);
pci_write_config_byte(pdev, port + 2, r_cp);
}
/**
* pdc2026x_bmdma_start - DMA engine begin
* @qc: ATA command
*
* In UDMA3 or higher we have to clock switch for the duration of the
* DMA transfer sequence.
*
* Note: The host lock held by the libata layer protects
* us from two channels both trying to set DMA bits at once
*/
static void pdc2026x_bmdma_start(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_device *adev = qc->dev;
struct ata_taskfile *tf = &qc->tf;
int sel66 = ap->port_no ? 0x08: 0x02;
void __iomem *master = ap->host->ports[0]->ioaddr.bmdma_addr;
void __iomem *clock = master + 0x11;
void __iomem *atapi_reg = master + 0x20 + (4 * ap->port_no);
u32 len;
/* Check we keep host level locking here */
if (adev->dma_mode > XFER_UDMA_2)
iowrite8(ioread8(clock) | sel66, clock);
else
iowrite8(ioread8(clock) & ~sel66, clock);
/* The DMA clocks may have been trashed by a reset. FIXME: make conditional
and move to qc_issue ? */
pdc202xx_set_dmamode(ap, qc->dev);
/* Cases the state machine will not complete correctly without help */
if ((tf->flags & ATA_TFLAG_LBA48) || tf->protocol == ATAPI_PROT_DMA) {
len = qc->nbytes / 2;
if (tf->flags & ATA_TFLAG_WRITE)
len |= 0x06000000;
else
len |= 0x05000000;
iowrite32(len, atapi_reg);
}
/* Activate DMA */
ata_bmdma_start(qc);
}
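/*
 * Worked example, illustrative only: the transfer-count word programmed
 * into the per-port register at master + 0x20 in pdc2026x_bmdma_start()
 * above.  For a 64 KiB LBA48 write, qc->nbytes / 2 = 0x8000 words, so the
 * value written is 0x06000000 | 0x8000 = 0x06008000; a read uses
 * 0x05000000 instead.  Helper name and parameters are assumptions.
 */
static inline u32 pdc2026x_count_word(u32 nbytes, bool is_write)
{
	return (nbytes / 2) | (is_write ? 0x06000000 : 0x05000000);
}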
/**
* pdc2026x_bmdma_stop - DMA engine stop
* @qc: ATA command
*
* After a DMA completes we need to put the clock back to 33MHz for
* PIO timings.
*
* Note: The host lock held by the libata layer protects
* us from two channels both trying to set DMA bits at once
*/
static void pdc2026x_bmdma_stop(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_device *adev = qc->dev;
struct ata_taskfile *tf = &qc->tf;
int sel66 = ap->port_no ? 0x08: 0x02;
/* The clock bits are in the same register for both channels */
void __iomem *master = ap->host->ports[0]->ioaddr.bmdma_addr;
void __iomem *clock = master + 0x11;
void __iomem *atapi_reg = master + 0x20 + (4 * ap->port_no);
/* Cases the state machine will not complete correctly */
if (tf->protocol == ATAPI_PROT_DMA || (tf->flags & ATA_TFLAG_LBA48)) {
iowrite32(0, atapi_reg);
iowrite8(ioread8(clock) & ~sel66, clock);
}
/* Flip back to 33 MHz for PIO */
if (adev->dma_mode > XFER_UDMA_2)
iowrite8(ioread8(clock) & ~sel66, clock);
ata_bmdma_stop(qc);
pdc202xx_set_piomode(ap, adev);
}
/**
* pdc2026x_dev_config - device setup hook
* @adev: newly found device
*
* Perform chip specific early setup. We need to lock the transfer
* sizes to 8bit to avoid making the state engine on the 2026x cards
* barf.
*/
static void pdc2026x_dev_config(struct ata_device *adev)
{
adev->max_sectors = 256;
}
static int pdc2026x_port_start(struct ata_port *ap)
{
void __iomem *bmdma = ap->ioaddr.bmdma_addr;
if (bmdma) {
/* Enable burst mode */
u8 burst = ioread8(bmdma + 0x1f);
iowrite8(burst | 0x01, bmdma + 0x1f);
}
return ata_bmdma_port_start(ap);
}
/**
* pdc2026x_check_atapi_dma - Check whether ATAPI DMA can be supported for this command
* @qc: Metadata associated with taskfile to check
*
* Just say no - not supported on older Promise.
*
* LOCKING:
* None (inherited from caller).
*
* RETURNS: 0 when ATAPI DMA can be used
* 1 otherwise
*/
static int pdc2026x_check_atapi_dma(struct ata_queued_cmd *qc)
{
return 1;
}
static const struct scsi_host_template pdc202xx_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static struct ata_port_operations pdc2024x_port_ops = {
.inherits = &ata_bmdma_port_ops,
.cable_detect = ata_cable_40wire,
.set_piomode = pdc202xx_set_piomode,
.set_dmamode = pdc202xx_set_dmamode,
.sff_exec_command = pdc202xx_exec_command,
.sff_irq_check = pdc202xx_irq_check,
};
static struct ata_port_operations pdc2026x_port_ops = {
.inherits = &pdc2024x_port_ops,
.check_atapi_dma = pdc2026x_check_atapi_dma,
.bmdma_start = pdc2026x_bmdma_start,
.bmdma_stop = pdc2026x_bmdma_stop,
.cable_detect = pdc2026x_cable_detect,
.dev_config = pdc2026x_dev_config,
.port_start = pdc2026x_port_start,
.sff_exec_command = pdc202xx_exec_command,
.sff_irq_check = pdc202xx_irq_check,
};
static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
static const struct ata_port_info info[3] = {
{
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA2,
.port_ops = &pdc2024x_port_ops
},
{
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA4,
.port_ops = &pdc2026x_port_ops
},
{
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
.port_ops = &pdc2026x_port_ops
}
};
const struct ata_port_info *ppi[] = { &info[id->driver_data], NULL };
if (dev->device == PCI_DEVICE_ID_PROMISE_20265) {
struct pci_dev *bridge = dev->bus->self;
/* Don't grab anything behind a Promise I2O RAID */
if (bridge && bridge->vendor == PCI_VENDOR_ID_INTEL) {
if (bridge->device == PCI_DEVICE_ID_INTEL_I960)
return -ENODEV;
if (bridge->device == PCI_DEVICE_ID_INTEL_I960RM)
return -ENODEV;
}
}
return ata_pci_bmdma_init_one(dev, ppi, &pdc202xx_sht, NULL, 0);
}
static const struct pci_device_id pdc202xx[] = {
{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20246), 0 },
{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20262), 1 },
{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20263), 1 },
{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20265), 2 },
{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20267), 2 },
{ },
};
static struct pci_driver pdc202xx_pci_driver = {
.name = DRV_NAME,
.id_table = pdc202xx,
.probe = pdc202xx_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
};
module_pci_driver(pdc202xx_pci_driver);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for Promise 2024x and 20262-20267");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pdc202xx);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_pdc202xx_old.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* sata_sis.c - Silicon Integrated Systems SATA
*
* Maintained by: Uwe Koziolek
* Please ALWAYS copy [email protected]
* on emails.
*
* Copyright 2004 Uwe Koziolek
*
* libata documentation is available via 'make {ps|pdf}docs',
* as Documentation/driver-api/libata.rst
*
* Hardware documentation available under NDA.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include "sis.h"
#define DRV_NAME "sata_sis"
#define DRV_VERSION "1.0"
enum {
sis_180 = 0,
SIS_SCR_PCI_BAR = 5,
/* PCI configuration registers */
SIS_GENCTL = 0x54, /* IDE General Control register */
SIS_SCR_BASE = 0xc0, /* sata0 phy SCR registers */
SIS180_SATA1_OFS = 0x10, /* offset from sata0->sata1 phy regs */
SIS182_SATA1_OFS = 0x20, /* offset from sata0->sata1 phy regs */
SIS_PMR = 0x90, /* port mapping register */
SIS_PMR_COMBINED = 0x30,
/* random bits */
SIS_FLAG_CFGSCR = (1 << 30), /* host flag: SCRs via PCI cfg */
GENCTL_IOMAPPED_SCR = (1 << 26), /* if set, SCRs are in IO space */
};
static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static int sis_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int sis_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static const struct pci_device_id sis_pci_tbl[] = {
{ PCI_VDEVICE(SI, 0x0180), sis_180 }, /* SiS 964/180 */
{ PCI_VDEVICE(SI, 0x0181), sis_180 }, /* SiS 964/180 */
{ PCI_VDEVICE(SI, 0x0182), sis_180 }, /* SiS 965/965L */
{ PCI_VDEVICE(SI, 0x0183), sis_180 }, /* SiS 965/965L */
{ PCI_VDEVICE(SI, 0x1182), sis_180 }, /* SiS 966/680 */
{ PCI_VDEVICE(SI, 0x1183), sis_180 }, /* SiS 966/966L/968/680 */
{ } /* terminate list */
};
static struct pci_driver sis_pci_driver = {
.name = DRV_NAME,
.id_table = sis_pci_tbl,
.probe = sis_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
};
static const struct scsi_host_template sis_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static struct ata_port_operations sis_ops = {
.inherits = &ata_bmdma_port_ops,
.scr_read = sis_scr_read,
.scr_write = sis_scr_write,
};
static const struct ata_port_info sis_port_info = {
.flags = ATA_FLAG_SATA,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &sis_ops,
};
MODULE_AUTHOR("Uwe Koziolek");
MODULE_DESCRIPTION("low-level driver for Silicon Integrated Systems SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, sis_pci_tbl);
MODULE_VERSION(DRV_VERSION);
static unsigned int get_scr_cfg_addr(struct ata_link *link, unsigned int sc_reg)
{
struct ata_port *ap = link->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
unsigned int addr = SIS_SCR_BASE + (4 * sc_reg);
u8 pmr;
if (ap->port_no) {
switch (pdev->device) {
case 0x0180:
case 0x0181:
pci_read_config_byte(pdev, SIS_PMR, &pmr);
if ((pmr & SIS_PMR_COMBINED) == 0)
addr += SIS180_SATA1_OFS;
break;
case 0x0182:
case 0x0183:
case 0x1182:
addr += SIS182_SATA1_OFS;
break;
}
}
if (link->pmp)
addr += 0x10;
return addr;
}
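/*
 * Worked example, illustrative only: for a SiS 182 (PCI device 0x0182),
 * port 1 and SCR_CONTROL (sc_reg == 2), get_scr_cfg_addr() above returns
 * SIS_SCR_BASE + 4 * 2 + SIS182_SATA1_OFS = 0xc0 + 0x08 + 0x20 = 0xe8.
 * On a 180/181 in non-combined mode the second-port offset would be
 * SIS180_SATA1_OFS (0x10) instead, and a port-multiplier link adds 0x10.
 */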
static u32 sis_scr_cfg_read(struct ata_link *link,
unsigned int sc_reg, u32 *val)
{
struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
unsigned int cfg_addr = get_scr_cfg_addr(link, sc_reg);
if (sc_reg == SCR_ERROR) /* doesn't exist in PCI cfg space */
return -EINVAL;
pci_read_config_dword(pdev, cfg_addr, val);
return 0;
}
static int sis_scr_cfg_write(struct ata_link *link,
unsigned int sc_reg, u32 val)
{
struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
unsigned int cfg_addr = get_scr_cfg_addr(link, sc_reg);
pci_write_config_dword(pdev, cfg_addr, val);
return 0;
}
static int sis_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
struct ata_port *ap = link->ap;
void __iomem *base = ap->ioaddr.scr_addr + link->pmp * 0x10;
if (sc_reg > SCR_CONTROL)
return -EINVAL;
if (ap->flags & SIS_FLAG_CFGSCR)
return sis_scr_cfg_read(link, sc_reg, val);
*val = ioread32(base + sc_reg * 4);
return 0;
}
static int sis_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
struct ata_port *ap = link->ap;
void __iomem *base = ap->ioaddr.scr_addr + link->pmp * 0x10;
if (sc_reg > SCR_CONTROL)
return -EINVAL;
if (ap->flags & SIS_FLAG_CFGSCR)
return sis_scr_cfg_write(link, sc_reg, val);
iowrite32(val, base + (sc_reg * 4));
return 0;
}
static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct ata_port_info pi = sis_port_info;
const struct ata_port_info *ppi[] = { &pi, &pi };
struct ata_host *host;
u32 genctl, val;
u8 pmr;
u8 port2_start = 0x20;
int i, rc;
ata_print_version_once(&pdev->dev, DRV_VERSION);
rc = pcim_enable_device(pdev);
if (rc)
return rc;
/* check and see if the SCRs are in IO space or PCI cfg space */
pci_read_config_dword(pdev, SIS_GENCTL, &genctl);
if ((genctl & GENCTL_IOMAPPED_SCR) == 0)
pi.flags |= SIS_FLAG_CFGSCR;
/* if hardware thinks SCRs are in IO space, but there are
* no IO resources assigned, change to PCI cfg space.
*/
if ((!(pi.flags & SIS_FLAG_CFGSCR)) &&
((pci_resource_start(pdev, SIS_SCR_PCI_BAR) == 0) ||
(pci_resource_len(pdev, SIS_SCR_PCI_BAR) < 128))) {
genctl &= ~GENCTL_IOMAPPED_SCR;
pci_write_config_dword(pdev, SIS_GENCTL, genctl);
pi.flags |= SIS_FLAG_CFGSCR;
}
pci_read_config_byte(pdev, SIS_PMR, &pmr);
switch (ent->device) {
case 0x0180:
case 0x0181:
/* The PATA-handling is provided by pata_sis */
switch (pmr & 0x30) {
case 0x10:
ppi[1] = &sis_info133_for_sata;
break;
case 0x30:
ppi[0] = &sis_info133_for_sata;
break;
}
if ((pmr & SIS_PMR_COMBINED) == 0) {
dev_info(&pdev->dev,
"Detected SiS 180/181/964 chipset in SATA mode\n");
port2_start = 64;
} else {
dev_info(&pdev->dev,
"Detected SiS 180/181 chipset in combined mode\n");
port2_start = 0;
pi.flags |= ATA_FLAG_SLAVE_POSS;
}
break;
case 0x0182:
case 0x0183:
pci_read_config_dword(pdev, 0x6C, &val);
if (val & (1L << 31)) {
dev_info(&pdev->dev, "Detected SiS 182/965 chipset\n");
pi.flags |= ATA_FLAG_SLAVE_POSS;
} else {
dev_info(&pdev->dev, "Detected SiS 182/965L chipset\n");
}
break;
case 0x1182:
dev_info(&pdev->dev,
"Detected SiS 1182/966/680 SATA controller\n");
pi.flags |= ATA_FLAG_SLAVE_POSS;
break;
case 0x1183:
dev_info(&pdev->dev,
"Detected SiS 1183/966/966L/968/680 controller in PATA mode\n");
ppi[0] = &sis_info133_for_sata;
ppi[1] = &sis_info133_for_sata;
break;
}
rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
if (rc)
return rc;
for (i = 0; i < 2; i++) {
struct ata_port *ap = host->ports[i];
if (ap->flags & ATA_FLAG_SATA &&
ap->flags & ATA_FLAG_SLAVE_POSS) {
rc = ata_slave_link_init(ap);
if (rc)
return rc;
}
}
if (!(pi.flags & SIS_FLAG_CFGSCR)) {
void __iomem *mmio;
rc = pcim_iomap_regions(pdev, 1 << SIS_SCR_PCI_BAR, DRV_NAME);
if (rc)
return rc;
mmio = host->iomap[SIS_SCR_PCI_BAR];
host->ports[0]->ioaddr.scr_addr = mmio;
host->ports[1]->ioaddr.scr_addr = mmio + port2_start;
}
pci_set_master(pdev);
pci_intx(pdev, 1);
return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &sis_sht);
}
module_pci_driver(sis_pci_driver);
| linux-master | drivers/ata/sata_sis.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Freescale QorIQ AHCI SATA platform driver
*
* Copyright 2015 Freescale, Inc.
* Tang Yuantian <[email protected]>
*/
#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/ahci_platform.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/libata.h>
#include "ahci.h"
#define DRV_NAME "ahci-qoriq"
/* port register definition */
#define PORT_PHY1 0xA8
#define PORT_PHY2 0xAC
#define PORT_PHY3 0xB0
#define PORT_PHY4 0xB4
#define PORT_PHY5 0xB8
#define PORT_AXICC 0xBC
#define PORT_TRANS 0xC8
/* port register default value */
#define AHCI_PORT_PHY_1_CFG 0xa003fffe
#define AHCI_PORT_PHY2_CFG 0x28184d1f
#define AHCI_PORT_PHY3_CFG 0x0e081509
#define AHCI_PORT_TRANS_CFG 0x08000029
#define AHCI_PORT_AXICC_CFG 0x3fffffff
/* for ls1021a */
#define LS1021A_PORT_PHY2 0x28183414
#define LS1021A_PORT_PHY3 0x0e080e06
#define LS1021A_PORT_PHY4 0x064a080b
#define LS1021A_PORT_PHY5 0x2aa86470
#define LS1021A_AXICC_ADDR 0xC0
#define SATA_ECC_DISABLE 0x00020000
#define ECC_DIS_ARMV8_CH2 0x80000000
#define ECC_DIS_LS1088A 0x40000000
enum ahci_qoriq_type {
AHCI_LS1021A,
AHCI_LS1028A,
AHCI_LS1043A,
AHCI_LS2080A,
AHCI_LS1046A,
AHCI_LS1088A,
AHCI_LS2088A,
AHCI_LX2160A,
};
struct ahci_qoriq_priv {
struct ccsr_ahci *reg_base;
enum ahci_qoriq_type type;
void __iomem *ecc_addr;
bool is_dmacoherent;
};
static bool ecc_initialized;
static const struct of_device_id ahci_qoriq_of_match[] = {
{ .compatible = "fsl,ls1021a-ahci", .data = (void *)AHCI_LS1021A},
{ .compatible = "fsl,ls1028a-ahci", .data = (void *)AHCI_LS1028A},
{ .compatible = "fsl,ls1043a-ahci", .data = (void *)AHCI_LS1043A},
{ .compatible = "fsl,ls2080a-ahci", .data = (void *)AHCI_LS2080A},
{ .compatible = "fsl,ls1046a-ahci", .data = (void *)AHCI_LS1046A},
{ .compatible = "fsl,ls1088a-ahci", .data = (void *)AHCI_LS1088A},
{ .compatible = "fsl,ls2088a-ahci", .data = (void *)AHCI_LS2088A},
{ .compatible = "fsl,lx2160a-ahci", .data = (void *)AHCI_LX2160A},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ahci_qoriq_of_match);
static const struct acpi_device_id ahci_qoriq_acpi_match[] = {
{"NXP0004", .driver_data = (kernel_ulong_t)AHCI_LX2160A},
{ }
};
MODULE_DEVICE_TABLE(acpi, ahci_qoriq_acpi_match);
static int ahci_qoriq_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
const unsigned int *timing = sata_ehc_deb_timing(&link->eh_context);
void __iomem *port_mmio = ahci_port_base(link->ap);
u32 px_cmd, px_is, px_val;
struct ata_port *ap = link->ap;
struct ahci_port_priv *pp = ap->private_data;
struct ahci_host_priv *hpriv = ap->host->private_data;
struct ahci_qoriq_priv *qoriq_priv = hpriv->plat_data;
u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
struct ata_taskfile tf;
bool online;
int rc;
bool ls1021a_workaround = (qoriq_priv->type == AHCI_LS1021A);
hpriv->stop_engine(ap);
/*
* There is an erratum on ls1021a Rev1.0 and Rev2.0 which is:
* A-009042: The device detection initialization sequence
* mistakenly resets some registers.
*
* Workaround for this is:
* The software should read and store PxCMD and PxIS values
* before issuing the device detection initialization sequence.
* After the sequence is complete, software should restore the
* PxCMD and PxIS with the stored values.
*/
if (ls1021a_workaround) {
px_cmd = readl(port_mmio + PORT_CMD);
px_is = readl(port_mmio + PORT_IRQ_STAT);
}
/* clear D2H reception area to properly wait for D2H FIS */
ata_tf_init(link->device, &tf);
tf.status = ATA_BUSY;
ata_tf_to_fis(&tf, 0, 0, d2h_fis);
rc = sata_link_hardreset(link, timing, deadline, &online,
ahci_check_ready);
/* restore the PxCMD and PxIS on ls1021 */
if (ls1021a_workaround) {
px_val = readl(port_mmio + PORT_CMD);
if (px_val != px_cmd)
writel(px_cmd, port_mmio + PORT_CMD);
px_val = readl(port_mmio + PORT_IRQ_STAT);
if (px_val != px_is)
writel(px_is, port_mmio + PORT_IRQ_STAT);
}
hpriv->start_engine(ap);
if (online)
*class = ahci_dev_classify(ap);
return rc;
}
static struct ata_port_operations ahci_qoriq_ops = {
.inherits = &ahci_ops,
.hardreset = ahci_qoriq_hardreset,
};
static const struct ata_port_info ahci_qoriq_port_info = {
.flags = AHCI_FLAG_COMMON | ATA_FLAG_NCQ,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_qoriq_ops,
};
static const struct scsi_host_template ahci_qoriq_sht = {
AHCI_SHT(DRV_NAME),
};
static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
{
struct ahci_qoriq_priv *qpriv = hpriv->plat_data;
void __iomem *reg_base = hpriv->mmio;
switch (qpriv->type) {
case AHCI_LS1021A:
if (!(qpriv->ecc_addr || ecc_initialized))
return -EINVAL;
else if (qpriv->ecc_addr && !ecc_initialized)
writel(SATA_ECC_DISABLE, qpriv->ecc_addr);
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
writel(LS1021A_PORT_PHY2, reg_base + PORT_PHY2);
writel(LS1021A_PORT_PHY3, reg_base + PORT_PHY3);
writel(LS1021A_PORT_PHY4, reg_base + PORT_PHY4);
writel(LS1021A_PORT_PHY5, reg_base + PORT_PHY5);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
if (qpriv->is_dmacoherent)
writel(AHCI_PORT_AXICC_CFG,
reg_base + LS1021A_AXICC_ADDR);
break;
case AHCI_LS1043A:
if (!(qpriv->ecc_addr || ecc_initialized))
return -EINVAL;
else if (qpriv->ecc_addr && !ecc_initialized)
writel(readl(qpriv->ecc_addr) |
ECC_DIS_ARMV8_CH2,
qpriv->ecc_addr);
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
if (qpriv->is_dmacoherent)
writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
break;
case AHCI_LS2080A:
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
if (qpriv->is_dmacoherent)
writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
break;
case AHCI_LS1046A:
if (!(qpriv->ecc_addr || ecc_initialized))
return -EINVAL;
else if (qpriv->ecc_addr && !ecc_initialized)
writel(readl(qpriv->ecc_addr) |
ECC_DIS_ARMV8_CH2,
qpriv->ecc_addr);
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
if (qpriv->is_dmacoherent)
writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
break;
case AHCI_LS1028A:
case AHCI_LS1088A:
case AHCI_LX2160A:
if (!(qpriv->ecc_addr || ecc_initialized))
return -EINVAL;
else if (qpriv->ecc_addr && !ecc_initialized)
writel(readl(qpriv->ecc_addr) |
ECC_DIS_LS1088A,
qpriv->ecc_addr);
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
if (qpriv->is_dmacoherent)
writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
break;
case AHCI_LS2088A:
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
if (qpriv->is_dmacoherent)
writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
break;
}
ecc_initialized = true;
return 0;
}
static int ahci_qoriq_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
const struct acpi_device_id *acpi_id;
struct device *dev = &pdev->dev;
struct ahci_host_priv *hpriv;
struct ahci_qoriq_priv *qoriq_priv;
const struct of_device_id *of_id;
struct resource *res;
int rc;
hpriv = ahci_platform_get_resources(pdev, 0);
if (IS_ERR(hpriv))
return PTR_ERR(hpriv);
of_id = of_match_node(ahci_qoriq_of_match, np);
acpi_id = acpi_match_device(ahci_qoriq_acpi_match, &pdev->dev);
if (!(of_id || acpi_id))
return -ENODEV;
qoriq_priv = devm_kzalloc(dev, sizeof(*qoriq_priv), GFP_KERNEL);
if (!qoriq_priv)
return -ENOMEM;
if (of_id)
qoriq_priv->type = (unsigned long)of_id->data;
else
qoriq_priv->type = (enum ahci_qoriq_type)acpi_id->driver_data;
if (unlikely(!ecc_initialized)) {
res = platform_get_resource_byname(pdev,
IORESOURCE_MEM,
"sata-ecc");
if (res) {
qoriq_priv->ecc_addr =
devm_ioremap_resource(dev, res);
if (IS_ERR(qoriq_priv->ecc_addr))
return PTR_ERR(qoriq_priv->ecc_addr);
}
}
if (device_get_dma_attr(&pdev->dev) == DEV_DMA_COHERENT)
qoriq_priv->is_dmacoherent = true;
rc = ahci_platform_enable_resources(hpriv);
if (rc)
return rc;
hpriv->plat_data = qoriq_priv;
rc = ahci_qoriq_phy_init(hpriv);
if (rc)
goto disable_resources;
rc = ahci_platform_init_host(pdev, hpriv, &ahci_qoriq_port_info,
&ahci_qoriq_sht);
if (rc)
goto disable_resources;
return 0;
disable_resources:
ahci_platform_disable_resources(hpriv);
return rc;
}
#ifdef CONFIG_PM_SLEEP
static int ahci_qoriq_resume(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
int rc;
rc = ahci_platform_enable_resources(hpriv);
if (rc)
return rc;
rc = ahci_qoriq_phy_init(hpriv);
if (rc)
goto disable_resources;
rc = ahci_platform_resume_host(dev);
if (rc)
goto disable_resources;
/* We resumed so update PM runtime state */
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
return 0;
disable_resources:
ahci_platform_disable_resources(hpriv);
return rc;
}
#endif
static SIMPLE_DEV_PM_OPS(ahci_qoriq_pm_ops, ahci_platform_suspend,
ahci_qoriq_resume);
static struct platform_driver ahci_qoriq_driver = {
.probe = ahci_qoriq_probe,
.remove_new = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.of_match_table = ahci_qoriq_of_match,
.acpi_match_table = ahci_qoriq_acpi_match,
.pm = &ahci_qoriq_pm_ops,
},
};
module_platform_driver(ahci_qoriq_driver);
MODULE_DESCRIPTION("Freescale QorIQ AHCI SATA platform driver");
MODULE_AUTHOR("Tang Yuantian <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/ata/ahci_qoriq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Libata driver for the highpoint 366 and 368 UDMA66 ATA controllers.
*
* This driver is heavily based upon:
*
* linux/drivers/ide/pci/hpt366.c Version 0.36 April 25, 2003
*
* Copyright (C) 1999-2003 Andre Hedrick <[email protected]>
* Portions Copyright (C) 2001 Sun Microsystems, Inc.
* Portions Copyright (C) 2003 Red Hat Inc
*
*
* TODO
* Look into engine reset on timeout errors. Should not be required.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_hpt366"
#define DRV_VERSION "0.6.13"
struct hpt_clock {
u8 xfer_mode;
u32 timing;
};
/* key for bus clock timings
* bit
* 0:3 data_high_time. Inactive time of DIOW_/DIOR_ for PIO and MW DMA.
* cycles = value + 1
* 4:7 data_low_time. Active time of DIOW_/DIOR_ for PIO and MW DMA.
* cycles = value + 1
* 8:11 cmd_high_time. Inactive time of DIOW_/DIOR_ during task file
* register access.
* 12:15 cmd_low_time. Active time of DIOW_/DIOR_ during task file
* register access.
* 16:18 udma_cycle_time. Clock cycles for UDMA xfer?
* 19:21 pre_high_time. Time to initialize 1st cycle for PIO and MW DMA xfer.
* 22:24 cmd_pre_high_time. Time to initialize 1st PIO cycle for task file
* register access.
* 28 UDMA enable.
* 29 DMA enable.
* 30 PIO_MST enable. If set, the chip is in bus master mode during
* PIO xfer.
* 31 FIFO enable.
*/
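/*
 * Illustrative only: a tiny accessor for the timing-word layout documented
 * in the key above.  For example, hpt366_33's XFER_PIO_4 entry 0xc0c8a731
 * decodes to data_high_time 1 (bits 0:3), data_low_time 3 (bits 4:7),
 * cmd_high_time 7 (bits 8:11), cmd_low_time 0xa (bits 12:15), with the
 * PIO_MST (bit 30) and FIFO (bit 31) enables set.  The helper name is
 * hypothetical.
 */
static inline u32 hpt36x_timing_field(u32 timing, int shift, u32 mask)
{
	return (timing >> shift) & mask;
}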
static const struct hpt_clock hpt366_40[] = {
{ XFER_UDMA_4, 0x900fd943 },
{ XFER_UDMA_3, 0x900ad943 },
{ XFER_UDMA_2, 0x900bd943 },
{ XFER_UDMA_1, 0x9008d943 },
{ XFER_UDMA_0, 0x9008d943 },
{ XFER_MW_DMA_2, 0xa008d943 },
{ XFER_MW_DMA_1, 0xa010d955 },
{ XFER_MW_DMA_0, 0xa010d9fc },
{ XFER_PIO_4, 0xc008d963 },
{ XFER_PIO_3, 0xc010d974 },
{ XFER_PIO_2, 0xc010d997 },
{ XFER_PIO_1, 0xc010d9c7 },
{ XFER_PIO_0, 0xc018d9d9 },
{ 0, 0x0120d9d9 }
};
static const struct hpt_clock hpt366_33[] = {
{ XFER_UDMA_4, 0x90c9a731 },
{ XFER_UDMA_3, 0x90cfa731 },
{ XFER_UDMA_2, 0x90caa731 },
{ XFER_UDMA_1, 0x90cba731 },
{ XFER_UDMA_0, 0x90c8a731 },
{ XFER_MW_DMA_2, 0xa0c8a731 },
{ XFER_MW_DMA_1, 0xa0c8a732 }, /* 0xa0c8a733 */
{ XFER_MW_DMA_0, 0xa0c8a797 },
{ XFER_PIO_4, 0xc0c8a731 },
{ XFER_PIO_3, 0xc0c8a742 },
{ XFER_PIO_2, 0xc0d0a753 },
{ XFER_PIO_1, 0xc0d0a7a3 }, /* 0xc0d0a793 */
{ XFER_PIO_0, 0xc0d0a7aa }, /* 0xc0d0a7a7 */
{ 0, 0x0120a7a7 }
};
static const struct hpt_clock hpt366_25[] = {
{ XFER_UDMA_4, 0x90c98521 },
{ XFER_UDMA_3, 0x90cf8521 },
{ XFER_UDMA_2, 0x90cf8521 },
{ XFER_UDMA_1, 0x90cb8521 },
{ XFER_UDMA_0, 0x90cb8521 },
{ XFER_MW_DMA_2, 0xa0ca8521 },
{ XFER_MW_DMA_1, 0xa0ca8532 },
{ XFER_MW_DMA_0, 0xa0ca8575 },
{ XFER_PIO_4, 0xc0ca8521 },
{ XFER_PIO_3, 0xc0ca8532 },
{ XFER_PIO_2, 0xc0ca8542 },
{ XFER_PIO_1, 0xc0d08572 },
{ XFER_PIO_0, 0xc0d08585 },
{ 0, 0x01208585 }
};
/**
* hpt36x_find_mode - find the hpt36x timing
* @ap: ATA port
* @speed: transfer mode
*
* Return the 32bit register programming information for this channel
* that matches the speed provided.
*/
static u32 hpt36x_find_mode(struct ata_port *ap, int speed)
{
struct hpt_clock *clocks = ap->host->private_data;
while (clocks->xfer_mode) {
if (clocks->xfer_mode == speed)
return clocks->timing;
clocks++;
}
BUG();
return 0xffffffffU; /* silence compiler warning */
}
static const char * const bad_ata33[] = {
"Maxtor 92720U8", "Maxtor 92040U6", "Maxtor 91360U4", "Maxtor 91020U3",
"Maxtor 90845U3", "Maxtor 90650U2",
"Maxtor 91360D8", "Maxtor 91190D7", "Maxtor 91020D6", "Maxtor 90845D5",
"Maxtor 90680D4", "Maxtor 90510D3", "Maxtor 90340D2",
"Maxtor 91152D8", "Maxtor 91008D7", "Maxtor 90845D6", "Maxtor 90840D6",
"Maxtor 90720D5", "Maxtor 90648D5", "Maxtor 90576D4",
"Maxtor 90510D4",
"Maxtor 90432D3", "Maxtor 90288D2", "Maxtor 90256D2",
"Maxtor 91000D8", "Maxtor 90910D8", "Maxtor 90875D7", "Maxtor 90840D7",
"Maxtor 90750D6", "Maxtor 90625D5", "Maxtor 90500D4",
"Maxtor 91728D8", "Maxtor 91512D7", "Maxtor 91303D6", "Maxtor 91080D5",
"Maxtor 90845D4", "Maxtor 90680D4", "Maxtor 90648D3", "Maxtor 90432D2",
NULL
};
static const char * const bad_ata66_4[] = {
"IBM-DTLA-307075",
"IBM-DTLA-307060",
"IBM-DTLA-307045",
"IBM-DTLA-307030",
"IBM-DTLA-307020",
"IBM-DTLA-307015",
"IBM-DTLA-305040",
"IBM-DTLA-305030",
"IBM-DTLA-305020",
"IC35L010AVER07-0",
"IC35L020AVER07-0",
"IC35L030AVER07-0",
"IC35L040AVER07-0",
"IC35L060AVER07-0",
"WDC AC310200R",
NULL
};
static const char * const bad_ata66_3[] = {
"WDC AC310200R",
NULL
};
static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr,
const char * const list[])
{
unsigned char model_num[ATA_ID_PROD_LEN + 1];
int i;
ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
i = match_string(list, -1, model_num);
if (i >= 0) {
ata_dev_warn(dev, "%s is not supported for %s\n", modestr, list[i]);
return 1;
}
return 0;
}
/**
* hpt366_filter - mode selection filter
* @adev: ATA device
* @mask: Current mask to manipulate and pass back
*
* Block UDMA on devices that cause trouble with this controller.
*/
static unsigned int hpt366_filter(struct ata_device *adev, unsigned int mask)
{
if (adev->class == ATA_DEV_ATA) {
if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33))
mask &= ~ATA_MASK_UDMA;
if (hpt_dma_blacklisted(adev, "UDMA3", bad_ata66_3))
mask &= ~(0xF8 << ATA_SHIFT_UDMA);
if (hpt_dma_blacklisted(adev, "UDMA4", bad_ata66_4))
mask &= ~(0xF0 << ATA_SHIFT_UDMA);
} else if (adev->class == ATA_DEV_ATAPI)
mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
return mask;
}
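/*
 * Worked example, illustrative only: in the transfer mask the UDMA modes
 * occupy one bit each starting at ATA_SHIFT_UDMA, so in hpt366_filter()
 * above clearing 0xF8 << ATA_SHIFT_UDMA removes UDMA3..UDMA7 (capping a
 * bad_ata66_3 drive at UDMA2) and clearing 0xF0 << ATA_SHIFT_UDMA removes
 * UDMA4..UDMA7 (capping a bad_ata66_4 drive at UDMA3).
 */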
static int hpt36x_cable_detect(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u8 ata66;
/*
* Each channel of pata_hpt366 occupies separate PCI function
* as the primary channel and bit1 indicates the cable type.
*/
pci_read_config_byte(pdev, 0x5A, &ata66);
if (ata66 & 2)
return ATA_CBL_PATA40;
return ATA_CBL_PATA80;
}
static void hpt366_set_mode(struct ata_port *ap, struct ata_device *adev,
u8 mode)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u32 addr = 0x40 + 4 * adev->devno;
u32 mask, reg, t;
/* determine timing mask and find matching clock entry */
if (mode < XFER_MW_DMA_0)
mask = 0xc1f8ffff;
else if (mode < XFER_UDMA_0)
mask = 0x303800ff;
else
mask = 0x30070000;
t = hpt36x_find_mode(ap, mode);
/*
* Combine new mode bits with old config bits and disable
* on-chip PIO FIFO/buffer (and PIO MST mode as well) to avoid
* problems handling I/O errors later.
*/
pci_read_config_dword(pdev, addr, &reg);
reg = ((reg & ~mask) | (t & mask)) & ~0xc0000000;
pci_write_config_dword(pdev, addr, reg);
}
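/*
 * Worked example, illustrative only: programming XFER_PIO_4 from the
 * 33 MHz table gives t = 0xc0c8a731 and, because PIO modes use
 * mask = 0xc1f8ffff, hpt366_set_mode() above replaces only the PIO/command
 * timing fields and enable bits of the old register value; the final
 * "& ~0xc0000000" then clears the FIFO and PIO_MST enables as noted in
 * the in-function comment.
 */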
/**
* hpt366_set_piomode - PIO setup
* @ap: ATA interface
* @adev: device on the interface
*
* Perform PIO mode setup.
*/
static void hpt366_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
hpt366_set_mode(ap, adev, adev->pio_mode);
}
/**
* hpt366_set_dmamode - DMA timing setup
* @ap: ATA interface
* @adev: Device being configured
*
* Set up the channel for MWDMA or UDMA modes. Much the same as with
* PIO, load the mode number and then set MWDMA or UDMA flag.
*/
static void hpt366_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
hpt366_set_mode(ap, adev, adev->dma_mode);
}
/**
* hpt366_prereset - reset the hpt36x bus
* @link: ATA link to reset
* @deadline: deadline jiffies for the operation
*
* Perform the initial reset handling for the 36x series controllers.
* Reset the hardware and state machine,
*/
static int hpt366_prereset(struct ata_link *link, unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
/*
* HPT36x chips have one channel per function and have
* both channel enable bits located differently and visible
* to both functions -- really stupid design decision... :-(
* Bit 4 is for the primary channel, bit 5 for the secondary.
*/
static const struct pci_bits hpt366_enable_bits = {
0x50, 1, 0x30, 0x30
};
u8 mcr2;
if (!pci_test_config_bits(pdev, &hpt366_enable_bits))
return -ENOENT;
pci_read_config_byte(pdev, 0x51, &mcr2);
if (mcr2 & 0x80)
pci_write_config_byte(pdev, 0x51, mcr2 & ~0x80);
return ata_sff_prereset(link, deadline);
}
static const struct scsi_host_template hpt36x_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
/*
* Configuration for HPT366/68
*/
static struct ata_port_operations hpt366_port_ops = {
.inherits = &ata_bmdma_port_ops,
.prereset = hpt366_prereset,
.cable_detect = hpt36x_cable_detect,
.mode_filter = hpt366_filter,
.set_piomode = hpt366_set_piomode,
.set_dmamode = hpt366_set_dmamode,
};
/**
* hpt36x_init_chipset - common chip setup
* @dev: PCI device
*
* Perform the chip setup work that must be done at both init and
* resume time
*/
static void hpt36x_init_chipset(struct pci_dev *dev)
{
u8 mcr1;
pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4));
pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78);
pci_write_config_byte(dev, PCI_MIN_GNT, 0x08);
pci_write_config_byte(dev, PCI_MAX_LAT, 0x08);
/*
* Now we'll have to force both channels enabled if at least one
* of them has been enabled by BIOS...
*/
pci_read_config_byte(dev, 0x50, &mcr1);
if (mcr1 & 0x30)
pci_write_config_byte(dev, 0x50, mcr1 | 0x30);
}
/**
* hpt36x_init_one - Initialise an HPT366/368
* @dev: PCI device
* @id: Entry in match table
*
* Initialise an HPT36x device. There are some interesting complications
* here. Firstly the chip may report 366 and be one of several variants.
* Secondly, all the timings depend on the chip's clock, which we must
* detect and look up.
*
* These are the known chip mappings. The list may be missing a couple of
* later releases.
*
* Chip version		PCI		Rev	Notes
* HPT366		4 (HPT366)	0	UDMA66
* HPT366		4 (HPT366)	1	UDMA66
* HPT368		4 (HPT366)	2	UDMA66
* HPT37x/30x		4 (HPT366)	3+	Other driver
*
*/
static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
static const struct ata_port_info info_hpt366 = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA4,
.port_ops = &hpt366_port_ops
};
const struct ata_port_info *ppi[] = { &info_hpt366, NULL };
const void *hpriv = NULL;
u32 reg1;
int rc;
rc = pcim_enable_device(dev);
if (rc)
return rc;
/* May be a later chip in disguise. Check */
/* Newer chips are not in the HPT36x driver. Ignore them */
if (dev->revision > 2)
return -ENODEV;
hpt36x_init_chipset(dev);
pci_read_config_dword(dev, 0x40, &reg1);
/* PCI clocking determines the ATA timing values to use */
/* info_hpt366 is safe against re-entry so we can scribble on it */
switch ((reg1 & 0xf00) >> 8) {
case 9:
hpriv = &hpt366_40;
break;
case 5:
hpriv = &hpt366_25;
break;
default:
hpriv = &hpt366_33;
break;
}
/* Now kick off ATA set up */
return ata_pci_bmdma_init_one(dev, ppi, &hpt36x_sht, (void *)hpriv, 0);
}
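/*
 * Illustrative sketch (not part of the driver): decoding the clock field
 * read by hpt36x_init_one() above. Bits 11:8 of the config dword at
 * offset 0x40 select which timing table is passed on as host private
 * data; the table names suggest 40, 25 and 33 MHz PCI clocks. The
 * register value below is hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

static const char *hpt36x_clock_table(uint32_t reg1)
{
	switch ((reg1 & 0xf00) >> 8) {
	case 9:
		return "hpt366_40";
	case 5:
		return "hpt366_25";
	default:
		return "hpt366_33";
	}
}

int main(void)
{
	uint32_t reg1 = 0x00000900;	/* hypothetical config-space readout */

	printf("selected timing table: %s\n", hpt36x_clock_table(reg1));
	return 0;
}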
#ifdef CONFIG_PM_SLEEP
static int hpt36x_reinit_one(struct pci_dev *dev)
{
struct ata_host *host = pci_get_drvdata(dev);
int rc;
rc = ata_pci_device_do_resume(dev);
if (rc)
return rc;
hpt36x_init_chipset(dev);
ata_host_resume(host);
return 0;
}
#endif
static const struct pci_device_id hpt36x[] = {
{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), },
{ },
};
static struct pci_driver hpt36x_pci_driver = {
.name = DRV_NAME,
.id_table = hpt36x,
.probe = hpt36x_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = hpt36x_reinit_one,
#endif
};
module_pci_driver(hpt36x_pci_driver);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for the Highpoint HPT366/368");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, hpt36x);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_hpt366.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* pata_pcmcia.c - PCMCIA PATA controller driver.
* Copyright 2005-2006 Red Hat Inc, all rights reserved.
* PCMCIA ident update Copyright 2006 Marcin Juszkiewicz
* <[email protected]>
*
* Heavily based upon ide-cs.c
* The initial developer of the original code is David A. Hinds
* <[email protected]>. Portions created by David A. Hinds
* are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <scsi/scsi_host.h>
#include <linux/ata.h>
#include <linux/libata.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
#include <pcmcia/cisreg.h>
#include <pcmcia/ciscode.h>
#define DRV_NAME "pata_pcmcia"
#define DRV_VERSION "0.3.5"
/**
* pcmcia_set_mode - PCMCIA specific mode setup
* @link: link
* @r_failed_dev: Return pointer for failed device
*
* Perform the tuning and setup of the devices and timings, which
* for PCMCIA is the same as any other controller. We wrap it however
* as we need to spot hardware with incorrect or missing master/slave
* decode, which alas is embarrassingly common in the PC world
*/
static int pcmcia_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
struct ata_device *master = &link->device[0];
struct ata_device *slave = &link->device[1];
if (!ata_dev_enabled(master) || !ata_dev_enabled(slave))
return ata_do_set_mode(link, r_failed_dev);
if (memcmp(master->id + ATA_ID_FW_REV, slave->id + ATA_ID_FW_REV,
ATA_ID_FW_REV_LEN + ATA_ID_PROD_LEN) == 0) {
/* Suspicious match, but could be two cards from
the same vendor - check serial */
if (memcmp(master->id + ATA_ID_SERNO, slave->id + ATA_ID_SERNO,
ATA_ID_SERNO_LEN) == 0 && master->id[ATA_ID_SERNO] >> 8) {
ata_dev_warn(slave, "is a ghost device, ignoring\n");
ata_dev_disable(slave);
}
}
return ata_do_set_mode(link, r_failed_dev);
}
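/*
 * Illustrative sketch (not part of the driver): the "ghost slave" test
 * performed by pcmcia_set_mode() above, applied to two hypothetical
 * 256-word IDENTIFY buffers. A slave that mirrors the master's firmware
 * revision, model string and (non-blank) serial number is almost
 * certainly the same device answering on both select lines. The ID_*
 * constants mirror the standard IDENTIFY word offsets; offsets are in
 * 16-bit words, lengths in bytes.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ID_SERNO	10		/* words 10-19: serial number */
#define ID_FW_REV	23		/* words 23-26: firmware, then model */
#define ID_SERNO_LEN	20
#define ID_FW_PROD_LEN	(8 + 40)

static bool slave_is_ghost(const uint16_t *master, const uint16_t *slave)
{
	if (memcmp(master + ID_FW_REV, slave + ID_FW_REV, ID_FW_PROD_LEN))
		return false;	/* different firmware/model: two real devices */
	return memcmp(master + ID_SERNO, slave + ID_SERNO, ID_SERNO_LEN) == 0 &&
	       (master[ID_SERNO] >> 8) != 0;	/* identical, non-blank serial */
}

int main(void)
{
	uint16_t master[256] = { 0 }, slave[256];

	master[ID_SERNO] = ('S' << 8) | '1';	/* pretend non-blank serial */
	memcpy(slave, master, sizeof(master));	/* slave mirrors the master */

	printf("slave is a ghost: %s\n", slave_is_ghost(master, slave) ? "yes" : "no");
	return 0;
}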
/**
* pcmcia_set_mode_8bit - PCMCIA specific mode setup
* @link: link
* @r_failed_dev: Return pointer for failed device
*
* For the simple emulated 8bit stuff the less we do the better.
*/
static int pcmcia_set_mode_8bit(struct ata_link *link,
struct ata_device **r_failed_dev)
{
return 0;
}
/**
* ata_data_xfer_8bit - Transfer data by 8bit PIO
* @qc: queued command
* @buf: data buffer
* @buflen: buffer length
* @rw: read/write
*
* Transfer data from/to the device data register by 8 bit PIO.
*
* LOCKING:
* Inherited from caller.
*/
static unsigned int ata_data_xfer_8bit(struct ata_queued_cmd *qc,
unsigned char *buf, unsigned int buflen, int rw)
{
struct ata_port *ap = qc->dev->link->ap;
if (rw == READ)
ioread8_rep(ap->ioaddr.data_addr, buf, buflen);
else
iowrite8_rep(ap->ioaddr.data_addr, buf, buflen);
return buflen;
}
/**
* pcmcia_8bit_drain_fifo - Stock FIFO drain logic for SFF controllers
* @qc: command
*
* Drain the FIFO and device of any stuck data following a command
* failing to complete. In some cases this is necessary before a
* reset will recover the device.
*
*/
static void pcmcia_8bit_drain_fifo(struct ata_queued_cmd *qc)
{
int count;
struct ata_port *ap;
/* We only need to flush incoming data when a command was running */
if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE)
return;
ap = qc->ap;
/* Drain up to 64K of data before we give up this recovery method */
for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ)
&& count++ < 65536;)
ioread8(ap->ioaddr.data_addr);
if (count)
ata_port_warn(ap, "drained %d bytes to clear DRQ\n", count);
}
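/*
 * Illustrative sketch (not part of the driver): the bounded drain loop
 * used by pcmcia_8bit_drain_fifo() above, written against two stand-in
 * helpers. read_status() and read_data_byte() are hypothetical and only
 * simulate a device here; the idea is simply to discard data while the
 * device still asserts DRQ, but never read more than 64K bytes.
 */
#include <stdio.h>

#define DRQ 0x08

static int pending = 100;			/* pretend 100 stale bytes */

static unsigned char read_status(void)		/* stand-in for the status read */
{
	return pending ? DRQ : 0;
}

static unsigned char read_data_byte(void)	/* stand-in for the data-port read */
{
	if (pending)
		pending--;
	return 0;
}

int main(void)
{
	int count;

	for (count = 0; (read_status() & DRQ) && count < 65536; count++)
		read_data_byte();

	if (count)
		printf("drained %d bytes to clear DRQ\n", count);
	return 0;
}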
static const struct scsi_host_template pcmcia_sht = {
ATA_PIO_SHT(DRV_NAME),
};
static struct ata_port_operations pcmcia_port_ops = {
.inherits = &ata_sff_port_ops,
.sff_data_xfer = ata_sff_data_xfer32,
.cable_detect = ata_cable_40wire,
.set_mode = pcmcia_set_mode,
};
static struct ata_port_operations pcmcia_8bit_port_ops = {
.inherits = &ata_sff_port_ops,
.sff_data_xfer = ata_data_xfer_8bit,
.cable_detect = ata_cable_40wire,
.set_mode = pcmcia_set_mode_8bit,
.sff_drain_fifo = pcmcia_8bit_drain_fifo,
};
static int pcmcia_check_one_config(struct pcmcia_device *pdev, void *priv_data)
{
int *is_kme = priv_data;
if ((pdev->resource[0]->flags & IO_DATA_PATH_WIDTH)
!= IO_DATA_PATH_WIDTH_8) {
pdev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
pdev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
}
pdev->resource[1]->flags &= ~IO_DATA_PATH_WIDTH;
pdev->resource[1]->flags |= IO_DATA_PATH_WIDTH_8;
if (pdev->resource[1]->end) {
pdev->resource[0]->end = 8;
pdev->resource[1]->end = (*is_kme) ? 2 : 1;
} else {
if (pdev->resource[0]->end < 16)
return -ENODEV;
}
return pcmcia_request_io(pdev);
}
/**
* pcmcia_init_one - attach a PCMCIA interface
* @pdev: pcmcia device
*
* Register a PCMCIA IDE interface. Such interfaces are PIO 0 and
* shared IRQ.
*/
static int pcmcia_init_one(struct pcmcia_device *pdev)
{
struct ata_host *host;
struct ata_port *ap;
int is_kme = 0, ret = -ENOMEM, p;
unsigned long io_base, ctl_base;
void __iomem *io_addr, *ctl_addr;
int n_ports = 1;
struct ata_port_operations *ops = &pcmcia_port_ops;
/* Set up attributes in order to probe card and get resources */
pdev->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO |
CONF_AUTO_SET_VPP | CONF_AUTO_CHECK_VCC;
/* See if we have a manufacturer identifier. Use it to set is_kme for
vendor quirks */
is_kme = ((pdev->manf_id == MANFID_KME) &&
((pdev->card_id == PRODID_KME_KXLC005_A) ||
(pdev->card_id == PRODID_KME_KXLC005_B)));
if (pcmcia_loop_config(pdev, pcmcia_check_one_config, &is_kme)) {
pdev->config_flags &= ~CONF_AUTO_CHECK_VCC;
if (pcmcia_loop_config(pdev, pcmcia_check_one_config, &is_kme))
goto failed; /* No suitable config found */
}
io_base = pdev->resource[0]->start;
if (pdev->resource[1]->end)
ctl_base = pdev->resource[1]->start;
else
ctl_base = pdev->resource[0]->start + 0x0e;
if (!pdev->irq)
goto failed;
ret = pcmcia_enable_device(pdev);
if (ret)
goto failed;
/* iomap */
ret = -ENOMEM;
io_addr = devm_ioport_map(&pdev->dev, io_base, 8);
ctl_addr = devm_ioport_map(&pdev->dev, ctl_base, 1);
if (!io_addr || !ctl_addr)
goto failed;
/* Success. Disable the IRQ nIEN line, do quirks */
iowrite8(0x02, ctl_addr);
if (is_kme)
iowrite8(0x81, ctl_addr + 0x01);
/* FIXME: Could be more ports at base + 0x10 but we only deal with
one right now */
if (resource_size(pdev->resource[0]) >= 0x20)
n_ports = 2;
if (pdev->manf_id == 0x0097 && pdev->card_id == 0x1620)
ops = &pcmcia_8bit_port_ops;
/*
* Having done the PCMCIA plumbing the ATA side is relatively
* sane.
*/
ret = -ENOMEM;
host = ata_host_alloc(&pdev->dev, n_ports);
if (!host)
goto failed;
for (p = 0; p < n_ports; p++) {
ap = host->ports[p];
ap->ops = ops;
ap->pio_mask = ATA_PIO0; /* ISA so PIO 0 cycles */
ap->flags |= ATA_FLAG_SLAVE_POSS;
ap->ioaddr.cmd_addr = io_addr + 0x10 * p;
ap->ioaddr.altstatus_addr = ctl_addr + 0x10 * p;
ap->ioaddr.ctl_addr = ctl_addr + 0x10 * p;
ata_sff_std_ports(&ap->ioaddr);
ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", io_base, ctl_base);
}
/* activate */
ret = ata_host_activate(host, pdev->irq, ata_sff_interrupt,
IRQF_SHARED, &pcmcia_sht);
if (ret)
goto failed;
pdev->priv = host;
return 0;
failed:
pcmcia_disable_device(pdev);
return ret;
}
/**
* pcmcia_remove_one - unplug a PCMCIA interface
* @pdev: pcmcia device
*
* A PCMCIA ATA device has been unplugged. Perform the needed
* cleanup. Also called on module unload for any active devices.
*/
static void pcmcia_remove_one(struct pcmcia_device *pdev)
{
struct ata_host *host = pdev->priv;
if (host)
ata_host_detach(host);
pcmcia_disable_device(pdev);
}
static const struct pcmcia_device_id pcmcia_devices[] = {
PCMCIA_DEVICE_FUNC_ID(4),
PCMCIA_DEVICE_MANF_CARD(0x0000, 0x0000), /* Corsair */
PCMCIA_DEVICE_MANF_CARD(0x0007, 0x0000), /* Hitachi */
PCMCIA_DEVICE_MANF_CARD(0x000a, 0x0000), /* I-O Data CFA */
PCMCIA_DEVICE_MANF_CARD(0x001c, 0x0001), /* Mitsubishi CFA */
PCMCIA_DEVICE_MANF_CARD(0x0032, 0x0704),
PCMCIA_DEVICE_MANF_CARD(0x0032, 0x2904),
PCMCIA_DEVICE_MANF_CARD(0x0045, 0x0401), /* SanDisk CFA */
PCMCIA_DEVICE_MANF_CARD(0x004f, 0x0000), /* Kingston */
PCMCIA_DEVICE_MANF_CARD(0x0097, 0x1620), /* TI emulated */
PCMCIA_DEVICE_MANF_CARD(0x0098, 0x0000), /* Toshiba */
PCMCIA_DEVICE_MANF_CARD(0x00a4, 0x002d),
PCMCIA_DEVICE_MANF_CARD(0x00ce, 0x0000), /* Samsung */
PCMCIA_DEVICE_MANF_CARD(0x00f1, 0x0101), /* SanDisk High (>8G) CFA */
PCMCIA_DEVICE_MANF_CARD(0x0319, 0x0000), /* Hitachi */
PCMCIA_DEVICE_MANF_CARD(0x2080, 0x0001),
PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0100), /* Viking CFA */
PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0200), /* Lexar, Viking CFA */
PCMCIA_DEVICE_PROD_ID123("Caravelle", "PSC-IDE ", "PSC000", 0x8c36137c, 0xd0693ab8, 0x2768a9f0),
PCMCIA_DEVICE_PROD_ID123("CDROM", "IDE", "MCD-601p", 0x1b9179ca, 0xede88951, 0x0d902f74),
PCMCIA_DEVICE_PROD_ID123("PCMCIA", "IDE CARD", "F1", 0x281f1c5d, 0x1907960c, 0xf7fde8b9),
PCMCIA_DEVICE_PROD_ID12("ARGOSY", "CD-ROM", 0x78f308dc, 0x66536591),
PCMCIA_DEVICE_PROD_ID12("ARGOSY", "PnPIDE", 0x78f308dc, 0x0c694728),
PCMCIA_DEVICE_PROD_ID12("CNF ", "CD-ROM", 0x46d7db81, 0x66536591),
PCMCIA_DEVICE_PROD_ID12("CNF CD-M", "CD-ROM", 0x7d93b852, 0x66536591),
PCMCIA_DEVICE_PROD_ID12("Creative Technology Ltd.", "PCMCIA CD-ROM Interface Card", 0xff8c8a45, 0xfe8020c4),
PCMCIA_DEVICE_PROD_ID12("Digital Equipment Corporation.", "Digital Mobile Media CD-ROM", 0x17692a66, 0xef1dcbde),
PCMCIA_DEVICE_PROD_ID12("EXP", "CD+GAME", 0x6f58c983, 0x63c13aaf),
PCMCIA_DEVICE_PROD_ID12("EXP ", "CD-ROM", 0x0a5c52fd, 0x66536591),
PCMCIA_DEVICE_PROD_ID12("EXP ", "PnPIDE", 0x0a5c52fd, 0x0c694728),
PCMCIA_DEVICE_PROD_ID12("FREECOM", "PCCARD-IDE", 0x5714cbf7, 0x48e0ab8e),
PCMCIA_DEVICE_PROD_ID12("HITACHI", "FLASH", 0xf4f43949, 0x9eb86aae),
PCMCIA_DEVICE_PROD_ID12("HITACHI", "microdrive", 0xf4f43949, 0xa6d76178),
PCMCIA_DEVICE_PROD_ID12("Hyperstone", "Model1", 0x3d5b9ef5, 0xca6ab420),
PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178),
PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753),
PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 1GB", 0x2e6d1829, 0x55d5bffb),
PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 4GB", 0x2e6d1829, 0x531e7d10),
PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF8GB", 0x2e6d1829, 0xacbe682e),
PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b),
PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149),
PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDEII", 0x547e66dc, 0xb3662674),
PCMCIA_DEVICE_PROD_ID12("LOOKMEET", "CBIDE2 ", 0xe37be2b5, 0x8671043b),
PCMCIA_DEVICE_PROD_ID12("M-Systems", "CF300", 0x7ed2ad87, 0x7e9e78ee),
PCMCIA_DEVICE_PROD_ID12("M-Systems", "CF500", 0x7ed2ad87, 0x7a13045c),
PCMCIA_DEVICE_PROD_ID2("NinjaATA-", 0xebe0bd79),
PCMCIA_DEVICE_PROD_ID12("PCMCIA", "CD-ROM", 0x281f1c5d, 0x66536591),
PCMCIA_DEVICE_PROD_ID12("PCMCIA", "PnPIDE", 0x281f1c5d, 0x0c694728),
PCMCIA_DEVICE_PROD_ID12("SHUTTLE TECHNOLOGY LTD.", "PCCARD-IDE/ATAPI Adapter", 0x4a3f0ba0, 0x322560e1),
PCMCIA_DEVICE_PROD_ID12("SEAGATE", "ST1", 0x87c1b330, 0xe1f30883),
PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "04/05/06", 0x43d74cb4, 0x6a22777d),
PCMCIA_DEVICE_PROD_ID12("SMI VENDOR", "SMI PRODUCT", 0x30896c92, 0x703cc5f6),
PCMCIA_DEVICE_PROD_ID12("TOSHIBA", "MK2001MPL", 0xb4585a1a, 0x3489e003),
PCMCIA_DEVICE_PROD_ID1("TRANSCEND 512M ", 0xd0909443),
PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF45", 0x709b1bf1, 0xf68b6f32),
PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1),
PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2),
PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8),
PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF133", 0x709b1bf1, 0x7558f133),
PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS8GCF133", 0x709b1bf1, 0xb2f89b47),
PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852),
PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918),
PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209),
PCMCIA_DEVICE_PROD_ID12("STI", "Flash 5.0", 0xbf2df18d, 0x8cb57a0e),
PCMCIA_MFC_DEVICE_PROD_ID12(1, "SanDisk", "ConnectPlus", 0x7a954bd9, 0x74be00c6),
PCMCIA_DEVICE_PROD_ID2("Flash Card", 0x5a362506),
PCMCIA_DEVICE_NULL,
};
MODULE_DEVICE_TABLE(pcmcia, pcmcia_devices);
static struct pcmcia_driver pcmcia_driver = {
.owner = THIS_MODULE,
.name = DRV_NAME,
.id_table = pcmcia_devices,
.probe = pcmcia_init_one,
.remove = pcmcia_remove_one,
};
module_pcmcia_driver(pcmcia_driver);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for PCMCIA ATA");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_pcmcia.c |
/*
* AHCI glue platform driver for Marvell EBU SOCs
*
* Copyright (C) 2014 Marvell
*
* Thomas Petazzoni <[email protected]>
* Marcin Wojtas <[email protected]>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/ahci_platform.h>
#include <linux/kernel.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include "ahci.h"
#define DRV_NAME "ahci-mvebu"
#define AHCI_VENDOR_SPECIFIC_0_ADDR 0xa0
#define AHCI_VENDOR_SPECIFIC_0_DATA 0xa4
#define AHCI_WINDOW_CTRL(win) (0x60 + ((win) << 4))
#define AHCI_WINDOW_BASE(win) (0x64 + ((win) << 4))
#define AHCI_WINDOW_SIZE(win) (0x68 + ((win) << 4))
struct ahci_mvebu_plat_data {
int (*plat_config)(struct ahci_host_priv *hpriv);
unsigned int flags;
};
static void ahci_mvebu_mbus_config(struct ahci_host_priv *hpriv,
const struct mbus_dram_target_info *dram)
{
int i;
for (i = 0; i < 4; i++) {
writel(0, hpriv->mmio + AHCI_WINDOW_CTRL(i));
writel(0, hpriv->mmio + AHCI_WINDOW_BASE(i));
writel(0, hpriv->mmio + AHCI_WINDOW_SIZE(i));
}
for (i = 0; i < dram->num_cs; i++) {
const struct mbus_dram_window *cs = dram->cs + i;
writel((cs->mbus_attr << 8) |
(dram->mbus_dram_target_id << 4) | 1,
hpriv->mmio + AHCI_WINDOW_CTRL(i));
writel(cs->base >> 16, hpriv->mmio + AHCI_WINDOW_BASE(i));
writel(((cs->size - 1) & 0xffff0000),
hpriv->mmio + AHCI_WINDOW_SIZE(i));
}
}
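/*
 * Illustrative sketch (not part of the driver): how one MBus DRAM window
 * descriptor is folded into the three AHCI_WINDOW_* values written by
 * ahci_mvebu_mbus_config() above. The attribute, target ID, base and
 * size below are hypothetical, and bit 0 of the control word is assumed
 * to be the window enable bit.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t  mbus_attr = 0x0e;		/* hypothetical chip-select attribute */
	uint8_t  target_id = 0x00;		/* hypothetical DRAM target ID */
	uint32_t base      = 0x00000000;	/* window base address */
	uint32_t size      = 0x40000000;	/* 1 GiB window */

	uint32_t ctrl     = (mbus_attr << 8) | (target_id << 4) | 1;
	uint32_t base_reg = base >> 16;
	uint32_t size_reg = (size - 1) & 0xffff0000;

	printf("CTRL=0x%08x BASE=0x%08x SIZE=0x%08x\n",
	       (unsigned int)ctrl, (unsigned int)base_reg, (unsigned int)size_reg);
	return 0;
}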
static void ahci_mvebu_regret_option(struct ahci_host_priv *hpriv)
{
/*
* Enable the regret bit to allow the SATA unit to regret a
* request that didn't receive an acknowledgement and avoid a
* deadlock.
*/
writel(0x4, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_ADDR);
writel(0x80, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA);
}
static int ahci_mvebu_armada_380_config(struct ahci_host_priv *hpriv)
{
const struct mbus_dram_target_info *dram;
int rc = 0;
dram = mv_mbus_dram_info();
if (dram)
ahci_mvebu_mbus_config(hpriv, dram);
else
rc = -ENODEV;
ahci_mvebu_regret_option(hpriv);
return rc;
}
static int ahci_mvebu_armada_3700_config(struct ahci_host_priv *hpriv)
{
u32 reg;
writel(0, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_ADDR);
reg = readl(hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA);
reg |= BIT(6);
writel(reg, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA);
return 0;
}
/**
* ahci_mvebu_stop_engine - stop the DMA engine with the PxFBS errata workaround
*
* @ap: Target ata port
*
* Errata Ref#226 - SATA Disk HOT swap issue when connected through
* Port Multiplier in FIS-based Switching mode.
*
* To avoid the issue, according to design, the bits[11:8, 0] of
* register PxFBS are cleared when Port Command and Status (0x18) bit[0]
* changes its value from 1 to 0, i.e. falling edge of Port
* Command and Status bit[0] sends PULSE that resets PxFBS
* bits[11:8; 0].
*
* This function is used to override function of "ahci_stop_engine"
* from libahci.c by adding the mvebu work around(WA) to save PxFBS
* value before the PxCMD ST write of 0, then restore PxFBS value.
*
* Return: 0 on success; Error code otherwise.
*/
static int ahci_mvebu_stop_engine(struct ata_port *ap)
{
void __iomem *port_mmio = ahci_port_base(ap);
u32 tmp, port_fbs;
tmp = readl(port_mmio + PORT_CMD);
/* check if the HBA is idle */
if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
return 0;
/* save the port PxFBS register for later restore */
port_fbs = readl(port_mmio + PORT_FBS);
/* setting HBA to idle */
tmp &= ~PORT_CMD_START;
writel(tmp, port_mmio + PORT_CMD);
/*
* bit #15 PxCMD signal doesn't clear PxFBS,
* restore the PxFBS register right after clearing the PxCMD ST,
* no need to wait for the PxCMD bit #15.
*/
writel(port_fbs, port_mmio + PORT_FBS);
/* wait for engine to stop. This could be as long as 500 msec */
tmp = ata_wait_register(ap, port_mmio + PORT_CMD,
PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
if (tmp & PORT_CMD_LIST_ON)
return -EIO;
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int ahci_mvebu_suspend(struct platform_device *pdev, pm_message_t state)
{
return ahci_platform_suspend_host(&pdev->dev);
}
static int ahci_mvebu_resume(struct platform_device *pdev)
{
struct ata_host *host = platform_get_drvdata(pdev);
struct ahci_host_priv *hpriv = host->private_data;
const struct ahci_mvebu_plat_data *pdata = hpriv->plat_data;
pdata->plat_config(hpriv);
return ahci_platform_resume_host(&pdev->dev);
}
#else
#define ahci_mvebu_suspend NULL
#define ahci_mvebu_resume NULL
#endif
static const struct ata_port_info ahci_mvebu_port_info = {
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_platform_ops,
};
static const struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),
};
static int ahci_mvebu_probe(struct platform_device *pdev)
{
const struct ahci_mvebu_plat_data *pdata;
struct ahci_host_priv *hpriv;
int rc;
pdata = of_device_get_match_data(&pdev->dev);
if (!pdata)
return -EINVAL;
hpriv = ahci_platform_get_resources(pdev, 0);
if (IS_ERR(hpriv))
return PTR_ERR(hpriv);
hpriv->flags |= pdata->flags;
hpriv->plat_data = (void *)pdata;
rc = ahci_platform_enable_resources(hpriv);
if (rc)
return rc;
hpriv->stop_engine = ahci_mvebu_stop_engine;
rc = pdata->plat_config(hpriv);
if (rc)
goto disable_resources;
rc = ahci_platform_init_host(pdev, hpriv, &ahci_mvebu_port_info,
&ahci_platform_sht);
if (rc)
goto disable_resources;
return 0;
disable_resources:
ahci_platform_disable_resources(hpriv);
return rc;
}
static const struct ahci_mvebu_plat_data ahci_mvebu_armada_380_plat_data = {
.plat_config = ahci_mvebu_armada_380_config,
};
static const struct ahci_mvebu_plat_data ahci_mvebu_armada_3700_plat_data = {
.plat_config = ahci_mvebu_armada_3700_config,
.flags = AHCI_HFLAG_SUSPEND_PHYS,
};
static const struct of_device_id ahci_mvebu_of_match[] = {
{
.compatible = "marvell,armada-380-ahci",
.data = &ahci_mvebu_armada_380_plat_data,
},
{
.compatible = "marvell,armada-3700-ahci",
.data = &ahci_mvebu_armada_3700_plat_data,
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ahci_mvebu_of_match);
static struct platform_driver ahci_mvebu_driver = {
.probe = ahci_mvebu_probe,
.remove_new = ata_platform_remove_one,
.suspend = ahci_mvebu_suspend,
.resume = ahci_mvebu_resume,
.driver = {
.name = DRV_NAME,
.of_match_table = ahci_mvebu_of_match,
},
};
module_platform_driver(ahci_mvebu_driver);
MODULE_DESCRIPTION("Marvell EBU AHCI SATA driver");
MODULE_AUTHOR("Thomas Petazzoni <[email protected]>, Marcin Wojtas <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ahci_mvebu");
| linux-master | drivers/ata/ahci_mvebu.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Atari Falcon PATA controller driver
*
* Copyright (c) 2016 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Based on falconide.c:
*
* Created 12 Jul 1997 by Geert Uytterhoeven
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/ata.h>
#include <linux/libata.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <asm/setup.h>
#include <asm/atarihw.h>
#include <asm/atariints.h>
#include <asm/atari_stdma.h>
#define DRV_NAME "pata_falcon"
#define DRV_VERSION "0.1.0"
static int pata_falcon_swap_mask;
module_param_named(data_swab, pata_falcon_swap_mask, int, 0444);
MODULE_PARM_DESC(data_swab, "Data byte swap enable/disable bitmap (0x1==drive1, 0x2==drive2, 0x4==drive3, 0x8==drive4, default==0)");
static const struct scsi_host_template pata_falcon_sht = {
ATA_PIO_SHT(DRV_NAME),
};
static unsigned int pata_falcon_data_xfer(struct ata_queued_cmd *qc,
unsigned char *buf,
unsigned int buflen, int rw)
{
struct ata_device *dev = qc->dev;
struct ata_port *ap = dev->link->ap;
void __iomem *data_addr = ap->ioaddr.data_addr;
unsigned int words = buflen >> 1;
struct scsi_cmnd *cmd = qc->scsicmd;
bool swap = true;
if (dev->class == ATA_DEV_ATA && cmd &&
!blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)))
swap = (uintptr_t)ap->private_data & BIT(dev->devno);
/* Transfer multiple of 2 bytes */
if (rw == READ) {
if (swap)
raw_insw_swapw(data_addr, (u16 *)buf, words);
else
raw_insw(data_addr, (u16 *)buf, words);
} else {
if (swap)
raw_outsw_swapw(data_addr, (u16 *)buf, words);
else
raw_outsw(data_addr, (u16 *)buf, words);
}
/* Transfer trailing byte, if any. */
if (unlikely(buflen & 0x01)) {
unsigned char pad[2] = { };
/* Point buf to the tail of buffer */
buf += buflen - 1;
if (rw == READ) {
if (swap)
raw_insw_swapw(data_addr, (u16 *)pad, 1);
else
raw_insw(data_addr, (u16 *)pad, 1);
*buf = pad[0];
} else {
pad[0] = *buf;
if (swap)
raw_outsw_swapw(data_addr, (u16 *)pad, 1);
else
raw_outsw(data_addr, (u16 *)pad, 1);
}
words++;
}
return words << 1;
}
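/*
 * Illustrative sketch (not part of the driver): the word swapping and
 * trailing-byte padding done by pata_falcon_data_xfer() above, modelled
 * on plain memory buffers instead of the Falcon's data register. All
 * data values are made up.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void copy_words_swapped(uint16_t *dst, const uint16_t *src, unsigned int words)
{
	while (words--) {
		uint16_t w = *src++;

		*dst++ = (uint16_t)((w << 8) | (w >> 8));	/* swap the two bytes */
	}
}

int main(void)
{
	const uint8_t src[5] = { 0x11, 0x22, 0x33, 0x44, 0x55 };	/* odd length */
	uint16_t words_in[3], words_out[3];
	unsigned int nwords = sizeof(src) / 2;		/* full 16-bit words */
	uint8_t pad[2] = { src[4], 0 };			/* trailing byte, padded */

	memcpy(words_in, src, nwords * 2);
	memcpy(&words_in[nwords], pad, 2);
	copy_words_swapped(words_out, words_in, nwords + 1);

	printf("first swapped word 0x%04x, %u bytes moved for a %zu-byte request\n",
	       (unsigned int)words_out[0], (nwords + 1) * 2, sizeof(src));
	return 0;
}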
/*
* Provide our own set_mode() as we don't want to change anything that has
* already been configured.
*/
static int pata_falcon_set_mode(struct ata_link *link,
struct ata_device **unused)
{
struct ata_device *dev;
ata_for_each_dev(dev, link, ENABLED) {
/* We don't really care */
dev->pio_mode = dev->xfer_mode = XFER_PIO_0;
dev->xfer_shift = ATA_SHIFT_PIO;
dev->flags |= ATA_DFLAG_PIO;
ata_dev_info(dev, "configured for PIO\n");
}
return 0;
}
static struct ata_port_operations pata_falcon_ops = {
.inherits = &ata_sff_port_ops,
.sff_data_xfer = pata_falcon_data_xfer,
.cable_detect = ata_cable_unknown,
.set_mode = pata_falcon_set_mode,
};
static int __init pata_falcon_init_one(struct platform_device *pdev)
{
struct resource *base_mem_res, *ctl_mem_res;
struct resource *base_res, *ctl_res, *irq_res;
struct ata_host *host;
struct ata_port *ap;
void __iomem *base, *ctl_base;
int mask_shift = 0; /* Q40 & Falcon default */
int irq = 0, io_offset = 1, reg_shift = 2; /* Falcon defaults */
dev_info(&pdev->dev, "Atari Falcon and Q40/Q60 PATA controller\n");
base_res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (base_res && !devm_request_region(&pdev->dev, base_res->start,
resource_size(base_res), DRV_NAME)) {
dev_err(&pdev->dev, "resources busy\n");
return -EBUSY;
}
ctl_res = platform_get_resource(pdev, IORESOURCE_IO, 1);
if (ctl_res && !devm_request_region(&pdev->dev, ctl_res->start,
resource_size(ctl_res), DRV_NAME)) {
dev_err(&pdev->dev, "resources busy\n");
return -EBUSY;
}
base_mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!base_mem_res)
return -ENODEV;
if (!devm_request_mem_region(&pdev->dev, base_mem_res->start,
resource_size(base_mem_res), DRV_NAME)) {
dev_err(&pdev->dev, "resources busy\n");
return -EBUSY;
}
ctl_mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!ctl_mem_res)
return -ENODEV;
/* allocate host */
host = ata_host_alloc(&pdev->dev, 1);
if (!host)
return -ENOMEM;
ap = host->ports[0];
ap->ops = &pata_falcon_ops;
ap->pio_mask = ATA_PIO4;
ap->flags |= ATA_FLAG_SLAVE_POSS | ATA_FLAG_NO_IORDY;
/* N.B. this assumes data_addr will be used for word-sized I/O only */
ap->ioaddr.data_addr = (void __iomem *)base_mem_res->start;
if (base_res) { /* only Q40 has IO resources */
io_offset = 0x10000;
reg_shift = 0;
base = (void __iomem *)base_res->start;
ctl_base = (void __iomem *)ctl_res->start;
} else {
base = (void __iomem *)base_mem_res->start;
ctl_base = (void __iomem *)ctl_mem_res->start;
}
ap->ioaddr.error_addr = base + io_offset + (1 << reg_shift);
ap->ioaddr.feature_addr = base + io_offset + (1 << reg_shift);
ap->ioaddr.nsect_addr = base + io_offset + (2 << reg_shift);
ap->ioaddr.lbal_addr = base + io_offset + (3 << reg_shift);
ap->ioaddr.lbam_addr = base + io_offset + (4 << reg_shift);
ap->ioaddr.lbah_addr = base + io_offset + (5 << reg_shift);
ap->ioaddr.device_addr = base + io_offset + (6 << reg_shift);
ap->ioaddr.status_addr = base + io_offset + (7 << reg_shift);
ap->ioaddr.command_addr = base + io_offset + (7 << reg_shift);
ap->ioaddr.altstatus_addr = ctl_base + io_offset;
ap->ioaddr.ctl_addr = ctl_base + io_offset;
ata_port_desc(ap, "cmd %px ctl %px data %px",
base, ctl_base, ap->ioaddr.data_addr);
if (pdev->id > 0)
mask_shift = 2;
ap->private_data = (void *)(uintptr_t)(pata_falcon_swap_mask >> mask_shift);
irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (irq_res && irq_res->start > 0) {
irq = irq_res->start;
} else {
ap->flags |= ATA_FLAG_PIO_POLLING;
ata_port_desc(ap, "no IRQ, using PIO polling");
}
/* activate */
return ata_host_activate(host, irq, irq ? ata_sff_interrupt : NULL,
IRQF_SHARED, &pata_falcon_sht);
}
static int __exit pata_falcon_remove_one(struct platform_device *pdev)
{
struct ata_host *host = platform_get_drvdata(pdev);
ata_host_detach(host);
return 0;
}
static struct platform_driver pata_falcon_driver = {
.remove = __exit_p(pata_falcon_remove_one),
.driver = {
.name = "atari-falcon-ide",
},
};
module_platform_driver_probe(pata_falcon_driver, pata_falcon_init_one);
MODULE_AUTHOR("Bartlomiej Zolnierkiewicz");
MODULE_DESCRIPTION("low-level driver for Atari Falcon PATA");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:atari-falcon-ide");
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_falcon.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* sata_nv.c - NVIDIA nForce SATA
*
* Copyright 2004 NVIDIA Corp. All rights reserved.
* Copyright 2004 Andrew Chew
*
* libata documentation is available via 'make {ps|pdf}docs',
* as Documentation/driver-api/libata.rst
*
* No hardware documentation available outside of NVIDIA.
* This driver programs the NVIDIA SATA controller in a similar
* fashion as with other PCI IDE BMDMA controllers, with a few
* NV-specific details such as register offsets, SATA phy location,
* hotplug info, etc.
*
* CK804/MCP04 controllers support an alternate programming interface
* similar to the ADMA specification (with some modifications).
* This allows the use of NCQ. Non-DMA-mapped ATA commands are still
* sent through the legacy interface.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>
#include <trace/events/libata.h>
#define DRV_NAME "sata_nv"
#define DRV_VERSION "3.5"
#define NV_ADMA_DMA_BOUNDARY 0xffffffffUL
enum {
NV_MMIO_BAR = 5,
NV_PORTS = 2,
NV_PIO_MASK = ATA_PIO4,
NV_MWDMA_MASK = ATA_MWDMA2,
NV_UDMA_MASK = ATA_UDMA6,
NV_PORT0_SCR_REG_OFFSET = 0x00,
NV_PORT1_SCR_REG_OFFSET = 0x40,
/* INT_STATUS/ENABLE */
NV_INT_STATUS = 0x10,
NV_INT_ENABLE = 0x11,
NV_INT_STATUS_CK804 = 0x440,
NV_INT_ENABLE_CK804 = 0x441,
/* INT_STATUS/ENABLE bits */
NV_INT_DEV = 0x01,
NV_INT_PM = 0x02,
NV_INT_ADDED = 0x04,
NV_INT_REMOVED = 0x08,
NV_INT_PORT_SHIFT = 4, /* each port occupies 4 bits */
NV_INT_ALL = 0x0f,
NV_INT_MASK = NV_INT_DEV |
NV_INT_ADDED | NV_INT_REMOVED,
/* INT_CONFIG */
NV_INT_CONFIG = 0x12,
NV_INT_CONFIG_METHD = 0x01, // 0 = INT, 1 = SMI
// For PCI config register 20
NV_MCP_SATA_CFG_20 = 0x50,
NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
NV_MCP_SATA_CFG_20_PORT0_EN = (1 << 17),
NV_MCP_SATA_CFG_20_PORT1_EN = (1 << 16),
NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),
NV_ADMA_MAX_CPBS = 32,
NV_ADMA_CPB_SZ = 128,
NV_ADMA_APRD_SZ = 16,
NV_ADMA_SGTBL_LEN = (1024 - NV_ADMA_CPB_SZ) /
NV_ADMA_APRD_SZ,
NV_ADMA_SGTBL_TOTAL_LEN = NV_ADMA_SGTBL_LEN + 5,
NV_ADMA_SGTBL_SZ = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
NV_ADMA_PORT_PRIV_DMA_SZ = NV_ADMA_MAX_CPBS *
(NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
/* BAR5 offset to ADMA general registers */
NV_ADMA_GEN = 0x400,
NV_ADMA_GEN_CTL = 0x00,
NV_ADMA_NOTIFIER_CLEAR = 0x30,
/* BAR5 offset to ADMA ports */
NV_ADMA_PORT = 0x480,
/* size of ADMA port register space */
NV_ADMA_PORT_SIZE = 0x100,
/* ADMA port registers */
NV_ADMA_CTL = 0x40,
NV_ADMA_CPB_COUNT = 0x42,
NV_ADMA_NEXT_CPB_IDX = 0x43,
NV_ADMA_STAT = 0x44,
NV_ADMA_CPB_BASE_LOW = 0x48,
NV_ADMA_CPB_BASE_HIGH = 0x4C,
NV_ADMA_APPEND = 0x50,
NV_ADMA_NOTIFIER = 0x68,
NV_ADMA_NOTIFIER_ERROR = 0x6C,
/* NV_ADMA_CTL register bits */
NV_ADMA_CTL_HOTPLUG_IEN = (1 << 0),
NV_ADMA_CTL_CHANNEL_RESET = (1 << 5),
NV_ADMA_CTL_GO = (1 << 7),
NV_ADMA_CTL_AIEN = (1 << 8),
NV_ADMA_CTL_READ_NON_COHERENT = (1 << 11),
NV_ADMA_CTL_WRITE_NON_COHERENT = (1 << 12),
/* CPB response flag bits */
NV_CPB_RESP_DONE = (1 << 0),
NV_CPB_RESP_ATA_ERR = (1 << 3),
NV_CPB_RESP_CMD_ERR = (1 << 4),
NV_CPB_RESP_CPB_ERR = (1 << 7),
/* CPB control flag bits */
NV_CPB_CTL_CPB_VALID = (1 << 0),
NV_CPB_CTL_QUEUE = (1 << 1),
NV_CPB_CTL_APRD_VALID = (1 << 2),
NV_CPB_CTL_IEN = (1 << 3),
NV_CPB_CTL_FPDMA = (1 << 4),
/* APRD flags */
NV_APRD_WRITE = (1 << 1),
NV_APRD_END = (1 << 2),
NV_APRD_CONT = (1 << 3),
/* NV_ADMA_STAT flags */
NV_ADMA_STAT_TIMEOUT = (1 << 0),
NV_ADMA_STAT_HOTUNPLUG = (1 << 1),
NV_ADMA_STAT_HOTPLUG = (1 << 2),
NV_ADMA_STAT_CPBERR = (1 << 4),
NV_ADMA_STAT_SERROR = (1 << 5),
NV_ADMA_STAT_CMD_COMPLETE = (1 << 6),
NV_ADMA_STAT_IDLE = (1 << 8),
NV_ADMA_STAT_LEGACY = (1 << 9),
NV_ADMA_STAT_STOPPED = (1 << 10),
NV_ADMA_STAT_DONE = (1 << 12),
NV_ADMA_STAT_ERR = NV_ADMA_STAT_CPBERR |
NV_ADMA_STAT_TIMEOUT,
/* port flags */
NV_ADMA_PORT_REGISTER_MODE = (1 << 0),
NV_ADMA_ATAPI_SETUP_COMPLETE = (1 << 1),
/* MCP55 reg offset */
NV_CTL_MCP55 = 0x400,
NV_INT_STATUS_MCP55 = 0x440,
NV_INT_ENABLE_MCP55 = 0x444,
NV_NCQ_REG_MCP55 = 0x448,
/* MCP55 */
NV_INT_ALL_MCP55 = 0xffff,
NV_INT_PORT_SHIFT_MCP55 = 16, /* each port occupies 16 bits */
NV_INT_MASK_MCP55 = NV_INT_ALL_MCP55 & 0xfffd,
/* SWNCQ ENABLE BITS*/
NV_CTL_PRI_SWNCQ = 0x02,
NV_CTL_SEC_SWNCQ = 0x04,
/* SW NCQ status bits*/
NV_SWNCQ_IRQ_DEV = (1 << 0),
NV_SWNCQ_IRQ_PM = (1 << 1),
NV_SWNCQ_IRQ_ADDED = (1 << 2),
NV_SWNCQ_IRQ_REMOVED = (1 << 3),
NV_SWNCQ_IRQ_BACKOUT = (1 << 4),
NV_SWNCQ_IRQ_SDBFIS = (1 << 5),
NV_SWNCQ_IRQ_DHREGFIS = (1 << 6),
NV_SWNCQ_IRQ_DMASETUP = (1 << 7),
NV_SWNCQ_IRQ_HOTPLUG = NV_SWNCQ_IRQ_ADDED |
NV_SWNCQ_IRQ_REMOVED,
};
/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
__le64 addr;
__le32 len;
u8 flags;
u8 packet_len;
__le16 reserved;
};
enum nv_adma_regbits {
CMDEND = (1 << 15), /* end of command list */
WNB = (1 << 14), /* wait-not-BSY */
IGN = (1 << 13), /* ignore this entry */
CS1n = (1 << (4 + 8)), /* std. PATA signals follow... */
DA2 = (1 << (2 + 8)),
DA1 = (1 << (1 + 8)),
DA0 = (1 << (0 + 8)),
};
/* ADMA Command Parameter Block
The first 5 SG segments are stored inside the Command Parameter Block itself.
If there are more than 5 segments the remainder are stored in a separate
memory area indicated by next_aprd. */
struct nv_adma_cpb {
u8 resp_flags; /* 0 */
u8 reserved1; /* 1 */
u8 ctl_flags; /* 2 */
/* len is length of taskfile in 64 bit words */
u8 len; /* 3 */
u8 tag; /* 4 */
u8 next_cpb_idx; /* 5 */
__le16 reserved2; /* 6-7 */
__le16 tf[12]; /* 8-31 */
struct nv_adma_prd aprd[5]; /* 32-111 */
__le64 next_aprd; /* 112-119 */
__le64 reserved3; /* 120-127 */
};
struct nv_adma_port_priv {
struct nv_adma_cpb *cpb;
dma_addr_t cpb_dma;
struct nv_adma_prd *aprd;
dma_addr_t aprd_dma;
void __iomem *ctl_block;
void __iomem *gen_block;
void __iomem *notifier_clear_block;
u64 adma_dma_mask;
u8 flags;
int last_issue_ncq;
};
struct nv_host_priv {
unsigned long type;
};
struct defer_queue {
u32 defer_bits;
unsigned int head;
unsigned int tail;
unsigned int tag[ATA_MAX_QUEUE];
};
enum ncq_saw_flag_list {
ncq_saw_d2h = (1U << 0),
ncq_saw_dmas = (1U << 1),
ncq_saw_sdb = (1U << 2),
ncq_saw_backout = (1U << 3),
};
struct nv_swncq_port_priv {
struct ata_bmdma_prd *prd; /* our SG list */
dma_addr_t prd_dma; /* and its DMA mapping */
void __iomem *sactive_block;
void __iomem *irq_block;
void __iomem *tag_block;
u32 qc_active;
unsigned int last_issue_tag;
/* fifo circular queue to store deferral command */
struct defer_queue defer_queue;
/* for NCQ interrupt analysis */
u32 dhfis_bits;
u32 dmafis_bits;
u32 sdbfis_bits;
unsigned int ncq_flags;
};
#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
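/*
 * Illustrative sketch (not part of the driver): the per-port test done
 * by NV_ADMA_CHECK_INTR() above. The generation control word below is
 * hypothetical; the point is that port 0 is checked at bit 19 and
 * port 1 at bit 31, i.e. bit (19 + 12 * port).
 */
#include <stdint.h>
#include <stdio.h>

static int adma_port_has_intr(uint32_t gen_ctl, unsigned int port)
{
	return (gen_ctl & (1u << (19 + 12 * port))) != 0;
}

int main(void)
{
	uint32_t gen_ctl = 1u << 31;	/* pretend only port 1 signalled */

	printf("port0=%d port1=%d\n",
	       adma_port_has_intr(gen_ctl, 0), adma_port_has_intr(gen_ctl, 1));
	return 0;
}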
static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM_SLEEP
static int nv_pci_device_resume(struct pci_dev *pdev);
#endif
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int nv_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
#endif
static void nv_adma_freeze(struct ata_port *ap);
static void nv_adma_thaw(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
static void nv_mcp55_thaw(struct ata_port *ap);
static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
static int nv_swncq_slave_config(struct scsi_device *sdev);
static int nv_swncq_port_start(struct ata_port *ap);
static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
#ifdef CONFIG_PM
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_swncq_port_resume(struct ata_port *ap);
#endif
enum nv_host_type
{
GENERIC,
NFORCE2,
NFORCE3 = NFORCE2, /* NF2 == NF3 as far as sata_nv is concerned */
CK804,
ADMA,
MCP5x,
SWNCQ,
};
static const struct pci_device_id nv_pci_tbl[] = {
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x },
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x },
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x },
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x },
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
{ } /* terminate list */
};
static struct pci_driver nv_pci_driver = {
.name = DRV_NAME,
.id_table = nv_pci_tbl,
.probe = nv_init_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = nv_pci_device_resume,
#endif
.remove = ata_pci_remove_one,
};
static const struct scsi_host_template nv_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static const struct scsi_host_template nv_adma_sht = {
__ATA_BASE_SHT(DRV_NAME),
.can_queue = NV_ADMA_MAX_CPBS,
.sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN,
.dma_boundary = NV_ADMA_DMA_BOUNDARY,
.slave_configure = nv_adma_slave_config,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
};
static const struct scsi_host_template nv_swncq_sht = {
__ATA_BASE_SHT(DRV_NAME),
.can_queue = ATA_MAX_QUEUE - 1,
.sg_tablesize = LIBATA_MAX_PRD,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = nv_swncq_slave_config,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
};
/*
* NV SATA controllers have various problems with the hardreset
* protocol, depending on the specific controller and device.
*
* GENERIC:
*
* bko11195 reports that link doesn't come online after hardreset on
* generic nv's and there have been several other similar reports on
* linux-ide.
*
* bko12351#c23 reports that warmplug on MCP61 doesn't work with
* softreset.
*
* NF2/3:
*
* bko3352 reports nf2/3 controllers can't determine device signature
* reliably after hardreset. The following thread reports detection
* failure on cold boot with the standard debouncing timing.
*
* http://thread.gmane.org/gmane.linux.ide/34098
*
* bko12176 reports that hardreset fails to bring up the link during
* boot on nf2.
*
* CK804:
*
* For initial probing after boot and hot plugging, hardreset mostly
* works fine on CK804 but curiously, reprobing on the initial port
* by rescanning or rmmod/insmod fails to acquire the initial D2H Reg
* FIS in a somewhat nondeterministic way.
*
* SWNCQ:
*
* bko12351 reports that when SWNCQ is enabled, for hotplug to work,
* hardreset should be used and hardreset can't report proper
* signature, which suggests that mcp5x is closer to nf2 as far as
* reset quirkiness is concerned.
*
* bko12703 reports that boot probing fails for intel SSD with
* hardreset. Link fails to come online. Softreset works fine.
*
* The failures are varied but the following patterns seem true for
* all flavors.
*
* - Softreset during boot always works.
*
* - Hardreset during boot sometimes fails to bring up the link on
* certain combinations and device signature acquisition is
* unreliable.
*
* - Hardreset is often necessary after hotplug.
*
* So, preferring softreset for boot probing and error handling (as
* hardreset might bring down the link) but using hardreset for
* post-boot probing should work around the above issues in most
* cases. Define nv_hardreset() which only kicks in for post-boot
* probing and use it for all variants.
*/
static struct ata_port_operations nv_generic_ops = {
.inherits = &ata_bmdma_port_ops,
.lost_interrupt = ATA_OP_NULL,
.scr_read = nv_scr_read,
.scr_write = nv_scr_write,
.hardreset = nv_hardreset,
};
static struct ata_port_operations nv_nf2_ops = {
.inherits = &nv_generic_ops,
.freeze = nv_nf2_freeze,
.thaw = nv_nf2_thaw,
};
static struct ata_port_operations nv_ck804_ops = {
.inherits = &nv_generic_ops,
.freeze = nv_ck804_freeze,
.thaw = nv_ck804_thaw,
.host_stop = nv_ck804_host_stop,
};
static struct ata_port_operations nv_adma_ops = {
.inherits = &nv_ck804_ops,
.check_atapi_dma = nv_adma_check_atapi_dma,
.sff_tf_read = nv_adma_tf_read,
.qc_defer = ata_std_qc_defer,
.qc_prep = nv_adma_qc_prep,
.qc_issue = nv_adma_qc_issue,
.sff_irq_clear = nv_adma_irq_clear,
.freeze = nv_adma_freeze,
.thaw = nv_adma_thaw,
.error_handler = nv_adma_error_handler,
.post_internal_cmd = nv_adma_post_internal_cmd,
.port_start = nv_adma_port_start,
.port_stop = nv_adma_port_stop,
#ifdef CONFIG_PM
.port_suspend = nv_adma_port_suspend,
.port_resume = nv_adma_port_resume,
#endif
.host_stop = nv_adma_host_stop,
};
static struct ata_port_operations nv_swncq_ops = {
.inherits = &nv_generic_ops,
.qc_defer = ata_std_qc_defer,
.qc_prep = nv_swncq_qc_prep,
.qc_issue = nv_swncq_qc_issue,
.freeze = nv_mcp55_freeze,
.thaw = nv_mcp55_thaw,
.error_handler = nv_swncq_error_handler,
#ifdef CONFIG_PM
.port_suspend = nv_swncq_port_suspend,
.port_resume = nv_swncq_port_resume,
#endif
.port_start = nv_swncq_port_start,
};
struct nv_pi_priv {
irq_handler_t irq_handler;
const struct scsi_host_template *sht;
};
#define NV_PI_PRIV(_irq_handler, _sht) \
&(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
static const struct ata_port_info nv_port_info[] = {
/* generic */
{
.flags = ATA_FLAG_SATA,
.pio_mask = NV_PIO_MASK,
.mwdma_mask = NV_MWDMA_MASK,
.udma_mask = NV_UDMA_MASK,
.port_ops = &nv_generic_ops,
.private_data = NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
},
/* nforce2/3 */
{
.flags = ATA_FLAG_SATA,
.pio_mask = NV_PIO_MASK,
.mwdma_mask = NV_MWDMA_MASK,
.udma_mask = NV_UDMA_MASK,
.port_ops = &nv_nf2_ops,
.private_data = NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
},
/* ck804 */
{
.flags = ATA_FLAG_SATA,
.pio_mask = NV_PIO_MASK,
.mwdma_mask = NV_MWDMA_MASK,
.udma_mask = NV_UDMA_MASK,
.port_ops = &nv_ck804_ops,
.private_data = NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
},
/* ADMA */
{
.flags = ATA_FLAG_SATA | ATA_FLAG_NCQ,
.pio_mask = NV_PIO_MASK,
.mwdma_mask = NV_MWDMA_MASK,
.udma_mask = NV_UDMA_MASK,
.port_ops = &nv_adma_ops,
.private_data = NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
},
/* MCP5x */
{
.flags = ATA_FLAG_SATA,
.pio_mask = NV_PIO_MASK,
.mwdma_mask = NV_MWDMA_MASK,
.udma_mask = NV_UDMA_MASK,
.port_ops = &nv_generic_ops,
.private_data = NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
},
/* SWNCQ */
{
.flags = ATA_FLAG_SATA | ATA_FLAG_NCQ,
.pio_mask = NV_PIO_MASK,
.mwdma_mask = NV_MWDMA_MASK,
.udma_mask = NV_UDMA_MASK,
.port_ops = &nv_swncq_ops,
.private_data = NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
},
};
MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
static bool adma_enabled;
static bool swncq_enabled = true;
static bool msi_enabled;
static void nv_adma_register_mode(struct ata_port *ap)
{
struct nv_adma_port_priv *pp = ap->private_data;
void __iomem *mmio = pp->ctl_block;
u16 tmp, status;
int count = 0;
if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
return;
status = readw(mmio + NV_ADMA_STAT);
while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
ndelay(50);
status = readw(mmio + NV_ADMA_STAT);
count++;
}
if (count == 20)
ata_port_warn(ap, "timeout waiting for ADMA IDLE, stat=0x%hx\n",
status);
tmp = readw(mmio + NV_ADMA_CTL);
writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
count = 0;
status = readw(mmio + NV_ADMA_STAT);
while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
ndelay(50);
status = readw(mmio + NV_ADMA_STAT);
count++;
}
if (count == 20)
ata_port_warn(ap,
"timeout waiting for ADMA LEGACY, stat=0x%hx\n",
status);
pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}
static void nv_adma_mode(struct ata_port *ap)
{
struct nv_adma_port_priv *pp = ap->private_data;
void __iomem *mmio = pp->ctl_block;
u16 tmp, status;
int count = 0;
if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
return;
WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
tmp = readw(mmio + NV_ADMA_CTL);
writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
status = readw(mmio + NV_ADMA_STAT);
while (((status & NV_ADMA_STAT_LEGACY) ||
!(status & NV_ADMA_STAT_IDLE)) && count < 20) {
ndelay(50);
status = readw(mmio + NV_ADMA_STAT);
count++;
}
if (count == 20)
ata_port_warn(ap,
"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
status);
pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}
static int nv_adma_slave_config(struct scsi_device *sdev)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct nv_adma_port_priv *pp = ap->private_data;
struct nv_adma_port_priv *port0, *port1;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
unsigned long segment_boundary, flags;
unsigned short sg_tablesize;
int rc;
int adma_enable;
u32 current_reg, new_reg, config_mask;
rc = ata_scsi_slave_config(sdev);
if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
/* Not a proper libata device, ignore */
return rc;
spin_lock_irqsave(ap->lock, flags);
if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
/*
* NVIDIA reports that ADMA mode does not support ATAPI commands.
* Therefore ATAPI commands are sent through the legacy interface.
* However, the legacy interface only supports 32-bit DMA.
* Restrict DMA parameters as required by the legacy interface
* when an ATAPI device is connected.
*/
segment_boundary = ATA_DMA_BOUNDARY;
/* Subtract 1 since an extra entry may be needed for padding, see
libata-scsi.c */
sg_tablesize = LIBATA_MAX_PRD - 1;
/* Since the legacy DMA engine is in use, we need to disable ADMA
on the port. */
adma_enable = 0;
nv_adma_register_mode(ap);
} else {
segment_boundary = NV_ADMA_DMA_BOUNDARY;
sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
adma_enable = 1;
}
pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);
if (ap->port_no == 1)
config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
else
config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
if (adma_enable) {
new_reg = current_reg | config_mask;
pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
} else {
new_reg = current_reg & ~config_mask;
pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
}
if (current_reg != new_reg)
pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
port0 = ap->host->ports[0]->private_data;
port1 = ap->host->ports[1]->private_data;
if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
(port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
/*
* We have to set the DMA mask to 32-bit if either port is in
* ATAPI mode, since they are on the same PCI device which is
* used for DMA mapping. If either SCSI device is not allocated
* yet, it's OK since that port will discover its correct
* setting when it does get allocated.
*/
rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
} else {
rc = dma_set_mask(&pdev->dev, pp->adma_dma_mask);
}
blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
blk_queue_max_segments(sdev->request_queue, sg_tablesize);
ata_port_info(ap,
"DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
(unsigned long long)*ap->host->dev->dma_mask,
segment_boundary, sg_tablesize);
spin_unlock_irqrestore(ap->lock, flags);
return rc;
}
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
struct nv_adma_port_priv *pp = qc->ap->private_data;
return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
/* Other than when internal or pass-through commands are executed,
the only time this function will be called in ADMA mode will be
if a command fails. In the failure case we don't care about going
into register mode with ADMA commands pending, as the commands will
all shortly be aborted anyway. We assume that NCQ commands are not
issued via passthrough, which is the only way that switching into
ADMA mode could abort outstanding commands. */
nv_adma_register_mode(ap);
ata_sff_tf_read(ap, tf);
}
static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
unsigned int idx = 0;
if (tf->flags & ATA_TFLAG_ISADDR) {
if (tf->flags & ATA_TFLAG_LBA48) {
cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
} else
cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);
cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
}
if (tf->flags & ATA_TFLAG_DEVICE)
cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);
while (idx < 12)
cpb[idx++] = cpu_to_le16(IGN);
return idx;
}
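/*
 * Illustrative sketch (not part of the driver): the "register, value"
 * pairs built by nv_adma_tf_to_cpb() above. Each 16-bit entry carries a
 * shadow-register index in its high byte and the register value in its
 * low byte; WNB marks the first entry, CMDEND the command entry, and
 * IGN pads the remaining slots of the 12-entry taskfile area. The
 * register indices and the sample LBA28 READ SECTOR(S) values below are
 * assumptions made only for illustration.
 */
#include <stdint.h>
#include <stdio.h>

enum { REG_FEATURE = 1, REG_NSECT, REG_LBAL, REG_LBAM, REG_LBAH, REG_DEVICE, REG_CMD };
enum { F_CMDEND = 1 << 15, F_WNB = 1 << 14, F_IGN = 1 << 13 };

int main(void)
{
	uint16_t cpb[12];
	unsigned int idx = 0, i;

	cpb[idx++] = (REG_FEATURE << 8) | 0x00 | F_WNB;		/* wait for not-BSY first */
	cpb[idx++] = (REG_NSECT   << 8) | 0x01;			/* one sector */
	cpb[idx++] = (REG_LBAL    << 8) | 0x10;			/* LBA 0x000010 */
	cpb[idx++] = (REG_LBAM    << 8) | 0x00;
	cpb[idx++] = (REG_LBAH    << 8) | 0x00;
	cpb[idx++] = (REG_DEVICE  << 8) | 0xe0;			/* LBA mode, device 0 */
	cpb[idx++] = (REG_CMD     << 8) | 0x20 | F_CMDEND;	/* READ SECTOR(S) */
	while (idx < 12)
		cpb[idx++] = F_IGN;				/* ignore unused slots */

	for (i = 0; i < 12; i++)
		printf("tf[%2u] = 0x%04x\n", i, (unsigned int)cpb[i]);
	return 0;
}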
static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
struct nv_adma_port_priv *pp = ap->private_data;
u8 flags = pp->cpb[cpb_num].resp_flags;
ata_port_dbg(ap, "CPB %d, flags=0x%x\n", cpb_num, flags);
if (unlikely((force_err ||
flags & (NV_CPB_RESP_ATA_ERR |
NV_CPB_RESP_CMD_ERR |
NV_CPB_RESP_CPB_ERR)))) {
struct ata_eh_info *ehi = &ap->link.eh_info;
int freeze = 0;
ata_ehi_clear_desc(ehi);
__ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
if (flags & NV_CPB_RESP_ATA_ERR) {
ata_ehi_push_desc(ehi, "ATA error");
ehi->err_mask |= AC_ERR_DEV;
} else if (flags & NV_CPB_RESP_CMD_ERR) {
ata_ehi_push_desc(ehi, "CMD error");
ehi->err_mask |= AC_ERR_DEV;
} else if (flags & NV_CPB_RESP_CPB_ERR) {
ata_ehi_push_desc(ehi, "CPB error");
ehi->err_mask |= AC_ERR_SYSTEM;
freeze = 1;
} else {
/* notifier error, but no error in CPB flags? */
ata_ehi_push_desc(ehi, "unknown");
ehi->err_mask |= AC_ERR_OTHER;
freeze = 1;
}
/* Kill all commands. EH will determine what actually failed. */
if (freeze)
ata_port_freeze(ap);
else
ata_port_abort(ap);
return -1;
}
if (likely(flags & NV_CPB_RESP_DONE))
return 1;
return 0;
}
static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
/* freeze if hotplugged */
if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
ata_port_freeze(ap);
return 1;
}
/* bail out if not our interrupt */
if (!(irq_stat & NV_INT_DEV))
return 0;
/* DEV interrupt w/ no active qc? */
if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
ata_sff_check_status(ap);
return 1;
}
/* handle interrupt */
return ata_bmdma_port_intr(ap, qc);
}
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
int i, handled = 0;
u32 notifier_clears[2];
spin_lock(&host->lock);
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
struct nv_adma_port_priv *pp = ap->private_data;
void __iomem *mmio = pp->ctl_block;
u16 status;
u32 gen_ctl;
u32 notifier, notifier_error;
notifier_clears[i] = 0;
/* if ADMA is disabled, use standard ata interrupt handler */
if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
>> (NV_INT_PORT_SHIFT * i);
handled += nv_host_intr(ap, irq_stat);
continue;
}
/* if in ATA register mode, check for standard interrupts */
if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
>> (NV_INT_PORT_SHIFT * i);
if (ata_tag_valid(ap->link.active_tag))
/** NV_INT_DEV indication seems unreliable
at times at least in ADMA mode. Force it
on always when a command is active, to
prevent losing interrupts. */
irq_stat |= NV_INT_DEV;
handled += nv_host_intr(ap, irq_stat);
}
notifier = readl(mmio + NV_ADMA_NOTIFIER);
notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
notifier_clears[i] = notifier | notifier_error;
gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
!notifier_error)
/* Nothing to do */
continue;
status = readw(mmio + NV_ADMA_STAT);
/*
* Clear status. Ensure the controller sees the
* clearing before we start looking at any of the CPB
* statuses, so that any CPB completions after this
* point in the handler will raise another interrupt.
*/
writew(status, mmio + NV_ADMA_STAT);
readw(mmio + NV_ADMA_STAT); /* flush posted write */
rmb();
handled++; /* irq handled if we got here */
/* freeze if hotplugged or controller error */
if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
NV_ADMA_STAT_HOTUNPLUG |
NV_ADMA_STAT_TIMEOUT |
NV_ADMA_STAT_SERROR))) {
struct ata_eh_info *ehi = &ap->link.eh_info;
ata_ehi_clear_desc(ehi);
__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
if (status & NV_ADMA_STAT_TIMEOUT) {
ehi->err_mask |= AC_ERR_SYSTEM;
ata_ehi_push_desc(ehi, "timeout");
} else if (status & NV_ADMA_STAT_HOTPLUG) {
ata_ehi_hotplugged(ehi);
ata_ehi_push_desc(ehi, "hotplug");
} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
ata_ehi_hotplugged(ehi);
ata_ehi_push_desc(ehi, "hot unplug");
} else if (status & NV_ADMA_STAT_SERROR) {
/* let EH analyze SError and figure out cause */
ata_ehi_push_desc(ehi, "SError");
} else
ata_ehi_push_desc(ehi, "unknown");
ata_port_freeze(ap);
continue;
}
if (status & (NV_ADMA_STAT_DONE |
NV_ADMA_STAT_CPBERR |
NV_ADMA_STAT_CMD_COMPLETE)) {
u32 check_commands = notifier_clears[i];
u32 done_mask = 0;
int pos, rc;
if (status & NV_ADMA_STAT_CPBERR) {
/* check all active commands */
if (ata_tag_valid(ap->link.active_tag))
check_commands = 1 <<
ap->link.active_tag;
else
check_commands = ap->link.sactive;
}
/* check CPBs for completed commands */
while ((pos = ffs(check_commands))) {
pos--;
rc = nv_adma_check_cpb(ap, pos,
notifier_error & (1 << pos));
if (rc > 0)
done_mask |= 1 << pos;
else if (unlikely(rc < 0))
check_commands = 0;
check_commands &= ~(1 << pos);
}
ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
}
}
if (notifier_clears[0] || notifier_clears[1]) {
/* Note: Both notifier clear registers must be written
if either is set, even if one is zero, according to NVIDIA. */
struct nv_adma_port_priv *pp = host->ports[0]->private_data;
writel(notifier_clears[0], pp->notifier_clear_block);
pp = host->ports[1]->private_data;
writel(notifier_clears[1], pp->notifier_clear_block);
}
spin_unlock(&host->lock);
return IRQ_RETVAL(handled);
}
static void nv_adma_freeze(struct ata_port *ap)
{
struct nv_adma_port_priv *pp = ap->private_data;
void __iomem *mmio = pp->ctl_block;
u16 tmp;
nv_ck804_freeze(ap);
if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
return;
/* clear any outstanding CK804 notifications */
writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
/* Disable interrupt */
tmp = readw(mmio + NV_ADMA_CTL);
writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
mmio + NV_ADMA_CTL);
readw(mmio + NV_ADMA_CTL); /* flush posted write */
}
static void nv_adma_thaw(struct ata_port *ap)
{
struct nv_adma_port_priv *pp = ap->private_data;
void __iomem *mmio = pp->ctl_block;
u16 tmp;
nv_ck804_thaw(ap);
if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
return;
/* Enable interrupt */
tmp = readw(mmio + NV_ADMA_CTL);
writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
mmio + NV_ADMA_CTL);
readw(mmio + NV_ADMA_CTL); /* flush posted write */
}
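/* Clear every pending interrupt source for the port: CK804 notifications,
 * ADMA status, and the notifier clear registers (both ports must be written
 * even though only this port's bits are being cleared). */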
static void nv_adma_irq_clear(struct ata_port *ap)
{
struct nv_adma_port_priv *pp = ap->private_data;
void __iomem *mmio = pp->ctl_block;
u32 notifier_clears[2];
if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
ata_bmdma_irq_clear(ap);
return;
}
/* clear any outstanding CK804 notifications */
writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
/* clear ADMA status */
writew(0xffff, mmio + NV_ADMA_STAT);
/* clear notifiers - note both ports need to be written with
something even though we are only clearing on one */
if (ap->port_no == 0) {
notifier_clears[0] = 0xFFFFFFFF;
notifier_clears[1] = 0;
} else {
notifier_clears[0] = 0;
notifier_clears[1] = 0xFFFFFFFF;
}
pp = ap->host->ports[0]->private_data;
writel(notifier_clears[0], pp->notifier_clear_block);
pp = ap->host->ports[1]->private_data;
writel(notifier_clears[1], pp->notifier_clear_block);
}
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
{
struct nv_adma_port_priv *pp = qc->ap->private_data;
if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
ata_bmdma_post_internal_cmd(qc);
}
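/* Per-port setup: allocate the BMDMA fallback resources with a 32-bit DMA
 * mask, then raise the mask to 64 bits, allocate the CPB array plus APRD
 * table, program the CPB base address and reset the ADMA channel. */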
static int nv_adma_port_start(struct ata_port *ap)
{
struct device *dev = ap->host->dev;
struct nv_adma_port_priv *pp;
int rc;
void *mem;
dma_addr_t mem_dma;
void __iomem *mmio;
struct pci_dev *pdev = to_pci_dev(dev);
u16 tmp;
/*
* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
* pad buffers.
*/
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (rc)
return rc;
/* we might fallback to bmdma, allocate bmdma resources */
rc = ata_bmdma_port_start(ap);
if (rc)
return rc;
pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
if (!pp)
return -ENOMEM;
mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
ap->port_no * NV_ADMA_PORT_SIZE;
pp->ctl_block = mmio;
pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
pp->notifier_clear_block = pp->gen_block +
NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
/*
* Now that the legacy PRD and padding buffer are allocated we can
* raise the DMA mask to allocate the CPB/APRD table.
*/
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
pp->adma_dma_mask = *dev->dma_mask;
mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
&mem_dma, GFP_KERNEL);
if (!mem)
return -ENOMEM;
/*
* First item in chunk of DMA memory:
* 128-byte command parameter block (CPB)
* one for each command tag
*/
pp->cpb = mem;
pp->cpb_dma = mem_dma;
writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
writel((mem_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
mem += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
/*
* Second item: block of ADMA_SGTBL_LEN s/g entries
*/
pp->aprd = mem;
pp->aprd_dma = mem_dma;
ap->private_data = pp;
/* clear any outstanding interrupt conditions */
writew(0xffff, mmio + NV_ADMA_STAT);
/* initialize port variables */
pp->flags = NV_ADMA_PORT_REGISTER_MODE;
/* clear CPB fetch count */
writew(0, mmio + NV_ADMA_CPB_COUNT);
/* clear GO for register mode, enable interrupt */
tmp = readw(mmio + NV_ADMA_CTL);
writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
tmp = readw(mmio + NV_ADMA_CTL);
writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
readw(mmio + NV_ADMA_CTL); /* flush posted write */
udelay(1);
writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
readw(mmio + NV_ADMA_CTL); /* flush posted write */
return 0;
}
static void nv_adma_port_stop(struct ata_port *ap)
{
struct nv_adma_port_priv *pp = ap->private_data;
void __iomem *mmio = pp->ctl_block;
writew(0, mmio + NV_ADMA_CTL);
}
#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
struct nv_adma_port_priv *pp = ap->private_data;
void __iomem *mmio = pp->ctl_block;
/* Go to register mode - clears GO */
nv_adma_register_mode(ap);
/* clear CPB fetch count */
writew(0, mmio + NV_ADMA_CPB_COUNT);
/* disable interrupt, shut down port */
writew(0, mmio + NV_ADMA_CTL);
return 0;
}
static int nv_adma_port_resume(struct ata_port *ap)
{
struct nv_adma_port_priv *pp = ap->private_data;
void __iomem *mmio = pp->ctl_block;
u16 tmp;
/* set CPB block location */
writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
writel((pp->cpb_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
/* clear any outstanding interrupt conditions */
writew(0xffff, mmio + NV_ADMA_STAT);
/* initialize port variables */
pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
/* clear CPB fetch count */
writew(0, mmio + NV_ADMA_CPB_COUNT);
/* clear GO for register mode, enable interrupt */
tmp = readw(mmio + NV_ADMA_CTL);
writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
tmp = readw(mmio + NV_ADMA_CTL);
writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
readw(mmio + NV_ADMA_CTL); /* flush posted write */
udelay(1);
writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
readw(mmio + NV_ADMA_CTL); /* flush posted write */
return 0;
}
#endif
static void nv_adma_setup_port(struct ata_port *ap)
{
void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
struct ata_ioports *ioport = &ap->ioaddr;
mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
ioport->cmd_addr = mmio;
ioport->data_addr = mmio + (ATA_REG_DATA * 4);
ioport->error_addr =
ioport->feature_addr = mmio + (ATA_REG_ERR * 4);
ioport->nsect_addr = mmio + (ATA_REG_NSECT * 4);
ioport->lbal_addr = mmio + (ATA_REG_LBAL * 4);
ioport->lbam_addr = mmio + (ATA_REG_LBAM * 4);
ioport->lbah_addr = mmio + (ATA_REG_LBAH * 4);
ioport->device_addr = mmio + (ATA_REG_DEVICE * 4);
ioport->status_addr =
ioport->command_addr = mmio + (ATA_REG_STATUS * 4);
ioport->altstatus_addr =
ioport->ctl_addr = mmio + 0x20;
}
static int nv_adma_host_init(struct ata_host *host)
{
struct pci_dev *pdev = to_pci_dev(host->dev);
unsigned int i;
u32 tmp32;
/* enable ADMA on the ports */
pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
NV_MCP_SATA_CFG_20_PORT1_EN |
NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
for (i = 0; i < host->n_ports; i++)
nv_adma_setup_port(host->ports[i]);
return 0;
}
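/* Fill one APRD (ADMA PRD) entry from a scatterlist element. The last
 * element gets NV_APRD_END; intermediate entries are chained with
 * NV_APRD_CONT, except index 4, the last slot inside the CPB, which
 * continues through the external APRD table instead. */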
static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
struct scatterlist *sg,
int idx,
struct nv_adma_prd *aprd)
{
u8 flags = 0;
if (qc->tf.flags & ATA_TFLAG_WRITE)
flags |= NV_APRD_WRITE;
if (idx == qc->n_elem - 1)
flags |= NV_APRD_END;
else if (idx != 4)
flags |= NV_APRD_CONT;
aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
aprd->flags = flags;
aprd->packet_len = 0;
}
static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
struct nv_adma_port_priv *pp = qc->ap->private_data;
struct nv_adma_prd *aprd;
struct scatterlist *sg;
unsigned int si;
for_each_sg(qc->sg, sg, qc->n_elem, si) {
aprd = (si < 5) ? &cpb->aprd[si] :
&pp->aprd[NV_ADMA_SGTBL_LEN * qc->hw_tag + (si-5)];
nv_adma_fill_aprd(qc, sg, si, aprd);
}
if (si > 5)
cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->hw_tag)));
else
cpb->next_aprd = cpu_to_le64(0);
}
static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
{
struct nv_adma_port_priv *pp = qc->ap->private_data;
/* ADMA engine can only be used for non-ATAPI DMA commands,
or interrupt-driven no-data commands. */
if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
(qc->tf.flags & ATA_TFLAG_POLLING))
return 1;
if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
(qc->tf.protocol == ATA_PROT_NODATA))
return 0;
return 1;
}
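/* Build the CPB for a command, or fall back to standard BMDMA preparation
 * when the command has to run in ATA register mode. */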
static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
struct nv_adma_port_priv *pp = qc->ap->private_data;
struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag];
u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
NV_CPB_CTL_IEN;
if (nv_adma_use_reg_mode(qc)) {
BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
(qc->flags & ATA_QCFLAG_DMAMAP));
nv_adma_register_mode(qc->ap);
ata_bmdma_qc_prep(qc);
return AC_ERR_OK;
}
cpb->resp_flags = NV_CPB_RESP_DONE;
wmb();
cpb->ctl_flags = 0;
wmb();
cpb->len = 3;
cpb->tag = qc->hw_tag;
cpb->next_cpb_idx = 0;
/* turn on NCQ flags for NCQ commands */
if (qc->tf.protocol == ATA_PROT_NCQ)
ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
if (qc->flags & ATA_QCFLAG_DMAMAP) {
nv_adma_fill_sg(qc, cpb);
ctl_flags |= NV_CPB_CTL_APRD_VALID;
} else
memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
until we are finished filling in all of the contents */
wmb();
cpb->ctl_flags = ctl_flags;
wmb();
cpb->resp_flags = 0;
return AC_ERR_OK;
}
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
struct nv_adma_port_priv *pp = qc->ap->private_data;
void __iomem *mmio = pp->ctl_block;
int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
/* We can't handle result taskfile with NCQ commands, since
retrieving the taskfile switches us out of ADMA mode and would abort
existing commands. */
if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
(qc->flags & ATA_QCFLAG_RESULT_TF))) {
ata_dev_err(qc->dev, "NCQ w/ RESULT_TF not allowed\n");
return AC_ERR_SYSTEM;
}
if (nv_adma_use_reg_mode(qc)) {
/* use ATA register mode */
BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
(qc->flags & ATA_QCFLAG_DMAMAP));
nv_adma_register_mode(qc->ap);
return ata_bmdma_qc_issue(qc);
} else
nv_adma_mode(qc->ap);
/* write append register, command tag in lower 8 bits
and (number of cpbs to append -1) in top 8 bits */
wmb();
if (curr_ncq != pp->last_issue_ncq) {
/* Seems to need some delay before switching between NCQ and
non-NCQ commands, else we get command timeouts and such. */
udelay(20);
pp->last_issue_ncq = curr_ncq;
}
writew(qc->hw_tag, mmio + NV_ADMA_APPEND);
return 0;
}
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
unsigned int i;
unsigned int handled = 0;
unsigned long flags;
spin_lock_irqsave(&host->lock, flags);
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
struct ata_queued_cmd *qc;
qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
handled += ata_bmdma_port_intr(ap, qc);
} else {
/*
* No request pending? Clear interrupt status
* anyway, in case there's one pending.
*/
ap->ops->sff_check_status(ap);
}
}
spin_unlock_irqrestore(&host->lock, flags);
return IRQ_RETVAL(handled);
}
static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
{
int i, handled = 0;
for (i = 0; i < host->n_ports; i++) {
handled += nv_host_intr(host->ports[i], irq_stat);
irq_stat >>= NV_INT_PORT_SHIFT;
}
return IRQ_RETVAL(handled);
}
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
u8 irq_stat;
irqreturn_t ret;
spin_lock(&host->lock);
irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
ret = nv_do_interrupt(host, irq_stat);
spin_unlock(&host->lock);
return ret;
}
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
u8 irq_stat;
irqreturn_t ret;
spin_lock(&host->lock);
irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
ret = nv_do_interrupt(host, irq_stat);
spin_unlock(&host->lock);
return ret;
}
static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
if (sc_reg > SCR_CONTROL)
return -EINVAL;
*val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
return 0;
}
static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
if (sc_reg > SCR_CONTROL)
return -EINVAL;
iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
return 0;
}
static int nv_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
struct ata_eh_context *ehc = &link->eh_context;
/* Do hardreset iff it's post-boot probing, please read the
* comment above port ops for details.
*/
if (!(link->ap->pflags & ATA_PFLAG_LOADING) &&
!ata_dev_enabled(link->device))
sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
NULL, NULL);
else {
const unsigned int *timing = sata_ehc_deb_timing(ehc);
int rc;
if (!(ehc->i.flags & ATA_EHI_QUIET))
ata_link_info(link,
"nv: skipping hardreset on occupied port\n");
/* make sure the link is online */
rc = sata_link_resume(link, timing, deadline);
/* whine about phy resume failure but proceed */
if (rc && rc != -EOPNOTSUPP)
ata_link_warn(link, "failed to resume link (errno=%d)\n",
rc);
}
/* device signature acquisition is unreliable */
return -EAGAIN;
}
static void nv_nf2_freeze(struct ata_port *ap)
{
void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
int shift = ap->port_no * NV_INT_PORT_SHIFT;
u8 mask;
mask = ioread8(scr_addr + NV_INT_ENABLE);
mask &= ~(NV_INT_ALL << shift);
iowrite8(mask, scr_addr + NV_INT_ENABLE);
}
static void nv_nf2_thaw(struct ata_port *ap)
{
void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
int shift = ap->port_no * NV_INT_PORT_SHIFT;
u8 mask;
iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
mask = ioread8(scr_addr + NV_INT_ENABLE);
mask |= (NV_INT_MASK << shift);
iowrite8(mask, scr_addr + NV_INT_ENABLE);
}
static void nv_ck804_freeze(struct ata_port *ap)
{
void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
int shift = ap->port_no * NV_INT_PORT_SHIFT;
u8 mask;
mask = readb(mmio_base + NV_INT_ENABLE_CK804);
mask &= ~(NV_INT_ALL << shift);
writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}
static void nv_ck804_thaw(struct ata_port *ap)
{
void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
int shift = ap->port_no * NV_INT_PORT_SHIFT;
u8 mask;
writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
mask = readb(mmio_base + NV_INT_ENABLE_CK804);
mask |= (NV_INT_MASK << shift);
writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}
static void nv_mcp55_freeze(struct ata_port *ap)
{
void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
u32 mask;
writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
mask &= ~(NV_INT_ALL_MCP55 << shift);
writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
}
static void nv_mcp55_thaw(struct ata_port *ap)
{
void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
u32 mask;
writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
mask |= (NV_INT_MASK_MCP55 << shift);
writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
}
static void nv_adma_error_handler(struct ata_port *ap)
{
struct nv_adma_port_priv *pp = ap->private_data;
if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
void __iomem *mmio = pp->ctl_block;
int i;
u16 tmp;
if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
u32 status = readw(mmio + NV_ADMA_STAT);
u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
ata_port_err(ap,
"EH in ADMA mode, notifier 0x%X "
"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
"next cpb count 0x%X next cpb idx 0x%x\n",
notifier, notifier_error, gen_ctl, status,
cpb_count, next_cpb_idx);
for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
struct nv_adma_cpb *cpb = &pp->cpb[i];
if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
ap->link.sactive & (1 << i))
ata_port_err(ap,
"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
i, cpb->ctl_flags, cpb->resp_flags);
}
}
/* Push us back into port register mode for error handling. */
nv_adma_register_mode(ap);
/* Mark all of the CPBs as invalid to prevent them from
being executed */
for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
/* clear CPB fetch count */
writew(0, mmio + NV_ADMA_CPB_COUNT);
/* Reset channel */
tmp = readw(mmio + NV_ADMA_CTL);
writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
readw(mmio + NV_ADMA_CTL); /* flush posted write */
udelay(1);
writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
readw(mmio + NV_ADMA_CTL); /* flush posted write */
}
ata_bmdma_error_handler(ap);
}
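/* Append a command to the software NCQ defer queue (a small ring indexed
 * modulo ATA_MAX_QUEUE). */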
static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
{
struct nv_swncq_port_priv *pp = ap->private_data;
struct defer_queue *dq = &pp->defer_queue;
/* queue is full */
WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
dq->defer_bits |= (1 << qc->hw_tag);
dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->hw_tag;
}
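/* Pop the oldest deferred command from the defer queue, or return NULL if
 * the queue is empty. */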
static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
{
struct nv_swncq_port_priv *pp = ap->private_data;
struct defer_queue *dq = &pp->defer_queue;
unsigned int tag;
if (dq->head == dq->tail) /* null queue */
return NULL;
tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
WARN_ON(!(dq->defer_bits & (1 << tag)));
dq->defer_bits &= ~(1 << tag);
return ata_qc_from_tag(ap, tag);
}
static void nv_swncq_fis_reinit(struct ata_port *ap)
{
struct nv_swncq_port_priv *pp = ap->private_data;
pp->dhfis_bits = 0;
pp->dmafis_bits = 0;
pp->sdbfis_bits = 0;
pp->ncq_flags = 0;
}
static void nv_swncq_pp_reinit(struct ata_port *ap)
{
struct nv_swncq_port_priv *pp = ap->private_data;
struct defer_queue *dq = &pp->defer_queue;
dq->head = 0;
dq->tail = 0;
dq->defer_bits = 0;
pp->qc_active = 0;
pp->last_issue_tag = ATA_TAG_POISON;
nv_swncq_fis_reinit(ap);
}
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
{
struct nv_swncq_port_priv *pp = ap->private_data;
writew(fis, pp->irq_block);
}
static void __ata_bmdma_stop(struct ata_port *ap)
{
struct ata_queued_cmd qc;
qc.ap = ap;
ata_bmdma_stop(&qc);
}
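/* Dump the SWNCQ bookkeeping state for debugging, then quiesce the port:
 * reinitialize the per-port state, clear interrupt status and stop any
 * BMDMA transfer still in flight. */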
static void nv_swncq_ncq_stop(struct ata_port *ap)
{
struct nv_swncq_port_priv *pp = ap->private_data;
unsigned int i;
u32 sactive;
u32 done_mask;
ata_port_err(ap, "EH in SWNCQ mode,QC:qc_active 0x%llX sactive 0x%X\n",
ap->qc_active, ap->link.sactive);
ata_port_err(ap,
"SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n "
"dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
ata_port_err(ap, "ATA_REG 0x%X ERR_REG 0x%X\n",
ap->ops->sff_check_status(ap),
ioread8(ap->ioaddr.error_addr));
sactive = readl(pp->sactive_block);
done_mask = pp->qc_active ^ sactive;
ata_port_err(ap, "tag : dhfis dmafis sdbfis sactive\n");
for (i = 0; i < ATA_MAX_QUEUE; i++) {
u8 err = 0;
if (pp->qc_active & (1 << i))
err = 0;
else if (done_mask & (1 << i))
err = 1;
else
continue;
ata_port_err(ap,
"tag 0x%x: %01x %01x %01x %01x %s\n", i,
(pp->dhfis_bits >> i) & 0x1,
(pp->dmafis_bits >> i) & 0x1,
(pp->sdbfis_bits >> i) & 0x1,
(sactive >> i) & 0x1,
(err ? "error! tag doesn't exit" : " "));
}
nv_swncq_pp_reinit(ap);
ap->ops->sff_irq_clear(ap);
__ata_bmdma_stop(ap);
nv_swncq_irq_clear(ap, 0xffff);
}
static void nv_swncq_error_handler(struct ata_port *ap)
{
struct ata_eh_context *ehc = &ap->link.eh_context;
if (ap->link.sactive) {
nv_swncq_ncq_stop(ap);
ehc->i.action |= ATA_EH_RESET;
}
ata_bmdma_error_handler(ap);
}
#ifdef CONFIG_PM
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
u32 tmp;
/* clear irq */
writel(~0, mmio + NV_INT_STATUS_MCP55);
/* disable irq */
writel(0, mmio + NV_INT_ENABLE_MCP55);
/* disable swncq */
tmp = readl(mmio + NV_CTL_MCP55);
tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
writel(tmp, mmio + NV_CTL_MCP55);
return 0;
}
static int nv_swncq_port_resume(struct ata_port *ap)
{
void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
u32 tmp;
/* clear irq */
writel(~0, mmio + NV_INT_STATUS_MCP55);
/* enable irq */
writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
/* enable swncq */
tmp = readl(mmio + NV_CTL_MCP55);
writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
return 0;
}
#endif
static void nv_swncq_host_init(struct ata_host *host)
{
u32 tmp;
void __iomem *mmio = host->iomap[NV_MMIO_BAR];
struct pci_dev *pdev = to_pci_dev(host->dev);
u8 regval;
/* disable ECO 398 */
pci_read_config_byte(pdev, 0x7f, &regval);
regval &= ~(1 << 7);
pci_write_config_byte(pdev, 0x7f, regval);
/* enable swncq */
tmp = readl(mmio + NV_CTL_MCP55);
dev_dbg(&pdev->dev, "HOST_CTL:0x%X\n", tmp);
writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
/* enable irq intr */
tmp = readl(mmio + NV_INT_ENABLE_MCP55);
dev_dbg(&pdev->dev, "HOST_ENABLE:0x%X\n", tmp);
writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
/* clear port irq */
writel(~0x0, mmio + NV_INT_STATUS_MCP55);
}
static int nv_swncq_slave_config(struct scsi_device *sdev)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
struct ata_device *dev;
int rc;
u8 rev;
u8 check_maxtor = 0;
unsigned char model_num[ATA_ID_PROD_LEN + 1];
rc = ata_scsi_slave_config(sdev);
if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
/* Not a proper libata device, ignore */
return rc;
dev = &ap->link.device[sdev->id];
if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
return rc;
/* if MCP51 and Maxtor, then disable ncq */
if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
check_maxtor = 1;
/* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
pci_read_config_byte(pdev, 0x8, &rev);
if (rev <= 0xa2)
check_maxtor = 1;
}
if (!check_maxtor)
return rc;
ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
if (strncmp(model_num, "Maxtor", 6) == 0) {
ata_scsi_change_queue_depth(sdev, 1);
ata_dev_notice(dev, "Disabling SWNCQ mode (depth %x)\n",
sdev->queue_depth);
}
return rc;
}
static int nv_swncq_port_start(struct ata_port *ap)
{
struct device *dev = ap->host->dev;
void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
struct nv_swncq_port_priv *pp;
int rc;
/* we might fallback to bmdma, allocate bmdma resources */
rc = ata_bmdma_port_start(ap);
if (rc)
return rc;
pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
if (!pp)
return -ENOMEM;
pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
&pp->prd_dma, GFP_KERNEL);
if (!pp->prd)
return -ENOMEM;
ap->private_data = pp;
pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
return 0;
}
static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc)
{
if (qc->tf.protocol != ATA_PROT_NCQ) {
ata_bmdma_qc_prep(qc);
return AC_ERR_OK;
}
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
return AC_ERR_OK;
nv_swncq_fill_sg(qc);
return AC_ERR_OK;
}
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct scatterlist *sg;
struct nv_swncq_port_priv *pp = ap->private_data;
struct ata_bmdma_prd *prd;
unsigned int si, idx;
prd = pp->prd + ATA_MAX_PRD * qc->hw_tag;
idx = 0;
for_each_sg(qc->sg, sg, qc->n_elem, si) {
u32 addr, offset;
u32 sg_len, len;
addr = (u32)sg_dma_address(sg);
sg_len = sg_dma_len(sg);
while (sg_len) {
offset = addr & 0xffff;
len = sg_len;
if ((offset + sg_len) > 0x10000)
len = 0x10000 - offset;
prd[idx].addr = cpu_to_le32(addr);
prd[idx].flags_len = cpu_to_le32(len & 0xffff);
idx++;
sg_len -= len;
addr += len;
}
}
prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
struct ata_queued_cmd *qc)
{
struct nv_swncq_port_priv *pp = ap->private_data;
if (qc == NULL)
return 0;
writel((1 << qc->hw_tag), pp->sactive_block);
pp->last_issue_tag = qc->hw_tag;
pp->dhfis_bits &= ~(1 << qc->hw_tag);
pp->dmafis_bits &= ~(1 << qc->hw_tag);
pp->qc_active |= (0x1 << qc->hw_tag);
trace_ata_tf_load(ap, &qc->tf);
ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
trace_ata_exec_command(ap, &qc->tf, qc->hw_tag);
ap->ops->sff_exec_command(ap, &qc->tf);
return 0;
}
static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct nv_swncq_port_priv *pp = ap->private_data;
if (qc->tf.protocol != ATA_PROT_NCQ)
return ata_bmdma_qc_issue(qc);
if (!pp->qc_active)
nv_swncq_issue_atacmd(ap, qc);
else
nv_swncq_qc_to_dq(ap, qc); /* add qc to defer queue */
return 0;
}
static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
{
u32 serror;
struct ata_eh_info *ehi = &ap->link.eh_info;
ata_ehi_clear_desc(ehi);
/* AHCI needs SError cleared; otherwise, it might lock up */
sata_scr_read(&ap->link, SCR_ERROR, &serror);
sata_scr_write(&ap->link, SCR_ERROR, serror);
/* analyze @fis */
if (fis & NV_SWNCQ_IRQ_ADDED)
ata_ehi_push_desc(ehi, "hot plug");
else if (fis & NV_SWNCQ_IRQ_REMOVED)
ata_ehi_push_desc(ehi, "hot unplug");
ata_ehi_hotplugged(ehi);
/* okay, let's hand over to EH */
ehi->serror |= serror;
ata_port_freeze(ap);
}
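/* Handle a Set Device Bits FIS: complete the commands that have finished
 * and, if a command never saw its D2H Register FIS (or a backout occurred),
 * reissue it; otherwise feed the next command from the defer queue. */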
static int nv_swncq_sdbfis(struct ata_port *ap)
{
struct ata_queued_cmd *qc;
struct nv_swncq_port_priv *pp = ap->private_data;
struct ata_eh_info *ehi = &ap->link.eh_info;
u32 sactive;
u32 done_mask;
u8 host_stat;
u8 lack_dhfis = 0;
host_stat = ap->ops->bmdma_status(ap);
trace_ata_bmdma_status(ap, host_stat);
if (unlikely(host_stat & ATA_DMA_ERR)) {
/* error when transferring data to/from memory */
ata_ehi_clear_desc(ehi);
ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
ehi->err_mask |= AC_ERR_HOST_BUS;
ehi->action |= ATA_EH_RESET;
return -EINVAL;
}
ap->ops->sff_irq_clear(ap);
__ata_bmdma_stop(ap);
sactive = readl(pp->sactive_block);
done_mask = pp->qc_active ^ sactive;
pp->qc_active &= ~done_mask;
pp->dhfis_bits &= ~done_mask;
pp->dmafis_bits &= ~done_mask;
pp->sdbfis_bits |= done_mask;
ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
if (!ap->qc_active) {
ata_port_dbg(ap, "over\n");
nv_swncq_pp_reinit(ap);
return 0;
}
if (pp->qc_active & pp->dhfis_bits)
return 0;
if ((pp->ncq_flags & ncq_saw_backout) ||
(pp->qc_active ^ pp->dhfis_bits))
/* If the controller could not get a Device-to-Host Register FIS,
 * the driver needs to reissue the new command.
*/
lack_dhfis = 1;
ata_port_dbg(ap, "QC: qc_active 0x%llx,"
"SWNCQ:qc_active 0x%X defer_bits %X "
"dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
ap->qc_active, pp->qc_active,
pp->defer_queue.defer_bits, pp->dhfis_bits,
pp->dmafis_bits, pp->last_issue_tag);
nv_swncq_fis_reinit(ap);
if (lack_dhfis) {
qc = ata_qc_from_tag(ap, pp->last_issue_tag);
nv_swncq_issue_atacmd(ap, qc);
return 0;
}
if (pp->defer_queue.defer_bits) {
/* send deferral queue command */
qc = nv_swncq_qc_from_dq(ap);
WARN_ON(qc == NULL);
nv_swncq_issue_atacmd(ap, qc);
}
return 0;
}
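/* Read the tag of the command the controller is currently setting up
 * (bits 2-6 of the per-port tag register). */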
static inline u32 nv_swncq_tag(struct ata_port *ap)
{
struct nv_swncq_port_priv *pp = ap->private_data;
u32 tag;
tag = readb(pp->tag_block) >> 2;
return (tag & 0x1f);
}
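/* Handle a DMA Setup FIS: point the BMDMA engine at the PRD table of the
 * tagged command, set the transfer direction and start the transfer. */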
static void nv_swncq_dmafis(struct ata_port *ap)
{
struct ata_queued_cmd *qc;
unsigned int rw;
u8 dmactl;
u32 tag;
struct nv_swncq_port_priv *pp = ap->private_data;
__ata_bmdma_stop(ap);
tag = nv_swncq_tag(ap);
ata_port_dbg(ap, "dma setup tag 0x%x\n", tag);
qc = ata_qc_from_tag(ap, tag);
if (unlikely(!qc))
return;
rw = qc->tf.flags & ATA_TFLAG_WRITE;
/* load PRD table addr. */
iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->hw_tag,
ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
/* specify data direction, triple-check start bit is clear */
dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
dmactl &= ~ATA_DMA_WR;
if (!rw)
dmactl |= ATA_DMA_WR;
iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}
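/* Per-port SWNCQ interrupt processing: hotplug events, device errors,
 * command backout, SDB FIS completion and DMA Setup FIS handling. */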
static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
{
struct nv_swncq_port_priv *pp = ap->private_data;
struct ata_queued_cmd *qc;
struct ata_eh_info *ehi = &ap->link.eh_info;
u32 serror;
u8 ata_stat;
ata_stat = ap->ops->sff_check_status(ap);
nv_swncq_irq_clear(ap, fis);
if (!fis)
return;
if (ata_port_is_frozen(ap))
return;
if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
nv_swncq_hotplug(ap, fis);
return;
}
if (!pp->qc_active)
return;
if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
return;
ap->ops->scr_write(&ap->link, SCR_ERROR, serror);
if (ata_stat & ATA_ERR) {
ata_ehi_clear_desc(ehi);
ata_ehi_push_desc(ehi, "Ata error. fis:0x%X", fis);
ehi->err_mask |= AC_ERR_DEV;
ehi->serror |= serror;
ehi->action |= ATA_EH_RESET;
ata_port_freeze(ap);
return;
}
if (fis & NV_SWNCQ_IRQ_BACKOUT) {
/* If the controller backed out of the command, the driver
 * must issue the command again some time later.
*/
pp->ncq_flags |= ncq_saw_backout;
}
if (fis & NV_SWNCQ_IRQ_SDBFIS) {
pp->ncq_flags |= ncq_saw_sdb;
ata_port_dbg(ap, "SWNCQ: qc_active 0x%X "
"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
pp->qc_active, pp->dhfis_bits,
pp->dmafis_bits, readl(pp->sactive_block));
if (nv_swncq_sdbfis(ap) < 0)
goto irq_error;
}
if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
/* The interrupt indicates the new command
* was transmitted correctly to the drive.
*/
pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
pp->ncq_flags |= ncq_saw_d2h;
if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
ata_ehi_push_desc(ehi, "illegal fis transaction");
ehi->err_mask |= AC_ERR_HSM;
ehi->action |= ATA_EH_RESET;
goto irq_error;
}
if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
!(pp->ncq_flags & ncq_saw_dmas)) {
ata_stat = ap->ops->sff_check_status(ap);
if (ata_stat & ATA_BUSY)
goto irq_exit;
if (pp->defer_queue.defer_bits) {
ata_port_dbg(ap, "send next command\n");
qc = nv_swncq_qc_from_dq(ap);
nv_swncq_issue_atacmd(ap, qc);
}
}
}
if (fis & NV_SWNCQ_IRQ_DMASETUP) {
/* program the dma controller with appropriate PRD buffers
* and start the DMA transfer for requested command.
*/
pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
pp->ncq_flags |= ncq_saw_dmas;
nv_swncq_dmafis(ap);
}
irq_exit:
return;
irq_error:
ata_ehi_push_desc(ehi, "fis:0x%x", fis);
ata_port_freeze(ap);
return;
}
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
unsigned int i;
unsigned int handled = 0;
unsigned long flags;
u32 irq_stat;
spin_lock_irqsave(&host->lock, flags);
irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
if (ap->link.sactive) {
nv_swncq_host_interrupt(ap, (u16)irq_stat);
handled = 1;
} else {
if (irq_stat) /* preserve Hotplug */
nv_swncq_irq_clear(ap, 0xfff0);
handled += nv_host_intr(ap, (u8)irq_stat);
}
irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
}
spin_unlock_irqrestore(&host->lock, flags);
return IRQ_RETVAL(handled);
}
static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct ata_port_info *ppi[] = { NULL, NULL };
struct nv_pi_priv *ipriv;
struct ata_host *host;
struct nv_host_priv *hpriv;
int rc;
u32 bar;
void __iomem *base;
unsigned long type = ent->driver_data;
// Make sure this is a SATA controller by counting the number of bars
// (NVIDIA SATA controllers will always have six bars). Otherwise,
// it's an IDE controller and we ignore it.
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
if (pci_resource_start(pdev, bar) == 0)
return -ENODEV;
ata_print_version_once(&pdev->dev, DRV_VERSION);
rc = pcim_enable_device(pdev);
if (rc)
return rc;
/* determine type and allocate host */
if (type == CK804 && adma_enabled) {
dev_notice(&pdev->dev, "Using ADMA mode\n");
type = ADMA;
} else if (type == MCP5x && swncq_enabled) {
dev_notice(&pdev->dev, "Using SWNCQ mode\n");
type = SWNCQ;
}
ppi[0] = &nv_port_info[type];
ipriv = ppi[0]->private_data;
rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
if (rc)
return rc;
hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
if (!hpriv)
return -ENOMEM;
hpriv->type = type;
host->private_data = hpriv;
/* request and iomap NV_MMIO_BAR */
rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
if (rc)
return rc;
/* configure SCR access */
base = host->iomap[NV_MMIO_BAR];
host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
/* enable SATA space for CK804 */
if (type >= CK804) {
u8 regval;
pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
}
/* init ADMA */
if (type == ADMA) {
rc = nv_adma_host_init(host);
if (rc)
return rc;
} else if (type == SWNCQ)
nv_swncq_host_init(host);
if (msi_enabled) {
dev_notice(&pdev->dev, "Using MSI\n");
pci_enable_msi(pdev);
}
pci_set_master(pdev);
return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
}
#ifdef CONFIG_PM_SLEEP
static int nv_pci_device_resume(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
struct nv_host_priv *hpriv = host->private_data;
int rc;
rc = ata_pci_device_do_resume(pdev);
if (rc)
return rc;
if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
if (hpriv->type >= CK804) {
u8 regval;
pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
}
if (hpriv->type == ADMA) {
u32 tmp32;
struct nv_adma_port_priv *pp;
/* enable/disable ADMA on the ports appropriately */
pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
pp = host->ports[0]->private_data;
if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
else
tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
pp = host->ports[1]->private_data;
if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
else
tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
}
}
ata_host_resume(host);
return 0;
}
#endif
static void nv_ck804_host_stop(struct ata_host *host)
{
struct pci_dev *pdev = to_pci_dev(host->dev);
u8 regval;
/* disable SATA space for CK804 */
pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
}
static void nv_adma_host_stop(struct ata_host *host)
{
struct pci_dev *pdev = to_pci_dev(host->dev);
u32 tmp32;
/* disable ADMA on the ports */
pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
NV_MCP_SATA_CFG_20_PORT1_EN |
NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
nv_ck804_host_stop(host);
}
module_pci_driver(nv_pci_driver);
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)");
module_param_named(swncq, swncq_enabled, bool, 0444);
MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
module_param_named(msi, msi_enabled, bool, 0444);
MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)");
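/*
 * Hypothetical load-time example: "modprobe sata_nv adma=1 msi=1" enables
 * ADMA and MSI; SWNCQ is enabled by default and can be turned off with
 * swncq=0.
 */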
| linux-master | drivers/ata/sata_nv.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* pata_cmd640.c - CMD640 PCI PATA for new ATA layer
* (C) 2007 Red Hat Inc
*
* Based upon
* linux/drivers/ide/pci/cmd640.c Version 1.02 Sep 01, 1996
*
* Copyright (C) 1995-1996 Linus Torvalds & authors (see driver)
*
* This drives only the PCI version of the controller. If you have a
* VLB one then we have enough docs to support it but you can write
* your own code.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_cmd640"
#define DRV_VERSION "0.0.5"
struct cmd640_reg {
int last;
u8 reg58[ATA_MAX_DEVICES];
};
enum {
CFR = 0x50,
CNTRL = 0x51,
CMDTIM = 0x52,
ARTIM0 = 0x53,
DRWTIM0 = 0x54,
ARTIM23 = 0x57,
DRWTIM23 = 0x58,
BRST = 0x59
};
/**
* cmd640_set_piomode - set initial PIO mode data
* @ap: ATA port
* @adev: ATA device
*
* Called to do the PIO mode setup.
*/
static void cmd640_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
struct cmd640_reg *timing = ap->private_data;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
struct ata_timing t;
const unsigned long T = 1000000 / 33;
const u8 setup_data[] = { 0x40, 0x40, 0x40, 0x80, 0x00 };
u8 reg;
int arttim = ARTIM0 + 2 * adev->devno;
struct ata_device *pair = ata_dev_pair(adev);
if (ata_timing_compute(adev, adev->pio_mode, &t, T, 0) < 0) {
ata_dev_err(adev, DRV_NAME ": mode computation failed.\n");
return;
}
/* The second channel has shared timings and the setup timing is
messy to switch, so merge it for the worst case */
if (ap->port_no && pair) {
struct ata_timing p;
ata_timing_compute(pair, pair->pio_mode, &p, T, 1);
ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP);
}
/* Make the timings fit */
if (t.recover > 16) {
t.active += t.recover - 16;
t.recover = 16;
}
if (t.active > 16)
t.active = 16;
/* Now convert the clocks into values we can actually stuff into
the chip */
if (t.recover > 1)
t.recover--; /* 640B only */
else
t.recover = 15;
if (t.setup > 4)
t.setup = 0xC0;
else
t.setup = setup_data[t.setup];
if (ap->port_no == 0) {
t.active &= 0x0F; /* 0 = 16 */
/* Load setup timing */
pci_read_config_byte(pdev, arttim, &reg);
reg &= 0x3F;
reg |= t.setup;
pci_write_config_byte(pdev, arttim, reg);
/* Load active/recovery */
pci_write_config_byte(pdev, arttim + 1, (t.active << 4) | t.recover);
} else {
/* Save the shared timings for channel, they will be loaded
by qc_issue. Reloading the setup time is expensive so we
keep a merged one loaded */
pci_read_config_byte(pdev, ARTIM23, &reg);
reg &= 0x3F;
reg |= t.setup;
pci_write_config_byte(pdev, ARTIM23, reg);
timing->reg58[adev->devno] = (t.active << 4) | t.recover;
}
}
/**
* cmd640_qc_issue - command preparation hook
* @qc: Command to be issued
*
* Channel 1 has shared timings. We must reprogram the
* clock on each switch between drive 2 and drive 3.
*/
static unsigned int cmd640_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_device *adev = qc->dev;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
struct cmd640_reg *timing = ap->private_data;
if (ap->port_no != 0 && adev->devno != timing->last) {
pci_write_config_byte(pdev, DRWTIM23, timing->reg58[adev->devno]);
timing->last = adev->devno;
}
return ata_sff_qc_issue(qc);
}
/**
* cmd640_port_start - port setup
* @ap: ATA port being set up
*
* The CMD640 needs to maintain private data structures so we
* allocate space here.
*/
static int cmd640_port_start(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
struct cmd640_reg *timing;
timing = devm_kzalloc(&pdev->dev, sizeof(struct cmd640_reg), GFP_KERNEL);
if (timing == NULL)
return -ENOMEM;
timing->last = -1; /* Force a load */
ap->private_data = timing;
return 0;
}
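/* Check the channel's interrupt-pending bit in PCI config space (CFR for
 * the primary channel, ARTIM23 for the secondary). */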
static bool cmd640_sff_irq_check(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int irq_reg = ap->port_no ? ARTIM23 : CFR;
u8 irq_stat, irq_mask = ap->port_no ? 0x10 : 0x04;
pci_read_config_byte(pdev, irq_reg, &irq_stat);
return irq_stat & irq_mask;
}
static const struct scsi_host_template cmd640_sht = {
ATA_PIO_SHT(DRV_NAME),
};
static struct ata_port_operations cmd640_port_ops = {
.inherits = &ata_sff_port_ops,
/* In theory xfer_noirq is not needed once we kill the prefetcher */
.sff_data_xfer = ata_sff_data_xfer32,
.sff_irq_check = cmd640_sff_irq_check,
.qc_issue = cmd640_qc_issue,
.cable_detect = ata_cable_40wire,
.set_piomode = cmd640_set_piomode,
.port_start = cmd640_port_start,
};
static void cmd640_hardware_init(struct pci_dev *pdev)
{
u8 ctrl;
/* CMD640 detected, commiserations */
pci_write_config_byte(pdev, 0x5B, 0x00);
/* PIO0 command cycles */
pci_write_config_byte(pdev, CMDTIM, 0);
/* 512 byte bursts (sector) */
pci_write_config_byte(pdev, BRST, 0x40);
/*
* A reporter a long time ago
* Had problems with the data fifo
* So don't run the risk
* Of putting crap on the disk
* For its better just to go slow
*/
/* Do channel 0 */
pci_read_config_byte(pdev, CNTRL, &ctrl);
pci_write_config_byte(pdev, CNTRL, ctrl | 0xC0);
/* Ditto for channel 1 */
pci_read_config_byte(pdev, ARTIM23, &ctrl);
ctrl |= 0x0C;
pci_write_config_byte(pdev, ARTIM23, ctrl);
}
static int cmd640_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.port_ops = &cmd640_port_ops
};
const struct ata_port_info *ppi[] = { &info, NULL };
int rc;
rc = pcim_enable_device(pdev);
if (rc)
return rc;
cmd640_hardware_init(pdev);
return ata_pci_sff_init_one(pdev, ppi, &cmd640_sht, NULL, 0);
}
#ifdef CONFIG_PM_SLEEP
static int cmd640_reinit_one(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
int rc;
rc = ata_pci_device_do_resume(pdev);
if (rc)
return rc;
cmd640_hardware_init(pdev);
ata_host_resume(host);
return 0;
}
#endif
static const struct pci_device_id cmd640[] = {
{ PCI_VDEVICE(CMD, 0x640), 0 },
{ },
};
static struct pci_driver cmd640_pci_driver = {
.name = DRV_NAME,
.id_table = cmd640,
.probe = cmd640_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = cmd640_reinit_one,
#endif
};
module_pci_driver(cmd640_pci_driver);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for CMD640 PATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, cmd640);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_cmd640.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* pdc_adma.c - Pacific Digital Corporation ADMA
*
* Maintained by: Tejun Heo <[email protected]>
*
* Copyright 2005 Mark Lord
*
* libata documentation is available via 'make {ps|pdf}docs',
* as Documentation/driver-api/libata.rst
*
* Supports ATA disks in single-packet ADMA mode.
* Uses PIO for everything else.
*
* TODO: Use ADMA transfers for ATAPI devices, when possible.
* This requires careful attention to a number of quirks of the chip.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pdc_adma"
#define DRV_VERSION "1.0"
/* macro to calculate base address for ATA regs */
#define ADMA_ATA_REGS(base, port_no) ((base) + ((port_no) * 0x40))
/* macro to calculate base address for ADMA regs */
#define ADMA_REGS(base, port_no) ((base) + 0x80 + ((port_no) * 0x20))
/* macro to obtain addresses from ata_port */
#define ADMA_PORT_REGS(ap) \
ADMA_REGS((ap)->host->iomap[ADMA_MMIO_BAR], ap->port_no)
enum {
ADMA_MMIO_BAR = 4,
ADMA_PORTS = 2,
ADMA_CPB_BYTES = 40,
ADMA_PRD_BYTES = LIBATA_MAX_PRD * 16,
ADMA_PKT_BYTES = ADMA_CPB_BYTES + ADMA_PRD_BYTES,
ADMA_DMA_BOUNDARY = 0xffffffff,
/* global register offsets */
ADMA_MODE_LOCK = 0x00c7,
/* per-channel register offsets */
ADMA_CONTROL = 0x0000, /* ADMA control */
ADMA_STATUS = 0x0002, /* ADMA status */
ADMA_CPB_COUNT = 0x0004, /* CPB count */
ADMA_CPB_CURRENT = 0x000c, /* current CPB address */
ADMA_CPB_NEXT = 0x000c, /* next CPB address */
ADMA_CPB_LOOKUP = 0x0010, /* CPB lookup table */
ADMA_FIFO_IN = 0x0014, /* input FIFO threshold */
ADMA_FIFO_OUT = 0x0016, /* output FIFO threshold */
/* ADMA_CONTROL register bits */
aNIEN = (1 << 8), /* irq mask: 1==masked */
aGO = (1 << 7), /* packet trigger ("Go!") */
aRSTADM = (1 << 5), /* ADMA logic reset */
aPIOMD4 = 0x0003, /* PIO mode 4 */
/* ADMA_STATUS register bits */
aPSD = (1 << 6),
aUIRQ = (1 << 4),
aPERR = (1 << 0),
/* CPB bits */
cDONE = (1 << 0),
cATERR = (1 << 3),
cVLD = (1 << 0),
cDAT = (1 << 2),
cIEN = (1 << 3),
/* PRD bits */
pORD = (1 << 4),
pDIRO = (1 << 5),
pEND = (1 << 7),
/* ATA register flags */
rIGN = (1 << 5),
rEND = (1 << 7),
/* ATA register addresses */
ADMA_REGS_CONTROL = 0x0e,
ADMA_REGS_SECTOR_COUNT = 0x12,
ADMA_REGS_LBA_LOW = 0x13,
ADMA_REGS_LBA_MID = 0x14,
ADMA_REGS_LBA_HIGH = 0x15,
ADMA_REGS_DEVICE = 0x16,
ADMA_REGS_COMMAND = 0x17,
/* PCI device IDs */
board_1841_idx = 0, /* ADMA 2-port controller */
};
typedef enum { adma_state_idle, adma_state_pkt, adma_state_mmio } adma_state_t;
struct adma_port_priv {
u8 *pkt;
dma_addr_t pkt_dma;
adma_state_t state;
};
static int adma_ata_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent);
static int adma_port_start(struct ata_port *ap);
static void adma_port_stop(struct ata_port *ap);
static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int adma_qc_issue(struct ata_queued_cmd *qc);
static int adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void adma_freeze(struct ata_port *ap);
static void adma_thaw(struct ata_port *ap);
static int adma_prereset(struct ata_link *link, unsigned long deadline);
static const struct scsi_host_template adma_ata_sht = {
ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = LIBATA_MAX_PRD,
.dma_boundary = ADMA_DMA_BOUNDARY,
};
static struct ata_port_operations adma_ata_ops = {
.inherits = &ata_sff_port_ops,
.lost_interrupt = ATA_OP_NULL,
.check_atapi_dma = adma_check_atapi_dma,
.qc_prep = adma_qc_prep,
.qc_issue = adma_qc_issue,
.freeze = adma_freeze,
.thaw = adma_thaw,
.prereset = adma_prereset,
.port_start = adma_port_start,
.port_stop = adma_port_stop,
};
static struct ata_port_info adma_port_info[] = {
/* board_1841_idx */
{
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_POLLING,
.pio_mask = ATA_PIO4_ONLY,
.udma_mask = ATA_UDMA4,
.port_ops = &adma_ata_ops,
},
};
static const struct pci_device_id adma_ata_pci_tbl[] = {
{ PCI_VDEVICE(PDC, 0x1841), board_1841_idx },
{ } /* terminate list */
};
static struct pci_driver adma_ata_pci_driver = {
.name = DRV_NAME,
.id_table = adma_ata_pci_tbl,
.probe = adma_ata_init_one,
.remove = ata_pci_remove_one,
};
static int adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
return 1; /* ATAPI DMA not yet supported */
}
static void adma_reset_engine(struct ata_port *ap)
{
void __iomem *chan = ADMA_PORT_REGS(ap);
/* reset ADMA to idle state */
writew(aPIOMD4 | aNIEN | aRSTADM, chan + ADMA_CONTROL);
udelay(2);
writew(aPIOMD4, chan + ADMA_CONTROL);
udelay(2);
}
static void adma_reinit_engine(struct ata_port *ap)
{
struct adma_port_priv *pp = ap->private_data;
void __iomem *chan = ADMA_PORT_REGS(ap);
/* mask/clear ATA interrupts */
writeb(ATA_NIEN, ap->ioaddr.ctl_addr);
ata_sff_check_status(ap);
/* reset the ADMA engine */
adma_reset_engine(ap);
/* set in-FIFO threshold to 0x100 */
writew(0x100, chan + ADMA_FIFO_IN);
/* set CPB pointer */
writel((u32)pp->pkt_dma, chan + ADMA_CPB_NEXT);
/* set out-FIFO threshold to 0x100 */
writew(0x100, chan + ADMA_FIFO_OUT);
/* set CPB count */
writew(1, chan + ADMA_CPB_COUNT);
/* read/discard ADMA status */
readb(chan + ADMA_STATUS);
}
static inline void adma_enter_reg_mode(struct ata_port *ap)
{
void __iomem *chan = ADMA_PORT_REGS(ap);
writew(aPIOMD4, chan + ADMA_CONTROL);
readb(chan + ADMA_STATUS); /* flush */
}
static void adma_freeze(struct ata_port *ap)
{
void __iomem *chan = ADMA_PORT_REGS(ap);
/* mask/clear ATA interrupts */
writeb(ATA_NIEN, ap->ioaddr.ctl_addr);
ata_sff_check_status(ap);
/* reset ADMA to idle state */
writew(aPIOMD4 | aNIEN | aRSTADM, chan + ADMA_CONTROL);
udelay(2);
writew(aPIOMD4 | aNIEN, chan + ADMA_CONTROL);
udelay(2);
}
static void adma_thaw(struct ata_port *ap)
{
adma_reinit_engine(ap);
}
static int adma_prereset(struct ata_link *link, unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct adma_port_priv *pp = ap->private_data;
if (pp->state != adma_state_idle) /* healthy paranoia */
pp->state = adma_state_mmio;
adma_reinit_engine(ap);
return ata_sff_prereset(link, deadline);
}
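/* Append one ADMA PRD entry per scatterlist element directly after the CPB
 * in the packet buffer, chaining each entry to the next and flagging the
 * last with pEND; returns the offset of the first free byte in the packet. */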
static int adma_fill_sg(struct ata_queued_cmd *qc)
{
struct scatterlist *sg;
struct ata_port *ap = qc->ap;
struct adma_port_priv *pp = ap->private_data;
u8 *buf = pp->pkt, *last_buf = NULL;
int i = (2 + buf[3]) * 8;
u8 pFLAGS = pORD | ((qc->tf.flags & ATA_TFLAG_WRITE) ? pDIRO : 0);
unsigned int si;
for_each_sg(qc->sg, sg, qc->n_elem, si) {
u32 addr;
u32 len;
addr = (u32)sg_dma_address(sg);
*(__le32 *)(buf + i) = cpu_to_le32(addr);
i += 4;
len = sg_dma_len(sg) >> 3;
*(__le32 *)(buf + i) = cpu_to_le32(len);
i += 4;
last_buf = &buf[i];
buf[i++] = pFLAGS;
buf[i++] = qc->dev->dma_mode & 0xf;
buf[i++] = 0; /* pPKLW */
buf[i++] = 0; /* reserved */
*(__le32 *)(buf + i) =
(pFLAGS & pEND) ? 0 : cpu_to_le32(pp->pkt_dma + i + 4);
i += 4;
}
if (likely(last_buf))
*last_buf |= pEND;
return i;
}
static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc)
{
struct adma_port_priv *pp = qc->ap->private_data;
u8 *buf = pp->pkt;
u32 pkt_dma = (u32)pp->pkt_dma;
int i = 0;
adma_enter_reg_mode(qc->ap);
if (qc->tf.protocol != ATA_PROT_DMA)
return AC_ERR_OK;
buf[i++] = 0; /* Response flags */
buf[i++] = 0; /* reserved */
buf[i++] = cVLD | cDAT | cIEN;
i++; /* cLEN, gets filled in below */
*(__le32 *)(buf+i) = cpu_to_le32(pkt_dma); /* cNCPB */
i += 4; /* cNCPB */
i += 4; /* cPRD, gets filled in below */
buf[i++] = 0; /* reserved */
buf[i++] = 0; /* reserved */
buf[i++] = 0; /* reserved */
buf[i++] = 0; /* reserved */
/* ATA registers; must be a multiple of 4 */
buf[i++] = qc->tf.device;
buf[i++] = ADMA_REGS_DEVICE;
if ((qc->tf.flags & ATA_TFLAG_LBA48)) {
buf[i++] = qc->tf.hob_nsect;
buf[i++] = ADMA_REGS_SECTOR_COUNT;
buf[i++] = qc->tf.hob_lbal;
buf[i++] = ADMA_REGS_LBA_LOW;
buf[i++] = qc->tf.hob_lbam;
buf[i++] = ADMA_REGS_LBA_MID;
buf[i++] = qc->tf.hob_lbah;
buf[i++] = ADMA_REGS_LBA_HIGH;
}
buf[i++] = qc->tf.nsect;
buf[i++] = ADMA_REGS_SECTOR_COUNT;
buf[i++] = qc->tf.lbal;
buf[i++] = ADMA_REGS_LBA_LOW;
buf[i++] = qc->tf.lbam;
buf[i++] = ADMA_REGS_LBA_MID;
buf[i++] = qc->tf.lbah;
buf[i++] = ADMA_REGS_LBA_HIGH;
buf[i++] = 0;
buf[i++] = ADMA_REGS_CONTROL;
buf[i++] = rIGN;
buf[i++] = 0;
buf[i++] = qc->tf.command;
buf[i++] = ADMA_REGS_COMMAND | rEND;
buf[3] = (i >> 3) - 2; /* cLEN */
*(__le32 *)(buf+8) = cpu_to_le32(pkt_dma + i); /* cPRD */
i = adma_fill_sg(qc);
wmb(); /* flush PRDs and pkt to memory */
return AC_ERR_OK;
}
static inline void adma_packet_start(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
void __iomem *chan = ADMA_PORT_REGS(ap);
/* fire up the ADMA engine */
writew(aPIOMD4 | aGO, chan + ADMA_CONTROL);
}
static unsigned int adma_qc_issue(struct ata_queued_cmd *qc)
{
struct adma_port_priv *pp = qc->ap->private_data;
switch (qc->tf.protocol) {
case ATA_PROT_DMA:
pp->state = adma_state_pkt;
adma_packet_start(qc);
return 0;
case ATAPI_PROT_DMA:
BUG();
break;
default:
break;
}
pp->state = adma_state_mmio;
return ata_sff_qc_issue(qc);
}
static inline unsigned int adma_intr_pkt(struct ata_host *host)
{
unsigned int handled = 0, port_no;
for (port_no = 0; port_no < host->n_ports; ++port_no) {
struct ata_port *ap = host->ports[port_no];
struct adma_port_priv *pp;
struct ata_queued_cmd *qc;
void __iomem *chan = ADMA_PORT_REGS(ap);
u8 status = readb(chan + ADMA_STATUS);
if (status == 0)
continue;
handled = 1;
adma_enter_reg_mode(ap);
pp = ap->private_data;
if (!pp || pp->state != adma_state_pkt)
continue;
qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
if (status & aPERR)
qc->err_mask |= AC_ERR_HOST_BUS;
else if ((status & (aPSD | aUIRQ)))
qc->err_mask |= AC_ERR_OTHER;
if (pp->pkt[0] & cATERR)
qc->err_mask |= AC_ERR_DEV;
else if (pp->pkt[0] != cDONE)
qc->err_mask |= AC_ERR_OTHER;
if (!qc->err_mask)
ata_qc_complete(qc);
else {
struct ata_eh_info *ehi = &ap->link.eh_info;
ata_ehi_clear_desc(ehi);
ata_ehi_push_desc(ehi,
"ADMA-status 0x%02X", status);
ata_ehi_push_desc(ehi,
"pkt[0] 0x%02X", pp->pkt[0]);
if (qc->err_mask == AC_ERR_DEV)
ata_port_abort(ap);
else
ata_port_freeze(ap);
}
}
}
return handled;
}
static inline unsigned int adma_intr_mmio(struct ata_host *host)
{
unsigned int handled = 0, port_no;
for (port_no = 0; port_no < host->n_ports; ++port_no) {
struct ata_port *ap = host->ports[port_no];
struct adma_port_priv *pp = ap->private_data;
struct ata_queued_cmd *qc;
if (!pp || pp->state != adma_state_mmio)
continue;
qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
/* check main status, clearing INTRQ */
u8 status = ata_sff_check_status(ap);
if ((status & ATA_BUSY))
continue;
/* complete taskfile transaction */
pp->state = adma_state_idle;
qc->err_mask |= ac_err_mask(status);
if (!qc->err_mask)
ata_qc_complete(qc);
else {
struct ata_eh_info *ehi = &ap->link.eh_info;
ata_ehi_clear_desc(ehi);
ata_ehi_push_desc(ehi, "status 0x%02X", status);
if (qc->err_mask == AC_ERR_DEV)
ata_port_abort(ap);
else
ata_port_freeze(ap);
}
handled = 1;
}
}
return handled;
}
static irqreturn_t adma_intr(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
unsigned int handled = 0;
spin_lock(&host->lock);
handled = adma_intr_pkt(host) | adma_intr_mmio(host);
spin_unlock(&host->lock);
return IRQ_RETVAL(handled);
}
static void adma_ata_setup_port(struct ata_ioports *port, void __iomem *base)
{
port->cmd_addr =
port->data_addr = base + 0x000;
port->error_addr =
port->feature_addr = base + 0x004;
port->nsect_addr = base + 0x008;
port->lbal_addr = base + 0x00c;
port->lbam_addr = base + 0x010;
port->lbah_addr = base + 0x014;
port->device_addr = base + 0x018;
port->status_addr =
port->command_addr = base + 0x01c;
port->altstatus_addr =
port->ctl_addr = base + 0x038;
}
static int adma_port_start(struct ata_port *ap)
{
struct device *dev = ap->host->dev;
struct adma_port_priv *pp;
adma_enter_reg_mode(ap);
pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
if (!pp)
return -ENOMEM;
pp->pkt = dmam_alloc_coherent(dev, ADMA_PKT_BYTES, &pp->pkt_dma,
GFP_KERNEL);
if (!pp->pkt)
return -ENOMEM;
/* paranoia? */
if ((pp->pkt_dma & 7) != 0) {
ata_port_err(ap, "bad alignment for pp->pkt_dma: %08x\n",
(u32)pp->pkt_dma);
return -ENOMEM;
}
ap->private_data = pp;
adma_reinit_engine(ap);
return 0;
}
static void adma_port_stop(struct ata_port *ap)
{
adma_reset_engine(ap);
}
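/* One-time controller init: lock ADMA "Go" mode operation and reset the
 * ADMA engine on both ports. */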
static void adma_host_init(struct ata_host *host, unsigned int chip_id)
{
unsigned int port_no;
/* enable/lock aGO operation */
writeb(7, host->iomap[ADMA_MMIO_BAR] + ADMA_MODE_LOCK);
/* reset the ADMA logic */
for (port_no = 0; port_no < ADMA_PORTS; ++port_no)
adma_reset_engine(host->ports[port_no]);
}
static int adma_ata_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
unsigned int board_idx = (unsigned int) ent->driver_data;
const struct ata_port_info *ppi[] = { &adma_port_info[board_idx], NULL };
struct ata_host *host;
void __iomem *mmio_base;
int rc, port_no;
ata_print_version_once(&pdev->dev, DRV_VERSION);
/* alloc host */
host = ata_host_alloc_pinfo(&pdev->dev, ppi, ADMA_PORTS);
if (!host)
return -ENOMEM;
/* acquire resources and fill host */
rc = pcim_enable_device(pdev);
if (rc)
return rc;
if ((pci_resource_flags(pdev, 4) & IORESOURCE_MEM) == 0)
return -ENODEV;
rc = pcim_iomap_regions(pdev, 1 << ADMA_MMIO_BAR, DRV_NAME);
if (rc)
return rc;
host->iomap = pcim_iomap_table(pdev);
mmio_base = host->iomap[ADMA_MMIO_BAR];
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (rc) {
dev_err(&pdev->dev, "32-bit DMA enable failed\n");
return rc;
}
for (port_no = 0; port_no < ADMA_PORTS; ++port_no) {
struct ata_port *ap = host->ports[port_no];
void __iomem *port_base = ADMA_ATA_REGS(mmio_base, port_no);
unsigned int offset = port_base - mmio_base;
adma_ata_setup_port(&ap->ioaddr, port_base);
ata_port_pbar_desc(ap, ADMA_MMIO_BAR, -1, "mmio");
ata_port_pbar_desc(ap, ADMA_MMIO_BAR, offset, "port");
}
/* initialize adapter */
adma_host_init(host, board_idx);
pci_set_master(pdev);
return ata_host_activate(host, pdev->irq, adma_intr, IRQF_SHARED,
&adma_ata_sht);
}
module_pci_driver(adma_ata_pci_driver);
MODULE_AUTHOR("Mark Lord");
MODULE_DESCRIPTION("Pacific Digital Corporation ADMA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, adma_ata_pci_tbl);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pdc_adma.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* pata_jmicron.c - JMicron ATA driver for non-AHCI mode. This drives the
* PATA port of the controller. The SATA ports are
* driven by AHCI in the usual configuration although
* this driver can handle other setups if we need it.
*
* (c) 2006 Red Hat
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/ata.h>
#define DRV_NAME "pata_jmicron"
#define DRV_VERSION "0.1.5"
typedef enum {
PORT_PATA0 = 0,
PORT_PATA1 = 1,
PORT_SATA = 2,
} port_type;
/**
* jmicron_pre_reset - check for 40/80 pin
* @link: ATA link
* @deadline: deadline jiffies for the operation
*
* Perform the PATA port setup we need.
*
* On the Jmicron 361/363 there is a single PATA port that can be mapped
* either as primary or secondary (or neither). We don't do any policy
* and setup here. We assume that has been done by init_one and the
* BIOS.
*/
static int jmicron_pre_reset(struct ata_link *link, unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u32 control;
u32 control5;
int port_mask = 1 << (4 * ap->port_no);
int port = ap->port_no;
port_type port_map[2];
/* Check if our port is enabled */
pci_read_config_dword(pdev, 0x40, &control);
if ((control & port_mask) == 0)
return -ENOENT;
/* There are two basic mappings. One has the two SATA ports merged
as master/slave and the secondary as PATA, the other has only the
SATA port mapped */
if (control & (1 << 23)) {
port_map[0] = PORT_SATA;
port_map[1] = PORT_PATA0;
} else {
port_map[0] = PORT_SATA;
port_map[1] = PORT_SATA;
}
/* The 365/366 may have this bit set to map the second PATA port
as the internal primary channel */
pci_read_config_dword(pdev, 0x80, &control5);
if (control5 & (1 << 24))
port_map[0] = PORT_PATA1;
/* The two ports may then be logically swapped by the firmware */
if (control & (1 << 22))
port = port ^ 1;
/*
* Now we know which physical port we are talking about we can
* actually do our cable checking etc. Thankfully we don't need
* to do the plumbing for other cases.
*/
switch (port_map[port]) {
case PORT_PATA0:
if ((control & (1 << 5)) == 0)
return -ENOENT;
if (control & (1 << 3)) /* 40/80 pin primary */
ap->cbl = ATA_CBL_PATA40;
else
ap->cbl = ATA_CBL_PATA80;
break;
case PORT_PATA1:
/* Bit 21 is set if the port is enabled */
if ((control5 & (1 << 21)) == 0)
return -ENOENT;
if (control5 & (1 << 19)) /* 40/80 pin secondary */
ap->cbl = ATA_CBL_PATA40;
else
ap->cbl = ATA_CBL_PATA80;
break;
case PORT_SATA:
ap->cbl = ATA_CBL_SATA;
break;
}
return ata_sff_prereset(link, deadline);
}
/* No PIO or DMA methods needed for this device */
static const struct scsi_host_template jmicron_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static struct ata_port_operations jmicron_ops = {
.inherits = &ata_bmdma_port_ops,
.prereset = jmicron_pre_reset,
};
/**
* jmicron_init_one - Register Jmicron ATA PCI device with kernel services
* @pdev: PCI device to register
* @id: PCI device ID
*
* Called from kernel PCI layer.
*
* LOCKING:
* Inherited from PCI layer (may sleep).
*
* RETURNS:
* Zero on success, or -ERRNO value.
*/
static int jmicron_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
{
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
.port_ops = &jmicron_ops,
};
const struct ata_port_info *ppi[] = { &info, NULL };
return ata_pci_bmdma_init_one(pdev, ppi, &jmicron_sht, NULL, 0);
}
static const struct pci_device_id jmicron_pci_tbl[] = {
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 0 },
{ } /* terminate list */
};
static struct pci_driver jmicron_pci_driver = {
.name = DRV_NAME,
.id_table = jmicron_pci_tbl,
.probe = jmicron_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
};
module_pci_driver(jmicron_pci_driver);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("SCSI low-level driver for Jmicron PATA ports");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, jmicron_pci_tbl);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_jmicron.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/libata.h>
#include <linux/cdrom.h>
#include <linux/pm_runtime.h>
#include <linux/module.h>
#include <linux/pm_qos.h>
#include <scsi/scsi_device.h>
#include "libata.h"
static int zpodd_poweroff_delay = 30; /* 30 seconds for power off delay */
module_param(zpodd_poweroff_delay, int, 0644);
MODULE_PARM_DESC(zpodd_poweroff_delay, "Poweroff delay for ZPODD in seconds");
enum odd_mech_type {
ODD_MECH_TYPE_SLOT,
ODD_MECH_TYPE_DRAWER,
ODD_MECH_TYPE_UNSUPPORTED,
};
struct zpodd {
enum odd_mech_type mech_type; /* init during probe, RO afterwards */
struct ata_device *dev;
/* The following fields are synchronized by PM core. */
bool from_notify; /* resumed as a result of
* acpi wake notification */
bool zp_ready; /* ZP ready state */
unsigned long last_ready; /* last ZP ready timestamp */
bool zp_sampled; /* ZP ready state sampled */
bool powered_off; /* ODD is powered off
* during suspend */
};
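/* Eject the tray by issuing an ATAPI START STOP UNIT command with LoEj set. */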
static int eject_tray(struct ata_device *dev)
{
struct ata_taskfile tf;
static const char cdb[ATAPI_CDB_LEN] = { GPCMD_START_STOP_UNIT,
0, 0, 0,
0x02, /* LoEj */
0, 0, 0, 0, 0, 0, 0,
};
ata_tf_init(dev, &tf);
tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
tf.command = ATA_CMD_PACKET;
tf.protocol = ATAPI_PROT_NODATA;
return ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
}
/* Per the spec, only slot type and drawer type ODD can be supported */
static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
{
char *buf;
unsigned int ret;
struct rm_feature_desc *desc;
struct ata_taskfile tf;
static const char cdb[ATAPI_CDB_LEN] = { GPCMD_GET_CONFIGURATION,
2, /* only 1 feature descriptor requested */
0, 3, /* 3, removable medium feature */
0, 0, 0,/* reserved */
0, 16,
0, 0, 0,
};
buf = kzalloc(16, GFP_KERNEL);
if (!buf)
return ODD_MECH_TYPE_UNSUPPORTED;
desc = (void *)(buf + 8);
ata_tf_init(dev, &tf);
tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
tf.command = ATA_CMD_PACKET;
tf.protocol = ATAPI_PROT_PIO;
tf.lbam = 16;
ret = ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
buf, 16, 0);
if (ret) {
kfree(buf);
return ODD_MECH_TYPE_UNSUPPORTED;
}
if (be16_to_cpu(desc->feature_code) != 3) {
kfree(buf);
return ODD_MECH_TYPE_UNSUPPORTED;
}
if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1) {
kfree(buf);
return ODD_MECH_TYPE_SLOT;
} else if (desc->mech_type == 1 && desc->load == 0 &&
desc->eject == 1) {
kfree(buf);
return ODD_MECH_TYPE_DRAWER;
} else {
kfree(buf);
return ODD_MECH_TYPE_UNSUPPORTED;
}
}
/* Test if ODD is zero power ready by sense code */
static bool zpready(struct ata_device *dev)
{
u8 sense_key, *sense_buf;
unsigned int ret, asc, ascq, add_len;
struct zpodd *zpodd = dev->zpodd;
ret = atapi_eh_tur(dev, &sense_key);
if (!ret || sense_key != NOT_READY)
return false;
sense_buf = dev->link->ap->sector_buf;
ret = atapi_eh_request_sense(dev, sense_buf, sense_key);
if (ret)
return false;
/* sense valid */
if ((sense_buf[0] & 0x7f) != 0x70)
return false;
add_len = sense_buf[7];
/* has asc and ascq */
if (add_len < 6)
return false;
asc = sense_buf[12];
ascq = sense_buf[13];
if (zpodd->mech_type == ODD_MECH_TYPE_SLOT)
/* no media inside */
return asc == 0x3a;
else
/* no media inside and door closed */
return asc == 0x3a && ascq == 0x01;
}
/*
* Update the zpodd->zp_ready field. This field will only be set
* if the ODD has stayed in ZP ready state for zpodd_poweroff_delay
* time, and will be used to decide if power off is allowed. If it
* is set, it will be cleared during resume from powered off state.
*/
void zpodd_on_suspend(struct ata_device *dev)
{
struct zpodd *zpodd = dev->zpodd;
unsigned long expires;
if (!zpready(dev)) {
zpodd->zp_sampled = false;
zpodd->zp_ready = false;
return;
}
if (!zpodd->zp_sampled) {
zpodd->zp_sampled = true;
zpodd->last_ready = jiffies;
return;
}
expires = zpodd->last_ready +
msecs_to_jiffies(zpodd_poweroff_delay * 1000);
if (time_before(jiffies, expires))
return;
zpodd->zp_ready = true;
}
bool zpodd_zpready(struct ata_device *dev)
{
struct zpodd *zpodd = dev->zpodd;
return zpodd->zp_ready;
}
/*
* Enable runtime wake capability through ACPI and set the powered_off flag;
* this flag will be used during resume to decide what operations need to be
* taken.
*
* Also, media poll needs to be silenced, so that it doesn't bring the ODD
* back to full power state every few seconds.
*/
void zpodd_enable_run_wake(struct ata_device *dev)
{
struct zpodd *zpodd = dev->zpodd;
sdev_disable_disk_events(dev->sdev);
zpodd->powered_off = true;
acpi_pm_set_device_wakeup(&dev->tdev, true);
}
/* Disable runtime wake capability if it is enabled */
void zpodd_disable_run_wake(struct ata_device *dev)
{
struct zpodd *zpodd = dev->zpodd;
if (zpodd->powered_off)
acpi_pm_set_device_wakeup(&dev->tdev, false);
}
/*
* Post power on processing after the ODD has been recovered. If the
* ODD wasn't powered off during suspend, it doesn't do anything.
*
* For drawer type ODD, if it is powered on because the user pressed the
* eject button, the tray needs to be ejected. This can only be done
* after the ODD has been recovered, i.e. link is initialized and
* device is able to process NON_DATA PIO command, as eject needs to
* send command for the ODD to process.
*
* The from_notify flag, set in the wake notification handler
* zpodd_wake_dev, indicates whether power on is due to the user's action.
*
* For both types of ODD, several fields need to be reset.
*/
void zpodd_post_poweron(struct ata_device *dev)
{
struct zpodd *zpodd = dev->zpodd;
if (!zpodd->powered_off)
return;
zpodd->powered_off = false;
if (zpodd->from_notify) {
zpodd->from_notify = false;
if (zpodd->mech_type == ODD_MECH_TYPE_DRAWER)
eject_tray(dev);
}
zpodd->zp_sampled = false;
zpodd->zp_ready = false;
sdev_enable_disk_events(dev->sdev);
}
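/*
 * ACPI wake notification handler: if a device-wake notification arrives
 * while the ODD is runtime suspended, record that the wakeup came from the
 * notification and resume the device.
 */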
static void zpodd_wake_dev(acpi_handle handle, u32 event, void *context)
{
struct ata_device *ata_dev = context;
struct zpodd *zpodd = ata_dev->zpodd;
struct device *dev = &ata_dev->sdev->sdev_gendev;
if (event == ACPI_NOTIFY_DEVICE_WAKE && pm_runtime_suspended(dev)) {
zpodd->from_notify = true;
pm_runtime_resume(dev);
}
}
static void ata_acpi_add_pm_notifier(struct ata_device *dev)
{
acpi_handle handle = ata_dev_acpi_handle(dev);
acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
zpodd_wake_dev, dev);
}
static void ata_acpi_remove_pm_notifier(struct ata_device *dev)
{
acpi_handle handle = ata_dev_acpi_handle(dev);
acpi_remove_notify_handler(handle, ACPI_SYSTEM_NOTIFY, zpodd_wake_dev);
}
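/*
 * Probe for ZPODD support: the device must have an ACPI companion that can
 * be powered off and must use a supported (slot or drawer) mech type. On
 * success, allocate the zpodd state, register the ACPI PM notifier and
 * expose the PM QoS flags attribute.
 */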
void zpodd_init(struct ata_device *dev)
{
struct acpi_device *adev = ACPI_COMPANION(&dev->tdev);
enum odd_mech_type mech_type;
struct zpodd *zpodd;
if (dev->zpodd || !adev || !acpi_device_can_poweroff(adev))
return;
mech_type = zpodd_get_mech_type(dev);
if (mech_type == ODD_MECH_TYPE_UNSUPPORTED)
return;
zpodd = kzalloc(sizeof(struct zpodd), GFP_KERNEL);
if (!zpodd)
return;
zpodd->mech_type = mech_type;
ata_acpi_add_pm_notifier(dev);
zpodd->dev = dev;
dev->zpodd = zpodd;
dev_pm_qos_expose_flags(&dev->tdev, 0);
}
void zpodd_exit(struct ata_device *dev)
{
ata_acpi_remove_pm_notifier(dev);
kfree(dev->zpodd);
dev->zpodd = NULL;
}
| linux-master | drivers/ata/libata-zpodd.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* pata_cmd64x.c - CMD64x PATA for new ATA layer
* (C) 2005 Red Hat Inc
* Alan Cox <[email protected]>
* (C) 2009-2010 Bartlomiej Zolnierkiewicz
* (C) 2012 MontaVista Software, LLC <[email protected]>
*
* Based upon
* linux/drivers/ide/pci/cmd64x.c Version 1.30 Sept 10, 2002
*
* cmd64x.c: Enable interrupts at initialization time on Ultra/PCI machines.
* Note, this driver is not used at all on other systems because
* there the "BIOS" has done all of the following already.
* Due to massive hardware bugs, UltraDMA is only supported
* on the 646U2 and not on the 646U.
*
* Copyright (C) 1998 Eddie C. Dost ([email protected])
* Copyright (C) 1998 David S. Miller ([email protected])
*
* Copyright (C) 1999-2002 Andre Hedrick <[email protected]>
*
* TODO
* Testing work
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_cmd64x"
#define DRV_VERSION "0.2.18"
/*
* CMD64x specific registers definition.
*/
enum {
CFR = 0x50,
CFR_INTR_CH0 = 0x04,
CNTRL = 0x51,
CNTRL_CH0 = 0x04,
CNTRL_CH1 = 0x08,
CMDTIM = 0x52,
ARTTIM0 = 0x53,
DRWTIM0 = 0x54,
ARTTIM1 = 0x55,
DRWTIM1 = 0x56,
ARTTIM23 = 0x57,
ARTTIM23_DIS_RA2 = 0x04,
ARTTIM23_DIS_RA3 = 0x08,
ARTTIM23_INTR_CH1 = 0x10,
DRWTIM2 = 0x58,
BRST = 0x59,
DRWTIM3 = 0x5b,
BMIDECR0 = 0x70,
MRDMODE = 0x71,
MRDMODE_INTR_CH0 = 0x04,
MRDMODE_INTR_CH1 = 0x08,
BMIDESR0 = 0x72,
UDIDETCR0 = 0x73,
DTPR0 = 0x74,
BMIDECR1 = 0x78,
BMIDECSR = 0x79,
UDIDETCR1 = 0x7B,
DTPR1 = 0x7C
};
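/* Read this channel's 80-wire cable detect bit from BMIDECSR. */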
static int cmd648_cable_detect(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u8 r;
/* Check cable detect bits */
pci_read_config_byte(pdev, BMIDECSR, &r);
if (r & (1 << ap->port_no))
return ATA_CBL_PATA80;
return ATA_CBL_PATA40;
}
/**
* cmd64x_set_timing - set PIO and MWDMA timing
* @ap: ATA interface
* @adev: ATA device
* @mode: mode
*
* Called to do the PIO and MWDMA mode setup.
*/
static void cmd64x_set_timing(struct ata_port *ap, struct ata_device *adev, u8 mode)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
struct ata_timing t;
const unsigned long T = 1000000 / 33;
const u8 setup_data[] = { 0x40, 0x40, 0x40, 0x80, 0x00 };
u8 reg;
/* Port layout is not logical so use a table */
const u8 arttim_port[2][2] = {
{ ARTTIM0, ARTTIM1 },
{ ARTTIM23, ARTTIM23 }
};
const u8 drwtim_port[2][2] = {
{ DRWTIM0, DRWTIM1 },
{ DRWTIM2, DRWTIM3 }
};
int arttim = arttim_port[ap->port_no][adev->devno];
int drwtim = drwtim_port[ap->port_no][adev->devno];
/* ata_timing_compute is smart and will produce timings for MWDMA
that don't violate the drive's PIO capabilities. */
if (ata_timing_compute(adev, mode, &t, T, 0) < 0) {
ata_dev_err(adev, DRV_NAME ": mode computation failed.\n");
return;
}
if (ap->port_no) {
/* Slave has shared address setup */
struct ata_device *pair = ata_dev_pair(adev);
if (pair) {
struct ata_timing tp;
ata_timing_compute(pair, pair->pio_mode, &tp, T, 0);
ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP);
}
}
ata_dev_dbg(adev, DRV_NAME ": active %d recovery %d setup %d.\n",
t.active, t.recover, t.setup);
if (t.recover > 16) {
t.active += t.recover - 16;
t.recover = 16;
}
if (t.active > 16)
t.active = 16;
/* Now convert the clocks into values we can actually stuff into
the chip */
if (t.recover == 16)
t.recover = 0;
else if (t.recover > 1)
t.recover--;
else
t.recover = 15;
if (t.setup > 4)
t.setup = 0xC0;
else
t.setup = setup_data[t.setup];
t.active &= 0x0F; /* 0 = 16 */
/* Load setup timing */
pci_read_config_byte(pdev, arttim, ®);
reg &= 0x3F;
reg |= t.setup;
pci_write_config_byte(pdev, arttim, reg);
/* Load active/recovery */
pci_write_config_byte(pdev, drwtim, (t.active << 4) | t.recover);
}
/**
* cmd64x_set_piomode - set initial PIO mode data
* @ap: ATA interface
* @adev: ATA device
*
* Used when configuring the devices to set the PIO timings. All the
* actual work is done by the PIO/MWDMA setting helper
*/
static void cmd64x_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
cmd64x_set_timing(ap, adev, adev->pio_mode);
}
/**
* cmd64x_set_dmamode - set initial DMA mode data
* @ap: ATA interface
* @adev: ATA device
*
* Called to do the DMA mode setup.
*/
static void cmd64x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
static const u8 udma_data[] = {
0x30, 0x20, 0x10, 0x20, 0x10, 0x00
};
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u8 regU, regD;
int pciU = UDIDETCR0 + 8 * ap->port_no;
int pciD = BMIDESR0 + 8 * ap->port_no;
int shift = 2 * adev->devno;
pci_read_config_byte(pdev, pciD, ®D);
pci_read_config_byte(pdev, pciU, ®U);
/* DMA bits off */
regD &= ~(0x20 << adev->devno);
/* DMA control bits */
regU &= ~(0x30 << shift);
/* DMA timing bits */
regU &= ~(0x05 << adev->devno);
if (adev->dma_mode >= XFER_UDMA_0) {
/* Merge the timing value */
regU |= udma_data[adev->dma_mode - XFER_UDMA_0] << shift;
/* Merge the control bits */
regU |= 1 << adev->devno; /* UDMA on */
if (adev->dma_mode > XFER_UDMA_2) /* 15ns timing */
regU |= 4 << adev->devno;
} else {
regU &= ~ (1 << adev->devno); /* UDMA off */
cmd64x_set_timing(ap, adev, adev->dma_mode);
}
regD |= 0x20 << adev->devno;
pci_write_config_byte(pdev, pciU, regU);
pci_write_config_byte(pdev, pciD, regD);
}
/**
* cmd64x_sff_irq_check - check IDE interrupt
* @ap: ATA interface
*
* Check IDE interrupt in CFR/ARTTIM23 registers.
*/
static bool cmd64x_sff_irq_check(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int irq_mask = ap->port_no ? ARTTIM23_INTR_CH1 : CFR_INTR_CH0;
int irq_reg = ap->port_no ? ARTTIM23 : CFR;
u8 irq_stat;
/* NOTE: reading the register should clear the interrupt */
pci_read_config_byte(pdev, irq_reg, &irq_stat);
return irq_stat & irq_mask;
}
/**
* cmd64x_sff_irq_clear - clear IDE interrupt
* @ap: ATA interface
*
* Clear IDE interrupt in CFR/ARTTIM23 and DMA status registers.
*/
static void cmd64x_sff_irq_clear(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int irq_reg = ap->port_no ? ARTTIM23 : CFR;
u8 irq_stat;
ata_bmdma_irq_clear(ap);
/* Reading the register should be enough to clear the interrupt */
pci_read_config_byte(pdev, irq_reg, &irq_stat);
}
/**
* cmd648_sff_irq_check - check IDE interrupt
* @ap: ATA interface
*
* Check IDE interrupt in MRDMODE register.
*/
static bool cmd648_sff_irq_check(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
unsigned long base = pci_resource_start(pdev, 4);
int irq_mask = ap->port_no ? MRDMODE_INTR_CH1 : MRDMODE_INTR_CH0;
u8 mrdmode = inb(base + 1);
return mrdmode & irq_mask;
}
/**
* cmd648_sff_irq_clear - clear IDE interrupt
* @ap: ATA interface
*
* Clear IDE interrupt in MRDMODE and DMA status registers.
*/
static void cmd648_sff_irq_clear(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
unsigned long base = pci_resource_start(pdev, 4);
int irq_mask = ap->port_no ? MRDMODE_INTR_CH1 : MRDMODE_INTR_CH0;
u8 mrdmode;
ata_bmdma_irq_clear(ap);
/* Clear this port's interrupt bit (leaving the other port alone) */
mrdmode = inb(base + 1);
mrdmode &= ~(MRDMODE_INTR_CH0 | MRDMODE_INTR_CH1);
outb(mrdmode | irq_mask, base + 1);
}
/**
* cmd646r1_bmdma_stop - DMA stop callback
* @qc: Command in progress
*
* Stub for now while investigating the r1 quirk in the old driver.
*/
static void cmd646r1_bmdma_stop(struct ata_queued_cmd *qc)
{
ata_bmdma_stop(qc);
}
static const struct scsi_host_template cmd64x_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static const struct ata_port_operations cmd64x_base_ops = {
.inherits = &ata_bmdma_port_ops,
.set_piomode = cmd64x_set_piomode,
.set_dmamode = cmd64x_set_dmamode,
};
static struct ata_port_operations cmd64x_port_ops = {
.inherits = &cmd64x_base_ops,
.sff_irq_check = cmd64x_sff_irq_check,
.sff_irq_clear = cmd64x_sff_irq_clear,
.cable_detect = ata_cable_40wire,
};
static struct ata_port_operations cmd646r1_port_ops = {
.inherits = &cmd64x_base_ops,
.sff_irq_check = cmd64x_sff_irq_check,
.sff_irq_clear = cmd64x_sff_irq_clear,
.bmdma_stop = cmd646r1_bmdma_stop,
.cable_detect = ata_cable_40wire,
};
static struct ata_port_operations cmd646r3_port_ops = {
.inherits = &cmd64x_base_ops,
.sff_irq_check = cmd648_sff_irq_check,
.sff_irq_clear = cmd648_sff_irq_clear,
.cable_detect = ata_cable_40wire,
};
static struct ata_port_operations cmd648_port_ops = {
.inherits = &cmd64x_base_ops,
.sff_irq_check = cmd648_sff_irq_check,
.sff_irq_clear = cmd648_sff_irq_clear,
.cable_detect = cmd648_cable_detect,
};
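/*
 * Common chip setup used at probe and resume time: program the PCI latency
 * timer, set up MRDMODE interrupt routing and memory-read-line enable, plus
 * a PPC-only UDMA control fixup carried over from the old IDE driver.
 */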
static void cmd64x_fixup(struct pci_dev *pdev)
{
u8 mrdmode;
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64);
pci_read_config_byte(pdev, MRDMODE, &mrdmode);
mrdmode &= ~0x30; /* IRQ set up */
mrdmode |= 0x02; /* Memory read line enable */
pci_write_config_byte(pdev, MRDMODE, mrdmode);
/* PPC specific fixup copied from old driver */
#ifdef CONFIG_PPC
pci_write_config_byte(pdev, UDIDETCR0, 0xF0);
#endif
}
static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
static const struct ata_port_info cmd_info[7] = {
{ /* CMD 643 - no UDMA */
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.port_ops = &cmd64x_port_ops
},
{ /* CMD 646 with broken UDMA */
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.port_ops = &cmd64x_port_ops
},
{ /* CMD 646U with broken UDMA */
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.port_ops = &cmd646r3_port_ops
},
{ /* CMD 646U2 with working UDMA */
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA2,
.port_ops = &cmd646r3_port_ops
},
{ /* CMD 646 rev 1 */
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.port_ops = &cmd646r1_port_ops
},
{ /* CMD 648 */
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA4,
.port_ops = &cmd648_port_ops
},
{ /* CMD 649 */
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
.port_ops = &cmd648_port_ops
}
};
const struct ata_port_info *ppi[] = {
&cmd_info[id->driver_data],
&cmd_info[id->driver_data],
NULL
};
u8 reg;
int rc;
struct pci_dev *bridge = pdev->bus->self;
/* mobility split bridges don't report enabled ports correctly */
int port_ok = !(bridge && bridge->vendor ==
PCI_VENDOR_ID_MOBILITY_ELECTRONICS);
/* all (with exceptions below) apart from 643 have CNTRL_CH0 bit */
int cntrl_ch0_ok = (id->driver_data != 0);
rc = pcim_enable_device(pdev);
if (rc)
return rc;
if (id->driver_data == 0) /* 643 */
ata_pci_bmdma_clear_simplex(pdev);
if (pdev->device == PCI_DEVICE_ID_CMD_646)
switch (pdev->revision) {
/* UDMA works since rev 5 */
default:
ppi[0] = &cmd_info[3];
ppi[1] = &cmd_info[3];
break;
/* Interrupts in MRDMODE since rev 3 */
case 3:
case 4:
ppi[0] = &cmd_info[2];
ppi[1] = &cmd_info[2];
break;
/* Rev 1 with other problems? */
case 1:
ppi[0] = &cmd_info[4];
ppi[1] = &cmd_info[4];
fallthrough;
/* Early revs have no CNTRL_CH0 */
case 2:
case 0:
cntrl_ch0_ok = 0;
break;
}
cmd64x_fixup(pdev);
/* check for enabled ports */
pci_read_config_byte(pdev, CNTRL, ®);
if (!port_ok)
dev_notice(&pdev->dev, "Mobility Bridge detected, ignoring CNTRL port enable/disable\n");
if (port_ok && cntrl_ch0_ok && !(reg & CNTRL_CH0)) {
dev_notice(&pdev->dev, "Primary port is disabled\n");
ppi[0] = &ata_dummy_port_info;
}
if (port_ok && !(reg & CNTRL_CH1)) {
dev_notice(&pdev->dev, "Secondary port is disabled\n");
ppi[1] = &ata_dummy_port_info;
}
return ata_pci_bmdma_init_one(pdev, ppi, &cmd64x_sht, NULL, 0);
}
#ifdef CONFIG_PM_SLEEP
static int cmd64x_reinit_one(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
int rc;
rc = ata_pci_device_do_resume(pdev);
if (rc)
return rc;
cmd64x_fixup(pdev);
ata_host_resume(host);
return 0;
}
#endif
static const struct pci_device_id cmd64x[] = {
{ PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_643), 0 },
{ PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_646), 1 },
{ PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_648), 5 },
{ PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_649), 6 },
{ },
};
static struct pci_driver cmd64x_pci_driver = {
.name = DRV_NAME,
.id_table = cmd64x,
.probe = cmd64x_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = cmd64x_reinit_one,
#endif
};
module_pci_driver(cmd64x_pci_driver);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for CMD64x series PATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, cmd64x);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_cmd64x.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* pata-cs5535.c - CS5535 PATA for new ATA layer
* (C) 2005-2006 Red Hat Inc
* Alan Cox <[email protected]>
*
* based upon cs5535.c from AMD <[email protected]> as cleaned up and
* made readable and Linux style by Wolfgang Zuleger <[email protected]>
* and Alexander Kiausch <[email protected]>
*
* Loosely based on the piix & svwks drivers.
*
* Documentation:
* Available from AMD web site.
* TODO
* Review errata to see if serializing is necessary
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/msr.h>
#define DRV_NAME "pata_cs5535"
#define DRV_VERSION "0.2.12"
/*
* The Geode (Aka Athlon GX now) uses an internal MSR based
* bus system for control. Demented but there you go.
*/
#define MSR_ATAC_BASE 0x51300000
#define ATAC_GLD_MSR_CAP (MSR_ATAC_BASE+0)
#define ATAC_GLD_MSR_CONFIG (MSR_ATAC_BASE+0x01)
#define ATAC_GLD_MSR_SMI (MSR_ATAC_BASE+0x02)
#define ATAC_GLD_MSR_ERROR (MSR_ATAC_BASE+0x03)
#define ATAC_GLD_MSR_PM (MSR_ATAC_BASE+0x04)
#define ATAC_GLD_MSR_DIAG (MSR_ATAC_BASE+0x05)
#define ATAC_IO_BAR (MSR_ATAC_BASE+0x08)
#define ATAC_RESET (MSR_ATAC_BASE+0x10)
#define ATAC_CH0D0_PIO (MSR_ATAC_BASE+0x20)
#define ATAC_CH0D0_DMA (MSR_ATAC_BASE+0x21)
#define ATAC_CH0D1_PIO (MSR_ATAC_BASE+0x22)
#define ATAC_CH0D1_DMA (MSR_ATAC_BASE+0x23)
#define ATAC_PCI_ABRTERR (MSR_ATAC_BASE+0x24)
#define ATAC_BM0_CMD_PRIM 0x00
#define ATAC_BM0_STS_PRIM 0x02
#define ATAC_BM0_PRD 0x04
#define CS5535_CABLE_DETECT 0x48
/**
* cs5535_cable_detect - detect cable type
* @ap: Port to detect on
*
* Perform cable detection for ATA66 capable cable. Return a libata
* cable type.
*/
static int cs5535_cable_detect(struct ata_port *ap)
{
u8 cable;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
pci_read_config_byte(pdev, CS5535_CABLE_DETECT, &cable);
if (cable & 1)
return ATA_CBL_PATA80;
else
return ATA_CBL_PATA40;
}
/**
* cs5535_set_piomode - PIO setup
* @ap: ATA interface
* @adev: device on the interface
*
* Set our PIO requirements. The CS5535 is pretty clean about all this
*/
static void cs5535_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
static const u16 pio_timings[5] = {
0xF7F4, 0xF173, 0x8141, 0x5131, 0x1131
};
static const u16 pio_cmd_timings[5] = {
0xF7F4, 0x53F3, 0x13F1, 0x5131, 0x1131
};
u32 reg, __maybe_unused dummy;
struct ata_device *pair = ata_dev_pair(adev);
int mode = adev->pio_mode - XFER_PIO_0;
int cmdmode = mode;
/* Command timing has to be for the lowest of the pair of devices */
if (pair) {
int pairmode = pair->pio_mode - XFER_PIO_0;
cmdmode = min(mode, pairmode);
/* Write the other drive timing register if it changed */
if (cmdmode < pairmode)
wrmsr(ATAC_CH0D0_PIO + 2 * pair->devno,
pio_cmd_timings[cmdmode] << 16 | pio_timings[pairmode], 0);
}
/* Write the drive timing register */
wrmsr(ATAC_CH0D0_PIO + 2 * adev->devno,
pio_cmd_timings[cmdmode] << 16 | pio_timings[mode], 0);
/* Set the PIO "format 1" bit in the DMA timing register */
rdmsr(ATAC_CH0D0_DMA + 2 * adev->devno, reg, dummy);
wrmsr(ATAC_CH0D0_DMA + 2 * adev->devno, reg | 0x80000000UL, 0);
}
/**
* cs5535_set_dmamode - DMA timing setup
* @ap: ATA interface
* @adev: Device being configured
*
*/
static void cs5535_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
static const u32 udma_timings[5] = {
0x7F7436A1, 0x7F733481, 0x7F723261, 0x7F713161, 0x7F703061
};
static const u32 mwdma_timings[3] = {
0x7F0FFFF3, 0x7F035352, 0x7F024241
};
u32 reg, __maybe_unused dummy;
int mode = adev->dma_mode;
rdmsr(ATAC_CH0D0_DMA + 2 * adev->devno, reg, dummy);
reg &= 0x80000000UL;
if (mode >= XFER_UDMA_0)
reg |= udma_timings[mode - XFER_UDMA_0];
else
reg |= mwdma_timings[mode - XFER_MW_DMA_0];
wrmsr(ATAC_CH0D0_DMA + 2 * adev->devno, reg, 0);
}
static const struct scsi_host_template cs5535_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static struct ata_port_operations cs5535_port_ops = {
.inherits = &ata_bmdma_port_ops,
.cable_detect = cs5535_cable_detect,
.set_piomode = cs5535_set_piomode,
.set_dmamode = cs5535_set_dmamode,
};
/**
* cs5535_init_one - Initialise a CS5535
* @dev: PCI device
* @id: Entry in match table
*
* Install a driver for the newly found CS5535 companion chip. Most of
* this is just housekeeping. We have to set the chip up correctly and
* turn off various bits of emulation magic.
*/
static int cs5535_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA4,
.port_ops = &cs5535_port_ops
};
const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info };
return ata_pci_bmdma_init_one(dev, ppi, &cs5535_sht, NULL, 0);
}
static const struct pci_device_id cs5535[] = {
{ PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_CS5535_IDE), },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5535_IDE), },
{ },
};
static struct pci_driver cs5535_pci_driver = {
.name = DRV_NAME,
.id_table = cs5535,
.probe = cs5535_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
};
module_pci_driver(cs5535_pci_driver);
MODULE_AUTHOR("Alan Cox, Jens Altmann, Wolfgan Zuleger, Alexander Kiausch");
MODULE_DESCRIPTION("low-level driver for the NS/AMD 5535");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, cs5535);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_cs5535.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* libahci.c - Common AHCI SATA low-level routines
*
* Maintained by: Tejun Heo <[email protected]>
* Please ALWAYS copy [email protected]
* on emails.
*
* Copyright 2004-2005 Red Hat, Inc.
*
* libata documentation is available via 'make {ps|pdf}docs',
* as Documentation/driver-api/libata.rst
*
* AHCI hardware documentation:
* http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
* http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
*/
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
#include <linux/pci.h>
#include "ahci.h"
#include "libata.h"
static int ahci_skip_host_reset;
int ahci_ignore_sss;
EXPORT_SYMBOL_GPL(ahci_ignore_sss);
module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");
module_param_named(ignore_sss, ahci_ignore_sss, int, 0444);
MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)");
static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
unsigned hints);
static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
size_t size);
static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
ssize_t size);
static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static void ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
static void ahci_qc_ncq_fill_rtf(struct ata_port *ap, u64 done_mask);
static int ahci_port_start(struct ata_port *ap);
static void ahci_port_stop(struct ata_port *ap);
static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc);
static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc);
static void ahci_freeze(struct ata_port *ap);
static void ahci_thaw(struct ata_port *ap);
static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep);
static void ahci_enable_fbs(struct ata_port *ap);
static void ahci_disable_fbs(struct ata_port *ap);
static void ahci_pmp_attach(struct ata_port *ap);
static void ahci_pmp_detach(struct ata_port *ap);
static int ahci_softreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static int ahci_pmp_retry_softreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static int ahci_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static void ahci_postreset(struct ata_link *link, unsigned int *class);
static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
static void ahci_dev_config(struct ata_device *dev);
#ifdef CONFIG_PM
static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
#endif
static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
static ssize_t ahci_activity_store(struct ata_device *dev,
enum sw_activity val);
static void ahci_init_sw_activity(struct ata_link *link);
static ssize_t ahci_show_host_caps(struct device *dev,
struct device_attribute *attr, char *buf);
static ssize_t ahci_show_host_cap2(struct device *dev,
struct device_attribute *attr, char *buf);
static ssize_t ahci_show_host_version(struct device *dev,
struct device_attribute *attr, char *buf);
static ssize_t ahci_show_port_cmd(struct device *dev,
struct device_attribute *attr, char *buf);
static ssize_t ahci_read_em_buffer(struct device *dev,
struct device_attribute *attr, char *buf);
static ssize_t ahci_store_em_buffer(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size);
static ssize_t ahci_show_em_supported(struct device *dev,
struct device_attribute *attr, char *buf);
static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance);
static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
static DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL);
static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO,
ahci_read_em_buffer, ahci_store_em_buffer);
static DEVICE_ATTR(em_message_supported, S_IRUGO, ahci_show_em_supported, NULL);
static struct attribute *ahci_shost_attrs[] = {
&dev_attr_link_power_management_policy.attr,
&dev_attr_em_message_type.attr,
&dev_attr_em_message.attr,
&dev_attr_ahci_host_caps.attr,
&dev_attr_ahci_host_cap2.attr,
&dev_attr_ahci_host_version.attr,
&dev_attr_ahci_port_cmd.attr,
&dev_attr_em_buffer.attr,
&dev_attr_em_message_supported.attr,
NULL
};
static const struct attribute_group ahci_shost_attr_group = {
.attrs = ahci_shost_attrs
};
const struct attribute_group *ahci_shost_groups[] = {
&ahci_shost_attr_group,
NULL
};
EXPORT_SYMBOL_GPL(ahci_shost_groups);
static struct attribute *ahci_sdev_attrs[] = {
&dev_attr_sw_activity.attr,
&dev_attr_unload_heads.attr,
&dev_attr_ncq_prio_supported.attr,
&dev_attr_ncq_prio_enable.attr,
NULL
};
static const struct attribute_group ahci_sdev_attr_group = {
.attrs = ahci_sdev_attrs
};
const struct attribute_group *ahci_sdev_groups[] = {
&ahci_sdev_attr_group,
NULL
};
EXPORT_SYMBOL_GPL(ahci_sdev_groups);
struct ata_port_operations ahci_ops = {
.inherits = &sata_pmp_port_ops,
.qc_defer = ahci_pmp_qc_defer,
.qc_prep = ahci_qc_prep,
.qc_issue = ahci_qc_issue,
.qc_fill_rtf = ahci_qc_fill_rtf,
.qc_ncq_fill_rtf = ahci_qc_ncq_fill_rtf,
.freeze = ahci_freeze,
.thaw = ahci_thaw,
.softreset = ahci_softreset,
.hardreset = ahci_hardreset,
.postreset = ahci_postreset,
.pmp_softreset = ahci_softreset,
.error_handler = ahci_error_handler,
.post_internal_cmd = ahci_post_internal_cmd,
.dev_config = ahci_dev_config,
.scr_read = ahci_scr_read,
.scr_write = ahci_scr_write,
.pmp_attach = ahci_pmp_attach,
.pmp_detach = ahci_pmp_detach,
.set_lpm = ahci_set_lpm,
.em_show = ahci_led_show,
.em_store = ahci_led_store,
.sw_activity_show = ahci_activity_show,
.sw_activity_store = ahci_activity_store,
.transmit_led_message = ahci_transmit_led_message,
#ifdef CONFIG_PM
.port_suspend = ahci_port_suspend,
.port_resume = ahci_port_resume,
#endif
.port_start = ahci_port_start,
.port_stop = ahci_port_stop,
};
EXPORT_SYMBOL_GPL(ahci_ops);
struct ata_port_operations ahci_pmp_retry_srst_ops = {
.inherits = &ahci_ops,
.softreset = ahci_pmp_retry_softreset,
};
EXPORT_SYMBOL_GPL(ahci_pmp_retry_srst_ops);
static bool ahci_em_messages __read_mostly = true;
module_param(ahci_em_messages, bool, 0444);
/* add other LED protocol types when they become supported */
MODULE_PARM_DESC(ahci_em_messages,
"AHCI Enclosure Management Message control (0 = off, 1 = on)");
/* device sleep idle timeout in ms */
static int devslp_idle_timeout __read_mostly = 1000;
module_param(devslp_idle_timeout, int, 0644);
MODULE_PARM_DESC(devslp_idle_timeout, "device sleep idle timeout");
static void ahci_enable_ahci(void __iomem *mmio)
{
int i;
u32 tmp;
/* turn on AHCI_EN */
tmp = readl(mmio + HOST_CTL);
if (tmp & HOST_AHCI_EN)
return;
/* Some controllers need AHCI_EN to be written multiple times.
* Try a few times before giving up.
*/
for (i = 0; i < 5; i++) {
tmp |= HOST_AHCI_EN;
writel(tmp, mmio + HOST_CTL);
tmp = readl(mmio + HOST_CTL); /* flush && sanity check */
if (tmp & HOST_AHCI_EN)
return;
msleep(10);
}
WARN_ON(1);
}
/**
* ahci_rpm_get_port - Make sure the port is powered on
* @ap: Port to power on
*
* Whenever there is a need to access the AHCI host registers outside of
* normal execution paths, call this function to make sure the host is
* actually powered on.
*/
static int ahci_rpm_get_port(struct ata_port *ap)
{
return pm_runtime_get_sync(ap->dev);
}
/**
* ahci_rpm_put_port - Undoes ahci_rpm_get_port()
* @ap: Port to power down
*
* Undoes ahci_rpm_get_port() and possibly powers down the AHCI host
* if it has no more active users.
*/
static void ahci_rpm_put_port(struct ata_port *ap)
{
pm_runtime_put(ap->dev);
}
static ssize_t ahci_show_host_caps(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct ata_port *ap = ata_shost_to_port(shost);
struct ahci_host_priv *hpriv = ap->host->private_data;
return sprintf(buf, "%x\n", hpriv->cap);
}
static ssize_t ahci_show_host_cap2(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct ata_port *ap = ata_shost_to_port(shost);
struct ahci_host_priv *hpriv = ap->host->private_data;
return sprintf(buf, "%x\n", hpriv->cap2);
}
static ssize_t ahci_show_host_version(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct ata_port *ap = ata_shost_to_port(shost);
struct ahci_host_priv *hpriv = ap->host->private_data;
return sprintf(buf, "%x\n", hpriv->version);
}
static ssize_t ahci_show_port_cmd(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct ata_port *ap = ata_shost_to_port(shost);
void __iomem *port_mmio = ahci_port_base(ap);
ssize_t ret;
ahci_rpm_get_port(ap);
ret = sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD));
ahci_rpm_put_port(ap);
return ret;
}
static ssize_t ahci_read_em_buffer(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct ata_port *ap = ata_shost_to_port(shost);
struct ahci_host_priv *hpriv = ap->host->private_data;
void __iomem *mmio = hpriv->mmio;
void __iomem *em_mmio = mmio + hpriv->em_loc;
u32 em_ctl, msg;
unsigned long flags;
size_t count;
int i;
ahci_rpm_get_port(ap);
spin_lock_irqsave(ap->lock, flags);
em_ctl = readl(mmio + HOST_EM_CTL);
if (!(ap->flags & ATA_FLAG_EM) || em_ctl & EM_CTL_XMT ||
!(hpriv->em_msg_type & EM_MSG_TYPE_SGPIO)) {
spin_unlock_irqrestore(ap->lock, flags);
ahci_rpm_put_port(ap);
return -EINVAL;
}
if (!(em_ctl & EM_CTL_MR)) {
spin_unlock_irqrestore(ap->lock, flags);
ahci_rpm_put_port(ap);
return -EAGAIN;
}
if (!(em_ctl & EM_CTL_SMB))
em_mmio += hpriv->em_buf_sz;
count = hpriv->em_buf_sz;
/* the count should not be larger than PAGE_SIZE */
if (count > PAGE_SIZE) {
if (printk_ratelimit())
ata_port_warn(ap,
"EM read buffer size too large: "
"buffer size %u, page size %lu\n",
hpriv->em_buf_sz, PAGE_SIZE);
count = PAGE_SIZE;
}
for (i = 0; i < count; i += 4) {
msg = readl(em_mmio + i);
buf[i] = msg & 0xff;
buf[i + 1] = (msg >> 8) & 0xff;
buf[i + 2] = (msg >> 16) & 0xff;
buf[i + 3] = (msg >> 24) & 0xff;
}
spin_unlock_irqrestore(ap->lock, flags);
ahci_rpm_put_port(ap);
return i;
}
static ssize_t ahci_store_em_buffer(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct ata_port *ap = ata_shost_to_port(shost);
struct ahci_host_priv *hpriv = ap->host->private_data;
void __iomem *mmio = hpriv->mmio;
void __iomem *em_mmio = mmio + hpriv->em_loc;
const unsigned char *msg_buf = buf;
u32 em_ctl, msg;
unsigned long flags;
int i;
/* check size validity */
if (!(ap->flags & ATA_FLAG_EM) ||
!(hpriv->em_msg_type & EM_MSG_TYPE_SGPIO) ||
size % 4 || size > hpriv->em_buf_sz)
return -EINVAL;
ahci_rpm_get_port(ap);
spin_lock_irqsave(ap->lock, flags);
em_ctl = readl(mmio + HOST_EM_CTL);
if (em_ctl & EM_CTL_TM) {
spin_unlock_irqrestore(ap->lock, flags);
ahci_rpm_put_port(ap);
return -EBUSY;
}
for (i = 0; i < size; i += 4) {
msg = msg_buf[i] | msg_buf[i + 1] << 8 |
msg_buf[i + 2] << 16 | msg_buf[i + 3] << 24;
writel(msg, em_mmio + i);
}
writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
spin_unlock_irqrestore(ap->lock, flags);
ahci_rpm_put_port(ap);
return size;
}
static ssize_t ahci_show_em_supported(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct ata_port *ap = ata_shost_to_port(shost);
struct ahci_host_priv *hpriv = ap->host->private_data;
void __iomem *mmio = hpriv->mmio;
u32 em_ctl;
ahci_rpm_get_port(ap);
em_ctl = readl(mmio + HOST_EM_CTL);
ahci_rpm_put_port(ap);
return sprintf(buf, "%s%s%s%s\n",
em_ctl & EM_CTL_LED ? "led " : "",
em_ctl & EM_CTL_SAFTE ? "saf-te " : "",
em_ctl & EM_CTL_SES ? "ses-2 " : "",
em_ctl & EM_CTL_SGPIO ? "sgpio " : "");
}
/**
* ahci_save_initial_config - Save and fixup initial config values
* @dev: target AHCI device
* @hpriv: host private area to store config values
*
* Some registers containing configuration info might be setup by
* BIOS and might be cleared on reset. This function saves the
* initial values of those registers into @hpriv such that they
* can be restored after controller reset.
*
* If inconsistent, config values are fixed up by this function.
*
* If it is not set already this function sets hpriv->start_engine to
* ahci_start_engine.
*
* LOCKING:
* None.
*/
void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
{
void __iomem *mmio = hpriv->mmio;
void __iomem *port_mmio;
unsigned long port_map;
u32 cap, cap2, vers;
int i;
/* make sure AHCI mode is enabled before accessing CAP */
ahci_enable_ahci(mmio);
/*
* Values prefixed with saved_ are written back to the HBA and ports
* registers after reset. Values without are used for driver operation.
*/
/*
* Override HW-init HBA capability fields with the platform-specific
* values. The rest of the HBA capabilities are defined as Read-only
* and can't be modified in CSR anyway.
*/
cap = readl(mmio + HOST_CAP);
if (hpriv->saved_cap)
cap = (cap & ~(HOST_CAP_SSS | HOST_CAP_MPS)) | hpriv->saved_cap;
hpriv->saved_cap = cap;
/* CAP2 register is only defined for AHCI 1.2 and later */
vers = readl(mmio + HOST_VERSION);
if ((vers >> 16) > 1 ||
((vers >> 16) == 1 && (vers & 0xFFFF) >= 0x200))
hpriv->saved_cap2 = cap2 = readl(mmio + HOST_CAP2);
else
hpriv->saved_cap2 = cap2 = 0;
/* some chips have errata preventing 64bit use */
if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
dev_info(dev, "controller can't do 64bit DMA, forcing 32bit\n");
cap &= ~HOST_CAP_64;
}
if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
dev_info(dev, "controller can't do NCQ, turning off CAP_NCQ\n");
cap &= ~HOST_CAP_NCQ;
}
if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
dev_info(dev, "controller can do NCQ, turning on CAP_NCQ\n");
cap |= HOST_CAP_NCQ;
}
if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
dev_info(dev, "controller can't do PMP, turning off CAP_PMP\n");
cap &= ~HOST_CAP_PMP;
}
if ((cap & HOST_CAP_SNTF) && (hpriv->flags & AHCI_HFLAG_NO_SNTF)) {
dev_info(dev,
"controller can't do SNTF, turning off CAP_SNTF\n");
cap &= ~HOST_CAP_SNTF;
}
if ((cap2 & HOST_CAP2_SDS) && (hpriv->flags & AHCI_HFLAG_NO_DEVSLP)) {
dev_info(dev,
"controller can't do DEVSLP, turning off\n");
cap2 &= ~HOST_CAP2_SDS;
cap2 &= ~HOST_CAP2_SADM;
}
if (!(cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_YES_FBS)) {
dev_info(dev, "controller can do FBS, turning on CAP_FBS\n");
cap |= HOST_CAP_FBS;
}
if ((cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_NO_FBS)) {
dev_info(dev, "controller can't do FBS, turning off CAP_FBS\n");
cap &= ~HOST_CAP_FBS;
}
if (!(cap & HOST_CAP_ALPM) && (hpriv->flags & AHCI_HFLAG_YES_ALPM)) {
dev_info(dev, "controller can do ALPM, turning on CAP_ALPM\n");
cap |= HOST_CAP_ALPM;
}
if ((cap & HOST_CAP_SXS) && (hpriv->flags & AHCI_HFLAG_NO_SXS)) {
dev_info(dev, "controller does not support SXS, disabling CAP_SXS\n");
cap &= ~HOST_CAP_SXS;
}
/* Override the HBA ports mapping if the platform needs it */
port_map = readl(mmio + HOST_PORTS_IMPL);
if (hpriv->saved_port_map && port_map != hpriv->saved_port_map) {
dev_info(dev, "forcing port_map 0x%lx -> 0x%x\n",
port_map, hpriv->saved_port_map);
port_map = hpriv->saved_port_map;
} else {
hpriv->saved_port_map = port_map;
}
if (hpriv->mask_port_map) {
dev_warn(dev, "masking port_map 0x%lx -> 0x%lx\n",
port_map,
port_map & hpriv->mask_port_map);
port_map &= hpriv->mask_port_map;
}
/* cross check port_map and cap.n_ports */
if (port_map) {
int map_ports = 0;
for (i = 0; i < AHCI_MAX_PORTS; i++)
if (port_map & (1 << i))
map_ports++;
/* If PI has more ports than n_ports, whine, clear
* port_map and let it be generated from n_ports.
*/
if (map_ports > ahci_nr_ports(cap)) {
dev_warn(dev,
"implemented port map (0x%lx) contains more ports than nr_ports (%u), using nr_ports\n",
port_map, ahci_nr_ports(cap));
port_map = 0;
}
}
/* fabricate port_map from cap.nr_ports for < AHCI 1.3 */
if (!port_map && vers < 0x10300) {
port_map = (1 << ahci_nr_ports(cap)) - 1;
dev_warn(dev, "forcing PORTS_IMPL to 0x%lx\n", port_map);
/* write the fixed up value to the PI register */
hpriv->saved_port_map = port_map;
}
/*
* Preserve the port capabilities defined by the platform. Note there
* is no need to store the rest of the P#.CMD fields since they are
* volatile.
*/
for_each_set_bit(i, &port_map, AHCI_MAX_PORTS) {
if (hpriv->saved_port_cap[i])
continue;
port_mmio = __ahci_port_base(hpriv, i);
hpriv->saved_port_cap[i] =
readl(port_mmio + PORT_CMD) & PORT_CMD_CAP;
}
/* record values to use during operation */
hpriv->cap = cap;
hpriv->cap2 = cap2;
hpriv->version = vers;
hpriv->port_map = port_map;
if (!hpriv->start_engine)
hpriv->start_engine = ahci_start_engine;
if (!hpriv->stop_engine)
hpriv->stop_engine = ahci_stop_engine;
if (!hpriv->irq_handler)
hpriv->irq_handler = ahci_single_level_irq_intr;
}
EXPORT_SYMBOL_GPL(ahci_save_initial_config);
/**
* ahci_restore_initial_config - Restore initial config
* @host: target ATA host
*
* Restore initial config stored by ahci_save_initial_config().
*
* LOCKING:
* None.
*/
static void ahci_restore_initial_config(struct ata_host *host)
{
struct ahci_host_priv *hpriv = host->private_data;
unsigned long port_map = hpriv->port_map;
void __iomem *mmio = hpriv->mmio;
void __iomem *port_mmio;
int i;
writel(hpriv->saved_cap, mmio + HOST_CAP);
if (hpriv->saved_cap2)
writel(hpriv->saved_cap2, mmio + HOST_CAP2);
writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
(void) readl(mmio + HOST_PORTS_IMPL); /* flush */
for_each_set_bit(i, &port_map, AHCI_MAX_PORTS) {
port_mmio = __ahci_port_base(hpriv, i);
writel(hpriv->saved_port_cap[i], port_mmio + PORT_CMD);
}
}
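/*
 * Map a libata SCR register index to its AHCI port register offset.
 * Returns 0 for unsupported registers (e.g. SCR_NOTIFICATION when the
 * controller lacks HOST_CAP_SNTF).
 */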
static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
{
static const int offset[] = {
[SCR_STATUS] = PORT_SCR_STAT,
[SCR_CONTROL] = PORT_SCR_CTL,
[SCR_ERROR] = PORT_SCR_ERR,
[SCR_ACTIVE] = PORT_SCR_ACT,
[SCR_NOTIFICATION] = PORT_SCR_NTF,
};
struct ahci_host_priv *hpriv = ap->host->private_data;
if (sc_reg < ARRAY_SIZE(offset) &&
(sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
return offset[sc_reg];
return 0;
}
static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
void __iomem *port_mmio = ahci_port_base(link->ap);
int offset = ahci_scr_offset(link->ap, sc_reg);
if (offset) {
*val = readl(port_mmio + offset);
return 0;
}
return -EINVAL;
}
static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
void __iomem *port_mmio = ahci_port_base(link->ap);
int offset = ahci_scr_offset(link->ap, sc_reg);
if (offset) {
writel(val, port_mmio + offset);
return 0;
}
return -EINVAL;
}
void ahci_start_engine(struct ata_port *ap)
{
void __iomem *port_mmio = ahci_port_base(ap);
u32 tmp;
/* start DMA */
tmp = readl(port_mmio + PORT_CMD);
tmp |= PORT_CMD_START;
writel(tmp, port_mmio + PORT_CMD);
readl(port_mmio + PORT_CMD); /* flush */
}
EXPORT_SYMBOL_GPL(ahci_start_engine);
int ahci_stop_engine(struct ata_port *ap)
{
void __iomem *port_mmio = ahci_port_base(ap);
struct ahci_host_priv *hpriv = ap->host->private_data;
u32 tmp;
/*
* On some controllers, stopping a port's DMA engine while the port
* is in ALPM state (partial or slumber) results in failures on
* subsequent DMA engine starts. For those controllers, put the
* port back in active state before stopping its DMA engine.
*/
if ((hpriv->flags & AHCI_HFLAG_WAKE_BEFORE_STOP) &&
(ap->link.lpm_policy > ATA_LPM_MAX_POWER) &&
ahci_set_lpm(&ap->link, ATA_LPM_MAX_POWER, ATA_LPM_WAKE_ONLY)) {
dev_err(ap->host->dev, "Failed to wake up port before engine stop\n");
return -EIO;
}
tmp = readl(port_mmio + PORT_CMD);
/* check if the HBA is idle */
if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
return 0;
/*
* Don't try to issue commands but return with ENODEV if the
* AHCI controller is not available anymore (e.g. due to PCIe hot
* unplugging). Otherwise a 500ms delay for each port is added.
*/
if (tmp == 0xffffffff) {
dev_err(ap->host->dev, "AHCI controller unavailable!\n");
return -ENODEV;
}
/* setting HBA to idle */
tmp &= ~PORT_CMD_START;
writel(tmp, port_mmio + PORT_CMD);
/* wait for engine to stop. This could be as long as 500 msec */
tmp = ata_wait_register(ap, port_mmio + PORT_CMD,
PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
if (tmp & PORT_CMD_LIST_ON)
return -EIO;
return 0;
}
EXPORT_SYMBOL_GPL(ahci_stop_engine);
void ahci_start_fis_rx(struct ata_port *ap)
{
void __iomem *port_mmio = ahci_port_base(ap);
struct ahci_host_priv *hpriv = ap->host->private_data;
struct ahci_port_priv *pp = ap->private_data;
u32 tmp;
/* set FIS registers */
if (hpriv->cap & HOST_CAP_64)
writel((pp->cmd_slot_dma >> 16) >> 16,
port_mmio + PORT_LST_ADDR_HI);
writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
if (hpriv->cap & HOST_CAP_64)
writel((pp->rx_fis_dma >> 16) >> 16,
port_mmio + PORT_FIS_ADDR_HI);
writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
/* enable FIS reception */
tmp = readl(port_mmio + PORT_CMD);
tmp |= PORT_CMD_FIS_RX;
writel(tmp, port_mmio + PORT_CMD);
/* flush */
readl(port_mmio + PORT_CMD);
}
EXPORT_SYMBOL_GPL(ahci_start_fis_rx);
static int ahci_stop_fis_rx(struct ata_port *ap)
{
void __iomem *port_mmio = ahci_port_base(ap);
u32 tmp;
/* disable FIS reception */
tmp = readl(port_mmio + PORT_CMD);
tmp &= ~PORT_CMD_FIS_RX;
writel(tmp, port_mmio + PORT_CMD);
/* wait for completion, spec says 500ms, give it 1000 */
tmp = ata_wait_register(ap, port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
PORT_CMD_FIS_ON, 10, 1000);
if (tmp & PORT_CMD_FIS_ON)
return -EBUSY;
return 0;
}
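/*
 * Power up the port: request device spin-up if staggered spin-up is
 * supported, then set the interface communication control to active to
 * wake the link.
 */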
static void ahci_power_up(struct ata_port *ap)
{
struct ahci_host_priv *hpriv = ap->host->private_data;
void __iomem *port_mmio = ahci_port_base(ap);
u32 cmd;
cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
/* spin up device */
if (hpriv->cap & HOST_CAP_SSS) {
cmd |= PORT_CMD_SPIN_UP;
writel(cmd, port_mmio + PORT_CMD);
}
/* wake up link */
writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
}
static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
unsigned int hints)
{
struct ata_port *ap = link->ap;
struct ahci_host_priv *hpriv = ap->host->private_data;
struct ahci_port_priv *pp = ap->private_data;
void __iomem *port_mmio = ahci_port_base(ap);
if (policy != ATA_LPM_MAX_POWER) {
/* wakeup flag only applies to the max power policy */
hints &= ~ATA_LPM_WAKE_ONLY;
/*
* Disable interrupts on Phy Ready. This keeps us from
* getting woken up due to spurious phy ready
* interrupts.
*/
pp->intr_mask &= ~PORT_IRQ_PHYRDY;
writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
sata_link_scr_lpm(link, policy, false);
}
if (hpriv->cap & HOST_CAP_ALPM) {
u32 cmd = readl(port_mmio + PORT_CMD);
if (policy == ATA_LPM_MAX_POWER || !(hints & ATA_LPM_HIPM)) {
if (!(hints & ATA_LPM_WAKE_ONLY))
cmd &= ~(PORT_CMD_ASP | PORT_CMD_ALPE);
cmd |= PORT_CMD_ICC_ACTIVE;
writel(cmd, port_mmio + PORT_CMD);
readl(port_mmio + PORT_CMD);
/* wait 10ms to be sure we've come out of LPM state */
ata_msleep(ap, 10);
if (hints & ATA_LPM_WAKE_ONLY)
return 0;
} else {
cmd |= PORT_CMD_ALPE;
if (policy == ATA_LPM_MIN_POWER)
cmd |= PORT_CMD_ASP;
else if (policy == ATA_LPM_MIN_POWER_WITH_PARTIAL)
cmd &= ~PORT_CMD_ASP;
/* write out new cmd value */
writel(cmd, port_mmio + PORT_CMD);
}
}
/* set aggressive device sleep */
if ((hpriv->cap2 & HOST_CAP2_SDS) &&
(hpriv->cap2 & HOST_CAP2_SADM) &&
(link->device->flags & ATA_DFLAG_DEVSLP)) {
if (policy == ATA_LPM_MIN_POWER ||
policy == ATA_LPM_MIN_POWER_WITH_PARTIAL)
ahci_set_aggressive_devslp(ap, true);
else
ahci_set_aggressive_devslp(ap, false);
}
if (policy == ATA_LPM_MAX_POWER) {
sata_link_scr_lpm(link, policy, false);
/* turn PHYRDY IRQ back on */
pp->intr_mask |= PORT_IRQ_PHYRDY;
writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}
return 0;
}
#ifdef CONFIG_PM
static void ahci_power_down(struct ata_port *ap)
{
struct ahci_host_priv *hpriv = ap->host->private_data;
void __iomem *port_mmio = ahci_port_base(ap);
u32 cmd, scontrol;
if (!(hpriv->cap & HOST_CAP_SSS))
return;
/* put device into listen mode, first set PxSCTL.DET to 0 */
scontrol = readl(port_mmio + PORT_SCR_CTL);
scontrol &= ~0xf;
writel(scontrol, port_mmio + PORT_SCR_CTL);
/* then set PxCMD.SUD to 0 */
cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
cmd &= ~PORT_CMD_SPIN_UP;
writel(cmd, port_mmio + PORT_CMD);
}
#endif
static void ahci_start_port(struct ata_port *ap)
{
struct ahci_host_priv *hpriv = ap->host->private_data;
struct ahci_port_priv *pp = ap->private_data;
struct ata_link *link;
struct ahci_em_priv *emp;
ssize_t rc;
int i;
/* enable FIS reception */
ahci_start_fis_rx(ap);
/* enable DMA */
if (!(hpriv->flags & AHCI_HFLAG_DELAY_ENGINE))
hpriv->start_engine(ap);
/* turn on LEDs */
if (ap->flags & ATA_FLAG_EM) {
ata_for_each_link(link, ap, EDGE) {
emp = &pp->em_priv[link->pmp];
/* EM Transmit bit may be busy during init */
for (i = 0; i < EM_MAX_RETRY; i++) {
rc = ap->ops->transmit_led_message(ap,
emp->led_state,
4);
/*
* If busy, give a breather but do not
* release EH ownership by using msleep()
* instead of ata_msleep(). EM Transmit
* bit is busy for the whole host and
* releasing ownership will cause other
* ports to fail the same way.
*/
if (rc == -EBUSY)
msleep(1);
else
break;
}
}
}
if (ap->flags & ATA_FLAG_SW_ACTIVITY)
ata_for_each_link(link, ap, EDGE)
ahci_init_sw_activity(link);
}
static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
{
int rc;
struct ahci_host_priv *hpriv = ap->host->private_data;
/* disable DMA */
rc = hpriv->stop_engine(ap);
if (rc) {
*emsg = "failed to stop engine";
return rc;
}
/* disable FIS reception */
rc = ahci_stop_fis_rx(ap);
if (rc) {
*emsg = "failed stop FIS RX";
return rc;
}
return 0;
}
int ahci_reset_controller(struct ata_host *host)
{
struct ahci_host_priv *hpriv = host->private_data;
void __iomem *mmio = hpriv->mmio;
u32 tmp;
/*
* We must be in AHCI mode, before using anything AHCI-specific, such
* as HOST_RESET.
*/
ahci_enable_ahci(mmio);
/* Global controller reset */
if (ahci_skip_host_reset) {
dev_info(host->dev, "Skipping global host reset\n");
return 0;
}
tmp = readl(mmio + HOST_CTL);
if (!(tmp & HOST_RESET)) {
writel(tmp | HOST_RESET, mmio + HOST_CTL);
readl(mmio + HOST_CTL); /* flush */
}
/*
* To perform host reset, OS should set HOST_RESET and poll until this
* bit is read to be "0". Reset must complete within 1 second, or the
* hardware should be considered fried.
*/
tmp = ata_wait_register(NULL, mmio + HOST_CTL, HOST_RESET,
HOST_RESET, 10, 1000);
if (tmp & HOST_RESET) {
dev_err(host->dev, "Controller reset failed (0x%x)\n",
tmp);
return -EIO;
}
/* Turn on AHCI mode */
ahci_enable_ahci(mmio);
/* Some registers might be cleared on reset. Restore initial values. */
if (!(hpriv->flags & AHCI_HFLAG_NO_WRITE_TO_RO))
ahci_restore_initial_config(host);
return 0;
}
EXPORT_SYMBOL_GPL(ahci_reset_controller);
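/*
 * Record activity on a link that has the software activity LED enabled and
 * arm the blink timer if it is not already pending.
 */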
static void ahci_sw_activity(struct ata_link *link)
{
struct ata_port *ap = link->ap;
struct ahci_port_priv *pp = ap->private_data;
struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
return;
emp->activity++;
if (!timer_pending(&emp->timer))
mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
}
static void ahci_sw_activity_blink(struct timer_list *t)
{
struct ahci_em_priv *emp = from_timer(emp, t, timer);
struct ata_link *link = emp->link;
struct ata_port *ap = link->ap;
unsigned long led_message = emp->led_state;
u32 activity_led_state;
unsigned long flags;
led_message &= EM_MSG_LED_VALUE;
led_message |= ap->port_no | (link->pmp << 8);
/* check to see if we've had activity. If so,
* toggle state of LED and reset timer. If not,
* turn LED to desired idle state.
*/
spin_lock_irqsave(ap->lock, flags);
if (emp->saved_activity != emp->activity) {
emp->saved_activity = emp->activity;
/* get the current LED state */
activity_led_state = led_message & EM_MSG_LED_VALUE_ON;
if (activity_led_state)
activity_led_state = 0;
else
activity_led_state = 1;
/* clear old state */
led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
/* toggle state */
led_message |= (activity_led_state << 16);
mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
} else {
/* switch to idle */
led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
if (emp->blink_policy == BLINK_OFF)
led_message |= (1 << 16);
}
spin_unlock_irqrestore(ap->lock, flags);
ap->ops->transmit_led_message(ap, led_message, 4);
}
static void ahci_init_sw_activity(struct ata_link *link)
{
struct ata_port *ap = link->ap;
struct ahci_port_priv *pp = ap->private_data;
struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
/* init activity stats, setup timer */
emp->saved_activity = emp->activity = 0;
emp->link = link;
timer_setup(&emp->timer, ahci_sw_activity_blink, 0);
/* check our blink policy and set flag for link if it's enabled */
if (emp->blink_policy)
link->flags |= ATA_LFLAG_SW_ACTIVITY;
}
int ahci_reset_em(struct ata_host *host)
{
struct ahci_host_priv *hpriv = host->private_data;
void __iomem *mmio = hpriv->mmio;
u32 em_ctl;
em_ctl = readl(mmio + HOST_EM_CTL);
if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
return -EINVAL;
writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
return 0;
}
EXPORT_SYMBOL_GPL(ahci_reset_em);
static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
ssize_t size)
{
struct ahci_host_priv *hpriv = ap->host->private_data;
struct ahci_port_priv *pp = ap->private_data;
void __iomem *mmio = hpriv->mmio;
u32 em_ctl;
u32 message[] = {0, 0};
unsigned long flags;
int pmp;
struct ahci_em_priv *emp;
/* get the slot number from the message */
pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
if (pmp < EM_MAX_SLOTS)
emp = &pp->em_priv[pmp];
else
return -EINVAL;
ahci_rpm_get_port(ap);
spin_lock_irqsave(ap->lock, flags);
/*
* if we are still busy transmitting a previous message,
* do not allow a new message to be sent
*/
em_ctl = readl(mmio + HOST_EM_CTL);
if (em_ctl & EM_CTL_TM) {
spin_unlock_irqrestore(ap->lock, flags);
ahci_rpm_put_port(ap);
return -EBUSY;
}
if (hpriv->em_msg_type & EM_MSG_TYPE_LED) {
/*
* create message header - this is all zero except for
* the message size, which is 4 bytes.
*/
message[0] |= (4 << 8);
/* ignore 0:4 of byte zero, fill in port info yourself */
message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no);
/* write message to EM_LOC */
writel(message[0], mmio + hpriv->em_loc);
writel(message[1], mmio + hpriv->em_loc+4);
/*
* tell hardware to transmit the message
*/
writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
}
/* save off new led state for port/slot */
emp->led_state = state;
spin_unlock_irqrestore(ap->lock, flags);
ahci_rpm_put_port(ap);
return size;
}
static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
{
struct ahci_port_priv *pp = ap->private_data;
struct ata_link *link;
struct ahci_em_priv *emp;
int rc = 0;
ata_for_each_link(link, ap, EDGE) {
emp = &pp->em_priv[link->pmp];
rc += sprintf(buf, "%lx\n", emp->led_state);
}
return rc;
}
static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
size_t size)
{
unsigned int state;
int pmp;
struct ahci_port_priv *pp = ap->private_data;
struct ahci_em_priv *emp;
if (kstrtouint(buf, 0, &state) < 0)
return -EINVAL;
/* get the slot number from the message */
pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
if (pmp < EM_MAX_SLOTS) {
pmp = array_index_nospec(pmp, EM_MAX_SLOTS);
emp = &pp->em_priv[pmp];
} else {
return -EINVAL;
}
/* mask off the activity bits if we are in sw_activity
* mode, user should turn off sw_activity before setting
* activity led through em_message
*/
if (emp->blink_policy)
state &= ~EM_MSG_LED_VALUE_ACTIVITY;
return ap->ops->transmit_led_message(ap, state, size);
}
static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
{
struct ata_link *link = dev->link;
struct ata_port *ap = link->ap;
struct ahci_port_priv *pp = ap->private_data;
struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
u32 port_led_state = emp->led_state;
/* save the desired Activity LED behavior */
if (val == OFF) {
/* clear LFLAG */
link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);
/* set the LED to OFF */
port_led_state &= EM_MSG_LED_VALUE_OFF;
port_led_state |= (ap->port_no | (link->pmp << 8));
ap->ops->transmit_led_message(ap, port_led_state, 4);
} else {
link->flags |= ATA_LFLAG_SW_ACTIVITY;
if (val == BLINK_OFF) {
/* set LED to ON for idle */
port_led_state &= EM_MSG_LED_VALUE_OFF;
port_led_state |= (ap->port_no | (link->pmp << 8));
port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */
ap->ops->transmit_led_message(ap, port_led_state, 4);
}
}
emp->blink_policy = val;
return 0;
}
static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
{
struct ata_link *link = dev->link;
struct ata_port *ap = link->ap;
struct ahci_port_priv *pp = ap->private_data;
struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
/* display the saved value of activity behavior for this
* disk.
*/
return sprintf(buf, "%d\n", emp->blink_policy);
}
static void ahci_port_clear_pending_irq(struct ata_port *ap)
{
struct ahci_host_priv *hpriv = ap->host->private_data;
void __iomem *port_mmio = ahci_port_base(ap);
u32 tmp;
/* clear SError */
tmp = readl(port_mmio + PORT_SCR_ERR);
dev_dbg(ap->host->dev, "PORT_SCR_ERR 0x%x\n", tmp);
writel(tmp, port_mmio + PORT_SCR_ERR);
/* clear port IRQ */
tmp = readl(port_mmio + PORT_IRQ_STAT);
dev_dbg(ap->host->dev, "PORT_IRQ_STAT 0x%x\n", tmp);
if (tmp)
writel(tmp, port_mmio + PORT_IRQ_STAT);
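/* finally, acknowledge this port's bit in the global HOST_IRQ_STAT register */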
writel(1 << ap->port_no, hpriv->mmio + HOST_IRQ_STAT);
}
static void ahci_port_init(struct device *dev, struct ata_port *ap,
int port_no, void __iomem *mmio,
void __iomem *port_mmio)
{
struct ahci_host_priv *hpriv = ap->host->private_data;
const char *emsg = NULL;
int rc;
u32 tmp;
/* make sure port is not active */
rc = ahci_deinit_port(ap, &emsg);
if (rc)
dev_warn(dev, "%s (%d)\n", emsg, rc);
ahci_port_clear_pending_irq(ap);
/* mark esata ports */
tmp = readl(port_mmio + PORT_CMD);
if ((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS))
ap->pflags |= ATA_PFLAG_EXTERNAL;
}
void ahci_init_controller(struct ata_host *host)
{
struct ahci_host_priv *hpriv = host->private_data;
void __iomem *mmio = hpriv->mmio;
int i;
void __iomem *port_mmio;
u32 tmp;
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
port_mmio = ahci_port_base(ap);
if (ata_port_is_dummy(ap))
continue;
ahci_port_init(host->dev, ap, i, mmio, port_mmio);
}
tmp = readl(mmio + HOST_CTL);
dev_dbg(host->dev, "HOST_CTL 0x%x\n", tmp);
writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
tmp = readl(mmio + HOST_CTL);
dev_dbg(host->dev, "HOST_CTL 0x%x\n", tmp);
}
EXPORT_SYMBOL_GPL(ahci_init_controller);
static void ahci_dev_config(struct ata_device *dev)
{
struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
if (hpriv->flags & AHCI_HFLAG_SECT255) {
dev->max_sectors = 255;
ata_dev_info(dev,
"SB600 AHCI: limiting to 255 sectors per cmd\n");
}
}
unsigned int ahci_dev_classify(struct ata_port *ap)
{
void __iomem *port_mmio = ahci_port_base(ap);
struct ata_taskfile tf;
u32 tmp;
tmp = readl(port_mmio + PORT_SIG);
tf.lbah = (tmp >> 24) & 0xff;
tf.lbam = (tmp >> 16) & 0xff;
tf.lbal = (tmp >> 8) & 0xff;
tf.nsect = (tmp) & 0xff;
return ata_port_classify(ap, &tf);
}
EXPORT_SYMBOL_GPL(ahci_dev_classify);
void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
u32 opts)
{
dma_addr_t cmd_tbl_dma;
cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
pp->cmd_slot[tag].opts = cpu_to_le32(opts);
pp->cmd_slot[tag].status = 0;
pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
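/*
* Split the upper-half shift in two so the expression stays valid
* when dma_addr_t is only 32 bits wide.
*/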
pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
}
EXPORT_SYMBOL_GPL(ahci_fill_cmd_slot);
int ahci_kick_engine(struct ata_port *ap)
{
void __iomem *port_mmio = ahci_port_base(ap);
struct ahci_host_priv *hpriv = ap->host->private_data;
u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
u32 tmp;
int busy, rc;
/* stop engine */
rc = hpriv->stop_engine(ap);
if (rc)
goto out_restart;
/* need to do CLO?
* always do CLO if PMP is attached (AHCI-1.3 9.2)
*/
busy = status & (ATA_BUSY | ATA_DRQ);
if (!busy && !sata_pmp_attached(ap)) {
rc = 0;
goto out_restart;
}
if (!(hpriv->cap & HOST_CAP_CLO)) {
rc = -EOPNOTSUPP;
goto out_restart;
}
/* perform CLO */
tmp = readl(port_mmio + PORT_CMD);
tmp |= PORT_CMD_CLO;
writel(tmp, port_mmio + PORT_CMD);
rc = 0;
tmp = ata_wait_register(ap, port_mmio + PORT_CMD,
PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
if (tmp & PORT_CMD_CLO)
rc = -EIO;
/* restart engine */
out_restart:
hpriv->start_engine(ap);
return rc;
}
EXPORT_SYMBOL_GPL(ahci_kick_engine);
static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
struct ata_taskfile *tf, int is_cmd, u16 flags,
unsigned int timeout_msec)
{
const u32 cmd_fis_len = 5; /* five dwords */
struct ahci_port_priv *pp = ap->private_data;
void __iomem *port_mmio = ahci_port_base(ap);
u8 *fis = pp->cmd_tbl;
u32 tmp;
/* prep the command */
ata_tf_to_fis(tf, pmp, is_cmd, fis);
ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
/* set port value for softreset of Port Multiplier */
if (pp->fbs_enabled && pp->fbs_last_dev != pmp) {
tmp = readl(port_mmio + PORT_FBS);
tmp &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
tmp |= pmp << PORT_FBS_DEV_OFFSET;
writel(tmp, port_mmio + PORT_FBS);
pp->fbs_last_dev = pmp;
}
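/* the polled command was built in slot 0 above, so only bit 0 of PxCI needs to be set */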
/* issue & wait */
writel(1, port_mmio + PORT_CMD_ISSUE);
if (timeout_msec) {
tmp = ata_wait_register(ap, port_mmio + PORT_CMD_ISSUE,
0x1, 0x1, 1, timeout_msec);
if (tmp & 0x1) {
ahci_kick_engine(ap);
return -EBUSY;
}
} else
readl(port_mmio + PORT_CMD_ISSUE); /* flush */
return 0;
}
int ahci_do_softreset(struct ata_link *link, unsigned int *class,
int pmp, unsigned long deadline,
int (*check_ready)(struct ata_link *link))
{
struct ata_port *ap = link->ap;
struct ahci_host_priv *hpriv = ap->host->private_data;
struct ahci_port_priv *pp = ap->private_data;
const char *reason = NULL;
unsigned long now;
unsigned int msecs;
struct ata_taskfile tf;
bool fbs_disabled = false;
int rc;
/* prepare for SRST (AHCI-1.1 10.4.1) */
rc = ahci_kick_engine(ap);
if (rc && rc != -EOPNOTSUPP)
ata_link_warn(link, "failed to reset engine (errno=%d)\n", rc);
/*
* According to AHCI-1.2 9.3.9: if FBS is enabled, software shall
* clear PxFBS.EN to '0' prior to issuing a software reset to devices
* attached to a port multiplier.
*/
if (!ata_is_host_link(link) && pp->fbs_enabled) {
ahci_disable_fbs(ap);
fbs_disabled = true;
}
ata_tf_init(link->device, &tf);
/* issue the first H2D Register FIS */
msecs = 0;
now = jiffies;
if (time_after(deadline, now))
msecs = jiffies_to_msecs(deadline - now);
tf.ctl |= ATA_SRST;
if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
rc = -EIO;
reason = "1st FIS failed";
goto fail;
}
/* spec says at least 5us, but be generous and sleep for 1ms */
ata_msleep(ap, 1);
/* issue the second H2D Register FIS */
tf.ctl &= ~ATA_SRST;
ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
/* wait for link to become ready */
rc = ata_wait_after_reset(link, deadline, check_ready);
if (rc == -EBUSY && hpriv->flags & AHCI_HFLAG_SRST_TOUT_IS_OFFLINE) {
/*
* Workaround for cases where link online status can't
* be trusted. Treat device readiness timeout as link
* offline.
*/
ata_link_info(link, "device not ready, treating as offline\n");
*class = ATA_DEV_NONE;
} else if (rc) {
/* link occupied, -ENODEV too is an error */
reason = "device not ready";
goto fail;
} else
*class = ahci_dev_classify(ap);
/* re-enable FBS if disabled before */
if (fbs_disabled)
ahci_enable_fbs(ap);
return 0;
fail:
ata_link_err(link, "softreset failed (%s)\n", reason);
return rc;
}
int ahci_check_ready(struct ata_link *link)
{
void __iomem *port_mmio = ahci_port_base(link->ap);
u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
return ata_check_ready(status);
}
EXPORT_SYMBOL_GPL(ahci_check_ready);
static int ahci_softreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
int pmp = sata_srst_pmp(link);
return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
}
EXPORT_SYMBOL_GPL(ahci_do_softreset);
static int ahci_bad_pmp_check_ready(struct ata_link *link)
{
void __iomem *port_mmio = ahci_port_base(link->ap);
u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
u32 irq_status = readl(port_mmio + PORT_IRQ_STAT);
/*
* If BAD PMP is reported (due to a HW bug), there is no need to check
* TFDATA; skipping it saves the timeout delay.
*/
if (irq_status & PORT_IRQ_BAD_PMP)
return -EIO;
return ata_check_ready(status);
}
static int ahci_pmp_retry_softreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
struct ata_port *ap = link->ap;
void __iomem *port_mmio = ahci_port_base(ap);
int pmp = sata_srst_pmp(link);
int rc;
u32 irq_sts;
rc = ahci_do_softreset(link, class, pmp, deadline,
ahci_bad_pmp_check_ready);
/*
* Soft reset fails with IPMS set when a PMP is enabled but a
* SATA HDD/ODD is connected directly to the SATA port; retry
* the soft reset against PMP port 0.
*/
if (rc == -EIO) {
irq_sts = readl(port_mmio + PORT_IRQ_STAT);
if (irq_sts & PORT_IRQ_BAD_PMP) {
ata_link_warn(link,
"applying PMP SRST workaround "
"and retrying\n");
rc = ahci_do_softreset(link, class, 0, deadline,
ahci_check_ready);
}
}
return rc;
}
int ahci_do_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline, bool *online)
{
const unsigned int *timing = sata_ehc_deb_timing(&link->eh_context);
struct ata_port *ap = link->ap;
struct ahci_port_priv *pp = ap->private_data;
struct ahci_host_priv *hpriv = ap->host->private_data;
u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
struct ata_taskfile tf;
int rc;
hpriv->stop_engine(ap);
/* clear D2H reception area to properly wait for D2H FIS */
ata_tf_init(link->device, &tf);
tf.status = ATA_BUSY;
ata_tf_to_fis(&tf, 0, 0, d2h_fis);
ahci_port_clear_pending_irq(ap);
rc = sata_link_hardreset(link, timing, deadline, online,
ahci_check_ready);
hpriv->start_engine(ap);
if (*online)
*class = ahci_dev_classify(ap);
return rc;
}
EXPORT_SYMBOL_GPL(ahci_do_hardreset);
static int ahci_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
bool online;
return ahci_do_hardreset(link, class, deadline, &online);
}
static void ahci_postreset(struct ata_link *link, unsigned int *class)
{
struct ata_port *ap = link->ap;
void __iomem *port_mmio = ahci_port_base(ap);
u32 new_tmp, tmp;
ata_std_postreset(link, class);
/* Make sure port's ATAPI bit is set appropriately */
new_tmp = tmp = readl(port_mmio + PORT_CMD);
if (*class == ATA_DEV_ATAPI)
new_tmp |= PORT_CMD_ATAPI;
else
new_tmp &= ~PORT_CMD_ATAPI;
if (new_tmp != tmp) {
writel(new_tmp, port_mmio + PORT_CMD);
readl(port_mmio + PORT_CMD); /* flush */
}
}
static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
{
struct scatterlist *sg;
struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
unsigned int si;
/*
* Next, the S/G list.
*/
for_each_sg(qc->sg, sg, qc->n_elem, si) {
dma_addr_t addr = sg_dma_address(sg);
u32 sg_len = sg_dma_len(sg);
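/*
* Each PRD entry carries the split 64-bit DMA address and, in the
* DBC field, the transfer length encoded as byte count minus one.
*/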
ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
}
return si;
}
static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ahci_port_priv *pp = ap->private_data;
if (!sata_pmp_attached(ap) || pp->fbs_enabled)
return ata_std_qc_defer(qc);
else
return sata_pmp_qc_defer_cmd_switch(qc);
}
static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ahci_port_priv *pp = ap->private_data;
int is_atapi = ata_is_atapi(qc->tf.protocol);
void *cmd_tbl;
u32 opts;
const u32 cmd_fis_len = 5; /* five dwords */
unsigned int n_elem;
/*
* Fill in command table information. First, the header,
* a SATA Register - Host to Device command FIS.
*/
cmd_tbl = pp->cmd_tbl + qc->hw_tag * AHCI_CMD_TBL_SZ;
ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
if (is_atapi) {
memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
}
n_elem = 0;
if (qc->flags & ATA_QCFLAG_DMAMAP)
n_elem = ahci_fill_sg(qc, cmd_tbl);
/*
* Fill in command slot information.
*/
opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
if (qc->tf.flags & ATA_TFLAG_WRITE)
opts |= AHCI_CMD_WRITE;
if (is_atapi)
opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
ahci_fill_cmd_slot(pp, qc->hw_tag, opts);
return AC_ERR_OK;
}
static void ahci_fbs_dec_intr(struct ata_port *ap)
{
struct ahci_port_priv *pp = ap->private_data;
void __iomem *port_mmio = ahci_port_base(ap);
u32 fbs = readl(port_mmio + PORT_FBS);
int retries = 3;
BUG_ON(!pp->fbs_enabled);
/* The time to wait for DEC is not specified by the AHCI spec,
* so add a retry loop for safety.
*/
writel(fbs | PORT_FBS_DEC, port_mmio + PORT_FBS);
fbs = readl(port_mmio + PORT_FBS);
while ((fbs & PORT_FBS_DEC) && retries--) {
udelay(1);
fbs = readl(port_mmio + PORT_FBS);
}
if (fbs & PORT_FBS_DEC)
dev_err(ap->host->dev, "failed to clear device error\n");
}
static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
{
struct ahci_host_priv *hpriv = ap->host->private_data;
struct ahci_port_priv *pp = ap->private_data;
struct ata_eh_info *host_ehi = &ap->link.eh_info;
struct ata_link *link = NULL;
struct ata_queued_cmd *active_qc;
struct ata_eh_info *active_ehi;
bool fbs_need_dec = false;
u32 serror;
/* determine active link with error */
if (pp->fbs_enabled) {
void __iomem *port_mmio = ahci_port_base(ap);
u32 fbs = readl(port_mmio + PORT_FBS);
int pmp = fbs >> PORT_FBS_DWE_OFFSET;
if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links)) {
link = &ap->pmp_link[pmp];
fbs_need_dec = true;
}
} else
ata_for_each_link(link, ap, EDGE)
if (ata_link_active(link))
break;
if (!link)
link = &ap->link;
active_qc = ata_qc_from_tag(ap, link->active_tag);
active_ehi = &link->eh_info;
/* record irq stat */
ata_ehi_clear_desc(host_ehi);
ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);
/* AHCI needs SError cleared; otherwise, it might lock up */
ahci_scr_read(&ap->link, SCR_ERROR, &serror);
ahci_scr_write(&ap->link, SCR_ERROR, serror);
host_ehi->serror |= serror;
/* some controllers set IRQ_IF_ERR on device errors, ignore it */
if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
irq_stat &= ~PORT_IRQ_IF_ERR;
if (irq_stat & PORT_IRQ_TF_ERR) {
/* If a qc is active, charge it; otherwise, charge the active
* link. There is no active qc on NCQ errors; the failed command
* will be determined by EH by reading log page 10h.
*/
if (active_qc)
active_qc->err_mask |= AC_ERR_DEV;
else
active_ehi->err_mask |= AC_ERR_DEV;
if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
host_ehi->serror &= ~SERR_INTERNAL;
}
if (irq_stat & PORT_IRQ_UNK_FIS) {
u32 *unk = pp->rx_fis + RX_FIS_UNK;
active_ehi->err_mask |= AC_ERR_HSM;
active_ehi->action |= ATA_EH_RESET;
ata_ehi_push_desc(active_ehi,
"unknown FIS %08x %08x %08x %08x" ,
unk[0], unk[1], unk[2], unk[3]);
}
if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
active_ehi->err_mask |= AC_ERR_HSM;
active_ehi->action |= ATA_EH_RESET;
ata_ehi_push_desc(active_ehi, "incorrect PMP");
}
if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
host_ehi->err_mask |= AC_ERR_HOST_BUS;
host_ehi->action |= ATA_EH_RESET;
ata_ehi_push_desc(host_ehi, "host bus error");
}
if (irq_stat & PORT_IRQ_IF_ERR) {
if (fbs_need_dec)
active_ehi->err_mask |= AC_ERR_DEV;
else {
host_ehi->err_mask |= AC_ERR_ATA_BUS;
host_ehi->action |= ATA_EH_RESET;
}
ata_ehi_push_desc(host_ehi, "interface fatal error");
}
if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
ata_ehi_hotplugged(host_ehi);
ata_ehi_push_desc(host_ehi, "%s",
irq_stat & PORT_IRQ_CONNECT ?
"connection status changed" : "PHY RDY changed");
}
/* okay, let's hand over to EH */
if (irq_stat & PORT_IRQ_FREEZE)
ata_port_freeze(ap);
else if (fbs_need_dec) {
ata_link_abort(link);
ahci_fbs_dec_intr(ap);
} else
ata_port_abort(ap);
}
static void ahci_qc_complete(struct ata_port *ap, void __iomem *port_mmio)
{
struct ata_eh_info *ehi = &ap->link.eh_info;
struct ahci_port_priv *pp = ap->private_data;
u32 qc_active = 0;
int rc;
/*
* pp->active_link is not reliable once FBS is enabled, both
* PORT_SCR_ACT and PORT_CMD_ISSUE should be checked because
* NCQ and non-NCQ commands may be in flight at the same time.
*/
if (pp->fbs_enabled) {
if (ap->qc_active) {
qc_active = readl(port_mmio + PORT_SCR_ACT);
qc_active |= readl(port_mmio + PORT_CMD_ISSUE);
}
} else {
/* pp->active_link is valid iff any command is in flight */
if (ap->qc_active && pp->active_link->sactive)
qc_active = readl(port_mmio + PORT_SCR_ACT);
else
qc_active = readl(port_mmio + PORT_CMD_ISSUE);
}
rc = ata_qc_complete_multiple(ap, qc_active);
if (unlikely(rc < 0 && !(ap->pflags & ATA_PFLAG_RESETTING))) {
ehi->err_mask |= AC_ERR_HSM;
ehi->action |= ATA_EH_RESET;
ata_port_freeze(ap);
}
}
static void ahci_handle_port_interrupt(struct ata_port *ap,
void __iomem *port_mmio, u32 status)
{
struct ahci_port_priv *pp = ap->private_data;
struct ahci_host_priv *hpriv = ap->host->private_data;
/* ignore BAD_PMP while resetting */
if (unlikely(ap->pflags & ATA_PFLAG_RESETTING))
status &= ~PORT_IRQ_BAD_PMP;
if (sata_lpm_ignore_phy_events(&ap->link)) {
status &= ~PORT_IRQ_PHYRDY;
ahci_scr_write(&ap->link, SCR_ERROR, SERR_PHYRDY_CHG);
}
if (unlikely(status & PORT_IRQ_ERROR)) {
/*
* Before getting the error notification, we may have
* received SDB FISes notifying successful completions.
* Handle these first and then handle the error.
*/
ahci_qc_complete(ap, port_mmio);
ahci_error_intr(ap, status);
return;
}
if (status & PORT_IRQ_SDB_FIS) {
/* If SNotification is available, leave notification
* handling to sata_async_notification(). If not,
* emulate it by snooping SDB FIS RX area.
*
* Snooping the FIS RX area is probably cheaper than
* poking SNotification, but some controllers which
* implement SNotification, ICH9 for example, don't
* store the AN SDB FIS into the receive area.
*/
if (hpriv->cap & HOST_CAP_SNTF)
sata_async_notification(ap);
else {
/* If the 'N' bit in word 0 of the FIS is set,
* we just received asynchronous notification.
* Tell libata about it.
*
* Lack of SNotification should not appear in
* ahci 1.2, so the workaround is unnecessary
* when FBS is enabled.
*/
if (pp->fbs_enabled)
WARN_ON_ONCE(1);
else {
const __le32 *f = pp->rx_fis + RX_FIS_SDB;
u32 f0 = le32_to_cpu(f[0]);
if (f0 & (1 << 15))
sata_async_notification(ap);
}
}
}
/* Handle completed commands */
ahci_qc_complete(ap, port_mmio);
}
static void ahci_port_intr(struct ata_port *ap)
{
void __iomem *port_mmio = ahci_port_base(ap);
u32 status;
status = readl(port_mmio + PORT_IRQ_STAT);
writel(status, port_mmio + PORT_IRQ_STAT);
ahci_handle_port_interrupt(ap, port_mmio, status);
}
static irqreturn_t ahci_multi_irqs_intr_hard(int irq, void *dev_instance)
{
struct ata_port *ap = dev_instance;
void __iomem *port_mmio = ahci_port_base(ap);
u32 status;
status = readl(port_mmio + PORT_IRQ_STAT);
writel(status, port_mmio + PORT_IRQ_STAT);
spin_lock(ap->lock);
ahci_handle_port_interrupt(ap, port_mmio, status);
spin_unlock(ap->lock);
return IRQ_HANDLED;
}
u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked)
{
unsigned int i, handled = 0;
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap;
if (!(irq_masked & (1 << i)))
continue;
ap = host->ports[i];
if (ap) {
ahci_port_intr(ap);
} else {
if (ata_ratelimit())
dev_warn(host->dev,
"interrupt on disabled port %u\n", i);
}
handled = 1;
}
return handled;
}
EXPORT_SYMBOL_GPL(ahci_handle_port_intr);
static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
struct ahci_host_priv *hpriv;
unsigned int rc = 0;
void __iomem *mmio;
u32 irq_stat, irq_masked;
hpriv = host->private_data;
mmio = hpriv->mmio;
/* sigh. 0xffffffff is a valid return from h/w */
irq_stat = readl(mmio + HOST_IRQ_STAT);
if (!irq_stat)
return IRQ_NONE;
irq_masked = irq_stat & hpriv->port_map;
spin_lock(&host->lock);
rc = ahci_handle_port_intr(host, irq_masked);
/* HOST_IRQ_STAT behaves as level triggered latch meaning that
* it should be cleared after all the port events are cleared;
* otherwise, it will raise a spurious interrupt after each
* valid one. Please read section 10.6.2 of ahci 1.1 for more
* information.
*
* Also, use the unmasked value to clear interrupt as spurious
* pending event on a dummy port might cause screaming IRQ.
*/
writel(irq_stat, mmio + HOST_IRQ_STAT);
spin_unlock(&host->lock);
return IRQ_RETVAL(rc);
}
unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
void __iomem *port_mmio = ahci_port_base(ap);
struct ahci_port_priv *pp = ap->private_data;
/* Keep track of the currently active link. It will be used
* in the completion path to determine whether an NCQ phase is in
* progress.
*/
pp->active_link = qc->dev->link;
if (ata_is_ncq(qc->tf.protocol))
writel(1 << qc->hw_tag, port_mmio + PORT_SCR_ACT);
if (pp->fbs_enabled && pp->fbs_last_dev != qc->dev->link->pmp) {
u32 fbs = readl(port_mmio + PORT_FBS);
fbs &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET;
writel(fbs, port_mmio + PORT_FBS);
pp->fbs_last_dev = qc->dev->link->pmp;
}
writel(1 << qc->hw_tag, port_mmio + PORT_CMD_ISSUE);
ahci_sw_activity(qc->dev->link);
return 0;
}
EXPORT_SYMBOL_GPL(ahci_qc_issue);
static void ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
{
struct ahci_port_priv *pp = qc->ap->private_data;
u8 *rx_fis = pp->rx_fis;
/*
* rtf may already be filled (e.g. for successful NCQ commands).
* If that is the case, we have nothing to do.
*/
if (qc->flags & ATA_QCFLAG_RTF_FILLED)
return;
if (pp->fbs_enabled)
rx_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ;
/*
* After a successful execution of an ATA PIO data-in command,
* the device doesn't send D2H Reg FIS to update the TF and
* the host should take TF and E_Status from the preceding PIO
* Setup FIS.
*/
if (qc->tf.protocol == ATA_PROT_PIO && qc->dma_dir == DMA_FROM_DEVICE &&
!(qc->flags & ATA_QCFLAG_EH)) {
ata_tf_from_fis(rx_fis + RX_FIS_PIO_SETUP, &qc->result_tf);
qc->result_tf.status = (rx_fis + RX_FIS_PIO_SETUP)[15];
qc->flags |= ATA_QCFLAG_RTF_FILLED;
return;
}
/*
* For NCQ commands, we never get a D2H FIS, so reading the D2H Register
* FIS area of the Received FIS Structure (which contains a copy of the
* last D2H FIS received) will contain an outdated status code.
* For NCQ commands, we instead get a SDB FIS, so read the SDB FIS area
* instead. However, the SDB FIS does not contain the LBA, so we can't
* use the ata_tf_from_fis() helper.
*/
if (ata_is_ncq(qc->tf.protocol)) {
const u8 *fis = rx_fis + RX_FIS_SDB;
/*
* Successful NCQ commands have been filled already.
* A failed NCQ command will read the status here.
* (Note that a failed NCQ command will get a more specific
* error when reading the NCQ Command Error log.)
*/
qc->result_tf.status = fis[2];
qc->result_tf.error = fis[3];
qc->flags |= ATA_QCFLAG_RTF_FILLED;
return;
}
ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf);
qc->flags |= ATA_QCFLAG_RTF_FILLED;
}
static void ahci_qc_ncq_fill_rtf(struct ata_port *ap, u64 done_mask)
{
struct ahci_port_priv *pp = ap->private_data;
const u8 *fis;
/* No outstanding commands. */
if (!ap->qc_active)
return;
/*
* FBS not enabled, so read status and error once, since they are shared
* for all QCs.
*/
if (!pp->fbs_enabled) {
u8 status, error;
/* No outstanding NCQ commands. */
if (!pp->active_link->sactive)
return;
fis = pp->rx_fis + RX_FIS_SDB;
status = fis[2];
error = fis[3];
while (done_mask) {
struct ata_queued_cmd *qc;
unsigned int tag = __ffs64(done_mask);
qc = ata_qc_from_tag(ap, tag);
if (qc && ata_is_ncq(qc->tf.protocol)) {
qc->result_tf.status = status;
qc->result_tf.error = error;
qc->flags |= ATA_QCFLAG_RTF_FILLED;
}
done_mask &= ~(1ULL << tag);
}
return;
}
/*
* FBS enabled, so read the status and error for each QC, since the QCs
* can belong to different PMP links. (Each PMP link has its own FIS
* Receive Area.)
*/
while (done_mask) {
struct ata_queued_cmd *qc;
unsigned int tag = __ffs64(done_mask);
qc = ata_qc_from_tag(ap, tag);
if (qc && ata_is_ncq(qc->tf.protocol)) {
fis = pp->rx_fis;
fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ;
fis += RX_FIS_SDB;
qc->result_tf.status = fis[2];
qc->result_tf.error = fis[3];
qc->flags |= ATA_QCFLAG_RTF_FILLED;
}
done_mask &= ~(1ULL << tag);
}
}
static void ahci_freeze(struct ata_port *ap)
{
void __iomem *port_mmio = ahci_port_base(ap);
/* turn IRQ off */
writel(0, port_mmio + PORT_IRQ_MASK);
}
static void ahci_thaw(struct ata_port *ap)
{
struct ahci_host_priv *hpriv = ap->host->private_data;
void __iomem *mmio = hpriv->mmio;
void __iomem *port_mmio = ahci_port_base(ap);
u32 tmp;
struct ahci_port_priv *pp = ap->private_data;
/* clear IRQ */
tmp = readl(port_mmio + PORT_IRQ_STAT);
writel(tmp, port_mmio + PORT_IRQ_STAT);
writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);
/* turn IRQ back on */
writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}
void ahci_error_handler(struct ata_port *ap)
{
struct ahci_host_priv *hpriv = ap->host->private_data;
if (!ata_port_is_frozen(ap)) {
/* restart engine */
hpriv->stop_engine(ap);
hpriv->start_engine(ap);
}
sata_pmp_error_handler(ap);
if (!ata_dev_enabled(ap->link.device))
hpriv->stop_engine(ap);
}
EXPORT_SYMBOL_GPL(ahci_error_handler);
static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
/* make DMA engine forget about the failed command */
if (qc->flags & ATA_QCFLAG_EH)
ahci_kick_engine(ap);
}
static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
{
struct ahci_host_priv *hpriv = ap->host->private_data;
void __iomem *port_mmio = ahci_port_base(ap);
struct ata_device *dev = ap->link.device;
u32 devslp, dm, dito, mdat, deto, dito_conf;
int rc;
unsigned int err_mask;
devslp = readl(port_mmio + PORT_DEVSLP);
if (!(devslp & PORT_DEVSLP_DSP)) {
dev_info(ap->host->dev, "port does not support device sleep\n");
return;
}
/* disable device sleep */
if (!sleep) {
if (devslp & PORT_DEVSLP_ADSE) {
writel(devslp & ~PORT_DEVSLP_ADSE,
port_mmio + PORT_DEVSLP);
err_mask = ata_dev_set_feature(dev,
SETFEATURES_SATA_DISABLE,
SATA_DEVSLP);
if (err_mask && err_mask != AC_ERR_DEV)
ata_dev_warn(dev, "failed to disable DEVSLP\n");
}
return;
}
dm = (devslp & PORT_DEVSLP_DM_MASK) >> PORT_DEVSLP_DM_OFFSET;
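/*
* The effective idle timeout is DITO * (DM + 1) milliseconds, so divide
* the requested timeout by (DM + 1) and cap DITO at its 10-bit maximum.
*/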
dito = devslp_idle_timeout / (dm + 1);
if (dito > 0x3ff)
dito = 0x3ff;
dito_conf = (devslp >> PORT_DEVSLP_DITO_OFFSET) & 0x3FF;
/* device sleep was already enabled and same dito */
if ((devslp & PORT_DEVSLP_ADSE) && (dito_conf == dito))
return;
/* set DITO, MDAT, DETO and enable DevSlp, need to stop engine first */
rc = hpriv->stop_engine(ap);
if (rc)
return;
/* Use the nominal value of 10 ms if the MDAT read from the device is
* zero; likewise, fall back to the nominal DETO value of 20 ms.
*/
if (dev->devslp_timing[ATA_LOG_DEVSLP_VALID] &
ATA_LOG_DEVSLP_VALID_MASK) {
mdat = dev->devslp_timing[ATA_LOG_DEVSLP_MDAT] &
ATA_LOG_DEVSLP_MDAT_MASK;
if (!mdat)
mdat = 10;
deto = dev->devslp_timing[ATA_LOG_DEVSLP_DETO];
if (!deto)
deto = 20;
} else {
mdat = 10;
deto = 20;
}
/* Clear the DITO, MDAT and DETO fields before programming them */
devslp &= ~GENMASK_ULL(24, 2);
devslp |= ((dito << PORT_DEVSLP_DITO_OFFSET) |
(mdat << PORT_DEVSLP_MDAT_OFFSET) |
(deto << PORT_DEVSLP_DETO_OFFSET) |
PORT_DEVSLP_ADSE);
writel(devslp, port_mmio + PORT_DEVSLP);
hpriv->start_engine(ap);
/* enable device sleep feature for the drive */
err_mask = ata_dev_set_feature(dev,
SETFEATURES_SATA_ENABLE,
SATA_DEVSLP);
if (err_mask && err_mask != AC_ERR_DEV)
ata_dev_warn(dev, "failed to enable DEVSLP\n");
}
static void ahci_enable_fbs(struct ata_port *ap)
{
struct ahci_host_priv *hpriv = ap->host->private_data;
struct ahci_port_priv *pp = ap->private_data;
void __iomem *port_mmio = ahci_port_base(ap);
u32 fbs;
int rc;
if (!pp->fbs_supported)
return;
fbs = readl(port_mmio + PORT_FBS);
if (fbs & PORT_FBS_EN) {
pp->fbs_enabled = true;
pp->fbs_last_dev = -1; /* initialization */
return;
}
rc = hpriv->stop_engine(ap);
if (rc)
return;
writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS);
fbs = readl(port_mmio + PORT_FBS);
if (fbs & PORT_FBS_EN) {
dev_info(ap->host->dev, "FBS is enabled\n");
pp->fbs_enabled = true;
pp->fbs_last_dev = -1; /* initialization */
} else
dev_err(ap->host->dev, "Failed to enable FBS\n");
hpriv->start_engine(ap);
}
static void ahci_disable_fbs(struct ata_port *ap)
{
struct ahci_host_priv *hpriv = ap->host->private_data;
struct ahci_port_priv *pp = ap->private_data;
void __iomem *port_mmio = ahci_port_base(ap);
u32 fbs;
int rc;
if (!pp->fbs_supported)
return;
fbs = readl(port_mmio + PORT_FBS);
if ((fbs & PORT_FBS_EN) == 0) {
pp->fbs_enabled = false;
return;
}
rc = hpriv->stop_engine(ap);
if (rc)
return;
writel(fbs & ~PORT_FBS_EN, port_mmio + PORT_FBS);
fbs = readl(port_mmio + PORT_FBS);
if (fbs & PORT_FBS_EN)
dev_err(ap->host->dev, "Failed to disable FBS\n");
else {
dev_info(ap->host->dev, "FBS is disabled\n");
pp->fbs_enabled = false;
}
hpriv->start_engine(ap);
}
static void ahci_pmp_attach(struct ata_port *ap)
{
void __iomem *port_mmio = ahci_port_base(ap);
struct ahci_port_priv *pp = ap->private_data;
u32 cmd;
cmd = readl(port_mmio + PORT_CMD);
cmd |= PORT_CMD_PMP;
writel(cmd, port_mmio + PORT_CMD);
ahci_enable_fbs(ap);
pp->intr_mask |= PORT_IRQ_BAD_PMP;
/*
* We must not change the port interrupt mask register if the
* port is marked frozen, the value in pp->intr_mask will be
* restored later when the port is thawed.
*
* Note that during initialization, the port is marked as
* frozen since the irq handler is not yet registered.
*/
if (!ata_port_is_frozen(ap))
writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}
static void ahci_pmp_detach(struct ata_port *ap)
{
void __iomem *port_mmio = ahci_port_base(ap);
struct ahci_port_priv *pp = ap->private_data;
u32 cmd;
ahci_disable_fbs(ap);
cmd = readl(port_mmio + PORT_CMD);
cmd &= ~PORT_CMD_PMP;
writel(cmd, port_mmio + PORT_CMD);
pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
/* see comment above in ahci_pmp_attach() */
if (!ata_port_is_frozen(ap))
writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}
int ahci_port_resume(struct ata_port *ap)
{
ahci_rpm_get_port(ap);
ahci_power_up(ap);
ahci_start_port(ap);
if (sata_pmp_attached(ap))
ahci_pmp_attach(ap);
else
ahci_pmp_detach(ap);
return 0;
}
EXPORT_SYMBOL_GPL(ahci_port_resume);
#ifdef CONFIG_PM
static void ahci_handle_s2idle(struct ata_port *ap)
{
void __iomem *port_mmio = ahci_port_base(ap);
u32 devslp;
if (pm_suspend_via_firmware())
return;
devslp = readl(port_mmio + PORT_DEVSLP);
if ((devslp & PORT_DEVSLP_ADSE))
ata_msleep(ap, devslp_idle_timeout);
}
static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
const char *emsg = NULL;
int rc;
rc = ahci_deinit_port(ap, &emsg);
if (rc == 0)
ahci_power_down(ap);
else {
ata_port_err(ap, "%s (%d)\n", emsg, rc);
ata_port_freeze(ap);
}
if (acpi_storage_d3(ap->host->dev))
ahci_handle_s2idle(ap);
ahci_rpm_put_port(ap);
return rc;
}
#endif
static int ahci_port_start(struct ata_port *ap)
{
struct ahci_host_priv *hpriv = ap->host->private_data;
struct device *dev = ap->host->dev;
struct ahci_port_priv *pp;
void *mem;
dma_addr_t mem_dma;
size_t dma_sz, rx_fis_sz;
pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
if (!pp)
return -ENOMEM;
if (ap->host->n_ports > 1) {
pp->irq_desc = devm_kzalloc(dev, 8, GFP_KERNEL);
if (!pp->irq_desc) {
devm_kfree(dev, pp);
return -ENOMEM;
}
snprintf(pp->irq_desc, 8,
"%s%d", dev_driver_string(dev), ap->port_no);
}
/* check FBS capability */
if ((hpriv->cap & HOST_CAP_FBS) && sata_pmp_supported(ap)) {
void __iomem *port_mmio = ahci_port_base(ap);
u32 cmd = readl(port_mmio + PORT_CMD);
if (cmd & PORT_CMD_FBSCP)
pp->fbs_supported = true;
else if (hpriv->flags & AHCI_HFLAG_YES_FBS) {
dev_info(dev, "port %d can do FBS, forcing FBSCP\n",
ap->port_no);
pp->fbs_supported = true;
} else
dev_warn(dev, "port %d is not capable of FBS\n",
ap->port_no);
}
if (pp->fbs_supported) {
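/* with FBS, each of the 16 possible PMP devices gets its own RX FIS area */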
dma_sz = AHCI_PORT_PRIV_FBS_DMA_SZ;
rx_fis_sz = AHCI_RX_FIS_SZ * 16;
} else {
dma_sz = AHCI_PORT_PRIV_DMA_SZ;
rx_fis_sz = AHCI_RX_FIS_SZ;
}
mem = dmam_alloc_coherent(dev, dma_sz, &mem_dma, GFP_KERNEL);
if (!mem)
return -ENOMEM;
/*
* First item in chunk of DMA memory: 32-slot command table,
* 32 bytes each in size
*/
pp->cmd_slot = mem;
pp->cmd_slot_dma = mem_dma;
mem += AHCI_CMD_SLOT_SZ;
mem_dma += AHCI_CMD_SLOT_SZ;
/*
* Second item: Received-FIS area
*/
pp->rx_fis = mem;
pp->rx_fis_dma = mem_dma;
mem += rx_fis_sz;
mem_dma += rx_fis_sz;
/*
* Third item: data area for storing a single command
* and its scatter-gather table
*/
pp->cmd_tbl = mem;
pp->cmd_tbl_dma = mem_dma;
/*
* Save off initial list of interrupts to be enabled.
* This could be changed later
*/
pp->intr_mask = DEF_PORT_IRQ;
/*
* Switch to per-port locking in case each port has its own MSI vector.
*/
if (hpriv->flags & AHCI_HFLAG_MULTI_MSI) {
spin_lock_init(&pp->lock);
ap->lock = &pp->lock;
}
ap->private_data = pp;
/* engage engines, captain */
return ahci_port_resume(ap);
}
static void ahci_port_stop(struct ata_port *ap)
{
const char *emsg = NULL;
struct ahci_host_priv *hpriv = ap->host->private_data;
void __iomem *host_mmio = hpriv->mmio;
int rc;
/* de-initialize port */
rc = ahci_deinit_port(ap, &emsg);
if (rc)
ata_port_warn(ap, "%s (%d)\n", emsg, rc);
/*
* Clear GHC.IS to prevent stuck INTx after disabling MSI and
* re-enabling INTx.
*/
writel(1 << ap->port_no, host_mmio + HOST_IRQ_STAT);
ahci_rpm_put_port(ap);
}
void ahci_print_info(struct ata_host *host, const char *scc_s)
{
struct ahci_host_priv *hpriv = host->private_data;
u32 vers, cap, cap2, impl, speed;
const char *speed_s;
vers = hpriv->version;
cap = hpriv->cap;
cap2 = hpriv->cap2;
impl = hpriv->port_map;
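/* CAP.ISS (bits 23:20) encodes the highest supported interface speed */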
speed = (cap >> 20) & 0xf;
if (speed == 1)
speed_s = "1.5";
else if (speed == 2)
speed_s = "3";
else if (speed == 3)
speed_s = "6";
else
speed_s = "?";
dev_info(host->dev,
"AHCI %02x%02x.%02x%02x "
"%u slots %u ports %s Gbps 0x%x impl %s mode\n"
,
(vers >> 24) & 0xff,
(vers >> 16) & 0xff,
(vers >> 8) & 0xff,
vers & 0xff,
((cap >> 8) & 0x1f) + 1,
(cap & 0x1f) + 1,
speed_s,
impl,
scc_s);
dev_info(host->dev,
"flags: "
"%s%s%s%s%s%s%s"
"%s%s%s%s%s%s%s"
"%s%s%s%s%s%s%s"
"%s%s\n"
,
cap & HOST_CAP_64 ? "64bit " : "",
cap & HOST_CAP_NCQ ? "ncq " : "",
cap & HOST_CAP_SNTF ? "sntf " : "",
cap & HOST_CAP_MPS ? "ilck " : "",
cap & HOST_CAP_SSS ? "stag " : "",
cap & HOST_CAP_ALPM ? "pm " : "",
cap & HOST_CAP_LED ? "led " : "",
cap & HOST_CAP_CLO ? "clo " : "",
cap & HOST_CAP_ONLY ? "only " : "",
cap & HOST_CAP_PMP ? "pmp " : "",
cap & HOST_CAP_FBS ? "fbs " : "",
cap & HOST_CAP_PIO_MULTI ? "pio " : "",
cap & HOST_CAP_SSC ? "slum " : "",
cap & HOST_CAP_PART ? "part " : "",
cap & HOST_CAP_CCC ? "ccc " : "",
cap & HOST_CAP_EMS ? "ems " : "",
cap & HOST_CAP_SXS ? "sxs " : "",
cap2 & HOST_CAP2_DESO ? "deso " : "",
cap2 & HOST_CAP2_SADM ? "sadm " : "",
cap2 & HOST_CAP2_SDS ? "sds " : "",
cap2 & HOST_CAP2_APST ? "apst " : "",
cap2 & HOST_CAP2_NVMHCI ? "nvmp " : "",
cap2 & HOST_CAP2_BOH ? "boh " : ""
);
}
EXPORT_SYMBOL_GPL(ahci_print_info);
void ahci_set_em_messages(struct ahci_host_priv *hpriv,
struct ata_port_info *pi)
{
u8 messages;
void __iomem *mmio = hpriv->mmio;
u32 em_loc = readl(mmio + HOST_EM_LOC);
u32 em_ctl = readl(mmio + HOST_EM_CTL);
if (!ahci_em_messages || !(hpriv->cap & HOST_CAP_EMS))
return;
messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16;
if (messages) {
/* EM_LOC encodes the message buffer offset (upper half) and size (lower half) in DWORDs; store them as byte values */
hpriv->em_loc = ((em_loc >> 16) * 4);
hpriv->em_buf_sz = ((em_loc & 0xff) * 4);
hpriv->em_msg_type = messages;
pi->flags |= ATA_FLAG_EM;
if (!(em_ctl & EM_CTL_ALHD))
pi->flags |= ATA_FLAG_SW_ACTIVITY;
}
}
EXPORT_SYMBOL_GPL(ahci_set_em_messages);
static int ahci_host_activate_multi_irqs(struct ata_host *host,
const struct scsi_host_template *sht)
{
struct ahci_host_priv *hpriv = host->private_data;
int i, rc;
rc = ata_host_start(host);
if (rc)
return rc;
/*
* Requests IRQs according to AHCI-1.1 when multiple MSIs were
* allocated. That is one MSI per port, starting from @irq.
*/
for (i = 0; i < host->n_ports; i++) {
struct ahci_port_priv *pp = host->ports[i]->private_data;
int irq = hpriv->get_irq_vector(host, i);
/* Do not receive interrupts sent by dummy ports */
if (!pp) {
disable_irq(irq);
continue;
}
rc = devm_request_irq(host->dev, irq, ahci_multi_irqs_intr_hard,
0, pp->irq_desc, host->ports[i]);
if (rc)
return rc;
ata_port_desc(host->ports[i], "irq %d", irq);
}
return ata_host_register(host, sht);
}
/**
* ahci_host_activate - start AHCI host, request IRQs and register it
* @host: target ATA host
* @sht: scsi_host_template to use when registering the host
*
* LOCKING:
* Inherited from calling layer (may sleep).
*
* RETURNS:
* 0 on success, -errno otherwise.
*/
int ahci_host_activate(struct ata_host *host, const struct scsi_host_template *sht)
{
struct ahci_host_priv *hpriv = host->private_data;
int irq = hpriv->irq;
int rc;
if (hpriv->flags & AHCI_HFLAG_MULTI_MSI) {
if (hpriv->irq_handler &&
hpriv->irq_handler != ahci_single_level_irq_intr)
dev_warn(host->dev,
"both AHCI_HFLAG_MULTI_MSI flag set and custom irq handler implemented\n");
if (!hpriv->get_irq_vector) {
dev_err(host->dev,
"AHCI_HFLAG_MULTI_MSI requires ->get_irq_vector!\n");
return -EIO;
}
rc = ahci_host_activate_multi_irqs(host, sht);
} else {
rc = ata_host_activate(host, irq, hpriv->irq_handler,
IRQF_SHARED, sht);
}
return rc;
}
EXPORT_SYMBOL_GPL(ahci_host_activate);
MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Common AHCI SATA low-level routines");
MODULE_LICENSE("GPL");
| linux-master | drivers/ata/libahci.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* pata_opti.c - Opti PATA for new ATA layer
* (C) 2005 Red Hat Inc
*
* Based on
* linux/drivers/ide/pci/opti621.c Version 0.7 Sept 10, 2002
*
* Copyright (C) 1996-1998 Linus Torvalds & authors (see below)
*
* Authors:
* Jaromir Koutek <[email protected]>,
* Jan Harkes <[email protected]>,
* Mark Lord <[email protected]>
* Some parts of code are from ali14xx.c and from rz1000.c.
*
* Also consulted the FreeBSD prototype driver by Kevin Day to try
* and resolve some confusions. Further documentation can be found in
* Ralf Brown's interrupt list
*
* If you have other variants of the Opti range (Viper/Vendetta) please
* try this driver with those PCI idents and report back. For the later
* chips see the pata_optidma driver
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_opti"
#define DRV_VERSION "0.2.9"
enum {
READ_REG = 0, /* index of Read cycle timing register */
WRITE_REG = 1, /* index of Write cycle timing register */
CNTRL_REG = 3, /* index of Control register */
STRAP_REG = 5, /* index of Strap register */
MISC_REG = 6 /* index of Miscellaneous register */
};
/**
* opti_pre_reset - probe begin
* @link: ATA link
* @deadline: deadline jiffies for the operation
*
* Set up cable type and use generic probe init
*/
static int opti_pre_reset(struct ata_link *link, unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
static const struct pci_bits opti_enable_bits[] = {
{ 0x45, 1, 0x80, 0x00 },
{ 0x40, 1, 0x08, 0x00 }
};
if (!pci_test_config_bits(pdev, &opti_enable_bits[ap->port_no]))
return -ENOENT;
return ata_sff_prereset(link, deadline);
}
/**
* opti_write_reg - control register setup
* @ap: ATA port
* @val: value
* @reg: control register number
*
* The Opti uses magic 'trapdoor' register accesses to do configuration
* rather than using PCI space as other controllers do. The double inw
* on the error register activates configuration mode. We can then write
* the control register.
*/
static void opti_write_reg(struct ata_port *ap, u8 val, int reg)
{
void __iomem *regio = ap->ioaddr.cmd_addr;
/* These 3 unlock the control register access */
ioread16(regio + 1);
ioread16(regio + 1);
iowrite8(3, regio + 2);
/* Do the I/O */
iowrite8(val, regio + reg);
/* Relock */
iowrite8(0x83, regio + 2);
}
/**
* opti_set_piomode - set initial PIO mode data
* @ap: ATA interface
* @adev: ATA device
*
* Called to do the PIO mode setup. Timing numbers are taken from
* the FreeBSD driver and then precomputed to keep the code clean. There
* are two tables depending on the hardware clock speed.
*/
static void opti_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
struct ata_device *pair = ata_dev_pair(adev);
int clock;
int pio = adev->pio_mode - XFER_PIO_0;
void __iomem *regio = ap->ioaddr.cmd_addr;
u8 addr;
/* Address table precomputed with prefetch off and a DCLK of 2 */
static const u8 addr_timing[2][5] = {
{ 0x30, 0x20, 0x20, 0x10, 0x10 },
{ 0x20, 0x20, 0x10, 0x10, 0x10 }
};
static const u8 data_rec_timing[2][5] = {
{ 0x6B, 0x56, 0x42, 0x32, 0x31 },
{ 0x58, 0x44, 0x32, 0x22, 0x21 }
};
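/*
* Bit 0 of the strap register selects which of the two clock-dependent
* timing tables applies to this chip.
*/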
iowrite8(0xff, regio + 5);
clock = ioread16(regio + 5) & 1;
/*
* As with many controllers the address setup time is shared
* and must suit both devices if present.
*/
addr = addr_timing[clock][pio];
if (pair) {
/* Hardware constraint */
u8 pair_addr = addr_timing[clock][pair->pio_mode - XFER_PIO_0];
if (pair_addr > addr)
addr = pair_addr;
}
/* Commence primary programming sequence */
opti_write_reg(ap, adev->devno, MISC_REG);
opti_write_reg(ap, data_rec_timing[clock][pio], READ_REG);
opti_write_reg(ap, data_rec_timing[clock][pio], WRITE_REG);
opti_write_reg(ap, addr, MISC_REG);
/* Programming sequence complete, override strapping */
opti_write_reg(ap, 0x85, CNTRL_REG);
}
static const struct scsi_host_template opti_sht = {
ATA_PIO_SHT(DRV_NAME),
};
static struct ata_port_operations opti_port_ops = {
.inherits = &ata_sff_port_ops,
.cable_detect = ata_cable_40wire,
.set_piomode = opti_set_piomode,
.prereset = opti_pre_reset,
};
static int opti_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.port_ops = &opti_port_ops
};
const struct ata_port_info *ppi[] = { &info, NULL };
ata_print_version_once(&dev->dev, DRV_VERSION);
return ata_pci_sff_init_one(dev, ppi, &opti_sht, NULL, 0);
}
static const struct pci_device_id opti[] = {
{ PCI_VDEVICE(OPTI, PCI_DEVICE_ID_OPTI_82C621), 0 },
{ PCI_VDEVICE(OPTI, PCI_DEVICE_ID_OPTI_82C825), 1 },
{ },
};
static struct pci_driver opti_pci_driver = {
.name = DRV_NAME,
.id_table = opti,
.probe = opti_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
};
module_pci_driver(opti_pci_driver);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for Opti 621/621X");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, opti);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_opti.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Generic PXA PATA driver
*
* Copyright (C) 2010 Marek Vasut <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/ata.h>
#include <linux/libata.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <scsi/scsi_host.h>
#include <linux/platform_data/ata-pxa.h>
#define DRV_NAME "pata_pxa"
#define DRV_VERSION "0.1"
struct pata_pxa_data {
struct dma_chan *dma_chan;
dma_cookie_t dma_cookie;
struct completion dma_done;
};
/*
* DMA interrupt handler.
*/
static void pxa_ata_dma_irq(void *d)
{
struct pata_pxa_data *pd = d;
enum dma_status status;
status = dmaengine_tx_status(pd->dma_chan, pd->dma_cookie, NULL);
if (status == DMA_ERROR || status == DMA_COMPLETE)
complete(&pd->dma_done);
}
/*
* Prepare taskfile for submission.
*/
static enum ata_completion_errors pxa_qc_prep(struct ata_queued_cmd *qc)
{
struct pata_pxa_data *pd = qc->ap->private_data;
struct dma_async_tx_descriptor *tx;
enum dma_transfer_direction dir;
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
return AC_ERR_OK;
dir = (qc->dma_dir == DMA_TO_DEVICE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM);
tx = dmaengine_prep_slave_sg(pd->dma_chan, qc->sg, qc->n_elem, dir,
DMA_PREP_INTERRUPT);
if (!tx) {
ata_dev_err(qc->dev, "prep_slave_sg() failed\n");
return AC_ERR_OK;
}
tx->callback = pxa_ata_dma_irq;
tx->callback_param = pd;
pd->dma_cookie = dmaengine_submit(tx);
return AC_ERR_OK;
}
/*
* Configure the DMA controller, load the DMA descriptors, but don't start the
* DMA controller yet. Only issue the ATA command.
*/
static void pxa_bmdma_setup(struct ata_queued_cmd *qc)
{
qc->ap->ops->sff_exec_command(qc->ap, &qc->tf);
}
/*
* Execute the DMA transfer.
*/
static void pxa_bmdma_start(struct ata_queued_cmd *qc)
{
struct pata_pxa_data *pd = qc->ap->private_data;
init_completion(&pd->dma_done);
dma_async_issue_pending(pd->dma_chan);
}
/*
* Wait until the DMA transfer completes, then stop the DMA controller.
*/
static void pxa_bmdma_stop(struct ata_queued_cmd *qc)
{
struct pata_pxa_data *pd = qc->ap->private_data;
enum dma_status status;
status = dmaengine_tx_status(pd->dma_chan, pd->dma_cookie, NULL);
if (status != DMA_ERROR && status != DMA_COMPLETE &&
wait_for_completion_timeout(&pd->dma_done, HZ))
ata_dev_err(qc->dev, "Timeout waiting for DMA completion!");
dmaengine_terminate_all(pd->dma_chan);
}
/*
* Read DMA status. bmdma_stop() takes care of properly finishing the
* DMA transfer, so a DMA-complete interrupt is always pending here.
*/
static unsigned char pxa_bmdma_status(struct ata_port *ap)
{
struct pata_pxa_data *pd = ap->private_data;
unsigned char ret = ATA_DMA_INTR;
struct dma_tx_state state;
enum dma_status status;
status = dmaengine_tx_status(pd->dma_chan, pd->dma_cookie, &state);
if (status != DMA_COMPLETE)
ret |= ATA_DMA_ERR;
return ret;
}
/*
* No IRQ register present so we do nothing.
*/
static void pxa_irq_clear(struct ata_port *ap)
{
}
/*
* Check for ATAPI DMA. ATAPI DMA is unsupported by this driver. It's still
* unclear why ATAPI has DMA issues.
*/
static int pxa_check_atapi_dma(struct ata_queued_cmd *qc)
{
return -EOPNOTSUPP;
}
static const struct scsi_host_template pxa_ata_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static struct ata_port_operations pxa_ata_port_ops = {
.inherits = &ata_bmdma_port_ops,
.cable_detect = ata_cable_40wire,
.bmdma_setup = pxa_bmdma_setup,
.bmdma_start = pxa_bmdma_start,
.bmdma_stop = pxa_bmdma_stop,
.bmdma_status = pxa_bmdma_status,
.check_atapi_dma = pxa_check_atapi_dma,
.sff_irq_clear = pxa_irq_clear,
.qc_prep = pxa_qc_prep,
};
static int pxa_ata_probe(struct platform_device *pdev)
{
struct ata_host *host;
struct ata_port *ap;
struct pata_pxa_data *data;
struct resource *cmd_res;
struct resource *ctl_res;
struct resource *dma_res;
struct pata_pxa_pdata *pdata = dev_get_platdata(&pdev->dev);
struct dma_slave_config config;
int ret = 0;
int irq;
/*
* Resource validation, four resources are needed:
* - CMD port base address
* - CTL port base address
* - DMA port base address
* - IRQ pin
*/
if (pdev->num_resources != 4) {
dev_err(&pdev->dev, "invalid number of resources\n");
return -EINVAL;
}
/*
* CMD port base address
*/
cmd_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (unlikely(cmd_res == NULL))
return -EINVAL;
/*
* CTL port base address
*/
ctl_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (unlikely(ctl_res == NULL))
return -EINVAL;
/*
* DMA port base address
*/
dma_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
if (unlikely(dma_res == NULL))
return -EINVAL;
/*
* IRQ pin
*/
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
/*
* Allocate the host
*/
host = ata_host_alloc(&pdev->dev, 1);
if (!host)
return -ENOMEM;
ap = host->ports[0];
ap->ops = &pxa_ata_port_ops;
ap->pio_mask = ATA_PIO4;
ap->mwdma_mask = ATA_MWDMA2;
ap->ioaddr.cmd_addr = devm_ioremap(&pdev->dev, cmd_res->start,
resource_size(cmd_res));
ap->ioaddr.ctl_addr = devm_ioremap(&pdev->dev, ctl_res->start,
resource_size(ctl_res));
ap->ioaddr.bmdma_addr = devm_ioremap(&pdev->dev, dma_res->start,
resource_size(dma_res));
/*
* Adjust register offsets
*/
ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr;
ap->ioaddr.data_addr = ap->ioaddr.cmd_addr +
(ATA_REG_DATA << pdata->reg_shift);
ap->ioaddr.error_addr = ap->ioaddr.cmd_addr +
(ATA_REG_ERR << pdata->reg_shift);
ap->ioaddr.feature_addr = ap->ioaddr.cmd_addr +
(ATA_REG_FEATURE << pdata->reg_shift);
ap->ioaddr.nsect_addr = ap->ioaddr.cmd_addr +
(ATA_REG_NSECT << pdata->reg_shift);
ap->ioaddr.lbal_addr = ap->ioaddr.cmd_addr +
(ATA_REG_LBAL << pdata->reg_shift);
ap->ioaddr.lbam_addr = ap->ioaddr.cmd_addr +
(ATA_REG_LBAM << pdata->reg_shift);
ap->ioaddr.lbah_addr = ap->ioaddr.cmd_addr +
(ATA_REG_LBAH << pdata->reg_shift);
ap->ioaddr.device_addr = ap->ioaddr.cmd_addr +
(ATA_REG_DEVICE << pdata->reg_shift);
ap->ioaddr.status_addr = ap->ioaddr.cmd_addr +
(ATA_REG_STATUS << pdata->reg_shift);
ap->ioaddr.command_addr = ap->ioaddr.cmd_addr +
(ATA_REG_CMD << pdata->reg_shift);
/*
* Allocate and load driver's internal data structure
*/
data = devm_kzalloc(&pdev->dev, sizeof(struct pata_pxa_data),
GFP_KERNEL);
if (!data)
return -ENOMEM;
ap->private_data = data;
memset(&config, 0, sizeof(config));
config.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
config.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
config.src_addr = dma_res->start;
config.dst_addr = dma_res->start;
config.src_maxburst = 32;
config.dst_maxburst = 32;
/*
* Request the DMA channel
*/
data->dma_chan =
dma_request_slave_channel(&pdev->dev, "data");
if (!data->dma_chan)
return -EBUSY;
ret = dmaengine_slave_config(data->dma_chan, &config);
if (ret < 0) {
dev_err(&pdev->dev, "dma configuration failed: %d\n", ret);
return ret;
}
/*
* Activate the ATA host
*/
ret = ata_host_activate(host, irq, ata_sff_interrupt,
pdata->irq_flags, &pxa_ata_sht);
if (ret)
dma_release_channel(data->dma_chan);
return ret;
}
static void pxa_ata_remove(struct platform_device *pdev)
{
struct ata_host *host = platform_get_drvdata(pdev);
struct pata_pxa_data *data = host->ports[0]->private_data;
dma_release_channel(data->dma_chan);
ata_host_detach(host);
}
static struct platform_driver pxa_ata_driver = {
.probe = pxa_ata_probe,
.remove_new = pxa_ata_remove,
.driver = {
.name = DRV_NAME,
},
};
module_platform_driver(pxa_ata_driver);
MODULE_AUTHOR("Marek Vasut <[email protected]>");
MODULE_DESCRIPTION("DMA-capable driver for PATA on PXA CPU");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);
| linux-master | drivers/ata/pata_pxa.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* drivers/ata/sata_dwc_460ex.c
*
* Synopsys DesignWare Cores (DWC) SATA host driver
*
* Author: Mark Miesfeld <[email protected]>
*
* Ported from 2.6.19.2 to 2.6.25/26 by Stefan Roese <[email protected]>
* Copyright 2008 DENX Software Engineering
*
* Based on versions provided by AMCC and Synopsys which are:
* Copyright 2006 Applied Micro Circuits Corporation
* COPYRIGHT (C) 2005 SYNOPSYS, INC. ALL RIGHTS RESERVED
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/libata.h>
#include <linux/slab.h>
#include <trace/events/libata.h>
#include "libata.h"
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
/* These two are defined in "libata.h" */
#undef DRV_NAME
#undef DRV_VERSION
#define DRV_NAME "sata-dwc"
#define DRV_VERSION "1.3"
#define sata_dwc_writel(a, v) writel_relaxed(v, a)
#define sata_dwc_readl(a) readl_relaxed(a)
#define AHB_DMA_BRST_DFLT 64 /* 16 data items burst length */
enum {
SATA_DWC_MAX_PORTS = 1,
SATA_DWC_SCR_OFFSET = 0x24,
SATA_DWC_REG_OFFSET = 0x64,
};
/* DWC SATA Registers */
struct sata_dwc_regs {
u32 fptagr; /* 1st party DMA tag */
u32 fpbor; /* 1st party DMA buffer offset */
u32 fptcr; /* 1st party DMA Xfr count */
u32 dmacr; /* DMA Control */
u32 dbtsr; /* DMA Burst Transac size */
u32 intpr; /* Interrupt Pending */
u32 intmr; /* Interrupt Mask */
u32 errmr; /* Error Mask */
u32 llcr; /* Link Layer Control */
u32 phycr; /* PHY Control */
u32 physr; /* PHY Status */
u32 rxbistpd; /* Recvd BIST pattern def register */
u32 rxbistpd1; /* Recvd BIST data dword1 */
u32 rxbistpd2; /* Recvd BIST pattern data dword2 */
u32 txbistpd; /* Trans BIST pattern def register */
u32 txbistpd1; /* Trans BIST data dword1 */
u32 txbistpd2; /* Trans BIST data dword2 */
u32 bistcr; /* BIST Control Register */
u32 bistfctr; /* BIST FIS Count Register */
u32 bistsr; /* BIST Status Register */
u32 bistdecr; /* BIST Dword Error count register */
u32 res[15]; /* Reserved locations */
u32 testr; /* Test Register */
u32 versionr; /* Version Register */
u32 idr; /* ID Register */
u32 unimpl[192]; /* Unimplemented */
u32 dmadr[256]; /* FIFO Locations in DMA Mode */
};
enum {
SCR_SCONTROL_DET_ENABLE = 0x00000001,
SCR_SSTATUS_DET_PRESENT = 0x00000001,
SCR_SERROR_DIAG_X = 0x04000000,
/* DWC SATA Register Operations */
SATA_DWC_TXFIFO_DEPTH = 0x01FF,
SATA_DWC_RXFIFO_DEPTH = 0x01FF,
SATA_DWC_DMACR_TMOD_TXCHEN = 0x00000004,
SATA_DWC_DMACR_TXCHEN = (0x00000001 | SATA_DWC_DMACR_TMOD_TXCHEN),
SATA_DWC_DMACR_RXCHEN = (0x00000002 | SATA_DWC_DMACR_TMOD_TXCHEN),
SATA_DWC_DMACR_TXRXCH_CLEAR = SATA_DWC_DMACR_TMOD_TXCHEN,
SATA_DWC_INTPR_DMAT = 0x00000001,
SATA_DWC_INTPR_NEWFP = 0x00000002,
SATA_DWC_INTPR_PMABRT = 0x00000004,
SATA_DWC_INTPR_ERR = 0x00000008,
SATA_DWC_INTPR_NEWBIST = 0x00000010,
SATA_DWC_INTPR_IPF = 0x10000000,
SATA_DWC_INTMR_DMATM = 0x00000001,
SATA_DWC_INTMR_NEWFPM = 0x00000002,
SATA_DWC_INTMR_PMABRTM = 0x00000004,
SATA_DWC_INTMR_ERRM = 0x00000008,
SATA_DWC_INTMR_NEWBISTM = 0x00000010,
SATA_DWC_LLCR_SCRAMEN = 0x00000001,
SATA_DWC_LLCR_DESCRAMEN = 0x00000002,
SATA_DWC_LLCR_RPDEN = 0x00000004,
	/* These are all the error bits; zeros are reserved fields. */
SATA_DWC_SERROR_ERR_BITS = 0x0FFF0F03
};
#define SATA_DWC_SCR0_SPD_GET(v) (((v) >> 4) & 0x0000000F)
#define SATA_DWC_DMACR_TX_CLEAR(v) (((v) & ~SATA_DWC_DMACR_TXCHEN) |\
SATA_DWC_DMACR_TMOD_TXCHEN)
#define SATA_DWC_DMACR_RX_CLEAR(v) (((v) & ~SATA_DWC_DMACR_RXCHEN) |\
SATA_DWC_DMACR_TMOD_TXCHEN)
#define SATA_DWC_DBTSR_MWR(size) (((size)/4) & SATA_DWC_TXFIFO_DEPTH)
#define SATA_DWC_DBTSR_MRD(size) ((((size)/4) & SATA_DWC_RXFIFO_DEPTH)\
<< 16)
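/*
 * DBTSR helpers: convert a burst size in bytes into the number of 32-bit
 * words programmed into the DMA Burst Transaction Size register. MWR fills
 * the write-burst field in the low half, MRD the read-burst field shifted
 * into the high half. Both are used with AHB_DMA_BRST_DFLT below.
 */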
struct sata_dwc_device {
struct device *dev; /* generic device struct */
struct ata_probe_ent *pe; /* ptr to probe-ent */
struct ata_host *host;
struct sata_dwc_regs __iomem *sata_dwc_regs; /* DW SATA specific */
u32 sactive_issued;
u32 sactive_queued;
struct phy *phy;
phys_addr_t dmadr;
#ifdef CONFIG_SATA_DWC_OLD_DMA
struct dw_dma_chip *dma;
#endif
};
/*
* Allow one extra special slot for commands and DMA management
* to account for libata internal commands.
*/
#define SATA_DWC_QCMD_MAX (ATA_MAX_QUEUE + 1)
struct sata_dwc_device_port {
struct sata_dwc_device *hsdev;
int cmd_issued[SATA_DWC_QCMD_MAX];
int dma_pending[SATA_DWC_QCMD_MAX];
/* DMA info */
struct dma_chan *chan;
struct dma_async_tx_descriptor *desc[SATA_DWC_QCMD_MAX];
u32 dma_interrupt_count;
};
/*
* Commonly used DWC SATA driver macros
*/
#define HSDEV_FROM_HOST(host) ((struct sata_dwc_device *)(host)->private_data)
#define HSDEV_FROM_AP(ap) ((struct sata_dwc_device *)(ap)->host->private_data)
#define HSDEVP_FROM_AP(ap) ((struct sata_dwc_device_port *)(ap)->private_data)
#define HSDEV_FROM_QC(qc) ((struct sata_dwc_device *)(qc)->ap->host->private_data)
#define HSDEV_FROM_HSDEVP(p) ((struct sata_dwc_device *)(p)->hsdev)
enum {
SATA_DWC_CMD_ISSUED_NOT = 0,
SATA_DWC_CMD_ISSUED_PEND = 1,
SATA_DWC_CMD_ISSUED_EXEC = 2,
SATA_DWC_CMD_ISSUED_NODATA = 3,
SATA_DWC_DMA_PENDING_NONE = 0,
SATA_DWC_DMA_PENDING_TX = 1,
SATA_DWC_DMA_PENDING_RX = 2,
};
/*
* Prototypes
*/
static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag);
static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc);
static void sata_dwc_dma_xfer_complete(struct ata_port *ap);
static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag);
#ifdef CONFIG_SATA_DWC_OLD_DMA
#include <linux/platform_data/dma-dw.h>
#include <linux/dma/dw.h>
static struct dw_dma_slave sata_dwc_dma_dws = {
.src_id = 0,
.dst_id = 0,
.m_master = 1,
.p_master = 0,
};
static bool sata_dwc_dma_filter(struct dma_chan *chan, void *param)
{
struct dw_dma_slave *dws = &sata_dwc_dma_dws;
if (dws->dma_dev != chan->device->dev)
return false;
chan->private = dws;
return true;
}
static int sata_dwc_dma_get_channel_old(struct sata_dwc_device_port *hsdevp)
{
struct sata_dwc_device *hsdev = hsdevp->hsdev;
struct dw_dma_slave *dws = &sata_dwc_dma_dws;
struct device *dev = hsdev->dev;
dma_cap_mask_t mask;
dws->dma_dev = dev;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
/* Acquire DMA channel */
hsdevp->chan = dma_request_channel(mask, sata_dwc_dma_filter, hsdevp);
if (!hsdevp->chan) {
dev_err(dev, "%s: dma channel unavailable\n", __func__);
return -EAGAIN;
}
return 0;
}
static int sata_dwc_dma_init_old(struct platform_device *pdev,
struct sata_dwc_device *hsdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
hsdev->dma = devm_kzalloc(dev, sizeof(*hsdev->dma), GFP_KERNEL);
if (!hsdev->dma)
return -ENOMEM;
hsdev->dma->dev = dev;
hsdev->dma->id = pdev->id;
/* Get SATA DMA interrupt number */
hsdev->dma->irq = irq_of_parse_and_map(np, 1);
if (!hsdev->dma->irq) {
dev_err(dev, "no SATA DMA irq\n");
return -ENODEV;
}
/* Get physical SATA DMA register base address */
hsdev->dma->regs = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(hsdev->dma->regs))
return PTR_ERR(hsdev->dma->regs);
/* Initialize AHB DMAC */
return dw_dma_probe(hsdev->dma);
}
static void sata_dwc_dma_exit_old(struct sata_dwc_device *hsdev)
{
if (!hsdev->dma)
return;
dw_dma_remove(hsdev->dma);
}
#endif
static const char *get_prot_descript(u8 protocol)
{
switch (protocol) {
case ATA_PROT_NODATA:
return "ATA no data";
case ATA_PROT_PIO:
return "ATA PIO";
case ATA_PROT_DMA:
return "ATA DMA";
case ATA_PROT_NCQ:
return "ATA NCQ";
case ATA_PROT_NCQ_NODATA:
return "ATA NCQ no data";
case ATAPI_PROT_NODATA:
return "ATAPI no data";
case ATAPI_PROT_PIO:
return "ATAPI PIO";
case ATAPI_PROT_DMA:
return "ATAPI DMA";
default:
return "unknown";
}
}
static void dma_dwc_xfer_done(void *hsdev_instance)
{
unsigned long flags;
struct sata_dwc_device *hsdev = hsdev_instance;
struct ata_host *host = (struct ata_host *)hsdev->host;
struct ata_port *ap;
struct sata_dwc_device_port *hsdevp;
u8 tag = 0;
unsigned int port = 0;
spin_lock_irqsave(&host->lock, flags);
ap = host->ports[port];
hsdevp = HSDEVP_FROM_AP(ap);
tag = ap->link.active_tag;
/*
* Each DMA command produces 2 interrupts. Only
* complete the command after both interrupts have been
* seen. (See sata_dwc_isr())
*/
hsdevp->dma_interrupt_count++;
sata_dwc_clear_dmacr(hsdevp, tag);
if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) {
dev_err(ap->dev, "DMA not pending tag=0x%02x pending=%d\n",
tag, hsdevp->dma_pending[tag]);
}
if ((hsdevp->dma_interrupt_count % 2) == 0)
sata_dwc_dma_xfer_complete(ap);
spin_unlock_irqrestore(&host->lock, flags);
}
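/*
 * Configure the generic DMA channel for this queued command's direction and
 * prepare a slave scatter/gather descriptor. The descriptor completion
 * callback is dma_dwc_xfer_done(), which accounts for the second of the two
 * per-transfer interrupts.
 */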
static struct dma_async_tx_descriptor *dma_dwc_xfer_setup(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
struct dma_slave_config sconf;
struct dma_async_tx_descriptor *desc;
if (qc->dma_dir == DMA_DEV_TO_MEM) {
sconf.src_addr = hsdev->dmadr;
sconf.device_fc = false;
} else { /* DMA_MEM_TO_DEV */
sconf.dst_addr = hsdev->dmadr;
sconf.device_fc = false;
}
sconf.direction = qc->dma_dir;
sconf.src_maxburst = AHB_DMA_BRST_DFLT / 4; /* in items */
sconf.dst_maxburst = AHB_DMA_BRST_DFLT / 4; /* in items */
sconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
sconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
dmaengine_slave_config(hsdevp->chan, &sconf);
/* Convert SG list to linked list of items (LLIs) for AHB DMA */
desc = dmaengine_prep_slave_sg(hsdevp->chan, qc->sg, qc->n_elem,
qc->dma_dir,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc)
return NULL;
desc->callback = dma_dwc_xfer_done;
desc->callback_param = hsdev;
dev_dbg(hsdev->dev, "%s sg: 0x%p, count: %d addr: %pa\n", __func__,
qc->sg, qc->n_elem, &hsdev->dmadr);
return desc;
}
static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
{
if (scr > SCR_NOTIFICATION) {
dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
__func__, scr);
return -EINVAL;
}
*val = sata_dwc_readl(link->ap->ioaddr.scr_addr + (scr * 4));
dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n", __func__,
link->ap->print_id, scr, *val);
return 0;
}
static int sata_dwc_scr_write(struct ata_link *link, unsigned int scr, u32 val)
{
dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n", __func__,
link->ap->print_id, scr, val);
if (scr > SCR_NOTIFICATION) {
dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
__func__, scr);
return -EINVAL;
}
sata_dwc_writel(link->ap->ioaddr.scr_addr + (scr * 4), val);
return 0;
}
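/*
 * SError bits are write-1-to-clear, so reading the register and writing the
 * value back clears every error bit that is currently set.
 */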
static void clear_serror(struct ata_port *ap)
{
u32 val;
sata_dwc_scr_read(&ap->link, SCR_ERROR, &val);
sata_dwc_scr_write(&ap->link, SCR_ERROR, val);
}
static void clear_interrupt_bit(struct sata_dwc_device *hsdev, u32 bit)
{
sata_dwc_writel(&hsdev->sata_dwc_regs->intpr,
sata_dwc_readl(&hsdev->sata_dwc_regs->intpr));
}
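/* Convert a queued-command tag (0-31) into its single-bit sactive mask. */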
static u32 qcmd_tag_to_mask(u8 tag)
{
return 0x00000001 << (tag & 0x1f);
}
/* See ahci.c */
static void sata_dwc_error_intr(struct ata_port *ap,
struct sata_dwc_device *hsdev, uint intpr)
{
struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
struct ata_eh_info *ehi = &ap->link.eh_info;
unsigned int err_mask = 0, action = 0;
struct ata_queued_cmd *qc;
u32 serror;
u8 status, tag;
ata_ehi_clear_desc(ehi);
sata_dwc_scr_read(&ap->link, SCR_ERROR, &serror);
status = ap->ops->sff_check_status(ap);
tag = ap->link.active_tag;
dev_err(ap->dev,
"%s SCR_ERROR=0x%08x intpr=0x%08x status=0x%08x dma_intp=%d pending=%d issued=%d",
__func__, serror, intpr, status, hsdevp->dma_interrupt_count,
hsdevp->dma_pending[tag], hsdevp->cmd_issued[tag]);
/* Clear error register and interrupt bit */
clear_serror(ap);
clear_interrupt_bit(hsdev, SATA_DWC_INTPR_ERR);
	/* This is the only error handled for now. TODO: check for the exact error. */
err_mask |= AC_ERR_HOST_BUS;
action |= ATA_EH_RESET;
/* Pass this on to EH */
ehi->serror |= serror;
ehi->action |= action;
qc = ata_qc_from_tag(ap, tag);
if (qc)
qc->err_mask |= err_mask;
else
ehi->err_mask |= err_mask;
ata_port_abort(ap);
}
/*
* Function : sata_dwc_isr
* arguments : irq, void *dev_instance, struct pt_regs *regs
* Return value : irqreturn_t - status of IRQ
 * This interrupt handler is called via the registered port ops function:
* .irq_handler = sata_dwc_isr
*/
static irqreturn_t sata_dwc_isr(int irq, void *dev_instance)
{
struct ata_host *host = (struct ata_host *)dev_instance;
struct sata_dwc_device *hsdev = HSDEV_FROM_HOST(host);
struct ata_port *ap;
struct ata_queued_cmd *qc;
unsigned long flags;
u8 status, tag;
int handled, port = 0;
uint intpr, sactive, sactive2, tag_mask;
struct sata_dwc_device_port *hsdevp;
hsdev->sactive_issued = 0;
spin_lock_irqsave(&host->lock, flags);
/* Read the interrupt register */
intpr = sata_dwc_readl(&hsdev->sata_dwc_regs->intpr);
ap = host->ports[port];
hsdevp = HSDEVP_FROM_AP(ap);
dev_dbg(ap->dev, "%s intpr=0x%08x active_tag=%d\n", __func__, intpr,
ap->link.active_tag);
/* Check for error interrupt */
if (intpr & SATA_DWC_INTPR_ERR) {
sata_dwc_error_intr(ap, hsdev, intpr);
handled = 1;
goto DONE;
}
/* Check for DMA SETUP FIS (FP DMA) interrupt */
if (intpr & SATA_DWC_INTPR_NEWFP) {
clear_interrupt_bit(hsdev, SATA_DWC_INTPR_NEWFP);
tag = (u8)(sata_dwc_readl(&hsdev->sata_dwc_regs->fptagr));
dev_dbg(ap->dev, "%s: NEWFP tag=%d\n", __func__, tag);
if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_PEND)
dev_warn(ap->dev, "CMD tag=%d not pending?\n", tag);
hsdev->sactive_issued |= qcmd_tag_to_mask(tag);
qc = ata_qc_from_tag(ap, tag);
if (unlikely(!qc)) {
dev_err(ap->dev, "failed to get qc");
handled = 1;
goto DONE;
}
/*
* Start FP DMA for NCQ command. At this point the tag is the
* active tag. It is the tag that matches the command about to
* be completed.
*/
trace_ata_bmdma_start(ap, &qc->tf, tag);
qc->ap->link.active_tag = tag;
sata_dwc_bmdma_start_by_tag(qc, tag);
handled = 1;
goto DONE;
}
sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
tag_mask = (hsdev->sactive_issued | sactive) ^ sactive;
/* If no sactive issued and tag_mask is zero then this is not NCQ */
if (hsdev->sactive_issued == 0 && tag_mask == 0) {
if (ap->link.active_tag == ATA_TAG_POISON)
tag = 0;
else
tag = ap->link.active_tag;
qc = ata_qc_from_tag(ap, tag);
/* DEV interrupt w/ no active qc? */
if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
dev_err(ap->dev,
"%s interrupt with no active qc qc=%p\n",
__func__, qc);
ap->ops->sff_check_status(ap);
handled = 1;
goto DONE;
}
status = ap->ops->sff_check_status(ap);
qc->ap->link.active_tag = tag;
hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;
if (status & ATA_ERR) {
dev_dbg(ap->dev, "interrupt ATA_ERR (0x%x)\n", status);
sata_dwc_qc_complete(ap, qc);
handled = 1;
goto DONE;
}
dev_dbg(ap->dev, "%s non-NCQ cmd interrupt, protocol: %s\n",
__func__, get_prot_descript(qc->tf.protocol));
DRVSTILLBUSY:
if (ata_is_dma(qc->tf.protocol)) {
/*
* Each DMA transaction produces 2 interrupts. The DMAC
* transfer complete interrupt and the SATA controller
* operation done interrupt. The command should be
* completed only after both interrupts are seen.
*/
hsdevp->dma_interrupt_count++;
if (hsdevp->dma_pending[tag] == \
SATA_DWC_DMA_PENDING_NONE) {
dev_err(ap->dev,
"%s: DMA not pending intpr=0x%08x status=0x%08x pending=%d\n",
__func__, intpr, status,
hsdevp->dma_pending[tag]);
}
if ((hsdevp->dma_interrupt_count % 2) == 0)
sata_dwc_dma_xfer_complete(ap);
} else if (ata_is_pio(qc->tf.protocol)) {
ata_sff_hsm_move(ap, qc, status, 0);
handled = 1;
goto DONE;
} else {
if (unlikely(sata_dwc_qc_complete(ap, qc)))
goto DRVSTILLBUSY;
}
handled = 1;
goto DONE;
}
/*
	 * This is an NCQ command. At this point we need to figure out for which
* tags we have gotten a completion interrupt. One interrupt may serve
* as completion for more than one operation when commands are queued
* (NCQ). We need to process each completed command.
*/
/* process completed commands */
sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
tag_mask = (hsdev->sactive_issued | sactive) ^ sactive;
if (sactive != 0 || hsdev->sactive_issued > 1 || tag_mask > 1) {
dev_dbg(ap->dev,
"%s NCQ:sactive=0x%08x sactive_issued=0x%08x tag_mask=0x%08x\n",
__func__, sactive, hsdev->sactive_issued, tag_mask);
}
if ((tag_mask | hsdev->sactive_issued) != hsdev->sactive_issued) {
dev_warn(ap->dev,
"Bad tag mask? sactive=0x%08x sactive_issued=0x%08x tag_mask=0x%08x\n",
sactive, hsdev->sactive_issued, tag_mask);
}
/* read just to clear ... not bad if currently still busy */
status = ap->ops->sff_check_status(ap);
dev_dbg(ap->dev, "%s ATA status register=0x%x\n", __func__, status);
tag = 0;
while (tag_mask) {
while (!(tag_mask & 0x00000001)) {
tag++;
tag_mask <<= 1;
}
tag_mask &= (~0x00000001);
qc = ata_qc_from_tag(ap, tag);
if (unlikely(!qc)) {
dev_err(ap->dev, "failed to get qc");
handled = 1;
goto DONE;
}
/* To be picked up by completion functions */
qc->ap->link.active_tag = tag;
hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;
/* Let libata/scsi layers handle error */
if (status & ATA_ERR) {
dev_dbg(ap->dev, "%s ATA_ERR (0x%x)\n", __func__,
status);
sata_dwc_qc_complete(ap, qc);
handled = 1;
goto DONE;
}
/* Process completed command */
dev_dbg(ap->dev, "%s NCQ command, protocol: %s\n", __func__,
get_prot_descript(qc->tf.protocol));
if (ata_is_dma(qc->tf.protocol)) {
hsdevp->dma_interrupt_count++;
if (hsdevp->dma_pending[tag] == \
SATA_DWC_DMA_PENDING_NONE)
dev_warn(ap->dev, "%s: DMA not pending?\n",
__func__);
if ((hsdevp->dma_interrupt_count % 2) == 0)
sata_dwc_dma_xfer_complete(ap);
} else {
if (unlikely(sata_dwc_qc_complete(ap, qc)))
goto STILLBUSY;
}
continue;
STILLBUSY:
ap->stats.idle_irq++;
dev_warn(ap->dev, "STILL BUSY IRQ ata%d: irq trap\n",
ap->print_id);
} /* while tag_mask */
/*
* Check to see if any commands completed while we were processing our
* initial set of completed commands (read status clears interrupts,
* so we might miss a completed command interrupt if one came in while
	 * we were processing -- we read status as part of processing a completed
* command).
*/
sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive2);
if (sactive2 != sactive) {
dev_dbg(ap->dev,
"More completed - sactive=0x%x sactive2=0x%x\n",
sactive, sactive2);
}
handled = 1;
DONE:
spin_unlock_irqrestore(&host->lock, flags);
return IRQ_RETVAL(handled);
}
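/*
 * Clear the TX or RX channel enable bit in DMACR for the direction that was
 * pending on this tag. If neither direction is pending the driver is out of
 * sync, so reset DMACR to its cleared state as a fallback.
 */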
static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag)
{
struct sata_dwc_device *hsdev = HSDEV_FROM_HSDEVP(hsdevp);
u32 dmacr = sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr);
if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX) {
dmacr = SATA_DWC_DMACR_RX_CLEAR(dmacr);
sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, dmacr);
} else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX) {
dmacr = SATA_DWC_DMACR_TX_CLEAR(dmacr);
sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, dmacr);
} else {
/*
		 * This should not happen; it indicates the driver is out of
* sync. If it does happen, clear dmacr anyway.
*/
dev_err(hsdev->dev,
"%s DMA protocol RX and TX DMA not pending tag=0x%02x pending=%d dmacr: 0x%08x\n",
__func__, tag, hsdevp->dma_pending[tag], dmacr);
sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
SATA_DWC_DMACR_TXRXCH_CLEAR);
}
}
static void sata_dwc_dma_xfer_complete(struct ata_port *ap)
{
struct ata_queued_cmd *qc;
struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
u8 tag = 0;
tag = ap->link.active_tag;
qc = ata_qc_from_tag(ap, tag);
if (!qc) {
dev_err(ap->dev, "failed to get qc");
return;
}
if (ata_is_dma(qc->tf.protocol)) {
if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) {
dev_err(ap->dev,
"%s DMA protocol RX and TX DMA not pending dmacr: 0x%08x\n",
__func__,
sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr));
}
hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE;
sata_dwc_qc_complete(ap, qc);
ap->link.active_tag = ATA_TAG_POISON;
} else {
sata_dwc_qc_complete(ap, qc);
}
}
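/*
 * Complete a queued command: drop its tag from the issued/queued masks and
 * hand the qc back to libata.
 */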
static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc)
{
u8 status = 0;
u32 mask = 0x0;
u8 tag = qc->hw_tag;
struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
hsdev->sactive_queued = 0;
if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX)
dev_err(ap->dev, "TX DMA PENDING\n");
else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX)
dev_err(ap->dev, "RX DMA PENDING\n");
dev_dbg(ap->dev,
"QC complete cmd=0x%02x status=0x%02x ata%u: protocol=%d\n",
qc->tf.command, status, ap->print_id, qc->tf.protocol);
/* clear active bit */
mask = (~(qcmd_tag_to_mask(tag)));
hsdev->sactive_queued = hsdev->sactive_queued & mask;
hsdev->sactive_issued = hsdev->sactive_issued & mask;
ata_qc_complete(qc);
return 0;
}
static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev)
{
	/* Enable selective interrupts by setting the interrupt mask register */
sata_dwc_writel(&hsdev->sata_dwc_regs->intmr,
SATA_DWC_INTMR_ERRM |
SATA_DWC_INTMR_NEWFPM |
SATA_DWC_INTMR_PMABRTM |
SATA_DWC_INTMR_DMATM);
/*
* Unmask the error bits that should trigger an error interrupt by
* setting the error mask register.
*/
sata_dwc_writel(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERROR_ERR_BITS);
dev_dbg(hsdev->dev, "%s: INTMR = 0x%08x, ERRMR = 0x%08x\n",
__func__, sata_dwc_readl(&hsdev->sata_dwc_regs->intmr),
sata_dwc_readl(&hsdev->sata_dwc_regs->errmr));
}
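/*
 * Map the libata taskfile register addresses. The DWC core places the shadow
 * registers at 4-byte strides from the port base, with status/command sharing
 * offset 0x1c and altstatus/ctl sharing offset 0x20.
 */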
static void sata_dwc_setup_port(struct ata_ioports *port, void __iomem *base)
{
port->cmd_addr = base + 0x00;
port->data_addr = base + 0x00;
port->error_addr = base + 0x04;
port->feature_addr = base + 0x04;
port->nsect_addr = base + 0x08;
port->lbal_addr = base + 0x0c;
port->lbam_addr = base + 0x10;
port->lbah_addr = base + 0x14;
port->device_addr = base + 0x18;
port->command_addr = base + 0x1c;
port->status_addr = base + 0x1c;
port->altstatus_addr = base + 0x20;
port->ctl_addr = base + 0x20;
}
static int sata_dwc_dma_get_channel(struct sata_dwc_device_port *hsdevp)
{
struct sata_dwc_device *hsdev = hsdevp->hsdev;
struct device *dev = hsdev->dev;
#ifdef CONFIG_SATA_DWC_OLD_DMA
if (!of_property_present(dev->of_node, "dmas"))
return sata_dwc_dma_get_channel_old(hsdevp);
#endif
hsdevp->chan = dma_request_chan(dev, "sata-dma");
if (IS_ERR(hsdevp->chan)) {
dev_err(dev, "failed to allocate dma channel: %ld\n",
PTR_ERR(hsdevp->chan));
return PTR_ERR(hsdevp->chan);
}
return 0;
}
/*
* Function : sata_dwc_port_start
 * arguments : struct ata_port *ap
* Return value : returns 0 if success, error code otherwise
* This function allocates the scatter gather LLI table for AHB DMA
*/
static int sata_dwc_port_start(struct ata_port *ap)
{
int err = 0;
struct sata_dwc_device *hsdev;
struct sata_dwc_device_port *hsdevp = NULL;
struct device *pdev;
int i;
hsdev = HSDEV_FROM_AP(ap);
dev_dbg(ap->dev, "%s: port_no=%d\n", __func__, ap->port_no);
hsdev->host = ap->host;
pdev = ap->host->dev;
if (!pdev) {
dev_err(ap->dev, "%s: no ap->host->dev\n", __func__);
err = -ENODEV;
goto CLEANUP;
}
/* Allocate Port Struct */
hsdevp = kzalloc(sizeof(*hsdevp), GFP_KERNEL);
if (!hsdevp) {
err = -ENOMEM;
goto CLEANUP;
}
hsdevp->hsdev = hsdev;
err = sata_dwc_dma_get_channel(hsdevp);
if (err)
goto CLEANUP_ALLOC;
err = phy_power_on(hsdev->phy);
if (err)
goto CLEANUP_ALLOC;
for (i = 0; i < SATA_DWC_QCMD_MAX; i++)
hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT;
ap->bmdma_prd = NULL; /* set these so libata doesn't use them */
ap->bmdma_prd_dma = 0;
if (ap->port_no == 0) {
dev_dbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n",
__func__);
sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
SATA_DWC_DMACR_TXRXCH_CLEAR);
dev_dbg(ap->dev, "%s: setting burst size in DBTSR\n",
__func__);
sata_dwc_writel(&hsdev->sata_dwc_regs->dbtsr,
(SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT)));
}
/* Clear any error bits before libata starts issuing commands */
clear_serror(ap);
ap->private_data = hsdevp;
dev_dbg(ap->dev, "%s: done\n", __func__);
return 0;
CLEANUP_ALLOC:
kfree(hsdevp);
CLEANUP:
dev_dbg(ap->dev, "%s: fail. ap->id = %d\n", __func__, ap->print_id);
return err;
}
static void sata_dwc_port_stop(struct ata_port *ap)
{
struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
dev_dbg(ap->dev, "%s: ap->id = %d\n", __func__, ap->print_id);
dmaengine_terminate_sync(hsdevp->chan);
dma_release_channel(hsdevp->chan);
phy_power_off(hsdev->phy);
kfree(hsdevp);
ap->private_data = NULL;
}
/*
* Function : sata_dwc_exec_command_by_tag
* arguments : ata_port *ap, ata_taskfile *tf, u8 tag, u32 cmd_issued
* Return value : None
* This function keeps track of individual command tag ids and calls
* ata_exec_command in libata
*/
static void sata_dwc_exec_command_by_tag(struct ata_port *ap,
struct ata_taskfile *tf,
u8 tag, u32 cmd_issued)
{
struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
hsdevp->cmd_issued[tag] = cmd_issued;
/*
* Clear SError before executing a new command.
* sata_dwc_scr_write and read can not be used here. Clearing the PM
* managed SError register for the disk needs to be done before the
* task file is loaded.
*/
clear_serror(ap);
ata_sff_exec_command(ap, tf);
}
static void sata_dwc_bmdma_setup_by_tag(struct ata_queued_cmd *qc, u8 tag)
{
sata_dwc_exec_command_by_tag(qc->ap, &qc->tf, tag,
SATA_DWC_CMD_ISSUED_PEND);
}
static void sata_dwc_bmdma_setup(struct ata_queued_cmd *qc)
{
u8 tag = qc->hw_tag;
if (!ata_is_ncq(qc->tf.protocol))
tag = 0;
sata_dwc_bmdma_setup_by_tag(qc, tag);
}
static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag)
{
int start_dma;
u32 reg;
struct sata_dwc_device *hsdev = HSDEV_FROM_QC(qc);
struct ata_port *ap = qc->ap;
struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
struct dma_async_tx_descriptor *desc = hsdevp->desc[tag];
int dir = qc->dma_dir;
if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_NOT) {
start_dma = 1;
if (dir == DMA_TO_DEVICE)
hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_TX;
else
hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_RX;
} else {
dev_err(ap->dev,
"%s: Command not pending cmd_issued=%d (tag=%d) DMA NOT started\n",
__func__, hsdevp->cmd_issued[tag], tag);
start_dma = 0;
}
if (start_dma) {
		sata_dwc_scr_read(&ap->link, SCR_ERROR, &reg);
if (reg & SATA_DWC_SERROR_ERR_BITS) {
dev_err(ap->dev, "%s: ****** SError=0x%08x ******\n",
__func__, reg);
}
if (dir == DMA_TO_DEVICE)
sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
SATA_DWC_DMACR_TXCHEN);
else
sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
SATA_DWC_DMACR_RXCHEN);
/* Enable AHB DMA transfer on the specified channel */
dmaengine_submit(desc);
dma_async_issue_pending(hsdevp->chan);
}
}
static void sata_dwc_bmdma_start(struct ata_queued_cmd *qc)
{
u8 tag = qc->hw_tag;
if (!ata_is_ncq(qc->tf.protocol))
tag = 0;
sata_dwc_bmdma_start_by_tag(qc, tag);
}
static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc)
{
u32 sactive;
u8 tag = qc->hw_tag;
struct ata_port *ap = qc->ap;
struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
if (!ata_is_ncq(qc->tf.protocol))
tag = 0;
if (ata_is_dma(qc->tf.protocol)) {
hsdevp->desc[tag] = dma_dwc_xfer_setup(qc);
if (!hsdevp->desc[tag])
return AC_ERR_SYSTEM;
} else {
hsdevp->desc[tag] = NULL;
}
if (ata_is_ncq(qc->tf.protocol)) {
sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
sactive |= (0x00000001 << tag);
sata_dwc_scr_write(&ap->link, SCR_ACTIVE, sactive);
trace_ata_tf_load(ap, &qc->tf);
ap->ops->sff_tf_load(ap, &qc->tf);
trace_ata_exec_command(ap, &qc->tf, tag);
sata_dwc_exec_command_by_tag(ap, &qc->tf, tag,
SATA_DWC_CMD_ISSUED_PEND);
} else {
return ata_bmdma_qc_issue(qc);
}
return 0;
}
static void sata_dwc_error_handler(struct ata_port *ap)
{
ata_sff_error_handler(ap);
}
static int sata_dwc_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
struct sata_dwc_device *hsdev = HSDEV_FROM_AP(link->ap);
int ret;
ret = sata_sff_hardreset(link, class, deadline);
sata_dwc_enable_interrupts(hsdev);
/* Reconfigure the DMA control register */
sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
SATA_DWC_DMACR_TXRXCH_CLEAR);
/* Reconfigure the DMA Burst Transaction Size register */
sata_dwc_writel(&hsdev->sata_dwc_regs->dbtsr,
SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT));
return ret;
}
static void sata_dwc_dev_select(struct ata_port *ap, unsigned int device)
{
/* SATA DWC is master only */
}
/*
* scsi mid-layer and libata interface structures
*/
static const struct scsi_host_template sata_dwc_sht = {
ATA_NCQ_SHT(DRV_NAME),
/*
* test-only: Currently this driver doesn't handle NCQ
* correctly. We enable NCQ but set the queue depth to a
	 * max of 1. This will get fixed in a future release.
*/
.sg_tablesize = LIBATA_MAX_PRD,
/* .can_queue = ATA_MAX_QUEUE, */
/*
* Make sure a LLI block is not created that will span 8K max FIS
* boundary. If the block spans such a FIS boundary, there is a chance
* that a DMA burst will cross that boundary -- this results in an
* error in the host controller.
*/
.dma_boundary = 0x1fff /* ATA_DMA_BOUNDARY */,
};
static struct ata_port_operations sata_dwc_ops = {
.inherits = &ata_sff_port_ops,
.error_handler = sata_dwc_error_handler,
.hardreset = sata_dwc_hardreset,
.qc_issue = sata_dwc_qc_issue,
.scr_read = sata_dwc_scr_read,
.scr_write = sata_dwc_scr_write,
.port_start = sata_dwc_port_start,
.port_stop = sata_dwc_port_stop,
.sff_dev_select = sata_dwc_dev_select,
.bmdma_setup = sata_dwc_bmdma_setup,
.bmdma_start = sata_dwc_bmdma_start,
};
static const struct ata_port_info sata_dwc_port_info[] = {
{
.flags = ATA_FLAG_SATA | ATA_FLAG_NCQ,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &sata_dwc_ops,
},
};
static int sata_dwc_probe(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
struct device_node *np = dev->of_node;
struct sata_dwc_device *hsdev;
u32 idr, versionr;
char *ver = (char *)&versionr;
void __iomem *base;
int err = 0;
int irq;
struct ata_host *host;
struct ata_port_info pi = sata_dwc_port_info[0];
const struct ata_port_info *ppi[] = { &pi, NULL };
struct resource *res;
/* Allocate DWC SATA device */
host = ata_host_alloc_pinfo(dev, ppi, SATA_DWC_MAX_PORTS);
hsdev = devm_kzalloc(dev, sizeof(*hsdev), GFP_KERNEL);
if (!host || !hsdev)
return -ENOMEM;
host->private_data = hsdev;
/* Ioremap SATA registers */
base = devm_platform_get_and_ioremap_resource(ofdev, 0, &res);
if (IS_ERR(base))
return PTR_ERR(base);
dev_dbg(dev, "ioremap done for SATA register address\n");
/* Synopsys DWC SATA specific Registers */
hsdev->sata_dwc_regs = base + SATA_DWC_REG_OFFSET;
hsdev->dmadr = res->start + SATA_DWC_REG_OFFSET + offsetof(struct sata_dwc_regs, dmadr);
/* Setup port */
host->ports[0]->ioaddr.cmd_addr = base;
host->ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET;
sata_dwc_setup_port(&host->ports[0]->ioaddr, base);
/* Read the ID and Version Registers */
idr = sata_dwc_readl(&hsdev->sata_dwc_regs->idr);
versionr = sata_dwc_readl(&hsdev->sata_dwc_regs->versionr);
dev_notice(dev, "id %d, controller version %c.%c%c\n", idr, ver[0], ver[1], ver[2]);
/* Save dev for later use in dev_xxx() routines */
hsdev->dev = dev;
/* Enable SATA Interrupts */
sata_dwc_enable_interrupts(hsdev);
/* Get SATA interrupt number */
irq = irq_of_parse_and_map(np, 0);
if (!irq) {
dev_err(dev, "no SATA DMA irq\n");
return -ENODEV;
}
#ifdef CONFIG_SATA_DWC_OLD_DMA
if (!of_property_present(np, "dmas")) {
err = sata_dwc_dma_init_old(ofdev, hsdev);
if (err)
return err;
}
#endif
hsdev->phy = devm_phy_optional_get(dev, "sata-phy");
if (IS_ERR(hsdev->phy))
return PTR_ERR(hsdev->phy);
err = phy_init(hsdev->phy);
if (err)
goto error_out;
/*
	 * Now register with the libATA core. This will also initiate the
	 * device discovery process, invoking our port_start() handler and
	 * error_handler() to execute a dummy Softreset EH session.
*/
err = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
if (err)
dev_err(dev, "failed to activate host");
return 0;
error_out:
phy_exit(hsdev->phy);
return err;
}
static void sata_dwc_remove(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
struct ata_host *host = dev_get_drvdata(dev);
struct sata_dwc_device *hsdev = host->private_data;
ata_host_detach(host);
phy_exit(hsdev->phy);
#ifdef CONFIG_SATA_DWC_OLD_DMA
/* Free SATA DMA resources */
sata_dwc_dma_exit_old(hsdev);
#endif
dev_dbg(dev, "done\n");
}
static const struct of_device_id sata_dwc_match[] = {
{ .compatible = "amcc,sata-460ex", },
{}
};
MODULE_DEVICE_TABLE(of, sata_dwc_match);
static struct platform_driver sata_dwc_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = sata_dwc_match,
},
.probe = sata_dwc_probe,
.remove_new = sata_dwc_remove,
};
module_platform_driver(sata_dwc_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mark Miesfeld <[email protected]>");
MODULE_DESCRIPTION("DesignWare Cores SATA controller low level driver");
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/sata_dwc_460ex.c |
/*
* Driver for the Octeon bootbus compact flash.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2005 - 2012 Cavium Inc.
* Copyright (C) 2008 Wind River Systems
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/libata.h>
#include <linux/hrtimer.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <scsi/scsi_host.h>
#include <trace/events/libata.h>
#include <asm/byteorder.h>
#include <asm/octeon/octeon.h>
/*
* The Octeon bootbus compact flash interface is connected in at least
* 3 different configurations on various evaluation boards:
*
* -- 8 bits no irq, no DMA
* -- 16 bits no irq, no DMA
* -- 16 bits True IDE mode with DMA, but no irq.
*
* In the last case the DMA engine can generate an interrupt when the
* transfer is complete. For the first two cases only PIO is supported.
*
*/
#define DRV_NAME "pata_octeon_cf"
#define DRV_VERSION "2.2"
/* Poll interval in ns. */
#define OCTEON_CF_BUSY_POLL_INTERVAL 500000
#define DMA_CFG 0
#define DMA_TIM 0x20
#define DMA_INT 0x38
#define DMA_INT_EN 0x50
struct octeon_cf_port {
struct hrtimer delayed_finish;
struct ata_port *ap;
int dma_finished;
void *c0;
unsigned int cs0;
unsigned int cs1;
bool is_true_ide;
u64 dma_base;
};
static const struct scsi_host_template octeon_cf_sht = {
ATA_PIO_SHT(DRV_NAME),
};
static int enable_dma;
module_param(enable_dma, int, 0444);
MODULE_PARM_DESC(enable_dma,
"Enable use of DMA on interfaces that support it (0=no dma [default], 1=use dma)");
/*
 * Convert a nanosecond-based time to the setting used in the
 * boot bus timing register, based on the timing multiplier
*/
static unsigned int ns_to_tim_reg(unsigned int tim_mult, unsigned int nsecs)
{
/*
* Compute # of eclock periods to get desired duration in
* nanoseconds.
*/
return DIV_ROUND_UP(nsecs * (octeon_get_io_clock_rate() / 1000000),
1000 * tim_mult);
}
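/*
 * Example (values assumed purely for illustration): with an 800 MHz I/O
 * clock, ns_to_tim_reg(4, 20) == DIV_ROUND_UP(20 * 800, 1000 * 4) == 4
 * eclock periods.
 */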
static void octeon_cf_set_boot_reg_cfg(int cs, unsigned int multiplier)
{
union cvmx_mio_boot_reg_cfgx reg_cfg;
unsigned int tim_mult;
switch (multiplier) {
case 8:
tim_mult = 3;
break;
case 4:
tim_mult = 0;
break;
case 2:
tim_mult = 2;
break;
default:
tim_mult = 1;
break;
}
reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs));
reg_cfg.s.dmack = 0; /* Don't assert DMACK on access */
	reg_cfg.s.tim_mult = tim_mult;	/* Timing multiplier */
reg_cfg.s.rd_dly = 0; /* Sample on falling edge of BOOT_OE */
reg_cfg.s.sam = 0; /* Don't combine write and output enable */
reg_cfg.s.we_ext = 0; /* No write enable extension */
reg_cfg.s.oe_ext = 0; /* No read enable extension */
reg_cfg.s.en = 1; /* Enable this region */
reg_cfg.s.orbit = 0; /* Don't combine with previous region */
reg_cfg.s.ale = 0; /* Don't do address multiplexing */
cvmx_write_csr(CVMX_MIO_BOOT_REG_CFGX(cs), reg_cfg.u64);
}
/*
* Called after libata determines the needed PIO mode. This
* function programs the Octeon bootbus regions to support the
* timing requirements of the PIO mode.
*
* @ap: ATA port information
* @dev: ATA device
*/
static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev)
{
struct octeon_cf_port *cf_port = ap->private_data;
union cvmx_mio_boot_reg_timx reg_tim;
int T;
struct ata_timing timing;
unsigned int div;
int use_iordy;
int trh;
int pause;
/* These names are timing parameters from the ATA spec */
int t2;
/*
* A divisor value of four will overflow the timing fields at
* clock rates greater than 800MHz
*/
if (octeon_get_io_clock_rate() <= 800000000)
div = 4;
else
div = 8;
T = (int)((1000000000000LL * div) / octeon_get_io_clock_rate());
BUG_ON(ata_timing_compute(dev, dev->pio_mode, &timing, T, T));
t2 = timing.active;
if (t2)
t2--;
trh = ns_to_tim_reg(div, 20);
if (trh)
trh--;
pause = (int)timing.cycle - (int)timing.active -
(int)timing.setup - trh;
if (pause < 0)
pause = 0;
if (pause)
pause--;
octeon_cf_set_boot_reg_cfg(cf_port->cs0, div);
if (cf_port->is_true_ide)
/* True IDE mode, program both chip selects. */
octeon_cf_set_boot_reg_cfg(cf_port->cs1, div);
use_iordy = ata_pio_need_iordy(dev);
reg_tim.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_TIMX(cf_port->cs0));
/* Disable page mode */
reg_tim.s.pagem = 0;
/* Enable dynamic timing */
reg_tim.s.waitm = use_iordy;
/* Pages are disabled */
reg_tim.s.pages = 0;
/* We don't use multiplexed address mode */
reg_tim.s.ale = 0;
/* Not used */
reg_tim.s.page = 0;
	/* Time after IORDY to continue to assert the data */
reg_tim.s.wait = 0;
/* Time to wait to complete the cycle. */
reg_tim.s.pause = pause;
/* How long to hold after a write to de-assert CE. */
reg_tim.s.wr_hld = trh;
/* How long to wait after a read to de-assert CE. */
reg_tim.s.rd_hld = trh;
/* How long write enable is asserted */
reg_tim.s.we = t2;
/* How long read enable is asserted */
reg_tim.s.oe = t2;
/* Time after CE that read/write starts */
reg_tim.s.ce = ns_to_tim_reg(div, 5);
/* Time before CE that address is valid */
reg_tim.s.adr = 0;
/* Program the bootbus region timing for the data port chip select. */
cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cf_port->cs0), reg_tim.u64);
if (cf_port->is_true_ide)
/* True IDE mode, program both chip selects. */
cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cf_port->cs1),
reg_tim.u64);
}
static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev)
{
struct octeon_cf_port *cf_port = ap->private_data;
union cvmx_mio_boot_pin_defs pin_defs;
union cvmx_mio_boot_dma_timx dma_tim;
unsigned int oe_a;
unsigned int oe_n;
unsigned int dma_ackh;
unsigned int dma_arq;
unsigned int pause;
unsigned int T0, Tkr, Td;
unsigned int tim_mult;
int c;
const struct ata_timing *timing;
timing = ata_timing_find_mode(dev->dma_mode);
T0 = timing->cycle;
Td = timing->active;
Tkr = timing->recover;
dma_ackh = timing->dmack_hold;
dma_tim.u64 = 0;
/* dma_tim.s.tim_mult = 0 --> 4x */
tim_mult = 4;
/* not spec'ed, value in eclocks, not affected by tim_mult */
dma_arq = 8;
pause = 25 - dma_arq * 1000 /
(octeon_get_io_clock_rate() / 1000000); /* Tz */
oe_a = Td;
/* Tkr from cf spec, lengthened to meet T0 */
oe_n = max(T0 - oe_a, Tkr);
pin_defs.u64 = cvmx_read_csr(CVMX_MIO_BOOT_PIN_DEFS);
/* DMA channel number. */
c = (cf_port->dma_base & 8) >> 3;
	/* Invert the polarity if the default is 0 */
dma_tim.s.dmack_pi = (pin_defs.u64 & (1ull << (11 + c))) ? 0 : 1;
dma_tim.s.oe_n = ns_to_tim_reg(tim_mult, oe_n);
dma_tim.s.oe_a = ns_to_tim_reg(tim_mult, oe_a);
/*
	 * This is tI; the CF spec says 0, but the Sony CF card requires
	 * more, so we use 20 ns.
*/
dma_tim.s.dmack_s = ns_to_tim_reg(tim_mult, 20);
dma_tim.s.dmack_h = ns_to_tim_reg(tim_mult, dma_ackh);
dma_tim.s.dmarq = dma_arq;
dma_tim.s.pause = ns_to_tim_reg(tim_mult, pause);
dma_tim.s.rd_dly = 0; /* Sample right on edge */
/* writes only */
dma_tim.s.we_n = ns_to_tim_reg(tim_mult, oe_n);
dma_tim.s.we_a = ns_to_tim_reg(tim_mult, oe_a);
ata_dev_dbg(dev, "ns to ticks (mult %d) of %d is: %d\n", tim_mult, 60,
ns_to_tim_reg(tim_mult, 60));
ata_dev_dbg(dev, "oe_n: %d, oe_a: %d, dmack_s: %d, dmack_h: %d, dmarq: %d, pause: %d\n",
dma_tim.s.oe_n, dma_tim.s.oe_a, dma_tim.s.dmack_s,
dma_tim.s.dmack_h, dma_tim.s.dmarq, dma_tim.s.pause);
cvmx_write_csr(cf_port->dma_base + DMA_TIM, dma_tim.u64);
}
/*
* Handle an 8 bit I/O request.
*
* @qc: Queued command
* @buffer: Data buffer
* @buflen: Length of the buffer.
* @rw: True to write.
*/
static unsigned int octeon_cf_data_xfer8(struct ata_queued_cmd *qc,
unsigned char *buffer,
unsigned int buflen,
int rw)
{
struct ata_port *ap = qc->dev->link->ap;
void __iomem *data_addr = ap->ioaddr.data_addr;
unsigned long words;
int count;
words = buflen;
if (rw) {
count = 16;
while (words--) {
iowrite8(*buffer, data_addr);
buffer++;
/*
* Every 16 writes do a read so the bootbus
* FIFO doesn't fill up.
*/
if (--count == 0) {
ioread8(ap->ioaddr.altstatus_addr);
count = 16;
}
}
} else {
ioread8_rep(data_addr, buffer, words);
}
return buflen;
}
/*
* Handle a 16 bit I/O request.
*
* @qc: Queued command
* @buffer: Data buffer
* @buflen: Length of the buffer.
* @rw: True to write.
*/
static unsigned int octeon_cf_data_xfer16(struct ata_queued_cmd *qc,
unsigned char *buffer,
unsigned int buflen,
int rw)
{
struct ata_port *ap = qc->dev->link->ap;
void __iomem *data_addr = ap->ioaddr.data_addr;
unsigned long words;
int count;
words = buflen / 2;
if (rw) {
count = 16;
while (words--) {
iowrite16(*(uint16_t *)buffer, data_addr);
buffer += sizeof(uint16_t);
/*
* Every 16 writes do a read so the bootbus
* FIFO doesn't fill up.
*/
if (--count == 0) {
ioread8(ap->ioaddr.altstatus_addr);
count = 16;
}
}
} else {
while (words--) {
*(uint16_t *)buffer = ioread16(data_addr);
buffer += sizeof(uint16_t);
}
}
/* Transfer trailing 1 byte, if any. */
if (unlikely(buflen & 0x01)) {
__le16 align_buf[1] = { 0 };
if (rw == READ) {
align_buf[0] = cpu_to_le16(ioread16(data_addr));
memcpy(buffer, align_buf, 1);
} else {
memcpy(align_buf, buffer, 1);
iowrite16(le16_to_cpu(align_buf[0]), data_addr);
}
words++;
}
return buflen;
}
/*
* Read the taskfile for 16bit non-True IDE only.
*/
static void octeon_cf_tf_read16(struct ata_port *ap, struct ata_taskfile *tf)
{
u16 blob;
/* The base of the registers is at ioaddr.data_addr. */
void __iomem *base = ap->ioaddr.data_addr;
blob = __raw_readw(base + 0xc);
tf->error = blob >> 8;
blob = __raw_readw(base + 2);
tf->nsect = blob & 0xff;
tf->lbal = blob >> 8;
blob = __raw_readw(base + 4);
tf->lbam = blob & 0xff;
tf->lbah = blob >> 8;
blob = __raw_readw(base + 6);
tf->device = blob & 0xff;
tf->status = blob >> 8;
if (tf->flags & ATA_TFLAG_LBA48) {
if (likely(ap->ioaddr.ctl_addr)) {
iowrite8(tf->ctl | ATA_HOB, ap->ioaddr.ctl_addr);
blob = __raw_readw(base + 0xc);
tf->hob_feature = blob >> 8;
blob = __raw_readw(base + 2);
tf->hob_nsect = blob & 0xff;
tf->hob_lbal = blob >> 8;
blob = __raw_readw(base + 4);
tf->hob_lbam = blob & 0xff;
tf->hob_lbah = blob >> 8;
iowrite8(tf->ctl, ap->ioaddr.ctl_addr);
ap->last_ctl = tf->ctl;
} else {
WARN_ON(1);
}
}
}
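/*
 * Read the Status register for 16-bit non-True IDE. Status shares a 16-bit
 * word with the Device register and lives in the upper byte.
 */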
static u8 octeon_cf_check_status16(struct ata_port *ap)
{
u16 blob;
void __iomem *base = ap->ioaddr.data_addr;
blob = __raw_readw(base + 6);
return blob >> 8;
}
static int octeon_cf_softreset16(struct ata_link *link, unsigned int *classes,
unsigned long deadline)
{
struct ata_port *ap = link->ap;
void __iomem *base = ap->ioaddr.data_addr;
int rc;
u8 err;
__raw_writew(ap->ctl, base + 0xe);
udelay(20);
__raw_writew(ap->ctl | ATA_SRST, base + 0xe);
udelay(20);
__raw_writew(ap->ctl, base + 0xe);
rc = ata_sff_wait_after_reset(link, 1, deadline);
if (rc) {
ata_link_err(link, "SRST failed (errno=%d)\n", rc);
return rc;
}
/* determine by signature whether we have ATA or ATAPI devices */
classes[0] = ata_sff_dev_classify(&link->device[0], 1, &err);
return 0;
}
/*
* Load the taskfile for 16bit non-True IDE only. The device_addr is
* not loaded, we do this as part of octeon_cf_exec_command16.
*/
static void octeon_cf_tf_load16(struct ata_port *ap,
const struct ata_taskfile *tf)
{
unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
/* The base of the registers is at ioaddr.data_addr. */
void __iomem *base = ap->ioaddr.data_addr;
if (tf->ctl != ap->last_ctl) {
iowrite8(tf->ctl, ap->ioaddr.ctl_addr);
ap->last_ctl = tf->ctl;
ata_wait_idle(ap);
}
if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
__raw_writew(tf->hob_feature << 8, base + 0xc);
__raw_writew(tf->hob_nsect | tf->hob_lbal << 8, base + 2);
__raw_writew(tf->hob_lbam | tf->hob_lbah << 8, base + 4);
}
if (is_addr) {
__raw_writew(tf->feature << 8, base + 0xc);
__raw_writew(tf->nsect | tf->lbal << 8, base + 2);
__raw_writew(tf->lbam | tf->lbah << 8, base + 4);
}
ata_wait_idle(ap);
}
static void octeon_cf_dev_select(struct ata_port *ap, unsigned int device)
{
/* There is only one device, do nothing. */
return;
}
/*
* Issue ATA command to host controller. The device_addr is also sent
* as it must be written in a combined write with the command.
*/
static void octeon_cf_exec_command16(struct ata_port *ap,
const struct ata_taskfile *tf)
{
/* The base of the registers is at ioaddr.data_addr. */
void __iomem *base = ap->ioaddr.data_addr;
u16 blob = 0;
if (tf->flags & ATA_TFLAG_DEVICE)
blob = tf->device;
blob |= (tf->command << 8);
__raw_writew(blob, base + 6);
ata_wait_idle(ap);
}
static void octeon_cf_ata_port_noaction(struct ata_port *ap)
{
}
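/*
 * Set up a bootbus DMA transfer: remember the first scatterlist entry, clear
 * the finished flag and issue the r/w command to the device. The DMA engine
 * itself is started later by octeon_cf_dma_start().
 */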
static void octeon_cf_dma_setup(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct octeon_cf_port *cf_port;
cf_port = ap->private_data;
/* issue r/w command */
qc->cursg = qc->sg;
cf_port->dma_finished = 0;
ap->ops->sff_exec_command(ap, &qc->tf);
}
/*
* Start a DMA transfer that was already setup
*
* @qc: Information about the DMA
*/
static void octeon_cf_dma_start(struct ata_queued_cmd *qc)
{
struct octeon_cf_port *cf_port = qc->ap->private_data;
union cvmx_mio_boot_dma_cfgx mio_boot_dma_cfg;
union cvmx_mio_boot_dma_intx mio_boot_dma_int;
struct scatterlist *sg;
/* Get the scatter list entry we need to DMA into */
sg = qc->cursg;
BUG_ON(!sg);
/*
* Clear the DMA complete status.
*/
mio_boot_dma_int.u64 = 0;
mio_boot_dma_int.s.done = 1;
cvmx_write_csr(cf_port->dma_base + DMA_INT, mio_boot_dma_int.u64);
/* Enable the interrupt. */
cvmx_write_csr(cf_port->dma_base + DMA_INT_EN, mio_boot_dma_int.u64);
/* Set the direction of the DMA */
mio_boot_dma_cfg.u64 = 0;
#ifdef __LITTLE_ENDIAN
mio_boot_dma_cfg.s.endian = 1;
#endif
mio_boot_dma_cfg.s.en = 1;
mio_boot_dma_cfg.s.rw = ((qc->tf.flags & ATA_TFLAG_WRITE) != 0);
/*
* Don't stop the DMA if the device deasserts DMARQ. Many
* compact flashes deassert DMARQ for a short time between
* sectors. Instead of stopping and restarting the DMA, we'll
* let the hardware do it. If the DMA is really stopped early
* due to an error condition, a later timeout will force us to
* stop.
*/
mio_boot_dma_cfg.s.clr = 0;
/* Size is specified in 16bit words and minus one notation */
mio_boot_dma_cfg.s.size = sg_dma_len(sg) / 2 - 1;
/* We need to swap the high and low bytes of every 16 bits */
mio_boot_dma_cfg.s.swap8 = 1;
mio_boot_dma_cfg.s.adr = sg_dma_address(sg);
cvmx_write_csr(cf_port->dma_base + DMA_CFG, mio_boot_dma_cfg.u64);
}
/*
*
* LOCKING:
* spin_lock_irqsave(host lock)
*
*/
static unsigned int octeon_cf_dma_finished(struct ata_port *ap,
struct ata_queued_cmd *qc)
{
struct ata_eh_info *ehi = &ap->link.eh_info;
struct octeon_cf_port *cf_port = ap->private_data;
union cvmx_mio_boot_dma_cfgx dma_cfg;
union cvmx_mio_boot_dma_intx dma_int;
u8 status;
trace_ata_bmdma_stop(ap, &qc->tf, qc->tag);
if (ap->hsm_task_state != HSM_ST_LAST)
return 0;
dma_cfg.u64 = cvmx_read_csr(cf_port->dma_base + DMA_CFG);
if (dma_cfg.s.size != 0xfffff) {
/* Error, the transfer was not complete. */
qc->err_mask |= AC_ERR_HOST_BUS;
ap->hsm_task_state = HSM_ST_ERR;
}
/* Stop and clear the dma engine. */
dma_cfg.u64 = 0;
dma_cfg.s.size = -1;
cvmx_write_csr(cf_port->dma_base + DMA_CFG, dma_cfg.u64);
/* Disable the interrupt. */
dma_int.u64 = 0;
cvmx_write_csr(cf_port->dma_base + DMA_INT_EN, dma_int.u64);
/* Clear the DMA complete status */
dma_int.s.done = 1;
cvmx_write_csr(cf_port->dma_base + DMA_INT, dma_int.u64);
status = ap->ops->sff_check_status(ap);
ata_sff_hsm_move(ap, qc, status, 0);
if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA))
ata_ehi_push_desc(ehi, "DMA stat 0x%x", status);
return 1;
}
/*
* Check if any queued commands have more DMAs, if so start the next
* transfer, else do end of transfer handling.
*/
static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
struct octeon_cf_port *cf_port;
int i;
unsigned int handled = 0;
unsigned long flags;
spin_lock_irqsave(&host->lock, flags);
for (i = 0; i < host->n_ports; i++) {
u8 status;
struct ata_port *ap;
struct ata_queued_cmd *qc;
union cvmx_mio_boot_dma_intx dma_int;
union cvmx_mio_boot_dma_cfgx dma_cfg;
ap = host->ports[i];
cf_port = ap->private_data;
dma_int.u64 = cvmx_read_csr(cf_port->dma_base + DMA_INT);
dma_cfg.u64 = cvmx_read_csr(cf_port->dma_base + DMA_CFG);
qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (!qc || (qc->tf.flags & ATA_TFLAG_POLLING))
continue;
if (dma_int.s.done && !dma_cfg.s.en) {
if (!sg_is_last(qc->cursg)) {
qc->cursg = sg_next(qc->cursg);
handled = 1;
trace_ata_bmdma_start(ap, &qc->tf, qc->tag);
octeon_cf_dma_start(qc);
continue;
} else {
cf_port->dma_finished = 1;
}
}
if (!cf_port->dma_finished)
continue;
status = ioread8(ap->ioaddr.altstatus_addr);
if (status & (ATA_BUSY | ATA_DRQ)) {
/*
* We are busy, try to handle it later. This
* is the DMA finished interrupt, and it could
* take a little while for the card to be
* ready for more commands.
*/
/* Clear DMA irq. */
dma_int.u64 = 0;
dma_int.s.done = 1;
cvmx_write_csr(cf_port->dma_base + DMA_INT,
dma_int.u64);
hrtimer_start_range_ns(&cf_port->delayed_finish,
ns_to_ktime(OCTEON_CF_BUSY_POLL_INTERVAL),
OCTEON_CF_BUSY_POLL_INTERVAL / 5,
HRTIMER_MODE_REL);
handled = 1;
} else {
handled |= octeon_cf_dma_finished(ap, qc);
}
}
spin_unlock_irqrestore(&host->lock, flags);
return IRQ_RETVAL(handled);
}
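/*
 * hrtimer callback used in True IDE mode: poll the alternate status register
 * until the card clears BUSY/DRQ, then finish the DMA transfer.
 */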
static enum hrtimer_restart octeon_cf_delayed_finish(struct hrtimer *hrt)
{
struct octeon_cf_port *cf_port = container_of(hrt,
struct octeon_cf_port,
delayed_finish);
struct ata_port *ap = cf_port->ap;
struct ata_host *host = ap->host;
struct ata_queued_cmd *qc;
unsigned long flags;
u8 status;
enum hrtimer_restart rv = HRTIMER_NORESTART;
spin_lock_irqsave(&host->lock, flags);
/*
* If the port is not waiting for completion, it must have
* handled it previously. The hsm_task_state is
* protected by host->lock.
*/
if (ap->hsm_task_state != HSM_ST_LAST || !cf_port->dma_finished)
goto out;
status = ioread8(ap->ioaddr.altstatus_addr);
if (status & (ATA_BUSY | ATA_DRQ)) {
/* Still busy, try again. */
hrtimer_forward_now(hrt,
ns_to_ktime(OCTEON_CF_BUSY_POLL_INTERVAL));
rv = HRTIMER_RESTART;
goto out;
}
qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
octeon_cf_dma_finished(ap, qc);
out:
spin_unlock_irqrestore(&host->lock, flags);
return rv;
}
static void octeon_cf_dev_config(struct ata_device *dev)
{
/*
	 * A maximum of 2^20 - 1 16-bit transfers are possible with
	 * the bootbus DMA. Each 512-byte sector needs 256 such transfers,
	 * so throttle max_sectors to (2^12 - 1 == 4095) to ensure that
	 * limit can never be exceeded.
*/
dev->max_sectors = min(dev->max_sectors, 4095U);
}
/*
* We don't do ATAPI DMA so return 0.
*/
static int octeon_cf_check_atapi_dma(struct ata_queued_cmd *qc)
{
return 0;
}
static unsigned int octeon_cf_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
switch (qc->tf.protocol) {
case ATA_PROT_DMA:
WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
trace_ata_tf_load(ap, &qc->tf);
ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
trace_ata_bmdma_setup(ap, &qc->tf, qc->tag);
octeon_cf_dma_setup(qc); /* set up dma */
trace_ata_bmdma_start(ap, &qc->tf, qc->tag);
octeon_cf_dma_start(qc); /* initiate dma */
ap->hsm_task_state = HSM_ST_LAST;
break;
case ATAPI_PROT_DMA:
dev_err(ap->dev, "Error, ATAPI not supported\n");
BUG();
default:
return ata_sff_qc_issue(qc);
}
return 0;
}
static struct ata_port_operations octeon_cf_ops = {
.inherits = &ata_sff_port_ops,
.check_atapi_dma = octeon_cf_check_atapi_dma,
.qc_prep = ata_noop_qc_prep,
.qc_issue = octeon_cf_qc_issue,
.sff_dev_select = octeon_cf_dev_select,
.sff_irq_on = octeon_cf_ata_port_noaction,
.sff_irq_clear = octeon_cf_ata_port_noaction,
.cable_detect = ata_cable_40wire,
.set_piomode = octeon_cf_set_piomode,
.set_dmamode = octeon_cf_set_dmamode,
.dev_config = octeon_cf_dev_config,
};
static int octeon_cf_probe(struct platform_device *pdev)
{
struct resource *res_cs0, *res_cs1;
bool is_16bit;
u64 reg;
struct device_node *node;
void __iomem *cs0;
void __iomem *cs1 = NULL;
struct ata_host *host;
struct ata_port *ap;
int irq = 0;
irq_handler_t irq_handler = NULL;
void __iomem *base;
struct octeon_cf_port *cf_port;
u32 bus_width;
int rv;
node = pdev->dev.of_node;
if (node == NULL)
return -EINVAL;
cf_port = devm_kzalloc(&pdev->dev, sizeof(*cf_port), GFP_KERNEL);
if (!cf_port)
return -ENOMEM;
cf_port->is_true_ide = of_property_read_bool(node, "cavium,true-ide");
if (of_property_read_u32(node, "cavium,bus-width", &bus_width) == 0)
is_16bit = (bus_width == 16);
else
is_16bit = false;
	rv = of_property_read_reg(node, 0, &reg, NULL);
if (rv < 0)
return rv;
cf_port->cs0 = upper_32_bits(reg);
if (cf_port->is_true_ide) {
struct device_node *dma_node;
dma_node = of_parse_phandle(node,
"cavium,dma-engine-handle", 0);
if (dma_node) {
struct platform_device *dma_dev;
dma_dev = of_find_device_by_node(dma_node);
if (dma_dev) {
struct resource *res_dma;
int i;
res_dma = platform_get_resource(dma_dev, IORESOURCE_MEM, 0);
if (!res_dma) {
put_device(&dma_dev->dev);
of_node_put(dma_node);
return -EINVAL;
}
cf_port->dma_base = (u64)devm_ioremap(&pdev->dev, res_dma->start,
resource_size(res_dma));
if (!cf_port->dma_base) {
put_device(&dma_dev->dev);
of_node_put(dma_node);
return -EINVAL;
}
i = platform_get_irq(dma_dev, 0);
if (i > 0) {
irq = i;
irq_handler = octeon_cf_interrupt;
}
put_device(&dma_dev->dev);
}
of_node_put(dma_node);
}
res_cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!res_cs1)
return -EINVAL;
cs1 = devm_ioremap(&pdev->dev, res_cs1->start,
resource_size(res_cs1));
if (!cs1)
return -EINVAL;
		rv = of_property_read_reg(node, 1, &reg, NULL);
if (rv < 0)
return rv;
cf_port->cs1 = upper_32_bits(reg);
}
res_cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res_cs0)
return -EINVAL;
cs0 = devm_ioremap(&pdev->dev, res_cs0->start,
resource_size(res_cs0));
if (!cs0)
return -ENOMEM;
/* allocate host */
host = ata_host_alloc(&pdev->dev, 1);
if (!host)
return -ENOMEM;
ap = host->ports[0];
ap->private_data = cf_port;
pdev->dev.platform_data = cf_port;
cf_port->ap = ap;
ap->ops = &octeon_cf_ops;
ap->pio_mask = ATA_PIO6;
ap->flags |= ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING;
if (!is_16bit) {
base = cs0 + 0x800;
ap->ioaddr.cmd_addr = base;
ata_sff_std_ports(&ap->ioaddr);
ap->ioaddr.altstatus_addr = base + 0xe;
ap->ioaddr.ctl_addr = base + 0xe;
octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer8;
} else if (cf_port->is_true_ide) {
base = cs0;
ap->ioaddr.cmd_addr = base + (ATA_REG_CMD << 1) + 1;
ap->ioaddr.data_addr = base + (ATA_REG_DATA << 1);
ap->ioaddr.error_addr = base + (ATA_REG_ERR << 1) + 1;
ap->ioaddr.feature_addr = base + (ATA_REG_FEATURE << 1) + 1;
ap->ioaddr.nsect_addr = base + (ATA_REG_NSECT << 1) + 1;
ap->ioaddr.lbal_addr = base + (ATA_REG_LBAL << 1) + 1;
ap->ioaddr.lbam_addr = base + (ATA_REG_LBAM << 1) + 1;
ap->ioaddr.lbah_addr = base + (ATA_REG_LBAH << 1) + 1;
ap->ioaddr.device_addr = base + (ATA_REG_DEVICE << 1) + 1;
ap->ioaddr.status_addr = base + (ATA_REG_STATUS << 1) + 1;
ap->ioaddr.command_addr = base + (ATA_REG_CMD << 1) + 1;
ap->ioaddr.altstatus_addr = cs1 + (6 << 1) + 1;
ap->ioaddr.ctl_addr = cs1 + (6 << 1) + 1;
octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer16;
ap->mwdma_mask = enable_dma ? ATA_MWDMA4 : 0;
/* True IDE mode needs a timer to poll for not-busy. */
hrtimer_init(&cf_port->delayed_finish, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
cf_port->delayed_finish.function = octeon_cf_delayed_finish;
} else {
/* 16 bit but not True IDE */
base = cs0 + 0x800;
octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer16;
octeon_cf_ops.softreset = octeon_cf_softreset16;
octeon_cf_ops.sff_check_status = octeon_cf_check_status16;
octeon_cf_ops.sff_tf_read = octeon_cf_tf_read16;
octeon_cf_ops.sff_tf_load = octeon_cf_tf_load16;
octeon_cf_ops.sff_exec_command = octeon_cf_exec_command16;
ap->ioaddr.data_addr = base + ATA_REG_DATA;
ap->ioaddr.nsect_addr = base + ATA_REG_NSECT;
ap->ioaddr.lbal_addr = base + ATA_REG_LBAL;
ap->ioaddr.ctl_addr = base + 0xe;
ap->ioaddr.altstatus_addr = base + 0xe;
}
cf_port->c0 = ap->ioaddr.ctl_addr;
rv = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rv)
return rv;
ata_port_desc(ap, "cmd %p ctl %p", base, ap->ioaddr.ctl_addr);
dev_info(&pdev->dev, "version " DRV_VERSION" %d bit%s.\n",
is_16bit ? 16 : 8,
cf_port->is_true_ide ? ", True IDE" : "");
return ata_host_activate(host, irq, irq_handler,
IRQF_SHARED, &octeon_cf_sht);
}
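/*
 * On shutdown, stop and clear any in-flight bootbus DMA, then pulse SRST
 * through the device control register so the card is left idle.
 */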
static void octeon_cf_shutdown(struct device *dev)
{
union cvmx_mio_boot_dma_cfgx dma_cfg;
union cvmx_mio_boot_dma_intx dma_int;
struct octeon_cf_port *cf_port = dev_get_platdata(dev);
if (cf_port->dma_base) {
/* Stop and clear the dma engine. */
dma_cfg.u64 = 0;
dma_cfg.s.size = -1;
cvmx_write_csr(cf_port->dma_base + DMA_CFG, dma_cfg.u64);
/* Disable the interrupt. */
dma_int.u64 = 0;
cvmx_write_csr(cf_port->dma_base + DMA_INT_EN, dma_int.u64);
/* Clear the DMA complete status */
dma_int.s.done = 1;
cvmx_write_csr(cf_port->dma_base + DMA_INT, dma_int.u64);
__raw_writeb(0, cf_port->c0);
udelay(20);
__raw_writeb(ATA_SRST, cf_port->c0);
udelay(20);
__raw_writeb(0, cf_port->c0);
mdelay(100);
}
}
static const struct of_device_id octeon_cf_match[] = {
{ .compatible = "cavium,ebt3000-compact-flash", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, octeon_cf_match);
static struct platform_driver octeon_cf_driver = {
.probe = octeon_cf_probe,
.driver = {
.name = DRV_NAME,
.of_match_table = octeon_cf_match,
.shutdown = octeon_cf_shutdown
},
};
static int __init octeon_cf_init(void)
{
return platform_driver_register(&octeon_cf_driver);
}
MODULE_AUTHOR("David Daney <[email protected]>");
MODULE_DESCRIPTION("low-level driver for Cavium OCTEON Compact Flash PATA");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);
module_init(octeon_cf_init);
| linux-master | drivers/ata/pata_octeon_cf.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* DWC AHCI SATA Platform driver
*
* Copyright (C) 2021 BAIKAL ELECTRONICS, JSC
*/
#include <linux/ahci_platform.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/libata.h>
#include <linux/log2.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/regmap.h>
#include "ahci.h"
#define DRV_NAME "ahci-dwc"
#define AHCI_DWC_FBS_PMPN_MAX 15
/* DWC AHCI SATA controller specific registers */
#define AHCI_DWC_HOST_OOBR 0xbc
#define AHCI_DWC_HOST_OOB_WE BIT(31)
#define AHCI_DWC_HOST_CWMIN_MASK GENMASK(30, 24)
#define AHCI_DWC_HOST_CWMAX_MASK GENMASK(23, 16)
#define AHCI_DWC_HOST_CIMIN_MASK GENMASK(15, 8)
#define AHCI_DWC_HOST_CIMAX_MASK GENMASK(7, 0)
#define AHCI_DWC_HOST_GPCR 0xd0
#define AHCI_DWC_HOST_GPSR 0xd4
#define AHCI_DWC_HOST_TIMER1MS 0xe0
#define AHCI_DWC_HOST_TIMV_MASK GENMASK(19, 0)
#define AHCI_DWC_HOST_GPARAM1R 0xe8
#define AHCI_DWC_HOST_ALIGN_M BIT(31)
#define AHCI_DWC_HOST_RX_BUFFER BIT(30)
#define AHCI_DWC_HOST_PHY_DATA_MASK GENMASK(29, 28)
#define AHCI_DWC_HOST_PHY_RST BIT(27)
#define AHCI_DWC_HOST_PHY_CTRL_MASK GENMASK(26, 21)
#define AHCI_DWC_HOST_PHY_STAT_MASK GENMASK(20, 15)
#define AHCI_DWC_HOST_LATCH_M BIT(14)
#define AHCI_DWC_HOST_PHY_TYPE_MASK GENMASK(13, 11)
#define AHCI_DWC_HOST_RET_ERR BIT(10)
#define AHCI_DWC_HOST_AHB_ENDIAN_MASK GENMASK(9, 8)
#define AHCI_DWC_HOST_S_HADDR BIT(7)
#define AHCI_DWC_HOST_M_HADDR BIT(6)
#define AHCI_DWC_HOST_S_HDATA_MASK GENMASK(5, 3)
#define AHCI_DWC_HOST_M_HDATA_MASK GENMASK(2, 0)
#define AHCI_DWC_HOST_GPARAM2R 0xec
#define AHCI_DWC_HOST_FBS_MEM_S BIT(19)
#define AHCI_DWC_HOST_FBS_PMPN_MASK GENMASK(17, 16)
#define AHCI_DWC_HOST_FBS_SUP BIT(15)
#define AHCI_DWC_HOST_DEV_CP BIT(14)
#define AHCI_DWC_HOST_DEV_MP BIT(13)
#define AHCI_DWC_HOST_ENCODE_M BIT(12)
#define AHCI_DWC_HOST_RXOOB_CLK_M BIT(11)
#define AHCI_DWC_HOST_RXOOB_M BIT(10)
#define AHCI_DWC_HOST_TXOOB_M BIT(9)
#define AHCI_DWC_HOST_RXOOB_CLK_MASK GENMASK(8, 0)
#define AHCI_DWC_HOST_PPARAMR 0xf0
#define AHCI_DWC_HOST_TX_MEM_M BIT(11)
#define AHCI_DWC_HOST_TX_MEM_S BIT(10)
#define AHCI_DWC_HOST_RX_MEM_M BIT(9)
#define AHCI_DWC_HOST_RX_MEM_S BIT(8)
#define AHCI_DWC_HOST_TXFIFO_DEPTH GENMASK(7, 4)
#define AHCI_DWC_HOST_RXFIFO_DEPTH GENMASK(3, 0)
#define AHCI_DWC_HOST_TESTR 0xf4
#define AHCI_DWC_HOST_PSEL_MASK GENMASK(18, 16)
#define AHCI_DWC_HOST_TEST_IF BIT(0)
#define AHCI_DWC_HOST_VERSIONR 0xf8
#define AHCI_DWC_HOST_IDR 0xfc
#define AHCI_DWC_PORT_DMACR 0x70
#define AHCI_DWC_PORT_RXABL_MASK GENMASK(15, 12)
#define AHCI_DWC_PORT_TXABL_MASK GENMASK(11, 8)
#define AHCI_DWC_PORT_RXTS_MASK GENMASK(7, 4)
#define AHCI_DWC_PORT_TXTS_MASK GENMASK(3, 0)
#define AHCI_DWC_PORT_PHYCR 0x74
#define AHCI_DWC_PORT_PHYSR 0x78
/* Baikal-T1 AHCI SATA specific registers */
#define AHCI_BT1_HOST_PHYCR AHCI_DWC_HOST_GPCR
#define AHCI_BT1_HOST_MPLM_MASK GENMASK(29, 23)
#define AHCI_BT1_HOST_LOSDT_MASK GENMASK(22, 20)
#define AHCI_BT1_HOST_CRR BIT(19)
#define AHCI_BT1_HOST_CRW BIT(18)
#define AHCI_BT1_HOST_CRCD BIT(17)
#define AHCI_BT1_HOST_CRCA BIT(16)
#define AHCI_BT1_HOST_CRDI_MASK GENMASK(15, 0)
#define AHCI_BT1_HOST_PHYSR AHCI_DWC_HOST_GPSR
#define AHCI_BT1_HOST_CRA BIT(16)
#define AHCI_BT1_HOST_CRDO_MASK GENMASK(15, 0)
struct ahci_dwc_plat_data {
unsigned int pflags;
unsigned int hflags;
int (*init)(struct ahci_host_priv *hpriv);
int (*reinit)(struct ahci_host_priv *hpriv);
void (*clear)(struct ahci_host_priv *hpriv);
};
struct ahci_dwc_host_priv {
const struct ahci_dwc_plat_data *pdata;
struct platform_device *pdev;
u32 timv;
u32 dmacr[AHCI_MAX_PORTS];
};
static int ahci_bt1_init(struct ahci_host_priv *hpriv)
{
struct ahci_dwc_host_priv *dpriv = hpriv->plat_data;
int ret;
/* APB, application and reference clocks are required */
if (!ahci_platform_find_clk(hpriv, "pclk") ||
!ahci_platform_find_clk(hpriv, "aclk") ||
!ahci_platform_find_clk(hpriv, "ref")) {
dev_err(&dpriv->pdev->dev, "No system clocks specified\n");
return -EINVAL;
}
/*
* Fully reset the SATA AXI and ref clocks domain to ensure the state
* machine is working from scratch especially if the reference clocks
* source has been changed.
*/
ret = ahci_platform_assert_rsts(hpriv);
if (ret) {
dev_err(&dpriv->pdev->dev, "Couldn't assert the resets\n");
return ret;
}
ret = ahci_platform_deassert_rsts(hpriv);
if (ret) {
dev_err(&dpriv->pdev->dev, "Couldn't de-assert the resets\n");
return ret;
}
return 0;
}
static struct ahci_host_priv *ahci_dwc_get_resources(struct platform_device *pdev)
{
struct ahci_dwc_host_priv *dpriv;
struct ahci_host_priv *hpriv;
dpriv = devm_kzalloc(&pdev->dev, sizeof(*dpriv), GFP_KERNEL);
if (!dpriv)
return ERR_PTR(-ENOMEM);
dpriv->pdev = pdev;
dpriv->pdata = device_get_match_data(&pdev->dev);
if (!dpriv->pdata)
return ERR_PTR(-EINVAL);
hpriv = ahci_platform_get_resources(pdev, dpriv->pdata->pflags);
if (IS_ERR(hpriv))
return hpriv;
hpriv->flags |= dpriv->pdata->hflags;
hpriv->plat_data = (void *)dpriv;
return hpriv;
}
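/*
 * Cross-check the capability bits saved by the generic AHCI layer
 * against the controller synthesis parameters in GPARAM2R and clear
 * any feature (MPS, CPD, FBS) the silicon was not actually built with.
 */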
static void ahci_dwc_check_cap(struct ahci_host_priv *hpriv)
{
unsigned long port_map = hpriv->saved_port_map | hpriv->mask_port_map;
struct ahci_dwc_host_priv *dpriv = hpriv->plat_data;
bool dev_mp, dev_cp, fbs_sup;
unsigned int fbs_pmp;
u32 param;
int i;
param = readl(hpriv->mmio + AHCI_DWC_HOST_GPARAM2R);
dev_mp = !!(param & AHCI_DWC_HOST_DEV_MP);
dev_cp = !!(param & AHCI_DWC_HOST_DEV_CP);
fbs_sup = !!(param & AHCI_DWC_HOST_FBS_SUP);
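/*
 * FBS_PMPN is a two-bit synthesis parameter; as used here it encodes
 * the number of FBS-capable port-multiplier ports in steps of five,
 * hence the multiplication by five before comparing against
 * AHCI_DWC_FBS_PMPN_MAX below.
 */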
fbs_pmp = 5 * FIELD_GET(AHCI_DWC_HOST_FBS_PMPN_MASK, param);
if (!dev_mp && hpriv->saved_cap & HOST_CAP_MPS) {
dev_warn(&dpriv->pdev->dev, "MPS is unsupported\n");
hpriv->saved_cap &= ~HOST_CAP_MPS;
}
if (fbs_sup && fbs_pmp < AHCI_DWC_FBS_PMPN_MAX) {
dev_warn(&dpriv->pdev->dev, "PMPn is limited up to %u ports\n",
fbs_pmp);
}
for_each_set_bit(i, &port_map, AHCI_MAX_PORTS) {
if (!dev_mp && hpriv->saved_port_cap[i] & PORT_CMD_MPSP) {
dev_warn(&dpriv->pdev->dev, "MPS incapable port %d\n", i);
hpriv->saved_port_cap[i] &= ~PORT_CMD_MPSP;
}
if (!dev_cp && hpriv->saved_port_cap[i] & PORT_CMD_CPD) {
dev_warn(&dpriv->pdev->dev, "CPD incapable port %d\n", i);
hpriv->saved_port_cap[i] &= ~PORT_CMD_CPD;
}
if (!fbs_sup && hpriv->saved_port_cap[i] & PORT_CMD_FBSCP) {
dev_warn(&dpriv->pdev->dev, "FBS incapable port %d\n", i);
hpriv->saved_port_cap[i] &= ~PORT_CMD_FBSCP;
}
}
}
static void ahci_dwc_init_timer(struct ahci_host_priv *hpriv)
{
struct ahci_dwc_host_priv *dpriv = hpriv->plat_data;
unsigned long rate;
struct clk *aclk;
u32 cap, cap2;
/* 1ms tick is generated only for the CCC or DevSleep features */
cap = readl(hpriv->mmio + HOST_CAP);
cap2 = readl(hpriv->mmio + HOST_CAP2);
if (!(cap & HOST_CAP_CCC) && !(cap2 & HOST_CAP2_SDS))
return;
/*
* The tick is generated from the AXI/AHB application clock, so make
* sure that clock is actually available before querying its rate.
*/
aclk = ahci_platform_find_clk(hpriv, "aclk");
if (!aclk)
return;
/* 1ms timer interval is set as TIMV = AMBA_FREQ[MHZ] * 1000 */
dpriv->timv = readl(hpriv->mmio + AHCI_DWC_HOST_TIMER1MS);
dpriv->timv = FIELD_GET(AHCI_DWC_HOST_TIMV_MASK, dpriv->timv);
rate = clk_get_rate(aclk) / 1000UL;
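/* For example, a 100 MHz application clock gives rate = 100000, i.e. 100000 ticks per 1 ms. */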
if (rate == dpriv->timv)
return;
dev_info(&dpriv->pdev->dev, "Update CCC/DevSlp timer for Fapp %lu MHz\n",
rate / 1000UL);
dpriv->timv = FIELD_PREP(AHCI_DWC_HOST_TIMV_MASK, rate);
writel(dpriv->timv, hpriv->mmio + AHCI_DWC_HOST_TIMER1MS);
}
static int ahci_dwc_init_dmacr(struct ahci_host_priv *hpriv)
{
struct ahci_dwc_host_priv *dpriv = hpriv->plat_data;
struct device_node *child;
void __iomem *port_mmio;
u32 port, dmacr, ts;
/*
* Update the DMA Tx/Rx transaction sizes in accordance with the
* platform setup. Note values exceeding maximal or minimal limits will
* be automatically clamped. Also note the register isn't affected by
* the HBA global reset so we can freely initialize it once until the
* next system reset.
*/
for_each_child_of_node(dpriv->pdev->dev.of_node, child) {
if (!of_device_is_available(child))
continue;
if (of_property_read_u32(child, "reg", &port)) {
of_node_put(child);
return -EINVAL;
}
port_mmio = __ahci_port_base(hpriv, port);
dmacr = readl(port_mmio + AHCI_DWC_PORT_DMACR);
if (!of_property_read_u32(child, "snps,tx-ts-max", &ts)) {
ts = ilog2(ts);
dmacr &= ~AHCI_DWC_PORT_TXTS_MASK;
dmacr |= FIELD_PREP(AHCI_DWC_PORT_TXTS_MASK, ts);
}
if (!of_property_read_u32(child, "snps,rx-ts-max", &ts)) {
ts = ilog2(ts);
dmacr &= ~AHCI_DWC_PORT_RXTS_MASK;
dmacr |= FIELD_PREP(AHCI_DWC_PORT_RXTS_MASK, ts);
}
writel(dmacr, port_mmio + AHCI_DWC_PORT_DMACR);
dpriv->dmacr[port] = dmacr;
}
return 0;
}
static int ahci_dwc_init_host(struct ahci_host_priv *hpriv)
{
struct ahci_dwc_host_priv *dpriv = hpriv->plat_data;
int rc;
rc = ahci_platform_enable_resources(hpriv);
if (rc)
return rc;
if (dpriv->pdata->init) {
rc = dpriv->pdata->init(hpriv);
if (rc)
goto err_disable_resources;
}
ahci_dwc_check_cap(hpriv);
ahci_dwc_init_timer(hpriv);
rc = ahci_dwc_init_dmacr(hpriv);
if (rc)
goto err_clear_platform;
return 0;
err_clear_platform:
if (dpriv->pdata->clear)
dpriv->pdata->clear(hpriv);
err_disable_resources:
ahci_platform_disable_resources(hpriv);
return rc;
}
static int ahci_dwc_reinit_host(struct ahci_host_priv *hpriv)
{
struct ahci_dwc_host_priv *dpriv = hpriv->plat_data;
unsigned long port_map = hpriv->port_map;
void __iomem *port_mmio;
int i, rc;
rc = ahci_platform_enable_resources(hpriv);
if (rc)
return rc;
if (dpriv->pdata->reinit) {
rc = dpriv->pdata->reinit(hpriv);
if (rc)
goto err_disable_resources;
}
writel(dpriv->timv, hpriv->mmio + AHCI_DWC_HOST_TIMER1MS);
for_each_set_bit(i, &port_map, AHCI_MAX_PORTS) {
port_mmio = __ahci_port_base(hpriv, i);
writel(dpriv->dmacr[i], port_mmio + AHCI_DWC_PORT_DMACR);
}
return 0;
err_disable_resources:
ahci_platform_disable_resources(hpriv);
return rc;
}
static void ahci_dwc_clear_host(struct ahci_host_priv *hpriv)
{
struct ahci_dwc_host_priv *dpriv = hpriv->plat_data;
if (dpriv->pdata->clear)
dpriv->pdata->clear(hpriv);
ahci_platform_disable_resources(hpriv);
}
static void ahci_dwc_stop_host(struct ata_host *host)
{
struct ahci_host_priv *hpriv = host->private_data;
ahci_dwc_clear_host(hpriv);
}
static struct ata_port_operations ahci_dwc_port_ops = {
.inherits = &ahci_platform_ops,
.host_stop = ahci_dwc_stop_host,
};
static const struct ata_port_info ahci_dwc_port_info = {
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_dwc_port_ops,
};
static const struct scsi_host_template ahci_dwc_scsi_info = {
AHCI_SHT(DRV_NAME),
};
static int ahci_dwc_probe(struct platform_device *pdev)
{
struct ahci_host_priv *hpriv;
int rc;
hpriv = ahci_dwc_get_resources(pdev);
if (IS_ERR(hpriv))
return PTR_ERR(hpriv);
rc = ahci_dwc_init_host(hpriv);
if (rc)
return rc;
rc = ahci_platform_init_host(pdev, hpriv, &ahci_dwc_port_info,
&ahci_dwc_scsi_info);
if (rc)
goto err_clear_host;
return 0;
err_clear_host:
ahci_dwc_clear_host(hpriv);
return rc;
}
static int ahci_dwc_suspend(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
int rc;
rc = ahci_platform_suspend_host(dev);
if (rc)
return rc;
ahci_dwc_clear_host(hpriv);
return 0;
}
static int ahci_dwc_resume(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
int rc;
rc = ahci_dwc_reinit_host(hpriv);
if (rc)
return rc;
return ahci_platform_resume_host(dev);
}
static DEFINE_SIMPLE_DEV_PM_OPS(ahci_dwc_pm_ops, ahci_dwc_suspend,
ahci_dwc_resume);
static struct ahci_dwc_plat_data ahci_dwc_plat = {
.pflags = AHCI_PLATFORM_GET_RESETS,
};
static struct ahci_dwc_plat_data ahci_bt1_plat = {
.pflags = AHCI_PLATFORM_GET_RESETS | AHCI_PLATFORM_RST_TRIGGER,
.init = ahci_bt1_init,
};
static const struct of_device_id ahci_dwc_of_match[] = {
{ .compatible = "snps,dwc-ahci", &ahci_dwc_plat },
{ .compatible = "snps,spear-ahci", &ahci_dwc_plat },
{ .compatible = "baikal,bt1-ahci", &ahci_bt1_plat },
{},
};
MODULE_DEVICE_TABLE(of, ahci_dwc_of_match);
static struct platform_driver ahci_dwc_driver = {
.probe = ahci_dwc_probe,
.remove_new = ata_platform_remove_one,
.shutdown = ahci_platform_shutdown,
.driver = {
.name = DRV_NAME,
.of_match_table = ahci_dwc_of_match,
.pm = &ahci_dwc_pm_ops,
},
};
module_platform_driver(ahci_dwc_driver);
MODULE_DESCRIPTION("DWC AHCI SATA platform driver");
MODULE_AUTHOR("Serge Semin <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/ata/ahci_dwc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* sata_promise.c - Promise SATA
*
* Maintained by: Tejun Heo <[email protected]>
* Mikael Pettersson
* Please ALWAYS copy [email protected]
* on emails.
*
* Copyright 2003-2004 Red Hat, Inc.
*
* libata documentation is available via 'make {ps|pdf}docs',
* as Documentation/driver-api/libata.rst
*
* Hardware information only available under NDA.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
#include "sata_promise.h"
#define DRV_NAME "sata_promise"
#define DRV_VERSION "2.12"
enum {
PDC_MAX_PORTS = 4,
PDC_MMIO_BAR = 3,
PDC_MAX_PRD = LIBATA_MAX_PRD - 1, /* -1 for ASIC PRD bug workaround */
/* host register offsets (from host->iomap[PDC_MMIO_BAR]) */
PDC_INT_SEQMASK = 0x40, /* Mask of asserted SEQ INTs */
PDC_FLASH_CTL = 0x44, /* Flash control register */
PDC_PCI_CTL = 0x48, /* PCI control/status reg */
PDC_SATA_PLUG_CSR = 0x6C, /* SATA Plug control/status reg */
PDC2_SATA_PLUG_CSR = 0x60, /* SATAII Plug control/status reg */
PDC_TBG_MODE = 0x41C, /* TBG mode (not SATAII) */
PDC_SLEW_CTL = 0x470, /* slew rate control reg (not SATAII) */
/* per-port ATA register offsets (from ap->ioaddr.cmd_addr) */
PDC_FEATURE = 0x04, /* Feature/Error reg (per port) */
PDC_SECTOR_COUNT = 0x08, /* Sector count reg (per port) */
PDC_SECTOR_NUMBER = 0x0C, /* Sector number reg (per port) */
PDC_CYLINDER_LOW = 0x10, /* Cylinder low reg (per port) */
PDC_CYLINDER_HIGH = 0x14, /* Cylinder high reg (per port) */
PDC_DEVICE = 0x18, /* Device/Head reg (per port) */
PDC_COMMAND = 0x1C, /* Command/status reg (per port) */
PDC_ALTSTATUS = 0x38, /* Alternate-status/device-control reg (per port) */
PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */
PDC_GLOBAL_CTL = 0x48, /* Global control/status (per port) */
PDC_CTLSTAT = 0x60, /* IDE control and status (per port) */
/* per-port SATA register offsets (from ap->ioaddr.scr_addr) */
PDC_SATA_ERROR = 0x04,
PDC_PHYMODE4 = 0x14,
PDC_LINK_LAYER_ERRORS = 0x6C,
PDC_FPDMA_CTLSTAT = 0xD8,
PDC_INTERNAL_DEBUG_1 = 0xF8, /* also used for PATA */
PDC_INTERNAL_DEBUG_2 = 0xFC, /* also used for PATA */
/* PDC_FPDMA_CTLSTAT bit definitions */
PDC_FPDMA_CTLSTAT_RESET = 1 << 3,
PDC_FPDMA_CTLSTAT_DMASETUP_INT_FLAG = 1 << 10,
PDC_FPDMA_CTLSTAT_SETDB_INT_FLAG = 1 << 11,
/* PDC_GLOBAL_CTL bit definitions */
PDC_PH_ERR = (1 << 8), /* PCI error while loading packet */
PDC_SH_ERR = (1 << 9), /* PCI error while loading S/G table */
PDC_DH_ERR = (1 << 10), /* PCI error while loading data */
PDC2_HTO_ERR = (1 << 12), /* host bus timeout */
PDC2_ATA_HBA_ERR = (1 << 13), /* error during SATA DATA FIS transmission */
PDC2_ATA_DMA_CNT_ERR = (1 << 14), /* DMA DATA FIS size differs from S/G count */
PDC_OVERRUN_ERR = (1 << 19), /* S/G byte count larger than HD requires */
PDC_UNDERRUN_ERR = (1 << 20), /* S/G byte count less than HD requires */
PDC_DRIVE_ERR = (1 << 21), /* drive error */
PDC_PCI_SYS_ERR = (1 << 22), /* PCI system error */
PDC1_PCI_PARITY_ERR = (1 << 23), /* PCI parity error (from SATA150 driver) */
PDC1_ERR_MASK = PDC1_PCI_PARITY_ERR,
PDC2_ERR_MASK = PDC2_HTO_ERR | PDC2_ATA_HBA_ERR |
PDC2_ATA_DMA_CNT_ERR,
PDC_ERR_MASK = PDC_PH_ERR | PDC_SH_ERR | PDC_DH_ERR |
PDC_OVERRUN_ERR | PDC_UNDERRUN_ERR |
PDC_DRIVE_ERR | PDC_PCI_SYS_ERR |
PDC1_ERR_MASK | PDC2_ERR_MASK,
board_2037x = 0, /* FastTrak S150 TX2plus */
board_2037x_pata = 1, /* FastTrak S150 TX2plus PATA port */
board_20319 = 2, /* FastTrak S150 TX4 */
board_20619 = 3, /* FastTrak TX4000 */
board_2057x = 4, /* SATAII150 Tx2plus */
board_2057x_pata = 5, /* SATAII150 Tx2plus PATA port */
board_40518 = 6, /* SATAII150 Tx4 */
PDC_HAS_PATA = (1 << 1), /* PDC20375/20575 has PATA */
/* Sequence counter control registers bit definitions */
PDC_SEQCNTRL_INT_MASK = (1 << 5), /* Sequence Interrupt Mask */
/* Feature register values */
PDC_FEATURE_ATAPI_PIO = 0x00, /* ATAPI data xfer by PIO */
PDC_FEATURE_ATAPI_DMA = 0x01, /* ATAPI data xfer by DMA */
/* Device/Head register values */
PDC_DEVICE_SATA = 0xE0, /* Device/Head value for SATA devices */
/* PDC_CTLSTAT bit definitions */
PDC_DMA_ENABLE = (1 << 7),
PDC_IRQ_DISABLE = (1 << 10),
PDC_RESET = (1 << 11), /* HDMA reset */
PDC_COMMON_FLAGS = ATA_FLAG_PIO_POLLING,
/* ap->flags bits */
PDC_FLAG_GEN_II = (1 << 24),
PDC_FLAG_SATA_PATA = (1 << 25), /* supports SATA + PATA */
PDC_FLAG_4_PORTS = (1 << 26), /* 4 ports */
};
struct pdc_port_priv {
u8 *pkt;
dma_addr_t pkt_dma;
};
struct pdc_host_priv {
spinlock_t hard_reset_lock;
};
static int pdc_sata_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int pdc_sata_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int pdc_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static int pdc_common_port_start(struct ata_port *ap);
static int pdc_sata_port_start(struct ata_port *ap);
static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc);
static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);
static int pdc_old_sata_check_atapi_dma(struct ata_queued_cmd *qc);
static void pdc_irq_clear(struct ata_port *ap);
static unsigned int pdc_qc_issue(struct ata_queued_cmd *qc);
static void pdc_freeze(struct ata_port *ap);
static void pdc_sata_freeze(struct ata_port *ap);
static void pdc_thaw(struct ata_port *ap);
static void pdc_sata_thaw(struct ata_port *ap);
static int pdc_pata_softreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static int pdc_sata_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static void pdc_error_handler(struct ata_port *ap);
static void pdc_post_internal_cmd(struct ata_queued_cmd *qc);
static int pdc_pata_cable_detect(struct ata_port *ap);
static const struct scsi_host_template pdc_ata_sht = {
ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = PDC_MAX_PRD,
.dma_boundary = ATA_DMA_BOUNDARY,
};
static const struct ata_port_operations pdc_common_ops = {
.inherits = &ata_sff_port_ops,
.sff_tf_load = pdc_tf_load_mmio,
.sff_exec_command = pdc_exec_command_mmio,
.check_atapi_dma = pdc_check_atapi_dma,
.qc_prep = pdc_qc_prep,
.qc_issue = pdc_qc_issue,
.sff_irq_clear = pdc_irq_clear,
.lost_interrupt = ATA_OP_NULL,
.post_internal_cmd = pdc_post_internal_cmd,
.error_handler = pdc_error_handler,
};
static struct ata_port_operations pdc_sata_ops = {
.inherits = &pdc_common_ops,
.cable_detect = ata_cable_sata,
.freeze = pdc_sata_freeze,
.thaw = pdc_sata_thaw,
.scr_read = pdc_sata_scr_read,
.scr_write = pdc_sata_scr_write,
.port_start = pdc_sata_port_start,
.hardreset = pdc_sata_hardreset,
};
/* First-generation chips need a more restrictive ->check_atapi_dma op,
and ->freeze/thaw that ignore the hotplug controls. */
static struct ata_port_operations pdc_old_sata_ops = {
.inherits = &pdc_sata_ops,
.freeze = pdc_freeze,
.thaw = pdc_thaw,
.check_atapi_dma = pdc_old_sata_check_atapi_dma,
};
static struct ata_port_operations pdc_pata_ops = {
.inherits = &pdc_common_ops,
.cable_detect = pdc_pata_cable_detect,
.freeze = pdc_freeze,
.thaw = pdc_thaw,
.port_start = pdc_common_port_start,
.softreset = pdc_pata_softreset,
};
static const struct ata_port_info pdc_port_info[] = {
[board_2037x] =
{
.flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA |
PDC_FLAG_SATA_PATA,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &pdc_old_sata_ops,
},
[board_2037x_pata] =
{
.flags = PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &pdc_pata_ops,
},
[board_20319] =
{
.flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA |
PDC_FLAG_4_PORTS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &pdc_old_sata_ops,
},
[board_20619] =
{
.flags = PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS |
PDC_FLAG_4_PORTS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &pdc_pata_ops,
},
[board_2057x] =
{
.flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA |
PDC_FLAG_GEN_II | PDC_FLAG_SATA_PATA,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &pdc_sata_ops,
},
[board_2057x_pata] =
{
.flags = PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS |
PDC_FLAG_GEN_II,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &pdc_pata_ops,
},
[board_40518] =
{
.flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA |
PDC_FLAG_GEN_II | PDC_FLAG_4_PORTS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &pdc_sata_ops,
},
};
static const struct pci_device_id pdc_ata_pci_tbl[] = {
{ PCI_VDEVICE(PROMISE, 0x3371), board_2037x },
{ PCI_VDEVICE(PROMISE, 0x3373), board_2037x },
{ PCI_VDEVICE(PROMISE, 0x3375), board_2037x },
{ PCI_VDEVICE(PROMISE, 0x3376), board_2037x },
{ PCI_VDEVICE(PROMISE, 0x3570), board_2057x },
{ PCI_VDEVICE(PROMISE, 0x3571), board_2057x },
{ PCI_VDEVICE(PROMISE, 0x3574), board_2057x },
{ PCI_VDEVICE(PROMISE, 0x3577), board_2057x },
{ PCI_VDEVICE(PROMISE, 0x3d73), board_2057x },
{ PCI_VDEVICE(PROMISE, 0x3d75), board_2057x },
{ PCI_VDEVICE(PROMISE, 0x3318), board_20319 },
{ PCI_VDEVICE(PROMISE, 0x3319), board_20319 },
{ PCI_VDEVICE(PROMISE, 0x3515), board_40518 },
{ PCI_VDEVICE(PROMISE, 0x3519), board_40518 },
{ PCI_VDEVICE(PROMISE, 0x3d17), board_40518 },
{ PCI_VDEVICE(PROMISE, 0x3d18), board_40518 },
{ PCI_VDEVICE(PROMISE, 0x6629), board_20619 },
{ } /* terminate list */
};
static struct pci_driver pdc_ata_pci_driver = {
.name = DRV_NAME,
.id_table = pdc_ata_pci_tbl,
.probe = pdc_ata_init_one,
.remove = ata_pci_remove_one,
};
static int pdc_common_port_start(struct ata_port *ap)
{
struct device *dev = ap->host->dev;
struct pdc_port_priv *pp;
int rc;
/* we use the same prd table as bmdma, allocate it */
rc = ata_bmdma_port_start(ap);
if (rc)
return rc;
pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
if (!pp)
return -ENOMEM;
pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
if (!pp->pkt)
return -ENOMEM;
ap->private_data = pp;
return 0;
}
static int pdc_sata_port_start(struct ata_port *ap)
{
int rc;
rc = pdc_common_port_start(ap);
if (rc)
return rc;
/* fix up PHYMODE4 align timing */
if (ap->flags & PDC_FLAG_GEN_II) {
void __iomem *sata_mmio = ap->ioaddr.scr_addr;
unsigned int tmp;
tmp = readl(sata_mmio + PDC_PHYMODE4);
tmp = (tmp & ~3) | 1; /* set bits 1:0 = 0:1 */
writel(tmp, sata_mmio + PDC_PHYMODE4);
}
return 0;
}
static void pdc_fpdma_clear_interrupt_flag(struct ata_port *ap)
{
void __iomem *sata_mmio = ap->ioaddr.scr_addr;
u32 tmp;
tmp = readl(sata_mmio + PDC_FPDMA_CTLSTAT);
tmp |= PDC_FPDMA_CTLSTAT_DMASETUP_INT_FLAG;
tmp |= PDC_FPDMA_CTLSTAT_SETDB_INT_FLAG;
/* It's not allowed to write to the entire FPDMA_CTLSTAT register
when NCQ is running. So do a byte-sized write to bits 10 and 11. */
writeb(tmp >> 8, sata_mmio + PDC_FPDMA_CTLSTAT + 1);
readb(sata_mmio + PDC_FPDMA_CTLSTAT + 1); /* flush */
}
static void pdc_fpdma_reset(struct ata_port *ap)
{
void __iomem *sata_mmio = ap->ioaddr.scr_addr;
u8 tmp;
tmp = (u8)readl(sata_mmio + PDC_FPDMA_CTLSTAT);
tmp &= 0x7F;
tmp |= PDC_FPDMA_CTLSTAT_RESET;
writeb(tmp, sata_mmio + PDC_FPDMA_CTLSTAT);
readl(sata_mmio + PDC_FPDMA_CTLSTAT); /* flush */
udelay(100);
tmp &= ~PDC_FPDMA_CTLSTAT_RESET;
writeb(tmp, sata_mmio + PDC_FPDMA_CTLSTAT);
readl(sata_mmio + PDC_FPDMA_CTLSTAT); /* flush */
pdc_fpdma_clear_interrupt_flag(ap);
}
static void pdc_not_at_command_packet_phase(struct ata_port *ap)
{
void __iomem *sata_mmio = ap->ioaddr.scr_addr;
unsigned int i;
u32 tmp;
/* check not at ASIC packet command phase */
for (i = 0; i < 100; ++i) {
writel(0, sata_mmio + PDC_INTERNAL_DEBUG_1);
tmp = readl(sata_mmio + PDC_INTERNAL_DEBUG_2);
if ((tmp & 0xF) != 1)
break;
udelay(100);
}
}
static void pdc_clear_internal_debug_record_error_register(struct ata_port *ap)
{
void __iomem *sata_mmio = ap->ioaddr.scr_addr;
writel(0xffffffff, sata_mmio + PDC_SATA_ERROR);
writel(0xffff0000, sata_mmio + PDC_LINK_LAYER_ERRORS);
}
static void pdc_reset_port(struct ata_port *ap)
{
void __iomem *ata_ctlstat_mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT;
unsigned int i;
u32 tmp;
if (ap->flags & PDC_FLAG_GEN_II)
pdc_not_at_command_packet_phase(ap);
tmp = readl(ata_ctlstat_mmio);
tmp |= PDC_RESET;
writel(tmp, ata_ctlstat_mmio);
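/*
 * Keep re-asserting PDC_RESET until the controller latches it (up to
 * 11 polls, 100us apart), then clear the bit to end the reset pulse.
 */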
for (i = 11; i > 0; i--) {
tmp = readl(ata_ctlstat_mmio);
if (tmp & PDC_RESET)
break;
udelay(100);
tmp |= PDC_RESET;
writel(tmp, ata_ctlstat_mmio);
}
tmp &= ~PDC_RESET;
writel(tmp, ata_ctlstat_mmio);
readl(ata_ctlstat_mmio); /* flush */
if (sata_scr_valid(&ap->link) && (ap->flags & PDC_FLAG_GEN_II)) {
pdc_fpdma_reset(ap);
pdc_clear_internal_debug_record_error_register(ap);
}
}
static int pdc_pata_cable_detect(struct ata_port *ap)
{
u8 tmp;
void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
tmp = readb(ata_mmio + PDC_CTLSTAT + 3);
if (tmp & 0x01)
return ATA_CBL_PATA40;
return ATA_CBL_PATA80;
}
static int pdc_sata_scr_read(struct ata_link *link,
unsigned int sc_reg, u32 *val)
{
if (sc_reg > SCR_CONTROL)
return -EINVAL;
*val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 4));
return 0;
}
static int pdc_sata_scr_write(struct ata_link *link,
unsigned int sc_reg, u32 val)
{
if (sc_reg > SCR_CONTROL)
return -EINVAL;
writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
return 0;
}
static void pdc_atapi_pkt(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
dma_addr_t sg_table = ap->bmdma_prd_dma;
unsigned int cdb_len = qc->dev->cdb_len;
u8 *cdb = qc->cdb;
struct pdc_port_priv *pp = ap->private_data;
u8 *buf = pp->pkt;
__le32 *buf32 = (__le32 *) buf;
unsigned int dev_sel, feature;
/* set control bits (byte 0), zero delay seq id (byte 3),
* and seq id (byte 2)
*/
switch (qc->tf.protocol) {
case ATAPI_PROT_DMA:
if (!(qc->tf.flags & ATA_TFLAG_WRITE))
buf32[0] = cpu_to_le32(PDC_PKT_READ);
else
buf32[0] = 0;
break;
case ATAPI_PROT_NODATA:
buf32[0] = cpu_to_le32(PDC_PKT_NODATA);
break;
default:
BUG();
break;
}
buf32[1] = cpu_to_le32(sg_table); /* S/G table addr */
buf32[2] = 0; /* no next-packet */
/* select drive */
if (sata_scr_valid(&ap->link))
dev_sel = PDC_DEVICE_SATA;
else
dev_sel = qc->tf.device;
buf[12] = (1 << 5) | ATA_REG_DEVICE;
buf[13] = dev_sel;
buf[14] = (1 << 5) | ATA_REG_DEVICE | PDC_PKT_CLEAR_BSY;
buf[15] = dev_sel; /* once more, waiting for BSY to clear */
buf[16] = (1 << 5) | ATA_REG_NSECT;
buf[17] = qc->tf.nsect;
buf[18] = (1 << 5) | ATA_REG_LBAL;
buf[19] = qc->tf.lbal;
/* set feature and byte counter registers */
if (qc->tf.protocol != ATAPI_PROT_DMA)
feature = PDC_FEATURE_ATAPI_PIO;
else
feature = PDC_FEATURE_ATAPI_DMA;
buf[20] = (1 << 5) | ATA_REG_FEATURE;
buf[21] = feature;
buf[22] = (1 << 5) | ATA_REG_BYTEL;
buf[23] = qc->tf.lbam;
buf[24] = (1 << 5) | ATA_REG_BYTEH;
buf[25] = qc->tf.lbah;
/* send ATAPI packet command 0xA0 */
buf[26] = (1 << 5) | ATA_REG_CMD;
buf[27] = qc->tf.command;
/* select drive and check DRQ */
buf[28] = (1 << 5) | ATA_REG_DEVICE | PDC_PKT_WAIT_DRDY;
buf[29] = dev_sel;
/* we can represent cdb lengths 2/4/6/8/10/12/14/16 */
BUG_ON(cdb_len & ~0x1E);
/* append the CDB as the final part */
buf[30] = (((cdb_len >> 1) & 7) << 5) | ATA_REG_DATA | PDC_LAST_REG;
memcpy(buf+31, cdb, cdb_len);
}
/**
* pdc_fill_sg - Fill PCI IDE PRD table
* @qc: Metadata associated with taskfile to be transferred
*
* Fill PCI IDE PRD (scatter-gather) table with segments
* associated with the current disk command.
* Make sure hardware does not choke on it.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*
*/
static void pdc_fill_sg(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_bmdma_prd *prd = ap->bmdma_prd;
struct scatterlist *sg;
const u32 SG_COUNT_ASIC_BUG = 41*4;
unsigned int si, idx;
u32 len;
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
return;
idx = 0;
for_each_sg(qc->sg, sg, qc->n_elem, si) {
u32 addr, offset;
u32 sg_len;
/* determine if physical DMA addr spans 64K boundary.
* Note h/w doesn't support 64-bit, so we unconditionally
* truncate dma_addr_t to u32.
*/
addr = (u32) sg_dma_address(sg);
sg_len = sg_dma_len(sg);
while (sg_len) {
offset = addr & 0xffff;
len = sg_len;
if ((offset + sg_len) > 0x10000)
len = 0x10000 - offset;
prd[idx].addr = cpu_to_le32(addr);
prd[idx].flags_len = cpu_to_le32(len & 0xffff);
ata_port_dbg(ap, "PRD[%u] = (0x%X, 0x%X)\n",
idx, addr, len);
idx++;
sg_len -= len;
addr += len;
}
}
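/*
 * Work around the ASIC PRD bug (see PDC_MAX_PRD above): if the final
 * PRD entry is longer than SG_COUNT_ASIC_BUG bytes, shorten it and
 * append one extra entry covering the trailing SG_COUNT_ASIC_BUG bytes.
 */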
len = le32_to_cpu(prd[idx - 1].flags_len);
if (len > SG_COUNT_ASIC_BUG) {
u32 addr;
addr = le32_to_cpu(prd[idx - 1].addr);
prd[idx - 1].flags_len = cpu_to_le32(len - SG_COUNT_ASIC_BUG);
ata_port_dbg(ap, "PRD[%u] = (0x%X, 0x%X)\n",
idx - 1, addr, SG_COUNT_ASIC_BUG);
addr = addr + len - SG_COUNT_ASIC_BUG;
len = SG_COUNT_ASIC_BUG;
prd[idx].addr = cpu_to_le32(addr);
prd[idx].flags_len = cpu_to_le32(len);
ata_port_dbg(ap, "PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
idx++;
}
prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc)
{
struct pdc_port_priv *pp = qc->ap->private_data;
unsigned int i;
switch (qc->tf.protocol) {
case ATA_PROT_DMA:
pdc_fill_sg(qc);
fallthrough;
case ATA_PROT_NODATA:
i = pdc_pkt_header(&qc->tf, qc->ap->bmdma_prd_dma,
qc->dev->devno, pp->pkt);
if (qc->tf.flags & ATA_TFLAG_LBA48)
i = pdc_prep_lba48(&qc->tf, pp->pkt, i);
else
i = pdc_prep_lba28(&qc->tf, pp->pkt, i);
pdc_pkt_footer(&qc->tf, pp->pkt, i);
break;
case ATAPI_PROT_PIO:
pdc_fill_sg(qc);
break;
case ATAPI_PROT_DMA:
pdc_fill_sg(qc);
fallthrough;
case ATAPI_PROT_NODATA:
pdc_atapi_pkt(qc);
break;
default:
break;
}
return AC_ERR_OK;
}
static int pdc_is_sataii_tx4(unsigned long flags)
{
const unsigned long mask = PDC_FLAG_GEN_II | PDC_FLAG_4_PORTS;
return (flags & mask) == mask;
}
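/*
 * On the SATAII150 TX4 the host port numbers evidently do not match
 * the chip's internal ATA engine numbers, hence the remap table below.
 */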
static unsigned int pdc_port_no_to_ata_no(unsigned int port_no,
int is_sataii_tx4)
{
static const unsigned char sataii_tx4_port_remap[4] = { 3, 1, 0, 2};
return is_sataii_tx4 ? sataii_tx4_port_remap[port_no] : port_no;
}
static unsigned int pdc_sata_nr_ports(const struct ata_port *ap)
{
return (ap->flags & PDC_FLAG_4_PORTS) ? 4 : 2;
}
static unsigned int pdc_sata_ata_port_to_ata_no(const struct ata_port *ap)
{
const struct ata_host *host = ap->host;
unsigned int nr_ports = pdc_sata_nr_ports(ap);
unsigned int i;
for (i = 0; i < nr_ports && host->ports[i] != ap; ++i)
;
BUG_ON(i >= nr_ports);
return pdc_port_no_to_ata_no(i, pdc_is_sataii_tx4(ap->flags));
}
static void pdc_freeze(struct ata_port *ap)
{
void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
u32 tmp;
tmp = readl(ata_mmio + PDC_CTLSTAT);
tmp |= PDC_IRQ_DISABLE;
tmp &= ~PDC_DMA_ENABLE;
writel(tmp, ata_mmio + PDC_CTLSTAT);
readl(ata_mmio + PDC_CTLSTAT); /* flush */
}
static void pdc_sata_freeze(struct ata_port *ap)
{
struct ata_host *host = ap->host;
void __iomem *host_mmio = host->iomap[PDC_MMIO_BAR];
unsigned int hotplug_offset = PDC2_SATA_PLUG_CSR;
unsigned int ata_no = pdc_sata_ata_port_to_ata_no(ap);
u32 hotplug_status;
/* Disable hotplug events on this port.
*
* Locking:
* 1) hotplug register accesses must be serialised via host->lock
* 2) ap->lock == &ap->host->lock
* 3) ->freeze() and ->thaw() are called with ap->lock held
*/
hotplug_status = readl(host_mmio + hotplug_offset);
hotplug_status |= 0x11 << (ata_no + 16);
writel(hotplug_status, host_mmio + hotplug_offset);
readl(host_mmio + hotplug_offset); /* flush */
pdc_freeze(ap);
}
static void pdc_thaw(struct ata_port *ap)
{
void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
u32 tmp;
/* clear IRQ */
readl(ata_mmio + PDC_COMMAND);
/* turn IRQ back on */
tmp = readl(ata_mmio + PDC_CTLSTAT);
tmp &= ~PDC_IRQ_DISABLE;
writel(tmp, ata_mmio + PDC_CTLSTAT);
readl(ata_mmio + PDC_CTLSTAT); /* flush */
}
static void pdc_sata_thaw(struct ata_port *ap)
{
struct ata_host *host = ap->host;
void __iomem *host_mmio = host->iomap[PDC_MMIO_BAR];
unsigned int hotplug_offset = PDC2_SATA_PLUG_CSR;
unsigned int ata_no = pdc_sata_ata_port_to_ata_no(ap);
u32 hotplug_status;
pdc_thaw(ap);
/* Enable hotplug events on this port.
* Locking: see pdc_sata_freeze().
*/
hotplug_status = readl(host_mmio + hotplug_offset);
hotplug_status |= 0x11 << ata_no;
hotplug_status &= ~(0x11 << (ata_no + 16));
writel(hotplug_status, host_mmio + hotplug_offset);
readl(host_mmio + hotplug_offset); /* flush */
}
static int pdc_pata_softreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
pdc_reset_port(link->ap);
return ata_sff_softreset(link, class, deadline);
}
static unsigned int pdc_ata_port_to_ata_no(const struct ata_port *ap)
{
void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
void __iomem *host_mmio = ap->host->iomap[PDC_MMIO_BAR];
/* ata_mmio == host_mmio + 0x200 + ata_no * 0x80 */
return (ata_mmio - host_mmio - 0x200) / 0x80;
}
static void pdc_hard_reset_port(struct ata_port *ap)
{
void __iomem *host_mmio = ap->host->iomap[PDC_MMIO_BAR];
void __iomem *pcictl_b1_mmio = host_mmio + PDC_PCI_CTL + 1;
unsigned int ata_no = pdc_ata_port_to_ata_no(ap);
struct pdc_host_priv *hpriv = ap->host->private_data;
u8 tmp;
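/*
 * Briefly drop and re-raise the per-channel bit (0x10 << ata_no) in
 * byte 1 of the PCI control register to hard-reset the channel. The
 * byte is shared by all channels, so the read-modify-write sequence is
 * serialized with hard_reset_lock.
 */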
spin_lock(&hpriv->hard_reset_lock);
tmp = readb(pcictl_b1_mmio);
tmp &= ~(0x10 << ata_no);
writeb(tmp, pcictl_b1_mmio);
readb(pcictl_b1_mmio); /* flush */
udelay(100);
tmp |= (0x10 << ata_no);
writeb(tmp, pcictl_b1_mmio);
readb(pcictl_b1_mmio); /* flush */
spin_unlock(&hpriv->hard_reset_lock);
}
static int pdc_sata_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
if (link->ap->flags & PDC_FLAG_GEN_II)
pdc_not_at_command_packet_phase(link->ap);
/* hotplug IRQs should have been masked by pdc_sata_freeze() */
pdc_hard_reset_port(link->ap);
pdc_reset_port(link->ap);
/* sata_promise can't reliably acquire the first D2H Reg FIS
* after hardreset. Do non-waiting hardreset and request
* follow-up SRST.
*/
return sata_std_hardreset(link, class, deadline);
}
static void pdc_error_handler(struct ata_port *ap)
{
if (!ata_port_is_frozen(ap))
pdc_reset_port(ap);
ata_sff_error_handler(ap);
}
static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
/* make DMA engine forget about the failed command */
if (qc->flags & ATA_QCFLAG_EH)
pdc_reset_port(ap);
}
static void pdc_error_intr(struct ata_port *ap, struct ata_queued_cmd *qc,
u32 port_status, u32 err_mask)
{
struct ata_eh_info *ehi = &ap->link.eh_info;
unsigned int ac_err_mask = 0;
ata_ehi_clear_desc(ehi);
ata_ehi_push_desc(ehi, "port_status 0x%08x", port_status);
port_status &= err_mask;
if (port_status & PDC_DRIVE_ERR)
ac_err_mask |= AC_ERR_DEV;
if (port_status & (PDC_OVERRUN_ERR | PDC_UNDERRUN_ERR))
ac_err_mask |= AC_ERR_OTHER;
if (port_status & (PDC2_ATA_HBA_ERR | PDC2_ATA_DMA_CNT_ERR))
ac_err_mask |= AC_ERR_ATA_BUS;
if (port_status & (PDC_PH_ERR | PDC_SH_ERR | PDC_DH_ERR | PDC2_HTO_ERR
| PDC_PCI_SYS_ERR | PDC1_PCI_PARITY_ERR))
ac_err_mask |= AC_ERR_HOST_BUS;
if (sata_scr_valid(&ap->link)) {
u32 serror;
pdc_sata_scr_read(&ap->link, SCR_ERROR, &serror);
ehi->serror |= serror;
}
qc->err_mask |= ac_err_mask;
pdc_reset_port(ap);
ata_port_abort(ap);
}
static unsigned int pdc_host_intr(struct ata_port *ap,
struct ata_queued_cmd *qc)
{
unsigned int handled = 0;
void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
u32 port_status, err_mask;
err_mask = PDC_ERR_MASK;
if (ap->flags & PDC_FLAG_GEN_II)
err_mask &= ~PDC1_ERR_MASK;
else
err_mask &= ~PDC2_ERR_MASK;
port_status = readl(ata_mmio + PDC_GLOBAL_CTL);
if (unlikely(port_status & err_mask)) {
pdc_error_intr(ap, qc, port_status, err_mask);
return 1;
}
switch (qc->tf.protocol) {
case ATA_PROT_DMA:
case ATA_PROT_NODATA:
case ATAPI_PROT_DMA:
case ATAPI_PROT_NODATA:
qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
ata_qc_complete(qc);
handled = 1;
break;
default:
ap->stats.idle_irq++;
break;
}
return handled;
}
static void pdc_irq_clear(struct ata_port *ap)
{
void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
readl(ata_mmio + PDC_COMMAND);
}
static irqreturn_t pdc_interrupt(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
struct ata_port *ap;
u32 mask = 0;
unsigned int i, tmp;
unsigned int handled = 0;
void __iomem *host_mmio;
unsigned int hotplug_offset, ata_no;
u32 hotplug_status;
int is_sataii_tx4;
if (!host || !host->iomap[PDC_MMIO_BAR])
return IRQ_NONE;
host_mmio = host->iomap[PDC_MMIO_BAR];
spin_lock(&host->lock);
/* read and clear hotplug flags for all ports */
if (host->ports[0]->flags & PDC_FLAG_GEN_II) {
hotplug_offset = PDC2_SATA_PLUG_CSR;
hotplug_status = readl(host_mmio + hotplug_offset);
if (hotplug_status & 0xff)
writel(hotplug_status | 0xff, host_mmio + hotplug_offset);
hotplug_status &= 0xff; /* clear uninteresting bits */
} else
hotplug_status = 0;
/* reading should also clear interrupts */
mask = readl(host_mmio + PDC_INT_SEQMASK);
if (mask == 0xffffffff && hotplug_status == 0)
goto done_irq;
mask &= 0xffff; /* only 16 SEQIDs possible */
if (mask == 0 && hotplug_status == 0)
goto done_irq;
writel(mask, host_mmio + PDC_INT_SEQMASK);
is_sataii_tx4 = pdc_is_sataii_tx4(host->ports[0]->flags);
for (i = 0; i < host->n_ports; i++) {
ap = host->ports[i];
/* check for a plug or unplug event */
ata_no = pdc_port_no_to_ata_no(i, is_sataii_tx4);
tmp = hotplug_status & (0x11 << ata_no);
if (tmp) {
struct ata_eh_info *ehi = &ap->link.eh_info;
ata_ehi_clear_desc(ehi);
ata_ehi_hotplugged(ehi);
ata_ehi_push_desc(ehi, "hotplug_status %#x", tmp);
ata_port_freeze(ap);
++handled;
continue;
}
/* check for a packet interrupt */
tmp = mask & (1 << (i + 1));
if (tmp) {
struct ata_queued_cmd *qc;
qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
handled += pdc_host_intr(ap, qc);
}
}
done_irq:
spin_unlock(&host->lock);
return IRQ_RETVAL(handled);
}
static void pdc_packet_start(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct pdc_port_priv *pp = ap->private_data;
void __iomem *host_mmio = ap->host->iomap[PDC_MMIO_BAR];
void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
unsigned int port_no = ap->port_no;
u8 seq = (u8) (port_no + 1);
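/*
 * Observable submit sequence (the hardware meaning of the first write
 * is not documented here): write 1 to the per-sequence word at
 * host_mmio + seq * 4, stamp the sequence id into byte 2 of the
 * command packet, then hand the packet's bus address to the engine via
 * PDC_PKT_SUBMIT. Each MMIO write is flushed with a read back.
 */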
writel(0x00000001, host_mmio + (seq * 4));
readl(host_mmio + (seq * 4)); /* flush */
pp->pkt[2] = seq;
wmb(); /* flush PRD, pkt writes */
writel(pp->pkt_dma, ata_mmio + PDC_PKT_SUBMIT);
readl(ata_mmio + PDC_PKT_SUBMIT); /* flush */
}
static unsigned int pdc_qc_issue(struct ata_queued_cmd *qc)
{
switch (qc->tf.protocol) {
case ATAPI_PROT_NODATA:
if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
break;
fallthrough;
case ATA_PROT_NODATA:
if (qc->tf.flags & ATA_TFLAG_POLLING)
break;
fallthrough;
case ATAPI_PROT_DMA:
case ATA_PROT_DMA:
pdc_packet_start(qc);
return 0;
default:
break;
}
return ata_sff_qc_issue(qc);
}
static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
WARN_ON(tf->protocol == ATA_PROT_DMA || tf->protocol == ATAPI_PROT_DMA);
ata_sff_tf_load(ap, tf);
}
static void pdc_exec_command_mmio(struct ata_port *ap,
const struct ata_taskfile *tf)
{
WARN_ON(tf->protocol == ATA_PROT_DMA || tf->protocol == ATAPI_PROT_DMA);
ata_sff_exec_command(ap, tf);
}
static int pdc_check_atapi_dma(struct ata_queued_cmd *qc)
{
u8 *scsicmd = qc->scsicmd->cmnd;
int pio = 1; /* atapi dma off by default */
/* Whitelist commands that may use DMA. */
switch (scsicmd[0]) {
case WRITE_12:
case WRITE_10:
case WRITE_6:
case READ_12:
case READ_10:
case READ_6:
case 0xad: /* READ_DVD_STRUCTURE */
case 0xbe: /* READ_CD */
pio = 0;
}
/* -45150 (FFFF4FA2) to -1 (FFFFFFFF) shall use PIO mode */
if (scsicmd[0] == WRITE_10) {
unsigned int lba =
(scsicmd[2] << 24) |
(scsicmd[3] << 16) |
(scsicmd[4] << 8) |
scsicmd[5];
if (lba >= 0xFFFF4FA2)
pio = 1;
}
return pio;
}
static int pdc_old_sata_check_atapi_dma(struct ata_queued_cmd *qc)
{
/* First generation chips cannot use ATAPI DMA on SATA ports */
return 1;
}
static void pdc_ata_setup_port(struct ata_port *ap,
void __iomem *base, void __iomem *scr_addr)
{
ap->ioaddr.cmd_addr = base;
ap->ioaddr.data_addr = base;
ap->ioaddr.feature_addr =
ap->ioaddr.error_addr = base + 0x4;
ap->ioaddr.nsect_addr = base + 0x8;
ap->ioaddr.lbal_addr = base + 0xc;
ap->ioaddr.lbam_addr = base + 0x10;
ap->ioaddr.lbah_addr = base + 0x14;
ap->ioaddr.device_addr = base + 0x18;
ap->ioaddr.command_addr =
ap->ioaddr.status_addr = base + 0x1c;
ap->ioaddr.altstatus_addr =
ap->ioaddr.ctl_addr = base + 0x38;
ap->ioaddr.scr_addr = scr_addr;
}
static void pdc_host_init(struct ata_host *host)
{
void __iomem *host_mmio = host->iomap[PDC_MMIO_BAR];
int is_gen2 = host->ports[0]->flags & PDC_FLAG_GEN_II;
int hotplug_offset;
u32 tmp;
if (is_gen2)
hotplug_offset = PDC2_SATA_PLUG_CSR;
else
hotplug_offset = PDC_SATA_PLUG_CSR;
/*
* Except for the hotplug stuff, this is voodoo from the
* Promise driver. Label this entire section
* "TODO: figure out why we do this"
*/
/* enable BMR_BURST, maybe change FIFO_SHD to 8 dwords */
tmp = readl(host_mmio + PDC_FLASH_CTL);
tmp |= 0x02000; /* bit 13 (enable bmr burst) */
if (!is_gen2)
tmp |= 0x10000; /* bit 16 (fifo threshold at 8 dw) */
writel(tmp, host_mmio + PDC_FLASH_CTL);
/* clear plug/unplug flags for all ports */
tmp = readl(host_mmio + hotplug_offset);
writel(tmp | 0xff, host_mmio + hotplug_offset);
tmp = readl(host_mmio + hotplug_offset);
if (is_gen2) /* unmask plug/unplug ints */
writel(tmp & ~0xff0000, host_mmio + hotplug_offset);
else /* mask plug/unplug ints */
writel(tmp | 0xff0000, host_mmio + hotplug_offset);
/* don't initialise TBG or SLEW on 2nd generation chips */
if (is_gen2)
return;
/* reduce TBG clock to 133 MHz. */
tmp = readl(host_mmio + PDC_TBG_MODE);
tmp &= ~0x30000; /* clear bit 17, 16*/
tmp |= 0x10000; /* set bit 17:16 = 0:1 */
writel(tmp, host_mmio + PDC_TBG_MODE);
readl(host_mmio + PDC_TBG_MODE); /* flush */
msleep(10);
/* adjust slew rate control register. */
tmp = readl(host_mmio + PDC_SLEW_CTL);
tmp &= 0xFFFFF03F; /* clear bit 11 ~ 6 */
tmp |= 0x00000900; /* set bits 11:9 = 100b, bits 8:6 = 100b */
writel(tmp, host_mmio + PDC_SLEW_CTL);
}
static int pdc_ata_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
const struct ata_port_info *pi = &pdc_port_info[ent->driver_data];
const struct ata_port_info *ppi[PDC_MAX_PORTS];
struct ata_host *host;
struct pdc_host_priv *hpriv;
void __iomem *host_mmio;
int n_ports, i, rc;
int is_sataii_tx4;
ata_print_version_once(&pdev->dev, DRV_VERSION);
/* enable and acquire resources */
rc = pcim_enable_device(pdev);
if (rc)
return rc;
rc = pcim_iomap_regions(pdev, 1 << PDC_MMIO_BAR, DRV_NAME);
if (rc == -EBUSY)
pcim_pin_device(pdev);
if (rc)
return rc;
host_mmio = pcim_iomap_table(pdev)[PDC_MMIO_BAR];
/* determine port configuration and setup host */
n_ports = 2;
if (pi->flags & PDC_FLAG_4_PORTS)
n_ports = 4;
for (i = 0; i < n_ports; i++)
ppi[i] = pi;
if (pi->flags & PDC_FLAG_SATA_PATA) {
u8 tmp = readb(host_mmio + PDC_FLASH_CTL + 1);
if (!(tmp & 0x80))
ppi[n_ports++] = pi + 1;
}
host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
if (!host) {
dev_err(&pdev->dev, "failed to allocate host\n");
return -ENOMEM;
}
hpriv = devm_kzalloc(&pdev->dev, sizeof *hpriv, GFP_KERNEL);
if (!hpriv)
return -ENOMEM;
spin_lock_init(&hpriv->hard_reset_lock);
host->private_data = hpriv;
host->iomap = pcim_iomap_table(pdev);
is_sataii_tx4 = pdc_is_sataii_tx4(pi->flags);
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
unsigned int ata_no = pdc_port_no_to_ata_no(i, is_sataii_tx4);
unsigned int ata_offset = 0x200 + ata_no * 0x80;
unsigned int scr_offset = 0x400 + ata_no * 0x100;
pdc_ata_setup_port(ap, host_mmio + ata_offset, host_mmio + scr_offset);
ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio");
ata_port_pbar_desc(ap, PDC_MMIO_BAR, ata_offset, "ata");
}
/* initialize adapter */
pdc_host_init(host);
rc = dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
if (rc)
return rc;
/* start host, request IRQ and attach */
pci_set_master(pdev);
return ata_host_activate(host, pdev->irq, pdc_interrupt, IRQF_SHARED,
&pdc_ata_sht);
}
module_pci_driver(pdc_ata_pci_driver);
MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Promise ATA TX2/TX4/TX4000 low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pdc_ata_pci_tbl);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/sata_promise.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Cortina Systems Gemini SATA bridge add-on to Faraday FTIDE010
* Copyright (C) 2017 Linus Walleij <[email protected]>
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/bitops.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/delay.h>
#include <linux/reset.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/pinctrl/consumer.h>
#include "sata_gemini.h"
#define DRV_NAME "gemini_sata_bridge"
/**
* struct sata_gemini - a state container for a Gemini SATA bridge
* @dev: the containing device
* @base: remapped I/O memory base
* @muxmode: the current muxing mode
* @ide_pins: if the device is using the plain IDE interface pins
* @sata_bridge: if the device enables the SATA bridge
* @sata0_reset: SATA0 reset handler
* @sata1_reset: SATA1 reset handler
* @sata0_pclk: SATA0 PCLK handler
* @sata1_pclk: SATA1 PCLK handler
*/
struct sata_gemini {
struct device *dev;
void __iomem *base;
enum gemini_muxmode muxmode;
bool ide_pins;
bool sata_bridge;
struct reset_control *sata0_reset;
struct reset_control *sata1_reset;
struct clk *sata0_pclk;
struct clk *sata1_pclk;
};
/* Miscellaneous Control Register */
#define GEMINI_GLOBAL_MISC_CTRL 0x30
/*
* Values of IDE IOMUX bits in the misc control register
*
* Bits 26:24 are "IDE IO Select", which decides what SATA
* adapters are connected to which of the two IDE/ATA
* controllers in the Gemini. We can connect the two IDE blocks
* to one SATA adapter each, both acting as master, or one IDE
* block to two SATA adapters so the IDE block can act in a
* master/slave configuration.
*
* We also bring out different blocks on the actual IDE
* pins (not SATA pins) if (and only if) these are muxed in.
*
* 111-100 - Reserved
* Mode 0: 000 - ata0 master <-> sata0
* ata1 master <-> sata1
* ata0 slave interface brought out on IDE pads
* Mode 1: 001 - ata0 master <-> sata0
* ata1 master <-> sata1
* ata1 slave interface brought out on IDE pads
* Mode 2: 010 - ata1 master <-> sata1
* ata1 slave <-> sata0
* ata0 master and slave interfaces brought out
* on IDE pads
* Mode 3: 011 - ata0 master <-> sata0
* ata1 slave <-> sata1
* ata1 master and slave interfaces brought out
* on IDE pads
*/
#define GEMINI_IDE_IOMUX_MASK (7 << 24)
#define GEMINI_IDE_IOMUX_MODE0 (0 << 24)
#define GEMINI_IDE_IOMUX_MODE1 (1 << 24)
#define GEMINI_IDE_IOMUX_MODE2 (2 << 24)
#define GEMINI_IDE_IOMUX_MODE3 (3 << 24)
#define GEMINI_IDE_IOMUX_SHIFT (24)
/*
* Registers directly controlling the PATA<->SATA adapters
*/
#define GEMINI_SATA_ID 0x00
#define GEMINI_SATA_PHY_ID 0x04
#define GEMINI_SATA0_STATUS 0x08
#define GEMINI_SATA1_STATUS 0x0c
#define GEMINI_SATA0_CTRL 0x18
#define GEMINI_SATA1_CTRL 0x1c
#define GEMINI_SATA_STATUS_BIST_DONE BIT(5)
#define GEMINI_SATA_STATUS_BIST_OK BIT(4)
#define GEMINI_SATA_STATUS_PHY_READY BIT(0)
#define GEMINI_SATA_CTRL_PHY_BIST_EN BIT(14)
#define GEMINI_SATA_CTRL_PHY_FORCE_IDLE BIT(13)
#define GEMINI_SATA_CTRL_PHY_FORCE_READY BIT(12)
#define GEMINI_SATA_CTRL_PHY_AFE_LOOP_EN BIT(10)
#define GEMINI_SATA_CTRL_PHY_DIG_LOOP_EN BIT(9)
#define GEMINI_SATA_CTRL_HOTPLUG_DETECT_EN BIT(4)
#define GEMINI_SATA_CTRL_ATAPI_EN BIT(3)
#define GEMINI_SATA_CTRL_BUS_WITH_20 BIT(2)
#define GEMINI_SATA_CTRL_SLAVE_EN BIT(1)
#define GEMINI_SATA_CTRL_EN BIT(0)
/*
* There is only ever one instance of this bridge on a system,
* so create a singleton so that the FTIDE010 instances can grab
* a reference to it.
*/
static struct sata_gemini *sg_singleton;
struct sata_gemini *gemini_sata_bridge_get(void)
{
if (sg_singleton)
return sg_singleton;
return ERR_PTR(-EPROBE_DEFER);
}
EXPORT_SYMBOL(gemini_sata_bridge_get);
bool gemini_sata_bridge_enabled(struct sata_gemini *sg, bool is_ata1)
{
if (!sg->sata_bridge)
return false;
/*
* In muxmode 2 and 3 one of the ATA controllers is
* actually not connected to any SATA bridge.
*/
if ((sg->muxmode == GEMINI_MUXMODE_2) &&
!is_ata1)
return false;
if ((sg->muxmode == GEMINI_MUXMODE_3) &&
is_ata1)
return false;
return true;
}
EXPORT_SYMBOL(gemini_sata_bridge_enabled);
enum gemini_muxmode gemini_sata_get_muxmode(struct sata_gemini *sg)
{
return sg->muxmode;
}
EXPORT_SYMBOL(gemini_sata_get_muxmode);
static int gemini_sata_setup_bridge(struct sata_gemini *sg,
unsigned int bridge)
{
unsigned long timeout = jiffies + (HZ * 1);
bool bridge_online;
u32 val;
if (bridge == 0) {
val = GEMINI_SATA_CTRL_HOTPLUG_DETECT_EN | GEMINI_SATA_CTRL_EN;
/* SATA0 slave mode is only used in muxmode 2 */
if (sg->muxmode == GEMINI_MUXMODE_2)
val |= GEMINI_SATA_CTRL_SLAVE_EN;
writel(val, sg->base + GEMINI_SATA0_CTRL);
} else {
val = GEMINI_SATA_CTRL_HOTPLUG_DETECT_EN | GEMINI_SATA_CTRL_EN;
/* SATA1 slave mode is only used in muxmode 3 */
if (sg->muxmode == GEMINI_MUXMODE_3)
val |= GEMINI_SATA_CTRL_SLAVE_EN;
writel(val, sg->base + GEMINI_SATA1_CTRL);
}
/* Vendor code waits 10 ms here */
msleep(10);
/* Wait for PHY to become ready */
do {
msleep(100);
if (bridge == 0)
val = readl(sg->base + GEMINI_SATA0_STATUS);
else
val = readl(sg->base + GEMINI_SATA1_STATUS);
if (val & GEMINI_SATA_STATUS_PHY_READY)
break;
} while (time_before(jiffies, timeout));
bridge_online = !!(val & GEMINI_SATA_STATUS_PHY_READY);
dev_info(sg->dev, "SATA%d PHY %s\n", bridge,
bridge_online ? "ready" : "not ready");
return bridge_online ? 0 : -ENODEV;
}
int gemini_sata_start_bridge(struct sata_gemini *sg, unsigned int bridge)
{
struct clk *pclk;
int ret;
if (bridge == 0)
pclk = sg->sata0_pclk;
else
pclk = sg->sata1_pclk;
clk_enable(pclk);
msleep(10);
/* Do not keep clocking a bridge that is not online */
ret = gemini_sata_setup_bridge(sg, bridge);
if (ret)
clk_disable(pclk);
return ret;
}
EXPORT_SYMBOL(gemini_sata_start_bridge);
void gemini_sata_stop_bridge(struct sata_gemini *sg, unsigned int bridge)
{
if (bridge == 0)
clk_disable(sg->sata0_pclk);
else if (bridge == 1)
clk_disable(sg->sata1_pclk);
}
EXPORT_SYMBOL(gemini_sata_stop_bridge);
int gemini_sata_reset_bridge(struct sata_gemini *sg,
unsigned int bridge)
{
if (bridge == 0)
reset_control_reset(sg->sata0_reset);
else
reset_control_reset(sg->sata1_reset);
msleep(10);
return gemini_sata_setup_bridge(sg, bridge);
}
EXPORT_SYMBOL(gemini_sata_reset_bridge);
static int gemini_sata_bridge_init(struct sata_gemini *sg)
{
struct device *dev = sg->dev;
u32 sata_id, sata_phy_id;
int ret;
sg->sata0_pclk = devm_clk_get(dev, "SATA0_PCLK");
if (IS_ERR(sg->sata0_pclk)) {
dev_err(dev, "no SATA0 PCLK");
return -ENODEV;
}
sg->sata1_pclk = devm_clk_get(dev, "SATA1_PCLK");
if (IS_ERR(sg->sata1_pclk)) {
dev_err(dev, "no SATA1 PCLK");
return -ENODEV;
}
ret = clk_prepare_enable(sg->sata0_pclk);
if (ret) {
dev_err(dev, "failed to enable SATA0 PCLK\n");
return ret;
}
ret = clk_prepare_enable(sg->sata1_pclk);
if (ret) {
dev_err(dev, "failed to enable SATA1 PCLK\n");
clk_disable_unprepare(sg->sata0_pclk);
return ret;
}
sg->sata0_reset = devm_reset_control_get_exclusive(dev, "sata0");
if (IS_ERR(sg->sata0_reset)) {
dev_err(dev, "no SATA0 reset controller\n");
clk_disable_unprepare(sg->sata1_pclk);
clk_disable_unprepare(sg->sata0_pclk);
return PTR_ERR(sg->sata0_reset);
}
sg->sata1_reset = devm_reset_control_get_exclusive(dev, "sata1");
if (IS_ERR(sg->sata1_reset)) {
dev_err(dev, "no SATA1 reset controller\n");
clk_disable_unprepare(sg->sata1_pclk);
clk_disable_unprepare(sg->sata0_pclk);
return PTR_ERR(sg->sata1_reset);
}
sata_id = readl(sg->base + GEMINI_SATA_ID);
sata_phy_id = readl(sg->base + GEMINI_SATA_PHY_ID);
sg->sata_bridge = true;
clk_disable(sg->sata0_pclk);
clk_disable(sg->sata1_pclk);
dev_info(dev, "SATA ID %08x, PHY ID: %08x\n", sata_id, sata_phy_id);
return 0;
}
static int gemini_setup_ide_pins(struct device *dev)
{
struct pinctrl *p;
struct pinctrl_state *ide_state;
int ret;
p = devm_pinctrl_get(dev);
if (IS_ERR(p))
return PTR_ERR(p);
ide_state = pinctrl_lookup_state(p, "ide");
if (IS_ERR(ide_state))
return PTR_ERR(ide_state);
ret = pinctrl_select_state(p, ide_state);
if (ret) {
dev_err(dev, "could not select IDE state\n");
return ret;
}
return 0;
}
static int gemini_sata_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct sata_gemini *sg;
struct regmap *map;
u32 muxmode;
u32 gmode;
u32 gmask;
int ret;
sg = devm_kzalloc(dev, sizeof(*sg), GFP_KERNEL);
if (!sg)
return -ENOMEM;
sg->dev = dev;
sg->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sg->base))
return PTR_ERR(sg->base);
map = syscon_regmap_lookup_by_phandle(np, "syscon");
if (IS_ERR(map)) {
dev_err(dev, "no global syscon\n");
return PTR_ERR(map);
}
/* Set up the SATA bridge if need be */
if (of_property_read_bool(np, "cortina,gemini-enable-sata-bridge")) {
ret = gemini_sata_bridge_init(sg);
if (ret)
return ret;
}
if (of_property_read_bool(np, "cortina,gemini-enable-ide-pins"))
sg->ide_pins = true;
if (!sg->sata_bridge && !sg->ide_pins) {
dev_err(dev, "neither SATA bridge or IDE output enabled\n");
ret = -EINVAL;
goto out_unprep_clk;
}
ret = of_property_read_u32(np, "cortina,gemini-ata-muxmode", &muxmode);
if (ret) {
dev_err(dev, "could not parse ATA muxmode\n");
goto out_unprep_clk;
}
if (muxmode > GEMINI_MUXMODE_3) {
dev_err(dev, "illegal muxmode %d\n", muxmode);
ret = -EINVAL;
goto out_unprep_clk;
}
sg->muxmode = muxmode;
gmask = GEMINI_IDE_IOMUX_MASK;
gmode = (muxmode << GEMINI_IDE_IOMUX_SHIFT);
ret = regmap_update_bits(map, GEMINI_GLOBAL_MISC_CTRL, gmask, gmode);
if (ret) {
dev_err(dev, "unable to set up IDE muxing\n");
ret = -ENODEV;
goto out_unprep_clk;
}
/*
* Route out the IDE pins if desired.
* This is done by looking up a special pin control state called
* "ide" that will route out the IDE pins.
*/
if (sg->ide_pins) {
ret = gemini_setup_ide_pins(dev);
if (ret)
return ret;
}
dev_info(dev, "set up the Gemini IDE/SATA nexus\n");
platform_set_drvdata(pdev, sg);
sg_singleton = sg;
return 0;
out_unprep_clk:
if (sg->sata_bridge) {
clk_unprepare(sg->sata1_pclk);
clk_unprepare(sg->sata0_pclk);
}
return ret;
}
static void gemini_sata_remove(struct platform_device *pdev)
{
struct sata_gemini *sg = platform_get_drvdata(pdev);
if (sg->sata_bridge) {
clk_unprepare(sg->sata1_pclk);
clk_unprepare(sg->sata0_pclk);
}
sg_singleton = NULL;
}
static const struct of_device_id gemini_sata_of_match[] = {
{ .compatible = "cortina,gemini-sata-bridge", },
{ /* sentinel */ }
};
static struct platform_driver gemini_sata_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = gemini_sata_of_match,
},
.probe = gemini_sata_probe,
.remove_new = gemini_sata_remove,
};
module_platform_driver(gemini_sata_driver);
MODULE_DESCRIPTION("low level driver for Cortina Systems Gemini SATA bridge");
MODULE_AUTHOR("Linus Walleij <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
| linux-master | drivers/ata/sata_gemini.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* pata_optidma.c - Opti DMA PATA for new ATA layer
* (C) 2006 Red Hat Inc
*
* The Opti DMA controllers are related to the older PIO PCI controllers
* and indeed the VLB ones. The main differences are that the timing
* numbers are now based off PCI clocks not VLB and differ, and that
* MWDMA is supported.
*
* This driver should support Viper-N+, FireStar, FireStar Plus.
*
* These devices support virtual DMA for read (aka the CS5520). Later
* chips support UDMA33, but only if the rest of the board logic does,
* so you have to get this right. We don't support the virtual DMA
* but we do handle UDMA.
*
* Bits that are worth knowing
* Most control registers are shadowed into I/O registers
* 0x1F5 bit 0 tells you if the PCI/VLB clock is 33 or 25 MHz
* Virtual DMA registers *move* between rev 0x02 and rev 0x10
* UDMA requires a 66MHz FSB
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_optidma"
#define DRV_VERSION "0.3.2"
enum {
READ_REG = 0, /* index of Read cycle timing register */
WRITE_REG = 1, /* index of Write cycle timing register */
CNTRL_REG = 3, /* index of Control register */
STRAP_REG = 5, /* index of Strap register */
MISC_REG = 6 /* index of Miscellaneous register */
};
static int pci_clock; /* 0 = 33 1 = 25 */
/**
* optidma_pre_reset - probe begin
* @link: ATA link
* @deadline: deadline jiffies for the operation
*
* Set up cable type and use generic probe init
*/
static int optidma_pre_reset(struct ata_link *link, unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
static const struct pci_bits optidma_enable_bits = {
0x40, 1, 0x08, 0x00
};
if (ap->port_no && !pci_test_config_bits(pdev, &optidma_enable_bits))
return -ENOENT;
return ata_sff_prereset(link, deadline);
}
/**
* optidma_unlock - unlock control registers
* @ap: ATA port
*
* Unlock the control register block for this adapter. Registers must not
* be unlocked in a situation where libata might look at them.
*/
static void optidma_unlock(struct ata_port *ap)
{
void __iomem *regio = ap->ioaddr.cmd_addr;
/* These 3 unlock the control register access */
ioread16(regio + 1);
ioread16(regio + 1);
iowrite8(3, regio + 2);
}
/**
* optidma_lock - issue temporary relock
* @ap: ATA port
*
* Re-lock the configuration register settings.
*/
static void optidma_lock(struct ata_port *ap)
{
void __iomem *regio = ap->ioaddr.cmd_addr;
/* Relock */
iowrite8(0x83, regio + 2);
}
/**
* optidma_mode_setup - set mode data
* @ap: ATA interface
* @adev: ATA device
* @mode: Mode to set
*
* Called to do the DMA or PIO mode setup. Timing numbers are all
* pre computed to keep the code clean. There are two tables depending
* on the hardware clock speed.
*
* WARNING: While we do this the IDE registers vanish. If we take an
* IRQ here we depend on the host set locking to avoid catastrophe.
*/
static void optidma_mode_setup(struct ata_port *ap, struct ata_device *adev, u8 mode)
{
struct ata_device *pair = ata_dev_pair(adev);
int pio = adev->pio_mode - XFER_PIO_0;
int dma = adev->dma_mode - XFER_MW_DMA_0;
void __iomem *regio = ap->ioaddr.cmd_addr;
u8 addr;
/* Address table precomputed with a DCLK of 2 */
static const u8 addr_timing[2][5] = {
{ 0x30, 0x20, 0x20, 0x10, 0x10 },
{ 0x20, 0x20, 0x10, 0x10, 0x10 }
};
static const u8 data_rec_timing[2][5] = {
{ 0x59, 0x46, 0x30, 0x20, 0x20 },
{ 0x46, 0x32, 0x20, 0x20, 0x10 }
};
static const u8 dma_data_rec_timing[2][3] = {
{ 0x76, 0x20, 0x20 },
{ 0x54, 0x20, 0x10 }
};
/* Switch from IDE to control mode */
optidma_unlock(ap);
/*
* As with many controllers the address setup time is shared
* and must suit both devices if present. FIXME: Check if we
* need to look at slowest of PIO/DMA mode of either device
*/
if (mode >= XFER_MW_DMA_0)
addr = 0;
else
addr = addr_timing[pci_clock][pio];
if (pair) {
u8 pair_addr;
/* Hardware constraint */
if (ata_dma_enabled(pair))
pair_addr = 0;
else
pair_addr = addr_timing[pci_clock][pair->pio_mode - XFER_PIO_0];
if (pair_addr > addr)
addr = pair_addr;
}
/* Commence primary programming sequence */
/* First we load the device number into the timing select */
iowrite8(adev->devno, regio + MISC_REG);
/* Now we load the data timings into read data/write data */
if (mode < XFER_MW_DMA_0) {
iowrite8(data_rec_timing[pci_clock][pio], regio + READ_REG);
iowrite8(data_rec_timing[pci_clock][pio], regio + WRITE_REG);
} else if (mode < XFER_UDMA_0) {
iowrite8(dma_data_rec_timing[pci_clock][dma], regio + READ_REG);
iowrite8(dma_data_rec_timing[pci_clock][dma], regio + WRITE_REG);
}
/* Finally we load the address setup into the misc register */
iowrite8(addr | adev->devno, regio + MISC_REG);
/* Programming sequence complete, timing 0 dev 0, timing 1 dev 1 */
iowrite8(0x85, regio + CNTRL_REG);
/* Switch back to IDE mode */
optidma_lock(ap);
/* Note: at this point our programming is incomplete. We are
not supposed to program PCI 0x43 "things we hacked onto the chip"
until we've done both sets of PIO/DMA timings */
}
/**
* optiplus_mode_setup - DMA setup for Firestar Plus
* @ap: ATA port
* @adev: device
* @mode: desired mode
*
* The Firestar plus has additional UDMA functionality for UDMA0-2 and
* requires we do some additional work. Because the base work we must do
* is mostly shared we wrap the Firestar setup functionality in this
 * one function.
*/
static void optiplus_mode_setup(struct ata_port *ap, struct ata_device *adev, u8 mode)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u8 udcfg;
u8 udslave;
int dev2 = 2 * adev->devno;
int unit = 2 * ap->port_no + adev->devno;
int udma = mode - XFER_UDMA_0;
pci_read_config_byte(pdev, 0x44, &udcfg);
if (mode <= XFER_UDMA_0) {
udcfg &= ~(1 << unit);
optidma_mode_setup(ap, adev, adev->dma_mode);
} else {
udcfg |= (1 << unit);
if (ap->port_no) {
pci_read_config_byte(pdev, 0x45, &udslave);
udslave &= ~(0x03 << dev2);
udslave |= (udma << dev2);
pci_write_config_byte(pdev, 0x45, udslave);
} else {
udcfg &= ~(0x30 << dev2);
			udcfg |= (udma << (4 + dev2));
}
}
pci_write_config_byte(pdev, 0x44, udcfg);
}
/**
* optidma_set_pio_mode - PIO setup callback
* @ap: ATA port
* @adev: Device
*
* The libata core provides separate functions for handling PIO and
* DMA programming. The architecture of the Firestar makes it easier
* for us to have a common function so we provide wrappers
*/
static void optidma_set_pio_mode(struct ata_port *ap, struct ata_device *adev)
{
optidma_mode_setup(ap, adev, adev->pio_mode);
}
/**
* optidma_set_dma_mode - DMA setup callback
* @ap: ATA port
* @adev: Device
*
* The libata core provides separate functions for handling PIO and
* DMA programming. The architecture of the Firestar makes it easier
* for us to have a common function so we provide wrappers
*/
static void optidma_set_dma_mode(struct ata_port *ap, struct ata_device *adev)
{
optidma_mode_setup(ap, adev, adev->dma_mode);
}
/**
* optiplus_set_pio_mode - PIO setup callback
* @ap: ATA port
* @adev: Device
*
* The libata core provides separate functions for handling PIO and
* DMA programming. The architecture of the Firestar makes it easier
* for us to have a common function so we provide wrappers
*/
static void optiplus_set_pio_mode(struct ata_port *ap, struct ata_device *adev)
{
optiplus_mode_setup(ap, adev, adev->pio_mode);
}
/**
* optiplus_set_dma_mode - DMA setup callback
* @ap: ATA port
* @adev: Device
*
* The libata core provides separate functions for handling PIO and
* DMA programming. The architecture of the Firestar makes it easier
* for us to have a common function so we provide wrappers
*/
static void optiplus_set_dma_mode(struct ata_port *ap, struct ata_device *adev)
{
optiplus_mode_setup(ap, adev, adev->dma_mode);
}
/**
* optidma_make_bits43 - PCI setup helper
* @adev: ATA device
*
* Turn the ATA device setup into PCI configuration bits
* for register 0x43 and return the two bits needed.
*/
static u8 optidma_make_bits43(struct ata_device *adev)
{
static const u8 bits43[5] = {
0, 0, 0, 1, 2
};
if (!ata_dev_enabled(adev))
return 0;
if (ata_dma_enabled(adev))
return adev->dma_mode - XFER_MW_DMA_0;
return bits43[adev->pio_mode - XFER_PIO_0];
}
/**
* optidma_set_mode - mode setup
* @link: link to set up
* @r_failed: out parameter for failed device
*
* Use the standard setup to tune the chipset and then finalise the
* configuration by writing the nibble of extra bits of data into
* the chip.
*/
static int optidma_set_mode(struct ata_link *link, struct ata_device **r_failed)
{
struct ata_port *ap = link->ap;
u8 r;
int nybble = 4 * ap->port_no;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int rc = ata_do_set_mode(link, r_failed);
if (rc == 0) {
pci_read_config_byte(pdev, 0x43, &r);
r &= (0x0F << nybble);
r |= (optidma_make_bits43(&link->device[0]) +
		     (optidma_make_bits43(&link->device[1]) << 2)) << nybble;
pci_write_config_byte(pdev, 0x43, r);
}
return rc;
}
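/*
 * Editor's note: illustrative sketch, not part of the upstream driver.
 * It shows how the register 0x43 nibble written above is assembled -
 * two bits for device 0 in the low half of the nibble, two bits for
 * device 1 shifted up by two, and the whole nibble moved into place by
 * 4 * port number.
 */
static u8 __maybe_unused optidma_pack_bits43(u8 dev0_bits, u8 dev1_bits,
					     int port_no)
{
	int nybble = 4 * port_no;
	return (dev0_bits | (dev1_bits << 2)) << nybble;
}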
static const struct scsi_host_template optidma_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static struct ata_port_operations optidma_port_ops = {
.inherits = &ata_bmdma_port_ops,
.cable_detect = ata_cable_40wire,
.set_piomode = optidma_set_pio_mode,
.set_dmamode = optidma_set_dma_mode,
.set_mode = optidma_set_mode,
.prereset = optidma_pre_reset,
};
static struct ata_port_operations optiplus_port_ops = {
.inherits = &optidma_port_ops,
.set_piomode = optiplus_set_pio_mode,
.set_dmamode = optiplus_set_dma_mode,
};
/**
* optiplus_with_udma - Look for UDMA capable setup
* @pdev: ATA controller
*/
static int optiplus_with_udma(struct pci_dev *pdev)
{
u8 r;
int ret = 0;
int ioport = 0x22;
struct pci_dev *dev1;
/* Find function 1 */
dev1 = pci_get_device(0x1045, 0xC701, NULL);
if (dev1 == NULL)
return 0;
/* Rev must be >= 0x10 */
pci_read_config_byte(dev1, 0x08, &r);
if (r < 0x10)
goto done_nomsg;
/* Read the chipset system configuration to check our mode */
pci_read_config_byte(dev1, 0x5F, &r);
ioport |= (r << 8);
outb(0x10, ioport);
/* Must be 66Mhz sync */
if ((inb(ioport + 2) & 1) == 0)
goto done;
/* Check the ATA arbitration/timing is suitable */
pci_read_config_byte(pdev, 0x42, &r);
if ((r & 0x36) != 0x36)
goto done;
pci_read_config_byte(dev1, 0x52, &r);
if (r & 0x80) /* IDEDIR disabled */
ret = 1;
done:
	if (!ret)
		printk(KERN_WARNING "UDMA not supported in this configuration.\n");
done_nomsg: /* Wrong chip revision */
pci_dev_put(dev1);
return ret;
}
static int optidma_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
static const struct ata_port_info info_82c700 = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.port_ops = &optidma_port_ops
};
static const struct ata_port_info info_82c700_udma = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA2,
.port_ops = &optiplus_port_ops
};
const struct ata_port_info *ppi[] = { &info_82c700, NULL };
int rc;
ata_print_version_once(&dev->dev, DRV_VERSION);
rc = pcim_enable_device(dev);
if (rc)
return rc;
/* Fixed location chipset magic */
inw(0x1F1);
inw(0x1F1);
pci_clock = inb(0x1F5) & 1; /* 0 = 33Mhz, 1 = 25Mhz */
if (optiplus_with_udma(dev))
ppi[0] = &info_82c700_udma;
return ata_pci_bmdma_init_one(dev, ppi, &optidma_sht, NULL, 0);
}
static const struct pci_device_id optidma[] = {
{ PCI_VDEVICE(OPTI, 0xD568), }, /* Opti 82C700 */
{ },
};
static struct pci_driver optidma_pci_driver = {
.name = DRV_NAME,
.id_table = optidma,
.probe = optidma_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
};
module_pci_driver(optidma_pci_driver);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for Opti Firestar/Firestar Plus");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, optidma);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_optidma.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Buddha, Catweasel and X-Surf PATA controller driver
*
* Copyright (c) 2018 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Based on buddha.c:
*
* Copyright (C) 1997, 2001 by Geert Uytterhoeven and others
*/
#include <linux/ata.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/libata.h>
#include <linux/mm.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/zorro.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <asm/amigahw.h>
#include <asm/amigaints.h>
#include <asm/setup.h>
#define DRV_NAME "pata_buddha"
#define DRV_VERSION "0.1.1"
#define BUDDHA_BASE1 0x800
#define BUDDHA_BASE2 0xa00
#define BUDDHA_BASE3 0xc00
#define XSURF_BASE1 0xb000 /* 2.5" interface */
#define XSURF_BASE2 0xd000 /* 3.5" interface */
#define BUDDHA_CONTROL 0x11a
#define BUDDHA_IRQ 0xf00
#define XSURF_IRQ 0x7e
#define BUDDHA_IRQ_MR 0xfc0 /* master interrupt enable */
enum {
BOARD_BUDDHA = 0,
BOARD_CATWEASEL,
BOARD_XSURF
};
static unsigned int buddha_bases[3] = {
BUDDHA_BASE1, BUDDHA_BASE2, BUDDHA_BASE3
};
static unsigned int xsurf_bases[2] = {
XSURF_BASE1, XSURF_BASE2
};
static const struct scsi_host_template pata_buddha_sht = {
ATA_PIO_SHT(DRV_NAME),
};
/* FIXME: is this needed? */
static unsigned int pata_buddha_data_xfer(struct ata_queued_cmd *qc,
unsigned char *buf,
unsigned int buflen, int rw)
{
struct ata_device *dev = qc->dev;
struct ata_port *ap = dev->link->ap;
void __iomem *data_addr = ap->ioaddr.data_addr;
unsigned int words = buflen >> 1;
/* Transfer multiple of 2 bytes */
if (rw == READ)
raw_insw((u16 *)data_addr, (u16 *)buf, words);
else
raw_outsw((u16 *)data_addr, (u16 *)buf, words);
/* Transfer trailing byte, if any. */
if (unlikely(buflen & 0x01)) {
unsigned char pad[2] = { };
/* Point buf to the tail of buffer */
buf += buflen - 1;
if (rw == READ) {
raw_insw((u16 *)data_addr, (u16 *)pad, 1);
*buf = pad[0];
} else {
pad[0] = *buf;
raw_outsw((u16 *)data_addr, (u16 *)pad, 1);
}
words++;
}
return words << 1;
}
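/*
 * Editor's note: illustrative sketch, not part of the upstream driver.
 * It restates the length handling above - an odd buflen is padded to a
 * full 16-bit word before being returned, so the reported transfer
 * count is always even.
 */
static unsigned int __maybe_unused pata_buddha_padded_len(unsigned int buflen)
{
	return (buflen + 1) & ~1U;
}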
/*
* Provide our own set_mode() as we don't want to change anything that has
* already been configured..
*/
static int pata_buddha_set_mode(struct ata_link *link,
struct ata_device **unused)
{
struct ata_device *dev;
ata_for_each_dev(dev, link, ENABLED) {
/* We don't really care */
dev->pio_mode = dev->xfer_mode = XFER_PIO_0;
dev->xfer_shift = ATA_SHIFT_PIO;
dev->flags |= ATA_DFLAG_PIO;
ata_dev_info(dev, "configured for PIO\n");
}
return 0;
}
static bool pata_buddha_irq_check(struct ata_port *ap)
{
u8 ch;
ch = z_readb((unsigned long)ap->private_data);
return !!(ch & 0x80);
}
static void pata_xsurf_irq_clear(struct ata_port *ap)
{
z_writeb(0, (unsigned long)ap->private_data);
}
static struct ata_port_operations pata_buddha_ops = {
.inherits = &ata_sff_port_ops,
.sff_data_xfer = pata_buddha_data_xfer,
.sff_irq_check = pata_buddha_irq_check,
.cable_detect = ata_cable_unknown,
.set_mode = pata_buddha_set_mode,
};
static struct ata_port_operations pata_xsurf_ops = {
.inherits = &ata_sff_port_ops,
.sff_data_xfer = pata_buddha_data_xfer,
.sff_irq_check = pata_buddha_irq_check,
.sff_irq_clear = pata_xsurf_irq_clear,
.cable_detect = ata_cable_unknown,
.set_mode = pata_buddha_set_mode,
};
static int pata_buddha_probe(struct zorro_dev *z,
const struct zorro_device_id *ent)
{
static const char * const board_name[] = {
"Buddha", "Catweasel", "X-Surf"
};
struct ata_host *host;
void __iomem *buddha_board;
unsigned long board;
unsigned int type = ent->driver_data;
unsigned int nr_ports = (type == BOARD_CATWEASEL) ? 3 : 2;
void *old_drvdata;
int i;
dev_info(&z->dev, "%s IDE controller\n", board_name[type]);
board = z->resource.start;
if (type != BOARD_XSURF) {
if (!devm_request_mem_region(&z->dev,
board + BUDDHA_BASE1,
0x800, DRV_NAME))
return -ENXIO;
} else {
if (!devm_request_mem_region(&z->dev,
board + XSURF_BASE1,
0x1000, DRV_NAME))
return -ENXIO;
		if (!devm_request_mem_region(&z->dev,
					     board + XSURF_BASE2,
					     0x1000, DRV_NAME))
			return -ENXIO;
}
/* Workaround for X-Surf: Save drvdata in case zorro8390 has set it */
if (type == BOARD_XSURF)
old_drvdata = dev_get_drvdata(&z->dev);
/* allocate host */
host = ata_host_alloc(&z->dev, nr_ports);
if (type == BOARD_XSURF)
dev_set_drvdata(&z->dev, old_drvdata);
if (!host)
return -ENXIO;
buddha_board = ZTWO_VADDR(board);
/* enable the board IRQ on Buddha/Catweasel */
if (type != BOARD_XSURF)
z_writeb(0, buddha_board + BUDDHA_IRQ_MR);
for (i = 0; i < nr_ports; i++) {
struct ata_port *ap = host->ports[i];
void __iomem *base, *irqport;
unsigned long ctl = 0;
if (type != BOARD_XSURF) {
ap->ops = &pata_buddha_ops;
base = buddha_board + buddha_bases[i];
ctl = BUDDHA_CONTROL;
irqport = buddha_board + BUDDHA_IRQ + i * 0x40;
} else {
ap->ops = &pata_xsurf_ops;
base = buddha_board + xsurf_bases[i];
/* X-Surf has no CS1* (Control/AltStat) */
irqport = buddha_board + XSURF_IRQ;
}
ap->pio_mask = ATA_PIO4;
ap->flags |= ATA_FLAG_SLAVE_POSS | ATA_FLAG_NO_IORDY;
ap->ioaddr.data_addr = base;
ap->ioaddr.error_addr = base + 2 + 1 * 4;
ap->ioaddr.feature_addr = base + 2 + 1 * 4;
ap->ioaddr.nsect_addr = base + 2 + 2 * 4;
ap->ioaddr.lbal_addr = base + 2 + 3 * 4;
ap->ioaddr.lbam_addr = base + 2 + 4 * 4;
ap->ioaddr.lbah_addr = base + 2 + 5 * 4;
ap->ioaddr.device_addr = base + 2 + 6 * 4;
ap->ioaddr.status_addr = base + 2 + 7 * 4;
ap->ioaddr.command_addr = base + 2 + 7 * 4;
if (ctl) {
ap->ioaddr.altstatus_addr = base + ctl;
ap->ioaddr.ctl_addr = base + ctl;
}
ap->private_data = (void *)irqport;
ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", board,
ctl ? board + buddha_bases[i] + ctl : 0);
}
ata_host_activate(host, IRQ_AMIGA_PORTS, ata_sff_interrupt,
IRQF_SHARED, &pata_buddha_sht);
return 0;
}
static void pata_buddha_remove(struct zorro_dev *z)
{
struct ata_host *host = dev_get_drvdata(&z->dev);
ata_host_detach(host);
}
static const struct zorro_device_id pata_buddha_zorro_tbl[] = {
{ ZORRO_PROD_INDIVIDUAL_COMPUTERS_BUDDHA, BOARD_BUDDHA},
{ ZORRO_PROD_INDIVIDUAL_COMPUTERS_CATWEASEL, BOARD_CATWEASEL},
{ 0 }
};
MODULE_DEVICE_TABLE(zorro, pata_buddha_zorro_tbl);
static struct zorro_driver pata_buddha_driver = {
.name = "pata_buddha",
.id_table = pata_buddha_zorro_tbl,
.probe = pata_buddha_probe,
.remove = pata_buddha_remove,
};
/*
* We cannot have a modalias for X-Surf boards, as it competes with the
* zorro8390 network driver. As a stopgap measure until we have proper
* MFD support for this board, we manually attach to it late after Zorro
* has enumerated its boards.
*/
static int __init pata_buddha_late_init(void)
{
struct zorro_dev *z = NULL;
/* Auto-bind to regular boards */
zorro_register_driver(&pata_buddha_driver);
/* Manually bind to all X-Surf boards */
while ((z = zorro_find_device(ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF, z))) {
static struct zorro_device_id xsurf_ent = {
ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF, BOARD_XSURF
};
pata_buddha_probe(z, &xsurf_ent);
}
return 0;
}
late_initcall(pata_buddha_late_init);
MODULE_AUTHOR("Bartlomiej Zolnierkiewicz");
MODULE_DESCRIPTION("low-level driver for Buddha/Catweasel/X-Surf PATA");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_buddha.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* DaVinci DM816 AHCI SATA platform driver
*
* Copyright (C) 2017 BayLibre SAS
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pm.h>
#include <linux/platform_device.h>
#include <linux/libata.h>
#include <linux/ahci_platform.h>
#include "ahci.h"
#define AHCI_DM816_DRV_NAME "ahci-dm816"
#define AHCI_DM816_PHY_ENPLL(x) ((x) << 0)
#define AHCI_DM816_PHY_MPY(x) ((x) << 1)
#define AHCI_DM816_PHY_LOS(x) ((x) << 12)
#define AHCI_DM816_PHY_RXCDR(x) ((x) << 13)
#define AHCI_DM816_PHY_RXEQ(x) ((x) << 16)
#define AHCI_DM816_PHY_TXSWING(x) ((x) << 23)
#define AHCI_DM816_P0PHYCR_REG 0x178
#define AHCI_DM816_P1PHYCR_REG 0x1f8
#define AHCI_DM816_PLL_OUT 1500000000LU
static const unsigned long pll_mpy_table[] = {
400, 500, 600, 800, 825, 1000, 1200,
1250, 1500, 1600, 1650, 2000, 2200, 2500
};
static int ahci_dm816_get_mpy_bits(unsigned long refclk_rate)
{
unsigned long pll_multiplier;
int i;
/*
* We need to determine the value of the multiplier (MPY) bits.
* In order to include the 8.25 multiplier we need to first divide
* the refclk rate by 100.
*/
pll_multiplier = AHCI_DM816_PLL_OUT / (refclk_rate / 100);
for (i = 0; i < ARRAY_SIZE(pll_mpy_table); i++) {
if (pll_mpy_table[i] == pll_multiplier)
return i;
}
/*
* We should have divided evenly - if not, return an invalid
* value.
*/
return -1;
}
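/*
 * Editor's note: illustrative sketch, not part of the upstream driver.
 * Worked example of the lookup above, assuming a hypothetical 100 MHz
 * reference clock: 1500000000 / (100000000 / 100) = 1500, which is
 * entry 8 of pll_mpy_table, i.e. the 15.00 multiplier needed to reach
 * the 1.5 GHz PLL output.
 */
static int __maybe_unused ahci_dm816_mpy_example(void)
{
	/* Assumed 100 MHz reference clock (hypothetical value). */
	return ahci_dm816_get_mpy_bits(100000000UL); /* expected: 8 */
}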
static int ahci_dm816_phy_init(struct ahci_host_priv *hpriv, struct device *dev)
{
unsigned long refclk_rate;
int mpy;
u32 val;
/*
* We should have been supplied two clocks: the functional and
* keep-alive clock and the external reference clock. We need the
* rate of the latter to calculate the correct value of MPY bits.
*/
if (hpriv->n_clks < 2) {
dev_err(dev, "reference clock not supplied\n");
return -EINVAL;
}
refclk_rate = clk_get_rate(hpriv->clks[1].clk);
if ((refclk_rate % 100) != 0) {
dev_err(dev, "reference clock rate must be divisible by 100\n");
return -EINVAL;
}
mpy = ahci_dm816_get_mpy_bits(refclk_rate);
if (mpy < 0) {
dev_err(dev, "can't calculate the MPY bits value\n");
return -EINVAL;
}
/* Enable the PHY and configure the first HBA port. */
val = AHCI_DM816_PHY_MPY(mpy) | AHCI_DM816_PHY_LOS(1) |
AHCI_DM816_PHY_RXCDR(4) | AHCI_DM816_PHY_RXEQ(1) |
AHCI_DM816_PHY_TXSWING(3) | AHCI_DM816_PHY_ENPLL(1);
writel(val, hpriv->mmio + AHCI_DM816_P0PHYCR_REG);
/* Configure the second HBA port. */
val = AHCI_DM816_PHY_LOS(1) | AHCI_DM816_PHY_RXCDR(4) |
AHCI_DM816_PHY_RXEQ(1) | AHCI_DM816_PHY_TXSWING(3);
writel(val, hpriv->mmio + AHCI_DM816_P1PHYCR_REG);
return 0;
}
static int ahci_dm816_softreset(struct ata_link *link,
unsigned int *class, unsigned long deadline)
{
int pmp, ret;
pmp = sata_srst_pmp(link);
/*
* There's an issue with the SATA controller on DM816 SoC: if we
* enable Port Multiplier support, but the drive is connected directly
* to the board, it can't be detected. As a workaround: if PMP is
* enabled, we first call ahci_do_softreset() and pass it the result of
* sata_srst_pmp(). If this call fails, we retry with pmp = 0.
*/
ret = ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
if (pmp && ret == -EBUSY)
return ahci_do_softreset(link, class, 0,
deadline, ahci_check_ready);
return ret;
}
static struct ata_port_operations ahci_dm816_port_ops = {
.inherits = &ahci_platform_ops,
.softreset = ahci_dm816_softreset,
};
static const struct ata_port_info ahci_dm816_port_info = {
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_dm816_port_ops,
};
static const struct scsi_host_template ahci_dm816_platform_sht = {
AHCI_SHT(AHCI_DM816_DRV_NAME),
};
static int ahci_dm816_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ahci_host_priv *hpriv;
int rc;
hpriv = ahci_platform_get_resources(pdev, 0);
if (IS_ERR(hpriv))
return PTR_ERR(hpriv);
rc = ahci_platform_enable_resources(hpriv);
if (rc)
return rc;
rc = ahci_dm816_phy_init(hpriv, dev);
if (rc)
goto disable_resources;
rc = ahci_platform_init_host(pdev, hpriv,
&ahci_dm816_port_info,
&ahci_dm816_platform_sht);
if (rc)
goto disable_resources;
return 0;
disable_resources:
ahci_platform_disable_resources(hpriv);
return rc;
}
static SIMPLE_DEV_PM_OPS(ahci_dm816_pm_ops,
ahci_platform_suspend,
ahci_platform_resume);
static const struct of_device_id ahci_dm816_of_match[] = {
{ .compatible = "ti,dm816-ahci", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ahci_dm816_of_match);
static struct platform_driver ahci_dm816_driver = {
.probe = ahci_dm816_probe,
.remove_new = ata_platform_remove_one,
.driver = {
.name = AHCI_DM816_DRV_NAME,
.of_match_table = ahci_dm816_of_match,
.pm = &ahci_dm816_pm_ops,
},
};
module_platform_driver(ahci_dm816_driver);
MODULE_DESCRIPTION("DaVinci DM816 AHCI SATA platform driver");
MODULE_AUTHOR("Bartosz Golaszewski <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/ata/ahci_dm816.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* AppliedMicro X-Gene SoC SATA Host Controller Driver
*
* Copyright (c) 2014, Applied Micro Circuits Corporation
* Author: Loc Ho <[email protected]>
* Tuan Phan <[email protected]>
* Suman Tripathi <[email protected]>
*
* NOTE: PM support is not currently available.
*/
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/ahci_platform.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/phy/phy.h>
#include "ahci.h"
#define DRV_NAME "xgene-ahci"
/* Max # of disks per controller */
#define MAX_AHCI_CHN_PERCTR 2
/* MUX CSR */
#define SATA_ENET_CONFIG_REG 0x00000000
#define CFG_SATA_ENET_SELECT_MASK 0x00000001
/* SATA core host controller CSR */
#define SLVRDERRATTRIBUTES 0x00000000
#define SLVWRERRATTRIBUTES 0x00000004
#define MSTRDERRATTRIBUTES 0x00000008
#define MSTWRERRATTRIBUTES 0x0000000c
#define BUSCTLREG 0x00000014
#define IOFMSTRWAUX 0x00000018
#define INTSTATUSMASK 0x0000002c
#define ERRINTSTATUS 0x00000030
#define ERRINTSTATUSMASK 0x00000034
/* SATA host AHCI CSR */
#define PORTCFG 0x000000a4
#define PORTADDR_SET(dst, src) \
(((dst) & ~0x0000003f) | (((u32)(src)) & 0x0000003f))
#define PORTPHY1CFG 0x000000a8
#define PORTPHY1CFG_FRCPHYRDY_SET(dst, src) \
(((dst) & ~0x00100000) | (((u32)(src) << 0x14) & 0x00100000))
#define PORTPHY2CFG 0x000000ac
#define PORTPHY3CFG 0x000000b0
#define PORTPHY4CFG 0x000000b4
#define PORTPHY5CFG 0x000000b8
#define SCTL0 0x0000012C
#define PORTPHY5CFG_RTCHG_SET(dst, src) \
(((dst) & ~0xfff00000) | (((u32)(src) << 0x14) & 0xfff00000))
#define PORTAXICFG_EN_CONTEXT_SET(dst, src) \
(((dst) & ~0x01000000) | (((u32)(src) << 0x18) & 0x01000000))
#define PORTAXICFG 0x000000bc
#define PORTAXICFG_OUTTRANS_SET(dst, src) \
(((dst) & ~0x00f00000) | (((u32)(src) << 0x14) & 0x00f00000))
#define PORTRANSCFG 0x000000c8
#define PORTRANSCFG_RXWM_SET(dst, src) \
(((dst) & ~0x0000007f) | (((u32)(src)) & 0x0000007f))
/* SATA host controller AXI CSR */
#define INT_SLV_TMOMASK 0x00000010
/* SATA diagnostic CSR */
#define CFG_MEM_RAM_SHUTDOWN 0x00000070
#define BLOCK_MEM_RDY 0x00000074
/* Max retry for link down */
#define MAX_LINK_DOWN_RETRY 3
enum xgene_ahci_version {
XGENE_AHCI_V1 = 1,
XGENE_AHCI_V2,
};
struct xgene_ahci_context {
struct ahci_host_priv *hpriv;
struct device *dev;
u8 last_cmd[MAX_AHCI_CHN_PERCTR]; /* tracking the last command issued*/
u32 class[MAX_AHCI_CHN_PERCTR]; /* tracking the class of device */
void __iomem *csr_core; /* Core CSR address of IP */
void __iomem *csr_diag; /* Diag CSR address of IP */
void __iomem *csr_axi; /* AXI CSR address of IP */
void __iomem *csr_mux; /* MUX CSR address of IP */
};
static int xgene_ahci_init_memram(struct xgene_ahci_context *ctx)
{
dev_dbg(ctx->dev, "Release memory from shutdown\n");
writel(0x0, ctx->csr_diag + CFG_MEM_RAM_SHUTDOWN);
readl(ctx->csr_diag + CFG_MEM_RAM_SHUTDOWN); /* Force a barrier */
msleep(1); /* reset may take up to 1ms */
if (readl(ctx->csr_diag + BLOCK_MEM_RDY) != 0xFFFFFFFF) {
dev_err(ctx->dev, "failed to release memory from shutdown\n");
return -ENODEV;
}
return 0;
}
/**
* xgene_ahci_poll_reg_val- Poll a register on a specific value.
* @ap : ATA port of interest.
* @reg : Register of interest.
* @val : Value to be attained.
* @interval : waiting interval for polling.
* @timeout : timeout for achieving the value.
*/
static int xgene_ahci_poll_reg_val(struct ata_port *ap,
void __iomem *reg, unsigned int val,
unsigned int interval, unsigned int timeout)
{
unsigned long deadline;
unsigned int tmp;
tmp = ioread32(reg);
deadline = ata_deadline(jiffies, timeout);
while (tmp != val && time_before(jiffies, deadline)) {
ata_msleep(ap, interval);
tmp = ioread32(reg);
}
return tmp;
}
/**
* xgene_ahci_restart_engine - Restart the dma engine.
* @ap : ATA port of interest
*
* Waits for completion of multiple commands and restarts
* the DMA engine inside the controller.
*/
static int xgene_ahci_restart_engine(struct ata_port *ap)
{
struct ahci_host_priv *hpriv = ap->host->private_data;
struct ahci_port_priv *pp = ap->private_data;
void __iomem *port_mmio = ahci_port_base(ap);
u32 fbs;
/*
* In case of PMP multiple IDENTIFY DEVICE commands can be
	 * issued inside PxCI. So we need to poll PxCI for the
* completion of outstanding IDENTIFY DEVICE commands before
* we restart the DMA engine.
*/
if (xgene_ahci_poll_reg_val(ap, port_mmio +
PORT_CMD_ISSUE, 0x0, 1, 100))
return -EBUSY;
hpriv->stop_engine(ap);
ahci_start_fis_rx(ap);
/*
* Enable the PxFBS.FBS_EN bit as it
* gets cleared due to stopping the engine.
*/
if (pp->fbs_supported) {
fbs = readl(port_mmio + PORT_FBS);
writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS);
fbs = readl(port_mmio + PORT_FBS);
}
hpriv->start_engine(ap);
return 0;
}
/**
* xgene_ahci_qc_issue - Issue commands to the device
* @qc: Command to issue
*
 * Due to a hardware erratum for the IDENTIFY DEVICE command, the controller
 * cannot clear the BSY bit after receiving the PIO setup FIS. This results in
 * the DMA state machine going into the CMFatalErrorUpdate state and locking
 * up. Restarting the DMA engine brings the controller out of the locked-up
 * state.
*
* Due to H/W errata, the controller is unable to save the PMP
* field fetched from command header before sending the H2D FIS.
* When the device returns the PMP port field in the D2H FIS, there is
* a mismatch and results in command completion failure. The
* workaround is to write the pmp value to PxFBS.DEV field before issuing
* any command to PMP.
*/
static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ahci_host_priv *hpriv = ap->host->private_data;
struct xgene_ahci_context *ctx = hpriv->plat_data;
int rc = 0;
u32 port_fbs;
void __iomem *port_mmio = ahci_port_base(ap);
/*
* Write the pmp value to PxFBS.DEV
	 * for the case of a Port Multiplier.
*/
if (ctx->class[ap->port_no] == ATA_DEV_PMP) {
port_fbs = readl(port_mmio + PORT_FBS);
port_fbs &= ~PORT_FBS_DEV_MASK;
port_fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET;
writel(port_fbs, port_mmio + PORT_FBS);
}
if (unlikely((ctx->last_cmd[ap->port_no] == ATA_CMD_ID_ATA) ||
(ctx->last_cmd[ap->port_no] == ATA_CMD_PACKET) ||
(ctx->last_cmd[ap->port_no] == ATA_CMD_SMART)))
xgene_ahci_restart_engine(ap);
rc = ahci_qc_issue(qc);
/* Save the last command issued */
ctx->last_cmd[ap->port_no] = qc->tf.command;
return rc;
}
static bool xgene_ahci_is_memram_inited(struct xgene_ahci_context *ctx)
{
void __iomem *diagcsr = ctx->csr_diag;
return (readl(diagcsr + CFG_MEM_RAM_SHUTDOWN) == 0 &&
readl(diagcsr + BLOCK_MEM_RDY) == 0xFFFFFFFF);
}
/**
* xgene_ahci_read_id - Read ID data from the specified device
* @dev: device
* @tf: proposed taskfile
* @id: data buffer
*
* This custom read ID function is required due to the fact that the HW
* does not support DEVSLP.
*/
static unsigned int xgene_ahci_read_id(struct ata_device *dev,
struct ata_taskfile *tf, __le16 *id)
{
u32 err_mask;
err_mask = ata_do_dev_read_id(dev, tf, id);
if (err_mask)
return err_mask;
/*
* Mask reserved area. Word78 spec of Link Power Management
* bit15-8: reserved
	 * bit7: NCQ autosense
* bit6: Software settings preservation supported
* bit5: reserved
* bit4: In-order sata delivery supported
* bit3: DIPM requests supported
* bit2: DMA Setup FIS Auto-Activate optimization supported
	 * bit1: DMA Setup FIS non-Zero buffer offsets supported
* bit0: Reserved
*
* Clear reserved bit 8 (DEVSLP bit) as we don't support DEVSLP
*/
id[ATA_ID_FEATURE_SUPP] &= cpu_to_le16(~(1 << 8));
return 0;
}
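/*
 * Editor's note: illustrative sketch, not part of the upstream driver.
 * It applies the same mask as above to a sample word: for an assumed
 * IDENTIFY word 78 value of 0x0148 (bits 3, 6 and 8 set), the result
 * is 0x0048, i.e. only the DEVSLP capability bit is dropped.
 */
static __le16 __maybe_unused xgene_ahci_strip_devslp(__le16 word78)
{
	return word78 & cpu_to_le16(~(1 << 8));
}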
static void xgene_ahci_set_phy_cfg(struct xgene_ahci_context *ctx, int channel)
{
void __iomem *mmio = ctx->hpriv->mmio;
u32 val;
dev_dbg(ctx->dev, "port configure mmio 0x%p channel %d\n",
mmio, channel);
val = readl(mmio + PORTCFG);
val = PORTADDR_SET(val, channel == 0 ? 2 : 3);
writel(val, mmio + PORTCFG);
readl(mmio + PORTCFG); /* Force a barrier */
/* Disable fix rate */
writel(0x0001fffe, mmio + PORTPHY1CFG);
readl(mmio + PORTPHY1CFG); /* Force a barrier */
writel(0x28183219, mmio + PORTPHY2CFG);
readl(mmio + PORTPHY2CFG); /* Force a barrier */
writel(0x13081008, mmio + PORTPHY3CFG);
readl(mmio + PORTPHY3CFG); /* Force a barrier */
writel(0x00480815, mmio + PORTPHY4CFG);
readl(mmio + PORTPHY4CFG); /* Force a barrier */
/* Set window negotiation */
val = readl(mmio + PORTPHY5CFG);
val = PORTPHY5CFG_RTCHG_SET(val, 0x300);
writel(val, mmio + PORTPHY5CFG);
readl(mmio + PORTPHY5CFG); /* Force a barrier */
val = readl(mmio + PORTAXICFG);
val = PORTAXICFG_EN_CONTEXT_SET(val, 0x1); /* Enable context mgmt */
val = PORTAXICFG_OUTTRANS_SET(val, 0xe); /* Set outstanding */
writel(val, mmio + PORTAXICFG);
readl(mmio + PORTAXICFG); /* Force a barrier */
/* Set the watermark threshold of the receive FIFO */
val = readl(mmio + PORTRANSCFG);
val = PORTRANSCFG_RXWM_SET(val, 0x30);
writel(val, mmio + PORTRANSCFG);
}
/**
* xgene_ahci_do_hardreset - Issue the actual COMRESET
* @link: link to reset
* @deadline: deadline jiffies for the operation
* @online: Return value to indicate if device online
*
 * Due to the limitation of the hardware PHY, a different set of settings is
 * required for each supported disk speed - Gen3 (6.0Gbps), Gen2 (3.0Gbps),
 * and Gen1 (1.5Gbps). Otherwise, during long IO stress tests, the PHY will
 * report disparity errors and the like. In addition, during COMRESET, errors
 * can be reported in the register PORT_SCR_ERR. For SERR_DISPARITY and
 * SERR_10B_8B_ERR, the PHY receiver line must be reset. Also, during long
 * reboot cycle regression, the PHY sometimes reports link down even if the
 * device is present because of speed negotiation failure, so we need to retry
 * the COMRESET to get the link up. The following algorithm is followed to
 * properly configure the hardware PHY during COMRESET:
*
* Alg Part 1:
* 1. Start the PHY at Gen3 speed (default setting)
* 2. Issue the COMRESET
* 3. If no link, go to Alg Part 3
* 4. If link up, determine if the negotiated speed matches the PHY
* configured speed
 * 5. If they match, go to Alg Part 2
 * 6. If they do not match and this is the first attempt, configure the PHY
 * for the negotiated disk speed and repeat step 2
* 7. Go to Alg Part 2
*
* Alg Part 2:
 * 1. On link up, if any SERR_DISPARITY or SERR_10B_8B_ERR errors are
 * reported in the register PORT_SCR_ERR, then reset the PHY receiver line
* 2. Go to Alg Part 4
*
* Alg Part 3:
 * 1. Check PORT_SCR_STAT to see whether device presence was detected but PHY
 * communication establishment failed; if so, and the number of link-down
 * attempts is still below the maximum of 3, go to Alg Part 1.
* 2. Go to Alg Part 4.
*
* Alg Part 4:
 * 1. Clear any pending errors from register PORT_SCR_ERR.
*
 * NOTE: For the initial version, we will NOT support Gen1/Gen2. In addition,
 * until the underlying PHY supports a method to reset the receiver
 * line, on detection of SERR_DISPARITY or SERR_10B_8B_ERR errors,
 * a warning message will be printed.
*/
static int xgene_ahci_do_hardreset(struct ata_link *link,
unsigned long deadline, bool *online)
{
const unsigned int *timing = sata_ehc_deb_timing(&link->eh_context);
struct ata_port *ap = link->ap;
struct ahci_host_priv *hpriv = ap->host->private_data;
struct xgene_ahci_context *ctx = hpriv->plat_data;
struct ahci_port_priv *pp = ap->private_data;
u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
void __iomem *port_mmio = ahci_port_base(ap);
struct ata_taskfile tf;
int link_down_retry = 0;
int rc;
u32 val, sstatus;
do {
/* clear D2H reception area to properly wait for D2H FIS */
ata_tf_init(link->device, &tf);
tf.status = ATA_BUSY;
ata_tf_to_fis(&tf, 0, 0, d2h_fis);
rc = sata_link_hardreset(link, timing, deadline, online,
ahci_check_ready);
if (*online) {
val = readl(port_mmio + PORT_SCR_ERR);
if (val & (SERR_DISPARITY | SERR_10B_8B_ERR))
dev_warn(ctx->dev, "link has error\n");
break;
}
sata_scr_read(link, SCR_STATUS, &sstatus);
} while (link_down_retry++ < MAX_LINK_DOWN_RETRY &&
(sstatus & 0xff) == 0x1);
/* clear all errors if any pending */
val = readl(port_mmio + PORT_SCR_ERR);
writel(val, port_mmio + PORT_SCR_ERR);
return rc;
}
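/*
 * Editor's note: illustrative sketch, not part of the upstream driver.
 * It isolates the retry condition used in the loop above - an SStatus
 * DET field value of 0x1 means a device was detected but PHY
 * communication was not established, which is the only case worth
 * another COMRESET attempt, and only while fewer than
 * MAX_LINK_DOWN_RETRY attempts have been made.
 */
static bool __maybe_unused xgene_ahci_should_retry_comreset(u32 sstatus,
							    int attempts)
{
	return (sstatus & 0xff) == 0x1 && attempts < MAX_LINK_DOWN_RETRY;
}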
static int xgene_ahci_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct ahci_host_priv *hpriv = ap->host->private_data;
void __iomem *port_mmio = ahci_port_base(ap);
bool online;
int rc;
u32 portcmd_saved;
u32 portclb_saved;
u32 portclbhi_saved;
u32 portrxfis_saved;
u32 portrxfishi_saved;
	/* As hardreset resets these CSRs, save them to restore later */
portcmd_saved = readl(port_mmio + PORT_CMD);
portclb_saved = readl(port_mmio + PORT_LST_ADDR);
portclbhi_saved = readl(port_mmio + PORT_LST_ADDR_HI);
portrxfis_saved = readl(port_mmio + PORT_FIS_ADDR);
portrxfishi_saved = readl(port_mmio + PORT_FIS_ADDR_HI);
hpriv->stop_engine(ap);
rc = xgene_ahci_do_hardreset(link, deadline, &online);
/* As controller hardreset clears them, restore them */
writel(portcmd_saved, port_mmio + PORT_CMD);
writel(portclb_saved, port_mmio + PORT_LST_ADDR);
writel(portclbhi_saved, port_mmio + PORT_LST_ADDR_HI);
writel(portrxfis_saved, port_mmio + PORT_FIS_ADDR);
writel(portrxfishi_saved, port_mmio + PORT_FIS_ADDR_HI);
hpriv->start_engine(ap);
if (online)
*class = ahci_dev_classify(ap);
return rc;
}
static void xgene_ahci_host_stop(struct ata_host *host)
{
struct ahci_host_priv *hpriv = host->private_data;
ahci_platform_disable_resources(hpriv);
}
/**
* xgene_ahci_pmp_softreset - Issue the softreset to the drives connected
* to Port Multiplier.
* @link: link to reset
* @class: Return value to indicate class of device
* @deadline: deadline jiffies for the operation
*
* Due to H/W errata, the controller is unable to save the PMP
* field fetched from command header before sending the H2D FIS.
* When the device returns the PMP port field in the D2H FIS, there is
* a mismatch and results in command completion failure. The workaround
* is to write the pmp value to PxFBS.DEV field before issuing any command
* to PMP.
*/
static int xgene_ahci_pmp_softreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
int pmp = sata_srst_pmp(link);
struct ata_port *ap = link->ap;
u32 rc;
void __iomem *port_mmio = ahci_port_base(ap);
u32 port_fbs;
/*
* Set PxFBS.DEV field with pmp
* value.
*/
port_fbs = readl(port_mmio + PORT_FBS);
port_fbs &= ~PORT_FBS_DEV_MASK;
port_fbs |= pmp << PORT_FBS_DEV_OFFSET;
writel(port_fbs, port_mmio + PORT_FBS);
rc = ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
return rc;
}
/**
* xgene_ahci_softreset - Issue the softreset to the drive.
* @link: link to reset
* @class: Return value to indicate class of device
* @deadline: deadline jiffies for the operation
*
* Due to H/W errata, the controller is unable to save the PMP
* field fetched from command header before sending the H2D FIS.
* When the device returns the PMP port field in the D2H FIS, there is
* a mismatch and results in command completion failure. The workaround
* is to write the pmp value to PxFBS.DEV field before issuing any command
* to PMP. Here is the algorithm to detect PMP :
*
* 1. Save the PxFBS value
* 2. Program PxFBS.DEV with pmp value send by framework. Framework sends
* 0xF for both PMP/NON-PMP initially
* 3. Issue softreset
* 4. If signature class is PMP goto 6
* 5. restore the original PxFBS and goto 3
* 6. return
*/
static int xgene_ahci_softreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
int pmp = sata_srst_pmp(link);
struct ata_port *ap = link->ap;
struct ahci_host_priv *hpriv = ap->host->private_data;
struct xgene_ahci_context *ctx = hpriv->plat_data;
void __iomem *port_mmio = ahci_port_base(ap);
u32 port_fbs;
u32 port_fbs_save;
u32 retry = 1;
u32 rc;
port_fbs_save = readl(port_mmio + PORT_FBS);
/*
* Set PxFBS.DEV field with pmp
* value.
*/
port_fbs = readl(port_mmio + PORT_FBS);
port_fbs &= ~PORT_FBS_DEV_MASK;
port_fbs |= pmp << PORT_FBS_DEV_OFFSET;
writel(port_fbs, port_mmio + PORT_FBS);
softreset_retry:
rc = ahci_do_softreset(link, class, pmp,
deadline, ahci_check_ready);
ctx->class[ap->port_no] = *class;
if (*class != ATA_DEV_PMP) {
/*
* Retry for normal drives without
* setting PxFBS.DEV field with pmp value.
*/
if (retry--) {
writel(port_fbs_save, port_mmio + PORT_FBS);
goto softreset_retry;
}
}
return rc;
}
/**
* xgene_ahci_handle_broken_edge_irq - Handle the broken irq.
 * @host: Host that received the irq
* @irq_masked: HOST_IRQ_STAT value
*
 * For hardware with a broken edge-triggered latch,
 * the HOST_IRQ_STAT register misses an edge interrupt
 * when the clearing of the HOST_IRQ_STAT register and the
 * hardware's update of the PORT_IRQ_STAT register happen in the
 * same clock cycle.
* As such, the algorithm below outlines the workaround.
*
* 1. Read HOST_IRQ_STAT register and save the state.
* 2. Clear the HOST_IRQ_STAT register.
* 3. Read back the HOST_IRQ_STAT register.
 * 4. If the HOST_IRQ_STAT register now reads zero, then
 * traverse each remaining port's PORT_IRQ_STAT register
 * to check if an interrupt is triggered at that point, else
 * go to step 6.
 * 5. If any of those ports' PORT_IRQ_STAT registers is non-zero,
 * then update the state of HOST_IRQ_STAT saved in step 1.
* 6. Handle port interrupts.
* 7. Exit
*/
static int xgene_ahci_handle_broken_edge_irq(struct ata_host *host,
u32 irq_masked)
{
struct ahci_host_priv *hpriv = host->private_data;
void __iomem *port_mmio;
int i;
if (!readl(hpriv->mmio + HOST_IRQ_STAT)) {
for (i = 0; i < host->n_ports; i++) {
if (irq_masked & (1 << i))
continue;
port_mmio = ahci_port_base(host->ports[i]);
if (readl(port_mmio + PORT_IRQ_STAT))
irq_masked |= (1 << i);
}
}
return ahci_handle_port_intr(host, irq_masked);
}
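/*
 * Editor's note: illustrative sketch, not part of the upstream driver.
 * It shows steps 4 and 5 of the workaround above for a single port -
 * a port whose PORT_IRQ_STAT is non-zero while HOST_IRQ_STAT already
 * reads zero is folded back into the saved interrupt mask.
 */
static u32 __maybe_unused xgene_ahci_add_missed_port(u32 irq_masked, int port,
						     u32 port_irq_stat)
{
	if (port_irq_stat)
		irq_masked |= (1U << port);
	return irq_masked;
}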
static irqreturn_t xgene_ahci_irq_intr(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
struct ahci_host_priv *hpriv;
unsigned int rc = 0;
void __iomem *mmio;
u32 irq_stat, irq_masked;
hpriv = host->private_data;
mmio = hpriv->mmio;
/* sigh. 0xffffffff is a valid return from h/w */
irq_stat = readl(mmio + HOST_IRQ_STAT);
if (!irq_stat)
return IRQ_NONE;
irq_masked = irq_stat & hpriv->port_map;
spin_lock(&host->lock);
/*
* HOST_IRQ_STAT behaves as edge triggered latch meaning that
* it should be cleared before all the port events are cleared.
*/
writel(irq_stat, mmio + HOST_IRQ_STAT);
rc = xgene_ahci_handle_broken_edge_irq(host, irq_masked);
spin_unlock(&host->lock);
return IRQ_RETVAL(rc);
}
static struct ata_port_operations xgene_ahci_v1_ops = {
.inherits = &ahci_ops,
.host_stop = xgene_ahci_host_stop,
.hardreset = xgene_ahci_hardreset,
.read_id = xgene_ahci_read_id,
.qc_issue = xgene_ahci_qc_issue,
.softreset = xgene_ahci_softreset,
.pmp_softreset = xgene_ahci_pmp_softreset
};
static const struct ata_port_info xgene_ahci_v1_port_info = {
.flags = AHCI_FLAG_COMMON | ATA_FLAG_PMP,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &xgene_ahci_v1_ops,
};
static struct ata_port_operations xgene_ahci_v2_ops = {
.inherits = &ahci_ops,
.host_stop = xgene_ahci_host_stop,
.hardreset = xgene_ahci_hardreset,
.read_id = xgene_ahci_read_id,
};
static const struct ata_port_info xgene_ahci_v2_port_info = {
.flags = AHCI_FLAG_COMMON | ATA_FLAG_PMP,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &xgene_ahci_v2_ops,
};
static int xgene_ahci_hw_init(struct ahci_host_priv *hpriv)
{
struct xgene_ahci_context *ctx = hpriv->plat_data;
int i;
int rc;
u32 val;
/* Remove IP RAM out of shutdown */
rc = xgene_ahci_init_memram(ctx);
if (rc)
return rc;
for (i = 0; i < MAX_AHCI_CHN_PERCTR; i++)
xgene_ahci_set_phy_cfg(ctx, i);
/* AXI disable Mask */
writel(0xffffffff, hpriv->mmio + HOST_IRQ_STAT);
readl(hpriv->mmio + HOST_IRQ_STAT); /* Force a barrier */
writel(0, ctx->csr_core + INTSTATUSMASK);
val = readl(ctx->csr_core + INTSTATUSMASK); /* Force a barrier */
dev_dbg(ctx->dev, "top level interrupt mask 0x%X value 0x%08X\n",
INTSTATUSMASK, val);
writel(0x0, ctx->csr_core + ERRINTSTATUSMASK);
readl(ctx->csr_core + ERRINTSTATUSMASK); /* Force a barrier */
writel(0x0, ctx->csr_axi + INT_SLV_TMOMASK);
readl(ctx->csr_axi + INT_SLV_TMOMASK);
/* Enable AXI Interrupt */
writel(0xffffffff, ctx->csr_core + SLVRDERRATTRIBUTES);
writel(0xffffffff, ctx->csr_core + SLVWRERRATTRIBUTES);
writel(0xffffffff, ctx->csr_core + MSTRDERRATTRIBUTES);
writel(0xffffffff, ctx->csr_core + MSTWRERRATTRIBUTES);
/* Enable coherency */
val = readl(ctx->csr_core + BUSCTLREG);
val &= ~0x00000002; /* Enable write coherency */
val &= ~0x00000001; /* Enable read coherency */
writel(val, ctx->csr_core + BUSCTLREG);
val = readl(ctx->csr_core + IOFMSTRWAUX);
val |= (1 << 3); /* Enable read coherency */
val |= (1 << 9); /* Enable write coherency */
writel(val, ctx->csr_core + IOFMSTRWAUX);
val = readl(ctx->csr_core + IOFMSTRWAUX);
dev_dbg(ctx->dev, "coherency 0x%X value 0x%08X\n",
IOFMSTRWAUX, val);
return rc;
}
static int xgene_ahci_mux_select(struct xgene_ahci_context *ctx)
{
u32 val;
/* Check for optional MUX resource */
if (!ctx->csr_mux)
return 0;
val = readl(ctx->csr_mux + SATA_ENET_CONFIG_REG);
val &= ~CFG_SATA_ENET_SELECT_MASK;
writel(val, ctx->csr_mux + SATA_ENET_CONFIG_REG);
val = readl(ctx->csr_mux + SATA_ENET_CONFIG_REG);
return val & CFG_SATA_ENET_SELECT_MASK ? -1 : 0;
}
static const struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),
};
#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_ahci_acpi_match[] = {
{ "APMC0D0D", XGENE_AHCI_V1},
{ "APMC0D32", XGENE_AHCI_V2},
{},
};
MODULE_DEVICE_TABLE(acpi, xgene_ahci_acpi_match);
#endif
static const struct of_device_id xgene_ahci_of_match[] = {
{.compatible = "apm,xgene-ahci", .data = (void *) XGENE_AHCI_V1},
{.compatible = "apm,xgene-ahci-v2", .data = (void *) XGENE_AHCI_V2},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, xgene_ahci_of_match);
static int xgene_ahci_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ahci_host_priv *hpriv;
struct xgene_ahci_context *ctx;
struct resource *res;
const struct of_device_id *of_devid;
enum xgene_ahci_version version = XGENE_AHCI_V1;
const struct ata_port_info *ppi[] = { &xgene_ahci_v1_port_info,
&xgene_ahci_v2_port_info };
int rc;
hpriv = ahci_platform_get_resources(pdev, 0);
if (IS_ERR(hpriv))
return PTR_ERR(hpriv);
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
hpriv->plat_data = ctx;
ctx->hpriv = hpriv;
ctx->dev = dev;
/* Retrieve the IP core resource */
ctx->csr_core = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(ctx->csr_core))
return PTR_ERR(ctx->csr_core);
/* Retrieve the IP diagnostic resource */
ctx->csr_diag = devm_platform_ioremap_resource(pdev, 2);
if (IS_ERR(ctx->csr_diag))
return PTR_ERR(ctx->csr_diag);
/* Retrieve the IP AXI resource */
ctx->csr_axi = devm_platform_ioremap_resource(pdev, 3);
if (IS_ERR(ctx->csr_axi))
return PTR_ERR(ctx->csr_axi);
/* Retrieve the optional IP mux resource */
res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
if (res) {
void __iomem *csr = devm_ioremap_resource(dev, res);
if (IS_ERR(csr))
return PTR_ERR(csr);
ctx->csr_mux = csr;
}
of_devid = of_match_device(xgene_ahci_of_match, dev);
if (of_devid) {
if (of_devid->data)
version = (unsigned long) of_devid->data;
}
#ifdef CONFIG_ACPI
else {
const struct acpi_device_id *acpi_id;
struct acpi_device_info *info;
acpi_status status;
acpi_id = acpi_match_device(xgene_ahci_acpi_match, &pdev->dev);
if (!acpi_id) {
dev_warn(&pdev->dev, "No node entry in ACPI table. Assume version1\n");
version = XGENE_AHCI_V1;
} else if (acpi_id->driver_data) {
version = (enum xgene_ahci_version) acpi_id->driver_data;
status = acpi_get_object_info(ACPI_HANDLE(&pdev->dev), &info);
if (ACPI_FAILURE(status)) {
dev_warn(&pdev->dev, "%s: Error reading device info. Assume version1\n",
__func__);
version = XGENE_AHCI_V1;
} else {
if (info->valid & ACPI_VALID_CID)
version = XGENE_AHCI_V2;
kfree(info);
}
}
}
#endif
dev_dbg(dev, "VAddr 0x%p Mmio VAddr 0x%p\n", ctx->csr_core,
hpriv->mmio);
/* Select ATA */
if ((rc = xgene_ahci_mux_select(ctx))) {
dev_err(dev, "SATA mux selection failed error %d\n", rc);
return -ENODEV;
}
if (xgene_ahci_is_memram_inited(ctx)) {
dev_info(dev, "skip clock and PHY initialization\n");
goto skip_clk_phy;
}
/* Due to errata, HW requires full toggle transition */
rc = ahci_platform_enable_clks(hpriv);
if (rc)
goto disable_resources;
ahci_platform_disable_clks(hpriv);
rc = ahci_platform_enable_resources(hpriv);
if (rc)
goto disable_resources;
/* Configure the host controller */
xgene_ahci_hw_init(hpriv);
skip_clk_phy:
switch (version) {
case XGENE_AHCI_V1:
hpriv->flags = AHCI_HFLAG_NO_NCQ;
break;
case XGENE_AHCI_V2:
hpriv->flags |= AHCI_HFLAG_YES_FBS;
hpriv->irq_handler = xgene_ahci_irq_intr;
break;
default:
break;
}
rc = ahci_platform_init_host(pdev, hpriv, ppi[version - 1],
&ahci_platform_sht);
if (rc)
goto disable_resources;
dev_dbg(dev, "X-Gene SATA host controller initialized\n");
return 0;
disable_resources:
ahci_platform_disable_resources(hpriv);
return rc;
}
static struct platform_driver xgene_ahci_driver = {
.probe = xgene_ahci_probe,
.remove_new = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.of_match_table = xgene_ahci_of_match,
.acpi_match_table = ACPI_PTR(xgene_ahci_acpi_match),
},
};
module_platform_driver(xgene_ahci_driver);
MODULE_DESCRIPTION("APM X-Gene AHCI SATA driver");
MODULE_AUTHOR("Loc Ho <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.4");
| linux-master | drivers/ata/ahci_xgene.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* New ATA layer SC1200 driver Alan Cox <[email protected]>
*
* TODO: Mode selection filtering
* TODO: Needs custom DMA cleanup code
*
* Based very heavily on
*
* linux/drivers/ide/pci/sc1200.c Version 0.91 28-Jan-2003
*
* Copyright (C) 2000-2002 Mark Lord <[email protected]>
* May be copied or modified under the terms of the GNU General Public License
*
* Development of this chipset driver was funded
* by the nice folks at National Semiconductor.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_sc1200"
#define DRV_VERSION "0.2.6"
#define SC1200_REV_A 0x00
#define SC1200_REV_B1 0x01
#define SC1200_REV_B3 0x02
#define SC1200_REV_C1 0x03
#define SC1200_REV_D1 0x04
/**
* sc1200_clock - PCI clock
*
* Return the PCI bus clocking for the SC1200 chipset configuration
 * in use. We return 0 for 33MHz, 1 for 48MHz and 2 for 66MHz.
*/
static int sc1200_clock(void)
{
/* Magic registers that give us the chipset data */
u8 chip_id = inb(0x903C);
u8 silicon_rev = inb(0x903D);
u16 pci_clock;
if (chip_id == 0x04 && silicon_rev < SC1200_REV_B1)
return 0; /* 33 MHz mode */
	/* Clock generator configuration 0x901E: bits 8/9 select the PCI
	   clocking - 0/3 is 33MHz, 1 is 48MHz, 2 is 66MHz */
pci_clock = inw(0x901E);
pci_clock >>= 8;
pci_clock &= 0x03;
if (pci_clock == 3)
pci_clock = 0;
return pci_clock;
}
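/*
 * Editor's note: illustrative sketch, not part of the upstream driver.
 * It decodes an assumed raw value of register 0x901E: for 0x0200,
 * bits 8/9 give 2, i.e. 66MHz; a raw field value of 3 is folded back
 * to 0 (33MHz) just as sc1200_clock() does above.
 */
static int __maybe_unused sc1200_decode_clock_bits(u16 reg901e)
{
	int clock = (reg901e >> 8) & 0x03;
	return clock == 3 ? 0 : clock;
}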
/**
* sc1200_set_piomode - PIO setup
* @ap: ATA interface
* @adev: device on the interface
*
* Set our PIO requirements. This is fairly simple on the SC1200
*/
static void sc1200_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
static const u32 pio_timings[4][5] = {
/* format0, 33Mhz */
{ 0x00009172, 0x00012171, 0x00020080, 0x00032010, 0x00040010 },
/* format1, 33Mhz */
{ 0xd1329172, 0x71212171, 0x30200080, 0x20102010, 0x00100010 },
/* format1, 48Mhz */
{ 0xfaa3f4f3, 0xc23232b2, 0x513101c1, 0x31213121, 0x10211021 },
/* format1, 66Mhz */
{ 0xfff4fff4, 0xf35353d3, 0x814102f1, 0x42314231, 0x11311131 }
};
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u32 format;
unsigned int reg = 0x40 + 0x10 * ap->port_no;
int mode = adev->pio_mode - XFER_PIO_0;
pci_read_config_dword(pdev, reg + 4, &format);
format >>= 31;
format += sc1200_clock();
pci_write_config_dword(pdev, reg + 8 * adev->devno,
pio_timings[format][mode]);
}
/**
* sc1200_set_dmamode - DMA timing setup
* @ap: ATA interface
* @adev: Device being configured
*
 * We cannot mix MWDMA and UDMA without reloading timings each time we
 * switch between the master and slave device.
*/
static void sc1200_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
static const u32 udma_timing[3][3] = {
{ 0x00921250, 0x00911140, 0x00911030 },
{ 0x00932470, 0x00922260, 0x00922140 },
{ 0x009436A1, 0x00933481, 0x00923261 }
};
static const u32 mwdma_timing[3][3] = {
{ 0x00077771, 0x00012121, 0x00002020 },
{ 0x000BBBB2, 0x00024241, 0x00013131 },
{ 0x000FFFF3, 0x00035352, 0x00015151 }
};
int clock = sc1200_clock();
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
unsigned int reg = 0x40 + 0x10 * ap->port_no;
int mode = adev->dma_mode;
u32 format;
if (mode >= XFER_UDMA_0)
format = udma_timing[clock][mode - XFER_UDMA_0];
else
format = mwdma_timing[clock][mode - XFER_MW_DMA_0];
if (adev->devno == 0) {
u32 timings;
pci_read_config_dword(pdev, reg + 4, &timings);
timings &= 0x80000000UL;
timings |= format;
pci_write_config_dword(pdev, reg + 4, timings);
} else
pci_write_config_dword(pdev, reg + 12, format);
}
/**
* sc1200_qc_issue - command issue
* @qc: command pending
*
* Called when the libata layer is about to issue a command. We wrap
* this interface so that we can load the correct ATA timings if
* necessary. Specifically we have a problem that there is only
* one MWDMA/UDMA bit.
*/
static unsigned int sc1200_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_device *adev = qc->dev;
struct ata_device *prev = ap->private_data;
/* See if the DMA settings could be wrong */
if (ata_dma_enabled(adev) && adev != prev && prev != NULL) {
/* Maybe, but do the channels match MWDMA/UDMA ? */
if ((ata_using_udma(adev) && !ata_using_udma(prev)) ||
(ata_using_udma(prev) && !ata_using_udma(adev)))
/* Switch the mode bits */
sc1200_set_dmamode(ap, adev);
}
return ata_bmdma_qc_issue(qc);
}
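/*
 * Editor's note: illustrative sketch, not part of the upstream driver.
 * It isolates the predicate behind the reload above - timings only
 * need rewriting when the previous and next devices disagree about
 * UDMA versus MWDMA, because the channel has a single shared
 * MWDMA/UDMA selection bit.
 */
static bool __maybe_unused sc1200_modes_clash(struct ata_device *prev,
					      struct ata_device *next)
{
	return ata_using_udma(prev) != ata_using_udma(next);
}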
/**
* sc1200_qc_defer - implement serialization
* @qc: command
*
* Serialize command issue on this controller.
*/
static int sc1200_qc_defer(struct ata_queued_cmd *qc)
{
struct ata_host *host = qc->ap->host;
struct ata_port *alt = host->ports[1 ^ qc->ap->port_no];
int rc;
/* First apply the usual rules */
rc = ata_std_qc_defer(qc);
if (rc != 0)
return rc;
/* Now apply serialization rules. Only allow a command if the
other channel state machine is idle */
if (alt && alt->qc_active)
return ATA_DEFER_PORT;
return 0;
}
static const struct scsi_host_template sc1200_sht = {
ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = LIBATA_DUMB_MAX_PRD,
.dma_boundary = ATA_DMA_BOUNDARY,
};
static struct ata_port_operations sc1200_port_ops = {
.inherits = &ata_bmdma_port_ops,
.qc_prep = ata_bmdma_dumb_qc_prep,
.qc_issue = sc1200_qc_issue,
.qc_defer = sc1200_qc_defer,
.cable_detect = ata_cable_40wire,
.set_piomode = sc1200_set_piomode,
.set_dmamode = sc1200_set_dmamode,
};
/**
* sc1200_init_one - Initialise an SC1200
* @dev: PCI device
* @id: Entry in match table
*
* Just throw the needed data at the libata helper and it does all
* our work.
*/
static int sc1200_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA2,
.port_ops = &sc1200_port_ops
};
const struct ata_port_info *ppi[] = { &info, NULL };
return ata_pci_bmdma_init_one(dev, ppi, &sc1200_sht, NULL, 0);
}
static const struct pci_device_id sc1200[] = {
{ PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_SCx200_IDE), },
{ },
};
static struct pci_driver sc1200_pci_driver = {
.name = DRV_NAME,
.id_table = sc1200,
.probe = sc1200_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
};
module_pci_driver(sc1200_pci_driver);
MODULE_AUTHOR("Alan Cox, Mark Lord");
MODULE_DESCRIPTION("low-level driver for the NS/AMD SC1200");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, sc1200);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_sc1200.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* pata_cypress.c - Cypress PATA for new ATA layer
* (C) 2006 Red Hat Inc
* Alan Cox
*
* Based heavily on
* linux/drivers/ide/pci/cy82c693.c Version 0.40 Sep. 10, 2002
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_cypress"
#define DRV_VERSION "0.1.5"
/* here are the offset definitions for the registers */
enum {
CY82_IDE_CMDREG = 0x04,
CY82_IDE_ADDRSETUP = 0x48,
CY82_IDE_MASTER_IOR = 0x4C,
CY82_IDE_MASTER_IOW = 0x4D,
CY82_IDE_SLAVE_IOR = 0x4E,
CY82_IDE_SLAVE_IOW = 0x4F,
CY82_IDE_MASTER_8BIT = 0x50,
CY82_IDE_SLAVE_8BIT = 0x51,
CY82_INDEX_PORT = 0x22,
CY82_DATA_PORT = 0x23,
CY82_INDEX_CTRLREG1 = 0x01,
CY82_INDEX_CHANNEL0 = 0x30,
CY82_INDEX_CHANNEL1 = 0x31,
CY82_INDEX_TIMEOUT = 0x32
};
static bool enable_dma = true;
module_param(enable_dma, bool, 0);
MODULE_PARM_DESC(enable_dma, "Enable bus master DMA operations");
/**
* cy82c693_set_piomode - set initial PIO mode data
* @ap: ATA interface
* @adev: ATA device
*
* Called to do the PIO mode setup.
*/
static void cy82c693_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
struct ata_timing t;
const unsigned long T = 1000000 / 33;
short time_16, time_8;
u32 addr;
if (ata_timing_compute(adev, adev->pio_mode, &t, T, 1) < 0) {
		ata_dev_err(adev, DRV_NAME ": mode computation failed.\n");
return;
}
time_16 = clamp_val(t.recover - 1, 0, 15) |
(clamp_val(t.active - 1, 0, 15) << 4);
time_8 = clamp_val(t.act8b - 1, 0, 15) |
(clamp_val(t.rec8b - 1, 0, 15) << 4);
if (adev->devno == 0) {
pci_read_config_dword(pdev, CY82_IDE_ADDRSETUP, &addr);
addr &= ~0x0F; /* Mask bits */
addr |= clamp_val(t.setup - 1, 0, 15);
pci_write_config_dword(pdev, CY82_IDE_ADDRSETUP, addr);
pci_write_config_byte(pdev, CY82_IDE_MASTER_IOR, time_16);
pci_write_config_byte(pdev, CY82_IDE_MASTER_IOW, time_16);
pci_write_config_byte(pdev, CY82_IDE_MASTER_8BIT, time_8);
} else {
pci_read_config_dword(pdev, CY82_IDE_ADDRSETUP, &addr);
addr &= ~0xF0; /* Mask bits */
addr |= (clamp_val(t.setup - 1, 0, 15) << 4);
pci_write_config_dword(pdev, CY82_IDE_ADDRSETUP, addr);
pci_write_config_byte(pdev, CY82_IDE_SLAVE_IOR, time_16);
pci_write_config_byte(pdev, CY82_IDE_SLAVE_IOW, time_16);
pci_write_config_byte(pdev, CY82_IDE_SLAVE_8BIT, time_8);
}
}
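/*
 * Editor's note: illustrative sketch, not part of the upstream driver.
 * Worked example of the packing above, assuming t.active = 6 and
 * t.recover = 9 clocks: the byte becomes ((6 - 1) << 4) | (9 - 1) =
 * 0x58, i.e. active count in the high nibble and recovery count in the
 * low nibble, each clamped to 0-15.
 */
static u8 __maybe_unused cy82c693_pack_timing(unsigned short active,
					      unsigned short recover)
{
	return clamp_val(recover - 1, 0, 15) |
	       (clamp_val(active - 1, 0, 15) << 4);
}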
/**
* cy82c693_set_dmamode - set initial DMA mode data
* @ap: ATA interface
* @adev: ATA device
*
* Called to do the DMA mode setup.
*/
static void cy82c693_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
int reg = CY82_INDEX_CHANNEL0 + ap->port_no;
/* Be afraid, be very afraid. Magic registers in low I/O space */
outb(reg, 0x22);
outb(adev->dma_mode - XFER_MW_DMA_0, 0x23);
/* 0x50 gives the best behaviour on the Alpha's using this chip */
outb(CY82_INDEX_TIMEOUT, 0x22);
outb(0x50, 0x23);
}
static const struct scsi_host_template cy82c693_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static struct ata_port_operations cy82c693_port_ops = {
.inherits = &ata_bmdma_port_ops,
.cable_detect = ata_cable_40wire,
.set_piomode = cy82c693_set_piomode,
.set_dmamode = cy82c693_set_dmamode,
};
static int cy82c693_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
static struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.port_ops = &cy82c693_port_ops
};
const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info };
if (enable_dma)
info.mwdma_mask = ATA_MWDMA2;
/* Devfn 1 is the ATA primary. The secondary is magic and on devfn 2.
For the moment we don't handle the secondary. FIXME */
if (PCI_FUNC(pdev->devfn) != 1)
return -ENODEV;
return ata_pci_bmdma_init_one(pdev, ppi, &cy82c693_sht, NULL, 0);
}
static const struct pci_device_id cy82c693[] = {
{ PCI_VDEVICE(CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693), },
{ },
};
static struct pci_driver cy82c693_pci_driver = {
.name = DRV_NAME,
.id_table = cy82c693,
.probe = cy82c693_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
};
module_pci_driver(cy82c693_pci_driver);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for the CY82C693 PATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, cy82c693);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_cypress.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ahci.c - AHCI SATA support
*
* Maintained by: Tejun Heo <[email protected]>
* Please ALWAYS copy [email protected]
* on emails.
*
* Copyright 2004-2005 Red Hat, Inc.
*
* libata documentation is available via 'make {ps|pdf}docs',
* as Documentation/driver-api/libata.rst
*
* AHCI hardware documentation:
* http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
* http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/dmi.h>
#include <linux/gfp.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
#include <linux/ahci-remap.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include "ahci.h"
#define DRV_NAME "ahci"
#define DRV_VERSION "3.0"
enum {
AHCI_PCI_BAR_STA2X11 = 0,
AHCI_PCI_BAR_CAVIUM = 0,
AHCI_PCI_BAR_LOONGSON = 0,
AHCI_PCI_BAR_ENMOTUS = 2,
AHCI_PCI_BAR_CAVIUM_GEN5 = 4,
AHCI_PCI_BAR_STANDARD = 5,
};
enum board_ids {
/* board IDs by feature in alphabetical order */
board_ahci,
board_ahci_ign_iferr,
board_ahci_low_power,
board_ahci_no_debounce_delay,
board_ahci_nomsi,
board_ahci_noncq,
board_ahci_nosntf,
board_ahci_yes_fbs,
/* board IDs for specific chipsets in alphabetical order */
board_ahci_al,
board_ahci_avn,
board_ahci_mcp65,
board_ahci_mcp77,
board_ahci_mcp89,
board_ahci_mv,
board_ahci_sb600,
board_ahci_sb700, /* for SB700 and SB800 */
board_ahci_vt8251,
/*
* board IDs for Intel chipsets that support more than 6 ports
* *and* end up needing the PCS quirk.
*/
board_ahci_pcs7,
/* aliases */
board_ahci_mcp_linux = board_ahci_mcp65,
board_ahci_mcp67 = board_ahci_mcp65,
board_ahci_mcp73 = board_ahci_mcp65,
board_ahci_mcp79 = board_ahci_mcp77,
};
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static void ahci_remove_one(struct pci_dev *dev);
static void ahci_shutdown_one(struct pci_dev *dev);
static void ahci_intel_pcs_quirk(struct pci_dev *pdev, struct ahci_host_priv *hpriv);
static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static void ahci_mcp89_apple_enable(struct pci_dev *pdev);
static bool is_mcp89_apple(struct pci_dev *pdev);
static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
#ifdef CONFIG_PM
static int ahci_pci_device_runtime_suspend(struct device *dev);
static int ahci_pci_device_runtime_resume(struct device *dev);
#ifdef CONFIG_PM_SLEEP
static int ahci_pci_device_suspend(struct device *dev);
static int ahci_pci_device_resume(struct device *dev);
#endif
#endif /* CONFIG_PM */
static const struct scsi_host_template ahci_sht = {
AHCI_SHT("ahci"),
};
static struct ata_port_operations ahci_vt8251_ops = {
.inherits = &ahci_ops,
.hardreset = ahci_vt8251_hardreset,
};
static struct ata_port_operations ahci_p5wdh_ops = {
.inherits = &ahci_ops,
.hardreset = ahci_p5wdh_hardreset,
};
static struct ata_port_operations ahci_avn_ops = {
.inherits = &ahci_ops,
.hardreset = ahci_avn_hardreset,
};
static const struct ata_port_info ahci_port_info[] = {
/* by features */
[board_ahci] = {
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
[board_ahci_ign_iferr] = {
AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
[board_ahci_low_power] = {
AHCI_HFLAGS (AHCI_HFLAG_USE_LPM_POLICY),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
[board_ahci_no_debounce_delay] = {
.flags = AHCI_FLAG_COMMON,
.link_flags = ATA_LFLAG_NO_DEBOUNCE_DELAY,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
[board_ahci_nomsi] = {
AHCI_HFLAGS (AHCI_HFLAG_NO_MSI),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
[board_ahci_noncq] = {
AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
[board_ahci_nosntf] = {
AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
[board_ahci_yes_fbs] = {
AHCI_HFLAGS (AHCI_HFLAG_YES_FBS),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
/* by chipsets */
[board_ahci_al] = {
AHCI_HFLAGS (AHCI_HFLAG_NO_PMP | AHCI_HFLAG_NO_MSI),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
[board_ahci_avn] = {
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_avn_ops,
},
[board_ahci_mcp65] = {
AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP |
AHCI_HFLAG_YES_NCQ),
.flags = AHCI_FLAG_COMMON | ATA_FLAG_NO_DIPM,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
[board_ahci_mcp77] = {
AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
[board_ahci_mcp89] = {
AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
[board_ahci_mv] = {
AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
.flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
[board_ahci_sb600] = {
AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
AHCI_HFLAG_NO_MSI | AHCI_HFLAG_SECT255 |
AHCI_HFLAG_32BIT_ONLY),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_pmp_retry_srst_ops,
},
[board_ahci_sb700] = { /* for SB700 and SB800 */
AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_pmp_retry_srst_ops,
},
[board_ahci_vt8251] = {
AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_vt8251_ops,
},
[board_ahci_pcs7] = {
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
};
static const struct pci_device_id ahci_pci_tbl[] = {
/* Intel */
{ PCI_VDEVICE(INTEL, 0x06d6), board_ahci }, /* Comet Lake PCH-H RAID */
{ PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
{ PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
{ PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
{ PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */
{ PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */
{ PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */
{ PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */
{ PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
{ PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
{ PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
{ PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
{ PCI_VDEVICE(INTEL, 0x2822), board_ahci_nosntf }, /* ICH8/Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
{ PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
{ PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
{ PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */
{ PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */
{ PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
{ PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
{ PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
{ PCI_VDEVICE(INTEL, 0x2929), board_ahci_low_power }, /* ICH9M */
{ PCI_VDEVICE(INTEL, 0x292a), board_ahci_low_power }, /* ICH9M */
{ PCI_VDEVICE(INTEL, 0x292b), board_ahci_low_power }, /* ICH9M */
{ PCI_VDEVICE(INTEL, 0x292c), board_ahci_low_power }, /* ICH9M */
{ PCI_VDEVICE(INTEL, 0x292f), board_ahci_low_power }, /* ICH9M */
{ PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
{ PCI_VDEVICE(INTEL, 0x294e), board_ahci_low_power }, /* ICH9M */
{ PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
{ PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
{ PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
{ PCI_VDEVICE(INTEL, 0x3a22), board_ahci }, /* ICH10 */
{ PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
{ PCI_VDEVICE(INTEL, 0x3b22), board_ahci }, /* PCH AHCI */
{ PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */
{ PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
{ PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
{ PCI_VDEVICE(INTEL, 0x3b29), board_ahci_low_power }, /* PCH M AHCI */
{ PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
{ PCI_VDEVICE(INTEL, 0x3b2c), board_ahci_low_power }, /* PCH M RAID */
{ PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
{ PCI_VDEVICE(INTEL, 0x19b0), board_ahci_pcs7 }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19b1), board_ahci_pcs7 }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19b2), board_ahci_pcs7 }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19b3), board_ahci_pcs7 }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19b4), board_ahci_pcs7 }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19b5), board_ahci_pcs7 }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19b6), board_ahci_pcs7 }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19b7), board_ahci_pcs7 }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19bE), board_ahci_pcs7 }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19bF), board_ahci_pcs7 }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19c0), board_ahci_pcs7 }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19c1), board_ahci_pcs7 }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19c2), board_ahci_pcs7 }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19c3), board_ahci_pcs7 }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19c4), board_ahci_pcs7 }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19c5), board_ahci_pcs7 }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19c6), board_ahci_pcs7 }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19c7), board_ahci_pcs7 }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19cE), board_ahci_pcs7 }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19cF), board_ahci_pcs7 }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
{ PCI_VDEVICE(INTEL, 0x1c03), board_ahci_low_power }, /* CPT M AHCI */
{ PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
{ PCI_VDEVICE(INTEL, 0x1c05), board_ahci_low_power }, /* CPT M RAID */
{ PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
{ PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
{ PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
{ PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */
{ PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */
{ PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */
{ PCI_VDEVICE(INTEL, 0x1e02), board_ahci }, /* Panther Point AHCI */
{ PCI_VDEVICE(INTEL, 0x1e03), board_ahci_low_power }, /* Panther M AHCI */
{ PCI_VDEVICE(INTEL, 0x1e04), board_ahci }, /* Panther Point RAID */
{ PCI_VDEVICE(INTEL, 0x1e05), board_ahci }, /* Panther Point RAID */
{ PCI_VDEVICE(INTEL, 0x1e06), board_ahci }, /* Panther Point RAID */
{ PCI_VDEVICE(INTEL, 0x1e07), board_ahci_low_power }, /* Panther M RAID */
{ PCI_VDEVICE(INTEL, 0x1e0e), board_ahci }, /* Panther Point RAID */
{ PCI_VDEVICE(INTEL, 0x8c02), board_ahci }, /* Lynx Point AHCI */
{ PCI_VDEVICE(INTEL, 0x8c03), board_ahci_low_power }, /* Lynx M AHCI */
{ PCI_VDEVICE(INTEL, 0x8c04), board_ahci }, /* Lynx Point RAID */
{ PCI_VDEVICE(INTEL, 0x8c05), board_ahci_low_power }, /* Lynx M RAID */
{ PCI_VDEVICE(INTEL, 0x8c06), board_ahci }, /* Lynx Point RAID */
{ PCI_VDEVICE(INTEL, 0x8c07), board_ahci_low_power }, /* Lynx M RAID */
{ PCI_VDEVICE(INTEL, 0x8c0e), board_ahci }, /* Lynx Point RAID */
{ PCI_VDEVICE(INTEL, 0x8c0f), board_ahci_low_power }, /* Lynx M RAID */
{ PCI_VDEVICE(INTEL, 0x9c02), board_ahci_low_power }, /* Lynx LP AHCI */
{ PCI_VDEVICE(INTEL, 0x9c03), board_ahci_low_power }, /* Lynx LP AHCI */
{ PCI_VDEVICE(INTEL, 0x9c04), board_ahci_low_power }, /* Lynx LP RAID */
{ PCI_VDEVICE(INTEL, 0x9c05), board_ahci_low_power }, /* Lynx LP RAID */
{ PCI_VDEVICE(INTEL, 0x9c06), board_ahci_low_power }, /* Lynx LP RAID */
{ PCI_VDEVICE(INTEL, 0x9c07), board_ahci_low_power }, /* Lynx LP RAID */
{ PCI_VDEVICE(INTEL, 0x9c0e), board_ahci_low_power }, /* Lynx LP RAID */
{ PCI_VDEVICE(INTEL, 0x9c0f), board_ahci_low_power }, /* Lynx LP RAID */
{ PCI_VDEVICE(INTEL, 0x9dd3), board_ahci_low_power }, /* Cannon Lake PCH-LP AHCI */
{ PCI_VDEVICE(INTEL, 0x1f22), board_ahci }, /* Avoton AHCI */
{ PCI_VDEVICE(INTEL, 0x1f23), board_ahci }, /* Avoton AHCI */
{ PCI_VDEVICE(INTEL, 0x1f24), board_ahci }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f25), board_ahci }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f26), board_ahci }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f27), board_ahci }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f2e), board_ahci }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f2f), board_ahci }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f32), board_ahci_avn }, /* Avoton AHCI */
{ PCI_VDEVICE(INTEL, 0x1f33), board_ahci_avn }, /* Avoton AHCI */
{ PCI_VDEVICE(INTEL, 0x1f34), board_ahci_avn }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f35), board_ahci_avn }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f36), board_ahci_avn }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f37), board_ahci_avn }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f3e), board_ahci_avn }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f3f), board_ahci_avn }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg/Lewisburg AHCI*/
{ PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* *burg SATA0 'RAID' */
{ PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* *burg SATA1 'RAID' */
{ PCI_VDEVICE(INTEL, 0x282f), board_ahci }, /* *burg SATA2 'RAID' */
{ PCI_VDEVICE(INTEL, 0x43d4), board_ahci }, /* Rocket Lake PCH-H RAID */
{ PCI_VDEVICE(INTEL, 0x43d5), board_ahci }, /* Rocket Lake PCH-H RAID */
{ PCI_VDEVICE(INTEL, 0x43d6), board_ahci }, /* Rocket Lake PCH-H RAID */
{ PCI_VDEVICE(INTEL, 0x43d7), board_ahci }, /* Rocket Lake PCH-H RAID */
{ PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */
{ PCI_VDEVICE(INTEL, 0x8d04), board_ahci }, /* Wellsburg RAID */
{ PCI_VDEVICE(INTEL, 0x8d06), board_ahci }, /* Wellsburg RAID */
{ PCI_VDEVICE(INTEL, 0x8d0e), board_ahci }, /* Wellsburg RAID */
{ PCI_VDEVICE(INTEL, 0x8d62), board_ahci }, /* Wellsburg AHCI */
{ PCI_VDEVICE(INTEL, 0x8d64), board_ahci }, /* Wellsburg RAID */
{ PCI_VDEVICE(INTEL, 0x8d66), board_ahci }, /* Wellsburg RAID */
{ PCI_VDEVICE(INTEL, 0x8d6e), board_ahci }, /* Wellsburg RAID */
{ PCI_VDEVICE(INTEL, 0x23a3), board_ahci }, /* Coleto Creek AHCI */
{ PCI_VDEVICE(INTEL, 0x9c83), board_ahci_low_power }, /* Wildcat LP AHCI */
{ PCI_VDEVICE(INTEL, 0x9c85), board_ahci_low_power }, /* Wildcat LP RAID */
{ PCI_VDEVICE(INTEL, 0x9c87), board_ahci_low_power }, /* Wildcat LP RAID */
{ PCI_VDEVICE(INTEL, 0x9c8f), board_ahci_low_power }, /* Wildcat LP RAID */
{ PCI_VDEVICE(INTEL, 0x8c82), board_ahci }, /* 9 Series AHCI */
{ PCI_VDEVICE(INTEL, 0x8c83), board_ahci_low_power }, /* 9 Series M AHCI */
{ PCI_VDEVICE(INTEL, 0x8c84), board_ahci }, /* 9 Series RAID */
{ PCI_VDEVICE(INTEL, 0x8c85), board_ahci_low_power }, /* 9 Series M RAID */
{ PCI_VDEVICE(INTEL, 0x8c86), board_ahci }, /* 9 Series RAID */
{ PCI_VDEVICE(INTEL, 0x8c87), board_ahci_low_power }, /* 9 Series M RAID */
{ PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
{ PCI_VDEVICE(INTEL, 0x8c8f), board_ahci_low_power }, /* 9 Series M RAID */
{ PCI_VDEVICE(INTEL, 0x9d03), board_ahci_low_power }, /* Sunrise LP AHCI */
{ PCI_VDEVICE(INTEL, 0x9d05), board_ahci_low_power }, /* Sunrise LP RAID */
{ PCI_VDEVICE(INTEL, 0x9d07), board_ahci_low_power }, /* Sunrise LP RAID */
{ PCI_VDEVICE(INTEL, 0xa102), board_ahci }, /* Sunrise Point-H AHCI */
{ PCI_VDEVICE(INTEL, 0xa103), board_ahci_low_power }, /* Sunrise M AHCI */
{ PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa106), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa107), board_ahci_low_power }, /* Sunrise M RAID */
{ PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
{ PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa1d2), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa1d6), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
{ PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa356), board_ahci }, /* Cannon Lake PCH-H RAID */
{ PCI_VDEVICE(INTEL, 0x06d7), board_ahci }, /* Comet Lake-H RAID */
{ PCI_VDEVICE(INTEL, 0xa386), board_ahci }, /* Comet Lake PCH-V RAID */
{ PCI_VDEVICE(INTEL, 0x0f22), board_ahci_low_power }, /* Bay Trail AHCI */
{ PCI_VDEVICE(INTEL, 0x0f23), board_ahci_low_power }, /* Bay Trail AHCI */
{ PCI_VDEVICE(INTEL, 0x22a3), board_ahci_low_power }, /* Cherry Tr. AHCI */
{ PCI_VDEVICE(INTEL, 0x5ae3), board_ahci_low_power }, /* ApolloLake AHCI */
{ PCI_VDEVICE(INTEL, 0x34d3), board_ahci_low_power }, /* Ice Lake LP AHCI */
{ PCI_VDEVICE(INTEL, 0x02d3), board_ahci_low_power }, /* Comet Lake PCH-U AHCI */
{ PCI_VDEVICE(INTEL, 0x02d7), board_ahci_low_power }, /* Comet Lake PCH RAID */
/* Elkhart Lake IDs 0x4b60 & 0x4b62 https://sata-io.org/product/8803 not tested yet */
{ PCI_VDEVICE(INTEL, 0x4b63), board_ahci_low_power }, /* Elkhart Lake AHCI */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr },
/* JMicron 362B and 362C have an AHCI function with IDE class code */
{ PCI_VDEVICE(JMICRON, 0x2362), board_ahci_ign_iferr },
{ PCI_VDEVICE(JMICRON, 0x236f), board_ahci_ign_iferr },
/* May need to update quirk_jmicron_async_suspend() for additions */
/* ATI */
{ PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
{ PCI_VDEVICE(ATI, 0x4390), board_ahci_sb700 }, /* ATI SB700/800 */
{ PCI_VDEVICE(ATI, 0x4391), board_ahci_sb700 }, /* ATI SB700/800 */
{ PCI_VDEVICE(ATI, 0x4392), board_ahci_sb700 }, /* ATI SB700/800 */
{ PCI_VDEVICE(ATI, 0x4393), board_ahci_sb700 }, /* ATI SB700/800 */
{ PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
{ PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */
/* Amazon's Annapurna Labs support */
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031),
.class = PCI_CLASS_STORAGE_SATA_AHCI,
.class_mask = 0xffffff,
board_ahci_al },
/* AMD */
{ PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */
{ PCI_VDEVICE(AMD, 0x7801), board_ahci_no_debounce_delay }, /* AMD Hudson-2 (AHCI mode) */
{ PCI_VDEVICE(AMD, 0x7900), board_ahci }, /* AMD CZ */
{ PCI_VDEVICE(AMD, 0x7901), board_ahci_low_power }, /* AMD Green Sardine */
/* AMD is using RAID class only for ahci controllers */
{ PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
/* Dell S140/S150 */
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_SUBVENDOR_ID_DELL, PCI_ANY_ID,
PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
/* VIA */
{ PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
{ PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */
/* NVIDIA */
{ PCI_VDEVICE(NVIDIA, 0x044c), board_ahci_mcp65 }, /* MCP65 */
{ PCI_VDEVICE(NVIDIA, 0x044d), board_ahci_mcp65 }, /* MCP65 */
{ PCI_VDEVICE(NVIDIA, 0x044e), board_ahci_mcp65 }, /* MCP65 */
{ PCI_VDEVICE(NVIDIA, 0x044f), board_ahci_mcp65 }, /* MCP65 */
{ PCI_VDEVICE(NVIDIA, 0x045c), board_ahci_mcp65 }, /* MCP65 */
{ PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 }, /* MCP65 */
{ PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 }, /* MCP65 */
{ PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 }, /* MCP65 */
{ PCI_VDEVICE(NVIDIA, 0x0550), board_ahci_mcp67 }, /* MCP67 */
{ PCI_VDEVICE(NVIDIA, 0x0551), board_ahci_mcp67 }, /* MCP67 */
{ PCI_VDEVICE(NVIDIA, 0x0552), board_ahci_mcp67 }, /* MCP67 */
{ PCI_VDEVICE(NVIDIA, 0x0553), board_ahci_mcp67 }, /* MCP67 */
{ PCI_VDEVICE(NVIDIA, 0x0554), board_ahci_mcp67 }, /* MCP67 */
{ PCI_VDEVICE(NVIDIA, 0x0555), board_ahci_mcp67 }, /* MCP67 */
{ PCI_VDEVICE(NVIDIA, 0x0556), board_ahci_mcp67 }, /* MCP67 */
{ PCI_VDEVICE(NVIDIA, 0x0557), board_ahci_mcp67 }, /* MCP67 */
{ PCI_VDEVICE(NVIDIA, 0x0558), board_ahci_mcp67 }, /* MCP67 */
{ PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_mcp67 }, /* MCP67 */
{ PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_mcp67 }, /* MCP67 */
{ PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_mcp67 }, /* MCP67 */
{ PCI_VDEVICE(NVIDIA, 0x0580), board_ahci_mcp_linux }, /* Linux ID */
{ PCI_VDEVICE(NVIDIA, 0x0581), board_ahci_mcp_linux }, /* Linux ID */
{ PCI_VDEVICE(NVIDIA, 0x0582), board_ahci_mcp_linux }, /* Linux ID */
{ PCI_VDEVICE(NVIDIA, 0x0583), board_ahci_mcp_linux }, /* Linux ID */
{ PCI_VDEVICE(NVIDIA, 0x0584), board_ahci_mcp_linux }, /* Linux ID */
{ PCI_VDEVICE(NVIDIA, 0x0585), board_ahci_mcp_linux }, /* Linux ID */
{ PCI_VDEVICE(NVIDIA, 0x0586), board_ahci_mcp_linux }, /* Linux ID */
{ PCI_VDEVICE(NVIDIA, 0x0587), board_ahci_mcp_linux }, /* Linux ID */
{ PCI_VDEVICE(NVIDIA, 0x0588), board_ahci_mcp_linux }, /* Linux ID */
{ PCI_VDEVICE(NVIDIA, 0x0589), board_ahci_mcp_linux }, /* Linux ID */
{ PCI_VDEVICE(NVIDIA, 0x058a), board_ahci_mcp_linux }, /* Linux ID */
{ PCI_VDEVICE(NVIDIA, 0x058b), board_ahci_mcp_linux }, /* Linux ID */
{ PCI_VDEVICE(NVIDIA, 0x058c), board_ahci_mcp_linux }, /* Linux ID */
{ PCI_VDEVICE(NVIDIA, 0x058d), board_ahci_mcp_linux }, /* Linux ID */
{ PCI_VDEVICE(NVIDIA, 0x058e), board_ahci_mcp_linux }, /* Linux ID */
{ PCI_VDEVICE(NVIDIA, 0x058f), board_ahci_mcp_linux }, /* Linux ID */
{ PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_mcp73 }, /* MCP73 */
{ PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_mcp73 }, /* MCP73 */
{ PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_mcp73 }, /* MCP73 */
{ PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci_mcp73 }, /* MCP73 */
{ PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci_mcp73 }, /* MCP73 */
{ PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci_mcp73 }, /* MCP73 */
{ PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci_mcp73 }, /* MCP73 */
{ PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci_mcp73 }, /* MCP73 */
{ PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci_mcp73 }, /* MCP73 */
{ PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci_mcp73 }, /* MCP73 */
{ PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci_mcp73 }, /* MCP73 */
{ PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci_mcp73 }, /* MCP73 */
{ PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci_mcp77 }, /* MCP77 */
{ PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci_mcp77 }, /* MCP77 */
{ PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci_mcp77 }, /* MCP77 */
{ PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci_mcp77 }, /* MCP77 */
{ PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci_mcp77 }, /* MCP77 */
{ PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci_mcp77 }, /* MCP77 */
{ PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci_mcp77 }, /* MCP77 */
{ PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci_mcp77 }, /* MCP77 */
{ PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci_mcp77 }, /* MCP77 */
{ PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci_mcp77 }, /* MCP77 */
{ PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci_mcp77 }, /* MCP77 */
{ PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci_mcp77 }, /* MCP77 */
{ PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci_mcp79 }, /* MCP79 */
{ PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci_mcp79 }, /* MCP79 */
{ PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci_mcp79 }, /* MCP79 */
{ PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci_mcp79 }, /* MCP79 */
{ PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci_mcp79 }, /* MCP79 */
{ PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci_mcp79 }, /* MCP79 */
{ PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci_mcp79 }, /* MCP79 */
{ PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci_mcp79 }, /* MCP79 */
{ PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci_mcp79 }, /* MCP79 */
{ PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci_mcp79 }, /* MCP79 */
{ PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci_mcp79 }, /* MCP79 */
{ PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci_mcp79 }, /* MCP79 */
{ PCI_VDEVICE(NVIDIA, 0x0d84), board_ahci_mcp89 }, /* MCP89 */
{ PCI_VDEVICE(NVIDIA, 0x0d85), board_ahci_mcp89 }, /* MCP89 */
{ PCI_VDEVICE(NVIDIA, 0x0d86), board_ahci_mcp89 }, /* MCP89 */
{ PCI_VDEVICE(NVIDIA, 0x0d87), board_ahci_mcp89 }, /* MCP89 */
{ PCI_VDEVICE(NVIDIA, 0x0d88), board_ahci_mcp89 }, /* MCP89 */
{ PCI_VDEVICE(NVIDIA, 0x0d89), board_ahci_mcp89 }, /* MCP89 */
{ PCI_VDEVICE(NVIDIA, 0x0d8a), board_ahci_mcp89 }, /* MCP89 */
{ PCI_VDEVICE(NVIDIA, 0x0d8b), board_ahci_mcp89 }, /* MCP89 */
{ PCI_VDEVICE(NVIDIA, 0x0d8c), board_ahci_mcp89 }, /* MCP89 */
{ PCI_VDEVICE(NVIDIA, 0x0d8d), board_ahci_mcp89 }, /* MCP89 */
{ PCI_VDEVICE(NVIDIA, 0x0d8e), board_ahci_mcp89 }, /* MCP89 */
{ PCI_VDEVICE(NVIDIA, 0x0d8f), board_ahci_mcp89 }, /* MCP89 */
/* SiS */
{ PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
{ PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 968 */
{ PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */
/* ST Microelectronics */
{ PCI_VDEVICE(STMICRO, 0xCC06), board_ahci }, /* ST ConneXt */
/* Marvell */
{ PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
{ PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9123),
.class = PCI_CLASS_STORAGE_SATA_AHCI,
.class_mask = 0xffffff,
.driver_data = board_ahci_yes_fbs }, /* 88se9128 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9125),
.driver_data = board_ahci_yes_fbs }, /* 88se9125 */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_MARVELL_EXT, 0x9178,
PCI_VENDOR_ID_MARVELL_EXT, 0x9170),
.driver_data = board_ahci_yes_fbs }, /* 88se9170 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a),
.driver_data = board_ahci_yes_fbs }, /* 88se9172 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172),
.driver_data = board_ahci_yes_fbs }, /* 88se9182 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9182),
.driver_data = board_ahci_yes_fbs }, /* 88se9172 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9192),
.driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0),
.driver_data = board_ahci_yes_fbs },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a2), /* 88se91a2 */
.driver_data = board_ahci_yes_fbs },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a3),
.driver_data = board_ahci_yes_fbs },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230),
.driver_data = board_ahci_yes_fbs },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9235),
.driver_data = board_ahci_no_debounce_delay },
{ PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642), /* highpoint rocketraid 642L */
.driver_data = board_ahci_yes_fbs },
{ PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0645), /* highpoint rocketraid 644L */
.driver_data = board_ahci_yes_fbs },
/* Promise */
{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
{ PCI_VDEVICE(PROMISE, 0x3781), board_ahci }, /* FastTrak TX8660 ahci-mode */
/* Asmedia */
{ PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci }, /* ASM1060 */
{ PCI_VDEVICE(ASMEDIA, 0x0602), board_ahci }, /* ASM1060 */
{ PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */
{ PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */
{ PCI_VDEVICE(ASMEDIA, 0x0621), board_ahci }, /* ASM1061R */
{ PCI_VDEVICE(ASMEDIA, 0x0622), board_ahci }, /* ASM1062R */
{ PCI_VDEVICE(ASMEDIA, 0x0624), board_ahci }, /* ASM1062+JMB575 */
/*
* Samsung SSDs found on some macbooks. NCQ times out if MSI is
* enabled. https://bugzilla.kernel.org/show_bug.cgi?id=60731
*/
{ PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_nomsi },
{ PCI_VDEVICE(SAMSUNG, 0xa800), board_ahci_nomsi },
/* Enmotus */
{ PCI_DEVICE(0x1c44, 0x8000), board_ahci },
/* Loongson */
{ PCI_VDEVICE(LOONGSON, 0x7a08), board_ahci },
/* Generic, PCI class code for AHCI */
{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
{ } /* terminate list */
};
static const struct dev_pm_ops ahci_pci_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(ahci_pci_device_suspend, ahci_pci_device_resume)
SET_RUNTIME_PM_OPS(ahci_pci_device_runtime_suspend,
ahci_pci_device_runtime_resume, NULL)
};
static struct pci_driver ahci_pci_driver = {
.name = DRV_NAME,
.id_table = ahci_pci_tbl,
.probe = ahci_init_one,
.remove = ahci_remove_one,
.shutdown = ahci_shutdown_one,
.driver = {
.pm = &ahci_pci_pm_ops,
},
};
#if IS_ENABLED(CONFIG_PATA_MARVELL)
static int marvell_enable;
#else
static int marvell_enable = 1;
#endif
module_param(marvell_enable, int, 0644);
MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
static int mobile_lpm_policy = -1;
module_param(mobile_lpm_policy, int, 0644);
MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets");
static void ahci_pci_save_initial_config(struct pci_dev *pdev,
struct ahci_host_priv *hpriv)
{
if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) {
dev_info(&pdev->dev, "JMB361 has only one port\n");
hpriv->saved_port_map = 1;
}
/*
* Temporary Marvell 6145 hack: PATA port presence
* is asserted through the standard AHCI port
* presence register, as bit 4 (counting from 0)
*/
if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
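/*
* 0x3 keeps the two SATA ports of the 6121 and 0xf the four SATA
* ports of the 6145, hiding the PATA port mentioned above (the
* exact per-chip port counts are an assumption based on these masks).
*/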
if (pdev->device == 0x6121)
hpriv->mask_port_map = 0x3;
else
hpriv->mask_port_map = 0xf;
dev_info(&pdev->dev,
"Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
}
ahci_save_initial_config(&pdev->dev, hpriv);
}
static int ahci_pci_reset_controller(struct ata_host *host)
{
struct pci_dev *pdev = to_pci_dev(host->dev);
struct ahci_host_priv *hpriv = host->private_data;
int rc;
rc = ahci_reset_controller(host);
if (rc)
return rc;
/*
* If platform firmware failed to enable ports, try to enable
* them here.
*/
ahci_intel_pcs_quirk(pdev, hpriv);
return 0;
}
static void ahci_pci_init_controller(struct ata_host *host)
{
struct ahci_host_priv *hpriv = host->private_data;
struct pci_dev *pdev = to_pci_dev(host->dev);
void __iomem *port_mmio;
u32 tmp;
int mv;
if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
if (pdev->device == 0x6121)
mv = 2;
else
mv = 4;
port_mmio = __ahci_port_base(hpriv, mv);
writel(0, port_mmio + PORT_IRQ_MASK);
/* clear port IRQ */
tmp = readl(port_mmio + PORT_IRQ_STAT);
dev_dbg(&pdev->dev, "PORT_IRQ_STAT 0x%x\n", tmp);
if (tmp)
writel(tmp, port_mmio + PORT_IRQ_STAT);
}
ahci_init_controller(host);
}
static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct ahci_host_priv *hpriv = ap->host->private_data;
bool online;
int rc;
hpriv->stop_engine(ap);
rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
deadline, &online, NULL);
hpriv->start_engine(ap);
/* vt8251 doesn't clear BSY on signature FIS reception,
* request follow-up softreset.
*/
return online ? -EAGAIN : rc;
}
static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct ahci_port_priv *pp = ap->private_data;
struct ahci_host_priv *hpriv = ap->host->private_data;
u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
struct ata_taskfile tf;
bool online;
int rc;
hpriv->stop_engine(ap);
/* clear D2H reception area to properly wait for D2H FIS */
ata_tf_init(link->device, &tf);
tf.status = ATA_BUSY;
ata_tf_to_fis(&tf, 0, 0, d2h_fis);
rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
deadline, &online, NULL);
hpriv->start_engine(ap);
/* The pseudo configuration device on SIMG4726 attached to
* ASUS P5W-DH Deluxe doesn't send signature FIS after
* hardreset if no device is attached to the first downstream
* port && the pseudo device locks up on SRST w/ PMP==0. To
* work around this, wait for !BSY only briefly. If BSY isn't
* cleared, perform CLO and proceed to IDENTIFY (achieved by
* ATA_LFLAG_NO_SRST and ATA_LFLAG_ASSUME_ATA).
*
* Wait for two seconds. Devices attached to downstream port
* which can't process the following IDENTIFY after this will
* have to be reset again. For most cases, this should
* suffice while making probing snappish enough.
*/
if (online) {
rc = ata_wait_after_reset(link, jiffies + 2 * HZ,
ahci_check_ready);
if (rc)
ahci_kick_engine(ap);
}
return rc;
}
/*
* ahci_avn_hardreset - attempt more aggressive recovery of Avoton ports.
*
* It has been observed with some SSDs that the timing of events in the
* link synchronization phase can leave the port in a state that cannot
* be recovered by a SATA hard reset alone. The failing signature is
* SStatus.DET stuck at 1 ("Device presence detected but Phy
* communication not established"). It was found that unloading and
* reloading the driver when this problem occurs allows the drive
* connection to be recovered (DET advanced to 0x3). The critical
* component of reloading the driver is that the port state machines are
* reset by bouncing "port enable" in the AHCI PCS configuration
* register. So, reproduce that effect by bouncing a port whenever we
* see DET==1 after a reset.
*/
static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
const unsigned int *timing = sata_ehc_deb_timing(&link->eh_context);
struct ata_port *ap = link->ap;
struct ahci_port_priv *pp = ap->private_data;
struct ahci_host_priv *hpriv = ap->host->private_data;
u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
unsigned long tmo = deadline - jiffies;
struct ata_taskfile tf;
bool online;
int rc, i;
hpriv->stop_engine(ap);
for (i = 0; i < 2; i++) {
u16 val;
u32 sstatus;
int port = ap->port_no;
struct ata_host *host = ap->host;
struct pci_dev *pdev = to_pci_dev(host->dev);
/* clear D2H reception area to properly wait for D2H FIS */
ata_tf_init(link->device, &tf);
tf.status = ATA_BUSY;
ata_tf_to_fis(&tf, 0, 0, d2h_fis);
rc = sata_link_hardreset(link, timing, deadline, &online,
ahci_check_ready);
if (sata_scr_read(link, SCR_STATUS, &sstatus) != 0 ||
(sstatus & 0xf) != 1)
break;
ata_link_info(link, "avn bounce port%d\n", port);
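/*
* Bounce this port's enable bit in the PCS config register
* (offset 0x92): clear it, wait a second, then set it again to
* reset the port state machine as described above.
*/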
pci_read_config_word(pdev, 0x92, &val);
val &= ~(1 << port);
pci_write_config_word(pdev, 0x92, val);
ata_msleep(ap, 1000);
val |= 1 << port;
pci_write_config_word(pdev, 0x92, val);
deadline += tmo;
}
hpriv->start_engine(ap);
if (online)
*class = ahci_dev_classify(ap);
return rc;
}
#ifdef CONFIG_PM
static void ahci_pci_disable_interrupts(struct ata_host *host)
{
struct ahci_host_priv *hpriv = host->private_data;
void __iomem *mmio = hpriv->mmio;
u32 ctl;
/* AHCI spec rev1.1 section 8.3.3:
* Software must disable interrupts prior to requesting a
* transition of the HBA to D3 state.
*/
ctl = readl(mmio + HOST_CTL);
ctl &= ~HOST_IRQ_EN;
writel(ctl, mmio + HOST_CTL);
readl(mmio + HOST_CTL); /* flush */
}
static int ahci_pci_device_runtime_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct ata_host *host = pci_get_drvdata(pdev);
ahci_pci_disable_interrupts(host);
return 0;
}
static int ahci_pci_device_runtime_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct ata_host *host = pci_get_drvdata(pdev);
int rc;
rc = ahci_pci_reset_controller(host);
if (rc)
return rc;
ahci_pci_init_controller(host);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int ahci_pci_device_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct ata_host *host = pci_get_drvdata(pdev);
struct ahci_host_priv *hpriv = host->private_data;
if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
dev_err(&pdev->dev,
"BIOS update required for suspend/resume\n");
return -EIO;
}
ahci_pci_disable_interrupts(host);
ata_host_suspend(host, PMSG_SUSPEND);
return 0;
}
static int ahci_pci_device_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct ata_host *host = pci_get_drvdata(pdev);
int rc;
/* Apple BIOS helpfully mangles the registers on resume */
if (is_mcp89_apple(pdev))
ahci_mcp89_apple_enable(pdev);
if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
rc = ahci_pci_reset_controller(host);
if (rc)
return rc;
ahci_pci_init_controller(host);
}
ata_host_resume(host);
return 0;
}
#endif
#endif /* CONFIG_PM */
static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
{
const int dma_bits = using_dac ? 64 : 32;
int rc;
/*
* If the device fixup already set the dma_mask to some non-standard
* value, don't extend it here. This happens on STA2X11, for example.
*
* XXX: manipulating the DMA mask from platform code is completely
* bogus, platform code should use dev->bus_dma_limit instead.
*/
if (pdev->dma_mask && pdev->dma_mask < DMA_BIT_MASK(32))
return 0;
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(dma_bits));
if (rc)
dev_err(&pdev->dev, "DMA enable failed\n");
return rc;
}
static void ahci_pci_print_info(struct ata_host *host)
{
struct pci_dev *pdev = to_pci_dev(host->dev);
u16 cc;
const char *scc_s;
pci_read_config_word(pdev, 0x0a, &cc);
if (cc == PCI_CLASS_STORAGE_IDE)
scc_s = "IDE";
else if (cc == PCI_CLASS_STORAGE_SATA)
scc_s = "SATA";
else if (cc == PCI_CLASS_STORAGE_RAID)
scc_s = "RAID";
else
scc_s = "unknown";
ahci_print_info(host, scc_s);
}
/* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is
* hardwired to on-board SIMG 4726. The chipset is ICH8 and doesn't
* support PMP and the 4726 either directly exports the device
* attached to the first downstream port or acts as a hardware storage
* controller and emulates a single ATA device (can be RAID 0/1 or some
* other configuration).
*
* When there's no device attached to the first downstream port of the
* 4726, "Config Disk" appears, which is a pseudo ATA device to
* configure the 4726. However, ATA emulation of the device is very
* lame. It doesn't send signature D2H Reg FIS after the initial
* hardreset, pukes on SRST w/ PMP==0 and has a bunch of other issues.
*
* The following function works around the problem by always using
* hardreset on the port and not depending on receiving signature FIS
* afterward. If signature FIS isn't received soon, ATA class is
* assumed without follow-up softreset.
*/
static void ahci_p5wdh_workaround(struct ata_host *host)
{
static const struct dmi_system_id sysids[] = {
{
.ident = "P5W DH Deluxe",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR,
"ASUSTEK COMPUTER INC"),
DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
},
},
{ }
};
struct pci_dev *pdev = to_pci_dev(host->dev);
if (pdev->bus->number == 0 && pdev->devfn == PCI_DEVFN(0x1f, 2) &&
dmi_check_system(sysids)) {
struct ata_port *ap = host->ports[1];
dev_info(&pdev->dev,
"enabling ASUS P5W DH Deluxe on-board SIMG4726 workaround\n");
ap->ops = &ahci_p5wdh_ops;
ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA;
}
}
/*
* Macbook7,1 firmware forcibly disables MCP89 AHCI and changes PCI ID when
* booting in BIOS compatibility mode. We restore the registers but not ID.
*/
static void ahci_mcp89_apple_enable(struct pci_dev *pdev)
{
u32 val;
printk(KERN_INFO "ahci: enabling MCP89 AHCI mode\n");
pci_read_config_dword(pdev, 0xf8, &val);
val |= 1 << 0x1b;
/* the following changes the device ID, but appears not to affect function */
/* val = (val & ~0xf0000000) | 0x80000000; */
pci_write_config_dword(pdev, 0xf8, val);
pci_read_config_dword(pdev, 0x54c, &val);
val |= 1 << 0xc;
pci_write_config_dword(pdev, 0x54c, val);
pci_read_config_dword(pdev, 0x4a4, &val);
val &= 0xff;
val |= 0x01060100;
pci_write_config_dword(pdev, 0x4a4, val);
pci_read_config_dword(pdev, 0x54c, &val);
val &= ~(1 << 0xc);
pci_write_config_dword(pdev, 0x54c, val);
pci_read_config_dword(pdev, 0xf8, &val);
val &= ~(1 << 0x1b);
pci_write_config_dword(pdev, 0xf8, val);
}
static bool is_mcp89_apple(struct pci_dev *pdev)
{
return pdev->vendor == PCI_VENDOR_ID_NVIDIA &&
pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA &&
pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
pdev->subsystem_device == 0xcb89;
}
/* only some SB600 ahci controllers can do 64bit DMA */
static bool ahci_sb600_enable_64bit(struct pci_dev *pdev)
{
static const struct dmi_system_id sysids[] = {
/*
* The oldest version known to be broken is 0901 and the oldest
* known to work is 1501, which was released on 2007-10-26.
* Enable 64bit DMA on 1501 and anything newer.
*
* Please read bko#9412 for more info.
*/
{
.ident = "ASUS M2A-VM",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR,
"ASUSTeK Computer INC."),
DMI_MATCH(DMI_BOARD_NAME, "M2A-VM"),
},
.driver_data = "20071026", /* yyyymmdd */
},
/*
* All BIOS versions for the MSI K9A2 Platinum (MS-7376)
* support 64bit DMA.
*
* BIOS versions earlier than 1.5 had the Manufacturer DMI
* fields as "MICRO-STAR INTERANTIONAL CO.,LTD".
* This spelling mistake was fixed in BIOS version 1.5, so
* 1.5 and later have the Manufacturer as
* "MICRO-STAR INTERNATIONAL CO.,LTD".
* So try to match on DMI_BOARD_VENDOR of "MICRO-STAR INTER".
*
* BIOS versions earlier than 1.9 had a Board Product Name
* DMI field of "MS-7376". This was changed to be
* "K9A2 Platinum (MS-7376)" in version 1.9, but we can still
* match on DMI_BOARD_NAME of "MS-7376".
*/
{
.ident = "MSI K9A2 Platinum",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR,
"MICRO-STAR INTER"),
DMI_MATCH(DMI_BOARD_NAME, "MS-7376"),
},
},
/*
* All BIOS versions for the MSI K9AGM2 (MS-7327) support
* 64bit DMA.
*
* This board also had the typo mentioned above in the
* Manufacturer DMI field (fixed in BIOS version 1.5), so
* match on DMI_BOARD_VENDOR of "MICRO-STAR INTER" again.
*/
{
.ident = "MSI K9AGM2",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR,
"MICRO-STAR INTER"),
DMI_MATCH(DMI_BOARD_NAME, "MS-7327"),
},
},
/*
* All BIOS versions for the Asus M3A support 64bit DMA.
* (all release versions from 0301 to 1206 were tested)
*/
{
.ident = "ASUS M3A",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR,
"ASUSTeK Computer INC."),
DMI_MATCH(DMI_BOARD_NAME, "M3A"),
},
},
{ }
};
const struct dmi_system_id *match;
int year, month, date;
char buf[9];
match = dmi_first_match(sysids);
if (pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x12, 0) ||
!match)
return false;
if (!match->driver_data)
goto enable_64bit;
dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
if (strcmp(buf, match->driver_data) >= 0)
goto enable_64bit;
else {
dev_warn(&pdev->dev,
"%s: BIOS too old, forcing 32bit DMA, update BIOS\n",
match->ident);
return false;
}
enable_64bit:
dev_warn(&pdev->dev, "%s: enabling 64bit DMA\n", match->ident);
return true;
}
static bool ahci_broken_system_poweroff(struct pci_dev *pdev)
{
static const struct dmi_system_id broken_systems[] = {
{
.ident = "HP Compaq nx6310",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6310"),
},
/* PCI slot number of the controller */
.driver_data = (void *)0x1FUL,
},
{
.ident = "HP Compaq 6720s",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6720s"),
},
/* PCI slot number of the controller */
.driver_data = (void *)0x1FUL,
},
{ } /* terminate list */
};
const struct dmi_system_id *dmi = dmi_first_match(broken_systems);
if (dmi) {
unsigned long slot = (unsigned long)dmi->driver_data;
/* apply the quirk only to on-board controllers */
return slot == PCI_SLOT(pdev->devfn);
}
return false;
}
static bool ahci_broken_suspend(struct pci_dev *pdev)
{
static const struct dmi_system_id sysids[] = {
/*
* On HP dv[4-6] and HDX18 with earlier BIOSen, the link
* to the hard disk doesn't come online after
* resuming from STR. Warn and fail suspend.
*
* http://bugzilla.kernel.org/show_bug.cgi?id=12276
*
* Use dates instead of versions to match as HP is
* apparently recycling both product and version
* strings.
*
* http://bugzilla.kernel.org/show_bug.cgi?id=15462
*/
{
.ident = "dv4",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME,
"HP Pavilion dv4 Notebook PC"),
},
.driver_data = "20090105", /* F.30 */
},
{
.ident = "dv5",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME,
"HP Pavilion dv5 Notebook PC"),
},
.driver_data = "20090506", /* F.16 */
},
{
.ident = "dv6",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME,
"HP Pavilion dv6 Notebook PC"),
},
.driver_data = "20090423", /* F.21 */
},
{
.ident = "HDX18",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME,
"HP HDX18 Notebook PC"),
},
.driver_data = "20090430", /* F.23 */
},
/*
* Acer eMachines G725 has the same problem. BIOS
* V1.03 is known to be broken. V3.04 is known to
* work. In between there are V1.06, V2.06 and V3.03,
* which we don't know much about. For now,
* blacklist anything older than V3.04.
*
* http://bugzilla.kernel.org/show_bug.cgi?id=15104
*/
{
.ident = "G725",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "eMachines"),
DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"),
},
.driver_data = "20091216", /* V3.04 */
},
{ } /* terminate list */
};
const struct dmi_system_id *dmi = dmi_first_match(sysids);
int year, month, date;
char buf[9];
if (!dmi || pdev->bus->number || pdev->devfn != PCI_DEVFN(0x1f, 2))
return false;
dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
return strcmp(buf, dmi->driver_data) < 0;
}
static bool ahci_broken_lpm(struct pci_dev *pdev)
{
static const struct dmi_system_id sysids[] = {
/* Various Lenovo 50 series have LPM issues with older BIOSen */
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X250"),
},
.driver_data = "20180406", /* 1.31 */
},
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L450"),
},
.driver_data = "20180420", /* 1.28 */
},
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T450s"),
},
.driver_data = "20180315", /* 1.33 */
},
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W541"),
},
/*
* Note date based on release notes, 2.35 has been
* reported to be good, but I've been unable to get
* a hold of the reporter to get the DMI BIOS date.
* TODO: fix this.
*/
.driver_data = "20180310", /* 2.35 */
},
{ } /* terminate list */
};
const struct dmi_system_id *dmi = dmi_first_match(sysids);
int year, month, date;
char buf[9];
if (!dmi)
return false;
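/*
* Build the BIOS date as a "yyyymmdd" string so that a plain strcmp()
* gives chronological ordering against the cutoff date above.
*/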
dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
return strcmp(buf, dmi->driver_data) < 0;
}
static bool ahci_broken_online(struct pci_dev *pdev)
{
#define ENCODE_BUSDEVFN(bus, slot, func) \
(void *)(unsigned long)(((bus) << 8) | PCI_DEVFN((slot), (func)))
static const struct dmi_system_id sysids[] = {
/*
* There are several gigabyte boards which use
* SIMG5723s configured as hardware RAID. Certain
* 5723 firmware revisions shipped there keep the link
* online but fail to answer properly to SRST or
* IDENTIFY when no device is attached downstream,
* causing libata to retry quite a few times and leading
* to excessive detection delay.
*
* As these firmwares respond to the second reset try
* with invalid device signature, considering unknown
* sig as offline works around the problem acceptably.
*/
{
.ident = "EP45-DQ6",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR,
"Gigabyte Technology Co., Ltd."),
DMI_MATCH(DMI_BOARD_NAME, "EP45-DQ6"),
},
.driver_data = ENCODE_BUSDEVFN(0x0a, 0x00, 0),
},
{
.ident = "EP45-DS5",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR,
"Gigabyte Technology Co., Ltd."),
DMI_MATCH(DMI_BOARD_NAME, "EP45-DS5"),
},
.driver_data = ENCODE_BUSDEVFN(0x03, 0x00, 0),
},
{ } /* terminate list */
};
#undef ENCODE_BUSDEVFN
const struct dmi_system_id *dmi = dmi_first_match(sysids);
unsigned int val;
if (!dmi)
return false;
val = (unsigned long)dmi->driver_data;
return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff);
}
static bool ahci_broken_devslp(struct pci_dev *pdev)
{
/* device with broken DEVSLP but still showing SDS capability */
static const struct pci_device_id ids[] = {
{ PCI_VDEVICE(INTEL, 0x0f23)}, /* Valleyview SoC */
{}
};
return pci_match_id(ids, pdev);
}
#ifdef CONFIG_ATA_ACPI
static void ahci_gtf_filter_workaround(struct ata_host *host)
{
static const struct dmi_system_id sysids[] = {
/*
* Aspire 3810T issues a bunch of SATA enable commands
* via _GTF including an invalid one and one which is
* rejected by the device. Among the successful ones
* is FPDMA non-zero offset enable which when enabled
* only on the drive side leads to NCQ command
* failures. Filter it out.
*/
{
.ident = "Aspire 3810T",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3810T"),
},
.driver_data = (void *)ATA_ACPI_FILTER_FPDMA_OFFSET,
},
{ }
};
const struct dmi_system_id *dmi = dmi_first_match(sysids);
unsigned int filter;
int i;
if (!dmi)
return;
filter = (unsigned long)dmi->driver_data;
dev_info(host->dev, "applying extra ACPI _GTF filter 0x%x for %s\n",
filter, dmi->ident);
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
struct ata_link *link;
struct ata_device *dev;
ata_for_each_link(link, ap, EDGE)
ata_for_each_dev(dev, link, ALL)
dev->gtf_filter |= filter;
}
}
#else
static inline void ahci_gtf_filter_workaround(struct ata_host *host)
{}
#endif
/*
* On the Acer Aspire Switch Alpha 12, sometimes all SATA ports are detected
* as DUMMY, or detected but eventually get a "link down" and never get up
* again. When this happens, CAP.NP may hold a value of 0x00 or 0x01, and the
* port_map may hold a value of 0x00.
*
* Overriding CAP.NP to 0x02 and the port_map to 0x7 will reveal all 3 ports
* and can significantly reduce the occurrence of the problem.
*
* https://bugzilla.kernel.org/show_bug.cgi?id=189471
*/
static void acer_sa5_271_workaround(struct ahci_host_priv *hpriv,
struct pci_dev *pdev)
{
static const struct dmi_system_id sysids[] = {
{
.ident = "Acer Switch Alpha 12",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271")
},
},
{ }
};
if (dmi_check_system(sysids)) {
dev_info(&pdev->dev, "enabling Acer Switch Alpha 12 workaround\n");
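/*
* Only apply the override when the high CAP bits match the value
* seen on the affected machines (an assumption based on the mask
* below); the replacement keeps those bits and forces CAP.NP in the
* low byte to 0x02, exposing the 3 ports described above.
*/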
if ((hpriv->saved_cap & 0xC734FF00) == 0xC734FF00) {
hpriv->port_map = 0x7;
hpriv->cap = 0xC734FF02;
}
}
}
#ifdef CONFIG_ARM64
/*
* Due to ERRATA#22536, ThunderX needs to handle HOST_IRQ_STAT differently.
* The workaround is to make sure all pending IRQs are served before
* leaving the handler.
*/
static irqreturn_t ahci_thunderx_irq_handler(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
struct ahci_host_priv *hpriv;
unsigned int rc = 0;
void __iomem *mmio;
u32 irq_stat, irq_masked;
unsigned int handled = 1;
hpriv = host->private_data;
mmio = hpriv->mmio;
irq_stat = readl(mmio + HOST_IRQ_STAT);
if (!irq_stat)
return IRQ_NONE;
do {
irq_masked = irq_stat & hpriv->port_map;
spin_lock(&host->lock);
rc = ahci_handle_port_intr(host, irq_masked);
if (!rc)
handled = 0;
writel(irq_stat, mmio + HOST_IRQ_STAT);
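/* ack what we saw, then re-read so IRQs raised meanwhile are not lost */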
irq_stat = readl(mmio + HOST_IRQ_STAT);
spin_unlock(&host->lock);
} while (irq_stat);
return IRQ_RETVAL(handled);
}
#endif
static void ahci_remap_check(struct pci_dev *pdev, int bar,
struct ahci_host_priv *hpriv)
{
int i;
u32 cap;
/*
* Check if this device might have remapped nvme devices.
*/
if (pdev->vendor != PCI_VENDOR_ID_INTEL ||
pci_resource_len(pdev, bar) < SZ_512K ||
bar != AHCI_PCI_BAR_STANDARD ||
!(readl(hpriv->mmio + AHCI_VSCAP) & 1))
return;
cap = readq(hpriv->mmio + AHCI_REMAP_CAP);
for (i = 0; i < AHCI_MAX_REMAP; i++) {
if ((cap & (1 << i)) == 0)
continue;
if (readl(hpriv->mmio + ahci_remap_dcc(i))
!= PCI_CLASS_STORAGE_EXPRESS)
continue;
/* We've found a remapped device */
hpriv->remapped_nvme++;
}
if (!hpriv->remapped_nvme)
return;
dev_warn(&pdev->dev, "Found %u remapped NVMe devices.\n",
hpriv->remapped_nvme);
dev_warn(&pdev->dev,
"Switch your BIOS from RAID to AHCI mode to use them.\n");
/*
* Don't rely on the msi-x capability in the remap case,
* share the legacy interrupt across ahci and remapped devices.
*/
hpriv->flags |= AHCI_HFLAG_NO_MSI;
}
static int ahci_get_irq_vector(struct ata_host *host, int port)
{
return pci_irq_vector(to_pci_dev(host->dev), port);
}
static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports,
struct ahci_host_priv *hpriv)
{
int nvec;
if (hpriv->flags & AHCI_HFLAG_NO_MSI)
return -ENODEV;
/*
* If the number of MSIs is less than the number of ports then Sharing
* Last Message mode could be enforced. In this case assume that the
* advantage of multiple MSIs is negated and use single MSI mode instead.
*/
if (n_ports > 1) {
nvec = pci_alloc_irq_vectors(pdev, n_ports, INT_MAX,
PCI_IRQ_MSIX | PCI_IRQ_MSI);
if (nvec > 0) {
if (!(readl(hpriv->mmio + HOST_CTL) & HOST_MRSM)) {
hpriv->get_irq_vector = ahci_get_irq_vector;
hpriv->flags |= AHCI_HFLAG_MULTI_MSI;
return nvec;
}
/*
* Fallback to single MSI mode if the controller
* enforced MRSM mode.
*/
printk(KERN_INFO
"ahci: MRSM is on, fallback to single MSI\n");
pci_free_irq_vectors(pdev);
}
}
/*
* If the host is not capable of supporting per-port vectors, fall
* back to single MSI before finally attempting single MSI-X.
*/
nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
if (nvec == 1)
return nvec;
return pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
}
static void ahci_update_initial_lpm_policy(struct ata_port *ap,
struct ahci_host_priv *hpriv)
{
int policy = CONFIG_SATA_MOBILE_LPM_POLICY;
/* Ignore processing for chipsets that don't use policy */
if (!(hpriv->flags & AHCI_HFLAG_USE_LPM_POLICY))
return;
/* user modified policy via module param */
if (mobile_lpm_policy != -1) {
policy = mobile_lpm_policy;
goto update_policy;
}
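/*
* If the build-time default asks for more than medium power savings
* and s2idle is the default suspend mode, pick the deepest LPM state
* the controller advertises: partial if supported, otherwise slumber.
*/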
if (policy > ATA_LPM_MED_POWER && pm_suspend_default_s2idle()) {
if (hpriv->cap & HOST_CAP_PART)
policy = ATA_LPM_MIN_POWER_WITH_PARTIAL;
else if (hpriv->cap & HOST_CAP_SSC)
policy = ATA_LPM_MIN_POWER;
}
update_policy:
if (policy >= ATA_LPM_UNKNOWN && policy <= ATA_LPM_MIN_POWER)
ap->target_lpm_policy = policy;
}
static void ahci_intel_pcs_quirk(struct pci_dev *pdev, struct ahci_host_priv *hpriv)
{
const struct pci_device_id *id = pci_match_id(ahci_pci_tbl, pdev);
u16 tmp16;
/*
* Only apply the 6-port PCS quirk for known legacy platforms.
*/
if (!id || id->vendor != PCI_VENDOR_ID_INTEL)
return;
/* Skip applying the quirk on Denverton and beyond */
if (((enum board_ids) id->driver_data) >= board_ahci_pcs7)
return;
/*
* port_map is determined from the PORTS_IMPL PCI register, which is
* implemented as a write or write-once register. If the register
* isn't programmed, ahci automatically generates it from the number
* of ports, which is good enough for PCS programming. It is
* otherwise expected that platform firmware enables the ports
* before the OS boots.
*/
pci_read_config_word(pdev, PCS_6, &tmp16);
if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
tmp16 |= hpriv->port_map;
pci_write_config_word(pdev, PCS_6, tmp16);
}
}
static ssize_t remapped_nvme_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
return sysfs_emit(buf, "%u\n", hpriv->remapped_nvme);
}
static DEVICE_ATTR_RO(remapped_nvme);
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
unsigned int board_id = ent->driver_data;
struct ata_port_info pi = ahci_port_info[board_id];
const struct ata_port_info *ppi[] = { &pi, NULL };
struct device *dev = &pdev->dev;
struct ahci_host_priv *hpriv;
struct ata_host *host;
int n_ports, i, rc;
int ahci_pci_bar = AHCI_PCI_BAR_STANDARD;
WARN_ON((int)ATA_MAX_QUEUE > AHCI_MAX_CMDS);
ata_print_version_once(&pdev->dev, DRV_VERSION);
/* The AHCI driver can only drive the SATA ports; the PATA driver
can drive them all, so if both drivers are selected make sure
AHCI stays out of the way */
if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable)
return -ENODEV;
/* Apple BIOS on MCP89 prevents us using AHCI */
if (is_mcp89_apple(pdev))
ahci_mcp89_apple_enable(pdev);
/* Promise's PDC42819 is a SAS/SATA controller that has an AHCI mode.
* At the moment, we can only use the AHCI mode. Let the users know
* that for SAS drives they're out of luck.
*/
if (pdev->vendor == PCI_VENDOR_ID_PROMISE)
dev_info(&pdev->dev,
"PDC42819 can only drive SATA devices with this driver\n");
/* Some devices use non-standard BARs */
if (pdev->vendor == PCI_VENDOR_ID_STMICRO && pdev->device == 0xCC06)
ahci_pci_bar = AHCI_PCI_BAR_STA2X11;
else if (pdev->vendor == 0x1c44 && pdev->device == 0x8000)
ahci_pci_bar = AHCI_PCI_BAR_ENMOTUS;
else if (pdev->vendor == PCI_VENDOR_ID_CAVIUM) {
if (pdev->device == 0xa01c)
ahci_pci_bar = AHCI_PCI_BAR_CAVIUM;
if (pdev->device == 0xa084)
ahci_pci_bar = AHCI_PCI_BAR_CAVIUM_GEN5;
} else if (pdev->vendor == PCI_VENDOR_ID_LOONGSON) {
if (pdev->device == 0x7a08)
ahci_pci_bar = AHCI_PCI_BAR_LOONGSON;
}
/* acquire resources */
rc = pcim_enable_device(pdev);
if (rc)
return rc;
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
(pdev->device == 0x2652 || pdev->device == 0x2653)) {
u8 map;
/* ICH6s share the same PCI ID for both piix and ahci
* modes. Enabling ahci mode while MAP indicates
* combined mode is a bad idea. Yield to ata_piix.
*/
pci_read_config_byte(pdev, ICH_MAP, &map);
if (map & 0x3) {
dev_info(&pdev->dev,
"controller is in combined mode, can't enable AHCI mode\n");
return -ENODEV;
}
}
	/* AHCI controllers often implement an SFF-compatible interface.
* Grab all PCI BARs just in case.
*/
rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME);
if (rc == -EBUSY)
pcim_pin_device(pdev);
if (rc)
return rc;
hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
if (!hpriv)
return -ENOMEM;
hpriv->flags |= (unsigned long)pi.private_data;
/* MCP65 revision A1 and A2 can't do MSI */
if (board_id == board_ahci_mcp65 &&
(pdev->revision == 0xa1 || pdev->revision == 0xa2))
hpriv->flags |= AHCI_HFLAG_NO_MSI;
/* SB800 does NOT need the workaround to ignore SERR_INTERNAL */
if (board_id == board_ahci_sb700 && pdev->revision >= 0x40)
hpriv->flags &= ~AHCI_HFLAG_IGN_SERR_INTERNAL;
/* only some SB600s can do 64bit DMA */
if (ahci_sb600_enable_64bit(pdev))
hpriv->flags &= ~AHCI_HFLAG_32BIT_ONLY;
hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar];
/* detect remapped nvme devices */
ahci_remap_check(pdev, ahci_pci_bar, hpriv);
sysfs_add_file_to_group(&pdev->dev.kobj,
&dev_attr_remapped_nvme.attr,
NULL);
/* must set flag prior to save config in order to take effect */
if (ahci_broken_devslp(pdev))
hpriv->flags |= AHCI_HFLAG_NO_DEVSLP;
#ifdef CONFIG_ARM64
if (pdev->vendor == PCI_VENDOR_ID_HUAWEI &&
pdev->device == 0xa235 &&
pdev->revision < 0x30)
hpriv->flags |= AHCI_HFLAG_NO_SXS;
if (pdev->vendor == 0x177d && pdev->device == 0xa01c)
hpriv->irq_handler = ahci_thunderx_irq_handler;
#endif
/* save initial config */
ahci_pci_save_initial_config(pdev, hpriv);
/* prepare host */
if (hpriv->cap & HOST_CAP_NCQ) {
pi.flags |= ATA_FLAG_NCQ;
/*
* Auto-activate optimization is supposed to be
* supported on all AHCI controllers indicating NCQ
* capability, but it seems to be broken on some
* chipsets including NVIDIAs.
*/
if (!(hpriv->flags & AHCI_HFLAG_NO_FPDMA_AA))
pi.flags |= ATA_FLAG_FPDMA_AA;
/*
* All AHCI controllers should be forward-compatible
* with the new auxiliary field. This code should be
* conditionalized if any buggy AHCI controllers are
* encountered.
*/
pi.flags |= ATA_FLAG_FPDMA_AUX;
}
if (hpriv->cap & HOST_CAP_PMP)
pi.flags |= ATA_FLAG_PMP;
ahci_set_em_messages(hpriv, &pi);
if (ahci_broken_system_poweroff(pdev)) {
pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN;
dev_info(&pdev->dev,
"quirky BIOS, skipping spindown on poweroff\n");
}
if (ahci_broken_lpm(pdev)) {
pi.flags |= ATA_FLAG_NO_LPM;
dev_warn(&pdev->dev,
"BIOS update required for Link Power Management support\n");
}
if (ahci_broken_suspend(pdev)) {
hpriv->flags |= AHCI_HFLAG_NO_SUSPEND;
dev_warn(&pdev->dev,
"BIOS update required for suspend/resume\n");
}
if (ahci_broken_online(pdev)) {
hpriv->flags |= AHCI_HFLAG_SRST_TOUT_IS_OFFLINE;
dev_info(&pdev->dev,
"online status unreliable, applying workaround\n");
}
/* Acer SA5-271 workaround modifies private_data */
acer_sa5_271_workaround(hpriv, pdev);
	/* CAP.NP sometimes indicates the index of the last enabled
* port, at other times, that of the last possible port, so
* determining the maximum port number requires looking at
* both CAP.NP and port_map.
*/
n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
if (!host)
return -ENOMEM;
host->private_data = hpriv;
if (ahci_init_msi(pdev, n_ports, hpriv) < 0) {
/* legacy intx interrupts */
pci_intx(pdev, 1);
}
hpriv->irq = pci_irq_vector(pdev, 0);
if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
host->flags |= ATA_HOST_PARALLEL_SCAN;
else
dev_info(&pdev->dev, "SSS flag set, parallel bus scan disabled\n");
if (!(hpriv->cap & HOST_CAP_PART))
host->flags |= ATA_HOST_NO_PART;
if (!(hpriv->cap & HOST_CAP_SSC))
host->flags |= ATA_HOST_NO_SSC;
if (!(hpriv->cap2 & HOST_CAP2_SDS))
host->flags |= ATA_HOST_NO_DEVSLP;
if (pi.flags & ATA_FLAG_EM)
ahci_reset_em(host);
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
ata_port_pbar_desc(ap, ahci_pci_bar, -1, "abar");
ata_port_pbar_desc(ap, ahci_pci_bar,
0x100 + ap->port_no * 0x80, "port");
/* set enclosure management message type */
if (ap->flags & ATA_FLAG_EM)
ap->em_message_type = hpriv->em_msg_type;
ahci_update_initial_lpm_policy(ap, hpriv);
/* disabled/not-implemented port */
if (!(hpriv->port_map & (1 << i)))
ap->ops = &ata_dummy_port_ops;
}
/* apply workaround for ASUS P5W DH Deluxe mainboard */
ahci_p5wdh_workaround(host);
/* apply gtf filter quirk */
ahci_gtf_filter_workaround(host);
/* initialize adapter */
rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
if (rc)
return rc;
rc = ahci_pci_reset_controller(host);
if (rc)
return rc;
ahci_pci_init_controller(host);
ahci_pci_print_info(host);
pci_set_master(pdev);
rc = ahci_host_activate(host, &ahci_sht);
if (rc)
return rc;
pm_runtime_put_noidle(&pdev->dev);
return 0;
}
static void ahci_shutdown_one(struct pci_dev *pdev)
{
ata_pci_shutdown_one(pdev);
}
static void ahci_remove_one(struct pci_dev *pdev)
{
sysfs_remove_file_from_group(&pdev->dev.kobj,
&dev_attr_remapped_nvme.attr,
NULL);
pm_runtime_get_noresume(&pdev->dev);
ata_pci_remove_one(pdev);
}
module_pci_driver(ahci_pci_driver);
MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("AHCI SATA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/ahci.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* sata_vsc.c - Vitesse VSC7174 4 port DPA SATA
*
* Maintained by: Jeremy Higdon @ SGI
* Please ALWAYS copy [email protected]
* on emails.
*
* Copyright 2004 SGI
*
* Bits from Jeff Garzik, Copyright RedHat, Inc.
*
* libata documentation is available via 'make {ps|pdf}docs',
* as Documentation/driver-api/libata.rst
*
* Vitesse hardware documentation presumably available under NDA.
* Intel 31244 (same hardware interface) documentation presumably
* available from http://developer.intel.com/
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "sata_vsc"
#define DRV_VERSION "2.3"
enum {
VSC_MMIO_BAR = 0,
/* Interrupt register offsets (from chip base address) */
VSC_SATA_INT_STAT_OFFSET = 0x00,
VSC_SATA_INT_MASK_OFFSET = 0x04,
/* Taskfile registers offsets */
VSC_SATA_TF_CMD_OFFSET = 0x00,
VSC_SATA_TF_DATA_OFFSET = 0x00,
VSC_SATA_TF_ERROR_OFFSET = 0x04,
VSC_SATA_TF_FEATURE_OFFSET = 0x06,
VSC_SATA_TF_NSECT_OFFSET = 0x08,
VSC_SATA_TF_LBAL_OFFSET = 0x0c,
VSC_SATA_TF_LBAM_OFFSET = 0x10,
VSC_SATA_TF_LBAH_OFFSET = 0x14,
VSC_SATA_TF_DEVICE_OFFSET = 0x18,
VSC_SATA_TF_STATUS_OFFSET = 0x1c,
VSC_SATA_TF_COMMAND_OFFSET = 0x1d,
VSC_SATA_TF_ALTSTATUS_OFFSET = 0x28,
VSC_SATA_TF_CTL_OFFSET = 0x29,
/* DMA base */
VSC_SATA_UP_DESCRIPTOR_OFFSET = 0x64,
VSC_SATA_UP_DATA_BUFFER_OFFSET = 0x6C,
VSC_SATA_DMA_CMD_OFFSET = 0x70,
/* SCRs base */
VSC_SATA_SCR_STATUS_OFFSET = 0x100,
VSC_SATA_SCR_ERROR_OFFSET = 0x104,
VSC_SATA_SCR_CONTROL_OFFSET = 0x108,
/* Port stride */
VSC_SATA_PORT_OFFSET = 0x200,
/* Error interrupt status bit offsets */
VSC_SATA_INT_ERROR_CRC = 0x40,
VSC_SATA_INT_ERROR_T = 0x20,
VSC_SATA_INT_ERROR_P = 0x10,
VSC_SATA_INT_ERROR_R = 0x8,
VSC_SATA_INT_ERROR_E = 0x4,
VSC_SATA_INT_ERROR_M = 0x2,
VSC_SATA_INT_PHY_CHANGE = 0x1,
VSC_SATA_INT_ERROR = (VSC_SATA_INT_ERROR_CRC | VSC_SATA_INT_ERROR_T | \
VSC_SATA_INT_ERROR_P | VSC_SATA_INT_ERROR_R | \
VSC_SATA_INT_ERROR_E | VSC_SATA_INT_ERROR_M | \
VSC_SATA_INT_PHY_CHANGE),
};
static int vsc_sata_scr_read(struct ata_link *link,
unsigned int sc_reg, u32 *val)
{
if (sc_reg > SCR_CONTROL)
return -EINVAL;
*val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 4));
return 0;
}
static int vsc_sata_scr_write(struct ata_link *link,
unsigned int sc_reg, u32 val)
{
if (sc_reg > SCR_CONTROL)
return -EINVAL;
writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
return 0;
}
static void vsc_freeze(struct ata_port *ap)
{
void __iomem *mask_addr;
mask_addr = ap->host->iomap[VSC_MMIO_BAR] +
VSC_SATA_INT_MASK_OFFSET + ap->port_no;
writeb(0, mask_addr);
}
static void vsc_thaw(struct ata_port *ap)
{
void __iomem *mask_addr;
mask_addr = ap->host->iomap[VSC_MMIO_BAR] +
VSC_SATA_INT_MASK_OFFSET + ap->port_no;
writeb(0xff, mask_addr);
}
static void vsc_intr_mask_update(struct ata_port *ap, u8 ctl)
{
void __iomem *mask_addr;
u8 mask;
mask_addr = ap->host->iomap[VSC_MMIO_BAR] +
VSC_SATA_INT_MASK_OFFSET + ap->port_no;
mask = readb(mask_addr);
if (ctl & ATA_NIEN)
mask |= 0x80;
else
mask &= 0x7F;
writeb(mask, mask_addr);
}
static void vsc_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
/*
* The only thing the ctl register is used for is SRST.
* That is not enabled or disabled via tf_load.
* However, if ATA_NIEN is changed, then we need to change
* the interrupt register.
*/
if ((tf->ctl & ATA_NIEN) != (ap->last_ctl & ATA_NIEN)) {
ap->last_ctl = tf->ctl;
vsc_intr_mask_update(ap, tf->ctl & ATA_NIEN);
}
if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
writew(tf->feature | (((u16)tf->hob_feature) << 8),
ioaddr->feature_addr);
writew(tf->nsect | (((u16)tf->hob_nsect) << 8),
ioaddr->nsect_addr);
writew(tf->lbal | (((u16)tf->hob_lbal) << 8),
ioaddr->lbal_addr);
writew(tf->lbam | (((u16)tf->hob_lbam) << 8),
ioaddr->lbam_addr);
writew(tf->lbah | (((u16)tf->hob_lbah) << 8),
ioaddr->lbah_addr);
} else if (is_addr) {
writew(tf->feature, ioaddr->feature_addr);
writew(tf->nsect, ioaddr->nsect_addr);
writew(tf->lbal, ioaddr->lbal_addr);
writew(tf->lbam, ioaddr->lbam_addr);
writew(tf->lbah, ioaddr->lbah_addr);
}
if (tf->flags & ATA_TFLAG_DEVICE)
writeb(tf->device, ioaddr->device_addr);
ata_wait_idle(ap);
}
static void vsc_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
u16 nsect, lbal, lbam, lbah, error;
tf->status = ata_sff_check_status(ap);
tf->device = readw(ioaddr->device_addr);
error = readw(ioaddr->error_addr);
nsect = readw(ioaddr->nsect_addr);
lbal = readw(ioaddr->lbal_addr);
lbam = readw(ioaddr->lbam_addr);
lbah = readw(ioaddr->lbah_addr);
tf->error = error;
tf->nsect = nsect;
tf->lbal = lbal;
tf->lbam = lbam;
tf->lbah = lbah;
if (tf->flags & ATA_TFLAG_LBA48) {
tf->hob_feature = error >> 8;
tf->hob_nsect = nsect >> 8;
tf->hob_lbal = lbal >> 8;
tf->hob_lbam = lbam >> 8;
tf->hob_lbah = lbah >> 8;
}
}
static inline void vsc_error_intr(u8 port_status, struct ata_port *ap)
{
if (port_status & (VSC_SATA_INT_PHY_CHANGE | VSC_SATA_INT_ERROR_M))
ata_port_freeze(ap);
else
ata_port_abort(ap);
}
static void vsc_port_intr(u8 port_status, struct ata_port *ap)
{
struct ata_queued_cmd *qc;
int handled = 0;
if (unlikely(port_status & VSC_SATA_INT_ERROR)) {
vsc_error_intr(port_status, ap);
return;
}
qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc && likely(!(qc->tf.flags & ATA_TFLAG_POLLING)))
handled = ata_bmdma_port_intr(ap, qc);
/* We received an interrupt during a polled command,
* or some other spurious condition. Interrupt reporting
* with this hardware is fairly reliable so it is safe to
* simply clear the interrupt
*/
if (unlikely(!handled))
ap->ops->sff_check_status(ap);
}
/*
* vsc_sata_interrupt
*
* Read the interrupt register and process for the devices that have
* them pending.
*/
static irqreturn_t vsc_sata_interrupt(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
unsigned int i;
unsigned int handled = 0;
u32 status;
status = readl(host->iomap[VSC_MMIO_BAR] + VSC_SATA_INT_STAT_OFFSET);
if (unlikely(status == 0xffffffff || status == 0)) {
if (status)
dev_err(host->dev,
": IRQ status == 0xffffffff, PCI fault or device removal?\n");
goto out;
}
spin_lock(&host->lock);
for (i = 0; i < host->n_ports; i++) {
u8 port_status = (status >> (8 * i)) & 0xff;
if (port_status) {
vsc_port_intr(port_status, host->ports[i]);
handled++;
}
}
spin_unlock(&host->lock);
out:
return IRQ_RETVAL(handled);
}
static const struct scsi_host_template vsc_sata_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static struct ata_port_operations vsc_sata_ops = {
.inherits = &ata_bmdma_port_ops,
/* The IRQ handling is not quite standard SFF behaviour so we
cannot use the default lost interrupt handler */
.lost_interrupt = ATA_OP_NULL,
.sff_tf_load = vsc_sata_tf_load,
.sff_tf_read = vsc_sata_tf_read,
.freeze = vsc_freeze,
.thaw = vsc_thaw,
.scr_read = vsc_sata_scr_read,
.scr_write = vsc_sata_scr_write,
};
static void vsc_sata_setup_port(struct ata_ioports *port, void __iomem *base)
{
port->cmd_addr = base + VSC_SATA_TF_CMD_OFFSET;
port->data_addr = base + VSC_SATA_TF_DATA_OFFSET;
port->error_addr = base + VSC_SATA_TF_ERROR_OFFSET;
port->feature_addr = base + VSC_SATA_TF_FEATURE_OFFSET;
port->nsect_addr = base + VSC_SATA_TF_NSECT_OFFSET;
port->lbal_addr = base + VSC_SATA_TF_LBAL_OFFSET;
port->lbam_addr = base + VSC_SATA_TF_LBAM_OFFSET;
port->lbah_addr = base + VSC_SATA_TF_LBAH_OFFSET;
port->device_addr = base + VSC_SATA_TF_DEVICE_OFFSET;
port->status_addr = base + VSC_SATA_TF_STATUS_OFFSET;
port->command_addr = base + VSC_SATA_TF_COMMAND_OFFSET;
port->altstatus_addr = base + VSC_SATA_TF_ALTSTATUS_OFFSET;
port->ctl_addr = base + VSC_SATA_TF_CTL_OFFSET;
port->bmdma_addr = base + VSC_SATA_DMA_CMD_OFFSET;
port->scr_addr = base + VSC_SATA_SCR_STATUS_OFFSET;
writel(0, base + VSC_SATA_UP_DESCRIPTOR_OFFSET);
writel(0, base + VSC_SATA_UP_DATA_BUFFER_OFFSET);
}
static int vsc_sata_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
static const struct ata_port_info pi = {
.flags = ATA_FLAG_SATA,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &vsc_sata_ops,
};
const struct ata_port_info *ppi[] = { &pi, NULL };
struct ata_host *host;
void __iomem *mmio_base;
int i, rc;
u8 cls;
ata_print_version_once(&pdev->dev, DRV_VERSION);
/* allocate host */
host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4);
if (!host)
return -ENOMEM;
rc = pcim_enable_device(pdev);
if (rc)
return rc;
/* check if we have needed resource mapped */
if (pci_resource_len(pdev, 0) == 0)
return -ENODEV;
/* map IO regions and initialize host accordingly */
rc = pcim_iomap_regions(pdev, 1 << VSC_MMIO_BAR, DRV_NAME);
if (rc == -EBUSY)
pcim_pin_device(pdev);
if (rc)
return rc;
host->iomap = pcim_iomap_table(pdev);
mmio_base = host->iomap[VSC_MMIO_BAR];
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
unsigned int offset = (i + 1) * VSC_SATA_PORT_OFFSET;
vsc_sata_setup_port(&ap->ioaddr, mmio_base + offset);
ata_port_pbar_desc(ap, VSC_MMIO_BAR, -1, "mmio");
ata_port_pbar_desc(ap, VSC_MMIO_BAR, offset, "port");
}
/*
* Use 32 bit DMA mask, because 64 bit address support is poor.
*/
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (rc)
return rc;
/*
* Due to a bug in the chip, the default cache line size can't be
* used (unless the default is non-zero).
*/
pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cls);
if (cls == 0x00)
pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x80);
if (pci_enable_msi(pdev) == 0)
pci_intx(pdev, 0);
/*
* Config offset 0x98 is "Extended Control and Status Register 0"
* Default value is (1 << 28). All bits except bit 28 are reserved in
* DPA mode. If bit 28 is set, LED 0 reflects all ports' activity.
* If bit 28 is clear, each port has its own LED.
*/
pci_write_config_dword(pdev, 0x98, 0);
pci_set_master(pdev);
return ata_host_activate(host, pdev->irq, vsc_sata_interrupt,
IRQF_SHARED, &vsc_sata_sht);
}
static const struct pci_device_id vsc_sata_pci_tbl[] = {
{ PCI_VENDOR_ID_VITESSE, 0x7174,
PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 },
{ PCI_VENDOR_ID_INTEL, 0x3200,
PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 },
{ } /* terminate list */
};
static struct pci_driver vsc_sata_pci_driver = {
.name = DRV_NAME,
.id_table = vsc_sata_pci_tbl,
.probe = vsc_sata_init_one,
.remove = ata_pci_remove_one,
};
module_pci_driver(vsc_sata_pci_driver);
MODULE_AUTHOR("Jeremy Higdon");
MODULE_DESCRIPTION("low-level driver for Vitesse VSC7174 SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, vsc_sata_pci_tbl);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/sata_vsc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 ioogle, Inc. All rights reserved.
*
* Libata transport class.
*
* The ATA transport class contains common code to deal with ATA HBAs,
* an approximated representation of ATA topologies in the driver model,
* and various sysfs attributes to expose these topologies and management
* interfaces to user-space.
*
* There are 3 objects defined in this class:
* - ata_port
* - ata_link
* - ata_device
* Each port has a link object. Each link can have up to two devices for PATA
* and generally one for SATA.
 * If there is a SATA port multiplier [PMP], 15 additional ata_link objects
 * are created.
*
* These objects are created when the ata host is initialized and when a PMP is
* found. They are removed only when the HBA is removed, cleaned before the
* error handler runs.
*/
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <scsi/scsi_transport.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
#include "libata.h"
#include "libata-transport.h"
#define ATA_PORT_ATTRS 3
#define ATA_LINK_ATTRS 3
#define ATA_DEV_ATTRS 9
struct scsi_transport_template;
struct scsi_transport_template *ata_scsi_transport_template;
struct ata_internal {
struct scsi_transport_template t;
struct device_attribute private_port_attrs[ATA_PORT_ATTRS];
struct device_attribute private_link_attrs[ATA_LINK_ATTRS];
struct device_attribute private_dev_attrs[ATA_DEV_ATTRS];
struct transport_container link_attr_cont;
struct transport_container dev_attr_cont;
/*
* The array of null terminated pointers to attributes
* needed by scsi_sysfs.c
*/
struct device_attribute *link_attrs[ATA_LINK_ATTRS + 1];
struct device_attribute *port_attrs[ATA_PORT_ATTRS + 1];
struct device_attribute *dev_attrs[ATA_DEV_ATTRS + 1];
};
#define to_ata_internal(tmpl) container_of(tmpl, struct ata_internal, t)
#define tdev_to_device(d) \
container_of((d), struct ata_device, tdev)
#define transport_class_to_dev(dev) \
tdev_to_device((dev)->parent)
#define tdev_to_link(d) \
container_of((d), struct ata_link, tdev)
#define transport_class_to_link(dev) \
tdev_to_link((dev)->parent)
#define tdev_to_port(d) \
container_of((d), struct ata_port, tdev)
#define transport_class_to_port(dev) \
tdev_to_port((dev)->parent)
/* Device objects are always created with link objects */
static int ata_tdev_add(struct ata_device *dev);
static void ata_tdev_delete(struct ata_device *dev);
/*
* Hack to allow attributes of the same name in different objects.
*/
#define ATA_DEVICE_ATTR(_prefix,_name,_mode,_show,_store) \
struct device_attribute device_attr_##_prefix##_##_name = \
__ATTR(_name,_mode,_show,_store)
#define ata_bitfield_name_match(title, table) \
static ssize_t \
get_ata_##title##_names(u32 table_key, char *buf) \
{ \
char *prefix = ""; \
ssize_t len = 0; \
int i; \
\
for (i = 0; i < ARRAY_SIZE(table); i++) { \
if (table[i].value & table_key) { \
len += sprintf(buf + len, "%s%s", \
prefix, table[i].name); \
prefix = ", "; \
} \
} \
len += sprintf(buf + len, "\n"); \
return len; \
}
#define ata_bitfield_name_search(title, table) \
static ssize_t \
get_ata_##title##_names(u32 table_key, char *buf) \
{ \
ssize_t len = 0; \
int i; \
\
for (i = 0; i < ARRAY_SIZE(table); i++) { \
if (table[i].value == table_key) { \
len += sprintf(buf + len, "%s", \
table[i].name); \
break; \
} \
} \
len += sprintf(buf + len, "\n"); \
return len; \
}
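/*
 * Editorial note (illustrative, not part of the driver): for a table such as
 * ata_class_names below, ata_bitfield_name_search(class, ata_class_names)
 * expands to roughly the following helper, which prints the name whose value
 * matches the key exactly (the _match variant instead prints every name whose
 * bits are set in the key, comma separated):
 *
 *	static ssize_t get_ata_class_names(u32 table_key, char *buf)
 *	{
 *		ssize_t len = 0;
 *		int i;
 *
 *		for (i = 0; i < ARRAY_SIZE(ata_class_names); i++) {
 *			if (ata_class_names[i].value == table_key) {
 *				len += sprintf(buf + len, "%s",
 *					       ata_class_names[i].name);
 *				break;
 *			}
 *		}
 *		len += sprintf(buf + len, "\n");
 *		return len;
 *	}
 */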
static struct {
u32 value;
char *name;
} ata_class_names[] = {
{ ATA_DEV_UNKNOWN, "unknown" },
{ ATA_DEV_ATA, "ata" },
{ ATA_DEV_ATA_UNSUP, "ata" },
{ ATA_DEV_ATAPI, "atapi" },
{ ATA_DEV_ATAPI_UNSUP, "atapi" },
{ ATA_DEV_PMP, "pmp" },
{ ATA_DEV_PMP_UNSUP, "pmp" },
{ ATA_DEV_SEMB, "semb" },
{ ATA_DEV_SEMB_UNSUP, "semb" },
{ ATA_DEV_ZAC, "zac" },
{ ATA_DEV_NONE, "none" }
};
ata_bitfield_name_search(class, ata_class_names)
static struct {
u32 value;
char *name;
} ata_err_names[] = {
{ AC_ERR_DEV, "DeviceError" },
{ AC_ERR_HSM, "HostStateMachineError" },
{ AC_ERR_TIMEOUT, "Timeout" },
{ AC_ERR_MEDIA, "MediaError" },
{ AC_ERR_ATA_BUS, "BusError" },
{ AC_ERR_HOST_BUS, "HostBusError" },
{ AC_ERR_SYSTEM, "SystemError" },
{ AC_ERR_INVALID, "InvalidArg" },
{ AC_ERR_OTHER, "Unknown" },
{ AC_ERR_NODEV_HINT, "NoDeviceHint" },
{ AC_ERR_NCQ, "NCQError" }
};
ata_bitfield_name_match(err, ata_err_names)
static struct {
u32 value;
char *name;
} ata_xfer_names[] = {
{ XFER_UDMA_7, "XFER_UDMA_7" },
{ XFER_UDMA_6, "XFER_UDMA_6" },
{ XFER_UDMA_5, "XFER_UDMA_5" },
{ XFER_UDMA_4, "XFER_UDMA_4" },
{ XFER_UDMA_3, "XFER_UDMA_3" },
{ XFER_UDMA_2, "XFER_UDMA_2" },
{ XFER_UDMA_1, "XFER_UDMA_1" },
{ XFER_UDMA_0, "XFER_UDMA_0" },
{ XFER_MW_DMA_4, "XFER_MW_DMA_4" },
{ XFER_MW_DMA_3, "XFER_MW_DMA_3" },
{ XFER_MW_DMA_2, "XFER_MW_DMA_2" },
{ XFER_MW_DMA_1, "XFER_MW_DMA_1" },
{ XFER_MW_DMA_0, "XFER_MW_DMA_0" },
{ XFER_SW_DMA_2, "XFER_SW_DMA_2" },
{ XFER_SW_DMA_1, "XFER_SW_DMA_1" },
{ XFER_SW_DMA_0, "XFER_SW_DMA_0" },
{ XFER_PIO_6, "XFER_PIO_6" },
{ XFER_PIO_5, "XFER_PIO_5" },
{ XFER_PIO_4, "XFER_PIO_4" },
{ XFER_PIO_3, "XFER_PIO_3" },
{ XFER_PIO_2, "XFER_PIO_2" },
{ XFER_PIO_1, "XFER_PIO_1" },
{ XFER_PIO_0, "XFER_PIO_0" },
{ XFER_PIO_SLOW, "XFER_PIO_SLOW" }
};
ata_bitfield_name_search(xfer, ata_xfer_names)
/*
* ATA Port attributes
*/
#define ata_port_show_simple(field, name, format_string, cast) \
static ssize_t \
show_ata_port_##name(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct ata_port *ap = transport_class_to_port(dev); \
\
return scnprintf(buf, 20, format_string, cast ap->field); \
}
#define ata_port_simple_attr(field, name, format_string, type) \
ata_port_show_simple(field, name, format_string, (type)) \
static DEVICE_ATTR(name, S_IRUGO, show_ata_port_##name, NULL)
ata_port_simple_attr(nr_pmp_links, nr_pmp_links, "%d\n", int);
ata_port_simple_attr(stats.idle_irq, idle_irq, "%ld\n", unsigned long);
ata_port_simple_attr(local_port_no, port_no, "%u\n", unsigned int);
static DECLARE_TRANSPORT_CLASS(ata_port_class,
"ata_port", NULL, NULL, NULL);
static void ata_tport_release(struct device *dev)
{
struct ata_port *ap = tdev_to_port(dev);
ata_host_put(ap->host);
}
/**
 * ata_is_port -- check if a struct device represents an ATA port
* @dev: device to check
*
* Returns:
 * %1 if the device represents an ATA port, %0 otherwise
*/
static int ata_is_port(const struct device *dev)
{
return dev->release == ata_tport_release;
}
static int ata_tport_match(struct attribute_container *cont,
struct device *dev)
{
if (!ata_is_port(dev))
return 0;
return &ata_scsi_transport_template->host_attrs.ac == cont;
}
/**
* ata_tport_delete -- remove ATA PORT
* @ap: ATA PORT to remove
*
 * Removes the specified ATA port, along with its associated link.
*/
void ata_tport_delete(struct ata_port *ap)
{
struct device *dev = &ap->tdev;
ata_tlink_delete(&ap->link);
transport_remove_device(dev);
device_del(dev);
transport_destroy_device(dev);
put_device(dev);
}
/**
 * ata_tport_add -- initialize a transport ATA port structure
*
* @parent: parent device
* @ap: existing ata_port structure
*
 * Initialize an ATA port structure for sysfs. It will be added to the device
* tree below the device specified by @parent which could be a PCI device.
*
* Returns %0 on success
*/
int ata_tport_add(struct device *parent,
struct ata_port *ap)
{
int error;
struct device *dev = &ap->tdev;
device_initialize(dev);
dev->type = &ata_port_type;
dev->parent = parent;
ata_host_get(ap->host);
dev->release = ata_tport_release;
dev_set_name(dev, "ata%d", ap->print_id);
transport_setup_device(dev);
ata_acpi_bind_port(ap);
error = device_add(dev);
if (error) {
goto tport_err;
}
device_enable_async_suspend(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
pm_runtime_forbid(dev);
error = transport_add_device(dev);
if (error)
goto tport_transport_add_err;
transport_configure_device(dev);
error = ata_tlink_add(&ap->link);
if (error) {
goto tport_link_err;
}
return 0;
tport_link_err:
transport_remove_device(dev);
tport_transport_add_err:
device_del(dev);
tport_err:
transport_destroy_device(dev);
put_device(dev);
return error;
}
/**
* ata_port_classify - determine device type based on ATA-spec signature
* @ap: ATA port device on which the classification should be run
* @tf: ATA taskfile register set for device to be identified
*
* A wrapper around ata_dev_classify() to provide additional logging
*
* RETURNS:
* Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
 * %ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN in the event of failure.
*/
unsigned int ata_port_classify(struct ata_port *ap,
const struct ata_taskfile *tf)
{
int i;
unsigned int class = ata_dev_classify(tf);
/* Start with index '1' to skip the 'unknown' entry */
for (i = 1; i < ARRAY_SIZE(ata_class_names); i++) {
if (ata_class_names[i].value == class) {
ata_port_dbg(ap, "found %s device by sig\n",
ata_class_names[i].name);
return class;
}
}
ata_port_info(ap, "found unknown device (class %u)\n", class);
return class;
}
EXPORT_SYMBOL_GPL(ata_port_classify);
/*
* ATA link attributes
*/
static int noop(int x) { return x; }
#define ata_link_show_linkspeed(field, format) \
static ssize_t \
show_ata_link_##field(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct ata_link *link = transport_class_to_link(dev); \
\
return sprintf(buf, "%s\n", sata_spd_string(format(link->field))); \
}
#define ata_link_linkspeed_attr(field, format) \
ata_link_show_linkspeed(field, format) \
static DEVICE_ATTR(field, S_IRUGO, show_ata_link_##field, NULL)
ata_link_linkspeed_attr(hw_sata_spd_limit, fls);
ata_link_linkspeed_attr(sata_spd_limit, fls);
ata_link_linkspeed_attr(sata_spd, noop);
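/*
 * Editorial note (illustrative, not part of the driver): hw_sata_spd_limit
 * and sata_spd_limit are bitmasks of allowed speeds (bit 0 = 1.5 Gbps,
 * bit 1 = 3.0 Gbps, bit 2 = 6.0 Gbps), so fls() converts them to the index
 * of the highest permitted speed before sata_spd_string() is called; e.g. a
 * limit of 0x3 yields fls(0x3) = 2, i.e. "3.0 Gbps". sata_spd already holds
 * such an index, hence the noop() wrapper above.
 */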
static DECLARE_TRANSPORT_CLASS(ata_link_class,
"ata_link", NULL, NULL, NULL);
static void ata_tlink_release(struct device *dev)
{
}
/**
 * ata_is_link -- check if a struct device represents an ATA link
* @dev: device to check
*
* Returns:
 * %1 if the device represents an ATA link, %0 otherwise
*/
static int ata_is_link(const struct device *dev)
{
return dev->release == ata_tlink_release;
}
static int ata_tlink_match(struct attribute_container *cont,
struct device *dev)
{
struct ata_internal* i = to_ata_internal(ata_scsi_transport_template);
if (!ata_is_link(dev))
return 0;
return &i->link_attr_cont.ac == cont;
}
/**
* ata_tlink_delete -- remove ATA LINK
* @link: ATA LINK to remove
*
 * Removes the specified ATA link, along with its associated ATA device(s).
*/
void ata_tlink_delete(struct ata_link *link)
{
struct device *dev = &link->tdev;
struct ata_device *ata_dev;
ata_for_each_dev(ata_dev, link, ALL) {
ata_tdev_delete(ata_dev);
}
transport_remove_device(dev);
device_del(dev);
transport_destroy_device(dev);
put_device(dev);
}
/**
* ata_tlink_add -- initialize a transport ATA link structure
* @link: allocated ata_link structure.
*
* Initialize an ATA LINK structure for sysfs. It will be added in the
* device tree below the ATA PORT it belongs to.
*
* Returns %0 on success
*/
int ata_tlink_add(struct ata_link *link)
{
struct device *dev = &link->tdev;
struct ata_port *ap = link->ap;
struct ata_device *ata_dev;
int error;
device_initialize(dev);
dev->parent = &ap->tdev;
dev->release = ata_tlink_release;
if (ata_is_host_link(link))
dev_set_name(dev, "link%d", ap->print_id);
else
dev_set_name(dev, "link%d.%d", ap->print_id, link->pmp);
transport_setup_device(dev);
error = device_add(dev);
if (error) {
goto tlink_err;
}
error = transport_add_device(dev);
if (error)
goto tlink_transport_err;
transport_configure_device(dev);
ata_for_each_dev(ata_dev, link, ALL) {
error = ata_tdev_add(ata_dev);
if (error) {
goto tlink_dev_err;
}
}
return 0;
tlink_dev_err:
while (--ata_dev >= link->device) {
ata_tdev_delete(ata_dev);
}
transport_remove_device(dev);
tlink_transport_err:
device_del(dev);
tlink_err:
transport_destroy_device(dev);
put_device(dev);
return error;
}
/*
* ATA device attributes
*/
#define ata_dev_show_class(title, field) \
static ssize_t \
show_ata_dev_##field(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct ata_device *ata_dev = transport_class_to_dev(dev); \
\
return get_ata_##title##_names(ata_dev->field, buf); \
}
#define ata_dev_attr(title, field) \
ata_dev_show_class(title, field) \
static DEVICE_ATTR(field, S_IRUGO, show_ata_dev_##field, NULL)
ata_dev_attr(class, class);
ata_dev_attr(xfer, pio_mode);
ata_dev_attr(xfer, dma_mode);
ata_dev_attr(xfer, xfer_mode);
#define ata_dev_show_simple(field, format_string, cast) \
static ssize_t \
show_ata_dev_##field(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct ata_device *ata_dev = transport_class_to_dev(dev); \
\
return scnprintf(buf, 20, format_string, cast ata_dev->field); \
}
#define ata_dev_simple_attr(field, format_string, type) \
ata_dev_show_simple(field, format_string, (type)) \
static DEVICE_ATTR(field, S_IRUGO, \
show_ata_dev_##field, NULL)
ata_dev_simple_attr(spdn_cnt, "%d\n", int);
struct ata_show_ering_arg {
char* buf;
int written;
};
static int ata_show_ering(struct ata_ering_entry *ent, void *void_arg)
{
struct ata_show_ering_arg* arg = void_arg;
u64 seconds;
u32 rem;
seconds = div_u64_rem(ent->timestamp, HZ, &rem);
arg->written += sprintf(arg->buf + arg->written,
"[%5llu.%09lu]", seconds,
rem * NSEC_PER_SEC / HZ);
arg->written += get_ata_err_names(ent->err_mask,
arg->buf + arg->written);
return 0;
}
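/*
 * Editorial note (worked example, not part of the driver): ent->timestamp is
 * in jiffies, so with HZ == 250 a value of 1234567 gives seconds == 4938 and
 * rem == 67, and the printed prefix is "[ 4938.268000000]" since
 * 67 * NSEC_PER_SEC / HZ == 268000000.
 */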
static ssize_t
show_ata_dev_ering(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ata_device *ata_dev = transport_class_to_dev(dev);
struct ata_show_ering_arg arg = { buf, 0 };
ata_ering_map(&ata_dev->ering, ata_show_ering, &arg);
return arg.written;
}
static DEVICE_ATTR(ering, S_IRUGO, show_ata_dev_ering, NULL);
static ssize_t
show_ata_dev_id(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ata_device *ata_dev = transport_class_to_dev(dev);
int written = 0, i = 0;
if (ata_dev->class == ATA_DEV_PMP)
return 0;
for(i=0;i<ATA_ID_WORDS;i++) {
written += scnprintf(buf+written, 20, "%04x%c",
ata_dev->id[i],
((i+1) & 7) ? ' ' : '\n');
}
return written;
}
static DEVICE_ATTR(id, S_IRUGO, show_ata_dev_id, NULL);
static ssize_t
show_ata_dev_gscr(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ata_device *ata_dev = transport_class_to_dev(dev);
int written = 0, i = 0;
if (ata_dev->class != ATA_DEV_PMP)
return 0;
for(i=0;i<SATA_PMP_GSCR_DWORDS;i++) {
written += scnprintf(buf+written, 20, "%08x%c",
ata_dev->gscr[i],
((i+1) & 3) ? ' ' : '\n');
}
if (SATA_PMP_GSCR_DWORDS & 3)
buf[written-1] = '\n';
return written;
}
static DEVICE_ATTR(gscr, S_IRUGO, show_ata_dev_gscr, NULL);
static ssize_t
show_ata_dev_trim(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ata_device *ata_dev = transport_class_to_dev(dev);
unsigned char *mode;
if (!ata_id_has_trim(ata_dev->id))
mode = "unsupported";
else if (ata_dev->horkage & ATA_HORKAGE_NOTRIM)
mode = "forced_unsupported";
else if (ata_dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM)
mode = "forced_unqueued";
else if (ata_fpdma_dsm_supported(ata_dev))
mode = "queued";
else
mode = "unqueued";
return scnprintf(buf, 20, "%s\n", mode);
}
static DEVICE_ATTR(trim, S_IRUGO, show_ata_dev_trim, NULL);
static DECLARE_TRANSPORT_CLASS(ata_dev_class,
"ata_device", NULL, NULL, NULL);
static void ata_tdev_release(struct device *dev)
{
}
/**
 * ata_is_ata_dev -- check if a struct device represents an ATA device
* @dev: device to check
*
* Returns:
 * %1 if the device represents an ATA device, %0 otherwise
*/
static int ata_is_ata_dev(const struct device *dev)
{
return dev->release == ata_tdev_release;
}
static int ata_tdev_match(struct attribute_container *cont,
struct device *dev)
{
struct ata_internal* i = to_ata_internal(ata_scsi_transport_template);
if (!ata_is_ata_dev(dev))
return 0;
return &i->dev_attr_cont.ac == cont;
}
/**
 * ata_tdev_free -- free an ATA device
 * @dev: ATA device to free
 *
 * Frees the specified ATA device.
 *
 * Note:
 *   This function must only be called on a device that has not
 *   successfully been added using ata_tdev_add().
*/
static void ata_tdev_free(struct ata_device *dev)
{
transport_destroy_device(&dev->tdev);
put_device(&dev->tdev);
}
/**
* ata_tdev_delete -- remove ATA device
* @ata_dev: ATA device to remove
*
* Removes the specified ATA device.
*/
static void ata_tdev_delete(struct ata_device *ata_dev)
{
struct device *dev = &ata_dev->tdev;
transport_remove_device(dev);
device_del(dev);
ata_tdev_free(ata_dev);
}
/**
* ata_tdev_add -- initialize a transport ATA device structure.
* @ata_dev: ata_dev structure.
*
* Initialize an ATA device structure for sysfs. It will be added in the
* device tree below the ATA LINK device it belongs to.
*
* Returns %0 on success
*/
static int ata_tdev_add(struct ata_device *ata_dev)
{
struct device *dev = &ata_dev->tdev;
struct ata_link *link = ata_dev->link;
struct ata_port *ap = link->ap;
int error;
device_initialize(dev);
dev->parent = &link->tdev;
dev->release = ata_tdev_release;
if (ata_is_host_link(link))
dev_set_name(dev, "dev%d.%d", ap->print_id,ata_dev->devno);
else
dev_set_name(dev, "dev%d.%d.0", ap->print_id, link->pmp);
transport_setup_device(dev);
ata_acpi_bind_dev(ata_dev);
error = device_add(dev);
if (error) {
ata_tdev_free(ata_dev);
return error;
}
error = transport_add_device(dev);
if (error) {
device_del(dev);
ata_tdev_free(ata_dev);
return error;
}
transport_configure_device(dev);
return 0;
}
/*
* Setup / Teardown code
*/
#define SETUP_TEMPLATE(attrb, field, perm, test) \
i->private_##attrb[count] = dev_attr_##field; \
i->private_##attrb[count].attr.mode = perm; \
i->attrb[count] = &i->private_##attrb[count]; \
if (test) \
count++
#define SETUP_LINK_ATTRIBUTE(field) \
SETUP_TEMPLATE(link_attrs, field, S_IRUGO, 1)
#define SETUP_PORT_ATTRIBUTE(field) \
SETUP_TEMPLATE(port_attrs, field, S_IRUGO, 1)
#define SETUP_DEV_ATTRIBUTE(field) \
SETUP_TEMPLATE(dev_attrs, field, S_IRUGO, 1)
/**
* ata_attach_transport -- instantiate ATA transport template
*/
struct scsi_transport_template *ata_attach_transport(void)
{
struct ata_internal *i;
int count;
i = kzalloc(sizeof(struct ata_internal), GFP_KERNEL);
if (!i)
return NULL;
i->t.eh_strategy_handler = ata_scsi_error;
i->t.user_scan = ata_scsi_user_scan;
i->t.host_attrs.ac.attrs = &i->port_attrs[0];
i->t.host_attrs.ac.class = &ata_port_class.class;
i->t.host_attrs.ac.match = ata_tport_match;
transport_container_register(&i->t.host_attrs);
i->link_attr_cont.ac.class = &ata_link_class.class;
i->link_attr_cont.ac.attrs = &i->link_attrs[0];
i->link_attr_cont.ac.match = ata_tlink_match;
transport_container_register(&i->link_attr_cont);
i->dev_attr_cont.ac.class = &ata_dev_class.class;
i->dev_attr_cont.ac.attrs = &i->dev_attrs[0];
i->dev_attr_cont.ac.match = ata_tdev_match;
transport_container_register(&i->dev_attr_cont);
count = 0;
SETUP_PORT_ATTRIBUTE(nr_pmp_links);
SETUP_PORT_ATTRIBUTE(idle_irq);
SETUP_PORT_ATTRIBUTE(port_no);
BUG_ON(count > ATA_PORT_ATTRS);
i->port_attrs[count] = NULL;
count = 0;
SETUP_LINK_ATTRIBUTE(hw_sata_spd_limit);
SETUP_LINK_ATTRIBUTE(sata_spd_limit);
SETUP_LINK_ATTRIBUTE(sata_spd);
BUG_ON(count > ATA_LINK_ATTRS);
i->link_attrs[count] = NULL;
count = 0;
SETUP_DEV_ATTRIBUTE(class);
SETUP_DEV_ATTRIBUTE(pio_mode);
SETUP_DEV_ATTRIBUTE(dma_mode);
SETUP_DEV_ATTRIBUTE(xfer_mode);
SETUP_DEV_ATTRIBUTE(spdn_cnt);
SETUP_DEV_ATTRIBUTE(ering);
SETUP_DEV_ATTRIBUTE(id);
SETUP_DEV_ATTRIBUTE(gscr);
SETUP_DEV_ATTRIBUTE(trim);
BUG_ON(count > ATA_DEV_ATTRS);
i->dev_attrs[count] = NULL;
return &i->t;
}
/**
* ata_release_transport -- release ATA transport template instance
* @t: transport template instance
*/
void ata_release_transport(struct scsi_transport_template *t)
{
struct ata_internal *i = to_ata_internal(t);
transport_container_unregister(&i->t.host_attrs);
transport_container_unregister(&i->link_attr_cont);
transport_container_unregister(&i->dev_attr_cont);
kfree(i);
}
__init int libata_transport_init(void)
{
int error;
error = transport_class_register(&ata_link_class);
if (error)
goto out_unregister_transport;
error = transport_class_register(&ata_port_class);
if (error)
goto out_unregister_link;
error = transport_class_register(&ata_dev_class);
if (error)
goto out_unregister_port;
return 0;
out_unregister_port:
transport_class_unregister(&ata_port_class);
out_unregister_link:
transport_class_unregister(&ata_link_class);
out_unregister_transport:
return error;
}
void __exit libata_transport_exit(void)
{
transport_class_unregister(&ata_link_class);
transport_class_unregister(&ata_port_class);
transport_class_unregister(&ata_dev_class);
}
| linux-master | drivers/ata/libata-transport.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* pata_it8213.c - iTE Tech. Inc. IT8213 PATA driver
*
* The IT8213 is a very Intel ICH like device for timing purposes, having
* a similar register layout and the same split clock arrangement. Cable
* detection is different, and it does not have slave channels or all the
* clutter of later ICH/SATA setups.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/ata.h>
#define DRV_NAME "pata_it8213"
#define DRV_VERSION "0.0.3"
/**
* it8213_pre_reset - probe begin
* @link: link
* @deadline: deadline jiffies for the operation
*
* Filter out ports by the enable bits before doing the normal reset
* and probe.
*/
static int it8213_pre_reset(struct ata_link *link, unsigned long deadline)
{
static const struct pci_bits it8213_enable_bits[] = {
{ 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */
};
struct ata_port *ap = link->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
if (!pci_test_config_bits(pdev, &it8213_enable_bits[ap->port_no]))
return -ENOENT;
return ata_sff_prereset(link, deadline);
}
/**
* it8213_cable_detect - check for 40/80 pin
* @ap: Port
*
* Perform cable detection for the 8213 ATA interface. This is
 * different from the PIIX arrangement.
*/
static int it8213_cable_detect(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u8 tmp;
pci_read_config_byte(pdev, 0x42, &tmp);
if (tmp & 2) /* The initial docs are incorrect */
return ATA_CBL_PATA40;
return ATA_CBL_PATA80;
}
/**
* it8213_set_piomode - Initialize host controller PATA PIO timings
* @ap: Port whose timings we are configuring
* @adev: Device whose timings we are configuring
*
* Set PIO mode for device, in host controller PCI config space.
*
* LOCKING:
* None (inherited from caller).
*/
static void it8213_set_piomode (struct ata_port *ap, struct ata_device *adev)
{
unsigned int pio = adev->pio_mode - XFER_PIO_0;
struct pci_dev *dev = to_pci_dev(ap->host->dev);
unsigned int master_port = ap->port_no ? 0x42 : 0x40;
u16 master_data;
int control = 0;
/*
	 * See Intel Document 298600-004 for the timing programming rules
	 * for PIIX/ICH. The 8213 is a clone, so it is very similar.
*/
static const /* ISP RTC */
u8 timings[][2] = { { 0, 0 },
{ 0, 0 },
{ 1, 0 },
{ 2, 1 },
{ 2, 3 }, };
if (pio > 1)
control |= 1; /* TIME */
if (ata_pio_need_iordy(adev)) /* PIO 3/4 require IORDY */
control |= 2; /* IE */
/* Bit 2 is set for ATAPI on the IT8213 - reverse of ICH/PIIX */
if (adev->class != ATA_DEV_ATA)
control |= 4; /* PPE */
pci_read_config_word(dev, master_port, &master_data);
/* Set PPE, IE, and TIME as appropriate */
if (adev->devno == 0) {
master_data &= 0xCCF0;
master_data |= control;
master_data |= (timings[pio][0] << 12) |
(timings[pio][1] << 8);
} else {
u8 slave_data;
master_data &= 0xFF0F;
master_data |= (control << 4);
/* Slave timing in separate register */
pci_read_config_byte(dev, 0x44, &slave_data);
slave_data &= 0xF0;
slave_data |= (timings[pio][0] << 2) | timings[pio][1];
pci_write_config_byte(dev, 0x44, slave_data);
}
master_data |= 0x4000; /* Ensure SITRE is set */
pci_write_config_word(dev, master_port, master_data);
}
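/*
 * Editorial note (worked example, not part of the driver): for PIO4 on the
 * master device of an ATA (non-ATAPI) drive that needs IORDY, pio == 4
 * selects timings[4] = { 2, 3 } (ISP = 2, RTC = 3) and control == 3
 * (IE | TIME), so the update above amounts to:
 *
 *	master_data &= 0xCCF0;
 *	master_data |= 0x3;			// IE | TIME
 *	master_data |= (2 << 12) | (3 << 8);	// ISP, RTC fields
 *	master_data |= 0x4000;			// SITRE
 */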
/**
* it8213_set_dmamode - Initialize host controller PATA DMA timings
* @ap: Port whose timings we are configuring
* @adev: Device to program
*
* Set UDMA/MWDMA mode for device, in host controller PCI config space.
* This device is basically an ICH alike.
*
* LOCKING:
* None (inherited from caller).
*/
static void it8213_set_dmamode (struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *dev = to_pci_dev(ap->host->dev);
u16 master_data;
u8 speed = adev->dma_mode;
int devid = adev->devno;
u8 udma_enable;
static const /* ISP RTC */
u8 timings[][2] = { { 0, 0 },
{ 0, 0 },
{ 1, 0 },
{ 2, 1 },
{ 2, 3 }, };
pci_read_config_word(dev, 0x40, &master_data);
pci_read_config_byte(dev, 0x48, &udma_enable);
if (speed >= XFER_UDMA_0) {
unsigned int udma = adev->dma_mode - XFER_UDMA_0;
u16 udma_timing;
u16 ideconf;
int u_clock, u_speed;
/* Clocks follow the PIIX style */
u_speed = min(2 - (udma & 1), udma);
if (udma > 4)
u_clock = 0x1000; /* 100Mhz */
else if (udma > 2)
u_clock = 1; /* 66Mhz */
else
u_clock = 0; /* 33Mhz */
udma_enable |= (1 << devid);
/* Load the UDMA cycle time */
pci_read_config_word(dev, 0x4A, &udma_timing);
udma_timing &= ~(3 << (4 * devid));
udma_timing |= u_speed << (4 * devid);
pci_write_config_word(dev, 0x4A, udma_timing);
/* Load the clock selection */
pci_read_config_word(dev, 0x54, &ideconf);
ideconf &= ~(0x1001 << devid);
ideconf |= u_clock << devid;
pci_write_config_word(dev, 0x54, ideconf);
} else {
/*
* MWDMA is driven by the PIO timings. We must also enable
* IORDY unconditionally along with TIME1. PPE has already
* been set when the PIO timing was set.
*/
unsigned int mwdma = adev->dma_mode - XFER_MW_DMA_0;
unsigned int control;
u8 slave_data;
static const unsigned int needed_pio[3] = {
XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
};
int pio = needed_pio[mwdma] - XFER_PIO_0;
control = 3; /* IORDY|TIME1 */
/* If the drive MWDMA is faster than it can do PIO then
we must force PIO into PIO0 */
if (adev->pio_mode < needed_pio[mwdma])
/* Enable DMA timing only */
control |= 8; /* PIO cycles in PIO0 */
if (devid) { /* Slave */
master_data &= 0xFF4F; /* Mask out IORDY|TIME1|DMAONLY */
master_data |= control << 4;
pci_read_config_byte(dev, 0x44, &slave_data);
slave_data &= 0xF0;
/* Load the matching timing */
slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0);
pci_write_config_byte(dev, 0x44, slave_data);
} else { /* Master */
master_data &= 0xCCF4; /* Mask out IORDY|TIME1|DMAONLY
and master timing bits */
master_data |= control;
master_data |=
(timings[pio][0] << 12) |
(timings[pio][1] << 8);
}
udma_enable &= ~(1 << devid);
pci_write_config_word(dev, 0x40, master_data);
}
pci_write_config_byte(dev, 0x48, udma_enable);
}
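/*
 * Editorial note (worked example, not part of the driver): for UDMA5 on
 * device 0, udma == 5, so u_speed = min(2 - (5 & 1), 5) = 1 and
 * u_clock = 0x1000 (100 MHz). The low two bits of device 0's nibble in the
 * UDMA timing word at config offset 0x4A are set to 1, and
 * "ideconf |= u_clock << devid" sets bit 12 of the clock-select word at
 * offset 0x54.
 */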
static const struct scsi_host_template it8213_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static struct ata_port_operations it8213_ops = {
.inherits = &ata_bmdma_port_ops,
.cable_detect = it8213_cable_detect,
.set_piomode = it8213_set_piomode,
.set_dmamode = it8213_set_dmamode,
.prereset = it8213_pre_reset,
};
/**
* it8213_init_one - Register 8213 ATA PCI device with kernel services
* @pdev: PCI device to register
* @ent: Entry in it8213_pci_tbl matching with @pdev
*
* Called from kernel PCI layer.
*
* LOCKING:
* Inherited from PCI layer (may sleep).
*
* RETURNS:
* Zero on success, or -ERRNO value.
*/
static int it8213_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA12_ONLY,
.udma_mask = ATA_UDMA6,
.port_ops = &it8213_ops,
};
/* Current IT8213 stuff is single port */
const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info };
ata_print_version_once(&pdev->dev, DRV_VERSION);
return ata_pci_bmdma_init_one(pdev, ppi, &it8213_sht, NULL, 0);
}
static const struct pci_device_id it8213_pci_tbl[] = {
{ PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8213), },
{ } /* terminate list */
};
static struct pci_driver it8213_pci_driver = {
.name = DRV_NAME,
.id_table = it8213_pci_tbl,
.probe = it8213_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
};
module_pci_driver(it8213_pci_driver);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("SCSI low-level driver for the ITE 8213");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, it8213_pci_tbl);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_it8213.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* drivers/ata/ahci_tegra.c
*
* Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
*
* Author:
* Mikko Perttunen <[email protected]>
*/
#include <linux/ahci_platform.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <soc/tegra/fuse.h>
#include <soc/tegra/pmc.h>
#include "ahci.h"
#define DRV_NAME "tegra-ahci"
#define SATA_CONFIGURATION_0 0x180
#define SATA_CONFIGURATION_0_EN_FPCI BIT(0)
#define SATA_CONFIGURATION_0_CLK_OVERRIDE BIT(31)
#define SCFG_OFFSET 0x1000
#define T_SATA0_CFG_1 0x04
#define T_SATA0_CFG_1_IO_SPACE BIT(0)
#define T_SATA0_CFG_1_MEMORY_SPACE BIT(1)
#define T_SATA0_CFG_1_BUS_MASTER BIT(2)
#define T_SATA0_CFG_1_SERR BIT(8)
#define T_SATA0_CFG_9 0x24
#define T_SATA0_CFG_9_BASE_ADDRESS 0x40020000
#define SATA_FPCI_BAR5 0x94
#define SATA_FPCI_BAR5_START_MASK (0xfffffff << 4)
#define SATA_FPCI_BAR5_START (0x0040020 << 4)
#define SATA_FPCI_BAR5_ACCESS_TYPE (0x1)
#define SATA_INTR_MASK 0x188
#define SATA_INTR_MASK_IP_INT_MASK BIT(16)
#define T_SATA0_CFG_35 0x94
#define T_SATA0_CFG_35_IDP_INDEX_MASK (0x7ff << 2)
#define T_SATA0_CFG_35_IDP_INDEX (0x2a << 2)
#define T_SATA0_AHCI_IDP1 0x98
#define T_SATA0_AHCI_IDP1_DATA (0x400040)
#define T_SATA0_CFG_PHY_1 0x12c
#define T_SATA0_CFG_PHY_1_PADS_IDDQ_EN BIT(23)
#define T_SATA0_CFG_PHY_1_PAD_PLL_IDDQ_EN BIT(22)
#define T_SATA0_NVOOB 0x114
#define T_SATA0_NVOOB_SQUELCH_FILTER_MODE_MASK (0x3 << 24)
#define T_SATA0_NVOOB_SQUELCH_FILTER_MODE (0x1 << 24)
#define T_SATA0_NVOOB_SQUELCH_FILTER_LENGTH_MASK (0x3 << 26)
#define T_SATA0_NVOOB_SQUELCH_FILTER_LENGTH (0x3 << 26)
#define T_SATA_CFG_PHY_0 0x120
#define T_SATA_CFG_PHY_0_USE_7BIT_ALIGN_DET_FOR_SPD BIT(11)
#define T_SATA_CFG_PHY_0_MASK_SQUELCH BIT(24)
#define T_SATA0_CFG2NVOOB_2 0x134
#define T_SATA0_CFG2NVOOB_2_COMWAKE_IDLE_CNT_LOW_MASK (0x1ff << 18)
#define T_SATA0_CFG2NVOOB_2_COMWAKE_IDLE_CNT_LOW (0xc << 18)
#define T_SATA0_AHCI_HBA_CAP_BKDR 0x300
#define T_SATA0_AHCI_HBA_CAP_BKDR_PARTIAL_ST_CAP BIT(13)
#define T_SATA0_AHCI_HBA_CAP_BKDR_SLUMBER_ST_CAP BIT(14)
#define T_SATA0_AHCI_HBA_CAP_BKDR_SALP BIT(26)
#define T_SATA0_AHCI_HBA_CAP_BKDR_SUPP_PM BIT(17)
#define T_SATA0_AHCI_HBA_CAP_BKDR_SNCQ BIT(30)
#define T_SATA0_BKDOOR_CC 0x4a4
#define T_SATA0_BKDOOR_CC_CLASS_CODE_MASK (0xffff << 16)
#define T_SATA0_BKDOOR_CC_CLASS_CODE (0x0106 << 16)
#define T_SATA0_BKDOOR_CC_PROG_IF_MASK (0xff << 8)
#define T_SATA0_BKDOOR_CC_PROG_IF (0x01 << 8)
#define T_SATA0_CFG_SATA 0x54c
#define T_SATA0_CFG_SATA_BACKDOOR_PROG_IF_EN BIT(12)
#define T_SATA0_CFG_MISC 0x550
#define T_SATA0_INDEX 0x680
#define T_SATA0_CHX_PHY_CTRL1_GEN1 0x690
#define T_SATA0_CHX_PHY_CTRL1_GEN1_TX_AMP_MASK 0xff
#define T_SATA0_CHX_PHY_CTRL1_GEN1_TX_AMP_SHIFT 0
#define T_SATA0_CHX_PHY_CTRL1_GEN1_TX_PEAK_MASK (0xff << 8)
#define T_SATA0_CHX_PHY_CTRL1_GEN1_TX_PEAK_SHIFT 8
#define T_SATA0_CHX_PHY_CTRL1_GEN2 0x694
#define T_SATA0_CHX_PHY_CTRL1_GEN2_TX_AMP_MASK 0xff
#define T_SATA0_CHX_PHY_CTRL1_GEN2_TX_AMP_SHIFT 0
#define T_SATA0_CHX_PHY_CTRL1_GEN2_TX_PEAK_MASK (0xff << 12)
#define T_SATA0_CHX_PHY_CTRL1_GEN2_TX_PEAK_SHIFT 12
#define T_SATA0_CHX_PHY_CTRL2 0x69c
#define T_SATA0_CHX_PHY_CTRL2_CDR_CNTL_GEN1 0x23
#define T_SATA0_CHX_PHY_CTRL11 0x6d0
#define T_SATA0_CHX_PHY_CTRL11_GEN2_RX_EQ (0x2800 << 16)
#define T_SATA0_CHX_PHY_CTRL17_0 0x6e8
#define T_SATA0_CHX_PHY_CTRL17_0_RX_EQ_CTRL_L_GEN1 0x55010000
#define T_SATA0_CHX_PHY_CTRL18_0 0x6ec
#define T_SATA0_CHX_PHY_CTRL18_0_RX_EQ_CTRL_L_GEN2 0x55010000
#define T_SATA0_CHX_PHY_CTRL20_0 0x6f4
#define T_SATA0_CHX_PHY_CTRL20_0_RX_EQ_CTRL_H_GEN1 0x1
#define T_SATA0_CHX_PHY_CTRL21_0 0x6f8
#define T_SATA0_CHX_PHY_CTRL21_0_RX_EQ_CTRL_H_GEN2 0x1
/* AUX Registers */
#define SATA_AUX_MISC_CNTL_1_0 0x8
#define SATA_AUX_MISC_CNTL_1_0_DEVSLP_OVERRIDE BIT(17)
#define SATA_AUX_MISC_CNTL_1_0_SDS_SUPPORT BIT(13)
#define SATA_AUX_MISC_CNTL_1_0_DESO_SUPPORT BIT(15)
#define SATA_AUX_RX_STAT_INT_0 0xc
#define SATA_AUX_RX_STAT_INT_0_SATA_DEVSLP BIT(7)
#define SATA_AUX_SPARE_CFG0_0 0x18
#define SATA_AUX_SPARE_CFG0_0_MDAT_TIMER_AFTER_PG_VALID BIT(14)
#define FUSE_SATA_CALIB 0x124
#define FUSE_SATA_CALIB_MASK 0x3
struct sata_pad_calibration {
u8 gen1_tx_amp;
u8 gen1_tx_peak;
u8 gen2_tx_amp;
u8 gen2_tx_peak;
};
static const struct sata_pad_calibration tegra124_pad_calibration[] = {
{0x18, 0x04, 0x18, 0x0a},
{0x0e, 0x04, 0x14, 0x0a},
{0x0e, 0x07, 0x1a, 0x0e},
{0x14, 0x0e, 0x1a, 0x0e},
};
struct tegra_ahci_ops {
int (*init)(struct ahci_host_priv *hpriv);
};
struct tegra_ahci_regs {
unsigned int nvoob_comma_cnt_mask;
unsigned int nvoob_comma_cnt_val;
};
struct tegra_ahci_soc {
const char *const *supply_names;
u32 num_supplies;
bool supports_devslp;
bool has_sata_oob_rst;
const struct tegra_ahci_ops *ops;
const struct tegra_ahci_regs *regs;
};
struct tegra_ahci_priv {
struct platform_device *pdev;
void __iomem *sata_regs;
void __iomem *sata_aux_regs;
struct reset_control *sata_rst;
struct reset_control *sata_oob_rst;
struct reset_control *sata_cold_rst;
/* Needs special handling, cannot use ahci_platform */
struct clk *sata_clk;
struct regulator_bulk_data *supplies;
const struct tegra_ahci_soc *soc;
};
static void tegra_ahci_handle_quirks(struct ahci_host_priv *hpriv)
{
struct tegra_ahci_priv *tegra = hpriv->plat_data;
u32 val;
if (tegra->sata_aux_regs && !tegra->soc->supports_devslp) {
val = readl(tegra->sata_aux_regs + SATA_AUX_MISC_CNTL_1_0);
val &= ~SATA_AUX_MISC_CNTL_1_0_SDS_SUPPORT;
writel(val, tegra->sata_aux_regs + SATA_AUX_MISC_CNTL_1_0);
}
}
static int tegra124_ahci_init(struct ahci_host_priv *hpriv)
{
struct tegra_ahci_priv *tegra = hpriv->plat_data;
struct sata_pad_calibration calib;
int ret;
u32 val;
/* Pad calibration */
ret = tegra_fuse_readl(FUSE_SATA_CALIB, &val);
if (ret)
return ret;
calib = tegra124_pad_calibration[val & FUSE_SATA_CALIB_MASK];
writel(BIT(0), tegra->sata_regs + SCFG_OFFSET + T_SATA0_INDEX);
val = readl(tegra->sata_regs +
SCFG_OFFSET + T_SATA0_CHX_PHY_CTRL1_GEN1);
val &= ~T_SATA0_CHX_PHY_CTRL1_GEN1_TX_AMP_MASK;
val &= ~T_SATA0_CHX_PHY_CTRL1_GEN1_TX_PEAK_MASK;
val |= calib.gen1_tx_amp << T_SATA0_CHX_PHY_CTRL1_GEN1_TX_AMP_SHIFT;
val |= calib.gen1_tx_peak << T_SATA0_CHX_PHY_CTRL1_GEN1_TX_PEAK_SHIFT;
writel(val, tegra->sata_regs + SCFG_OFFSET +
T_SATA0_CHX_PHY_CTRL1_GEN1);
val = readl(tegra->sata_regs +
SCFG_OFFSET + T_SATA0_CHX_PHY_CTRL1_GEN2);
val &= ~T_SATA0_CHX_PHY_CTRL1_GEN2_TX_AMP_MASK;
val &= ~T_SATA0_CHX_PHY_CTRL1_GEN2_TX_PEAK_MASK;
val |= calib.gen2_tx_amp << T_SATA0_CHX_PHY_CTRL1_GEN1_TX_AMP_SHIFT;
val |= calib.gen2_tx_peak << T_SATA0_CHX_PHY_CTRL1_GEN1_TX_PEAK_SHIFT;
writel(val, tegra->sata_regs + SCFG_OFFSET +
T_SATA0_CHX_PHY_CTRL1_GEN2);
writel(T_SATA0_CHX_PHY_CTRL11_GEN2_RX_EQ,
tegra->sata_regs + SCFG_OFFSET + T_SATA0_CHX_PHY_CTRL11);
writel(T_SATA0_CHX_PHY_CTRL2_CDR_CNTL_GEN1,
tegra->sata_regs + SCFG_OFFSET + T_SATA0_CHX_PHY_CTRL2);
writel(0, tegra->sata_regs + SCFG_OFFSET + T_SATA0_INDEX);
return 0;
}
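/*
 * Editorial note (illustrative, not part of the driver): only the two low
 * bits of the FUSE_SATA_CALIB word are used, indexing
 * tegra124_pad_calibration[]; e.g. a fuse value of 2 selects
 * { 0x0e, 0x07, 0x1a, 0x0e }, whose Gen1/Gen2 amplitude and peak values are
 * written into T_SATA0_CHX_PHY_CTRL1_GEN1/GEN2 above.
 */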
static int tegra_ahci_power_on(struct ahci_host_priv *hpriv)
{
struct tegra_ahci_priv *tegra = hpriv->plat_data;
int ret;
ret = regulator_bulk_enable(tegra->soc->num_supplies,
tegra->supplies);
if (ret)
return ret;
if (!tegra->pdev->dev.pm_domain) {
ret = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_SATA,
tegra->sata_clk,
tegra->sata_rst);
if (ret)
goto disable_regulators;
}
reset_control_assert(tegra->sata_oob_rst);
reset_control_assert(tegra->sata_cold_rst);
ret = ahci_platform_enable_resources(hpriv);
if (ret)
goto disable_power;
reset_control_deassert(tegra->sata_cold_rst);
reset_control_deassert(tegra->sata_oob_rst);
return 0;
disable_power:
clk_disable_unprepare(tegra->sata_clk);
if (!tegra->pdev->dev.pm_domain)
tegra_powergate_power_off(TEGRA_POWERGATE_SATA);
disable_regulators:
regulator_bulk_disable(tegra->soc->num_supplies, tegra->supplies);
return ret;
}
static void tegra_ahci_power_off(struct ahci_host_priv *hpriv)
{
struct tegra_ahci_priv *tegra = hpriv->plat_data;
ahci_platform_disable_resources(hpriv);
reset_control_assert(tegra->sata_rst);
reset_control_assert(tegra->sata_oob_rst);
reset_control_assert(tegra->sata_cold_rst);
clk_disable_unprepare(tegra->sata_clk);
if (!tegra->pdev->dev.pm_domain)
tegra_powergate_power_off(TEGRA_POWERGATE_SATA);
regulator_bulk_disable(tegra->soc->num_supplies, tegra->supplies);
}
static int tegra_ahci_controller_init(struct ahci_host_priv *hpriv)
{
struct tegra_ahci_priv *tegra = hpriv->plat_data;
int ret;
u32 val;
ret = tegra_ahci_power_on(hpriv);
if (ret) {
dev_err(&tegra->pdev->dev,
"failed to power on AHCI controller: %d\n", ret);
return ret;
}
/*
* Program the following SATA IPFS registers to allow SW accesses to
* SATA's MMIO register range.
*/
val = readl(tegra->sata_regs + SATA_FPCI_BAR5);
val &= ~(SATA_FPCI_BAR5_START_MASK | SATA_FPCI_BAR5_ACCESS_TYPE);
val |= SATA_FPCI_BAR5_START | SATA_FPCI_BAR5_ACCESS_TYPE;
writel(val, tegra->sata_regs + SATA_FPCI_BAR5);
/* Program the following SATA IPFS register to enable the SATA */
val = readl(tegra->sata_regs + SATA_CONFIGURATION_0);
val |= SATA_CONFIGURATION_0_EN_FPCI;
writel(val, tegra->sata_regs + SATA_CONFIGURATION_0);
/* Electrical settings for better link stability */
val = T_SATA0_CHX_PHY_CTRL17_0_RX_EQ_CTRL_L_GEN1;
writel(val, tegra->sata_regs + SCFG_OFFSET + T_SATA0_CHX_PHY_CTRL17_0);
val = T_SATA0_CHX_PHY_CTRL18_0_RX_EQ_CTRL_L_GEN2;
writel(val, tegra->sata_regs + SCFG_OFFSET + T_SATA0_CHX_PHY_CTRL18_0);
val = T_SATA0_CHX_PHY_CTRL20_0_RX_EQ_CTRL_H_GEN1;
writel(val, tegra->sata_regs + SCFG_OFFSET + T_SATA0_CHX_PHY_CTRL20_0);
val = T_SATA0_CHX_PHY_CTRL21_0_RX_EQ_CTRL_H_GEN2;
writel(val, tegra->sata_regs + SCFG_OFFSET + T_SATA0_CHX_PHY_CTRL21_0);
/* For SQUELCH Filter & Gen3 drive getting detected as Gen1 drive */
val = readl(tegra->sata_regs + SCFG_OFFSET + T_SATA_CFG_PHY_0);
val |= T_SATA_CFG_PHY_0_MASK_SQUELCH;
val &= ~T_SATA_CFG_PHY_0_USE_7BIT_ALIGN_DET_FOR_SPD;
writel(val, tegra->sata_regs + SCFG_OFFSET + T_SATA_CFG_PHY_0);
val = readl(tegra->sata_regs + SCFG_OFFSET + T_SATA0_NVOOB);
val &= ~(tegra->soc->regs->nvoob_comma_cnt_mask |
T_SATA0_NVOOB_SQUELCH_FILTER_LENGTH_MASK |
T_SATA0_NVOOB_SQUELCH_FILTER_MODE_MASK);
val |= (tegra->soc->regs->nvoob_comma_cnt_val |
T_SATA0_NVOOB_SQUELCH_FILTER_LENGTH |
T_SATA0_NVOOB_SQUELCH_FILTER_MODE);
writel(val, tegra->sata_regs + SCFG_OFFSET + T_SATA0_NVOOB);
/*
* Change CFG2NVOOB_2_COMWAKE_IDLE_CNT_LOW from 83.3 ns to 58.8ns
*/
val = readl(tegra->sata_regs + SCFG_OFFSET + T_SATA0_CFG2NVOOB_2);
val &= ~T_SATA0_CFG2NVOOB_2_COMWAKE_IDLE_CNT_LOW_MASK;
val |= T_SATA0_CFG2NVOOB_2_COMWAKE_IDLE_CNT_LOW;
writel(val, tegra->sata_regs + SCFG_OFFSET + T_SATA0_CFG2NVOOB_2);
if (tegra->soc->ops && tegra->soc->ops->init)
tegra->soc->ops->init(hpriv);
/*
* Program the following SATA configuration registers to
* initialize SATA
*/
val = readl(tegra->sata_regs + SCFG_OFFSET + T_SATA0_CFG_1);
val |= (T_SATA0_CFG_1_IO_SPACE | T_SATA0_CFG_1_MEMORY_SPACE |
T_SATA0_CFG_1_BUS_MASTER | T_SATA0_CFG_1_SERR);
writel(val, tegra->sata_regs + SCFG_OFFSET + T_SATA0_CFG_1);
val = T_SATA0_CFG_9_BASE_ADDRESS;
writel(val, tegra->sata_regs + SCFG_OFFSET + T_SATA0_CFG_9);
/* Program Class Code and Programming interface for SATA */
val = readl(tegra->sata_regs + SCFG_OFFSET + T_SATA0_CFG_SATA);
val |= T_SATA0_CFG_SATA_BACKDOOR_PROG_IF_EN;
writel(val, tegra->sata_regs + SCFG_OFFSET + T_SATA0_CFG_SATA);
val = readl(tegra->sata_regs + SCFG_OFFSET + T_SATA0_BKDOOR_CC);
val &=
~(T_SATA0_BKDOOR_CC_CLASS_CODE_MASK |
T_SATA0_BKDOOR_CC_PROG_IF_MASK);
val |= T_SATA0_BKDOOR_CC_CLASS_CODE | T_SATA0_BKDOOR_CC_PROG_IF;
writel(val, tegra->sata_regs + SCFG_OFFSET + T_SATA0_BKDOOR_CC);
val = readl(tegra->sata_regs + SCFG_OFFSET + T_SATA0_CFG_SATA);
val &= ~T_SATA0_CFG_SATA_BACKDOOR_PROG_IF_EN;
writel(val, tegra->sata_regs + SCFG_OFFSET + T_SATA0_CFG_SATA);
/* Enabling LPM capabilities through Backdoor Programming */
val = readl(tegra->sata_regs + SCFG_OFFSET + T_SATA0_AHCI_HBA_CAP_BKDR);
val |= (T_SATA0_AHCI_HBA_CAP_BKDR_PARTIAL_ST_CAP |
T_SATA0_AHCI_HBA_CAP_BKDR_SLUMBER_ST_CAP |
T_SATA0_AHCI_HBA_CAP_BKDR_SALP |
T_SATA0_AHCI_HBA_CAP_BKDR_SUPP_PM);
writel(val, tegra->sata_regs + SCFG_OFFSET + T_SATA0_AHCI_HBA_CAP_BKDR);
/* SATA Second Level Clock Gating configuration
* Enabling Gating of Tx/Rx clocks and driving Pad IDDQ and Lane
* IDDQ Signals
*/
val = readl(tegra->sata_regs + SCFG_OFFSET + T_SATA0_CFG_35);
val &= ~T_SATA0_CFG_35_IDP_INDEX_MASK;
val |= T_SATA0_CFG_35_IDP_INDEX;
writel(val, tegra->sata_regs + SCFG_OFFSET + T_SATA0_CFG_35);
val = T_SATA0_AHCI_IDP1_DATA;
writel(val, tegra->sata_regs + SCFG_OFFSET + T_SATA0_AHCI_IDP1);
val = readl(tegra->sata_regs + SCFG_OFFSET + T_SATA0_CFG_PHY_1);
val |= (T_SATA0_CFG_PHY_1_PADS_IDDQ_EN |
T_SATA0_CFG_PHY_1_PAD_PLL_IDDQ_EN);
writel(val, tegra->sata_regs + SCFG_OFFSET + T_SATA0_CFG_PHY_1);
/* Enabling IPFS Clock Gating */
val = readl(tegra->sata_regs + SATA_CONFIGURATION_0);
val &= ~SATA_CONFIGURATION_0_CLK_OVERRIDE;
writel(val, tegra->sata_regs + SATA_CONFIGURATION_0);
tegra_ahci_handle_quirks(hpriv);
/* Unmask SATA interrupts */
val = readl(tegra->sata_regs + SATA_INTR_MASK);
val |= SATA_INTR_MASK_IP_INT_MASK;
writel(val, tegra->sata_regs + SATA_INTR_MASK);
return 0;
}
static void tegra_ahci_controller_deinit(struct ahci_host_priv *hpriv)
{
tegra_ahci_power_off(hpriv);
}
static void tegra_ahci_host_stop(struct ata_host *host)
{
struct ahci_host_priv *hpriv = host->private_data;
tegra_ahci_controller_deinit(hpriv);
}
static struct ata_port_operations ahci_tegra_port_ops = {
.inherits = &ahci_ops,
.host_stop = tegra_ahci_host_stop,
};
static const struct ata_port_info ahci_tegra_port_info = {
.flags = AHCI_FLAG_COMMON | ATA_FLAG_NO_DIPM,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_tegra_port_ops,
};
static const char *const tegra124_supply_names[] = {
"avdd", "hvdd", "vddio", "target-5v", "target-12v"
};
static const struct tegra_ahci_ops tegra124_ahci_ops = {
.init = tegra124_ahci_init,
};
static const struct tegra_ahci_regs tegra124_ahci_regs = {
.nvoob_comma_cnt_mask = GENMASK(30, 28),
.nvoob_comma_cnt_val = (7 << 28),
};
static const struct tegra_ahci_soc tegra124_ahci_soc = {
.supply_names = tegra124_supply_names,
.num_supplies = ARRAY_SIZE(tegra124_supply_names),
.supports_devslp = false,
.has_sata_oob_rst = true,
.ops = &tegra124_ahci_ops,
.regs = &tegra124_ahci_regs,
};
static const struct tegra_ahci_soc tegra210_ahci_soc = {
.supports_devslp = false,
.has_sata_oob_rst = true,
.regs = &tegra124_ahci_regs,
};
static const struct tegra_ahci_regs tegra186_ahci_regs = {
.nvoob_comma_cnt_mask = GENMASK(23, 16),
.nvoob_comma_cnt_val = (7 << 16),
};
static const struct tegra_ahci_soc tegra186_ahci_soc = {
.supports_devslp = false,
.has_sata_oob_rst = false,
.regs = &tegra186_ahci_regs,
};
static const struct of_device_id tegra_ahci_of_match[] = {
{
.compatible = "nvidia,tegra124-ahci",
.data = &tegra124_ahci_soc
},
{
.compatible = "nvidia,tegra210-ahci",
.data = &tegra210_ahci_soc
},
{
.compatible = "nvidia,tegra186-ahci",
.data = &tegra186_ahci_soc
},
{}
};
MODULE_DEVICE_TABLE(of, tegra_ahci_of_match);
static const struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),
};
static int tegra_ahci_probe(struct platform_device *pdev)
{
struct ahci_host_priv *hpriv;
struct tegra_ahci_priv *tegra;
struct resource *res;
int ret;
hpriv = ahci_platform_get_resources(pdev, 0);
if (IS_ERR(hpriv))
return PTR_ERR(hpriv);
tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
if (!tegra)
return -ENOMEM;
hpriv->plat_data = tegra;
tegra->pdev = pdev;
tegra->soc = of_device_get_match_data(&pdev->dev);
tegra->sata_regs = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(tegra->sata_regs))
return PTR_ERR(tegra->sata_regs);
/*
* The AUX register region is optional.
*/
res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
if (res) {
tegra->sata_aux_regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(tegra->sata_aux_regs))
return PTR_ERR(tegra->sata_aux_regs);
}
tegra->sata_rst = devm_reset_control_get(&pdev->dev, "sata");
if (IS_ERR(tegra->sata_rst)) {
dev_err(&pdev->dev, "Failed to get sata reset\n");
return PTR_ERR(tegra->sata_rst);
}
if (tegra->soc->has_sata_oob_rst) {
tegra->sata_oob_rst = devm_reset_control_get(&pdev->dev,
"sata-oob");
if (IS_ERR(tegra->sata_oob_rst)) {
dev_err(&pdev->dev, "Failed to get sata-oob reset\n");
return PTR_ERR(tegra->sata_oob_rst);
}
}
tegra->sata_cold_rst = devm_reset_control_get(&pdev->dev, "sata-cold");
if (IS_ERR(tegra->sata_cold_rst)) {
dev_err(&pdev->dev, "Failed to get sata-cold reset\n");
return PTR_ERR(tegra->sata_cold_rst);
}
tegra->sata_clk = devm_clk_get(&pdev->dev, "sata");
if (IS_ERR(tegra->sata_clk)) {
dev_err(&pdev->dev, "Failed to get sata clock\n");
return PTR_ERR(tegra->sata_clk);
}
tegra->supplies = devm_kcalloc(&pdev->dev,
tegra->soc->num_supplies,
sizeof(*tegra->supplies), GFP_KERNEL);
if (!tegra->supplies)
return -ENOMEM;
regulator_bulk_set_supply_names(tegra->supplies,
tegra->soc->supply_names,
tegra->soc->num_supplies);
ret = devm_regulator_bulk_get(&pdev->dev,
tegra->soc->num_supplies,
tegra->supplies);
if (ret) {
dev_err(&pdev->dev, "Failed to get regulators\n");
return ret;
}
ret = tegra_ahci_controller_init(hpriv);
if (ret)
return ret;
ret = ahci_platform_init_host(pdev, hpriv, &ahci_tegra_port_info,
&ahci_platform_sht);
if (ret)
goto deinit_controller;
return 0;
deinit_controller:
tegra_ahci_controller_deinit(hpriv);
return ret;
};
static struct platform_driver tegra_ahci_driver = {
.probe = tegra_ahci_probe,
.remove_new = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.of_match_table = tegra_ahci_of_match,
},
/* LP0 suspend support not implemented */
};
module_platform_driver(tegra_ahci_driver);
MODULE_AUTHOR("Mikko Perttunen <[email protected]>");
MODULE_DESCRIPTION("Tegra AHCI SATA driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/ata/ahci_tegra.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* AMD Seattle AHCI SATA driver
*
* Copyright (c) 2015, Advanced Micro Devices
* Author: Brijesh Singh <[email protected]>
*
* based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/libata.h>
#include <linux/ahci_platform.h>
#include <linux/acpi.h>
#include <linux/pci_ids.h>
#include "ahci.h"
/* SGPIO Control Register definition
*
* Bit Type Description
* 31 RW OD7.2 (activity)
* 30 RW OD7.1 (locate)
* 29 RW OD7.0 (fault)
* 28...8 RW OD6.2...OD0.0 (3 bits per port, 1 bit per LED)
* 7 RO SGPIO feature flag
* 6:4 RO Reserved
* 3:0 RO Number of ports (0 means no port supported)
*/
#define ACTIVITY_BIT_POS(x) (8 + (3 * x))
#define LOCATE_BIT_POS(x) (ACTIVITY_BIT_POS(x) + 1)
#define FAULT_BIT_POS(x) (LOCATE_BIT_POS(x) + 1)
#define ACTIVITY_MASK 0x00010000
#define LOCATE_MASK 0x00080000
#define FAULT_MASK 0x00400000
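/*
* Worked example (illustrative, not used by the driver): per the layout
* above, each port owns three consecutive LED bits starting at bit 8, so
* for port 2 ACTIVITY_BIT_POS(2) = 8 + 3 * 2 = 14, LOCATE_BIT_POS(2) = 15
* and FAULT_BIT_POS(2) = 16. Turning on only the activity LED of port 2
* is then a read-modify-write of the SGPIO control register:
*
*	val = ioread32(sgpio_ctrl);
*	val |= 1 << ACTIVITY_BIT_POS(2);
*	iowrite32(val, sgpio_ctrl);
*/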
#define DRV_NAME "ahci-seattle"
static ssize_t seattle_transmit_led_message(struct ata_port *ap, u32 state,
ssize_t size);
struct seattle_plat_data {
void __iomem *sgpio_ctrl;
};
static struct ata_port_operations ahci_port_ops = {
.inherits = &ahci_ops,
};
static const struct ata_port_info ahci_port_info = {
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_port_ops,
};
static struct ata_port_operations ahci_seattle_ops = {
.inherits = &ahci_ops,
.transmit_led_message = seattle_transmit_led_message,
};
static const struct ata_port_info ahci_port_seattle_info = {
.flags = AHCI_FLAG_COMMON | ATA_FLAG_EM | ATA_FLAG_SW_ACTIVITY,
.link_flags = ATA_LFLAG_SW_ACTIVITY,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_seattle_ops,
};
static const struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),
};
static ssize_t seattle_transmit_led_message(struct ata_port *ap, u32 state,
ssize_t size)
{
struct ahci_host_priv *hpriv = ap->host->private_data;
struct ahci_port_priv *pp = ap->private_data;
struct seattle_plat_data *plat_data = hpriv->plat_data;
unsigned long flags;
int pmp;
struct ahci_em_priv *emp;
u32 val;
/* get the slot number from the message */
pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
if (pmp >= EM_MAX_SLOTS)
return -EINVAL;
emp = &pp->em_priv[pmp];
val = ioread32(plat_data->sgpio_ctrl);
if (state & ACTIVITY_MASK)
val |= 1 << ACTIVITY_BIT_POS((ap->port_no));
else
val &= ~(1 << ACTIVITY_BIT_POS((ap->port_no)));
if (state & LOCATE_MASK)
val |= 1 << LOCATE_BIT_POS((ap->port_no));
else
val &= ~(1 << LOCATE_BIT_POS((ap->port_no)));
if (state & FAULT_MASK)
val |= 1 << FAULT_BIT_POS((ap->port_no));
else
val &= ~(1 << FAULT_BIT_POS((ap->port_no)));
iowrite32(val, plat_data->sgpio_ctrl);
spin_lock_irqsave(ap->lock, flags);
/* save off new led state for port/slot */
emp->led_state = state;
spin_unlock_irqrestore(ap->lock, flags);
return size;
}
static const struct ata_port_info *ahci_seattle_get_port_info(
struct platform_device *pdev, struct ahci_host_priv *hpriv)
{
struct device *dev = &pdev->dev;
struct seattle_plat_data *plat_data;
u32 val;
plat_data = devm_kzalloc(dev, sizeof(*plat_data), GFP_KERNEL);
if (!plat_data)
return &ahci_port_info;
plat_data->sgpio_ctrl = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(plat_data->sgpio_ctrl))
return &ahci_port_info;
val = ioread32(plat_data->sgpio_ctrl);
if (!(val & 0xf))
return &ahci_port_info;
hpriv->em_loc = 0;
hpriv->em_buf_sz = 4;
hpriv->em_msg_type = EM_MSG_TYPE_LED;
hpriv->plat_data = plat_data;
dev_info(dev, "SGPIO LED control is enabled.\n");
return &ahci_port_seattle_info;
}
static int ahci_seattle_probe(struct platform_device *pdev)
{
int rc;
struct ahci_host_priv *hpriv;
hpriv = ahci_platform_get_resources(pdev, 0);
if (IS_ERR(hpriv))
return PTR_ERR(hpriv);
rc = ahci_platform_enable_resources(hpriv);
if (rc)
return rc;
rc = ahci_platform_init_host(pdev, hpriv,
ahci_seattle_get_port_info(pdev, hpriv),
&ahci_platform_sht);
if (rc)
goto disable_resources;
return 0;
disable_resources:
ahci_platform_disable_resources(hpriv);
return rc;
}
static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_platform_suspend,
ahci_platform_resume);
static const struct acpi_device_id ahci_acpi_match[] = {
{ "AMDI0600", 0 },
{}
};
MODULE_DEVICE_TABLE(acpi, ahci_acpi_match);
static struct platform_driver ahci_seattle_driver = {
.probe = ahci_seattle_probe,
.remove_new = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.acpi_match_table = ahci_acpi_match,
.pm = &ahci_pm_ops,
},
};
module_platform_driver(ahci_seattle_driver);
MODULE_DESCRIPTION("Seattle AHCI SATA platform driver");
MODULE_AUTHOR("Brijesh Singh <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
| linux-master | drivers/ata/ahci_seattle.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015 Xilinx, Inc.
* CEVA AHCI SATA platform driver
*
* based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
*/
#include <linux/ahci_platform.h>
#include <linux/kernel.h>
#include <linux/libata.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include "ahci.h"
/* Vendor Specific Register Offsets */
#define AHCI_VEND_PCFG 0xA4
#define AHCI_VEND_PPCFG 0xA8
#define AHCI_VEND_PP2C 0xAC
#define AHCI_VEND_PP3C 0xB0
#define AHCI_VEND_PP4C 0xB4
#define AHCI_VEND_PP5C 0xB8
#define AHCI_VEND_AXICC 0xBC
#define AHCI_VEND_PAXIC 0xC0
#define AHCI_VEND_PTC 0xC8
/* Vendor Specific Register bit definitions */
#define PAXIC_ADBW_BW64 0x1
#define PAXIC_MAWID(i) (((i) * 2) << 4)
#define PAXIC_MARID(i) (((i) * 2) << 12)
#define PAXIC_MARIDD(i) ((((i) * 2) + 1) << 16)
#define PAXIC_MAWIDD(i) ((((i) * 2) + 1) << 8)
#define PAXIC_OTL (0x4 << 20)
/* Register bit definitions for cache control */
#define AXICC_ARCA_VAL (0xF << 0)
#define AXICC_ARCF_VAL (0xF << 4)
#define AXICC_ARCH_VAL (0xF << 8)
#define AXICC_ARCP_VAL (0xF << 12)
#define AXICC_AWCFD_VAL (0xF << 16)
#define AXICC_AWCD_VAL (0xF << 20)
#define AXICC_AWCF_VAL (0xF << 24)
#define PCFG_TPSS_VAL (0x32 << 16)
#define PCFG_TPRS_VAL (0x2 << 12)
#define PCFG_PAD_VAL 0x2
#define PPCFG_TTA 0x1FFFE
#define PPCFG_PSSO_EN (1 << 28)
#define PPCFG_PSS_EN (1 << 29)
#define PPCFG_ESDF_EN (1 << 31)
#define PP5C_RIT 0x60216
#define PP5C_RCT (0x7f0 << 20)
#define PTC_RX_WM_VAL 0x40
#define PTC_RSVD (1 << 27)
#define PORT0_BASE 0x100
#define PORT1_BASE 0x180
/* Port Control Register Bit Definitions */
#define PORT_SCTL_SPD_GEN3 (0x3 << 4)
#define PORT_SCTL_SPD_GEN2 (0x2 << 4)
#define PORT_SCTL_SPD_GEN1 (0x1 << 4)
#define PORT_SCTL_IPM (0x3 << 8)
#define PORT_BASE 0x100
#define PORT_OFFSET 0x80
#define NR_PORTS 2
#define DRV_NAME "ahci-ceva"
#define CEVA_FLAG_BROKEN_GEN2 1
static unsigned int rx_watermark = PTC_RX_WM_VAL;
module_param(rx_watermark, uint, 0644);
MODULE_PARM_DESC(rx_watermark, "RxWaterMark value (0 - 0x80)");
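/*
* Usage sketch (assumption: the module is built and loaded as ahci_ceva):
* the Rx watermark can be overridden at load time, e.g.
*
*	modprobe ahci_ceva rx_watermark=32
*
* and, because the parameter is 0644, also changed later through
* /sys/module/ahci_ceva/parameters/rx_watermark. The value is only
* written to AHCI_VEND_PTC when ahci_ceva_setup() runs.
*/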
struct ceva_ahci_priv {
struct platform_device *ahci_pdev;
/* Port Phy2Cfg Register */
u32 pp2c[NR_PORTS];
u32 pp3c[NR_PORTS];
u32 pp4c[NR_PORTS];
u32 pp5c[NR_PORTS];
/* Axi Cache Control Register */
u32 axicc;
bool is_cci_enabled;
int flags;
struct reset_control *rst;
};
static unsigned int ceva_ahci_read_id(struct ata_device *dev,
struct ata_taskfile *tf, __le16 *id)
{
u32 err_mask;
err_mask = ata_do_dev_read_id(dev, tf, id);
if (err_mask)
return err_mask;
/*
* Since the CEVA controller does not support the device sleep feature,
* we need to clear DEVSLP (bit 8) in word 78 of the IDENTIFY DEVICE data.
*/
id[ATA_ID_FEATURE_SUPP] &= cpu_to_le16(~(1 << 8));
return 0;
}
static struct ata_port_operations ahci_ceva_ops = {
.inherits = &ahci_platform_ops,
.read_id = ceva_ahci_read_id,
};
static const struct ata_port_info ahci_ceva_port_info = {
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ceva_ops,
};
static void ahci_ceva_setup(struct ahci_host_priv *hpriv)
{
void __iomem *mmio = hpriv->mmio;
struct ceva_ahci_priv *cevapriv = hpriv->plat_data;
u32 tmp;
int i;
/* Set AHCI Enable */
tmp = readl(mmio + HOST_CTL);
tmp |= HOST_AHCI_EN;
writel(tmp, mmio + HOST_CTL);
for (i = 0; i < NR_PORTS; i++) {
/* TPSS TPRS scalars, CISE and Port Addr */
tmp = PCFG_TPSS_VAL | PCFG_TPRS_VAL | (PCFG_PAD_VAL + i);
writel(tmp, mmio + AHCI_VEND_PCFG);
/*
* AXI Data bus width to 64
* Set Mem Addr Read, Write ID for data transfers
* Set Mem Addr Read ID, Write ID for non-data transfers
* Transfer limit to 72 DWord
*/
tmp = PAXIC_ADBW_BW64 | PAXIC_MAWIDD(i) | PAXIC_MARIDD(i) |
PAXIC_MAWID(i) | PAXIC_MARID(i) | PAXIC_OTL;
writel(tmp, mmio + AHCI_VEND_PAXIC);
/* Set AXI cache control register if CCI is enabled */
if (cevapriv->is_cci_enabled) {
tmp = readl(mmio + AHCI_VEND_AXICC);
tmp |= AXICC_ARCA_VAL | AXICC_ARCF_VAL |
AXICC_ARCH_VAL | AXICC_ARCP_VAL |
AXICC_AWCFD_VAL | AXICC_AWCD_VAL |
AXICC_AWCF_VAL;
writel(tmp, mmio + AHCI_VEND_AXICC);
}
/* Port Phy Cfg register enables */
tmp = PPCFG_TTA | PPCFG_PSS_EN | PPCFG_ESDF_EN;
writel(tmp, mmio + AHCI_VEND_PPCFG);
/* Phy Control OOB timing parameters COMINIT */
writel(cevapriv->pp2c[i], mmio + AHCI_VEND_PP2C);
/* Phy Control OOB timing parameters COMWAKE */
writel(cevapriv->pp3c[i], mmio + AHCI_VEND_PP3C);
/* Phy Control Burst timing setting */
writel(cevapriv->pp4c[i], mmio + AHCI_VEND_PP4C);
/* Rate Change Timer and Retry Interval Timer setting */
writel(cevapriv->pp5c[i], mmio + AHCI_VEND_PP5C);
/* Rx Watermark setting */
tmp = rx_watermark | PTC_RSVD;
writel(tmp, mmio + AHCI_VEND_PTC);
/* Default to Gen 3 Speed and Gen 1 if Gen2 is broken */
tmp = PORT_SCTL_SPD_GEN3 | PORT_SCTL_IPM;
if (cevapriv->flags & CEVA_FLAG_BROKEN_GEN2)
tmp = PORT_SCTL_SPD_GEN1 | PORT_SCTL_IPM;
writel(tmp, mmio + PORT_SCR_CTL + PORT_BASE + PORT_OFFSET * i);
}
}
static const struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),
};
static int ceva_ahci_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct ahci_host_priv *hpriv;
struct ceva_ahci_priv *cevapriv;
enum dev_dma_attr attr;
int rc;
cevapriv = devm_kzalloc(dev, sizeof(*cevapriv), GFP_KERNEL);
if (!cevapriv)
return -ENOMEM;
cevapriv->ahci_pdev = pdev;
cevapriv->rst = devm_reset_control_get_optional_exclusive(&pdev->dev,
NULL);
if (IS_ERR(cevapriv->rst))
dev_err_probe(&pdev->dev, PTR_ERR(cevapriv->rst),
"failed to get reset\n");
hpriv = ahci_platform_get_resources(pdev, 0);
if (IS_ERR(hpriv))
return PTR_ERR(hpriv);
if (!cevapriv->rst) {
rc = ahci_platform_enable_resources(hpriv);
if (rc)
return rc;
} else {
int i;
rc = ahci_platform_enable_clks(hpriv);
if (rc)
return rc;
/* Assert the controller reset */
reset_control_assert(cevapriv->rst);
for (i = 0; i < hpriv->nports; i++) {
rc = phy_init(hpriv->phys[i]);
if (rc)
return rc;
}
/* De-assert the controller reset */
reset_control_deassert(cevapriv->rst);
for (i = 0; i < hpriv->nports; i++) {
rc = phy_power_on(hpriv->phys[i]);
if (rc) {
phy_exit(hpriv->phys[i]);
return rc;
}
}
}
if (of_property_read_bool(np, "ceva,broken-gen2"))
cevapriv->flags = CEVA_FLAG_BROKEN_GEN2;
/* Read OOB timing value for COMINIT from device-tree */
if (of_property_read_u8_array(np, "ceva,p0-cominit-params",
(u8 *)&cevapriv->pp2c[0], 4) < 0) {
dev_warn(dev, "ceva,p0-cominit-params property not defined\n");
return -EINVAL;
}
if (of_property_read_u8_array(np, "ceva,p1-cominit-params",
(u8 *)&cevapriv->pp2c[1], 4) < 0) {
dev_warn(dev, "ceva,p1-cominit-params property not defined\n");
return -EINVAL;
}
/* Read OOB timing value for COMWAKE from device-tree */
if (of_property_read_u8_array(np, "ceva,p0-comwake-params",
(u8 *)&cevapriv->pp3c[0], 4) < 0) {
dev_warn(dev, "ceva,p0-comwake-params property not defined\n");
return -EINVAL;
}
if (of_property_read_u8_array(np, "ceva,p1-comwake-params",
(u8 *)&cevapriv->pp3c[1], 4) < 0) {
dev_warn(dev, "ceva,p1-comwake-params property not defined\n");
return -EINVAL;
}
/* Read phy BURST timing value from device-tree */
if (of_property_read_u8_array(np, "ceva,p0-burst-params",
(u8 *)&cevapriv->pp4c[0], 4) < 0) {
dev_warn(dev, "ceva,p0-burst-params property not defined\n");
return -EINVAL;
}
if (of_property_read_u8_array(np, "ceva,p1-burst-params",
(u8 *)&cevapriv->pp4c[1], 4) < 0) {
dev_warn(dev, "ceva,p1-burst-params property not defined\n");
return -EINVAL;
}
/* Read phy RETRY interval timing value from device-tree */
if (of_property_read_u16_array(np, "ceva,p0-retry-params",
(u16 *)&cevapriv->pp5c[0], 2) < 0) {
dev_warn(dev, "ceva,p0-retry-params property not defined\n");
return -EINVAL;
}
if (of_property_read_u16_array(np, "ceva,p1-retry-params",
(u16 *)&cevapriv->pp5c[1], 2) < 0) {
dev_warn(dev, "ceva,p1-retry-params property not defined\n");
return -EINVAL;
}
/*
* Check if CCI is enabled for SATA: device_get_dma_attr() returns
* DEV_DMA_COHERENT when CCI is enabled, so check for that value.
*/
attr = device_get_dma_attr(dev);
cevapriv->is_cci_enabled = (attr == DEV_DMA_COHERENT);
hpriv->plat_data = cevapriv;
/* CEVA specific initialization */
ahci_ceva_setup(hpriv);
rc = ahci_platform_init_host(pdev, hpriv, &ahci_ceva_port_info,
&ahci_platform_sht);
if (rc)
goto disable_resources;
return 0;
disable_resources:
ahci_platform_disable_resources(hpriv);
return rc;
}
static int __maybe_unused ceva_ahci_suspend(struct device *dev)
{
return ahci_platform_suspend(dev);
}
static int __maybe_unused ceva_ahci_resume(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
int rc;
rc = ahci_platform_enable_resources(hpriv);
if (rc)
return rc;
/* Configure CEVA specific config before resuming HBA */
ahci_ceva_setup(hpriv);
rc = ahci_platform_resume_host(dev);
if (rc)
goto disable_resources;
/* We resumed so update PM runtime state */
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
return 0;
disable_resources:
ahci_platform_disable_resources(hpriv);
return rc;
}
static SIMPLE_DEV_PM_OPS(ahci_ceva_pm_ops, ceva_ahci_suspend, ceva_ahci_resume);
static const struct of_device_id ceva_ahci_of_match[] = {
{ .compatible = "ceva,ahci-1v84" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ceva_ahci_of_match);
static struct platform_driver ceva_ahci_driver = {
.probe = ceva_ahci_probe,
.remove_new = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.of_match_table = ceva_ahci_of_match,
.pm = &ahci_ceva_pm_ops,
},
};
module_platform_driver(ceva_ahci_driver);
MODULE_DESCRIPTION("CEVA AHCI SATA platform driver");
MODULE_AUTHOR("Xilinx Inc.");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/ata/ahci_ceva.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* SATA specific part of ATA helper library
*
* Copyright 2003-2004 Red Hat, Inc. All rights reserved.
* Copyright 2003-2004 Jeff Garzik
* Copyright 2006 Tejun Heo <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <linux/libata.h>
#include <asm/unaligned.h>
#include "libata.h"
#include "libata-transport.h"
/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned int sata_deb_timing_normal[] = { 5, 100, 2000 };
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
const unsigned int sata_deb_timing_hotplug[] = { 25, 500, 2000 };
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
const unsigned int sata_deb_timing_long[] = { 100, 2000, 5000 };
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
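/*
* Worked example (informational): the three values are { interval,
* duration, timeout } in milliseconds, as consumed by sata_link_debounce()
* below. With sata_deb_timing_normal = { 5, 100, 2000 } the PHY status is
* polled every 5 ms, must hold the same DET value for 100 ms to be
* considered stable, and the whole wait is capped at 2000 ms.
*/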
/**
* sata_scr_valid - test whether SCRs are accessible
* @link: ATA link to test SCR accessibility for
*
* Test whether SCRs are accessible for @link.
*
* LOCKING:
* None.
*
* RETURNS:
* 1 if SCRs are accessible, 0 otherwise.
*/
int sata_scr_valid(struct ata_link *link)
{
struct ata_port *ap = link->ap;
return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
}
EXPORT_SYMBOL_GPL(sata_scr_valid);
/**
* sata_scr_read - read SCR register of the specified port
* @link: ATA link to read SCR for
* @reg: SCR to read
* @val: Place to store read value
*
* Read SCR register @reg of @link into *@val. This function is
* guaranteed to succeed if @link is ap->link, the cable type of
* the port is SATA and the port implements ->scr_read.
*
* LOCKING:
* None if @link is ap->link. Kernel thread context otherwise.
*
* RETURNS:
* 0 on success, negative errno on failure.
*/
int sata_scr_read(struct ata_link *link, int reg, u32 *val)
{
if (ata_is_host_link(link)) {
if (sata_scr_valid(link))
return link->ap->ops->scr_read(link, reg, val);
return -EOPNOTSUPP;
}
return sata_pmp_scr_read(link, reg, val);
}
EXPORT_SYMBOL_GPL(sata_scr_read);
/**
* sata_scr_write - write SCR register of the specified port
* @link: ATA link to write SCR for
* @reg: SCR to write
* @val: value to write
*
* Write @val to SCR register @reg of @link. This function is
* guaranteed to succeed if @link is ap->link, the cable type of
* the port is SATA and the port implements ->scr_write.
*
* LOCKING:
* None if @link is ap->link. Kernel thread context otherwise.
*
* RETURNS:
* 0 on success, negative errno on failure.
*/
int sata_scr_write(struct ata_link *link, int reg, u32 val)
{
if (ata_is_host_link(link)) {
if (sata_scr_valid(link))
return link->ap->ops->scr_write(link, reg, val);
return -EOPNOTSUPP;
}
return sata_pmp_scr_write(link, reg, val);
}
EXPORT_SYMBOL_GPL(sata_scr_write);
/**
* sata_scr_write_flush - write SCR register of the specified port and flush
* @link: ATA link to write SCR for
* @reg: SCR to write
* @val: value to write
*
* This function is identical to sata_scr_write() except that this
* function performs flush after writing to the register.
*
* LOCKING:
* None if @link is ap->link. Kernel thread context otherwise.
*
* RETURNS:
* 0 on success, negative errno on failure.
*/
int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
{
if (ata_is_host_link(link)) {
int rc;
if (sata_scr_valid(link)) {
rc = link->ap->ops->scr_write(link, reg, val);
if (rc == 0)
rc = link->ap->ops->scr_read(link, reg, &val);
return rc;
}
return -EOPNOTSUPP;
}
return sata_pmp_scr_write(link, reg, val);
}
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
/**
* ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
* @tf: Taskfile to convert
* @pmp: Port multiplier port
* @is_cmd: This FIS is for command
* @fis: Buffer into which data will output
*
* Converts a standard ATA taskfile to a Serial ATA
* FIS structure (Register - Host to Device).
*
* LOCKING:
* Inherited from caller.
*/
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
{
fis[0] = 0x27; /* Register - Host to Device FIS */
fis[1] = pmp & 0xf; /* Port multiplier number*/
if (is_cmd)
fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
fis[2] = tf->command;
fis[3] = tf->feature;
fis[4] = tf->lbal;
fis[5] = tf->lbam;
fis[6] = tf->lbah;
fis[7] = tf->device;
fis[8] = tf->hob_lbal;
fis[9] = tf->hob_lbam;
fis[10] = tf->hob_lbah;
fis[11] = tf->hob_feature;
fis[12] = tf->nsect;
fis[13] = tf->hob_nsect;
fis[14] = 0;
fis[15] = tf->ctl;
fis[16] = tf->auxiliary & 0xff;
fis[17] = (tf->auxiliary >> 8) & 0xff;
fis[18] = (tf->auxiliary >> 16) & 0xff;
fis[19] = (tf->auxiliary >> 24) & 0xff;
}
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
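/*
* Usage sketch (not part of this file): a typical FIS-based controller
* driver converts the taskfile of a queued command into the Register -
* Host to Device FIS area of its command table. The cmd_tbl pointer and
* the example_* name below are hypothetical.
*/
#if 0
static void example_fill_cmd_fis(struct ata_queued_cmd *qc, u8 *cmd_tbl)
{
/* Build a command FIS addressed at the device's PMP port */
ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
}
#endif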
/**
* ata_tf_from_fis - Convert SATA FIS to ATA taskfile
* @fis: Buffer from which data will be input
* @tf: Taskfile to output
*
* Converts a serial ATA FIS structure to a standard ATA taskfile.
*
* LOCKING:
* Inherited from caller.
*/
void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
tf->status = fis[2];
tf->error = fis[3];
tf->lbal = fis[4];
tf->lbam = fis[5];
tf->lbah = fis[6];
tf->device = fis[7];
tf->hob_lbal = fis[8];
tf->hob_lbam = fis[9];
tf->hob_lbah = fis[10];
tf->nsect = fis[12];
tf->hob_nsect = fis[13];
}
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
/**
* sata_link_debounce - debounce SATA phy status
* @link: ATA link to debounce SATA phy status for
* @params: timing parameters { interval, duration, timeout } in msec
* @deadline: deadline jiffies for the operation
*
* Make sure SStatus of @link reaches stable state, determined by
* holding the same value where DET is not 1 for @duration polled
* every @interval, before @timeout. Timeout constraints the
* beginning of the stable state. Because DET gets stuck at 1 on
* some controllers after hot unplugging, this functions waits
* until timeout then returns 0 if DET is stable at 1.
*
* @timeout is further limited by @deadline. The sooner of the
* two is used.
*
* LOCKING:
* Kernel thread context (may sleep)
*
* RETURNS:
* 0 on success, -errno on failure.
*/
int sata_link_debounce(struct ata_link *link, const unsigned int *params,
unsigned long deadline)
{
unsigned int interval = params[0];
unsigned int duration = params[1];
unsigned long last_jiffies, t;
u32 last, cur;
int rc;
t = ata_deadline(jiffies, params[2]);
if (time_before(t, deadline))
deadline = t;
if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
return rc;
cur &= 0xf;
last = cur;
last_jiffies = jiffies;
while (1) {
ata_msleep(link->ap, interval);
if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
return rc;
cur &= 0xf;
/* DET stable? */
if (cur == last) {
if (cur == 1 && time_before(jiffies, deadline))
continue;
if (time_after(jiffies,
ata_deadline(last_jiffies, duration)))
return 0;
continue;
}
/* unstable, start over */
last = cur;
last_jiffies = jiffies;
/* Check deadline. If debouncing failed, return
* -EPIPE to tell upper layer to lower link speed.
*/
if (time_after(jiffies, deadline))
return -EPIPE;
}
}
EXPORT_SYMBOL_GPL(sata_link_debounce);
/**
* sata_link_resume - resume SATA link
* @link: ATA link to resume SATA
* @params: timing parameters { interval, duration, timeout } in msec
* @deadline: deadline jiffies for the operation
*
* Resume SATA phy @link and debounce it.
*
* LOCKING:
* Kernel thread context (may sleep)
*
* RETURNS:
* 0 on success, -errno on failure.
*/
int sata_link_resume(struct ata_link *link, const unsigned int *params,
unsigned long deadline)
{
int tries = ATA_LINK_RESUME_TRIES;
u32 scontrol, serror;
int rc;
if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
return rc;
/*
* Writes to SControl sometimes get ignored under certain
* controllers (ata_piix SIDPR). Make sure DET actually is
* cleared.
*/
do {
scontrol = (scontrol & 0x0f0) | 0x300;
if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
return rc;
/*
* Some PHYs react badly if SStatus is pounded
* immediately after resuming. Delay 200ms before
* debouncing.
*/
if (!(link->flags & ATA_LFLAG_NO_DEBOUNCE_DELAY))
ata_msleep(link->ap, 200);
/* is SControl restored correctly? */
if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
return rc;
} while ((scontrol & 0xf0f) != 0x300 && --tries);
if ((scontrol & 0xf0f) != 0x300) {
ata_link_warn(link, "failed to resume link (SControl %X)\n",
scontrol);
return 0;
}
if (tries < ATA_LINK_RESUME_TRIES)
ata_link_warn(link, "link resume succeeded after %d retries\n",
ATA_LINK_RESUME_TRIES - tries);
if ((rc = sata_link_debounce(link, params, deadline)))
return rc;
/* clear SError, some PHYs require this even for SRST to work */
if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
rc = sata_scr_write(link, SCR_ERROR, serror);
return rc != -EINVAL ? rc : 0;
}
EXPORT_SYMBOL_GPL(sata_link_resume);
/**
* sata_link_scr_lpm - manipulate SControl IPM and SPM fields
* @link: ATA link to manipulate SControl for
* @policy: LPM policy to configure
* @spm_wakeup: initiate LPM transition to active state
*
* Manipulate the IPM field of the SControl register of @link
* according to @policy. If @policy is ATA_LPM_MAX_POWER and
* @spm_wakeup is %true, the SPM field is manipulated to wake up
* the link. This function also clears PHYRDY_CHG before
* returning.
*
* LOCKING:
* EH context.
*
* RETURNS:
* 0 on success, -errno otherwise.
*/
int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
bool spm_wakeup)
{
struct ata_eh_context *ehc = &link->eh_context;
bool woken_up = false;
u32 scontrol;
int rc;
rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
if (rc)
return rc;
switch (policy) {
case ATA_LPM_MAX_POWER:
/* disable all LPM transitions */
scontrol |= (0x7 << 8);
/* initiate transition to active state */
if (spm_wakeup) {
scontrol |= (0x4 << 12);
woken_up = true;
}
break;
case ATA_LPM_MED_POWER:
/* allow LPM to PARTIAL */
scontrol &= ~(0x1 << 8);
scontrol |= (0x6 << 8);
break;
case ATA_LPM_MED_POWER_WITH_DIPM:
case ATA_LPM_MIN_POWER_WITH_PARTIAL:
case ATA_LPM_MIN_POWER:
if (ata_link_nr_enabled(link) > 0) {
/* assume no restrictions on LPM transitions */
scontrol &= ~(0x7 << 8);
/*
* If the controller does not support partial, slumber,
* or devsleep, then disallow these transitions.
*/
if (link->ap->host->flags & ATA_HOST_NO_PART)
scontrol |= (0x1 << 8);
if (link->ap->host->flags & ATA_HOST_NO_SSC)
scontrol |= (0x2 << 8);
if (link->ap->host->flags & ATA_HOST_NO_DEVSLP)
scontrol |= (0x4 << 8);
} else {
/* empty port, power off */
scontrol &= ~0xf;
scontrol |= (0x1 << 2);
}
break;
default:
WARN_ON(1);
}
rc = sata_scr_write(link, SCR_CONTROL, scontrol);
if (rc)
return rc;
/* give the link time to transit out of LPM state */
if (woken_up)
msleep(10);
/* clear PHYRDY_CHG from SError */
ehc->i.serror &= ~SERR_PHYRDY_CHG;
return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
}
EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
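/*
* Worked example (informational): the IPM bits of SControl written above
* are "disable" flags. For ATA_LPM_MAX_POWER the code ORs in 0x7 << 8,
* i.e. bit 8 (no Partial), bit 9 (no Slumber) and bit 10 (no DevSleep);
* the minimum-power policies clear those bits so every transition stays
* allowed unless the host set ATA_HOST_NO_PART/NO_SSC/NO_DEVSLP.
*/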
static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
{
struct ata_link *host_link = &link->ap->link;
u32 limit, target, spd;
limit = link->sata_spd_limit;
/* Don't configure downstream link faster than upstream link.
* It doesn't speed up anything and some PMPs choke on such
* configuration.
*/
if (!ata_is_host_link(link) && host_link->sata_spd)
limit &= (1 << host_link->sata_spd) - 1;
if (limit == UINT_MAX)
target = 0;
else
target = fls(limit);
spd = (*scontrol >> 4) & 0xf;
*scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
return spd != target;
}
/**
* sata_set_spd_needed - is SATA spd configuration needed
* @link: Link in question
*
* Test whether the spd limit in SControl matches
* @link->sata_spd_limit. This function is used to determine
* whether hardreset is necessary to apply SATA spd
* configuration.
*
* LOCKING:
* Inherited from caller.
*
* RETURNS:
* 1 if SATA spd configuration is needed, 0 otherwise.
*/
static int sata_set_spd_needed(struct ata_link *link)
{
u32 scontrol;
if (sata_scr_read(link, SCR_CONTROL, &scontrol))
return 1;
return __sata_set_spd_needed(link, &scontrol);
}
/**
* sata_set_spd - set SATA spd according to spd limit
* @link: Link to set SATA spd for
*
* Set SATA spd of @link according to sata_spd_limit.
*
* LOCKING:
* Inherited from caller.
*
* RETURNS:
* 0 if spd doesn't need to be changed, 1 if spd has been
* changed. Negative errno if SCR registers are inaccessible.
*/
int sata_set_spd(struct ata_link *link)
{
u32 scontrol;
int rc;
if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
return rc;
if (!__sata_set_spd_needed(link, &scontrol))
return 0;
if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
return rc;
return 1;
}
EXPORT_SYMBOL_GPL(sata_set_spd);
/**
* sata_link_hardreset - reset link via SATA phy reset
* @link: link to reset
* @timing: timing parameters { interval, duration, timeout } in msec
* @deadline: deadline jiffies for the operation
* @online: optional out parameter indicating link onlineness
* @check_ready: optional callback to check link readiness
*
* SATA phy-reset @link using DET bits of SControl register.
* After hardreset, link readiness is waited upon using
* ata_wait_ready() if @check_ready is specified. LLDs are
* allowed to not specify @check_ready and wait itself after this
* function returns. Device classification is LLD's
* responsibility.
*
* *@online is set to one iff reset succeeded and @link is online
* after reset.
*
* LOCKING:
* Kernel thread context (may sleep)
*
* RETURNS:
* 0 on success, -errno otherwise.
*/
int sata_link_hardreset(struct ata_link *link, const unsigned int *timing,
unsigned long deadline,
bool *online, int (*check_ready)(struct ata_link *))
{
u32 scontrol;
int rc;
if (online)
*online = false;
if (sata_set_spd_needed(link)) {
/* SATA spec says nothing about how to reconfigure
* spd. To be on the safe side, turn off phy during
* reconfiguration. This works for at least ICH7 AHCI
* and Sil3124.
*/
if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
goto out;
scontrol = (scontrol & 0x0f0) | 0x304;
if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
goto out;
sata_set_spd(link);
}
/* issue phy wake/reset */
if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
goto out;
scontrol = (scontrol & 0x0f0) | 0x301;
if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
goto out;
/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
* 10.4.2 says at least 1 ms.
*/
ata_msleep(link->ap, 1);
/* bring link back */
rc = sata_link_resume(link, timing, deadline);
if (rc)
goto out;
/* if link is offline nothing more to do */
if (ata_phys_link_offline(link))
goto out;
/* Link is online. From this point, -ENODEV too is an error. */
if (online)
*online = true;
if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
/* If PMP is supported, we have to do follow-up SRST.
* Some PMPs don't send D2H Reg FIS after hardreset if
* the first port is empty. Wait only for
* ATA_TMOUT_PMP_SRST_WAIT.
*/
if (check_ready) {
unsigned long pmp_deadline;
pmp_deadline = ata_deadline(jiffies,
ATA_TMOUT_PMP_SRST_WAIT);
if (time_after(pmp_deadline, deadline))
pmp_deadline = deadline;
ata_wait_ready(link, pmp_deadline, check_ready);
}
rc = -EAGAIN;
goto out;
}
rc = 0;
if (check_ready)
rc = ata_wait_ready(link, deadline, check_ready);
out:
if (rc && rc != -EAGAIN) {
/* online is set iff link is online && reset succeeded */
if (online)
*online = false;
ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
}
return rc;
}
EXPORT_SYMBOL_GPL(sata_link_hardreset);
/**
* ata_qc_complete_multiple - Complete multiple qcs successfully
* @ap: port in question
* @qc_active: new qc_active mask
*
* Complete in-flight commands. This function is meant to be
* called from a low-level driver's interrupt routine to complete
* requests normally. ap->qc_active and @qc_active are compared
* and commands are completed accordingly.
*
* Always use this function when completing multiple NCQ commands
* from IRQ handlers instead of calling ata_qc_complete()
* multiple times to keep IRQ expect status properly in sync.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Number of completed commands on success, -errno otherwise.
*/
int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active)
{
u64 done_mask, ap_qc_active = ap->qc_active;
int nr_done = 0;
/*
* If the internal tag is set on ap->qc_active, then we care about
* bit0 on the passed in qc_active mask. Move that bit up to match
* the internal tag.
*/
if (ap_qc_active & (1ULL << ATA_TAG_INTERNAL)) {
qc_active |= (qc_active & 0x01) << ATA_TAG_INTERNAL;
qc_active ^= qc_active & 0x01;
}
done_mask = ap_qc_active ^ qc_active;
if (unlikely(done_mask & qc_active)) {
ata_port_err(ap, "illegal qc_active transition (%08llx->%08llx)\n",
ap->qc_active, qc_active);
return -EINVAL;
}
if (ap->ops->qc_ncq_fill_rtf)
ap->ops->qc_ncq_fill_rtf(ap, done_mask);
while (done_mask) {
struct ata_queued_cmd *qc;
unsigned int tag = __ffs64(done_mask);
qc = ata_qc_from_tag(ap, tag);
if (qc) {
ata_qc_complete(qc);
nr_done++;
}
done_mask &= ~(1ULL << tag);
}
return nr_done;
}
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
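/*
* Usage sketch (not part of this file): an NCQ-capable LLD typically calls
* this from its interrupt handler, under the host lock, with the
* hardware's view of the still-active tags. example_read_active_tags()
* is a hypothetical register read.
*/
#if 0
static void example_irq_complete(struct ata_port *ap)
{
u64 qc_active = example_read_active_tags(ap); /* hypothetical */

ata_qc_complete_multiple(ap, qc_active);
}
#endif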
/**
* ata_slave_link_init - initialize slave link
* @ap: port to initialize slave link for
*
* Create and initialize slave link for @ap. This enables slave
* link handling on the port.
*
* In libata, a port contains links and a link contains devices.
* There is a single host link but if a PMP is attached to it,
* there can be multiple fan-out links. On SATA, there's usually
* a single device connected to a link but PATA and SATA
* controllers emulating a TF-based interface can have two - master
* and slave.
*
* However, there are a few controllers which don't fit into this
* abstraction too well - SATA controllers which emulate TF
* interface with both master and slave devices but also have
* separate SCR register sets for each device. These controllers
* need separate links for physical link handling
* (e.g. onlineness, link speed) but should be treated like a
* traditional M/S controller for everything else (e.g. command
* issue, softreset).
*
* slave_link is libata's way of handling this class of
* controllers without impacting the core layer too much. For
* anything other than physical link handling, the default host
* link is used for both master and slave. For physical link
* handling, separate @ap->slave_link is used. All dirty details
* are implemented inside libata core layer. From LLD's POV, the
* only difference is that prereset, hardreset and postreset are
* called once more for the slave link, so the reset sequence
* looks like the following.
*
* prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
* softreset(M) -> postreset(M) -> postreset(S)
*
* Note that softreset is called only for the master. Softreset
* resets both M/S by definition, so SRST on master should handle
* both (the standard method will work just fine).
*
* LOCKING:
* Should be called before host is registered.
*
* RETURNS:
* 0 on success, -errno on failure.
*/
int ata_slave_link_init(struct ata_port *ap)
{
struct ata_link *link;
WARN_ON(ap->slave_link);
WARN_ON(ap->flags & ATA_FLAG_PMP);
link = kzalloc(sizeof(*link), GFP_KERNEL);
if (!link)
return -ENOMEM;
ata_link_init(ap, link, 1);
ap->slave_link = link;
return 0;
}
EXPORT_SYMBOL_GPL(ata_slave_link_init);
/**
* sata_lpm_ignore_phy_events - test if PHY event should be ignored
* @link: Link receiving the event
*
* Test whether the received PHY event has to be ignored or not.
*
* LOCKING:
* None.
*
* RETURNS:
* True if the event has to be ignored.
*/
bool sata_lpm_ignore_phy_events(struct ata_link *link)
{
unsigned long lpm_timeout = link->last_lpm_change +
msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY);
/* if LPM is enabled, PHYRDY doesn't mean anything */
if (link->lpm_policy > ATA_LPM_MAX_POWER)
return true;
/* ignore the first PHY event after the LPM policy changed
* as it might be spurious
*/
if ((link->flags & ATA_LFLAG_CHANGED) &&
time_before(jiffies, lpm_timeout))
return true;
return false;
}
EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
static const char *ata_lpm_policy_names[] = {
[ATA_LPM_UNKNOWN] = "max_performance",
[ATA_LPM_MAX_POWER] = "max_performance",
[ATA_LPM_MED_POWER] = "medium_power",
[ATA_LPM_MED_POWER_WITH_DIPM] = "med_power_with_dipm",
[ATA_LPM_MIN_POWER_WITH_PARTIAL] = "min_power_with_partial",
[ATA_LPM_MIN_POWER] = "min_power",
};
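/*
* Usage sketch (assumption: the attribute is exposed per SCSI host, e.g.
* /sys/class/scsi_host/host0/link_power_management_policy): writing one
* of the names above schedules EH to apply the new policy, for example:
*
*	echo med_power_with_dipm > \
*		/sys/class/scsi_host/host0/link_power_management_policy
*/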
static ssize_t ata_scsi_lpm_store(struct device *device,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(device);
struct ata_port *ap = ata_shost_to_port(shost);
struct ata_link *link;
struct ata_device *dev;
enum ata_lpm_policy policy;
unsigned long flags;
/* UNKNOWN is internal state, iterate from MAX_POWER */
for (policy = ATA_LPM_MAX_POWER;
policy < ARRAY_SIZE(ata_lpm_policy_names); policy++) {
const char *name = ata_lpm_policy_names[policy];
if (strncmp(name, buf, strlen(name)) == 0)
break;
}
if (policy == ARRAY_SIZE(ata_lpm_policy_names))
return -EINVAL;
spin_lock_irqsave(ap->lock, flags);
ata_for_each_link(link, ap, EDGE) {
ata_for_each_dev(dev, &ap->link, ENABLED) {
if (dev->horkage & ATA_HORKAGE_NOLPM) {
count = -EOPNOTSUPP;
goto out_unlock;
}
}
}
ap->target_lpm_policy = policy;
ata_port_schedule_eh(ap);
out_unlock:
spin_unlock_irqrestore(ap->lock, flags);
return count;
}
static ssize_t ata_scsi_lpm_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct ata_port *ap = ata_shost_to_port(shost);
if (ap->target_lpm_policy >= ARRAY_SIZE(ata_lpm_policy_names))
return -EINVAL;
return sysfs_emit(buf, "%s\n",
ata_lpm_policy_names[ap->target_lpm_policy]);
}
DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
ata_scsi_lpm_show, ata_scsi_lpm_store);
EXPORT_SYMBOL_GPL(dev_attr_link_power_management_policy);
static ssize_t ata_ncq_prio_supported_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
struct scsi_device *sdev = to_scsi_device(device);
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct ata_device *dev;
bool ncq_prio_supported;
int rc = 0;
spin_lock_irq(ap->lock);
dev = ata_scsi_find_dev(ap, sdev);
if (!dev)
rc = -ENODEV;
else
ncq_prio_supported = dev->flags & ATA_DFLAG_NCQ_PRIO;
spin_unlock_irq(ap->lock);
return rc ? rc : sysfs_emit(buf, "%u\n", ncq_prio_supported);
}
DEVICE_ATTR(ncq_prio_supported, S_IRUGO, ata_ncq_prio_supported_show, NULL);
EXPORT_SYMBOL_GPL(dev_attr_ncq_prio_supported);
static ssize_t ata_ncq_prio_enable_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
struct scsi_device *sdev = to_scsi_device(device);
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct ata_device *dev;
bool ncq_prio_enable;
int rc = 0;
spin_lock_irq(ap->lock);
dev = ata_scsi_find_dev(ap, sdev);
if (!dev)
rc = -ENODEV;
else
ncq_prio_enable = dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED;
spin_unlock_irq(ap->lock);
return rc ? rc : sysfs_emit(buf, "%u\n", ncq_prio_enable);
}
static ssize_t ata_ncq_prio_enable_store(struct device *device,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct scsi_device *sdev = to_scsi_device(device);
struct ata_port *ap;
struct ata_device *dev;
long int input;
int rc = 0;
rc = kstrtol(buf, 10, &input);
if (rc)
return rc;
if ((input < 0) || (input > 1))
return -EINVAL;
ap = ata_shost_to_port(sdev->host);
dev = ata_scsi_find_dev(ap, sdev);
if (unlikely(!dev))
return -ENODEV;
spin_lock_irq(ap->lock);
if (!(dev->flags & ATA_DFLAG_NCQ_PRIO)) {
rc = -EINVAL;
goto unlock;
}
if (input) {
if (dev->flags & ATA_DFLAG_CDL_ENABLED) {
ata_dev_err(dev,
"CDL must be disabled to enable NCQ priority\n");
rc = -EINVAL;
goto unlock;
}
dev->flags |= ATA_DFLAG_NCQ_PRIO_ENABLED;
} else {
dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLED;
}
unlock:
spin_unlock_irq(ap->lock);
return rc ? rc : len;
}
DEVICE_ATTR(ncq_prio_enable, S_IRUGO | S_IWUSR,
ata_ncq_prio_enable_show, ata_ncq_prio_enable_store);
EXPORT_SYMBOL_GPL(dev_attr_ncq_prio_enable);
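/*
* Usage sketch (assumption: the attributes appear under the SCSI device
* directory, e.g. /sys/block/sda/device/): ncq_prio_enable only accepts
* 0 or 1 and is rejected with -EINVAL while CDL is enabled or when the
* drive lacks NCQ priority support:
*
*	cat /sys/block/sda/device/ncq_prio_supported
*	echo 1 > /sys/block/sda/device/ncq_prio_enable
*/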
static struct attribute *ata_ncq_sdev_attrs[] = {
&dev_attr_unload_heads.attr,
&dev_attr_ncq_prio_enable.attr,
&dev_attr_ncq_prio_supported.attr,
NULL
};
static const struct attribute_group ata_ncq_sdev_attr_group = {
.attrs = ata_ncq_sdev_attrs
};
const struct attribute_group *ata_ncq_sdev_groups[] = {
&ata_ncq_sdev_attr_group,
NULL
};
EXPORT_SYMBOL_GPL(ata_ncq_sdev_groups);
static ssize_t
ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct ata_port *ap = ata_shost_to_port(shost);
if (ap->ops->em_store && (ap->flags & ATA_FLAG_EM))
return ap->ops->em_store(ap, buf, count);
return -EINVAL;
}
static ssize_t
ata_scsi_em_message_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct ata_port *ap = ata_shost_to_port(shost);
if (ap->ops->em_show && (ap->flags & ATA_FLAG_EM))
return ap->ops->em_show(ap, buf);
return -EINVAL;
}
DEVICE_ATTR(em_message, S_IRUGO | S_IWUSR,
ata_scsi_em_message_show, ata_scsi_em_message_store);
EXPORT_SYMBOL_GPL(dev_attr_em_message);
static ssize_t
ata_scsi_em_message_type_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct ata_port *ap = ata_shost_to_port(shost);
return sysfs_emit(buf, "%d\n", ap->em_message_type);
}
DEVICE_ATTR(em_message_type, S_IRUGO,
ata_scsi_em_message_type_show, NULL);
EXPORT_SYMBOL_GPL(dev_attr_em_message_type);
static ssize_t
ata_scsi_activity_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct scsi_device *sdev = to_scsi_device(dev);
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
if (atadev && ap->ops->sw_activity_show &&
(ap->flags & ATA_FLAG_SW_ACTIVITY))
return ap->ops->sw_activity_show(atadev, buf);
return -EINVAL;
}
static ssize_t
ata_scsi_activity_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct scsi_device *sdev = to_scsi_device(dev);
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
enum sw_activity val;
int rc;
if (atadev && ap->ops->sw_activity_store &&
(ap->flags & ATA_FLAG_SW_ACTIVITY)) {
val = simple_strtoul(buf, NULL, 0);
switch (val) {
case OFF: case BLINK_ON: case BLINK_OFF:
rc = ap->ops->sw_activity_store(atadev, val);
if (!rc)
return count;
else
return rc;
}
}
return -EINVAL;
}
DEVICE_ATTR(sw_activity, S_IWUSR | S_IRUGO, ata_scsi_activity_show,
ata_scsi_activity_store);
EXPORT_SYMBOL_GPL(dev_attr_sw_activity);
/**
* ata_change_queue_depth - Set a device maximum queue depth
* @ap: ATA port of the target device
* @sdev: SCSI device to configure queue depth for
* @queue_depth: new queue depth
*
* Helper to set a device maximum queue depth, usable with both libsas
* and libata.
*
*/
int ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
int queue_depth)
{
struct ata_device *dev;
unsigned long flags;
int max_queue_depth;
spin_lock_irqsave(ap->lock, flags);
dev = ata_scsi_find_dev(ap, sdev);
if (!dev || queue_depth < 1 || queue_depth == sdev->queue_depth) {
spin_unlock_irqrestore(ap->lock, flags);
return sdev->queue_depth;
}
/*
* Make sure that the queue depth requested does not exceed the device
* capabilities.
*/
max_queue_depth = min(ATA_MAX_QUEUE, sdev->host->can_queue);
max_queue_depth = min(max_queue_depth, ata_id_queue_depth(dev->id));
if (queue_depth > max_queue_depth) {
spin_unlock_irqrestore(ap->lock, flags);
return -EINVAL;
}
/*
* If NCQ is not supported by the device or if the target queue depth
* is 1 (to disable drive side command queueing), turn off NCQ.
*/
if (queue_depth == 1 || !ata_ncq_supported(dev)) {
dev->flags |= ATA_DFLAG_NCQ_OFF;
queue_depth = 1;
} else {
dev->flags &= ~ATA_DFLAG_NCQ_OFF;
}
spin_unlock_irqrestore(ap->lock, flags);
if (queue_depth == sdev->queue_depth)
return sdev->queue_depth;
return scsi_change_queue_depth(sdev, queue_depth);
}
EXPORT_SYMBOL_GPL(ata_change_queue_depth);
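/*
* Usage sketch (not part of this file): a libsas-based driver can wire its
* ->change_queue_depth callback straight to this helper, mirroring
* ata_scsi_change_queue_depth() below. example_sdev_to_ata_port() is a
* hypothetical lookup.
*/
#if 0
static int example_sas_change_queue_depth(struct scsi_device *sdev, int depth)
{
struct ata_port *ap = example_sdev_to_ata_port(sdev); /* hypothetical */

return ata_change_queue_depth(ap, sdev, depth);
}
#endif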
/**
* ata_scsi_change_queue_depth - SCSI callback for queue depth config
* @sdev: SCSI device to configure queue depth for
* @queue_depth: new queue depth
*
* This is libata standard hostt->change_queue_depth callback.
* SCSI will call into this callback when user tries to set queue
* depth via sysfs.
*
* LOCKING:
* SCSI layer (we don't care)
*
* RETURNS:
* Newly configured queue depth.
*/
int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
return ata_change_queue_depth(ap, sdev, queue_depth);
}
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
/**
* ata_sas_port_alloc - Allocate port for a SAS attached SATA device
* @host: ATA host container for all SAS ports
* @port_info: Information from low-level host driver
* @shost: SCSI host that the scsi device is attached to
*
* LOCKING:
* PCI/etc. bus probe sem.
*
* RETURNS:
* ata_port pointer on success / NULL on failure.
*/
struct ata_port *ata_sas_port_alloc(struct ata_host *host,
struct ata_port_info *port_info,
struct Scsi_Host *shost)
{
struct ata_port *ap;
ap = ata_port_alloc(host);
if (!ap)
return NULL;
ap->port_no = 0;
ap->lock = &host->lock;
ap->pio_mask = port_info->pio_mask;
ap->mwdma_mask = port_info->mwdma_mask;
ap->udma_mask = port_info->udma_mask;
ap->flags |= port_info->flags;
ap->ops = port_info->port_ops;
ap->cbl = ATA_CBL_SATA;
ap->print_id = atomic_inc_return(&ata_print_id);
return ap;
}
EXPORT_SYMBOL_GPL(ata_sas_port_alloc);
int ata_sas_tport_add(struct device *parent, struct ata_port *ap)
{
return ata_tport_add(parent, ap);
}
EXPORT_SYMBOL_GPL(ata_sas_tport_add);
void ata_sas_tport_delete(struct ata_port *ap)
{
ata_tport_delete(ap);
}
EXPORT_SYMBOL_GPL(ata_sas_tport_delete);
/**
* ata_sas_slave_configure - Default slave_config routine for libata devices
* @sdev: SCSI device to configure
* @ap: ATA port to which SCSI device is attached
*
* RETURNS:
* Zero.
*/
int ata_sas_slave_configure(struct scsi_device *sdev, struct ata_port *ap)
{
ata_scsi_sdev_config(sdev);
ata_scsi_dev_config(sdev, ap->link.device);
return 0;
}
EXPORT_SYMBOL_GPL(ata_sas_slave_configure);
/**
* ata_sas_queuecmd - Issue SCSI cdb to libata-managed device
* @cmd: SCSI command to be sent
* @ap: ATA port to which the command is being sent
*
* RETURNS:
* Return value from __ata_scsi_queuecmd() if @cmd can be queued,
* 0 otherwise.
*/
int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap)
{
int rc = 0;
if (likely(ata_dev_enabled(ap->link.device)))
rc = __ata_scsi_queuecmd(cmd, ap->link.device);
else {
cmd->result = (DID_BAD_TARGET << 16);
scsi_done(cmd);
}
return rc;
}
EXPORT_SYMBOL_GPL(ata_sas_queuecmd);
/**
* sata_async_notification - SATA async notification handler
* @ap: ATA port where async notification is received
*
* Handler to be called when async notification via SDB FIS is
* received. This function schedules EH if necessary.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*
* RETURNS:
* 1 if EH is scheduled, 0 otherwise.
*/
int sata_async_notification(struct ata_port *ap)
{
u32 sntf;
int rc;
if (!(ap->flags & ATA_FLAG_AN))
return 0;
rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
if (rc == 0)
sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);
if (!sata_pmp_attached(ap) || rc) {
/* PMP is not attached or SNTF is not available */
if (!sata_pmp_attached(ap)) {
/* PMP is not attached. Check whether ATAPI
* AN is configured. If so, notify media
* change.
*/
struct ata_device *dev = ap->link.device;
if ((dev->class == ATA_DEV_ATAPI) &&
(dev->flags & ATA_DFLAG_AN))
ata_scsi_media_change_notify(dev);
return 0;
} else {
/* PMP is attached but SNTF is not available.
* ATAPI async media change notification is
* not used. The PMP must be reporting PHY
* status change, schedule EH.
*/
ata_port_schedule_eh(ap);
return 1;
}
} else {
/* PMP is attached and SNTF is available */
struct ata_link *link;
/* check and notify ATAPI AN */
ata_for_each_link(link, ap, EDGE) {
if (!(sntf & (1 << link->pmp)))
continue;
if ((link->device->class == ATA_DEV_ATAPI) &&
(link->device->flags & ATA_DFLAG_AN))
ata_scsi_media_change_notify(link->device);
}
/* If PMP is reporting that PHY status of some
* downstream ports has changed, schedule EH.
*/
if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
ata_port_schedule_eh(ap);
return 1;
}
return 0;
}
}
EXPORT_SYMBOL_GPL(sata_async_notification);
/**
* ata_eh_read_log_10h - Read log page 10h for NCQ error details
* @dev: Device to read log page 10h from
* @tag: Resulting tag of the failed command
* @tf: Resulting taskfile registers of the failed command
*
* Read log page 10h to obtain NCQ error details and clear error
* condition.
*
* LOCKING:
* Kernel thread context (may sleep).
*
* RETURNS:
* 0 on success, -errno otherwise.
*/
static int ata_eh_read_log_10h(struct ata_device *dev,
int *tag, struct ata_taskfile *tf)
{
u8 *buf = dev->link->ap->sector_buf;
unsigned int err_mask;
u8 csum;
int i;
err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, 0, buf, 1);
if (err_mask)
return -EIO;
csum = 0;
for (i = 0; i < ATA_SECT_SIZE; i++)
csum += buf[i];
if (csum)
ata_dev_warn(dev, "invalid checksum 0x%x on log page 10h\n",
csum);
if (buf[0] & 0x80)
return -ENOENT;
*tag = buf[0] & 0x1f;
tf->status = buf[2];
tf->error = buf[3];
tf->lbal = buf[4];
tf->lbam = buf[5];
tf->lbah = buf[6];
tf->device = buf[7];
tf->hob_lbal = buf[8];
tf->hob_lbam = buf[9];
tf->hob_lbah = buf[10];
tf->nsect = buf[12];
tf->hob_nsect = buf[13];
if (ata_id_has_ncq_autosense(dev->id) && (tf->status & ATA_SENSE))
tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];
return 0;
}
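/*
* Worked example (informational, based on the parsing above): in the
* 512-byte log page 10h data, byte 0 bit 7 set means "no error with a
* valid tag" (-ENOENT above), bits 4:0 hold the failing tag, bytes 2..13
* mirror the shadow register block (status, error, LBA, device, counts)
* and bytes 14..16 carry the NCQ autosense key/asc/ascq when supported.
*/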
/**
* ata_eh_read_sense_success_ncq_log - Read the sense data for successful
* NCQ commands log
* @link: ATA link to get sense data for
*
* Read the sense data for successful NCQ commands log page to obtain
* sense data for all NCQ commands that completed successfully with
* the sense data available bit set.
*
* LOCKING:
* Kernel thread context (may sleep).
*
* RETURNS:
* 0 on success, -errno otherwise.
*/
int ata_eh_read_sense_success_ncq_log(struct ata_link *link)
{
struct ata_device *dev = link->device;
struct ata_port *ap = dev->link->ap;
u8 *buf = ap->ncq_sense_buf;
struct ata_queued_cmd *qc;
unsigned int err_mask, tag;
u8 *sense, sk = 0, asc = 0, ascq = 0;
u64 sense_valid, val;
int ret = 0;
err_mask = ata_read_log_page(dev, ATA_LOG_SENSE_NCQ, 0, buf, 2);
if (err_mask) {
ata_dev_err(dev,
"Failed to read Sense Data for Successful NCQ Commands log\n");
return -EIO;
}
/* Check the log header */
val = get_unaligned_le64(&buf[0]);
if ((val & 0xffff) != 1 || ((val >> 16) & 0xff) != 0x0f) {
ata_dev_err(dev,
"Invalid Sense Data for Successful NCQ Commands log\n");
return -EIO;
}
sense_valid = (u64)buf[8] | ((u64)buf[9] << 8) |
((u64)buf[10] << 16) | ((u64)buf[11] << 24);
ata_qc_for_each_raw(ap, qc, tag) {
if (!(qc->flags & ATA_QCFLAG_EH) ||
!(qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD) ||
qc->err_mask ||
ata_dev_phys_link(qc->dev) != link)
continue;
/*
* If the command does not have any sense data, clear ATA_SENSE.
* Keep ATA_QCFLAG_EH_SUCCESS_CMD so that the command is finished.
*/
if (!(sense_valid & (1ULL << tag))) {
qc->result_tf.status &= ~ATA_SENSE;
continue;
}
sense = &buf[32 + 24 * tag];
sk = sense[0];
asc = sense[1];
ascq = sense[2];
if (!ata_scsi_sense_is_valid(sk, asc, ascq)) {
ret = -EIO;
continue;
}
/* Set sense without also setting scsicmd->result */
scsi_build_sense_buffer(dev->flags & ATA_DFLAG_D_SENSE,
qc->scsicmd->sense_buffer, sk,
asc, ascq);
qc->flags |= ATA_QCFLAG_SENSE_VALID;
/*
* If we have sense data, call scsi_check_sense() in order to
* set the correct SCSI ML byte (if any). No point in checking
* the return value, since the command has already completed
* successfully.
*/
scsi_check_sense(qc->scsicmd);
}
return ret;
}
EXPORT_SYMBOL_GPL(ata_eh_read_sense_success_ncq_log);
/**
* ata_eh_analyze_ncq_error - analyze NCQ error
* @link: ATA link to analyze NCQ error for
*
* Read log page 10h, determine the offending qc and acquire
* error status TF. For NCQ device errors, all LLDDs have to do
 * is set AC_ERR_DEV in ehi->err_mask. This function takes
* care of the rest.
*
* LOCKING:
* Kernel thread context (may sleep).
*/
void ata_eh_analyze_ncq_error(struct ata_link *link)
{
struct ata_port *ap = link->ap;
struct ata_eh_context *ehc = &link->eh_context;
struct ata_device *dev = link->device;
struct ata_queued_cmd *qc;
struct ata_taskfile tf;
int tag, rc;
/* if frozen, we can't do much */
if (ata_port_is_frozen(ap))
return;
/* is it NCQ device error? */
if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
return;
/* has LLDD analyzed already? */
ata_qc_for_each_raw(ap, qc, tag) {
if (!(qc->flags & ATA_QCFLAG_EH))
continue;
if (qc->err_mask)
return;
}
/* okay, this error is ours */
memset(&tf, 0, sizeof(tf));
rc = ata_eh_read_log_10h(dev, &tag, &tf);
if (rc) {
ata_link_err(link, "failed to read log page 10h (errno=%d)\n",
rc);
return;
}
if (!(link->sactive & (1 << tag))) {
ata_link_err(link, "log page 10h reported inactive tag %d\n",
tag);
return;
}
/* we've got the perpetrator, condemn it */
qc = __ata_qc_from_tag(ap, tag);
memcpy(&qc->result_tf, &tf, sizeof(tf));
qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
/*
* If the device supports NCQ autosense, ata_eh_read_log_10h() will have
* stored the sense data in qc->result_tf.auxiliary.
*/
if (qc->result_tf.auxiliary) {
char sense_key, asc, ascq;
sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
asc = (qc->result_tf.auxiliary >> 8) & 0xff;
ascq = qc->result_tf.auxiliary & 0xff;
if (ata_scsi_sense_is_valid(sense_key, asc, ascq)) {
ata_scsi_set_sense(dev, qc->scsicmd, sense_key, asc,
ascq);
ata_scsi_set_sense_information(dev, qc->scsicmd,
&qc->result_tf);
qc->flags |= ATA_QCFLAG_SENSE_VALID;
}
}
ata_qc_for_each_raw(ap, qc, tag) {
if (!(qc->flags & ATA_QCFLAG_EH) ||
qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD ||
ata_dev_phys_link(qc->dev) != link)
continue;
/* Skip the single QC which caused the NCQ error. */
if (qc->err_mask)
continue;
/*
* For SATA, the STATUS and ERROR fields are shared for all NCQ
* commands that were completed with the same SDB FIS.
* Therefore, we have to clear the ATA_ERR bit for all QCs
* except the one that caused the NCQ error.
*/
qc->result_tf.status &= ~ATA_ERR;
qc->result_tf.error = 0;
/*
		 * If we get an NCQ error, that means that a single command was
		 * aborted. All other failed commands for our link should be
		 * retried and have no business going through further scrutiny
* by ata_eh_link_autopsy().
*/
qc->flags |= ATA_QCFLAG_RETRY;
}
ehc->i.err_mask &= ~AC_ERR_DEV;
}
EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
| linux-master | drivers/ata/libata-sata.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Promise PATA TX2/TX4/TX2000/133 IDE driver for pdc20268 to pdc20277.
*
* Ported to libata by:
* Albert Lee <[email protected]> IBM Corporation
*
* Copyright (C) 1998-2002 Andre Hedrick <[email protected]>
* Portions Copyright (C) 1999 Promise Technology, Inc.
*
* Author: Frank Tiernan ([email protected])
* Released under terms of General Public License
*
* libata documentation is available via 'make {ps|pdf}docs',
* as Documentation/driver-api/libata.rst
*
* Hardware information only available under NDA.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/ktime.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
#define DRV_NAME "pata_pdc2027x"
#define DRV_VERSION "1.0"
enum {
PDC_MMIO_BAR = 5,
PDC_UDMA_100 = 0,
PDC_UDMA_133 = 1,
PDC_100_MHZ = 100000000,
PDC_133_MHZ = 133333333,
PDC_SYS_CTL = 0x1100,
PDC_ATA_CTL = 0x1104,
PDC_GLOBAL_CTL = 0x1108,
PDC_CTCR0 = 0x110C,
PDC_CTCR1 = 0x1110,
PDC_BYTE_COUNT = 0x1120,
PDC_PLL_CTL = 0x1202,
};
static int pdc2027x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM_SLEEP
static int pdc2027x_reinit_one(struct pci_dev *pdev);
#endif
static int pdc2027x_prereset(struct ata_link *link, unsigned long deadline);
static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev);
static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev);
static int pdc2027x_check_atapi_dma(struct ata_queued_cmd *qc);
static unsigned int pdc2027x_mode_filter(struct ata_device *adev, unsigned int mask);
static int pdc2027x_cable_detect(struct ata_port *ap);
static int pdc2027x_set_mode(struct ata_link *link, struct ata_device **r_failed);
/*
* ATA Timing Tables based on 133MHz controller clock.
 * These tables are only used when the controller is running on the 133MHz
 * clock. When the controller runs on the 100MHz clock, the ASIC hardware
 * sets the timing registers automatically when the "set features" command
 * is issued to the device. However, if the controller clock is 133MHz,
* the following tables must be used.
*/
static const struct pdc2027x_pio_timing {
u8 value0, value1, value2;
} pdc2027x_pio_timing_tbl[] = {
{ 0xfb, 0x2b, 0xac }, /* PIO mode 0 */
{ 0x46, 0x29, 0xa4 }, /* PIO mode 1 */
{ 0x23, 0x26, 0x64 }, /* PIO mode 2 */
{ 0x27, 0x0d, 0x35 }, /* PIO mode 3, IORDY on, Prefetch off */
{ 0x23, 0x09, 0x25 }, /* PIO mode 4, IORDY on, Prefetch off */
};
static const struct pdc2027x_mdma_timing {
u8 value0, value1;
} pdc2027x_mdma_timing_tbl[] = {
{ 0xdf, 0x5f }, /* MDMA mode 0 */
{ 0x6b, 0x27 }, /* MDMA mode 1 */
{ 0x69, 0x25 }, /* MDMA mode 2 */
};
static const struct pdc2027x_udma_timing {
u8 value0, value1, value2;
} pdc2027x_udma_timing_tbl[] = {
{ 0x4a, 0x0f, 0xd5 }, /* UDMA mode 0 */
{ 0x3a, 0x0a, 0xd0 }, /* UDMA mode 1 */
{ 0x2a, 0x07, 0xcd }, /* UDMA mode 2 */
{ 0x1a, 0x05, 0xcd }, /* UDMA mode 3 */
{ 0x1a, 0x03, 0xcd }, /* UDMA mode 4 */
{ 0x1a, 0x02, 0xcb }, /* UDMA mode 5 */
{ 0x1a, 0x01, 0xcb }, /* UDMA mode 6 */
};
static const struct pci_device_id pdc2027x_pci_tbl[] = {
{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20268), PDC_UDMA_100 },
{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20269), PDC_UDMA_133 },
{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20270), PDC_UDMA_100 },
{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20271), PDC_UDMA_133 },
{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20275), PDC_UDMA_133 },
{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20276), PDC_UDMA_133 },
{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20277), PDC_UDMA_133 },
{ } /* terminate list */
};
static struct pci_driver pdc2027x_pci_driver = {
.name = DRV_NAME,
.id_table = pdc2027x_pci_tbl,
.probe = pdc2027x_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = pdc2027x_reinit_one,
#endif
};
static const struct scsi_host_template pdc2027x_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static struct ata_port_operations pdc2027x_pata100_ops = {
.inherits = &ata_bmdma_port_ops,
.check_atapi_dma = pdc2027x_check_atapi_dma,
.cable_detect = pdc2027x_cable_detect,
.prereset = pdc2027x_prereset,
};
static struct ata_port_operations pdc2027x_pata133_ops = {
.inherits = &pdc2027x_pata100_ops,
.mode_filter = pdc2027x_mode_filter,
.set_piomode = pdc2027x_set_piomode,
.set_dmamode = pdc2027x_set_dmamode,
.set_mode = pdc2027x_set_mode,
};
static struct ata_port_info pdc2027x_port_info[] = {
/* PDC_UDMA_100 */
{
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
.port_ops = &pdc2027x_pata100_ops,
},
/* PDC_UDMA_133 */
{
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &pdc2027x_pata133_ops,
},
};
MODULE_AUTHOR("Andre Hedrick, Frank Tiernan, Albert Lee");
MODULE_DESCRIPTION("libata driver module for Promise PDC20268 to PDC20277");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, pdc2027x_pci_tbl);
/**
* port_mmio - Get the MMIO address of PDC2027x extended registers
* @ap: Port
* @offset: offset from mmio base
*/
static inline void __iomem *port_mmio(struct ata_port *ap, unsigned int offset)
{
return ap->host->iomap[PDC_MMIO_BAR] + ap->port_no * 0x100 + offset;
}
/**
* dev_mmio - Get the MMIO address of PDC2027x extended registers
* @ap: Port
* @adev: device
* @offset: offset from mmio base
*/
static inline void __iomem *dev_mmio(struct ata_port *ap, struct ata_device *adev, unsigned int offset)
{
u8 adj = (adev->devno) ? 0x08 : 0x00;
return port_mmio(ap, offset) + adj;
}
/**
* pdc2027x_cable_detect - Probe host controller cable detect info
* @ap: Port for which cable detect info is desired
*
* Read 80c cable indicator from Promise extended register.
* This register is latched when the system is reset.
*
* LOCKING:
* None (inherited from caller).
*/
static int pdc2027x_cable_detect(struct ata_port *ap)
{
u32 cgcr;
/* check cable detect results */
cgcr = ioread32(port_mmio(ap, PDC_GLOBAL_CTL));
if (cgcr & (1 << 26))
goto cbl40;
ata_port_dbg(ap, "No cable or 80-conductor cable\n");
return ATA_CBL_PATA80;
cbl40:
	ata_port_info(ap, DRV_NAME ": 40-conductor cable detected\n");
return ATA_CBL_PATA40;
}
/**
* pdc2027x_port_enabled - Check PDC ATA control register to see whether the port is enabled.
* @ap: Port to check
*/
static inline int pdc2027x_port_enabled(struct ata_port *ap)
{
return ioread8(port_mmio(ap, PDC_ATA_CTL)) & 0x02;
}
/**
* pdc2027x_prereset - prereset for PATA host controller
* @link: Target link
* @deadline: deadline jiffies for the operation
*
 * Probe init, including cable detection.
*
* LOCKING:
* None (inherited from caller).
*/
static int pdc2027x_prereset(struct ata_link *link, unsigned long deadline)
{
/* Check whether port enabled */
if (!pdc2027x_port_enabled(link->ap))
return -ENOENT;
return ata_sff_prereset(link, deadline);
}
/**
* pdc2027x_mode_filter - mode selection filter
* @adev: ATA device
* @mask: list of modes proposed
*
* Block UDMA on devices that cause trouble with this controller.
*/
static unsigned int pdc2027x_mode_filter(struct ata_device *adev, unsigned int mask)
{
unsigned char model_num[ATA_ID_PROD_LEN + 1];
struct ata_device *pair = ata_dev_pair(adev);
if (adev->class != ATA_DEV_ATA || adev->devno == 0 || pair == NULL)
return mask;
/* Check for slave of a Maxtor at UDMA6 */
ata_id_c_string(pair->id, model_num, ATA_ID_PROD,
ATA_ID_PROD_LEN + 1);
/* If the master is a maxtor in UDMA6 then the slave should not use UDMA 6 */
if (strstr(model_num, "Maxtor") == NULL && pair->dma_mode == XFER_UDMA_6)
mask &= ~ (1 << (6 + ATA_SHIFT_UDMA));
return mask;
}
/**
* pdc2027x_set_piomode - Initialize host controller PATA PIO timings
* @ap: Port to configure
 * @adev: Device to configure
*
* Set PIO mode for device.
*
* LOCKING:
* None (inherited from caller).
*/
static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
unsigned int pio = adev->pio_mode - XFER_PIO_0;
u32 ctcr0, ctcr1;
ata_port_dbg(ap, "adev->pio_mode[%X]\n", adev->pio_mode);
/* Sanity check */
if (pio > 4) {
ata_port_err(ap, "Unknown pio mode [%d] ignored\n", pio);
return;
}
/* Set the PIO timing registers using value table for 133MHz */
ata_port_dbg(ap, "Set pio regs... \n");
ctcr0 = ioread32(dev_mmio(ap, adev, PDC_CTCR0));
ctcr0 &= 0xffff0000;
ctcr0 |= pdc2027x_pio_timing_tbl[pio].value0 |
(pdc2027x_pio_timing_tbl[pio].value1 << 8);
iowrite32(ctcr0, dev_mmio(ap, adev, PDC_CTCR0));
ctcr1 = ioread32(dev_mmio(ap, adev, PDC_CTCR1));
ctcr1 &= 0x00ffffff;
ctcr1 |= (pdc2027x_pio_timing_tbl[pio].value2 << 24);
iowrite32(ctcr1, dev_mmio(ap, adev, PDC_CTCR1));
ata_port_dbg(ap, "Set to pio mode[%u] \n", pio);
}
/**
* pdc2027x_set_dmamode - Initialize host controller PATA UDMA timings
* @ap: Port to configure
 * @adev: Device to configure
*
* Set UDMA mode for device.
*
* LOCKING:
* None (inherited from caller).
*/
static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
unsigned int dma_mode = adev->dma_mode;
u32 ctcr0, ctcr1;
if ((dma_mode >= XFER_UDMA_0) &&
(dma_mode <= XFER_UDMA_6)) {
/* Set the UDMA timing registers with value table for 133MHz */
unsigned int udma_mode = dma_mode & 0x07;
if (dma_mode == XFER_UDMA_2) {
/*
* Turn off tHOLD.
* If tHOLD is '1', the hardware will add half clock for data hold time.
			 * This code segment seems to have no effect, as tHOLD will be overwritten below.
*/
ctcr1 = ioread32(dev_mmio(ap, adev, PDC_CTCR1));
iowrite32(ctcr1 & ~(1 << 7), dev_mmio(ap, adev, PDC_CTCR1));
}
ata_port_dbg(ap, "Set udma regs... \n");
ctcr1 = ioread32(dev_mmio(ap, adev, PDC_CTCR1));
ctcr1 &= 0xff000000;
ctcr1 |= pdc2027x_udma_timing_tbl[udma_mode].value0 |
(pdc2027x_udma_timing_tbl[udma_mode].value1 << 8) |
(pdc2027x_udma_timing_tbl[udma_mode].value2 << 16);
iowrite32(ctcr1, dev_mmio(ap, adev, PDC_CTCR1));
ata_port_dbg(ap, "Set to udma mode[%u] \n", udma_mode);
} else if ((dma_mode >= XFER_MW_DMA_0) &&
(dma_mode <= XFER_MW_DMA_2)) {
/* Set the MDMA timing registers with value table for 133MHz */
unsigned int mdma_mode = dma_mode & 0x07;
ata_port_dbg(ap, "Set mdma regs... \n");
ctcr0 = ioread32(dev_mmio(ap, adev, PDC_CTCR0));
ctcr0 &= 0x0000ffff;
ctcr0 |= (pdc2027x_mdma_timing_tbl[mdma_mode].value0 << 16) |
(pdc2027x_mdma_timing_tbl[mdma_mode].value1 << 24);
iowrite32(ctcr0, dev_mmio(ap, adev, PDC_CTCR0));
ata_port_dbg(ap, "Set to mdma mode[%u] \n", mdma_mode);
} else {
ata_port_err(ap, "Unknown dma mode [%u] ignored\n", dma_mode);
}
}
/**
* pdc2027x_set_mode - Set the timing registers back to correct values.
* @link: link to configure
* @r_failed: Returned device for failure
*
* The pdc2027x hardware will look at "SET FEATURES" and change the timing registers
 * automatically. The values set by the hardware might be incorrect when the 133MHz PLL is in use.
* This function overwrites the possibly incorrect values set by the hardware to be correct.
*/
static int pdc2027x_set_mode(struct ata_link *link, struct ata_device **r_failed)
{
struct ata_port *ap = link->ap;
struct ata_device *dev;
int rc;
rc = ata_do_set_mode(link, r_failed);
if (rc < 0)
return rc;
ata_for_each_dev(dev, link, ENABLED) {
pdc2027x_set_piomode(ap, dev);
/*
		 * Enable prefetch if the device supports PIO only.
*/
if (dev->xfer_shift == ATA_SHIFT_PIO) {
u32 ctcr1 = ioread32(dev_mmio(ap, dev, PDC_CTCR1));
ctcr1 |= (1 << 25);
iowrite32(ctcr1, dev_mmio(ap, dev, PDC_CTCR1));
ata_dev_dbg(dev, "Turn on prefetch\n");
} else {
pdc2027x_set_dmamode(ap, dev);
}
}
return 0;
}
/**
* pdc2027x_check_atapi_dma - Check whether ATAPI DMA can be supported for this command
* @qc: Metadata associated with taskfile to check
*
* LOCKING:
* None (inherited from caller).
*
* RETURNS: 0 when ATAPI DMA can be used
* 1 otherwise
*/
static int pdc2027x_check_atapi_dma(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *cmd = qc->scsicmd;
u8 *scsicmd = cmd->cmnd;
int rc = 1; /* atapi dma off by default */
/*
* This workaround is from Promise's GPL driver.
* If ATAPI DMA is used for commands not in the
* following white list, say MODE_SENSE and REQUEST_SENSE,
* pdc2027x might hit the irq lost problem.
*/
switch (scsicmd[0]) {
case READ_10:
case WRITE_10:
case READ_12:
case WRITE_12:
case READ_6:
case WRITE_6:
case 0xad: /* READ_DVD_STRUCTURE */
case 0xbe: /* READ_CD */
/* ATAPI DMA is ok */
rc = 0;
break;
default:
;
}
return rc;
}
/**
* pdc_read_counter - Read the ctr counter
* @host: target ATA host
*/
static long pdc_read_counter(struct ata_host *host)
{
void __iomem *mmio_base = host->iomap[PDC_MMIO_BAR];
long counter;
int retry = 1;
u32 bccrl, bccrh, bccrlv, bccrhv;
retry:
bccrl = ioread32(mmio_base + PDC_BYTE_COUNT) & 0x7fff;
bccrh = ioread32(mmio_base + PDC_BYTE_COUNT + 0x100) & 0x7fff;
/* Read the counter values again for verification */
bccrlv = ioread32(mmio_base + PDC_BYTE_COUNT) & 0x7fff;
bccrhv = ioread32(mmio_base + PDC_BYTE_COUNT + 0x100) & 0x7fff;
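	/*
	 * The 30-bit counter is exposed as two 15-bit halves: the low half
	 * at PDC_BYTE_COUNT and the high half at the second channel's
	 * register, 0x100 above it.
	 */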
counter = (bccrh << 15) | bccrl;
dev_dbg(host->dev, "bccrh [%X] bccrl [%X]\n", bccrh, bccrl);
dev_dbg(host->dev, "bccrhv[%X] bccrlv[%X]\n", bccrhv, bccrlv);
/*
	 * The 30-bit decreasing counter is read in 2 pieces.
	 * An incorrect value may be read when both bccrh and bccrl are changing.
	 * Ex. When 7900 decreases to 78FF, the wrong value 7800 might be read.
*/
if (retry && !(bccrh == bccrhv && bccrl >= bccrlv)) {
retry--;
dev_dbg(host->dev, "rereading counter\n");
goto retry;
}
return counter;
}
/**
* pdc_adjust_pll - Adjust the PLL input clock in Hz.
*
* @host: target ATA host
 * @pll_clock: The PLL input clock in Hz
* @board_idx: board identifier
*/
static void pdc_adjust_pll(struct ata_host *host, long pll_clock, unsigned int board_idx)
{
void __iomem *mmio_base = host->iomap[PDC_MMIO_BAR];
u16 pll_ctl;
long pll_clock_khz = pll_clock / 1000;
	long pout_required = board_idx ? PDC_133_MHZ : PDC_100_MHZ;
long ratio = pout_required / pll_clock_khz;
int F, R;
/* Sanity check */
if (unlikely(pll_clock_khz < 5000L || pll_clock_khz > 70000L)) {
dev_err(host->dev, "Invalid PLL input clock %ldkHz, give up!\n",
pll_clock_khz);
return;
}
dev_dbg(host->dev, "pout_required is %ld\n", pout_required);
/* Show the current clock value of PLL control register
* (maybe already configured by the firmware)
*/
pll_ctl = ioread16(mmio_base + PDC_PLL_CTL);
dev_dbg(host->dev, "pll_ctl[%X]\n", pll_ctl);
/*
* Calculate the ratio of F, R and OD
* POUT = (F + 2) / (( R + 2) * NO)
*/
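	/*
	 * Illustrative example (not from any datasheet): with a 16.667MHz
	 * input (pll_clock_khz = 16667) and a 133MHz target, ratio = 8000,
	 * so R = 0x0d is chosen below and F = (8000 * 15) / 1000 - 2 = 118,
	 * giving POUT = (118 + 2) / ((13 + 2) * 1) = 8 times the input,
	 * i.e. ~133.3MHz.
	 */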
if (ratio < 8600L) { /* 8.6x */
/* Using NO = 0x01, R = 0x0D */
R = 0x0d;
} else if (ratio < 12900L) { /* 12.9x */
/* Using NO = 0x01, R = 0x08 */
R = 0x08;
} else if (ratio < 16100L) { /* 16.1x */
/* Using NO = 0x01, R = 0x06 */
R = 0x06;
} else if (ratio < 64000L) { /* 64x */
R = 0x00;
} else {
/* Invalid ratio */
dev_err(host->dev, "Invalid ratio %ld, give up!\n", ratio);
return;
}
F = (ratio * (R+2)) / 1000 - 2;
if (unlikely(F < 0 || F > 127)) {
/* Invalid F */
dev_err(host->dev, "F[%d] invalid!\n", F);
return;
}
dev_dbg(host->dev, "F[%d] R[%d] ratio*1000[%ld]\n", F, R, ratio);
pll_ctl = (R << 8) | F;
dev_dbg(host->dev, "Writing pll_ctl[%X]\n", pll_ctl);
iowrite16(pll_ctl, mmio_base + PDC_PLL_CTL);
ioread16(mmio_base + PDC_PLL_CTL); /* flush */
/* Wait the PLL circuit to be stable */
msleep(30);
/*
* Show the current clock value of PLL control register
* (maybe configured by the firmware)
*/
pll_ctl = ioread16(mmio_base + PDC_PLL_CTL);
dev_dbg(host->dev, "pll_ctl[%X]\n", pll_ctl);
return;
}
/**
* pdc_detect_pll_input_clock - Detect the PLL input clock in Hz.
* @host: target ATA host
 *
 * Ex. 16949000 on a 33MHz PCI bus for pdc20275 (half of the PCI clock).
*/
static long pdc_detect_pll_input_clock(struct ata_host *host)
{
void __iomem *mmio_base = host->iomap[PDC_MMIO_BAR];
u32 scr;
long start_count, end_count;
ktime_t start_time, end_time;
long pll_clock, usec_elapsed;
/* Start the test mode */
scr = ioread32(mmio_base + PDC_SYS_CTL);
dev_dbg(host->dev, "scr[%X]\n", scr);
iowrite32(scr | (0x01 << 14), mmio_base + PDC_SYS_CTL);
ioread32(mmio_base + PDC_SYS_CTL); /* flush */
/* Read current counter value */
start_count = pdc_read_counter(host);
start_time = ktime_get();
/* Let the counter run for 100 ms. */
msleep(100);
/* Read the counter values again */
end_count = pdc_read_counter(host);
end_time = ktime_get();
/* Stop the test mode */
scr = ioread32(mmio_base + PDC_SYS_CTL);
dev_dbg(host->dev, "scr[%X]\n", scr);
iowrite32(scr & ~(0x01 << 14), mmio_base + PDC_SYS_CTL);
ioread32(mmio_base + PDC_SYS_CTL); /* flush */
/* calculate the input clock in Hz */
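	/*
	 * Example: if the counter dropped by 1694900 ticks in 100000 usec,
	 * pll_clock = 1694900 / 100 * (100000000 / 100000) = 16949000 Hz,
	 * i.e. roughly half of a nominal 33MHz PCI clock.
	 */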
usec_elapsed = (long) ktime_us_delta(end_time, start_time);
pll_clock = ((start_count - end_count) & 0x3fffffff) / 100 *
(100000000 / usec_elapsed);
dev_dbg(host->dev, "start[%ld] end[%ld] PLL input clock[%ld]HZ\n",
start_count, end_count, pll_clock);
return pll_clock;
}
/**
* pdc_hardware_init - Initialize the hardware.
* @host: target ATA host
* @board_idx: board identifier
*/
static void pdc_hardware_init(struct ata_host *host, unsigned int board_idx)
{
long pll_clock;
/*
* Detect PLL input clock rate.
	 * On some systems the PCI bus runs at a non-standard clock rate,
	 * e.g. 25MHz or 40MHz, so we have to adjust the cycle_time.
	 * The pdc20275 controller employs a PLL circuit to help correct the timing register settings.
*/
pll_clock = pdc_detect_pll_input_clock(host);
dev_info(host->dev, "PLL input clock %ld kHz\n", pll_clock/1000);
/* Adjust PLL control register */
pdc_adjust_pll(host, pll_clock, board_idx);
}
/**
* pdc_ata_setup_port - setup the mmio address
* @port: ata ioports to setup
* @base: base address
*/
static void pdc_ata_setup_port(struct ata_ioports *port, void __iomem *base)
{
port->cmd_addr =
port->data_addr = base;
port->feature_addr =
port->error_addr = base + 0x05;
port->nsect_addr = base + 0x0a;
port->lbal_addr = base + 0x0f;
port->lbam_addr = base + 0x10;
port->lbah_addr = base + 0x15;
port->device_addr = base + 0x1a;
port->command_addr =
port->status_addr = base + 0x1f;
port->altstatus_addr =
port->ctl_addr = base + 0x81a;
}
/**
* pdc2027x_init_one - PCI probe function
 * Called when an instance of the PCI adapter is inserted.
 * This function checks whether the hardware is supported,
 * initializes the hardware and registers an instance of ata_host
 * with libata. (implements struct pci_driver.probe())
*
* @pdev: instance of pci_dev found
* @ent: matching entry in the id_tbl[]
*/
static int pdc2027x_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
static const unsigned long cmd_offset[] = { 0x17c0, 0x15c0 };
static const unsigned long bmdma_offset[] = { 0x1000, 0x1008 };
unsigned int board_idx = (unsigned int) ent->driver_data;
const struct ata_port_info *ppi[] =
{ &pdc2027x_port_info[board_idx], NULL };
struct ata_host *host;
void __iomem *mmio_base;
int i, rc;
ata_print_version_once(&pdev->dev, DRV_VERSION);
/* alloc host */
host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
if (!host)
return -ENOMEM;
/* acquire resources and fill host */
rc = pcim_enable_device(pdev);
if (rc)
return rc;
rc = pcim_iomap_regions(pdev, 1 << PDC_MMIO_BAR, DRV_NAME);
if (rc)
return rc;
host->iomap = pcim_iomap_table(pdev);
rc = dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
if (rc)
return rc;
mmio_base = host->iomap[PDC_MMIO_BAR];
for (i = 0; i < 2; i++) {
struct ata_port *ap = host->ports[i];
pdc_ata_setup_port(&ap->ioaddr, mmio_base + cmd_offset[i]);
ap->ioaddr.bmdma_addr = mmio_base + bmdma_offset[i];
ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio");
ata_port_pbar_desc(ap, PDC_MMIO_BAR, cmd_offset[i], "cmd");
}
//pci_enable_intx(pdev);
/* initialize adapter */
pdc_hardware_init(host, board_idx);
pci_set_master(pdev);
return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &pdc2027x_sht);
}
#ifdef CONFIG_PM_SLEEP
static int pdc2027x_reinit_one(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
unsigned int board_idx;
int rc;
rc = ata_pci_device_do_resume(pdev);
if (rc)
return rc;
if (pdev->device == PCI_DEVICE_ID_PROMISE_20268 ||
pdev->device == PCI_DEVICE_ID_PROMISE_20270)
board_idx = PDC_UDMA_100;
else
board_idx = PDC_UDMA_133;
pdc_hardware_init(host, board_idx);
ata_host_resume(host);
return 0;
}
#endif
module_pci_driver(pdc2027x_pci_driver);
| linux-master | drivers/ata/pata_pdc2027x.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* pata_artop.c - ARTOP ATA controller driver
*
* (C) 2006 Red Hat
* (C) 2007,2011 Bartlomiej Zolnierkiewicz
*
* Based in part on drivers/ide/pci/aec62xx.c
* Copyright (C) 1999-2002 Andre Hedrick <[email protected]>
* 865/865R fixes for Macintosh card version from a patch to the old
* driver by Thibaut VARENE <[email protected]>
* When setting the PCI latency we must set 0x80 or higher for burst
 * performance. Alessandro Zummo <[email protected]>
*
* TODO
* Investigate no_dsc on 850R
* Clock detect
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/ata.h>
#define DRV_NAME "pata_artop"
#define DRV_VERSION "0.4.8"
/*
 * The ARTOP has 33 MHz and "over clocked" timing tables. Until we
 * get PCI bus speed functionality we leave this as 0. It's a variable
* for when we get the functionality and also for folks wanting to
* test stuff.
*/
static int clock = 0;
/**
* artop62x0_pre_reset - probe begin
* @link: link
* @deadline: deadline jiffies for the operation
*
* Nothing complicated needed here.
*/
static int artop62x0_pre_reset(struct ata_link *link, unsigned long deadline)
{
static const struct pci_bits artop_enable_bits[] = {
{ 0x4AU, 1U, 0x02UL, 0x02UL }, /* port 0 */
{ 0x4AU, 1U, 0x04UL, 0x04UL }, /* port 1 */
};
struct ata_port *ap = link->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
/* Odd numbered device ids are the units with enable bits. */
if ((pdev->device & 1) &&
!pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
return -ENOENT;
return ata_sff_prereset(link, deadline);
}
/**
* artop6260_cable_detect - identify cable type
* @ap: Port
*
* Identify the cable type for the ARTOP interface in question
*/
static int artop6260_cable_detect(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u8 tmp;
pci_read_config_byte(pdev, 0x49, &tmp);
if (tmp & (1 << ap->port_no))
return ATA_CBL_PATA40;
return ATA_CBL_PATA80;
}
/**
* artop6210_load_piomode - Load a set of PATA PIO timings
* @ap: Port whose timings we are configuring
* @adev: Device
* @pio: PIO mode
*
* Set PIO mode for device, in host controller PCI config space. This
* is used both to set PIO timings in PIO mode and also to set the
* matching PIO clocking for UDMA, as well as the MWDMA timings.
*
* LOCKING:
* None (inherited from caller).
*/
static void artop6210_load_piomode(struct ata_port *ap, struct ata_device *adev, unsigned int pio)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int dn = adev->devno + 2 * ap->port_no;
static const u16 timing[2][5] = {
{ 0x0000, 0x000A, 0x0008, 0x0303, 0x0301 },
{ 0x0700, 0x070A, 0x0708, 0x0403, 0x0401 }
};
/* Load the PIO timing active/recovery bits */
pci_write_config_word(pdev, 0x40 + 2 * dn, timing[clock][pio]);
}
/**
* artop6210_set_piomode - Initialize host controller PATA PIO timings
* @ap: Port whose timings we are configuring
* @adev: Device we are configuring
*
* Set PIO mode for device, in host controller PCI config space. For
* ARTOP we must also clear the UDMA bits if we are not doing UDMA. In
* the event UDMA is used the later call to set_dmamode will set the
* bits as required.
*
* LOCKING:
* None (inherited from caller).
*/
static void artop6210_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int dn = adev->devno + 2 * ap->port_no;
u8 ultra;
artop6210_load_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);
/* Clear the UDMA mode bits (set_dmamode will redo this if needed) */
pci_read_config_byte(pdev, 0x54, &ultra);
ultra &= ~(3 << (2 * dn));
pci_write_config_byte(pdev, 0x54, ultra);
}
/**
* artop6260_load_piomode - Initialize host controller PATA PIO timings
* @ap: Port whose timings we are configuring
* @adev: Device we are configuring
* @pio: PIO mode
*
* Set PIO mode for device, in host controller PCI config space. The
* ARTOP6260 and relatives store the timing data differently.
*
* LOCKING:
* None (inherited from caller).
*/
static void artop6260_load_piomode (struct ata_port *ap, struct ata_device *adev, unsigned int pio)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int dn = adev->devno + 2 * ap->port_no;
static const u8 timing[2][5] = {
{ 0x00, 0x0A, 0x08, 0x33, 0x31 },
{ 0x70, 0x7A, 0x78, 0x43, 0x41 }
};
/* Load the PIO timing active/recovery bits */
pci_write_config_byte(pdev, 0x40 + dn, timing[clock][pio]);
}
/**
* artop6260_set_piomode - Initialize host controller PATA PIO timings
* @ap: Port whose timings we are configuring
* @adev: Device we are configuring
*
* Set PIO mode for device, in host controller PCI config space. For
* ARTOP we must also clear the UDMA bits if we are not doing UDMA. In
* the event UDMA is used the later call to set_dmamode will set the
* bits as required.
*
* LOCKING:
* None (inherited from caller).
*/
static void artop6260_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u8 ultra;
artop6260_load_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);
/* Clear the UDMA mode bits (set_dmamode will redo this if needed) */
pci_read_config_byte(pdev, 0x44 + ap->port_no, &ultra);
ultra &= ~(7 << (4 * adev->devno)); /* One nibble per drive */
pci_write_config_byte(pdev, 0x44 + ap->port_no, ultra);
}
/**
* artop6210_set_dmamode - Initialize host controller PATA PIO timings
* @ap: Port whose timings we are configuring
* @adev: Device whose timings we are configuring
*
* Set DMA mode for device, in host controller PCI config space.
*
* LOCKING:
* None (inherited from caller).
*/
static void artop6210_set_dmamode (struct ata_port *ap, struct ata_device *adev)
{
unsigned int pio;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int dn = adev->devno + 2 * ap->port_no;
u8 ultra;
if (adev->dma_mode == XFER_MW_DMA_0)
pio = 1;
else
pio = 4;
/* Load the PIO timing active/recovery bits */
artop6210_load_piomode(ap, adev, pio);
pci_read_config_byte(pdev, 0x54, &ultra);
ultra &= ~(3 << (2 * dn));
/* Add ultra DMA bits if in UDMA mode */
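	/*
	 * Each drive has a 2-bit field in register 0x54. With the 33MHz
	 * table (clock == 0), UDMA0..2 map to field values 1..3; a value of
	 * 0 apparently leaves UDMA disabled (set_piomode clears the field),
	 * hence the clamp to 1 below.
	 */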
if (adev->dma_mode >= XFER_UDMA_0) {
u8 mode = (adev->dma_mode - XFER_UDMA_0) + 1 - clock;
if (mode == 0)
mode = 1;
ultra |= (mode << (2 * dn));
}
pci_write_config_byte(pdev, 0x54, ultra);
}
/**
* artop6260_set_dmamode - Initialize host controller PATA PIO timings
* @ap: Port whose timings we are configuring
* @adev: Device we are configuring
*
* Set DMA mode for device, in host controller PCI config space. The
* ARTOP6260 and relatives store the timing data differently.
*
* LOCKING:
* None (inherited from caller).
*/
static void artop6260_set_dmamode (struct ata_port *ap, struct ata_device *adev)
{
unsigned int pio;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u8 ultra;
if (adev->dma_mode == XFER_MW_DMA_0)
pio = 1;
else
pio = 4;
/* Load the PIO timing active/recovery bits */
artop6260_load_piomode(ap, adev, pio);
/* Add ultra DMA bits if in UDMA mode */
pci_read_config_byte(pdev, 0x44 + ap->port_no, &ultra);
ultra &= ~(7 << (4 * adev->devno)); /* One nibble per drive */
if (adev->dma_mode >= XFER_UDMA_0) {
u8 mode = adev->dma_mode - XFER_UDMA_0 + 1 - clock;
if (mode == 0)
mode = 1;
ultra |= (mode << (4 * adev->devno));
}
pci_write_config_byte(pdev, 0x44 + ap->port_no, ultra);
}
/**
* artop6210_qc_defer - implement serialization
* @qc: command
*
* Issue commands per host on this chip.
*/
static int artop6210_qc_defer(struct ata_queued_cmd *qc)
{
struct ata_host *host = qc->ap->host;
struct ata_port *alt = host->ports[1 ^ qc->ap->port_no];
int rc;
/* First apply the usual rules */
rc = ata_std_qc_defer(qc);
if (rc != 0)
return rc;
/* Now apply serialization rules. Only allow a command if the
other channel state machine is idle */
if (alt && alt->qc_active)
return ATA_DEFER_PORT;
return 0;
}
static const struct scsi_host_template artop_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static struct ata_port_operations artop6210_ops = {
.inherits = &ata_bmdma_port_ops,
.cable_detect = ata_cable_40wire,
.set_piomode = artop6210_set_piomode,
.set_dmamode = artop6210_set_dmamode,
.prereset = artop62x0_pre_reset,
.qc_defer = artop6210_qc_defer,
};
static struct ata_port_operations artop6260_ops = {
.inherits = &ata_bmdma_port_ops,
.cable_detect = artop6260_cable_detect,
.set_piomode = artop6260_set_piomode,
.set_dmamode = artop6260_set_dmamode,
.prereset = artop62x0_pre_reset,
};
static void atp8xx_fixup(struct pci_dev *pdev)
{
u8 reg;
switch (pdev->device) {
case 0x0005:
/* BIOS may have left us in UDMA, clear it before libata probe */
pci_write_config_byte(pdev, 0x54, 0);
break;
case 0x0008:
case 0x0009:
		/* Mac systems come up with some registers not set the way
		   we need them */
/* Clear reset & test bits */
		pci_read_config_byte(pdev, 0x49, &reg);
pci_write_config_byte(pdev, 0x49, reg & ~0x30);
/* PCI latency must be > 0x80 for burst mode, tweak it
* if required.
*/
		pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &reg);
if (reg <= 0x80)
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x90);
/* Enable IRQ output and burst mode */
		pci_read_config_byte(pdev, 0x4a, &reg);
pci_write_config_byte(pdev, 0x4a, (reg & ~0x01) | 0x80);
break;
}
}
/**
* artop_init_one - Register ARTOP ATA PCI device with kernel services
* @pdev: PCI device to register
* @id: PCI device ID
*
* Called from kernel PCI layer.
*
* LOCKING:
* Inherited from PCI layer (may sleep).
*
* RETURNS:
* Zero on success, or -ERRNO value.
*/
static int artop_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
{
static const struct ata_port_info info_6210 = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA2,
.port_ops = &artop6210_ops,
};
static const struct ata_port_info info_626x = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA4,
.port_ops = &artop6260_ops,
};
static const struct ata_port_info info_628x = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
.port_ops = &artop6260_ops,
};
static const struct ata_port_info info_628x_fast = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &artop6260_ops,
};
const struct ata_port_info *ppi[] = { NULL, NULL };
int rc;
ata_print_version_once(&pdev->dev, DRV_VERSION);
rc = pcim_enable_device(pdev);
if (rc)
return rc;
switch (id->driver_data) {
case 0: /* 6210 variant */
ppi[0] = &info_6210;
break;
case 1: /* 6260 */
ppi[0] = &info_626x;
break;
case 2: /* 6280 or 6280 + fast */
if (inb(pci_resource_start(pdev, 4)) & 0x10)
ppi[0] = &info_628x_fast;
else
ppi[0] = &info_628x;
break;
}
BUG_ON(ppi[0] == NULL);
atp8xx_fixup(pdev);
return ata_pci_bmdma_init_one(pdev, ppi, &artop_sht, NULL, 0);
}
static const struct pci_device_id artop_pci_tbl[] = {
{ PCI_VDEVICE(ARTOP, 0x0005), 0 },
{ PCI_VDEVICE(ARTOP, 0x0006), 1 },
{ PCI_VDEVICE(ARTOP, 0x0007), 1 },
{ PCI_VDEVICE(ARTOP, 0x0008), 2 },
{ PCI_VDEVICE(ARTOP, 0x0009), 2 },
{ } /* terminate list */
};
#ifdef CONFIG_PM_SLEEP
static int atp8xx_reinit_one(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
int rc;
rc = ata_pci_device_do_resume(pdev);
if (rc)
return rc;
atp8xx_fixup(pdev);
ata_host_resume(host);
return 0;
}
#endif
static struct pci_driver artop_pci_driver = {
.name = DRV_NAME,
.id_table = artop_pci_tbl,
.probe = artop_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = atp8xx_reinit_one,
#endif
};
module_pci_driver(artop_pci_driver);
MODULE_AUTHOR("Alan Cox, Bartlomiej Zolnierkiewicz");
MODULE_DESCRIPTION("SCSI low-level driver for ARTOP PATA");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, artop_pci_tbl);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_artop.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* pata-legacy.c - Legacy port PATA/SATA controller driver.
* Copyright 2005/2006 Red Hat, all rights reserved.
*
* An ATA driver for the legacy ATA ports.
*
* Data Sources:
* Opti 82C465/82C611 support: Data sheets at opti-inc.com
* HT6560 series:
* Promise 20230/20620:
* http://www.ryston.cz/petr/vlb/pdc20230b.html
* http://www.ryston.cz/petr/vlb/pdc20230c.html
* http://www.ryston.cz/petr/vlb/pdc20630.html
* QDI65x0:
* http://www.ryston.cz/petr/vlb/qd6500.html
* http://www.ryston.cz/petr/vlb/qd6580.html
*
* QDI65x0 probe code based on drivers/ide/legacy/qd65xx.c
* Rewritten from the work of Colten Edwards <[email protected]> by
* Samuel Thibault <[email protected]>
*
* Unsupported but docs exist:
* Appian/Adaptec AIC25VL01/Cirrus Logic PD7220
*
* This driver handles legacy (that is "ISA/VLB side") IDE ports found
 * on PC class systems. There are three hybrid devices that are exceptions:
* The Cyrix 5510/5520 where a pre SFF ATA device is on the bridge and
* the MPIIX where the tuning is PCI side but the IDE is "ISA side".
*
* Specific support is included for the ht6560a/ht6560b/opti82c611a/
* opti82c465mv/promise 20230c/20630/qdi65x0/winbond83759A
*
* Support for the Winbond 83759A when operating in advanced mode.
* Multichip mode is not currently supported.
*
* Use the autospeed and pio_mask options with:
* Appian ADI/2 aka CLPD7220 or AIC25VL01.
* Use the jumpers, autospeed and set pio_mask to the mode on the jumpers with
* Goldstar GM82C711, PIC-1288A-125, UMC 82C871F, Winbond W83759,
* Winbond W83759A, Promise PDC20230-B
*
* For now use autospeed and pio_mask as above with the W83759A. This may
* change.
*/
#include <linux/async.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/ata.h>
#include <linux/libata.h>
#include <linux/platform_device.h>
#define DRV_NAME "pata_legacy"
#define DRV_VERSION "0.6.5"
#define NR_HOST 6
static int all;
module_param(all, int, 0444);
MODULE_PARM_DESC(all,
"Set to probe unclaimed pri/sec ISA port ranges even if PCI");
static int probe_all;
module_param(probe_all, int, 0);
MODULE_PARM_DESC(probe_all,
"Set to probe tertiary+ ISA port ranges even if PCI");
static int probe_mask = ~0;
module_param(probe_mask, int, 0);
MODULE_PARM_DESC(probe_mask, "Probe mask for legacy ISA PATA ports");
static int autospeed;
module_param(autospeed, int, 0);
MODULE_PARM_DESC(autospeed, "Chip present that snoops speed changes");
static int pio_mask = ATA_PIO4;
module_param(pio_mask, int, 0);
MODULE_PARM_DESC(pio_mask, "PIO range for autospeed devices");
static int iordy_mask = 0xFFFFFFFF;
module_param(iordy_mask, int, 0);
MODULE_PARM_DESC(iordy_mask, "Use IORDY if available");
static int ht6560a;
module_param(ht6560a, int, 0);
MODULE_PARM_DESC(ht6560a, "HT 6560A on primary 1, secondary 2, both 3");
static int ht6560b;
module_param(ht6560b, int, 0);
MODULE_PARM_DESC(ht6560b, "HT 6560B on primary 1, secondary 2, both 3");
static int opti82c611a;
module_param(opti82c611a, int, 0);
MODULE_PARM_DESC(opti82c611a,
"Opti 82c611A on primary 1, secondary 2, both 3");
static int opti82c46x;
module_param(opti82c46x, int, 0);
MODULE_PARM_DESC(opti82c46x,
"Opti 82c465MV on primary 1, secondary 2, both 3");
#ifdef CONFIG_PATA_QDI_MODULE
static int qdi = 1;
#else
static int qdi;
#endif
module_param(qdi, int, 0);
MODULE_PARM_DESC(qdi, "Set to probe QDI controllers");
#ifdef CONFIG_PATA_WINBOND_VLB_MODULE
static int winbond = 1;
#else
static int winbond;
#endif
module_param(winbond, int, 0);
MODULE_PARM_DESC(winbond,
"Set to probe Winbond controllers, "
"give I/O port if non standard");
enum controller {
BIOS = 0,
SNOOP = 1,
PDC20230 = 2,
HT6560A = 3,
HT6560B = 4,
OPTI611A = 5,
OPTI46X = 6,
QDI6500 = 7,
QDI6580 = 8,
QDI6580DP = 9, /* Dual channel mode is different */
W83759A = 10,
UNKNOWN = -1
};
struct legacy_data {
unsigned long timing;
u8 clock[2];
u8 last;
int fast;
enum controller type;
struct platform_device *platform_dev;
};
struct legacy_probe {
unsigned char *name;
unsigned long port;
unsigned int irq;
unsigned int slot;
enum controller type;
unsigned long private;
};
struct legacy_controller {
const char *name;
struct ata_port_operations *ops;
unsigned int pio_mask;
unsigned int flags;
unsigned int pflags;
int (*setup)(struct platform_device *, struct legacy_probe *probe,
struct legacy_data *data);
};
static int legacy_port[NR_HOST] = { 0x1f0, 0x170, 0x1e8, 0x168, 0x1e0, 0x160 };
static struct legacy_probe probe_list[NR_HOST];
static struct legacy_data legacy_data[NR_HOST];
static struct ata_host *legacy_host[NR_HOST];
static int nr_legacy_host;
/**
* legacy_probe_add - Add interface to probe list
* @port: Controller port
* @irq: IRQ number
* @type: Controller type
* @private: Controller specific info
*
* Add an entry into the probe list for ATA controllers. This is used
* to add the default ISA slots and then to build up the table
* further according to other ISA/VLB/Weird device scans
*
* An I/O port list is used to keep ordering stable and sane, as we
* don't have any good way to talk about ordering otherwise
*/
static int legacy_probe_add(unsigned long port, unsigned int irq,
enum controller type, unsigned long private)
{
struct legacy_probe *lp = &probe_list[0];
int i;
struct legacy_probe *free = NULL;
for (i = 0; i < NR_HOST; i++) {
if (lp->port == 0 && free == NULL)
free = lp;
/* Matching port, or the correct slot for ordering */
if (lp->port == port || legacy_port[i] == port) {
if (!(probe_mask & 1 << i))
return -1;
free = lp;
break;
}
lp++;
}
if (free == NULL) {
printk(KERN_ERR "pata_legacy: Too many interfaces.\n");
return -1;
}
/* Fill in the entry for later probing */
free->port = port;
free->irq = irq;
free->type = type;
free->private = private;
return 0;
}
/**
* legacy_set_mode - mode setting
* @link: IDE link
* @unused: Device that failed when error is returned
*
* Use a non standard set_mode function. We don't want to be tuned.
*
* The BIOS configured everything. Our job is not to fiddle. Just use
* whatever PIO the hardware is using and leave it at that. When we
* get some kind of nice user driven API for control then we can
* expand on this as per hdparm in the base kernel.
*/
static int legacy_set_mode(struct ata_link *link, struct ata_device **unused)
{
struct ata_device *dev;
ata_for_each_dev(dev, link, ENABLED) {
ata_dev_info(dev, "configured for PIO\n");
dev->pio_mode = XFER_PIO_0;
dev->xfer_mode = XFER_PIO_0;
dev->xfer_shift = ATA_SHIFT_PIO;
dev->flags |= ATA_DFLAG_PIO;
}
return 0;
}
static const struct scsi_host_template legacy_sht = {
ATA_PIO_SHT(DRV_NAME),
};
static const struct ata_port_operations legacy_base_port_ops = {
.inherits = &ata_sff_port_ops,
.cable_detect = ata_cable_40wire,
};
/*
* These ops are used if the user indicates the hardware
* snoops the commands to decide on the mode and handles the
* mode selection "magically" itself. Several legacy controllers
* do this. The mode range can be set if it is not 0x1F by setting
* pio_mask as well.
*/
static struct ata_port_operations simple_port_ops = {
.inherits = &legacy_base_port_ops,
.sff_data_xfer = ata_sff_data_xfer32,
};
static struct ata_port_operations legacy_port_ops = {
.inherits = &legacy_base_port_ops,
.sff_data_xfer = ata_sff_data_xfer32,
.set_mode = legacy_set_mode,
};
/*
* Promise 20230C and 20620 support
*
* This controller supports PIO0 to PIO2. We set PIO timings
 * conservatively to allow for a 50MHz VESA Local Bus. The 20620 DMA
 * support is weird (DMA to the controller, then PIO'd to the host)
 * and is not supported.
*/
static void pdc20230_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
int tries = 5;
int pio = adev->pio_mode - XFER_PIO_0;
u8 rt;
unsigned long flags;
/* Safe as UP only. Force I/Os to occur together */
local_irq_save(flags);
/* Unlock the control interface */
do {
inb(0x1F5);
outb(inb(0x1F2) | 0x80, 0x1F2);
inb(0x1F2);
inb(0x3F6);
inb(0x3F6);
inb(0x1F2);
inb(0x1F2);
}
while ((inb(0x1F2) & 0x80) && --tries);
local_irq_restore(flags);
outb(inb(0x1F4) & 0x07, 0x1F4);
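	/*
	 * Register 0x1F3 holds a 3-bit timing field per drive: bits 3-5 for
	 * drive 0 and bits 0-2 for drive 1 (note the inverted devno shift).
	 * PIO0 leaves the field at 0; PIO1 and PIO2 program 4 and 7.
	 */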
rt = inb(0x1F3);
rt &= ~(0x07 << (3 * !adev->devno));
if (pio)
rt |= (1 + 3 * pio) << (3 * !adev->devno);
outb(rt, 0x1F3);
udelay(100);
outb(inb(0x1F2) | 0x01, 0x1F2);
udelay(100);
inb(0x1F5);
}
static unsigned int pdc_data_xfer_vlb(struct ata_queued_cmd *qc,
unsigned char *buf, unsigned int buflen, int rw)
{
struct ata_device *dev = qc->dev;
struct ata_port *ap = dev->link->ap;
int slop = buflen & 3;
/* 32bit I/O capable *and* we need to write a whole number of dwords */
if (ata_id_has_dword_io(dev->id) && (slop == 0 || slop == 3)
&& (ap->pflags & ATA_PFLAG_PIO32)) {
unsigned long flags;
local_irq_save(flags);
/* Perform the 32bit I/O synchronization sequence */
ioread8(ap->ioaddr.nsect_addr);
ioread8(ap->ioaddr.nsect_addr);
ioread8(ap->ioaddr.nsect_addr);
/* Now the data */
if (rw == READ)
ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
else
iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
if (unlikely(slop)) {
__le32 pad = 0;
if (rw == READ) {
pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
memcpy(buf + buflen - slop, &pad, slop);
} else {
memcpy(&pad, buf + buflen - slop, slop);
iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
}
buflen += 4 - slop;
}
local_irq_restore(flags);
} else
buflen = ata_sff_data_xfer32(qc, buf, buflen, rw);
return buflen;
}
static struct ata_port_operations pdc20230_port_ops = {
.inherits = &legacy_base_port_ops,
.set_piomode = pdc20230_set_piomode,
.sff_data_xfer = pdc_data_xfer_vlb,
};
/*
* Holtek 6560A support
*
* This controller supports PIO0 to PIO2 (no IORDY even though higher
* timings can be loaded).
*/
static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
u8 active, recover;
struct ata_timing t;
	/* Get the timing data in cycles. For now play safe at 50MHz */
ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
active = clamp_val(t.active, 2, 15);
recover = clamp_val(t.recover, 4, 15);
inb(0x3E6);
inb(0x3E6);
inb(0x3E6);
inb(0x3E6);
iowrite8(recover << 4 | active, ap->ioaddr.device_addr);
ioread8(ap->ioaddr.status_addr);
}
static struct ata_port_operations ht6560a_port_ops = {
.inherits = &legacy_base_port_ops,
.set_piomode = ht6560a_set_piomode,
};
/*
* Holtek 6560B support
*
* This controller supports PIO0 to PIO4. We honour the BIOS/jumper FIFO
* setting unless we see an ATAPI device in which case we force it off.
*
* FIXME: need to implement 2nd channel support.
*/
static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
u8 active, recover;
struct ata_timing t;
	/* Get the timing data in cycles. For now play safe at 50MHz */
ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
active = clamp_val(t.active, 2, 15);
recover = clamp_val(t.recover, 2, 16) & 0x0F;
inb(0x3E6);
inb(0x3E6);
inb(0x3E6);
inb(0x3E6);
iowrite8(recover << 4 | active, ap->ioaddr.device_addr);
if (adev->class != ATA_DEV_ATA) {
u8 rconf = inb(0x3E6);
if (rconf & 0x24) {
rconf &= ~0x24;
outb(rconf, 0x3E6);
}
}
ioread8(ap->ioaddr.status_addr);
}
static struct ata_port_operations ht6560b_port_ops = {
.inherits = &legacy_base_port_ops,
.set_piomode = ht6560b_set_piomode,
};
/*
* Opti core chipset helpers
*/
/**
* opti_syscfg - read OPTI chipset configuration
* @reg: Configuration register to read
*
* Returns the value of an OPTI system board configuration register.
*/
static u8 opti_syscfg(u8 reg)
{
unsigned long flags;
u8 r;
	/* Uniprocessor chipset and must force cycles adjacent */
local_irq_save(flags);
outb(reg, 0x22);
r = inb(0x24);
local_irq_restore(flags);
return r;
}
/*
* Opti 82C611A
*
* This controller supports PIO0 to PIO3.
*/
static void opti82c611a_set_piomode(struct ata_port *ap,
struct ata_device *adev)
{
u8 active, recover, setup;
struct ata_timing t;
struct ata_device *pair = ata_dev_pair(adev);
int clock;
int khz[4] = { 50000, 40000, 33000, 25000 };
u8 rc;
/* Enter configuration mode */
ioread16(ap->ioaddr.error_addr);
ioread16(ap->ioaddr.error_addr);
iowrite8(3, ap->ioaddr.nsect_addr);
/* Read VLB clock strapping */
clock = 1000000000 / khz[ioread8(ap->ioaddr.lbah_addr) & 0x03];
/* Get the timing data in cycles */
ata_timing_compute(adev, adev->pio_mode, &t, clock, 1000);
/* Setup timing is shared */
if (pair) {
struct ata_timing tp;
ata_timing_compute(pair, pair->pio_mode, &tp, clock, 1000);
ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP);
}
active = clamp_val(t.active, 2, 17) - 2;
recover = clamp_val(t.recover, 1, 16) - 1;
setup = clamp_val(t.setup, 1, 4) - 1;
/* Select the right timing bank for write timing */
rc = ioread8(ap->ioaddr.lbal_addr);
rc &= 0x7F;
rc |= (adev->devno << 7);
iowrite8(rc, ap->ioaddr.lbal_addr);
/* Write the timings */
iowrite8(active << 4 | recover, ap->ioaddr.error_addr);
/* Select the right bank for read timings, also
load the shared timings for address */
rc = ioread8(ap->ioaddr.device_addr);
rc &= 0xC0;
rc |= adev->devno; /* Index select */
rc |= (setup << 4) | 0x04;
iowrite8(rc, ap->ioaddr.device_addr);
/* Load the read timings */
iowrite8(active << 4 | recover, ap->ioaddr.data_addr);
/* Ensure the timing register mode is right */
rc = ioread8(ap->ioaddr.lbal_addr);
rc &= 0x73;
rc |= 0x84;
iowrite8(rc, ap->ioaddr.lbal_addr);
/* Exit command mode */
iowrite8(0x83, ap->ioaddr.nsect_addr);
}
static struct ata_port_operations opti82c611a_port_ops = {
.inherits = &legacy_base_port_ops,
.set_piomode = opti82c611a_set_piomode,
};
/*
* Opti 82C465MV
*
* This controller supports PIO0 to PIO3. Unlike the 611A the MVB
* version is dual channel but doesn't have a lot of unique registers.
*/
static void opti82c46x_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
u8 active, recover, setup;
struct ata_timing t;
struct ata_device *pair = ata_dev_pair(adev);
int clock;
int khz[4] = { 50000, 40000, 33000, 25000 };
u8 rc;
u8 sysclk;
/* Get the clock */
sysclk = (opti_syscfg(0xAC) & 0xC0) >> 6; /* BIOS set */
/* Enter configuration mode */
ioread16(ap->ioaddr.error_addr);
ioread16(ap->ioaddr.error_addr);
iowrite8(3, ap->ioaddr.nsect_addr);
/* Read VLB clock strapping */
clock = 1000000000 / khz[sysclk];
/* Get the timing data in cycles */
ata_timing_compute(adev, adev->pio_mode, &t, clock, 1000);
/* Setup timing is shared */
if (pair) {
struct ata_timing tp;
ata_timing_compute(pair, pair->pio_mode, &tp, clock, 1000);
ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP);
}
active = clamp_val(t.active, 2, 17) - 2;
recover = clamp_val(t.recover, 1, 16) - 1;
setup = clamp_val(t.setup, 1, 4) - 1;
/* Select the right timing bank for write timing */
rc = ioread8(ap->ioaddr.lbal_addr);
rc &= 0x7F;
rc |= (adev->devno << 7);
iowrite8(rc, ap->ioaddr.lbal_addr);
/* Write the timings */
iowrite8(active << 4 | recover, ap->ioaddr.error_addr);
/* Select the right bank for read timings, also
load the shared timings for address */
rc = ioread8(ap->ioaddr.device_addr);
rc &= 0xC0;
rc |= adev->devno; /* Index select */
rc |= (setup << 4) | 0x04;
iowrite8(rc, ap->ioaddr.device_addr);
/* Load the read timings */
iowrite8(active << 4 | recover, ap->ioaddr.data_addr);
/* Ensure the timing register mode is right */
rc = ioread8(ap->ioaddr.lbal_addr);
rc &= 0x73;
rc |= 0x84;
iowrite8(rc, ap->ioaddr.lbal_addr);
/* Exit command mode */
iowrite8(0x83, ap->ioaddr.nsect_addr);
/* We need to know this for quad device on the MVB */
ap->host->private_data = ap;
}
/**
* opti82c46x_qc_issue - command issue
* @qc: command pending
*
* Called when the libata layer is about to issue a command. We wrap
* this interface so that we can load the correct ATA timings. The
* MVB has a single set of timing registers and these are shared
* across channels. As there are two registers we really ought to
* track the last two used values as a sort of register window. For
* now we just reload on a channel switch. On the single channel
* setup this condition never fires so we do nothing extra.
*
* FIXME: dual channel needs ->serialize support
*/
static unsigned int opti82c46x_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_device *adev = qc->dev;
/* If timings are set and for the wrong channel (2nd test is
due to a libata shortcoming and will eventually go I hope) */
if (ap->host->private_data != ap->host
&& ap->host->private_data != NULL)
opti82c46x_set_piomode(ap, adev);
return ata_sff_qc_issue(qc);
}
static struct ata_port_operations opti82c46x_port_ops = {
.inherits = &legacy_base_port_ops,
.set_piomode = opti82c46x_set_piomode,
.qc_issue = opti82c46x_qc_issue,
};
/**
* qdi65x0_set_piomode - PIO setup for QDI65x0
* @ap: Port
* @adev: Device
*
* In single channel mode the 6580 has one clock per device and we can
* avoid the requirement to clock switch. We also have to load the timing
* into the right clock according to whether we are master or slave.
*
* In dual channel mode the 6580 has one clock per channel and we have
* to software clockswitch in qc_issue.
*/
static void qdi65x0_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
struct ata_timing t;
struct legacy_data *ld_qdi = ap->host->private_data;
int active, recovery;
u8 timing;
/* Get the timing data in cycles */
ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
if (ld_qdi->fast) {
active = 8 - clamp_val(t.active, 1, 8);
recovery = 18 - clamp_val(t.recover, 3, 18);
} else {
active = 9 - clamp_val(t.active, 2, 9);
recovery = 15 - clamp_val(t.recover, 0, 15);
}
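	/*
	 * The timing byte packs recovery in the high nibble and active in
	 * the low nibble; bit 3 is always set (its purpose is not documented
	 * here).
	 */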
timing = (recovery << 4) | active | 0x08;
ld_qdi->clock[adev->devno] = timing;
if (ld_qdi->type == QDI6580)
outb(timing, ld_qdi->timing + 2 * adev->devno);
else
outb(timing, ld_qdi->timing + 2 * ap->port_no);
/* Clear the FIFO */
if (ld_qdi->type != QDI6500 && adev->class != ATA_DEV_ATA)
outb(0x5F, (ld_qdi->timing & 0xFFF0) + 3);
}
/**
* qdi_qc_issue - command issue
* @qc: command pending
*
* Called when the libata layer is about to issue a command. We wrap
* this interface so that we can load the correct ATA timings.
*/
static unsigned int qdi_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_device *adev = qc->dev;
struct legacy_data *ld_qdi = ap->host->private_data;
if (ld_qdi->clock[adev->devno] != ld_qdi->last) {
if (adev->pio_mode) {
ld_qdi->last = ld_qdi->clock[adev->devno];
outb(ld_qdi->clock[adev->devno], ld_qdi->timing +
2 * ap->port_no);
}
}
return ata_sff_qc_issue(qc);
}
static unsigned int vlb32_data_xfer(struct ata_queued_cmd *qc,
unsigned char *buf,
unsigned int buflen, int rw)
{
struct ata_device *adev = qc->dev;
struct ata_port *ap = adev->link->ap;
int slop = buflen & 3;
if (ata_id_has_dword_io(adev->id) && (slop == 0 || slop == 3)
&& (ap->pflags & ATA_PFLAG_PIO32)) {
if (rw == WRITE)
iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
else
ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
if (unlikely(slop)) {
__le32 pad = 0;
if (rw == WRITE) {
memcpy(&pad, buf + buflen - slop, slop);
iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
} else {
pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
memcpy(buf + buflen - slop, &pad, slop);
}
}
return (buflen + 3) & ~3;
} else
return ata_sff_data_xfer(qc, buf, buflen, rw);
}
static int qdi_port(struct platform_device *dev,
struct legacy_probe *lp, struct legacy_data *ld)
{
if (devm_request_region(&dev->dev, lp->private, 4, "qdi") == NULL)
return -EBUSY;
ld->timing = lp->private;
return 0;
}
static struct ata_port_operations qdi6500_port_ops = {
.inherits = &legacy_base_port_ops,
.set_piomode = qdi65x0_set_piomode,
.qc_issue = qdi_qc_issue,
.sff_data_xfer = vlb32_data_xfer,
};
static struct ata_port_operations qdi6580_port_ops = {
.inherits = &legacy_base_port_ops,
.set_piomode = qdi65x0_set_piomode,
.sff_data_xfer = vlb32_data_xfer,
};
static struct ata_port_operations qdi6580dp_port_ops = {
.inherits = &legacy_base_port_ops,
.set_piomode = qdi65x0_set_piomode,
.qc_issue = qdi_qc_issue,
.sff_data_xfer = vlb32_data_xfer,
};
static DEFINE_SPINLOCK(winbond_lock);
static void winbond_writecfg(unsigned long port, u8 reg, u8 val)
{
unsigned long flags;
spin_lock_irqsave(&winbond_lock, flags);
outb(reg, port + 0x01);
outb(val, port + 0x02);
spin_unlock_irqrestore(&winbond_lock, flags);
}
static u8 winbond_readcfg(unsigned long port, u8 reg)
{
u8 val;
unsigned long flags;
spin_lock_irqsave(&winbond_lock, flags);
outb(reg, port + 0x01);
val = inb(port + 0x02);
spin_unlock_irqrestore(&winbond_lock, flags);
return val;
}
static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
struct ata_timing t;
struct legacy_data *ld_winbond = ap->host->private_data;
int active, recovery;
u8 reg;
int timing = 0x88 + (ap->port_no * 4) + (adev->devno * 2);
reg = winbond_readcfg(ld_winbond->timing, 0x81);
/* Get the timing data in cycles */
if (reg & 0x40) /* Fast VLB bus, assume 50MHz */
ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
else
ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
active = (clamp_val(t.active, 3, 17) - 1) & 0x0F;
recovery = (clamp_val(t.recover, 1, 15) + 1) & 0x0F;
timing = (active << 4) | recovery;
winbond_writecfg(ld_winbond->timing, timing, reg);
/* Load the setup timing */
reg = 0x35;
if (adev->class != ATA_DEV_ATA)
reg |= 0x08; /* FIFO off */
if (!ata_pio_need_iordy(adev))
reg |= 0x02; /* IORDY off */
reg |= (clamp_val(t.setup, 0, 3) << 6);
winbond_writecfg(ld_winbond->timing, timing + 1, reg);
}
static int winbond_port(struct platform_device *dev,
struct legacy_probe *lp, struct legacy_data *ld)
{
if (devm_request_region(&dev->dev, lp->private, 4, "winbond") == NULL)
return -EBUSY;
ld->timing = lp->private;
return 0;
}
static struct ata_port_operations winbond_port_ops = {
.inherits = &legacy_base_port_ops,
.set_piomode = winbond_set_piomode,
.sff_data_xfer = vlb32_data_xfer,
};
static struct legacy_controller controllers[] = {
{"BIOS", &legacy_port_ops, ATA_PIO4,
ATA_FLAG_NO_IORDY, 0, NULL },
{"Snooping", &simple_port_ops, ATA_PIO4,
0, 0, NULL },
{"PDC20230", &pdc20230_port_ops, ATA_PIO2,
ATA_FLAG_NO_IORDY,
ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE, NULL },
{"HT6560A", &ht6560a_port_ops, ATA_PIO2,
ATA_FLAG_NO_IORDY, 0, NULL },
{"HT6560B", &ht6560b_port_ops, ATA_PIO4,
ATA_FLAG_NO_IORDY, 0, NULL },
{"OPTI82C611A", &opti82c611a_port_ops, ATA_PIO3,
0, 0, NULL },
{"OPTI82C46X", &opti82c46x_port_ops, ATA_PIO3,
0, 0, NULL },
{"QDI6500", &qdi6500_port_ops, ATA_PIO2,
ATA_FLAG_NO_IORDY,
ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE, qdi_port },
{"QDI6580", &qdi6580_port_ops, ATA_PIO4,
0, ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE, qdi_port },
{"QDI6580DP", &qdi6580dp_port_ops, ATA_PIO4,
0, ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE, qdi_port },
{"W83759A", &winbond_port_ops, ATA_PIO4,
0, ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE,
winbond_port }
};
/**
* probe_chip_type - Discover controller
* @probe: Probe entry to check
*
* Probe an ATA port and identify the type of controller. We don't
* check if the controller appears to be driveless at this point.
*/
static __init int probe_chip_type(struct legacy_probe *probe)
{
int mask = 1 << probe->slot;
if (winbond && (probe->port == 0x1F0 || probe->port == 0x170)) {
u8 reg = winbond_readcfg(winbond, 0x81);
reg |= 0x80; /* jumpered mode off */
winbond_writecfg(winbond, 0x81, reg);
reg = winbond_readcfg(winbond, 0x83);
reg |= 0xF0; /* local control */
winbond_writecfg(winbond, 0x83, reg);
reg = winbond_readcfg(winbond, 0x85);
reg |= 0xF0; /* programmable timing */
winbond_writecfg(winbond, 0x85, reg);
reg = winbond_readcfg(winbond, 0x81);
if (reg & mask)
return W83759A;
}
if (probe->port == 0x1F0) {
unsigned long flags;
local_irq_save(flags);
/* Probe: set bit 7 of the sector count register and see whether it
sticks; on a PDC20230-C/20630 the bit does not read back. */
outb(inb(0x1F2) | 0x80, 0x1F2);
inb(0x1F5);
inb(0x1F2);
inb(0x3F6);
inb(0x3F6);
inb(0x1F2);
inb(0x1F2);
if ((inb(0x1F2) & 0x80) == 0) {
/* PDC20230c or 20630 ? */
printk(KERN_INFO "PDC20230-C/20630 VLB ATA controller"
" detected.\n");
udelay(100);
inb(0x1F5);
local_irq_restore(flags);
return PDC20230;
} else {
outb(0x55, 0x1F2);
inb(0x1F2);
inb(0x1F2);
if (inb(0x1F2) == 0x00)
printk(KERN_INFO "PDC20230-B VLB ATA "
"controller detected.\n");
local_irq_restore(flags);
return BIOS;
}
}
if (ht6560a & mask)
return HT6560A;
if (ht6560b & mask)
return HT6560B;
if (opti82c611a & mask)
return OPTI611A;
if (opti82c46x & mask)
return OPTI46X;
if (autospeed & mask)
return SNOOP;
return BIOS;
}
/**
* legacy_init_one - attach a legacy interface
* @probe: probe record
*
* Register an ISA bus IDE interface. Such interfaces are PIO and we
* assume they do not support IRQ sharing.
*/
static __init int legacy_init_one(struct legacy_probe *probe)
{
struct legacy_controller *controller = &controllers[probe->type];
int pio_modes = controller->pio_mask;
unsigned long io = probe->port;
u32 mask = (1 << probe->slot);
struct ata_port_operations *ops = controller->ops;
struct legacy_data *ld = &legacy_data[probe->slot];
struct ata_host *host = NULL;
struct ata_port *ap;
struct platform_device *pdev;
struct ata_device *dev;
void __iomem *io_addr, *ctrl_addr;
u32 iordy = (iordy_mask & mask) ? 0: ATA_FLAG_NO_IORDY;
int ret;
iordy |= controller->flags;
pdev = platform_device_register_simple(DRV_NAME, probe->slot, NULL, 0);
if (IS_ERR(pdev))
return PTR_ERR(pdev);
ret = -EBUSY;
if (devm_request_region(&pdev->dev, io, 8, "pata_legacy") == NULL ||
devm_request_region(&pdev->dev, io + 0x0206, 1,
"pata_legacy") == NULL)
goto fail;
ret = -ENOMEM;
io_addr = devm_ioport_map(&pdev->dev, io, 8);
ctrl_addr = devm_ioport_map(&pdev->dev, io + 0x0206, 1);
if (!io_addr || !ctrl_addr)
goto fail;
ld->type = probe->type;
if (controller->setup)
if (controller->setup(pdev, probe, ld) < 0)
goto fail;
host = ata_host_alloc(&pdev->dev, 1);
if (!host)
goto fail;
ap = host->ports[0];
ap->ops = ops;
ap->pio_mask = pio_modes;
ap->flags |= ATA_FLAG_SLAVE_POSS | iordy;
ap->pflags |= controller->pflags;
ap->ioaddr.cmd_addr = io_addr;
ap->ioaddr.altstatus_addr = ctrl_addr;
ap->ioaddr.ctl_addr = ctrl_addr;
ata_sff_std_ports(&ap->ioaddr);
ap->host->private_data = ld;
ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", io, io + 0x0206);
ret = ata_host_activate(host, probe->irq, ata_sff_interrupt, 0,
&legacy_sht);
if (ret)
goto fail;
async_synchronize_full();
ld->platform_dev = pdev;
/* Nothing found means we drop the port as it's probably not there */
ret = -ENODEV;
ata_for_each_dev(dev, &ap->link, ALL) {
if (!ata_dev_absent(dev)) {
legacy_host[probe->slot] = host;
ld->platform_dev = pdev;
return 0;
}
}
ata_host_detach(host);
fail:
platform_device_unregister(pdev);
return ret;
}
/**
* legacy_check_special_cases - ATA special cases
* @p: PCI device to check
* @primary: set this if we find an ATA master
* @secondary: set this if we find an ATA secondary
*
* A small number of vendors implemented early PCI ATA interfaces
* on bridge logic without the ATA interface being PCI visible.
* Where we have a matching PCI driver we must skip the relevant
* device here. If we don't know about it then the legacy driver
* is the right driver anyway.
*/
static void __init legacy_check_special_cases(struct pci_dev *p, int *primary,
int *secondary)
{
/* Cyrix CS5510 pre SFF MWDMA ATA on the bridge */
if (p->vendor == 0x1078 && p->device == 0x0000) {
*primary = *secondary = 1;
return;
}
/* Cyrix CS5520 pre SFF MWDMA ATA on the bridge */
if (p->vendor == 0x1078 && p->device == 0x0002) {
*primary = *secondary = 1;
return;
}
/* Intel MPIIX - PIO ATA on non PCI side of bridge */
if (p->vendor == 0x8086 && p->device == 0x1234) {
u16 r;
pci_read_config_word(p, 0x6C, &r);
if (r & 0x8000) {
/* ATA port enabled */
if (r & 0x4000)
*secondary = 1;
else
*primary = 1;
}
return;
}
}
static __init void probe_opti_vlb(void)
{
/* If an OPTI 82C46X is present find out where the channels are */
static const char *optis[4] = {
"3/463MV", "5MV",
"5MVA", "5MVB"
};
u8 chans = 1;
u8 ctrl = (opti_syscfg(0x30) & 0xC0) >> 6;
opti82c46x = 3; /* Assume master and slave first */
printk(KERN_INFO DRV_NAME ": Opti 82C46%s chipset support.\n",
optis[ctrl]);
if (ctrl == 3)
chans = (opti_syscfg(0x3F) & 0x20) ? 2 : 1;
ctrl = opti_syscfg(0xAC);
/* Check that the chip is enabled and this port is the 465MV port. On the
MVB we may have two channels */
if (ctrl & 8) {
if (chans == 2) {
legacy_probe_add(0x1F0, 14, OPTI46X, 0);
legacy_probe_add(0x170, 15, OPTI46X, 0);
}
if (ctrl & 4)
legacy_probe_add(0x170, 15, OPTI46X, 0);
else
legacy_probe_add(0x1F0, 14, OPTI46X, 0);
} else
legacy_probe_add(0x1F0, 14, OPTI46X, 0);
}
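/*
* Decode the QD65xx control register captured during probing: the high
* nibble identifies the chip (0xC0 = QD6500, 0xA0 or 0x50 = QD6580) and
* bit 0 selects which legacy port a single-channel setup is mapped to.
*/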
static __init void qdi65_identify_port(u8 r, u8 res, unsigned long port)
{
static const unsigned long ide_port[2] = { 0x170, 0x1F0 };
/* Check card type */
if ((r & 0xF0) == 0xC0) {
/* QD6500: single channel */
if (r & 8)
/* Disabled ? */
return;
legacy_probe_add(ide_port[r & 0x01], 14 + (r & 0x01),
QDI6500, port);
}
if (((r & 0xF0) == 0xA0) || (r & 0xF0) == 0x50) {
/* QD6580: dual channel */
if (!request_region(port + 2 , 2, "pata_qdi")) {
release_region(port, 2);
return;
}
res = inb(port + 3);
/* Single channel mode ? */
if (res & 1)
legacy_probe_add(ide_port[r & 0x01], 14 + (r & 0x01),
QDI6580, port);
else { /* Dual channel mode */
legacy_probe_add(0x1F0, 14, QDI6580DP, port);
/* port + 0x02, r & 0x04 */
legacy_probe_add(0x170, 15, QDI6580DP, port + 2);
}
release_region(port + 2, 2);
}
}
static __init void probe_qdi_vlb(void)
{
unsigned long flags;
static const unsigned long qd_port[2] = { 0x30, 0xB0 };
int i;
/*
* Check each possible QD65xx base address
*/
for (i = 0; i < 2; i++) {
unsigned long port = qd_port[i];
u8 r, res;
if (request_region(port, 2, "pata_qdi")) {
/* Check for a card */
local_irq_save(flags);
/* I have no h/w that needs this delay but it
is present in the historic code */
r = inb(port);
udelay(1);
outb(0x19, port);
udelay(1);
res = inb(port);
udelay(1);
outb(r, port);
udelay(1);
local_irq_restore(flags);
/* Fail */
if (res == 0x19) {
release_region(port, 2);
continue;
}
/* Passes the presence test */
r = inb(port + 1);
udelay(1);
/* Check port agrees with port set */
if ((r & 2) >> 1 == i)
qdi65_identify_port(r, res, port);
release_region(port, 2);
}
}
}
/**
* legacy_init - attach legacy interfaces
*
* Attach legacy IDE interfaces by scanning the usual IRQ/port suspects.
* Right now we do not scan the ide0 and ide1 address but should do so
* for non PCI systems or systems with no PCI IDE legacy mode devices.
* If you fix that note there are special cases to consider like VLB
* drivers and CS5510/20.
*/
static __init int legacy_init(void)
{
int i;
int ct = 0;
int primary = 0;
int secondary = 0;
int pci_present = 0;
struct legacy_probe *pl = &probe_list[0];
int slot = 0;
struct pci_dev *p = NULL;
for_each_pci_dev(p) {
int r;
/* Check for any overlap of the system ATA mappings. Native
mode controllers stuck on these addresses or some devices
in 'raid' mode won't be found by the storage class test */
for (r = 0; r < 6; r++) {
if (pci_resource_start(p, r) == 0x1f0)
primary = 1;
if (pci_resource_start(p, r) == 0x170)
secondary = 1;
}
/* Check for special cases */
legacy_check_special_cases(p, &primary, &secondary);
/* If PCI bus is present then don't probe for tertiary
legacy ports */
pci_present = 1;
}
if (winbond == 1)
winbond = 0x130; /* Default port, alt is 1B0 */
if (primary == 0 || all)
legacy_probe_add(0x1F0, 14, UNKNOWN, 0);
if (secondary == 0 || all)
legacy_probe_add(0x170, 15, UNKNOWN, 0);
if (probe_all || !pci_present) {
/* ISA/VLB extra ports */
legacy_probe_add(0x1E8, 11, UNKNOWN, 0);
legacy_probe_add(0x168, 10, UNKNOWN, 0);
legacy_probe_add(0x1E0, 8, UNKNOWN, 0);
legacy_probe_add(0x160, 12, UNKNOWN, 0);
}
if (opti82c46x)
probe_opti_vlb();
if (qdi)
probe_qdi_vlb();
for (i = 0; i < NR_HOST; i++, pl++) {
if (pl->port == 0)
continue;
if (pl->type == UNKNOWN)
pl->type = probe_chip_type(pl);
pl->slot = slot++;
if (legacy_init_one(pl) == 0)
ct++;
}
if (ct != 0)
return 0;
return -ENODEV;
}
static __exit void legacy_exit(void)
{
int i;
for (i = 0; i < nr_legacy_host; i++) {
struct legacy_data *ld = &legacy_data[i];
ata_host_detach(legacy_host[i]);
platform_device_unregister(ld->platform_dev);
}
}
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for legacy ATA");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("pata_qdi");
MODULE_ALIAS("pata_winbond");
module_init(legacy_init);
module_exit(legacy_exit);
| linux-master | drivers/ata/pata_legacy.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* sata_sil.c - Silicon Image SATA
*
* Maintained by: Tejun Heo <[email protected]>
* Please ALWAYS copy [email protected]
* on emails.
*
* Copyright 2003-2005 Red Hat, Inc.
* Copyright 2003 Benjamin Herrenschmidt
*
* libata documentation is available via 'make {ps|pdf}docs',
* as Documentation/driver-api/libata.rst
*
* Documentation for SiI 3112:
* http://gkernel.sourceforge.net/specs/sii/3112A_SiI-DS-0095-B2.pdf.bz2
*
* Other errata and documentation available under NDA.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/dmi.h>
#define DRV_NAME "sata_sil"
#define DRV_VERSION "2.4"
#define SIL_DMA_BOUNDARY 0x7fffffffUL
enum {
SIL_MMIO_BAR = 5,
/*
* host flags
*/
SIL_FLAG_NO_SATA_IRQ = (1 << 28),
SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29),
SIL_FLAG_MOD15WRITE = (1 << 30),
SIL_DFL_PORT_FLAGS = ATA_FLAG_SATA,
/*
* Controller IDs
*/
sil_3112 = 0,
sil_3112_no_sata_irq = 1,
sil_3512 = 2,
sil_3114 = 3,
/*
* Register offsets
*/
SIL_SYSCFG = 0x48,
/*
* Register bits
*/
/* SYSCFG */
SIL_MASK_IDE0_INT = (1 << 22),
SIL_MASK_IDE1_INT = (1 << 23),
SIL_MASK_IDE2_INT = (1 << 24),
SIL_MASK_IDE3_INT = (1 << 25),
SIL_MASK_2PORT = SIL_MASK_IDE0_INT | SIL_MASK_IDE1_INT,
SIL_MASK_4PORT = SIL_MASK_2PORT |
SIL_MASK_IDE2_INT | SIL_MASK_IDE3_INT,
/* BMDMA/BMDMA2 */
SIL_INTR_STEERING = (1 << 1),
SIL_DMA_ENABLE = (1 << 0), /* DMA run switch */
SIL_DMA_RDWR = (1 << 3), /* DMA Rd-Wr */
SIL_DMA_SATA_IRQ = (1 << 4), /* OR of all SATA IRQs */
SIL_DMA_ACTIVE = (1 << 16), /* DMA running */
SIL_DMA_ERROR = (1 << 17), /* PCI bus error */
SIL_DMA_COMPLETE = (1 << 18), /* cmd complete / IRQ pending */
SIL_DMA_N_SATA_IRQ = (1 << 6), /* SATA_IRQ for the next channel */
SIL_DMA_N_ACTIVE = (1 << 24), /* ACTIVE for the next channel */
SIL_DMA_N_ERROR = (1 << 25), /* ERROR for the next channel */
SIL_DMA_N_COMPLETE = (1 << 26), /* COMPLETE for the next channel */
/* SIEN */
SIL_SIEN_N = (1 << 16), /* triggered by SError.N */
/*
* Others
*/
SIL_QUIRK_MOD15WRITE = (1 << 0),
SIL_QUIRK_UDMA5MAX = (1 << 1),
};
static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM_SLEEP
static int sil_pci_device_resume(struct pci_dev *pdev);
#endif
static void sil_dev_config(struct ata_device *dev);
static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed);
static enum ata_completion_errors sil_qc_prep(struct ata_queued_cmd *qc);
static void sil_bmdma_setup(struct ata_queued_cmd *qc);
static void sil_bmdma_start(struct ata_queued_cmd *qc);
static void sil_bmdma_stop(struct ata_queued_cmd *qc);
static void sil_freeze(struct ata_port *ap);
static void sil_thaw(struct ata_port *ap);
static const struct pci_device_id sil_pci_tbl[] = {
{ PCI_VDEVICE(CMD, 0x3112), sil_3112 },
{ PCI_VDEVICE(CMD, 0x0240), sil_3112 },
{ PCI_VDEVICE(CMD, 0x3512), sil_3512 },
{ PCI_VDEVICE(CMD, 0x3114), sil_3114 },
{ PCI_VDEVICE(ATI, 0x436e), sil_3112 },
{ PCI_VDEVICE(ATI, 0x4379), sil_3112_no_sata_irq },
{ PCI_VDEVICE(ATI, 0x437a), sil_3112_no_sata_irq },
{ } /* terminate list */
};
/* TODO firmware versions should be added - eric */
static const struct sil_drivelist {
const char *product;
unsigned int quirk;
} sil_blacklist [] = {
{ "ST320012AS", SIL_QUIRK_MOD15WRITE },
{ "ST330013AS", SIL_QUIRK_MOD15WRITE },
{ "ST340017AS", SIL_QUIRK_MOD15WRITE },
{ "ST360015AS", SIL_QUIRK_MOD15WRITE },
{ "ST380023AS", SIL_QUIRK_MOD15WRITE },
{ "ST3120023AS", SIL_QUIRK_MOD15WRITE },
{ "ST340014ASL", SIL_QUIRK_MOD15WRITE },
{ "ST360014ASL", SIL_QUIRK_MOD15WRITE },
{ "ST380011ASL", SIL_QUIRK_MOD15WRITE },
{ "ST3120022ASL", SIL_QUIRK_MOD15WRITE },
{ "ST3160021ASL", SIL_QUIRK_MOD15WRITE },
{ "TOSHIBA MK2561GSYN", SIL_QUIRK_MOD15WRITE },
{ "Maxtor 4D060H3", SIL_QUIRK_UDMA5MAX },
{ }
};
static struct pci_driver sil_pci_driver = {
.name = DRV_NAME,
.id_table = sil_pci_tbl,
.probe = sil_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = sil_pci_device_resume,
#endif
};
static const struct scsi_host_template sil_sht = {
ATA_BASE_SHT(DRV_NAME),
/** These controllers support Large Block Transfer which allows
transfer chunks up to 2GB and which cross 64KB boundaries,
therefore the DMA limits are more relaxed than standard ATA SFF. */
.dma_boundary = SIL_DMA_BOUNDARY,
.sg_tablesize = ATA_MAX_PRD
};
static struct ata_port_operations sil_ops = {
.inherits = &ata_bmdma32_port_ops,
.dev_config = sil_dev_config,
.set_mode = sil_set_mode,
.bmdma_setup = sil_bmdma_setup,
.bmdma_start = sil_bmdma_start,
.bmdma_stop = sil_bmdma_stop,
.qc_prep = sil_qc_prep,
.freeze = sil_freeze,
.thaw = sil_thaw,
.scr_read = sil_scr_read,
.scr_write = sil_scr_write,
};
static const struct ata_port_info sil_port_info[] = {
/* sil_3112 */
{
.flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
.port_ops = &sil_ops,
},
/* sil_3112_no_sata_irq */
{
.flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE |
SIL_FLAG_NO_SATA_IRQ,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
.port_ops = &sil_ops,
},
/* sil_3512 */
{
.flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
.port_ops = &sil_ops,
},
/* sil_3114 */
{
.flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
.port_ops = &sil_ops,
},
};
/* per-port register offsets */
/* TODO: we can probably calculate rather than use a table */
static const struct {
unsigned long tf; /* ATA taskfile register block */
unsigned long ctl; /* ATA control/altstatus register block */
unsigned long bmdma; /* DMA register block */
unsigned long bmdma2; /* DMA register block #2 */
unsigned long fifo_cfg; /* FIFO Valid Byte Count and Control */
unsigned long scr; /* SATA control register block */
unsigned long sien; /* SATA Interrupt Enable register */
unsigned long xfer_mode;/* data transfer mode register */
unsigned long sfis_cfg; /* SATA FIS reception config register */
} sil_port[] = {
/* port 0 ... */
/* tf ctl bmdma bmdma2 fifo scr sien mode sfis */
{ 0x80, 0x8A, 0x0, 0x10, 0x40, 0x100, 0x148, 0xb4, 0x14c },
{ 0xC0, 0xCA, 0x8, 0x18, 0x44, 0x180, 0x1c8, 0xf4, 0x1cc },
{ 0x280, 0x28A, 0x200, 0x210, 0x240, 0x300, 0x348, 0x2b4, 0x34c },
{ 0x2C0, 0x2CA, 0x208, 0x218, 0x244, 0x380, 0x3c8, 0x2f4, 0x3cc },
/* ... port 3 */
};
MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("low-level driver for Silicon Image SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, sil_pci_tbl);
MODULE_VERSION(DRV_VERSION);
static int slow_down;
module_param(slow_down, int, 0444);
MODULE_PARM_DESC(slow_down, "Sledgehammer used to work around random problems, by limiting commands to 15 sectors (0=off, 1=on)");
static void sil_bmdma_stop(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
void __iomem *bmdma2 = mmio_base + sil_port[ap->port_no].bmdma2;
/* clear start/stop bit - can safely always write 0 */
iowrite8(0, bmdma2);
/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
ata_sff_dma_pause(ap);
}
static void sil_bmdma_setup(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
void __iomem *bmdma = ap->ioaddr.bmdma_addr;
/* load PRD table addr. */
iowrite32(ap->bmdma_prd_dma, bmdma + ATA_DMA_TABLE_OFS);
/* issue r/w command */
ap->ops->sff_exec_command(ap, &qc->tf);
}
static void sil_bmdma_start(struct ata_queued_cmd *qc)
{
unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
struct ata_port *ap = qc->ap;
void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
void __iomem *bmdma2 = mmio_base + sil_port[ap->port_no].bmdma2;
u8 dmactl = ATA_DMA_START;
/* set transfer direction, start host DMA transaction
Note: For Large Block Transfer to work, the DMA must be started
using the bmdma2 register. */
if (!rw)
dmactl |= ATA_DMA_WR;
iowrite8(dmactl, bmdma2);
}
/* The way God intended PCI IDE scatter/gather lists to look and behave... */
static void sil_fill_sg(struct ata_queued_cmd *qc)
{
struct scatterlist *sg;
struct ata_port *ap = qc->ap;
struct ata_bmdma_prd *prd, *last_prd = NULL;
unsigned int si;
prd = &ap->bmdma_prd[0];
for_each_sg(qc->sg, sg, qc->n_elem, si) {
/* Note h/w doesn't support 64-bit, so we unconditionally
* truncate dma_addr_t to u32.
*/
u32 addr = (u32) sg_dma_address(sg);
u32 sg_len = sg_dma_len(sg);
prd->addr = cpu_to_le32(addr);
prd->flags_len = cpu_to_le32(sg_len);
last_prd = prd;
prd++;
}
if (likely(last_prd))
last_prd->flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
static enum ata_completion_errors sil_qc_prep(struct ata_queued_cmd *qc)
{
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
return AC_ERR_OK;
sil_fill_sg(qc);
return AC_ERR_OK;
}
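/* Fetch the PCI cache line size (in 32-bit words) programmed by firmware;
* sil_init_controller() uses it to set up the FIFO PCI bus arbitration.
*/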
static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)
{
u8 cache_line = 0;
pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line);
return cache_line;
}
/**
* sil_set_mode - wrap set_mode functions
* @link: link to set up
* @r_failed: returned device when we fail
*
* Wrap the libata method for device setup as after the setup we need
* to inspect the results and do some configuration work
*/
static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed)
{
struct ata_port *ap = link->ap;
void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
void __iomem *addr = mmio_base + sil_port[ap->port_no].xfer_mode;
struct ata_device *dev;
u32 tmp, dev_mode[2] = { };
int rc;
rc = ata_do_set_mode(link, r_failed);
if (rc)
return rc;
ata_for_each_dev(dev, link, ALL) {
if (!ata_dev_enabled(dev))
dev_mode[dev->devno] = 0; /* PIO0/1/2 */
else if (dev->flags & ATA_DFLAG_PIO)
dev_mode[dev->devno] = 1; /* PIO3/4 */
else
dev_mode[dev->devno] = 3; /* UDMA */
/* value 2 indicates MDMA */
}
tmp = readl(addr);
tmp &= ~((1<<5) | (1<<4) | (1<<1) | (1<<0));
tmp |= dev_mode[0];
tmp |= (dev_mode[1] << 4);
writel(tmp, addr);
readl(addr); /* flush */
return 0;
}
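/* Map an SCR register number to its address in the port's SATA control
* block: SControl sits at the base, SStatus at +4 and SError at +8.
*/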
static inline void __iomem *sil_scr_addr(struct ata_port *ap,
unsigned int sc_reg)
{
void __iomem *offset = ap->ioaddr.scr_addr;
switch (sc_reg) {
case SCR_STATUS:
return offset + 4;
case SCR_ERROR:
return offset + 8;
case SCR_CONTROL:
return offset;
default:
/* do nothing */
break;
}
return NULL;
}
static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
void __iomem *mmio = sil_scr_addr(link->ap, sc_reg);
if (mmio) {
*val = readl(mmio);
return 0;
}
return -EINVAL;
}
static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
void __iomem *mmio = sil_scr_addr(link->ap, sc_reg);
if (mmio) {
writel(val, mmio);
return 0;
}
return -EINVAL;
}
static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
{
struct ata_eh_info *ehi = &ap->link.eh_info;
struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
u8 status;
if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) {
u32 serror = 0xffffffff;
/* SIEN doesn't mask SATA IRQs on some 3112s. Those
* controllers continue to assert IRQ as long as
* SError bits are pending. Clear SError immediately.
*/
sil_scr_read(&ap->link, SCR_ERROR, &serror);
sil_scr_write(&ap->link, SCR_ERROR, serror);
/* Sometimes spurious interrupts occur, double check
* it's PHYRDY CHG.
*/
if (serror & SERR_PHYRDY_CHG) {
ap->link.eh_info.serror |= serror;
goto freeze;
}
if (!(bmdma2 & SIL_DMA_COMPLETE))
return;
}
if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
/* this sometimes happens, just clear IRQ */
ap->ops->sff_check_status(ap);
return;
}
/* Check whether we are expecting interrupt in this state */
switch (ap->hsm_task_state) {
case HSM_ST_FIRST:
/* Some pre-ATAPI-4 devices assert INTRQ
* at this state when ready to receive CDB.
*/
/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
* The flag was turned on only for atapi devices. No
* need to check ata_is_atapi(qc->tf.protocol) again.
*/
if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
goto err_hsm;
break;
case HSM_ST_LAST:
if (ata_is_dma(qc->tf.protocol)) {
/* clear DMA-Start bit */
ap->ops->bmdma_stop(qc);
if (bmdma2 & SIL_DMA_ERROR) {
qc->err_mask |= AC_ERR_HOST_BUS;
ap->hsm_task_state = HSM_ST_ERR;
}
}
break;
case HSM_ST:
break;
default:
goto err_hsm;
}
/* check main status, clearing INTRQ */
status = ap->ops->sff_check_status(ap);
if (unlikely(status & ATA_BUSY))
goto err_hsm;
/* ack bmdma irq events */
ata_bmdma_irq_clear(ap);
/* kick HSM in the ass */
ata_sff_hsm_move(ap, qc, status, 0);
if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
ata_ehi_push_desc(ehi, "BMDMA2 stat 0x%x", bmdma2);
return;
err_hsm:
qc->err_mask |= AC_ERR_HSM;
freeze:
ata_port_freeze(ap);
}
static irqreturn_t sil_interrupt(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR];
int handled = 0;
int i;
spin_lock(&host->lock);
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2);
/* turn off SATA_IRQ if not supported */
if (ap->flags & SIL_FLAG_NO_SATA_IRQ)
bmdma2 &= ~SIL_DMA_SATA_IRQ;
if (bmdma2 == 0xffffffff ||
!(bmdma2 & (SIL_DMA_COMPLETE | SIL_DMA_SATA_IRQ)))
continue;
sil_host_intr(ap, bmdma2);
handled = 1;
}
spin_unlock(&host->lock);
return IRQ_RETVAL(handled);
}
static void sil_freeze(struct ata_port *ap)
{
void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
u32 tmp;
/* global IRQ mask doesn't block SATA IRQ, turn off explicitly */
writel(0, mmio_base + sil_port[ap->port_no].sien);
/* plug IRQ */
tmp = readl(mmio_base + SIL_SYSCFG);
tmp |= SIL_MASK_IDE0_INT << ap->port_no;
writel(tmp, mmio_base + SIL_SYSCFG);
readl(mmio_base + SIL_SYSCFG); /* flush */
/* Ensure DMA_ENABLE is off.
*
* This is because the controller will not give us access to the
* taskfile registers while a DMA is in progress
*/
iowrite8(ioread8(ap->ioaddr.bmdma_addr) & ~SIL_DMA_ENABLE,
ap->ioaddr.bmdma_addr);
/* According to ata_bmdma_stop, an HDMA transition requires
* one PIO cycle. But we can't read a taskfile register.
*/
ioread8(ap->ioaddr.bmdma_addr);
}
static void sil_thaw(struct ata_port *ap)
{
void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
u32 tmp;
/* clear IRQ */
ap->ops->sff_check_status(ap);
ata_bmdma_irq_clear(ap);
/* turn on SATA IRQ if supported */
if (!(ap->flags & SIL_FLAG_NO_SATA_IRQ))
writel(SIL_SIEN_N, mmio_base + sil_port[ap->port_no].sien);
/* turn on IRQ */
tmp = readl(mmio_base + SIL_SYSCFG);
tmp &= ~(SIL_MASK_IDE0_INT << ap->port_no);
writel(tmp, mmio_base + SIL_SYSCFG);
}
/**
* sil_dev_config - Apply device/host-specific errata fixups
* @dev: Device to be examined
*
* After the IDENTIFY [PACKET] DEVICE step is complete, and a
* device is known to be present, this function is called.
* We apply two errata fixups which are specific to Silicon Image,
* a Seagate and a Maxtor fixup.
*
* For certain Seagate devices, we must limit the maximum sectors
* to under 8K.
*
* For certain Maxtor devices, we must not program the drive
* beyond udma5.
*
* Both fixups are unfairly pessimistic. As soon as I get more
* information on these errata, I will create a more exhaustive
* list, and apply the fixups to only the specific
* devices/hosts/firmwares that need it.
*
* 20040111 - Seagate drives affected by the Mod15Write bug are blacklisted
* The Maxtor quirk is in the blacklist, but I'm keeping the original
* pessimistic fix for the following reasons...
* - There seems to be less info on it, only one device gleaned off the
* Windows driver, maybe only one is affected. More info would be greatly
* appreciated.
* - But then again UDMA5 is hardly anything to complain about
*/
static void sil_dev_config(struct ata_device *dev)
{
struct ata_port *ap = dev->link->ap;
int print_info = ap->link.eh_context.i.flags & ATA_EHI_PRINTINFO;
unsigned int n, quirks = 0;
unsigned char model_num[ATA_ID_PROD_LEN + 1];
/* This controller doesn't support trim */
dev->horkage |= ATA_HORKAGE_NOTRIM;
ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
for (n = 0; sil_blacklist[n].product; n++)
if (!strcmp(sil_blacklist[n].product, model_num)) {
quirks = sil_blacklist[n].quirk;
break;
}
/* limit requests to 15 sectors */
if (slow_down ||
((ap->flags & SIL_FLAG_MOD15WRITE) &&
(quirks & SIL_QUIRK_MOD15WRITE))) {
if (print_info)
ata_dev_info(dev,
"applying Seagate errata fix (mod15write workaround)\n");
dev->max_sectors = 15;
return;
}
/* limit to udma5 */
if (quirks & SIL_QUIRK_UDMA5MAX) {
if (print_info)
ata_dev_info(dev, "applying Maxtor errata fix %s\n",
model_num);
dev->udma_mask &= ATA_UDMA5;
return;
}
}
static void sil_init_controller(struct ata_host *host)
{
struct pci_dev *pdev = to_pci_dev(host->dev);
void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR];
u8 cls;
u32 tmp;
int i;
/* Initialize FIFO PCI bus arbitration */
cls = sil_get_device_cache_line(pdev);
if (cls) {
cls >>= 3;
cls++; /* cls = (line_size/8)+1 */
for (i = 0; i < host->n_ports; i++)
writew(cls << 8 | cls,
mmio_base + sil_port[i].fifo_cfg);
} else
dev_warn(&pdev->dev,
"cache line size not set. Driver may not function\n");
/* Apply R_ERR on DMA activate FIS errata workaround */
if (host->ports[0]->flags & SIL_FLAG_RERR_ON_DMA_ACT) {
int cnt;
for (i = 0, cnt = 0; i < host->n_ports; i++) {
tmp = readl(mmio_base + sil_port[i].sfis_cfg);
if ((tmp & 0x3) != 0x01)
continue;
if (!cnt)
dev_info(&pdev->dev,
"Applying R_ERR on DMA activate FIS errata fix\n");
writel(tmp & ~0x3, mmio_base + sil_port[i].sfis_cfg);
cnt++;
}
}
if (host->n_ports == 4) {
/* flip the magic "make 4 ports work" bit */
tmp = readl(mmio_base + sil_port[2].bmdma);
if ((tmp & SIL_INTR_STEERING) == 0)
writel(tmp | SIL_INTR_STEERING,
mmio_base + sil_port[2].bmdma);
}
}
static bool sil_broken_system_poweroff(struct pci_dev *pdev)
{
static const struct dmi_system_id broken_systems[] = {
{
.ident = "HP Compaq nx6325",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6325"),
},
/* PCI slot number of the controller */
.driver_data = (void *)0x12UL,
},
{ } /* terminate list */
};
const struct dmi_system_id *dmi = dmi_first_match(broken_systems);
if (dmi) {
unsigned long slot = (unsigned long)dmi->driver_data;
/* apply the quirk only to on-board controllers */
return slot == PCI_SLOT(pdev->devfn);
}
return false;
}
static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int board_id = ent->driver_data;
struct ata_port_info pi = sil_port_info[board_id];
const struct ata_port_info *ppi[] = { &pi, NULL };
struct ata_host *host;
void __iomem *mmio_base;
int n_ports, rc;
unsigned int i;
ata_print_version_once(&pdev->dev, DRV_VERSION);
/* allocate host */
n_ports = 2;
if (board_id == sil_3114)
n_ports = 4;
if (sil_broken_system_poweroff(pdev)) {
pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN |
ATA_FLAG_NO_HIBERNATE_SPINDOWN;
dev_info(&pdev->dev, "quirky BIOS, skipping spindown "
"on poweroff and hibernation\n");
}
host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
if (!host)
return -ENOMEM;
/* acquire resources and fill host */
rc = pcim_enable_device(pdev);
if (rc)
return rc;
rc = pcim_iomap_regions(pdev, 1 << SIL_MMIO_BAR, DRV_NAME);
if (rc == -EBUSY)
pcim_pin_device(pdev);
if (rc)
return rc;
host->iomap = pcim_iomap_table(pdev);
rc = dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
if (rc)
return rc;
mmio_base = host->iomap[SIL_MMIO_BAR];
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
struct ata_ioports *ioaddr = &ap->ioaddr;
ioaddr->cmd_addr = mmio_base + sil_port[i].tf;
ioaddr->altstatus_addr =
ioaddr->ctl_addr = mmio_base + sil_port[i].ctl;
ioaddr->bmdma_addr = mmio_base + sil_port[i].bmdma;
ioaddr->scr_addr = mmio_base + sil_port[i].scr;
ata_sff_std_ports(ioaddr);
ata_port_pbar_desc(ap, SIL_MMIO_BAR, -1, "mmio");
ata_port_pbar_desc(ap, SIL_MMIO_BAR, sil_port[i].tf, "tf");
}
/* initialize and activate */
sil_init_controller(host);
pci_set_master(pdev);
return ata_host_activate(host, pdev->irq, sil_interrupt, IRQF_SHARED,
&sil_sht);
}
#ifdef CONFIG_PM_SLEEP
static int sil_pci_device_resume(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
int rc;
rc = ata_pci_device_do_resume(pdev);
if (rc)
return rc;
sil_init_controller(host);
ata_host_resume(host);
return 0;
}
#endif
module_pci_driver(sil_pci_driver);
| linux-master | drivers/ata/sata_sil.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* A low-level PATA driver to handle a Compact Flash connected on the
* Mikrotik's RouterBoard 532 board.
*
* Copyright (C) 2007 Gabor Juhos <juhosg at openwrt.org>
* Copyright (C) 2008 Florian Fainelli <[email protected]>
*
* This file was based on: drivers/ata/pata_ixp4xx_cf.c
* Copyright (C) 2006-07 Tower Technologies
* Author: Alessandro Zummo <[email protected]>
*
* Also was based on the driver for Linux 2.4.xx published by Mikrotik for
* their RouterBoard 1xx and 5xx series devices. The original Mikrotik code
* seems not to have a license.
*/
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/gpio/consumer.h>
#include <linux/libata.h>
#include <scsi/scsi_host.h>
#include <asm/mach-rc32434/rb.h>
#define DRV_NAME "pata-rb532-cf"
#define DRV_VERSION "0.1.0"
#define DRV_DESC "PATA driver for RouterBOARD 532 Compact Flash"
#define RB500_CF_MAXPORTS 1
#define RB500_CF_IO_DELAY 400
#define RB500_CF_REG_BASE 0x0800
#define RB500_CF_REG_ERR 0x080D
#define RB500_CF_REG_CTRL 0x080E
/* 32bit buffered data register offset */
#define RB500_CF_REG_DBUF32 0x0C00
struct rb532_cf_info {
void __iomem *iobase;
struct gpio_desc *gpio_line;
unsigned int irq;
};
/* ------------------------------------------------------------------------ */
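/*
* The CF interrupt arrives on a GPIO line. When the line is high, hand the
* event to the libata SFF handler and re-arm the IRQ as active-low; when it
* is low, re-arm it as active-high, so the handler runs on each change of
* the line level.
*/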
static irqreturn_t rb532_pata_irq_handler(int irq, void *dev_instance)
{
struct ata_host *ah = dev_instance;
struct rb532_cf_info *info = ah->private_data;
if (gpiod_get_value(info->gpio_line)) {
irq_set_irq_type(info->irq, IRQ_TYPE_LEVEL_LOW);
ata_sff_interrupt(info->irq, dev_instance);
} else {
irq_set_irq_type(info->irq, IRQ_TYPE_LEVEL_HIGH);
}
return IRQ_HANDLED;
}
static struct ata_port_operations rb532_pata_port_ops = {
.inherits = &ata_sff_port_ops,
.sff_data_xfer = ata_sff_data_xfer32,
};
/* ------------------------------------------------------------------------ */
static const struct scsi_host_template rb532_pata_sht = {
ATA_PIO_SHT(DRV_NAME),
};
/* ------------------------------------------------------------------------ */
static void rb532_pata_setup_ports(struct ata_host *ah)
{
struct rb532_cf_info *info = ah->private_data;
struct ata_port *ap;
ap = ah->ports[0];
ap->ops = &rb532_pata_port_ops;
ap->pio_mask = ATA_PIO4;
ap->ioaddr.cmd_addr = info->iobase + RB500_CF_REG_BASE;
ap->ioaddr.ctl_addr = info->iobase + RB500_CF_REG_CTRL;
ap->ioaddr.altstatus_addr = info->iobase + RB500_CF_REG_CTRL;
ata_sff_std_ports(&ap->ioaddr);
ap->ioaddr.data_addr = info->iobase + RB500_CF_REG_DBUF32;
ap->ioaddr.error_addr = info->iobase + RB500_CF_REG_ERR;
}
static int rb532_pata_driver_probe(struct platform_device *pdev)
{
int irq;
struct gpio_desc *gpiod;
struct resource *res;
struct ata_host *ah;
struct rb532_cf_info *info;
int ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "no IOMEM resource found\n");
return -EINVAL;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
if (!irq)
return -EINVAL;
gpiod = devm_gpiod_get(&pdev->dev, NULL, GPIOD_IN);
if (IS_ERR(gpiod)) {
dev_err(&pdev->dev, "no GPIO found for irq%d\n", irq);
return PTR_ERR(gpiod);
}
gpiod_set_consumer_name(gpiod, DRV_NAME);
/* allocate host */
ah = ata_host_alloc(&pdev->dev, RB500_CF_MAXPORTS);
if (!ah)
return -ENOMEM;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
ah->private_data = info;
info->gpio_line = gpiod;
info->irq = irq;
info->iobase = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!info->iobase)
return -ENOMEM;
rb532_pata_setup_ports(ah);
ret = ata_host_activate(ah, irq, rb532_pata_irq_handler,
IRQF_TRIGGER_LOW, &rb532_pata_sht);
if (ret)
return ret;
return 0;
}
static void rb532_pata_driver_remove(struct platform_device *pdev)
{
struct ata_host *ah = platform_get_drvdata(pdev);
ata_host_detach(ah);
}
static struct platform_driver rb532_pata_platform_driver = {
.probe = rb532_pata_driver_probe,
.remove_new = rb532_pata_driver_remove,
.driver = {
.name = DRV_NAME,
},
};
#define DRV_INFO DRV_DESC " version " DRV_VERSION
module_platform_driver(rb532_pata_platform_driver);
MODULE_AUTHOR("Gabor Juhos <juhosg at openwrt.org>");
MODULE_AUTHOR("Florian Fainelli <[email protected]>");
MODULE_DESCRIPTION(DRV_DESC);
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
| linux-master | drivers/ata/pata_rb532_cf.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Amiga Gayle PATA controller driver
*
* Copyright (c) 2018 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Based on gayle.c:
*
* Created 12 Jul 1997 by Geert Uytterhoeven
*/
#include <linux/ata.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/libata.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/zorro.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <asm/amigahw.h>
#include <asm/amigaints.h>
#include <asm/amigayle.h>
#include <asm/setup.h>
#define DRV_NAME "pata_gayle"
#define DRV_VERSION "0.1.0"
#define GAYLE_CONTROL 0x101a
static const struct scsi_host_template pata_gayle_sht = {
ATA_PIO_SHT(DRV_NAME),
};
/* FIXME: is this needed? */
static unsigned int pata_gayle_data_xfer(struct ata_queued_cmd *qc,
unsigned char *buf,
unsigned int buflen, int rw)
{
struct ata_device *dev = qc->dev;
struct ata_port *ap = dev->link->ap;
void __iomem *data_addr = ap->ioaddr.data_addr;
unsigned int words = buflen >> 1;
/* Transfer multiple of 2 bytes */
if (rw == READ)
raw_insw((u16 *)data_addr, (u16 *)buf, words);
else
raw_outsw((u16 *)data_addr, (u16 *)buf, words);
/* Transfer trailing byte, if any. */
if (unlikely(buflen & 0x01)) {
unsigned char pad[2] = { };
/* Point buf to the tail of buffer */
buf += buflen - 1;
if (rw == READ) {
raw_insw((u16 *)data_addr, (u16 *)pad, 1);
*buf = pad[0];
} else {
pad[0] = *buf;
raw_outsw((u16 *)data_addr, (u16 *)pad, 1);
}
words++;
}
return words << 1;
}
/*
* Provide our own set_mode() as we don't want to change anything that has
* already been configured.
*/
static int pata_gayle_set_mode(struct ata_link *link,
struct ata_device **unused)
{
struct ata_device *dev;
ata_for_each_dev(dev, link, ENABLED) {
/* We don't really care */
dev->pio_mode = dev->xfer_mode = XFER_PIO_0;
dev->xfer_shift = ATA_SHIFT_PIO;
dev->flags |= ATA_DFLAG_PIO;
ata_dev_info(dev, "configured for PIO\n");
}
return 0;
}
static bool pata_gayle_irq_check(struct ata_port *ap)
{
u8 ch;
ch = z_readb((unsigned long)ap->private_data);
return !!(ch & GAYLE_IRQ_IDE);
}
static void pata_gayle_irq_clear(struct ata_port *ap)
{
(void)z_readb((unsigned long)ap->ioaddr.status_addr);
z_writeb(0x7c, (unsigned long)ap->private_data);
}
static struct ata_port_operations pata_gayle_a1200_ops = {
.inherits = &ata_sff_port_ops,
.sff_data_xfer = pata_gayle_data_xfer,
.sff_irq_check = pata_gayle_irq_check,
.sff_irq_clear = pata_gayle_irq_clear,
.cable_detect = ata_cable_unknown,
.set_mode = pata_gayle_set_mode,
};
static struct ata_port_operations pata_gayle_a4000_ops = {
.inherits = &ata_sff_port_ops,
.sff_data_xfer = pata_gayle_data_xfer,
.cable_detect = ata_cable_unknown,
.set_mode = pata_gayle_set_mode,
};
static int __init pata_gayle_init_one(struct platform_device *pdev)
{
struct resource *res;
struct gayle_ide_platform_data *pdata;
struct ata_host *host;
struct ata_port *ap;
void __iomem *base;
int ret;
pdata = dev_get_platdata(&pdev->dev);
dev_info(&pdev->dev, "Amiga Gayle IDE controller (A%u style)\n",
pdata->explicit_ack ? 1200 : 4000);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
if (!devm_request_mem_region(&pdev->dev, res->start,
resource_size(res), DRV_NAME)) {
pr_err(DRV_NAME ": resources busy\n");
return -EBUSY;
}
/* allocate host */
host = ata_host_alloc(&pdev->dev, 1);
if (!host)
return -ENOMEM;
ap = host->ports[0];
if (pdata->explicit_ack)
ap->ops = &pata_gayle_a1200_ops;
else
ap->ops = &pata_gayle_a4000_ops;
ap->pio_mask = ATA_PIO4;
ap->flags |= ATA_FLAG_SLAVE_POSS | ATA_FLAG_NO_IORDY;
base = ZTWO_VADDR(pdata->base);
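/* The 16-bit data port sits at the base of the region; the remaining
taskfile registers start two bytes in and are spaced four bytes apart,
with control/altstatus at the separate GAYLE_CONTROL offset. */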
ap->ioaddr.data_addr = base;
ap->ioaddr.error_addr = base + 2 + 1 * 4;
ap->ioaddr.feature_addr = base + 2 + 1 * 4;
ap->ioaddr.nsect_addr = base + 2 + 2 * 4;
ap->ioaddr.lbal_addr = base + 2 + 3 * 4;
ap->ioaddr.lbam_addr = base + 2 + 4 * 4;
ap->ioaddr.lbah_addr = base + 2 + 5 * 4;
ap->ioaddr.device_addr = base + 2 + 6 * 4;
ap->ioaddr.status_addr = base + 2 + 7 * 4;
ap->ioaddr.command_addr = base + 2 + 7 * 4;
ap->ioaddr.altstatus_addr = base + GAYLE_CONTROL;
ap->ioaddr.ctl_addr = base + GAYLE_CONTROL;
ap->private_data = (void *)ZTWO_VADDR(pdata->irqport);
ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", pdata->base,
pdata->base + GAYLE_CONTROL);
ret = ata_host_activate(host, IRQ_AMIGA_PORTS, ata_sff_interrupt,
IRQF_SHARED, &pata_gayle_sht);
if (ret)
return ret;
platform_set_drvdata(pdev, host);
return 0;
}
static int __exit pata_gayle_remove_one(struct platform_device *pdev)
{
struct ata_host *host = platform_get_drvdata(pdev);
ata_host_detach(host);
return 0;
}
static struct platform_driver pata_gayle_driver = {
.remove = __exit_p(pata_gayle_remove_one),
.driver = {
.name = "amiga-gayle-ide",
},
};
module_platform_driver_probe(pata_gayle_driver, pata_gayle_init_one);
MODULE_AUTHOR("Bartlomiej Zolnierkiewicz");
MODULE_DESCRIPTION("low-level driver for Amiga Gayle PATA");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:amiga-gayle-ide");
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_gayle.c |
/*
* pata_sil680.c - SIL680 PATA for new ATA layer
* (C) 2005 Red Hat Inc
*
* based upon
*
* linux/drivers/ide/pci/siimage.c Version 1.07 Nov 30, 2003
*
* Copyright (C) 2001-2002 Andre Hedrick <[email protected]>
* Copyright (C) 2003 Red Hat <[email protected]>
*
* May be copied or modified under the terms of the GNU General Public License
*
* Documentation publicly available.
*
* If you have strange problems with nVidia chipset systems please
* see the SI support documentation and update your system BIOS
* if necessary
*
* TODO
* If we know all our devices are LBA28 (or LBA28 sized) we could use
* the command fifo mode.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_sil680"
#define DRV_VERSION "0.4.9"
#define SIL680_MMIO_BAR 5
/**
* sil680_selreg - return register base
* @ap: ATA interface
* @r: config offset
*
* Turn a config register offset into the right address in PCI space
* to access the control register in question.
*
* Thankfully this is a configuration operation so isn't performance
* critical.
*/
static int sil680_selreg(struct ata_port *ap, int r)
{
return 0xA0 + (ap->port_no << 4) + r;
}
/**
* sil680_seldev - return register base
* @ap: ATA interface
* @adev: ATA device
* @r: config offset
*
* Turn a config register offset into the right address in PCI space
* to access the control register in question including accounting for
* the unit shift.
*/
static int sil680_seldev(struct ata_port *ap, struct ata_device *adev, int r)
{
return 0xA0 + (ap->port_no << 4) + r + (adev->devno << 1);
}
/**
* sil680_cable_detect - cable detection
* @ap: ATA port
*
* Perform cable detection. The SIL680 stores this in PCI config
* space for us.
*/
static int sil680_cable_detect(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int addr = sil680_selreg(ap, 0);
u8 ata66;
pci_read_config_byte(pdev, addr, &ata66);
if (ata66 & 1)
return ATA_CBL_PATA80;
else
return ATA_CBL_PATA40;
}
/**
* sil680_set_piomode - set PIO mode data
* @ap: ATA interface
* @adev: ATA device
*
* Program the SIL680 registers for PIO mode. Note that the task speed
* registers are shared between the devices so we must pick the lowest
* mode for command work.
*/
static void sil680_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
static const u16 speed_p[5] = {
0x328A, 0x2283, 0x1104, 0x10C3, 0x10C1
};
static const u16 speed_t[5] = {
0x328A, 0x2283, 0x1281, 0x10C3, 0x10C1
};
int tfaddr = sil680_selreg(ap, 0x02);
int addr = sil680_seldev(ap, adev, 0x04);
int addr_mask = 0x80 + 4 * ap->port_no;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int pio = adev->pio_mode - XFER_PIO_0;
int lowest_pio = pio;
int port_shift = 4 * adev->devno;
u16 reg;
u8 mode;
struct ata_device *pair = ata_dev_pair(adev);
if (pair != NULL && adev->pio_mode > pair->pio_mode)
lowest_pio = pair->pio_mode - XFER_PIO_0;
pci_write_config_word(pdev, addr, speed_p[pio]);
pci_write_config_word(pdev, tfaddr, speed_t[lowest_pio]);
pci_read_config_word(pdev, tfaddr-2, ®);
pci_read_config_byte(pdev, addr_mask, &mode);
reg &= ~0x0200; /* Clear IORDY */
mode &= ~(3 << port_shift); /* Clear IORDY and DMA bits */
if (ata_pio_need_iordy(adev)) {
reg |= 0x0200; /* Enable IORDY */
mode |= 1 << port_shift;
}
pci_write_config_word(pdev, tfaddr-2, reg);
pci_write_config_byte(pdev, addr_mask, mode);
}
/**
* sil680_set_dmamode - set DMA mode data
* @ap: ATA interface
* @adev: ATA device
*
* Program the MWDMA/UDMA modes for the sil680 chipset.
*
* The MWDMA mode values are pulled from a lookup table
* while the chipset uses mode number for UDMA.
*/
static void sil680_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
static const u8 ultra_table[2][7] = {
{ 0x0C, 0x07, 0x05, 0x04, 0x02, 0x01, 0xFF }, /* 100MHz */
{ 0x0F, 0x0B, 0x07, 0x05, 0x03, 0x02, 0x01 }, /* 133Mhz */
};
static const u16 dma_table[3] = { 0x2208, 0x10C2, 0x10C1 };
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int ma = sil680_seldev(ap, adev, 0x08);
int ua = sil680_seldev(ap, adev, 0x0C);
int addr_mask = 0x80 + 4 * ap->port_no;
int port_shift = adev->devno * 4;
u8 scsc, mode;
u16 multi, ultra;
pci_read_config_byte(pdev, 0x8A, &scsc);
pci_read_config_byte(pdev, addr_mask, &mode);
pci_read_config_word(pdev, ma, &multi);
pci_read_config_word(pdev, ua, &ultra);
/* Mask timing bits */
ultra &= ~0x3F;
mode &= ~(0x03 << port_shift);
/* Extract scsc */
scsc = (scsc & 0x30) ? 1 : 0;
if (adev->dma_mode >= XFER_UDMA_0) {
multi = 0x10C1;
ultra |= ultra_table[scsc][adev->dma_mode - XFER_UDMA_0];
mode |= (0x03 << port_shift);
} else {
multi = dma_table[adev->dma_mode - XFER_MW_DMA_0];
mode |= (0x02 << port_shift);
}
pci_write_config_byte(pdev, addr_mask, mode);
pci_write_config_word(pdev, ma, multi);
pci_write_config_word(pdev, ua, ultra);
}
/**
* sil680_sff_exec_command - issue ATA command to host controller
* @ap: port to which command is being issued
* @tf: ATA taskfile register set
*
* Issues ATA command, with proper synchronization with interrupt
* handler / other threads. Use our MMIO space for PCI posting to avoid
* a hideously slow cycle all the way to the device.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
static void sil680_sff_exec_command(struct ata_port *ap,
const struct ata_taskfile *tf)
{
iowrite8(tf->command, ap->ioaddr.command_addr);
ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}
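/* An interrupt is pending when bit 3 of the per-channel status register
* (PCI config offset 0xA1/0xB1) is set.
*/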
static bool sil680_sff_irq_check(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int addr = sil680_selreg(ap, 1);
u8 val;
pci_read_config_byte(pdev, addr, &val);
return val & 0x08;
}
static const struct scsi_host_template sil680_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static struct ata_port_operations sil680_port_ops = {
.inherits = &ata_bmdma32_port_ops,
.sff_exec_command = sil680_sff_exec_command,
.sff_irq_check = sil680_sff_irq_check,
.cable_detect = sil680_cable_detect,
.set_piomode = sil680_set_piomode,
.set_dmamode = sil680_set_dmamode,
};
/**
* sil680_init_chip - chip setup
* @pdev: PCI device
* @try_mmio: Indicates to caller whether MMIO should be attempted
*
* Perform all the chip setup which must be done both when the device
* is powered up on boot and when we resume in case we resumed from RAM.
* Returns the final clock settings.
*/
static u8 sil680_init_chip(struct pci_dev *pdev, int *try_mmio)
{
u8 tmpbyte = 0;
/* FIXME: double check */
pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
pdev->revision ? 1 : 255);
pci_write_config_byte(pdev, 0x80, 0x00);
pci_write_config_byte(pdev, 0x84, 0x00);
pci_read_config_byte(pdev, 0x8A, &tmpbyte);
dev_dbg(&pdev->dev, "sil680: BA5_EN = %d clock = %02X\n",
tmpbyte & 1, tmpbyte & 0x30);
*try_mmio = 0;
#ifdef CONFIG_PPC
if (machine_is(cell))
*try_mmio = (tmpbyte & 1) || pci_resource_start(pdev, 5);
#endif
switch (tmpbyte & 0x30) {
case 0x00:
/* 133 clock attempt to force it on */
pci_write_config_byte(pdev, 0x8A, tmpbyte|0x10);
break;
case 0x30:
/* if clocking is disabled */
/* 133 clock attempt to force it on */
pci_write_config_byte(pdev, 0x8A, tmpbyte & ~0x20);
break;
case 0x10:
/* 133 already */
break;
case 0x20:
/* BIOS set PCI x2 clocking */
break;
}
pci_read_config_byte(pdev, 0x8A, &tmpbyte);
dev_dbg(&pdev->dev, "sil680: BA5_EN = %d clock = %02X\n",
tmpbyte & 1, tmpbyte & 0x30);
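/* Program conservative power-on default timings into both channels'
register sets. */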
pci_write_config_byte(pdev, 0xA1, 0x72);
pci_write_config_word(pdev, 0xA2, 0x328A);
pci_write_config_dword(pdev, 0xA4, 0x62DD62DD);
pci_write_config_dword(pdev, 0xA8, 0x43924392);
pci_write_config_dword(pdev, 0xAC, 0x40094009);
pci_write_config_byte(pdev, 0xB1, 0x72);
pci_write_config_word(pdev, 0xB2, 0x328A);
pci_write_config_dword(pdev, 0xB4, 0x62DD62DD);
pci_write_config_dword(pdev, 0xB8, 0x43924392);
pci_write_config_dword(pdev, 0xBC, 0x40094009);
switch (tmpbyte & 0x30) {
case 0x00:
dev_info(&pdev->dev, "sil680: 100MHz clock.\n");
break;
case 0x10:
dev_info(&pdev->dev, "sil680: 133MHz clock.\n");
break;
case 0x20:
dev_info(&pdev->dev, "sil680: Using PCI clock.\n");
break;
/* This last case is _NOT_ ok */
case 0x30:
dev_err(&pdev->dev, "sil680: Clock disabled ?\n");
}
return tmpbyte & 0x30;
}
static int sil680_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &sil680_port_ops
};
static const struct ata_port_info info_slow = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
.port_ops = &sil680_port_ops
};
const struct ata_port_info *ppi[] = { &info, NULL };
struct ata_host *host;
void __iomem *mmio_base;
int rc, try_mmio;
ata_print_version_once(&pdev->dev, DRV_VERSION);
rc = pcim_enable_device(pdev);
if (rc)
return rc;
switch (sil680_init_chip(pdev, &try_mmio)) {
case 0:
ppi[0] = &info_slow;
break;
case 0x30:
return -ENODEV;
}
if (!try_mmio)
goto use_ioports;
/* Try to acquire MMIO resources and fallback to PIO if
* that fails
*/
rc = pcim_iomap_regions(pdev, 1 << SIL680_MMIO_BAR, DRV_NAME);
if (rc)
goto use_ioports;
/* Allocate host and set it up */
host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
if (!host)
return -ENOMEM;
host->iomap = pcim_iomap_table(pdev);
/* Setup DMA masks */
rc = dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
if (rc)
return rc;
pci_set_master(pdev);
/* Get MMIO base and initialize port addresses */
mmio_base = host->iomap[SIL680_MMIO_BAR];
host->ports[0]->ioaddr.bmdma_addr = mmio_base + 0x00;
host->ports[0]->ioaddr.cmd_addr = mmio_base + 0x80;
host->ports[0]->ioaddr.ctl_addr = mmio_base + 0x8a;
host->ports[0]->ioaddr.altstatus_addr = mmio_base + 0x8a;
ata_sff_std_ports(&host->ports[0]->ioaddr);
host->ports[1]->ioaddr.bmdma_addr = mmio_base + 0x08;
host->ports[1]->ioaddr.cmd_addr = mmio_base + 0xc0;
host->ports[1]->ioaddr.ctl_addr = mmio_base + 0xca;
host->ports[1]->ioaddr.altstatus_addr = mmio_base + 0xca;
ata_sff_std_ports(&host->ports[1]->ioaddr);
/* Register & activate */
return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &sil680_sht);
use_ioports:
return ata_pci_bmdma_init_one(pdev, ppi, &sil680_sht, NULL, 0);
}
#ifdef CONFIG_PM_SLEEP
static int sil680_reinit_one(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
int try_mmio, rc;
rc = ata_pci_device_do_resume(pdev);
if (rc)
return rc;
sil680_init_chip(pdev, &try_mmio);
ata_host_resume(host);
return 0;
}
#endif
static const struct pci_device_id sil680[] = {
{ PCI_VDEVICE(CMD, PCI_DEVICE_ID_SII_680), },
{ },
};
static struct pci_driver sil680_pci_driver = {
.name = DRV_NAME,
.id_table = sil680,
.probe = sil680_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = sil680_reinit_one,
#endif
};
module_pci_driver(sil680_pci_driver);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for SI680 PATA");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, sil680);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_sil680.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Libata based driver for Apple "macio" family of PATA controllers
*
* Copyright 2008/2009 Benjamin Herrenschmidt, IBM Corp
* <[email protected]>
*
* Some bits and pieces from drivers/ide/ppc/pmac.c
*
*/
#undef DEBUG
#undef DEBUG_DMA
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/ata.h>
#include <linux/libata.h>
#include <linux/adb.h>
#include <linux/pmu.h>
#include <linux/scatterlist.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <asm/macio.h>
#include <asm/io.h>
#include <asm/dbdma.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/mediabay.h>
#ifdef DEBUG_DMA
#define dev_dbgdma(dev, format, arg...) \
dev_printk(KERN_DEBUG , dev , format , ## arg)
#else
#define dev_dbgdma(dev, format, arg...) \
({ if (0) dev_printk(KERN_DEBUG, dev, format, ##arg); 0; })
#endif
#define DRV_NAME "pata_macio"
#define DRV_VERSION "0.9"
/* Models of macio ATA controller */
enum {
controller_ohare, /* OHare based */
controller_heathrow, /* Heathrow/Paddington */
controller_kl_ata3, /* KeyLargo ATA-3 */
controller_kl_ata4, /* KeyLargo ATA-4 */
controller_un_ata6, /* UniNorth2 ATA-6 */
controller_k2_ata6, /* K2 ATA-6 */
controller_sh_ata6, /* Shasta ATA-6 */
};
static const char* macio_ata_names[] = {
"OHare ATA", /* OHare based */
"Heathrow ATA", /* Heathrow/Paddington */
"KeyLargo ATA-3", /* KeyLargo ATA-3 (MDMA only) */
"KeyLargo ATA-4", /* KeyLargo ATA-4 (UDMA/66) */
"UniNorth ATA-6", /* UniNorth2 ATA-6 (UDMA/100) */
"K2 ATA-6", /* K2 ATA-6 (UDMA/100) */
"Shasta ATA-6", /* Shasta ATA-6 (UDMA/133) */
};
/*
* Extra registers, both 32-bit little-endian
*/
#define IDE_TIMING_CONFIG 0x200
#define IDE_INTERRUPT 0x300
/* Kauai (U2) ATA has different register setup */
#define IDE_KAUAI_PIO_CONFIG 0x200
#define IDE_KAUAI_ULTRA_CONFIG 0x210
#define IDE_KAUAI_POLL_CONFIG 0x220
/*
* Timing configuration register definitions
*/
/* Number of IDE_SYSCLK_NS ticks, argument is in nanoseconds */
#define SYSCLK_TICKS(t) (((t) + IDE_SYSCLK_NS - 1) / IDE_SYSCLK_NS)
#define SYSCLK_TICKS_66(t) (((t) + IDE_SYSCLK_66_NS - 1) / IDE_SYSCLK_66_NS)
#define IDE_SYSCLK_NS 30 /* 33Mhz cell */
#define IDE_SYSCLK_66_NS 15 /* 66Mhz cell */
/* 133Mhz cell, found in shasta.
* See comments about 100 Mhz Uninorth 2...
* Note that PIO_MASK and MDMA_MASK seem to overlap, that's just
* weird and I don't know why ... at this stage
*/
#define TR_133_PIOREG_PIO_MASK 0xff000fff
#define TR_133_PIOREG_MDMA_MASK 0x00fff800
#define TR_133_UDMAREG_UDMA_MASK 0x0003ffff
#define TR_133_UDMAREG_UDMA_EN 0x00000001
/* 100Mhz cell, found in Uninorth 2 and K2. It appears as a pci device
* (106b/0033) on uninorth or K2 internal PCI bus and its clock is
* controlled like gem or fw. It appears to be an evolution of keylargo
* ATA4 with a timing register extended to 2x32bits registers (one
* for PIO & MWDMA and one for UDMA, and a similar DBDMA channel.
* It has its own local feature control register as well.
*
* After scratching my mind over the timing values, at least for PIO
* and MDMA, I think I've figured the format of the timing register,
* though I use pre-calculated tables for UDMA as usual...
*/
#define TR_100_PIO_ADDRSETUP_MASK 0xff000000 /* Size of field unknown */
#define TR_100_PIO_ADDRSETUP_SHIFT 24
#define TR_100_MDMA_MASK 0x00fff000
#define TR_100_MDMA_RECOVERY_MASK 0x00fc0000
#define TR_100_MDMA_RECOVERY_SHIFT 18
#define TR_100_MDMA_ACCESS_MASK 0x0003f000
#define TR_100_MDMA_ACCESS_SHIFT 12
#define TR_100_PIO_MASK 0xff000fff
#define TR_100_PIO_RECOVERY_MASK 0x00000fc0
#define TR_100_PIO_RECOVERY_SHIFT 6
#define TR_100_PIO_ACCESS_MASK 0x0000003f
#define TR_100_PIO_ACCESS_SHIFT 0
#define TR_100_UDMAREG_UDMA_MASK 0x0000ffff
#define TR_100_UDMAREG_UDMA_EN 0x00000001
/* 66Mhz cell, found in KeyLargo. Can do ultra mode 0 to 2 on
* 40 connector cable and to 4 on 80 connector one.
* Clock unit is 15ns (66Mhz)
*
* 3 Values can be programmed:
* - Write data setup, which appears to match the cycle time. They
* also call it DIOW setup.
* - Ready to pause time (from spec)
* - Address setup. That one is weird. I don't see where exactly
* it fits in UDMA cycles, I got its name from an obscure piece
* of commented out code in Darwin. They leave it to 0, we do as
* well, despite a comment that would lead to think it has a
* min value of 45ns.
* Apple also adds 60ns to the write data setup (or cycle time ?) on
* reads.
*/
#define TR_66_UDMA_MASK 0xfff00000
#define TR_66_UDMA_EN 0x00100000 /* Enable Ultra mode for DMA */
#define TR_66_PIO_ADDRSETUP_MASK 0xe0000000 /* Address setup */
#define TR_66_PIO_ADDRSETUP_SHIFT 29
#define TR_66_UDMA_RDY2PAUS_MASK 0x1e000000 /* Ready 2 pause time */
#define TR_66_UDMA_RDY2PAUS_SHIFT 25
#define TR_66_UDMA_WRDATASETUP_MASK 0x01e00000 /* Write data setup time */
#define TR_66_UDMA_WRDATASETUP_SHIFT 21
#define TR_66_MDMA_MASK 0x000ffc00
#define TR_66_MDMA_RECOVERY_MASK 0x000f8000
#define TR_66_MDMA_RECOVERY_SHIFT 15
#define TR_66_MDMA_ACCESS_MASK 0x00007c00
#define TR_66_MDMA_ACCESS_SHIFT 10
#define TR_66_PIO_MASK 0xe00003ff
#define TR_66_PIO_RECOVERY_MASK 0x000003e0
#define TR_66_PIO_RECOVERY_SHIFT 5
#define TR_66_PIO_ACCESS_MASK 0x0000001f
#define TR_66_PIO_ACCESS_SHIFT 0
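/*
 * Illustrative sketch only, not part of the original driver: show how a
 * KeyLargo 66Mhz UDMA timing word could be assembled from nanosecond values
 * using SYSCLK_TICKS_66() and the TR_66_UDMA_* fields above. The helper name
 * is an assumption made purely for this example; the driver itself uses the
 * pre-computed pata_macio_kl66_timings[] table further down.
 */
static inline u32 pata_macio_example_tr66_udma(unsigned int rdy2pause_ns,
					       unsigned int wrdatasetup_ns)
{
	u32 reg = TR_66_UDMA_EN;
	reg |= (SYSCLK_TICKS_66(rdy2pause_ns) << TR_66_UDMA_RDY2PAUS_SHIFT) &
	       TR_66_UDMA_RDY2PAUS_MASK;
	reg |= (SYSCLK_TICKS_66(wrdatasetup_ns) << TR_66_UDMA_WRDATASETUP_SHIFT) &
	       TR_66_UDMA_WRDATASETUP_MASK;
	/* Address setup is left at 0, as the comment above describes */
	return reg;
}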
/* 33Mhz cell, found in OHare, Heathrow (& Paddington) and KeyLargo
* Can do pio & mdma modes, clock unit is 30ns (33Mhz)
*
 * The access time and recovery time can be programmed. Some older
 * Darwin code bases limit OHare to a 150ns cycle time. I decided to do
 * the same here for safety against broken old hardware ;)
 * The HalfTick bit, when set, adds half a clock (15ns) to the access
 * time and removes one from recovery. It's not supported on the KeyLargo
 * implementation afaik. The E bit appears to be set for PIO mode 0 and
 * is used to reach the long timings used in this mode.
*/
#define TR_33_MDMA_MASK 0x003ff800
#define TR_33_MDMA_RECOVERY_MASK 0x001f0000
#define TR_33_MDMA_RECOVERY_SHIFT 16
#define TR_33_MDMA_ACCESS_MASK 0x0000f800
#define TR_33_MDMA_ACCESS_SHIFT 11
#define TR_33_MDMA_HALFTICK 0x00200000
#define TR_33_PIO_MASK 0x000007ff
#define TR_33_PIO_E 0x00000400
#define TR_33_PIO_RECOVERY_MASK 0x000003e0
#define TR_33_PIO_RECOVERY_SHIFT 5
#define TR_33_PIO_ACCESS_MASK 0x0000001f
#define TR_33_PIO_ACCESS_SHIFT 0
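/*
 * Illustrative sketch only, not part of the original driver: build a
 * 33Mhz-cell PIO timing value from access/recovery times in nanoseconds
 * using the SYSCLK_TICKS() macro and the TR_33_* fields above. The helper
 * name is made up for this example; the driver itself relies on the
 * pre-computed timing tables below rather than calculating values.
 */
static inline u32 pata_macio_example_tr33_pio(unsigned int access_ns,
					      unsigned int recovery_ns)
{
	u32 reg = 0;
	reg |= (SYSCLK_TICKS(access_ns) << TR_33_PIO_ACCESS_SHIFT) &
	       TR_33_PIO_ACCESS_MASK;
	reg |= (SYSCLK_TICKS(recovery_ns) << TR_33_PIO_RECOVERY_SHIFT) &
	       TR_33_PIO_RECOVERY_MASK;
	return reg;
}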
/*
* Interrupt register definitions. Only present on newer cells
* (Keylargo and later afaik) so we don't use it.
*/
#define IDE_INTR_DMA 0x80000000
#define IDE_INTR_DEVICE 0x40000000
/*
* FCR Register on Kauai. Not sure what bit 0x4 is ...
*/
#define KAUAI_FCR_UATA_MAGIC 0x00000004
#define KAUAI_FCR_UATA_RESET_N 0x00000002
#define KAUAI_FCR_UATA_ENABLE 0x00000001
/* Allow up to 256 DBDMA commands per xfer */
#define MAX_DCMDS 256
/* Don't let a DMA segment go all the way to 64K */
#define MAX_DBDMA_SEG 0xff00
/*
* Wait 1s for disk to answer on IDE bus after a hard reset
* of the device (via GPIO/FCR).
*
* Some devices seem to "pollute" the bus even after dropping
* the BSY bit (typically some combo drives slave on the UDMA
* bus) after a hard reset. Since we hard reset all drives on
* KeyLargo ATA66, we have to keep that delay around. I may end
* up not hard resetting anymore on these and keep the delay only
* for older interfaces instead (we have to reset when coming
* from MacOS...) --BenH.
*/
#define IDE_WAKEUP_DELAY_MS 1000
struct pata_macio_timing;
struct pata_macio_priv {
int kind;
int aapl_bus_id;
int mediabay : 1;
struct device_node *node;
struct macio_dev *mdev;
struct pci_dev *pdev;
struct device *dev;
int irq;
u32 treg[2][2];
void __iomem *tfregs;
void __iomem *kauai_fcr;
struct dbdma_cmd * dma_table_cpu;
dma_addr_t dma_table_dma;
struct ata_host *host;
const struct pata_macio_timing *timings;
};
/* Previous variants of this driver used to calculate timings
* for various variants of the chip and use tables for others.
*
 * Not only was this confusing, but in addition it isn't clear
 * whether our calculation code was correct. It didn't entirely
 * match the darwin code and whatever documentation I could find
 * on these cells.
 *
 * I decided to rely entirely on a table instead for this version
 * of the driver. Also, because I don't really care about derated
 * modes and really old HW other than making it work, I'm not going
 * to calculate / snoop timing values for anything other than the
 * standard modes.
*/
struct pata_macio_timing {
int mode;
u32 reg1; /* Bits to set in first timing reg */
u32 reg2; /* Bits to set in second timing reg */
};
static const struct pata_macio_timing pata_macio_ohare_timings[] = {
{ XFER_PIO_0, 0x00000526, 0, },
{ XFER_PIO_1, 0x00000085, 0, },
{ XFER_PIO_2, 0x00000025, 0, },
{ XFER_PIO_3, 0x00000025, 0, },
{ XFER_PIO_4, 0x00000025, 0, },
{ XFER_MW_DMA_0, 0x00074000, 0, },
{ XFER_MW_DMA_1, 0x00221000, 0, },
{ XFER_MW_DMA_2, 0x00211000, 0, },
{ -1, 0, 0 }
};
static const struct pata_macio_timing pata_macio_heathrow_timings[] = {
{ XFER_PIO_0, 0x00000526, 0, },
{ XFER_PIO_1, 0x00000085, 0, },
{ XFER_PIO_2, 0x00000025, 0, },
{ XFER_PIO_3, 0x00000025, 0, },
{ XFER_PIO_4, 0x00000025, 0, },
{ XFER_MW_DMA_0, 0x00074000, 0, },
{ XFER_MW_DMA_1, 0x00221000, 0, },
{ XFER_MW_DMA_2, 0x00211000, 0, },
{ -1, 0, 0 }
};
static const struct pata_macio_timing pata_macio_kl33_timings[] = {
{ XFER_PIO_0, 0x00000526, 0, },
{ XFER_PIO_1, 0x00000085, 0, },
{ XFER_PIO_2, 0x00000025, 0, },
{ XFER_PIO_3, 0x00000025, 0, },
{ XFER_PIO_4, 0x00000025, 0, },
{ XFER_MW_DMA_0, 0x00084000, 0, },
{ XFER_MW_DMA_1, 0x00021800, 0, },
{ XFER_MW_DMA_2, 0x00011800, 0, },
{ -1, 0, 0 }
};
static const struct pata_macio_timing pata_macio_kl66_timings[] = {
{ XFER_PIO_0, 0x0000038c, 0, },
{ XFER_PIO_1, 0x0000020a, 0, },
{ XFER_PIO_2, 0x00000127, 0, },
{ XFER_PIO_3, 0x000000c6, 0, },
{ XFER_PIO_4, 0x00000065, 0, },
{ XFER_MW_DMA_0, 0x00084000, 0, },
{ XFER_MW_DMA_1, 0x00029800, 0, },
{ XFER_MW_DMA_2, 0x00019400, 0, },
{ XFER_UDMA_0, 0x19100000, 0, },
{ XFER_UDMA_1, 0x14d00000, 0, },
{ XFER_UDMA_2, 0x10900000, 0, },
{ XFER_UDMA_3, 0x0c700000, 0, },
{ XFER_UDMA_4, 0x0c500000, 0, },
{ -1, 0, 0 }
};
static const struct pata_macio_timing pata_macio_kauai_timings[] = {
{ XFER_PIO_0, 0x08000a92, 0, },
{ XFER_PIO_1, 0x0800060f, 0, },
{ XFER_PIO_2, 0x0800038b, 0, },
{ XFER_PIO_3, 0x05000249, 0, },
{ XFER_PIO_4, 0x04000148, 0, },
{ XFER_MW_DMA_0, 0x00618000, 0, },
{ XFER_MW_DMA_1, 0x00209000, 0, },
{ XFER_MW_DMA_2, 0x00148000, 0, },
{ XFER_UDMA_0, 0, 0x000070c1, },
{ XFER_UDMA_1, 0, 0x00005d81, },
{ XFER_UDMA_2, 0, 0x00004a61, },
{ XFER_UDMA_3, 0, 0x00003a51, },
{ XFER_UDMA_4, 0, 0x00002a31, },
{ XFER_UDMA_5, 0, 0x00002921, },
{ -1, 0, 0 }
};
static const struct pata_macio_timing pata_macio_shasta_timings[] = {
{ XFER_PIO_0, 0x0a000c97, 0, },
{ XFER_PIO_1, 0x07000712, 0, },
{ XFER_PIO_2, 0x040003cd, 0, },
{ XFER_PIO_3, 0x0500028b, 0, },
{ XFER_PIO_4, 0x0400010a, 0, },
{ XFER_MW_DMA_0, 0x00820800, 0, },
{ XFER_MW_DMA_1, 0x0028b000, 0, },
{ XFER_MW_DMA_2, 0x001ca000, 0, },
{ XFER_UDMA_0, 0, 0x00035901, },
{ XFER_UDMA_1, 0, 0x000348b1, },
{ XFER_UDMA_2, 0, 0x00033881, },
{ XFER_UDMA_3, 0, 0x00033861, },
{ XFER_UDMA_4, 0, 0x00033841, },
{ XFER_UDMA_5, 0, 0x00033031, },
{ XFER_UDMA_6, 0, 0x00033021, },
{ -1, 0, 0 }
};
static const struct pata_macio_timing *pata_macio_find_timing(
struct pata_macio_priv *priv,
int mode)
{
int i;
for (i = 0; priv->timings[i].mode > 0; i++) {
if (priv->timings[i].mode == mode)
return &priv->timings[i];
}
return NULL;
}
static void pata_macio_apply_timings(struct ata_port *ap, unsigned int device)
{
struct pata_macio_priv *priv = ap->private_data;
void __iomem *rbase = ap->ioaddr.cmd_addr;
if (priv->kind == controller_sh_ata6 ||
priv->kind == controller_un_ata6 ||
priv->kind == controller_k2_ata6) {
writel(priv->treg[device][0], rbase + IDE_KAUAI_PIO_CONFIG);
writel(priv->treg[device][1], rbase + IDE_KAUAI_ULTRA_CONFIG);
} else
writel(priv->treg[device][0], rbase + IDE_TIMING_CONFIG);
}
static void pata_macio_dev_select(struct ata_port *ap, unsigned int device)
{
ata_sff_dev_select(ap, device);
/* Apply timings */
pata_macio_apply_timings(ap, device);
}
static void pata_macio_set_timings(struct ata_port *ap,
struct ata_device *adev)
{
struct pata_macio_priv *priv = ap->private_data;
const struct pata_macio_timing *t;
dev_dbg(priv->dev, "Set timings: DEV=%d,PIO=0x%x (%s),DMA=0x%x (%s)\n",
adev->devno,
adev->pio_mode,
ata_mode_string(ata_xfer_mode2mask(adev->pio_mode)),
adev->dma_mode,
ata_mode_string(ata_xfer_mode2mask(adev->dma_mode)));
/* First clear timings */
priv->treg[adev->devno][0] = priv->treg[adev->devno][1] = 0;
/* Now get the PIO timings */
t = pata_macio_find_timing(priv, adev->pio_mode);
if (t == NULL) {
dev_warn(priv->dev, "Invalid PIO timing requested: 0x%x\n",
adev->pio_mode);
t = pata_macio_find_timing(priv, XFER_PIO_0);
}
BUG_ON(t == NULL);
/* PIO timings only ever use the first treg */
priv->treg[adev->devno][0] |= t->reg1;
/* Now get DMA timings */
t = pata_macio_find_timing(priv, adev->dma_mode);
if (t == NULL || (t->reg1 == 0 && t->reg2 == 0)) {
dev_dbg(priv->dev, "DMA timing not set yet, using MW_DMA_0\n");
t = pata_macio_find_timing(priv, XFER_MW_DMA_0);
}
BUG_ON(t == NULL);
/* DMA timings can use both tregs */
priv->treg[adev->devno][0] |= t->reg1;
priv->treg[adev->devno][1] |= t->reg2;
dev_dbg(priv->dev, " -> %08x %08x\n",
priv->treg[adev->devno][0],
priv->treg[adev->devno][1]);
/* Apply to hardware */
pata_macio_apply_timings(ap, adev->devno);
}
/*
 * Blast some well-known "safe" values into the timing registers at init or
 * wakeup-from-sleep time, before we do the real calculation
*/
static void pata_macio_default_timings(struct pata_macio_priv *priv)
{
unsigned int value, value2 = 0;
	switch (priv->kind) {
case controller_sh_ata6:
value = 0x0a820c97;
value2 = 0x00033031;
break;
case controller_un_ata6:
case controller_k2_ata6:
value = 0x08618a92;
value2 = 0x00002921;
break;
case controller_kl_ata4:
value = 0x0008438c;
break;
case controller_kl_ata3:
value = 0x00084526;
break;
case controller_heathrow:
case controller_ohare:
default:
value = 0x00074526;
break;
}
priv->treg[0][0] = priv->treg[1][0] = value;
priv->treg[0][1] = priv->treg[1][1] = value2;
}
static int pata_macio_cable_detect(struct ata_port *ap)
{
struct pata_macio_priv *priv = ap->private_data;
/* Get cable type from device-tree */
if (priv->kind == controller_kl_ata4 ||
priv->kind == controller_un_ata6 ||
priv->kind == controller_k2_ata6 ||
priv->kind == controller_sh_ata6) {
const char* cable = of_get_property(priv->node, "cable-type",
NULL);
struct device_node *root = of_find_node_by_path("/");
const char *model = of_get_property(root, "model", NULL);
of_node_put(root);
if (cable && !strncmp(cable, "80-", 3)) {
			/* Some drives fail to detect the 80c cable in PowerBooks.
			 * These machines use a proprietary short IDE cable
			 * anyway.
			 */
			if (model && !strncmp(model, "PowerBook", 9))
return ATA_CBL_PATA40_SHORT;
else
return ATA_CBL_PATA80;
}
}
	/* G5s seem to have an incorrect cable type in the device-tree.
	 * Let's assume they always have an 80-conductor cable; this seems to
	 * always be the case unless the user mucked around
*/
if (of_device_is_compatible(priv->node, "K2-UATA") ||
of_device_is_compatible(priv->node, "shasta-ata"))
return ATA_CBL_PATA80;
/* Anything else is 40 connectors */
return ATA_CBL_PATA40;
}
static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc)
{
unsigned int write = (qc->tf.flags & ATA_TFLAG_WRITE);
struct ata_port *ap = qc->ap;
struct pata_macio_priv *priv = ap->private_data;
struct scatterlist *sg;
struct dbdma_cmd *table;
unsigned int si, pi;
dev_dbgdma(priv->dev, "%s: qc %p flags %lx, write %d dev %d\n",
__func__, qc, qc->flags, write, qc->dev->devno);
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
return AC_ERR_OK;
table = (struct dbdma_cmd *) priv->dma_table_cpu;
pi = 0;
for_each_sg(qc->sg, sg, qc->n_elem, si) {
u32 addr, sg_len, len;
/* determine if physical DMA addr spans 64K boundary.
* Note h/w doesn't support 64-bit, so we unconditionally
* truncate dma_addr_t to u32.
*/
addr = (u32) sg_dma_address(sg);
sg_len = sg_dma_len(sg);
while (sg_len) {
/* table overflow should never happen */
			BUG_ON(pi++ >= MAX_DCMDS);
len = (sg_len < MAX_DBDMA_SEG) ? sg_len : MAX_DBDMA_SEG;
table->command = cpu_to_le16(write ? OUTPUT_MORE: INPUT_MORE);
table->req_count = cpu_to_le16(len);
table->phy_addr = cpu_to_le32(addr);
table->cmd_dep = 0;
table->xfer_status = 0;
table->res_count = 0;
addr += len;
sg_len -= len;
++table;
}
}
/* Should never happen according to Tejun */
BUG_ON(!pi);
/* Convert the last command to an input/output */
table--;
table->command = cpu_to_le16(write ? OUTPUT_LAST: INPUT_LAST);
table++;
/* Add the stop command to the end of the list */
memset(table, 0, sizeof(struct dbdma_cmd));
table->command = cpu_to_le16(DBDMA_STOP);
dev_dbgdma(priv->dev, "%s: %d DMA list entries\n", __func__, pi);
return AC_ERR_OK;
}
static void pata_macio_freeze(struct ata_port *ap)
{
struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
if (dma_regs) {
unsigned int timeout = 1000000;
/* Make sure DMA controller is stopped */
writel((RUN|PAUSE|FLUSH|WAKE|DEAD) << 16, &dma_regs->control);
while (--timeout && (readl(&dma_regs->status) & RUN))
udelay(1);
}
ata_sff_freeze(ap);
}
static void pata_macio_bmdma_setup(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct pata_macio_priv *priv = ap->private_data;
struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
int dev = qc->dev->devno;
dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc);
	/* Make sure DMA command updates are visible */
writel(priv->dma_table_dma, &dma_regs->cmdptr);
/* On KeyLargo 66Mhz cell, we need to add 60ns to wrDataSetup on
* UDMA reads
*/
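	/* 0x00800000 is 4 ticks in the TR_66_UDMA_WRDATASETUP field
	 * (shift 21), i.e. 4 x 15ns = 60ns at the 66Mhz cell clock.
	 */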
if (priv->kind == controller_kl_ata4 &&
(priv->treg[dev][0] & TR_66_UDMA_EN)) {
void __iomem *rbase = ap->ioaddr.cmd_addr;
u32 reg = priv->treg[dev][0];
if (!(qc->tf.flags & ATA_TFLAG_WRITE))
reg += 0x00800000;
writel(reg, rbase + IDE_TIMING_CONFIG);
}
/* issue r/w command */
ap->ops->sff_exec_command(ap, &qc->tf);
}
static void pata_macio_bmdma_start(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct pata_macio_priv *priv = ap->private_data;
struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc);
writel((RUN << 16) | RUN, &dma_regs->control);
/* Make sure it gets to the controller right now */
(void)readl(&dma_regs->control);
}
static void pata_macio_bmdma_stop(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct pata_macio_priv *priv = ap->private_data;
struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
unsigned int timeout = 1000000;
dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc);
	/* Stop the DMA engine and wait for it to fully halt */
	writel(((RUN|WAKE|DEAD) << 16), &dma_regs->control);
while (--timeout && (readl(&dma_regs->status) & RUN))
udelay(1);
}
static u8 pata_macio_bmdma_status(struct ata_port *ap)
{
struct pata_macio_priv *priv = ap->private_data;
struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
u32 dstat, rstat = ATA_DMA_INTR;
unsigned long timeout = 0;
dstat = readl(&dma_regs->status);
dev_dbgdma(priv->dev, "%s: dstat=%x\n", __func__, dstat);
/* We have two things to deal with here:
*
* - The dbdma won't stop if the command was started
* but completed with an error without transferring all
	 * the data. This happens when bad blocks are encountered
	 * during a multi-block transfer.
*
* - The dbdma fifo hasn't yet finished flushing to
* system memory when the disk interrupt occurs.
*/
/* First check for errors */
if ((dstat & (RUN|DEAD)) != RUN)
rstat |= ATA_DMA_ERR;
/* If ACTIVE is cleared, the STOP command has been hit and
* the transfer is complete. If not, we have to flush the
* channel.
*/
if ((dstat & ACTIVE) == 0)
return rstat;
dev_dbgdma(priv->dev, "%s: DMA still active, flushing...\n", __func__);
/* If dbdma didn't execute the STOP command yet, the
* active bit is still set. We consider that we aren't
* sharing interrupts (which is hopefully the case with
* those controllers) and so we just try to flush the
* channel for pending data in the fifo
*/
udelay(1);
writel((FLUSH << 16) | FLUSH, &dma_regs->control);
for (;;) {
udelay(1);
dstat = readl(&dma_regs->status);
if ((dstat & FLUSH) == 0)
break;
if (++timeout > 1000) {
dev_warn(priv->dev, "timeout flushing DMA\n");
rstat |= ATA_DMA_ERR;
break;
}
}
return rstat;
}
/* port_start is when we allocate the DMA command list */
static int pata_macio_port_start(struct ata_port *ap)
{
struct pata_macio_priv *priv = ap->private_data;
if (ap->ioaddr.bmdma_addr == NULL)
return 0;
/* Allocate space for the DBDMA commands.
*
* The +2 is +1 for the stop command and +1 to allow for
* aligning the start address to a multiple of 16 bytes.
*/
priv->dma_table_cpu =
dmam_alloc_coherent(priv->dev,
(MAX_DCMDS + 2) * sizeof(struct dbdma_cmd),
&priv->dma_table_dma, GFP_KERNEL);
if (priv->dma_table_cpu == NULL) {
dev_err(priv->dev, "Unable to allocate DMA command list\n");
ap->ioaddr.bmdma_addr = NULL;
ap->mwdma_mask = 0;
ap->udma_mask = 0;
}
return 0;
}
static void pata_macio_irq_clear(struct ata_port *ap)
{
struct pata_macio_priv *priv = ap->private_data;
/* Nothing to do here */
dev_dbgdma(priv->dev, "%s\n", __func__);
}
static void pata_macio_reset_hw(struct pata_macio_priv *priv, int resume)
{
dev_dbg(priv->dev, "Enabling & resetting... \n");
if (priv->mediabay)
return;
if (priv->kind == controller_ohare && !resume) {
/* The code below is having trouble on some ohare machines
		 * (timing related?). Until I can get my hands on one of these
		 * units, I'll keep the old way.
*/
ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, priv->node, 0, 1);
} else {
int rc;
/* Reset and enable controller */
rc = ppc_md.feature_call(PMAC_FTR_IDE_RESET,
priv->node, priv->aapl_bus_id, 1);
ppc_md.feature_call(PMAC_FTR_IDE_ENABLE,
priv->node, priv->aapl_bus_id, 1);
msleep(10);
/* Only bother waiting if there's a reset control */
if (rc == 0) {
ppc_md.feature_call(PMAC_FTR_IDE_RESET,
priv->node, priv->aapl_bus_id, 0);
msleep(IDE_WAKEUP_DELAY_MS);
}
}
/* If resuming a PCI device, restore the config space here */
if (priv->pdev && resume) {
int rc;
pci_restore_state(priv->pdev);
rc = pcim_enable_device(priv->pdev);
if (rc)
dev_err(&priv->pdev->dev,
"Failed to enable device after resume (%d)\n",
rc);
else
pci_set_master(priv->pdev);
}
	/* On Kauai, initialize the FCR. We don't perform a reset; it doesn't
	 * really seem necessary and skipping it speeds up the boot process.
*/
if (priv->kauai_fcr)
writel(KAUAI_FCR_UATA_MAGIC |
KAUAI_FCR_UATA_RESET_N |
KAUAI_FCR_UATA_ENABLE, priv->kauai_fcr);
}
/* Hook the standard slave config to fixup some HW related alignment
* restrictions
*/
static int pata_macio_slave_config(struct scsi_device *sdev)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct pata_macio_priv *priv = ap->private_data;
struct ata_device *dev;
u16 cmd;
int rc;
/* First call original */
rc = ata_scsi_slave_config(sdev);
if (rc)
return rc;
/* This is lifted from sata_nv */
dev = &ap->link.device[sdev->id];
	/* OHare has issues with non-cache-aligned DMA on some chipsets */
if (priv->kind == controller_ohare) {
blk_queue_update_dma_alignment(sdev->request_queue, 31);
blk_queue_update_dma_pad(sdev->request_queue, 31);
/* Tell the world about it */
ata_dev_info(dev, "OHare alignment limits applied\n");
return 0;
}
/* We only have issues with ATAPI */
if (dev->class != ATA_DEV_ATAPI)
return 0;
/* Shasta and K2 seem to have "issues" with reads ... */
if (priv->kind == controller_sh_ata6 || priv->kind == controller_k2_ata6) {
		/* Alright, these are bad; apply restrictions */
blk_queue_update_dma_alignment(sdev->request_queue, 15);
blk_queue_update_dma_pad(sdev->request_queue, 15);
		/* We enable MWI and hack the cache line size directly here; this
		 * is specific to this chipset and these are not normal values. We
		 * happen to somewhat know what we are doing here (which is basically
		 * to do the same as Apple does and pray they did not get it wrong :-)
*/
BUG_ON(!priv->pdev);
pci_write_config_byte(priv->pdev, PCI_CACHE_LINE_SIZE, 0x08);
pci_read_config_word(priv->pdev, PCI_COMMAND, &cmd);
pci_write_config_word(priv->pdev, PCI_COMMAND,
cmd | PCI_COMMAND_INVALIDATE);
/* Tell the world about it */
ata_dev_info(dev, "K2/Shasta alignment limits applied\n");
}
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int pata_macio_do_suspend(struct pata_macio_priv *priv, pm_message_t mesg)
{
/* First, core libata suspend to do most of the work */
ata_host_suspend(priv->host, mesg);
/* Restore to default timings */
pata_macio_default_timings(priv);
	/* Mask the interrupt. Not strictly necessary, but the old driver did
	 * it and I'd rather not change that here */
disable_irq(priv->irq);
/* The media bay will handle itself just fine */
if (priv->mediabay)
return 0;
/* Kauai has bus control FCRs directly here */
if (priv->kauai_fcr) {
u32 fcr = readl(priv->kauai_fcr);
fcr &= ~(KAUAI_FCR_UATA_RESET_N | KAUAI_FCR_UATA_ENABLE);
writel(fcr, priv->kauai_fcr);
}
/* For PCI, save state and disable DMA. No need to call
* pci_set_power_state(), the HW doesn't do D states that
* way, the platform code will take care of suspending the
* ASIC properly
*/
if (priv->pdev) {
pci_save_state(priv->pdev);
pci_disable_device(priv->pdev);
}
/* Disable the bus on older machines and the cell on kauai */
ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, priv->node,
priv->aapl_bus_id, 0);
return 0;
}
static int pata_macio_do_resume(struct pata_macio_priv *priv)
{
/* Reset and re-enable the HW */
pata_macio_reset_hw(priv, 1);
/* Sanitize drive timings */
pata_macio_apply_timings(priv->host->ports[0], 0);
/* We want our IRQ back ! */
enable_irq(priv->irq);
/* Let the libata core take it from there */
ata_host_resume(priv->host);
return 0;
}
#endif /* CONFIG_PM_SLEEP */
static const struct scsi_host_template pata_macio_sht = {
__ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = MAX_DCMDS,
/* We may not need that strict one */
.dma_boundary = ATA_DMA_BOUNDARY,
/* Not sure what the real max is but we know it's less than 64K, let's
* use 64K minus 256
*/
.max_segment_size = MAX_DBDMA_SEG,
.slave_configure = pata_macio_slave_config,
.sdev_groups = ata_common_sdev_groups,
.can_queue = ATA_DEF_QUEUE,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
};
static struct ata_port_operations pata_macio_ops = {
.inherits = &ata_bmdma_port_ops,
.freeze = pata_macio_freeze,
.set_piomode = pata_macio_set_timings,
.set_dmamode = pata_macio_set_timings,
.cable_detect = pata_macio_cable_detect,
.sff_dev_select = pata_macio_dev_select,
.qc_prep = pata_macio_qc_prep,
.bmdma_setup = pata_macio_bmdma_setup,
.bmdma_start = pata_macio_bmdma_start,
.bmdma_stop = pata_macio_bmdma_stop,
.bmdma_status = pata_macio_bmdma_status,
.port_start = pata_macio_port_start,
.sff_irq_clear = pata_macio_irq_clear,
};
static void pata_macio_invariants(struct pata_macio_priv *priv)
{
const int *bidp;
/* Identify the type of controller */
if (of_device_is_compatible(priv->node, "shasta-ata")) {
priv->kind = controller_sh_ata6;
priv->timings = pata_macio_shasta_timings;
} else if (of_device_is_compatible(priv->node, "kauai-ata")) {
priv->kind = controller_un_ata6;
priv->timings = pata_macio_kauai_timings;
} else if (of_device_is_compatible(priv->node, "K2-UATA")) {
priv->kind = controller_k2_ata6;
priv->timings = pata_macio_kauai_timings;
} else if (of_device_is_compatible(priv->node, "keylargo-ata")) {
if (of_node_name_eq(priv->node, "ata-4")) {
priv->kind = controller_kl_ata4;
priv->timings = pata_macio_kl66_timings;
} else {
priv->kind = controller_kl_ata3;
priv->timings = pata_macio_kl33_timings;
}
} else if (of_device_is_compatible(priv->node, "heathrow-ata")) {
priv->kind = controller_heathrow;
priv->timings = pata_macio_heathrow_timings;
} else {
priv->kind = controller_ohare;
priv->timings = pata_macio_ohare_timings;
}
/* XXX FIXME --- setup priv->mediabay here */
/* Get Apple bus ID (for clock and ASIC control) */
bidp = of_get_property(priv->node, "AAPL,bus-id", NULL);
priv->aapl_bus_id = bidp ? *bidp : 0;
/* Fixup missing Apple bus ID in case of media-bay */
if (priv->mediabay && !bidp)
priv->aapl_bus_id = 1;
}
static void pata_macio_setup_ios(struct ata_ioports *ioaddr,
void __iomem * base, void __iomem * dma)
{
/* cmd_addr is the base of regs for that port */
ioaddr->cmd_addr = base;
/* taskfile registers */
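	/* On the macio cell, the taskfile registers are spaced 0x10 bytes
	 * apart, hence the << 4 applied to the standard register numbers */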
ioaddr->data_addr = base + (ATA_REG_DATA << 4);
ioaddr->error_addr = base + (ATA_REG_ERR << 4);
ioaddr->feature_addr = base + (ATA_REG_FEATURE << 4);
ioaddr->nsect_addr = base + (ATA_REG_NSECT << 4);
ioaddr->lbal_addr = base + (ATA_REG_LBAL << 4);
ioaddr->lbam_addr = base + (ATA_REG_LBAM << 4);
ioaddr->lbah_addr = base + (ATA_REG_LBAH << 4);
ioaddr->device_addr = base + (ATA_REG_DEVICE << 4);
ioaddr->status_addr = base + (ATA_REG_STATUS << 4);
ioaddr->command_addr = base + (ATA_REG_CMD << 4);
ioaddr->altstatus_addr = base + 0x160;
ioaddr->ctl_addr = base + 0x160;
ioaddr->bmdma_addr = dma;
}
static void pmac_macio_calc_timing_masks(struct pata_macio_priv *priv,
struct ata_port_info *pinfo)
{
int i = 0;
pinfo->pio_mask = 0;
pinfo->mwdma_mask = 0;
pinfo->udma_mask = 0;
while (priv->timings[i].mode > 0) {
unsigned int mask = 1U << (priv->timings[i].mode & 0x0f);
switch(priv->timings[i].mode & 0xf0) {
case 0x00: /* PIO */
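			/* XFER_PIO_0 is 0x08, so shift down so that PIO
			 * mode 0 ends up on bit 0 of pio_mask */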
pinfo->pio_mask |= (mask >> 8);
break;
case 0x20: /* MWDMA */
pinfo->mwdma_mask |= mask;
break;
case 0x40: /* UDMA */
pinfo->udma_mask |= mask;
break;
}
i++;
}
dev_dbg(priv->dev, "Supported masks: PIO=%x, MWDMA=%x, UDMA=%x\n",
pinfo->pio_mask, pinfo->mwdma_mask, pinfo->udma_mask);
}
static int pata_macio_common_init(struct pata_macio_priv *priv,
resource_size_t tfregs,
resource_size_t dmaregs,
resource_size_t fcregs,
unsigned long irq)
{
struct ata_port_info pinfo;
const struct ata_port_info *ppi[] = { &pinfo, NULL };
void __iomem *dma_regs = NULL;
/* Fill up privates with various invariants collected from the
* device-tree
*/
pata_macio_invariants(priv);
/* Make sure we have sane initial timings in the cache */
pata_macio_default_timings(priv);
/* Allocate libata host for 1 port */
memset(&pinfo, 0, sizeof(struct ata_port_info));
pmac_macio_calc_timing_masks(priv, &pinfo);
pinfo.flags = ATA_FLAG_SLAVE_POSS;
pinfo.port_ops = &pata_macio_ops;
pinfo.private_data = priv;
priv->host = ata_host_alloc_pinfo(priv->dev, ppi, 1);
if (priv->host == NULL) {
dev_err(priv->dev, "Failed to allocate ATA port structure\n");
return -ENOMEM;
}
/* Setup the private data in host too */
priv->host->private_data = priv;
/* Map base registers */
priv->tfregs = devm_ioremap(priv->dev, tfregs, 0x100);
if (priv->tfregs == NULL) {
dev_err(priv->dev, "Failed to map ATA ports\n");
return -ENOMEM;
}
priv->host->iomap = &priv->tfregs;
/* Map DMA regs */
if (dmaregs != 0) {
dma_regs = devm_ioremap(priv->dev, dmaregs,
sizeof(struct dbdma_regs));
if (dma_regs == NULL)
dev_warn(priv->dev, "Failed to map ATA DMA registers\n");
}
/* If chip has local feature control, map those regs too */
if (fcregs != 0) {
priv->kauai_fcr = devm_ioremap(priv->dev, fcregs, 4);
if (priv->kauai_fcr == NULL) {
dev_err(priv->dev, "Failed to map ATA FCR register\n");
return -ENOMEM;
}
}
/* Setup port data structure */
pata_macio_setup_ios(&priv->host->ports[0]->ioaddr,
priv->tfregs, dma_regs);
priv->host->ports[0]->private_data = priv;
/* hard-reset the controller */
pata_macio_reset_hw(priv, 0);
pata_macio_apply_timings(priv->host->ports[0], 0);
/* Enable bus master if necessary */
if (priv->pdev && dma_regs)
pci_set_master(priv->pdev);
dev_info(priv->dev, "Activating pata-macio chipset %s, Apple bus ID %d\n",
macio_ata_names[priv->kind], priv->aapl_bus_id);
/* Start it up */
priv->irq = irq;
return ata_host_activate(priv->host, irq, ata_bmdma_interrupt, 0,
&pata_macio_sht);
}
static int pata_macio_attach(struct macio_dev *mdev,
const struct of_device_id *match)
{
struct pata_macio_priv *priv;
resource_size_t tfregs, dmaregs = 0;
unsigned long irq;
int rc;
/* Check for broken device-trees */
if (macio_resource_count(mdev) == 0) {
dev_err(&mdev->ofdev.dev,
"No addresses for controller\n");
return -ENXIO;
}
/* Enable managed resources */
macio_enable_devres(mdev);
/* Allocate and init private data structure */
priv = devm_kzalloc(&mdev->ofdev.dev,
sizeof(struct pata_macio_priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->node = of_node_get(mdev->ofdev.dev.of_node);
priv->mdev = mdev;
priv->dev = &mdev->ofdev.dev;
/* Request memory resource for taskfile registers */
if (macio_request_resource(mdev, 0, "pata-macio")) {
dev_err(&mdev->ofdev.dev,
"Cannot obtain taskfile resource\n");
return -EBUSY;
}
tfregs = macio_resource_start(mdev, 0);
/* Request resources for DMA registers if any */
if (macio_resource_count(mdev) >= 2) {
if (macio_request_resource(mdev, 1, "pata-macio-dma"))
dev_err(&mdev->ofdev.dev,
"Cannot obtain DMA resource\n");
else
dmaregs = macio_resource_start(mdev, 1);
}
/*
* Fixup missing IRQ for some old implementations with broken
* device-trees.
*
* This is a bit bogus, it should be fixed in the device-tree itself,
* via the existing macio fixups, based on the type of interrupt
* controller in the machine. However, I have no test HW for this case,
* and this trick works well enough on those old machines...
*/
if (macio_irq_count(mdev) == 0) {
dev_warn(&mdev->ofdev.dev,
"No interrupts for controller, using 13\n");
irq = irq_create_mapping(NULL, 13);
} else
irq = macio_irq(mdev, 0);
	/* Prevent media bay callbacks until fully registered */
lock_media_bay(priv->mdev->media_bay);
/* Get register addresses and call common initialization */
rc = pata_macio_common_init(priv,
tfregs, /* Taskfile regs */
dmaregs, /* DBDMA regs */
0, /* Feature control */
irq);
unlock_media_bay(priv->mdev->media_bay);
return rc;
}
static int pata_macio_detach(struct macio_dev *mdev)
{
struct ata_host *host = macio_get_drvdata(mdev);
struct pata_macio_priv *priv = host->private_data;
lock_media_bay(priv->mdev->media_bay);
/* Make sure the mediabay callback doesn't try to access
* dead stuff
*/
priv->host->private_data = NULL;
ata_host_detach(host);
unlock_media_bay(priv->mdev->media_bay);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int pata_macio_suspend(struct macio_dev *mdev, pm_message_t mesg)
{
struct ata_host *host = macio_get_drvdata(mdev);
return pata_macio_do_suspend(host->private_data, mesg);
}
static int pata_macio_resume(struct macio_dev *mdev)
{
struct ata_host *host = macio_get_drvdata(mdev);
return pata_macio_do_resume(host->private_data);
}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PMAC_MEDIABAY
static void pata_macio_mb_event(struct macio_dev* mdev, int mb_state)
{
struct ata_host *host = macio_get_drvdata(mdev);
struct ata_port *ap;
struct ata_eh_info *ehi;
struct ata_device *dev;
unsigned long flags;
if (!host || !host->private_data)
return;
ap = host->ports[0];
spin_lock_irqsave(ap->lock, flags);
ehi = &ap->link.eh_info;
if (mb_state == MB_CD) {
ata_ehi_push_desc(ehi, "mediabay plug");
ata_ehi_hotplugged(ehi);
ata_port_freeze(ap);
} else {
ata_ehi_push_desc(ehi, "mediabay unplug");
ata_for_each_dev(dev, &ap->link, ALL)
dev->flags |= ATA_DFLAG_DETACH;
ata_port_abort(ap);
}
spin_unlock_irqrestore(ap->lock, flags);
}
#endif /* CONFIG_PMAC_MEDIABAY */
static int pata_macio_pci_attach(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct pata_macio_priv *priv;
struct device_node *np;
resource_size_t rbase;
/* We cannot use a MacIO controller without its OF device node */
np = pci_device_to_OF_node(pdev);
if (np == NULL) {
dev_err(&pdev->dev,
"Cannot find OF device node for controller\n");
return -ENODEV;
}
/* Check that it can be enabled */
if (pcim_enable_device(pdev)) {
dev_err(&pdev->dev,
"Cannot enable controller PCI device\n");
return -ENXIO;
}
/* Allocate and init private data structure */
priv = devm_kzalloc(&pdev->dev,
sizeof(struct pata_macio_priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->node = of_node_get(np);
priv->pdev = pdev;
priv->dev = &pdev->dev;
/* Get MMIO regions */
if (pci_request_regions(pdev, "pata-macio")) {
dev_err(&pdev->dev,
"Cannot obtain PCI resources\n");
return -EBUSY;
}
/* Get register addresses and call common initialization */
rbase = pci_resource_start(pdev, 0);
if (pata_macio_common_init(priv,
rbase + 0x2000, /* Taskfile regs */
rbase + 0x1000, /* DBDMA regs */
rbase, /* Feature control */
pdev->irq))
return -ENXIO;
return 0;
}
static void pata_macio_pci_detach(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
ata_host_detach(host);
}
#ifdef CONFIG_PM_SLEEP
static int pata_macio_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
struct ata_host *host = pci_get_drvdata(pdev);
return pata_macio_do_suspend(host->private_data, mesg);
}
static int pata_macio_pci_resume(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
return pata_macio_do_resume(host->private_data);
}
#endif /* CONFIG_PM_SLEEP */
static const struct of_device_id pata_macio_match[] =
{
{ .name = "IDE", },
{ .name = "ATA", },
{ .type = "ide", },
{ .type = "ata", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, pata_macio_match);
static struct macio_driver pata_macio_driver =
{
.driver = {
.name = "pata-macio",
.owner = THIS_MODULE,
.of_match_table = pata_macio_match,
},
.probe = pata_macio_attach,
.remove = pata_macio_detach,
#ifdef CONFIG_PM_SLEEP
.suspend = pata_macio_suspend,
.resume = pata_macio_resume,
#endif
#ifdef CONFIG_PMAC_MEDIABAY
.mediabay_event = pata_macio_mb_event,
#endif
};
static const struct pci_device_id pata_macio_pci_match[] = {
{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_UNI_N_ATA), 0 },
{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID_ATA100), 0 },
{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_K2_ATA100), 0 },
{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_SH_ATA), 0 },
{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID2_ATA), 0 },
{},
};
static struct pci_driver pata_macio_pci_driver = {
.name = "pata-pci-macio",
.id_table = pata_macio_pci_match,
.probe = pata_macio_pci_attach,
.remove = pata_macio_pci_detach,
#ifdef CONFIG_PM_SLEEP
.suspend = pata_macio_pci_suspend,
.resume = pata_macio_pci_resume,
#endif
.driver = {
.owner = THIS_MODULE,
},
};
MODULE_DEVICE_TABLE(pci, pata_macio_pci_match);
static int __init pata_macio_init(void)
{
int rc;
if (!machine_is(powermac))
return -ENODEV;
rc = pci_register_driver(&pata_macio_pci_driver);
if (rc)
return rc;
rc = macio_register_driver(&pata_macio_driver);
if (rc) {
pci_unregister_driver(&pata_macio_pci_driver);
return rc;
}
return 0;
}
static void __exit pata_macio_exit(void)
{
macio_unregister_driver(&pata_macio_driver);
pci_unregister_driver(&pata_macio_pci_driver);
}
module_init(pata_macio_init);
module_exit(pata_macio_exit);
MODULE_AUTHOR("Benjamin Herrenschmidt");
MODULE_DESCRIPTION("Apple MacIO PATA driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_macio.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Libata driver for the HighPoint 371N, 372N, and 302N UDMA66 ATA controllers.
*
* This driver is heavily based upon:
*
* linux/drivers/ide/pci/hpt366.c Version 0.36 April 25, 2003
*
* Copyright (C) 1999-2003 Andre Hedrick <[email protected]>
* Portions Copyright (C) 2001 Sun Microsystems, Inc.
* Portions Copyright (C) 2003 Red Hat Inc
* Portions Copyright (C) 2005-2010 MontaVista Software, Inc.
*
*
* TODO
* Work out best PLL policy
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_hpt3x2n"
#define DRV_VERSION "0.3.19"
enum {
PCI66 = (1 << 1),
USE_DPLL = (1 << 0)
};
struct hpt_clock {
u8 xfer_speed;
u32 timing;
};
/* key for bus clock timings
* bit
* 0:3 data_high_time. Inactive time of DIOW_/DIOR_ for PIO and MW DMA.
* cycles = value + 1
* 4:8 data_low_time. Active time of DIOW_/DIOR_ for PIO and MW DMA.
* cycles = value + 1
* 9:12 cmd_high_time. Inactive time of DIOW_/DIOR_ during task file
* register access.
* 13:17 cmd_low_time. Active time of DIOW_/DIOR_ during task file
* register access.
* 18:20 udma_cycle_time. Clock cycles for UDMA xfer.
* 21 CLK frequency for UDMA: 0=ATA clock, 1=dual ATA clock.
* 22:24 pre_high_time. Time to initialize 1st cycle for PIO and MW DMA xfer.
* 25:27 cmd_pre_high_time. Time to initialize 1st PIO cycle for task file
* register access.
* 28 UDMA enable.
* 29 DMA enable.
* 30 PIO_MST enable. If set, the chip is in bus master mode during
* PIO xfer.
* 31 FIFO enable. Only for PIO.
*/
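/*
 * Illustrative sketch only, not part of the original driver: pull the two
 * PIO/MWDMA strobe fields out of a timing word using the bit layout
 * documented above. The helper name is made up for this example; the driver
 * itself programs the whole 32-bit word from the table below unchanged.
 */
static inline void hpt3x2n_example_decode_strobes(u32 timing, u8 *data_high,
						  u8 *data_low)
{
	*data_high = (timing & 0x0f) + 1;	/* bits 0:3, cycles = value + 1 */
	*data_low = ((timing >> 4) & 0x1f) + 1;	/* bits 4:8, cycles = value + 1 */
}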
/* 66MHz DPLL clocks */
static struct hpt_clock hpt3x2n_clocks[] = {
{ XFER_UDMA_7, 0x1c869c62 },
{ XFER_UDMA_6, 0x1c869c62 },
{ XFER_UDMA_5, 0x1c8a9c62 },
{ XFER_UDMA_4, 0x1c8a9c62 },
{ XFER_UDMA_3, 0x1c8e9c62 },
{ XFER_UDMA_2, 0x1c929c62 },
{ XFER_UDMA_1, 0x1c9a9c62 },
{ XFER_UDMA_0, 0x1c829c62 },
{ XFER_MW_DMA_2, 0x2c829c62 },
{ XFER_MW_DMA_1, 0x2c829c66 },
{ XFER_MW_DMA_0, 0x2c829d2e },
{ XFER_PIO_4, 0x0c829c62 },
{ XFER_PIO_3, 0x0c829c84 },
{ XFER_PIO_2, 0x0c829ca6 },
{ XFER_PIO_1, 0x0d029d26 },
{ XFER_PIO_0, 0x0d029d5e },
};
/**
 * hpt3x2n_find_mode - find the clock timing for a transfer mode
* @ap: ATA port
* @speed: transfer mode
*
* Return the 32bit register programming information for this channel
* that matches the speed provided. For the moment the clocks table
* is hard coded but easy to change. This will be needed if we use
* different DPLLs
*/
static u32 hpt3x2n_find_mode(struct ata_port *ap, int speed)
{
struct hpt_clock *clocks = hpt3x2n_clocks;
while (clocks->xfer_speed) {
if (clocks->xfer_speed == speed)
return clocks->timing;
clocks++;
}
BUG();
return 0xffffffffU; /* silence compiler warning */
}
/**
* hpt372n_filter - mode selection filter
* @adev: ATA device
* @mask: mode mask
*
* The Marvell bridge chips used on the HighPoint SATA cards do not seem
* to support the UltraDMA modes 1, 2, and 3 as well as any MWDMA modes...
*/
static unsigned int hpt372n_filter(struct ata_device *adev, unsigned int mask)
{
if (ata_id_is_sata(adev->id))
mask &= ~((0xE << ATA_SHIFT_UDMA) | ATA_MASK_MWDMA);
return mask;
}
/**
* hpt3x2n_cable_detect - Detect the cable type
* @ap: ATA port to detect on
*
* Return the cable type attached to this port
*/
static int hpt3x2n_cable_detect(struct ata_port *ap)
{
u8 scr2, ata66;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
pci_read_config_byte(pdev, 0x5B, &scr2);
pci_write_config_byte(pdev, 0x5B, scr2 & ~0x01);
udelay(10); /* debounce */
/* Cable register now active */
pci_read_config_byte(pdev, 0x5A, &ata66);
/* Restore state */
pci_write_config_byte(pdev, 0x5B, scr2);
if (ata66 & (2 >> ap->port_no))
return ATA_CBL_PATA40;
else
return ATA_CBL_PATA80;
}
/**
* hpt3x2n_pre_reset - reset the hpt3x2n bus
* @link: ATA link to reset
* @deadline: deadline jiffies for the operation
*
* Perform the initial reset handling for the 3x2n series controllers.
 * Reset the hardware and state machine.
*/
static int hpt3x2n_pre_reset(struct ata_link *link, unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
static const struct pci_bits hpt3x2n_enable_bits[] = {
{ 0x50, 1, 0x04, 0x04 },
{ 0x54, 1, 0x04, 0x04 }
};
u8 mcr2;
if (!pci_test_config_bits(pdev, &hpt3x2n_enable_bits[ap->port_no]))
return -ENOENT;
/* Reset the state machine */
pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
udelay(100);
/* Fast interrupt prediction disable, hold off interrupt disable */
pci_read_config_byte(pdev, 0x51 + 4 * ap->port_no, &mcr2);
mcr2 &= ~0x07;
pci_write_config_byte(pdev, 0x51 + 4 * ap->port_no, mcr2);
return ata_sff_prereset(link, deadline);
}
static void hpt3x2n_set_mode(struct ata_port *ap, struct ata_device *adev,
u8 mode)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int addr = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
u32 reg, timing, mask;
/* Determine timing mask and find matching mode entry */
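	/*
	 * The mask selects which bits of the timing word are taken from the
	 * table entry for this mode, leaving the fields that belong to the
	 * other transfer types untouched (an interpretation of the bit
	 * layout documented above).
	 */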
if (mode < XFER_MW_DMA_0)
mask = 0xcfc3ffff;
else if (mode < XFER_UDMA_0)
mask = 0x31c001ff;
else
mask = 0x303c0000;
timing = hpt3x2n_find_mode(ap, mode);
pci_read_config_dword(pdev, addr, ®);
reg = (reg & ~mask) | (timing & mask);
pci_write_config_dword(pdev, addr, reg);
}
/**
* hpt3x2n_set_piomode - PIO setup
* @ap: ATA interface
* @adev: device on the interface
*
* Perform PIO mode setup.
*/
static void hpt3x2n_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
hpt3x2n_set_mode(ap, adev, adev->pio_mode);
}
/**
* hpt3x2n_set_dmamode - DMA timing setup
* @ap: ATA interface
* @adev: Device being configured
*
* Set up the channel for MWDMA or UDMA modes.
*/
static void hpt3x2n_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
hpt3x2n_set_mode(ap, adev, adev->dma_mode);
}
/**
* hpt3x2n_bmdma_stop - DMA engine stop
* @qc: ATA command
*
* Clean up after the HPT3x2n and later DMA engine
*/
static void hpt3x2n_bmdma_stop(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int mscreg = 0x50 + 4 * ap->port_no;
u8 bwsr_stat, msc_stat;
pci_read_config_byte(pdev, 0x6A, &bwsr_stat);
pci_read_config_byte(pdev, mscreg, &msc_stat);
if (bwsr_stat & (1 << ap->port_no))
pci_write_config_byte(pdev, mscreg, msc_stat | 0x30);
ata_bmdma_stop(qc);
}
/**
* hpt3x2n_set_clock - clock control
* @ap: ATA port
* @source: 0x21 or 0x23 for PLL or PCI sourced clock
*
* Switch the ATA bus clock between the PLL and PCI clock sources
* while correctly isolating the bus and resetting internal logic
*
* We must use the DPLL for
* - writing
* - second channel UDMA7 (SATA ports) or higher
* - 66MHz PCI
*
* or we will underclock the device and get reduced performance.
*/
static void hpt3x2n_set_clock(struct ata_port *ap, int source)
{
void __iomem *bmdma = ap->ioaddr.bmdma_addr - ap->port_no * 8;
/* Tristate the bus */
iowrite8(0x80, bmdma+0x73);
iowrite8(0x80, bmdma+0x77);
/* Switch clock and reset channels */
iowrite8(source, bmdma+0x7B);
iowrite8(0xC0, bmdma+0x79);
/* Reset state machines, avoid enabling the disabled channels */
iowrite8(ioread8(bmdma+0x70) | 0x32, bmdma+0x70);
iowrite8(ioread8(bmdma+0x74) | 0x32, bmdma+0x74);
/* Complete reset */
iowrite8(0x00, bmdma+0x79);
/* Reconnect channels to bus */
iowrite8(0x00, bmdma+0x73);
iowrite8(0x00, bmdma+0x77);
}
static int hpt3x2n_use_dpll(struct ata_port *ap, int writing)
{
long flags = (long)ap->host->private_data;
/* See if we should use the DPLL */
if (writing)
return USE_DPLL; /* Needed for write */
if (flags & PCI66)
return USE_DPLL; /* Needed at 66Mhz */
return 0;
}
static int hpt3x2n_qc_defer(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_port *alt = ap->host->ports[ap->port_no ^ 1];
int rc, flags = (long)ap->host->private_data;
int dpll = hpt3x2n_use_dpll(ap, qc->tf.flags & ATA_TFLAG_WRITE);
/* First apply the usual rules */
rc = ata_std_qc_defer(qc);
if (rc != 0)
return rc;
if ((flags & USE_DPLL) != dpll && alt->qc_active)
return ATA_DEFER_PORT;
return 0;
}
static unsigned int hpt3x2n_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
int flags = (long)ap->host->private_data;
int dpll = hpt3x2n_use_dpll(ap, qc->tf.flags & ATA_TFLAG_WRITE);
if ((flags & USE_DPLL) != dpll) {
flags &= ~USE_DPLL;
flags |= dpll;
ap->host->private_data = (void *)(long)flags;
hpt3x2n_set_clock(ap, dpll ? 0x21 : 0x23);
}
return ata_bmdma_qc_issue(qc);
}
static const struct scsi_host_template hpt3x2n_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
/*
* Configuration for HPT302N/371N.
*/
static struct ata_port_operations hpt3xxn_port_ops = {
.inherits = &ata_bmdma_port_ops,
.bmdma_stop = hpt3x2n_bmdma_stop,
.qc_defer = hpt3x2n_qc_defer,
.qc_issue = hpt3x2n_qc_issue,
.cable_detect = hpt3x2n_cable_detect,
.set_piomode = hpt3x2n_set_piomode,
.set_dmamode = hpt3x2n_set_dmamode,
.prereset = hpt3x2n_pre_reset,
};
/*
* Configuration for HPT372N. Same as 302N/371N but we have a mode filter.
*/
static struct ata_port_operations hpt372n_port_ops = {
.inherits = &hpt3xxn_port_ops,
.mode_filter = &hpt372n_filter,
};
/**
* hpt3xn_calibrate_dpll - Calibrate the DPLL loop
* @dev: PCI device
*
* Perform a calibration cycle on the HPT3xN DPLL. Returns 1 if this
* succeeds
*/
static int hpt3xn_calibrate_dpll(struct pci_dev *dev)
{
u8 reg5b;
u32 reg5c;
int tries;
for (tries = 0; tries < 0x5000; tries++) {
udelay(50);
pci_read_config_byte(dev, 0x5b, ®5b);
if (reg5b & 0x80) {
/* See if it stays set */
for (tries = 0; tries < 0x1000; tries++) {
pci_read_config_byte(dev, 0x5b, ®5b);
/* Failed ? */
if ((reg5b & 0x80) == 0)
return 0;
}
/* Turn off tuning, we have the DPLL set */
pci_read_config_dword(dev, 0x5c, ®5c);
pci_write_config_dword(dev, 0x5c, reg5c & ~0x100);
return 1;
}
}
/* Never went stable */
return 0;
}
static int hpt3x2n_pci_clock(struct pci_dev *pdev, unsigned int base)
{
unsigned int freq;
u32 fcnt;
/*
* Some devices do not let this value be accessed via PCI space
* according to the old driver.
*/
fcnt = inl(pci_resource_start(pdev, 4) + 0x90);
if ((fcnt >> 12) != 0xABCDE) {
u32 total = 0;
int i;
u16 sr;
dev_warn(&pdev->dev, "BIOS clock data not set\n");
/* This is the process the HPT371 BIOS is reported to use */
for (i = 0; i < 128; i++) {
pci_read_config_word(pdev, 0x78, &sr);
total += sr & 0x1FF;
udelay(15);
}
fcnt = total / 128;
}
fcnt &= 0x1FF;
freq = (fcnt * base) / 192; /* in MHz */
/* Clamp to bands */
if (freq < 40)
return 33;
if (freq < 45)
return 40;
if (freq < 55)
return 50;
return 66;
}
/**
* hpt3x2n_init_one - Initialise an HPT37X/302
* @dev: PCI device
* @id: Entry in match table
*
* Initialise an HPT3x2n device. There are some interesting complications
* here. Firstly the chip may report 366 and be one of several variants.
* Secondly all the timings depend on the clock for the chip which we must
* detect and look up
*
 * These are the known chip mappings. They may be missing a couple of later
 * releases.
*
* Chip version PCI Rev Notes
* HPT372 4 (HPT366) 5 Other driver
* HPT372N 4 (HPT366) 6 UDMA133
* HPT372 5 (HPT372) 1 Other driver
* HPT372N 5 (HPT372) 2 UDMA133
* HPT302 6 (HPT302) * Other driver
* HPT302N 6 (HPT302) > 1 UDMA133
* HPT371 7 (HPT371) * Other driver
* HPT371N 7 (HPT371) > 1 UDMA133
* HPT374 8 (HPT374) * Other driver
* HPT372N 9 (HPT372N) * UDMA133
*
* (1) UDMA133 support depends on the bus clock
*/
static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
/* HPT372N - UDMA133 */
static const struct ata_port_info info_hpt372n = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &hpt372n_port_ops
};
/* HPT302N and HPT371N - UDMA133 */
static const struct ata_port_info info_hpt3xxn = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &hpt3xxn_port_ops
};
const struct ata_port_info *ppi[] = { &info_hpt3xxn, NULL };
u8 rev = dev->revision;
u8 irqmask;
unsigned int pci_mhz;
unsigned int f_low, f_high;
int adjust;
unsigned long iobase = pci_resource_start(dev, 4);
void *hpriv = (void *)USE_DPLL;
int rc;
rc = pcim_enable_device(dev);
if (rc)
return rc;
switch (dev->device) {
case PCI_DEVICE_ID_TTI_HPT366:
/* 372N if rev >= 6 */
if (rev < 6)
return -ENODEV;
goto hpt372n;
case PCI_DEVICE_ID_TTI_HPT371:
/* 371N if rev >= 2 */
if (rev < 2)
return -ENODEV;
break;
case PCI_DEVICE_ID_TTI_HPT372:
/* 372N if rev >= 2 */
if (rev < 2)
return -ENODEV;
goto hpt372n;
case PCI_DEVICE_ID_TTI_HPT302:
/* 302N if rev >= 2 */
if (rev < 2)
return -ENODEV;
break;
case PCI_DEVICE_ID_TTI_HPT372N:
hpt372n:
ppi[0] = &info_hpt372n;
break;
default:
dev_err(&dev->dev,"PCI table is bogus, please report (%d)\n",
dev->device);
return -ENODEV;
}
/* Ok so this is a chip we support */
pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4));
pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78);
pci_write_config_byte(dev, PCI_MIN_GNT, 0x08);
pci_write_config_byte(dev, PCI_MAX_LAT, 0x08);
pci_read_config_byte(dev, 0x5A, &irqmask);
irqmask &= ~0x10;
pci_write_config_byte(dev, 0x5a, irqmask);
/*
* HPT371 chips physically have only one channel, the secondary one,
* but the primary channel registers do exist! Go figure...
* So, we manually disable the non-existing channel here
* (if the BIOS hasn't done this already).
*/
if (dev->device == PCI_DEVICE_ID_TTI_HPT371) {
u8 mcr1;
pci_read_config_byte(dev, 0x50, &mcr1);
mcr1 &= ~0x04;
pci_write_config_byte(dev, 0x50, mcr1);
}
/*
	 * Tune the PLL. HPT recommends using 75 for SATA, 66 for UDMA133 or
	 * 50 for UDMA100. Right now we always use 66.
*/
pci_mhz = hpt3x2n_pci_clock(dev, 77);
f_low = (pci_mhz * 48) / 66; /* PCI Mhz for 66Mhz DPLL */
f_high = f_low + 2; /* Tolerance */
pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low | 0x100);
/* PLL clock */
pci_write_config_byte(dev, 0x5B, 0x21);
/* Unlike the 37x we don't try jiggling the frequency */
for (adjust = 0; adjust < 8; adjust++) {
if (hpt3xn_calibrate_dpll(dev))
break;
pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low);
}
if (adjust == 8) {
dev_err(&dev->dev, "DPLL did not stabilize!\n");
return -ENODEV;
}
dev_info(&dev->dev, "bus clock %dMHz, using 66MHz DPLL\n", pci_mhz);
/*
* Set our private data up. We only need a few flags
* so we use it directly.
*/
if (pci_mhz > 60)
hpriv = (void *)(PCI66 | USE_DPLL);
/*
* On HPT371N, if ATA clock is 66 MHz we must set bit 2 in
* the MISC. register to stretch the UltraDMA Tss timing.
* NOTE: This register is only writeable via I/O space.
*/
if (dev->device == PCI_DEVICE_ID_TTI_HPT371)
outb(inb(iobase + 0x9c) | 0x04, iobase + 0x9c);
/* Now kick off ATA set up */
return ata_pci_bmdma_init_one(dev, ppi, &hpt3x2n_sht, hpriv, 0);
}
static const struct pci_device_id hpt3x2n[] = {
{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), },
{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT371), },
{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372), },
{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT302), },
{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372N), },
{ },
};
static struct pci_driver hpt3x2n_pci_driver = {
.name = DRV_NAME,
.id_table = hpt3x2n,
.probe = hpt3x2n_init_one,
.remove = ata_pci_remove_one
};
module_pci_driver(hpt3x2n_pci_driver);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for the Highpoint HPT3xxN");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, hpt3x2n);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_hpt3x2n.c |